// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2022 by Delphix. All rights reserved.
 * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2019, 2023, 2024, 2025, Klara, Inc.
 * Copyright (c) 2019, Allan Jude
 * Copyright (c) 2021, Datto, Inc.
 * Copyright (c) 2021, 2024 by George Melikov. All rights reserved.
 */

#include <sys/sysmacros.h>
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_trim.h>
#include <sys/zio_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/brt.h>
#include <sys/ddt.h>
#include <sys/blkptr.h>
#include <sys/zfeature.h>
#include <sys/dsl_scan.h>
#include <sys/metaslab_impl.h>
#include <sys/time.h>
#include <sys/trace_zfs.h>
#include <sys/abd.h>
#include <sys/dsl_crypt.h>
#include <cityhash.h>

/*
 * ==========================================================================
 * I/O type descriptions
 * ==========================================================================
 */
const char *const zio_type_name[ZIO_TYPES] = {
	/*
	 * Note: Linux kernel thread name length is limited,
	 * so these names will differ from upstream OpenZFS.
	 */
	"z_null", "z_rd", "z_wr", "z_fr", "z_cl", "z_flush", "z_trim"
};

int zio_dva_throttle_enabled = B_TRUE;
static int zio_deadman_log_all = B_FALSE;

/*
 * ==========================================================================
 * I/O kmem caches
 * ==========================================================================
 */
static kmem_cache_t *zio_cache;
static kmem_cache_t *zio_link_cache;
kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
static uint64_t zio_buf_cache_allocs[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
static uint64_t zio_buf_cache_frees[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
#endif

/* Mark IOs as "slow" if they take longer than 30 seconds */
static uint_t zio_slow_io_ms = (30 * MILLISEC);

#define	BP_SPANB(indblkshift, level) \
	(((uint64_t)1) << ((level) * ((indblkshift) - SPA_BLKPTRSHIFT)))
#define	COMPARE_META_LEVEL	0x80000000ul
/*
 * The following actions directly affect the spa's sync-to-convergence logic.
 * The values below define the sync pass when we start performing the action.
 * Care should be taken when changing these values as they directly impact
 * spa_sync() performance. Tuning these values may introduce subtle performance
 * pathologies and should only be done in the context of performance analysis.
 * These tunables will eventually be removed and replaced with #defines once
 * enough analysis has been done to determine optimal values.
 *
 * The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that
 * regular blocks are not deferred.
 *
 * Starting in sync pass 8 (zfs_sync_pass_dont_compress), we disable
 * compression (including of metadata). In practice, we don't have this
 * many sync passes, so this has no effect.
 *
 * The original intent was that disabling compression would help the sync
 * passes to converge. However, in practice disabling compression increases
 * the average number of sync passes, because when we turn compression off,
 * many blocks' sizes will change, and thus we have to re-allocate (not
 * overwrite) them. It also increases the number of 128KB allocations (e.g.
 * for indirect blocks and spacemaps) because these will not be compressed.
 * The 128K allocations are especially detrimental to performance on highly
 * fragmented systems, which may have very few free segments of this size,
 * and may need to load new metaslabs to satisfy 128K allocations.
 */

/* defer frees starting in this pass */
uint_t zfs_sync_pass_deferred_free = 2;

/* don't compress starting in this pass */
static uint_t zfs_sync_pass_dont_compress = 8;

/* rewrite new bps starting in this pass */
static uint_t zfs_sync_pass_rewrite = 2;
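
/*
 * Illustrative note (not authoritative): with the defaults above, sync
 * pass 1 is the only pass in which frees of plain (non-gang, non-dedup,
 * non-BRT) blocks are processed immediately; see zio_free() below, which
 * defers frees from pass 2 onward unless the log spacemap feature is
 * active. Compression would only be disabled from pass 8 on, which in
 * practice is rarely reached.
 */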

/*
 * An allocating zio is one that either currently has the DVA allocate
 * stage set or will have it later in its lifetime.
 */
#define	IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)

/*
 * Enable smaller cores by excluding metadata
 * allocations as well.
 */
int zio_exclude_metadata = 0;
static int zio_requeue_io_start_cut_in_line = 1;

#ifdef ZFS_DEBUG
static const int zio_buf_debug_limit = 16384;
#else
static const int zio_buf_debug_limit = 0;
#endif

typedef struct zio_stats {
	kstat_named_t ziostat_total_allocations;
	kstat_named_t ziostat_alloc_class_fallbacks;
	kstat_named_t ziostat_gang_writes;
	kstat_named_t ziostat_gang_multilevel;
} zio_stats_t;

static zio_stats_t zio_stats = {
	{ "total_allocations",		KSTAT_DATA_UINT64 },
	{ "alloc_class_fallbacks",	KSTAT_DATA_UINT64 },
	{ "gang_writes",		KSTAT_DATA_UINT64 },
	{ "gang_multilevel",		KSTAT_DATA_UINT64 },
};

struct {
	wmsum_t ziostat_total_allocations;
	wmsum_t ziostat_alloc_class_fallbacks;
	wmsum_t ziostat_gang_writes;
	wmsum_t ziostat_gang_multilevel;
} ziostat_sums;

#define	ZIOSTAT_BUMP(stat)	wmsum_add(&ziostat_sums.stat, 1);

static kstat_t *zio_ksp;

static inline void __zio_execute(zio_t *zio);

static void zio_taskq_dispatch(zio_t *, zio_taskq_type_t, boolean_t);

static int
zio_kstats_update(kstat_t *ksp, int rw)
{
	zio_stats_t *zs = ksp->ks_data;
	if (rw == KSTAT_WRITE)
		return (EACCES);

	zs->ziostat_total_allocations.value.ui64 =
	    wmsum_value(&ziostat_sums.ziostat_total_allocations);
	zs->ziostat_alloc_class_fallbacks.value.ui64 =
	    wmsum_value(&ziostat_sums.ziostat_alloc_class_fallbacks);
	zs->ziostat_gang_writes.value.ui64 =
	    wmsum_value(&ziostat_sums.ziostat_gang_writes);
	zs->ziostat_gang_multilevel.value.ui64 =
	    wmsum_value(&ziostat_sums.ziostat_gang_multilevel);
	return (0);
}

void
zio_init(void)
{
	size_t c;

	zio_cache = kmem_cache_create("zio_cache",
	    sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
	zio_link_cache = kmem_cache_create("zio_link_cache",
	    sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	wmsum_init(&ziostat_sums.ziostat_total_allocations, 0);
	wmsum_init(&ziostat_sums.ziostat_alloc_class_fallbacks, 0);
	wmsum_init(&ziostat_sums.ziostat_gang_writes, 0);
	wmsum_init(&ziostat_sums.ziostat_gang_multilevel, 0);
	zio_ksp = kstat_create("zfs", 0, "zio_stats",
	    "misc", KSTAT_TYPE_NAMED, sizeof (zio_stats) /
	    sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
	if (zio_ksp != NULL) {
		zio_ksp->ks_data = &zio_stats;
		zio_ksp->ks_update = zio_kstats_update;
		kstat_install(zio_ksp);
	}

	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
		size_t align, cflags, data_cflags;
		char name[32];

		/*
		 * Create cache for each half-power of 2 size, starting from
		 * SPA_MINBLOCKSIZE. It should give us memory space efficiency
		 * of ~7/8, sufficient for transient allocations mostly using
		 * these caches.
		 */
		size_t p2 = size;
		while (!ISP2(p2))
			p2 &= p2 - 1;
		if (!IS_P2ALIGNED(size, p2 / 2))
			continue;

#ifndef _KERNEL
		/*
		 * If we are using watchpoints, put each buffer on its own page,
		 * to eliminate the performance overhead of trapping to the
		 * kernel when modifying a non-watched buffer that shares the
		 * page with a watched buffer.
		 */
		if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE))
			continue;
#endif

		if (IS_P2ALIGNED(size, PAGESIZE))
			align = PAGESIZE;
		else
			align = 1 << (highbit64(size ^ (size - 1)) - 1);

		cflags = (zio_exclude_metadata || size > zio_buf_debug_limit) ?
		    KMC_NODEBUG : 0;
		data_cflags = KMC_NODEBUG;
		if (abd_size_alloc_linear(size)) {
			cflags |= KMC_RECLAIMABLE;
			data_cflags |= KMC_RECLAIMABLE;
		}
		if (cflags == data_cflags) {
			/*
			 * Resulting kmem caches would be identical.
			 * Save memory by creating only one.
			 */
			(void) snprintf(name, sizeof (name),
			    "zio_buf_comb_%lu", (ulong_t)size);
			zio_buf_cache[c] = kmem_cache_create(name, size, align,
			    NULL, NULL, NULL, NULL, NULL, cflags);
			zio_data_buf_cache[c] = zio_buf_cache[c];
			continue;
		}
		(void) snprintf(name, sizeof (name), "zio_buf_%lu",
		    (ulong_t)size);
		zio_buf_cache[c] = kmem_cache_create(name, size, align,
		    NULL, NULL, NULL, NULL, NULL, cflags);

		(void) snprintf(name, sizeof (name), "zio_data_buf_%lu",
		    (ulong_t)size);
		zio_data_buf_cache[c] = kmem_cache_create(name, size, align,
		    NULL, NULL, NULL, NULL, NULL, data_cflags);
	}

	while (--c != 0) {
		ASSERT(zio_buf_cache[c] != NULL);
		if (zio_buf_cache[c - 1] == NULL)
			zio_buf_cache[c - 1] = zio_buf_cache[c];

		ASSERT(zio_data_buf_cache[c] != NULL);
		if (zio_data_buf_cache[c - 1] == NULL)
			zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
	}

	zio_inject_init();

	lz4_init();
}
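
/*
 * Illustrative note: the half-power-of-2 filter in zio_init() keeps every
 * power of 2 plus the midpoint between successive powers, so (assuming the
 * usual 512-byte SPA_MINBLOCKSIZE) caches are created for 512, 1024, 1536,
 * 2048, 3072, 4096, 6144, 8192, ... up to SPA_MAXBLOCKSIZE, and the
 * trailing fix-up loop points every intermediate size at the next cache
 * above it.
 */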

void
zio_fini(void)
{
	size_t n = SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT;

#if defined(ZFS_DEBUG) && !defined(_KERNEL)
	for (size_t i = 0; i < n; i++) {
		if (zio_buf_cache_allocs[i] != zio_buf_cache_frees[i])
			(void) printf("zio_fini: [%d] %llu != %llu\n",
			    (int)((i + 1) << SPA_MINBLOCKSHIFT),
			    (long long unsigned)zio_buf_cache_allocs[i],
			    (long long unsigned)zio_buf_cache_frees[i]);
	}
#endif

	/*
	 * The same kmem cache can show up multiple times in both zio_buf_cache
	 * and zio_data_buf_cache. Do a wasteful but trivially correct scan to
	 * sort it out.
	 */
	for (size_t i = 0; i < n; i++) {
		kmem_cache_t *cache = zio_buf_cache[i];
		if (cache == NULL)
			continue;
		for (size_t j = i; j < n; j++) {
			if (cache == zio_buf_cache[j])
				zio_buf_cache[j] = NULL;
			if (cache == zio_data_buf_cache[j])
				zio_data_buf_cache[j] = NULL;
		}
		kmem_cache_destroy(cache);
	}

	for (size_t i = 0; i < n; i++) {
		kmem_cache_t *cache = zio_data_buf_cache[i];
		if (cache == NULL)
			continue;
		for (size_t j = i; j < n; j++) {
			if (cache == zio_data_buf_cache[j])
				zio_data_buf_cache[j] = NULL;
		}
		kmem_cache_destroy(cache);
	}

	for (size_t i = 0; i < n; i++) {
		VERIFY3P(zio_buf_cache[i], ==, NULL);
		VERIFY3P(zio_data_buf_cache[i], ==, NULL);
	}

	if (zio_ksp != NULL) {
		kstat_delete(zio_ksp);
		zio_ksp = NULL;
	}

	wmsum_fini(&ziostat_sums.ziostat_total_allocations);
	wmsum_fini(&ziostat_sums.ziostat_alloc_class_fallbacks);
	wmsum_fini(&ziostat_sums.ziostat_gang_writes);
	wmsum_fini(&ziostat_sums.ziostat_gang_multilevel);

	kmem_cache_destroy(zio_link_cache);
	kmem_cache_destroy(zio_cache);

	zio_inject_fini();

	lz4_fini();
}

/*
 * ==========================================================================
 * Allocate and free I/O buffers
 * ==========================================================================
 */

#if defined(ZFS_DEBUG) && defined(_KERNEL)
#define	ZFS_ZIO_BUF_CANARY	1
#endif

#ifdef ZFS_ZIO_BUF_CANARY
static const ulong_t zio_buf_canary = (ulong_t)0xdeadc0dedead210b;

/*
 * Use empty space after the buffer to detect overflows.
 *
 * Since zio_init() creates kmem caches only for a certain set of buffer
 * sizes, allocations of different sizes may have some unused space after the
 * data. Filling part of that space with a known pattern on allocation and
 * checking it on free should allow us to detect some buffer overflows.
 */
static void
zio_buf_put_canary(ulong_t *p, size_t size, kmem_cache_t **cache, size_t c)
{
	size_t off = P2ROUNDUP(size, sizeof (ulong_t));
	ulong_t *canary = p + off / sizeof (ulong_t);
	size_t asize = (c + 1) << SPA_MINBLOCKSHIFT;
	if (c + 1 < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT &&
	    cache[c] == cache[c + 1])
		asize = (c + 2) << SPA_MINBLOCKSHIFT;
	for (; off < asize; canary++, off += sizeof (ulong_t))
		*canary = zio_buf_canary;
}

static void
zio_buf_check_canary(ulong_t *p, size_t size, kmem_cache_t **cache, size_t c)
{
	size_t off = P2ROUNDUP(size, sizeof (ulong_t));
	ulong_t *canary = p + off / sizeof (ulong_t);
	size_t asize = (c + 1) << SPA_MINBLOCKSHIFT;
	if (c + 1 < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT &&
	    cache[c] == cache[c + 1])
		asize = (c + 2) << SPA_MINBLOCKSHIFT;
	for (; off < asize; canary++, off += sizeof (ulong_t)) {
		if (unlikely(*canary != zio_buf_canary)) {
			PANIC("ZIO buffer overflow %p (%zu) + %zu %#lx != %#lx",
			    p, size, (canary - p) * sizeof (ulong_t),
			    *canary, zio_buf_canary);
		}
	}
}
#endif
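
/*
 * Worked example (illustrative): a zio_buf_alloc(600) request maps to size
 * class c = (600 - 1) >> SPA_MINBLOCKSHIFT = 1, i.e. the 1024-byte cache
 * created by zio_init(). 600 is already ulong_t-aligned, so the canary
 * pattern fills offsets [600, 1024); any write past the caller's 600 bytes
 * is then caught by zio_buf_check_canary() on free.
 */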

/*
 * Use zio_buf_alloc to allocate ZFS metadata. This data will appear in a
 * crashdump if the kernel panics, so use it judiciously. Obviously, it's
 * useful to inspect ZFS metadata, but if possible, we should avoid keeping
 * excess / transient data in-core during a crashdump.
 */
void *
zio_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
	atomic_add_64(&zio_buf_cache_allocs[c], 1);
#endif

	void *p = kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE);
#ifdef ZFS_ZIO_BUF_CANARY
	zio_buf_put_canary(p, size, zio_buf_cache, c);
#endif
	return (p);
}

/*
 * Use zio_data_buf_alloc to allocate data. The data will not appear in a
 * crashdump if the kernel panics. This exists so that we will limit the amount
 * of ZFS data that shows up in a kernel crashdump. (Thus reducing the amount
 * of kernel heap dumped to disk when the kernel panics)
 */
void *
zio_data_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	void *p = kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE);
#ifdef ZFS_ZIO_BUF_CANARY
	zio_buf_put_canary(p, size, zio_data_buf_cache, c);
#endif
	return (p);
}

void
zio_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
	atomic_add_64(&zio_buf_cache_frees[c], 1);
#endif

#ifdef ZFS_ZIO_BUF_CANARY
	zio_buf_check_canary(buf, size, zio_buf_cache, c);
#endif
	kmem_cache_free(zio_buf_cache[c], buf);
}

void
zio_data_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

#ifdef ZFS_ZIO_BUF_CANARY
	zio_buf_check_canary(buf, size, zio_data_buf_cache, c);
#endif
	kmem_cache_free(zio_data_buf_cache[c], buf);
}

static void
zio_abd_free(void *abd, size_t size)
{
	(void) size;
	abd_free((abd_t *)abd);
}

/*
 * ==========================================================================
 * Push and pop I/O transform buffers
 * ==========================================================================
 */
void
zio_push_transform(zio_t *zio, abd_t *data, uint64_t size, uint64_t bufsize,
    zio_transform_func_t *transform)
{
	zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);

	zt->zt_orig_abd = zio->io_abd;
	zt->zt_orig_size = zio->io_size;
	zt->zt_bufsize = bufsize;
	zt->zt_transform = transform;

	zt->zt_next = zio->io_transform_stack;
	zio->io_transform_stack = zt;

	zio->io_abd = data;
	zio->io_size = size;
}

void
zio_pop_transforms(zio_t *zio)
{
	zio_transform_t *zt;

	while ((zt = zio->io_transform_stack) != NULL) {
		if (zt->zt_transform != NULL)
			zt->zt_transform(zio,
			    zt->zt_orig_abd, zt->zt_orig_size);

		if (zt->zt_bufsize != 0)
			abd_free(zio->io_abd);

		zio->io_abd = zt->zt_orig_abd;
		zio->io_size = zt->zt_orig_size;
		zio->io_transform_stack = zt->zt_next;

		kmem_free(zt, sizeof (zio_transform_t));
	}
}
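
/*
 * Usage sketch (illustrative): a consumer that needs post-read processing
 * pushes a transform with a scratch buffer; e.g. zio_read_bp_init() below
 * does
 *
 *	zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
 *	    psize, psize, zio_decompress);
 *
 * so the device reads into the scratch abd, and zio_pop_transforms() later
 * runs zio_decompress() into the original abd and frees the scratch buffer
 * (because zt_bufsize != 0).
 */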

/*
 * ==========================================================================
 * I/O transform callbacks for subblocks, decompression, and decryption
 * ==========================================================================
 */
static void
zio_subblock(zio_t *zio, abd_t *data, uint64_t size)
{
	ASSERT(zio->io_size > size);

	if (zio->io_type == ZIO_TYPE_READ)
		abd_copy(data, zio->io_abd, size);
}

static void
zio_decompress(zio_t *zio, abd_t *data, uint64_t size)
{
	if (zio->io_error == 0) {
		int ret = zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
		    zio->io_abd, data, zio->io_size, size,
		    &zio->io_prop.zp_complevel);

		if (zio_injection_enabled && ret == 0)
			ret = zio_handle_fault_injection(zio, EINVAL);

		if (ret != 0)
			zio->io_error = SET_ERROR(EIO);
	}
}

static void
zio_decrypt(zio_t *zio, abd_t *data, uint64_t size)
{
	int ret;
	void *tmp;
	blkptr_t *bp = zio->io_bp;
	spa_t *spa = zio->io_spa;
	uint64_t dsobj = zio->io_bookmark.zb_objset;
	uint64_t lsize = BP_GET_LSIZE(bp);
	dmu_object_type_t ot = BP_GET_TYPE(bp);
	uint8_t salt[ZIO_DATA_SALT_LEN];
	uint8_t iv[ZIO_DATA_IV_LEN];
	uint8_t mac[ZIO_DATA_MAC_LEN];
	boolean_t no_crypt = B_FALSE;

	ASSERT(BP_USES_CRYPT(bp));
	ASSERT3U(size, !=, 0);

	if (zio->io_error != 0)
		return;

	/*
	 * Verify the cksum of MACs stored in an indirect bp. It will always
	 * be possible to verify this since it does not require an encryption
	 * key.
	 */
	if (BP_HAS_INDIRECT_MAC_CKSUM(bp)) {
		zio_crypt_decode_mac_bp(bp, mac);

		if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF) {
			/*
			 * We haven't decompressed the data yet, but
			 * zio_crypt_do_indirect_mac_checksum() requires
			 * decompressed data to be able to parse out the MACs
			 * from the indirect block. We decompress it now and
			 * throw away the result after we are finished.
			 */
			abd_t *abd = abd_alloc_linear(lsize, B_TRUE);
			ret = zio_decompress_data(BP_GET_COMPRESS(bp),
			    zio->io_abd, abd, zio->io_size, lsize,
			    &zio->io_prop.zp_complevel);
			if (ret != 0) {
				abd_free(abd);
				ret = SET_ERROR(EIO);
				goto error;
			}
			ret = zio_crypt_do_indirect_mac_checksum_abd(B_FALSE,
			    abd, lsize, BP_SHOULD_BYTESWAP(bp), mac);
			abd_free(abd);
		} else {
			ret = zio_crypt_do_indirect_mac_checksum_abd(B_FALSE,
			    zio->io_abd, size, BP_SHOULD_BYTESWAP(bp), mac);
		}
		abd_copy(data, zio->io_abd, size);

		if (zio_injection_enabled && ot != DMU_OT_DNODE && ret == 0) {
			ret = zio_handle_decrypt_injection(spa,
			    &zio->io_bookmark, ot, ECKSUM);
		}
		if (ret != 0)
			goto error;

		return;
	}

	/*
	 * If this is an authenticated block, just check the MAC. It would be
	 * nice to separate this out into its own flag, but when this was done,
	 * we had run out of bits in what is now zio_flag_t. Future cleanup
	 * could make this a flag bit.
	 */
	if (BP_IS_AUTHENTICATED(bp)) {
		if (ot == DMU_OT_OBJSET) {
			ret = spa_do_crypt_objset_mac_abd(B_FALSE, spa,
			    dsobj, zio->io_abd, size, BP_SHOULD_BYTESWAP(bp));
		} else {
			zio_crypt_decode_mac_bp(bp, mac);
			ret = spa_do_crypt_mac_abd(B_FALSE, spa, dsobj,
			    zio->io_abd, size, mac);
			if (zio_injection_enabled && ret == 0) {
				ret = zio_handle_decrypt_injection(spa,
				    &zio->io_bookmark, ot, ECKSUM);
			}
		}
		abd_copy(data, zio->io_abd, size);

		if (ret != 0)
			goto error;

		return;
	}

	zio_crypt_decode_params_bp(bp, salt, iv);

	if (ot == DMU_OT_INTENT_LOG) {
		tmp = abd_borrow_buf_copy(zio->io_abd, sizeof (zil_chain_t));
		zio_crypt_decode_mac_zil(tmp, mac);
		abd_return_buf(zio->io_abd, tmp, sizeof (zil_chain_t));
	} else {
		zio_crypt_decode_mac_bp(bp, mac);
	}

	ret = spa_do_crypt_abd(B_FALSE, spa, &zio->io_bookmark, BP_GET_TYPE(bp),
	    BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp), salt, iv, mac, size, data,
	    zio->io_abd, &no_crypt);
	if (no_crypt)
		abd_copy(data, zio->io_abd, size);

	if (ret != 0)
		goto error;

	return;

error:
	/* assert that the key was found unless this was speculative */
	ASSERT(ret != EACCES || (zio->io_flags & ZIO_FLAG_SPECULATIVE));

	/*
	 * If there was a decryption / authentication error, return EIO as
	 * the io_error. If this was not a speculative zio, create an ereport.
	 */
	if (ret == ECKSUM) {
		zio->io_error = SET_ERROR(EIO);
		if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) {
			spa_log_error(spa, &zio->io_bookmark,
			    BP_GET_LOGICAL_BIRTH(zio->io_bp));
			(void) zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION,
			    spa, NULL, &zio->io_bookmark, zio, 0);
		}
	} else {
		zio->io_error = ret;
	}
}

/*
 * ==========================================================================
 * I/O parent/child relationships and pipeline interlocks
 * ==========================================================================
 */
zio_t *
zio_walk_parents(zio_t *cio, zio_link_t **zl)
{
	list_t *pl = &cio->io_parent_list;

	*zl = (*zl == NULL) ? list_head(pl) : list_next(pl, *zl);
	if (*zl == NULL)
		return (NULL);

	ASSERT((*zl)->zl_child == cio);
	return ((*zl)->zl_parent);
}

zio_t *
zio_walk_children(zio_t *pio, zio_link_t **zl)
{
	list_t *cl = &pio->io_child_list;

	ASSERT(MUTEX_HELD(&pio->io_lock));

	*zl = (*zl == NULL) ? list_head(cl) : list_next(cl, *zl);
	if (*zl == NULL)
		return (NULL);

	ASSERT((*zl)->zl_parent == pio);
	return ((*zl)->zl_child);
}

zio_t *
zio_unique_parent(zio_t *cio)
{
	zio_link_t *zl = NULL;
	zio_t *pio = zio_walk_parents(cio, &zl);

	VERIFY3P(zio_walk_parents(cio, &zl), ==, NULL);
	return (pio);
}
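
/*
 * Iteration sketch (illustrative): callers walk the parent list with a
 * cursor initialized to NULL, e.g.
 *
 *	zio_link_t *zl = NULL;
 *	zio_t *pio;
 *	while ((pio = zio_walk_parents(cio, &zl)) != NULL)
 *		... examine pio ...
 *
 * zio_unique_parent() above is this pattern specialized to the case where
 * at most one parent may exist.
 */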

void
zio_add_child(zio_t *pio, zio_t *cio)
{
	/*
	 * Logical I/Os can have logical, gang, or vdev children.
	 * Gang I/Os can have gang or vdev children.
	 * Vdev I/Os can only have vdev children.
	 * The following ASSERT captures all of these constraints.
	 */
	ASSERT3S(cio->io_child_type, <=, pio->io_child_type);

	/* Parent should not have READY stage if child doesn't have it. */
	IMPLY((cio->io_pipeline & ZIO_STAGE_READY) == 0 &&
	    (cio->io_child_type != ZIO_CHILD_VDEV),
	    (pio->io_pipeline & ZIO_STAGE_READY) == 0);

	zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);
	zl->zl_parent = pio;
	zl->zl_child = cio;

	mutex_enter(&pio->io_lock);
	mutex_enter(&cio->io_lock);

	ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);

	uint64_t *countp = pio->io_children[cio->io_child_type];
	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
		countp[w] += !cio->io_state[w];

	list_insert_head(&pio->io_child_list, zl);
	list_insert_head(&cio->io_parent_list, zl);

	mutex_exit(&cio->io_lock);
	mutex_exit(&pio->io_lock);
}

void
zio_add_child_first(zio_t *pio, zio_t *cio)
{
	/*
	 * Logical I/Os can have logical, gang, or vdev children.
	 * Gang I/Os can have gang or vdev children.
	 * Vdev I/Os can only have vdev children.
	 * The following ASSERT captures all of these constraints.
	 */
	ASSERT3S(cio->io_child_type, <=, pio->io_child_type);

	/* Parent should not have READY stage if child doesn't have it. */
	IMPLY((cio->io_pipeline & ZIO_STAGE_READY) == 0 &&
	    (cio->io_child_type != ZIO_CHILD_VDEV),
	    (pio->io_pipeline & ZIO_STAGE_READY) == 0);

	zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);
	zl->zl_parent = pio;
	zl->zl_child = cio;

	ASSERT(list_is_empty(&cio->io_parent_list));
	list_insert_head(&cio->io_parent_list, zl);

	mutex_enter(&pio->io_lock);

	ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);

	uint64_t *countp = pio->io_children[cio->io_child_type];
	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
		countp[w] += !cio->io_state[w];

	list_insert_head(&pio->io_child_list, zl);

	mutex_exit(&pio->io_lock);
}

static void
zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
{
	ASSERT(zl->zl_parent == pio);
	ASSERT(zl->zl_child == cio);

	mutex_enter(&pio->io_lock);
	mutex_enter(&cio->io_lock);

	list_remove(&pio->io_child_list, zl);
	list_remove(&cio->io_parent_list, zl);

	mutex_exit(&cio->io_lock);
	mutex_exit(&pio->io_lock);
	kmem_cache_free(zio_link_cache, zl);
}

static boolean_t
zio_wait_for_children(zio_t *zio, uint8_t childbits, enum zio_wait_type wait)
{
	boolean_t waiting = B_FALSE;

	mutex_enter(&zio->io_lock);
	ASSERT(zio->io_stall == NULL);
	for (int c = 0; c < ZIO_CHILD_TYPES; c++) {
		if (!(ZIO_CHILD_BIT_IS_SET(childbits, c)))
			continue;

		uint64_t *countp = &zio->io_children[c][wait];
		if (*countp != 0) {
			zio->io_stage >>= 1;
			ASSERT3U(zio->io_stage, !=, ZIO_STAGE_OPEN);
			zio->io_stall = countp;
			waiting = B_TRUE;
			break;
		}
	}
	mutex_exit(&zio->io_lock);
	return (waiting);
}
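
/*
 * Usage sketch (illustrative): pipeline stages call this and return NULL to
 * suspend themselves until the children catch up; e.g. zio_write_compress()
 * below starts with
 *
 *	if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
 *	    ZIO_CHILD_GANG_BIT, ZIO_WAIT_READY))
 *		return (NULL);
 *
 * Backing io_stage up by one bit means the same stage re-runs when
 * zio_notify_parent() eventually restarts the stalled zio.
 */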

__attribute__((always_inline))
static inline void
zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait,
    zio_t **next_to_executep)
{
	uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
	int *errorp = &pio->io_child_error[zio->io_child_type];

	mutex_enter(&pio->io_lock);
	if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
		*errorp = zio_worst_error(*errorp, zio->io_error);
	pio->io_reexecute |= zio->io_reexecute;
	ASSERT3U(*countp, >, 0);

	/*
	 * Propagate the Direct I/O checksum verify failure to the parent.
	 */
	if (zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR)
		pio->io_flags |= ZIO_FLAG_DIO_CHKSUM_ERR;

	(*countp)--;

	if (*countp == 0 && pio->io_stall == countp) {
		zio_taskq_type_t type =
		    pio->io_stage < ZIO_STAGE_VDEV_IO_START ? ZIO_TASKQ_ISSUE :
		    ZIO_TASKQ_INTERRUPT;
		pio->io_stall = NULL;
		mutex_exit(&pio->io_lock);

		/*
		 * If we can tell the caller to execute this parent next, do
		 * so. We do this if the parent's zio type matches the child's
		 * type, or if it's a zio_null() with no done callback, and so
		 * has no actual work to do. Otherwise dispatch the parent zio
		 * in its own taskq.
		 *
		 * Having the caller execute the parent when possible reduces
		 * locking on the zio taskq's, reduces context switch
		 * overhead, and has no recursion penalty. Note that one
		 * read from disk typically causes at least 3 zio's: a
		 * zio_null(), the logical zio_read(), and then a physical
		 * zio. When the physical ZIO completes, we are able to call
		 * zio_done() on all 3 of these zio's from one invocation of
		 * zio_execute() by returning the parent back to
		 * zio_execute(). Since the parent isn't executed until this
		 * thread returns back to zio_execute(), the caller should do
		 * so promptly.
		 *
		 * In other cases, dispatching the parent prevents
		 * overflowing the stack when we have deeply nested
		 * parent-child relationships, as we do with the "mega zio"
		 * of writes for spa_sync(), and the chain of ZIL blocks.
		 */
		if (next_to_executep != NULL && *next_to_executep == NULL &&
		    (pio->io_type == zio->io_type ||
		    (pio->io_type == ZIO_TYPE_NULL && !pio->io_done))) {
			*next_to_executep = pio;
		} else {
			zio_taskq_dispatch(pio, type, B_FALSE);
		}
	} else {
		mutex_exit(&pio->io_lock);
	}
}

static void
zio_inherit_child_errors(zio_t *zio, enum zio_child c)
{
	if (zio->io_child_error[c] != 0 && zio->io_error == 0)
		zio->io_error = zio->io_child_error[c];
}

int
zio_bookmark_compare(const void *x1, const void *x2)
{
	const zio_t *z1 = x1;
	const zio_t *z2 = x2;

	if (z1->io_bookmark.zb_objset < z2->io_bookmark.zb_objset)
		return (-1);
	if (z1->io_bookmark.zb_objset > z2->io_bookmark.zb_objset)
		return (1);

	if (z1->io_bookmark.zb_object < z2->io_bookmark.zb_object)
		return (-1);
	if (z1->io_bookmark.zb_object > z2->io_bookmark.zb_object)
		return (1);

	if (z1->io_bookmark.zb_level < z2->io_bookmark.zb_level)
		return (-1);
	if (z1->io_bookmark.zb_level > z2->io_bookmark.zb_level)
		return (1);

	if (z1->io_bookmark.zb_blkid < z2->io_bookmark.zb_blkid)
		return (-1);
	if (z1->io_bookmark.zb_blkid > z2->io_bookmark.zb_blkid)
		return (1);

	if (z1 < z2)
		return (-1);
	if (z1 > z2)
		return (1);

	return (0);
}

/*
 * ==========================================================================
 * Create the various types of I/O (read, write, free, etc)
 * ==========================================================================
 */
static zio_t *
zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    abd_t *data, uint64_t lsize, uint64_t psize, zio_done_func_t *done,
    void *private, zio_type_t type, zio_priority_t priority,
    zio_flag_t flags, vdev_t *vd, uint64_t offset,
    const zbookmark_phys_t *zb, enum zio_stage stage,
    enum zio_stage pipeline)
{
	zio_t *zio;

	IMPLY(type != ZIO_TYPE_TRIM, psize <= SPA_MAXBLOCKSIZE);
	ASSERT(P2PHASE(psize, SPA_MINBLOCKSIZE) == 0);
	ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);

	ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
	ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
	ASSERT(vd || stage == ZIO_STAGE_OPEN);

	IMPLY(lsize != psize, (flags & ZIO_FLAG_RAW_COMPRESS) != 0);

	zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
	memset(zio, 0, sizeof (zio_t));

	mutex_init(&zio->io_lock, NULL, MUTEX_NOLOCKDEP, NULL);
	cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);

	list_create(&zio->io_parent_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_parent_node));
	list_create(&zio->io_child_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_child_node));
	metaslab_trace_init(&zio->io_alloc_list);

	if (vd != NULL)
		zio->io_child_type = ZIO_CHILD_VDEV;
	else if (flags & ZIO_FLAG_GANG_CHILD)
		zio->io_child_type = ZIO_CHILD_GANG;
	else if (flags & ZIO_FLAG_DDT_CHILD)
		zio->io_child_type = ZIO_CHILD_DDT;
	else
		zio->io_child_type = ZIO_CHILD_LOGICAL;

	if (bp != NULL) {
		if (type != ZIO_TYPE_WRITE ||
		    zio->io_child_type == ZIO_CHILD_DDT) {
			zio->io_bp_copy = *bp;
			zio->io_bp = &zio->io_bp_copy;	/* so caller can free */
		} else {
			zio->io_bp = (blkptr_t *)bp;
		}
		zio->io_bp_orig = *bp;
		if (zio->io_child_type == ZIO_CHILD_LOGICAL)
			zio->io_logical = zio;
		if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
			pipeline |= ZIO_GANG_STAGES;
	}

	zio->io_spa = spa;
	zio->io_txg = txg;
	zio->io_done = done;
	zio->io_private = private;
	zio->io_type = type;
	zio->io_priority = priority;
	zio->io_vd = vd;
	zio->io_offset = offset;
	zio->io_orig_abd = zio->io_abd = data;
	zio->io_orig_size = zio->io_size = psize;
	zio->io_lsize = lsize;
	zio->io_orig_flags = zio->io_flags = flags;
	zio->io_orig_stage = zio->io_stage = stage;
	zio->io_orig_pipeline = zio->io_pipeline = pipeline;
	zio->io_pipeline_trace = ZIO_STAGE_OPEN;
	zio->io_allocator = ZIO_ALLOCATOR_NONE;

	zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY) ||
	    (pipeline & ZIO_STAGE_READY) == 0;
	zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);

	if (zb != NULL)
		zio->io_bookmark = *zb;

	if (pio != NULL) {
		zio->io_metaslab_class = pio->io_metaslab_class;
		if (zio->io_logical == NULL)
			zio->io_logical = pio->io_logical;
		if (zio->io_child_type == ZIO_CHILD_GANG)
			zio->io_gang_leader = pio->io_gang_leader;
		zio_add_child_first(pio, zio);
	}

	taskq_init_ent(&zio->io_tqent);

	return (zio);
}

void
zio_destroy(zio_t *zio)
{
	metaslab_trace_fini(&zio->io_alloc_list);
	list_destroy(&zio->io_parent_list);
	list_destroy(&zio->io_child_list);
	mutex_destroy(&zio->io_lock);
	cv_destroy(&zio->io_cv);
	kmem_cache_free(zio_cache, zio);
}

/*
 * A ZIO intended to sit between others. Provides synchronization at the
 * READY and DONE pipeline stages and calls the respective callbacks.
 */
zio_t *
zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done,
    void *private, zio_flag_t flags)
{
	zio_t *zio;

	zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
	    ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
	    ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE);

	return (zio);
}

/*
 * A ZIO intended to be the root of a tree. Unlike a null ZIO, it does not
 * have a READY pipeline stage (it is ready on creation), so it should not be
 * used as a child of any ZIO that may need to wait for its grandchildren's
 * READY stage (any other ZIO type).
 */
zio_t *
zio_root(spa_t *spa, zio_done_func_t *done, void *private, zio_flag_t flags)
{
	zio_t *zio;

	zio = zio_create(NULL, spa, 0, NULL, NULL, 0, 0, done, private,
	    ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, NULL, 0, NULL,
	    ZIO_STAGE_OPEN, ZIO_ROOT_PIPELINE);

	return (zio);
}
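
/*
 * Usage sketch (illustrative, not from this file): consumers commonly hang
 * several I/Os off a root and wait for the whole tree, e.g.
 *
 *	zio_t *rio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
 *	for (...)
 *		zio_nowait(zio_read(rio, spa, bp, abd, size, done, priv,
 *		    ZIO_PRIORITY_SYNC_READ, flags, &zb));
 *	error = zio_wait(rio);
 *
 * zio_wait() returns once every child has passed its DONE stage.
 */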

static int
zfs_blkptr_verify_log(spa_t *spa, const blkptr_t *bp,
    enum blk_verify_flag blk_verify, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	zfs_dbgmsg("bad blkptr at %px: "
	    "DVA[0]=%#llx/%#llx "
	    "DVA[1]=%#llx/%#llx "
	    "DVA[2]=%#llx/%#llx "
	    "prop=%#llx "
	    "pad=%#llx,%#llx "
	    "phys_birth=%#llx "
	    "birth=%#llx "
	    "fill=%#llx "
	    "cksum=%#llx/%#llx/%#llx/%#llx",
	    bp,
	    (long long)bp->blk_dva[0].dva_word[0],
	    (long long)bp->blk_dva[0].dva_word[1],
	    (long long)bp->blk_dva[1].dva_word[0],
	    (long long)bp->blk_dva[1].dva_word[1],
	    (long long)bp->blk_dva[2].dva_word[0],
	    (long long)bp->blk_dva[2].dva_word[1],
	    (long long)bp->blk_prop,
	    (long long)bp->blk_pad[0],
	    (long long)bp->blk_pad[1],
	    (long long)BP_GET_PHYSICAL_BIRTH(bp),
	    (long long)BP_GET_LOGICAL_BIRTH(bp),
	    (long long)bp->blk_fill,
	    (long long)bp->blk_cksum.zc_word[0],
	    (long long)bp->blk_cksum.zc_word[1],
	    (long long)bp->blk_cksum.zc_word[2],
	    (long long)bp->blk_cksum.zc_word[3]);
	switch (blk_verify) {
	case BLK_VERIFY_HALT:
		zfs_panic_recover("%s: %s", spa_name(spa), buf);
		break;
	case BLK_VERIFY_LOG:
		zfs_dbgmsg("%s: %s", spa_name(spa), buf);
		break;
	case BLK_VERIFY_ONLY:
		break;
	}

	return (1);
}

/*
 * Verify that the block pointer fields contain reasonable values. This means
 * it only contains known object types, checksum/compression identifiers,
 * block sizes within the maximum allowed limits, valid DVAs, etc.
 *
 * If everything checks out, 0 is returned. The blk_verify argument controls
 * the behavior when an invalid field is detected.
 *
 * Values for blk_verify_flag:
 *	BLK_VERIFY_ONLY: evaluate the block
 *	BLK_VERIFY_LOG: evaluate the block and log problems
 *	BLK_VERIFY_HALT: call zfs_panic_recover on error
 *
 * Values for blk_config_flag:
 *	BLK_CONFIG_HELD: caller holds SCL_VDEV for writer
 *	BLK_CONFIG_NEEDED: caller holds no config lock, SCL_VDEV will be
 *	obtained for reader
 *	BLK_CONFIG_SKIP: skip checks which require SCL_VDEV, for better
 *	performance
 */
int
zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp,
    enum blk_config_flag blk_config, enum blk_verify_flag blk_verify)
{
	int errors = 0;

	if (unlikely(!DMU_OT_IS_VALID(BP_GET_TYPE(bp)))) {
		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
		    "blkptr at %px has invalid TYPE %llu",
		    bp, (longlong_t)BP_GET_TYPE(bp));
	}
	if (unlikely(BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS)) {
		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
		    "blkptr at %px has invalid COMPRESS %llu",
		    bp, (longlong_t)BP_GET_COMPRESS(bp));
	}
	if (unlikely(BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE)) {
		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
		    "blkptr at %px has invalid LSIZE %llu",
		    bp, (longlong_t)BP_GET_LSIZE(bp));
	}
	if (BP_IS_EMBEDDED(bp)) {
		if (unlikely(BPE_GET_ETYPE(bp) >= NUM_BP_EMBEDDED_TYPES)) {
			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
			    "blkptr at %px has invalid ETYPE %llu",
			    bp, (longlong_t)BPE_GET_ETYPE(bp));
		}
		if (unlikely(BPE_GET_PSIZE(bp) > BPE_PAYLOAD_SIZE)) {
			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
			    "blkptr at %px has invalid PSIZE %llu",
			    bp, (longlong_t)BPE_GET_PSIZE(bp));
		}
		return (errors ? ECKSUM : 0);
	} else if (BP_IS_HOLE(bp)) {
		/*
		 * Holes are allowed (expected, even) to have no DVAs, no
		 * checksum, and no psize.
		 */
		return (errors ? ECKSUM : 0);
	} else if (unlikely(!DVA_IS_VALID(&bp->blk_dva[0]))) {
		/* Non-hole, non-embedded BPs _must_ have at least one DVA */
		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
		    "blkptr at %px has no valid DVAs", bp);
	}
	if (unlikely(BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS)) {
		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
		    "blkptr at %px has invalid CHECKSUM %llu",
		    bp, (longlong_t)BP_GET_CHECKSUM(bp));
	}
	if (unlikely(BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE)) {
		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
		    "blkptr at %px has invalid PSIZE %llu",
		    bp, (longlong_t)BP_GET_PSIZE(bp));
	}

	/*
	 * Do not verify individual DVAs if the config is not trusted. This
	 * will be done once the zio is executed in vdev_mirror_map_alloc.
	 */
	if (unlikely(!spa->spa_trust_config))
		return (errors ? ECKSUM : 0);

	switch (blk_config) {
	case BLK_CONFIG_HELD:
		ASSERT(spa_config_held(spa, SCL_VDEV, RW_WRITER));
		break;
	case BLK_CONFIG_NEEDED:
		spa_config_enter(spa, SCL_VDEV, bp, RW_READER);
		break;
	case BLK_CONFIG_NEEDED_TRY:
		if (!spa_config_tryenter(spa, SCL_VDEV, bp, RW_READER))
			return (EBUSY);
		break;
	case BLK_CONFIG_SKIP:
		return (errors ? ECKSUM : 0);
	default:
		panic("invalid blk_config %u", blk_config);
	}

	/*
	 * Pool-specific checks.
	 *
	 * Note: it would be nice to verify that the logical birth
	 * and physical birth are not too large. However,
	 * spa_freeze() allows the birth time of log blocks (and
	 * dmu_sync()-ed blocks that are in the log) to be arbitrarily
	 * large.
	 */
	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
		const dva_t *dva = &bp->blk_dva[i];
		uint64_t vdevid = DVA_GET_VDEV(dva);

		if (unlikely(vdevid >= spa->spa_root_vdev->vdev_children)) {
			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
			    "blkptr at %px DVA %u has invalid VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
		if (unlikely(vd == NULL)) {
			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
			    "blkptr at %px DVA %u has invalid VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		if (unlikely(vd->vdev_ops == &vdev_hole_ops)) {
			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
			    "blkptr at %px DVA %u has hole VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		if (vd->vdev_ops == &vdev_missing_ops) {
			/*
			 * "missing" vdevs are valid during import, but we
			 * don't have their detailed info (e.g. asize), so
			 * we can't perform any more checks on them.
			 */
			continue;
		}
		uint64_t offset = DVA_GET_OFFSET(dva);
		uint64_t asize = DVA_GET_ASIZE(dva);
		if (DVA_GET_GANG(dva))
			asize = vdev_gang_header_asize(vd);
		if (unlikely(offset + asize > vd->vdev_asize)) {
			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
			    "blkptr at %px DVA %u has invalid OFFSET %llu",
			    bp, i, (longlong_t)offset);
		}
	}
	if (blk_config == BLK_CONFIG_NEEDED || blk_config ==
	    BLK_CONFIG_NEEDED_TRY)
		spa_config_exit(spa, SCL_VDEV, bp);

	return (errors ? ECKSUM : 0);
}
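
/*
 * Call-site note (illustrative): callers pick the cheapest safe mode, e.g.
 * zio_free() below uses
 *
 *	zfs_blkptr_verify(spa, bp, BLK_CONFIG_NEEDED, BLK_VERIFY_HALT);
 *
 * while zio_claim() passes BLK_CONFIG_HELD when the caller already holds
 * the config lock as writer (ZIO_FLAG_CONFIG_WRITER).
 */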

boolean_t
zfs_dva_valid(spa_t *spa, const dva_t *dva, const blkptr_t *bp)
{
	(void) bp;
	uint64_t vdevid = DVA_GET_VDEV(dva);

	if (vdevid >= spa->spa_root_vdev->vdev_children)
		return (B_FALSE);

	vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
	if (vd == NULL)
		return (B_FALSE);

	if (vd->vdev_ops == &vdev_hole_ops)
		return (B_FALSE);

	if (vd->vdev_ops == &vdev_missing_ops) {
		return (B_FALSE);
	}

	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t asize = DVA_GET_ASIZE(dva);

	if (DVA_GET_GANG(dva))
		asize = vdev_gang_header_asize(vd);
	if (offset + asize > vd->vdev_asize)
		return (B_FALSE);

	return (B_TRUE);
}

zio_t *
zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
    abd_t *data, uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, zio_flag_t flags, const zbookmark_phys_t *zb)
{
	zio_t *zio;

	zio = zio_create(pio, spa, BP_GET_BIRTH(bp), bp,
	    data, size, size, done, private,
	    ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE);

	return (zio);
}

zio_t *
zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    abd_t *data, uint64_t lsize, uint64_t psize, const zio_prop_t *zp,
    zio_done_func_t *ready, zio_done_func_t *children_ready,
    zio_done_func_t *done, void *private, zio_priority_t priority,
    zio_flag_t flags, const zbookmark_phys_t *zb)
{
	zio_t *zio;
	enum zio_stage pipeline = zp->zp_direct_write == B_TRUE ?
	    ZIO_DIRECT_WRITE_PIPELINE : (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE;

	zio = zio_create(pio, spa, txg, bp, data, lsize, psize, done, private,
	    ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, pipeline);

	zio->io_ready = ready;
	zio->io_children_ready = children_ready;
	zio->io_prop = *zp;

	/*
	 * Data can be NULL if we are going to call zio_write_override() to
	 * provide the already-allocated BP. But we may need the data to
	 * verify a dedup hit (if requested). In this case, don't try to
	 * dedup (just take the already-allocated BP verbatim). Encrypted
	 * dedup blocks need data as well so we also disable dedup in this
	 * case.
	 */
	if (data == NULL &&
	    (zio->io_prop.zp_dedup_verify || zio->io_prop.zp_encrypt)) {
		zio->io_prop.zp_dedup = zio->io_prop.zp_dedup_verify = B_FALSE;
	}

	return (zio);
}

zio_t *
zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, abd_t *data,
    uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, zio_flag_t flags, zbookmark_phys_t *zb)
{
	zio_t *zio;

	zio = zio_create(pio, spa, txg, bp, data, size, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_IO_REWRITE, NULL, 0, zb,
	    ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);

	return (zio);
}

void
zio_write_override(zio_t *zio, blkptr_t *bp, int copies, int gang_copies,
    boolean_t nopwrite, boolean_t brtwrite)
{
	ASSERT(zio->io_type == ZIO_TYPE_WRITE);
	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
	ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
	ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa));
	ASSERT(!brtwrite || !nopwrite);

	/*
	 * We must reset the io_prop to match the values that existed
	 * when the bp was first written by dmu_sync(), keeping in mind
	 * that nopwrite and dedup are mutually exclusive.
	 */
	zio->io_prop.zp_dedup = nopwrite ? B_FALSE : zio->io_prop.zp_dedup;
	zio->io_prop.zp_nopwrite = nopwrite;
	zio->io_prop.zp_brtwrite = brtwrite;
	zio->io_prop.zp_copies = copies;
	zio->io_prop.zp_gang_copies = gang_copies;
	zio->io_bp_override = bp;
}

void
zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
{
	(void) zfs_blkptr_verify(spa, bp, BLK_CONFIG_NEEDED, BLK_VERIFY_HALT);

	/*
	 * The check for EMBEDDED is a performance optimization. We
	 * process the free here (by ignoring it) rather than
	 * putting it on the list and then processing it in zio_free_sync().
	 */
	if (BP_IS_EMBEDDED(bp))
		return;

	/*
	 * Frees that are for the currently-syncing txg, are not going to be
	 * deferred, and which will not need to do a read (i.e. not GANG or
	 * DEDUP), can be processed immediately. Otherwise, put them on the
	 * in-memory list for later processing.
	 *
	 * Note that we only defer frees after zfs_sync_pass_deferred_free
	 * when the log space map feature is disabled. [see relevant comment
	 * in spa_sync_iterate_to_convergence()]
	 */
	if (BP_IS_GANG(bp) ||
	    BP_GET_DEDUP(bp) ||
	    txg != spa->spa_syncing_txg ||
	    (spa_sync_pass(spa) >= zfs_sync_pass_deferred_free &&
	    !spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) ||
	    brt_maybe_exists(spa, bp)) {
		metaslab_check_free(spa, bp);
		bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
	} else {
		VERIFY3P(zio_free_sync(NULL, spa, txg, bp, 0), ==, NULL);
	}
}

/*
 * To improve performance, this function may return NULL if we were able
 * to do the free immediately. This avoids the cost of creating a zio
 * (and linking it to the parent, etc).
 */
zio_t *
zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    zio_flag_t flags)
{
	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(spa_syncing_txg(spa) == txg);

	if (BP_IS_EMBEDDED(bp))
		return (NULL);

	metaslab_check_free(spa, bp);
	arc_freed(spa, bp);
	dsl_scan_freed(spa, bp);

	if (BP_IS_GANG(bp) ||
	    BP_GET_DEDUP(bp) ||
	    brt_maybe_exists(spa, bp)) {
		/*
		 * GANG, DEDUP and BRT blocks can induce a read (for the gang
		 * block header, the DDT or the BRT), so issue them
		 * asynchronously so that this thread is not tied up.
		 */
		enum zio_stage stage =
		    ZIO_FREE_PIPELINE | ZIO_STAGE_ISSUE_ASYNC;

		return (zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
		    BP_GET_PSIZE(bp), NULL, NULL,
		    ZIO_TYPE_FREE, ZIO_PRIORITY_NOW,
		    flags, NULL, 0, NULL, ZIO_STAGE_OPEN, stage));
	} else {
		metaslab_free(spa, bp, txg, B_FALSE);
		return (NULL);
	}
}

zio_t *
zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    zio_done_func_t *done, void *private, zio_flag_t flags)
{
	zio_t *zio;

	(void) zfs_blkptr_verify(spa, bp, (flags & ZIO_FLAG_CONFIG_WRITER) ?
	    BLK_CONFIG_HELD : BLK_CONFIG_NEEDED, BLK_VERIFY_HALT);

	if (BP_IS_EMBEDDED(bp))
		return (zio_null(pio, spa, NULL, NULL, NULL, 0));

	/*
	 * A claim is an allocation of a specific block. Claims are needed
	 * to support immediate writes in the intent log. The issue is that
	 * immediate writes contain committed data, but in a txg that was
	 * *not* committed. Upon opening the pool after an unclean shutdown,
	 * the intent log claims all blocks that contain immediate write data
	 * so that the SPA knows they're in use.
	 *
	 * All claims *must* be resolved in the first txg -- before the SPA
	 * starts allocating blocks -- so that nothing is allocated twice.
	 * If txg == 0 we just verify that the block is claimable.
	 */
	ASSERT3U(BP_GET_LOGICAL_BIRTH(&spa->spa_uberblock.ub_rootbp), <,
	    spa_min_claim_txg(spa));
	ASSERT(txg == spa_min_claim_txg(spa) || txg == 0);
	ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa));	/* zdb(8) */

	zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
	    BP_GET_PSIZE(bp), done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW,
	    flags, NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);
	ASSERT0(zio->io_queued_timestamp);

	return (zio);
}

zio_t *
zio_trim(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    zio_done_func_t *done, void *private, zio_priority_t priority,
    zio_flag_t flags, enum trim_flag trim_flags)
{
	zio_t *zio;

	ASSERT0(vd->vdev_children);
	ASSERT0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
	ASSERT0(P2PHASE(size, 1ULL << vd->vdev_ashift));
	ASSERT3U(size, !=, 0);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, NULL, size, size, done,
	    private, ZIO_TYPE_TRIM, priority, flags | ZIO_FLAG_PHYSICAL,
	    vd, offset, NULL, ZIO_STAGE_OPEN, ZIO_TRIM_PIPELINE);
	zio->io_trim_flags = trim_flags;

	return (zio);
}

zio_t *
zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    abd_t *data, int checksum, zio_done_func_t *done, void *private,
    zio_priority_t priority, zio_flag_t flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT(vd->vdev_children == 0);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
	    private, ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL, vd,
	    offset, NULL, ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	return (zio);
}

zio_t *
zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    abd_t *data, int checksum, zio_done_func_t *done, void *private,
    zio_priority_t priority, zio_flag_t flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT(vd->vdev_children == 0);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
	    private, ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL, vd,
	    offset, NULL, ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	if (zio_checksum_table[checksum].ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
		/*
		 * zec checksums are necessarily destructive -- they modify
		 * the end of the write buffer to hold the verifier/checksum.
		 * Therefore, we must make a local copy in case the data is
		 * being written to multiple places in parallel.
		 */
		abd_t *wbuf = abd_alloc_sametype(data, size);
		abd_copy(wbuf, data, size);

		zio_push_transform(zio, wbuf, size, size, NULL);
	}

	return (zio);
}

/*
 * Create a child I/O to do some work for us.
 */
zio_t *
zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
    abd_t *data, uint64_t size, int type, zio_priority_t priority,
    zio_flag_t flags, zio_done_func_t *done, void *private)
{
	enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE;
	zio_t *zio;

	/*
	 * vdev child I/Os do not propagate their error to the parent.
	 * Therefore, for correct operation the caller *must* check for
	 * and handle the error in the child i/o's done callback.
	 * The only exceptions are i/os that we don't care about
	 * (OPTIONAL or REPAIR).
	 */
	ASSERT((flags & ZIO_FLAG_OPTIONAL) || (flags & ZIO_FLAG_IO_REPAIR) ||
	    done != NULL);

	if (type == ZIO_TYPE_READ && bp != NULL) {
		/*
		 * If we have the bp, then the child should perform the
		 * checksum and the parent need not. This pushes error
		 * detection as close to the leaves as possible and
		 * eliminates redundant checksums in the interior nodes.
		 */
		pipeline |= ZIO_STAGE_CHECKSUM_VERIFY;
		pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
		/*
		 * We never allow the mirror VDEV to attempt reading from any
		 * additional data copies after the first Direct I/O checksum
		 * verify failure. This is to avoid bad data being written out
		 * through the mirror during self healing. See comment in
		 * vdev_mirror_io_done() for more details.
		 */
		ASSERT0(pio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR);
	} else if (type == ZIO_TYPE_WRITE &&
	    pio->io_prop.zp_direct_write == B_TRUE) {
		/*
		 * By default we will only verify checksums for Direct I/O
		 * writes on Linux. FreeBSD is able to place user pages under
		 * write protection before issuing them to the ZIO pipeline.
		 *
		 * Checksum validation errors will only be reported through
		 * the top-level VDEV, which is set by this child ZIO.
		 */
		ASSERT3P(bp, !=, NULL);
		ASSERT3U(pio->io_child_type, ==, ZIO_CHILD_LOGICAL);
		pipeline |= ZIO_STAGE_DIO_CHECKSUM_VERIFY;
	}

	if (vd->vdev_ops->vdev_op_leaf) {
		ASSERT0(vd->vdev_children);
		offset += VDEV_LABEL_START_SIZE;
	}

	flags |= ZIO_VDEV_CHILD_FLAGS(pio);

	/*
	 * If we've decided to do a repair, the write is not speculative --
	 * even if the original read was.
	 */
	if (flags & ZIO_FLAG_IO_REPAIR)
		flags &= ~ZIO_FLAG_SPECULATIVE;

	/*
	 * If we're creating a child I/O that is not associated with a
	 * top-level vdev, then the child zio is not an allocating I/O.
	 * If this is a retried I/O then we ignore it since we will
	 * have already processed the original allocating I/O.
	 */
	if (flags & ZIO_FLAG_IO_ALLOCATING &&
	    (vd != vd->vdev_top || (flags & ZIO_FLAG_IO_RETRY))) {
		ASSERT(pio->io_metaslab_class != NULL);
		ASSERT(pio->io_metaslab_class->mc_alloc_throttle_enabled);
		ASSERT(type == ZIO_TYPE_WRITE);
		ASSERT(priority == ZIO_PRIORITY_ASYNC_WRITE);
		ASSERT(!(flags & ZIO_FLAG_IO_REPAIR));
		ASSERT(!(pio->io_flags & ZIO_FLAG_IO_REWRITE) ||
		    pio->io_child_type == ZIO_CHILD_GANG);

		flags &= ~ZIO_FLAG_IO_ALLOCATING;
	}

	zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size, size,
	    done, private, type, priority, flags, vd, offset, &pio->io_bookmark,
	    ZIO_STAGE_VDEV_IO_START >> 1, pipeline);
	ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);

	return (zio);
}

zio_t *
zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, abd_t *data, uint64_t size,
    zio_type_t type, zio_priority_t priority, zio_flag_t flags,
    zio_done_func_t *done, void *private)
{
	zio_t *zio;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	zio = zio_create(NULL, vd->vdev_spa, 0, NULL,
	    data, size, size, done, private, type, priority,
	    flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_DELEGATED,
	    vd, offset, NULL,
	    ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE);

	return (zio);
}

/*
 * Send a flush command to the given vdev. Unlike most zio creation functions,
 * the flush zios are issued immediately. You can wait on pio to pause until
 * the flushes complete.
 */
void
zio_flush(zio_t *pio, vdev_t *vd)
{
	const zio_flag_t flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE |
	    ZIO_FLAG_DONT_RETRY;

	if (vd->vdev_nowritecache)
		return;

	if (vd->vdev_children == 0) {
		zio_nowait(zio_create(pio, vd->vdev_spa, 0, NULL, NULL, 0, 0,
		    NULL, NULL, ZIO_TYPE_FLUSH, ZIO_PRIORITY_NOW, flags, vd, 0,
		    NULL, ZIO_STAGE_OPEN, ZIO_FLUSH_PIPELINE));
	} else {
		for (uint64_t c = 0; c < vd->vdev_children; c++)
			zio_flush(pio, vd->vdev_child[c]);
	}
}

void
zio_shrink(zio_t *zio, uint64_t size)
{
	ASSERT3P(zio->io_executor, ==, NULL);
	ASSERT3U(zio->io_orig_size, ==, zio->io_size);
	ASSERT3U(size, <=, zio->io_size);

	/*
	 * We don't shrink for raidz because of problems with the
	 * reconstruction when reading back less than the block size.
	 * Note, BP_IS_RAIDZ() assumes no compression.
	 */
	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
	if (!BP_IS_RAIDZ(zio->io_bp)) {
		/* we are not doing a raw write */
		ASSERT3U(zio->io_size, ==, zio->io_lsize);
		zio->io_orig_size = zio->io_size = zio->io_lsize = size;
	}
}

/*
 * Round provided allocation size up to a value that can be allocated
 * by at least some vdev(s) in the pool with minimum or no additional
 * padding and without extra space usage on others
 */
static uint64_t
zio_roundup_alloc_size(spa_t *spa, uint64_t size)
{
	if (size > spa->spa_min_alloc)
		return (roundup(size, spa->spa_gcd_alloc));
	return (spa->spa_min_alloc);
}

size_t
zio_get_compression_max_size(enum zio_compress compress, uint64_t gcd_alloc,
    uint64_t min_alloc, size_t s_len)
{
	size_t d_len;

	/* minimum 12.5% must be saved (legacy value, may be changed later) */
	d_len = s_len - (s_len >> 3);

	/* ZLE can't use exactly d_len bytes, it needs more, so ignore it */
	if (compress == ZIO_COMPRESS_ZLE)
		return (d_len);

	d_len = d_len - d_len % gcd_alloc;

	if (d_len < min_alloc)
		return (BPE_PAYLOAD_SIZE);
	return (d_len);
}
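
/*
 * Worked example (illustrative): for a 128K logical block on a pool whose
 * allocation sizes share a GCD of 4K, zio_get_compression_max_size()
 * computes d_len = 131072 - (131072 >> 3) = 114688, which is already a
 * multiple of 4096, so compression must squeeze the block down to at most
 * 114688 bytes (a saving of one eighth) to be considered worthwhile.
 */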
1777 */ 1778 ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF); 1779 if (!BP_IS_RAIDZ(zio->io_bp)) { 1780 /* we are not doing a raw write */ 1781 ASSERT3U(zio->io_size, ==, zio->io_lsize); 1782 zio->io_orig_size = zio->io_size = zio->io_lsize = size; 1783 } 1784 } 1785 1786 /* 1787 * Round provided allocation size up to a value that can be allocated 1788 * by at least some vdev(s) in the pool with minimum or no additional 1789 * padding and without extra space usage on others 1790 */ 1791 static uint64_t 1792 zio_roundup_alloc_size(spa_t *spa, uint64_t size) 1793 { 1794 if (size > spa->spa_min_alloc) 1795 return (roundup(size, spa->spa_gcd_alloc)); 1796 return (spa->spa_min_alloc); 1797 } 1798 1799 size_t 1800 zio_get_compression_max_size(enum zio_compress compress, uint64_t gcd_alloc, 1801 uint64_t min_alloc, size_t s_len) 1802 { 1803 size_t d_len; 1804 1805 /* minimum 12.5% must be saved (legacy value, may be changed later) */ 1806 d_len = s_len - (s_len >> 3); 1807 1808 /* ZLE can't use exactly d_len bytes, it needs more, so ignore it */ 1809 if (compress == ZIO_COMPRESS_ZLE) 1810 return (d_len); 1811 1812 d_len = d_len - d_len % gcd_alloc; 1813 1814 if (d_len < min_alloc) 1815 return (BPE_PAYLOAD_SIZE); 1816 return (d_len); 1817 } 1818 1819 /* 1820 * ========================================================================== 1821 * Prepare to read and write logical blocks 1822 * ========================================================================== 1823 */ 1824 1825 static zio_t * 1826 zio_read_bp_init(zio_t *zio) 1827 { 1828 blkptr_t *bp = zio->io_bp; 1829 uint64_t psize = 1830 BP_IS_EMBEDDED(bp) ? BPE_GET_PSIZE(bp) : BP_GET_PSIZE(bp); 1831 1832 ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy); 1833 1834 if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF && 1835 zio->io_child_type == ZIO_CHILD_LOGICAL && 1836 !(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) { 1837 zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize), 1838 psize, psize, zio_decompress); 1839 } 1840 1841 if (((BP_IS_PROTECTED(bp) && !(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) || 1842 BP_HAS_INDIRECT_MAC_CKSUM(bp)) && 1843 zio->io_child_type == ZIO_CHILD_LOGICAL) { 1844 zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize), 1845 psize, psize, zio_decrypt); 1846 } 1847 1848 if (BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA) { 1849 int psize = BPE_GET_PSIZE(bp); 1850 void *data = abd_borrow_buf(zio->io_abd, psize); 1851 1852 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 1853 decode_embedded_bp_compressed(bp, data); 1854 abd_return_buf_copy(zio->io_abd, data, psize); 1855 } else { 1856 ASSERT(!BP_IS_EMBEDDED(bp)); 1857 } 1858 1859 if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL) 1860 zio->io_pipeline = ZIO_DDT_READ_PIPELINE; 1861 1862 return (zio); 1863 } 1864 1865 static zio_t * 1866 zio_write_bp_init(zio_t *zio) 1867 { 1868 if (!IO_IS_ALLOCATING(zio)) 1869 return (zio); 1870 1871 ASSERT(zio->io_child_type != ZIO_CHILD_DDT); 1872 1873 if (zio->io_bp_override) { 1874 blkptr_t *bp = zio->io_bp; 1875 zio_prop_t *zp = &zio->io_prop; 1876 1877 ASSERT(BP_GET_LOGICAL_BIRTH(bp) != zio->io_txg); 1878 1879 *bp = *zio->io_bp_override; 1880 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 1881 1882 if (zp->zp_brtwrite) 1883 return (zio); 1884 1885 ASSERT(!BP_GET_DEDUP(zio->io_bp_override)); 1886 1887 if (BP_IS_EMBEDDED(bp)) 1888 return (zio); 1889 1890 /* 1891 * If we've been overridden and nopwrite is set then 1892 * set the flag accordingly to indicate that a nopwrite 1893 * has already occurred. 
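 * With ZIO_FLAG_NOPWRITE set, the rest of the pipeline knows the
 * existing on-disk block already holds this data, so no new
 * allocation or device write is needed.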
1894 */ 1895 if (!BP_IS_HOLE(bp) && zp->zp_nopwrite) { 1896 ASSERT(!zp->zp_dedup); 1897 ASSERT3U(BP_GET_CHECKSUM(bp), ==, zp->zp_checksum); 1898 zio->io_flags |= ZIO_FLAG_NOPWRITE; 1899 return (zio); 1900 } 1901 1902 ASSERT(!zp->zp_nopwrite); 1903 1904 if (BP_IS_HOLE(bp) || !zp->zp_dedup) 1905 return (zio); 1906 1907 ASSERT((zio_checksum_table[zp->zp_checksum].ci_flags & 1908 ZCHECKSUM_FLAG_DEDUP) || zp->zp_dedup_verify); 1909 1910 if (BP_GET_CHECKSUM(bp) == zp->zp_checksum && 1911 !zp->zp_encrypt) { 1912 BP_SET_DEDUP(bp, 1); 1913 zio->io_pipeline |= ZIO_STAGE_DDT_WRITE; 1914 return (zio); 1915 } 1916 1917 /* 1918 * We were unable to handle this as an override bp, treat 1919 * it as a regular write I/O. 1920 */ 1921 zio->io_bp_override = NULL; 1922 *bp = zio->io_bp_orig; 1923 zio->io_pipeline = zio->io_orig_pipeline; 1924 } 1925 1926 return (zio); 1927 } 1928 1929 static zio_t * 1930 zio_write_compress(zio_t *zio) 1931 { 1932 spa_t *spa = zio->io_spa; 1933 zio_prop_t *zp = &zio->io_prop; 1934 enum zio_compress compress = zp->zp_compress; 1935 blkptr_t *bp = zio->io_bp; 1936 uint64_t lsize = zio->io_lsize; 1937 uint64_t psize = zio->io_size; 1938 uint32_t pass = 1; 1939 1940 /* 1941 * If our children haven't all reached the ready stage, 1942 * wait for them and then repeat this pipeline stage. 1943 */ 1944 if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT | 1945 ZIO_CHILD_GANG_BIT, ZIO_WAIT_READY)) { 1946 return (NULL); 1947 } 1948 1949 if (!IO_IS_ALLOCATING(zio)) 1950 return (zio); 1951 1952 if (zio->io_children_ready != NULL) { 1953 /* 1954 * Now that all our children are ready, run the callback 1955 * associated with this zio in case it wants to modify the 1956 * data to be written. 1957 */ 1958 ASSERT3U(zp->zp_level, >, 0); 1959 zio->io_children_ready(zio); 1960 } 1961 1962 ASSERT(zio->io_child_type != ZIO_CHILD_DDT); 1963 ASSERT(zio->io_bp_override == NULL); 1964 1965 if (!BP_IS_HOLE(bp) && BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg) { 1966 /* 1967 * We're rewriting an existing block, which means we're 1968 * working on behalf of spa_sync(). For spa_sync() to 1969 * converge, it must eventually be the case that we don't 1970 * have to allocate new blocks. But compression changes 1971 * the blocksize, which forces a reallocate, and makes 1972 * convergence take longer. Therefore, after the first 1973 * few passes, stop compressing to ensure convergence. 1974 */ 1975 pass = spa_sync_pass(spa); 1976 1977 ASSERT(zio->io_txg == spa_syncing_txg(spa)); 1978 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 1979 ASSERT(!BP_GET_DEDUP(bp)); 1980 1981 if (pass >= zfs_sync_pass_dont_compress) 1982 compress = ZIO_COMPRESS_OFF; 1983 1984 /* Make sure someone doesn't change their mind on overwrites */ 1985 ASSERT(BP_IS_EMBEDDED(bp) || BP_IS_GANG(bp) || 1986 MIN(zp->zp_copies, spa_max_replication(spa)) 1987 == BP_GET_NDVAS(bp)); 1988 } 1989 1990 /* If it's a compressed write that is not raw, compress the buffer. 
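 * All-zero buffers become holes, sufficiently small results may be
 * stored as embedded data, and anything else is padded out to an
 * allocatable size below; if compression saves too little, it is
 * abandoned altogether.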
*/ 1991 if (compress != ZIO_COMPRESS_OFF && 1992 !(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) { 1993 abd_t *cabd = NULL; 1994 if (abd_cmp_zero(zio->io_abd, lsize) == 0) 1995 psize = 0; 1996 else if (compress == ZIO_COMPRESS_EMPTY) 1997 psize = lsize; 1998 else 1999 psize = zio_compress_data(compress, zio->io_abd, &cabd, 2000 lsize, 2001 zio_get_compression_max_size(compress, 2002 spa->spa_gcd_alloc, spa->spa_min_alloc, lsize), 2003 zp->zp_complevel); 2004 if (psize == 0) { 2005 compress = ZIO_COMPRESS_OFF; 2006 } else if (psize >= lsize) { 2007 compress = ZIO_COMPRESS_OFF; 2008 if (cabd != NULL) 2009 abd_free(cabd); 2010 } else if (psize <= BPE_PAYLOAD_SIZE && !zp->zp_encrypt && 2011 zp->zp_level == 0 && !DMU_OT_HAS_FILL(zp->zp_type) && 2012 spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA)) { 2013 void *cbuf = abd_borrow_buf_copy(cabd, lsize); 2014 encode_embedded_bp_compressed(bp, 2015 cbuf, compress, lsize, psize); 2016 BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA); 2017 BP_SET_TYPE(bp, zio->io_prop.zp_type); 2018 BP_SET_LEVEL(bp, zio->io_prop.zp_level); 2019 abd_return_buf(cabd, cbuf, lsize); 2020 abd_free(cabd); 2021 BP_SET_LOGICAL_BIRTH(bp, zio->io_txg); 2022 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 2023 ASSERT(spa_feature_is_active(spa, 2024 SPA_FEATURE_EMBEDDED_DATA)); 2025 return (zio); 2026 } else { 2027 /* 2028 * Round compressed size up to the minimum allocation 2029 * size of the smallest-ashift device, and zero the 2030 * tail. This ensures that the compressed size of the 2031 * BP (and thus compressratio property) are correct, 2032 * in that we charge for the padding used to fill out 2033 * the last sector. 2034 */ 2035 size_t rounded = (size_t)zio_roundup_alloc_size(spa, 2036 psize); 2037 if (rounded >= lsize) { 2038 compress = ZIO_COMPRESS_OFF; 2039 abd_free(cabd); 2040 psize = lsize; 2041 } else { 2042 abd_zero_off(cabd, psize, rounded - psize); 2043 psize = rounded; 2044 zio_push_transform(zio, cabd, 2045 psize, lsize, NULL); 2046 } 2047 } 2048 2049 /* 2050 * We were unable to handle this as an override bp, treat 2051 * it as a regular write I/O. 2052 */ 2053 zio->io_bp_override = NULL; 2054 *bp = zio->io_bp_orig; 2055 zio->io_pipeline = zio->io_orig_pipeline; 2056 2057 } else if ((zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) != 0 && 2058 zp->zp_type == DMU_OT_DNODE) { 2059 /* 2060 * The DMU actually relies on the zio layer's compression 2061 * to free metadnode blocks that have had all contained 2062 * dnodes freed. As a result, even when doing a raw 2063 * receive, we must check whether the block can be compressed 2064 * to a hole. 2065 */ 2066 if (abd_cmp_zero(zio->io_abd, lsize) == 0) { 2067 psize = 0; 2068 compress = ZIO_COMPRESS_OFF; 2069 } else { 2070 psize = lsize; 2071 } 2072 } else if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS && 2073 !(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) { 2074 /* 2075 * If we are raw receiving an encrypted dataset we should not 2076 * take this codepath because it will change the on-disk block 2077 * and decryption will fail. 
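 * For other raw compressed writes we may still need to pad the
 * already-compressed payload out to an allocatable size, which is
 * done below by copying it into a fresh linear buffer with a
 * zero-filled tail.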
2078 */ 2079 size_t rounded = MIN((size_t)zio_roundup_alloc_size(spa, psize), 2080 lsize); 2081 2082 if (rounded != psize) { 2083 abd_t *cdata = abd_alloc_linear(rounded, B_TRUE); 2084 abd_zero_off(cdata, psize, rounded - psize); 2085 abd_copy_off(cdata, zio->io_abd, 0, 0, psize); 2086 psize = rounded; 2087 zio_push_transform(zio, cdata, 2088 psize, rounded, NULL); 2089 } 2090 } else { 2091 ASSERT3U(psize, !=, 0); 2092 } 2093 2094 /* 2095 * The final pass of spa_sync() must be all rewrites, but the first 2096 * few passes offer a trade-off: allocating blocks defers convergence, 2097 * but newly allocated blocks are sequential, so they can be written 2098 * to disk faster. Therefore, we allow the first few passes of 2099 * spa_sync() to allocate new blocks, but force rewrites after that. 2100 * There should only be a handful of blocks after pass 1 in any case. 2101 */ 2102 if (!BP_IS_HOLE(bp) && BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg && 2103 BP_GET_PSIZE(bp) == psize && 2104 pass >= zfs_sync_pass_rewrite) { 2105 VERIFY3U(psize, !=, 0); 2106 enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES; 2107 2108 zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages; 2109 zio->io_flags |= ZIO_FLAG_IO_REWRITE; 2110 } else { 2111 BP_ZERO(bp); 2112 zio->io_pipeline = ZIO_WRITE_PIPELINE; 2113 } 2114 2115 if (psize == 0) { 2116 if (BP_GET_LOGICAL_BIRTH(&zio->io_bp_orig) != 0 && 2117 spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) { 2118 BP_SET_LSIZE(bp, lsize); 2119 BP_SET_TYPE(bp, zp->zp_type); 2120 BP_SET_LEVEL(bp, zp->zp_level); 2121 BP_SET_BIRTH(bp, zio->io_txg, 0); 2122 } 2123 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 2124 } else { 2125 ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER); 2126 BP_SET_LSIZE(bp, lsize); 2127 BP_SET_TYPE(bp, zp->zp_type); 2128 BP_SET_LEVEL(bp, zp->zp_level); 2129 BP_SET_PSIZE(bp, psize); 2130 BP_SET_COMPRESS(bp, compress); 2131 BP_SET_CHECKSUM(bp, zp->zp_checksum); 2132 BP_SET_DEDUP(bp, zp->zp_dedup); 2133 BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER); 2134 if (zp->zp_dedup) { 2135 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2136 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE)); 2137 ASSERT(!zp->zp_encrypt || 2138 DMU_OT_IS_ENCRYPTED(zp->zp_type)); 2139 zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE; 2140 } 2141 if (zp->zp_nopwrite) { 2142 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2143 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE)); 2144 zio->io_pipeline |= ZIO_STAGE_NOP_WRITE; 2145 } 2146 } 2147 return (zio); 2148 } 2149 2150 static zio_t * 2151 zio_free_bp_init(zio_t *zio) 2152 { 2153 blkptr_t *bp = zio->io_bp; 2154 2155 if (zio->io_child_type == ZIO_CHILD_LOGICAL) { 2156 if (BP_GET_DEDUP(bp)) 2157 zio->io_pipeline = ZIO_DDT_FREE_PIPELINE; 2158 } 2159 2160 ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy); 2161 2162 return (zio); 2163 } 2164 2165 /* 2166 * ========================================================================== 2167 * Execute the I/O pipeline 2168 * ========================================================================== 2169 */ 2170 2171 static void 2172 zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline) 2173 { 2174 spa_t *spa = zio->io_spa; 2175 zio_type_t t = zio->io_type; 2176 2177 /* 2178 * If we're a config writer or a probe, the normal issue and 2179 * interrupt threads may all be blocked waiting for the config lock. 2180 * In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL. 
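 * Using the otherwise idle ZIO_TYPE_NULL taskqs keeps these zios off
 * the read/write queues entirely, so they cannot queue up behind I/O
 * that is itself blocked on the config lock.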
2181 */ 2182 if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE)) 2183 t = ZIO_TYPE_NULL; 2184 2185 /* 2186 * A similar issue exists for the L2ARC write thread until L2ARC 2.0. 2187 */ 2188 if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux) 2189 t = ZIO_TYPE_NULL; 2190 2191 /* 2192 * If this is a high priority I/O, then use the high priority taskq if 2193 * available or cut the line otherwise. 2194 */ 2195 if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE) { 2196 if (spa->spa_zio_taskq[t][q + 1].stqs_count != 0) 2197 q++; 2198 else 2199 cutinline = B_TRUE; 2200 } 2201 2202 ASSERT3U(q, <, ZIO_TASKQ_TYPES); 2203 2204 spa_taskq_dispatch(spa, t, q, zio_execute, zio, cutinline); 2205 } 2206 2207 static boolean_t 2208 zio_taskq_member(zio_t *zio, zio_taskq_type_t q) 2209 { 2210 spa_t *spa = zio->io_spa; 2211 2212 taskq_t *tq = taskq_of_curthread(); 2213 2214 for (zio_type_t t = 0; t < ZIO_TYPES; t++) { 2215 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q]; 2216 uint_t i; 2217 for (i = 0; i < tqs->stqs_count; i++) { 2218 if (tqs->stqs_taskq[i] == tq) 2219 return (B_TRUE); 2220 } 2221 } 2222 2223 return (B_FALSE); 2224 } 2225 2226 static zio_t * 2227 zio_issue_async(zio_t *zio) 2228 { 2229 ASSERT((zio->io_type != ZIO_TYPE_WRITE) || ZIO_HAS_ALLOCATOR(zio)); 2230 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE); 2231 return (NULL); 2232 } 2233 2234 void 2235 zio_interrupt(void *zio) 2236 { 2237 zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE); 2238 } 2239 2240 void 2241 zio_delay_interrupt(zio_t *zio) 2242 { 2243 /* 2244 * The timeout_generic() function isn't defined in userspace, so 2245 * rather than trying to implement the function, the zio delay 2246 * functionality has been disabled for userspace builds. 2247 */ 2248 2249 #ifdef _KERNEL 2250 /* 2251 * If io_target_timestamp is zero, then no delay has been registered 2252 * for this IO, so we jump to the end of this function and "skip" the 2253 * delay, issuing it directly to the zio layer. 2254 */ 2255 if (zio->io_target_timestamp != 0) { 2256 hrtime_t now = gethrtime(); 2257 2258 if (now >= zio->io_target_timestamp) { 2259 /* 2260 * This IO has already taken longer than the target 2261 * delay to complete, so we don't want to delay it 2262 * any longer; we "miss" the delay and issue it 2263 * directly to the zio layer. This is likely due to 2264 * the target latency being set to a value less than 2265 * the underlying hardware can satisfy (e.g. delay 2266 * set to 1ms, but the disks take 10ms to complete an 2267 * IO request). 2268 */ 2269 2270 DTRACE_PROBE2(zio__delay__miss, zio_t *, zio, 2271 hrtime_t, now); 2272 2273 zio_interrupt(zio); 2274 } else { 2275 taskqid_t tid; 2276 hrtime_t diff = zio->io_target_timestamp - now; 2277 int ticks = MAX(1, NSEC_TO_TICK(diff)); 2278 clock_t expire_at_tick = ddi_get_lbolt() + ticks; 2279 2280 DTRACE_PROBE3(zio__delay__hit, zio_t *, zio, 2281 hrtime_t, now, hrtime_t, diff); 2282 2283 tid = taskq_dispatch_delay(system_taskq, zio_interrupt, 2284 zio, TQ_NOSLEEP, expire_at_tick); 2285 if (tid == TASKQID_INVALID) { 2286 /* 2287 * Couldn't allocate a task. Just finish the 2288 * zio without a delay.
2289 */ 2290 zio_interrupt(zio); 2291 } 2292 } 2293 return; 2294 } 2295 #endif 2296 DTRACE_PROBE1(zio__delay__skip, zio_t *, zio); 2297 zio_interrupt(zio); 2298 } 2299 2300 static void 2301 zio_deadman_impl(zio_t *pio, int ziodepth) 2302 { 2303 zio_t *cio, *cio_next; 2304 zio_link_t *zl = NULL; 2305 vdev_t *vd = pio->io_vd; 2306 2307 if (zio_deadman_log_all || (vd != NULL && vd->vdev_ops->vdev_op_leaf)) { 2308 vdev_queue_t *vq = vd ? &vd->vdev_queue : NULL; 2309 zbookmark_phys_t *zb = &pio->io_bookmark; 2310 uint64_t delta = gethrtime() - pio->io_timestamp; 2311 uint64_t failmode = spa_get_deadman_failmode(pio->io_spa); 2312 2313 zfs_dbgmsg("slow zio[%d]: zio=%px timestamp=%llu " 2314 "delta=%llu queued=%llu io=%llu " 2315 "path=%s " 2316 "last=%llu type=%d " 2317 "priority=%d flags=0x%llx stage=0x%x " 2318 "pipeline=0x%x pipeline-trace=0x%x " 2319 "objset=%llu object=%llu " 2320 "level=%llu blkid=%llu " 2321 "offset=%llu size=%llu " 2322 "error=%d", 2323 ziodepth, pio, pio->io_timestamp, 2324 (u_longlong_t)delta, pio->io_delta, pio->io_delay, 2325 vd ? vd->vdev_path : "NULL", 2326 vq ? vq->vq_io_complete_ts : 0, pio->io_type, 2327 pio->io_priority, (u_longlong_t)pio->io_flags, 2328 pio->io_stage, pio->io_pipeline, pio->io_pipeline_trace, 2329 (u_longlong_t)zb->zb_objset, (u_longlong_t)zb->zb_object, 2330 (u_longlong_t)zb->zb_level, (u_longlong_t)zb->zb_blkid, 2331 (u_longlong_t)pio->io_offset, (u_longlong_t)pio->io_size, 2332 pio->io_error); 2333 (void) zfs_ereport_post(FM_EREPORT_ZFS_DEADMAN, 2334 pio->io_spa, vd, zb, pio, 0); 2335 2336 if (failmode == ZIO_FAILURE_MODE_CONTINUE && 2337 taskq_empty_ent(&pio->io_tqent)) { 2338 zio_interrupt(pio); 2339 } 2340 } 2341 2342 mutex_enter(&pio->io_lock); 2343 for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) { 2344 cio_next = zio_walk_children(pio, &zl); 2345 zio_deadman_impl(cio, ziodepth + 1); 2346 } 2347 mutex_exit(&pio->io_lock); 2348 } 2349 2350 /* 2351 * Log the critical information describing this zio and all of its children 2352 * using the zfs_dbgmsg() interface, then post a deadman event for the ZED. 2353 */ 2354 void 2355 zio_deadman(zio_t *pio, const char *tag) 2356 { 2357 spa_t *spa = pio->io_spa; 2358 char *name = spa_name(spa); 2359 2360 if (!zfs_deadman_enabled || spa_suspended(spa)) 2361 return; 2362 2363 zio_deadman_impl(pio, 0); 2364 2365 switch (spa_get_deadman_failmode(spa)) { 2366 case ZIO_FAILURE_MODE_WAIT: 2367 zfs_dbgmsg("%s waiting for hung I/O to pool '%s'", tag, name); 2368 break; 2369 2370 case ZIO_FAILURE_MODE_CONTINUE: 2371 zfs_dbgmsg("%s restarting hung I/O for pool '%s'", tag, name); 2372 break; 2373 2374 case ZIO_FAILURE_MODE_PANIC: 2375 fm_panic("%s determined I/O to pool '%s' is hung.", tag, name); 2376 break; 2377 } 2378 } 2379 2380 /* 2381 * Execute the I/O pipeline until one of the following occurs: 2382 * (1) the I/O completes; (2) the pipeline stalls waiting for 2383 * dependent child I/Os; (3) the I/O issues, so we're waiting 2384 * for an I/O completion interrupt; (4) the I/O is delegated by 2385 * vdev-level caching or aggregation; (5) the I/O is deferred 2386 * due to vdev-level queueing; (6) the I/O is handed off to 2387 * another thread. In all cases, the pipeline stops whenever 2388 * there's no CPU work; it never burns a thread in cv_wait_io(). 2389 * 2390 * There's no locking on io_stage because there's no legitimate way 2391 * for multiple threads to be attempting to process the same I/O.
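 * Stages are single bits, so advancing is just a matter of shifting
 * the current stage left until it intersects the pipeline mask. For
 * example, with io_stage 0x004 and a pipeline of 0x124, the next
 * stage executed is 0x020.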
2392 */ 2393 static zio_pipe_stage_t *zio_pipeline[]; 2394 2395 /* 2396 * zio_execute() is a wrapper around the static function 2397 * __zio_execute() so that we can force __zio_execute() to be 2398 * inlined. This reduces stack overhead which is important 2399 * because __zio_execute() is called recursively in several zio 2400 * code paths. zio_execute() itself cannot be inlined because 2401 * it is externally visible. 2402 */ 2403 void 2404 zio_execute(void *zio) 2405 { 2406 fstrans_cookie_t cookie; 2407 2408 cookie = spl_fstrans_mark(); 2409 __zio_execute(zio); 2410 spl_fstrans_unmark(cookie); 2411 } 2412 2413 /* 2414 * Used to determine if in the current context the stack is sized large 2415 * enough to allow zio_execute() to be called recursively. A minimum 2416 * stack size of 16K is required to avoid needing to re-dispatch the zio. 2417 */ 2418 static boolean_t 2419 zio_execute_stack_check(zio_t *zio) 2420 { 2421 #if !defined(HAVE_LARGE_STACKS) 2422 dsl_pool_t *dp = spa_get_dsl(zio->io_spa); 2423 2424 /* Executing in txg_sync_thread() context. */ 2425 if (dp && curthread == dp->dp_tx.tx_sync_thread) 2426 return (B_TRUE); 2427 2428 /* Pool initialization outside of zio_taskq context. */ 2429 if (dp && spa_is_initializing(dp->dp_spa) && 2430 !zio_taskq_member(zio, ZIO_TASKQ_ISSUE) && 2431 !zio_taskq_member(zio, ZIO_TASKQ_ISSUE_HIGH)) 2432 return (B_TRUE); 2433 #else 2434 (void) zio; 2435 #endif /* HAVE_LARGE_STACKS */ 2436 2437 return (B_FALSE); 2438 } 2439 2440 __attribute__((always_inline)) 2441 static inline void 2442 __zio_execute(zio_t *zio) 2443 { 2444 ASSERT3U(zio->io_queued_timestamp, >, 0); 2445 2446 while (zio->io_stage < ZIO_STAGE_DONE) { 2447 enum zio_stage pipeline = zio->io_pipeline; 2448 enum zio_stage stage = zio->io_stage; 2449 2450 zio->io_executor = curthread; 2451 2452 ASSERT(!MUTEX_HELD(&zio->io_lock)); 2453 ASSERT(ISP2(stage)); 2454 ASSERT(zio->io_stall == NULL); 2455 2456 do { 2457 stage <<= 1; 2458 } while ((stage & pipeline) == 0); 2459 2460 ASSERT(stage <= ZIO_STAGE_DONE); 2461 2462 /* 2463 * If we are in interrupt context and this pipeline stage 2464 * will grab a config lock that is held across I/O, 2465 * or may wait for an I/O that needs an interrupt thread 2466 * to complete, issue async to avoid deadlock. 2467 * 2468 * For VDEV_IO_START, we cut in line so that the io will 2469 * be sent to disk promptly. 2470 */ 2471 if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL && 2472 zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) { 2473 boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ? 2474 zio_requeue_io_start_cut_in_line : B_FALSE; 2475 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut); 2476 return; 2477 } 2478 2479 /* 2480 * If the current context doesn't have large enough stacks 2481 * the zio must be issued asynchronously to prevent overflow. 2482 */ 2483 if (zio_execute_stack_check(zio)) { 2484 boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ? 2485 zio_requeue_io_start_cut_in_line : B_FALSE; 2486 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut); 2487 return; 2488 } 2489 2490 zio->io_stage = stage; 2491 zio->io_pipeline_trace |= zio->io_stage; 2492 2493 /* 2494 * The zio pipeline stage returns the next zio to execute 2495 * (typically the same as this one), or NULL if we should 2496 * stop. 
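 * A NULL return means ownership of the zio has moved elsewhere: it
 * was requeued to a taskq, is stalled waiting on children, or has
 * been handed off to another thread.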
2497 */ 2498 zio = zio_pipeline[highbit64(stage) - 1](zio); 2499 2500 if (zio == NULL) 2501 return; 2502 } 2503 } 2504 2505 2506 /* 2507 * ========================================================================== 2508 * Initiate I/O, either sync or async 2509 * ========================================================================== 2510 */ 2511 int 2512 zio_wait(zio_t *zio) 2513 { 2514 /* 2515 * Some routines, like zio_free_sync(), may return a NULL zio 2516 * to avoid the performance overhead of creating and then destroying 2517 * an unneeded zio. For the callers' simplicity, we accept a NULL 2518 * zio and ignore it. 2519 */ 2520 if (zio == NULL) 2521 return (0); 2522 2523 long timeout = MSEC_TO_TICK(zfs_deadman_ziotime_ms); 2524 int error; 2525 2526 ASSERT3S(zio->io_stage, ==, ZIO_STAGE_OPEN); 2527 ASSERT3P(zio->io_executor, ==, NULL); 2528 2529 zio->io_waiter = curthread; 2530 ASSERT0(zio->io_queued_timestamp); 2531 zio->io_queued_timestamp = gethrtime(); 2532 2533 if (zio->io_type == ZIO_TYPE_WRITE) { 2534 spa_select_allocator(zio); 2535 } 2536 __zio_execute(zio); 2537 2538 mutex_enter(&zio->io_lock); 2539 while (zio->io_executor != NULL) { 2540 error = cv_timedwait_io(&zio->io_cv, &zio->io_lock, 2541 ddi_get_lbolt() + timeout); 2542 2543 if (zfs_deadman_enabled && error == -1 && 2544 gethrtime() - zio->io_queued_timestamp > 2545 spa_deadman_ziotime(zio->io_spa)) { 2546 mutex_exit(&zio->io_lock); 2547 timeout = MSEC_TO_TICK(zfs_deadman_checktime_ms); 2548 zio_deadman(zio, FTAG); 2549 mutex_enter(&zio->io_lock); 2550 } 2551 } 2552 mutex_exit(&zio->io_lock); 2553 2554 error = zio->io_error; 2555 zio_destroy(zio); 2556 2557 return (error); 2558 } 2559 2560 void 2561 zio_nowait(zio_t *zio) 2562 { 2563 /* 2564 * See comment in zio_wait(). 2565 */ 2566 if (zio == NULL) 2567 return; 2568 2569 ASSERT3P(zio->io_executor, ==, NULL); 2570 2571 if (zio->io_child_type == ZIO_CHILD_LOGICAL && 2572 list_is_empty(&zio->io_parent_list)) { 2573 zio_t *pio; 2574 2575 /* 2576 * This is a logical async I/O with no parent to wait for it. 2577 * We add it to the spa_async_zio_root "Godfather" I/O, which 2578 * will ensure it completes prior to unloading the pool.
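 * The godfather zios are kept in a per-CPU array (indexed below via
 * CPU_SEQID_UNSTABLE), which spreads the zio_add_child() lock traffic
 * across several root zios instead of funneling it through one.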
2579 */ 2580 spa_t *spa = zio->io_spa; 2581 pio = spa->spa_async_zio_root[CPU_SEQID_UNSTABLE]; 2582 2583 zio_add_child(pio, zio); 2584 } 2585 2586 ASSERT0(zio->io_queued_timestamp); 2587 zio->io_queued_timestamp = gethrtime(); 2588 if (zio->io_type == ZIO_TYPE_WRITE) { 2589 spa_select_allocator(zio); 2590 } 2591 __zio_execute(zio); 2592 } 2593 2594 /* 2595 * ========================================================================== 2596 * Reexecute, cancel, or suspend/resume failed I/O 2597 * ========================================================================== 2598 */ 2599 2600 static void 2601 zio_reexecute(void *arg) 2602 { 2603 zio_t *pio = arg; 2604 zio_t *cio, *cio_next, *gio; 2605 2606 ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL); 2607 ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN); 2608 ASSERT(pio->io_gang_leader == NULL); 2609 ASSERT(pio->io_gang_tree == NULL); 2610 2611 mutex_enter(&pio->io_lock); 2612 pio->io_flags = pio->io_orig_flags; 2613 pio->io_stage = pio->io_orig_stage; 2614 pio->io_pipeline = pio->io_orig_pipeline; 2615 pio->io_reexecute = 0; 2616 pio->io_flags |= ZIO_FLAG_REEXECUTED; 2617 pio->io_pipeline_trace = 0; 2618 pio->io_error = 0; 2619 pio->io_state[ZIO_WAIT_READY] = (pio->io_stage >= ZIO_STAGE_READY) || 2620 (pio->io_pipeline & ZIO_STAGE_READY) == 0; 2621 pio->io_state[ZIO_WAIT_DONE] = (pio->io_stage >= ZIO_STAGE_DONE); 2622 2623 /* 2624 * It's possible for a failed ZIO to be a descendant of more than one 2625 * ZIO tree. When reexecuting it, we have to be sure to add its wait 2626 * states to all parent wait counts. 2627 * 2628 * Those parents, in turn, may have other children that are currently 2629 * active, usually because they've already been reexecuted after 2630 * resuming. Those children may be executing and may call 2631 * zio_notify_parent() at the same time as we're updating our parent's 2632 * counts. To avoid races while updating the counts, we take 2633 * gio->io_lock before each update. 2634 */ 2635 zio_link_t *zl = NULL; 2636 while ((gio = zio_walk_parents(pio, &zl)) != NULL) { 2637 mutex_enter(&gio->io_lock); 2638 for (int w = 0; w < ZIO_WAIT_TYPES; w++) { 2639 gio->io_children[pio->io_child_type][w] += 2640 !pio->io_state[w]; 2641 } 2642 mutex_exit(&gio->io_lock); 2643 } 2644 2645 for (int c = 0; c < ZIO_CHILD_TYPES; c++) 2646 pio->io_child_error[c] = 0; 2647 2648 if (IO_IS_ALLOCATING(pio)) 2649 BP_ZERO(pio->io_bp); 2650 2651 /* 2652 * As we reexecute pio's children, new children could be created. 2653 * New children go to the head of pio's io_child_list, however, 2654 * so we will (correctly) not reexecute them. The key is that 2655 * the remainder of pio's io_child_list, from 'cio_next' onward, 2656 * cannot be affected by any side effects of reexecuting 'cio'. 2657 */ 2658 zl = NULL; 2659 for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) { 2660 cio_next = zio_walk_children(pio, &zl); 2661 mutex_exit(&pio->io_lock); 2662 zio_reexecute(cio); 2663 mutex_enter(&pio->io_lock); 2664 } 2665 mutex_exit(&pio->io_lock); 2666 2667 /* 2668 * Now that all children have been reexecuted, execute the parent. 2669 * We don't reexecute "The Godfather" I/O here as it's the 2670 * responsibility of the caller to wait on it. 
2671 */ 2672 if (!(pio->io_flags & ZIO_FLAG_GODFATHER)) { 2673 pio->io_queued_timestamp = gethrtime(); 2674 __zio_execute(pio); 2675 } 2676 } 2677 2678 void 2679 zio_suspend(spa_t *spa, zio_t *zio, zio_suspend_reason_t reason) 2680 { 2681 if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC) 2682 fm_panic("Pool '%s' has encountered an uncorrectable I/O " 2683 "failure and the failure mode property for this pool " 2684 "is set to panic.", spa_name(spa)); 2685 2686 if (reason != ZIO_SUSPEND_MMP) { 2687 cmn_err(CE_WARN, "Pool '%s' has encountered an uncorrectable " 2688 "I/O failure and has been suspended.", spa_name(spa)); 2689 } 2690 2691 (void) zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL, 2692 NULL, NULL, 0); 2693 2694 mutex_enter(&spa->spa_suspend_lock); 2695 2696 if (spa->spa_suspend_zio_root == NULL) 2697 spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL, 2698 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 2699 ZIO_FLAG_GODFATHER); 2700 2701 spa->spa_suspended = reason; 2702 2703 if (zio != NULL) { 2704 ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER)); 2705 ASSERT(zio != spa->spa_suspend_zio_root); 2706 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2707 ASSERT(zio_unique_parent(zio) == NULL); 2708 ASSERT(zio->io_stage == ZIO_STAGE_DONE); 2709 zio_add_child(spa->spa_suspend_zio_root, zio); 2710 } 2711 2712 mutex_exit(&spa->spa_suspend_lock); 2713 } 2714 2715 int 2716 zio_resume(spa_t *spa) 2717 { 2718 zio_t *pio; 2719 2720 /* 2721 * Reexecute all previously suspended i/o. 2722 */ 2723 mutex_enter(&spa->spa_suspend_lock); 2724 if (spa->spa_suspended != ZIO_SUSPEND_NONE) 2725 cmn_err(CE_WARN, "Pool '%s' was suspended and is being " 2726 "resumed. Failed I/O will be retried.", 2727 spa_name(spa)); 2728 spa->spa_suspended = ZIO_SUSPEND_NONE; 2729 cv_broadcast(&spa->spa_suspend_cv); 2730 pio = spa->spa_suspend_zio_root; 2731 spa->spa_suspend_zio_root = NULL; 2732 mutex_exit(&spa->spa_suspend_lock); 2733 2734 if (pio == NULL) 2735 return (0); 2736 2737 zio_reexecute(pio); 2738 return (zio_wait(pio)); 2739 } 2740 2741 void 2742 zio_resume_wait(spa_t *spa) 2743 { 2744 mutex_enter(&spa->spa_suspend_lock); 2745 while (spa_suspended(spa)) 2746 cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock); 2747 mutex_exit(&spa->spa_suspend_lock); 2748 } 2749 2750 /* 2751 * ========================================================================== 2752 * Gang blocks. 2753 * 2754 * A gang block is a collection of small blocks that looks to the DMU 2755 * like one large block. When zio_dva_allocate() cannot find a block 2756 * of the requested size, due to either severe fragmentation or the pool 2757 * being nearly full, it calls zio_write_gang_block() to construct the 2758 * block from smaller fragments. 2759 * 2760 * A gang block consists of a gang header (zio_gbh_phys_t) and up to 2761 * three (SPA_GBH_NBLKPTRS) gang members. The gang header is just like 2762 * an indirect block: it's an array of block pointers. It consumes 2763 * only one sector and hence is allocatable regardless of fragmentation. 2764 * The gang header's bps point to its gang members, which hold the data. 2765 * 2766 * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg> 2767 * as the verifier to ensure uniqueness of the SHA256 checksum. 2768 * Critically, the gang block bp's blk_cksum is the checksum of the data, 2769 * not the gang header. This ensures that data block signatures (needed for 2770 * deduplication) are independent of how the block is physically stored. 
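 *
 * As an illustration (sizes are made up), a 100K write that could not
 * be allocated as a single block might become:
 *
 *	gang header (1 sector)
 *	    bp[0] -> 40K data block
 *	    bp[1] -> 40K data block
 *	    bp[2] -> 20K data block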
2771 * 2772 * Gang blocks can be nested: a gang member may itself be a gang block. 2773 * Thus every gang block is a tree in which the root and all interior nodes 2774 * are gang headers, and the leaves are normal blocks that contain user data. 2775 * The root of the gang tree is called the gang leader. 2776 * 2777 * To perform any operation (read, rewrite, free, claim) on a gang block, 2778 * zio_gang_assemble() first assembles the gang tree (minus data leaves) 2779 * in the io_gang_tree field of the original logical i/o by recursively 2780 * reading the gang leader and all gang headers below it. This yields 2781 * an in-core tree containing the contents of every gang header and the 2782 * bps for every constituent of the gang block. 2783 * 2784 * With the gang tree now assembled, zio_gang_issue() just walks the gang tree 2785 * and invokes a callback on each bp. To free a gang block, zio_gang_issue() 2786 * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp. 2787 * zio_claim_gang() provides a similarly trivial wrapper for zio_claim(). 2788 * zio_read_gang() is a wrapper around zio_read() that omits reading gang 2789 * headers, since we already have those in io_gang_tree. zio_rewrite_gang() 2790 * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite() 2791 * of the gang header plus zio_checksum_compute() of the data to update the 2792 * gang header's blk_cksum as described above. 2793 * 2794 * The two-phase assemble/issue model solves the problem of partial failure -- 2795 * what if you'd freed part of a gang block but then couldn't read the 2796 * gang header for another part? Assembling the entire gang tree first 2797 * ensures that all the necessary gang header I/O has succeeded before 2798 * starting the actual work of free, claim, or write. Once the gang tree 2799 * is assembled, free and claim are in-memory operations that cannot fail. 2800 * 2801 * In the event that a gang write fails, zio_dva_unallocate() walks the 2802 * gang tree to immediately free (i.e. insert back into the space map) 2803 * everything we've allocated. This ensures that we don't get ENOSPC 2804 * errors during repeated suspend/resume cycles due to a flaky device. 2805 * 2806 * Gang rewrites only happen during sync-to-convergence. If we can't assemble 2807 * the gang tree, we won't modify the block, so we can safely defer the free 2808 * (knowing that the block is still intact). If we *can* assemble the gang 2809 * tree, then even if some of the rewrites fail, zio_dva_unallocate() will free 2810 * each constituent bp and we can allocate a new block on the next sync pass. 2811 * 2812 * In all cases, the gang tree allows complete recovery from partial failure.
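 *
 * The per-operation behavior is selected through zio_gang_issue_func[],
 * indexed by io_type: reads, rewrites, frees and claims each get the
 * small callback described above.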
2813 * ========================================================================== 2814 */ 2815 2816 static void 2817 zio_gang_issue_func_done(zio_t *zio) 2818 { 2819 abd_free(zio->io_abd); 2820 } 2821 2822 static zio_t * 2823 zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 2824 uint64_t offset) 2825 { 2826 if (gn != NULL) 2827 return (pio); 2828 2829 return (zio_read(pio, pio->io_spa, bp, abd_get_offset(data, offset), 2830 BP_GET_PSIZE(bp), zio_gang_issue_func_done, 2831 NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), 2832 &pio->io_bookmark)); 2833 } 2834 2835 static zio_t * 2836 zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 2837 uint64_t offset) 2838 { 2839 zio_t *zio; 2840 2841 if (gn != NULL) { 2842 abd_t *gbh_abd = 2843 abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE); 2844 zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp, 2845 gbh_abd, SPA_GANGBLOCKSIZE, zio_gang_issue_func_done, NULL, 2846 pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), 2847 &pio->io_bookmark); 2848 /* 2849 * As we rewrite each gang header, the pipeline will compute 2850 * a new gang block header checksum for it; but no one will 2851 * compute a new data checksum, so we do that here. The one 2852 * exception is the gang leader: the pipeline already computed 2853 * its data checksum because that stage precedes gang assembly. 2854 * (Presently, nothing actually uses interior data checksums; 2855 * this is just good hygiene.) 2856 */ 2857 if (gn != pio->io_gang_leader->io_gang_tree) { 2858 abd_t *buf = abd_get_offset(data, offset); 2859 2860 zio_checksum_compute(zio, BP_GET_CHECKSUM(bp), 2861 buf, BP_GET_PSIZE(bp)); 2862 2863 abd_free(buf); 2864 } 2865 /* 2866 * If we are here to damage data for testing purposes, 2867 * leave the GBH alone so that we can detect the damage. 
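 * (Clearing the vdev I/O stages below means the rewritten header is
 * never actually sent to disk, so the induced damage stays in place
 * and remains detectable.)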
2868 */ 2869 if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE) 2870 zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES; 2871 } else { 2872 zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp, 2873 abd_get_offset(data, offset), BP_GET_PSIZE(bp), 2874 zio_gang_issue_func_done, NULL, pio->io_priority, 2875 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 2876 } 2877 2878 return (zio); 2879 } 2880 2881 static zio_t * 2882 zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 2883 uint64_t offset) 2884 { 2885 (void) gn, (void) data, (void) offset; 2886 2887 zio_t *zio = zio_free_sync(pio, pio->io_spa, pio->io_txg, bp, 2888 ZIO_GANG_CHILD_FLAGS(pio)); 2889 if (zio == NULL) { 2890 zio = zio_null(pio, pio->io_spa, 2891 NULL, NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio)); 2892 } 2893 return (zio); 2894 } 2895 2896 static zio_t * 2897 zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 2898 uint64_t offset) 2899 { 2900 (void) gn, (void) data, (void) offset; 2901 return (zio_claim(pio, pio->io_spa, pio->io_txg, bp, 2902 NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio))); 2903 } 2904 2905 static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = { 2906 NULL, 2907 zio_read_gang, 2908 zio_rewrite_gang, 2909 zio_free_gang, 2910 zio_claim_gang, 2911 NULL 2912 }; 2913 2914 static void zio_gang_tree_assemble_done(zio_t *zio); 2915 2916 static zio_gang_node_t * 2917 zio_gang_node_alloc(zio_gang_node_t **gnpp) 2918 { 2919 zio_gang_node_t *gn; 2920 2921 ASSERT(*gnpp == NULL); 2922 2923 gn = kmem_zalloc(sizeof (*gn), KM_SLEEP); 2924 gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE); 2925 *gnpp = gn; 2926 2927 return (gn); 2928 } 2929 2930 static void 2931 zio_gang_node_free(zio_gang_node_t **gnpp) 2932 { 2933 zio_gang_node_t *gn = *gnpp; 2934 2935 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) 2936 ASSERT(gn->gn_child[g] == NULL); 2937 2938 zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE); 2939 kmem_free(gn, sizeof (*gn)); 2940 *gnpp = NULL; 2941 } 2942 2943 static void 2944 zio_gang_tree_free(zio_gang_node_t **gnpp) 2945 { 2946 zio_gang_node_t *gn = *gnpp; 2947 2948 if (gn == NULL) 2949 return; 2950 2951 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) 2952 zio_gang_tree_free(&gn->gn_child[g]); 2953 2954 zio_gang_node_free(gnpp); 2955 } 2956 2957 static void 2958 zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp) 2959 { 2960 zio_gang_node_t *gn = zio_gang_node_alloc(gnpp); 2961 abd_t *gbh_abd = abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE); 2962 2963 ASSERT(gio->io_gang_leader == gio); 2964 ASSERT(BP_IS_GANG(bp)); 2965 2966 zio_nowait(zio_read(gio, gio->io_spa, bp, gbh_abd, SPA_GANGBLOCKSIZE, 2967 zio_gang_tree_assemble_done, gn, gio->io_priority, 2968 ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark)); 2969 } 2970 2971 static void 2972 zio_gang_tree_assemble_done(zio_t *zio) 2973 { 2974 zio_t *gio = zio->io_gang_leader; 2975 zio_gang_node_t *gn = zio->io_private; 2976 blkptr_t *bp = zio->io_bp; 2977 2978 ASSERT(gio == zio_unique_parent(zio)); 2979 ASSERT(list_is_empty(&zio->io_child_list)); 2980 2981 if (zio->io_error) 2982 return; 2983 2984 /* this ABD was created from a linear buf in zio_gang_tree_assemble */ 2985 if (BP_SHOULD_BYTESWAP(bp)) 2986 byteswap_uint64_array(abd_to_buf(zio->io_abd), zio->io_size); 2987 2988 ASSERT3P(abd_to_buf(zio->io_abd), ==, gn->gn_gbh); 2989 ASSERT(zio->io_size == SPA_GANGBLOCKSIZE); 2990 ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC); 2991 2992 abd_free(zio->io_abd); 2993 2994 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 2995 blkptr_t *gbp = 
&gn->gn_gbh->zg_blkptr[g]; 2996 if (!BP_IS_GANG(gbp)) 2997 continue; 2998 zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]); 2999 } 3000 } 3001 3002 static void 3003 zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, abd_t *data, 3004 uint64_t offset) 3005 { 3006 zio_t *gio = pio->io_gang_leader; 3007 zio_t *zio; 3008 3009 ASSERT(BP_IS_GANG(bp) == !!gn); 3010 ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp)); 3011 ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree); 3012 3013 /* 3014 * If you're a gang header, your data is in gn->gn_gbh. 3015 * If you're a gang member, your data is in 'data' and gn == NULL. 3016 */ 3017 zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data, offset); 3018 3019 if (gn != NULL) { 3020 ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC); 3021 3022 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 3023 blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g]; 3024 if (BP_IS_HOLE(gbp)) 3025 continue; 3026 zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data, 3027 offset); 3028 offset += BP_GET_PSIZE(gbp); 3029 } 3030 } 3031 3032 if (gn == gio->io_gang_tree) 3033 ASSERT3U(gio->io_size, ==, offset); 3034 3035 if (zio != pio) 3036 zio_nowait(zio); 3037 } 3038 3039 static zio_t * 3040 zio_gang_assemble(zio_t *zio) 3041 { 3042 blkptr_t *bp = zio->io_bp; 3043 3044 ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL); 3045 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 3046 3047 zio->io_gang_leader = zio; 3048 3049 zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree); 3050 3051 return (zio); 3052 } 3053 3054 static zio_t * 3055 zio_gang_issue(zio_t *zio) 3056 { 3057 blkptr_t *bp = zio->io_bp; 3058 3059 if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT, ZIO_WAIT_DONE)) { 3060 return (NULL); 3061 } 3062 3063 ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio); 3064 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 3065 3066 if (zio->io_child_error[ZIO_CHILD_GANG] == 0) 3067 zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_abd, 3068 0); 3069 else 3070 zio_gang_tree_free(&zio->io_gang_tree); 3071 3072 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 3073 3074 return (zio); 3075 } 3076 3077 static void 3078 zio_gang_inherit_allocator(zio_t *pio, zio_t *cio) 3079 { 3080 cio->io_allocator = pio->io_allocator; 3081 } 3082 3083 static void 3084 zio_write_gang_member_ready(zio_t *zio) 3085 { 3086 zio_t *pio = zio_unique_parent(zio); 3087 dva_t *cdva = zio->io_bp->blk_dva; 3088 dva_t *pdva = pio->io_bp->blk_dva; 3089 uint64_t asize; 3090 zio_t *gio __maybe_unused = zio->io_gang_leader; 3091 3092 if (BP_IS_HOLE(zio->io_bp)) 3093 return; 3094 3095 ASSERT(BP_IS_HOLE(&zio->io_bp_orig)); 3096 3097 ASSERT(zio->io_child_type == ZIO_CHILD_GANG); 3098 ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies); 3099 ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp)); 3100 ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp)); 3101 VERIFY3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp)); 3102 3103 mutex_enter(&pio->io_lock); 3104 for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) { 3105 ASSERT(DVA_GET_GANG(&pdva[d])); 3106 asize = DVA_GET_ASIZE(&pdva[d]); 3107 asize += DVA_GET_ASIZE(&cdva[d]); 3108 DVA_SET_ASIZE(&pdva[d], asize); 3109 } 3110 mutex_exit(&pio->io_lock); 3111 } 3112 3113 static void 3114 zio_write_gang_done(zio_t *zio) 3115 { 3116 /* 3117 * The io_abd field will be NULL for a zio with no data. 
The io_flags 3118 * will initially have the ZIO_FLAG_NODATA bit flag set, but we can't 3119 * check for it here as it is cleared in zio_ready. 3120 */ 3121 if (zio->io_abd != NULL) 3122 abd_free(zio->io_abd); 3123 } 3124 3125 static zio_t * 3126 zio_write_gang_block(zio_t *pio, metaslab_class_t *mc) 3127 { 3128 spa_t *spa = pio->io_spa; 3129 blkptr_t *bp = pio->io_bp; 3130 zio_t *gio = pio->io_gang_leader; 3131 zio_t *zio; 3132 zio_gang_node_t *gn, **gnpp; 3133 zio_gbh_phys_t *gbh; 3134 abd_t *gbh_abd; 3135 uint64_t txg = pio->io_txg; 3136 uint64_t resid = pio->io_size; 3137 uint64_t psize; 3138 zio_prop_t zp; 3139 int error; 3140 boolean_t has_data = !(pio->io_flags & ZIO_FLAG_NODATA); 3141 3142 /* 3143 * Store multiple copies of the GBH, so that we can still traverse 3144 * all the data (e.g. to free or scrub) even if a block is damaged. 3145 * This value respects the redundant_metadata property. 3146 */ 3147 int gbh_copies = gio->io_prop.zp_gang_copies; 3148 ASSERT3S(gbh_copies, >, 0); 3149 ASSERT3S(gbh_copies, <=, SPA_DVAS_PER_BP); 3150 3151 ASSERT(ZIO_HAS_ALLOCATOR(pio)); 3152 int flags = METASLAB_GANG_HEADER; 3153 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 3154 ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 3155 ASSERT(has_data); 3156 3157 flags |= METASLAB_ASYNC_ALLOC; 3158 } 3159 3160 error = metaslab_alloc(spa, mc, SPA_GANGBLOCKSIZE, 3161 bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, flags, 3162 &pio->io_alloc_list, pio->io_allocator, pio); 3163 if (error) { 3164 pio->io_error = error; 3165 return (pio); 3166 } 3167 3168 if (pio == gio) { 3169 gnpp = &gio->io_gang_tree; 3170 } else { 3171 gnpp = pio->io_private; 3172 ASSERT(pio->io_ready == zio_write_gang_member_ready); 3173 } 3174 3175 gn = zio_gang_node_alloc(gnpp); 3176 gbh = gn->gn_gbh; 3177 memset(gbh, 0, SPA_GANGBLOCKSIZE); 3178 gbh_abd = abd_get_from_buf(gbh, SPA_GANGBLOCKSIZE); 3179 3180 /* 3181 * Create the gang header. 3182 */ 3183 zio = zio_rewrite(pio, spa, txg, bp, gbh_abd, SPA_GANGBLOCKSIZE, 3184 zio_write_gang_done, NULL, pio->io_priority, 3185 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 3186 3187 zio_gang_inherit_allocator(pio, zio); 3188 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 3189 boolean_t more; 3190 VERIFY(metaslab_class_throttle_reserve(mc, gbh_copies, 3191 zio, B_TRUE, &more)); 3192 } 3193 3194 /* 3195 * Create and nowait the gang children. 3196 */ 3197 for (int g = 0; resid != 0; resid -= psize, g++) { 3198 psize = zio_roundup_alloc_size(spa, 3199 resid / (SPA_GBH_NBLKPTRS - g)); 3200 psize = MIN(resid, psize); 3201 ASSERT3U(psize, >=, SPA_MINBLOCKSIZE); 3202 3203 zp.zp_checksum = gio->io_prop.zp_checksum; 3204 zp.zp_compress = ZIO_COMPRESS_OFF; 3205 zp.zp_complevel = gio->io_prop.zp_complevel; 3206 zp.zp_type = zp.zp_storage_type = DMU_OT_NONE; 3207 zp.zp_level = 0; 3208 zp.zp_copies = gio->io_prop.zp_copies; 3209 zp.zp_gang_copies = gio->io_prop.zp_gang_copies; 3210 zp.zp_dedup = B_FALSE; 3211 zp.zp_dedup_verify = B_FALSE; 3212 zp.zp_nopwrite = B_FALSE; 3213 zp.zp_encrypt = gio->io_prop.zp_encrypt; 3214 zp.zp_byteorder = gio->io_prop.zp_byteorder; 3215 zp.zp_direct_write = B_FALSE; 3216 memset(zp.zp_salt, 0, ZIO_DATA_SALT_LEN); 3217 memset(zp.zp_iv, 0, ZIO_DATA_IV_LEN); 3218 memset(zp.zp_mac, 0, ZIO_DATA_MAC_LEN); 3219 3220 zio_t *cio = zio_write(zio, spa, txg, &gbh->zg_blkptr[g], 3221 has_data ? 
abd_get_offset(pio->io_abd, pio->io_size - 3222 resid) : NULL, psize, psize, &zp, 3223 zio_write_gang_member_ready, NULL, 3224 zio_write_gang_done, &gn->gn_child[g], pio->io_priority, 3225 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 3226 3227 zio_gang_inherit_allocator(zio, cio); 3228 /* 3229 * We do not reserve for the child writes, since we already 3230 * reserved for the parent. Unreserve, though, will be called 3231 * for the individual children. We can do this because the sum of 3232 * all the children's physical sizes equals the parent's physical 3233 * size. It would not work for potentially bigger allocation sizes. 3234 */ 3235 3236 zio_nowait(cio); 3237 } 3238 3239 /* 3240 * Set pio's pipeline to just wait for zio to finish. 3241 */ 3242 pio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 3243 3244 zio_nowait(zio); 3245 3246 return (pio); 3247 } 3248 3249 /* 3250 * The zio_nop_write stage in the pipeline determines if allocating a 3251 * new bp is necessary. The nopwrite feature can handle writes in 3252 * either syncing or open context (i.e. zil writes) and as a result is 3253 * mutually exclusive with dedup. 3254 * 3255 * By leveraging a cryptographically secure checksum, such as SHA256, we 3256 * can compare the checksums of the new data and the old to determine if 3257 * allocating a new block is required. Note that our requirements for 3258 * cryptographic strength are fairly weak: there can't be any accidental 3259 * hash collisions, but we don't need to be secure against intentional 3260 * (malicious) collisions. To trigger a nopwrite, you have to be able 3261 * to write the file to begin with, and triggering an incorrect (hash 3262 * collision) nopwrite is no worse than simply writing to the file. 3263 * That said, there are no known attacks against the checksum algorithms 3264 * used for nopwrite, assuming that the salt and the checksums 3265 * themselves remain secret. 3266 */ 3267 static zio_t * 3268 zio_nop_write(zio_t *zio) 3269 { 3270 blkptr_t *bp = zio->io_bp; 3271 blkptr_t *bp_orig = &zio->io_bp_orig; 3272 zio_prop_t *zp = &zio->io_prop; 3273 3274 ASSERT(BP_IS_HOLE(bp)); 3275 ASSERT(BP_GET_LEVEL(bp) == 0); 3276 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE)); 3277 ASSERT(zp->zp_nopwrite); 3278 ASSERT(!zp->zp_dedup); 3279 ASSERT(zio->io_bp_override == NULL); 3280 ASSERT(IO_IS_ALLOCATING(zio)); 3281 3282 /* 3283 * Check to see if the original bp and the new bp have matching 3284 * characteristics (i.e. same checksum, compression algorithms, etc). 3285 * If they don't then just continue with the pipeline which will 3286 * allocate a new bp. 3287 */ 3288 if (BP_IS_HOLE(bp_orig) || 3289 !(zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_flags & 3290 ZCHECKSUM_FLAG_NOPWRITE) || 3291 BP_IS_ENCRYPTED(bp) || BP_IS_ENCRYPTED(bp_orig) || 3292 BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) || 3293 BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) || 3294 BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) || 3295 zp->zp_copies != BP_GET_NDVAS(bp_orig)) 3296 return (zio); 3297 3298 /* 3299 * If the checksums match then reset the pipeline so that we 3300 * avoid allocating a new bp and issuing any I/O.
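 * In that case the original block pointer is copied back wholesale
 * below, so the write consumes no new space and touches no device.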
3301 */ 3302 if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) { 3303 ASSERT(zio_checksum_table[zp->zp_checksum].ci_flags & 3304 ZCHECKSUM_FLAG_NOPWRITE); 3305 ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig)); 3306 ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig)); 3307 ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF); 3308 ASSERT3U(bp->blk_prop, ==, bp_orig->blk_prop); 3309 3310 /* 3311 * If we're overwriting a block that is currently on an 3312 * indirect vdev, then ignore the nopwrite request and 3313 * allow a new block to be allocated on a concrete vdev. 3314 */ 3315 spa_config_enter(zio->io_spa, SCL_VDEV, FTAG, RW_READER); 3316 for (int d = 0; d < BP_GET_NDVAS(bp_orig); d++) { 3317 vdev_t *tvd = vdev_lookup_top(zio->io_spa, 3318 DVA_GET_VDEV(&bp_orig->blk_dva[d])); 3319 if (tvd->vdev_ops == &vdev_indirect_ops) { 3320 spa_config_exit(zio->io_spa, SCL_VDEV, FTAG); 3321 return (zio); 3322 } 3323 } 3324 spa_config_exit(zio->io_spa, SCL_VDEV, FTAG); 3325 3326 *bp = *bp_orig; 3327 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 3328 zio->io_flags |= ZIO_FLAG_NOPWRITE; 3329 } 3330 3331 return (zio); 3332 } 3333 3334 /* 3335 * ========================================================================== 3336 * Block Reference Table 3337 * ========================================================================== 3338 */ 3339 static zio_t * 3340 zio_brt_free(zio_t *zio) 3341 { 3342 blkptr_t *bp; 3343 3344 bp = zio->io_bp; 3345 3346 if (BP_GET_LEVEL(bp) > 0 || 3347 BP_IS_METADATA(bp) || 3348 !brt_maybe_exists(zio->io_spa, bp)) { 3349 return (zio); 3350 } 3351 3352 if (!brt_entry_decref(zio->io_spa, bp)) { 3353 /* 3354 * This isn't the last reference, so we cannot free 3355 * the data yet. 3356 */ 3357 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 3358 } 3359 3360 return (zio); 3361 } 3362 3363 /* 3364 * ========================================================================== 3365 * Dedup 3366 * ========================================================================== 3367 */ 3368 static void 3369 zio_ddt_child_read_done(zio_t *zio) 3370 { 3371 blkptr_t *bp = zio->io_bp; 3372 ddt_t *ddt; 3373 ddt_entry_t *dde = zio->io_private; 3374 zio_t *pio = zio_unique_parent(zio); 3375 3376 mutex_enter(&pio->io_lock); 3377 ddt = ddt_select(zio->io_spa, bp); 3378 3379 if (zio->io_error == 0) { 3380 ddt_phys_variant_t v = ddt_phys_select(ddt, dde, bp); 3381 /* this phys variant doesn't need repair */ 3382 ddt_phys_clear(dde->dde_phys, v); 3383 } 3384 3385 if (zio->io_error == 0 && dde->dde_io->dde_repair_abd == NULL) 3386 dde->dde_io->dde_repair_abd = zio->io_abd; 3387 else 3388 abd_free(zio->io_abd); 3389 mutex_exit(&pio->io_lock); 3390 } 3391 3392 static zio_t * 3393 zio_ddt_read_start(zio_t *zio) 3394 { 3395 blkptr_t *bp = zio->io_bp; 3396 3397 ASSERT(BP_GET_DEDUP(bp)); 3398 ASSERT(BP_GET_PSIZE(bp) == zio->io_size); 3399 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 3400 3401 if (zio->io_child_error[ZIO_CHILD_DDT]) { 3402 ddt_t *ddt = ddt_select(zio->io_spa, bp); 3403 ddt_entry_t *dde = ddt_repair_start(ddt, bp); 3404 ddt_phys_variant_t v_self = ddt_phys_select(ddt, dde, bp); 3405 ddt_univ_phys_t *ddp = dde->dde_phys; 3406 blkptr_t blk; 3407 3408 ASSERT(zio->io_vsd == NULL); 3409 zio->io_vsd = dde; 3410 3411 if (v_self == DDT_PHYS_NONE) 3412 return (zio); 3413 3414 /* issue I/O for the other copies */ 3415 for (int p = 0; p < DDT_NPHYS(ddt); p++) { 3416 ddt_phys_variant_t v = DDT_PHYS_VARIANT(ddt, p); 3417 3418 if (ddt_phys_birth(ddp, v) == 0 || v == v_self) 3419 continue; 3420 3421 
ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, 3422 ddp, v, &blk); 3423 zio_nowait(zio_read(zio, zio->io_spa, &blk, 3424 abd_alloc_for_io(zio->io_size, B_TRUE), 3425 zio->io_size, zio_ddt_child_read_done, dde, 3426 zio->io_priority, ZIO_DDT_CHILD_FLAGS(zio) | 3427 ZIO_FLAG_DONT_PROPAGATE, &zio->io_bookmark)); 3428 } 3429 return (zio); 3430 } 3431 3432 zio_nowait(zio_read(zio, zio->io_spa, bp, 3433 zio->io_abd, zio->io_size, NULL, NULL, zio->io_priority, 3434 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark)); 3435 3436 return (zio); 3437 } 3438 3439 static zio_t * 3440 zio_ddt_read_done(zio_t *zio) 3441 { 3442 blkptr_t *bp = zio->io_bp; 3443 3444 if (zio_wait_for_children(zio, ZIO_CHILD_DDT_BIT, ZIO_WAIT_DONE)) { 3445 return (NULL); 3446 } 3447 3448 ASSERT(BP_GET_DEDUP(bp)); 3449 ASSERT(BP_GET_PSIZE(bp) == zio->io_size); 3450 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 3451 3452 if (zio->io_child_error[ZIO_CHILD_DDT]) { 3453 ddt_t *ddt = ddt_select(zio->io_spa, bp); 3454 ddt_entry_t *dde = zio->io_vsd; 3455 if (ddt == NULL) { 3456 ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE); 3457 return (zio); 3458 } 3459 if (dde == NULL) { 3460 zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1; 3461 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE); 3462 return (NULL); 3463 } 3464 if (dde->dde_io->dde_repair_abd != NULL) { 3465 abd_copy(zio->io_abd, dde->dde_io->dde_repair_abd, 3466 zio->io_size); 3467 zio->io_child_error[ZIO_CHILD_DDT] = 0; 3468 } 3469 ddt_repair_done(ddt, dde); 3470 zio->io_vsd = NULL; 3471 } 3472 3473 ASSERT(zio->io_vsd == NULL); 3474 3475 return (zio); 3476 } 3477 3478 static boolean_t 3479 zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde) 3480 { 3481 spa_t *spa = zio->io_spa; 3482 boolean_t do_raw = !!(zio->io_flags & ZIO_FLAG_RAW); 3483 3484 ASSERT(!(zio->io_bp_override && do_raw)); 3485 3486 /* 3487 * Note: we compare the original data, not the transformed data, 3488 * because when zio->io_bp is an override bp, we will not have 3489 * pushed the I/O transforms. That's an important optimization 3490 * because otherwise we'd compress/encrypt all dmu_sync() data twice. 3491 * However, we should never get a raw, override zio so in these 3492 * cases we can compare the io_abd directly. This is useful because 3493 * it allows us to do dedup verification even if we don't have access 3494 * to the original data (for instance, if the encryption keys aren't 3495 * loaded). 
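 *
 * The first loop below compares against any in-flight lead zio for
 * this entry; the second reads back the on-disk copies (a raw
 * zio_read() for raw zios, an arc_read() otherwise) and compares
 * those.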
3496 */ 3497 3498 for (int p = 0; p < DDT_NPHYS(ddt); p++) { 3499 if (DDT_PHYS_IS_DITTO(ddt, p)) 3500 continue; 3501 3502 if (dde->dde_io == NULL) 3503 continue; 3504 3505 zio_t *lio = dde->dde_io->dde_lead_zio[p]; 3506 if (lio == NULL) 3507 continue; 3508 3509 if (do_raw) 3510 return (lio->io_size != zio->io_size || 3511 abd_cmp(zio->io_abd, lio->io_abd) != 0); 3512 3513 return (lio->io_orig_size != zio->io_orig_size || 3514 abd_cmp(zio->io_orig_abd, lio->io_orig_abd) != 0); 3515 } 3516 3517 for (int p = 0; p < DDT_NPHYS(ddt); p++) { 3518 ddt_phys_variant_t v = DDT_PHYS_VARIANT(ddt, p); 3519 uint64_t phys_birth = ddt_phys_birth(dde->dde_phys, v); 3520 3521 if (phys_birth != 0 && do_raw) { 3522 blkptr_t blk = *zio->io_bp; 3523 uint64_t psize; 3524 abd_t *tmpabd; 3525 int error; 3526 3527 ddt_bp_fill(dde->dde_phys, v, &blk, phys_birth); 3528 psize = BP_GET_PSIZE(&blk); 3529 3530 if (psize != zio->io_size) 3531 return (B_TRUE); 3532 3533 ddt_exit(ddt); 3534 3535 tmpabd = abd_alloc_for_io(psize, B_TRUE); 3536 3537 error = zio_wait(zio_read(NULL, spa, &blk, tmpabd, 3538 psize, NULL, NULL, ZIO_PRIORITY_SYNC_READ, 3539 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 3540 ZIO_FLAG_RAW, &zio->io_bookmark)); 3541 3542 if (error == 0) { 3543 if (abd_cmp(tmpabd, zio->io_abd) != 0) 3544 error = SET_ERROR(ENOENT); 3545 } 3546 3547 abd_free(tmpabd); 3548 ddt_enter(ddt); 3549 return (error != 0); 3550 } else if (phys_birth != 0) { 3551 arc_buf_t *abuf = NULL; 3552 arc_flags_t aflags = ARC_FLAG_WAIT; 3553 blkptr_t blk = *zio->io_bp; 3554 int error; 3555 3556 ddt_bp_fill(dde->dde_phys, v, &blk, phys_birth); 3557 3558 if (BP_GET_LSIZE(&blk) != zio->io_orig_size) 3559 return (B_TRUE); 3560 3561 ddt_exit(ddt); 3562 3563 error = arc_read(NULL, spa, &blk, 3564 arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ, 3565 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 3566 &aflags, &zio->io_bookmark); 3567 3568 if (error == 0) { 3569 if (abd_cmp_buf(zio->io_orig_abd, abuf->b_data, 3570 zio->io_orig_size) != 0) 3571 error = SET_ERROR(ENOENT); 3572 arc_buf_destroy(abuf, &abuf); 3573 } 3574 3575 ddt_enter(ddt); 3576 return (error != 0); 3577 } 3578 } 3579 3580 return (B_FALSE); 3581 } 3582 3583 static void 3584 zio_ddt_child_write_done(zio_t *zio) 3585 { 3586 ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp); 3587 ddt_entry_t *dde = zio->io_private; 3588 3589 zio_link_t *zl = NULL; 3590 ASSERT3P(zio_walk_parents(zio, &zl), !=, NULL); 3591 3592 int p = DDT_PHYS_FOR_COPIES(ddt, zio->io_prop.zp_copies); 3593 ddt_phys_variant_t v = DDT_PHYS_VARIANT(ddt, p); 3594 ddt_univ_phys_t *ddp = dde->dde_phys; 3595 3596 ddt_enter(ddt); 3597 3598 /* we're the lead, so once we're done there's no one else outstanding */ 3599 if (dde->dde_io->dde_lead_zio[p] == zio) 3600 dde->dde_io->dde_lead_zio[p] = NULL; 3601 3602 ddt_univ_phys_t *orig = &dde->dde_io->dde_orig_phys; 3603 3604 if (zio->io_error != 0) { 3605 /* 3606 * The write failed, so we're about to abort the entire IO 3607 * chain. We need to revert the entry back to what it was at 3608 * the last time it was successfully extended. 3609 */ 3610 ddt_phys_copy(ddp, orig, v); 3611 ddt_phys_clear(orig, v); 3612 3613 ddt_exit(ddt); 3614 return; 3615 } 3616 3617 /* 3618 * We've successfully added new DVAs to the entry. Clear the saved 3619 * state or, if there's still outstanding IO, remember it so we can 3620 * revert to a known good state if that IO fails. 
3621 */
3622 if (dde->dde_io->dde_lead_zio[p] == NULL)
3623 ddt_phys_clear(orig, v);
3624 else
3625 ddt_phys_copy(orig, ddp, v);
3626
3627 /*
3628 * Add references for all dedup writes that were waiting on the
3629 * physical one, skipping any other physical writes that are waiting.
3630 */
3631 zio_t *pio;
3632 zl = NULL;
3633 while ((pio = zio_walk_parents(zio, &zl)) != NULL) {
3634 if (!(pio->io_flags & ZIO_FLAG_DDT_CHILD))
3635 ddt_phys_addref(ddp, v);
3636 }
3637
3638 ddt_exit(ddt);
3639 }
3640
3641 static void
3642 zio_ddt_child_write_ready(zio_t *zio)
3643 {
3644 ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
3645 ddt_entry_t *dde = zio->io_private;
3646
3647 zio_link_t *zl = NULL;
3648 ASSERT3P(zio_walk_parents(zio, &zl), !=, NULL);
3649
3650 int p = DDT_PHYS_FOR_COPIES(ddt, zio->io_prop.zp_copies);
3651 ddt_phys_variant_t v = DDT_PHYS_VARIANT(ddt, p);
3652
3653 if (zio->io_error != 0)
3654 return;
3655
3656 ddt_enter(ddt);
3657
3658 ddt_phys_extend(dde->dde_phys, v, zio->io_bp);
3659
3660 zio_t *pio;
3661 zl = NULL;
3662 while ((pio = zio_walk_parents(zio, &zl)) != NULL) {
3663 if (!(pio->io_flags & ZIO_FLAG_DDT_CHILD))
3664 ddt_bp_fill(dde->dde_phys, v, pio->io_bp, zio->io_txg);
3665 }
3666
3667 ddt_exit(ddt);
3668 }
3669
3670 static zio_t *
3671 zio_ddt_write(zio_t *zio)
3672 {
3673 spa_t *spa = zio->io_spa;
3674 blkptr_t *bp = zio->io_bp;
3675 uint64_t txg = zio->io_txg;
3676 zio_prop_t *zp = &zio->io_prop;
3677 ddt_t *ddt = ddt_select(spa, bp);
3678 ddt_entry_t *dde;
3679
3680 ASSERT(BP_GET_DEDUP(bp));
3681 ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum);
3682 ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override);
3683 ASSERT(!(zio->io_bp_override && (zio->io_flags & ZIO_FLAG_RAW)));
3684 /*
3685 * Deduplication will not take place for Direct I/O writes. The
3686 * ddt_tree will be emptied in syncing context, while Direct I/O writes
3687 * take place in open context, so a Direct I/O write cannot safely
3688 * modify the ddt_tree while it is issuing a write.
3689 */
3690 ASSERT3B(zio->io_prop.zp_direct_write, ==, B_FALSE);
3691
3692 ddt_enter(ddt);
3693 /*
3694 * Search the DDT for a matching entry. Skip DVA verification here,
3695 * since DVAs can only come from an override BP, and once we get here
3696 * the override pointer can't have the "D" flag set, so it can't be confused with pruned DDT entries.
3697 */
3698 IMPLY(zio->io_bp_override, !BP_GET_DEDUP(zio->io_bp_override));
3699 dde = ddt_lookup(ddt, bp, B_FALSE);
3700 if (dde == NULL) {
3701 /* DDT size is over its quota so no new entries */
3702 zp->zp_dedup = B_FALSE;
3703 BP_SET_DEDUP(bp, B_FALSE);
3704 if (zio->io_bp_override == NULL)
3705 zio->io_pipeline = ZIO_WRITE_PIPELINE;
3706 ddt_exit(ddt);
3707 return (zio);
3708 }
3709
3710 if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) {
3711 /*
3712 * If we're using a weak checksum, upgrade to a strong checksum
3713 * and try again. If we're already using a strong checksum,
3714 * we can't resolve it, so just convert to an ordinary write.
3715 * (And automatically e-mail a paper to Nature?)
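 *
 * Illustrative sketch, with assumed property values: a dedup=verify
 * write using fletcher4 that hits a collision is pushed back to
 * ZIO_STAGE_OPEN with zp_checksum upgraded via spa_dedup_checksum()
 * (typically sha256); if the checksum was already dedup-capable, the
 * block is instead demoted to a plain, non-dedup write.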
3716 */ 3717 if (!(zio_checksum_table[zp->zp_checksum].ci_flags & 3718 ZCHECKSUM_FLAG_DEDUP)) { 3719 zp->zp_checksum = spa_dedup_checksum(spa); 3720 zio_pop_transforms(zio); 3721 zio->io_stage = ZIO_STAGE_OPEN; 3722 BP_ZERO(bp); 3723 } else { 3724 zp->zp_dedup = B_FALSE; 3725 BP_SET_DEDUP(bp, B_FALSE); 3726 } 3727 ASSERT(!BP_GET_DEDUP(bp)); 3728 zio->io_pipeline = ZIO_WRITE_PIPELINE; 3729 ddt_exit(ddt); 3730 return (zio); 3731 } 3732 3733 int p = DDT_PHYS_FOR_COPIES(ddt, zp->zp_copies); 3734 ddt_phys_variant_t v = DDT_PHYS_VARIANT(ddt, p); 3735 ddt_univ_phys_t *ddp = dde->dde_phys; 3736 3737 /* 3738 * In the common cases, at this point we have a regular BP with no 3739 * allocated DVAs, and the corresponding DDT entry for its checksum. 3740 * Our goal is to fill the BP with enough DVAs to satisfy its copies= 3741 * requirement. 3742 * 3743 * One of three things needs to happen to fulfill this: 3744 * 3745 * - if the DDT entry has enough DVAs to satisfy the BP, we just copy 3746 * them out of the entry and return; 3747 * 3748 * - if the DDT entry has no DVAs (ie its brand new), then we have to 3749 * issue the write as normal so that DVAs can be allocated and the 3750 * data land on disk. We then copy the DVAs into the DDT entry on 3751 * return. 3752 * 3753 * - if the DDT entry has some DVAs, but too few, we have to issue the 3754 * write, adjusted to have allocate fewer copies. When it returns, we 3755 * add the new DVAs to the DDT entry, and update the BP to have the 3756 * full amount it originally requested. 3757 * 3758 * In all cases, if there's already a writing IO in flight, we need to 3759 * defer the action until after the write is done. If our action is to 3760 * write, we need to adjust our request for additional DVAs to match 3761 * what will be in the DDT entry after it completes. In this way every 3762 * IO can be guaranteed to recieve enough DVAs simply by joining the 3763 * end of the chain and letting the sequence play out. 3764 */ 3765 3766 /* 3767 * Number of DVAs in the DDT entry. If the BP is encrypted we ignore 3768 * the third one as normal. 3769 */ 3770 int have_dvas = ddt_phys_dva_count(ddp, v, BP_IS_ENCRYPTED(bp)); 3771 IMPLY(have_dvas == 0, ddt_phys_birth(ddp, v) == 0); 3772 3773 /* Number of DVAs requested bya the IO. */ 3774 uint8_t need_dvas = zp->zp_copies; 3775 3776 /* 3777 * What we do next depends on whether or not there's IO outstanding that 3778 * will update this entry. 3779 */ 3780 if (dde->dde_io == NULL || dde->dde_io->dde_lead_zio[p] == NULL) { 3781 /* 3782 * No IO outstanding, so we only need to worry about ourselves. 3783 */ 3784 3785 /* 3786 * Override BPs bring their own DVAs and their own problems. 3787 */ 3788 if (zio->io_bp_override) { 3789 /* 3790 * For a brand-new entry, all the work has been done 3791 * for us, and we can just fill it out from the provided 3792 * block and leave. 3793 */ 3794 if (have_dvas == 0) { 3795 ASSERT(BP_GET_LOGICAL_BIRTH(bp) == txg); 3796 ASSERT(BP_EQUAL(bp, zio->io_bp_override)); 3797 ddt_phys_extend(ddp, v, bp); 3798 ddt_phys_addref(ddp, v); 3799 ddt_exit(ddt); 3800 return (zio); 3801 } 3802 3803 /* 3804 * If we already have this entry, then we want to treat 3805 * it like a regular write. To do this we just wipe 3806 * them out and proceed like a regular write. 3807 * 3808 * Even if there are some DVAs in the entry, we still 3809 * have to clear them out. We can't use them to fill 3810 * out the dedup entry, as they are all referenced 3811 * together by a bp already on disk, and will be freed 3812 * as a group. 
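 *
 * Hedged worked example of the shortfall path (illustrative numbers
 * only): with copies=3 (need_dvas = 3) and one DVA already in the
 * entry (have_dvas = 1), we clear the override DVAs here, fail the
 * have_dvas >= need_dvas test below, and issue a child write for the
 * remaining need_dvas - have_dvas = 2 copies.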
3813 */
3814 BP_ZERO_DVAS(bp);
3815 BP_SET_BIRTH(bp, 0, 0);
3816 }
3817
3818 /*
3819 * If there are enough DVAs in the entry to service our request,
3820 * then we can just use them as-is.
3821 */
3822 if (have_dvas >= need_dvas) {
3823 ddt_bp_fill(ddp, v, bp, txg);
3824 ddt_phys_addref(ddp, v);
3825 ddt_exit(ddt);
3826 return (zio);
3827 }
3828
3829 /*
3830 * Otherwise, we have to issue IO to fill the entry up to the
3831 * amount we need.
3832 */
3833 need_dvas -= have_dvas;
3834 } else {
3835 /*
3836 * There's a write in-flight. If there are already enough DVAs on
3837 * the entry, then either there were already enough to start
3838 * with, or the in-flight IO is between READY and DONE, and so
3839 * has extended the entry with new DVAs. Either way, we don't
3840 * need to do anything, we can just slot in behind it.
3841 */
3842
3843 if (zio->io_bp_override) {
3844 /*
3845 * If there's a write outstanding, then we're soon going to
3846 * have our own copies of this block, so clear out the
3847 * override block and treat it as a regular dedup
3848 * write. See comment above.
3849 */
3850 BP_ZERO_DVAS(bp);
3851 BP_SET_BIRTH(bp, 0, 0);
3852 }
3853
3854 if (have_dvas >= need_dvas) {
3855 /*
3856 * A minor point: there might already be enough
3857 * committed DVAs in the entry to service our request,
3858 * but we don't know which are completed and which are
3859 * allocated but not yet written. In this case, should
3860 * the IO for the new DVAs fail, we will be on the end
3861 * of the IO chain and will also receive an error, even
3862 * though our request could have been serviced.
3863 *
3864 * This is an extremely rare case, as it requires the
3865 * original block to be copied with a request for a
3866 * larger number of DVAs, then copied again requesting
3867 * the same (or already fulfilled) number of DVAs while
3868 * the first request is active, and then that first
3869 * request errors. In turn, the logic required to
3870 * catch and handle it is complex. For now, I'm just
3871 * not going to bother with it.
3872 */
3873
3874 /*
3875 * We always fill the bp here as we may have arrived
3876 * after the in-flight write has passed READY, and so
3877 * missed out.
3878 */
3879 ddt_bp_fill(ddp, v, bp, txg);
3880 zio_add_child(zio, dde->dde_io->dde_lead_zio[p]);
3881 ddt_exit(ddt);
3882 return (zio);
3883 }
3884
3885 /*
3886 * There's not enough in the entry yet, so we need to look at
3887 * the write in-flight and see how many DVAs it will have once
3888 * it completes.
3889 *
3890 * The in-flight write has potentially had its copies request
3891 * reduced (if we're filling out an existing entry), so we need
3892 * to reach in and get the original write to find out what it is
3893 * expecting.
3894 *
3895 * Note that the parent of the lead zio will always have the
3896 * highest zp_copies of any zio in the chain, because ones that
3897 * can be serviced without additional IO are always added to
3898 * the back of the chain.
3899 */
3900 zio_link_t *zl = NULL;
3901 zio_t *pio =
3902 zio_walk_parents(dde->dde_io->dde_lead_zio[p], &zl);
3903 ASSERT(pio);
3904 uint8_t parent_dvas = pio->io_prop.zp_copies;
3905
3906 if (parent_dvas >= need_dvas) {
3907 zio_add_child(zio, dde->dde_io->dde_lead_zio[p]);
3908 ddt_exit(ddt);
3909 return (zio);
3910 }
3911
3912 /*
3913 * Still not enough, so we will need to issue to get the
3914 * shortfall.
3915 */
3916 need_dvas -= parent_dvas;
3917 }
3918
3919 /*
3920 * We need to write. We will create a new write with the copies
3921 * property adjusted to match the number of DVAs we need to
3922 * grow the DDT entry by to satisfy the request.
3923 */
3924 zio_prop_t czp = *zp;
3925 czp.zp_copies = czp.zp_gang_copies = need_dvas;
3926 zio_t *cio = zio_write(zio, spa, txg, bp, zio->io_orig_abd,
3927 zio->io_orig_size, zio->io_orig_size, &czp,
3928 zio_ddt_child_write_ready, NULL,
3929 zio_ddt_child_write_done, dde, zio->io_priority,
3930 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark);
3931
3932 zio_push_transform(cio, zio->io_abd, zio->io_size, 0, NULL);
3933
3934 /*
3935 * We are the new lead zio, because our parent has the highest
3936 * zp_copies that has been requested for this entry so far.
3937 */
3938 ddt_alloc_entry_io(dde);
3939 if (dde->dde_io->dde_lead_zio[p] == NULL) {
3940 /*
3941 * First time out, take a copy of the stable entry to revert
3942 * to if there's an error (see zio_ddt_child_write_done())
3943 */
3944 ddt_phys_copy(&dde->dde_io->dde_orig_phys, dde->dde_phys, v);
3945 } else {
3946 /*
3947 * Make the existing chain our child, because it cannot
3948 * complete until we have.
3949 */
3950 zio_add_child(cio, dde->dde_io->dde_lead_zio[p]);
3951 }
3952 dde->dde_io->dde_lead_zio[p] = cio;
3953
3954 ddt_exit(ddt);
3955
3956 zio_nowait(cio);
3957
3958 return (zio);
3959 }
3960
3961 static ddt_entry_t *freedde; /* for debugging */
3962
3963 static zio_t *
3964 zio_ddt_free(zio_t *zio)
3965 {
3966 spa_t *spa = zio->io_spa;
3967 blkptr_t *bp = zio->io_bp;
3968 ddt_t *ddt = ddt_select(spa, bp);
3969 ddt_entry_t *dde = NULL;
3970
3971 ASSERT(BP_GET_DEDUP(bp));
3972 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
3973
3974 ddt_enter(ddt);
3975 freedde = dde = ddt_lookup(ddt, bp, B_TRUE);
3976 if (dde) {
3977 ddt_phys_variant_t v = ddt_phys_select(ddt, dde, bp);
3978 if (v != DDT_PHYS_NONE)
3979 ddt_phys_decref(dde->dde_phys, v);
3980 }
3981 ddt_exit(ddt);
3982
3983 /*
3984 * When no entry was found, it must have been pruned,
3985 * so we can free it now instead of decrementing the
3986 * refcount in the DDT.
3987 */
3988 if (!dde) {
3989 BP_SET_DEDUP(bp, 0);
3990 zio->io_pipeline |= ZIO_STAGE_DVA_FREE;
3991 }
3992
3993 return (zio);
3994 }
3995
3996 /*
3997 * ==========================================================================
3998 * Allocate and free blocks
3999 * ==========================================================================
4000 */
4001
4002 static zio_t *
4003 zio_io_to_allocate(metaslab_class_allocator_t *mca, boolean_t *more)
4004 {
4005 zio_t *zio;
4006
4007 ASSERT(MUTEX_HELD(&mca->mca_lock));
4008
4009 zio = avl_first(&mca->mca_tree);
4010 if (zio == NULL) {
4011 *more = B_FALSE;
4012 return (NULL);
4013 }
4014
4015 ASSERT(IO_IS_ALLOCATING(zio));
4016 ASSERT(ZIO_HAS_ALLOCATOR(zio));
4017
4018 /*
4019 * Try to place a reservation for this zio. If we're unable to
4020 * reserve then we throttle.
4021 */
4022 if (!metaslab_class_throttle_reserve(zio->io_metaslab_class,
4023 zio->io_prop.zp_copies, zio, B_FALSE, more)) {
4024 return (NULL);
4025 }
4026
4027 avl_remove(&mca->mca_tree, zio);
4028 ASSERT3U(zio->io_stage, <, ZIO_STAGE_DVA_ALLOCATE);
4029
4030 if (avl_is_empty(&mca->mca_tree))
4031 *more = B_FALSE;
4032 return (zio);
4033 }
4034
4035 static zio_t *
4036 zio_dva_throttle(zio_t *zio)
4037 {
4038 spa_t *spa = zio->io_spa;
4039 zio_t *nio;
4040 metaslab_class_t *mc;
4041 boolean_t more;
4042
4043 /*
4044 * If not already chosen, choose an appropriate allocation class.
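 * (Rough sketch of the choice, under common configurations:
 * spa_preferred_class() may route metadata and small blocks to a
 * special class when one is configured, dedup table data to a dedup
 * class, and everything else to the normal class; the result is
 * cached in io_metaslab_class below.)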
4045 */ 4046 mc = zio->io_metaslab_class; 4047 if (mc == NULL) 4048 mc = spa_preferred_class(spa, zio); 4049 4050 if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE || 4051 !mc->mc_alloc_throttle_enabled || 4052 zio->io_child_type == ZIO_CHILD_GANG || 4053 zio->io_flags & ZIO_FLAG_NODATA) { 4054 return (zio); 4055 } 4056 4057 ASSERT(zio->io_type == ZIO_TYPE_WRITE); 4058 ASSERT(ZIO_HAS_ALLOCATOR(zio)); 4059 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 4060 ASSERT3U(zio->io_queued_timestamp, >, 0); 4061 ASSERT(zio->io_stage == ZIO_STAGE_DVA_THROTTLE); 4062 4063 zio->io_metaslab_class = mc; 4064 metaslab_class_allocator_t *mca = &mc->mc_allocator[zio->io_allocator]; 4065 mutex_enter(&mca->mca_lock); 4066 avl_add(&mca->mca_tree, zio); 4067 nio = zio_io_to_allocate(mca, &more); 4068 mutex_exit(&mca->mca_lock); 4069 return (nio); 4070 } 4071 4072 static void 4073 zio_allocate_dispatch(metaslab_class_t *mc, int allocator) 4074 { 4075 metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator]; 4076 zio_t *zio; 4077 boolean_t more; 4078 4079 do { 4080 mutex_enter(&mca->mca_lock); 4081 zio = zio_io_to_allocate(mca, &more); 4082 mutex_exit(&mca->mca_lock); 4083 if (zio == NULL) 4084 return; 4085 4086 ASSERT3U(zio->io_stage, ==, ZIO_STAGE_DVA_THROTTLE); 4087 ASSERT0(zio->io_error); 4088 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_TRUE); 4089 } while (more); 4090 } 4091 4092 static zio_t * 4093 zio_dva_allocate(zio_t *zio) 4094 { 4095 spa_t *spa = zio->io_spa; 4096 metaslab_class_t *mc; 4097 blkptr_t *bp = zio->io_bp; 4098 int error; 4099 int flags = 0; 4100 4101 if (zio->io_gang_leader == NULL) { 4102 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 4103 zio->io_gang_leader = zio; 4104 } 4105 4106 ASSERT(BP_IS_HOLE(bp)); 4107 ASSERT0(BP_GET_NDVAS(bp)); 4108 ASSERT3U(zio->io_prop.zp_copies, >, 0); 4109 ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa)); 4110 ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp)); 4111 4112 if (zio->io_flags & ZIO_FLAG_GANG_CHILD) 4113 flags |= METASLAB_GANG_CHILD; 4114 if (zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE) 4115 flags |= METASLAB_ASYNC_ALLOC; 4116 4117 /* 4118 * If not already chosen, choose an appropriate allocation class. 4119 */ 4120 mc = zio->io_metaslab_class; 4121 if (mc == NULL) { 4122 mc = spa_preferred_class(spa, zio); 4123 zio->io_metaslab_class = mc; 4124 } 4125 ZIOSTAT_BUMP(ziostat_total_allocations); 4126 4127 again: 4128 /* 4129 * Try allocating the block in the usual metaslab class. 4130 * If that's full, allocate it in the normal class. 4131 * If that's full, allocate as a gang block, 4132 * and if all are full, the allocation fails (which shouldn't happen). 4133 * 4134 * Note that we do not fall back on embedded slog (ZIL) space, to 4135 * preserve unfragmented slog space, which is critical for decent 4136 * sync write performance. If a log allocation fails, we will fall 4137 * back to spa_sync() which is abysmal for performance. 4138 */ 4139 ASSERT(ZIO_HAS_ALLOCATOR(zio)); 4140 error = metaslab_alloc(spa, mc, zio->io_size, bp, 4141 zio->io_prop.zp_copies, zio->io_txg, NULL, flags, 4142 &zio->io_alloc_list, zio->io_allocator, zio); 4143 4144 /* 4145 * Fallback to normal class when an alloc class is full 4146 */ 4147 if (error == ENOSPC && mc != spa_normal_class(spa)) { 4148 /* 4149 * When the dedup or special class is spilling into the normal 4150 * class, there can still be significant space available due 4151 * to deferred frees that are in-flight. 
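 * (Assumed background, for context: frees deferred from earlier sync
 * passes are replayed during spa_sync(), so a class that looks full
 * here can regain space within a few txgs without any new writes.)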
 * We track the txg when
4152 * this occurred and back off adding new DDT entries for a few
4153 * txgs to allow the free blocks to be processed.
4154 */
4155 if ((mc == spa_dedup_class(spa) || (spa_special_has_ddt(spa) &&
4156 mc == spa_special_class(spa))) &&
4157 spa->spa_dedup_class_full_txg != zio->io_txg) {
4158 spa->spa_dedup_class_full_txg = zio->io_txg;
4159 zfs_dbgmsg("%s[%d]: %s class spilling, req size %d, "
4160 "%llu allocated of %llu",
4161 spa_name(spa), (int)zio->io_txg,
4162 mc == spa_dedup_class(spa) ? "dedup" : "special",
4163 (int)zio->io_size,
4164 (u_longlong_t)metaslab_class_get_alloc(mc),
4165 (u_longlong_t)metaslab_class_get_space(mc));
4166 }
4167
4168 /*
4169 * If we are holding an old class reservation, drop it.
4170 * Dispatch the next ZIO(s) there if some are waiting.
4171 */
4172 if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
4173 if (metaslab_class_throttle_unreserve(mc,
4174 zio->io_prop.zp_copies, zio)) {
4175 zio_allocate_dispatch(zio->io_metaslab_class,
4176 zio->io_allocator);
4177 }
4178 zio->io_flags &= ~ZIO_FLAG_IO_ALLOCATING;
4179 }
4180
4181 if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) {
4182 zfs_dbgmsg("%s: metaslab allocation failure, "
4183 "trying normal class: zio %px, size %llu, error %d",
4184 spa_name(spa), zio, (u_longlong_t)zio->io_size,
4185 error);
4186 }
4187 zio->io_metaslab_class = mc = spa_normal_class(spa);
4188 ZIOSTAT_BUMP(ziostat_alloc_class_fallbacks);
4189
4190 /*
4191 * If the normal class uses throttling, return to that pipeline
4192 * stage. Otherwise just do another allocation attempt.
4193 */
4194 if (zio->io_priority != ZIO_PRIORITY_SYNC_WRITE &&
4195 mc->mc_alloc_throttle_enabled &&
4196 zio->io_child_type != ZIO_CHILD_GANG &&
4197 !(zio->io_flags & ZIO_FLAG_NODATA)) {
4198 zio->io_stage = ZIO_STAGE_DVA_THROTTLE >> 1;
4199 return (zio);
4200 }
4201 goto again;
4202 }
4203
4204 if (error == ENOSPC && zio->io_size > spa->spa_min_alloc) {
4205 if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) {
4206 zfs_dbgmsg("%s: metaslab allocation failure, "
4207 "trying ganging: zio %px, size %llu, error %d",
4208 spa_name(spa), zio, (u_longlong_t)zio->io_size,
4209 error);
4210 }
4211 ZIOSTAT_BUMP(ziostat_gang_writes);
4212 if (flags & METASLAB_GANG_CHILD)
4213 ZIOSTAT_BUMP(ziostat_gang_multilevel);
4214 return (zio_write_gang_block(zio, mc));
4215 }
4216 if (error != 0) {
4217 if (error != ENOSPC ||
4218 (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC)) {
4219 zfs_dbgmsg("%s: metaslab allocation failure: zio %px, "
4220 "size %llu, error %d",
4221 spa_name(spa), zio, (u_longlong_t)zio->io_size,
4222 error);
4223 }
4224 zio->io_error = error;
4225 }
4226
4227 return (zio);
4228 }
4229
4230 static zio_t *
4231 zio_dva_free(zio_t *zio)
4232 {
4233 metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE);
4234
4235 return (zio);
4236 }
4237
4238 static zio_t *
4239 zio_dva_claim(zio_t *zio)
4240 {
4241 int error;
4242
4243 error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg);
4244 if (error)
4245 zio->io_error = error;
4246
4247 return (zio);
4248 }
4249
4250 /*
4251 * Undo an allocation. This is used by zio_done() when an I/O fails
4252 * and we want to give back the block we just allocated.
4253 * This handles both normal blocks and gang blocks.
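 * For gang blocks the walk below is recursive: the passed-in BP (the
 * gang header at the top level) is freed like any other, then each
 * gn_child[] is unallocated the same way, covering every constituent
 * blkptr in the gang tree.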
4254 */ 4255 static void 4256 zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp) 4257 { 4258 ASSERT(BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg || BP_IS_HOLE(bp)); 4259 ASSERT(zio->io_bp_override == NULL); 4260 4261 if (!BP_IS_HOLE(bp)) { 4262 metaslab_free(zio->io_spa, bp, BP_GET_LOGICAL_BIRTH(bp), 4263 B_TRUE); 4264 } 4265 4266 if (gn != NULL) { 4267 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 4268 zio_dva_unallocate(zio, gn->gn_child[g], 4269 &gn->gn_gbh->zg_blkptr[g]); 4270 } 4271 } 4272 } 4273 4274 /* 4275 * Try to allocate an intent log block. Return 0 on success, errno on failure. 4276 */ 4277 int 4278 zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg, blkptr_t *new_bp, 4279 uint64_t size, boolean_t *slog) 4280 { 4281 int error = 1; 4282 zio_alloc_list_t io_alloc_list; 4283 4284 ASSERT(txg > spa_syncing_txg(spa)); 4285 4286 metaslab_trace_init(&io_alloc_list); 4287 4288 /* 4289 * Block pointer fields are useful to metaslabs for stats and debugging. 4290 * Fill in the obvious ones before calling into metaslab_alloc(). 4291 */ 4292 BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG); 4293 BP_SET_PSIZE(new_bp, size); 4294 BP_SET_LEVEL(new_bp, 0); 4295 4296 /* 4297 * When allocating a zil block, we don't have information about 4298 * the final destination of the block except the objset it's part 4299 * of, so we just hash the objset ID to pick the allocator to get 4300 * some parallelism. 4301 */ 4302 int flags = METASLAB_ZIL; 4303 int allocator = (uint_t)cityhash1(os->os_dsl_dataset->ds_object) 4304 % spa->spa_alloc_count; 4305 ZIOSTAT_BUMP(ziostat_total_allocations); 4306 error = metaslab_alloc(spa, spa_log_class(spa), size, new_bp, 1, 4307 txg, NULL, flags, &io_alloc_list, allocator, NULL); 4308 *slog = (error == 0); 4309 if (error != 0) { 4310 error = metaslab_alloc(spa, spa_embedded_log_class(spa), size, 4311 new_bp, 1, txg, NULL, flags, &io_alloc_list, allocator, 4312 NULL); 4313 } 4314 if (error != 0) { 4315 ZIOSTAT_BUMP(ziostat_alloc_class_fallbacks); 4316 error = metaslab_alloc(spa, spa_normal_class(spa), size, 4317 new_bp, 1, txg, NULL, flags, &io_alloc_list, allocator, 4318 NULL); 4319 } 4320 metaslab_trace_fini(&io_alloc_list); 4321 4322 if (error == 0) { 4323 BP_SET_LSIZE(new_bp, size); 4324 BP_SET_PSIZE(new_bp, size); 4325 BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF); 4326 BP_SET_CHECKSUM(new_bp, 4327 spa_version(spa) >= SPA_VERSION_SLIM_ZIL 4328 ? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG); 4329 BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG); 4330 BP_SET_LEVEL(new_bp, 0); 4331 BP_SET_DEDUP(new_bp, 0); 4332 BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER); 4333 4334 /* 4335 * encrypted blocks will require an IV and salt. We generate 4336 * these now since we will not be rewriting the bp at 4337 * rewrite time. 4338 */ 4339 if (os->os_encrypted) { 4340 uint8_t iv[ZIO_DATA_IV_LEN]; 4341 uint8_t salt[ZIO_DATA_SALT_LEN]; 4342 4343 BP_SET_CRYPT(new_bp, B_TRUE); 4344 VERIFY0(spa_crypt_get_salt(spa, 4345 dmu_objset_id(os), salt)); 4346 VERIFY0(zio_crypt_generate_iv(iv)); 4347 4348 zio_crypt_encode_params_bp(new_bp, salt, iv); 4349 } 4350 } else { 4351 zfs_dbgmsg("%s: zil block allocation failure: " 4352 "size %llu, error %d", spa_name(spa), (u_longlong_t)size, 4353 error); 4354 } 4355 4356 return (error); 4357 } 4358 4359 /* 4360 * ========================================================================== 4361 * Read and write to physical devices 4362 * ========================================================================== 4363 */ 4364 4365 /* 4366 * Issue an I/O to the underlying vdev. 
Typically the issue pipeline 4367 * stops after this stage and will resume upon I/O completion. 4368 * However, there are instances where the vdev layer may need to 4369 * continue the pipeline when an I/O was not issued. Since the I/O 4370 * that was sent to the vdev layer might be different than the one 4371 * currently active in the pipeline (see vdev_queue_io()), we explicitly 4372 * force the underlying vdev layers to call either zio_execute() or 4373 * zio_interrupt() to ensure that the pipeline continues with the correct I/O. 4374 */ 4375 static zio_t * 4376 zio_vdev_io_start(zio_t *zio) 4377 { 4378 vdev_t *vd = zio->io_vd; 4379 uint64_t align; 4380 spa_t *spa = zio->io_spa; 4381 4382 zio->io_delay = 0; 4383 4384 ASSERT(zio->io_error == 0); 4385 ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0); 4386 4387 if (vd == NULL) { 4388 if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) 4389 spa_config_enter(spa, SCL_ZIO, zio, RW_READER); 4390 4391 /* 4392 * The mirror_ops handle multiple DVAs in a single BP. 4393 */ 4394 vdev_mirror_ops.vdev_op_io_start(zio); 4395 return (NULL); 4396 } 4397 4398 ASSERT3P(zio->io_logical, !=, zio); 4399 if (zio->io_type == ZIO_TYPE_WRITE) { 4400 ASSERT(spa->spa_trust_config); 4401 4402 /* 4403 * Note: the code can handle other kinds of writes, 4404 * but we don't expect them. 4405 */ 4406 if (zio->io_vd->vdev_noalloc) { 4407 ASSERT(zio->io_flags & 4408 (ZIO_FLAG_PHYSICAL | ZIO_FLAG_SELF_HEAL | 4409 ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE)); 4410 } 4411 } 4412 4413 align = 1ULL << vd->vdev_top->vdev_ashift; 4414 4415 if (!(zio->io_flags & ZIO_FLAG_PHYSICAL) && 4416 P2PHASE(zio->io_size, align) != 0) { 4417 /* Transform logical writes to be a full physical block size. */ 4418 uint64_t asize = P2ROUNDUP(zio->io_size, align); 4419 abd_t *abuf = abd_alloc_sametype(zio->io_abd, asize); 4420 ASSERT(vd == vd->vdev_top); 4421 if (zio->io_type == ZIO_TYPE_WRITE) { 4422 abd_copy(abuf, zio->io_abd, zio->io_size); 4423 abd_zero_off(abuf, zio->io_size, asize - zio->io_size); 4424 } 4425 zio_push_transform(zio, abuf, asize, asize, zio_subblock); 4426 } 4427 4428 /* 4429 * If this is not a physical io, make sure that it is properly aligned 4430 * before proceeding. 4431 */ 4432 if (!(zio->io_flags & ZIO_FLAG_PHYSICAL)) { 4433 ASSERT0(P2PHASE(zio->io_offset, align)); 4434 ASSERT0(P2PHASE(zio->io_size, align)); 4435 } else { 4436 /* 4437 * For physical writes, we allow 512b aligned writes and assume 4438 * the device will perform a read-modify-write as necessary. 4439 */ 4440 ASSERT0(P2PHASE(zio->io_offset, SPA_MINBLOCKSIZE)); 4441 ASSERT0(P2PHASE(zio->io_size, SPA_MINBLOCKSIZE)); 4442 } 4443 4444 VERIFY(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa)); 4445 4446 /* 4447 * If this is a repair I/O, and there's no self-healing involved -- 4448 * that is, we're just resilvering what we expect to resilver -- 4449 * then don't do the I/O unless zio's txg is actually in vd's DTL. 4450 * This prevents spurious resilvering. 4451 * 4452 * There are a few ways that we can end up creating these spurious 4453 * resilver i/os: 4454 * 4455 * 1. A resilver i/o will be issued if any DVA in the BP has a 4456 * dirty DTL. The mirror code will issue resilver writes to 4457 * each DVA, including the one(s) that are not on vdevs with dirty 4458 * DTLs. 4459 * 4460 * 2. With nested replication, which happens when we have a 4461 * "replacing" or "spare" vdev that's a child of a mirror or raidz. 
4462 * For example, given mirror(replacing(A+B), C), it's likely that
4463 * only A is out of date (it's the new device). In this case, we'll
4464 * read from C, then use the data to resilver A+B -- but we don't
4465 * actually want to resilver B, just A. The top-level mirror has no
4466 * way to know this, so instead we just discard unnecessary repairs
4467 * as we work our way down the vdev tree.
4468 *
4469 * 3. ZTEST also creates mirrors of mirrors, mirrors of raidz, etc.
4470 * The same logic applies to any form of nested replication: ditto
4471 * + mirror, RAID-Z + replacing, etc.
4472 *
4473 * However, indirect vdevs point off to other vdevs which may have
4474 * DTLs, so we never bypass them. The child i/os on concrete vdevs
4475 * will be properly bypassed instead.
4476 *
4477 * Leaf DTL_PARTIAL can be empty when a legitimate write comes from
4478 * a dRAID spare vdev. For example, when a dRAID spare is first
4479 * used, its spare blocks need to be written to but the leaf vdevs
4480 * of such blocks can have an empty DTL_PARTIAL.
4481 *
4482 * There seemed to be no clean way to allow such writes while bypassing
4483 * spurious ones. At this point, just avoid all bypassing for dRAID
4484 * for correctness.
4485 */
4486 if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) &&
4487 !(zio->io_flags & ZIO_FLAG_SELF_HEAL) &&
4488 zio->io_txg != 0 && /* not a delegated i/o */
4489 vd->vdev_ops != &vdev_indirect_ops &&
4490 vd->vdev_top->vdev_ops != &vdev_draid_ops &&
4491 !vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) {
4492 ASSERT(zio->io_type == ZIO_TYPE_WRITE);
4493 zio_vdev_io_bypass(zio);
4494 return (zio);
4495 }
4496
4497 /*
4498 * Select the next best leaf I/O to process. Distributed spares are
4499 * excluded since they dispatch the I/O directly to a leaf vdev after
4500 * applying the dRAID mapping.
4501 */
4502 if (vd->vdev_ops->vdev_op_leaf &&
4503 vd->vdev_ops != &vdev_draid_spare_ops &&
4504 (zio->io_type == ZIO_TYPE_READ ||
4505 zio->io_type == ZIO_TYPE_WRITE ||
4506 zio->io_type == ZIO_TYPE_TRIM)) {
4507
4508 if ((zio = vdev_queue_io(zio)) == NULL)
4509 return (NULL);
4510
4511 if (!vdev_accessible(vd, zio)) {
4512 zio->io_error = SET_ERROR(ENXIO);
4513 zio_interrupt(zio);
4514 return (NULL);
4515 }
4516 zio->io_delay = gethrtime();
4517
4518 if (zio_handle_device_injection(vd, zio, ENOSYS) != 0) {
4519 /*
4520 * "no-op" injections return success, but do no actual
4521 * work. Just return it.
4522 */
4523 zio_delay_interrupt(zio);
4524 return (NULL);
4525 }
4526 }
4527
4528 vd->vdev_ops->vdev_op_io_start(zio);
4529 return (NULL);
4530 }
4531
4532 static zio_t *
4533 zio_vdev_io_done(zio_t *zio)
4534 {
4535 vdev_t *vd = zio->io_vd;
4536 vdev_ops_t *ops = vd ?
vd->vdev_ops : &vdev_mirror_ops; 4537 boolean_t unexpected_error = B_FALSE; 4538 4539 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) { 4540 return (NULL); 4541 } 4542 4543 ASSERT(zio->io_type == ZIO_TYPE_READ || 4544 zio->io_type == ZIO_TYPE_WRITE || 4545 zio->io_type == ZIO_TYPE_FLUSH || 4546 zio->io_type == ZIO_TYPE_TRIM); 4547 4548 if (zio->io_delay) 4549 zio->io_delay = gethrtime() - zio->io_delay; 4550 4551 if (vd != NULL && vd->vdev_ops->vdev_op_leaf && 4552 vd->vdev_ops != &vdev_draid_spare_ops) { 4553 if (zio->io_type != ZIO_TYPE_FLUSH) 4554 vdev_queue_io_done(zio); 4555 4556 if (zio_injection_enabled && zio->io_error == 0) 4557 zio->io_error = zio_handle_device_injections(vd, zio, 4558 EIO, EILSEQ); 4559 4560 if (zio_injection_enabled && zio->io_error == 0) 4561 zio->io_error = zio_handle_label_injection(zio, EIO); 4562 4563 if (zio->io_error && zio->io_type != ZIO_TYPE_FLUSH && 4564 zio->io_type != ZIO_TYPE_TRIM) { 4565 if (!vdev_accessible(vd, zio)) { 4566 zio->io_error = SET_ERROR(ENXIO); 4567 } else { 4568 unexpected_error = B_TRUE; 4569 } 4570 } 4571 } 4572 4573 ops->vdev_op_io_done(zio); 4574 4575 if (unexpected_error && vd->vdev_remove_wanted == B_FALSE) 4576 VERIFY(vdev_probe(vd, zio) == NULL); 4577 4578 return (zio); 4579 } 4580 4581 /* 4582 * This function is used to change the priority of an existing zio that is 4583 * currently in-flight. This is used by the arc to upgrade priority in the 4584 * event that a demand read is made for a block that is currently queued 4585 * as a scrub or async read IO. Otherwise, the high priority read request 4586 * would end up having to wait for the lower priority IO. 4587 */ 4588 void 4589 zio_change_priority(zio_t *pio, zio_priority_t priority) 4590 { 4591 zio_t *cio, *cio_next; 4592 zio_link_t *zl = NULL; 4593 4594 ASSERT3U(priority, <, ZIO_PRIORITY_NUM_QUEUEABLE); 4595 4596 if (pio->io_vd != NULL && pio->io_vd->vdev_ops->vdev_op_leaf) { 4597 vdev_queue_change_io_priority(pio, priority); 4598 } else { 4599 pio->io_priority = priority; 4600 } 4601 4602 mutex_enter(&pio->io_lock); 4603 for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) { 4604 cio_next = zio_walk_children(pio, &zl); 4605 zio_change_priority(cio, priority); 4606 } 4607 mutex_exit(&pio->io_lock); 4608 } 4609 4610 /* 4611 * For non-raidz ZIOs, we can just copy aside the bad data read from the 4612 * disk, and use that to finish the checksum ereport later. 
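 * (The copy is attached to the report as zcr_cbdata, with its size in
 * zcr_cbinfo, and is released via zcr_free (zio_abd_free) after
 * zcr_finish runs; see zio_vsd_default_cksum_report() below.)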
4613 */ 4614 static void 4615 zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr, 4616 const abd_t *good_buf) 4617 { 4618 /* no processing needed */ 4619 zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE); 4620 } 4621 4622 void 4623 zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr) 4624 { 4625 void *abd = abd_alloc_sametype(zio->io_abd, zio->io_size); 4626 4627 abd_copy(abd, zio->io_abd, zio->io_size); 4628 4629 zcr->zcr_cbinfo = zio->io_size; 4630 zcr->zcr_cbdata = abd; 4631 zcr->zcr_finish = zio_vsd_default_cksum_finish; 4632 zcr->zcr_free = zio_abd_free; 4633 } 4634 4635 static zio_t * 4636 zio_vdev_io_assess(zio_t *zio) 4637 { 4638 vdev_t *vd = zio->io_vd; 4639 4640 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) { 4641 return (NULL); 4642 } 4643 4644 if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) 4645 spa_config_exit(zio->io_spa, SCL_ZIO, zio); 4646 4647 if (zio->io_vsd != NULL) { 4648 zio->io_vsd_ops->vsd_free(zio); 4649 zio->io_vsd = NULL; 4650 } 4651 4652 /* 4653 * If a Direct I/O operation has a checksum verify error then this I/O 4654 * should not attempt to be issued again. 4655 */ 4656 if (zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR) { 4657 if (zio->io_type == ZIO_TYPE_WRITE) { 4658 ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_LOGICAL); 4659 ASSERT3U(zio->io_error, ==, EIO); 4660 } 4661 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 4662 return (zio); 4663 } 4664 4665 if (zio_injection_enabled && zio->io_error == 0) 4666 zio->io_error = zio_handle_fault_injection(zio, EIO); 4667 4668 /* 4669 * If the I/O failed, determine whether we should attempt to retry it. 4670 * 4671 * On retry, we cut in line in the issue queue, since we don't want 4672 * compression/checksumming/etc. work to prevent our (cheap) IO reissue. 4673 */ 4674 if (zio->io_error && vd == NULL && 4675 !(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) { 4676 ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE)); /* not a leaf */ 4677 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS)); /* not a leaf */ 4678 zio->io_error = 0; 4679 zio->io_flags |= ZIO_FLAG_IO_RETRY | ZIO_FLAG_DONT_AGGREGATE; 4680 zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1; 4681 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, 4682 zio_requeue_io_start_cut_in_line); 4683 return (NULL); 4684 } 4685 4686 /* 4687 * If we got an error on a leaf device, convert it to ENXIO 4688 * if the device is not accessible at all. 4689 */ 4690 if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf && 4691 !vdev_accessible(vd, zio)) 4692 zio->io_error = SET_ERROR(ENXIO); 4693 4694 /* 4695 * If we can't write to an interior vdev (mirror or RAID-Z), 4696 * set vdev_cant_write so that we stop trying to allocate from it. 4697 */ 4698 if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE && 4699 vd != NULL && !vd->vdev_ops->vdev_op_leaf) { 4700 vdev_dbgmsg(vd, "zio_vdev_io_assess(zio=%px) setting " 4701 "cant_write=TRUE due to write failure with ENXIO", 4702 zio); 4703 vd->vdev_cant_write = B_TRUE; 4704 } 4705 4706 /* 4707 * If a cache flush returns ENOTSUP we know that no future 4708 * attempts will ever succeed. In this case we set a persistent 4709 * boolean flag so that we don't bother with it in the future, and 4710 * then we act like the flush succeeded. 
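 * (The flag is vdev_nowritecache, set just below. Once latched, the
 * ENOTSUP is rewritten to success here; later flush decisions for this
 * vdev are expected to consult the flag rather than reissue the
 * command, though exactly where that happens is vdev-specific.)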
4711 */ 4712 if (zio->io_error == ENOTSUP && zio->io_type == ZIO_TYPE_FLUSH && 4713 vd != NULL) { 4714 vd->vdev_nowritecache = B_TRUE; 4715 zio->io_error = 0; 4716 } 4717 4718 if (zio->io_error) 4719 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 4720 4721 return (zio); 4722 } 4723 4724 void 4725 zio_vdev_io_reissue(zio_t *zio) 4726 { 4727 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); 4728 ASSERT(zio->io_error == 0); 4729 4730 zio->io_stage >>= 1; 4731 } 4732 4733 void 4734 zio_vdev_io_redone(zio_t *zio) 4735 { 4736 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE); 4737 4738 zio->io_stage >>= 1; 4739 } 4740 4741 void 4742 zio_vdev_io_bypass(zio_t *zio) 4743 { 4744 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); 4745 ASSERT(zio->io_error == 0); 4746 4747 zio->io_flags |= ZIO_FLAG_IO_BYPASS; 4748 zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1; 4749 } 4750 4751 /* 4752 * ========================================================================== 4753 * Encrypt and store encryption parameters 4754 * ========================================================================== 4755 */ 4756 4757 4758 /* 4759 * This function is used for ZIO_STAGE_ENCRYPT. It is responsible for 4760 * managing the storage of encryption parameters and passing them to the 4761 * lower-level encryption functions. 4762 */ 4763 static zio_t * 4764 zio_encrypt(zio_t *zio) 4765 { 4766 zio_prop_t *zp = &zio->io_prop; 4767 spa_t *spa = zio->io_spa; 4768 blkptr_t *bp = zio->io_bp; 4769 uint64_t psize = BP_GET_PSIZE(bp); 4770 uint64_t dsobj = zio->io_bookmark.zb_objset; 4771 dmu_object_type_t ot = BP_GET_TYPE(bp); 4772 void *enc_buf = NULL; 4773 abd_t *eabd = NULL; 4774 uint8_t salt[ZIO_DATA_SALT_LEN]; 4775 uint8_t iv[ZIO_DATA_IV_LEN]; 4776 uint8_t mac[ZIO_DATA_MAC_LEN]; 4777 boolean_t no_crypt = B_FALSE; 4778 4779 /* the root zio already encrypted the data */ 4780 if (zio->io_child_type == ZIO_CHILD_GANG) 4781 return (zio); 4782 4783 /* only ZIL blocks are re-encrypted on rewrite */ 4784 if (!IO_IS_ALLOCATING(zio) && ot != DMU_OT_INTENT_LOG) 4785 return (zio); 4786 4787 if (!(zp->zp_encrypt || BP_IS_ENCRYPTED(bp))) { 4788 BP_SET_CRYPT(bp, B_FALSE); 4789 return (zio); 4790 } 4791 4792 /* if we are doing raw encryption set the provided encryption params */ 4793 if (zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) { 4794 ASSERT0(BP_GET_LEVEL(bp)); 4795 BP_SET_CRYPT(bp, B_TRUE); 4796 BP_SET_BYTEORDER(bp, zp->zp_byteorder); 4797 if (ot != DMU_OT_OBJSET) 4798 zio_crypt_encode_mac_bp(bp, zp->zp_mac); 4799 4800 /* dnode blocks must be written out in the provided byteorder */ 4801 if (zp->zp_byteorder != ZFS_HOST_BYTEORDER && 4802 ot == DMU_OT_DNODE) { 4803 void *bswap_buf = zio_buf_alloc(psize); 4804 abd_t *babd = abd_get_from_buf(bswap_buf, psize); 4805 4806 ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF); 4807 abd_copy_to_buf(bswap_buf, zio->io_abd, psize); 4808 dmu_ot_byteswap[DMU_OT_BYTESWAP(ot)].ob_func(bswap_buf, 4809 psize); 4810 4811 abd_take_ownership_of_buf(babd, B_TRUE); 4812 zio_push_transform(zio, babd, psize, psize, NULL); 4813 } 4814 4815 if (DMU_OT_IS_ENCRYPTED(ot)) 4816 zio_crypt_encode_params_bp(bp, zp->zp_salt, zp->zp_iv); 4817 return (zio); 4818 } 4819 4820 /* indirect blocks only maintain a cksum of the lower level MACs */ 4821 if (BP_GET_LEVEL(bp) > 0) { 4822 BP_SET_CRYPT(bp, B_TRUE); 4823 VERIFY0(zio_crypt_do_indirect_mac_checksum_abd(B_TRUE, 4824 zio->io_orig_abd, BP_GET_LSIZE(bp), BP_SHOULD_BYTESWAP(bp), 4825 mac)); 4826 zio_crypt_encode_mac_bp(bp, mac); 4827 return (zio); 4828 } 4829 4830 /* 4831 * Objset blocks are 
a special case since they have 2 256-bit MACs 4832 * embedded within them. 4833 */ 4834 if (ot == DMU_OT_OBJSET) { 4835 ASSERT0(DMU_OT_IS_ENCRYPTED(ot)); 4836 ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF); 4837 BP_SET_CRYPT(bp, B_TRUE); 4838 VERIFY0(spa_do_crypt_objset_mac_abd(B_TRUE, spa, dsobj, 4839 zio->io_abd, psize, BP_SHOULD_BYTESWAP(bp))); 4840 return (zio); 4841 } 4842 4843 /* unencrypted object types are only authenticated with a MAC */ 4844 if (!DMU_OT_IS_ENCRYPTED(ot)) { 4845 BP_SET_CRYPT(bp, B_TRUE); 4846 VERIFY0(spa_do_crypt_mac_abd(B_TRUE, spa, dsobj, 4847 zio->io_abd, psize, mac)); 4848 zio_crypt_encode_mac_bp(bp, mac); 4849 return (zio); 4850 } 4851 4852 /* 4853 * Later passes of sync-to-convergence may decide to rewrite data 4854 * in place to avoid more disk reallocations. This presents a problem 4855 * for encryption because this constitutes rewriting the new data with 4856 * the same encryption key and IV. However, this only applies to blocks 4857 * in the MOS (particularly the spacemaps) and we do not encrypt the 4858 * MOS. We assert that the zio is allocating or an intent log write 4859 * to enforce this. 4860 */ 4861 ASSERT(IO_IS_ALLOCATING(zio) || ot == DMU_OT_INTENT_LOG); 4862 ASSERT(BP_GET_LEVEL(bp) == 0 || ot == DMU_OT_INTENT_LOG); 4863 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_ENCRYPTION)); 4864 ASSERT3U(psize, !=, 0); 4865 4866 enc_buf = zio_buf_alloc(psize); 4867 eabd = abd_get_from_buf(enc_buf, psize); 4868 abd_take_ownership_of_buf(eabd, B_TRUE); 4869 4870 /* 4871 * For an explanation of what encryption parameters are stored 4872 * where, see the block comment in zio_crypt.c. 4873 */ 4874 if (ot == DMU_OT_INTENT_LOG) { 4875 zio_crypt_decode_params_bp(bp, salt, iv); 4876 } else { 4877 BP_SET_CRYPT(bp, B_TRUE); 4878 } 4879 4880 /* Perform the encryption. This should not fail */ 4881 VERIFY0(spa_do_crypt_abd(B_TRUE, spa, &zio->io_bookmark, 4882 BP_GET_TYPE(bp), BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp), 4883 salt, iv, mac, psize, zio->io_abd, eabd, &no_crypt)); 4884 4885 /* encode encryption metadata into the bp */ 4886 if (ot == DMU_OT_INTENT_LOG) { 4887 /* 4888 * ZIL blocks store the MAC in the embedded checksum, so the 4889 * transform must always be applied. 4890 */ 4891 zio_crypt_encode_mac_zil(enc_buf, mac); 4892 zio_push_transform(zio, eabd, psize, psize, NULL); 4893 } else { 4894 BP_SET_CRYPT(bp, B_TRUE); 4895 zio_crypt_encode_params_bp(bp, salt, iv); 4896 zio_crypt_encode_mac_bp(bp, mac); 4897 4898 if (no_crypt) { 4899 ASSERT3U(ot, ==, DMU_OT_DNODE); 4900 abd_free(eabd); 4901 } else { 4902 zio_push_transform(zio, eabd, psize, psize, NULL); 4903 } 4904 } 4905 4906 return (zio); 4907 } 4908 4909 /* 4910 * ========================================================================== 4911 * Generate and verify checksums 4912 * ========================================================================== 4913 */ 4914 static zio_t * 4915 zio_checksum_generate(zio_t *zio) 4916 { 4917 blkptr_t *bp = zio->io_bp; 4918 enum zio_checksum checksum; 4919 4920 if (bp == NULL) { 4921 /* 4922 * This is zio_write_phys(). 4923 * We're either generating a label checksum, or none at all. 
4924 */
4925 checksum = zio->io_prop.zp_checksum;
4926
4927 if (checksum == ZIO_CHECKSUM_OFF)
4928 return (zio);
4929
4930 ASSERT(checksum == ZIO_CHECKSUM_LABEL);
4931 } else {
4932 if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) {
4933 ASSERT(!IO_IS_ALLOCATING(zio));
4934 checksum = ZIO_CHECKSUM_GANG_HEADER;
4935 } else {
4936 checksum = BP_GET_CHECKSUM(bp);
4937 }
4938 }
4939
4940 zio_checksum_compute(zio, checksum, zio->io_abd, zio->io_size);
4941
4942 return (zio);
4943 }
4944
4945 static zio_t *
4946 zio_checksum_verify(zio_t *zio)
4947 {
4948 zio_bad_cksum_t info;
4949 blkptr_t *bp = zio->io_bp;
4950 int error;
4951
4952 ASSERT(zio->io_vd != NULL);
4953
4954 if (bp == NULL) {
4955 /*
4956 * This is zio_read_phys().
4957 * We're either verifying a label checksum, or nothing at all.
4958 */
4959 if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF)
4960 return (zio);
4961
4962 ASSERT3U(zio->io_prop.zp_checksum, ==, ZIO_CHECKSUM_LABEL);
4963 }
4964
4965 ASSERT0(zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR);
4966 IMPLY(zio->io_flags & ZIO_FLAG_DIO_READ,
4967 !(zio->io_flags & ZIO_FLAG_SPECULATIVE));
4968
4969 if ((error = zio_checksum_error(zio, &info)) != 0) {
4970 zio->io_error = error;
4971 if (error == ECKSUM &&
4972 !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
4973 if (zio->io_flags & ZIO_FLAG_DIO_READ) {
4974 zio->io_flags |= ZIO_FLAG_DIO_CHKSUM_ERR;
4975 zio_t *pio = zio_unique_parent(zio);
4976 /*
4977 * Any Direct I/O read that has a checksum
4978 * error must be treated as suspicious, as the
4979 * contents of the buffer could be getting
4980 * manipulated while the I/O is taking place.
4981 *
4982 * The checksum verify error will only be
4983 * reported here for disk and file vdevs, and
4984 * will be reported on those that the failure
4985 * occurred on. Other types of vdevs report the
4986 * verify failure in their own code paths.
4987 */
4988 if (pio->io_child_type == ZIO_CHILD_LOGICAL) {
4989 zio_dio_chksum_verify_error_report(zio);
4990 }
4991 } else {
4992 mutex_enter(&zio->io_vd->vdev_stat_lock);
4993 zio->io_vd->vdev_stat.vs_checksum_errors++;
4994 mutex_exit(&zio->io_vd->vdev_stat_lock);
4995 (void) zfs_ereport_start_checksum(zio->io_spa,
4996 zio->io_vd, &zio->io_bookmark, zio,
4997 zio->io_offset, zio->io_size, &info);
4998 }
4999 }
5000 }
5001
5002 return (zio);
5003 }
5004
5005 static zio_t *
5006 zio_dio_checksum_verify(zio_t *zio)
5007 {
5008 zio_t *pio = zio_unique_parent(zio);
5009 int error;
5010
5011 ASSERT3P(zio->io_vd, !=, NULL);
5012 ASSERT3P(zio->io_bp, !=, NULL);
5013 ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
5014 ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
5015 ASSERT3B(pio->io_prop.zp_direct_write, ==, B_TRUE);
5016 ASSERT3U(pio->io_child_type, ==, ZIO_CHILD_LOGICAL);
5017
5018 if (zfs_vdev_direct_write_verify == 0 || zio->io_error != 0)
5019 goto out;
5020
5021 if ((error = zio_checksum_error(zio, NULL)) != 0) {
5022 zio->io_error = error;
5023 if (error == ECKSUM) {
5024 zio->io_flags |= ZIO_FLAG_DIO_CHKSUM_ERR;
5025 zio_dio_chksum_verify_error_report(zio);
5026 }
5027 }
5028
5029 out:
5030 return (zio);
5031 }
5032
5033
5034 /*
5035 * Called by RAID-Z to ensure we don't compute the checksum twice.
5036 */
5037 void
5038 zio_checksum_verified(zio_t *zio)
5039 {
5040 zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
5041 }
5042
5043 /*
5044 * Report Direct I/O checksum verify error and create ZED event.
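 * (Summary of the function below: both cases bump vs_dio_verify_errors
 * on the vdev; writes are additionally converted to EIO and post a
 * dio_verify_wr event, while reads post dio_verify_rd.)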
5045 */
5046 void
5047 zio_dio_chksum_verify_error_report(zio_t *zio)
5048 {
5049 ASSERT(zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR);
5050
5051 if (zio->io_child_type == ZIO_CHILD_LOGICAL)
5052 return;
5053
5054 mutex_enter(&zio->io_vd->vdev_stat_lock);
5055 zio->io_vd->vdev_stat.vs_dio_verify_errors++;
5056 mutex_exit(&zio->io_vd->vdev_stat_lock);
5057 if (zio->io_type == ZIO_TYPE_WRITE) {
5058 /*
5059 * Convert checksum error for writes into EIO.
5060 */
5061 zio->io_error = SET_ERROR(EIO);
5062 /*
5063 * Report dio_verify_wr ZED event.
5064 */
5065 (void) zfs_ereport_post(FM_EREPORT_ZFS_DIO_VERIFY_WR,
5066 zio->io_spa, zio->io_vd, &zio->io_bookmark, zio, 0);
5067 } else {
5068 /*
5069 * Report dio_verify_rd ZED event.
5070 */
5071 (void) zfs_ereport_post(FM_EREPORT_ZFS_DIO_VERIFY_RD,
5072 zio->io_spa, zio->io_vd, &zio->io_bookmark, zio, 0);
5073 }
5074 }
5075
5076 /*
5077 * ==========================================================================
5078 * Error rank. Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other.
5079 * An error of 0 indicates success. ENXIO indicates whole-device failure,
5080 * which may be transient (e.g. unplugged) or permanent. ECKSUM and EIO
5081 * indicate errors that are specific to one I/O, and most likely permanent.
5082 * Any other error is presumed to be worse because we weren't expecting it.
5083 * ==========================================================================
5084 */
5085 int
5086 zio_worst_error(int e1, int e2)
5087 {
5088 static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO };
5089 int r1, r2;
5090
5091 for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++)
5092 if (e1 == zio_error_rank[r1])
5093 break;
5094
5095 for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++)
5096 if (e2 == zio_error_rank[r2])
5097 break;
5098
5099 return (r1 > r2 ? e1 : e2);
5100 }
5101
5102 /*
5103 * ==========================================================================
5104 * I/O completion
5105 * ==========================================================================
5106 */
5107 static zio_t *
5108 zio_ready(zio_t *zio)
5109 {
5110 blkptr_t *bp = zio->io_bp;
5111 zio_t *pio, *pio_next;
5112 zio_link_t *zl = NULL;
5113
5114 if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
5115 ZIO_CHILD_GANG_BIT | ZIO_CHILD_DDT_BIT, ZIO_WAIT_READY)) {
5116 return (NULL);
5117 }
5118
5119 if (zio->io_ready) {
5120 ASSERT(IO_IS_ALLOCATING(zio));
5121 ASSERT(BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg ||
5122 BP_IS_HOLE(bp) || (zio->io_flags & ZIO_FLAG_NOPWRITE));
5123 ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0);
5124
5125 zio->io_ready(zio);
5126 }
5127
5128 #ifdef ZFS_DEBUG
5129 if (bp != NULL && bp != &zio->io_bp_copy)
5130 zio->io_bp_copy = *bp;
5131 #endif
5132
5133 if (zio->io_error != 0) {
5134 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
5135
5136 if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
5137 ASSERT(IO_IS_ALLOCATING(zio));
5138 ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
5139 ASSERT(zio->io_metaslab_class != NULL);
5140 ASSERT(ZIO_HAS_ALLOCATOR(zio));
5141
5142 /*
5143 * We were unable to allocate anything, so unreserve and
5144 * issue the next I/O to allocate.
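 * (Sketch of the handoff, per zio_io_to_allocate() above: when
 * metaslab_class_throttle_unreserve() frees enough reservation to admit
 * a waiter, zio_allocate_dispatch() pops the next zio from the
 * allocator's AVL tree and re-dispatches it to the issue taskq.)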
5145 */ 5146 if (metaslab_class_throttle_unreserve( 5147 zio->io_metaslab_class, zio->io_prop.zp_copies, 5148 zio)) { 5149 zio_allocate_dispatch(zio->io_metaslab_class, 5150 zio->io_allocator); 5151 } 5152 } 5153 } 5154 5155 mutex_enter(&zio->io_lock); 5156 zio->io_state[ZIO_WAIT_READY] = 1; 5157 pio = zio_walk_parents(zio, &zl); 5158 mutex_exit(&zio->io_lock); 5159 5160 /* 5161 * As we notify zio's parents, new parents could be added. 5162 * New parents go to the head of zio's io_parent_list, however, 5163 * so we will (correctly) not notify them. The remainder of zio's 5164 * io_parent_list, from 'pio_next' onward, cannot change because 5165 * all parents must wait for us to be done before they can be done. 5166 */ 5167 for (; pio != NULL; pio = pio_next) { 5168 pio_next = zio_walk_parents(zio, &zl); 5169 zio_notify_parent(pio, zio, ZIO_WAIT_READY, NULL); 5170 } 5171 5172 if (zio->io_flags & ZIO_FLAG_NODATA) { 5173 if (bp != NULL && BP_IS_GANG(bp)) { 5174 zio->io_flags &= ~ZIO_FLAG_NODATA; 5175 } else { 5176 ASSERT((uintptr_t)zio->io_abd < SPA_MAXBLOCKSIZE); 5177 zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES; 5178 } 5179 } 5180 5181 if (zio_injection_enabled && 5182 zio->io_spa->spa_syncing_txg == zio->io_txg) 5183 zio_handle_ignored_writes(zio); 5184 5185 return (zio); 5186 } 5187 5188 /* 5189 * Update the allocation throttle accounting. 5190 */ 5191 static void 5192 zio_dva_throttle_done(zio_t *zio) 5193 { 5194 zio_t *pio = zio_unique_parent(zio); 5195 vdev_t *vd = zio->io_vd; 5196 int flags = METASLAB_ASYNC_ALLOC; 5197 const void *tag = pio; 5198 5199 ASSERT3P(zio->io_bp, !=, NULL); 5200 ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE); 5201 ASSERT3U(zio->io_priority, ==, ZIO_PRIORITY_ASYNC_WRITE); 5202 ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV); 5203 ASSERT(vd != NULL); 5204 ASSERT3P(vd, ==, vd->vdev_top); 5205 ASSERT(zio_injection_enabled || !(zio->io_flags & ZIO_FLAG_IO_RETRY)); 5206 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR)); 5207 ASSERT(zio->io_flags & ZIO_FLAG_IO_ALLOCATING); 5208 5209 /* 5210 * Parents of gang children can have two flavors -- ones that allocated 5211 * the gang header (will have ZIO_FLAG_IO_REWRITE set) and ones that 5212 * allocated the constituent blocks. The first use their parent as tag. 5213 */ 5214 if (pio->io_child_type == ZIO_CHILD_GANG && 5215 (pio->io_flags & ZIO_FLAG_IO_REWRITE)) 5216 tag = zio_unique_parent(pio); 5217 5218 ASSERT(IO_IS_ALLOCATING(pio) || (pio->io_child_type == ZIO_CHILD_GANG && 5219 (pio->io_flags & ZIO_FLAG_IO_REWRITE))); 5220 ASSERT(ZIO_HAS_ALLOCATOR(pio)); 5221 ASSERT3P(zio, !=, zio->io_logical); 5222 ASSERT(zio->io_logical != NULL); 5223 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR)); 5224 ASSERT0(zio->io_flags & ZIO_FLAG_NOPWRITE); 5225 ASSERT(zio->io_metaslab_class != NULL); 5226 ASSERT(zio->io_metaslab_class->mc_alloc_throttle_enabled); 5227 5228 metaslab_group_alloc_decrement(zio->io_spa, vd->vdev_id, 5229 pio->io_allocator, flags, pio->io_size, tag); 5230 5231 if (metaslab_class_throttle_unreserve(zio->io_metaslab_class, 1, pio)) { 5232 zio_allocate_dispatch(zio->io_metaslab_class, 5233 pio->io_allocator); 5234 } 5235 } 5236 5237 static zio_t * 5238 zio_done(zio_t *zio) 5239 { 5240 /* 5241 * Always attempt to keep stack usage minimal here since 5242 * we can be called recursively up to 19 levels deep. 5243 */ 5244 const uint64_t psize = zio->io_size; 5245 zio_t *pio, *pio_next; 5246 zio_link_t *zl = NULL; 5247 5248 /* 5249 * If our children haven't all completed, 5250 * wait for them and then repeat this pipeline stage. 
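 * (Mechanically: zio_wait_for_children() returns B_TRUE while children
 * remain, we return NULL to park the zio, and the last completing child
 * re-executes this stage via zio_notify_parent().)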
5251 */
5252 if (zio_wait_for_children(zio, ZIO_CHILD_ALL_BITS, ZIO_WAIT_DONE)) {
5253 return (NULL);
5254 }
5255
5256 /*
5257 * If the allocation throttle is enabled, then update the accounting.
5258 * We only track child I/Os that are part of an allocating async
5259 * write. We must do this since the allocation is performed
5260 * by the logical I/O but the actual write is done by child I/Os.
5261 */
5262 if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING &&
5263 zio->io_child_type == ZIO_CHILD_VDEV)
5264 zio_dva_throttle_done(zio);
5265
5266 for (int c = 0; c < ZIO_CHILD_TYPES; c++)
5267 for (int w = 0; w < ZIO_WAIT_TYPES; w++)
5268 ASSERT(zio->io_children[c][w] == 0);
5269
5270 if (zio->io_bp != NULL && !BP_IS_EMBEDDED(zio->io_bp)) {
5271 ASSERT(zio->io_bp->blk_pad[0] == 0);
5272 ASSERT(zio->io_bp->blk_pad[1] == 0);
5273 ASSERT(memcmp(zio->io_bp, &zio->io_bp_copy,
5274 sizeof (blkptr_t)) == 0 ||
5275 (zio->io_bp == zio_unique_parent(zio)->io_bp));
5276 if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(zio->io_bp) &&
5277 zio->io_bp_override == NULL &&
5278 !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) {
5279 ASSERT3U(zio->io_prop.zp_copies, <=,
5280 BP_GET_NDVAS(zio->io_bp));
5281 ASSERT(BP_COUNT_GANG(zio->io_bp) == 0 ||
5282 (BP_COUNT_GANG(zio->io_bp) ==
5283 BP_GET_NDVAS(zio->io_bp)));
5284 }
5285 if (zio->io_flags & ZIO_FLAG_NOPWRITE)
5286 VERIFY(BP_EQUAL(zio->io_bp, &zio->io_bp_orig));
5287 }
5288
5289 /*
5290 * If there were child vdev/gang/ddt errors, they apply to us now.
5291 */
5292 zio_inherit_child_errors(zio, ZIO_CHILD_VDEV);
5293 zio_inherit_child_errors(zio, ZIO_CHILD_GANG);
5294 zio_inherit_child_errors(zio, ZIO_CHILD_DDT);
5295
5296 /*
5297 * If the I/O on the transformed data was successful, generate any
5298 * checksum reports now while we still have the transformed data.
5299 */
5300 if (zio->io_error == 0) {
5301 while (zio->io_cksum_report != NULL) {
5302 zio_cksum_report_t *zcr = zio->io_cksum_report;
5303 uint64_t align = zcr->zcr_align;
5304 uint64_t asize = P2ROUNDUP(psize, align);
5305 abd_t *adata = zio->io_abd;
5306
5307 if (adata != NULL && asize != psize) {
5308 adata = abd_alloc(asize, B_TRUE);
5309 abd_copy(adata, zio->io_abd, psize);
5310 abd_zero_off(adata, psize, asize - psize);
5311 }
5312
5313 zio->io_cksum_report = zcr->zcr_next;
5314 zcr->zcr_next = NULL;
5315 zcr->zcr_finish(zcr, adata);
5316 zfs_ereport_free_checksum(zcr);
5317
5318 if (adata != NULL && asize != psize)
5319 abd_free(adata);
5320 }
5321 }
5322
5323 zio_pop_transforms(zio); /* note: may set zio->io_error */
5324
5325 vdev_stat_update(zio, psize);
5326
5327 /*
5328 * If this I/O is attached to a particular vdev and is slow, exceeding
5329 * 30 seconds to complete, post an error describing the I/O delay.
5330 * We ignore these errors if the device is currently unavailable.
5331 */
5332 if (zio->io_delay >= MSEC2NSEC(zio_slow_io_ms)) {
5333 if (zio->io_vd != NULL && !vdev_is_dead(zio->io_vd)) {
5334 /*
5335 * We only want to increment our slow IO counters if
5336 * the IO is valid (i.e. not if the drive is removed).
5337 *
5338 * zfs_ereport_post() will also do these checks, but
5339 * it can also ratelimit and have other failures, so we
5340 * need to increment the slow_io counters independently
5341 * of it.
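 * (For example, with the default zio_slow_io_ms of 30000: an I/O whose
 * io_delay crosses 30 seconds bumps vs_slow_ios and, if it passes the
 * validity and rate-limit checks, posts an FM_EREPORT_ZFS_DELAY event
 * for ZED to act on.)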
5342 */ 5343 if (zfs_ereport_is_valid(FM_EREPORT_ZFS_DELAY, 5344 zio->io_spa, zio->io_vd, zio)) { 5345 mutex_enter(&zio->io_vd->vdev_stat_lock); 5346 zio->io_vd->vdev_stat.vs_slow_ios++; 5347 mutex_exit(&zio->io_vd->vdev_stat_lock); 5348 5349 (void) zfs_ereport_post(FM_EREPORT_ZFS_DELAY, 5350 zio->io_spa, zio->io_vd, &zio->io_bookmark, 5351 zio, 0); 5352 } 5353 } 5354 } 5355 5356 if (zio->io_error) { 5357 /* 5358 * If this I/O is attached to a particular vdev, 5359 * generate an error message describing the I/O failure 5360 * at the block level. We ignore these errors if the 5361 * device is currently unavailable. 5362 */ 5363 if (zio->io_error != ECKSUM && zio->io_vd != NULL && 5364 !vdev_is_dead(zio->io_vd) && 5365 !(zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR)) { 5366 int ret = zfs_ereport_post(FM_EREPORT_ZFS_IO, 5367 zio->io_spa, zio->io_vd, &zio->io_bookmark, zio, 0); 5368 if (ret != EALREADY) { 5369 mutex_enter(&zio->io_vd->vdev_stat_lock); 5370 if (zio->io_type == ZIO_TYPE_READ) 5371 zio->io_vd->vdev_stat.vs_read_errors++; 5372 else if (zio->io_type == ZIO_TYPE_WRITE) 5373 zio->io_vd->vdev_stat.vs_write_errors++; 5374 mutex_exit(&zio->io_vd->vdev_stat_lock); 5375 } 5376 } 5377 5378 if ((zio->io_error == EIO || !(zio->io_flags & 5379 (ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) && 5380 !(zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR) && 5381 zio == zio->io_logical) { 5382 /* 5383 * For logical I/O requests, tell the SPA to log the 5384 * error and generate a logical data ereport. 5385 */ 5386 spa_log_error(zio->io_spa, &zio->io_bookmark, 5387 BP_GET_LOGICAL_BIRTH(zio->io_bp)); 5388 (void) zfs_ereport_post(FM_EREPORT_ZFS_DATA, 5389 zio->io_spa, NULL, &zio->io_bookmark, zio, 0); 5390 } 5391 } 5392 5393 if (zio->io_error && zio == zio->io_logical) { 5394 /* 5395 * Determine whether zio should be reexecuted. This will 5396 * propagate all the way to the root via zio_notify_parent(). 5397 */ 5398 ASSERT(zio->io_vd == NULL && zio->io_bp != NULL); 5399 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 5400 5401 if (IO_IS_ALLOCATING(zio) && 5402 !(zio->io_flags & ZIO_FLAG_CANFAIL) && 5403 !(zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR)) { 5404 if (zio->io_error != ENOSPC) 5405 zio->io_reexecute |= ZIO_REEXECUTE_NOW; 5406 else 5407 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 5408 } 5409 5410 if ((zio->io_type == ZIO_TYPE_READ || 5411 zio->io_type == ZIO_TYPE_FREE) && 5412 !(zio->io_flags & ZIO_FLAG_SCAN_THREAD) && 5413 zio->io_error == ENXIO && 5414 spa_load_state(zio->io_spa) == SPA_LOAD_NONE && 5415 spa_get_failmode(zio->io_spa) != ZIO_FAILURE_MODE_CONTINUE) 5416 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 5417 5418 if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute) 5419 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 5420 5421 /* 5422 * Here is a possibly good place to attempt to do 5423 * either combinatorial reconstruction or error correction 5424 * based on checksums. It also might be a good place 5425 * to send out preliminary ereports before we suspend 5426 * processing. 5427 */ 5428 } 5429 5430 /* 5431 * If there were logical child errors, they apply to us now. 5432 * We defer this until now to avoid conflating logical child 5433 * errors with errors that happened to the zio itself when 5434 * updating vdev stats and reporting FMA events above. 
5435 */ 5436 zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL); 5437 5438 if ((zio->io_error || zio->io_reexecute) && 5439 IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio && 5440 !(zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE))) 5441 zio_dva_unallocate(zio, zio->io_gang_tree, zio->io_bp); 5442 5443 zio_gang_tree_free(&zio->io_gang_tree); 5444 5445 /* 5446 * Godfather I/Os should never suspend. 5447 */ 5448 if ((zio->io_flags & ZIO_FLAG_GODFATHER) && 5449 (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) 5450 zio->io_reexecute &= ~ZIO_REEXECUTE_SUSPEND; 5451 5452 if (zio->io_reexecute) { 5453 /* 5454 * A Direct I/O operation that has a checksum verify error 5455 * should not attempt to reexecute. Instead, the error should 5456 * just be propagated back. 5457 */ 5458 ASSERT(!(zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR)); 5459 5460 /* 5461 * This is a logical I/O that wants to reexecute. 5462 * 5463 * Reexecute is top-down. When an i/o fails, if it's not 5464 * the root, it simply notifies its parent and sticks around. 5465 * The parent, seeing that it still has children in zio_done(), 5466 * does the same. This percolates all the way up to the root. 5467 * The root i/o will reexecute or suspend the entire tree. 5468 * 5469 * This approach ensures that zio_reexecute() honors 5470 * all the original i/o dependency relationships, e.g. 5471 * parents not executing until children are ready. 5472 */ 5473 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 5474 5475 zio->io_gang_leader = NULL; 5476 5477 mutex_enter(&zio->io_lock); 5478 zio->io_state[ZIO_WAIT_DONE] = 1; 5479 mutex_exit(&zio->io_lock); 5480 5481 /* 5482 * "The Godfather" I/O monitors its children but is 5483 * not a true parent to them. It will track them through 5484 * the pipeline but severs its ties whenever they get into 5485 * trouble (e.g. suspended). This allows "The Godfather" 5486 * I/O to return status without blocking. 5487 */ 5488 zl = NULL; 5489 for (pio = zio_walk_parents(zio, &zl); pio != NULL; 5490 pio = pio_next) { 5491 zio_link_t *remove_zl = zl; 5492 pio_next = zio_walk_parents(zio, &zl); 5493 5494 if ((pio->io_flags & ZIO_FLAG_GODFATHER) && 5495 (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) { 5496 zio_remove_child(pio, zio, remove_zl); 5497 /* 5498 * This is a rare code path, so we don't 5499 * bother with "next_to_execute". 5500 */ 5501 zio_notify_parent(pio, zio, ZIO_WAIT_DONE, 5502 NULL); 5503 } 5504 } 5505 5506 if ((pio = zio_unique_parent(zio)) != NULL) { 5507 /* 5508 * We're not a root i/o, so there's nothing to do 5509 * but notify our parent. Don't propagate errors 5510 * upward since we haven't permanently failed yet. 5511 */ 5512 ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER)); 5513 zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE; 5514 /* 5515 * This is a rare code path, so we don't bother with 5516 * "next_to_execute". 5517 */ 5518 zio_notify_parent(pio, zio, ZIO_WAIT_DONE, NULL); 5519 } else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) { 5520 /* 5521 * We'd fail again if we reexecuted now, so suspend 5522 * until conditions improve (e.g. device comes online). 5523 */ 5524 zio_suspend(zio->io_spa, zio, ZIO_SUSPEND_IOERR); 5525 } else { 5526 /* 5527 * Reexecution is potentially a huge amount of work. 5528 * Hand it off to the otherwise-unused claim taskq. 
5529 */ 5530 spa_taskq_dispatch(zio->io_spa, 5531 ZIO_TYPE_CLAIM, ZIO_TASKQ_ISSUE, 5532 zio_reexecute, zio, B_FALSE); 5533 } 5534 return (NULL); 5535 } 5536 5537 ASSERT(list_is_empty(&zio->io_child_list)); 5538 ASSERT(zio->io_reexecute == 0); 5539 ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL)); 5540 5541 /* 5542 * Report any checksum errors, since the I/O is complete. 5543 */ 5544 while (zio->io_cksum_report != NULL) { 5545 zio_cksum_report_t *zcr = zio->io_cksum_report; 5546 zio->io_cksum_report = zcr->zcr_next; 5547 zcr->zcr_next = NULL; 5548 zcr->zcr_finish(zcr, NULL); 5549 zfs_ereport_free_checksum(zcr); 5550 } 5551 5552 /* 5553 * It is the responsibility of the done callback to ensure that this 5554 * particular zio is no longer discoverable for adoption, and as 5555 * such, cannot acquire any new parents. 5556 */ 5557 if (zio->io_done) 5558 zio->io_done(zio); 5559 5560 mutex_enter(&zio->io_lock); 5561 zio->io_state[ZIO_WAIT_DONE] = 1; 5562 mutex_exit(&zio->io_lock); 5563 5564 /* 5565 * We are done executing this zio. We may want to execute a parent 5566 * next. See the comment in zio_notify_parent(). 5567 */ 5568 zio_t *next_to_execute = NULL; 5569 zl = NULL; 5570 for (pio = zio_walk_parents(zio, &zl); pio != NULL; pio = pio_next) { 5571 zio_link_t *remove_zl = zl; 5572 pio_next = zio_walk_parents(zio, &zl); 5573 zio_remove_child(pio, zio, remove_zl); 5574 zio_notify_parent(pio, zio, ZIO_WAIT_DONE, &next_to_execute); 5575 } 5576 5577 if (zio->io_waiter != NULL) { 5578 mutex_enter(&zio->io_lock); 5579 zio->io_executor = NULL; 5580 cv_broadcast(&zio->io_cv); 5581 mutex_exit(&zio->io_lock); 5582 } else { 5583 zio_destroy(zio); 5584 } 5585 5586 return (next_to_execute); 5587 } 5588 5589 /* 5590 * ========================================================================== 5591 * I/O pipeline definition 5592 * ========================================================================== 5593 */ 5594 static zio_pipe_stage_t *zio_pipeline[] = { 5595 NULL, 5596 zio_read_bp_init, 5597 zio_write_bp_init, 5598 zio_free_bp_init, 5599 zio_issue_async, 5600 zio_write_compress, 5601 zio_encrypt, 5602 zio_checksum_generate, 5603 zio_nop_write, 5604 zio_brt_free, 5605 zio_ddt_read_start, 5606 zio_ddt_read_done, 5607 zio_ddt_write, 5608 zio_ddt_free, 5609 zio_gang_assemble, 5610 zio_gang_issue, 5611 zio_dva_throttle, 5612 zio_dva_allocate, 5613 zio_dva_free, 5614 zio_dva_claim, 5615 zio_ready, 5616 zio_vdev_io_start, 5617 zio_vdev_io_done, 5618 zio_vdev_io_assess, 5619 zio_checksum_verify, 5620 zio_dio_checksum_verify, 5621 zio_done 5622 }; 5623 5624 5625 5626 5627 /* 5628 * Compare two zbookmark_phys_t's to see which we would reach first in a 5629 * pre-order traversal of the object tree. 5630 * 5631 * This is simple in every case aside from the meta-dnode object. For all other 5632 * objects, we traverse them in order (object 1 before object 2, and so on). 5633 * However, all of these objects are traversed while traversing object 0, since 5634 * the data it points to is the list of objects. Thus, we need to convert to a 5635 * canonical representation so we can compare meta-dnode bookmarks to 5636 * non-meta-dnode bookmarks. 5637 * 5638 * We do this by calculating "equivalents" for each field of the zbookmark. 
5639 * zbookmarks outside of the meta-dnode use their own object and level, and
5640 * calculate the level 0 equivalent (the first L0 blkid that is contained in the
5641 * block this bookmark refers to) by multiplying their blkid by their span
5642 * (the number of L0 blocks contained within one block at their level).
5643 * zbookmarks inside the meta-dnode calculate their object equivalent
5644 * (which is L0equiv * dnodes per data block), use 0 for their L0equiv, and use
5645 * level + 1<<31 (a value larger than any level could ever be) for their level.
5646 * This causes them to always compare before a bookmark in their object
5647 * equivalent, compare appropriately to bookmarks in other objects, and to
5648 * compare appropriately to other bookmarks in the meta-dnode. (A worked example appears at the end of this file.)
5649 */
5650 int
5651 zbookmark_compare(uint16_t dbss1, uint8_t ibs1, uint16_t dbss2, uint8_t ibs2,
5652 const zbookmark_phys_t *zb1, const zbookmark_phys_t *zb2)
5653 {
5654 /*
5655 * These variables represent the "equivalent" values for the zbookmark,
5656 * after converting zbookmarks inside the meta dnode to their
5657 * normal-object equivalents.
5658 */
5659 uint64_t zb1obj, zb2obj;
5660 uint64_t zb1L0, zb2L0;
5661 uint64_t zb1level, zb2level;
5662 
5663 if (zb1->zb_object == zb2->zb_object &&
5664 zb1->zb_level == zb2->zb_level &&
5665 zb1->zb_blkid == zb2->zb_blkid)
5666 return (0);
5667 
5668 IMPLY(zb1->zb_level > 0, ibs1 >= SPA_MINBLOCKSHIFT);
5669 IMPLY(zb2->zb_level > 0, ibs2 >= SPA_MINBLOCKSHIFT);
5670 
5671 /*
5672 * BP_SPANB calculates the span in blocks.
5673 */
5674 zb1L0 = (zb1->zb_blkid) * BP_SPANB(ibs1, zb1->zb_level);
5675 zb2L0 = (zb2->zb_blkid) * BP_SPANB(ibs2, zb2->zb_level);
5676 
5677 if (zb1->zb_object == DMU_META_DNODE_OBJECT) {
5678 zb1obj = zb1L0 * (dbss1 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
5679 zb1L0 = 0;
5680 zb1level = zb1->zb_level + COMPARE_META_LEVEL;
5681 } else {
5682 zb1obj = zb1->zb_object;
5683 zb1level = zb1->zb_level;
5684 }
5685 
5686 if (zb2->zb_object == DMU_META_DNODE_OBJECT) {
5687 zb2obj = zb2L0 * (dbss2 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
5688 zb2L0 = 0;
5689 zb2level = zb2->zb_level + COMPARE_META_LEVEL;
5690 } else {
5691 zb2obj = zb2->zb_object;
5692 zb2level = zb2->zb_level;
5693 }
5694 
5695 /* Now that we have a canonical representation, do the comparison. */
5696 if (zb1obj != zb2obj)
5697 return (zb1obj < zb2obj ? -1 : 1);
5698 else if (zb1L0 != zb2L0)
5699 return (zb1L0 < zb2L0 ? -1 : 1);
5700 else if (zb1level != zb2level)
5701 return (zb1level > zb2level ? -1 : 1);
5702 /*
5703 * This can (theoretically) happen if the bookmarks have the same object
5704 * and level, but different blkids, if the block sizes are not the same.
5705 * There is presently no way to change the indirect block sizes.
5706 */
5707 return (0);
5708 }
5709 
5710 /*
5711 * This function checks the following: given that last_block is the place that
5712 * our traversal stopped last time, does that guarantee that we've visited
5713 * every node under subtree_root? We can't just use the raw output
5714 * of zbookmark_compare. We have to pass in a modified version of
5715 * subtree_root; by incrementing the block id, and then checking whether
5716 * last_block is before or equal to that, we can tell whether or not having
5717 * visited last_block implies that all of subtree_root's children have been
5718 * visited.
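 *
 * A hypothetical illustration (the numbers below are chosen only for this
 * example): with 128K indirect blocks, each level-1 block covers 1024 L0
 * blkids. If subtree_root is <object 5, level 1, blkid 3>, it covers L0
 * blkids 3072..4095, and incrementing the blkid yields
 * <object 5, level 1, blkid 4>, whose first L0 blkid is 4096. If last_block
 * (always level 0) is at blkid 4096 or later in object 5, or anywhere in a
 * later object, then every block under subtree_root has already been
 * visited.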
5719 */
5720 boolean_t
5721 zbookmark_subtree_completed(const dnode_phys_t *dnp,
5722 const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
5723 {
5724 zbookmark_phys_t mod_zb = *subtree_root;
5725 mod_zb.zb_blkid++;
5726 ASSERT0(last_block->zb_level);
5727 
5728 /* The objset_phys_t isn't before anything. */
5729 if (dnp == NULL)
5730 return (B_FALSE);
5731 
5732 /*
5733 * We pass in 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT) for the
5734 * data block size in sectors, because that variable is only used if
5735 * the bookmark refers to a block in the meta-dnode. Since we don't
5736 * know without examining it what object it refers to, and there's no
5737 * harm in passing in this value in other cases, we always pass it in.
5738 *
5739 * We pass in 0 for the indirect block size shift because zb2 must be
5740 * level 0. The indirect block size is only used to calculate the span
5741 * of the bookmark, but since the bookmark must be level 0, the span is
5742 * always 1, so the math works out.
5743 *
5744 * If you make changes to how the zbookmark_compare code works, be sure
5745 * that this code still works afterwards.
5746 */
5747 return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
5748 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, &mod_zb,
5749 last_block) <= 0);
5750 }
5751 
5752 /*
5753 * This function is similar to zbookmark_subtree_completed(), but returns true
5754 * if subtree_root is equal to or ahead of last_block, i.e. still to be done.
5755 */
5756 boolean_t
5757 zbookmark_subtree_tbd(const dnode_phys_t *dnp,
5758 const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
5759 {
5760 ASSERT0(last_block->zb_level);
5761 if (dnp == NULL)
5762 return (B_FALSE);
5763 return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
5764 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, subtree_root,
5765 last_block) >= 0);
5766 }
5767 
5768 EXPORT_SYMBOL(zio_type_name);
5769 EXPORT_SYMBOL(zio_buf_alloc);
5770 EXPORT_SYMBOL(zio_data_buf_alloc);
5771 EXPORT_SYMBOL(zio_buf_free);
5772 EXPORT_SYMBOL(zio_data_buf_free);
5773 
5774 ZFS_MODULE_PARAM(zfs_zio, zio_, slow_io_ms, INT, ZMOD_RW,
5775 "Max I/O completion time (milliseconds) before marking it as slow");
5776 
5777 ZFS_MODULE_PARAM(zfs_zio, zio_, requeue_io_start_cut_in_line, INT, ZMOD_RW,
5778 "Prioritize requeued I/O");
5779 
5780 ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_deferred_free, UINT, ZMOD_RW,
5781 "Defer frees starting in this pass");
5782 
5783 ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_dont_compress, UINT, ZMOD_RW,
5784 "Don't compress starting in this pass");
5785 
5786 ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_rewrite, UINT, ZMOD_RW,
5787 "Rewrite new bps starting in this pass");
5788 
5789 ZFS_MODULE_PARAM(zfs_zio, zio_, dva_throttle_enabled, INT, ZMOD_RW,
5790 "Throttle block allocations in the ZIO pipeline");
5791 
5792 ZFS_MODULE_PARAM(zfs_zio, zio_, deadman_log_all, INT, ZMOD_RW,
5793 "Log all slow ZIOs, not just those with vdevs");
5794 
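/*
 * An illustrative sketch of the zbookmark_compare() canonicalization
 * described above that function. It is guarded by #if 0 and is never built;
 * the block sizes, object numbers, and blkids are hypothetical, chosen only
 * to make the arithmetic easy to follow, and zbookmark_compare_example() is
 * not an existing ZFS function.
 */
#if 0
static void
zbookmark_compare_example(void)
{
	zbookmark_phys_t meta, plain;

	/*
	 * A level-0 bookmark for blkid 2 of the meta-dnode. With 16K
	 * meta-dnode blocks (dn_datablkszsec == 32), that block holds the
	 * dnodes for objects 64..95, so its canonical object equivalent is
	 * 2 * 32 = 64, its L0 equivalent is 0, and its level equivalent is
	 * 0 + COMPARE_META_LEVEL.
	 */
	SET_BOOKMARK(&meta, 1ULL, DMU_META_DNODE_OBJECT, 0, 2);

	/* A level-0 bookmark for blkid 5 of ordinary object 70. */
	SET_BOOKMARK(&plain, 1ULL, 70, 0, 5);

	/*
	 * Object equivalent 64 sorts before object 70, so the meta-dnode
	 * bookmark is reached first in a pre-order traversal, just as the
	 * dnode for object 70 (stored in meta-dnode block 2) is read before
	 * object 70's own blocks.
	 */
	VERIFY3S(zbookmark_compare(32, 17, 32, 17, &meta, &plain), ==, -1);
}
#endif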