/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2022 by Delphix. All rights reserved.
 * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2019, Klara Inc.
 * Copyright (c) 2019, Allan Jude
 * Copyright (c) 2021, Datto, Inc.
 */

#include <sys/sysmacros.h>
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_trim.h>
#include <sys/zio_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/brt.h>
#include <sys/ddt.h>
#include <sys/blkptr.h>
#include <sys/zfeature.h>
#include <sys/dsl_scan.h>
#include <sys/metaslab_impl.h>
#include <sys/time.h>
#include <sys/trace_zfs.h>
#include <sys/abd.h>
#include <sys/dsl_crypt.h>
#include <cityhash.h>

/*
 * ==========================================================================
 * I/O type descriptions
 * ==========================================================================
 */
const char *const zio_type_name[ZIO_TYPES] = {
	/*
	 * Note: Linux kernel thread name length is limited
	 * so these names will differ from upstream OpenZFS.
	 */
	"z_null", "z_rd", "z_wr", "z_fr", "z_cl", "z_ioctl", "z_trim"
};

int zio_dva_throttle_enabled = B_TRUE;
static int zio_deadman_log_all = B_FALSE;

/*
 * ==========================================================================
 * I/O kmem caches
 * ==========================================================================
 */
static kmem_cache_t *zio_cache;
static kmem_cache_t *zio_link_cache;
kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
static uint64_t zio_buf_cache_allocs[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
static uint64_t zio_buf_cache_frees[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
#endif

/* Mark IOs as "slow" if they take longer than 30 seconds */
static uint_t zio_slow_io_ms = (30 * MILLISEC);

#define	BP_SPANB(indblkshift, level) \
	(((uint64_t)1) << ((level) * ((indblkshift) - SPA_BLKPTRSHIFT)))
#define	COMPARE_META_LEVEL	0x80000000ul
/*
 * The following actions directly affect the spa's sync-to-convergence logic.
 * The values below define the sync pass when we start performing the action.
 * Care should be taken when changing these values as they directly impact
 * spa_sync() performance.  Tuning these values may introduce subtle
 * performance pathologies and should only be done in the context of
 * performance analysis.  These tunables will eventually be removed and
 * replaced with #defines once enough analysis has been done to determine
 * optimal values.
 *
 * The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that
 * regular blocks are not deferred.
 *
 * Starting in sync pass 8 (zfs_sync_pass_dont_compress), we disable
 * compression (including of metadata).  In practice, we don't have this
 * many sync passes, so this has no effect.
 *
 * The original intent was that disabling compression would help the sync
 * passes to converge.  However, in practice disabling compression increases
 * the average number of sync passes, because when we turn compression off,
 * the sizes of many blocks change and thus we have to re-allocate (not
 * overwrite) them.  It also increases the number of 128KB allocations (e.g.
 * for indirect blocks and spacemaps) because these will not be compressed.
 * The 128K allocations are especially detrimental to performance on highly
 * fragmented systems, which may have very few free segments of this size,
 * and may need to load new metaslabs to satisfy 128K allocations.
 */

/* defer frees starting in this pass */
uint_t zfs_sync_pass_deferred_free = 2;

/* don't compress starting in this pass */
static uint_t zfs_sync_pass_dont_compress = 8;

/* rewrite new bps starting in this pass */
static uint_t zfs_sync_pass_rewrite = 2;

/*
 * An allocating zio is one that either currently has the DVA allocate
 * stage set or will have it later in its lifetime.
 */
#define	IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)

/*
 * Enable smaller cores by excluding metadata
 * allocations as well.
 */
int zio_exclude_metadata = 0;
static int zio_requeue_io_start_cut_in_line = 1;

#ifdef ZFS_DEBUG
static const int zio_buf_debug_limit = 16384;
#else
static const int zio_buf_debug_limit = 0;
#endif

static inline void __zio_execute(zio_t *zio);

static void zio_taskq_dispatch(zio_t *, zio_taskq_type_t, boolean_t);

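/*
 * Create the zio and zio_link kmem caches, along with the per-size
 * zio_buf_cache[] and zio_data_buf_cache[] buffer caches used by
 * zio_buf_alloc() and zio_data_buf_alloc().
 */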
void
zio_init(void)
{
	size_t c;

	zio_cache = kmem_cache_create("zio_cache",
	    sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
	zio_link_cache = kmem_cache_create("zio_link_cache",
	    sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	/*
	 * For small buffers, we want a cache for each multiple of
	 * SPA_MINBLOCKSIZE.  For larger buffers, we want a cache
	 * for each quarter-power of 2.
	 */
	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
		size_t p2 = size;
		size_t align = 0;
		size_t data_cflags, cflags;

		data_cflags = KMC_NODEBUG;
		cflags = (zio_exclude_metadata || size > zio_buf_debug_limit) ?
		    KMC_NODEBUG : 0;

		while (!ISP2(p2))
			p2 &= p2 - 1;

#ifndef _KERNEL
		/*
		 * If we are using watchpoints, put each buffer on its own
		 * page, to eliminate the performance overhead of trapping to
		 * the kernel when modifying a non-watched buffer that shares
		 * the page with a watched buffer.
		 */
		if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE))
			continue;
		/*
		 * Here's the problem - on 4K native devices in userland on
		 * Linux using O_DIRECT, buffers must be 4K aligned or I/O
		 * will fail with EINVAL, causing zdb (and others) to coredump.
		 * Since userland probably doesn't need optimized buffer caches,
		 * we just force 4K alignment on everything.
		 */
		align = 8 * SPA_MINBLOCKSIZE;
#else
		if (size < PAGESIZE) {
			align = SPA_MINBLOCKSIZE;
		} else if (IS_P2ALIGNED(size, p2 >> 2)) {
			align = PAGESIZE;
		}
#endif

		if (align != 0) {
			char name[36];
			if (cflags == data_cflags) {
				/*
				 * Resulting kmem caches would be identical.
				 * Save memory by creating only one.
				 */
				(void) snprintf(name, sizeof (name),
				    "zio_buf_comb_%lu", (ulong_t)size);
				zio_buf_cache[c] = kmem_cache_create(name,
				    size, align, NULL, NULL, NULL, NULL, NULL,
				    cflags);
				zio_data_buf_cache[c] = zio_buf_cache[c];
				continue;
			}
			(void) snprintf(name, sizeof (name), "zio_buf_%lu",
			    (ulong_t)size);
			zio_buf_cache[c] = kmem_cache_create(name, size,
			    align, NULL, NULL, NULL, NULL, NULL, cflags);

			(void) snprintf(name, sizeof (name), "zio_data_buf_%lu",
			    (ulong_t)size);
			zio_data_buf_cache[c] = kmem_cache_create(name, size,
			    align, NULL, NULL, NULL, NULL, NULL, data_cflags);
		}
	}

	while (--c != 0) {
		ASSERT(zio_buf_cache[c] != NULL);
		if (zio_buf_cache[c - 1] == NULL)
			zio_buf_cache[c - 1] = zio_buf_cache[c];

		ASSERT(zio_data_buf_cache[c] != NULL);
		if (zio_data_buf_cache[c - 1] == NULL)
			zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
	}

	zio_inject_init();

	lz4_init();
}

void
zio_fini(void)
{
	size_t n = SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT;

#if defined(ZFS_DEBUG) && !defined(_KERNEL)
	for (size_t i = 0; i < n; i++) {
		if (zio_buf_cache_allocs[i] != zio_buf_cache_frees[i])
			(void) printf("zio_fini: [%d] %llu != %llu\n",
			    (int)((i + 1) << SPA_MINBLOCKSHIFT),
			    (long long unsigned)zio_buf_cache_allocs[i],
			    (long long unsigned)zio_buf_cache_frees[i]);
	}
#endif

	/*
	 * The same kmem cache can show up multiple times in both zio_buf_cache
	 * and zio_data_buf_cache.  Do a wasteful but trivially correct scan to
	 * sort it out.
	 */
	for (size_t i = 0; i < n; i++) {
		kmem_cache_t *cache = zio_buf_cache[i];
		if (cache == NULL)
			continue;
		for (size_t j = i; j < n; j++) {
			if (cache == zio_buf_cache[j])
				zio_buf_cache[j] = NULL;
			if (cache == zio_data_buf_cache[j])
				zio_data_buf_cache[j] = NULL;
		}
		kmem_cache_destroy(cache);
	}

	for (size_t i = 0; i < n; i++) {
		kmem_cache_t *cache = zio_data_buf_cache[i];
		if (cache == NULL)
			continue;
		for (size_t j = i; j < n; j++) {
			if (cache == zio_data_buf_cache[j])
				zio_data_buf_cache[j] = NULL;
		}
		kmem_cache_destroy(cache);
	}

	for (size_t i = 0; i < n; i++) {
		VERIFY3P(zio_buf_cache[i], ==, NULL);
		VERIFY3P(zio_data_buf_cache[i], ==, NULL);
	}

	kmem_cache_destroy(zio_link_cache);
	kmem_cache_destroy(zio_cache);

	zio_inject_fini();

	lz4_fini();
}

/*
 * ==========================================================================
 * Allocate and free I/O buffers
 * ==========================================================================
 */

/*
 * Use zio_buf_alloc to allocate ZFS metadata.  This data will appear in a
 * crashdump if the kernel panics, so use it judiciously.  Obviously, it's
 * useful to inspect ZFS metadata, but if possible, we should avoid keeping
 * excess / transient data in-core during a crashdump.
 */
void *
zio_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
	atomic_add_64(&zio_buf_cache_allocs[c], 1);
#endif

	return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE));
}

/*
 * Use zio_data_buf_alloc to allocate data.  The data will not appear in a
 * crashdump if the kernel panics.  This exists so that we will limit the
 * amount of ZFS data that shows up in a kernel crashdump.  (Thus reducing the
 * amount of kernel heap dumped to disk when the kernel panics)
 */
void *
zio_data_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE));
}

void
zio_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
	atomic_add_64(&zio_buf_cache_frees[c], 1);
#endif

	kmem_cache_free(zio_buf_cache[c], buf);
}

void
zio_data_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	kmem_cache_free(zio_data_buf_cache[c], buf);
}

static void
zio_abd_free(void *abd, size_t size)
{
	(void) size;
	abd_free((abd_t *)abd);
}

/*
 * ==========================================================================
 * Push and pop I/O transform buffers
 * ==========================================================================
 */
void
zio_push_transform(zio_t *zio, abd_t *data, uint64_t size, uint64_t bufsize,
    zio_transform_func_t *transform)
{
	zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);

	zt->zt_orig_abd = zio->io_abd;
	zt->zt_orig_size = zio->io_size;
	zt->zt_bufsize = bufsize;
	zt->zt_transform = transform;

	zt->zt_next = zio->io_transform_stack;
	zio->io_transform_stack = zt;

	zio->io_abd = data;
	zio->io_size = size;
}

void
zio_pop_transforms(zio_t *zio)
{
	zio_transform_t *zt;

	while ((zt = zio->io_transform_stack) != NULL) {
		if (zt->zt_transform != NULL)
			zt->zt_transform(zio,
			    zt->zt_orig_abd, zt->zt_orig_size);

		if (zt->zt_bufsize != 0)
			abd_free(zio->io_abd);

		zio->io_abd = zt->zt_orig_abd;
		zio->io_size = zt->zt_orig_size;
		zio->io_transform_stack = zt->zt_next;

		kmem_free(zt, sizeof (zio_transform_t));
	}
}

/*
 * ==========================================================================
 * I/O transform callbacks for subblocks, decompression, and decryption
 * ==========================================================================
 */
static void
zio_subblock(zio_t *zio, abd_t *data, uint64_t size)
{
	ASSERT(zio->io_size > size);

	if (zio->io_type == ZIO_TYPE_READ)
		abd_copy(data, zio->io_abd, size);
}

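/*
 * Transform callback for the read path: once the physical read has
 * completed, decompress the (physically sized) contents of io_abd into
 * the caller's logical-size buffer.  Any failure is reported as EIO.
 */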
static void
zio_decompress(zio_t *zio, abd_t *data, uint64_t size)
{
	if (zio->io_error == 0) {
		void *tmp = abd_borrow_buf(data, size);
		int ret = zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
		    zio->io_abd, tmp, zio->io_size, size,
		    &zio->io_prop.zp_complevel);
		abd_return_buf_copy(data, tmp, size);

		if (zio_injection_enabled && ret == 0)
			ret = zio_handle_fault_injection(zio, EINVAL);

		if (ret != 0)
			zio->io_error = SET_ERROR(EIO);
	}
}

static void
zio_decrypt(zio_t *zio, abd_t *data, uint64_t size)
{
	int ret;
	void *tmp;
	blkptr_t *bp = zio->io_bp;
	spa_t *spa = zio->io_spa;
	uint64_t dsobj = zio->io_bookmark.zb_objset;
	uint64_t lsize = BP_GET_LSIZE(bp);
	dmu_object_type_t ot = BP_GET_TYPE(bp);
	uint8_t salt[ZIO_DATA_SALT_LEN];
	uint8_t iv[ZIO_DATA_IV_LEN];
	uint8_t mac[ZIO_DATA_MAC_LEN];
	boolean_t no_crypt = B_FALSE;

	ASSERT(BP_USES_CRYPT(bp));
	ASSERT3U(size, !=, 0);

	if (zio->io_error != 0)
		return;

	/*
	 * Verify the cksum of MACs stored in an indirect bp.  It will always
	 * be possible to verify this since it does not require an encryption
	 * key.
	 */
	if (BP_HAS_INDIRECT_MAC_CKSUM(bp)) {
		zio_crypt_decode_mac_bp(bp, mac);

		if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF) {
			/*
			 * We haven't decompressed the data yet, but
			 * zio_crypt_do_indirect_mac_checksum() requires
			 * decompressed data to be able to parse out the MACs
			 * from the indirect block.  We decompress it now and
			 * throw away the result after we are finished.
			 */
			tmp = zio_buf_alloc(lsize);
			ret = zio_decompress_data(BP_GET_COMPRESS(bp),
			    zio->io_abd, tmp, zio->io_size, lsize,
			    &zio->io_prop.zp_complevel);
			if (ret != 0) {
				ret = SET_ERROR(EIO);
				goto error;
			}
			ret = zio_crypt_do_indirect_mac_checksum(B_FALSE,
			    tmp, lsize, BP_SHOULD_BYTESWAP(bp), mac);
			zio_buf_free(tmp, lsize);
		} else {
			ret = zio_crypt_do_indirect_mac_checksum_abd(B_FALSE,
			    zio->io_abd, size, BP_SHOULD_BYTESWAP(bp), mac);
		}
		abd_copy(data, zio->io_abd, size);

		if (zio_injection_enabled && ot != DMU_OT_DNODE && ret == 0) {
			ret = zio_handle_decrypt_injection(spa,
			    &zio->io_bookmark, ot, ECKSUM);
		}
		if (ret != 0)
			goto error;

		return;
	}

	/*
	 * If this is an authenticated block, just check the MAC.  It would be
	 * nice to separate this out into its own flag, but when this was done,
	 * we had run out of bits in what is now zio_flag_t.  Future cleanup
	 * could make this a flag bit.
	 */
	if (BP_IS_AUTHENTICATED(bp)) {
		if (ot == DMU_OT_OBJSET) {
			ret = spa_do_crypt_objset_mac_abd(B_FALSE, spa,
			    dsobj, zio->io_abd, size, BP_SHOULD_BYTESWAP(bp));
		} else {
			zio_crypt_decode_mac_bp(bp, mac);
			ret = spa_do_crypt_mac_abd(B_FALSE, spa, dsobj,
			    zio->io_abd, size, mac);
			if (zio_injection_enabled && ret == 0) {
				ret = zio_handle_decrypt_injection(spa,
				    &zio->io_bookmark, ot, ECKSUM);
			}
		}
		abd_copy(data, zio->io_abd, size);

		if (ret != 0)
			goto error;

		return;
	}

	zio_crypt_decode_params_bp(bp, salt, iv);

	if (ot == DMU_OT_INTENT_LOG) {
		tmp = abd_borrow_buf_copy(zio->io_abd, sizeof (zil_chain_t));
		zio_crypt_decode_mac_zil(tmp, mac);
		abd_return_buf(zio->io_abd, tmp, sizeof (zil_chain_t));
	} else {
		zio_crypt_decode_mac_bp(bp, mac);
	}

	ret = spa_do_crypt_abd(B_FALSE, spa, &zio->io_bookmark, BP_GET_TYPE(bp),
	    BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp), salt, iv, mac, size, data,
	    zio->io_abd, &no_crypt);
	if (no_crypt)
		abd_copy(data, zio->io_abd, size);

	if (ret != 0)
		goto error;

	return;

error:
	/* assert that the key was found unless this was speculative */
	ASSERT(ret != EACCES || (zio->io_flags & ZIO_FLAG_SPECULATIVE));

	/*
	 * If there was a decryption / authentication error return EIO as
	 * the io_error.  If this was not a speculative zio, create an ereport.
	 */
	if (ret == ECKSUM) {
		zio->io_error = SET_ERROR(EIO);
		if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) {
			spa_log_error(spa, &zio->io_bookmark,
			    &zio->io_bp->blk_birth);
			(void) zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION,
			    spa, NULL, &zio->io_bookmark, zio, 0);
		}
	} else {
		zio->io_error = ret;
	}
}

/*
 * ==========================================================================
 * I/O parent/child relationships and pipeline interlocks
 * ==========================================================================
 */
zio_t *
zio_walk_parents(zio_t *cio, zio_link_t **zl)
{
	list_t *pl = &cio->io_parent_list;

	*zl = (*zl == NULL) ? list_head(pl) : list_next(pl, *zl);
	if (*zl == NULL)
		return (NULL);

	ASSERT((*zl)->zl_child == cio);
	return ((*zl)->zl_parent);
}

zio_t *
zio_walk_children(zio_t *pio, zio_link_t **zl)
{
	list_t *cl = &pio->io_child_list;

	ASSERT(MUTEX_HELD(&pio->io_lock));

	*zl = (*zl == NULL) ? list_head(cl) : list_next(cl, *zl);
	if (*zl == NULL)
		return (NULL);

	ASSERT((*zl)->zl_parent == pio);
	return ((*zl)->zl_child);
}

zio_t *
zio_unique_parent(zio_t *cio)
{
	zio_link_t *zl = NULL;
	zio_t *pio = zio_walk_parents(cio, &zl);

	VERIFY3P(zio_walk_parents(cio, &zl), ==, NULL);
	return (pio);
}

void
zio_add_child(zio_t *pio, zio_t *cio)
{
	zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);

	/*
	 * Logical I/Os can have logical, gang, or vdev children.
	 * Gang I/Os can have gang or vdev children.
	 * Vdev I/Os can only have vdev children.
	 * The following ASSERT captures all of these constraints.
	 */
	ASSERT3S(cio->io_child_type, <=, pio->io_child_type);

	zl->zl_parent = pio;
	zl->zl_child = cio;

	mutex_enter(&pio->io_lock);
	mutex_enter(&cio->io_lock);

	ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);

	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
		pio->io_children[cio->io_child_type][w] += !cio->io_state[w];

	list_insert_head(&pio->io_child_list, zl);
	list_insert_head(&cio->io_parent_list, zl);

	mutex_exit(&cio->io_lock);
	mutex_exit(&pio->io_lock);
}

static void
zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
{
	ASSERT(zl->zl_parent == pio);
	ASSERT(zl->zl_child == cio);

	mutex_enter(&pio->io_lock);
	mutex_enter(&cio->io_lock);

	list_remove(&pio->io_child_list, zl);
	list_remove(&cio->io_parent_list, zl);

	mutex_exit(&cio->io_lock);
	mutex_exit(&pio->io_lock);
	kmem_cache_free(zio_link_cache, zl);
}

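/*
 * Check whether any children of the given types still have outstanding
 * work for the given wait type.  If so, remember which count this zio is
 * stalled on, back the pipeline up one stage so it will be retried when
 * the last such child completes, and return B_TRUE.
 */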
static boolean_t
zio_wait_for_children(zio_t *zio, uint8_t childbits, enum zio_wait_type wait)
{
	boolean_t waiting = B_FALSE;

	mutex_enter(&zio->io_lock);
	ASSERT(zio->io_stall == NULL);
	for (int c = 0; c < ZIO_CHILD_TYPES; c++) {
		if (!(ZIO_CHILD_BIT_IS_SET(childbits, c)))
			continue;

		uint64_t *countp = &zio->io_children[c][wait];
		if (*countp != 0) {
			zio->io_stage >>= 1;
			ASSERT3U(zio->io_stage, !=, ZIO_STAGE_OPEN);
			zio->io_stall = countp;
			waiting = B_TRUE;
			break;
		}
	}
	mutex_exit(&zio->io_lock);
	return (waiting);
}

__attribute__((always_inline))
static inline void
zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait,
    zio_t **next_to_executep)
{
	uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
	int *errorp = &pio->io_child_error[zio->io_child_type];

	mutex_enter(&pio->io_lock);
	if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
		*errorp = zio_worst_error(*errorp, zio->io_error);
	pio->io_reexecute |= zio->io_reexecute;
	ASSERT3U(*countp, >, 0);

	(*countp)--;

	if (*countp == 0 && pio->io_stall == countp) {
		zio_taskq_type_t type =
		    pio->io_stage < ZIO_STAGE_VDEV_IO_START ? ZIO_TASKQ_ISSUE :
		    ZIO_TASKQ_INTERRUPT;
		pio->io_stall = NULL;
		mutex_exit(&pio->io_lock);

		/*
		 * If we can tell the caller to execute this parent next, do
		 * so.  We only do this if the parent's zio type matches the
		 * child's type.  Otherwise dispatch the parent zio in its
		 * own taskq.
		 *
		 * Having the caller execute the parent when possible reduces
		 * locking on the zio taskq's, reduces context switch
		 * overhead, and has no recursion penalty.  Note that one
		 * read from disk typically causes at least 3 zio's: a
		 * zio_null(), the logical zio_read(), and then a physical
		 * zio.  When the physical ZIO completes, we are able to call
		 * zio_done() on all 3 of these zio's from one invocation of
		 * zio_execute() by returning the parent back to
		 * zio_execute().  Since the parent isn't executed until this
		 * thread returns back to zio_execute(), the caller should do
		 * so promptly.
		 *
		 * In other cases, dispatching the parent prevents
		 * overflowing the stack when we have deeply nested
		 * parent-child relationships, as we do with the "mega zio"
		 * of writes for spa_sync(), and the chain of ZIL blocks.
		 */
		if (next_to_executep != NULL && *next_to_executep == NULL &&
		    pio->io_type == zio->io_type) {
			*next_to_executep = pio;
		} else {
			zio_taskq_dispatch(pio, type, B_FALSE);
		}
	} else {
		mutex_exit(&pio->io_lock);
	}
}

static void
zio_inherit_child_errors(zio_t *zio, enum zio_child c)
{
	if (zio->io_child_error[c] != 0 && zio->io_error == 0)
		zio->io_error = zio->io_child_error[c];
}

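/*
 * Total ordering on zios by logical bookmark (objset, object, level,
 * blkid).  Ties are broken by the zio pointer itself so that distinct
 * zios never compare equal.
 */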
int
zio_bookmark_compare(const void *x1, const void *x2)
{
	const zio_t *z1 = x1;
	const zio_t *z2 = x2;

	if (z1->io_bookmark.zb_objset < z2->io_bookmark.zb_objset)
		return (-1);
	if (z1->io_bookmark.zb_objset > z2->io_bookmark.zb_objset)
		return (1);

	if (z1->io_bookmark.zb_object < z2->io_bookmark.zb_object)
		return (-1);
	if (z1->io_bookmark.zb_object > z2->io_bookmark.zb_object)
		return (1);

	if (z1->io_bookmark.zb_level < z2->io_bookmark.zb_level)
		return (-1);
	if (z1->io_bookmark.zb_level > z2->io_bookmark.zb_level)
		return (1);

	if (z1->io_bookmark.zb_blkid < z2->io_bookmark.zb_blkid)
		return (-1);
	if (z1->io_bookmark.zb_blkid > z2->io_bookmark.zb_blkid)
		return (1);

	if (z1 < z2)
		return (-1);
	if (z1 > z2)
		return (1);

	return (0);
}

/*
 * ==========================================================================
 * Create the various types of I/O (read, write, free, etc)
 * ==========================================================================
 */
static zio_t *
zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    abd_t *data, uint64_t lsize, uint64_t psize, zio_done_func_t *done,
    void *private, zio_type_t type, zio_priority_t priority,
    zio_flag_t flags, vdev_t *vd, uint64_t offset,
    const zbookmark_phys_t *zb, enum zio_stage stage,
    enum zio_stage pipeline)
{
	zio_t *zio;

	IMPLY(type != ZIO_TYPE_TRIM, psize <= SPA_MAXBLOCKSIZE);
	ASSERT(P2PHASE(psize, SPA_MINBLOCKSIZE) == 0);
	ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);

	ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
	ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
	ASSERT(vd || stage == ZIO_STAGE_OPEN);

	IMPLY(lsize != psize, (flags & ZIO_FLAG_RAW_COMPRESS) != 0);

	zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
	memset(zio, 0, sizeof (zio_t));

	mutex_init(&zio->io_lock, NULL, MUTEX_NOLOCKDEP, NULL);
	cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);

	list_create(&zio->io_parent_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_parent_node));
	list_create(&zio->io_child_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_child_node));
	metaslab_trace_init(&zio->io_alloc_list);

	if (vd != NULL)
		zio->io_child_type = ZIO_CHILD_VDEV;
	else if (flags & ZIO_FLAG_GANG_CHILD)
		zio->io_child_type = ZIO_CHILD_GANG;
	else if (flags & ZIO_FLAG_DDT_CHILD)
		zio->io_child_type = ZIO_CHILD_DDT;
	else
		zio->io_child_type = ZIO_CHILD_LOGICAL;

	if (bp != NULL) {
		zio->io_bp = (blkptr_t *)bp;
		zio->io_bp_copy = *bp;
		zio->io_bp_orig = *bp;
		if (type != ZIO_TYPE_WRITE ||
		    zio->io_child_type == ZIO_CHILD_DDT)
			zio->io_bp = &zio->io_bp_copy;	/* so caller can free */
		if (zio->io_child_type == ZIO_CHILD_LOGICAL)
			zio->io_logical = zio;
		if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
			pipeline |= ZIO_GANG_STAGES;
	}

	zio->io_spa = spa;
	zio->io_txg = txg;
	zio->io_done = done;
	zio->io_private = private;
	zio->io_type = type;
	zio->io_priority = priority;
	zio->io_vd = vd;
	zio->io_offset = offset;
	zio->io_orig_abd = zio->io_abd = data;
	zio->io_orig_size = zio->io_size = psize;
	zio->io_lsize = lsize;
	zio->io_orig_flags = zio->io_flags = flags;
	zio->io_orig_stage = zio->io_stage = stage;
	zio->io_orig_pipeline = zio->io_pipeline = pipeline;
	zio->io_pipeline_trace = ZIO_STAGE_OPEN;

	zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY);
	zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);

	if (zb != NULL)
		zio->io_bookmark = *zb;

	if (pio != NULL) {
		zio->io_metaslab_class = pio->io_metaslab_class;
		if (zio->io_logical == NULL)
			zio->io_logical = pio->io_logical;
		if (zio->io_child_type == ZIO_CHILD_GANG)
			zio->io_gang_leader = pio->io_gang_leader;
		zio_add_child(pio, zio);
	}

	taskq_init_ent(&zio->io_tqent);

	return (zio);
}

void
zio_destroy(zio_t *zio)
{
	metaslab_trace_fini(&zio->io_alloc_list);
	list_destroy(&zio->io_parent_list);
	list_destroy(&zio->io_child_list);
	mutex_destroy(&zio->io_lock);
	cv_destroy(&zio->io_cv);
	kmem_cache_free(zio_cache, zio);
}

zio_t *
zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done,
    void *private, zio_flag_t flags)
{
	zio_t *zio;

	zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
	    ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
	    ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE);

	return (zio);
}

zio_t *
zio_root(spa_t *spa, zio_done_func_t *done, void *private, zio_flag_t flags)
{
	return (zio_null(NULL, spa, NULL, done, private, flags));
}

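/*
 * Dump the contents of a damaged block pointer to the debug log and,
 * depending on blk_verify, either panic (BLK_VERIFY_HALT), log the
 * caller's message (BLK_VERIFY_LOG), or do nothing further
 * (BLK_VERIFY_ONLY).  Always returns 1 so callers can simply add the
 * result to their running error count.
 */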
static int
zfs_blkptr_verify_log(spa_t *spa, const blkptr_t *bp,
    enum blk_verify_flag blk_verify, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	zfs_dbgmsg("bad blkptr at %px: "
	    "DVA[0]=%#llx/%#llx "
	    "DVA[1]=%#llx/%#llx "
	    "DVA[2]=%#llx/%#llx "
	    "prop=%#llx "
	    "pad=%#llx,%#llx "
	    "phys_birth=%#llx "
	    "birth=%#llx "
	    "fill=%#llx "
	    "cksum=%#llx/%#llx/%#llx/%#llx",
	    bp,
	    (long long)bp->blk_dva[0].dva_word[0],
	    (long long)bp->blk_dva[0].dva_word[1],
	    (long long)bp->blk_dva[1].dva_word[0],
	    (long long)bp->blk_dva[1].dva_word[1],
	    (long long)bp->blk_dva[2].dva_word[0],
	    (long long)bp->blk_dva[2].dva_word[1],
	    (long long)bp->blk_prop,
	    (long long)bp->blk_pad[0],
	    (long long)bp->blk_pad[1],
	    (long long)bp->blk_phys_birth,
	    (long long)bp->blk_birth,
	    (long long)bp->blk_fill,
	    (long long)bp->blk_cksum.zc_word[0],
	    (long long)bp->blk_cksum.zc_word[1],
	    (long long)bp->blk_cksum.zc_word[2],
	    (long long)bp->blk_cksum.zc_word[3]);
	switch (blk_verify) {
	case BLK_VERIFY_HALT:
		zfs_panic_recover("%s: %s", spa_name(spa), buf);
		break;
	case BLK_VERIFY_LOG:
		zfs_dbgmsg("%s: %s", spa_name(spa), buf);
		break;
	case BLK_VERIFY_ONLY:
		break;
	}

	return (1);
}

/*
 * Verify the block pointer fields contain reasonable values.  This means
 * it only contains known object types, checksum/compression identifiers,
 * block sizes within the maximum allowed limits, valid DVAs, etc.
 *
 * If everything checks out B_TRUE is returned.  The blk_verify argument
 * controls the behavior when an invalid field is detected.
 *
 * Values for blk_verify_flag:
 *   BLK_VERIFY_ONLY: evaluate the block
 *   BLK_VERIFY_LOG: evaluate the block and log problems
 *   BLK_VERIFY_HALT: call zfs_panic_recover on error
 *
 * Values for blk_config_flag:
 *   BLK_CONFIG_HELD: caller holds SCL_VDEV for writer
 *   BLK_CONFIG_NEEDED: caller holds no config lock, SCL_VDEV will be
 *     obtained for reader
 *   BLK_CONFIG_SKIP: skip checks which require SCL_VDEV, for better
 *     performance
 */
boolean_t
zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp,
    enum blk_config_flag blk_config, enum blk_verify_flag blk_verify)
{
	int errors = 0;

	if (!DMU_OT_IS_VALID(BP_GET_TYPE(bp))) {
		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
		    "blkptr at %px has invalid TYPE %llu",
		    bp, (longlong_t)BP_GET_TYPE(bp));
	}
	if (BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS) {
		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
		    "blkptr at %px has invalid CHECKSUM %llu",
		    bp, (longlong_t)BP_GET_CHECKSUM(bp));
	}
	if (BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS) {
		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
		    "blkptr at %px has invalid COMPRESS %llu",
		    bp, (longlong_t)BP_GET_COMPRESS(bp));
	}
	if (BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE) {
		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
		    "blkptr at %px has invalid LSIZE %llu",
		    bp, (longlong_t)BP_GET_LSIZE(bp));
	}
	if (BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE) {
		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
		    "blkptr at %px has invalid PSIZE %llu",
		    bp, (longlong_t)BP_GET_PSIZE(bp));
	}

	if (BP_IS_EMBEDDED(bp)) {
		if (BPE_GET_ETYPE(bp) >= NUM_BP_EMBEDDED_TYPES) {
			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
			    "blkptr at %px has invalid ETYPE %llu",
			    bp, (longlong_t)BPE_GET_ETYPE(bp));
		}
	}

	/*
	 * Do not verify individual DVAs if the config is not trusted.  This
	 * will be done once the zio is executed in vdev_mirror_map_alloc.
	 */
	if (!spa->spa_trust_config)
		return (errors == 0);

	switch (blk_config) {
	case BLK_CONFIG_HELD:
		ASSERT(spa_config_held(spa, SCL_VDEV, RW_WRITER));
		break;
	case BLK_CONFIG_NEEDED:
		spa_config_enter(spa, SCL_VDEV, bp, RW_READER);
		break;
	case BLK_CONFIG_SKIP:
		return (errors == 0);
	default:
		panic("invalid blk_config %u", blk_config);
	}

	/*
	 * Pool-specific checks.
	 *
	 * Note: it would be nice to verify that the blk_birth and
	 * BP_PHYSICAL_BIRTH() are not too large.  However, spa_freeze()
	 * allows the birth time of log blocks (and dmu_sync()-ed blocks
	 * that are in the log) to be arbitrarily large.
	 */
	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
		const dva_t *dva = &bp->blk_dva[i];
		uint64_t vdevid = DVA_GET_VDEV(dva);

		if (vdevid >= spa->spa_root_vdev->vdev_children) {
			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
			    "blkptr at %px DVA %u has invalid VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
		if (vd == NULL) {
			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
			    "blkptr at %px DVA %u has invalid VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		if (vd->vdev_ops == &vdev_hole_ops) {
			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
			    "blkptr at %px DVA %u has hole VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		if (vd->vdev_ops == &vdev_missing_ops) {
			/*
			 * "missing" vdevs are valid during import, but we
			 * don't have their detailed info (e.g. asize), so
			 * we can't perform any more checks on them.
			 */
			continue;
		}
		uint64_t offset = DVA_GET_OFFSET(dva);
		uint64_t asize = DVA_GET_ASIZE(dva);
		if (DVA_GET_GANG(dva))
			asize = vdev_gang_header_asize(vd);
		if (offset + asize > vd->vdev_asize) {
			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
			    "blkptr at %px DVA %u has invalid OFFSET %llu",
			    bp, i, (longlong_t)offset);
		}
	}
	if (blk_config == BLK_CONFIG_NEEDED)
		spa_config_exit(spa, SCL_VDEV, bp);

	return (errors == 0);
}

boolean_t
zfs_dva_valid(spa_t *spa, const dva_t *dva, const blkptr_t *bp)
{
	(void) bp;
	uint64_t vdevid = DVA_GET_VDEV(dva);

	if (vdevid >= spa->spa_root_vdev->vdev_children)
		return (B_FALSE);

	vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
	if (vd == NULL)
		return (B_FALSE);

	if (vd->vdev_ops == &vdev_hole_ops)
		return (B_FALSE);

	if (vd->vdev_ops == &vdev_missing_ops) {
		return (B_FALSE);
	}

	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t asize = DVA_GET_ASIZE(dva);

	if (DVA_GET_GANG(dva))
		asize = vdev_gang_header_asize(vd);
	if (offset + asize > vd->vdev_asize)
		return (B_FALSE);

	return (B_TRUE);
}

zio_t *
zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
    abd_t *data, uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, zio_flag_t flags, const zbookmark_phys_t *zb)
{
	zio_t *zio;

	zio = zio_create(pio, spa, BP_PHYSICAL_BIRTH(bp), bp,
	    data, size, size, done, private,
	    ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE);

	return (zio);
}

zio_t *
zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    abd_t *data, uint64_t lsize, uint64_t psize, const zio_prop_t *zp,
    zio_done_func_t *ready, zio_done_func_t *children_ready,
    zio_done_func_t *done, void *private, zio_priority_t priority,
    zio_flag_t flags, const zbookmark_phys_t *zb)
{
	zio_t *zio;

	ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF &&
	    zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS &&
	    zp->zp_compress >= ZIO_COMPRESS_OFF &&
	    zp->zp_compress < ZIO_COMPRESS_FUNCTIONS &&
	    DMU_OT_IS_VALID(zp->zp_type) &&
	    zp->zp_level < 32 &&
	    zp->zp_copies > 0 &&
	    zp->zp_copies <= spa_max_replication(spa));

	zio = zio_create(pio, spa, txg, bp, data, lsize, psize, done, private,
	    ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE);

	zio->io_ready = ready;
	zio->io_children_ready = children_ready;
	zio->io_prop = *zp;

	/*
	 * Data can be NULL if we are going to call zio_write_override() to
	 * provide the already-allocated BP.  But we may need the data to
	 * verify a dedup hit (if requested).  In this case, don't try to
	 * dedup (just take the already-allocated BP verbatim).  Encrypted
	 * dedup blocks need data as well so we also disable dedup in this
	 * case.
	 */
	if (data == NULL &&
	    (zio->io_prop.zp_dedup_verify || zio->io_prop.zp_encrypt)) {
		zio->io_prop.zp_dedup = zio->io_prop.zp_dedup_verify = B_FALSE;
	}

	return (zio);
}

zio_t *
zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, abd_t *data,
    uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, zio_flag_t flags, zbookmark_phys_t *zb)
{
	zio_t *zio;

	zio = zio_create(pio, spa, txg, bp, data, size, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_IO_REWRITE, NULL, 0, zb,
	    ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);

	return (zio);
}

void
zio_write_override(zio_t *zio, blkptr_t *bp, int copies, boolean_t nopwrite,
    boolean_t brtwrite)
{
	ASSERT(zio->io_type == ZIO_TYPE_WRITE);
	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
	ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
	ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa));
	ASSERT(!brtwrite || !nopwrite);

	/*
	 * We must reset the io_prop to match the values that existed
	 * when the bp was first written by dmu_sync() keeping in mind
	 * that nopwrite and dedup are mutually exclusive.
	 */
	zio->io_prop.zp_dedup = nopwrite ? B_FALSE : zio->io_prop.zp_dedup;
	zio->io_prop.zp_nopwrite = nopwrite;
	zio->io_prop.zp_brtwrite = brtwrite;
	zio->io_prop.zp_copies = copies;
	zio->io_bp_override = bp;
}

void
zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
{

	(void) zfs_blkptr_verify(spa, bp, BLK_CONFIG_NEEDED, BLK_VERIFY_HALT);

	/*
	 * The check for EMBEDDED is a performance optimization.  We
	 * process the free here (by ignoring it) rather than
	 * putting it on the list and then processing it in zio_free_sync().
	 */
	if (BP_IS_EMBEDDED(bp))
		return;

	/*
	 * Frees that are for the currently-syncing txg, are not going to be
	 * deferred, and which will not need to do a read (i.e. not GANG or
	 * DEDUP), can be processed immediately.  Otherwise, put them on the
	 * in-memory list for later processing.
	 *
	 * Note that we only defer frees after zfs_sync_pass_deferred_free
	 * when the log space map feature is disabled. [see relevant comment
	 * in spa_sync_iterate_to_convergence()]
	 */
	if (BP_IS_GANG(bp) ||
	    BP_GET_DEDUP(bp) ||
	    txg != spa->spa_syncing_txg ||
	    (spa_sync_pass(spa) >= zfs_sync_pass_deferred_free &&
	    !spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) ||
	    brt_maybe_exists(spa, bp)) {
		metaslab_check_free(spa, bp);
		bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
	} else {
		VERIFY3P(zio_free_sync(NULL, spa, txg, bp, 0), ==, NULL);
	}
}

/*
 * To improve performance, this function may return NULL if we were able
 * to do the free immediately.  This avoids the cost of creating a zio
 * (and linking it to the parent, etc).
 */
zio_t *
zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    zio_flag_t flags)
{
	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(spa_syncing_txg(spa) == txg);

	if (BP_IS_EMBEDDED(bp))
		return (NULL);

	metaslab_check_free(spa, bp);
	arc_freed(spa, bp);
	dsl_scan_freed(spa, bp);

	if (BP_IS_GANG(bp) ||
	    BP_GET_DEDUP(bp) ||
	    brt_maybe_exists(spa, bp)) {
		/*
		 * GANG, DEDUP and BRT blocks can induce a read (for the gang
		 * block header, the DDT or the BRT), so issue them
		 * asynchronously so that this thread is not tied up.
		 */
		enum zio_stage stage =
		    ZIO_FREE_PIPELINE | ZIO_STAGE_ISSUE_ASYNC;

		return (zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
		    BP_GET_PSIZE(bp), NULL, NULL,
		    ZIO_TYPE_FREE, ZIO_PRIORITY_NOW,
		    flags, NULL, 0, NULL, ZIO_STAGE_OPEN, stage));
	} else {
		metaslab_free(spa, bp, txg, B_FALSE);
		return (NULL);
	}
}

zio_t *
zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    zio_done_func_t *done, void *private, zio_flag_t flags)
{
	zio_t *zio;

	(void) zfs_blkptr_verify(spa, bp, (flags & ZIO_FLAG_CONFIG_WRITER) ?
	    BLK_CONFIG_HELD : BLK_CONFIG_NEEDED, BLK_VERIFY_HALT);

	if (BP_IS_EMBEDDED(bp))
		return (zio_null(pio, spa, NULL, NULL, NULL, 0));

	/*
	 * A claim is an allocation of a specific block.  Claims are needed
	 * to support immediate writes in the intent log.  The issue is that
	 * immediate writes contain committed data, but in a txg that was
	 * *not* committed.  Upon opening the pool after an unclean shutdown,
	 * the intent log claims all blocks that contain immediate write data
	 * so that the SPA knows they're in use.
	 *
	 * All claims *must* be resolved in the first txg -- before the SPA
	 * starts allocating blocks -- so that nothing is allocated twice.
	 * If txg == 0 we just verify that the block is claimable.
	 */
	ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <,
	    spa_min_claim_txg(spa));
	ASSERT(txg == spa_min_claim_txg(spa) || txg == 0);
	ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa));	/* zdb(8) */

	zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
	    BP_GET_PSIZE(bp), done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW,
	    flags, NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);
	ASSERT0(zio->io_queued_timestamp);

	return (zio);
}

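/*
 * Issue an ioctl to a vdev.  Interior vdevs fan the request out to all of
 * their children under a null parent zio; leaf vdevs get the ioctl zio
 * directly.
 */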
zio_t *
zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd,
    zio_done_func_t *done, void *private, zio_flag_t flags)
{
	zio_t *zio;
	int c;

	if (vd->vdev_children == 0) {
		zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
		    ZIO_TYPE_IOCTL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
		    ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE);

		zio->io_cmd = cmd;
	} else {
		zio = zio_null(pio, spa, NULL, NULL, NULL, flags);

		for (c = 0; c < vd->vdev_children; c++)
			zio_nowait(zio_ioctl(zio, spa, vd->vdev_child[c], cmd,
			    done, private, flags));
	}

	return (zio);
}

zio_t *
zio_trim(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    zio_done_func_t *done, void *private, zio_priority_t priority,
    zio_flag_t flags, enum trim_flag trim_flags)
{
	zio_t *zio;

	ASSERT0(vd->vdev_children);
	ASSERT0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
	ASSERT0(P2PHASE(size, 1ULL << vd->vdev_ashift));
	ASSERT3U(size, !=, 0);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, NULL, size, size, done,
	    private, ZIO_TYPE_TRIM, priority, flags | ZIO_FLAG_PHYSICAL,
	    vd, offset, NULL, ZIO_STAGE_OPEN, ZIO_TRIM_PIPELINE);
	zio->io_trim_flags = trim_flags;

	return (zio);
}

zio_t *
zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    abd_t *data, int checksum, zio_done_func_t *done, void *private,
    zio_priority_t priority, zio_flag_t flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT(vd->vdev_children == 0);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
	    private, ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL, vd,
	    offset, NULL, ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	return (zio);
}

zio_t *
zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    abd_t *data, int checksum, zio_done_func_t *done, void *private,
    zio_priority_t priority, zio_flag_t flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT(vd->vdev_children == 0);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
	    private, ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL, vd,
	    offset, NULL, ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	if (zio_checksum_table[checksum].ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
		/*
		 * zec checksums are necessarily destructive -- they modify
		 * the end of the write buffer to hold the verifier/checksum.
		 * Therefore, we must make a local copy in case the data is
		 * being written to multiple places in parallel.
		 */
		abd_t *wbuf = abd_alloc_sametype(data, size);
		abd_copy(wbuf, data, size);

		zio_push_transform(zio, wbuf, size, size, NULL);
	}

	return (zio);
}

/*
 * Create a child I/O to do some work for us.
 */
zio_t *
zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
    abd_t *data, uint64_t size, int type, zio_priority_t priority,
    zio_flag_t flags, zio_done_func_t *done, void *private)
{
	enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE;
	zio_t *zio;

	/*
	 * vdev child I/Os do not propagate their error to the parent.
	 * Therefore, for correct operation the caller *must* check for
	 * and handle the error in the child i/o's done callback.
	 * The only exceptions are i/os that we don't care about
	 * (OPTIONAL or REPAIR).
	 */
	ASSERT((flags & ZIO_FLAG_OPTIONAL) || (flags & ZIO_FLAG_IO_REPAIR) ||
	    done != NULL);

	if (type == ZIO_TYPE_READ && bp != NULL) {
		/*
		 * If we have the bp, then the child should perform the
		 * checksum and the parent need not.  This pushes error
		 * detection as close to the leaves as possible and
		 * eliminates redundant checksums in the interior nodes.
		 */
		pipeline |= ZIO_STAGE_CHECKSUM_VERIFY;
		pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
	}

	if (vd->vdev_ops->vdev_op_leaf) {
		ASSERT0(vd->vdev_children);
		offset += VDEV_LABEL_START_SIZE;
	}

	flags |= ZIO_VDEV_CHILD_FLAGS(pio);

	/*
	 * If we've decided to do a repair, the write is not speculative --
	 * even if the original read was.
	 */
	if (flags & ZIO_FLAG_IO_REPAIR)
		flags &= ~ZIO_FLAG_SPECULATIVE;

	/*
	 * If we're creating a child I/O that is not associated with a
	 * top-level vdev, then the child zio is not an allocating I/O.
	 * If this is a retried I/O then we ignore it since we will
	 * have already processed the original allocating I/O.
	 */
	if (flags & ZIO_FLAG_IO_ALLOCATING &&
	    (vd != vd->vdev_top || (flags & ZIO_FLAG_IO_RETRY))) {
		ASSERT(pio->io_metaslab_class != NULL);
		ASSERT(pio->io_metaslab_class->mc_alloc_throttle_enabled);
		ASSERT(type == ZIO_TYPE_WRITE);
		ASSERT(priority == ZIO_PRIORITY_ASYNC_WRITE);
		ASSERT(!(flags & ZIO_FLAG_IO_REPAIR));
		ASSERT(!(pio->io_flags & ZIO_FLAG_IO_REWRITE) ||
		    pio->io_child_type == ZIO_CHILD_GANG);

		flags &= ~ZIO_FLAG_IO_ALLOCATING;
	}

	zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size, size,
	    done, private, type, priority, flags, vd, offset, &pio->io_bookmark,
	    ZIO_STAGE_VDEV_IO_START >> 1, pipeline);
	ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);

	return (zio);
}

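/*
 * Create an I/O delegated to a leaf vdev's own machinery (for example,
 * when a leaf vdev's queue aggregates several queued I/Os into a single
 * physical request).  The zio has no parent and enters the pipeline at
 * the vdev I/O stages.
 */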
zio_t *
zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, abd_t *data, uint64_t size,
    zio_type_t type, zio_priority_t priority, zio_flag_t flags,
    zio_done_func_t *done, void *private)
{
	zio_t *zio;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	zio = zio_create(NULL, vd->vdev_spa, 0, NULL,
	    data, size, size, done, private, type, priority,
	    flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_DELEGATED,
	    vd, offset, NULL,
	    ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE);

	return (zio);
}

void
zio_flush(zio_t *zio, vdev_t *vd)
{
	zio_nowait(zio_ioctl(zio, zio->io_spa, vd, DKIOCFLUSHWRITECACHE,
	    NULL, NULL,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY));
}

void
zio_shrink(zio_t *zio, uint64_t size)
{
	ASSERT3P(zio->io_executor, ==, NULL);
	ASSERT3U(zio->io_orig_size, ==, zio->io_size);
	ASSERT3U(size, <=, zio->io_size);

	/*
	 * We don't shrink for raidz because of problems with the
	 * reconstruction when reading back less than the block size.
	 * Note, BP_IS_RAIDZ() assumes no compression.
	 */
	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
	if (!BP_IS_RAIDZ(zio->io_bp)) {
		/* we are not doing a raw write */
		ASSERT3U(zio->io_size, ==, zio->io_lsize);
		zio->io_orig_size = zio->io_size = zio->io_lsize = size;
	}
}

/*
 * ==========================================================================
 * Prepare to read and write logical blocks
 * ==========================================================================
 */

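/*
 * Read-side pipeline stage: push the decompression and/or decryption
 * transforms needed to recover the logical data, copy out the payload of
 * embedded block pointers, and route dedup blocks to the DDT read
 * pipeline.
 */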
static zio_t *
zio_read_bp_init(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	uint64_t psize =
	    BP_IS_EMBEDDED(bp) ? BPE_GET_PSIZE(bp) : BP_GET_PSIZE(bp);

	ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);

	if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
	    zio->io_child_type == ZIO_CHILD_LOGICAL &&
	    !(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
		zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
		    psize, psize, zio_decompress);
	}

	if (((BP_IS_PROTECTED(bp) && !(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) ||
	    BP_HAS_INDIRECT_MAC_CKSUM(bp)) &&
	    zio->io_child_type == ZIO_CHILD_LOGICAL) {
		zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
		    psize, psize, zio_decrypt);
	}

	if (BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA) {
		int psize = BPE_GET_PSIZE(bp);
		void *data = abd_borrow_buf(zio->io_abd, psize);

		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
		decode_embedded_bp_compressed(bp, data);
		abd_return_buf_copy(zio->io_abd, data, psize);
	} else {
		ASSERT(!BP_IS_EMBEDDED(bp));
		ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
	}

	if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL)
		zio->io_pipeline = ZIO_DDT_READ_PIPELINE;

	return (zio);
}

static zio_t *
zio_write_bp_init(zio_t *zio)
{
	if (!IO_IS_ALLOCATING(zio))
		return (zio);

	ASSERT(zio->io_child_type != ZIO_CHILD_DDT);

	if (zio->io_bp_override) {
		blkptr_t *bp = zio->io_bp;
		zio_prop_t *zp = &zio->io_prop;

		ASSERT(bp->blk_birth != zio->io_txg);

		*bp = *zio->io_bp_override;
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

		if (zp->zp_brtwrite)
			return (zio);

		ASSERT(!BP_GET_DEDUP(zio->io_bp_override));

		if (BP_IS_EMBEDDED(bp))
			return (zio);

		/*
		 * If we've been overridden and nopwrite is set then
		 * set the flag accordingly to indicate that a nopwrite
		 * has already occurred.
		 */
		if (!BP_IS_HOLE(bp) && zp->zp_nopwrite) {
			ASSERT(!zp->zp_dedup);
			ASSERT3U(BP_GET_CHECKSUM(bp), ==, zp->zp_checksum);
			zio->io_flags |= ZIO_FLAG_NOPWRITE;
			return (zio);
		}

		ASSERT(!zp->zp_nopwrite);

		if (BP_IS_HOLE(bp) || !zp->zp_dedup)
			return (zio);

		ASSERT((zio_checksum_table[zp->zp_checksum].ci_flags &
		    ZCHECKSUM_FLAG_DEDUP) || zp->zp_dedup_verify);

		if (BP_GET_CHECKSUM(bp) == zp->zp_checksum &&
		    !zp->zp_encrypt) {
			BP_SET_DEDUP(bp, 1);
			zio->io_pipeline |= ZIO_STAGE_DDT_WRITE;
			return (zio);
		}

		/*
		 * We were unable to handle this as an override bp, treat
		 * it as a regular write I/O.
		 */
		zio->io_bp_override = NULL;
		*bp = zio->io_bp_orig;
		zio->io_pipeline = zio->io_orig_pipeline;
	}

	return (zio);
}

static zio_t *
zio_write_compress(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	zio_prop_t *zp = &zio->io_prop;
	enum zio_compress compress = zp->zp_compress;
	blkptr_t *bp = zio->io_bp;
	uint64_t lsize = zio->io_lsize;
	uint64_t psize = zio->io_size;
	uint32_t pass = 1;

	/*
	 * If our children haven't all reached the ready stage,
	 * wait for them and then repeat this pipeline stage.
	 */
	if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
	    ZIO_CHILD_GANG_BIT, ZIO_WAIT_READY)) {
		return (NULL);
	}

	if (!IO_IS_ALLOCATING(zio))
		return (zio);

	if (zio->io_children_ready != NULL) {
		/*
		 * Now that all our children are ready, run the callback
		 * associated with this zio in case it wants to modify the
		 * data to be written.
		 */
		ASSERT3U(zp->zp_level, >, 0);
		zio->io_children_ready(zio);
	}

	ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
	ASSERT(zio->io_bp_override == NULL);

	if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg) {
		/*
		 * We're rewriting an existing block, which means we're
		 * working on behalf of spa_sync().  For spa_sync() to
		 * converge, it must eventually be the case that we don't
		 * have to allocate new blocks.  But compression changes
		 * the blocksize, which forces a reallocate, and makes
		 * convergence take longer.  Therefore, after the first
		 * few passes, stop compressing to ensure convergence.
		 */
		pass = spa_sync_pass(spa);

		ASSERT(zio->io_txg == spa_syncing_txg(spa));
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
		ASSERT(!BP_GET_DEDUP(bp));

		if (pass >= zfs_sync_pass_dont_compress)
			compress = ZIO_COMPRESS_OFF;

		/* Make sure someone doesn't change their mind on overwrites */
		ASSERT(BP_IS_EMBEDDED(bp) || MIN(zp->zp_copies + BP_IS_GANG(bp),
		    spa_max_replication(spa)) == BP_GET_NDVAS(bp));
	}

	/* If it's a compressed write that is not raw, compress the buffer. */
	if (compress != ZIO_COMPRESS_OFF &&
	    !(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
		void *cbuf = NULL;
		psize = zio_compress_data(compress, zio->io_abd, &cbuf, lsize,
		    zp->zp_complevel);
		if (psize == 0) {
			compress = ZIO_COMPRESS_OFF;
		} else if (psize >= lsize) {
			compress = ZIO_COMPRESS_OFF;
			if (cbuf != NULL)
				zio_buf_free(cbuf, lsize);
		} else if (!zp->zp_dedup && !zp->zp_encrypt &&
		    psize <= BPE_PAYLOAD_SIZE &&
		    zp->zp_level == 0 && !DMU_OT_HAS_FILL(zp->zp_type) &&
		    spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA)) {
			encode_embedded_bp_compressed(bp,
			    cbuf, compress, lsize, psize);
			BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA);
			BP_SET_TYPE(bp, zio->io_prop.zp_type);
			BP_SET_LEVEL(bp, zio->io_prop.zp_level);
			zio_buf_free(cbuf, lsize);
			bp->blk_birth = zio->io_txg;
			zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
			ASSERT(spa_feature_is_active(spa,
			    SPA_FEATURE_EMBEDDED_DATA));
			return (zio);
		} else {
			/*
			 * Round compressed size up to the minimum allocation
			 * size of the smallest-ashift device, and zero the
			 * tail.  This ensures that the compressed size of the
			 * BP (and thus compressratio property) are correct,
			 * in that we charge for the padding used to fill out
			 * the last sector.
1772 */ 1773 ASSERT3U(spa->spa_min_alloc, >=, SPA_MINBLOCKSHIFT); 1774 size_t rounded = (size_t)roundup(psize, 1775 spa->spa_min_alloc); 1776 if (rounded >= lsize) { 1777 compress = ZIO_COMPRESS_OFF; 1778 zio_buf_free(cbuf, lsize); 1779 psize = lsize; 1780 } else { 1781 abd_t *cdata = abd_get_from_buf(cbuf, lsize); 1782 abd_take_ownership_of_buf(cdata, B_TRUE); 1783 abd_zero_off(cdata, psize, rounded - psize); 1784 psize = rounded; 1785 zio_push_transform(zio, cdata, 1786 psize, lsize, NULL); 1787 } 1788 } 1789 1790 /* 1791 * We were unable to handle this as an override bp, treat 1792 * it as a regular write I/O. 1793 */ 1794 zio->io_bp_override = NULL; 1795 *bp = zio->io_bp_orig; 1796 zio->io_pipeline = zio->io_orig_pipeline; 1797 1798 } else if ((zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) != 0 && 1799 zp->zp_type == DMU_OT_DNODE) { 1800 /* 1801 * The DMU actually relies on the zio layer's compression 1802 * to free metadnode blocks that have had all contained 1803 * dnodes freed. As a result, even when doing a raw 1804 * receive, we must check whether the block can be compressed 1805 * to a hole. 1806 */ 1807 psize = zio_compress_data(ZIO_COMPRESS_EMPTY, 1808 zio->io_abd, NULL, lsize, zp->zp_complevel); 1809 if (psize == 0 || psize >= lsize) 1810 compress = ZIO_COMPRESS_OFF; 1811 } else if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS && 1812 !(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) { 1813 /* 1814 * If we are raw receiving an encrypted dataset we should not 1815 * take this codepath because it will change the on-disk block 1816 * and decryption will fail. 1817 */ 1818 size_t rounded = MIN((size_t)roundup(psize, 1819 spa->spa_min_alloc), lsize); 1820 1821 if (rounded != psize) { 1822 abd_t *cdata = abd_alloc_linear(rounded, B_TRUE); 1823 abd_zero_off(cdata, psize, rounded - psize); 1824 abd_copy_off(cdata, zio->io_abd, 0, 0, psize); 1825 psize = rounded; 1826 zio_push_transform(zio, cdata, 1827 psize, rounded, NULL); 1828 } 1829 } else { 1830 ASSERT3U(psize, !=, 0); 1831 } 1832 1833 /* 1834 * The final pass of spa_sync() must be all rewrites, but the first 1835 * few passes offer a trade-off: allocating blocks defers convergence, 1836 * but newly allocated blocks are sequential, so they can be written 1837 * to disk faster. Therefore, we allow the first few passes of 1838 * spa_sync() to allocate new blocks, but force rewrites after that. 1839 * There should only be a handful of blocks after pass 1 in any case. 
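	 * (The cutover pass is the zfs_sync_pass_rewrite tunable used below.)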
1840 */ 1841 if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg && 1842 BP_GET_PSIZE(bp) == psize && 1843 pass >= zfs_sync_pass_rewrite) { 1844 VERIFY3U(psize, !=, 0); 1845 enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES; 1846 1847 zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages; 1848 zio->io_flags |= ZIO_FLAG_IO_REWRITE; 1849 } else { 1850 BP_ZERO(bp); 1851 zio->io_pipeline = ZIO_WRITE_PIPELINE; 1852 } 1853 1854 if (psize == 0) { 1855 if (zio->io_bp_orig.blk_birth != 0 && 1856 spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) { 1857 BP_SET_LSIZE(bp, lsize); 1858 BP_SET_TYPE(bp, zp->zp_type); 1859 BP_SET_LEVEL(bp, zp->zp_level); 1860 BP_SET_BIRTH(bp, zio->io_txg, 0); 1861 } 1862 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 1863 } else { 1864 ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER); 1865 BP_SET_LSIZE(bp, lsize); 1866 BP_SET_TYPE(bp, zp->zp_type); 1867 BP_SET_LEVEL(bp, zp->zp_level); 1868 BP_SET_PSIZE(bp, psize); 1869 BP_SET_COMPRESS(bp, compress); 1870 BP_SET_CHECKSUM(bp, zp->zp_checksum); 1871 BP_SET_DEDUP(bp, zp->zp_dedup); 1872 BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER); 1873 if (zp->zp_dedup) { 1874 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 1875 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE)); 1876 ASSERT(!zp->zp_encrypt || 1877 DMU_OT_IS_ENCRYPTED(zp->zp_type)); 1878 zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE; 1879 } 1880 if (zp->zp_nopwrite) { 1881 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 1882 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE)); 1883 zio->io_pipeline |= ZIO_STAGE_NOP_WRITE; 1884 } 1885 } 1886 return (zio); 1887 } 1888 1889 static zio_t * 1890 zio_free_bp_init(zio_t *zio) 1891 { 1892 blkptr_t *bp = zio->io_bp; 1893 1894 if (zio->io_child_type == ZIO_CHILD_LOGICAL) { 1895 if (BP_GET_DEDUP(bp)) 1896 zio->io_pipeline = ZIO_DDT_FREE_PIPELINE; 1897 } 1898 1899 ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy); 1900 1901 return (zio); 1902 } 1903 1904 /* 1905 * ========================================================================== 1906 * Execute the I/O pipeline 1907 * ========================================================================== 1908 */ 1909 1910 static void 1911 zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline) 1912 { 1913 spa_t *spa = zio->io_spa; 1914 zio_type_t t = zio->io_type; 1915 int flags = (cutinline ? TQ_FRONT : 0); 1916 1917 /* 1918 * If we're a config writer or a probe, the normal issue and 1919 * interrupt threads may all be blocked waiting for the config lock. 1920 * In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL. 1921 */ 1922 if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE)) 1923 t = ZIO_TYPE_NULL; 1924 1925 /* 1926 * A similar issue exists for the L2ARC write thread until L2ARC 2.0. 1927 */ 1928 if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux) 1929 t = ZIO_TYPE_NULL; 1930 1931 /* 1932 * If this is a high priority I/O, then use the high priority taskq if 1933 * available. 1934 */ 1935 if ((zio->io_priority == ZIO_PRIORITY_NOW || 1936 zio->io_priority == ZIO_PRIORITY_SYNC_WRITE) && 1937 spa->spa_zio_taskq[t][q + 1].stqs_count != 0) 1938 q++; 1939 1940 ASSERT3U(q, <, ZIO_TASKQ_TYPES); 1941 1942 /* 1943 * NB: We are assuming that the zio can only be dispatched 1944 * to a single taskq at a time. It would be a grievous error 1945 * to dispatch the zio to another taskq at the same time. 
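	 * The taskq entry is embedded in the zio itself (io_tqent), so a
	 * concurrent second dispatch would corrupt it; the ASSERT below
	 * checks that the entry is not already queued.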
1946 */ 1947 ASSERT(taskq_empty_ent(&zio->io_tqent)); 1948 spa_taskq_dispatch_ent(spa, t, q, zio_execute, zio, flags, 1949 &zio->io_tqent); 1950 } 1951 1952 static boolean_t 1953 zio_taskq_member(zio_t *zio, zio_taskq_type_t q) 1954 { 1955 spa_t *spa = zio->io_spa; 1956 1957 taskq_t *tq = taskq_of_curthread(); 1958 1959 for (zio_type_t t = 0; t < ZIO_TYPES; t++) { 1960 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q]; 1961 uint_t i; 1962 for (i = 0; i < tqs->stqs_count; i++) { 1963 if (tqs->stqs_taskq[i] == tq) 1964 return (B_TRUE); 1965 } 1966 } 1967 1968 return (B_FALSE); 1969 } 1970 1971 static zio_t * 1972 zio_issue_async(zio_t *zio) 1973 { 1974 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE); 1975 1976 return (NULL); 1977 } 1978 1979 void 1980 zio_interrupt(void *zio) 1981 { 1982 zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE); 1983 } 1984 1985 void 1986 zio_delay_interrupt(zio_t *zio) 1987 { 1988 /* 1989 * The timeout_generic() function isn't defined in userspace, so 1990 * rather than trying to implement the function, the zio delay 1991 * functionality has been disabled for userspace builds. 1992 */ 1993 1994 #ifdef _KERNEL 1995 /* 1996 * If io_target_timestamp is zero, then no delay has been registered 1997 * for this IO, thus jump to the end of this function and "skip" the 1998 * delay; issuing it directly to the zio layer. 1999 */ 2000 if (zio->io_target_timestamp != 0) { 2001 hrtime_t now = gethrtime(); 2002 2003 if (now >= zio->io_target_timestamp) { 2004 /* 2005 * This IO has already taken longer than the target 2006 * delay to complete, so we don't want to delay it 2007 * any longer; we "miss" the delay and issue it 2008 * directly to the zio layer. This is likely due to 2009 * the target latency being set to a value less than 2010 * the underlying hardware can satisfy (e.g. delay 2011 * set to 1ms, but the disks take 10ms to complete an 2012 * IO request). 2013 */ 2014 2015 DTRACE_PROBE2(zio__delay__miss, zio_t *, zio, 2016 hrtime_t, now); 2017 2018 zio_interrupt(zio); 2019 } else { 2020 taskqid_t tid; 2021 hrtime_t diff = zio->io_target_timestamp - now; 2022 clock_t expire_at_tick = ddi_get_lbolt() + 2023 NSEC_TO_TICK(diff); 2024 2025 DTRACE_PROBE3(zio__delay__hit, zio_t *, zio, 2026 hrtime_t, now, hrtime_t, diff); 2027 2028 if (NSEC_TO_TICK(diff) == 0) { 2029 /* Our delay is less than a jiffy - just spin */ 2030 zfs_sleep_until(zio->io_target_timestamp); 2031 zio_interrupt(zio); 2032 } else { 2033 /* 2034 * Use taskq_dispatch_delay() in the place of 2035 * OpenZFS's timeout_generic(). 2036 */ 2037 tid = taskq_dispatch_delay(system_taskq, 2038 zio_interrupt, zio, TQ_NOSLEEP, 2039 expire_at_tick); 2040 if (tid == TASKQID_INVALID) { 2041 /* 2042 * Couldn't allocate a task. Just 2043 * finish the zio without a delay. 2044 */ 2045 zio_interrupt(zio); 2046 } 2047 } 2048 } 2049 return; 2050 } 2051 #endif 2052 DTRACE_PROBE1(zio__delay__skip, zio_t *, zio); 2053 zio_interrupt(zio); 2054 } 2055 2056 static void 2057 zio_deadman_impl(zio_t *pio, int ziodepth) 2058 { 2059 zio_t *cio, *cio_next; 2060 zio_link_t *zl = NULL; 2061 vdev_t *vd = pio->io_vd; 2062 2063 if (zio_deadman_log_all || (vd != NULL && vd->vdev_ops->vdev_op_leaf)) { 2064 vdev_queue_t *vq = vd ? 
&vd->vdev_queue : NULL; 2065 zbookmark_phys_t *zb = &pio->io_bookmark; 2066 uint64_t delta = gethrtime() - pio->io_timestamp; 2067 uint64_t failmode = spa_get_deadman_failmode(pio->io_spa); 2068 2069 zfs_dbgmsg("slow zio[%d]: zio=%px timestamp=%llu " 2070 "delta=%llu queued=%llu io=%llu " 2071 "path=%s " 2072 "last=%llu type=%d " 2073 "priority=%d flags=0x%llx stage=0x%x " 2074 "pipeline=0x%x pipeline-trace=0x%x " 2075 "objset=%llu object=%llu " 2076 "level=%llu blkid=%llu " 2077 "offset=%llu size=%llu " 2078 "error=%d", 2079 ziodepth, pio, pio->io_timestamp, 2080 (u_longlong_t)delta, pio->io_delta, pio->io_delay, 2081 vd ? vd->vdev_path : "NULL", 2082 vq ? vq->vq_io_complete_ts : 0, pio->io_type, 2083 pio->io_priority, (u_longlong_t)pio->io_flags, 2084 pio->io_stage, pio->io_pipeline, pio->io_pipeline_trace, 2085 (u_longlong_t)zb->zb_objset, (u_longlong_t)zb->zb_object, 2086 (u_longlong_t)zb->zb_level, (u_longlong_t)zb->zb_blkid, 2087 (u_longlong_t)pio->io_offset, (u_longlong_t)pio->io_size, 2088 pio->io_error); 2089 (void) zfs_ereport_post(FM_EREPORT_ZFS_DEADMAN, 2090 pio->io_spa, vd, zb, pio, 0); 2091 2092 if (failmode == ZIO_FAILURE_MODE_CONTINUE && 2093 taskq_empty_ent(&pio->io_tqent)) { 2094 zio_interrupt(pio); 2095 } 2096 } 2097 2098 mutex_enter(&pio->io_lock); 2099 for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) { 2100 cio_next = zio_walk_children(pio, &zl); 2101 zio_deadman_impl(cio, ziodepth + 1); 2102 } 2103 mutex_exit(&pio->io_lock); 2104 } 2105 2106 /* 2107 * Log the critical information describing this zio and all of its children 2108 * using the zfs_dbgmsg() interface then post deadman event for the ZED. 2109 */ 2110 void 2111 zio_deadman(zio_t *pio, const char *tag) 2112 { 2113 spa_t *spa = pio->io_spa; 2114 char *name = spa_name(spa); 2115 2116 if (!zfs_deadman_enabled || spa_suspended(spa)) 2117 return; 2118 2119 zio_deadman_impl(pio, 0); 2120 2121 switch (spa_get_deadman_failmode(spa)) { 2122 case ZIO_FAILURE_MODE_WAIT: 2123 zfs_dbgmsg("%s waiting for hung I/O to pool '%s'", tag, name); 2124 break; 2125 2126 case ZIO_FAILURE_MODE_CONTINUE: 2127 zfs_dbgmsg("%s restarting hung I/O for pool '%s'", tag, name); 2128 break; 2129 2130 case ZIO_FAILURE_MODE_PANIC: 2131 fm_panic("%s determined I/O to pool '%s' is hung.", tag, name); 2132 break; 2133 } 2134 } 2135 2136 /* 2137 * Execute the I/O pipeline until one of the following occurs: 2138 * (1) the I/O completes; (2) the pipeline stalls waiting for 2139 * dependent child I/Os; (3) the I/O issues, so we're waiting 2140 * for an I/O completion interrupt; (4) the I/O is delegated by 2141 * vdev-level caching or aggregation; (5) the I/O is deferred 2142 * due to vdev-level queueing; (6) the I/O is handed off to 2143 * another thread. In all cases, the pipeline stops whenever 2144 * there's no CPU work; it never burns a thread in cv_wait_io(). 2145 * 2146 * There's no locking on io_stage because there's no legitimate way 2147 * for multiple threads to be attempting to process the same I/O. 2148 */ 2149 static zio_pipe_stage_t *zio_pipeline[]; 2150 2151 /* 2152 * zio_execute() is a wrapper around the static function 2153 * __zio_execute() so that we can force __zio_execute() to be 2154 * inlined. This reduces stack overhead which is important 2155 * because __zio_execute() is called recursively in several zio 2156 * code paths. zio_execute() itself cannot be inlined because 2157 * it is externally visible. 
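 * zio_execute() also brackets the call with spl_fstrans_mark() and
 * spl_fstrans_unmark() so that memory allocations made while running the
 * pipeline cannot recurse back into the filesystem.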
2158 */ 2159 void 2160 zio_execute(void *zio) 2161 { 2162 fstrans_cookie_t cookie; 2163 2164 cookie = spl_fstrans_mark(); 2165 __zio_execute(zio); 2166 spl_fstrans_unmark(cookie); 2167 } 2168 2169 /* 2170 * Used to determine if in the current context the stack is sized large 2171 * enough to allow zio_execute() to be called recursively. A minimum 2172 * stack size of 16K is required to avoid needing to re-dispatch the zio. 2173 */ 2174 static boolean_t 2175 zio_execute_stack_check(zio_t *zio) 2176 { 2177 #if !defined(HAVE_LARGE_STACKS) 2178 dsl_pool_t *dp = spa_get_dsl(zio->io_spa); 2179 2180 /* Executing in txg_sync_thread() context. */ 2181 if (dp && curthread == dp->dp_tx.tx_sync_thread) 2182 return (B_TRUE); 2183 2184 /* Pool initialization outside of zio_taskq context. */ 2185 if (dp && spa_is_initializing(dp->dp_spa) && 2186 !zio_taskq_member(zio, ZIO_TASKQ_ISSUE) && 2187 !zio_taskq_member(zio, ZIO_TASKQ_ISSUE_HIGH)) 2188 return (B_TRUE); 2189 #else 2190 (void) zio; 2191 #endif /* HAVE_LARGE_STACKS */ 2192 2193 return (B_FALSE); 2194 } 2195 2196 __attribute__((always_inline)) 2197 static inline void 2198 __zio_execute(zio_t *zio) 2199 { 2200 ASSERT3U(zio->io_queued_timestamp, >, 0); 2201 2202 while (zio->io_stage < ZIO_STAGE_DONE) { 2203 enum zio_stage pipeline = zio->io_pipeline; 2204 enum zio_stage stage = zio->io_stage; 2205 2206 zio->io_executor = curthread; 2207 2208 ASSERT(!MUTEX_HELD(&zio->io_lock)); 2209 ASSERT(ISP2(stage)); 2210 ASSERT(zio->io_stall == NULL); 2211 2212 do { 2213 stage <<= 1; 2214 } while ((stage & pipeline) == 0); 2215 2216 ASSERT(stage <= ZIO_STAGE_DONE); 2217 2218 /* 2219 * If we are in interrupt context and this pipeline stage 2220 * will grab a config lock that is held across I/O, 2221 * or may wait for an I/O that needs an interrupt thread 2222 * to complete, issue async to avoid deadlock. 2223 * 2224 * For VDEV_IO_START, we cut in line so that the io will 2225 * be sent to disk promptly. 2226 */ 2227 if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL && 2228 zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) { 2229 boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ? 2230 zio_requeue_io_start_cut_in_line : B_FALSE; 2231 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut); 2232 return; 2233 } 2234 2235 /* 2236 * If the current context doesn't have large enough stacks 2237 * the zio must be issued asynchronously to prevent overflow. 2238 */ 2239 if (zio_execute_stack_check(zio)) { 2240 boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ? 2241 zio_requeue_io_start_cut_in_line : B_FALSE; 2242 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut); 2243 return; 2244 } 2245 2246 zio->io_stage = stage; 2247 zio->io_pipeline_trace |= zio->io_stage; 2248 2249 /* 2250 * The zio pipeline stage returns the next zio to execute 2251 * (typically the same as this one), or NULL if we should 2252 * stop. 2253 */ 2254 zio = zio_pipeline[highbit64(stage) - 1](zio); 2255 2256 if (zio == NULL) 2257 return; 2258 } 2259 } 2260 2261 2262 /* 2263 * ========================================================================== 2264 * Initiate I/O, either sync or async 2265 * ========================================================================== 2266 */ 2267 int 2268 zio_wait(zio_t *zio) 2269 { 2270 /* 2271 * Some routines, like zio_free_sync(), may return a NULL zio 2272 * to avoid the performance overhead of creating and then destroying 2273 * an unneeded zio. For the callers' simplicity, we accept a NULL 2274 * zio and ignore it. 
2275 */ 2276 if (zio == NULL) 2277 return (0); 2278 2279 long timeout = MSEC_TO_TICK(zfs_deadman_ziotime_ms); 2280 int error; 2281 2282 ASSERT3S(zio->io_stage, ==, ZIO_STAGE_OPEN); 2283 ASSERT3P(zio->io_executor, ==, NULL); 2284 2285 zio->io_waiter = curthread; 2286 ASSERT0(zio->io_queued_timestamp); 2287 zio->io_queued_timestamp = gethrtime(); 2288 2289 __zio_execute(zio); 2290 2291 mutex_enter(&zio->io_lock); 2292 while (zio->io_executor != NULL) { 2293 error = cv_timedwait_io(&zio->io_cv, &zio->io_lock, 2294 ddi_get_lbolt() + timeout); 2295 2296 if (zfs_deadman_enabled && error == -1 && 2297 gethrtime() - zio->io_queued_timestamp > 2298 spa_deadman_ziotime(zio->io_spa)) { 2299 mutex_exit(&zio->io_lock); 2300 timeout = MSEC_TO_TICK(zfs_deadman_checktime_ms); 2301 zio_deadman(zio, FTAG); 2302 mutex_enter(&zio->io_lock); 2303 } 2304 } 2305 mutex_exit(&zio->io_lock); 2306 2307 error = zio->io_error; 2308 zio_destroy(zio); 2309 2310 return (error); 2311 } 2312 2313 void 2314 zio_nowait(zio_t *zio) 2315 { 2316 /* 2317 * See comment in zio_wait(). 2318 */ 2319 if (zio == NULL) 2320 return; 2321 2322 ASSERT3P(zio->io_executor, ==, NULL); 2323 2324 if (zio->io_child_type == ZIO_CHILD_LOGICAL && 2325 list_is_empty(&zio->io_parent_list)) { 2326 zio_t *pio; 2327 2328 /* 2329 * This is a logical async I/O with no parent to wait for it. 2330 * We add it to the spa_async_root_zio "Godfather" I/O which 2331 * will ensure they complete prior to unloading the pool. 2332 */ 2333 spa_t *spa = zio->io_spa; 2334 pio = spa->spa_async_zio_root[CPU_SEQID_UNSTABLE]; 2335 2336 zio_add_child(pio, zio); 2337 } 2338 2339 ASSERT0(zio->io_queued_timestamp); 2340 zio->io_queued_timestamp = gethrtime(); 2341 __zio_execute(zio); 2342 } 2343 2344 /* 2345 * ========================================================================== 2346 * Reexecute, cancel, or suspend/resume failed I/O 2347 * ========================================================================== 2348 */ 2349 2350 static void 2351 zio_reexecute(void *arg) 2352 { 2353 zio_t *pio = arg; 2354 zio_t *cio, *cio_next; 2355 2356 ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL); 2357 ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN); 2358 ASSERT(pio->io_gang_leader == NULL); 2359 ASSERT(pio->io_gang_tree == NULL); 2360 2361 pio->io_flags = pio->io_orig_flags; 2362 pio->io_stage = pio->io_orig_stage; 2363 pio->io_pipeline = pio->io_orig_pipeline; 2364 pio->io_reexecute = 0; 2365 pio->io_flags |= ZIO_FLAG_REEXECUTED; 2366 pio->io_pipeline_trace = 0; 2367 pio->io_error = 0; 2368 for (int w = 0; w < ZIO_WAIT_TYPES; w++) 2369 pio->io_state[w] = 0; 2370 for (int c = 0; c < ZIO_CHILD_TYPES; c++) 2371 pio->io_child_error[c] = 0; 2372 2373 if (IO_IS_ALLOCATING(pio)) 2374 BP_ZERO(pio->io_bp); 2375 2376 /* 2377 * As we reexecute pio's children, new children could be created. 2378 * New children go to the head of pio's io_child_list, however, 2379 * so we will (correctly) not reexecute them. The key is that 2380 * the remainder of pio's io_child_list, from 'cio_next' onward, 2381 * cannot be affected by any side effects of reexecuting 'cio'. 
2382 */ 2383 zio_link_t *zl = NULL; 2384 mutex_enter(&pio->io_lock); 2385 for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) { 2386 cio_next = zio_walk_children(pio, &zl); 2387 for (int w = 0; w < ZIO_WAIT_TYPES; w++) 2388 pio->io_children[cio->io_child_type][w]++; 2389 mutex_exit(&pio->io_lock); 2390 zio_reexecute(cio); 2391 mutex_enter(&pio->io_lock); 2392 } 2393 mutex_exit(&pio->io_lock); 2394 2395 /* 2396 * Now that all children have been reexecuted, execute the parent. 2397 * We don't reexecute "The Godfather" I/O here as it's the 2398 * responsibility of the caller to wait on it. 2399 */ 2400 if (!(pio->io_flags & ZIO_FLAG_GODFATHER)) { 2401 pio->io_queued_timestamp = gethrtime(); 2402 __zio_execute(pio); 2403 } 2404 } 2405 2406 void 2407 zio_suspend(spa_t *spa, zio_t *zio, zio_suspend_reason_t reason) 2408 { 2409 if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC) 2410 fm_panic("Pool '%s' has encountered an uncorrectable I/O " 2411 "failure and the failure mode property for this pool " 2412 "is set to panic.", spa_name(spa)); 2413 2414 cmn_err(CE_WARN, "Pool '%s' has encountered an uncorrectable I/O " 2415 "failure and has been suspended.\n", spa_name(spa)); 2416 2417 (void) zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL, 2418 NULL, NULL, 0); 2419 2420 mutex_enter(&spa->spa_suspend_lock); 2421 2422 if (spa->spa_suspend_zio_root == NULL) 2423 spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL, 2424 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 2425 ZIO_FLAG_GODFATHER); 2426 2427 spa->spa_suspended = reason; 2428 2429 if (zio != NULL) { 2430 ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER)); 2431 ASSERT(zio != spa->spa_suspend_zio_root); 2432 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2433 ASSERT(zio_unique_parent(zio) == NULL); 2434 ASSERT(zio->io_stage == ZIO_STAGE_DONE); 2435 zio_add_child(spa->spa_suspend_zio_root, zio); 2436 } 2437 2438 mutex_exit(&spa->spa_suspend_lock); 2439 } 2440 2441 int 2442 zio_resume(spa_t *spa) 2443 { 2444 zio_t *pio; 2445 2446 /* 2447 * Reexecute all previously suspended i/o. 2448 */ 2449 mutex_enter(&spa->spa_suspend_lock); 2450 spa->spa_suspended = ZIO_SUSPEND_NONE; 2451 cv_broadcast(&spa->spa_suspend_cv); 2452 pio = spa->spa_suspend_zio_root; 2453 spa->spa_suspend_zio_root = NULL; 2454 mutex_exit(&spa->spa_suspend_lock); 2455 2456 if (pio == NULL) 2457 return (0); 2458 2459 zio_reexecute(pio); 2460 return (zio_wait(pio)); 2461 } 2462 2463 void 2464 zio_resume_wait(spa_t *spa) 2465 { 2466 mutex_enter(&spa->spa_suspend_lock); 2467 while (spa_suspended(spa)) 2468 cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock); 2469 mutex_exit(&spa->spa_suspend_lock); 2470 } 2471 2472 /* 2473 * ========================================================================== 2474 * Gang blocks. 2475 * 2476 * A gang block is a collection of small blocks that looks to the DMU 2477 * like one large block. When zio_dva_allocate() cannot find a block 2478 * of the requested size, due to either severe fragmentation or the pool 2479 * being nearly full, it calls zio_write_gang_block() to construct the 2480 * block from smaller fragments. 2481 * 2482 * A gang block consists of a gang header (zio_gbh_phys_t) and up to 2483 * three (SPA_GBH_NBLKPTRS) gang members. The gang header is just like 2484 * an indirect block: it's an array of block pointers. It consumes 2485 * only one sector and hence is allocatable regardless of fragmentation. 2486 * The gang header's bps point to its gang members, which hold the data. 
2487 * 2488 * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg> 2489 * as the verifier to ensure uniqueness of the SHA256 checksum. 2490 * Critically, the gang block bp's blk_cksum is the checksum of the data, 2491 * not the gang header. This ensures that data block signatures (needed for 2492 * deduplication) are independent of how the block is physically stored. 2493 * 2494 * Gang blocks can be nested: a gang member may itself be a gang block. 2495 * Thus every gang block is a tree in which root and all interior nodes are 2496 * gang headers, and the leaves are normal blocks that contain user data. 2497 * The root of the gang tree is called the gang leader. 2498 * 2499 * To perform any operation (read, rewrite, free, claim) on a gang block, 2500 * zio_gang_assemble() first assembles the gang tree (minus data leaves) 2501 * in the io_gang_tree field of the original logical i/o by recursively 2502 * reading the gang leader and all gang headers below it. This yields 2503 * an in-core tree containing the contents of every gang header and the 2504 * bps for every constituent of the gang block. 2505 * 2506 * With the gang tree now assembled, zio_gang_issue() just walks the gang tree 2507 * and invokes a callback on each bp. To free a gang block, zio_gang_issue() 2508 * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp. 2509 * zio_claim_gang() provides a similarly trivial wrapper for zio_claim(). 2510 * zio_read_gang() is a wrapper around zio_read() that omits reading gang 2511 * headers, since we already have those in io_gang_tree. zio_rewrite_gang() 2512 * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite() 2513 * of the gang header plus zio_checksum_compute() of the data to update the 2514 * gang header's blk_cksum as described above. 2515 * 2516 * The two-phase assemble/issue model solves the problem of partial failure -- 2517 * what if you'd freed part of a gang block but then couldn't read the 2518 * gang header for another part? Assembling the entire gang tree first 2519 * ensures that all the necessary gang header I/O has succeeded before 2520 * starting the actual work of free, claim, or write. Once the gang tree 2521 * is assembled, free and claim are in-memory operations that cannot fail. 2522 * 2523 * In the event that a gang write fails, zio_dva_unallocate() walks the 2524 * gang tree to immediately free (i.e. insert back into the space map) 2525 * everything we've allocated. This ensures that we don't get ENOSPC 2526 * errors during repeated suspend/resume cycles due to a flaky device. 2527 * 2528 * Gang rewrites only happen during sync-to-convergence. If we can't assemble 2529 * the gang tree, we won't modify the block, so we can safely defer the free 2530 * (knowing that the block is still intact). If we *can* assemble the gang 2531 * tree, then even if some of the rewrites fail, zio_dva_unallocate() will free 2532 * each constituent bp and we can allocate a new block on the next sync pass. 2533 * 2534 * In all cases, the gang tree allows complete recovery from partial failure. 
2535 * ========================================================================== 2536 */ 2537 2538 static void 2539 zio_gang_issue_func_done(zio_t *zio) 2540 { 2541 abd_free(zio->io_abd); 2542 } 2543 2544 static zio_t * 2545 zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 2546 uint64_t offset) 2547 { 2548 if (gn != NULL) 2549 return (pio); 2550 2551 return (zio_read(pio, pio->io_spa, bp, abd_get_offset(data, offset), 2552 BP_GET_PSIZE(bp), zio_gang_issue_func_done, 2553 NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), 2554 &pio->io_bookmark)); 2555 } 2556 2557 static zio_t * 2558 zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 2559 uint64_t offset) 2560 { 2561 zio_t *zio; 2562 2563 if (gn != NULL) { 2564 abd_t *gbh_abd = 2565 abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE); 2566 zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp, 2567 gbh_abd, SPA_GANGBLOCKSIZE, zio_gang_issue_func_done, NULL, 2568 pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), 2569 &pio->io_bookmark); 2570 /* 2571 * As we rewrite each gang header, the pipeline will compute 2572 * a new gang block header checksum for it; but no one will 2573 * compute a new data checksum, so we do that here. The one 2574 * exception is the gang leader: the pipeline already computed 2575 * its data checksum because that stage precedes gang assembly. 2576 * (Presently, nothing actually uses interior data checksums; 2577 * this is just good hygiene.) 2578 */ 2579 if (gn != pio->io_gang_leader->io_gang_tree) { 2580 abd_t *buf = abd_get_offset(data, offset); 2581 2582 zio_checksum_compute(zio, BP_GET_CHECKSUM(bp), 2583 buf, BP_GET_PSIZE(bp)); 2584 2585 abd_free(buf); 2586 } 2587 /* 2588 * If we are here to damage data for testing purposes, 2589 * leave the GBH alone so that we can detect the damage. 
2590 */ 2591 if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE) 2592 zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES; 2593 } else { 2594 zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp, 2595 abd_get_offset(data, offset), BP_GET_PSIZE(bp), 2596 zio_gang_issue_func_done, NULL, pio->io_priority, 2597 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 2598 } 2599 2600 return (zio); 2601 } 2602 2603 static zio_t * 2604 zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 2605 uint64_t offset) 2606 { 2607 (void) gn, (void) data, (void) offset; 2608 2609 zio_t *zio = zio_free_sync(pio, pio->io_spa, pio->io_txg, bp, 2610 ZIO_GANG_CHILD_FLAGS(pio)); 2611 if (zio == NULL) { 2612 zio = zio_null(pio, pio->io_spa, 2613 NULL, NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio)); 2614 } 2615 return (zio); 2616 } 2617 2618 static zio_t * 2619 zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 2620 uint64_t offset) 2621 { 2622 (void) gn, (void) data, (void) offset; 2623 return (zio_claim(pio, pio->io_spa, pio->io_txg, bp, 2624 NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio))); 2625 } 2626 2627 static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = { 2628 NULL, 2629 zio_read_gang, 2630 zio_rewrite_gang, 2631 zio_free_gang, 2632 zio_claim_gang, 2633 NULL 2634 }; 2635 2636 static void zio_gang_tree_assemble_done(zio_t *zio); 2637 2638 static zio_gang_node_t * 2639 zio_gang_node_alloc(zio_gang_node_t **gnpp) 2640 { 2641 zio_gang_node_t *gn; 2642 2643 ASSERT(*gnpp == NULL); 2644 2645 gn = kmem_zalloc(sizeof (*gn), KM_SLEEP); 2646 gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE); 2647 *gnpp = gn; 2648 2649 return (gn); 2650 } 2651 2652 static void 2653 zio_gang_node_free(zio_gang_node_t **gnpp) 2654 { 2655 zio_gang_node_t *gn = *gnpp; 2656 2657 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) 2658 ASSERT(gn->gn_child[g] == NULL); 2659 2660 zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE); 2661 kmem_free(gn, sizeof (*gn)); 2662 *gnpp = NULL; 2663 } 2664 2665 static void 2666 zio_gang_tree_free(zio_gang_node_t **gnpp) 2667 { 2668 zio_gang_node_t *gn = *gnpp; 2669 2670 if (gn == NULL) 2671 return; 2672 2673 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) 2674 zio_gang_tree_free(&gn->gn_child[g]); 2675 2676 zio_gang_node_free(gnpp); 2677 } 2678 2679 static void 2680 zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp) 2681 { 2682 zio_gang_node_t *gn = zio_gang_node_alloc(gnpp); 2683 abd_t *gbh_abd = abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE); 2684 2685 ASSERT(gio->io_gang_leader == gio); 2686 ASSERT(BP_IS_GANG(bp)); 2687 2688 zio_nowait(zio_read(gio, gio->io_spa, bp, gbh_abd, SPA_GANGBLOCKSIZE, 2689 zio_gang_tree_assemble_done, gn, gio->io_priority, 2690 ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark)); 2691 } 2692 2693 static void 2694 zio_gang_tree_assemble_done(zio_t *zio) 2695 { 2696 zio_t *gio = zio->io_gang_leader; 2697 zio_gang_node_t *gn = zio->io_private; 2698 blkptr_t *bp = zio->io_bp; 2699 2700 ASSERT(gio == zio_unique_parent(zio)); 2701 ASSERT(list_is_empty(&zio->io_child_list)); 2702 2703 if (zio->io_error) 2704 return; 2705 2706 /* this ABD was created from a linear buf in zio_gang_tree_assemble */ 2707 if (BP_SHOULD_BYTESWAP(bp)) 2708 byteswap_uint64_array(abd_to_buf(zio->io_abd), zio->io_size); 2709 2710 ASSERT3P(abd_to_buf(zio->io_abd), ==, gn->gn_gbh); 2711 ASSERT(zio->io_size == SPA_GANGBLOCKSIZE); 2712 ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC); 2713 2714 abd_free(zio->io_abd); 2715 2716 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 2717 blkptr_t *gbp = 
&gn->gn_gbh->zg_blkptr[g]; 2718 if (!BP_IS_GANG(gbp)) 2719 continue; 2720 zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]); 2721 } 2722 } 2723 2724 static void 2725 zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, abd_t *data, 2726 uint64_t offset) 2727 { 2728 zio_t *gio = pio->io_gang_leader; 2729 zio_t *zio; 2730 2731 ASSERT(BP_IS_GANG(bp) == !!gn); 2732 ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp)); 2733 ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree); 2734 2735 /* 2736 * If you're a gang header, your data is in gn->gn_gbh. 2737 * If you're a gang member, your data is in 'data' and gn == NULL. 2738 */ 2739 zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data, offset); 2740 2741 if (gn != NULL) { 2742 ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC); 2743 2744 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 2745 blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g]; 2746 if (BP_IS_HOLE(gbp)) 2747 continue; 2748 zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data, 2749 offset); 2750 offset += BP_GET_PSIZE(gbp); 2751 } 2752 } 2753 2754 if (gn == gio->io_gang_tree) 2755 ASSERT3U(gio->io_size, ==, offset); 2756 2757 if (zio != pio) 2758 zio_nowait(zio); 2759 } 2760 2761 static zio_t * 2762 zio_gang_assemble(zio_t *zio) 2763 { 2764 blkptr_t *bp = zio->io_bp; 2765 2766 ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL); 2767 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 2768 2769 zio->io_gang_leader = zio; 2770 2771 zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree); 2772 2773 return (zio); 2774 } 2775 2776 static zio_t * 2777 zio_gang_issue(zio_t *zio) 2778 { 2779 blkptr_t *bp = zio->io_bp; 2780 2781 if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT, ZIO_WAIT_DONE)) { 2782 return (NULL); 2783 } 2784 2785 ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio); 2786 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 2787 2788 if (zio->io_child_error[ZIO_CHILD_GANG] == 0) 2789 zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_abd, 2790 0); 2791 else 2792 zio_gang_tree_free(&zio->io_gang_tree); 2793 2794 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 2795 2796 return (zio); 2797 } 2798 2799 static void 2800 zio_write_gang_member_ready(zio_t *zio) 2801 { 2802 zio_t *pio = zio_unique_parent(zio); 2803 dva_t *cdva = zio->io_bp->blk_dva; 2804 dva_t *pdva = pio->io_bp->blk_dva; 2805 uint64_t asize; 2806 zio_t *gio __maybe_unused = zio->io_gang_leader; 2807 2808 if (BP_IS_HOLE(zio->io_bp)) 2809 return; 2810 2811 ASSERT(BP_IS_HOLE(&zio->io_bp_orig)); 2812 2813 ASSERT(zio->io_child_type == ZIO_CHILD_GANG); 2814 ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies); 2815 ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp)); 2816 ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp)); 2817 VERIFY3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp)); 2818 2819 mutex_enter(&pio->io_lock); 2820 for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) { 2821 ASSERT(DVA_GET_GANG(&pdva[d])); 2822 asize = DVA_GET_ASIZE(&pdva[d]); 2823 asize += DVA_GET_ASIZE(&cdva[d]); 2824 DVA_SET_ASIZE(&pdva[d], asize); 2825 } 2826 mutex_exit(&pio->io_lock); 2827 } 2828 2829 static void 2830 zio_write_gang_done(zio_t *zio) 2831 { 2832 /* 2833 * The io_abd field will be NULL for a zio with no data. The io_flags 2834 * will initially have the ZIO_FLAG_NODATA bit flag set, but we can't 2835 * check for it here as it is cleared in zio_ready. 
2836 */ 2837 if (zio->io_abd != NULL) 2838 abd_free(zio->io_abd); 2839 } 2840 2841 static zio_t * 2842 zio_write_gang_block(zio_t *pio, metaslab_class_t *mc) 2843 { 2844 spa_t *spa = pio->io_spa; 2845 blkptr_t *bp = pio->io_bp; 2846 zio_t *gio = pio->io_gang_leader; 2847 zio_t *zio; 2848 zio_gang_node_t *gn, **gnpp; 2849 zio_gbh_phys_t *gbh; 2850 abd_t *gbh_abd; 2851 uint64_t txg = pio->io_txg; 2852 uint64_t resid = pio->io_size; 2853 uint64_t lsize; 2854 int copies = gio->io_prop.zp_copies; 2855 zio_prop_t zp; 2856 int error; 2857 boolean_t has_data = !(pio->io_flags & ZIO_FLAG_NODATA); 2858 2859 /* 2860 * If one copy was requested, store 2 copies of the GBH, so that we 2861 * can still traverse all the data (e.g. to free or scrub) even if a 2862 * block is damaged. Note that we can't store 3 copies of the GBH in 2863 * all cases, e.g. with encryption, which uses DVA[2] for the IV+salt. 2864 */ 2865 int gbh_copies = copies; 2866 if (gbh_copies == 1) { 2867 gbh_copies = MIN(2, spa_max_replication(spa)); 2868 } 2869 2870 int flags = METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER; 2871 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 2872 ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 2873 ASSERT(has_data); 2874 2875 flags |= METASLAB_ASYNC_ALLOC; 2876 VERIFY(zfs_refcount_held(&mc->mc_allocator[pio->io_allocator]. 2877 mca_alloc_slots, pio)); 2878 2879 /* 2880 * The logical zio has already placed a reservation for 2881 * 'copies' allocation slots but gang blocks may require 2882 * additional copies. These additional copies 2883 * (i.e. gbh_copies - copies) are guaranteed to succeed 2884 * since metaslab_class_throttle_reserve() always allows 2885 * additional reservations for gang blocks. 2886 */ 2887 VERIFY(metaslab_class_throttle_reserve(mc, gbh_copies - copies, 2888 pio->io_allocator, pio, flags)); 2889 } 2890 2891 error = metaslab_alloc(spa, mc, SPA_GANGBLOCKSIZE, 2892 bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, flags, 2893 &pio->io_alloc_list, pio, pio->io_allocator); 2894 if (error) { 2895 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 2896 ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 2897 ASSERT(has_data); 2898 2899 /* 2900 * If we failed to allocate the gang block header then 2901 * we remove any additional allocation reservations that 2902 * we placed here. The original reservation will 2903 * be removed when the logical I/O goes to the ready 2904 * stage. 2905 */ 2906 metaslab_class_throttle_unreserve(mc, 2907 gbh_copies - copies, pio->io_allocator, pio); 2908 } 2909 2910 pio->io_error = error; 2911 return (pio); 2912 } 2913 2914 if (pio == gio) { 2915 gnpp = &gio->io_gang_tree; 2916 } else { 2917 gnpp = pio->io_private; 2918 ASSERT(pio->io_ready == zio_write_gang_member_ready); 2919 } 2920 2921 gn = zio_gang_node_alloc(gnpp); 2922 gbh = gn->gn_gbh; 2923 memset(gbh, 0, SPA_GANGBLOCKSIZE); 2924 gbh_abd = abd_get_from_buf(gbh, SPA_GANGBLOCKSIZE); 2925 2926 /* 2927 * Create the gang header. 2928 */ 2929 zio = zio_rewrite(pio, spa, txg, bp, gbh_abd, SPA_GANGBLOCKSIZE, 2930 zio_write_gang_done, NULL, pio->io_priority, 2931 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 2932 2933 /* 2934 * Create and nowait the gang children. 
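	 * Each child gets an even share of the remaining size, rounded up
	 * to SPA_MINBLOCKSIZE, so later children may be smaller than
	 * earlier ones.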
2935 */ 2936 for (int g = 0; resid != 0; resid -= lsize, g++) { 2937 lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g), 2938 SPA_MINBLOCKSIZE); 2939 ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid); 2940 2941 zp.zp_checksum = gio->io_prop.zp_checksum; 2942 zp.zp_compress = ZIO_COMPRESS_OFF; 2943 zp.zp_complevel = gio->io_prop.zp_complevel; 2944 zp.zp_type = DMU_OT_NONE; 2945 zp.zp_level = 0; 2946 zp.zp_copies = gio->io_prop.zp_copies; 2947 zp.zp_dedup = B_FALSE; 2948 zp.zp_dedup_verify = B_FALSE; 2949 zp.zp_nopwrite = B_FALSE; 2950 zp.zp_encrypt = gio->io_prop.zp_encrypt; 2951 zp.zp_byteorder = gio->io_prop.zp_byteorder; 2952 memset(zp.zp_salt, 0, ZIO_DATA_SALT_LEN); 2953 memset(zp.zp_iv, 0, ZIO_DATA_IV_LEN); 2954 memset(zp.zp_mac, 0, ZIO_DATA_MAC_LEN); 2955 2956 zio_t *cio = zio_write(zio, spa, txg, &gbh->zg_blkptr[g], 2957 has_data ? abd_get_offset(pio->io_abd, pio->io_size - 2958 resid) : NULL, lsize, lsize, &zp, 2959 zio_write_gang_member_ready, NULL, 2960 zio_write_gang_done, &gn->gn_child[g], pio->io_priority, 2961 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 2962 2963 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 2964 ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 2965 ASSERT(has_data); 2966 2967 /* 2968 * Gang children won't throttle but we should 2969 * account for their work, so reserve an allocation 2970 * slot for them here. 2971 */ 2972 VERIFY(metaslab_class_throttle_reserve(mc, 2973 zp.zp_copies, cio->io_allocator, cio, flags)); 2974 } 2975 zio_nowait(cio); 2976 } 2977 2978 /* 2979 * Set pio's pipeline to just wait for zio to finish. 2980 */ 2981 pio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 2982 2983 /* 2984 * We didn't allocate this bp, so make sure it doesn't get unmarked. 2985 */ 2986 pio->io_flags &= ~ZIO_FLAG_FASTWRITE; 2987 2988 zio_nowait(zio); 2989 2990 return (pio); 2991 } 2992 2993 /* 2994 * The zio_nop_write stage in the pipeline determines if allocating a 2995 * new bp is necessary. The nopwrite feature can handle writes in 2996 * either syncing or open context (i.e. zil writes) and as a result is 2997 * mutually exclusive with dedup. 2998 * 2999 * By leveraging a cryptographically secure checksum, such as SHA256, we 3000 * can compare the checksums of the new data and the old to determine if 3001 * allocating a new block is required. Note that our requirements for 3002 * cryptographic strength are fairly weak: there can't be any accidental 3003 * hash collisions, but we don't need to be secure against intentional 3004 * (malicious) collisions. To trigger a nopwrite, you have to be able 3005 * to write the file to begin with, and triggering an incorrect (hash 3006 * collision) nopwrite is no worse than simply writing to the file. 3007 * That said, there are no known attacks against the checksum algorithms 3008 * used for nopwrite, assuming that the salt and the checksums 3009 * themselves remain secret. 3010 */ 3011 static zio_t * 3012 zio_nop_write(zio_t *zio) 3013 { 3014 blkptr_t *bp = zio->io_bp; 3015 blkptr_t *bp_orig = &zio->io_bp_orig; 3016 zio_prop_t *zp = &zio->io_prop; 3017 3018 ASSERT(BP_IS_HOLE(bp)); 3019 ASSERT(BP_GET_LEVEL(bp) == 0); 3020 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE)); 3021 ASSERT(zp->zp_nopwrite); 3022 ASSERT(!zp->zp_dedup); 3023 ASSERT(zio->io_bp_override == NULL); 3024 ASSERT(IO_IS_ALLOCATING(zio)); 3025 3026 /* 3027 * Check to see if the original bp and the new bp have matching 3028 * characteristics (i.e. same checksum, compression algorithms, etc). 
3029 * If they don't then just continue with the pipeline which will 3030 * allocate a new bp. 3031 */ 3032 if (BP_IS_HOLE(bp_orig) || 3033 !(zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_flags & 3034 ZCHECKSUM_FLAG_NOPWRITE) || 3035 BP_IS_ENCRYPTED(bp) || BP_IS_ENCRYPTED(bp_orig) || 3036 BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) || 3037 BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) || 3038 BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) || 3039 zp->zp_copies != BP_GET_NDVAS(bp_orig)) 3040 return (zio); 3041 3042 /* 3043 * If the checksums match then reset the pipeline so that we 3044 * avoid allocating a new bp and issuing any I/O. 3045 */ 3046 if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) { 3047 ASSERT(zio_checksum_table[zp->zp_checksum].ci_flags & 3048 ZCHECKSUM_FLAG_NOPWRITE); 3049 ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig)); 3050 ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig)); 3051 ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF); 3052 ASSERT3U(bp->blk_prop, ==, bp_orig->blk_prop); 3053 3054 /* 3055 * If we're overwriting a block that is currently on an 3056 * indirect vdev, then ignore the nopwrite request and 3057 * allow a new block to be allocated on a concrete vdev. 3058 */ 3059 spa_config_enter(zio->io_spa, SCL_VDEV, FTAG, RW_READER); 3060 for (int d = 0; d < BP_GET_NDVAS(bp_orig); d++) { 3061 vdev_t *tvd = vdev_lookup_top(zio->io_spa, 3062 DVA_GET_VDEV(&bp_orig->blk_dva[d])); 3063 if (tvd->vdev_ops == &vdev_indirect_ops) { 3064 spa_config_exit(zio->io_spa, SCL_VDEV, FTAG); 3065 return (zio); 3066 } 3067 } 3068 spa_config_exit(zio->io_spa, SCL_VDEV, FTAG); 3069 3070 *bp = *bp_orig; 3071 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 3072 zio->io_flags |= ZIO_FLAG_NOPWRITE; 3073 } 3074 3075 return (zio); 3076 } 3077 3078 /* 3079 * ========================================================================== 3080 * Block Reference Table 3081 * ========================================================================== 3082 */ 3083 static zio_t * 3084 zio_brt_free(zio_t *zio) 3085 { 3086 blkptr_t *bp; 3087 3088 bp = zio->io_bp; 3089 3090 if (BP_GET_LEVEL(bp) > 0 || 3091 BP_IS_METADATA(bp) || 3092 !brt_maybe_exists(zio->io_spa, bp)) { 3093 return (zio); 3094 } 3095 3096 if (!brt_entry_decref(zio->io_spa, bp)) { 3097 /* 3098 * This isn't the last reference, so we cannot free 3099 * the data yet. 
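		 * Switching to the interlock pipeline skips the DVA free
		 * stages, so the only effect of this zio is the reference
		 * count decrement performed above.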
3100 */ 3101 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 3102 } 3103 3104 return (zio); 3105 } 3106 3107 /* 3108 * ========================================================================== 3109 * Dedup 3110 * ========================================================================== 3111 */ 3112 static void 3113 zio_ddt_child_read_done(zio_t *zio) 3114 { 3115 blkptr_t *bp = zio->io_bp; 3116 ddt_entry_t *dde = zio->io_private; 3117 ddt_phys_t *ddp; 3118 zio_t *pio = zio_unique_parent(zio); 3119 3120 mutex_enter(&pio->io_lock); 3121 ddp = ddt_phys_select(dde, bp); 3122 if (zio->io_error == 0) 3123 ddt_phys_clear(ddp); /* this ddp doesn't need repair */ 3124 3125 if (zio->io_error == 0 && dde->dde_repair_abd == NULL) 3126 dde->dde_repair_abd = zio->io_abd; 3127 else 3128 abd_free(zio->io_abd); 3129 mutex_exit(&pio->io_lock); 3130 } 3131 3132 static zio_t * 3133 zio_ddt_read_start(zio_t *zio) 3134 { 3135 blkptr_t *bp = zio->io_bp; 3136 3137 ASSERT(BP_GET_DEDUP(bp)); 3138 ASSERT(BP_GET_PSIZE(bp) == zio->io_size); 3139 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 3140 3141 if (zio->io_child_error[ZIO_CHILD_DDT]) { 3142 ddt_t *ddt = ddt_select(zio->io_spa, bp); 3143 ddt_entry_t *dde = ddt_repair_start(ddt, bp); 3144 ddt_phys_t *ddp = dde->dde_phys; 3145 ddt_phys_t *ddp_self = ddt_phys_select(dde, bp); 3146 blkptr_t blk; 3147 3148 ASSERT(zio->io_vsd == NULL); 3149 zio->io_vsd = dde; 3150 3151 if (ddp_self == NULL) 3152 return (zio); 3153 3154 for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { 3155 if (ddp->ddp_phys_birth == 0 || ddp == ddp_self) 3156 continue; 3157 ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp, 3158 &blk); 3159 zio_nowait(zio_read(zio, zio->io_spa, &blk, 3160 abd_alloc_for_io(zio->io_size, B_TRUE), 3161 zio->io_size, zio_ddt_child_read_done, dde, 3162 zio->io_priority, ZIO_DDT_CHILD_FLAGS(zio) | 3163 ZIO_FLAG_DONT_PROPAGATE, &zio->io_bookmark)); 3164 } 3165 return (zio); 3166 } 3167 3168 zio_nowait(zio_read(zio, zio->io_spa, bp, 3169 zio->io_abd, zio->io_size, NULL, NULL, zio->io_priority, 3170 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark)); 3171 3172 return (zio); 3173 } 3174 3175 static zio_t * 3176 zio_ddt_read_done(zio_t *zio) 3177 { 3178 blkptr_t *bp = zio->io_bp; 3179 3180 if (zio_wait_for_children(zio, ZIO_CHILD_DDT_BIT, ZIO_WAIT_DONE)) { 3181 return (NULL); 3182 } 3183 3184 ASSERT(BP_GET_DEDUP(bp)); 3185 ASSERT(BP_GET_PSIZE(bp) == zio->io_size); 3186 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 3187 3188 if (zio->io_child_error[ZIO_CHILD_DDT]) { 3189 ddt_t *ddt = ddt_select(zio->io_spa, bp); 3190 ddt_entry_t *dde = zio->io_vsd; 3191 if (ddt == NULL) { 3192 ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE); 3193 return (zio); 3194 } 3195 if (dde == NULL) { 3196 zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1; 3197 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE); 3198 return (NULL); 3199 } 3200 if (dde->dde_repair_abd != NULL) { 3201 abd_copy(zio->io_abd, dde->dde_repair_abd, 3202 zio->io_size); 3203 zio->io_child_error[ZIO_CHILD_DDT] = 0; 3204 } 3205 ddt_repair_done(ddt, dde); 3206 zio->io_vsd = NULL; 3207 } 3208 3209 ASSERT(zio->io_vsd == NULL); 3210 3211 return (zio); 3212 } 3213 3214 static boolean_t 3215 zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde) 3216 { 3217 spa_t *spa = zio->io_spa; 3218 boolean_t do_raw = !!(zio->io_flags & ZIO_FLAG_RAW); 3219 3220 ASSERT(!(zio->io_bp_override && do_raw)); 3221 3222 /* 3223 * Note: we compare the original data, not the transformed data, 3224 * because when zio->io_bp is an override bp, we will not have 
3225 * pushed the I/O transforms. That's an important optimization 3226 * because otherwise we'd compress/encrypt all dmu_sync() data twice. 3227 * However, we should never get a raw, override zio so in these 3228 * cases we can compare the io_abd directly. This is useful because 3229 * it allows us to do dedup verification even if we don't have access 3230 * to the original data (for instance, if the encryption keys aren't 3231 * loaded). 3232 */ 3233 3234 for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { 3235 zio_t *lio = dde->dde_lead_zio[p]; 3236 3237 if (lio != NULL && do_raw) { 3238 return (lio->io_size != zio->io_size || 3239 abd_cmp(zio->io_abd, lio->io_abd) != 0); 3240 } else if (lio != NULL) { 3241 return (lio->io_orig_size != zio->io_orig_size || 3242 abd_cmp(zio->io_orig_abd, lio->io_orig_abd) != 0); 3243 } 3244 } 3245 3246 for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { 3247 ddt_phys_t *ddp = &dde->dde_phys[p]; 3248 3249 if (ddp->ddp_phys_birth != 0 && do_raw) { 3250 blkptr_t blk = *zio->io_bp; 3251 uint64_t psize; 3252 abd_t *tmpabd; 3253 int error; 3254 3255 ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth); 3256 psize = BP_GET_PSIZE(&blk); 3257 3258 if (psize != zio->io_size) 3259 return (B_TRUE); 3260 3261 ddt_exit(ddt); 3262 3263 tmpabd = abd_alloc_for_io(psize, B_TRUE); 3264 3265 error = zio_wait(zio_read(NULL, spa, &blk, tmpabd, 3266 psize, NULL, NULL, ZIO_PRIORITY_SYNC_READ, 3267 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 3268 ZIO_FLAG_RAW, &zio->io_bookmark)); 3269 3270 if (error == 0) { 3271 if (abd_cmp(tmpabd, zio->io_abd) != 0) 3272 error = SET_ERROR(ENOENT); 3273 } 3274 3275 abd_free(tmpabd); 3276 ddt_enter(ddt); 3277 return (error != 0); 3278 } else if (ddp->ddp_phys_birth != 0) { 3279 arc_buf_t *abuf = NULL; 3280 arc_flags_t aflags = ARC_FLAG_WAIT; 3281 blkptr_t blk = *zio->io_bp; 3282 int error; 3283 3284 ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth); 3285 3286 if (BP_GET_LSIZE(&blk) != zio->io_orig_size) 3287 return (B_TRUE); 3288 3289 ddt_exit(ddt); 3290 3291 error = arc_read(NULL, spa, &blk, 3292 arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ, 3293 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 3294 &aflags, &zio->io_bookmark); 3295 3296 if (error == 0) { 3297 if (abd_cmp_buf(zio->io_orig_abd, abuf->b_data, 3298 zio->io_orig_size) != 0) 3299 error = SET_ERROR(ENOENT); 3300 arc_buf_destroy(abuf, &abuf); 3301 } 3302 3303 ddt_enter(ddt); 3304 return (error != 0); 3305 } 3306 } 3307 3308 return (B_FALSE); 3309 } 3310 3311 static void 3312 zio_ddt_child_write_ready(zio_t *zio) 3313 { 3314 int p = zio->io_prop.zp_copies; 3315 ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp); 3316 ddt_entry_t *dde = zio->io_private; 3317 ddt_phys_t *ddp = &dde->dde_phys[p]; 3318 zio_t *pio; 3319 3320 if (zio->io_error) 3321 return; 3322 3323 ddt_enter(ddt); 3324 3325 ASSERT(dde->dde_lead_zio[p] == zio); 3326 3327 ddt_phys_fill(ddp, zio->io_bp); 3328 3329 zio_link_t *zl = NULL; 3330 while ((pio = zio_walk_parents(zio, &zl)) != NULL) 3331 ddt_bp_fill(ddp, pio->io_bp, zio->io_txg); 3332 3333 ddt_exit(ddt); 3334 } 3335 3336 static void 3337 zio_ddt_child_write_done(zio_t *zio) 3338 { 3339 int p = zio->io_prop.zp_copies; 3340 ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp); 3341 ddt_entry_t *dde = zio->io_private; 3342 ddt_phys_t *ddp = &dde->dde_phys[p]; 3343 3344 ddt_enter(ddt); 3345 3346 ASSERT(ddp->ddp_refcnt == 0); 3347 ASSERT(dde->dde_lead_zio[p] == zio); 3348 dde->dde_lead_zio[p] = NULL; 3349 3350 if (zio->io_error == 0) { 3351 zio_link_t *zl = NULL; 3352 while 
(zio_walk_parents(zio, &zl) != NULL) 3353 ddt_phys_addref(ddp); 3354 } else { 3355 ddt_phys_clear(ddp); 3356 } 3357 3358 ddt_exit(ddt); 3359 } 3360 3361 static zio_t * 3362 zio_ddt_write(zio_t *zio) 3363 { 3364 spa_t *spa = zio->io_spa; 3365 blkptr_t *bp = zio->io_bp; 3366 uint64_t txg = zio->io_txg; 3367 zio_prop_t *zp = &zio->io_prop; 3368 int p = zp->zp_copies; 3369 zio_t *cio = NULL; 3370 ddt_t *ddt = ddt_select(spa, bp); 3371 ddt_entry_t *dde; 3372 ddt_phys_t *ddp; 3373 3374 ASSERT(BP_GET_DEDUP(bp)); 3375 ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum); 3376 ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override); 3377 ASSERT(!(zio->io_bp_override && (zio->io_flags & ZIO_FLAG_RAW))); 3378 3379 ddt_enter(ddt); 3380 dde = ddt_lookup(ddt, bp, B_TRUE); 3381 ddp = &dde->dde_phys[p]; 3382 3383 if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) { 3384 /* 3385 * If we're using a weak checksum, upgrade to a strong checksum 3386 * and try again. If we're already using a strong checksum, 3387 * we can't resolve it, so just convert to an ordinary write. 3388 * (And automatically e-mail a paper to Nature?) 3389 */ 3390 if (!(zio_checksum_table[zp->zp_checksum].ci_flags & 3391 ZCHECKSUM_FLAG_DEDUP)) { 3392 zp->zp_checksum = spa_dedup_checksum(spa); 3393 zio_pop_transforms(zio); 3394 zio->io_stage = ZIO_STAGE_OPEN; 3395 BP_ZERO(bp); 3396 } else { 3397 zp->zp_dedup = B_FALSE; 3398 BP_SET_DEDUP(bp, B_FALSE); 3399 } 3400 ASSERT(!BP_GET_DEDUP(bp)); 3401 zio->io_pipeline = ZIO_WRITE_PIPELINE; 3402 ddt_exit(ddt); 3403 return (zio); 3404 } 3405 3406 if (ddp->ddp_phys_birth != 0 || dde->dde_lead_zio[p] != NULL) { 3407 if (ddp->ddp_phys_birth != 0) 3408 ddt_bp_fill(ddp, bp, txg); 3409 if (dde->dde_lead_zio[p] != NULL) 3410 zio_add_child(zio, dde->dde_lead_zio[p]); 3411 else 3412 ddt_phys_addref(ddp); 3413 } else if (zio->io_bp_override) { 3414 ASSERT(bp->blk_birth == txg); 3415 ASSERT(BP_EQUAL(bp, zio->io_bp_override)); 3416 ddt_phys_fill(ddp, bp); 3417 ddt_phys_addref(ddp); 3418 } else { 3419 cio = zio_write(zio, spa, txg, bp, zio->io_orig_abd, 3420 zio->io_orig_size, zio->io_orig_size, zp, 3421 zio_ddt_child_write_ready, NULL, 3422 zio_ddt_child_write_done, dde, zio->io_priority, 3423 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark); 3424 3425 zio_push_transform(cio, zio->io_abd, zio->io_size, 0, NULL); 3426 dde->dde_lead_zio[p] = cio; 3427 } 3428 3429 ddt_exit(ddt); 3430 3431 zio_nowait(cio); 3432 3433 return (zio); 3434 } 3435 3436 static ddt_entry_t *freedde; /* for debugging */ 3437 3438 static zio_t * 3439 zio_ddt_free(zio_t *zio) 3440 { 3441 spa_t *spa = zio->io_spa; 3442 blkptr_t *bp = zio->io_bp; 3443 ddt_t *ddt = ddt_select(spa, bp); 3444 ddt_entry_t *dde; 3445 ddt_phys_t *ddp; 3446 3447 ASSERT(BP_GET_DEDUP(bp)); 3448 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 3449 3450 ddt_enter(ddt); 3451 freedde = dde = ddt_lookup(ddt, bp, B_TRUE); 3452 if (dde) { 3453 ddp = ddt_phys_select(dde, bp); 3454 if (ddp) 3455 ddt_phys_decref(ddp); 3456 } 3457 ddt_exit(ddt); 3458 3459 return (zio); 3460 } 3461 3462 /* 3463 * ========================================================================== 3464 * Allocate and free blocks 3465 * ========================================================================== 3466 */ 3467 3468 static zio_t * 3469 zio_io_to_allocate(spa_t *spa, int allocator) 3470 { 3471 zio_t *zio; 3472 3473 ASSERT(MUTEX_HELD(&spa->spa_allocs[allocator].spaa_lock)); 3474 3475 zio = avl_first(&spa->spa_allocs[allocator].spaa_tree); 3476 if (zio == NULL) 3477 return (NULL); 3478 3479 
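	/* Only allocating writes are ever queued on the per-allocator tree. */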
ASSERT(IO_IS_ALLOCATING(zio)); 3480 3481 /* 3482 * Try to place a reservation for this zio. If we're unable to 3483 * reserve then we throttle. 3484 */ 3485 ASSERT3U(zio->io_allocator, ==, allocator); 3486 if (!metaslab_class_throttle_reserve(zio->io_metaslab_class, 3487 zio->io_prop.zp_copies, allocator, zio, 0)) { 3488 return (NULL); 3489 } 3490 3491 avl_remove(&spa->spa_allocs[allocator].spaa_tree, zio); 3492 ASSERT3U(zio->io_stage, <, ZIO_STAGE_DVA_ALLOCATE); 3493 3494 return (zio); 3495 } 3496 3497 static zio_t * 3498 zio_dva_throttle(zio_t *zio) 3499 { 3500 spa_t *spa = zio->io_spa; 3501 zio_t *nio; 3502 metaslab_class_t *mc; 3503 3504 /* locate an appropriate allocation class */ 3505 mc = spa_preferred_class(spa, zio->io_size, zio->io_prop.zp_type, 3506 zio->io_prop.zp_level, zio->io_prop.zp_zpl_smallblk); 3507 3508 if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE || 3509 !mc->mc_alloc_throttle_enabled || 3510 zio->io_child_type == ZIO_CHILD_GANG || 3511 zio->io_flags & ZIO_FLAG_NODATA) { 3512 return (zio); 3513 } 3514 3515 ASSERT(zio->io_type == ZIO_TYPE_WRITE); 3516 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 3517 ASSERT3U(zio->io_queued_timestamp, >, 0); 3518 ASSERT(zio->io_stage == ZIO_STAGE_DVA_THROTTLE); 3519 3520 zbookmark_phys_t *bm = &zio->io_bookmark; 3521 /* 3522 * We want to try to use as many allocators as possible to help improve 3523 * performance, but we also want logically adjacent IOs to be physically 3524 * adjacent to improve sequential read performance. We chunk each object 3525 * into 2^20 block regions, and then hash based on the objset, object, 3526 * level, and region to accomplish both of these goals. 3527 */ 3528 int allocator = (uint_t)cityhash4(bm->zb_objset, bm->zb_object, 3529 bm->zb_level, bm->zb_blkid >> 20) % spa->spa_alloc_count; 3530 zio->io_allocator = allocator; 3531 zio->io_metaslab_class = mc; 3532 mutex_enter(&spa->spa_allocs[allocator].spaa_lock); 3533 avl_add(&spa->spa_allocs[allocator].spaa_tree, zio); 3534 nio = zio_io_to_allocate(spa, allocator); 3535 mutex_exit(&spa->spa_allocs[allocator].spaa_lock); 3536 return (nio); 3537 } 3538 3539 static void 3540 zio_allocate_dispatch(spa_t *spa, int allocator) 3541 { 3542 zio_t *zio; 3543 3544 mutex_enter(&spa->spa_allocs[allocator].spaa_lock); 3545 zio = zio_io_to_allocate(spa, allocator); 3546 mutex_exit(&spa->spa_allocs[allocator].spaa_lock); 3547 if (zio == NULL) 3548 return; 3549 3550 ASSERT3U(zio->io_stage, ==, ZIO_STAGE_DVA_THROTTLE); 3551 ASSERT0(zio->io_error); 3552 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_TRUE); 3553 } 3554 3555 static zio_t * 3556 zio_dva_allocate(zio_t *zio) 3557 { 3558 spa_t *spa = zio->io_spa; 3559 metaslab_class_t *mc; 3560 blkptr_t *bp = zio->io_bp; 3561 int error; 3562 int flags = 0; 3563 3564 if (zio->io_gang_leader == NULL) { 3565 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 3566 zio->io_gang_leader = zio; 3567 } 3568 3569 ASSERT(BP_IS_HOLE(bp)); 3570 ASSERT0(BP_GET_NDVAS(bp)); 3571 ASSERT3U(zio->io_prop.zp_copies, >, 0); 3572 ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa)); 3573 ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp)); 3574 3575 flags |= (zio->io_flags & ZIO_FLAG_FASTWRITE) ? 
METASLAB_FASTWRITE : 0; 3576 if (zio->io_flags & ZIO_FLAG_NODATA) 3577 flags |= METASLAB_DONT_THROTTLE; 3578 if (zio->io_flags & ZIO_FLAG_GANG_CHILD) 3579 flags |= METASLAB_GANG_CHILD; 3580 if (zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE) 3581 flags |= METASLAB_ASYNC_ALLOC; 3582 3583 /* 3584 * if not already chosen, locate an appropriate allocation class 3585 */ 3586 mc = zio->io_metaslab_class; 3587 if (mc == NULL) { 3588 mc = spa_preferred_class(spa, zio->io_size, 3589 zio->io_prop.zp_type, zio->io_prop.zp_level, 3590 zio->io_prop.zp_zpl_smallblk); 3591 zio->io_metaslab_class = mc; 3592 } 3593 3594 /* 3595 * Try allocating the block in the usual metaslab class. 3596 * If that's full, allocate it in the normal class. 3597 * If that's full, allocate as a gang block, 3598 * and if all are full, the allocation fails (which shouldn't happen). 3599 * 3600 * Note that we do not fall back on embedded slog (ZIL) space, to 3601 * preserve unfragmented slog space, which is critical for decent 3602 * sync write performance. If a log allocation fails, we will fall 3603 * back to spa_sync() which is abysmal for performance. 3604 */ 3605 error = metaslab_alloc(spa, mc, zio->io_size, bp, 3606 zio->io_prop.zp_copies, zio->io_txg, NULL, flags, 3607 &zio->io_alloc_list, zio, zio->io_allocator); 3608 3609 /* 3610 * Fallback to normal class when an alloc class is full 3611 */ 3612 if (error == ENOSPC && mc != spa_normal_class(spa)) { 3613 /* 3614 * If throttling, transfer reservation over to normal class. 3615 * The io_allocator slot can remain the same even though we 3616 * are switching classes. 3617 */ 3618 if (mc->mc_alloc_throttle_enabled && 3619 (zio->io_flags & ZIO_FLAG_IO_ALLOCATING)) { 3620 metaslab_class_throttle_unreserve(mc, 3621 zio->io_prop.zp_copies, zio->io_allocator, zio); 3622 zio->io_flags &= ~ZIO_FLAG_IO_ALLOCATING; 3623 3624 VERIFY(metaslab_class_throttle_reserve( 3625 spa_normal_class(spa), 3626 zio->io_prop.zp_copies, zio->io_allocator, zio, 3627 flags | METASLAB_MUST_RESERVE)); 3628 } 3629 zio->io_metaslab_class = mc = spa_normal_class(spa); 3630 if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) { 3631 zfs_dbgmsg("%s: metaslab allocation failure, " 3632 "trying normal class: zio %px, size %llu, error %d", 3633 spa_name(spa), zio, (u_longlong_t)zio->io_size, 3634 error); 3635 } 3636 3637 error = metaslab_alloc(spa, mc, zio->io_size, bp, 3638 zio->io_prop.zp_copies, zio->io_txg, NULL, flags, 3639 &zio->io_alloc_list, zio, zio->io_allocator); 3640 } 3641 3642 if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE) { 3643 if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) { 3644 zfs_dbgmsg("%s: metaslab allocation failure, " 3645 "trying ganging: zio %px, size %llu, error %d", 3646 spa_name(spa), zio, (u_longlong_t)zio->io_size, 3647 error); 3648 } 3649 return (zio_write_gang_block(zio, mc)); 3650 } 3651 if (error != 0) { 3652 if (error != ENOSPC || 3653 (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC)) { 3654 zfs_dbgmsg("%s: metaslab allocation failure: zio %px, " 3655 "size %llu, error %d", 3656 spa_name(spa), zio, (u_longlong_t)zio->io_size, 3657 error); 3658 } 3659 zio->io_error = error; 3660 } 3661 3662 return (zio); 3663 } 3664 3665 static zio_t * 3666 zio_dva_free(zio_t *zio) 3667 { 3668 metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE); 3669 3670 return (zio); 3671 } 3672 3673 static zio_t * 3674 zio_dva_claim(zio_t *zio) 3675 { 3676 int error; 3677 3678 error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg); 3679 if (error) 3680 zio->io_error = error; 3681 3682 return (zio); 3683 } 3684 
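/*
 * Illustrative sketch only (not part of the pipeline, and not built): the
 * ENOSPC fallback pattern used by zio_dva_allocate() above, reduced to its
 * essentials. The helper name try_dva_alloc_fallback() is hypothetical and
 * exists purely to show the cascade -- preferred class first, then the
 * normal class, then ganging -- each step attempted only after the previous
 * one fails with ENOSPC.
 */
#if 0
static int
try_dva_alloc_fallback(spa_t *spa, zio_t *zio, blkptr_t *bp, int flags)
{
	metaslab_class_t *mc = spa_preferred_class(spa, zio->io_size,
	    zio->io_prop.zp_type, zio->io_prop.zp_level,
	    zio->io_prop.zp_zpl_smallblk);

	/* First try the preferred (possibly special or dedup) class. */
	int error = metaslab_alloc(spa, mc, zio->io_size, bp,
	    zio->io_prop.zp_copies, zio->io_txg, NULL, flags,
	    &zio->io_alloc_list, zio, zio->io_allocator);

	/* On ENOSPC, fall back to the normal class and retry. */
	if (error == ENOSPC && mc != spa_normal_class(spa)) {
		mc = spa_normal_class(spa);
		error = metaslab_alloc(spa, mc, zio->io_size, bp,
		    zio->io_prop.zp_copies, zio->io_txg, NULL, flags,
		    &zio->io_alloc_list, zio, zio->io_allocator);
	}

	/*
	 * If this still returns ENOSPC and the block is larger than
	 * SPA_MINBLOCKSIZE, the real code splits it into a gang block
	 * via zio_write_gang_block(zio, mc).
	 */
	return (error);
}
#endif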
3685 /* 3686 * Undo an allocation. This is used by zio_done() when an I/O fails 3687 * and we want to give back the block we just allocated. 3688 * This handles both normal blocks and gang blocks. 3689 */ 3690 static void 3691 zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp) 3692 { 3693 ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp)); 3694 ASSERT(zio->io_bp_override == NULL); 3695 3696 if (!BP_IS_HOLE(bp)) 3697 metaslab_free(zio->io_spa, bp, bp->blk_birth, B_TRUE); 3698 3699 if (gn != NULL) { 3700 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 3701 zio_dva_unallocate(zio, gn->gn_child[g], 3702 &gn->gn_gbh->zg_blkptr[g]); 3703 } 3704 } 3705 } 3706 3707 /* 3708 * Try to allocate an intent log block. Return 0 on success, errno on failure. 3709 */ 3710 int 3711 zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg, blkptr_t *new_bp, 3712 uint64_t size, boolean_t *slog) 3713 { 3714 int error = 1; 3715 zio_alloc_list_t io_alloc_list; 3716 3717 ASSERT(txg > spa_syncing_txg(spa)); 3718 3719 metaslab_trace_init(&io_alloc_list); 3720 3721 /* 3722 * Block pointer fields are useful to metaslabs for stats and debugging. 3723 * Fill in the obvious ones before calling into metaslab_alloc(). 3724 */ 3725 BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG); 3726 BP_SET_PSIZE(new_bp, size); 3727 BP_SET_LEVEL(new_bp, 0); 3728 3729 /* 3730 * When allocating a zil block, we don't have information about 3731 * the final destination of the block except the objset it's part 3732 * of, so we just hash the objset ID to pick the allocator to get 3733 * some parallelism. 3734 */ 3735 int flags = METASLAB_FASTWRITE | METASLAB_ZIL; 3736 int allocator = (uint_t)cityhash4(0, 0, 0, 3737 os->os_dsl_dataset->ds_object) % spa->spa_alloc_count; 3738 error = metaslab_alloc(spa, spa_log_class(spa), size, new_bp, 1, 3739 txg, NULL, flags, &io_alloc_list, NULL, allocator); 3740 *slog = (error == 0); 3741 if (error != 0) { 3742 error = metaslab_alloc(spa, spa_embedded_log_class(spa), size, 3743 new_bp, 1, txg, NULL, flags, 3744 &io_alloc_list, NULL, allocator); 3745 } 3746 if (error != 0) { 3747 error = metaslab_alloc(spa, spa_normal_class(spa), size, 3748 new_bp, 1, txg, NULL, flags, 3749 &io_alloc_list, NULL, allocator); 3750 } 3751 metaslab_trace_fini(&io_alloc_list); 3752 3753 if (error == 0) { 3754 BP_SET_LSIZE(new_bp, size); 3755 BP_SET_PSIZE(new_bp, size); 3756 BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF); 3757 BP_SET_CHECKSUM(new_bp, 3758 spa_version(spa) >= SPA_VERSION_SLIM_ZIL 3759 ? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG); 3760 BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG); 3761 BP_SET_LEVEL(new_bp, 0); 3762 BP_SET_DEDUP(new_bp, 0); 3763 BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER); 3764 3765 /* 3766 * encrypted blocks will require an IV and salt. We generate 3767 * these now since we will not be rewriting the bp at 3768 * rewrite time. 
3769 */ 3770 if (os->os_encrypted) { 3771 uint8_t iv[ZIO_DATA_IV_LEN]; 3772 uint8_t salt[ZIO_DATA_SALT_LEN]; 3773 3774 BP_SET_CRYPT(new_bp, B_TRUE); 3775 VERIFY0(spa_crypt_get_salt(spa, 3776 dmu_objset_id(os), salt)); 3777 VERIFY0(zio_crypt_generate_iv(iv)); 3778 3779 zio_crypt_encode_params_bp(new_bp, salt, iv); 3780 } 3781 } else { 3782 zfs_dbgmsg("%s: zil block allocation failure: " 3783 "size %llu, error %d", spa_name(spa), (u_longlong_t)size, 3784 error); 3785 } 3786 3787 return (error); 3788 } 3789 3790 /* 3791 * ========================================================================== 3792 * Read and write to physical devices 3793 * ========================================================================== 3794 */ 3795 3796 /* 3797 * Issue an I/O to the underlying vdev. Typically the issue pipeline 3798 * stops after this stage and will resume upon I/O completion. 3799 * However, there are instances where the vdev layer may need to 3800 * continue the pipeline when an I/O was not issued. Since the I/O 3801 * that was sent to the vdev layer might be different than the one 3802 * currently active in the pipeline (see vdev_queue_io()), we explicitly 3803 * force the underlying vdev layers to call either zio_execute() or 3804 * zio_interrupt() to ensure that the pipeline continues with the correct I/O. 3805 */ 3806 static zio_t * 3807 zio_vdev_io_start(zio_t *zio) 3808 { 3809 vdev_t *vd = zio->io_vd; 3810 uint64_t align; 3811 spa_t *spa = zio->io_spa; 3812 3813 zio->io_delay = 0; 3814 3815 ASSERT(zio->io_error == 0); 3816 ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0); 3817 3818 if (vd == NULL) { 3819 if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) 3820 spa_config_enter(spa, SCL_ZIO, zio, RW_READER); 3821 3822 /* 3823 * The mirror_ops handle multiple DVAs in a single BP. 3824 */ 3825 vdev_mirror_ops.vdev_op_io_start(zio); 3826 return (NULL); 3827 } 3828 3829 ASSERT3P(zio->io_logical, !=, zio); 3830 if (zio->io_type == ZIO_TYPE_WRITE) { 3831 ASSERT(spa->spa_trust_config); 3832 3833 /* 3834 * Note: the code can handle other kinds of writes, 3835 * but we don't expect them. 3836 */ 3837 if (zio->io_vd->vdev_noalloc) { 3838 ASSERT(zio->io_flags & 3839 (ZIO_FLAG_PHYSICAL | ZIO_FLAG_SELF_HEAL | 3840 ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE)); 3841 } 3842 } 3843 3844 align = 1ULL << vd->vdev_top->vdev_ashift; 3845 3846 if (!(zio->io_flags & ZIO_FLAG_PHYSICAL) && 3847 P2PHASE(zio->io_size, align) != 0) { 3848 /* Transform logical writes to be a full physical block size. */ 3849 uint64_t asize = P2ROUNDUP(zio->io_size, align); 3850 abd_t *abuf = abd_alloc_sametype(zio->io_abd, asize); 3851 ASSERT(vd == vd->vdev_top); 3852 if (zio->io_type == ZIO_TYPE_WRITE) { 3853 abd_copy(abuf, zio->io_abd, zio->io_size); 3854 abd_zero_off(abuf, zio->io_size, asize - zio->io_size); 3855 } 3856 zio_push_transform(zio, abuf, asize, asize, zio_subblock); 3857 } 3858 3859 /* 3860 * If this is not a physical io, make sure that it is properly aligned 3861 * before proceeding. 3862 */ 3863 if (!(zio->io_flags & ZIO_FLAG_PHYSICAL)) { 3864 ASSERT0(P2PHASE(zio->io_offset, align)); 3865 ASSERT0(P2PHASE(zio->io_size, align)); 3866 } else { 3867 /* 3868 * For physical writes, we allow 512b aligned writes and assume 3869 * the device will perform a read-modify-write as necessary. 
3870 */ 3871 ASSERT0(P2PHASE(zio->io_offset, SPA_MINBLOCKSIZE)); 3872 ASSERT0(P2PHASE(zio->io_size, SPA_MINBLOCKSIZE)); 3873 } 3874 3875 VERIFY(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa)); 3876 3877 /* 3878 * If this is a repair I/O, and there's no self-healing involved -- 3879 * that is, we're just resilvering what we expect to resilver -- 3880 * then don't do the I/O unless zio's txg is actually in vd's DTL. 3881 * This prevents spurious resilvering. 3882 * 3883 * There are a few ways that we can end up creating these spurious 3884 * resilver i/os: 3885 * 3886 * 1. A resilver i/o will be issued if any DVA in the BP has a 3887 * dirty DTL. The mirror code will issue resilver writes to 3888 * each DVA, including the one(s) that are not on vdevs with dirty 3889 * DTLs. 3890 * 3891 * 2. With nested replication, which happens when we have a 3892 * "replacing" or "spare" vdev that's a child of a mirror or raidz. 3893 * For example, given mirror(replacing(A+B), C), it's likely that 3894 * only A is out of date (it's the new device). In this case, we'll 3895 * read from C, then use the data to resilver A+B -- but we don't 3896 * actually want to resilver B, just A. The top-level mirror has no 3897 * way to know this, so instead we just discard unnecessary repairs 3898 * as we work our way down the vdev tree. 3899 * 3900 * 3. ZTEST also creates mirrors of mirrors, mirrors of raidz, etc. 3901 * The same logic applies to any form of nested replication: ditto 3902 * + mirror, RAID-Z + replacing, etc. 3903 * 3904 * However, indirect vdevs point off to other vdevs which may have 3905 * DTLs, so we never bypass them. The child i/os on concrete vdevs 3906 * will be properly bypassed instead. 3907 * 3908 * Leaf DTL_PARTIAL can be empty when a legitimate write comes from 3909 * a dRAID spare vdev. For example, when a dRAID spare is first 3910 * used, its spare blocks need to be written to but the leaf vdevs 3911 * of such blocks can have empty DTL_PARTIAL. 3912 * 3913 * There seemed no clean way to allow such writes while bypassing 3914 * spurious ones. At this point, just avoid all bypassing for dRAID 3915 * for correctness. 3916 */ 3917 if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) && 3918 !(zio->io_flags & ZIO_FLAG_SELF_HEAL) && 3919 zio->io_txg != 0 && /* not a delegated i/o */ 3920 vd->vdev_ops != &vdev_indirect_ops && 3921 vd->vdev_top->vdev_ops != &vdev_draid_ops && 3922 !vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) { 3923 ASSERT(zio->io_type == ZIO_TYPE_WRITE); 3924 zio_vdev_io_bypass(zio); 3925 return (zio); 3926 } 3927 3928 /* 3929 * Select the next best leaf I/O to process. Distributed spares are 3930 * excluded since they dispatch the I/O directly to a leaf vdev after 3931 * applying the dRAID mapping. 3932 */ 3933 if (vd->vdev_ops->vdev_op_leaf && 3934 vd->vdev_ops != &vdev_draid_spare_ops && 3935 (zio->io_type == ZIO_TYPE_READ || 3936 zio->io_type == ZIO_TYPE_WRITE || 3937 zio->io_type == ZIO_TYPE_TRIM)) { 3938 3939 if ((zio = vdev_queue_io(zio)) == NULL) 3940 return (NULL); 3941 3942 if (!vdev_accessible(vd, zio)) { 3943 zio->io_error = SET_ERROR(ENXIO); 3944 zio_interrupt(zio); 3945 return (NULL); 3946 } 3947 zio->io_delay = gethrtime(); 3948 } 3949 3950 vd->vdev_ops->vdev_op_io_start(zio); 3951 return (NULL); 3952 } 3953 3954 static zio_t * 3955 zio_vdev_io_done(zio_t *zio) 3956 { 3957 vdev_t *vd = zio->io_vd; 3958 vdev_ops_t *ops = vd ?
vd->vdev_ops : &vdev_mirror_ops; 3959 boolean_t unexpected_error = B_FALSE; 3960 3961 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) { 3962 return (NULL); 3963 } 3964 3965 ASSERT(zio->io_type == ZIO_TYPE_READ || 3966 zio->io_type == ZIO_TYPE_WRITE || zio->io_type == ZIO_TYPE_TRIM); 3967 3968 if (zio->io_delay) 3969 zio->io_delay = gethrtime() - zio->io_delay; 3970 3971 if (vd != NULL && vd->vdev_ops->vdev_op_leaf && 3972 vd->vdev_ops != &vdev_draid_spare_ops) { 3973 vdev_queue_io_done(zio); 3974 3975 if (zio_injection_enabled && zio->io_error == 0) 3976 zio->io_error = zio_handle_device_injections(vd, zio, 3977 EIO, EILSEQ); 3978 3979 if (zio_injection_enabled && zio->io_error == 0) 3980 zio->io_error = zio_handle_label_injection(zio, EIO); 3981 3982 if (zio->io_error && zio->io_type != ZIO_TYPE_TRIM) { 3983 if (!vdev_accessible(vd, zio)) { 3984 zio->io_error = SET_ERROR(ENXIO); 3985 } else { 3986 unexpected_error = B_TRUE; 3987 } 3988 } 3989 } 3990 3991 ops->vdev_op_io_done(zio); 3992 3993 if (unexpected_error && vd->vdev_remove_wanted == B_FALSE) 3994 VERIFY(vdev_probe(vd, zio) == NULL); 3995 3996 return (zio); 3997 } 3998 3999 /* 4000 * This function is used to change the priority of an existing zio that is 4001 * currently in-flight. This is used by the arc to upgrade priority in the 4002 * event that a demand read is made for a block that is currently queued 4003 * as a scrub or async read IO. Otherwise, the high priority read request 4004 * would end up having to wait for the lower priority IO. 4005 */ 4006 void 4007 zio_change_priority(zio_t *pio, zio_priority_t priority) 4008 { 4009 zio_t *cio, *cio_next; 4010 zio_link_t *zl = NULL; 4011 4012 ASSERT3U(priority, <, ZIO_PRIORITY_NUM_QUEUEABLE); 4013 4014 if (pio->io_vd != NULL && pio->io_vd->vdev_ops->vdev_op_leaf) { 4015 vdev_queue_change_io_priority(pio, priority); 4016 } else { 4017 pio->io_priority = priority; 4018 } 4019 4020 mutex_enter(&pio->io_lock); 4021 for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) { 4022 cio_next = zio_walk_children(pio, &zl); 4023 zio_change_priority(cio, priority); 4024 } 4025 mutex_exit(&pio->io_lock); 4026 } 4027 4028 /* 4029 * For non-raidz ZIOs, we can just copy aside the bad data read from the 4030 * disk, and use that to finish the checksum ereport later. 
4031 */ 4032 static void 4033 zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr, 4034 const abd_t *good_buf) 4035 { 4036 /* no processing needed */ 4037 zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE); 4038 } 4039 4040 void 4041 zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr) 4042 { 4043 void *abd = abd_alloc_sametype(zio->io_abd, zio->io_size); 4044 4045 abd_copy(abd, zio->io_abd, zio->io_size); 4046 4047 zcr->zcr_cbinfo = zio->io_size; 4048 zcr->zcr_cbdata = abd; 4049 zcr->zcr_finish = zio_vsd_default_cksum_finish; 4050 zcr->zcr_free = zio_abd_free; 4051 } 4052 4053 static zio_t * 4054 zio_vdev_io_assess(zio_t *zio) 4055 { 4056 vdev_t *vd = zio->io_vd; 4057 4058 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) { 4059 return (NULL); 4060 } 4061 4062 if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) 4063 spa_config_exit(zio->io_spa, SCL_ZIO, zio); 4064 4065 if (zio->io_vsd != NULL) { 4066 zio->io_vsd_ops->vsd_free(zio); 4067 zio->io_vsd = NULL; 4068 } 4069 4070 if (zio_injection_enabled && zio->io_error == 0) 4071 zio->io_error = zio_handle_fault_injection(zio, EIO); 4072 4073 /* 4074 * If the I/O failed, determine whether we should attempt to retry it. 4075 * 4076 * On retry, we cut in line in the issue queue, since we don't want 4077 * compression/checksumming/etc. work to prevent our (cheap) IO reissue. 4078 */ 4079 if (zio->io_error && vd == NULL && 4080 !(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) { 4081 ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE)); /* not a leaf */ 4082 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS)); /* not a leaf */ 4083 zio->io_error = 0; 4084 zio->io_flags |= ZIO_FLAG_IO_RETRY | ZIO_FLAG_DONT_AGGREGATE; 4085 zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1; 4086 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, 4087 zio_requeue_io_start_cut_in_line); 4088 return (NULL); 4089 } 4090 4091 /* 4092 * If we got an error on a leaf device, convert it to ENXIO 4093 * if the device is not accessible at all. 4094 */ 4095 if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf && 4096 !vdev_accessible(vd, zio)) 4097 zio->io_error = SET_ERROR(ENXIO); 4098 4099 /* 4100 * If we can't write to an interior vdev (mirror or RAID-Z), 4101 * set vdev_cant_write so that we stop trying to allocate from it. 4102 */ 4103 if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE && 4104 vd != NULL && !vd->vdev_ops->vdev_op_leaf) { 4105 vdev_dbgmsg(vd, "zio_vdev_io_assess(zio=%px) setting " 4106 "cant_write=TRUE due to write failure with ENXIO", 4107 zio); 4108 vd->vdev_cant_write = B_TRUE; 4109 } 4110 4111 /* 4112 * If a cache flush returns ENOTSUP or ENOTTY, we know that no future 4113 * attempts will ever succeed. In this case we set a persistent 4114 * boolean flag so that we don't bother with it in the future. 
4115 */ 4116 if ((zio->io_error == ENOTSUP || zio->io_error == ENOTTY) && 4117 zio->io_type == ZIO_TYPE_IOCTL && 4118 zio->io_cmd == DKIOCFLUSHWRITECACHE && vd != NULL) 4119 vd->vdev_nowritecache = B_TRUE; 4120 4121 if (zio->io_error) 4122 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 4123 4124 return (zio); 4125 } 4126 4127 void 4128 zio_vdev_io_reissue(zio_t *zio) 4129 { 4130 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); 4131 ASSERT(zio->io_error == 0); 4132 4133 zio->io_stage >>= 1; 4134 } 4135 4136 void 4137 zio_vdev_io_redone(zio_t *zio) 4138 { 4139 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE); 4140 4141 zio->io_stage >>= 1; 4142 } 4143 4144 void 4145 zio_vdev_io_bypass(zio_t *zio) 4146 { 4147 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); 4148 ASSERT(zio->io_error == 0); 4149 4150 zio->io_flags |= ZIO_FLAG_IO_BYPASS; 4151 zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1; 4152 } 4153 4154 /* 4155 * ========================================================================== 4156 * Encrypt and store encryption parameters 4157 * ========================================================================== 4158 */ 4159 4160 4161 /* 4162 * This function is used for ZIO_STAGE_ENCRYPT. It is responsible for 4163 * managing the storage of encryption parameters and passing them to the 4164 * lower-level encryption functions. 4165 */ 4166 static zio_t * 4167 zio_encrypt(zio_t *zio) 4168 { 4169 zio_prop_t *zp = &zio->io_prop; 4170 spa_t *spa = zio->io_spa; 4171 blkptr_t *bp = zio->io_bp; 4172 uint64_t psize = BP_GET_PSIZE(bp); 4173 uint64_t dsobj = zio->io_bookmark.zb_objset; 4174 dmu_object_type_t ot = BP_GET_TYPE(bp); 4175 void *enc_buf = NULL; 4176 abd_t *eabd = NULL; 4177 uint8_t salt[ZIO_DATA_SALT_LEN]; 4178 uint8_t iv[ZIO_DATA_IV_LEN]; 4179 uint8_t mac[ZIO_DATA_MAC_LEN]; 4180 boolean_t no_crypt = B_FALSE; 4181 4182 /* the root zio already encrypted the data */ 4183 if (zio->io_child_type == ZIO_CHILD_GANG) 4184 return (zio); 4185 4186 /* only ZIL blocks are re-encrypted on rewrite */ 4187 if (!IO_IS_ALLOCATING(zio) && ot != DMU_OT_INTENT_LOG) 4188 return (zio); 4189 4190 if (!(zp->zp_encrypt || BP_IS_ENCRYPTED(bp))) { 4191 BP_SET_CRYPT(bp, B_FALSE); 4192 return (zio); 4193 } 4194 4195 /* if we are doing raw encryption set the provided encryption params */ 4196 if (zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) { 4197 ASSERT0(BP_GET_LEVEL(bp)); 4198 BP_SET_CRYPT(bp, B_TRUE); 4199 BP_SET_BYTEORDER(bp, zp->zp_byteorder); 4200 if (ot != DMU_OT_OBJSET) 4201 zio_crypt_encode_mac_bp(bp, zp->zp_mac); 4202 4203 /* dnode blocks must be written out in the provided byteorder */ 4204 if (zp->zp_byteorder != ZFS_HOST_BYTEORDER && 4205 ot == DMU_OT_DNODE) { 4206 void *bswap_buf = zio_buf_alloc(psize); 4207 abd_t *babd = abd_get_from_buf(bswap_buf, psize); 4208 4209 ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF); 4210 abd_copy_to_buf(bswap_buf, zio->io_abd, psize); 4211 dmu_ot_byteswap[DMU_OT_BYTESWAP(ot)].ob_func(bswap_buf, 4212 psize); 4213 4214 abd_take_ownership_of_buf(babd, B_TRUE); 4215 zio_push_transform(zio, babd, psize, psize, NULL); 4216 } 4217 4218 if (DMU_OT_IS_ENCRYPTED(ot)) 4219 zio_crypt_encode_params_bp(bp, zp->zp_salt, zp->zp_iv); 4220 return (zio); 4221 } 4222 4223 /* indirect blocks only maintain a cksum of the lower level MACs */ 4224 if (BP_GET_LEVEL(bp) > 0) { 4225 BP_SET_CRYPT(bp, B_TRUE); 4226 VERIFY0(zio_crypt_do_indirect_mac_checksum_abd(B_TRUE, 4227 zio->io_orig_abd, BP_GET_LSIZE(bp), BP_SHOULD_BYTESWAP(bp), 4228 mac)); 4229 zio_crypt_encode_mac_bp(bp, mac); 4230 return (zio); 4231 
} 4232 4233 /* 4234 * Objset blocks are a special case since they have 2 256-bit MACs 4235 * embedded within them. 4236 */ 4237 if (ot == DMU_OT_OBJSET) { 4238 ASSERT0(DMU_OT_IS_ENCRYPTED(ot)); 4239 ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF); 4240 BP_SET_CRYPT(bp, B_TRUE); 4241 VERIFY0(spa_do_crypt_objset_mac_abd(B_TRUE, spa, dsobj, 4242 zio->io_abd, psize, BP_SHOULD_BYTESWAP(bp))); 4243 return (zio); 4244 } 4245 4246 /* unencrypted object types are only authenticated with a MAC */ 4247 if (!DMU_OT_IS_ENCRYPTED(ot)) { 4248 BP_SET_CRYPT(bp, B_TRUE); 4249 VERIFY0(spa_do_crypt_mac_abd(B_TRUE, spa, dsobj, 4250 zio->io_abd, psize, mac)); 4251 zio_crypt_encode_mac_bp(bp, mac); 4252 return (zio); 4253 } 4254 4255 /* 4256 * Later passes of sync-to-convergence may decide to rewrite data 4257 * in place to avoid more disk reallocations. This presents a problem 4258 * for encryption because this constitutes rewriting the new data with 4259 * the same encryption key and IV. However, this only applies to blocks 4260 * in the MOS (particularly the spacemaps) and we do not encrypt the 4261 * MOS. We assert that the zio is allocating or an intent log write 4262 * to enforce this. 4263 */ 4264 ASSERT(IO_IS_ALLOCATING(zio) || ot == DMU_OT_INTENT_LOG); 4265 ASSERT(BP_GET_LEVEL(bp) == 0 || ot == DMU_OT_INTENT_LOG); 4266 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_ENCRYPTION)); 4267 ASSERT3U(psize, !=, 0); 4268 4269 enc_buf = zio_buf_alloc(psize); 4270 eabd = abd_get_from_buf(enc_buf, psize); 4271 abd_take_ownership_of_buf(eabd, B_TRUE); 4272 4273 /* 4274 * For an explanation of what encryption parameters are stored 4275 * where, see the block comment in zio_crypt.c. 4276 */ 4277 if (ot == DMU_OT_INTENT_LOG) { 4278 zio_crypt_decode_params_bp(bp, salt, iv); 4279 } else { 4280 BP_SET_CRYPT(bp, B_TRUE); 4281 } 4282 4283 /* Perform the encryption. This should not fail */ 4284 VERIFY0(spa_do_crypt_abd(B_TRUE, spa, &zio->io_bookmark, 4285 BP_GET_TYPE(bp), BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp), 4286 salt, iv, mac, psize, zio->io_abd, eabd, &no_crypt)); 4287 4288 /* encode encryption metadata into the bp */ 4289 if (ot == DMU_OT_INTENT_LOG) { 4290 /* 4291 * ZIL blocks store the MAC in the embedded checksum, so the 4292 * transform must always be applied. 4293 */ 4294 zio_crypt_encode_mac_zil(enc_buf, mac); 4295 zio_push_transform(zio, eabd, psize, psize, NULL); 4296 } else { 4297 BP_SET_CRYPT(bp, B_TRUE); 4298 zio_crypt_encode_params_bp(bp, salt, iv); 4299 zio_crypt_encode_mac_bp(bp, mac); 4300 4301 if (no_crypt) { 4302 ASSERT3U(ot, ==, DMU_OT_DNODE); 4303 abd_free(eabd); 4304 } else { 4305 zio_push_transform(zio, eabd, psize, psize, NULL); 4306 } 4307 } 4308 4309 return (zio); 4310 } 4311 4312 /* 4313 * ========================================================================== 4314 * Generate and verify checksums 4315 * ========================================================================== 4316 */ 4317 static zio_t * 4318 zio_checksum_generate(zio_t *zio) 4319 { 4320 blkptr_t *bp = zio->io_bp; 4321 enum zio_checksum checksum; 4322 4323 if (bp == NULL) { 4324 /* 4325 * This is zio_write_phys(). 4326 * We're either generating a label checksum, or none at all. 
4327 */ 4328 checksum = zio->io_prop.zp_checksum; 4329 4330 if (checksum == ZIO_CHECKSUM_OFF) 4331 return (zio); 4332 4333 ASSERT(checksum == ZIO_CHECKSUM_LABEL); 4334 } else { 4335 if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) { 4336 ASSERT(!IO_IS_ALLOCATING(zio)); 4337 checksum = ZIO_CHECKSUM_GANG_HEADER; 4338 } else { 4339 checksum = BP_GET_CHECKSUM(bp); 4340 } 4341 } 4342 4343 zio_checksum_compute(zio, checksum, zio->io_abd, zio->io_size); 4344 4345 return (zio); 4346 } 4347 4348 static zio_t * 4349 zio_checksum_verify(zio_t *zio) 4350 { 4351 zio_bad_cksum_t info; 4352 blkptr_t *bp = zio->io_bp; 4353 int error; 4354 4355 ASSERT(zio->io_vd != NULL); 4356 4357 if (bp == NULL) { 4358 /* 4359 * This is zio_read_phys(). 4360 * We're either verifying a label checksum, or nothing at all. 4361 */ 4362 if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF) 4363 return (zio); 4364 4365 ASSERT3U(zio->io_prop.zp_checksum, ==, ZIO_CHECKSUM_LABEL); 4366 } 4367 4368 if ((error = zio_checksum_error(zio, &info)) != 0) { 4369 zio->io_error = error; 4370 if (error == ECKSUM && 4371 !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) { 4372 mutex_enter(&zio->io_vd->vdev_stat_lock); 4373 zio->io_vd->vdev_stat.vs_checksum_errors++; 4374 mutex_exit(&zio->io_vd->vdev_stat_lock); 4375 (void) zfs_ereport_start_checksum(zio->io_spa, 4376 zio->io_vd, &zio->io_bookmark, zio, 4377 zio->io_offset, zio->io_size, &info); 4378 } 4379 } 4380 4381 return (zio); 4382 } 4383 4384 /* 4385 * Called by RAID-Z to ensure we don't compute the checksum twice. 4386 */ 4387 void 4388 zio_checksum_verified(zio_t *zio) 4389 { 4390 zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY; 4391 } 4392 4393 /* 4394 * ========================================================================== 4395 * Error rank. Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other. 4396 * An error of 0 indicates success. ENXIO indicates whole-device failure, 4397 * which may be transient (e.g. unplugged) or permanent. ECKSUM and EIO 4398 * indicate errors that are specific to one I/O, and most likely permanent. 4399 * Any other error is presumed to be worse because we weren't expecting it. 4400 * ========================================================================== 4401 */ 4402 int 4403 zio_worst_error(int e1, int e2) 4404 { 4405 static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO }; 4406 int r1, r2; 4407 4408 for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++) 4409 if (e1 == zio_error_rank[r1]) 4410 break; 4411 4412 for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++) 4413 if (e2 == zio_error_rank[r2]) 4414 break; 4415 4416 return (r1 > r2 ?
e1 : e2); 4417 } 4418 4419 /* 4420 * ========================================================================== 4421 * I/O completion 4422 * ========================================================================== 4423 */ 4424 static zio_t * 4425 zio_ready(zio_t *zio) 4426 { 4427 blkptr_t *bp = zio->io_bp; 4428 zio_t *pio, *pio_next; 4429 zio_link_t *zl = NULL; 4430 4431 if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT | ZIO_CHILD_DDT_BIT, 4432 ZIO_WAIT_READY)) { 4433 return (NULL); 4434 } 4435 4436 if (zio->io_ready) { 4437 ASSERT(IO_IS_ALLOCATING(zio)); 4438 ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp) || 4439 (zio->io_flags & ZIO_FLAG_NOPWRITE)); 4440 ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0); 4441 4442 zio->io_ready(zio); 4443 } 4444 4445 if (bp != NULL && bp != &zio->io_bp_copy) 4446 zio->io_bp_copy = *bp; 4447 4448 if (zio->io_error != 0) { 4449 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 4450 4451 if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 4452 ASSERT(IO_IS_ALLOCATING(zio)); 4453 ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 4454 ASSERT(zio->io_metaslab_class != NULL); 4455 4456 /* 4457 * We were unable to allocate anything, unreserve and 4458 * issue the next I/O to allocate. 4459 */ 4460 metaslab_class_throttle_unreserve( 4461 zio->io_metaslab_class, zio->io_prop.zp_copies, 4462 zio->io_allocator, zio); 4463 zio_allocate_dispatch(zio->io_spa, zio->io_allocator); 4464 } 4465 } 4466 4467 mutex_enter(&zio->io_lock); 4468 zio->io_state[ZIO_WAIT_READY] = 1; 4469 pio = zio_walk_parents(zio, &zl); 4470 mutex_exit(&zio->io_lock); 4471 4472 /* 4473 * As we notify zio's parents, new parents could be added. 4474 * New parents go to the head of zio's io_parent_list, however, 4475 * so we will (correctly) not notify them. The remainder of zio's 4476 * io_parent_list, from 'pio_next' onward, cannot change because 4477 * all parents must wait for us to be done before they can be done. 4478 */ 4479 for (; pio != NULL; pio = pio_next) { 4480 pio_next = zio_walk_parents(zio, &zl); 4481 zio_notify_parent(pio, zio, ZIO_WAIT_READY, NULL); 4482 } 4483 4484 if (zio->io_flags & ZIO_FLAG_NODATA) { 4485 if (bp != NULL && BP_IS_GANG(bp)) { 4486 zio->io_flags &= ~ZIO_FLAG_NODATA; 4487 } else { 4488 ASSERT((uintptr_t)zio->io_abd < SPA_MAXBLOCKSIZE); 4489 zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES; 4490 } 4491 } 4492 4493 if (zio_injection_enabled && 4494 zio->io_spa->spa_syncing_txg == zio->io_txg) 4495 zio_handle_ignored_writes(zio); 4496 4497 return (zio); 4498 } 4499 4500 /* 4501 * Update the allocation throttle accounting. 
4502 */ 4503 static void 4504 zio_dva_throttle_done(zio_t *zio) 4505 { 4506 zio_t *lio __maybe_unused = zio->io_logical; 4507 zio_t *pio = zio_unique_parent(zio); 4508 vdev_t *vd = zio->io_vd; 4509 int flags = METASLAB_ASYNC_ALLOC; 4510 4511 ASSERT3P(zio->io_bp, !=, NULL); 4512 ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE); 4513 ASSERT3U(zio->io_priority, ==, ZIO_PRIORITY_ASYNC_WRITE); 4514 ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV); 4515 ASSERT(vd != NULL); 4516 ASSERT3P(vd, ==, vd->vdev_top); 4517 ASSERT(zio_injection_enabled || !(zio->io_flags & ZIO_FLAG_IO_RETRY)); 4518 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR)); 4519 ASSERT(zio->io_flags & ZIO_FLAG_IO_ALLOCATING); 4520 ASSERT(!(lio->io_flags & ZIO_FLAG_IO_REWRITE)); 4521 ASSERT(!(lio->io_orig_flags & ZIO_FLAG_NODATA)); 4522 4523 /* 4524 * Parents of gang children can have two flavors -- ones that 4525 * allocated the gang header (will have ZIO_FLAG_IO_REWRITE set) 4526 * and ones that allocated the constituent blocks. The allocation 4527 * throttle needs to know the allocating parent zio so we must find 4528 * it here. 4529 */ 4530 if (pio->io_child_type == ZIO_CHILD_GANG) { 4531 /* 4532 * If our parent is a rewrite gang child then our grandparent 4533 * would have been the one that performed the allocation. 4534 */ 4535 if (pio->io_flags & ZIO_FLAG_IO_REWRITE) 4536 pio = zio_unique_parent(pio); 4537 flags |= METASLAB_GANG_CHILD; 4538 } 4539 4540 ASSERT(IO_IS_ALLOCATING(pio)); 4541 ASSERT3P(zio, !=, zio->io_logical); 4542 ASSERT(zio->io_logical != NULL); 4543 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR)); 4544 ASSERT0(zio->io_flags & ZIO_FLAG_NOPWRITE); 4545 ASSERT(zio->io_metaslab_class != NULL); 4546 4547 mutex_enter(&pio->io_lock); 4548 metaslab_group_alloc_decrement(zio->io_spa, vd->vdev_id, pio, flags, 4549 pio->io_allocator, B_TRUE); 4550 mutex_exit(&pio->io_lock); 4551 4552 metaslab_class_throttle_unreserve(zio->io_metaslab_class, 1, 4553 pio->io_allocator, pio); 4554 4555 /* 4556 * Call into the pipeline to see if there is more work that 4557 * needs to be done. If there is work to be done it will be 4558 * dispatched to another taskq thread. 4559 */ 4560 zio_allocate_dispatch(zio->io_spa, pio->io_allocator); 4561 } 4562 4563 static zio_t * 4564 zio_done(zio_t *zio) 4565 { 4566 /* 4567 * Always attempt to keep stack usage minimal here since 4568 * we can be called recursively up to 19 levels deep. 4569 */ 4570 const uint64_t psize = zio->io_size; 4571 zio_t *pio, *pio_next; 4572 zio_link_t *zl = NULL; 4573 4574 /* 4575 * If our children haven't all completed, 4576 * wait for them and then repeat this pipeline stage. 4577 */ 4578 if (zio_wait_for_children(zio, ZIO_CHILD_ALL_BITS, ZIO_WAIT_DONE)) { 4579 return (NULL); 4580 } 4581 4582 /* 4583 * If the allocation throttle is enabled, then update the accounting. 4584 * We only track child I/Os that are part of an allocating async 4585 * write. We must do this since the allocation is performed 4586 * by the logical I/O but the actual write is done by child I/Os. 4587 */ 4588 if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING && 4589 zio->io_child_type == ZIO_CHILD_VDEV) { 4590 ASSERT(zio->io_metaslab_class != NULL); 4591 ASSERT(zio->io_metaslab_class->mc_alloc_throttle_enabled); 4592 zio_dva_throttle_done(zio); 4593 } 4594 4595 /* 4596 * If the allocation throttle is enabled, verify that 4597 * we have decremented the refcounts for every I/O that was throttled. 
4598 */ 4599 if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 4600 ASSERT(zio->io_type == ZIO_TYPE_WRITE); 4601 ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 4602 ASSERT(zio->io_bp != NULL); 4603 4604 metaslab_group_alloc_verify(zio->io_spa, zio->io_bp, zio, 4605 zio->io_allocator); 4606 VERIFY(zfs_refcount_not_held(&zio->io_metaslab_class-> 4607 mc_allocator[zio->io_allocator].mca_alloc_slots, zio)); 4608 } 4609 4610 4611 for (int c = 0; c < ZIO_CHILD_TYPES; c++) 4612 for (int w = 0; w < ZIO_WAIT_TYPES; w++) 4613 ASSERT(zio->io_children[c][w] == 0); 4614 4615 if (zio->io_bp != NULL && !BP_IS_EMBEDDED(zio->io_bp)) { 4616 ASSERT(zio->io_bp->blk_pad[0] == 0); 4617 ASSERT(zio->io_bp->blk_pad[1] == 0); 4618 ASSERT(memcmp(zio->io_bp, &zio->io_bp_copy, 4619 sizeof (blkptr_t)) == 0 || 4620 (zio->io_bp == zio_unique_parent(zio)->io_bp)); 4621 if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(zio->io_bp) && 4622 zio->io_bp_override == NULL && 4623 !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) { 4624 ASSERT3U(zio->io_prop.zp_copies, <=, 4625 BP_GET_NDVAS(zio->io_bp)); 4626 ASSERT(BP_COUNT_GANG(zio->io_bp) == 0 || 4627 (BP_COUNT_GANG(zio->io_bp) == 4628 BP_GET_NDVAS(zio->io_bp))); 4629 } 4630 if (zio->io_flags & ZIO_FLAG_NOPWRITE) 4631 VERIFY(BP_EQUAL(zio->io_bp, &zio->io_bp_orig)); 4632 } 4633 4634 /* 4635 * If there were child vdev/gang/ddt errors, they apply to us now. 4636 */ 4637 zio_inherit_child_errors(zio, ZIO_CHILD_VDEV); 4638 zio_inherit_child_errors(zio, ZIO_CHILD_GANG); 4639 zio_inherit_child_errors(zio, ZIO_CHILD_DDT); 4640 4641 /* 4642 * If the I/O on the transformed data was successful, generate any 4643 * checksum reports now while we still have the transformed data. 4644 */ 4645 if (zio->io_error == 0) { 4646 while (zio->io_cksum_report != NULL) { 4647 zio_cksum_report_t *zcr = zio->io_cksum_report; 4648 uint64_t align = zcr->zcr_align; 4649 uint64_t asize = P2ROUNDUP(psize, align); 4650 abd_t *adata = zio->io_abd; 4651 4652 if (adata != NULL && asize != psize) { 4653 adata = abd_alloc(asize, B_TRUE); 4654 abd_copy(adata, zio->io_abd, psize); 4655 abd_zero_off(adata, psize, asize - psize); 4656 } 4657 4658 zio->io_cksum_report = zcr->zcr_next; 4659 zcr->zcr_next = NULL; 4660 zcr->zcr_finish(zcr, adata); 4661 zfs_ereport_free_checksum(zcr); 4662 4663 if (adata != NULL && asize != psize) 4664 abd_free(adata); 4665 } 4666 } 4667 4668 zio_pop_transforms(zio); /* note: may set zio->io_error */ 4669 4670 vdev_stat_update(zio, psize); 4671 4672 /* 4673 * If this I/O is attached to a particular vdev and is slow, exceeding 4674 * 30 seconds to complete, post an error describing the I/O delay. 4675 * We ignore these errors if the device is currently unavailable. 4676 */ 4677 if (zio->io_delay >= MSEC2NSEC(zio_slow_io_ms)) { 4678 if (zio->io_vd != NULL && !vdev_is_dead(zio->io_vd)) { 4679 /* 4680 * We want to only increment our slow IO counters if 4681 * the IO is valid (i.e. not if the drive is removed). 4682 * 4683 * zfs_ereport_post() will also do these checks, but 4684 * it can also ratelimit and have other failures, so we 4685 * need to increment the slow_io counters independent 4686 * of it.
4687 */ 4688 if (zfs_ereport_is_valid(FM_EREPORT_ZFS_DELAY, 4689 zio->io_spa, zio->io_vd, zio)) { 4690 mutex_enter(&zio->io_vd->vdev_stat_lock); 4691 zio->io_vd->vdev_stat.vs_slow_ios++; 4692 mutex_exit(&zio->io_vd->vdev_stat_lock); 4693 4694 (void) zfs_ereport_post(FM_EREPORT_ZFS_DELAY, 4695 zio->io_spa, zio->io_vd, &zio->io_bookmark, 4696 zio, 0); 4697 } 4698 } 4699 } 4700 4701 if (zio->io_error) { 4702 /* 4703 * If this I/O is attached to a particular vdev, 4704 * generate an error message describing the I/O failure 4705 * at the block level. We ignore these errors if the 4706 * device is currently unavailable. 4707 */ 4708 if (zio->io_error != ECKSUM && zio->io_vd != NULL && 4709 !vdev_is_dead(zio->io_vd)) { 4710 int ret = zfs_ereport_post(FM_EREPORT_ZFS_IO, 4711 zio->io_spa, zio->io_vd, &zio->io_bookmark, zio, 0); 4712 if (ret != EALREADY) { 4713 mutex_enter(&zio->io_vd->vdev_stat_lock); 4714 if (zio->io_type == ZIO_TYPE_READ) 4715 zio->io_vd->vdev_stat.vs_read_errors++; 4716 else if (zio->io_type == ZIO_TYPE_WRITE) 4717 zio->io_vd->vdev_stat.vs_write_errors++; 4718 mutex_exit(&zio->io_vd->vdev_stat_lock); 4719 } 4720 } 4721 4722 if ((zio->io_error == EIO || !(zio->io_flags & 4723 (ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) && 4724 zio == zio->io_logical) { 4725 /* 4726 * For logical I/O requests, tell the SPA to log the 4727 * error and generate a logical data ereport. 4728 */ 4729 spa_log_error(zio->io_spa, &zio->io_bookmark, 4730 &zio->io_bp->blk_birth); 4731 (void) zfs_ereport_post(FM_EREPORT_ZFS_DATA, 4732 zio->io_spa, NULL, &zio->io_bookmark, zio, 0); 4733 } 4734 } 4735 4736 if (zio->io_error && zio == zio->io_logical) { 4737 /* 4738 * Determine whether zio should be reexecuted. This will 4739 * propagate all the way to the root via zio_notify_parent(). 4740 */ 4741 ASSERT(zio->io_vd == NULL && zio->io_bp != NULL); 4742 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 4743 4744 if (IO_IS_ALLOCATING(zio) && 4745 !(zio->io_flags & ZIO_FLAG_CANFAIL)) { 4746 if (zio->io_error != ENOSPC) 4747 zio->io_reexecute |= ZIO_REEXECUTE_NOW; 4748 else 4749 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 4750 } 4751 4752 if ((zio->io_type == ZIO_TYPE_READ || 4753 zio->io_type == ZIO_TYPE_FREE) && 4754 !(zio->io_flags & ZIO_FLAG_SCAN_THREAD) && 4755 zio->io_error == ENXIO && 4756 spa_load_state(zio->io_spa) == SPA_LOAD_NONE && 4757 spa_get_failmode(zio->io_spa) != ZIO_FAILURE_MODE_CONTINUE) 4758 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 4759 4760 if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute) 4761 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 4762 4763 /* 4764 * Here is a possibly good place to attempt to do 4765 * either combinatorial reconstruction or error correction 4766 * based on checksums. It also might be a good place 4767 * to send out preliminary ereports before we suspend 4768 * processing. 4769 */ 4770 } 4771 4772 /* 4773 * If there were logical child errors, they apply to us now. 4774 * We defer this until now to avoid conflating logical child 4775 * errors with errors that happened to the zio itself when 4776 * updating vdev stats and reporting FMA events above. 
4777 */ 4778 zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL); 4779 4780 if ((zio->io_error || zio->io_reexecute) && 4781 IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio && 4782 !(zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE))) 4783 zio_dva_unallocate(zio, zio->io_gang_tree, zio->io_bp); 4784 4785 zio_gang_tree_free(&zio->io_gang_tree); 4786 4787 /* 4788 * Godfather I/Os should never suspend. 4789 */ 4790 if ((zio->io_flags & ZIO_FLAG_GODFATHER) && 4791 (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) 4792 zio->io_reexecute &= ~ZIO_REEXECUTE_SUSPEND; 4793 4794 if (zio->io_reexecute) { 4795 /* 4796 * This is a logical I/O that wants to reexecute. 4797 * 4798 * Reexecute is top-down. When an i/o fails, if it's not 4799 * the root, it simply notifies its parent and sticks around. 4800 * The parent, seeing that it still has children in zio_done(), 4801 * does the same. This percolates all the way up to the root. 4802 * The root i/o will reexecute or suspend the entire tree. 4803 * 4804 * This approach ensures that zio_reexecute() honors 4805 * all the original i/o dependency relationships, e.g. 4806 * parents not executing until children are ready. 4807 */ 4808 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 4809 4810 zio->io_gang_leader = NULL; 4811 4812 mutex_enter(&zio->io_lock); 4813 zio->io_state[ZIO_WAIT_DONE] = 1; 4814 mutex_exit(&zio->io_lock); 4815 4816 /* 4817 * "The Godfather" I/O monitors its children but is 4818 * not a true parent to them. It will track them through 4819 * the pipeline but severs its ties whenever they get into 4820 * trouble (e.g. suspended). This allows "The Godfather" 4821 * I/O to return status without blocking. 4822 */ 4823 zl = NULL; 4824 for (pio = zio_walk_parents(zio, &zl); pio != NULL; 4825 pio = pio_next) { 4826 zio_link_t *remove_zl = zl; 4827 pio_next = zio_walk_parents(zio, &zl); 4828 4829 if ((pio->io_flags & ZIO_FLAG_GODFATHER) && 4830 (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) { 4831 zio_remove_child(pio, zio, remove_zl); 4832 /* 4833 * This is a rare code path, so we don't 4834 * bother with "next_to_execute". 4835 */ 4836 zio_notify_parent(pio, zio, ZIO_WAIT_DONE, 4837 NULL); 4838 } 4839 } 4840 4841 if ((pio = zio_unique_parent(zio)) != NULL) { 4842 /* 4843 * We're not a root i/o, so there's nothing to do 4844 * but notify our parent. Don't propagate errors 4845 * upward since we haven't permanently failed yet. 4846 */ 4847 ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER)); 4848 zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE; 4849 /* 4850 * This is a rare code path, so we don't bother with 4851 * "next_to_execute". 4852 */ 4853 zio_notify_parent(pio, zio, ZIO_WAIT_DONE, NULL); 4854 } else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) { 4855 /* 4856 * We'd fail again if we reexecuted now, so suspend 4857 * until conditions improve (e.g. device comes online). 4858 */ 4859 zio_suspend(zio->io_spa, zio, ZIO_SUSPEND_IOERR); 4860 } else { 4861 /* 4862 * Reexecution is potentially a huge amount of work. 4863 * Hand it off to the otherwise-unused claim taskq. 4864 */ 4865 ASSERT(taskq_empty_ent(&zio->io_tqent)); 4866 spa_taskq_dispatch_ent(zio->io_spa, 4867 ZIO_TYPE_CLAIM, ZIO_TASKQ_ISSUE, 4868 zio_reexecute, zio, 0, &zio->io_tqent); 4869 } 4870 return (NULL); 4871 } 4872 4873 ASSERT(list_is_empty(&zio->io_child_list)); 4874 ASSERT(zio->io_reexecute == 0); 4875 ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL)); 4876 4877 /* 4878 * Report any checksum errors, since the I/O is complete. 
4879 */ 4880 while (zio->io_cksum_report != NULL) { 4881 zio_cksum_report_t *zcr = zio->io_cksum_report; 4882 zio->io_cksum_report = zcr->zcr_next; 4883 zcr->zcr_next = NULL; 4884 zcr->zcr_finish(zcr, NULL); 4885 zfs_ereport_free_checksum(zcr); 4886 } 4887 4888 if (zio->io_flags & ZIO_FLAG_FASTWRITE && zio->io_bp && 4889 !BP_IS_HOLE(zio->io_bp) && !BP_IS_EMBEDDED(zio->io_bp) && 4890 !(zio->io_flags & ZIO_FLAG_NOPWRITE)) { 4891 metaslab_fastwrite_unmark(zio->io_spa, zio->io_bp); 4892 } 4893 4894 /* 4895 * It is the responsibility of the done callback to ensure that this 4896 * particular zio is no longer discoverable for adoption, and as 4897 * such, cannot acquire any new parents. 4898 */ 4899 if (zio->io_done) 4900 zio->io_done(zio); 4901 4902 mutex_enter(&zio->io_lock); 4903 zio->io_state[ZIO_WAIT_DONE] = 1; 4904 mutex_exit(&zio->io_lock); 4905 4906 /* 4907 * We are done executing this zio. We may want to execute a parent 4908 * next. See the comment in zio_notify_parent(). 4909 */ 4910 zio_t *next_to_execute = NULL; 4911 zl = NULL; 4912 for (pio = zio_walk_parents(zio, &zl); pio != NULL; pio = pio_next) { 4913 zio_link_t *remove_zl = zl; 4914 pio_next = zio_walk_parents(zio, &zl); 4915 zio_remove_child(pio, zio, remove_zl); 4916 zio_notify_parent(pio, zio, ZIO_WAIT_DONE, &next_to_execute); 4917 } 4918 4919 if (zio->io_waiter != NULL) { 4920 mutex_enter(&zio->io_lock); 4921 zio->io_executor = NULL; 4922 cv_broadcast(&zio->io_cv); 4923 mutex_exit(&zio->io_lock); 4924 } else { 4925 zio_destroy(zio); 4926 } 4927 4928 return (next_to_execute); 4929 } 4930 4931 /* 4932 * ========================================================================== 4933 * I/O pipeline definition 4934 * ========================================================================== 4935 */ 4936 static zio_pipe_stage_t *zio_pipeline[] = { 4937 NULL, 4938 zio_read_bp_init, 4939 zio_write_bp_init, 4940 zio_free_bp_init, 4941 zio_issue_async, 4942 zio_write_compress, 4943 zio_encrypt, 4944 zio_checksum_generate, 4945 zio_nop_write, 4946 zio_brt_free, 4947 zio_ddt_read_start, 4948 zio_ddt_read_done, 4949 zio_ddt_write, 4950 zio_ddt_free, 4951 zio_gang_assemble, 4952 zio_gang_issue, 4953 zio_dva_throttle, 4954 zio_dva_allocate, 4955 zio_dva_free, 4956 zio_dva_claim, 4957 zio_ready, 4958 zio_vdev_io_start, 4959 zio_vdev_io_done, 4960 zio_vdev_io_assess, 4961 zio_checksum_verify, 4962 zio_done 4963 }; 4964 4965 4966 4967 4968 /* 4969 * Compare two zbookmark_phys_t's to see which we would reach first in a 4970 * pre-order traversal of the object tree. 4971 * 4972 * This is simple in every case aside from the meta-dnode object. For all other 4973 * objects, we traverse them in order (object 1 before object 2, and so on). 4974 * However, all of these objects are traversed while traversing object 0, since 4975 * the data it points to is the list of objects. Thus, we need to convert to a 4976 * canonical representation so we can compare meta-dnode bookmarks to 4977 * non-meta-dnode bookmarks. 4978 * 4979 * We do this by calculating "equivalents" for each field of the zbookmark. 4980 * zbookmarks outside of the meta-dnode use their own object and level, and 4981 * calculate the level 0 equivalent (the first L0 blkid that is contained in the 4982 * blocks this bookmark refers to) by multiplying their blkid by their span 4983 * (the number of L0 blocks contained within one block at their level). 
4984 * zbookmarks inside the meta-dnode calculate their object equivalent 4985 * (which is L0equiv * dnodes per data block), use 0 for their L0equiv, and use 4986 * level + 1<<31 (any value larger than a level could ever be) for their level. 4987 * This causes them to always compare before a bookmark in their object 4988 * equivalent, compare appropriately to bookmarks in other objects, and to 4989 * compare appropriately to other bookmarks in the meta-dnode. 4990 */ 4991 int 4992 zbookmark_compare(uint16_t dbss1, uint8_t ibs1, uint16_t dbss2, uint8_t ibs2, 4993 const zbookmark_phys_t *zb1, const zbookmark_phys_t *zb2) 4994 { 4995 /* 4996 * These variables represent the "equivalent" values for the zbookmark, 4997 * after converting zbookmarks inside the meta dnode to their 4998 * normal-object equivalents. 4999 */ 5000 uint64_t zb1obj, zb2obj; 5001 uint64_t zb1L0, zb2L0; 5002 uint64_t zb1level, zb2level; 5003 5004 if (zb1->zb_object == zb2->zb_object && 5005 zb1->zb_level == zb2->zb_level && 5006 zb1->zb_blkid == zb2->zb_blkid) 5007 return (0); 5008 5009 IMPLY(zb1->zb_level > 0, ibs1 >= SPA_MINBLOCKSHIFT); 5010 IMPLY(zb2->zb_level > 0, ibs2 >= SPA_MINBLOCKSHIFT); 5011 5012 /* 5013 * BP_SPANB calculates the span in blocks. 5014 */ 5015 zb1L0 = (zb1->zb_blkid) * BP_SPANB(ibs1, zb1->zb_level); 5016 zb2L0 = (zb2->zb_blkid) * BP_SPANB(ibs2, zb2->zb_level); 5017 5018 if (zb1->zb_object == DMU_META_DNODE_OBJECT) { 5019 zb1obj = zb1L0 * (dbss1 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT)); 5020 zb1L0 = 0; 5021 zb1level = zb1->zb_level + COMPARE_META_LEVEL; 5022 } else { 5023 zb1obj = zb1->zb_object; 5024 zb1level = zb1->zb_level; 5025 } 5026 5027 if (zb2->zb_object == DMU_META_DNODE_OBJECT) { 5028 zb2obj = zb2L0 * (dbss2 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT)); 5029 zb2L0 = 0; 5030 zb2level = zb2->zb_level + COMPARE_META_LEVEL; 5031 } else { 5032 zb2obj = zb2->zb_object; 5033 zb2level = zb2->zb_level; 5034 } 5035 5036 /* Now that we have a canonical representation, do the comparison. */ 5037 if (zb1obj != zb2obj) 5038 return (zb1obj < zb2obj ? -1 : 1); 5039 else if (zb1L0 != zb2L0) 5040 return (zb1L0 < zb2L0 ? -1 : 1); 5041 else if (zb1level != zb2level) 5042 return (zb1level > zb2level ? -1 : 1); 5043 /* 5044 * This can (theoretically) happen if the bookmarks have the same object 5045 * and level, but different blkids, if the block sizes are not the same. 5046 * There is presently no way to change the indirect block sizes 5047 */ 5048 return (0); 5049 } 5050 5051 /* 5052 * This function checks the following: given that last_block is the place that 5053 * our traversal stopped last time, does that guarantee that we've visited 5054 * every node under subtree_root? Therefore, we can't just use the raw output 5055 * of zbookmark_compare. We have to pass in a modified version of 5056 * subtree_root; by incrementing the block id, and then checking whether 5057 * last_block is before or equal to that, we can tell whether or not having 5058 * visited last_block implies that all of subtree_root's children have been 5059 * visited. 5060 */ 5061 boolean_t 5062 zbookmark_subtree_completed(const dnode_phys_t *dnp, 5063 const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block) 5064 { 5065 zbookmark_phys_t mod_zb = *subtree_root; 5066 mod_zb.zb_blkid++; 5067 ASSERT0(last_block->zb_level); 5068 5069 /* The objset_phys_t isn't before anything. 
*/ 5070 if (dnp == NULL) 5071 return (B_FALSE); 5072 5073 /* 5074 * We pass in 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT) for the 5075 * data block size in sectors, because that variable is only used if 5076 * the bookmark refers to a block in the meta-dnode. Since we don't 5077 * know without examining it what object it refers to, and there's no 5078 * harm in passing in this value in other cases, we always pass it in. 5079 * 5080 * We pass in 0 for the indirect block size shift because zb2 must be 5081 * level 0. The indirect block size is only used to calculate the span 5082 * of the bookmark, but since the bookmark must be level 0, the span is 5083 * always 1, so the math works out. 5084 * 5085 * If you make changes to how the zbookmark_compare code works, be sure 5086 * to make sure that this code still works afterwards. 5087 */ 5088 return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift, 5089 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, &mod_zb, 5090 last_block) <= 0); 5091 } 5092 5093 /* 5094 * This function is similar to zbookmark_subtree_completed(), but returns true 5095 * if subtree_root is equal or ahead of last_block, i.e. still to be done. 5096 */ 5097 boolean_t 5098 zbookmark_subtree_tbd(const dnode_phys_t *dnp, 5099 const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block) 5100 { 5101 ASSERT0(last_block->zb_level); 5102 if (dnp == NULL) 5103 return (B_FALSE); 5104 return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift, 5105 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, subtree_root, 5106 last_block) >= 0); 5107 } 5108 5109 EXPORT_SYMBOL(zio_type_name); 5110 EXPORT_SYMBOL(zio_buf_alloc); 5111 EXPORT_SYMBOL(zio_data_buf_alloc); 5112 EXPORT_SYMBOL(zio_buf_free); 5113 EXPORT_SYMBOL(zio_data_buf_free); 5114 5115 ZFS_MODULE_PARAM(zfs_zio, zio_, slow_io_ms, INT, ZMOD_RW, 5116 "Max I/O completion time (milliseconds) before marking it as slow"); 5117 5118 ZFS_MODULE_PARAM(zfs_zio, zio_, requeue_io_start_cut_in_line, INT, ZMOD_RW, 5119 "Prioritize requeued I/O"); 5120 5121 ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_deferred_free, UINT, ZMOD_RW, 5122 "Defer frees starting in this pass"); 5123 5124 ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_dont_compress, UINT, ZMOD_RW, 5125 "Don't compress starting in this pass"); 5126 5127 ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_rewrite, UINT, ZMOD_RW, 5128 "Rewrite new bps starting in this pass"); 5129 5130 ZFS_MODULE_PARAM(zfs_zio, zio_, dva_throttle_enabled, INT, ZMOD_RW, 5131 "Throttle block allocations in the ZIO pipeline"); 5132 5133 ZFS_MODULE_PARAM(zfs_zio, zio_, deadman_log_all, INT, ZMOD_RW, 5134 "Log all slow ZIOs, not just those with vdevs"); 5135
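/*
 * Worked example (illustrative only) of the canonical-representation math
 * used by zbookmark_compare() above. It assumes 512-byte dnodes
 * (DNODE_SHIFT == SPA_MINBLOCKSHIFT == 9), 16K meta-dnode data blocks
 * (dbss == 32 sectors) and 128K indirect blocks (ibs == 17); the concrete
 * bookmark values are made up for illustration.
 *
 *   - Regular bookmark <object 70, level 1, blkid 3>:
 *     span = BP_SPANB(17, 1) = 1 << (17 - 7) = 1024, so its L0 equivalent
 *     is 3 * 1024 = 3072; it keeps object 70 and level 1.
 *
 *   - Meta-dnode bookmark <DMU_META_DNODE_OBJECT, level 0, blkid 2>:
 *     its L0 equivalent is 2, so its object equivalent is
 *     2 * (32 << (9 - 9)) = 64 (the first dnode held by that block); the
 *     L0 equivalent is then reset to 0 and the level becomes
 *     0 + COMPARE_META_LEVEL.
 *
 *   Since 64 < 70, the meta-dnode block (which holds the dnodes for
 *   objects 64-95) sorts before object 70, matching pre-order traversal.
 */
#if 0
/* Illustrative check of the example above; not built. */
static void
zbookmark_compare_example(void)
{
	zbookmark_phys_t meta = {
		.zb_objset = 1, .zb_object = DMU_META_DNODE_OBJECT,
		.zb_level = 0, .zb_blkid = 2,
	};
	zbookmark_phys_t plain = {
		.zb_objset = 1, .zb_object = 70, .zb_level = 1, .zb_blkid = 3,
	};

	/* dbss = 32 sectors (16K dnode blocks), ibs = 17 (128K indirects) */
	ASSERT3S(zbookmark_compare(32, 17, 32, 17, &meta, &plain), ==, -1);
}
#endif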