/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2022 by Delphix. All rights reserved.
 * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2019, 2023, 2024, Klara Inc.
 * Copyright (c) 2019, Allan Jude
 * Copyright (c) 2021, Datto, Inc.
 */

#include <sys/sysmacros.h>
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_trim.h>
#include <sys/zio_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/brt.h>
#include <sys/ddt.h>
#include <sys/blkptr.h>
#include <sys/zfeature.h>
#include <sys/dsl_scan.h>
#include <sys/metaslab_impl.h>
#include <sys/time.h>
#include <sys/trace_zfs.h>
#include <sys/abd.h>
#include <sys/dsl_crypt.h>
#include <cityhash.h>

/*
 * ==========================================================================
 * I/O type descriptions
 * ==========================================================================
 */
const char *const zio_type_name[ZIO_TYPES] = {
	/*
	 * Note: Linux kernel thread name length is limited
	 * so these names will differ from upstream OpenZFS.
	 */
	"z_null", "z_rd", "z_wr", "z_fr", "z_cl", "z_flush", "z_trim"
};

int zio_dva_throttle_enabled = B_TRUE;
static int zio_deadman_log_all = B_FALSE;

/*
 * ==========================================================================
 * I/O kmem caches
 * ==========================================================================
 */
static kmem_cache_t *zio_cache;
static kmem_cache_t *zio_link_cache;
kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
static uint64_t zio_buf_cache_allocs[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
static uint64_t zio_buf_cache_frees[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
#endif

/* Mark IOs as "slow" if they take longer than 30 seconds */
static uint_t zio_slow_io_ms = (30 * MILLISEC);

#define	BP_SPANB(indblkshift, level) \
	(((uint64_t)1) << ((level) * ((indblkshift) - SPA_BLKPTRSHIFT)))
#define	COMPARE_META_LEVEL	0x80000000ul
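/*
 * Illustrative note (not from the original source): BP_SPANB() computes how
 * many leaf blocks a single block pointer spans at a given indirection
 * level. For example, with 128K indirect blocks (indblkshift == 17) and
 * SPA_BLKPTRSHIFT == 7, each indirect block holds 1024 block pointers, so
 * BP_SPANB(17, 1) == 1 << 10 and BP_SPANB(17, 2) == 1 << 20 leaf blocks.
 */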
/*
 * The following actions directly affect the spa's sync-to-convergence logic.
 * The values below define the sync pass when we start performing the action.
 * Care should be taken when changing these values as they directly impact
 * spa_sync() performance. Tuning these values may introduce subtle performance
 * pathologies and should only be done in the context of performance analysis.
 * These tunables will eventually be removed and replaced with #defines once
 * enough analysis has been done to determine optimal values.
 *
 * The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that
 * regular blocks are not deferred.
 *
 * Starting in sync pass 8 (zfs_sync_pass_dont_compress), we disable
 * compression (including of metadata). In practice, we don't have this
 * many sync passes, so this has no effect.
 *
 * The original intent was that disabling compression would help the sync
 * passes to converge. However, in practice disabling compression increases
 * the average number of sync passes, because when we turn compression off,
 * many blocks' sizes will change and thus we have to re-allocate (not
 * overwrite) them. It also increases the number of 128KB allocations (e.g.
 * for indirect blocks and spacemaps) because these will not be compressed.
 * The 128K allocations are especially detrimental to performance on highly
 * fragmented systems, which may have very few free segments of this size,
 * and may need to load new metaslabs to satisfy 128K allocations.
 */

/* defer frees starting in this pass */
uint_t zfs_sync_pass_deferred_free = 2;

/* don't compress starting in this pass */
static uint_t zfs_sync_pass_dont_compress = 8;

/* rewrite new bps starting in this pass */
static uint_t zfs_sync_pass_rewrite = 2;

/*
 * An allocating zio is one that either currently has the DVA allocate
 * stage set or will have it later in its lifetime.
 */
#define	IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)

/*
 * Enable smaller cores (crash dumps) by excluding metadata
 * allocations as well.
 */
int zio_exclude_metadata = 0;
static int zio_requeue_io_start_cut_in_line = 1;

#ifdef ZFS_DEBUG
static const int zio_buf_debug_limit = 16384;
#else
static const int zio_buf_debug_limit = 0;
#endif

static inline void __zio_execute(zio_t *zio);

static void zio_taskq_dispatch(zio_t *, zio_taskq_type_t, boolean_t);

void
zio_init(void)
{
	size_t c;

	zio_cache = kmem_cache_create("zio_cache",
	    sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
	zio_link_cache = kmem_cache_create("zio_link_cache",
	    sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
		size_t align, cflags, data_cflags;
		char name[32];

		/*
		 * Create cache for each half-power of 2 size, starting from
		 * SPA_MINBLOCKSIZE. It should give us memory space efficiency
		 * of ~7/8, sufficient for transient allocations mostly using
		 * these caches.
		 */
		size_t p2 = size;
		while (!ISP2(p2))
			p2 &= p2 - 1;
		if (!IS_P2ALIGNED(size, p2 / 2))
			continue;

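		/*
		 * Illustrative note (not from the original source): with
		 * SPA_MINBLOCKSIZE of 512, the sizes that survive the
		 * half-power-of-2 filter above are 512, 1024, 1536, 2048,
		 * 3072, 4096, 6144, 8192, ... i.e. every power of 2 plus
		 * the midpoint between successive powers of 2.
		 */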
#ifndef _KERNEL
		/*
		 * If we are using watchpoints, put each buffer on its own
		 * page, to eliminate the performance overhead of trapping to
		 * the kernel when modifying a non-watched buffer that shares
		 * the page with a watched buffer.
		 */
		if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE))
			continue;
#endif

		if (IS_P2ALIGNED(size, PAGESIZE))
			align = PAGESIZE;
		else
			align = 1 << (highbit64(size ^ (size - 1)) - 1);

		cflags = (zio_exclude_metadata || size > zio_buf_debug_limit) ?
		    KMC_NODEBUG : 0;
		data_cflags = KMC_NODEBUG;
		if (cflags == data_cflags) {
			/*
			 * Resulting kmem caches would be identical.
			 * Save memory by creating only one.
			 */
			(void) snprintf(name, sizeof (name),
			    "zio_buf_comb_%lu", (ulong_t)size);
			zio_buf_cache[c] = kmem_cache_create(name, size, align,
			    NULL, NULL, NULL, NULL, NULL, cflags);
			zio_data_buf_cache[c] = zio_buf_cache[c];
			continue;
		}
		(void) snprintf(name, sizeof (name), "zio_buf_%lu",
		    (ulong_t)size);
		zio_buf_cache[c] = kmem_cache_create(name, size, align,
		    NULL, NULL, NULL, NULL, NULL, cflags);

		(void) snprintf(name, sizeof (name), "zio_data_buf_%lu",
		    (ulong_t)size);
		zio_data_buf_cache[c] = kmem_cache_create(name, size, align,
		    NULL, NULL, NULL, NULL, NULL, data_cflags);
	}

	while (--c != 0) {
		ASSERT(zio_buf_cache[c] != NULL);
		if (zio_buf_cache[c - 1] == NULL)
			zio_buf_cache[c - 1] = zio_buf_cache[c];

		ASSERT(zio_data_buf_cache[c] != NULL);
		if (zio_data_buf_cache[c - 1] == NULL)
			zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
	}

	zio_inject_init();

	lz4_init();
}

void
zio_fini(void)
{
	size_t n = SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT;

#if defined(ZFS_DEBUG) && !defined(_KERNEL)
	for (size_t i = 0; i < n; i++) {
		if (zio_buf_cache_allocs[i] != zio_buf_cache_frees[i])
			(void) printf("zio_fini: [%d] %llu != %llu\n",
			    (int)((i + 1) << SPA_MINBLOCKSHIFT),
			    (long long unsigned)zio_buf_cache_allocs[i],
			    (long long unsigned)zio_buf_cache_frees[i]);
	}
#endif

	/*
	 * The same kmem cache can show up multiple times in both zio_buf_cache
	 * and zio_data_buf_cache. Do a wasteful but trivially correct scan to
	 * sort it out.
	 */
	for (size_t i = 0; i < n; i++) {
		kmem_cache_t *cache = zio_buf_cache[i];
		if (cache == NULL)
			continue;
		for (size_t j = i; j < n; j++) {
			if (cache == zio_buf_cache[j])
				zio_buf_cache[j] = NULL;
			if (cache == zio_data_buf_cache[j])
				zio_data_buf_cache[j] = NULL;
		}
		kmem_cache_destroy(cache);
	}

	for (size_t i = 0; i < n; i++) {
		kmem_cache_t *cache = zio_data_buf_cache[i];
		if (cache == NULL)
			continue;
		for (size_t j = i; j < n; j++) {
			if (cache == zio_data_buf_cache[j])
				zio_data_buf_cache[j] = NULL;
		}
		kmem_cache_destroy(cache);
	}

	for (size_t i = 0; i < n; i++) {
		VERIFY3P(zio_buf_cache[i], ==, NULL);
		VERIFY3P(zio_data_buf_cache[i], ==, NULL);
	}

	kmem_cache_destroy(zio_link_cache);
	kmem_cache_destroy(zio_cache);

	zio_inject_fini();

	lz4_fini();
}

/*
 * ==========================================================================
 * Allocate and free I/O buffers
 * ==========================================================================
 */

#ifdef ZFS_DEBUG
static const ulong_t zio_buf_canary = (ulong_t)0xdeadc0dedead210b;
#endif

/*
 * Use empty space after the buffer to detect overflows.
 *
 * Since zio_init() creates kmem caches only for a certain set of buffer
 * sizes, allocations of different sizes may have some unused space after the
 * data. Filling part of that space with a known pattern on allocation and
 * checking it on free should allow us to detect some buffer overflows.
 */
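/*
 * Worked example (illustrative, not from the original source): a 3500-byte
 * zio_buf_alloc() is served from the 4096-byte cache. P2ROUNDUP(3500, 8)
 * is 3504, so the 592 bytes from offset 3504 to 4096 are filled with
 * zio_buf_canary on allocation and re-checked on free; an overflow past the
 * requested size corrupts the pattern and triggers the PANIC below.
 */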
static void
zio_buf_put_canary(ulong_t *p, size_t size, kmem_cache_t **cache, size_t c)
{
#ifdef ZFS_DEBUG
	size_t off = P2ROUNDUP(size, sizeof (ulong_t));
	ulong_t *canary = p + off / sizeof (ulong_t);
	size_t asize = (c + 1) << SPA_MINBLOCKSHIFT;
	if (c + 1 < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT &&
	    cache[c] == cache[c + 1])
		asize = (c + 2) << SPA_MINBLOCKSHIFT;
	for (; off < asize; canary++, off += sizeof (ulong_t))
		*canary = zio_buf_canary;
#endif
}

static void
zio_buf_check_canary(ulong_t *p, size_t size, kmem_cache_t **cache, size_t c)
{
#ifdef ZFS_DEBUG
	size_t off = P2ROUNDUP(size, sizeof (ulong_t));
	ulong_t *canary = p + off / sizeof (ulong_t);
	size_t asize = (c + 1) << SPA_MINBLOCKSHIFT;
	if (c + 1 < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT &&
	    cache[c] == cache[c + 1])
		asize = (c + 2) << SPA_MINBLOCKSHIFT;
	for (; off < asize; canary++, off += sizeof (ulong_t)) {
		if (unlikely(*canary != zio_buf_canary)) {
			PANIC("ZIO buffer overflow %p (%zu) + %zu %#lx != %#lx",
			    p, size, (canary - p) * sizeof (ulong_t),
			    *canary, zio_buf_canary);
		}
	}
#endif
}

/*
 * Use zio_buf_alloc to allocate ZFS metadata. This data will appear in a
 * crashdump if the kernel panics, so use it judiciously. Obviously, it's
 * useful to inspect ZFS metadata, but if possible, we should avoid keeping
 * excess / transient data in-core during a crashdump.
 */
void *
zio_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
	atomic_add_64(&zio_buf_cache_allocs[c], 1);
#endif

	void *p = kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE);
	zio_buf_put_canary(p, size, zio_buf_cache, c);
	return (p);
}

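/*
 * Usage sketch (illustrative, assumed caller): buffers must be returned
 * with the matching free routine and the same size, since the size selects
 * the kmem cache:
 *
 *	void *buf = zio_buf_alloc(lsize);
 *	...
 *	zio_buf_free(buf, lsize);
 */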
/*
 * Use zio_data_buf_alloc to allocate data. The data will not appear in a
 * crashdump if the kernel panics. This exists so that we will limit the amount
 * of ZFS data that shows up in a kernel crashdump. (Thus reducing the amount
 * of kernel heap dumped to disk when the kernel panics.)
 */
void *
zio_data_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	void *p = kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE);
	zio_buf_put_canary(p, size, zio_data_buf_cache, c);
	return (p);
}

void
zio_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
	atomic_add_64(&zio_buf_cache_frees[c], 1);
#endif

	zio_buf_check_canary(buf, size, zio_buf_cache, c);
	kmem_cache_free(zio_buf_cache[c], buf);
}

void
zio_data_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	zio_buf_check_canary(buf, size, zio_data_buf_cache, c);
	kmem_cache_free(zio_data_buf_cache[c], buf);
}

static void
zio_abd_free(void *abd, size_t size)
{
	(void) size;
	abd_free((abd_t *)abd);
}

/*
 * ==========================================================================
 * Push and pop I/O transform buffers
 * ==========================================================================
 */
void
zio_push_transform(zio_t *zio, abd_t *data, uint64_t size, uint64_t bufsize,
    zio_transform_func_t *transform)
{
	zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);

	zt->zt_orig_abd = zio->io_abd;
	zt->zt_orig_size = zio->io_size;
	zt->zt_bufsize = bufsize;
	zt->zt_transform = transform;

	zt->zt_next = zio->io_transform_stack;
	zio->io_transform_stack = zt;

	zio->io_abd = data;
	zio->io_size = size;
}

void
zio_pop_transforms(zio_t *zio)
{
	zio_transform_t *zt;

	while ((zt = zio->io_transform_stack) != NULL) {
		if (zt->zt_transform != NULL)
			zt->zt_transform(zio,
			    zt->zt_orig_abd, zt->zt_orig_size);

		if (zt->zt_bufsize != 0)
			abd_free(zio->io_abd);

		zio->io_abd = zt->zt_orig_abd;
		zio->io_size = zt->zt_orig_size;
		zio->io_transform_stack = zt->zt_next;

		kmem_free(zt, sizeof (zio_transform_t));
	}
}

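/*
 * Illustrative example (taken from zio_read_bp_init() below): a compressed
 * read pushes a decompress transform so the physical bytes land in a
 * temporary abd, and popping the transform decompresses them back into the
 * original buffer:
 *
 *	zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
 *	    psize, psize, zio_decompress);
 */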
/*
 * ==========================================================================
 * I/O transform callbacks for subblocks, decompression, and decryption
 * ==========================================================================
 */
static void
zio_subblock(zio_t *zio, abd_t *data, uint64_t size)
{
	ASSERT(zio->io_size > size);

	if (zio->io_type == ZIO_TYPE_READ)
		abd_copy(data, zio->io_abd, size);
}

static void
zio_decompress(zio_t *zio, abd_t *data, uint64_t size)
{
	if (zio->io_error == 0) {
		void *tmp = abd_borrow_buf(data, size);
		int ret = zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
		    zio->io_abd, tmp, zio->io_size, size,
		    &zio->io_prop.zp_complevel);
		abd_return_buf_copy(data, tmp, size);

		if (zio_injection_enabled && ret == 0)
			ret = zio_handle_fault_injection(zio, EINVAL);

		if (ret != 0)
			zio->io_error = SET_ERROR(EIO);
	}
}

static void
zio_decrypt(zio_t *zio, abd_t *data, uint64_t size)
{
	int ret;
	void *tmp;
	blkptr_t *bp = zio->io_bp;
	spa_t *spa = zio->io_spa;
	uint64_t dsobj = zio->io_bookmark.zb_objset;
	uint64_t lsize = BP_GET_LSIZE(bp);
	dmu_object_type_t ot = BP_GET_TYPE(bp);
	uint8_t salt[ZIO_DATA_SALT_LEN];
	uint8_t iv[ZIO_DATA_IV_LEN];
	uint8_t mac[ZIO_DATA_MAC_LEN];
	boolean_t no_crypt = B_FALSE;

	ASSERT(BP_USES_CRYPT(bp));
	ASSERT3U(size, !=, 0);

	if (zio->io_error != 0)
		return;

	/*
	 * Verify the cksum of MACs stored in an indirect bp. It will always
	 * be possible to verify this since it does not require an encryption
	 * key.
	 */
	if (BP_HAS_INDIRECT_MAC_CKSUM(bp)) {
		zio_crypt_decode_mac_bp(bp, mac);

		if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF) {
			/*
			 * We haven't decompressed the data yet, but
			 * zio_crypt_do_indirect_mac_checksum() requires
			 * decompressed data to be able to parse out the MACs
			 * from the indirect block. We decompress it now and
			 * throw away the result after we are finished.
			 */
			tmp = zio_buf_alloc(lsize);
			ret = zio_decompress_data(BP_GET_COMPRESS(bp),
			    zio->io_abd, tmp, zio->io_size, lsize,
			    &zio->io_prop.zp_complevel);
			if (ret != 0) {
				ret = SET_ERROR(EIO);
				goto error;
			}
			ret = zio_crypt_do_indirect_mac_checksum(B_FALSE,
			    tmp, lsize, BP_SHOULD_BYTESWAP(bp), mac);
			zio_buf_free(tmp, lsize);
		} else {
			ret = zio_crypt_do_indirect_mac_checksum_abd(B_FALSE,
			    zio->io_abd, size, BP_SHOULD_BYTESWAP(bp), mac);
		}
		abd_copy(data, zio->io_abd, size);

		if (zio_injection_enabled && ot != DMU_OT_DNODE && ret == 0) {
			ret = zio_handle_decrypt_injection(spa,
			    &zio->io_bookmark, ot, ECKSUM);
		}
		if (ret != 0)
			goto error;

		return;
	}

	/*
	 * If this is an authenticated block, just check the MAC. It would be
	 * nice to separate this out into its own flag, but when this was done,
	 * we had run out of bits in what is now zio_flag_t. Future cleanup
	 * could make this a flag bit.
	 */
	if (BP_IS_AUTHENTICATED(bp)) {
		if (ot == DMU_OT_OBJSET) {
			ret = spa_do_crypt_objset_mac_abd(B_FALSE, spa,
			    dsobj, zio->io_abd, size, BP_SHOULD_BYTESWAP(bp));
		} else {
			zio_crypt_decode_mac_bp(bp, mac);
			ret = spa_do_crypt_mac_abd(B_FALSE, spa, dsobj,
			    zio->io_abd, size, mac);
			if (zio_injection_enabled && ret == 0) {
				ret = zio_handle_decrypt_injection(spa,
				    &zio->io_bookmark, ot, ECKSUM);
			}
		}
		abd_copy(data, zio->io_abd, size);

		if (ret != 0)
			goto error;

		return;
	}

	zio_crypt_decode_params_bp(bp, salt, iv);

	if (ot == DMU_OT_INTENT_LOG) {
		tmp = abd_borrow_buf_copy(zio->io_abd, sizeof (zil_chain_t));
		zio_crypt_decode_mac_zil(tmp, mac);
		abd_return_buf(zio->io_abd, tmp, sizeof (zil_chain_t));
	} else {
		zio_crypt_decode_mac_bp(bp, mac);
	}

	ret = spa_do_crypt_abd(B_FALSE, spa, &zio->io_bookmark, BP_GET_TYPE(bp),
	    BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp), salt, iv, mac, size, data,
	    zio->io_abd, &no_crypt);
	if (no_crypt)
		abd_copy(data, zio->io_abd, size);

	if (ret != 0)
		goto error;

	return;

error:
	/* assert that the key was found unless this was speculative */
	ASSERT(ret != EACCES || (zio->io_flags & ZIO_FLAG_SPECULATIVE));

	/*
	 * If there was a decryption / authentication error return EIO as
	 * the io_error. If this was not a speculative zio, create an ereport.
	 */
	if (ret == ECKSUM) {
		zio->io_error = SET_ERROR(EIO);
		if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) {
			spa_log_error(spa, &zio->io_bookmark,
			    BP_GET_LOGICAL_BIRTH(zio->io_bp));
			(void) zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION,
			    spa, NULL, &zio->io_bookmark, zio, 0);
		}
	} else {
		zio->io_error = ret;
	}
}

/*
 * ==========================================================================
 * I/O parent/child relationships and pipeline interlocks
 * ==========================================================================
 */
zio_t *
zio_walk_parents(zio_t *cio, zio_link_t **zl)
{
	list_t *pl = &cio->io_parent_list;

	*zl = (*zl == NULL) ? list_head(pl) : list_next(pl, *zl);
	if (*zl == NULL)
		return (NULL);

	ASSERT((*zl)->zl_child == cio);
	return ((*zl)->zl_parent);
}

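/*
 * Illustrative iteration pattern (see zio_unique_parent() below for an
 * in-tree user): the zio_link_t cursor starts as NULL and is advanced by
 * each call until the walk returns NULL:
 *
 *	zio_link_t *zl = NULL;
 *	zio_t *pio;
 *	while ((pio = zio_walk_parents(cio, &zl)) != NULL)
 *		...;
 */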
zio_t *
zio_walk_children(zio_t *pio, zio_link_t **zl)
{
	list_t *cl = &pio->io_child_list;

	ASSERT(MUTEX_HELD(&pio->io_lock));

	*zl = (*zl == NULL) ? list_head(cl) : list_next(cl, *zl);
	if (*zl == NULL)
		return (NULL);

	ASSERT((*zl)->zl_parent == pio);
	return ((*zl)->zl_child);
}

zio_t *
zio_unique_parent(zio_t *cio)
{
	zio_link_t *zl = NULL;
	zio_t *pio = zio_walk_parents(cio, &zl);

	VERIFY3P(zio_walk_parents(cio, &zl), ==, NULL);
	return (pio);
}

void
zio_add_child(zio_t *pio, zio_t *cio)
{
	/*
	 * Logical I/Os can have logical, gang, or vdev children.
	 * Gang I/Os can have gang or vdev children.
	 * Vdev I/Os can only have vdev children.
	 * The following ASSERT captures all of these constraints.
	 */
	ASSERT3S(cio->io_child_type, <=, pio->io_child_type);

	/* Parent should not have READY stage if child doesn't have it. */
	IMPLY((cio->io_pipeline & ZIO_STAGE_READY) == 0 &&
	    (cio->io_child_type != ZIO_CHILD_VDEV),
	    (pio->io_pipeline & ZIO_STAGE_READY) == 0);

	zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);
	zl->zl_parent = pio;
	zl->zl_child = cio;

	mutex_enter(&pio->io_lock);
	mutex_enter(&cio->io_lock);

	ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);

	uint64_t *countp = pio->io_children[cio->io_child_type];
	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
		countp[w] += !cio->io_state[w];

	list_insert_head(&pio->io_child_list, zl);
	list_insert_head(&cio->io_parent_list, zl);

	mutex_exit(&cio->io_lock);
	mutex_exit(&pio->io_lock);
}

void
zio_add_child_first(zio_t *pio, zio_t *cio)
{
	/*
	 * Logical I/Os can have logical, gang, or vdev children.
	 * Gang I/Os can have gang or vdev children.
	 * Vdev I/Os can only have vdev children.
	 * The following ASSERT captures all of these constraints.
	 */
	ASSERT3S(cio->io_child_type, <=, pio->io_child_type);

	/* Parent should not have READY stage if child doesn't have it. */
	IMPLY((cio->io_pipeline & ZIO_STAGE_READY) == 0 &&
	    (cio->io_child_type != ZIO_CHILD_VDEV),
	    (pio->io_pipeline & ZIO_STAGE_READY) == 0);

	zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);
	zl->zl_parent = pio;
	zl->zl_child = cio;

	ASSERT(list_is_empty(&cio->io_parent_list));
	list_insert_head(&cio->io_parent_list, zl);

	mutex_enter(&pio->io_lock);

	ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);

	uint64_t *countp = pio->io_children[cio->io_child_type];
	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
		countp[w] += !cio->io_state[w];

	list_insert_head(&pio->io_child_list, zl);

	mutex_exit(&pio->io_lock);
}

static void
zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
{
	ASSERT(zl->zl_parent == pio);
	ASSERT(zl->zl_child == cio);

	mutex_enter(&pio->io_lock);
	mutex_enter(&cio->io_lock);

	list_remove(&pio->io_child_list, zl);
	list_remove(&cio->io_parent_list, zl);

	mutex_exit(&cio->io_lock);
	mutex_exit(&pio->io_lock);
	kmem_cache_free(zio_link_cache, zl);
}

static boolean_t
zio_wait_for_children(zio_t *zio, uint8_t childbits, enum zio_wait_type wait)
{
	boolean_t waiting = B_FALSE;

	mutex_enter(&zio->io_lock);
	ASSERT(zio->io_stall == NULL);
	for (int c = 0; c < ZIO_CHILD_TYPES; c++) {
		if (!(ZIO_CHILD_BIT_IS_SET(childbits, c)))
			continue;

		uint64_t *countp = &zio->io_children[c][wait];
		if (*countp != 0) {
			zio->io_stage >>= 1;
			ASSERT3U(zio->io_stage, !=, ZIO_STAGE_OPEN);
			zio->io_stall = countp;
			waiting = B_TRUE;
			break;
		}
	}
	mutex_exit(&zio->io_lock);
	return (waiting);
}

__attribute__((always_inline))
static inline void
zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait,
    zio_t **next_to_executep)
{
	uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
	int *errorp = &pio->io_child_error[zio->io_child_type];

	mutex_enter(&pio->io_lock);
	if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
		*errorp = zio_worst_error(*errorp, zio->io_error);
	pio->io_reexecute |= zio->io_reexecute;
	ASSERT3U(*countp, >, 0);

	(*countp)--;

	if (*countp == 0 && pio->io_stall == countp) {
		zio_taskq_type_t type =
		    pio->io_stage < ZIO_STAGE_VDEV_IO_START ? ZIO_TASKQ_ISSUE :
		    ZIO_TASKQ_INTERRUPT;
		pio->io_stall = NULL;
		mutex_exit(&pio->io_lock);

		/*
		 * If we can tell the caller to execute this parent next, do
		 * so. We do this only if the parent's zio type matches the
		 * child's type. Otherwise dispatch the parent zio in its
		 * own taskq.
		 *
		 * Having the caller execute the parent when possible reduces
		 * locking on the zio taskqs, reduces context switch
		 * overhead, and has no recursion penalty. Note that one
		 * read from disk typically causes at least 3 zios: a
		 * zio_null(), the logical zio_read(), and then a physical
		 * zio. When the physical ZIO completes, we are able to call
		 * zio_done() on all 3 of these zios from one invocation of
		 * zio_execute() by returning the parent back to
		 * zio_execute(). Since the parent isn't executed until this
		 * thread returns back to zio_execute(), the caller should do
		 * so promptly.
		 *
		 * In other cases, dispatching the parent prevents
		 * overflowing the stack when we have deeply nested
		 * parent-child relationships, as we do with the "mega zio"
		 * of writes for spa_sync(), and the chain of ZIL blocks.
		 */
		if (next_to_executep != NULL && *next_to_executep == NULL &&
		    pio->io_type == zio->io_type) {
			*next_to_executep = pio;
		} else {
			zio_taskq_dispatch(pio, type, B_FALSE);
		}
	} else {
		mutex_exit(&pio->io_lock);
	}
}

static void
zio_inherit_child_errors(zio_t *zio, enum zio_child c)
{
	if (zio->io_child_error[c] != 0 && zio->io_error == 0)
		zio->io_error = zio->io_child_error[c];
}

int
zio_bookmark_compare(const void *x1, const void *x2)
{
	const zio_t *z1 = x1;
	const zio_t *z2 = x2;

	if (z1->io_bookmark.zb_objset < z2->io_bookmark.zb_objset)
		return (-1);
	if (z1->io_bookmark.zb_objset > z2->io_bookmark.zb_objset)
		return (1);

	if (z1->io_bookmark.zb_object < z2->io_bookmark.zb_object)
		return (-1);
	if (z1->io_bookmark.zb_object > z2->io_bookmark.zb_object)
		return (1);

	if (z1->io_bookmark.zb_level < z2->io_bookmark.zb_level)
		return (-1);
	if (z1->io_bookmark.zb_level > z2->io_bookmark.zb_level)
		return (1);

	if (z1->io_bookmark.zb_blkid < z2->io_bookmark.zb_blkid)
		return (-1);
	if (z1->io_bookmark.zb_blkid > z2->io_bookmark.zb_blkid)
		return (1);

	if (z1 < z2)
		return (-1);
	if (z1 > z2)
		return (1);

	return (0);
}

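/*
 * Illustrative note (not from the original source): zios sort
 * lexicographically by (objset, object, level, blkid); e.g. a bookmark of
 * (5, 7, 0, 9) orders before (5, 7, 1, 0). When all four fields tie, the
 * zio pointers themselves break the tie, so distinct zios never compare
 * equal, which tree insertion requires.
 */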
/*
 * ==========================================================================
 * Create the various types of I/O (read, write, free, etc)
 * ==========================================================================
 */
static zio_t *
zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    abd_t *data, uint64_t lsize, uint64_t psize, zio_done_func_t *done,
    void *private, zio_type_t type, zio_priority_t priority,
    zio_flag_t flags, vdev_t *vd, uint64_t offset,
    const zbookmark_phys_t *zb, enum zio_stage stage,
    enum zio_stage pipeline)
{
	zio_t *zio;

	IMPLY(type != ZIO_TYPE_TRIM, psize <= SPA_MAXBLOCKSIZE);
	ASSERT(P2PHASE(psize, SPA_MINBLOCKSIZE) == 0);
	ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);

	ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
	ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
	ASSERT(vd || stage == ZIO_STAGE_OPEN);

	IMPLY(lsize != psize, (flags & ZIO_FLAG_RAW_COMPRESS) != 0);

	zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
	memset(zio, 0, sizeof (zio_t));

	mutex_init(&zio->io_lock, NULL, MUTEX_NOLOCKDEP, NULL);
	cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);

	list_create(&zio->io_parent_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_parent_node));
	list_create(&zio->io_child_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_child_node));
	metaslab_trace_init(&zio->io_alloc_list);

	if (vd != NULL)
		zio->io_child_type = ZIO_CHILD_VDEV;
	else if (flags & ZIO_FLAG_GANG_CHILD)
		zio->io_child_type = ZIO_CHILD_GANG;
	else if (flags & ZIO_FLAG_DDT_CHILD)
		zio->io_child_type = ZIO_CHILD_DDT;
	else
		zio->io_child_type = ZIO_CHILD_LOGICAL;

	if (bp != NULL) {
		if (type != ZIO_TYPE_WRITE ||
		    zio->io_child_type == ZIO_CHILD_DDT) {
			zio->io_bp_copy = *bp;
			zio->io_bp = &zio->io_bp_copy;	/* so caller can free */
		} else {
			zio->io_bp = (blkptr_t *)bp;
		}
		zio->io_bp_orig = *bp;
		if (zio->io_child_type == ZIO_CHILD_LOGICAL)
			zio->io_logical = zio;
		if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
			pipeline |= ZIO_GANG_STAGES;
	}

	zio->io_spa = spa;
	zio->io_txg = txg;
	zio->io_done = done;
	zio->io_private = private;
	zio->io_type = type;
	zio->io_priority = priority;
	zio->io_vd = vd;
	zio->io_offset = offset;
	zio->io_orig_abd = zio->io_abd = data;
	zio->io_orig_size = zio->io_size = psize;
	zio->io_lsize = lsize;
	zio->io_orig_flags = zio->io_flags = flags;
	zio->io_orig_stage = zio->io_stage = stage;
	zio->io_orig_pipeline = zio->io_pipeline = pipeline;
	zio->io_pipeline_trace = ZIO_STAGE_OPEN;
	zio->io_allocator = ZIO_ALLOCATOR_NONE;

	zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY) ||
	    (pipeline & ZIO_STAGE_READY) == 0;
	zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);

	if (zb != NULL)
		zio->io_bookmark = *zb;

	if (pio != NULL) {
		zio->io_metaslab_class = pio->io_metaslab_class;
		if (zio->io_logical == NULL)
			zio->io_logical = pio->io_logical;
		if (zio->io_child_type == ZIO_CHILD_GANG)
			zio->io_gang_leader = pio->io_gang_leader;
		zio_add_child_first(pio, zio);
	}

	taskq_init_ent(&zio->io_tqent);

	return (zio);
}

void
zio_destroy(zio_t *zio)
{
	metaslab_trace_fini(&zio->io_alloc_list);
	list_destroy(&zio->io_parent_list);
	list_destroy(&zio->io_child_list);
	mutex_destroy(&zio->io_lock);
	cv_destroy(&zio->io_cv);
	kmem_cache_free(zio_cache, zio);
}

/*
 * A ZIO intended to sit between other ZIOs. It provides synchronization at
 * the READY and DONE pipeline stages and calls the respective callbacks.
 */
zio_t *
zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done,
    void *private, zio_flag_t flags)
{
	zio_t *zio;

	zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
	    ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
	    ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE);

	return (zio);
}

/*
 * A ZIO intended to be the root of a tree. Unlike a null ZIO, it does not
 * have a READY pipeline stage (it is ready on creation), so it should not be
 * used as a child of any ZIO that may need to wait for its grandchildren to
 * reach the READY stage (i.e. any other ZIO type).
 */
zio_t *
zio_root(spa_t *spa, zio_done_func_t *done, void *private, zio_flag_t flags)
{
	zio_t *zio;

	zio = zio_create(NULL, spa, 0, NULL, NULL, 0, 0, done, private,
	    ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, NULL, 0, NULL,
	    ZIO_STAGE_OPEN, ZIO_ROOT_PIPELINE);

	return (zio);
}

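/*
 * Usage sketch (illustrative, assumed caller): a root zio collects the
 * completion of several child I/Os so the caller can wait once:
 *
 *	zio_t *rio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
 *	zio_nowait(zio_read(rio, spa, bp, abd, size, NULL, NULL,
 *	    ZIO_PRIORITY_SYNC_READ, 0, zb));
 *	error = zio_wait(rio);
 */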
static int
zfs_blkptr_verify_log(spa_t *spa, const blkptr_t *bp,
    enum blk_verify_flag blk_verify, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	zfs_dbgmsg("bad blkptr at %px: "
	    "DVA[0]=%#llx/%#llx "
	    "DVA[1]=%#llx/%#llx "
	    "DVA[2]=%#llx/%#llx "
	    "prop=%#llx "
	    "pad=%#llx,%#llx "
	    "phys_birth=%#llx "
	    "birth=%#llx "
	    "fill=%#llx "
	    "cksum=%#llx/%#llx/%#llx/%#llx",
	    bp,
	    (long long)bp->blk_dva[0].dva_word[0],
	    (long long)bp->blk_dva[0].dva_word[1],
	    (long long)bp->blk_dva[1].dva_word[0],
	    (long long)bp->blk_dva[1].dva_word[1],
	    (long long)bp->blk_dva[2].dva_word[0],
	    (long long)bp->blk_dva[2].dva_word[1],
	    (long long)bp->blk_prop,
	    (long long)bp->blk_pad[0],
	    (long long)bp->blk_pad[1],
	    (long long)BP_GET_PHYSICAL_BIRTH(bp),
	    (long long)BP_GET_LOGICAL_BIRTH(bp),
	    (long long)bp->blk_fill,
	    (long long)bp->blk_cksum.zc_word[0],
	    (long long)bp->blk_cksum.zc_word[1],
	    (long long)bp->blk_cksum.zc_word[2],
	    (long long)bp->blk_cksum.zc_word[3]);
	switch (blk_verify) {
	case BLK_VERIFY_HALT:
		zfs_panic_recover("%s: %s", spa_name(spa), buf);
		break;
	case BLK_VERIFY_LOG:
		zfs_dbgmsg("%s: %s", spa_name(spa), buf);
		break;
	case BLK_VERIFY_ONLY:
		break;
	}

	return (1);
}

/*
 * Verify that the block pointer fields contain reasonable values. This
 * means it only contains known object types, checksum/compression
 * identifiers, block sizes within the maximum allowed limits, valid DVAs,
 * etc.
 *
 * If everything checks out, B_TRUE is returned. The blk_verify argument
 * controls the behavior when an invalid field is detected.
 *
 * Values for blk_verify_flag:
 *	BLK_VERIFY_ONLY: evaluate the block
 *	BLK_VERIFY_LOG: evaluate the block and log problems
 *	BLK_VERIFY_HALT: call zfs_panic_recover on error
 *
 * Values for blk_config_flag:
 *	BLK_CONFIG_HELD: caller holds SCL_VDEV for writer
 *	BLK_CONFIG_NEEDED: caller holds no config lock, SCL_VDEV will be
 *	    obtained for reader
 *	BLK_CONFIG_SKIP: skip checks which require SCL_VDEV, for better
 *	    performance
 */
boolean_t
zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp,
    enum blk_config_flag blk_config, enum blk_verify_flag blk_verify)
{
	int errors = 0;

	if (!DMU_OT_IS_VALID(BP_GET_TYPE(bp))) {
		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
		    "blkptr at %px has invalid TYPE %llu",
		    bp, (longlong_t)BP_GET_TYPE(bp));
	}
	if (BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS) {
		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
		    "blkptr at %px has invalid CHECKSUM %llu",
		    bp, (longlong_t)BP_GET_CHECKSUM(bp));
	}
	if (BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS) {
		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
		    "blkptr at %px has invalid COMPRESS %llu",
		    bp, (longlong_t)BP_GET_COMPRESS(bp));
	}
	if (BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE) {
		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
		    "blkptr at %px has invalid LSIZE %llu",
		    bp, (longlong_t)BP_GET_LSIZE(bp));
	}
	if (BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE) {
		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
		    "blkptr at %px has invalid PSIZE %llu",
		    bp, (longlong_t)BP_GET_PSIZE(bp));
	}

	if (BP_IS_EMBEDDED(bp)) {
		if (BPE_GET_ETYPE(bp) >= NUM_BP_EMBEDDED_TYPES) {
			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
			    "blkptr at %px has invalid ETYPE %llu",
			    bp, (longlong_t)BPE_GET_ETYPE(bp));
		}
	}

	/*
	 * Do not verify individual DVAs if the config is not trusted. This
	 * will be done once the zio is executed in vdev_mirror_map_alloc.
	 */
	if (!spa->spa_trust_config)
		return (errors == 0);

	switch (blk_config) {
	case BLK_CONFIG_HELD:
		ASSERT(spa_config_held(spa, SCL_VDEV, RW_WRITER));
		break;
	case BLK_CONFIG_NEEDED:
		spa_config_enter(spa, SCL_VDEV, bp, RW_READER);
		break;
	case BLK_CONFIG_SKIP:
		return (errors == 0);
	default:
		panic("invalid blk_config %u", blk_config);
	}

	/*
	 * Pool-specific checks.
	 *
	 * Note: it would be nice to verify that the logical birth
	 * and physical birth are not too large. However,
	 * spa_freeze() allows the birth time of log blocks (and
	 * dmu_sync()-ed blocks that are in the log) to be arbitrarily
	 * large.
	 */
	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
		const dva_t *dva = &bp->blk_dva[i];
		uint64_t vdevid = DVA_GET_VDEV(dva);

		if (vdevid >= spa->spa_root_vdev->vdev_children) {
			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
			    "blkptr at %px DVA %u has invalid VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
		if (vd == NULL) {
			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
			    "blkptr at %px DVA %u has invalid VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		if (vd->vdev_ops == &vdev_hole_ops) {
			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
			    "blkptr at %px DVA %u has hole VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		if (vd->vdev_ops == &vdev_missing_ops) {
			/*
			 * "missing" vdevs are valid during import, but we
			 * don't have their detailed info (e.g. asize), so
			 * we can't perform any more checks on them.
			 */
			continue;
		}
		uint64_t offset = DVA_GET_OFFSET(dva);
		uint64_t asize = DVA_GET_ASIZE(dva);
		if (DVA_GET_GANG(dva))
			asize = vdev_gang_header_asize(vd);
		if (offset + asize > vd->vdev_asize) {
			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
			    "blkptr at %px DVA %u has invalid OFFSET %llu",
			    bp, i, (longlong_t)offset);
		}
	}
	if (blk_config == BLK_CONFIG_NEEDED)
		spa_config_exit(spa, SCL_VDEV, bp);

	return (errors == 0);
}

boolean_t
zfs_dva_valid(spa_t *spa, const dva_t *dva, const blkptr_t *bp)
{
	(void) bp;
	uint64_t vdevid = DVA_GET_VDEV(dva);

	if (vdevid >= spa->spa_root_vdev->vdev_children)
		return (B_FALSE);

	vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
	if (vd == NULL)
		return (B_FALSE);

	if (vd->vdev_ops == &vdev_hole_ops)
		return (B_FALSE);

	if (vd->vdev_ops == &vdev_missing_ops) {
		return (B_FALSE);
	}

	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t asize = DVA_GET_ASIZE(dva);

	if (DVA_GET_GANG(dva))
		asize = vdev_gang_header_asize(vd);
	if (offset + asize > vd->vdev_asize)
		return (B_FALSE);

	return (B_TRUE);
}

zio_t *
zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
    abd_t *data, uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, zio_flag_t flags, const zbookmark_phys_t *zb)
{
	zio_t *zio;

	zio = zio_create(pio, spa, BP_GET_BIRTH(bp), bp,
	    data, size, size, done, private,
	    ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE);

	return (zio);
}

zio_t *
zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    abd_t *data, uint64_t lsize, uint64_t psize, const zio_prop_t *zp,
    zio_done_func_t *ready, zio_done_func_t *children_ready,
    zio_done_func_t *done, void *private, zio_priority_t priority,
    zio_flag_t flags, const zbookmark_phys_t *zb)
{
	zio_t *zio;

	ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF &&
	    zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS &&
	    zp->zp_compress >= ZIO_COMPRESS_OFF &&
	    zp->zp_compress < ZIO_COMPRESS_FUNCTIONS &&
	    DMU_OT_IS_VALID(zp->zp_type) &&
	    zp->zp_level < 32 &&
	    zp->zp_copies > 0 &&
	    zp->zp_copies <= spa_max_replication(spa));

	zio = zio_create(pio, spa, txg, bp, data, lsize, psize, done, private,
	    ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE);

	zio->io_ready = ready;
	zio->io_children_ready = children_ready;
	zio->io_prop = *zp;

	/*
	 * Data can be NULL if we are going to call zio_write_override() to
	 * provide the already-allocated BP. But we may need the data to
	 * verify a dedup hit (if requested). In this case, don't try to
	 * dedup (just take the already-allocated BP verbatim). Encrypted
	 * dedup blocks need data as well so we also disable dedup in this
	 * case.
	 */
	if (data == NULL &&
	    (zio->io_prop.zp_dedup_verify || zio->io_prop.zp_encrypt)) {
		zio->io_prop.zp_dedup = zio->io_prop.zp_dedup_verify = B_FALSE;
	}

	return (zio);
}

1293 */ 1294 if (data == NULL && 1295 (zio->io_prop.zp_dedup_verify || zio->io_prop.zp_encrypt)) { 1296 zio->io_prop.zp_dedup = zio->io_prop.zp_dedup_verify = B_FALSE; 1297 } 1298 1299 return (zio); 1300 } 1301 1302 zio_t * 1303 zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, abd_t *data, 1304 uint64_t size, zio_done_func_t *done, void *private, 1305 zio_priority_t priority, zio_flag_t flags, zbookmark_phys_t *zb) 1306 { 1307 zio_t *zio; 1308 1309 zio = zio_create(pio, spa, txg, bp, data, size, size, done, private, 1310 ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_IO_REWRITE, NULL, 0, zb, 1311 ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE); 1312 1313 return (zio); 1314 } 1315 1316 void 1317 zio_write_override(zio_t *zio, blkptr_t *bp, int copies, boolean_t nopwrite, 1318 boolean_t brtwrite) 1319 { 1320 ASSERT(zio->io_type == ZIO_TYPE_WRITE); 1321 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 1322 ASSERT(zio->io_stage == ZIO_STAGE_OPEN); 1323 ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa)); 1324 ASSERT(!brtwrite || !nopwrite); 1325 1326 /* 1327 * We must reset the io_prop to match the values that existed 1328 * when the bp was first written by dmu_sync() keeping in mind 1329 * that nopwrite and dedup are mutually exclusive. 1330 */ 1331 zio->io_prop.zp_dedup = nopwrite ? B_FALSE : zio->io_prop.zp_dedup; 1332 zio->io_prop.zp_nopwrite = nopwrite; 1333 zio->io_prop.zp_brtwrite = brtwrite; 1334 zio->io_prop.zp_copies = copies; 1335 zio->io_bp_override = bp; 1336 } 1337 1338 void 1339 zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp) 1340 { 1341 1342 (void) zfs_blkptr_verify(spa, bp, BLK_CONFIG_NEEDED, BLK_VERIFY_HALT); 1343 1344 /* 1345 * The check for EMBEDDED is a performance optimization. We 1346 * process the free here (by ignoring it) rather than 1347 * putting it on the list and then processing it in zio_free_sync(). 1348 */ 1349 if (BP_IS_EMBEDDED(bp)) 1350 return; 1351 1352 /* 1353 * Frees that are for the currently-syncing txg, are not going to be 1354 * deferred, and which will not need to do a read (i.e. not GANG or 1355 * DEDUP), can be processed immediately. Otherwise, put them on the 1356 * in-memory list for later processing. 1357 * 1358 * Note that we only defer frees after zfs_sync_pass_deferred_free 1359 * when the log space map feature is disabled. [see relevant comment 1360 * in spa_sync_iterate_to_convergence()] 1361 */ 1362 if (BP_IS_GANG(bp) || 1363 BP_GET_DEDUP(bp) || 1364 txg != spa->spa_syncing_txg || 1365 (spa_sync_pass(spa) >= zfs_sync_pass_deferred_free && 1366 !spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) || 1367 brt_maybe_exists(spa, bp)) { 1368 metaslab_check_free(spa, bp); 1369 bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp); 1370 } else { 1371 VERIFY3P(zio_free_sync(NULL, spa, txg, bp, 0), ==, NULL); 1372 } 1373 } 1374 1375 /* 1376 * To improve performance, this function may return NULL if we were able 1377 * to do the free immediately. This avoids the cost of creating a zio 1378 * (and linking it to the parent, etc). 
/*
 * To improve performance, this function may return NULL if we were able
 * to do the free immediately. This avoids the cost of creating a zio
 * (and linking it to the parent, etc).
 */
zio_t *
zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    zio_flag_t flags)
{
	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(spa_syncing_txg(spa) == txg);

	if (BP_IS_EMBEDDED(bp))
		return (NULL);

	metaslab_check_free(spa, bp);
	arc_freed(spa, bp);
	dsl_scan_freed(spa, bp);

	if (BP_IS_GANG(bp) ||
	    BP_GET_DEDUP(bp) ||
	    brt_maybe_exists(spa, bp)) {
		/*
		 * GANG, DEDUP and BRT blocks can induce a read (for the gang
		 * block header, the DDT or the BRT), so issue them
		 * asynchronously so that this thread is not tied up.
		 */
		enum zio_stage stage =
		    ZIO_FREE_PIPELINE | ZIO_STAGE_ISSUE_ASYNC;

		return (zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
		    BP_GET_PSIZE(bp), NULL, NULL,
		    ZIO_TYPE_FREE, ZIO_PRIORITY_NOW,
		    flags, NULL, 0, NULL, ZIO_STAGE_OPEN, stage));
	} else {
		metaslab_free(spa, bp, txg, B_FALSE);
		return (NULL);
	}
}

zio_t *
zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    zio_done_func_t *done, void *private, zio_flag_t flags)
{
	zio_t *zio;

	(void) zfs_blkptr_verify(spa, bp, (flags & ZIO_FLAG_CONFIG_WRITER) ?
	    BLK_CONFIG_HELD : BLK_CONFIG_NEEDED, BLK_VERIFY_HALT);

	if (BP_IS_EMBEDDED(bp))
		return (zio_null(pio, spa, NULL, NULL, NULL, 0));

	/*
	 * A claim is an allocation of a specific block. Claims are needed
	 * to support immediate writes in the intent log. The issue is that
	 * immediate writes contain committed data, but in a txg that was
	 * *not* committed. Upon opening the pool after an unclean shutdown,
	 * the intent log claims all blocks that contain immediate write data
	 * so that the SPA knows they're in use.
	 *
	 * All claims *must* be resolved in the first txg -- before the SPA
	 * starts allocating blocks -- so that nothing is allocated twice.
	 * If txg == 0 we just verify that the block is claimable.
	 */
	ASSERT3U(BP_GET_LOGICAL_BIRTH(&spa->spa_uberblock.ub_rootbp), <,
	    spa_min_claim_txg(spa));
	ASSERT(txg == spa_min_claim_txg(spa) || txg == 0);
	ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa));	/* zdb(8) */

	zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
	    BP_GET_PSIZE(bp), done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW,
	    flags, NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);
	ASSERT0(zio->io_queued_timestamp);

	return (zio);
}

zio_t *
zio_trim(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    zio_done_func_t *done, void *private, zio_priority_t priority,
    zio_flag_t flags, enum trim_flag trim_flags)
{
	zio_t *zio;

	ASSERT0(vd->vdev_children);
	ASSERT0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
	ASSERT0(P2PHASE(size, 1ULL << vd->vdev_ashift));
	ASSERT3U(size, !=, 0);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, NULL, size, size, done,
	    private, ZIO_TYPE_TRIM, priority, flags | ZIO_FLAG_PHYSICAL,
	    vd, offset, NULL, ZIO_STAGE_OPEN, ZIO_TRIM_PIPELINE);
	zio->io_trim_flags = trim_flags;

	return (zio);
}

zio_t *
zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    abd_t *data, int checksum, zio_done_func_t *done, void *private,
    zio_priority_t priority, zio_flag_t flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT(vd->vdev_children == 0);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
	    private, ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL, vd,
	    offset, NULL, ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	return (zio);
}

zio_t *
zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    abd_t *data, int checksum, zio_done_func_t *done, void *private,
    zio_priority_t priority, zio_flag_t flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT(vd->vdev_children == 0);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
	    private, ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL, vd,
	    offset, NULL, ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	if (zio_checksum_table[checksum].ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
		/*
		 * zec checksums are necessarily destructive -- they modify
		 * the end of the write buffer to hold the verifier/checksum.
		 * Therefore, we must make a local copy in case the data is
		 * being written to multiple places in parallel.
		 */
		abd_t *wbuf = abd_alloc_sametype(data, size);
		abd_copy(wbuf, data, size);

		zio_push_transform(zio, wbuf, size, size, NULL);
	}

	return (zio);
}

/*
 * Create a child I/O to do some work for us.
 */
zio_t *
zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
    abd_t *data, uint64_t size, int type, zio_priority_t priority,
    zio_flag_t flags, zio_done_func_t *done, void *private)
{
	enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE;
	zio_t *zio;

	/*
	 * vdev child I/Os do not propagate their error to the parent.
	 * Therefore, for correct operation the caller *must* check for
	 * and handle the error in the child i/o's done callback.
	 * The only exceptions are i/os that we don't care about
	 * (OPTIONAL or REPAIR).
	 */
	ASSERT((flags & ZIO_FLAG_OPTIONAL) || (flags & ZIO_FLAG_IO_REPAIR) ||
	    done != NULL);

	if (type == ZIO_TYPE_READ && bp != NULL) {
		/*
		 * If we have the bp, then the child should perform the
		 * checksum and the parent need not. This pushes error
		 * detection as close to the leaves as possible and
		 * eliminates redundant checksums in the interior nodes.
		 */
		pipeline |= ZIO_STAGE_CHECKSUM_VERIFY;
		pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
	}

	if (vd->vdev_ops->vdev_op_leaf) {
		ASSERT0(vd->vdev_children);
		offset += VDEV_LABEL_START_SIZE;
	}

	flags |= ZIO_VDEV_CHILD_FLAGS(pio);

	/*
	 * If we've decided to do a repair, the write is not speculative --
	 * even if the original read was.
	 */
	if (flags & ZIO_FLAG_IO_REPAIR)
		flags &= ~ZIO_FLAG_SPECULATIVE;

	/*
	 * If we're creating a child I/O that is not associated with a
	 * top-level vdev, then the child zio is not an allocating I/O.
	 * If this is a retried I/O then we ignore it since we will
	 * have already processed the original allocating I/O.
	 */
	if (flags & ZIO_FLAG_IO_ALLOCATING &&
	    (vd != vd->vdev_top || (flags & ZIO_FLAG_IO_RETRY))) {
		ASSERT(pio->io_metaslab_class != NULL);
		ASSERT(pio->io_metaslab_class->mc_alloc_throttle_enabled);
		ASSERT(type == ZIO_TYPE_WRITE);
		ASSERT(priority == ZIO_PRIORITY_ASYNC_WRITE);
		ASSERT(!(flags & ZIO_FLAG_IO_REPAIR));
		ASSERT(!(pio->io_flags & ZIO_FLAG_IO_REWRITE) ||
		    pio->io_child_type == ZIO_CHILD_GANG);

		flags &= ~ZIO_FLAG_IO_ALLOCATING;
	}

	zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size, size,
	    done, private, type, priority, flags, vd, offset,
	    &pio->io_bookmark, ZIO_STAGE_VDEV_IO_START >> 1, pipeline);
	ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);

	return (zio);
}

zio_t *
zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, abd_t *data, uint64_t size,
    zio_type_t type, zio_priority_t priority, zio_flag_t flags,
    zio_done_func_t *done, void *private)
{
	zio_t *zio;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	zio = zio_create(NULL, vd->vdev_spa, 0, NULL,
	    data, size, size, done, private, type, priority,
	    flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_DELEGATED,
	    vd, offset, NULL,
	    ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE);

	return (zio);
}

/*
 * Send a flush command to the given vdev. Unlike most zio creation functions,
 * the flush zios are issued immediately. You can wait on pio to pause until
 * the flushes complete.
 */
void
zio_flush(zio_t *pio, vdev_t *vd)
{
	const zio_flag_t flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE |
	    ZIO_FLAG_DONT_RETRY;

	if (vd->vdev_nowritecache)
		return;

	if (vd->vdev_children == 0) {
		zio_nowait(zio_create(pio, vd->vdev_spa, 0, NULL, NULL, 0, 0,
		    NULL, NULL, ZIO_TYPE_FLUSH, ZIO_PRIORITY_NOW, flags, vd, 0,
		    NULL, ZIO_STAGE_OPEN, ZIO_FLUSH_PIPELINE));
	} else {
		for (uint64_t c = 0; c < vd->vdev_children; c++)
			zio_flush(pio, vd->vdev_child[c]);
	}
}

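/*
 * Usage sketch (illustrative, assumed caller): since zio_flush() recurses
 * over interior vdevs, flushing the root vdev reaches every leaf, and the
 * parent zio gates completion:
 *
 *	zio_t *pio = zio_root(spa, NULL, NULL, 0);
 *	zio_flush(pio, spa->spa_root_vdev);
 *	(void) zio_wait(pio);
 */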
void
zio_shrink(zio_t *zio, uint64_t size)
{
	ASSERT3P(zio->io_executor, ==, NULL);
	ASSERT3U(zio->io_orig_size, ==, zio->io_size);
	ASSERT3U(size, <=, zio->io_size);

	/*
	 * We don't shrink for raidz because of problems with the
	 * reconstruction when reading back less than the block size.
	 * Note, BP_IS_RAIDZ() assumes no compression.
	 */
	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
	if (!BP_IS_RAIDZ(zio->io_bp)) {
		/* we are not doing a raw write */
		ASSERT3U(zio->io_size, ==, zio->io_lsize);
		zio->io_orig_size = zio->io_size = zio->io_lsize = size;
	}
}

/*
 * Round the provided allocation size up to a value that can be allocated
 * by at least some vdev(s) in the pool with minimal or no additional
 * padding, and without extra space usage on others.
 */
static uint64_t
zio_roundup_alloc_size(spa_t *spa, uint64_t size)
{
	if (size > spa->spa_min_alloc)
		return (roundup(size, spa->spa_gcd_alloc));
	return (spa->spa_min_alloc);
}

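/*
 * Worked example (illustrative, not from the original source): in a pool
 * mixing ashift-12 and ashift-13 vdevs, spa_min_alloc would be 4096 and
 * spa_gcd_alloc would be gcd(4096, 8192) = 4096, so a 5000-byte request
 * rounds up to 8192 while a 3000-byte request returns spa_min_alloc (4096).
 */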
/*
 * ==========================================================================
 * Prepare to read and write logical blocks
 * ==========================================================================
 */

static zio_t *
zio_read_bp_init(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	uint64_t psize =
	    BP_IS_EMBEDDED(bp) ? BPE_GET_PSIZE(bp) : BP_GET_PSIZE(bp);

	ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);

	if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
	    zio->io_child_type == ZIO_CHILD_LOGICAL &&
	    !(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
		zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
		    psize, psize, zio_decompress);
	}

	if (((BP_IS_PROTECTED(bp) && !(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) ||
	    BP_HAS_INDIRECT_MAC_CKSUM(bp)) &&
	    zio->io_child_type == ZIO_CHILD_LOGICAL) {
		zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
		    psize, psize, zio_decrypt);
	}

	if (BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA) {
		int psize = BPE_GET_PSIZE(bp);
		void *data = abd_borrow_buf(zio->io_abd, psize);

		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
		decode_embedded_bp_compressed(bp, data);
		abd_return_buf_copy(zio->io_abd, data, psize);
	} else {
		ASSERT(!BP_IS_EMBEDDED(bp));
	}

	if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL)
		zio->io_pipeline = ZIO_DDT_READ_PIPELINE;

	return (zio);
}

static zio_t *
zio_write_bp_init(zio_t *zio)
{
	if (!IO_IS_ALLOCATING(zio))
		return (zio);

	ASSERT(zio->io_child_type != ZIO_CHILD_DDT);

	if (zio->io_bp_override) {
		blkptr_t *bp = zio->io_bp;
		zio_prop_t *zp = &zio->io_prop;

		ASSERT(BP_GET_LOGICAL_BIRTH(bp) != zio->io_txg);

		*bp = *zio->io_bp_override;
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

		if (zp->zp_brtwrite)
			return (zio);

		ASSERT(!BP_GET_DEDUP(zio->io_bp_override));

		if (BP_IS_EMBEDDED(bp))
			return (zio);

		/*
		 * If we've been overridden and nopwrite is set then
		 * set the flag accordingly to indicate that a nopwrite
		 * has already occurred.
		 */
		if (!BP_IS_HOLE(bp) && zp->zp_nopwrite) {
			ASSERT(!zp->zp_dedup);
			ASSERT3U(BP_GET_CHECKSUM(bp), ==, zp->zp_checksum);
			zio->io_flags |= ZIO_FLAG_NOPWRITE;
			return (zio);
		}

		ASSERT(!zp->zp_nopwrite);

		if (BP_IS_HOLE(bp) || !zp->zp_dedup)
			return (zio);

		ASSERT((zio_checksum_table[zp->zp_checksum].ci_flags &
		    ZCHECKSUM_FLAG_DEDUP) || zp->zp_dedup_verify);

		if (BP_GET_CHECKSUM(bp) == zp->zp_checksum &&
		    !zp->zp_encrypt) {
			BP_SET_DEDUP(bp, 1);
			zio->io_pipeline |= ZIO_STAGE_DDT_WRITE;
			return (zio);
		}

		/*
		 * We were unable to handle this as an override bp, treat
		 * it as a regular write I/O.
		 */
		zio->io_bp_override = NULL;
		*bp = zio->io_bp_orig;
		zio->io_pipeline = zio->io_orig_pipeline;
	}

	return (zio);
}

1800 */ 1801 if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT | 1802 ZIO_CHILD_GANG_BIT, ZIO_WAIT_READY)) { 1803 return (NULL); 1804 } 1805 1806 if (!IO_IS_ALLOCATING(zio)) 1807 return (zio); 1808 1809 if (zio->io_children_ready != NULL) { 1810 /* 1811 * Now that all our children are ready, run the callback 1812 * associated with this zio in case it wants to modify the 1813 * data to be written. 1814 */ 1815 ASSERT3U(zp->zp_level, >, 0); 1816 zio->io_children_ready(zio); 1817 } 1818 1819 ASSERT(zio->io_child_type != ZIO_CHILD_DDT); 1820 ASSERT(zio->io_bp_override == NULL); 1821 1822 if (!BP_IS_HOLE(bp) && BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg) { 1823 /* 1824 * We're rewriting an existing block, which means we're 1825 * working on behalf of spa_sync(). For spa_sync() to 1826 * converge, it must eventually be the case that we don't 1827 * have to allocate new blocks. But compression changes 1828 * the blocksize, which forces a reallocate, and makes 1829 * convergence take longer. Therefore, after the first 1830 * few passes, stop compressing to ensure convergence. 1831 */ 1832 pass = spa_sync_pass(spa); 1833 1834 ASSERT(zio->io_txg == spa_syncing_txg(spa)); 1835 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 1836 ASSERT(!BP_GET_DEDUP(bp)); 1837 1838 if (pass >= zfs_sync_pass_dont_compress) 1839 compress = ZIO_COMPRESS_OFF; 1840 1841 /* Make sure someone doesn't change their mind on overwrites */ 1842 ASSERT(BP_IS_EMBEDDED(bp) || BP_IS_GANG(bp) || 1843 MIN(zp->zp_copies, spa_max_replication(spa)) 1844 == BP_GET_NDVAS(bp)); 1845 } 1846 1847 /* If it's a compressed write that is not raw, compress the buffer. */ 1848 if (compress != ZIO_COMPRESS_OFF && 1849 !(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) { 1850 void *cbuf = NULL; 1851 psize = zio_compress_data(compress, zio->io_abd, &cbuf, lsize, 1852 zp->zp_complevel); 1853 if (psize == 0) { 1854 compress = ZIO_COMPRESS_OFF; 1855 } else if (psize >= lsize) { 1856 compress = ZIO_COMPRESS_OFF; 1857 if (cbuf != NULL) 1858 zio_buf_free(cbuf, lsize); 1859 } else if (!zp->zp_dedup && !zp->zp_encrypt && 1860 psize <= BPE_PAYLOAD_SIZE && 1861 zp->zp_level == 0 && !DMU_OT_HAS_FILL(zp->zp_type) && 1862 spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA)) { 1863 encode_embedded_bp_compressed(bp, 1864 cbuf, compress, lsize, psize); 1865 BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA); 1866 BP_SET_TYPE(bp, zio->io_prop.zp_type); 1867 BP_SET_LEVEL(bp, zio->io_prop.zp_level); 1868 zio_buf_free(cbuf, lsize); 1869 BP_SET_LOGICAL_BIRTH(bp, zio->io_txg); 1870 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 1871 ASSERT(spa_feature_is_active(spa, 1872 SPA_FEATURE_EMBEDDED_DATA)); 1873 return (zio); 1874 } else { 1875 /* 1876 * Round compressed size up to the minimum allocation 1877 * size of the smallest-ashift device, and zero the 1878 * tail. This ensures that the compressed size of the 1879 * BP (and thus compressratio property) are correct, 1880 * in that we charge for the padding used to fill out 1881 * the last sector. 
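 *
 * E.g. (illustrative, for a pool whose only ashift is 12, so
 * spa_min_alloc == spa_gcd_alloc == 4096): a 5000-byte compressed
 * buffer is rounded up to 8192 bytes, bytes 5000..8191 are zeroed,
 * and BP_GET_PSIZE() will report 8192.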
1882 */ 1883 size_t rounded = (size_t)zio_roundup_alloc_size(spa, 1884 psize); 1885 if (rounded >= lsize) { 1886 compress = ZIO_COMPRESS_OFF; 1887 zio_buf_free(cbuf, lsize); 1888 psize = lsize; 1889 } else { 1890 abd_t *cdata = abd_get_from_buf(cbuf, lsize); 1891 abd_take_ownership_of_buf(cdata, B_TRUE); 1892 abd_zero_off(cdata, psize, rounded - psize); 1893 psize = rounded; 1894 zio_push_transform(zio, cdata, 1895 psize, lsize, NULL); 1896 } 1897 } 1898 1899 /* 1900 * We were unable to handle this as an override bp, treat 1901 * it as a regular write I/O. 1902 */ 1903 zio->io_bp_override = NULL; 1904 *bp = zio->io_bp_orig; 1905 zio->io_pipeline = zio->io_orig_pipeline; 1906 1907 } else if ((zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) != 0 && 1908 zp->zp_type == DMU_OT_DNODE) { 1909 /* 1910 * The DMU actually relies on the zio layer's compression 1911 * to free metadnode blocks that have had all contained 1912 * dnodes freed. As a result, even when doing a raw 1913 * receive, we must check whether the block can be compressed 1914 * to a hole. 1915 */ 1916 psize = zio_compress_data(ZIO_COMPRESS_EMPTY, 1917 zio->io_abd, NULL, lsize, zp->zp_complevel); 1918 if (psize == 0 || psize >= lsize) 1919 compress = ZIO_COMPRESS_OFF; 1920 } else if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS && 1921 !(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) { 1922 /* 1923 * If we are raw receiving an encrypted dataset we should not 1924 * take this codepath because it will change the on-disk block 1925 * and decryption will fail. 1926 */ 1927 size_t rounded = MIN((size_t)zio_roundup_alloc_size(spa, psize), 1928 lsize); 1929 1930 if (rounded != psize) { 1931 abd_t *cdata = abd_alloc_linear(rounded, B_TRUE); 1932 abd_zero_off(cdata, psize, rounded - psize); 1933 abd_copy_off(cdata, zio->io_abd, 0, 0, psize); 1934 psize = rounded; 1935 zio_push_transform(zio, cdata, 1936 psize, rounded, NULL); 1937 } 1938 } else { 1939 ASSERT3U(psize, !=, 0); 1940 } 1941 1942 /* 1943 * The final pass of spa_sync() must be all rewrites, but the first 1944 * few passes offer a trade-off: allocating blocks defers convergence, 1945 * but newly allocated blocks are sequential, so they can be written 1946 * to disk faster. Therefore, we allow the first few passes of 1947 * spa_sync() to allocate new blocks, but force rewrites after that. 1948 * There should only be a handful of blocks after pass 1 in any case. 
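 *
 * (With the default zfs_sync_pass_rewrite of 2: from pass 2 onward, a
 * block born in this txg whose physical size did not change is
 * rewritten in place, while a changed psize still forces the
 * BP_ZERO()/fresh-allocation path below.)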
1949 */ 1950 if (!BP_IS_HOLE(bp) && BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg && 1951 BP_GET_PSIZE(bp) == psize && 1952 pass >= zfs_sync_pass_rewrite) { 1953 VERIFY3U(psize, !=, 0); 1954 enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES; 1955 1956 zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages; 1957 zio->io_flags |= ZIO_FLAG_IO_REWRITE; 1958 } else { 1959 BP_ZERO(bp); 1960 zio->io_pipeline = ZIO_WRITE_PIPELINE; 1961 } 1962 1963 if (psize == 0) { 1964 if (BP_GET_LOGICAL_BIRTH(&zio->io_bp_orig) != 0 && 1965 spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) { 1966 BP_SET_LSIZE(bp, lsize); 1967 BP_SET_TYPE(bp, zp->zp_type); 1968 BP_SET_LEVEL(bp, zp->zp_level); 1969 BP_SET_BIRTH(bp, zio->io_txg, 0); 1970 } 1971 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 1972 } else { 1973 ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER); 1974 BP_SET_LSIZE(bp, lsize); 1975 BP_SET_TYPE(bp, zp->zp_type); 1976 BP_SET_LEVEL(bp, zp->zp_level); 1977 BP_SET_PSIZE(bp, psize); 1978 BP_SET_COMPRESS(bp, compress); 1979 BP_SET_CHECKSUM(bp, zp->zp_checksum); 1980 BP_SET_DEDUP(bp, zp->zp_dedup); 1981 BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER); 1982 if (zp->zp_dedup) { 1983 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 1984 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE)); 1985 ASSERT(!zp->zp_encrypt || 1986 DMU_OT_IS_ENCRYPTED(zp->zp_type)); 1987 zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE; 1988 } 1989 if (zp->zp_nopwrite) { 1990 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 1991 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE)); 1992 zio->io_pipeline |= ZIO_STAGE_NOP_WRITE; 1993 } 1994 } 1995 return (zio); 1996 } 1997 1998 static zio_t * 1999 zio_free_bp_init(zio_t *zio) 2000 { 2001 blkptr_t *bp = zio->io_bp; 2002 2003 if (zio->io_child_type == ZIO_CHILD_LOGICAL) { 2004 if (BP_GET_DEDUP(bp)) 2005 zio->io_pipeline = ZIO_DDT_FREE_PIPELINE; 2006 } 2007 2008 ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy); 2009 2010 return (zio); 2011 } 2012 2013 /* 2014 * ========================================================================== 2015 * Execute the I/O pipeline 2016 * ========================================================================== 2017 */ 2018 2019 static void 2020 zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline) 2021 { 2022 spa_t *spa = zio->io_spa; 2023 zio_type_t t = zio->io_type; 2024 int flags = (cutinline ? TQ_FRONT : 0); 2025 2026 /* 2027 * If we're a config writer or a probe, the normal issue and 2028 * interrupt threads may all be blocked waiting for the config lock. 2029 * In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL. 2030 */ 2031 if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE)) 2032 t = ZIO_TYPE_NULL; 2033 2034 /* 2035 * A similar issue exists for the L2ARC write thread until L2ARC 2.0. 2036 */ 2037 if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux) 2038 t = ZIO_TYPE_NULL; 2039 2040 /* 2041 * If this is a high priority I/O, then use the high priority taskq if 2042 * available. 2043 */ 2044 if ((zio->io_priority == ZIO_PRIORITY_NOW || 2045 zio->io_priority == ZIO_PRIORITY_SYNC_WRITE) && 2046 spa->spa_zio_taskq[t][q + 1].stqs_count != 0) 2047 q++; 2048 2049 ASSERT3U(q, <, ZIO_TASKQ_TYPES); 2050 2051 /* 2052 * NB: We are assuming that the zio can only be dispatched 2053 * to a single taskq at a time. It would be a grievous error 2054 * to dispatch the zio to another taskq at the same time. 
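 *
 * (The taskq_empty_ent() assertion below is the runtime check for
 * this assumption: the zio's embedded io_tqent must not already be
 * queued anywhere when we dispatch it.)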
2055 */ 2056 ASSERT(taskq_empty_ent(&zio->io_tqent)); 2057 spa_taskq_dispatch_ent(spa, t, q, zio_execute, zio, flags, 2058 &zio->io_tqent, zio); 2059 } 2060 2061 static boolean_t 2062 zio_taskq_member(zio_t *zio, zio_taskq_type_t q) 2063 { 2064 spa_t *spa = zio->io_spa; 2065 2066 taskq_t *tq = taskq_of_curthread(); 2067 2068 for (zio_type_t t = 0; t < ZIO_TYPES; t++) { 2069 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q]; 2070 uint_t i; 2071 for (i = 0; i < tqs->stqs_count; i++) { 2072 if (tqs->stqs_taskq[i] == tq) 2073 return (B_TRUE); 2074 } 2075 } 2076 2077 return (B_FALSE); 2078 } 2079 2080 static zio_t * 2081 zio_issue_async(zio_t *zio) 2082 { 2083 ASSERT((zio->io_type != ZIO_TYPE_WRITE) || ZIO_HAS_ALLOCATOR(zio)); 2084 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE); 2085 return (NULL); 2086 } 2087 2088 void 2089 zio_interrupt(void *zio) 2090 { 2091 zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE); 2092 } 2093 2094 void 2095 zio_delay_interrupt(zio_t *zio) 2096 { 2097 /* 2098 * The timeout_generic() function isn't defined in userspace, so 2099 * rather than trying to implement the function, the zio delay 2100 * functionality has been disabled for userspace builds. 2101 */ 2102 2103 #ifdef _KERNEL 2104 /* 2105 * If io_target_timestamp is zero, then no delay has been registered 2106 * for this IO, thus jump to the end of this function and "skip" the 2107 * delay; issuing it directly to the zio layer. 2108 */ 2109 if (zio->io_target_timestamp != 0) { 2110 hrtime_t now = gethrtime(); 2111 2112 if (now >= zio->io_target_timestamp) { 2113 /* 2114 * This IO has already taken longer than the target 2115 * delay to complete, so we don't want to delay it 2116 * any longer; we "miss" the delay and issue it 2117 * directly to the zio layer. This is likely due to 2118 * the target latency being set to a value less than 2119 * the underlying hardware can satisfy (e.g. delay 2120 * set to 1ms, but the disks take 10ms to complete an 2121 * IO request). 2122 */ 2123 2124 DTRACE_PROBE2(zio__delay__miss, zio_t *, zio, 2125 hrtime_t, now); 2126 2127 zio_interrupt(zio); 2128 } else { 2129 taskqid_t tid; 2130 hrtime_t diff = zio->io_target_timestamp - now; 2131 clock_t expire_at_tick = ddi_get_lbolt() + 2132 NSEC_TO_TICK(diff); 2133 2134 DTRACE_PROBE3(zio__delay__hit, zio_t *, zio, 2135 hrtime_t, now, hrtime_t, diff); 2136 2137 if (NSEC_TO_TICK(diff) == 0) { 2138 /* Our delay is less than a jiffy - just spin */ 2139 zfs_sleep_until(zio->io_target_timestamp); 2140 zio_interrupt(zio); 2141 } else { 2142 /* 2143 * Use taskq_dispatch_delay() in the place of 2144 * OpenZFS's timeout_generic(). 2145 */ 2146 tid = taskq_dispatch_delay(system_taskq, 2147 zio_interrupt, zio, TQ_NOSLEEP, 2148 expire_at_tick); 2149 if (tid == TASKQID_INVALID) { 2150 /* 2151 * Couldn't allocate a task. Just 2152 * finish the zio without a delay. 2153 */ 2154 zio_interrupt(zio); 2155 } 2156 } 2157 } 2158 return; 2159 } 2160 #endif 2161 DTRACE_PROBE1(zio__delay__skip, zio_t *, zio); 2162 zio_interrupt(zio); 2163 } 2164 2165 static void 2166 zio_deadman_impl(zio_t *pio, int ziodepth) 2167 { 2168 zio_t *cio, *cio_next; 2169 zio_link_t *zl = NULL; 2170 vdev_t *vd = pio->io_vd; 2171 2172 if (zio_deadman_log_all || (vd != NULL && vd->vdev_ops->vdev_op_leaf)) { 2173 vdev_queue_t *vq = vd ? 
&vd->vdev_queue : NULL; 2174 zbookmark_phys_t *zb = &pio->io_bookmark; 2175 uint64_t delta = gethrtime() - pio->io_timestamp; 2176 uint64_t failmode = spa_get_deadman_failmode(pio->io_spa); 2177 2178 zfs_dbgmsg("slow zio[%d]: zio=%px timestamp=%llu " 2179 "delta=%llu queued=%llu io=%llu " 2180 "path=%s " 2181 "last=%llu type=%d " 2182 "priority=%d flags=0x%llx stage=0x%x " 2183 "pipeline=0x%x pipeline-trace=0x%x " 2184 "objset=%llu object=%llu " 2185 "level=%llu blkid=%llu " 2186 "offset=%llu size=%llu " 2187 "error=%d", 2188 ziodepth, pio, pio->io_timestamp, 2189 (u_longlong_t)delta, pio->io_delta, pio->io_delay, 2190 vd ? vd->vdev_path : "NULL", 2191 vq ? vq->vq_io_complete_ts : 0, pio->io_type, 2192 pio->io_priority, (u_longlong_t)pio->io_flags, 2193 pio->io_stage, pio->io_pipeline, pio->io_pipeline_trace, 2194 (u_longlong_t)zb->zb_objset, (u_longlong_t)zb->zb_object, 2195 (u_longlong_t)zb->zb_level, (u_longlong_t)zb->zb_blkid, 2196 (u_longlong_t)pio->io_offset, (u_longlong_t)pio->io_size, 2197 pio->io_error); 2198 (void) zfs_ereport_post(FM_EREPORT_ZFS_DEADMAN, 2199 pio->io_spa, vd, zb, pio, 0); 2200 2201 if (failmode == ZIO_FAILURE_MODE_CONTINUE && 2202 taskq_empty_ent(&pio->io_tqent)) { 2203 zio_interrupt(pio); 2204 } 2205 } 2206 2207 mutex_enter(&pio->io_lock); 2208 for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) { 2209 cio_next = zio_walk_children(pio, &zl); 2210 zio_deadman_impl(cio, ziodepth + 1); 2211 } 2212 mutex_exit(&pio->io_lock); 2213 } 2214 2215 /* 2216 * Log the critical information describing this zio and all of its children 2217 * using the zfs_dbgmsg() interface then post deadman event for the ZED. 2218 */ 2219 void 2220 zio_deadman(zio_t *pio, const char *tag) 2221 { 2222 spa_t *spa = pio->io_spa; 2223 char *name = spa_name(spa); 2224 2225 if (!zfs_deadman_enabled || spa_suspended(spa)) 2226 return; 2227 2228 zio_deadman_impl(pio, 0); 2229 2230 switch (spa_get_deadman_failmode(spa)) { 2231 case ZIO_FAILURE_MODE_WAIT: 2232 zfs_dbgmsg("%s waiting for hung I/O to pool '%s'", tag, name); 2233 break; 2234 2235 case ZIO_FAILURE_MODE_CONTINUE: 2236 zfs_dbgmsg("%s restarting hung I/O for pool '%s'", tag, name); 2237 break; 2238 2239 case ZIO_FAILURE_MODE_PANIC: 2240 fm_panic("%s determined I/O to pool '%s' is hung.", tag, name); 2241 break; 2242 } 2243 } 2244 2245 /* 2246 * Execute the I/O pipeline until one of the following occurs: 2247 * (1) the I/O completes; (2) the pipeline stalls waiting for 2248 * dependent child I/Os; (3) the I/O issues, so we're waiting 2249 * for an I/O completion interrupt; (4) the I/O is delegated by 2250 * vdev-level caching or aggregation; (5) the I/O is deferred 2251 * due to vdev-level queueing; (6) the I/O is handed off to 2252 * another thread. In all cases, the pipeline stops whenever 2253 * there's no CPU work; it never burns a thread in cv_wait_io(). 2254 * 2255 * There's no locking on io_stage because there's no legitimate way 2256 * for multiple threads to be attempting to process the same I/O. 2257 */ 2258 static zio_pipe_stage_t *zio_pipeline[]; 2259 2260 /* 2261 * zio_execute() is a wrapper around the static function 2262 * __zio_execute() so that we can force __zio_execute() to be 2263 * inlined. This reduces stack overhead which is important 2264 * because __zio_execute() is called recursively in several zio 2265 * code paths. zio_execute() itself cannot be inlined because 2266 * it is externally visible. 
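 *
 * (For illustration, a minimal sketch of the convention: taskq
 * workers always enter through the wrapper, e.g.
 *
 *	spa_taskq_dispatch_ent(spa, t, q, zio_execute, zio, flags,
 *	    &zio->io_tqent, zio);
 *
 * while callers that already own a suitably deep stack, such as
 * zio_wait() and zio_nowait(), call __zio_execute() directly.)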
2267 */ 2268 void 2269 zio_execute(void *zio) 2270 { 2271 fstrans_cookie_t cookie; 2272 2273 cookie = spl_fstrans_mark(); 2274 __zio_execute(zio); 2275 spl_fstrans_unmark(cookie); 2276 } 2277 2278 /* 2279 * Used to determine if in the current context the stack is sized large 2280 * enough to allow zio_execute() to be called recursively. A minimum 2281 * stack size of 16K is required to avoid needing to re-dispatch the zio. 2282 */ 2283 static boolean_t 2284 zio_execute_stack_check(zio_t *zio) 2285 { 2286 #if !defined(HAVE_LARGE_STACKS) 2287 dsl_pool_t *dp = spa_get_dsl(zio->io_spa); 2288 2289 /* Executing in txg_sync_thread() context. */ 2290 if (dp && curthread == dp->dp_tx.tx_sync_thread) 2291 return (B_TRUE); 2292 2293 /* Pool initialization outside of zio_taskq context. */ 2294 if (dp && spa_is_initializing(dp->dp_spa) && 2295 !zio_taskq_member(zio, ZIO_TASKQ_ISSUE) && 2296 !zio_taskq_member(zio, ZIO_TASKQ_ISSUE_HIGH)) 2297 return (B_TRUE); 2298 #else 2299 (void) zio; 2300 #endif /* HAVE_LARGE_STACKS */ 2301 2302 return (B_FALSE); 2303 } 2304 2305 __attribute__((always_inline)) 2306 static inline void 2307 __zio_execute(zio_t *zio) 2308 { 2309 ASSERT3U(zio->io_queued_timestamp, >, 0); 2310 2311 while (zio->io_stage < ZIO_STAGE_DONE) { 2312 enum zio_stage pipeline = zio->io_pipeline; 2313 enum zio_stage stage = zio->io_stage; 2314 2315 zio->io_executor = curthread; 2316 2317 ASSERT(!MUTEX_HELD(&zio->io_lock)); 2318 ASSERT(ISP2(stage)); 2319 ASSERT(zio->io_stall == NULL); 2320 2321 do { 2322 stage <<= 1; 2323 } while ((stage & pipeline) == 0); 2324 2325 ASSERT(stage <= ZIO_STAGE_DONE); 2326 2327 /* 2328 * If we are in interrupt context and this pipeline stage 2329 * will grab a config lock that is held across I/O, 2330 * or may wait for an I/O that needs an interrupt thread 2331 * to complete, issue async to avoid deadlock. 2332 * 2333 * For VDEV_IO_START, we cut in line so that the io will 2334 * be sent to disk promptly. 2335 */ 2336 if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL && 2337 zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) { 2338 boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ? 2339 zio_requeue_io_start_cut_in_line : B_FALSE; 2340 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut); 2341 return; 2342 } 2343 2344 /* 2345 * If the current context doesn't have large enough stacks 2346 * the zio must be issued asynchronously to prevent overflow. 2347 */ 2348 if (zio_execute_stack_check(zio)) { 2349 boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ? 2350 zio_requeue_io_start_cut_in_line : B_FALSE; 2351 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut); 2352 return; 2353 } 2354 2355 zio->io_stage = stage; 2356 zio->io_pipeline_trace |= zio->io_stage; 2357 2358 /* 2359 * The zio pipeline stage returns the next zio to execute 2360 * (typically the same as this one), or NULL if we should 2361 * stop. 2362 */ 2363 zio = zio_pipeline[highbit64(stage) - 1](zio); 2364 2365 if (zio == NULL) 2366 return; 2367 } 2368 } 2369 2370 2371 /* 2372 * ========================================================================== 2373 * Initiate I/O, either sync or async 2374 * ========================================================================== 2375 */ 2376 int 2377 zio_wait(zio_t *zio) 2378 { 2379 /* 2380 * Some routines, like zio_free_sync(), may return a NULL zio 2381 * to avoid the performance overhead of creating and then destroying 2382 * an unneeded zio. For the callers' simplicity, we accept a NULL 2383 * zio and ignore it. 
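 *
 * (Illustrative usage: a caller may safely write
 *
 *	error = zio_wait(zio_free_sync(NULL, spa, txg, bp, 0));
 *
 * without a NULL check first; the same convention applies to
 * zio_nowait() below.)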
2384 */ 2385 if (zio == NULL) 2386 return (0); 2387 2388 long timeout = MSEC_TO_TICK(zfs_deadman_ziotime_ms); 2389 int error; 2390 2391 ASSERT3S(zio->io_stage, ==, ZIO_STAGE_OPEN); 2392 ASSERT3P(zio->io_executor, ==, NULL); 2393 2394 zio->io_waiter = curthread; 2395 ASSERT0(zio->io_queued_timestamp); 2396 zio->io_queued_timestamp = gethrtime(); 2397 2398 if (zio->io_type == ZIO_TYPE_WRITE) { 2399 spa_select_allocator(zio); 2400 } 2401 __zio_execute(zio); 2402 2403 mutex_enter(&zio->io_lock); 2404 while (zio->io_executor != NULL) { 2405 error = cv_timedwait_io(&zio->io_cv, &zio->io_lock, 2406 ddi_get_lbolt() + timeout); 2407 2408 if (zfs_deadman_enabled && error == -1 && 2409 gethrtime() - zio->io_queued_timestamp > 2410 spa_deadman_ziotime(zio->io_spa)) { 2411 mutex_exit(&zio->io_lock); 2412 timeout = MSEC_TO_TICK(zfs_deadman_checktime_ms); 2413 zio_deadman(zio, FTAG); 2414 mutex_enter(&zio->io_lock); 2415 } 2416 } 2417 mutex_exit(&zio->io_lock); 2418 2419 error = zio->io_error; 2420 zio_destroy(zio); 2421 2422 return (error); 2423 } 2424 2425 void 2426 zio_nowait(zio_t *zio) 2427 { 2428 /* 2429 * See comment in zio_wait(). 2430 */ 2431 if (zio == NULL) 2432 return; 2433 2434 ASSERT3P(zio->io_executor, ==, NULL); 2435 2436 if (zio->io_child_type == ZIO_CHILD_LOGICAL && 2437 list_is_empty(&zio->io_parent_list)) { 2438 zio_t *pio; 2439 2440 /* 2441 * This is a logical async I/O with no parent to wait for it. 2442 * We add it to the spa_async_zio_root "Godfather" I/O, which 2443 * will ensure it completes prior to unloading the pool. 2444 */ 2445 spa_t *spa = zio->io_spa; 2446 pio = spa->spa_async_zio_root[CPU_SEQID_UNSTABLE]; 2447 2448 zio_add_child(pio, zio); 2449 } 2450 2451 ASSERT0(zio->io_queued_timestamp); 2452 zio->io_queued_timestamp = gethrtime(); 2453 if (zio->io_type == ZIO_TYPE_WRITE) { 2454 spa_select_allocator(zio); 2455 } 2456 __zio_execute(zio); 2457 } 2458 2459 /* 2460 * ========================================================================== 2461 * Reexecute, cancel, or suspend/resume failed I/O 2462 * ========================================================================== 2463 */ 2464 2465 static void 2466 zio_reexecute(void *arg) 2467 { 2468 zio_t *pio = arg; 2469 zio_t *cio, *cio_next, *gio; 2470 2471 ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL); 2472 ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN); 2473 ASSERT(pio->io_gang_leader == NULL); 2474 ASSERT(pio->io_gang_tree == NULL); 2475 2476 mutex_enter(&pio->io_lock); 2477 pio->io_flags = pio->io_orig_flags; 2478 pio->io_stage = pio->io_orig_stage; 2479 pio->io_pipeline = pio->io_orig_pipeline; 2480 pio->io_reexecute = 0; 2481 pio->io_flags |= ZIO_FLAG_REEXECUTED; 2482 pio->io_pipeline_trace = 0; 2483 pio->io_error = 0; 2484 pio->io_state[ZIO_WAIT_READY] = (pio->io_stage >= ZIO_STAGE_READY) || 2485 (pio->io_pipeline & ZIO_STAGE_READY) == 0; 2486 pio->io_state[ZIO_WAIT_DONE] = (pio->io_stage >= ZIO_STAGE_DONE); 2487 zio_link_t *zl = NULL; 2488 while ((gio = zio_walk_parents(pio, &zl)) != NULL) { 2489 for (int w = 0; w < ZIO_WAIT_TYPES; w++) { 2490 gio->io_children[pio->io_child_type][w] += 2491 !pio->io_state[w]; 2492 } 2493 } 2494 for (int c = 0; c < ZIO_CHILD_TYPES; c++) 2495 pio->io_child_error[c] = 0; 2496 2497 if (IO_IS_ALLOCATING(pio)) 2498 BP_ZERO(pio->io_bp); 2499 2500 /* 2501 * As we reexecute pio's children, new children could be created. 2502 * New children go to the head of pio's io_child_list, however, 2503 * so we will (correctly) not reexecute them.
The key is that 2504 * the remainder of pio's io_child_list, from 'cio_next' onward, 2505 * cannot be affected by any side effects of reexecuting 'cio'. 2506 */ 2507 zl = NULL; 2508 for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) { 2509 cio_next = zio_walk_children(pio, &zl); 2510 mutex_exit(&pio->io_lock); 2511 zio_reexecute(cio); 2512 mutex_enter(&pio->io_lock); 2513 } 2514 mutex_exit(&pio->io_lock); 2515 2516 /* 2517 * Now that all children have been reexecuted, execute the parent. 2518 * We don't reexecute "The Godfather" I/O here as it's the 2519 * responsibility of the caller to wait on it. 2520 */ 2521 if (!(pio->io_flags & ZIO_FLAG_GODFATHER)) { 2522 pio->io_queued_timestamp = gethrtime(); 2523 __zio_execute(pio); 2524 } 2525 } 2526 2527 void 2528 zio_suspend(spa_t *spa, zio_t *zio, zio_suspend_reason_t reason) 2529 { 2530 if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC) 2531 fm_panic("Pool '%s' has encountered an uncorrectable I/O " 2532 "failure and the failure mode property for this pool " 2533 "is set to panic.", spa_name(spa)); 2534 2535 cmn_err(CE_WARN, "Pool '%s' has encountered an uncorrectable I/O " 2536 "failure and has been suspended.\n", spa_name(spa)); 2537 2538 (void) zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL, 2539 NULL, NULL, 0); 2540 2541 mutex_enter(&spa->spa_suspend_lock); 2542 2543 if (spa->spa_suspend_zio_root == NULL) 2544 spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL, 2545 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 2546 ZIO_FLAG_GODFATHER); 2547 2548 spa->spa_suspended = reason; 2549 2550 if (zio != NULL) { 2551 ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER)); 2552 ASSERT(zio != spa->spa_suspend_zio_root); 2553 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2554 ASSERT(zio_unique_parent(zio) == NULL); 2555 ASSERT(zio->io_stage == ZIO_STAGE_DONE); 2556 zio_add_child(spa->spa_suspend_zio_root, zio); 2557 } 2558 2559 mutex_exit(&spa->spa_suspend_lock); 2560 } 2561 2562 int 2563 zio_resume(spa_t *spa) 2564 { 2565 zio_t *pio; 2566 2567 /* 2568 * Reexecute all previously suspended i/o. 2569 */ 2570 mutex_enter(&spa->spa_suspend_lock); 2571 spa->spa_suspended = ZIO_SUSPEND_NONE; 2572 cv_broadcast(&spa->spa_suspend_cv); 2573 pio = spa->spa_suspend_zio_root; 2574 spa->spa_suspend_zio_root = NULL; 2575 mutex_exit(&spa->spa_suspend_lock); 2576 2577 if (pio == NULL) 2578 return (0); 2579 2580 zio_reexecute(pio); 2581 return (zio_wait(pio)); 2582 } 2583 2584 void 2585 zio_resume_wait(spa_t *spa) 2586 { 2587 mutex_enter(&spa->spa_suspend_lock); 2588 while (spa_suspended(spa)) 2589 cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock); 2590 mutex_exit(&spa->spa_suspend_lock); 2591 } 2592 2593 /* 2594 * ========================================================================== 2595 * Gang blocks. 2596 * 2597 * A gang block is a collection of small blocks that looks to the DMU 2598 * like one large block. When zio_dva_allocate() cannot find a block 2599 * of the requested size, due to either severe fragmentation or the pool 2600 * being nearly full, it calls zio_write_gang_block() to construct the 2601 * block from smaller fragments. 2602 * 2603 * A gang block consists of a gang header (zio_gbh_phys_t) and up to 2604 * three (SPA_GBH_NBLKPTRS) gang members. The gang header is just like 2605 * an indirect block: it's an array of block pointers. It consumes 2606 * only one sector and hence is allocatable regardless of fragmentation. 2607 * The gang header's bps point to its gang members, which hold the data. 
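 *
 * For example (illustrative): a 192K write that cannot be satisfied
 * by a single 192K allocation may become a gang header whose three
 * bps point to three 64K data blocks; a read reconstructs the
 * logical block by walking the bps in order and concatenating them.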
2608 * 2609 * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg> 2610 * as the verifier to ensure uniqueness of the SHA256 checksum. 2611 * Critically, the gang block bp's blk_cksum is the checksum of the data, 2612 * not the gang header. This ensures that data block signatures (needed for 2613 * deduplication) are independent of how the block is physically stored. 2614 * 2615 * Gang blocks can be nested: a gang member may itself be a gang block. 2616 * Thus every gang block is a tree in which root and all interior nodes are 2617 * gang headers, and the leaves are normal blocks that contain user data. 2618 * The root of the gang tree is called the gang leader. 2619 * 2620 * To perform any operation (read, rewrite, free, claim) on a gang block, 2621 * zio_gang_assemble() first assembles the gang tree (minus data leaves) 2622 * in the io_gang_tree field of the original logical i/o by recursively 2623 * reading the gang leader and all gang headers below it. This yields 2624 * an in-core tree containing the contents of every gang header and the 2625 * bps for every constituent of the gang block. 2626 * 2627 * With the gang tree now assembled, zio_gang_issue() just walks the gang tree 2628 * and invokes a callback on each bp. To free a gang block, zio_gang_issue() 2629 * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp. 2630 * zio_claim_gang() provides a similarly trivial wrapper for zio_claim(). 2631 * zio_read_gang() is a wrapper around zio_read() that omits reading gang 2632 * headers, since we already have those in io_gang_tree. zio_rewrite_gang() 2633 * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite() 2634 * of the gang header plus zio_checksum_compute() of the data to update the 2635 * gang header's blk_cksum as described above. 2636 * 2637 * The two-phase assemble/issue model solves the problem of partial failure -- 2638 * what if you'd freed part of a gang block but then couldn't read the 2639 * gang header for another part? Assembling the entire gang tree first 2640 * ensures that all the necessary gang header I/O has succeeded before 2641 * starting the actual work of free, claim, or write. Once the gang tree 2642 * is assembled, free and claim are in-memory operations that cannot fail. 2643 * 2644 * In the event that a gang write fails, zio_dva_unallocate() walks the 2645 * gang tree to immediately free (i.e. insert back into the space map) 2646 * everything we've allocated. This ensures that we don't get ENOSPC 2647 * errors during repeated suspend/resume cycles due to a flaky device. 2648 * 2649 * Gang rewrites only happen during sync-to-convergence. If we can't assemble 2650 * the gang tree, we won't modify the block, so we can safely defer the free 2651 * (knowing that the block is still intact). If we *can* assemble the gang 2652 * tree, then even if some of the rewrites fail, zio_dva_unallocate() will free 2653 * each constituent bp and we can allocate a new block on the next sync pass. 2654 * 2655 * In all cases, the gang tree allows complete recovery from partial failure. 
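 *
 * (Operationally, the per-io_type behavior lives in the
 * zio_gang_issue_func[] table below: assemble the tree once, then
 * map one callback over every bp in it.)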
2656 * ========================================================================== 2657 */ 2658 2659 static void 2660 zio_gang_issue_func_done(zio_t *zio) 2661 { 2662 abd_free(zio->io_abd); 2663 } 2664 2665 static zio_t * 2666 zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 2667 uint64_t offset) 2668 { 2669 if (gn != NULL) 2670 return (pio); 2671 2672 return (zio_read(pio, pio->io_spa, bp, abd_get_offset(data, offset), 2673 BP_GET_PSIZE(bp), zio_gang_issue_func_done, 2674 NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), 2675 &pio->io_bookmark)); 2676 } 2677 2678 static zio_t * 2679 zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 2680 uint64_t offset) 2681 { 2682 zio_t *zio; 2683 2684 if (gn != NULL) { 2685 abd_t *gbh_abd = 2686 abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE); 2687 zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp, 2688 gbh_abd, SPA_GANGBLOCKSIZE, zio_gang_issue_func_done, NULL, 2689 pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), 2690 &pio->io_bookmark); 2691 /* 2692 * As we rewrite each gang header, the pipeline will compute 2693 * a new gang block header checksum for it; but no one will 2694 * compute a new data checksum, so we do that here. The one 2695 * exception is the gang leader: the pipeline already computed 2696 * its data checksum because that stage precedes gang assembly. 2697 * (Presently, nothing actually uses interior data checksums; 2698 * this is just good hygiene.) 2699 */ 2700 if (gn != pio->io_gang_leader->io_gang_tree) { 2701 abd_t *buf = abd_get_offset(data, offset); 2702 2703 zio_checksum_compute(zio, BP_GET_CHECKSUM(bp), 2704 buf, BP_GET_PSIZE(bp)); 2705 2706 abd_free(buf); 2707 } 2708 /* 2709 * If we are here to damage data for testing purposes, 2710 * leave the GBH alone so that we can detect the damage. 
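 * (Clearing ZIO_VDEV_IO_STAGES below makes the header rewrite a
 * no-op at the vdev layer, so the injected damage is preserved on
 * disk for the test to detect later.)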
2711 */ 2712 if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE) 2713 zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES; 2714 } else { 2715 zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp, 2716 abd_get_offset(data, offset), BP_GET_PSIZE(bp), 2717 zio_gang_issue_func_done, NULL, pio->io_priority, 2718 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 2719 } 2720 2721 return (zio); 2722 } 2723 2724 static zio_t * 2725 zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 2726 uint64_t offset) 2727 { 2728 (void) gn, (void) data, (void) offset; 2729 2730 zio_t *zio = zio_free_sync(pio, pio->io_spa, pio->io_txg, bp, 2731 ZIO_GANG_CHILD_FLAGS(pio)); 2732 if (zio == NULL) { 2733 zio = zio_null(pio, pio->io_spa, 2734 NULL, NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio)); 2735 } 2736 return (zio); 2737 } 2738 2739 static zio_t * 2740 zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 2741 uint64_t offset) 2742 { 2743 (void) gn, (void) data, (void) offset; 2744 return (zio_claim(pio, pio->io_spa, pio->io_txg, bp, 2745 NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio))); 2746 } 2747 2748 static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = { 2749 NULL, 2750 zio_read_gang, 2751 zio_rewrite_gang, 2752 zio_free_gang, 2753 zio_claim_gang, 2754 NULL 2755 }; 2756 2757 static void zio_gang_tree_assemble_done(zio_t *zio); 2758 2759 static zio_gang_node_t * 2760 zio_gang_node_alloc(zio_gang_node_t **gnpp) 2761 { 2762 zio_gang_node_t *gn; 2763 2764 ASSERT(*gnpp == NULL); 2765 2766 gn = kmem_zalloc(sizeof (*gn), KM_SLEEP); 2767 gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE); 2768 *gnpp = gn; 2769 2770 return (gn); 2771 } 2772 2773 static void 2774 zio_gang_node_free(zio_gang_node_t **gnpp) 2775 { 2776 zio_gang_node_t *gn = *gnpp; 2777 2778 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) 2779 ASSERT(gn->gn_child[g] == NULL); 2780 2781 zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE); 2782 kmem_free(gn, sizeof (*gn)); 2783 *gnpp = NULL; 2784 } 2785 2786 static void 2787 zio_gang_tree_free(zio_gang_node_t **gnpp) 2788 { 2789 zio_gang_node_t *gn = *gnpp; 2790 2791 if (gn == NULL) 2792 return; 2793 2794 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) 2795 zio_gang_tree_free(&gn->gn_child[g]); 2796 2797 zio_gang_node_free(gnpp); 2798 } 2799 2800 static void 2801 zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp) 2802 { 2803 zio_gang_node_t *gn = zio_gang_node_alloc(gnpp); 2804 abd_t *gbh_abd = abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE); 2805 2806 ASSERT(gio->io_gang_leader == gio); 2807 ASSERT(BP_IS_GANG(bp)); 2808 2809 zio_nowait(zio_read(gio, gio->io_spa, bp, gbh_abd, SPA_GANGBLOCKSIZE, 2810 zio_gang_tree_assemble_done, gn, gio->io_priority, 2811 ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark)); 2812 } 2813 2814 static void 2815 zio_gang_tree_assemble_done(zio_t *zio) 2816 { 2817 zio_t *gio = zio->io_gang_leader; 2818 zio_gang_node_t *gn = zio->io_private; 2819 blkptr_t *bp = zio->io_bp; 2820 2821 ASSERT(gio == zio_unique_parent(zio)); 2822 ASSERT(list_is_empty(&zio->io_child_list)); 2823 2824 if (zio->io_error) 2825 return; 2826 2827 /* this ABD was created from a linear buf in zio_gang_tree_assemble */ 2828 if (BP_SHOULD_BYTESWAP(bp)) 2829 byteswap_uint64_array(abd_to_buf(zio->io_abd), zio->io_size); 2830 2831 ASSERT3P(abd_to_buf(zio->io_abd), ==, gn->gn_gbh); 2832 ASSERT(zio->io_size == SPA_GANGBLOCKSIZE); 2833 ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC); 2834 2835 abd_free(zio->io_abd); 2836 2837 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 2838 blkptr_t *gbp = 
&gn->gn_gbh->zg_blkptr[g]; 2839 if (!BP_IS_GANG(gbp)) 2840 continue; 2841 zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]); 2842 } 2843 } 2844 2845 static void 2846 zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, abd_t *data, 2847 uint64_t offset) 2848 { 2849 zio_t *gio = pio->io_gang_leader; 2850 zio_t *zio; 2851 2852 ASSERT(BP_IS_GANG(bp) == !!gn); 2853 ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp)); 2854 ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree); 2855 2856 /* 2857 * If you're a gang header, your data is in gn->gn_gbh. 2858 * If you're a gang member, your data is in 'data' and gn == NULL. 2859 */ 2860 zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data, offset); 2861 2862 if (gn != NULL) { 2863 ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC); 2864 2865 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 2866 blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g]; 2867 if (BP_IS_HOLE(gbp)) 2868 continue; 2869 zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data, 2870 offset); 2871 offset += BP_GET_PSIZE(gbp); 2872 } 2873 } 2874 2875 if (gn == gio->io_gang_tree) 2876 ASSERT3U(gio->io_size, ==, offset); 2877 2878 if (zio != pio) 2879 zio_nowait(zio); 2880 } 2881 2882 static zio_t * 2883 zio_gang_assemble(zio_t *zio) 2884 { 2885 blkptr_t *bp = zio->io_bp; 2886 2887 ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL); 2888 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 2889 2890 zio->io_gang_leader = zio; 2891 2892 zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree); 2893 2894 return (zio); 2895 } 2896 2897 static zio_t * 2898 zio_gang_issue(zio_t *zio) 2899 { 2900 blkptr_t *bp = zio->io_bp; 2901 2902 if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT, ZIO_WAIT_DONE)) { 2903 return (NULL); 2904 } 2905 2906 ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio); 2907 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 2908 2909 if (zio->io_child_error[ZIO_CHILD_GANG] == 0) 2910 zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_abd, 2911 0); 2912 else 2913 zio_gang_tree_free(&zio->io_gang_tree); 2914 2915 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 2916 2917 return (zio); 2918 } 2919 2920 static void 2921 zio_gang_inherit_allocator(zio_t *pio, zio_t *cio) 2922 { 2923 cio->io_allocator = pio->io_allocator; 2924 cio->io_wr_iss_tq = pio->io_wr_iss_tq; 2925 } 2926 2927 static void 2928 zio_write_gang_member_ready(zio_t *zio) 2929 { 2930 zio_t *pio = zio_unique_parent(zio); 2931 dva_t *cdva = zio->io_bp->blk_dva; 2932 dva_t *pdva = pio->io_bp->blk_dva; 2933 uint64_t asize; 2934 zio_t *gio __maybe_unused = zio->io_gang_leader; 2935 2936 if (BP_IS_HOLE(zio->io_bp)) 2937 return; 2938 2939 ASSERT(BP_IS_HOLE(&zio->io_bp_orig)); 2940 2941 ASSERT(zio->io_child_type == ZIO_CHILD_GANG); 2942 ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies); 2943 ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp)); 2944 ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp)); 2945 VERIFY3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp)); 2946 2947 mutex_enter(&pio->io_lock); 2948 for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) { 2949 ASSERT(DVA_GET_GANG(&pdva[d])); 2950 asize = DVA_GET_ASIZE(&pdva[d]); 2951 asize += DVA_GET_ASIZE(&cdva[d]); 2952 DVA_SET_ASIZE(&pdva[d], asize); 2953 } 2954 mutex_exit(&pio->io_lock); 2955 } 2956 2957 static void 2958 zio_write_gang_done(zio_t *zio) 2959 { 2960 /* 2961 * The io_abd field will be NULL for a zio with no data. 
The io_flags 2962 * will initially have the ZIO_FLAG_NODATA bit flag set, but we can't 2963 * check for it here as it is cleared in zio_ready. 2964 */ 2965 if (zio->io_abd != NULL) 2966 abd_free(zio->io_abd); 2967 } 2968 2969 static zio_t * 2970 zio_write_gang_block(zio_t *pio, metaslab_class_t *mc) 2971 { 2972 spa_t *spa = pio->io_spa; 2973 blkptr_t *bp = pio->io_bp; 2974 zio_t *gio = pio->io_gang_leader; 2975 zio_t *zio; 2976 zio_gang_node_t *gn, **gnpp; 2977 zio_gbh_phys_t *gbh; 2978 abd_t *gbh_abd; 2979 uint64_t txg = pio->io_txg; 2980 uint64_t resid = pio->io_size; 2981 uint64_t lsize; 2982 int copies = gio->io_prop.zp_copies; 2983 zio_prop_t zp; 2984 int error; 2985 boolean_t has_data = !(pio->io_flags & ZIO_FLAG_NODATA); 2986 2987 /* 2988 * If one copy was requested, store 2 copies of the GBH, so that we 2989 * can still traverse all the data (e.g. to free or scrub) even if a 2990 * block is damaged. Note that we can't store 3 copies of the GBH in 2991 * all cases, e.g. with encryption, which uses DVA[2] for the IV+salt. 2992 */ 2993 int gbh_copies = copies; 2994 if (gbh_copies == 1) { 2995 gbh_copies = MIN(2, spa_max_replication(spa)); 2996 } 2997 2998 ASSERT(ZIO_HAS_ALLOCATOR(pio)); 2999 int flags = METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER; 3000 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 3001 ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 3002 ASSERT(has_data); 3003 3004 flags |= METASLAB_ASYNC_ALLOC; 3005 VERIFY(zfs_refcount_held(&mc->mc_allocator[pio->io_allocator]. 3006 mca_alloc_slots, pio)); 3007 3008 /* 3009 * The logical zio has already placed a reservation for 3010 * 'copies' allocation slots but gang blocks may require 3011 * additional copies. These additional copies 3012 * (i.e. gbh_copies - copies) are guaranteed to succeed 3013 * since metaslab_class_throttle_reserve() always allows 3014 * additional reservations for gang blocks. 3015 */ 3016 VERIFY(metaslab_class_throttle_reserve(mc, gbh_copies - copies, 3017 pio->io_allocator, pio, flags)); 3018 } 3019 3020 error = metaslab_alloc(spa, mc, SPA_GANGBLOCKSIZE, 3021 bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, flags, 3022 &pio->io_alloc_list, pio, pio->io_allocator); 3023 if (error) { 3024 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 3025 ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 3026 ASSERT(has_data); 3027 3028 /* 3029 * If we failed to allocate the gang block header then 3030 * we remove any additional allocation reservations that 3031 * we placed here. The original reservation will 3032 * be removed when the logical I/O goes to the ready 3033 * stage. 3034 */ 3035 metaslab_class_throttle_unreserve(mc, 3036 gbh_copies - copies, pio->io_allocator, pio); 3037 } 3038 3039 pio->io_error = error; 3040 return (pio); 3041 } 3042 3043 if (pio == gio) { 3044 gnpp = &gio->io_gang_tree; 3045 } else { 3046 gnpp = pio->io_private; 3047 ASSERT(pio->io_ready == zio_write_gang_member_ready); 3048 } 3049 3050 gn = zio_gang_node_alloc(gnpp); 3051 gbh = gn->gn_gbh; 3052 memset(gbh, 0, SPA_GANGBLOCKSIZE); 3053 gbh_abd = abd_get_from_buf(gbh, SPA_GANGBLOCKSIZE); 3054 3055 /* 3056 * Create the gang header. 3057 */ 3058 zio = zio_rewrite(pio, spa, txg, bp, gbh_abd, SPA_GANGBLOCKSIZE, 3059 zio_write_gang_done, NULL, pio->io_priority, 3060 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 3061 3062 zio_gang_inherit_allocator(pio, zio); 3063 3064 /* 3065 * Create and nowait the gang children. 
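 *
 * Each pass splits the remaining size evenly over the bps still
 * available, rounded up to SPA_MINBLOCKSIZE. E.g. (illustrative)
 * for resid = 96K with SPA_GBH_NBLKPTRS = 3:
 *
 *	g=0: lsize = P2ROUNDUP(96K / 3, 512) = 32K, resid -> 64K
 *	g=1: lsize = P2ROUNDUP(64K / 2, 512) = 32K, resid -> 32K
 *	g=2: lsize = P2ROUNDUP(32K / 1, 512) = 32K, resid -> 0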
3066 */ 3067 for (int g = 0; resid != 0; resid -= lsize, g++) { 3068 lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g), 3069 SPA_MINBLOCKSIZE); 3070 ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid); 3071 3072 zp.zp_checksum = gio->io_prop.zp_checksum; 3073 zp.zp_compress = ZIO_COMPRESS_OFF; 3074 zp.zp_complevel = gio->io_prop.zp_complevel; 3075 zp.zp_type = DMU_OT_NONE; 3076 zp.zp_level = 0; 3077 zp.zp_copies = gio->io_prop.zp_copies; 3078 zp.zp_dedup = B_FALSE; 3079 zp.zp_dedup_verify = B_FALSE; 3080 zp.zp_nopwrite = B_FALSE; 3081 zp.zp_encrypt = gio->io_prop.zp_encrypt; 3082 zp.zp_byteorder = gio->io_prop.zp_byteorder; 3083 memset(zp.zp_salt, 0, ZIO_DATA_SALT_LEN); 3084 memset(zp.zp_iv, 0, ZIO_DATA_IV_LEN); 3085 memset(zp.zp_mac, 0, ZIO_DATA_MAC_LEN); 3086 3087 zio_t *cio = zio_write(zio, spa, txg, &gbh->zg_blkptr[g], 3088 has_data ? abd_get_offset(pio->io_abd, pio->io_size - 3089 resid) : NULL, lsize, lsize, &zp, 3090 zio_write_gang_member_ready, NULL, 3091 zio_write_gang_done, &gn->gn_child[g], pio->io_priority, 3092 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 3093 3094 zio_gang_inherit_allocator(zio, cio); 3095 3096 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 3097 ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 3098 ASSERT(has_data); 3099 3100 /* 3101 * Gang children won't throttle but we should 3102 * account for their work, so reserve an allocation 3103 * slot for them here. 3104 */ 3105 VERIFY(metaslab_class_throttle_reserve(mc, 3106 zp.zp_copies, cio->io_allocator, cio, flags)); 3107 } 3108 zio_nowait(cio); 3109 } 3110 3111 /* 3112 * Set pio's pipeline to just wait for zio to finish. 3113 */ 3114 pio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 3115 3116 zio_nowait(zio); 3117 3118 return (pio); 3119 } 3120 3121 /* 3122 * The zio_nop_write stage in the pipeline determines if allocating a 3123 * new bp is necessary. The nopwrite feature can handle writes in 3124 * either syncing or open context (i.e. zil writes) and as a result is 3125 * mutually exclusive with dedup. 3126 * 3127 * By leveraging a cryptographically secure checksum, such as SHA256, we 3128 * can compare the checksums of the new data and the old to determine if 3129 * allocating a new block is required. Note that our requirements for 3130 * cryptographic strength are fairly weak: there can't be any accidental 3131 * hash collisions, but we don't need to be secure against intentional 3132 * (malicious) collisions. To trigger a nopwrite, you have to be able 3133 * to write the file to begin with, and triggering an incorrect (hash 3134 * collision) nopwrite is no worse than simply writing to the file. 3135 * That said, there are no known attacks against the checksum algorithms 3136 * used for nopwrite, assuming that the salt and the checksums 3137 * themselves remain secret. 3138 */ 3139 static zio_t * 3140 zio_nop_write(zio_t *zio) 3141 { 3142 blkptr_t *bp = zio->io_bp; 3143 blkptr_t *bp_orig = &zio->io_bp_orig; 3144 zio_prop_t *zp = &zio->io_prop; 3145 3146 ASSERT(BP_IS_HOLE(bp)); 3147 ASSERT(BP_GET_LEVEL(bp) == 0); 3148 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE)); 3149 ASSERT(zp->zp_nopwrite); 3150 ASSERT(!zp->zp_dedup); 3151 ASSERT(zio->io_bp_override == NULL); 3152 ASSERT(IO_IS_ALLOCATING(zio)); 3153 3154 /* 3155 * Check to see if the original bp and the new bp have matching 3156 * characteristics (i.e. same checksum, compression algorithms, etc). 3157 * If they don't then just continue with the pipeline which will 3158 * allocate a new bp. 
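 *
 * (Everything compared below must match -- checksum algorithm,
 * compression, dedup, and the number of copies -- and neither bp
 * may be encrypted, since for encrypted blocks part of blk_cksum
 * carries the MAC rather than a pure data checksum.)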
3159 */ 3160 if (BP_IS_HOLE(bp_orig) || 3161 !(zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_flags & 3162 ZCHECKSUM_FLAG_NOPWRITE) || 3163 BP_IS_ENCRYPTED(bp) || BP_IS_ENCRYPTED(bp_orig) || 3164 BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) || 3165 BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) || 3166 BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) || 3167 zp->zp_copies != BP_GET_NDVAS(bp_orig)) 3168 return (zio); 3169 3170 /* 3171 * If the checksums match then reset the pipeline so that we 3172 * avoid allocating a new bp and issuing any I/O. 3173 */ 3174 if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) { 3175 ASSERT(zio_checksum_table[zp->zp_checksum].ci_flags & 3176 ZCHECKSUM_FLAG_NOPWRITE); 3177 ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig)); 3178 ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig)); 3179 ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF); 3180 ASSERT3U(bp->blk_prop, ==, bp_orig->blk_prop); 3181 3182 /* 3183 * If we're overwriting a block that is currently on an 3184 * indirect vdev, then ignore the nopwrite request and 3185 * allow a new block to be allocated on a concrete vdev. 3186 */ 3187 spa_config_enter(zio->io_spa, SCL_VDEV, FTAG, RW_READER); 3188 for (int d = 0; d < BP_GET_NDVAS(bp_orig); d++) { 3189 vdev_t *tvd = vdev_lookup_top(zio->io_spa, 3190 DVA_GET_VDEV(&bp_orig->blk_dva[d])); 3191 if (tvd->vdev_ops == &vdev_indirect_ops) { 3192 spa_config_exit(zio->io_spa, SCL_VDEV, FTAG); 3193 return (zio); 3194 } 3195 } 3196 spa_config_exit(zio->io_spa, SCL_VDEV, FTAG); 3197 3198 *bp = *bp_orig; 3199 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 3200 zio->io_flags |= ZIO_FLAG_NOPWRITE; 3201 } 3202 3203 return (zio); 3204 } 3205 3206 /* 3207 * ========================================================================== 3208 * Block Reference Table 3209 * ========================================================================== 3210 */ 3211 static zio_t * 3212 zio_brt_free(zio_t *zio) 3213 { 3214 blkptr_t *bp; 3215 3216 bp = zio->io_bp; 3217 3218 if (BP_GET_LEVEL(bp) > 0 || 3219 BP_IS_METADATA(bp) || 3220 !brt_maybe_exists(zio->io_spa, bp)) { 3221 return (zio); 3222 } 3223 3224 if (!brt_entry_decref(zio->io_spa, bp)) { 3225 /* 3226 * This isn't the last reference, so we cannot free 3227 * the data yet. 
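 * (Dropping to ZIO_INTERLOCK_PIPELINE below skips the DVA-free
 * stages, so the space remains allocated for the other clones;
 * only the BRT reference count was decremented.)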
3228 */ 3229 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 3230 } 3231 3232 return (zio); 3233 } 3234 3235 /* 3236 * ========================================================================== 3237 * Dedup 3238 * ========================================================================== 3239 */ 3240 static void 3241 zio_ddt_child_read_done(zio_t *zio) 3242 { 3243 blkptr_t *bp = zio->io_bp; 3244 ddt_entry_t *dde = zio->io_private; 3245 ddt_phys_t *ddp; 3246 zio_t *pio = zio_unique_parent(zio); 3247 3248 mutex_enter(&pio->io_lock); 3249 ddp = ddt_phys_select(dde, bp); 3250 if (zio->io_error == 0) 3251 ddt_phys_clear(ddp); /* this ddp doesn't need repair */ 3252 3253 if (zio->io_error == 0 && dde->dde_repair_abd == NULL) 3254 dde->dde_repair_abd = zio->io_abd; 3255 else 3256 abd_free(zio->io_abd); 3257 mutex_exit(&pio->io_lock); 3258 } 3259 3260 static zio_t * 3261 zio_ddt_read_start(zio_t *zio) 3262 { 3263 blkptr_t *bp = zio->io_bp; 3264 3265 ASSERT(BP_GET_DEDUP(bp)); 3266 ASSERT(BP_GET_PSIZE(bp) == zio->io_size); 3267 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 3268 3269 if (zio->io_child_error[ZIO_CHILD_DDT]) { 3270 ddt_t *ddt = ddt_select(zio->io_spa, bp); 3271 ddt_entry_t *dde = ddt_repair_start(ddt, bp); 3272 ddt_phys_t *ddp = dde->dde_phys; 3273 ddt_phys_t *ddp_self = ddt_phys_select(dde, bp); 3274 blkptr_t blk; 3275 3276 ASSERT(zio->io_vsd == NULL); 3277 zio->io_vsd = dde; 3278 3279 if (ddp_self == NULL) 3280 return (zio); 3281 3282 for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { 3283 if (ddp->ddp_phys_birth == 0 || ddp == ddp_self) 3284 continue; 3285 ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp, 3286 &blk); 3287 zio_nowait(zio_read(zio, zio->io_spa, &blk, 3288 abd_alloc_for_io(zio->io_size, B_TRUE), 3289 zio->io_size, zio_ddt_child_read_done, dde, 3290 zio->io_priority, ZIO_DDT_CHILD_FLAGS(zio) | 3291 ZIO_FLAG_DONT_PROPAGATE, &zio->io_bookmark)); 3292 } 3293 return (zio); 3294 } 3295 3296 zio_nowait(zio_read(zio, zio->io_spa, bp, 3297 zio->io_abd, zio->io_size, NULL, NULL, zio->io_priority, 3298 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark)); 3299 3300 return (zio); 3301 } 3302 3303 static zio_t * 3304 zio_ddt_read_done(zio_t *zio) 3305 { 3306 blkptr_t *bp = zio->io_bp; 3307 3308 if (zio_wait_for_children(zio, ZIO_CHILD_DDT_BIT, ZIO_WAIT_DONE)) { 3309 return (NULL); 3310 } 3311 3312 ASSERT(BP_GET_DEDUP(bp)); 3313 ASSERT(BP_GET_PSIZE(bp) == zio->io_size); 3314 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 3315 3316 if (zio->io_child_error[ZIO_CHILD_DDT]) { 3317 ddt_t *ddt = ddt_select(zio->io_spa, bp); 3318 ddt_entry_t *dde = zio->io_vsd; 3319 if (ddt == NULL) { 3320 ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE); 3321 return (zio); 3322 } 3323 if (dde == NULL) { 3324 zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1; 3325 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE); 3326 return (NULL); 3327 } 3328 if (dde->dde_repair_abd != NULL) { 3329 abd_copy(zio->io_abd, dde->dde_repair_abd, 3330 zio->io_size); 3331 zio->io_child_error[ZIO_CHILD_DDT] = 0; 3332 } 3333 ddt_repair_done(ddt, dde); 3334 zio->io_vsd = NULL; 3335 } 3336 3337 ASSERT(zio->io_vsd == NULL); 3338 3339 return (zio); 3340 } 3341 3342 static boolean_t 3343 zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde) 3344 { 3345 spa_t *spa = zio->io_spa; 3346 boolean_t do_raw = !!(zio->io_flags & ZIO_FLAG_RAW); 3347 3348 ASSERT(!(zio->io_bp_override && do_raw)); 3349 3350 /* 3351 * Note: we compare the original data, not the transformed data, 3352 * because when zio->io_bp is an override bp, we will not have 
3353 * pushed the I/O transforms. That's an important optimization 3354 * because otherwise we'd compress/encrypt all dmu_sync() data twice. 3355 * However, we should never get a raw, override zio so in these 3356 * cases we can compare the io_abd directly. This is useful because 3357 * it allows us to do dedup verification even if we don't have access 3358 * to the original data (for instance, if the encryption keys aren't 3359 * loaded). 3360 */ 3361 3362 for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { 3363 zio_t *lio = dde->dde_lead_zio[p]; 3364 3365 if (lio != NULL && do_raw) { 3366 return (lio->io_size != zio->io_size || 3367 abd_cmp(zio->io_abd, lio->io_abd) != 0); 3368 } else if (lio != NULL) { 3369 return (lio->io_orig_size != zio->io_orig_size || 3370 abd_cmp(zio->io_orig_abd, lio->io_orig_abd) != 0); 3371 } 3372 } 3373 3374 for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { 3375 ddt_phys_t *ddp = &dde->dde_phys[p]; 3376 3377 if (ddp->ddp_phys_birth != 0 && do_raw) { 3378 blkptr_t blk = *zio->io_bp; 3379 uint64_t psize; 3380 abd_t *tmpabd; 3381 int error; 3382 3383 ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth); 3384 psize = BP_GET_PSIZE(&blk); 3385 3386 if (psize != zio->io_size) 3387 return (B_TRUE); 3388 3389 ddt_exit(ddt); 3390 3391 tmpabd = abd_alloc_for_io(psize, B_TRUE); 3392 3393 error = zio_wait(zio_read(NULL, spa, &blk, tmpabd, 3394 psize, NULL, NULL, ZIO_PRIORITY_SYNC_READ, 3395 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 3396 ZIO_FLAG_RAW, &zio->io_bookmark)); 3397 3398 if (error == 0) { 3399 if (abd_cmp(tmpabd, zio->io_abd) != 0) 3400 error = SET_ERROR(ENOENT); 3401 } 3402 3403 abd_free(tmpabd); 3404 ddt_enter(ddt); 3405 return (error != 0); 3406 } else if (ddp->ddp_phys_birth != 0) { 3407 arc_buf_t *abuf = NULL; 3408 arc_flags_t aflags = ARC_FLAG_WAIT; 3409 blkptr_t blk = *zio->io_bp; 3410 int error; 3411 3412 ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth); 3413 3414 if (BP_GET_LSIZE(&blk) != zio->io_orig_size) 3415 return (B_TRUE); 3416 3417 ddt_exit(ddt); 3418 3419 error = arc_read(NULL, spa, &blk, 3420 arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ, 3421 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 3422 &aflags, &zio->io_bookmark); 3423 3424 if (error == 0) { 3425 if (abd_cmp_buf(zio->io_orig_abd, abuf->b_data, 3426 zio->io_orig_size) != 0) 3427 error = SET_ERROR(ENOENT); 3428 arc_buf_destroy(abuf, &abuf); 3429 } 3430 3431 ddt_enter(ddt); 3432 return (error != 0); 3433 } 3434 } 3435 3436 return (B_FALSE); 3437 } 3438 3439 static void 3440 zio_ddt_child_write_ready(zio_t *zio) 3441 { 3442 int p = zio->io_prop.zp_copies; 3443 ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp); 3444 ddt_entry_t *dde = zio->io_private; 3445 ddt_phys_t *ddp = &dde->dde_phys[p]; 3446 zio_t *pio; 3447 3448 if (zio->io_error) 3449 return; 3450 3451 ddt_enter(ddt); 3452 3453 ASSERT(dde->dde_lead_zio[p] == zio); 3454 3455 ddt_phys_fill(ddp, zio->io_bp); 3456 3457 zio_link_t *zl = NULL; 3458 while ((pio = zio_walk_parents(zio, &zl)) != NULL) 3459 ddt_bp_fill(ddp, pio->io_bp, zio->io_txg); 3460 3461 ddt_exit(ddt); 3462 } 3463 3464 static void 3465 zio_ddt_child_write_done(zio_t *zio) 3466 { 3467 int p = zio->io_prop.zp_copies; 3468 ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp); 3469 ddt_entry_t *dde = zio->io_private; 3470 ddt_phys_t *ddp = &dde->dde_phys[p]; 3471 3472 ddt_enter(ddt); 3473 3474 ASSERT(ddp->ddp_refcnt == 0); 3475 ASSERT(dde->dde_lead_zio[p] == zio); 3476 dde->dde_lead_zio[p] = NULL; 3477 3478 if (zio->io_error == 0) { 3479 zio_link_t *zl = NULL; 3480 while 
(zio_walk_parents(zio, &zl) != NULL) 3481 ddt_phys_addref(ddp); 3482 } else { 3483 ddt_phys_clear(ddp); 3484 } 3485 3486 ddt_exit(ddt); 3487 } 3488 3489 static zio_t * 3490 zio_ddt_write(zio_t *zio) 3491 { 3492 spa_t *spa = zio->io_spa; 3493 blkptr_t *bp = zio->io_bp; 3494 uint64_t txg = zio->io_txg; 3495 zio_prop_t *zp = &zio->io_prop; 3496 int p = zp->zp_copies; 3497 zio_t *cio = NULL; 3498 ddt_t *ddt = ddt_select(spa, bp); 3499 ddt_entry_t *dde; 3500 ddt_phys_t *ddp; 3501 3502 ASSERT(BP_GET_DEDUP(bp)); 3503 ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum); 3504 ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override); 3505 ASSERT(!(zio->io_bp_override && (zio->io_flags & ZIO_FLAG_RAW))); 3506 3507 ddt_enter(ddt); 3508 dde = ddt_lookup(ddt, bp, B_TRUE); 3509 ddp = &dde->dde_phys[p]; 3510 3511 if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) { 3512 /* 3513 * If we're using a weak checksum, upgrade to a strong checksum 3514 * and try again. If we're already using a strong checksum, 3515 * we can't resolve it, so just convert to an ordinary write. 3516 * (And automatically e-mail a paper to Nature?) 3517 */ 3518 if (!(zio_checksum_table[zp->zp_checksum].ci_flags & 3519 ZCHECKSUM_FLAG_DEDUP)) { 3520 zp->zp_checksum = spa_dedup_checksum(spa); 3521 zio_pop_transforms(zio); 3522 zio->io_stage = ZIO_STAGE_OPEN; 3523 BP_ZERO(bp); 3524 } else { 3525 zp->zp_dedup = B_FALSE; 3526 BP_SET_DEDUP(bp, B_FALSE); 3527 } 3528 ASSERT(!BP_GET_DEDUP(bp)); 3529 zio->io_pipeline = ZIO_WRITE_PIPELINE; 3530 ddt_exit(ddt); 3531 return (zio); 3532 } 3533 3534 if (ddp->ddp_phys_birth != 0 || dde->dde_lead_zio[p] != NULL) { 3535 if (ddp->ddp_phys_birth != 0) 3536 ddt_bp_fill(ddp, bp, txg); 3537 if (dde->dde_lead_zio[p] != NULL) 3538 zio_add_child(zio, dde->dde_lead_zio[p]); 3539 else 3540 ddt_phys_addref(ddp); 3541 } else if (zio->io_bp_override) { 3542 ASSERT(BP_GET_LOGICAL_BIRTH(bp) == txg); 3543 ASSERT(BP_EQUAL(bp, zio->io_bp_override)); 3544 ddt_phys_fill(ddp, bp); 3545 ddt_phys_addref(ddp); 3546 } else { 3547 cio = zio_write(zio, spa, txg, bp, zio->io_orig_abd, 3548 zio->io_orig_size, zio->io_orig_size, zp, 3549 zio_ddt_child_write_ready, NULL, 3550 zio_ddt_child_write_done, dde, zio->io_priority, 3551 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark); 3552 3553 zio_push_transform(cio, zio->io_abd, zio->io_size, 0, NULL); 3554 dde->dde_lead_zio[p] = cio; 3555 } 3556 3557 ddt_exit(ddt); 3558 3559 zio_nowait(cio); 3560 3561 return (zio); 3562 } 3563 3564 static ddt_entry_t *freedde; /* for debugging */ 3565 3566 static zio_t * 3567 zio_ddt_free(zio_t *zio) 3568 { 3569 spa_t *spa = zio->io_spa; 3570 blkptr_t *bp = zio->io_bp; 3571 ddt_t *ddt = ddt_select(spa, bp); 3572 ddt_entry_t *dde; 3573 ddt_phys_t *ddp; 3574 3575 ASSERT(BP_GET_DEDUP(bp)); 3576 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 3577 3578 ddt_enter(ddt); 3579 freedde = dde = ddt_lookup(ddt, bp, B_TRUE); 3580 if (dde) { 3581 ddp = ddt_phys_select(dde, bp); 3582 if (ddp) 3583 ddt_phys_decref(ddp); 3584 } 3585 ddt_exit(ddt); 3586 3587 return (zio); 3588 } 3589 3590 /* 3591 * ========================================================================== 3592 * Allocate and free blocks 3593 * ========================================================================== 3594 */ 3595 3596 static zio_t * 3597 zio_io_to_allocate(spa_t *spa, int allocator) 3598 { 3599 zio_t *zio; 3600 3601 ASSERT(MUTEX_HELD(&spa->spa_allocs[allocator].spaa_lock)); 3602 3603 zio = avl_first(&spa->spa_allocs[allocator].spaa_tree); 3604 if (zio == NULL) 3605 return (NULL); 3606 3607 
ASSERT(IO_IS_ALLOCATING(zio)); 3608 ASSERT(ZIO_HAS_ALLOCATOR(zio)); 3609 3610 /* 3611 * Try to place a reservation for this zio. If we're unable to 3612 * reserve then we throttle. 3613 */ 3614 ASSERT3U(zio->io_allocator, ==, allocator); 3615 if (!metaslab_class_throttle_reserve(zio->io_metaslab_class, 3616 zio->io_prop.zp_copies, allocator, zio, 0)) { 3617 return (NULL); 3618 } 3619 3620 avl_remove(&spa->spa_allocs[allocator].spaa_tree, zio); 3621 ASSERT3U(zio->io_stage, <, ZIO_STAGE_DVA_ALLOCATE); 3622 3623 return (zio); 3624 } 3625 3626 static zio_t * 3627 zio_dva_throttle(zio_t *zio) 3628 { 3629 spa_t *spa = zio->io_spa; 3630 zio_t *nio; 3631 metaslab_class_t *mc; 3632 3633 /* locate an appropriate allocation class */ 3634 mc = spa_preferred_class(spa, zio->io_size, zio->io_prop.zp_type, 3635 zio->io_prop.zp_level, zio->io_prop.zp_zpl_smallblk); 3636 3637 if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE || 3638 !mc->mc_alloc_throttle_enabled || 3639 zio->io_child_type == ZIO_CHILD_GANG || 3640 zio->io_flags & ZIO_FLAG_NODATA) { 3641 return (zio); 3642 } 3643 3644 ASSERT(zio->io_type == ZIO_TYPE_WRITE); 3645 ASSERT(ZIO_HAS_ALLOCATOR(zio)); 3646 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 3647 ASSERT3U(zio->io_queued_timestamp, >, 0); 3648 ASSERT(zio->io_stage == ZIO_STAGE_DVA_THROTTLE); 3649 3650 int allocator = zio->io_allocator; 3651 zio->io_metaslab_class = mc; 3652 mutex_enter(&spa->spa_allocs[allocator].spaa_lock); 3653 avl_add(&spa->spa_allocs[allocator].spaa_tree, zio); 3654 nio = zio_io_to_allocate(spa, allocator); 3655 mutex_exit(&spa->spa_allocs[allocator].spaa_lock); 3656 return (nio); 3657 } 3658 3659 static void 3660 zio_allocate_dispatch(spa_t *spa, int allocator) 3661 { 3662 zio_t *zio; 3663 3664 mutex_enter(&spa->spa_allocs[allocator].spaa_lock); 3665 zio = zio_io_to_allocate(spa, allocator); 3666 mutex_exit(&spa->spa_allocs[allocator].spaa_lock); 3667 if (zio == NULL) 3668 return; 3669 3670 ASSERT3U(zio->io_stage, ==, ZIO_STAGE_DVA_THROTTLE); 3671 ASSERT0(zio->io_error); 3672 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_TRUE); 3673 } 3674 3675 static zio_t * 3676 zio_dva_allocate(zio_t *zio) 3677 { 3678 spa_t *spa = zio->io_spa; 3679 metaslab_class_t *mc; 3680 blkptr_t *bp = zio->io_bp; 3681 int error; 3682 int flags = 0; 3683 3684 if (zio->io_gang_leader == NULL) { 3685 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 3686 zio->io_gang_leader = zio; 3687 } 3688 3689 ASSERT(BP_IS_HOLE(bp)); 3690 ASSERT0(BP_GET_NDVAS(bp)); 3691 ASSERT3U(zio->io_prop.zp_copies, >, 0); 3692 ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa)); 3693 ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp)); 3694 3695 if (zio->io_flags & ZIO_FLAG_NODATA) 3696 flags |= METASLAB_DONT_THROTTLE; 3697 if (zio->io_flags & ZIO_FLAG_GANG_CHILD) 3698 flags |= METASLAB_GANG_CHILD; 3699 if (zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE) 3700 flags |= METASLAB_ASYNC_ALLOC; 3701 3702 /* 3703 * if not already chosen, locate an appropriate allocation class 3704 */ 3705 mc = zio->io_metaslab_class; 3706 if (mc == NULL) { 3707 mc = spa_preferred_class(spa, zio->io_size, 3708 zio->io_prop.zp_type, zio->io_prop.zp_level, 3709 zio->io_prop.zp_zpl_smallblk); 3710 zio->io_metaslab_class = mc; 3711 } 3712 3713 /* 3714 * Try allocating the block in the usual metaslab class. 3715 * If that's full, allocate it in the normal class. 3716 * If that's full, allocate as a gang block, 3717 * and if all are full, the allocation fails (which shouldn't happen). 
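 * (The "usual" class may be a special or dedup allocation class chosen by
 * spa_preferred_class(); the normal class is always the backstop before
 * ganging.)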
3718 * 3719 * Note that we do not fall back on embedded slog (ZIL) space, to 3720 * preserve unfragmented slog space, which is critical for decent 3721 * sync write performance. If a log allocation fails, we will fall 3722 * back to spa_sync() which is abysmal for performance. 3723 */ 3724 ASSERT(ZIO_HAS_ALLOCATOR(zio)); 3725 error = metaslab_alloc(spa, mc, zio->io_size, bp, 3726 zio->io_prop.zp_copies, zio->io_txg, NULL, flags, 3727 &zio->io_alloc_list, zio, zio->io_allocator); 3728 3729 /* 3730 * Fallback to normal class when an alloc class is full 3731 */ 3732 if (error == ENOSPC && mc != spa_normal_class(spa)) { 3733 /* 3734 * If throttling, transfer reservation over to normal class. 3735 * The io_allocator slot can remain the same even though we 3736 * are switching classes. 3737 */ 3738 if (mc->mc_alloc_throttle_enabled && 3739 (zio->io_flags & ZIO_FLAG_IO_ALLOCATING)) { 3740 metaslab_class_throttle_unreserve(mc, 3741 zio->io_prop.zp_copies, zio->io_allocator, zio); 3742 zio->io_flags &= ~ZIO_FLAG_IO_ALLOCATING; 3743 3744 VERIFY(metaslab_class_throttle_reserve( 3745 spa_normal_class(spa), 3746 zio->io_prop.zp_copies, zio->io_allocator, zio, 3747 flags | METASLAB_MUST_RESERVE)); 3748 } 3749 zio->io_metaslab_class = mc = spa_normal_class(spa); 3750 if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) { 3751 zfs_dbgmsg("%s: metaslab allocation failure, " 3752 "trying normal class: zio %px, size %llu, error %d", 3753 spa_name(spa), zio, (u_longlong_t)zio->io_size, 3754 error); 3755 } 3756 3757 error = metaslab_alloc(spa, mc, zio->io_size, bp, 3758 zio->io_prop.zp_copies, zio->io_txg, NULL, flags, 3759 &zio->io_alloc_list, zio, zio->io_allocator); 3760 } 3761 3762 if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE) { 3763 if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) { 3764 zfs_dbgmsg("%s: metaslab allocation failure, " 3765 "trying ganging: zio %px, size %llu, error %d", 3766 spa_name(spa), zio, (u_longlong_t)zio->io_size, 3767 error); 3768 } 3769 return (zio_write_gang_block(zio, mc)); 3770 } 3771 if (error != 0) { 3772 if (error != ENOSPC || 3773 (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC)) { 3774 zfs_dbgmsg("%s: metaslab allocation failure: zio %px, " 3775 "size %llu, error %d", 3776 spa_name(spa), zio, (u_longlong_t)zio->io_size, 3777 error); 3778 } 3779 zio->io_error = error; 3780 } 3781 3782 return (zio); 3783 } 3784 3785 static zio_t * 3786 zio_dva_free(zio_t *zio) 3787 { 3788 metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE); 3789 3790 return (zio); 3791 } 3792 3793 static zio_t * 3794 zio_dva_claim(zio_t *zio) 3795 { 3796 int error; 3797 3798 error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg); 3799 if (error) 3800 zio->io_error = error; 3801 3802 return (zio); 3803 } 3804 3805 /* 3806 * Undo an allocation. This is used by zio_done() when an I/O fails 3807 * and we want to give back the block we just allocated. 3808 * This handles both normal blocks and gang blocks. 3809 */ 3810 static void 3811 zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp) 3812 { 3813 ASSERT(BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg || BP_IS_HOLE(bp)); 3814 ASSERT(zio->io_bp_override == NULL); 3815 3816 if (!BP_IS_HOLE(bp)) { 3817 metaslab_free(zio->io_spa, bp, BP_GET_LOGICAL_BIRTH(bp), 3818 B_TRUE); 3819 } 3820 3821 if (gn != NULL) { 3822 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 3823 zio_dva_unallocate(zio, gn->gn_child[g], 3824 &gn->gn_gbh->zg_blkptr[g]); 3825 } 3826 } 3827 } 3828 3829 /* 3830 * Try to allocate an intent log block. Return 0 on success, errno on failure. 
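 * Allocation is attempted first in the dedicated slog class, then in the
 * embedded slog class, and finally in the normal class, as reflected in
 * the sequence of metaslab_alloc() calls below.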
3831 */ 3832 int 3833 zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg, blkptr_t *new_bp, 3834 uint64_t size, boolean_t *slog) 3835 { 3836 int error = 1; 3837 zio_alloc_list_t io_alloc_list; 3838 3839 ASSERT(txg > spa_syncing_txg(spa)); 3840 3841 metaslab_trace_init(&io_alloc_list); 3842 3843 /* 3844 * Block pointer fields are useful to metaslabs for stats and debugging. 3845 * Fill in the obvious ones before calling into metaslab_alloc(). 3846 */ 3847 BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG); 3848 BP_SET_PSIZE(new_bp, size); 3849 BP_SET_LEVEL(new_bp, 0); 3850 3851 /* 3852 * When allocating a zil block, we don't have information about 3853 * the final destination of the block except the objset it's part 3854 * of, so we just hash the objset ID to pick the allocator to get 3855 * some parallelism. 3856 */ 3857 int flags = METASLAB_ZIL; 3858 int allocator = (uint_t)cityhash4(0, 0, 0, 3859 os->os_dsl_dataset->ds_object) % spa->spa_alloc_count; 3860 error = metaslab_alloc(spa, spa_log_class(spa), size, new_bp, 1, 3861 txg, NULL, flags, &io_alloc_list, NULL, allocator); 3862 *slog = (error == 0); 3863 if (error != 0) { 3864 error = metaslab_alloc(spa, spa_embedded_log_class(spa), size, 3865 new_bp, 1, txg, NULL, flags, 3866 &io_alloc_list, NULL, allocator); 3867 } 3868 if (error != 0) { 3869 error = metaslab_alloc(spa, spa_normal_class(spa), size, 3870 new_bp, 1, txg, NULL, flags, 3871 &io_alloc_list, NULL, allocator); 3872 } 3873 metaslab_trace_fini(&io_alloc_list); 3874 3875 if (error == 0) { 3876 BP_SET_LSIZE(new_bp, size); 3877 BP_SET_PSIZE(new_bp, size); 3878 BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF); 3879 BP_SET_CHECKSUM(new_bp, 3880 spa_version(spa) >= SPA_VERSION_SLIM_ZIL 3881 ? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG); 3882 BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG); 3883 BP_SET_LEVEL(new_bp, 0); 3884 BP_SET_DEDUP(new_bp, 0); 3885 BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER); 3886 3887 /* 3888 * encrypted blocks will require an IV and salt. We generate 3889 * these now since we will not be rewriting the bp at 3890 * rewrite time. 3891 */ 3892 if (os->os_encrypted) { 3893 uint8_t iv[ZIO_DATA_IV_LEN]; 3894 uint8_t salt[ZIO_DATA_SALT_LEN]; 3895 3896 BP_SET_CRYPT(new_bp, B_TRUE); 3897 VERIFY0(spa_crypt_get_salt(spa, 3898 dmu_objset_id(os), salt)); 3899 VERIFY0(zio_crypt_generate_iv(iv)); 3900 3901 zio_crypt_encode_params_bp(new_bp, salt, iv); 3902 } 3903 } else { 3904 zfs_dbgmsg("%s: zil block allocation failure: " 3905 "size %llu, error %d", spa_name(spa), (u_longlong_t)size, 3906 error); 3907 } 3908 3909 return (error); 3910 } 3911 3912 /* 3913 * ========================================================================== 3914 * Read and write to physical devices 3915 * ========================================================================== 3916 */ 3917 3918 /* 3919 * Issue an I/O to the underlying vdev. Typically the issue pipeline 3920 * stops after this stage and will resume upon I/O completion. 3921 * However, there are instances where the vdev layer may need to 3922 * continue the pipeline when an I/O was not issued. Since the I/O 3923 * that was sent to the vdev layer might be different than the one 3924 * currently active in the pipeline (see vdev_queue_io()), we explicitly 3925 * force the underlying vdev layers to call either zio_execute() or 3926 * zio_interrupt() to ensure that the pipeline continues with the correct I/O. 
3927 */ 3928 static zio_t * 3929 zio_vdev_io_start(zio_t *zio) 3930 { 3931 vdev_t *vd = zio->io_vd; 3932 uint64_t align; 3933 spa_t *spa = zio->io_spa; 3934 3935 zio->io_delay = 0; 3936 3937 ASSERT(zio->io_error == 0); 3938 ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0); 3939 3940 if (vd == NULL) { 3941 if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) 3942 spa_config_enter(spa, SCL_ZIO, zio, RW_READER); 3943 3944 /* 3945 * The mirror_ops handle multiple DVAs in a single BP. 3946 */ 3947 vdev_mirror_ops.vdev_op_io_start(zio); 3948 return (NULL); 3949 } 3950 3951 ASSERT3P(zio->io_logical, !=, zio); 3952 if (zio->io_type == ZIO_TYPE_WRITE) { 3953 ASSERT(spa->spa_trust_config); 3954 3955 /* 3956 * Note: the code can handle other kinds of writes, 3957 * but we don't expect them. 3958 */ 3959 if (zio->io_vd->vdev_noalloc) { 3960 ASSERT(zio->io_flags & 3961 (ZIO_FLAG_PHYSICAL | ZIO_FLAG_SELF_HEAL | 3962 ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE)); 3963 } 3964 } 3965 3966 align = 1ULL << vd->vdev_top->vdev_ashift; 3967 3968 if (!(zio->io_flags & ZIO_FLAG_PHYSICAL) && 3969 P2PHASE(zio->io_size, align) != 0) { 3970 /* Transform logical writes to be a full physical block size. */ 3971 uint64_t asize = P2ROUNDUP(zio->io_size, align); 3972 abd_t *abuf = abd_alloc_sametype(zio->io_abd, asize); 3973 ASSERT(vd == vd->vdev_top); 3974 if (zio->io_type == ZIO_TYPE_WRITE) { 3975 abd_copy(abuf, zio->io_abd, zio->io_size); 3976 abd_zero_off(abuf, zio->io_size, asize - zio->io_size); 3977 } 3978 zio_push_transform(zio, abuf, asize, asize, zio_subblock); 3979 } 3980 3981 /* 3982 * If this is not a physical io, make sure that it is properly aligned 3983 * before proceeding. 3984 */ 3985 if (!(zio->io_flags & ZIO_FLAG_PHYSICAL)) { 3986 ASSERT0(P2PHASE(zio->io_offset, align)); 3987 ASSERT0(P2PHASE(zio->io_size, align)); 3988 } else { 3989 /* 3990 * For physical writes, we allow 512b aligned writes and assume 3991 * the device will perform a read-modify-write as necessary. 3992 */ 3993 ASSERT0(P2PHASE(zio->io_offset, SPA_MINBLOCKSIZE)); 3994 ASSERT0(P2PHASE(zio->io_size, SPA_MINBLOCKSIZE)); 3995 } 3996 3997 VERIFY(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa)); 3998 3999 /* 4000 * If this is a repair I/O, and there's no self-healing involved -- 4001 * that is, we're just resilvering what we expect to resilver -- 4002 * then don't do the I/O unless zio's txg is actually in vd's DTL. 4003 * This prevents spurious resilvering. 4004 * 4005 * There are a few ways that we can end up creating these spurious 4006 * resilver i/os: 4007 * 4008 * 1. A resilver i/o will be issued if any DVA in the BP has a 4009 * dirty DTL. The mirror code will issue resilver writes to 4010 * each DVA, including the one(s) that are not on vdevs with dirty 4011 * DTLs. 4012 * 4013 * 2. With nested replication, which happens when we have a 4014 * "replacing" or "spare" vdev that's a child of a mirror or raidz. 4015 * For example, given mirror(replacing(A+B), C), it's likely that 4016 * only A is out of date (it's the new device). In this case, we'll 4017 * read from C, then use the data to resilver A+B -- but we don't 4018 * actually want to resilver B, just A. The top-level mirror has no 4019 * way to know this, so instead we just discard unnecessary repairs 4020 * as we work our way down the vdev tree. 4021 * 4022 * 3. ZTEST also creates mirrors of mirrors, mirrors of raidz, etc. 4023 * The same logic applies to any form of nested replication: ditto 4024 * + mirror, RAID-Z + replacing, etc. 
4025 * 4026 * However, indirect vdevs point off to other vdevs which may have 4027 * DTLs, so we never bypass them. The child i/os on concrete vdevs 4028 * will be properly bypassed instead. 4029 * 4030 * Leaf DTL_PARTIAL can be empty when a legitimate write comes from 4031 * a dRAID spare vdev. For example, when a dRAID spare is first 4032 * used, its spare blocks need to be written to but the leaf vdevs 4033 * of such blocks can have empty DTL_PARTIAL. 4034 * 4035 * There seemed to be no clean way to allow such writes while bypassing 4036 * spurious ones. At this point, just avoid all bypassing for dRAID 4037 * for correctness. 4038 */ 4039 if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) && 4040 !(zio->io_flags & ZIO_FLAG_SELF_HEAL) && 4041 zio->io_txg != 0 && /* not a delegated i/o */ 4042 vd->vdev_ops != &vdev_indirect_ops && 4043 vd->vdev_top->vdev_ops != &vdev_draid_ops && 4044 !vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) { 4045 ASSERT(zio->io_type == ZIO_TYPE_WRITE); 4046 zio_vdev_io_bypass(zio); 4047 return (zio); 4048 } 4049 4050 /* 4051 * Select the next best leaf I/O to process. Distributed spares are 4052 * excluded since they dispatch the I/O directly to a leaf vdev after 4053 * applying the dRAID mapping. 4054 */ 4055 if (vd->vdev_ops->vdev_op_leaf && 4056 vd->vdev_ops != &vdev_draid_spare_ops && 4057 (zio->io_type == ZIO_TYPE_READ || 4058 zio->io_type == ZIO_TYPE_WRITE || 4059 zio->io_type == ZIO_TYPE_TRIM)) { 4060 4061 if (zio_handle_device_injection(vd, zio, ENOSYS) != 0) { 4062 /* 4063 * "no-op" injections return success, but do no actual 4064 * work. Just skip the remaining vdev stages. 4065 */ 4066 zio_vdev_io_bypass(zio); 4067 zio_interrupt(zio); 4068 return (NULL); 4069 } 4070 4071 if ((zio = vdev_queue_io(zio)) == NULL) 4072 return (NULL); 4073 4074 if (!vdev_accessible(vd, zio)) { 4075 zio->io_error = SET_ERROR(ENXIO); 4076 zio_interrupt(zio); 4077 return (NULL); 4078 } 4079 zio->io_delay = gethrtime(); 4080 } 4081 4082 vd->vdev_ops->vdev_op_io_start(zio); 4083 return (NULL); 4084 } 4085 4086 static zio_t * 4087 zio_vdev_io_done(zio_t *zio) 4088 { 4089 vdev_t *vd = zio->io_vd; 4090 vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops; 4091 boolean_t unexpected_error = B_FALSE; 4092 4093 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) { 4094 return (NULL); 4095 } 4096 4097 ASSERT(zio->io_type == ZIO_TYPE_READ || 4098 zio->io_type == ZIO_TYPE_WRITE || 4099 zio->io_type == ZIO_TYPE_FLUSH || 4100 zio->io_type == ZIO_TYPE_TRIM); 4101 4102 if (zio->io_delay) 4103 zio->io_delay = gethrtime() - zio->io_delay; 4104 4105 if (vd != NULL && vd->vdev_ops->vdev_op_leaf && 4106 vd->vdev_ops != &vdev_draid_spare_ops) { 4107 if (zio->io_type != ZIO_TYPE_FLUSH) 4108 vdev_queue_io_done(zio); 4109 4110 if (zio_injection_enabled && zio->io_error == 0) 4111 zio->io_error = zio_handle_device_injections(vd, zio, 4112 EIO, EILSEQ); 4113 4114 if (zio_injection_enabled && zio->io_error == 0) 4115 zio->io_error = zio_handle_label_injection(zio, EIO); 4116 4117 if (zio->io_error && zio->io_type != ZIO_TYPE_TRIM) { 4118 if (!vdev_accessible(vd, zio)) { 4119 zio->io_error = SET_ERROR(ENXIO); 4120 } else { 4121 unexpected_error = B_TRUE; 4122 } 4123 } 4124 } 4125 4126 ops->vdev_op_io_done(zio); 4127 4128 if (unexpected_error && vd->vdev_remove_wanted == B_FALSE) 4129 VERIFY(vdev_probe(vd, zio) == NULL); 4130 4131 return (zio); 4132 } 4133 4134 /* 4135 * This function is used to change the priority of an existing zio that is 4136 * currently in-flight.
This is used by the arc to upgrade priority in the 4137 * event that a demand read is made for a block that is currently queued 4138 * as a scrub or async read IO. Otherwise, the high priority read request 4139 * would end up having to wait for the lower priority IO. 4140 */ 4141 void 4142 zio_change_priority(zio_t *pio, zio_priority_t priority) 4143 { 4144 zio_t *cio, *cio_next; 4145 zio_link_t *zl = NULL; 4146 4147 ASSERT3U(priority, <, ZIO_PRIORITY_NUM_QUEUEABLE); 4148 4149 if (pio->io_vd != NULL && pio->io_vd->vdev_ops->vdev_op_leaf) { 4150 vdev_queue_change_io_priority(pio, priority); 4151 } else { 4152 pio->io_priority = priority; 4153 } 4154 4155 mutex_enter(&pio->io_lock); 4156 for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) { 4157 cio_next = zio_walk_children(pio, &zl); 4158 zio_change_priority(cio, priority); 4159 } 4160 mutex_exit(&pio->io_lock); 4161 } 4162 4163 /* 4164 * For non-raidz ZIOs, we can just copy aside the bad data read from the 4165 * disk, and use that to finish the checksum ereport later. 4166 */ 4167 static void 4168 zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr, 4169 const abd_t *good_buf) 4170 { 4171 /* no processing needed */ 4172 zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE); 4173 } 4174 4175 void 4176 zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr) 4177 { 4178 void *abd = abd_alloc_sametype(zio->io_abd, zio->io_size); 4179 4180 abd_copy(abd, zio->io_abd, zio->io_size); 4181 4182 zcr->zcr_cbinfo = zio->io_size; 4183 zcr->zcr_cbdata = abd; 4184 zcr->zcr_finish = zio_vsd_default_cksum_finish; 4185 zcr->zcr_free = zio_abd_free; 4186 } 4187 4188 static zio_t * 4189 zio_vdev_io_assess(zio_t *zio) 4190 { 4191 vdev_t *vd = zio->io_vd; 4192 4193 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) { 4194 return (NULL); 4195 } 4196 4197 if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) 4198 spa_config_exit(zio->io_spa, SCL_ZIO, zio); 4199 4200 if (zio->io_vsd != NULL) { 4201 zio->io_vsd_ops->vsd_free(zio); 4202 zio->io_vsd = NULL; 4203 } 4204 4205 if (zio_injection_enabled && zio->io_error == 0) 4206 zio->io_error = zio_handle_fault_injection(zio, EIO); 4207 4208 /* 4209 * If the I/O failed, determine whether we should attempt to retry it. 4210 * 4211 * On retry, we cut in line in the issue queue, since we don't want 4212 * compression/checksumming/etc. work to prevent our (cheap) IO reissue. 4213 */ 4214 if (zio->io_error && vd == NULL && 4215 !(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) { 4216 ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE)); /* not a leaf */ 4217 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS)); /* not a leaf */ 4218 zio->io_error = 0; 4219 zio->io_flags |= ZIO_FLAG_IO_RETRY | ZIO_FLAG_DONT_AGGREGATE; 4220 zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1; 4221 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, 4222 zio_requeue_io_start_cut_in_line); 4223 return (NULL); 4224 } 4225 4226 /* 4227 * If we got an error on a leaf device, convert it to ENXIO 4228 * if the device is not accessible at all. 4229 */ 4230 if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf && 4231 !vdev_accessible(vd, zio)) 4232 zio->io_error = SET_ERROR(ENXIO); 4233 4234 /* 4235 * If we can't write to an interior vdev (mirror or RAID-Z), 4236 * set vdev_cant_write so that we stop trying to allocate from it. 
4237 */ 4238 if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE && 4239 vd != NULL && !vd->vdev_ops->vdev_op_leaf) { 4240 vdev_dbgmsg(vd, "zio_vdev_io_assess(zio=%px) setting " 4241 "cant_write=TRUE due to write failure with ENXIO", 4242 zio); 4243 vd->vdev_cant_write = B_TRUE; 4244 } 4245 4246 /* 4247 * If a cache flush returns ENOTSUP or ENOTTY, we know that no future 4248 * attempts will ever succeed. In this case we set a persistent 4249 * boolean flag so that we don't bother with it in the future. 4250 */ 4251 if ((zio->io_error == ENOTSUP || zio->io_error == ENOTTY) && 4252 zio->io_type == ZIO_TYPE_FLUSH && vd != NULL) 4253 vd->vdev_nowritecache = B_TRUE; 4254 4255 if (zio->io_error) 4256 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 4257 4258 return (zio); 4259 } 4260 4261 void 4262 zio_vdev_io_reissue(zio_t *zio) 4263 { 4264 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); 4265 ASSERT(zio->io_error == 0); 4266 4267 zio->io_stage >>= 1; 4268 } 4269 4270 void 4271 zio_vdev_io_redone(zio_t *zio) 4272 { 4273 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE); 4274 4275 zio->io_stage >>= 1; 4276 } 4277 4278 void 4279 zio_vdev_io_bypass(zio_t *zio) 4280 { 4281 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); 4282 ASSERT(zio->io_error == 0); 4283 4284 zio->io_flags |= ZIO_FLAG_IO_BYPASS; 4285 zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1; 4286 } 4287 4288 /* 4289 * ========================================================================== 4290 * Encrypt and store encryption parameters 4291 * ========================================================================== 4292 */ 4293 4294 4295 /* 4296 * This function is used for ZIO_STAGE_ENCRYPT. It is responsible for 4297 * managing the storage of encryption parameters and passing them to the 4298 * lower-level encryption functions. 
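 *
 * Several cases are handled without encrypting the payload here: gang
 * children (the root zio already encrypted the data), indirect blocks
 * (which only authenticate a checksum of the lower-level MACs), objset
 * blocks (which carry two embedded MACs of their own), and unencrypted
 * metadata types (which are simply authenticated with a MAC).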
4299 */ 4300 static zio_t * 4301 zio_encrypt(zio_t *zio) 4302 { 4303 zio_prop_t *zp = &zio->io_prop; 4304 spa_t *spa = zio->io_spa; 4305 blkptr_t *bp = zio->io_bp; 4306 uint64_t psize = BP_GET_PSIZE(bp); 4307 uint64_t dsobj = zio->io_bookmark.zb_objset; 4308 dmu_object_type_t ot = BP_GET_TYPE(bp); 4309 void *enc_buf = NULL; 4310 abd_t *eabd = NULL; 4311 uint8_t salt[ZIO_DATA_SALT_LEN]; 4312 uint8_t iv[ZIO_DATA_IV_LEN]; 4313 uint8_t mac[ZIO_DATA_MAC_LEN]; 4314 boolean_t no_crypt = B_FALSE; 4315 4316 /* the root zio already encrypted the data */ 4317 if (zio->io_child_type == ZIO_CHILD_GANG) 4318 return (zio); 4319 4320 /* only ZIL blocks are re-encrypted on rewrite */ 4321 if (!IO_IS_ALLOCATING(zio) && ot != DMU_OT_INTENT_LOG) 4322 return (zio); 4323 4324 if (!(zp->zp_encrypt || BP_IS_ENCRYPTED(bp))) { 4325 BP_SET_CRYPT(bp, B_FALSE); 4326 return (zio); 4327 } 4328 4329 /* if we are doing raw encryption set the provided encryption params */ 4330 if (zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) { 4331 ASSERT0(BP_GET_LEVEL(bp)); 4332 BP_SET_CRYPT(bp, B_TRUE); 4333 BP_SET_BYTEORDER(bp, zp->zp_byteorder); 4334 if (ot != DMU_OT_OBJSET) 4335 zio_crypt_encode_mac_bp(bp, zp->zp_mac); 4336 4337 /* dnode blocks must be written out in the provided byteorder */ 4338 if (zp->zp_byteorder != ZFS_HOST_BYTEORDER && 4339 ot == DMU_OT_DNODE) { 4340 void *bswap_buf = zio_buf_alloc(psize); 4341 abd_t *babd = abd_get_from_buf(bswap_buf, psize); 4342 4343 ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF); 4344 abd_copy_to_buf(bswap_buf, zio->io_abd, psize); 4345 dmu_ot_byteswap[DMU_OT_BYTESWAP(ot)].ob_func(bswap_buf, 4346 psize); 4347 4348 abd_take_ownership_of_buf(babd, B_TRUE); 4349 zio_push_transform(zio, babd, psize, psize, NULL); 4350 } 4351 4352 if (DMU_OT_IS_ENCRYPTED(ot)) 4353 zio_crypt_encode_params_bp(bp, zp->zp_salt, zp->zp_iv); 4354 return (zio); 4355 } 4356 4357 /* indirect blocks only maintain a cksum of the lower level MACs */ 4358 if (BP_GET_LEVEL(bp) > 0) { 4359 BP_SET_CRYPT(bp, B_TRUE); 4360 VERIFY0(zio_crypt_do_indirect_mac_checksum_abd(B_TRUE, 4361 zio->io_orig_abd, BP_GET_LSIZE(bp), BP_SHOULD_BYTESWAP(bp), 4362 mac)); 4363 zio_crypt_encode_mac_bp(bp, mac); 4364 return (zio); 4365 } 4366 4367 /* 4368 * Objset blocks are a special case since they have 2 256-bit MACs 4369 * embedded within them. 4370 */ 4371 if (ot == DMU_OT_OBJSET) { 4372 ASSERT0(DMU_OT_IS_ENCRYPTED(ot)); 4373 ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF); 4374 BP_SET_CRYPT(bp, B_TRUE); 4375 VERIFY0(spa_do_crypt_objset_mac_abd(B_TRUE, spa, dsobj, 4376 zio->io_abd, psize, BP_SHOULD_BYTESWAP(bp))); 4377 return (zio); 4378 } 4379 4380 /* unencrypted object types are only authenticated with a MAC */ 4381 if (!DMU_OT_IS_ENCRYPTED(ot)) { 4382 BP_SET_CRYPT(bp, B_TRUE); 4383 VERIFY0(spa_do_crypt_mac_abd(B_TRUE, spa, dsobj, 4384 zio->io_abd, psize, mac)); 4385 zio_crypt_encode_mac_bp(bp, mac); 4386 return (zio); 4387 } 4388 4389 /* 4390 * Later passes of sync-to-convergence may decide to rewrite data 4391 * in place to avoid more disk reallocations. This presents a problem 4392 * for encryption because this constitutes rewriting the new data with 4393 * the same encryption key and IV. However, this only applies to blocks 4394 * in the MOS (particularly the spacemaps) and we do not encrypt the 4395 * MOS. We assert that the zio is allocating or an intent log write 4396 * to enforce this. 
4397 */ 4398 ASSERT(IO_IS_ALLOCATING(zio) || ot == DMU_OT_INTENT_LOG); 4399 ASSERT(BP_GET_LEVEL(bp) == 0 || ot == DMU_OT_INTENT_LOG); 4400 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_ENCRYPTION)); 4401 ASSERT3U(psize, !=, 0); 4402 4403 enc_buf = zio_buf_alloc(psize); 4404 eabd = abd_get_from_buf(enc_buf, psize); 4405 abd_take_ownership_of_buf(eabd, B_TRUE); 4406 4407 /* 4408 * For an explanation of what encryption parameters are stored 4409 * where, see the block comment in zio_crypt.c. 4410 */ 4411 if (ot == DMU_OT_INTENT_LOG) { 4412 zio_crypt_decode_params_bp(bp, salt, iv); 4413 } else { 4414 BP_SET_CRYPT(bp, B_TRUE); 4415 } 4416 4417 /* Perform the encryption. This should not fail */ 4418 VERIFY0(spa_do_crypt_abd(B_TRUE, spa, &zio->io_bookmark, 4419 BP_GET_TYPE(bp), BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp), 4420 salt, iv, mac, psize, zio->io_abd, eabd, &no_crypt)); 4421 4422 /* encode encryption metadata into the bp */ 4423 if (ot == DMU_OT_INTENT_LOG) { 4424 /* 4425 * ZIL blocks store the MAC in the embedded checksum, so the 4426 * transform must always be applied. 4427 */ 4428 zio_crypt_encode_mac_zil(enc_buf, mac); 4429 zio_push_transform(zio, eabd, psize, psize, NULL); 4430 } else { 4431 BP_SET_CRYPT(bp, B_TRUE); 4432 zio_crypt_encode_params_bp(bp, salt, iv); 4433 zio_crypt_encode_mac_bp(bp, mac); 4434 4435 if (no_crypt) { 4436 ASSERT3U(ot, ==, DMU_OT_DNODE); 4437 abd_free(eabd); 4438 } else { 4439 zio_push_transform(zio, eabd, psize, psize, NULL); 4440 } 4441 } 4442 4443 return (zio); 4444 } 4445 4446 /* 4447 * ========================================================================== 4448 * Generate and verify checksums 4449 * ========================================================================== 4450 */ 4451 static zio_t * 4452 zio_checksum_generate(zio_t *zio) 4453 { 4454 blkptr_t *bp = zio->io_bp; 4455 enum zio_checksum checksum; 4456 4457 if (bp == NULL) { 4458 /* 4459 * This is zio_write_phys(). 4460 * We're either generating a label checksum, or none at all. 4461 */ 4462 checksum = zio->io_prop.zp_checksum; 4463 4464 if (checksum == ZIO_CHECKSUM_OFF) 4465 return (zio); 4466 4467 ASSERT(checksum == ZIO_CHECKSUM_LABEL); 4468 } else { 4469 if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) { 4470 ASSERT(!IO_IS_ALLOCATING(zio)); 4471 checksum = ZIO_CHECKSUM_GANG_HEADER; 4472 } else { 4473 checksum = BP_GET_CHECKSUM(bp); 4474 } 4475 } 4476 4477 zio_checksum_compute(zio, checksum, zio->io_abd, zio->io_size); 4478 4479 return (zio); 4480 } 4481 4482 static zio_t * 4483 zio_checksum_verify(zio_t *zio) 4484 { 4485 zio_bad_cksum_t info; 4486 blkptr_t *bp = zio->io_bp; 4487 int error; 4488 4489 ASSERT(zio->io_vd != NULL); 4490 4491 if (bp == NULL) { 4492 /* 4493 * This is zio_read_phys(). 4494 * We're either verifying a label checksum, or nothing at all. 
4495 */ 4496 if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF) 4497 return (zio); 4498 4499 ASSERT3U(zio->io_prop.zp_checksum, ==, ZIO_CHECKSUM_LABEL); 4500 } 4501 4502 if ((error = zio_checksum_error(zio, &info)) != 0) { 4503 zio->io_error = error; 4504 if (error == ECKSUM && 4505 !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) { 4506 mutex_enter(&zio->io_vd->vdev_stat_lock); 4507 zio->io_vd->vdev_stat.vs_checksum_errors++; 4508 mutex_exit(&zio->io_vd->vdev_stat_lock); 4509 (void) zfs_ereport_start_checksum(zio->io_spa, 4510 zio->io_vd, &zio->io_bookmark, zio, 4511 zio->io_offset, zio->io_size, &info); 4512 } 4513 } 4514 4515 return (zio); 4516 } 4517 4518 /* 4519 * Called by RAID-Z to ensure we don't compute the checksum twice. 4520 */ 4521 void 4522 zio_checksum_verified(zio_t *zio) 4523 { 4524 zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY; 4525 } 4526 4527 /* 4528 * ========================================================================== 4529 * Error rank. Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other. 4530 * An error of 0 indicates success. ENXIO indicates whole-device failure, 4531 * which may be transient (e.g. unplugged) or permanent. ECKSUM and EIO 4532 * indicate errors that are specific to one I/O, and most likely permanent. 4533 * Any other error is presumed to be worse because we weren't expecting it. 4534 * ========================================================================== 4535 */ 4536 int 4537 zio_worst_error(int e1, int e2) 4538 { 4539 static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO }; 4540 int r1, r2; 4541 4542 for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++) 4543 if (e1 == zio_error_rank[r1]) 4544 break; 4545 4546 for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++) 4547 if (e2 == zio_error_rank[r2]) 4548 break; 4549 4550 return (r1 > r2 ? e1 : e2); 4551 } 4552 4553 /* 4554 * ========================================================================== 4555 * I/O completion 4556 * ========================================================================== 4557 */ 4558 static zio_t * 4559 zio_ready(zio_t *zio) 4560 { 4561 blkptr_t *bp = zio->io_bp; 4562 zio_t *pio, *pio_next; 4563 zio_link_t *zl = NULL; 4564 4565 if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT | 4566 ZIO_CHILD_GANG_BIT | ZIO_CHILD_DDT_BIT, ZIO_WAIT_READY)) { 4567 return (NULL); 4568 } 4569 4570 if (zio->io_ready) { 4571 ASSERT(IO_IS_ALLOCATING(zio)); 4572 ASSERT(BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg || 4573 BP_IS_HOLE(bp) || (zio->io_flags & ZIO_FLAG_NOPWRITE)); 4574 ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0); 4575 4576 zio->io_ready(zio); 4577 } 4578 4579 #ifdef ZFS_DEBUG 4580 if (bp != NULL && bp != &zio->io_bp_copy) 4581 zio->io_bp_copy = *bp; 4582 #endif 4583 4584 if (zio->io_error != 0) { 4585 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 4586 4587 if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 4588 ASSERT(IO_IS_ALLOCATING(zio)); 4589 ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 4590 ASSERT(zio->io_metaslab_class != NULL); 4591 ASSERT(ZIO_HAS_ALLOCATOR(zio)); 4592 4593 /* 4594 * We were unable to allocate anything; unreserve and 4595 * issue the next I/O to allocate.
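 * The next queued zio for this allocator, if any, is pulled off the
 * queue and dispatched by zio_allocate_dispatch().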
4596 */ 4597 metaslab_class_throttle_unreserve( 4598 zio->io_metaslab_class, zio->io_prop.zp_copies, 4599 zio->io_allocator, zio); 4600 zio_allocate_dispatch(zio->io_spa, zio->io_allocator); 4601 } 4602 } 4603 4604 mutex_enter(&zio->io_lock); 4605 zio->io_state[ZIO_WAIT_READY] = 1; 4606 pio = zio_walk_parents(zio, &zl); 4607 mutex_exit(&zio->io_lock); 4608 4609 /* 4610 * As we notify zio's parents, new parents could be added. 4611 * New parents go to the head of zio's io_parent_list, however, 4612 * so we will (correctly) not notify them. The remainder of zio's 4613 * io_parent_list, from 'pio_next' onward, cannot change because 4614 * all parents must wait for us to be done before they can be done. 4615 */ 4616 for (; pio != NULL; pio = pio_next) { 4617 pio_next = zio_walk_parents(zio, &zl); 4618 zio_notify_parent(pio, zio, ZIO_WAIT_READY, NULL); 4619 } 4620 4621 if (zio->io_flags & ZIO_FLAG_NODATA) { 4622 if (bp != NULL && BP_IS_GANG(bp)) { 4623 zio->io_flags &= ~ZIO_FLAG_NODATA; 4624 } else { 4625 ASSERT((uintptr_t)zio->io_abd < SPA_MAXBLOCKSIZE); 4626 zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES; 4627 } 4628 } 4629 4630 if (zio_injection_enabled && 4631 zio->io_spa->spa_syncing_txg == zio->io_txg) 4632 zio_handle_ignored_writes(zio); 4633 4634 return (zio); 4635 } 4636 4637 /* 4638 * Update the allocation throttle accounting. 4639 */ 4640 static void 4641 zio_dva_throttle_done(zio_t *zio) 4642 { 4643 zio_t *lio __maybe_unused = zio->io_logical; 4644 zio_t *pio = zio_unique_parent(zio); 4645 vdev_t *vd = zio->io_vd; 4646 int flags = METASLAB_ASYNC_ALLOC; 4647 4648 ASSERT3P(zio->io_bp, !=, NULL); 4649 ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE); 4650 ASSERT3U(zio->io_priority, ==, ZIO_PRIORITY_ASYNC_WRITE); 4651 ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV); 4652 ASSERT(vd != NULL); 4653 ASSERT3P(vd, ==, vd->vdev_top); 4654 ASSERT(zio_injection_enabled || !(zio->io_flags & ZIO_FLAG_IO_RETRY)); 4655 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR)); 4656 ASSERT(zio->io_flags & ZIO_FLAG_IO_ALLOCATING); 4657 ASSERT(!(lio->io_flags & ZIO_FLAG_IO_REWRITE)); 4658 ASSERT(!(lio->io_orig_flags & ZIO_FLAG_NODATA)); 4659 4660 /* 4661 * Parents of gang children can have two flavors -- ones that 4662 * allocated the gang header (will have ZIO_FLAG_IO_REWRITE set) 4663 * and ones that allocated the constituent blocks. The allocation 4664 * throttle needs to know the allocating parent zio so we must find 4665 * it here. 4666 */ 4667 if (pio->io_child_type == ZIO_CHILD_GANG) { 4668 /* 4669 * If our parent is a rewrite gang child then our grandparent 4670 * would have been the one that performed the allocation. 4671 */ 4672 if (pio->io_flags & ZIO_FLAG_IO_REWRITE) 4673 pio = zio_unique_parent(pio); 4674 flags |= METASLAB_GANG_CHILD; 4675 } 4676 4677 ASSERT(IO_IS_ALLOCATING(pio)); 4678 ASSERT(ZIO_HAS_ALLOCATOR(pio)); 4679 ASSERT3P(zio, !=, zio->io_logical); 4680 ASSERT(zio->io_logical != NULL); 4681 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR)); 4682 ASSERT0(zio->io_flags & ZIO_FLAG_NOPWRITE); 4683 ASSERT(zio->io_metaslab_class != NULL); 4684 4685 mutex_enter(&pio->io_lock); 4686 metaslab_group_alloc_decrement(zio->io_spa, vd->vdev_id, pio, flags, 4687 pio->io_allocator, B_TRUE); 4688 mutex_exit(&pio->io_lock); 4689 4690 metaslab_class_throttle_unreserve(zio->io_metaslab_class, 1, 4691 pio->io_allocator, pio); 4692 4693 /* 4694 * Call into the pipeline to see if there is more work that 4695 * needs to be done. If there is work to be done it will be 4696 * dispatched to another taskq thread. 
4697 */ 4698 zio_allocate_dispatch(zio->io_spa, pio->io_allocator); 4699 } 4700 4701 static zio_t * 4702 zio_done(zio_t *zio) 4703 { 4704 /* 4705 * Always attempt to keep stack usage minimal here since 4706 * we can be called recursively up to 19 levels deep. 4707 */ 4708 const uint64_t psize = zio->io_size; 4709 zio_t *pio, *pio_next; 4710 zio_link_t *zl = NULL; 4711 4712 /* 4713 * If our children haven't all completed, 4714 * wait for them and then repeat this pipeline stage. 4715 */ 4716 if (zio_wait_for_children(zio, ZIO_CHILD_ALL_BITS, ZIO_WAIT_DONE)) { 4717 return (NULL); 4718 } 4719 4720 /* 4721 * If the allocation throttle is enabled, then update the accounting. 4722 * We only track child I/Os that are part of an allocating async 4723 * write. We must do this since the allocation is performed 4724 * by the logical I/O but the actual write is done by child I/Os. 4725 */ 4726 if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING && 4727 zio->io_child_type == ZIO_CHILD_VDEV) { 4728 ASSERT(zio->io_metaslab_class != NULL); 4729 ASSERT(zio->io_metaslab_class->mc_alloc_throttle_enabled); 4730 zio_dva_throttle_done(zio); 4731 } 4732 4733 /* 4734 * If the allocation throttle is enabled, verify that 4735 * we have decremented the refcounts for every I/O that was throttled. 4736 */ 4737 if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 4738 ASSERT(zio->io_type == ZIO_TYPE_WRITE); 4739 ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 4740 ASSERT(zio->io_bp != NULL); 4741 ASSERT(ZIO_HAS_ALLOCATOR(zio)); 4742 4743 metaslab_group_alloc_verify(zio->io_spa, zio->io_bp, zio, 4744 zio->io_allocator); 4745 VERIFY(zfs_refcount_not_held(&zio->io_metaslab_class-> 4746 mc_allocator[zio->io_allocator].mca_alloc_slots, zio)); 4747 } 4748 4749 4750 for (int c = 0; c < ZIO_CHILD_TYPES; c++) 4751 for (int w = 0; w < ZIO_WAIT_TYPES; w++) 4752 ASSERT(zio->io_children[c][w] == 0); 4753 4754 if (zio->io_bp != NULL && !BP_IS_EMBEDDED(zio->io_bp)) { 4755 ASSERT(zio->io_bp->blk_pad[0] == 0); 4756 ASSERT(zio->io_bp->blk_pad[1] == 0); 4757 ASSERT(memcmp(zio->io_bp, &zio->io_bp_copy, 4758 sizeof (blkptr_t)) == 0 || 4759 (zio->io_bp == zio_unique_parent(zio)->io_bp)); 4760 if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(zio->io_bp) && 4761 zio->io_bp_override == NULL && 4762 !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) { 4763 ASSERT3U(zio->io_prop.zp_copies, <=, 4764 BP_GET_NDVAS(zio->io_bp)); 4765 ASSERT(BP_COUNT_GANG(zio->io_bp) == 0 || 4766 (BP_COUNT_GANG(zio->io_bp) == 4767 BP_GET_NDVAS(zio->io_bp))); 4768 } 4769 if (zio->io_flags & ZIO_FLAG_NOPWRITE) 4770 VERIFY(BP_EQUAL(zio->io_bp, &zio->io_bp_orig)); 4771 } 4772 4773 /* 4774 * If there were child vdev/gang/ddt errors, they apply to us now. 4775 */ 4776 zio_inherit_child_errors(zio, ZIO_CHILD_VDEV); 4777 zio_inherit_child_errors(zio, ZIO_CHILD_GANG); 4778 zio_inherit_child_errors(zio, ZIO_CHILD_DDT); 4779 4780 /* 4781 * If the I/O on the transformed data was successful, generate any 4782 * checksum reports now while we still have the transformed data. 
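 * If a report requires alignment padding (zcr_align), the data is first
 * copied into a zero-padded buffer of the aligned size.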
4783 */ 4784 if (zio->io_error == 0) { 4785 while (zio->io_cksum_report != NULL) { 4786 zio_cksum_report_t *zcr = zio->io_cksum_report; 4787 uint64_t align = zcr->zcr_align; 4788 uint64_t asize = P2ROUNDUP(psize, align); 4789 abd_t *adata = zio->io_abd; 4790 4791 if (adata != NULL && asize != psize) { 4792 adata = abd_alloc(asize, B_TRUE); 4793 abd_copy(adata, zio->io_abd, psize); 4794 abd_zero_off(adata, psize, asize - psize); 4795 } 4796 4797 zio->io_cksum_report = zcr->zcr_next; 4798 zcr->zcr_next = NULL; 4799 zcr->zcr_finish(zcr, adata); 4800 zfs_ereport_free_checksum(zcr); 4801 4802 if (adata != NULL && asize != psize) 4803 abd_free(adata); 4804 } 4805 } 4806 4807 zio_pop_transforms(zio); /* note: may set zio->io_error */ 4808 4809 vdev_stat_update(zio, psize); 4810 4811 /* 4812 * If this I/O is attached to a particular vdev and is slow, exceeding 4813 * zio_slow_io_ms to complete, post an error describing the I/O delay. 4814 * We ignore these errors if the device is currently unavailable. 4815 */ 4816 if (zio->io_delay >= MSEC2NSEC(zio_slow_io_ms)) { 4817 if (zio->io_vd != NULL && !vdev_is_dead(zio->io_vd)) { 4818 /* 4819 * We want to only increment our slow IO counters if 4820 * the IO is valid (i.e. not if the drive is removed). 4821 * 4822 * zfs_ereport_post() will also do these checks, but 4823 * it can also ratelimit and have other failures, so we 4824 * need to increment the slow_io counters independent 4825 * of it. 4826 */ 4827 if (zfs_ereport_is_valid(FM_EREPORT_ZFS_DELAY, 4828 zio->io_spa, zio->io_vd, zio)) { 4829 mutex_enter(&zio->io_vd->vdev_stat_lock); 4830 zio->io_vd->vdev_stat.vs_slow_ios++; 4831 mutex_exit(&zio->io_vd->vdev_stat_lock); 4832 4833 (void) zfs_ereport_post(FM_EREPORT_ZFS_DELAY, 4834 zio->io_spa, zio->io_vd, &zio->io_bookmark, 4835 zio, 0); 4836 } 4837 } 4838 } 4839 4840 if (zio->io_error) { 4841 /* 4842 * If this I/O is attached to a particular vdev, 4843 * generate an error message describing the I/O failure 4844 * at the block level. We ignore these errors if the 4845 * device is currently unavailable. 4846 */ 4847 if (zio->io_error != ECKSUM && zio->io_vd != NULL && 4848 !vdev_is_dead(zio->io_vd)) { 4849 int ret = zfs_ereport_post(FM_EREPORT_ZFS_IO, 4850 zio->io_spa, zio->io_vd, &zio->io_bookmark, zio, 0); 4851 if (ret != EALREADY) { 4852 mutex_enter(&zio->io_vd->vdev_stat_lock); 4853 if (zio->io_type == ZIO_TYPE_READ) 4854 zio->io_vd->vdev_stat.vs_read_errors++; 4855 else if (zio->io_type == ZIO_TYPE_WRITE) 4856 zio->io_vd->vdev_stat.vs_write_errors++; 4857 mutex_exit(&zio->io_vd->vdev_stat_lock); 4858 } 4859 } 4860 4861 if ((zio->io_error == EIO || !(zio->io_flags & 4862 (ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) && 4863 zio == zio->io_logical) { 4864 /* 4865 * For logical I/O requests, tell the SPA to log the 4866 * error and generate a logical data ereport. 4867 */ 4868 spa_log_error(zio->io_spa, &zio->io_bookmark, 4869 BP_GET_LOGICAL_BIRTH(zio->io_bp)); 4870 (void) zfs_ereport_post(FM_EREPORT_ZFS_DATA, 4871 zio->io_spa, NULL, &zio->io_bookmark, zio, 0); 4872 } 4873 } 4874 4875 if (zio->io_error && zio == zio->io_logical) { 4876 /* 4877 * Determine whether zio should be reexecuted. This will 4878 * propagate all the way to the root via zio_notify_parent().
4879 */ 4880 ASSERT(zio->io_vd == NULL && zio->io_bp != NULL); 4881 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 4882 4883 if (IO_IS_ALLOCATING(zio) && 4884 !(zio->io_flags & ZIO_FLAG_CANFAIL)) { 4885 if (zio->io_error != ENOSPC) 4886 zio->io_reexecute |= ZIO_REEXECUTE_NOW; 4887 else 4888 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 4889 } 4890 4891 if ((zio->io_type == ZIO_TYPE_READ || 4892 zio->io_type == ZIO_TYPE_FREE) && 4893 !(zio->io_flags & ZIO_FLAG_SCAN_THREAD) && 4894 zio->io_error == ENXIO && 4895 spa_load_state(zio->io_spa) == SPA_LOAD_NONE && 4896 spa_get_failmode(zio->io_spa) != ZIO_FAILURE_MODE_CONTINUE) 4897 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 4898 4899 if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute) 4900 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 4901 4902 /* 4903 * Here is a possibly good place to attempt to do 4904 * either combinatorial reconstruction or error correction 4905 * based on checksums. It also might be a good place 4906 * to send out preliminary ereports before we suspend 4907 * processing. 4908 */ 4909 } 4910 4911 /* 4912 * If there were logical child errors, they apply to us now. 4913 * We defer this until now to avoid conflating logical child 4914 * errors with errors that happened to the zio itself when 4915 * updating vdev stats and reporting FMA events above. 4916 */ 4917 zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL); 4918 4919 if ((zio->io_error || zio->io_reexecute) && 4920 IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio && 4921 !(zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE))) 4922 zio_dva_unallocate(zio, zio->io_gang_tree, zio->io_bp); 4923 4924 zio_gang_tree_free(&zio->io_gang_tree); 4925 4926 /* 4927 * Godfather I/Os should never suspend. 4928 */ 4929 if ((zio->io_flags & ZIO_FLAG_GODFATHER) && 4930 (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) 4931 zio->io_reexecute &= ~ZIO_REEXECUTE_SUSPEND; 4932 4933 if (zio->io_reexecute) { 4934 /* 4935 * This is a logical I/O that wants to reexecute. 4936 * 4937 * Reexecute is top-down. When an i/o fails, if it's not 4938 * the root, it simply notifies its parent and sticks around. 4939 * The parent, seeing that it still has children in zio_done(), 4940 * does the same. This percolates all the way up to the root. 4941 * The root i/o will reexecute or suspend the entire tree. 4942 * 4943 * This approach ensures that zio_reexecute() honors 4944 * all the original i/o dependency relationships, e.g. 4945 * parents not executing until children are ready. 4946 */ 4947 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 4948 4949 zio->io_gang_leader = NULL; 4950 4951 mutex_enter(&zio->io_lock); 4952 zio->io_state[ZIO_WAIT_DONE] = 1; 4953 mutex_exit(&zio->io_lock); 4954 4955 /* 4956 * "The Godfather" I/O monitors its children but is 4957 * not a true parent to them. It will track them through 4958 * the pipeline but severs its ties whenever they get into 4959 * trouble (e.g. suspended). This allows "The Godfather" 4960 * I/O to return status without blocking. 4961 */ 4962 zl = NULL; 4963 for (pio = zio_walk_parents(zio, &zl); pio != NULL; 4964 pio = pio_next) { 4965 zio_link_t *remove_zl = zl; 4966 pio_next = zio_walk_parents(zio, &zl); 4967 4968 if ((pio->io_flags & ZIO_FLAG_GODFATHER) && 4969 (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) { 4970 zio_remove_child(pio, zio, remove_zl); 4971 /* 4972 * This is a rare code path, so we don't 4973 * bother with "next_to_execute". 
4974 */ 4975 zio_notify_parent(pio, zio, ZIO_WAIT_DONE, 4976 NULL); 4977 } 4978 } 4979 4980 if ((pio = zio_unique_parent(zio)) != NULL) { 4981 /* 4982 * We're not a root i/o, so there's nothing to do 4983 * but notify our parent. Don't propagate errors 4984 * upward since we haven't permanently failed yet. 4985 */ 4986 ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER)); 4987 zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE; 4988 /* 4989 * This is a rare code path, so we don't bother with 4990 * "next_to_execute". 4991 */ 4992 zio_notify_parent(pio, zio, ZIO_WAIT_DONE, NULL); 4993 } else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) { 4994 /* 4995 * We'd fail again if we reexecuted now, so suspend 4996 * until conditions improve (e.g. device comes online). 4997 */ 4998 zio_suspend(zio->io_spa, zio, ZIO_SUSPEND_IOERR); 4999 } else { 5000 /* 5001 * Reexecution is potentially a huge amount of work. 5002 * Hand it off to the otherwise-unused claim taskq. 5003 */ 5004 ASSERT(taskq_empty_ent(&zio->io_tqent)); 5005 spa_taskq_dispatch_ent(zio->io_spa, 5006 ZIO_TYPE_CLAIM, ZIO_TASKQ_ISSUE, 5007 zio_reexecute, zio, 0, &zio->io_tqent, NULL); 5008 } 5009 return (NULL); 5010 } 5011 5012 ASSERT(list_is_empty(&zio->io_child_list)); 5013 ASSERT(zio->io_reexecute == 0); 5014 ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL)); 5015 5016 /* 5017 * Report any checksum errors, since the I/O is complete. 5018 */ 5019 while (zio->io_cksum_report != NULL) { 5020 zio_cksum_report_t *zcr = zio->io_cksum_report; 5021 zio->io_cksum_report = zcr->zcr_next; 5022 zcr->zcr_next = NULL; 5023 zcr->zcr_finish(zcr, NULL); 5024 zfs_ereport_free_checksum(zcr); 5025 } 5026 5027 /* 5028 * It is the responsibility of the done callback to ensure that this 5029 * particular zio is no longer discoverable for adoption, and as 5030 * such, cannot acquire any new parents. 5031 */ 5032 if (zio->io_done) 5033 zio->io_done(zio); 5034 5035 mutex_enter(&zio->io_lock); 5036 zio->io_state[ZIO_WAIT_DONE] = 1; 5037 mutex_exit(&zio->io_lock); 5038 5039 /* 5040 * We are done executing this zio. We may want to execute a parent 5041 * next. See the comment in zio_notify_parent(). 
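 * Returning the parent to our caller, rather than recursing into it
 * here, keeps deep parent chains from growing the stack.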
5042 */ 5043 zio_t *next_to_execute = NULL; 5044 zl = NULL; 5045 for (pio = zio_walk_parents(zio, &zl); pio != NULL; pio = pio_next) { 5046 zio_link_t *remove_zl = zl; 5047 pio_next = zio_walk_parents(zio, &zl); 5048 zio_remove_child(pio, zio, remove_zl); 5049 zio_notify_parent(pio, zio, ZIO_WAIT_DONE, &next_to_execute); 5050 } 5051 5052 if (zio->io_waiter != NULL) { 5053 mutex_enter(&zio->io_lock); 5054 zio->io_executor = NULL; 5055 cv_broadcast(&zio->io_cv); 5056 mutex_exit(&zio->io_lock); 5057 } else { 5058 zio_destroy(zio); 5059 } 5060 5061 return (next_to_execute); 5062 } 5063 5064 /* 5065 * ========================================================================== 5066 * I/O pipeline definition 5067 * ========================================================================== 5068 */ 5069 static zio_pipe_stage_t *zio_pipeline[] = { 5070 NULL, 5071 zio_read_bp_init, 5072 zio_write_bp_init, 5073 zio_free_bp_init, 5074 zio_issue_async, 5075 zio_write_compress, 5076 zio_encrypt, 5077 zio_checksum_generate, 5078 zio_nop_write, 5079 zio_brt_free, 5080 zio_ddt_read_start, 5081 zio_ddt_read_done, 5082 zio_ddt_write, 5083 zio_ddt_free, 5084 zio_gang_assemble, 5085 zio_gang_issue, 5086 zio_dva_throttle, 5087 zio_dva_allocate, 5088 zio_dva_free, 5089 zio_dva_claim, 5090 zio_ready, 5091 zio_vdev_io_start, 5092 zio_vdev_io_done, 5093 zio_vdev_io_assess, 5094 zio_checksum_verify, 5095 zio_done 5096 }; 5097 5098 5099 5100 5101 /* 5102 * Compare two zbookmark_phys_t's to see which we would reach first in a 5103 * pre-order traversal of the object tree. 5104 * 5105 * This is simple in every case aside from the meta-dnode object. For all other 5106 * objects, we traverse them in order (object 1 before object 2, and so on). 5107 * However, all of these objects are traversed while traversing object 0, since 5108 * the data it points to is the list of objects. Thus, we need to convert to a 5109 * canonical representation so we can compare meta-dnode bookmarks to 5110 * non-meta-dnode bookmarks. 5111 * 5112 * We do this by calculating "equivalents" for each field of the zbookmark. 5113 * zbookmarks outside of the meta-dnode use their own object and level, and 5114 * calculate the level 0 equivalent (the first L0 blkid that is contained in the 5115 * blocks this bookmark refers to) by multiplying their blkid by their span 5116 * (the number of L0 blocks contained within one block at their level). 5117 * zbookmarks inside the meta-dnode calculate their object equivalent 5118 * (which is L0equiv * dnodes per data block), use 0 for their L0equiv, and use 5119 * level + (1<<31) (a value larger than any level could ever be) for their level. 5120 * This causes them to always compare before a bookmark in their object 5121 * equivalent, compare appropriately to bookmarks in other objects, and to 5122 * compare appropriately to other bookmarks in the meta-dnode. 5123 */ 5124 int 5125 zbookmark_compare(uint16_t dbss1, uint8_t ibs1, uint16_t dbss2, uint8_t ibs2, 5126 const zbookmark_phys_t *zb1, const zbookmark_phys_t *zb2) 5127 { 5128 /* 5129 * These variables represent the "equivalent" values for the zbookmark, 5130 * after converting zbookmarks inside the meta-dnode to their 5131 * normal-object equivalents.
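 *
 * As an illustrative example: with 16K meta-dnode data blocks (32
 * sectors, i.e. 32 dnodes per block), a level-0 meta-dnode bookmark with
 * blkid 2 gets an object equivalent of 64 (the first object stored in
 * that block), an L0 equivalent of 0, and a level equivalent of 1<<31.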
5132 */ 5133 uint64_t zb1obj, zb2obj; 5134 uint64_t zb1L0, zb2L0; 5135 uint64_t zb1level, zb2level; 5136 5137 if (zb1->zb_object == zb2->zb_object && 5138 zb1->zb_level == zb2->zb_level && 5139 zb1->zb_blkid == zb2->zb_blkid) 5140 return (0); 5141 5142 IMPLY(zb1->zb_level > 0, ibs1 >= SPA_MINBLOCKSHIFT); 5143 IMPLY(zb2->zb_level > 0, ibs2 >= SPA_MINBLOCKSHIFT); 5144 5145 /* 5146 * BP_SPANB calculates the span in blocks. 5147 */ 5148 zb1L0 = (zb1->zb_blkid) * BP_SPANB(ibs1, zb1->zb_level); 5149 zb2L0 = (zb2->zb_blkid) * BP_SPANB(ibs2, zb2->zb_level); 5150 5151 if (zb1->zb_object == DMU_META_DNODE_OBJECT) { 5152 zb1obj = zb1L0 * (dbss1 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT)); 5153 zb1L0 = 0; 5154 zb1level = zb1->zb_level + COMPARE_META_LEVEL; 5155 } else { 5156 zb1obj = zb1->zb_object; 5157 zb1level = zb1->zb_level; 5158 } 5159 5160 if (zb2->zb_object == DMU_META_DNODE_OBJECT) { 5161 zb2obj = zb2L0 * (dbss2 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT)); 5162 zb2L0 = 0; 5163 zb2level = zb2->zb_level + COMPARE_META_LEVEL; 5164 } else { 5165 zb2obj = zb2->zb_object; 5166 zb2level = zb2->zb_level; 5167 } 5168 5169 /* Now that we have a canonical representation, do the comparison. */ 5170 if (zb1obj != zb2obj) 5171 return (zb1obj < zb2obj ? -1 : 1); 5172 else if (zb1L0 != zb2L0) 5173 return (zb1L0 < zb2L0 ? -1 : 1); 5174 else if (zb1level != zb2level) 5175 return (zb1level > zb2level ? -1 : 1); 5176 /* 5177 * This can (theoretically) happen if the bookmarks have the same object 5178 * and level but different blkids, and the block sizes are not the same. 5179 * There is presently no way to change the indirect block sizes. 5180 */ 5181 return (0); 5182 } 5183 5184 /* 5185 * This function checks the following: given that last_block is the place that 5186 * our traversal stopped last time, does that guarantee that we've visited 5187 * every node under subtree_root? To answer this, we can't just use the raw 5188 * output of zbookmark_compare. We have to pass in a modified version of 5189 * subtree_root: by incrementing the block id and then checking whether 5190 * last_block is before or equal to that, we can tell whether or not having 5191 * visited last_block implies that all of subtree_root's children have been 5192 * visited. 5193 */ 5194 boolean_t 5195 zbookmark_subtree_completed(const dnode_phys_t *dnp, 5196 const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block) 5197 { 5198 zbookmark_phys_t mod_zb = *subtree_root; 5199 mod_zb.zb_blkid++; 5200 ASSERT0(last_block->zb_level); 5201 5202 /* The objset_phys_t isn't before anything. */ 5203 if (dnp == NULL) 5204 return (B_FALSE); 5205 5206 /* 5207 * We pass in 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT) for the 5208 * data block size in sectors, because that variable is only used if 5209 * the bookmark refers to a block in the meta-dnode. Since we don't 5210 * know without examining it what object it refers to, and there's no 5211 * harm in passing in this value in other cases, we always pass it in. 5212 * 5213 * We pass in 0 for the indirect block size shift because zb2 must be 5214 * level 0. The indirect block size is only used to calculate the span 5215 * of the bookmark, but since the bookmark must be level 0, the span is 5216 * always 1, so the math works out. 5217 * 5218 * If you make changes to how the zbookmark_compare code works, be sure 5219 * to verify that this code still works afterwards.
5220 */ 5221 return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift, 5222 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, &mod_zb, 5223 last_block) <= 0); 5224 } 5225 5226 /* 5227 * This function is similar to zbookmark_subtree_completed(), but returns true 5228 * if subtree_root is equal or ahead of last_block, i.e. still to be done. 5229 */ 5230 boolean_t 5231 zbookmark_subtree_tbd(const dnode_phys_t *dnp, 5232 const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block) 5233 { 5234 ASSERT0(last_block->zb_level); 5235 if (dnp == NULL) 5236 return (B_FALSE); 5237 return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift, 5238 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, subtree_root, 5239 last_block) >= 0); 5240 } 5241 5242 EXPORT_SYMBOL(zio_type_name); 5243 EXPORT_SYMBOL(zio_buf_alloc); 5244 EXPORT_SYMBOL(zio_data_buf_alloc); 5245 EXPORT_SYMBOL(zio_buf_free); 5246 EXPORT_SYMBOL(zio_data_buf_free); 5247 5248 ZFS_MODULE_PARAM(zfs_zio, zio_, slow_io_ms, INT, ZMOD_RW, 5249 "Max I/O completion time (milliseconds) before marking it as slow"); 5250 5251 ZFS_MODULE_PARAM(zfs_zio, zio_, requeue_io_start_cut_in_line, INT, ZMOD_RW, 5252 "Prioritize requeued I/O"); 5253 5254 ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_deferred_free, UINT, ZMOD_RW, 5255 "Defer frees starting in this pass"); 5256 5257 ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_dont_compress, UINT, ZMOD_RW, 5258 "Don't compress starting in this pass"); 5259 5260 ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_rewrite, UINT, ZMOD_RW, 5261 "Rewrite new bps starting in this pass"); 5262 5263 ZFS_MODULE_PARAM(zfs_zio, zio_, dva_throttle_enabled, INT, ZMOD_RW, 5264 "Throttle block allocations in the ZIO pipeline"); 5265 5266 ZFS_MODULE_PARAM(zfs_zio, zio_, deadman_log_all, INT, ZMOD_RW, 5267 "Log all slow ZIOs, not just those with vdevs"); 5268