/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2022 by Delphix. All rights reserved.
 * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2019, 2023, 2024, Klara Inc.
 * Copyright (c) 2019, Allan Jude
 * Copyright (c) 2021, Datto, Inc.
 */

#include <sys/sysmacros.h>
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_trim.h>
#include <sys/zio_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/brt.h>
#include <sys/ddt.h>
#include <sys/blkptr.h>
#include <sys/zfeature.h>
#include <sys/dsl_scan.h>
#include <sys/metaslab_impl.h>
#include <sys/time.h>
#include <sys/trace_zfs.h>
#include <sys/abd.h>
#include <sys/dsl_crypt.h>
#include <cityhash.h>

/*
 * ==========================================================================
 * I/O type descriptions
 * ==========================================================================
 */
const char *const zio_type_name[ZIO_TYPES] = {
	/*
	 * Note: Linux kernel thread name length is limited
	 * so these names will differ from upstream OpenZFS.
	 */
	"z_null", "z_rd", "z_wr", "z_fr", "z_cl", "z_flush", "z_trim"
};

int zio_dva_throttle_enabled = B_TRUE;
static int zio_deadman_log_all = B_FALSE;

/*
 * ==========================================================================
 * I/O kmem caches
 * ==========================================================================
 */
static kmem_cache_t *zio_cache;
static kmem_cache_t *zio_link_cache;
kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
static uint64_t zio_buf_cache_allocs[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
static uint64_t zio_buf_cache_frees[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
#endif

/* Mark IOs as "slow" if they take longer than 30 seconds */
static uint_t zio_slow_io_ms = (30 * MILLISEC);

#define	BP_SPANB(indblkshift, level) \
	(((uint64_t)1) << ((level) * ((indblkshift) - SPA_BLKPTRSHIFT)))
#define	COMPARE_META_LEVEL	0x80000000ul
/*
 * The following actions directly affect the spa's sync-to-convergence logic.
 * The values below define the sync pass when we start performing the action.
 * Care should be taken when changing these values as they directly impact
 * spa_sync() performance. Tuning these values may introduce subtle performance
 * pathologies and should only be done in the context of performance analysis.
 * These tunables will eventually be removed and replaced with #defines once
 * enough analysis has been done to determine optimal values.
 *
 * The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that
 * regular blocks are not deferred.
 *
 * Starting in sync pass 8 (zfs_sync_pass_dont_compress), we disable
 * compression (including of metadata). In practice, we don't have this
 * many sync passes, so this has no effect.
 *
 * The original intent was that disabling compression would help the sync
 * passes to converge. However, in practice disabling compression increases
 * the average number of sync passes, because when we turn compression off,
 * the size of many blocks will change and thus we have to re-allocate (not
 * overwrite) them. It also increases the number of 128KB allocations (e.g.
 * for indirect blocks and spacemaps) because these will not be compressed.
 * The 128K allocations are especially detrimental to performance on highly
 * fragmented systems, which may have very few free segments of this size,
 * and may need to load new metaslabs to satisfy 128K allocations.
 */

/* defer frees starting in this pass */
uint_t zfs_sync_pass_deferred_free = 2;

/* don't compress starting in this pass */
static uint_t zfs_sync_pass_dont_compress = 8;

/* rewrite new bps starting in this pass */
static uint_t zfs_sync_pass_rewrite = 2;
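/*
 * Added summary (illustrative, not upstream text): with the default values
 * above, pass 1 of spa_sync() behaves normally; from pass 2 onward frees are
 * deferred and same-size rewrites overwrite their existing block locations
 * instead of reallocating; compression would only be disabled from pass 8,
 * which is rarely reached in practice.
 */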
/*
 * An allocating zio is one that either currently has the DVA allocate
 * stage set or will have it later in its lifetime.
 */
#define	IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)

/*
 * Enable smaller cores by excluding metadata
 * allocations as well.
 */
int zio_exclude_metadata = 0;
static int zio_requeue_io_start_cut_in_line = 1;

#ifdef ZFS_DEBUG
static const int zio_buf_debug_limit = 16384;
#else
static const int zio_buf_debug_limit = 0;
#endif

static inline void __zio_execute(zio_t *zio);

static void zio_taskq_dispatch(zio_t *, zio_taskq_type_t, boolean_t);

void
zio_init(void)
{
	size_t c;

	zio_cache = kmem_cache_create("zio_cache",
	    sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
	zio_link_cache = kmem_cache_create("zio_link_cache",
	    sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
		size_t align, cflags, data_cflags;
		char name[32];

		/*
		 * Create cache for each half-power of 2 size, starting from
		 * SPA_MINBLOCKSIZE. It should give us memory space efficiency
		 * of ~7/8, sufficient for transient allocations mostly using
		 * these caches.
		 */
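		/*
		 * Added note (illustrative): with SPA_MINBLOCKSIZE of 512 the
		 * sizes accepted below are 512, 1024, 1536, 2048, 3072, 4096,
		 * 6144, 8192, 12288, 16384, ... i.e. every power of two plus
		 * the midpoint between successive powers of two; all other
		 * sizes fall through to the next larger cache via the
		 * fill-forward loop at the end of zio_init().
		 */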
		size_t p2 = size;
		while (!ISP2(p2))
			p2 &= p2 - 1;
		if (!IS_P2ALIGNED(size, p2 / 2))
			continue;

#ifndef _KERNEL
		/*
		 * If we are using watchpoints, put each buffer on its own page,
		 * to eliminate the performance overhead of trapping to the
		 * kernel when modifying a non-watched buffer that shares the
		 * page with a watched buffer.
		 */
		if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE))
			continue;
#endif

		if (IS_P2ALIGNED(size, PAGESIZE))
			align = PAGESIZE;
		else
			align = 1 << (highbit64(size ^ (size - 1)) - 1);

		cflags = (zio_exclude_metadata || size > zio_buf_debug_limit) ?
		    KMC_NODEBUG : 0;
		data_cflags = KMC_NODEBUG;
		if (abd_size_alloc_linear(size)) {
			cflags |= KMC_RECLAIMABLE;
			data_cflags |= KMC_RECLAIMABLE;
		}
		if (cflags == data_cflags) {
			/*
			 * Resulting kmem caches would be identical.
			 * Save memory by creating only one.
			 */
			(void) snprintf(name, sizeof (name),
			    "zio_buf_comb_%lu", (ulong_t)size);
			zio_buf_cache[c] = kmem_cache_create(name, size, align,
			    NULL, NULL, NULL, NULL, NULL, cflags);
			zio_data_buf_cache[c] = zio_buf_cache[c];
			continue;
		}
		(void) snprintf(name, sizeof (name), "zio_buf_%lu",
		    (ulong_t)size);
		zio_buf_cache[c] = kmem_cache_create(name, size, align,
		    NULL, NULL, NULL, NULL, NULL, cflags);

		(void) snprintf(name, sizeof (name), "zio_data_buf_%lu",
		    (ulong_t)size);
		zio_data_buf_cache[c] = kmem_cache_create(name, size, align,
		    NULL, NULL, NULL, NULL, NULL, data_cflags);
	}

	while (--c != 0) {
		ASSERT(zio_buf_cache[c] != NULL);
		if (zio_buf_cache[c - 1] == NULL)
			zio_buf_cache[c - 1] = zio_buf_cache[c];

		ASSERT(zio_data_buf_cache[c] != NULL);
		if (zio_data_buf_cache[c - 1] == NULL)
			zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
	}

	zio_inject_init();

	lz4_init();
}

void
zio_fini(void)
{
	size_t n = SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT;

#if defined(ZFS_DEBUG) && !defined(_KERNEL)
	for (size_t i = 0; i < n; i++) {
		if (zio_buf_cache_allocs[i] != zio_buf_cache_frees[i])
			(void) printf("zio_fini: [%d] %llu != %llu\n",
			    (int)((i + 1) << SPA_MINBLOCKSHIFT),
			    (long long unsigned)zio_buf_cache_allocs[i],
			    (long long unsigned)zio_buf_cache_frees[i]);
	}
#endif

	/*
	 * The same kmem cache can show up multiple times in both zio_buf_cache
	 * and zio_data_buf_cache. Do a wasteful but trivially correct scan to
	 * sort it out.
	 */
	for (size_t i = 0; i < n; i++) {
		kmem_cache_t *cache = zio_buf_cache[i];
		if (cache == NULL)
			continue;
		for (size_t j = i; j < n; j++) {
			if (cache == zio_buf_cache[j])
				zio_buf_cache[j] = NULL;
			if (cache == zio_data_buf_cache[j])
				zio_data_buf_cache[j] = NULL;
		}
		kmem_cache_destroy(cache);
	}

	for (size_t i = 0; i < n; i++) {
		kmem_cache_t *cache = zio_data_buf_cache[i];
		if (cache == NULL)
			continue;
		for (size_t j = i; j < n; j++) {
			if (cache == zio_data_buf_cache[j])
				zio_data_buf_cache[j] = NULL;
		}
		kmem_cache_destroy(cache);
	}

	for (size_t i = 0; i < n; i++) {
		VERIFY3P(zio_buf_cache[i], ==, NULL);
		VERIFY3P(zio_data_buf_cache[i], ==, NULL);
	}

	kmem_cache_destroy(zio_link_cache);
	kmem_cache_destroy(zio_cache);

	zio_inject_fini();

	lz4_fini();
}

/*
 * ==========================================================================
 * Allocate and free I/O buffers
 * ==========================================================================
 */

#ifdef ZFS_DEBUG
static const ulong_t zio_buf_canary = (ulong_t)0xdeadc0dedead210b;
#endif

/*
 * Use empty space after the buffer to detect overflows.
 *
 * Since zio_init() creates kmem caches only for a certain set of buffer
 * sizes, allocations of different sizes may have some unused space after the
 * data. Filling part of that space with a known pattern on allocation and
 * checking it on free should allow us to detect some buffer overflows.
 */
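/*
 * Worked example (added, illustrative): zio_buf_alloc(3000) maps to cache
 * index c = (3000 - 1) >> SPA_MINBLOCKSHIFT = 5, i.e. a 3072-byte buffer.
 * zio_buf_put_canary() fills the 72 unused bytes (offsets 3000..3071) with
 * zio_buf_canary, and zio_buf_check_canary() verifies that pattern on free,
 * catching writes that run past the requested 3000 bytes.
 */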
static void
zio_buf_put_canary(ulong_t *p, size_t size, kmem_cache_t **cache, size_t c)
{
#ifdef ZFS_DEBUG
	size_t off = P2ROUNDUP(size, sizeof (ulong_t));
	ulong_t *canary = p + off / sizeof (ulong_t);
	size_t asize = (c + 1) << SPA_MINBLOCKSHIFT;
	if (c + 1 < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT &&
	    cache[c] == cache[c + 1])
		asize = (c + 2) << SPA_MINBLOCKSHIFT;
	for (; off < asize; canary++, off += sizeof (ulong_t))
		*canary = zio_buf_canary;
#endif
}

static void
zio_buf_check_canary(ulong_t *p, size_t size, kmem_cache_t **cache, size_t c)
{
#ifdef ZFS_DEBUG
	size_t off = P2ROUNDUP(size, sizeof (ulong_t));
	ulong_t *canary = p + off / sizeof (ulong_t);
	size_t asize = (c + 1) << SPA_MINBLOCKSHIFT;
	if (c + 1 < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT &&
	    cache[c] == cache[c + 1])
		asize = (c + 2) << SPA_MINBLOCKSHIFT;
	for (; off < asize; canary++, off += sizeof (ulong_t)) {
		if (unlikely(*canary != zio_buf_canary)) {
			PANIC("ZIO buffer overflow %p (%zu) + %zu %#lx != %#lx",
			    p, size, (canary - p) * sizeof (ulong_t),
			    *canary, zio_buf_canary);
		}
	}
#endif
}

/*
 * Use zio_buf_alloc to allocate ZFS metadata. This data will appear in a
 * crashdump if the kernel panics, so use it judiciously. Obviously, it's
 * useful to inspect ZFS metadata, but if possible, we should avoid keeping
 * excess / transient data in-core during a crashdump.
 */
void *
zio_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
	atomic_add_64(&zio_buf_cache_allocs[c], 1);
#endif

	void *p = kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE);
	zio_buf_put_canary(p, size, zio_buf_cache, c);
	return (p);
}

/*
 * Use zio_data_buf_alloc to allocate data. The data will not appear in a
 * crashdump if the kernel panics. This exists so that we will limit the amount
 * of ZFS data that shows up in a kernel crashdump. (Thus reducing the amount
 * of kernel heap dumped to disk when the kernel panics)
 */
void *
zio_data_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	void *p = kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE);
	zio_buf_put_canary(p, size, zio_data_buf_cache, c);
	return (p);
}

void
zio_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
	atomic_add_64(&zio_buf_cache_frees[c], 1);
#endif

	zio_buf_check_canary(buf, size, zio_buf_cache, c);
	kmem_cache_free(zio_buf_cache[c], buf);
}

void
zio_data_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	zio_buf_check_canary(buf, size, zio_data_buf_cache, c);
	kmem_cache_free(zio_data_buf_cache[c], buf);
}

static void
zio_abd_free(void *abd, size_t size)
{
	(void) size;
	abd_free((abd_t *)abd);
}

/*
 * ==========================================================================
 * Push and pop I/O transform buffers
 * ==========================================================================
 */
void
zio_push_transform(zio_t *zio, abd_t *data, uint64_t size, uint64_t bufsize,
    zio_transform_func_t *transform)
{
	zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);

	zt->zt_orig_abd = zio->io_abd;
	zt->zt_orig_size = zio->io_size;
	zt->zt_bufsize = bufsize;
	zt->zt_transform = transform;

	zt->zt_next = zio->io_transform_stack;
	zio->io_transform_stack = zt;

	zio->io_abd = data;
	zio->io_size = size;
}

void
zio_pop_transforms(zio_t *zio)
{
	zio_transform_t *zt;

	while ((zt = zio->io_transform_stack) != NULL) {
		if (zt->zt_transform != NULL)
			zt->zt_transform(zio,
			    zt->zt_orig_abd, zt->zt_orig_size);

		if (zt->zt_bufsize != 0)
			abd_free(zio->io_abd);

		zio->io_abd = zt->zt_orig_abd;
		zio->io_size = zt->zt_orig_size;
		zio->io_transform_stack = zt->zt_next;

		kmem_free(zt, sizeof (zio_transform_t));
	}
}
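/*
 * Added note (illustrative): transforms are popped in LIFO order, so the
 * transform pushed last runs first in zio_pop_transforms(). For example,
 * zio_read_bp_init() below pushes a zio_decompress transform and then, for
 * protected blocks, a zio_decrypt transform; on completion the data read from
 * disk is therefore decrypted first and decompressed second, each step
 * copying into the buffer saved by the corresponding push.
 */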
/*
 * ==========================================================================
 * I/O transform callbacks for subblocks, decompression, and decryption
 * ==========================================================================
 */
static void
zio_subblock(zio_t *zio, abd_t *data, uint64_t size)
{
	ASSERT(zio->io_size > size);

	if (zio->io_type == ZIO_TYPE_READ)
		abd_copy(data, zio->io_abd, size);
}

static void
zio_decompress(zio_t *zio, abd_t *data, uint64_t size)
{
	if (zio->io_error == 0) {
		void *tmp = abd_borrow_buf(data, size);
		int ret = zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
		    zio->io_abd, tmp, zio->io_size, size,
		    &zio->io_prop.zp_complevel);
		abd_return_buf_copy(data, tmp, size);

		if (zio_injection_enabled && ret == 0)
			ret = zio_handle_fault_injection(zio, EINVAL);

		if (ret != 0)
			zio->io_error = SET_ERROR(EIO);
	}
}

static void
zio_decrypt(zio_t *zio, abd_t *data, uint64_t size)
{
	int ret;
	void *tmp;
	blkptr_t *bp = zio->io_bp;
	spa_t *spa = zio->io_spa;
	uint64_t dsobj = zio->io_bookmark.zb_objset;
	uint64_t lsize = BP_GET_LSIZE(bp);
	dmu_object_type_t ot = BP_GET_TYPE(bp);
	uint8_t salt[ZIO_DATA_SALT_LEN];
	uint8_t iv[ZIO_DATA_IV_LEN];
	uint8_t mac[ZIO_DATA_MAC_LEN];
	boolean_t no_crypt = B_FALSE;

	ASSERT(BP_USES_CRYPT(bp));
	ASSERT3U(size, !=, 0);

	if (zio->io_error != 0)
		return;

	/*
	 * Verify the cksum of MACs stored in an indirect bp. It will always
	 * be possible to verify this since it does not require an encryption
	 * key.
	 */
	if (BP_HAS_INDIRECT_MAC_CKSUM(bp)) {
		zio_crypt_decode_mac_bp(bp, mac);

		if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF) {
			/*
			 * We haven't decompressed the data yet, but
			 * zio_crypt_do_indirect_mac_checksum() requires
			 * decompressed data to be able to parse out the MACs
			 * from the indirect block. We decompress it now and
			 * throw away the result after we are finished.
			 */
			tmp = zio_buf_alloc(lsize);
			ret = zio_decompress_data(BP_GET_COMPRESS(bp),
			    zio->io_abd, tmp, zio->io_size, lsize,
			    &zio->io_prop.zp_complevel);
			if (ret != 0) {
				ret = SET_ERROR(EIO);
				goto error;
			}
			ret = zio_crypt_do_indirect_mac_checksum(B_FALSE,
			    tmp, lsize, BP_SHOULD_BYTESWAP(bp), mac);
			zio_buf_free(tmp, lsize);
		} else {
			ret = zio_crypt_do_indirect_mac_checksum_abd(B_FALSE,
			    zio->io_abd, size, BP_SHOULD_BYTESWAP(bp), mac);
		}
		abd_copy(data, zio->io_abd, size);

		if (zio_injection_enabled && ot != DMU_OT_DNODE && ret == 0) {
			ret = zio_handle_decrypt_injection(spa,
			    &zio->io_bookmark, ot, ECKSUM);
		}
		if (ret != 0)
			goto error;

		return;
	}

	/*
	 * If this is an authenticated block, just check the MAC. It would be
	 * nice to separate this out into its own flag, but when this was done,
	 * we had run out of bits in what is now zio_flag_t. Future cleanup
	 * could make this a flag bit.
	 */
	if (BP_IS_AUTHENTICATED(bp)) {
		if (ot == DMU_OT_OBJSET) {
			ret = spa_do_crypt_objset_mac_abd(B_FALSE, spa,
			    dsobj, zio->io_abd, size, BP_SHOULD_BYTESWAP(bp));
		} else {
			zio_crypt_decode_mac_bp(bp, mac);
			ret = spa_do_crypt_mac_abd(B_FALSE, spa, dsobj,
			    zio->io_abd, size, mac);
			if (zio_injection_enabled && ret == 0) {
				ret = zio_handle_decrypt_injection(spa,
				    &zio->io_bookmark, ot, ECKSUM);
			}
		}
		abd_copy(data, zio->io_abd, size);

		if (ret != 0)
			goto error;

		return;
	}

	zio_crypt_decode_params_bp(bp, salt, iv);

	if (ot == DMU_OT_INTENT_LOG) {
		tmp = abd_borrow_buf_copy(zio->io_abd, sizeof (zil_chain_t));
		zio_crypt_decode_mac_zil(tmp, mac);
		abd_return_buf(zio->io_abd, tmp, sizeof (zil_chain_t));
	} else {
		zio_crypt_decode_mac_bp(bp, mac);
	}

	ret = spa_do_crypt_abd(B_FALSE, spa, &zio->io_bookmark, BP_GET_TYPE(bp),
	    BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp), salt, iv, mac, size, data,
	    zio->io_abd, &no_crypt);
	if (no_crypt)
		abd_copy(data, zio->io_abd, size);

	if (ret != 0)
		goto error;

	return;

error:
	/* assert that the key was found unless this was speculative */
	ASSERT(ret != EACCES || (zio->io_flags & ZIO_FLAG_SPECULATIVE));

	/*
	 * If there was a decryption / authentication error return EIO as
	 * the io_error. If this was not a speculative zio, create an ereport.
	 */
	if (ret == ECKSUM) {
		zio->io_error = SET_ERROR(EIO);
		if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) {
			spa_log_error(spa, &zio->io_bookmark,
			    BP_GET_LOGICAL_BIRTH(zio->io_bp));
			(void) zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION,
			    spa, NULL, &zio->io_bookmark, zio, 0);
		}
	} else {
		zio->io_error = ret;
	}
}

/*
 * ==========================================================================
 * I/O parent/child relationships and pipeline interlocks
 * ==========================================================================
 */
zio_t *
zio_walk_parents(zio_t *cio, zio_link_t **zl)
{
	list_t *pl = &cio->io_parent_list;

	*zl = (*zl == NULL) ? list_head(pl) : list_next(pl, *zl);
	if (*zl == NULL)
		return (NULL);

	ASSERT((*zl)->zl_child == cio);
	return ((*zl)->zl_parent);
}

zio_t *
zio_walk_children(zio_t *pio, zio_link_t **zl)
{
	list_t *cl = &pio->io_child_list;

	ASSERT(MUTEX_HELD(&pio->io_lock));

	*zl = (*zl == NULL) ? list_head(cl) : list_next(cl, *zl);
	if (*zl == NULL)
		return (NULL);

	ASSERT((*zl)->zl_parent == pio);
	return ((*zl)->zl_child);
}

zio_t *
zio_unique_parent(zio_t *cio)
{
	zio_link_t *zl = NULL;
	zio_t *pio = zio_walk_parents(cio, &zl);

	VERIFY3P(zio_walk_parents(cio, &zl), ==, NULL);
	return (pio);
}
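/*
 * Added usage note (illustrative): the walkers above are cursor-style
 * iterators; the caller initializes a zio_link_t pointer to NULL and passes
 * its address on every call, e.g.:
 *
 *	zio_link_t *zl = NULL;
 *	zio_t *pio;
 *	while ((pio = zio_walk_parents(cio, &zl)) != NULL)
 *		...examine pio...
 *
 * zio_unique_parent() above is exactly this pattern plus a VERIFY that no
 * second parent exists.
 */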
void
zio_add_child(zio_t *pio, zio_t *cio)
{
	/*
	 * Logical I/Os can have logical, gang, or vdev children.
	 * Gang I/Os can have gang or vdev children.
	 * Vdev I/Os can only have vdev children.
	 * The following ASSERT captures all of these constraints.
	 */
	ASSERT3S(cio->io_child_type, <=, pio->io_child_type);

	/* Parent should not have READY stage if child doesn't have it. */
	IMPLY((cio->io_pipeline & ZIO_STAGE_READY) == 0 &&
	    (cio->io_child_type != ZIO_CHILD_VDEV),
	    (pio->io_pipeline & ZIO_STAGE_READY) == 0);

	zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);
	zl->zl_parent = pio;
	zl->zl_child = cio;

	mutex_enter(&pio->io_lock);
	mutex_enter(&cio->io_lock);

	ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);

	uint64_t *countp = pio->io_children[cio->io_child_type];
	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
		countp[w] += !cio->io_state[w];

	list_insert_head(&pio->io_child_list, zl);
	list_insert_head(&cio->io_parent_list, zl);

	mutex_exit(&cio->io_lock);
	mutex_exit(&pio->io_lock);
}

void
zio_add_child_first(zio_t *pio, zio_t *cio)
{
	/*
	 * Logical I/Os can have logical, gang, or vdev children.
	 * Gang I/Os can have gang or vdev children.
	 * Vdev I/Os can only have vdev children.
	 * The following ASSERT captures all of these constraints.
	 */
	ASSERT3S(cio->io_child_type, <=, pio->io_child_type);

	/* Parent should not have READY stage if child doesn't have it. */
	IMPLY((cio->io_pipeline & ZIO_STAGE_READY) == 0 &&
	    (cio->io_child_type != ZIO_CHILD_VDEV),
	    (pio->io_pipeline & ZIO_STAGE_READY) == 0);

	zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);
	zl->zl_parent = pio;
	zl->zl_child = cio;

	ASSERT(list_is_empty(&cio->io_parent_list));
	list_insert_head(&cio->io_parent_list, zl);

	mutex_enter(&pio->io_lock);

	ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);

	uint64_t *countp = pio->io_children[cio->io_child_type];
	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
		countp[w] += !cio->io_state[w];

	list_insert_head(&pio->io_child_list, zl);

	mutex_exit(&pio->io_lock);
}

static void
zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
{
	ASSERT(zl->zl_parent == pio);
	ASSERT(zl->zl_child == cio);

	mutex_enter(&pio->io_lock);
	mutex_enter(&cio->io_lock);

	list_remove(&pio->io_child_list, zl);
	list_remove(&cio->io_parent_list, zl);

	mutex_exit(&cio->io_lock);
	mutex_exit(&pio->io_lock);
	kmem_cache_free(zio_link_cache, zl);
}

static boolean_t
zio_wait_for_children(zio_t *zio, uint8_t childbits, enum zio_wait_type wait)
{
	boolean_t waiting = B_FALSE;

	mutex_enter(&zio->io_lock);
	ASSERT(zio->io_stall == NULL);
	for (int c = 0; c < ZIO_CHILD_TYPES; c++) {
		if (!(ZIO_CHILD_BIT_IS_SET(childbits, c)))
			continue;

		uint64_t *countp = &zio->io_children[c][wait];
		if (*countp != 0) {
			zio->io_stage >>= 1;
			ASSERT3U(zio->io_stage, !=, ZIO_STAGE_OPEN);
			zio->io_stall = countp;
			waiting = B_TRUE;
			break;
		}
	}
	mutex_exit(&zio->io_lock);
	return (waiting);
}

__attribute__((always_inline))
static inline void
zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait,
    zio_t **next_to_executep)
{
	uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
	int *errorp = &pio->io_child_error[zio->io_child_type];

	mutex_enter(&pio->io_lock);
	if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
		*errorp = zio_worst_error(*errorp, zio->io_error);
	pio->io_reexecute |= zio->io_reexecute;
	ASSERT3U(*countp, >, 0);

	(*countp)--;

	if (*countp == 0 && pio->io_stall == countp) {
		zio_taskq_type_t type =
		    pio->io_stage < ZIO_STAGE_VDEV_IO_START ? ZIO_TASKQ_ISSUE :
		    ZIO_TASKQ_INTERRUPT;
		pio->io_stall = NULL;
		mutex_exit(&pio->io_lock);

		/*
		 * If we can tell the caller to execute this parent next, do
		 * so. We do this if the parent's zio type matches the child's
		 * type, or if it's a zio_null() with no done callback, and so
		 * has no actual work to do. Otherwise dispatch the parent zio
		 * in its own taskq.
		 *
		 * Having the caller execute the parent when possible reduces
		 * locking on the zio taskq's, reduces context switch
		 * overhead, and has no recursion penalty. Note that one
		 * read from disk typically causes at least 3 zio's: a
		 * zio_null(), the logical zio_read(), and then a physical
		 * zio. When the physical ZIO completes, we are able to call
		 * zio_done() on all 3 of these zio's from one invocation of
		 * zio_execute() by returning the parent back to
		 * zio_execute(). Since the parent isn't executed until this
		 * thread returns back to zio_execute(), the caller should do
		 * so promptly.
		 *
		 * In other cases, dispatching the parent prevents
		 * overflowing the stack when we have deeply nested
		 * parent-child relationships, as we do with the "mega zio"
		 * of writes for spa_sync(), and the chain of ZIL blocks.
		 */
		if (next_to_executep != NULL && *next_to_executep == NULL &&
		    (pio->io_type == zio->io_type ||
		    (pio->io_type == ZIO_TYPE_NULL && !pio->io_done))) {
			*next_to_executep = pio;
		} else {
			zio_taskq_dispatch(pio, type, B_FALSE);
		}
	} else {
		mutex_exit(&pio->io_lock);
	}
}

static void
zio_inherit_child_errors(zio_t *zio, enum zio_child c)
{
	if (zio->io_child_error[c] != 0 && zio->io_error == 0)
		zio->io_error = zio->io_child_error[c];
}

int
zio_bookmark_compare(const void *x1, const void *x2)
{
	const zio_t *z1 = x1;
	const zio_t *z2 = x2;

	if (z1->io_bookmark.zb_objset < z2->io_bookmark.zb_objset)
		return (-1);
	if (z1->io_bookmark.zb_objset > z2->io_bookmark.zb_objset)
		return (1);

	if (z1->io_bookmark.zb_object < z2->io_bookmark.zb_object)
		return (-1);
	if (z1->io_bookmark.zb_object > z2->io_bookmark.zb_object)
		return (1);

	if (z1->io_bookmark.zb_level < z2->io_bookmark.zb_level)
		return (-1);
	if (z1->io_bookmark.zb_level > z2->io_bookmark.zb_level)
		return (1);

	if (z1->io_bookmark.zb_blkid < z2->io_bookmark.zb_blkid)
		return (-1);
	if (z1->io_bookmark.zb_blkid > z2->io_bookmark.zb_blkid)
		return (1);

	if (z1 < z2)
		return (-1);
	if (z1 > z2)
		return (1);

	return (0);
}
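/*
 * Added note (illustrative): zio_bookmark_compare() orders zios by bookmark
 * (objset, object, level, blkid) and breaks ties with the zio pointers
 * themselves, giving the total order required for AVL trees of outstanding
 * zios.
 */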
/*
 * ==========================================================================
 * Create the various types of I/O (read, write, free, etc)
 * ==========================================================================
 */
static zio_t *
zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    abd_t *data, uint64_t lsize, uint64_t psize, zio_done_func_t *done,
    void *private, zio_type_t type, zio_priority_t priority,
    zio_flag_t flags, vdev_t *vd, uint64_t offset,
    const zbookmark_phys_t *zb, enum zio_stage stage,
    enum zio_stage pipeline)
{
	zio_t *zio;

	IMPLY(type != ZIO_TYPE_TRIM, psize <= SPA_MAXBLOCKSIZE);
	ASSERT(P2PHASE(psize, SPA_MINBLOCKSIZE) == 0);
	ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);

	ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
	ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
	ASSERT(vd || stage == ZIO_STAGE_OPEN);

	IMPLY(lsize != psize, (flags & ZIO_FLAG_RAW_COMPRESS) != 0);

	zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
	memset(zio, 0, sizeof (zio_t));

	mutex_init(&zio->io_lock, NULL, MUTEX_NOLOCKDEP, NULL);
	cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);

	list_create(&zio->io_parent_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_parent_node));
	list_create(&zio->io_child_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_child_node));
	metaslab_trace_init(&zio->io_alloc_list);

	if (vd != NULL)
		zio->io_child_type = ZIO_CHILD_VDEV;
	else if (flags & ZIO_FLAG_GANG_CHILD)
		zio->io_child_type = ZIO_CHILD_GANG;
	else if (flags & ZIO_FLAG_DDT_CHILD)
		zio->io_child_type = ZIO_CHILD_DDT;
	else
		zio->io_child_type = ZIO_CHILD_LOGICAL;

	if (bp != NULL) {
		if (type != ZIO_TYPE_WRITE ||
		    zio->io_child_type == ZIO_CHILD_DDT) {
			zio->io_bp_copy = *bp;
			zio->io_bp = &zio->io_bp_copy;	/* so caller can free */
		} else {
			zio->io_bp = (blkptr_t *)bp;
		}
		zio->io_bp_orig = *bp;
		if (zio->io_child_type == ZIO_CHILD_LOGICAL)
			zio->io_logical = zio;
		if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
			pipeline |= ZIO_GANG_STAGES;
	}

	zio->io_spa = spa;
	zio->io_txg = txg;
	zio->io_done = done;
	zio->io_private = private;
	zio->io_type = type;
	zio->io_priority = priority;
	zio->io_vd = vd;
	zio->io_offset = offset;
	zio->io_orig_abd = zio->io_abd = data;
	zio->io_orig_size = zio->io_size = psize;
	zio->io_lsize = lsize;
	zio->io_orig_flags = zio->io_flags = flags;
	zio->io_orig_stage = zio->io_stage = stage;
	zio->io_orig_pipeline = zio->io_pipeline = pipeline;
	zio->io_pipeline_trace = ZIO_STAGE_OPEN;
	zio->io_allocator = ZIO_ALLOCATOR_NONE;

	zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY) ||
	    (pipeline & ZIO_STAGE_READY) == 0;
	zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);

	if (zb != NULL)
		zio->io_bookmark = *zb;

	if (pio != NULL) {
		zio->io_metaslab_class = pio->io_metaslab_class;
		if (zio->io_logical == NULL)
			zio->io_logical = pio->io_logical;
		if (zio->io_child_type == ZIO_CHILD_GANG)
			zio->io_gang_leader = pio->io_gang_leader;
		zio_add_child_first(pio, zio);
	}

	taskq_init_ent(&zio->io_tqent);

	return (zio);
}

void
zio_destroy(zio_t *zio)
{
	metaslab_trace_fini(&zio->io_alloc_list);
	list_destroy(&zio->io_parent_list);
	list_destroy(&zio->io_child_list);
	mutex_destroy(&zio->io_lock);
	cv_destroy(&zio->io_cv);
	kmem_cache_free(zio_cache, zio);
}

/*
 * ZIO intended to be between others. Provides synchronization at READY
 * and DONE pipeline stages and calls the respective callbacks.
 */
zio_t *
zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done,
    void *private, zio_flag_t flags)
{
	zio_t *zio;

	zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
	    ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
	    ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE);

	return (zio);
}

/*
 * ZIO intended to be the root of a tree. Unlike a null ZIO it does not have
 * a READY pipeline stage (it is ready on creation), so it should not be used
 * as a child of any ZIO that may need to wait for its grandchildren to reach
 * the READY stage (i.e. any other ZIO type).
 */
zio_t *
zio_root(spa_t *spa, zio_done_func_t *done, void *private, zio_flag_t flags)
{
	zio_t *zio;

	zio = zio_create(NULL, spa, 0, NULL, NULL, 0, 0, done, private,
	    ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, NULL, 0, NULL,
	    ZIO_STAGE_OPEN, ZIO_ROOT_PIPELINE);

	return (zio);
}

static int
zfs_blkptr_verify_log(spa_t *spa, const blkptr_t *bp,
    enum blk_verify_flag blk_verify, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	zfs_dbgmsg("bad blkptr at %px: "
	    "DVA[0]=%#llx/%#llx "
	    "DVA[1]=%#llx/%#llx "
	    "DVA[2]=%#llx/%#llx "
	    "prop=%#llx "
	    "pad=%#llx,%#llx "
	    "phys_birth=%#llx "
	    "birth=%#llx "
	    "fill=%#llx "
	    "cksum=%#llx/%#llx/%#llx/%#llx",
	    bp,
	    (long long)bp->blk_dva[0].dva_word[0],
	    (long long)bp->blk_dva[0].dva_word[1],
	    (long long)bp->blk_dva[1].dva_word[0],
	    (long long)bp->blk_dva[1].dva_word[1],
	    (long long)bp->blk_dva[2].dva_word[0],
	    (long long)bp->blk_dva[2].dva_word[1],
	    (long long)bp->blk_prop,
	    (long long)bp->blk_pad[0],
	    (long long)bp->blk_pad[1],
	    (long long)BP_GET_PHYSICAL_BIRTH(bp),
	    (long long)BP_GET_LOGICAL_BIRTH(bp),
	    (long long)bp->blk_fill,
	    (long long)bp->blk_cksum.zc_word[0],
	    (long long)bp->blk_cksum.zc_word[1],
	    (long long)bp->blk_cksum.zc_word[2],
	    (long long)bp->blk_cksum.zc_word[3]);
	switch (blk_verify) {
	case BLK_VERIFY_HALT:
		zfs_panic_recover("%s: %s", spa_name(spa), buf);
		break;
	case BLK_VERIFY_LOG:
		zfs_dbgmsg("%s: %s", spa_name(spa), buf);
		break;
	case BLK_VERIFY_ONLY:
		break;
	}

	return (1);
}

/*
 * Verify the block pointer fields contain reasonable values. This means
 * it only contains known object types, checksum/compression identifiers,
 * block sizes within the maximum allowed limits, valid DVAs, etc.
 *
 * If everything checks out B_TRUE is returned. The blk_verify argument
 * controls the behavior when an invalid field is detected.
 *
 * Values for blk_verify_flag:
 *	BLK_VERIFY_ONLY: evaluate the block
 *	BLK_VERIFY_LOG: evaluate the block and log problems
 *	BLK_VERIFY_HALT: call zfs_panic_recover on error
 *
 * Values for blk_config_flag:
 *	BLK_CONFIG_HELD: caller holds SCL_VDEV for writer
 *	BLK_CONFIG_NEEDED: caller holds no config lock, SCL_VDEV will be
 *	    obtained for reader
 *	BLK_CONFIG_SKIP: skip checks which require SCL_VDEV, for better
 *	    performance
 */
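/*
 * Added usage note (illustrative): zio_free() below calls
 *
 *	(void) zfs_blkptr_verify(spa, bp, BLK_CONFIG_NEEDED, BLK_VERIFY_HALT);
 *
 * so that a damaged block pointer trips zfs_panic_recover(), while
 * zio_claim() passes BLK_CONFIG_HELD instead when its caller already holds
 * the config lock as writer (ZIO_FLAG_CONFIG_WRITER).
 */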
boolean_t
zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp,
    enum blk_config_flag blk_config, enum blk_verify_flag blk_verify)
{
	int errors = 0;

	if (unlikely(!DMU_OT_IS_VALID(BP_GET_TYPE(bp)))) {
		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
		    "blkptr at %px has invalid TYPE %llu",
		    bp, (longlong_t)BP_GET_TYPE(bp));
	}
	if (unlikely(BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS)) {
		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
		    "blkptr at %px has invalid COMPRESS %llu",
		    bp, (longlong_t)BP_GET_COMPRESS(bp));
	}
	if (unlikely(BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE)) {
		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
		    "blkptr at %px has invalid LSIZE %llu",
		    bp, (longlong_t)BP_GET_LSIZE(bp));
	}
	if (BP_IS_EMBEDDED(bp)) {
		if (unlikely(BPE_GET_ETYPE(bp) >= NUM_BP_EMBEDDED_TYPES)) {
			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
			    "blkptr at %px has invalid ETYPE %llu",
			    bp, (longlong_t)BPE_GET_ETYPE(bp));
		}
		if (unlikely(BPE_GET_PSIZE(bp) > BPE_PAYLOAD_SIZE)) {
			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
			    "blkptr at %px has invalid PSIZE %llu",
			    bp, (longlong_t)BPE_GET_PSIZE(bp));
		}
		return (errors == 0);
	}
	if (unlikely(BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS)) {
		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
		    "blkptr at %px has invalid CHECKSUM %llu",
		    bp, (longlong_t)BP_GET_CHECKSUM(bp));
	}
	if (unlikely(BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE)) {
		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
		    "blkptr at %px has invalid PSIZE %llu",
		    bp, (longlong_t)BP_GET_PSIZE(bp));
	}

	/*
	 * Do not verify individual DVAs if the config is not trusted. This
	 * will be done once the zio is executed in vdev_mirror_map_alloc.
	 */
	if (unlikely(!spa->spa_trust_config))
		return (errors == 0);

	switch (blk_config) {
	case BLK_CONFIG_HELD:
		ASSERT(spa_config_held(spa, SCL_VDEV, RW_WRITER));
		break;
	case BLK_CONFIG_NEEDED:
		spa_config_enter(spa, SCL_VDEV, bp, RW_READER);
		break;
	case BLK_CONFIG_SKIP:
		return (errors == 0);
	default:
		panic("invalid blk_config %u", blk_config);
	}

	/*
	 * Pool-specific checks.
	 *
	 * Note: it would be nice to verify that the logical birth
	 * and physical birth are not too large. However,
	 * spa_freeze() allows the birth time of log blocks (and
	 * dmu_sync()-ed blocks that are in the log) to be arbitrarily
	 * large.
	 */
	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
		const dva_t *dva = &bp->blk_dva[i];
		uint64_t vdevid = DVA_GET_VDEV(dva);

		if (unlikely(vdevid >= spa->spa_root_vdev->vdev_children)) {
			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
			    "blkptr at %px DVA %u has invalid VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
		if (unlikely(vd == NULL)) {
			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
			    "blkptr at %px DVA %u has invalid VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		if (unlikely(vd->vdev_ops == &vdev_hole_ops)) {
			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
			    "blkptr at %px DVA %u has hole VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		if (vd->vdev_ops == &vdev_missing_ops) {
			/*
			 * "missing" vdevs are valid during import, but we
			 * don't have their detailed info (e.g. asize), so
			 * we can't perform any more checks on them.
			 */
			continue;
		}
		uint64_t offset = DVA_GET_OFFSET(dva);
		uint64_t asize = DVA_GET_ASIZE(dva);
		if (DVA_GET_GANG(dva))
			asize = vdev_gang_header_asize(vd);
		if (unlikely(offset + asize > vd->vdev_asize)) {
			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
			    "blkptr at %px DVA %u has invalid OFFSET %llu",
			    bp, i, (longlong_t)offset);
		}
	}
	if (blk_config == BLK_CONFIG_NEEDED)
		spa_config_exit(spa, SCL_VDEV, bp);

	return (errors == 0);
}

boolean_t
zfs_dva_valid(spa_t *spa, const dva_t *dva, const blkptr_t *bp)
{
	(void) bp;
	uint64_t vdevid = DVA_GET_VDEV(dva);

	if (vdevid >= spa->spa_root_vdev->vdev_children)
		return (B_FALSE);

	vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
	if (vd == NULL)
		return (B_FALSE);

	if (vd->vdev_ops == &vdev_hole_ops)
		return (B_FALSE);

	if (vd->vdev_ops == &vdev_missing_ops) {
		return (B_FALSE);
	}

	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t asize = DVA_GET_ASIZE(dva);

	if (DVA_GET_GANG(dva))
		asize = vdev_gang_header_asize(vd);
	if (offset + asize > vd->vdev_asize)
		return (B_FALSE);

	return (B_TRUE);
}

zio_t *
zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
    abd_t *data, uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, zio_flag_t flags, const zbookmark_phys_t *zb)
{
	zio_t *zio;

	zio = zio_create(pio, spa, BP_GET_BIRTH(bp), bp,
	    data, size, size, done, private,
	    ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE);

	return (zio);
}

zio_t *
zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    abd_t *data, uint64_t lsize, uint64_t psize, const zio_prop_t *zp,
    zio_done_func_t *ready, zio_done_func_t *children_ready,
    zio_done_func_t *done, void *private, zio_priority_t priority,
    zio_flag_t flags, const zbookmark_phys_t *zb)
{
	zio_t *zio;

	ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF &&
	    zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS &&
	    zp->zp_compress >= ZIO_COMPRESS_OFF &&
	    zp->zp_compress < ZIO_COMPRESS_FUNCTIONS &&
	    DMU_OT_IS_VALID(zp->zp_type) &&
	    zp->zp_level < 32 &&
	    zp->zp_copies > 0 &&
	    zp->zp_copies <= spa_max_replication(spa));

	zio = zio_create(pio, spa, txg, bp, data, lsize, psize, done, private,
	    ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE);

	zio->io_ready = ready;
	zio->io_children_ready = children_ready;
	zio->io_prop = *zp;

	/*
	 * Data can be NULL if we are going to call zio_write_override() to
	 * provide the already-allocated BP. But we may need the data to
	 * verify a dedup hit (if requested). In this case, don't try to
	 * dedup (just take the already-allocated BP verbatim). Encrypted
	 * dedup blocks need data as well so we also disable dedup in this
	 * case.
	 */
	if (data == NULL &&
	    (zio->io_prop.zp_dedup_verify || zio->io_prop.zp_encrypt)) {
		zio->io_prop.zp_dedup = zio->io_prop.zp_dedup_verify = B_FALSE;
	}

	return (zio);
}

zio_t *
zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, abd_t *data,
    uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, zio_flag_t flags, zbookmark_phys_t *zb)
{
	zio_t *zio;

	zio = zio_create(pio, spa, txg, bp, data, size, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_IO_REWRITE, NULL, 0, zb,
	    ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);

	return (zio);
}

void
zio_write_override(zio_t *zio, blkptr_t *bp, int copies, boolean_t nopwrite,
    boolean_t brtwrite)
{
	ASSERT(zio->io_type == ZIO_TYPE_WRITE);
	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
	ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
	ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa));
	ASSERT(!brtwrite || !nopwrite);

	/*
	 * We must reset the io_prop to match the values that existed
	 * when the bp was first written by dmu_sync() keeping in mind
	 * that nopwrite and dedup are mutually exclusive.
	 */
	zio->io_prop.zp_dedup = nopwrite ? B_FALSE : zio->io_prop.zp_dedup;
	zio->io_prop.zp_nopwrite = nopwrite;
	zio->io_prop.zp_brtwrite = brtwrite;
	zio->io_prop.zp_copies = copies;
	zio->io_bp_override = bp;
}

void
zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
{

	(void) zfs_blkptr_verify(spa, bp, BLK_CONFIG_NEEDED, BLK_VERIFY_HALT);

	/*
	 * The check for EMBEDDED is a performance optimization. We
	 * process the free here (by ignoring it) rather than
	 * putting it on the list and then processing it in zio_free_sync().
	 */
	if (BP_IS_EMBEDDED(bp))
		return;

	/*
	 * Frees that are for the currently-syncing txg, are not going to be
	 * deferred, and which will not need to do a read (i.e. not GANG or
	 * DEDUP), can be processed immediately. Otherwise, put them on the
	 * in-memory list for later processing.
	 *
	 * Note that we only defer frees after zfs_sync_pass_deferred_free
	 * when the log space map feature is disabled. [see relevant comment
	 * in spa_sync_iterate_to_convergence()]
	 */
	if (BP_IS_GANG(bp) ||
	    BP_GET_DEDUP(bp) ||
	    txg != spa->spa_syncing_txg ||
	    (spa_sync_pass(spa) >= zfs_sync_pass_deferred_free &&
	    !spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) ||
	    brt_maybe_exists(spa, bp)) {
		metaslab_check_free(spa, bp);
		bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
	} else {
		VERIFY3P(zio_free_sync(NULL, spa, txg, bp, 0), ==, NULL);
	}
}

/*
 * To improve performance, this function may return NULL if we were able
 * to do the free immediately. This avoids the cost of creating a zio
 * (and linking it to the parent, etc).
 */
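/*
 * Added usage note (illustrative): callers must be prepared for a NULL
 * return. zio_free() above, for example, passes no parent and asserts the
 * immediate-free case with VERIFY3P(zio_free_sync(NULL, ...), ==, NULL).
 */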
zio_t *
zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    zio_flag_t flags)
{
	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(spa_syncing_txg(spa) == txg);

	if (BP_IS_EMBEDDED(bp))
		return (NULL);

	metaslab_check_free(spa, bp);
	arc_freed(spa, bp);
	dsl_scan_freed(spa, bp);

	if (BP_IS_GANG(bp) ||
	    BP_GET_DEDUP(bp) ||
	    brt_maybe_exists(spa, bp)) {
		/*
		 * GANG, DEDUP and BRT blocks can induce a read (for the gang
		 * block header, the DDT or the BRT), so issue them
		 * asynchronously so that this thread is not tied up.
		 */
		enum zio_stage stage =
		    ZIO_FREE_PIPELINE | ZIO_STAGE_ISSUE_ASYNC;

		return (zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
		    BP_GET_PSIZE(bp), NULL, NULL,
		    ZIO_TYPE_FREE, ZIO_PRIORITY_NOW,
		    flags, NULL, 0, NULL, ZIO_STAGE_OPEN, stage));
	} else {
		metaslab_free(spa, bp, txg, B_FALSE);
		return (NULL);
	}
}

zio_t *
zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    zio_done_func_t *done, void *private, zio_flag_t flags)
{
	zio_t *zio;

	(void) zfs_blkptr_verify(spa, bp, (flags & ZIO_FLAG_CONFIG_WRITER) ?
	    BLK_CONFIG_HELD : BLK_CONFIG_NEEDED, BLK_VERIFY_HALT);

	if (BP_IS_EMBEDDED(bp))
		return (zio_null(pio, spa, NULL, NULL, NULL, 0));

	/*
	 * A claim is an allocation of a specific block. Claims are needed
	 * to support immediate writes in the intent log. The issue is that
	 * immediate writes contain committed data, but in a txg that was
	 * *not* committed. Upon opening the pool after an unclean shutdown,
	 * the intent log claims all blocks that contain immediate write data
	 * so that the SPA knows they're in use.
	 *
	 * All claims *must* be resolved in the first txg -- before the SPA
	 * starts allocating blocks -- so that nothing is allocated twice.
	 * If txg == 0 we just verify that the block is claimable.
	 */
	ASSERT3U(BP_GET_LOGICAL_BIRTH(&spa->spa_uberblock.ub_rootbp), <,
	    spa_min_claim_txg(spa));
	ASSERT(txg == spa_min_claim_txg(spa) || txg == 0);
	ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa));	/* zdb(8) */

	zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
	    BP_GET_PSIZE(bp), done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW,
	    flags, NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);
	ASSERT0(zio->io_queued_timestamp);

	return (zio);
}

zio_t *
zio_trim(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    zio_done_func_t *done, void *private, zio_priority_t priority,
    zio_flag_t flags, enum trim_flag trim_flags)
{
	zio_t *zio;

	ASSERT0(vd->vdev_children);
	ASSERT0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
	ASSERT0(P2PHASE(size, 1ULL << vd->vdev_ashift));
	ASSERT3U(size, !=, 0);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, NULL, size, size, done,
	    private, ZIO_TYPE_TRIM, priority, flags | ZIO_FLAG_PHYSICAL,
	    vd, offset, NULL, ZIO_STAGE_OPEN, ZIO_TRIM_PIPELINE);
	zio->io_trim_flags = trim_flags;

	return (zio);
}

zio_t *
zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    abd_t *data, int checksum, zio_done_func_t *done, void *private,
    zio_priority_t priority, zio_flag_t flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT(vd->vdev_children == 0);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
	    private, ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL, vd,
	    offset, NULL, ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	return (zio);
}

zio_t *
zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    abd_t *data, int checksum, zio_done_func_t *done, void *private,
    zio_priority_t priority, zio_flag_t flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT(vd->vdev_children == 0);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
	    private, ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL, vd,
	    offset, NULL, ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	if (zio_checksum_table[checksum].ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
		/*
		 * zec checksums are necessarily destructive -- they modify
		 * the end of the write buffer to hold the verifier/checksum.
		 * Therefore, we must make a local copy in case the data is
		 * being written to multiple places in parallel.
		 */
		abd_t *wbuf = abd_alloc_sametype(data, size);
		abd_copy(wbuf, data, size);

		zio_push_transform(zio, wbuf, size, size, NULL);
	}

	return (zio);
}

/*
 * Create a child I/O to do some work for us.
 */
zio_t *
zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
    abd_t *data, uint64_t size, int type, zio_priority_t priority,
    zio_flag_t flags, zio_done_func_t *done, void *private)
{
	enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE;
	zio_t *zio;

	/*
	 * vdev child I/Os do not propagate their error to the parent.
	 * Therefore, for correct operation the caller *must* check for
	 * and handle the error in the child i/o's done callback.
	 * The only exceptions are i/os that we don't care about
	 * (OPTIONAL or REPAIR).
	 */
	ASSERT((flags & ZIO_FLAG_OPTIONAL) || (flags & ZIO_FLAG_IO_REPAIR) ||
	    done != NULL);

	if (type == ZIO_TYPE_READ && bp != NULL) {
		/*
		 * If we have the bp, then the child should perform the
		 * checksum and the parent need not. This pushes error
		 * detection as close to the leaves as possible and
		 * eliminates redundant checksums in the interior nodes.
		 */
		pipeline |= ZIO_STAGE_CHECKSUM_VERIFY;
		pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
	}

	if (vd->vdev_ops->vdev_op_leaf) {
		ASSERT0(vd->vdev_children);
		offset += VDEV_LABEL_START_SIZE;
	}

	flags |= ZIO_VDEV_CHILD_FLAGS(pio);

	/*
	 * If we've decided to do a repair, the write is not speculative --
	 * even if the original read was.
	 */
	if (flags & ZIO_FLAG_IO_REPAIR)
		flags &= ~ZIO_FLAG_SPECULATIVE;

	/*
	 * If we're creating a child I/O that is not associated with a
	 * top-level vdev, then the child zio is not an allocating I/O.
	 * If this is a retried I/O then we ignore it since we will
	 * have already processed the original allocating I/O.
	 */
	if (flags & ZIO_FLAG_IO_ALLOCATING &&
	    (vd != vd->vdev_top || (flags & ZIO_FLAG_IO_RETRY))) {
		ASSERT(pio->io_metaslab_class != NULL);
		ASSERT(pio->io_metaslab_class->mc_alloc_throttle_enabled);
		ASSERT(type == ZIO_TYPE_WRITE);
		ASSERT(priority == ZIO_PRIORITY_ASYNC_WRITE);
		ASSERT(!(flags & ZIO_FLAG_IO_REPAIR));
		ASSERT(!(pio->io_flags & ZIO_FLAG_IO_REWRITE) ||
		    pio->io_child_type == ZIO_CHILD_GANG);

		flags &= ~ZIO_FLAG_IO_ALLOCATING;
	}

	zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size, size,
	    done, private, type, priority, flags, vd, offset, &pio->io_bookmark,
	    ZIO_STAGE_VDEV_IO_START >> 1, pipeline);
	ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);

	return (zio);
}

zio_t *
zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, abd_t *data, uint64_t size,
    zio_type_t type, zio_priority_t priority, zio_flag_t flags,
    zio_done_func_t *done, void *private)
{
	zio_t *zio;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	zio = zio_create(NULL, vd->vdev_spa, 0, NULL,
	    data, size, size, done, private, type, priority,
	    flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_DELEGATED,
	    vd, offset, NULL,
	    ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE);

	return (zio);
}


/*
 * Send a flush command to the given vdev. Unlike most zio creation functions,
 * the flush zios are issued immediately. You can wait on pio to pause until
 * the flushes complete.
 */
void
zio_flush(zio_t *pio, vdev_t *vd)
{
	const zio_flag_t flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE |
	    ZIO_FLAG_DONT_RETRY;

	if (vd->vdev_nowritecache)
		return;

	if (vd->vdev_children == 0) {
		zio_nowait(zio_create(pio, vd->vdev_spa, 0, NULL, NULL, 0, 0,
		    NULL, NULL, ZIO_TYPE_FLUSH, ZIO_PRIORITY_NOW, flags, vd, 0,
		    NULL, ZIO_STAGE_OPEN, ZIO_FLUSH_PIPELINE));
	} else {
		for (uint64_t c = 0; c < vd->vdev_children; c++)
			zio_flush(pio, vd->vdev_child[c]);
	}
}
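/*
 * Added usage note (illustrative): a typical caller creates a root zio with
 * zio_root(), calls zio_flush(root, vd) for each vdev whose write cache must
 * be emptied, and then zio_wait()s on the root, which pauses until all of
 * the flush children complete (as described above).
 */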
void
zio_shrink(zio_t *zio, uint64_t size)
{
	ASSERT3P(zio->io_executor, ==, NULL);
	ASSERT3U(zio->io_orig_size, ==, zio->io_size);
	ASSERT3U(size, <=, zio->io_size);

	/*
	 * We don't shrink for raidz because of problems with the
	 * reconstruction when reading back less than the block size.
	 * Note, BP_IS_RAIDZ() assumes no compression.
	 */
	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
	if (!BP_IS_RAIDZ(zio->io_bp)) {
		/* we are not doing a raw write */
		ASSERT3U(zio->io_size, ==, zio->io_lsize);
		zio->io_orig_size = zio->io_size = zio->io_lsize = size;
	}
}

/*
 * Round the provided allocation size up to a value that can be allocated
 * by at least some vdev(s) in the pool with minimum or no additional
 * padding and without extra space usage on others.
 */
static uint64_t
zio_roundup_alloc_size(spa_t *spa, uint64_t size)
{
	if (size > spa->spa_min_alloc)
		return (roundup(size, spa->spa_gcd_alloc));
	return (spa->spa_min_alloc);
}
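/*
 * Worked example (added, illustrative; assumes spa_min_alloc and
 * spa_gcd_alloc track the smallest and the GCD of the top-level vdevs'
 * allocation granularities): if every top-level vdev uses ashift=12, both
 * values are 4K and a 5K request rounds up to 8K; if the pool also contains
 * an ashift=9 vdev, spa_gcd_alloc is 512 and the same request stays at 5120
 * bytes, which the ashift=9 vdev can allocate without padding.
 */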
BPE_GET_PSIZE(bp) : BP_GET_PSIZE(bp); 1699 1700 ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy); 1701 1702 if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF && 1703 zio->io_child_type == ZIO_CHILD_LOGICAL && 1704 !(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) { 1705 zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize), 1706 psize, psize, zio_decompress); 1707 } 1708 1709 if (((BP_IS_PROTECTED(bp) && !(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) || 1710 BP_HAS_INDIRECT_MAC_CKSUM(bp)) && 1711 zio->io_child_type == ZIO_CHILD_LOGICAL) { 1712 zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize), 1713 psize, psize, zio_decrypt); 1714 } 1715 1716 if (BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA) { 1717 int psize = BPE_GET_PSIZE(bp); 1718 void *data = abd_borrow_buf(zio->io_abd, psize); 1719 1720 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 1721 decode_embedded_bp_compressed(bp, data); 1722 abd_return_buf_copy(zio->io_abd, data, psize); 1723 } else { 1724 ASSERT(!BP_IS_EMBEDDED(bp)); 1725 } 1726 1727 if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL) 1728 zio->io_pipeline = ZIO_DDT_READ_PIPELINE; 1729 1730 return (zio); 1731 } 1732 1733 static zio_t * 1734 zio_write_bp_init(zio_t *zio) 1735 { 1736 if (!IO_IS_ALLOCATING(zio)) 1737 return (zio); 1738 1739 ASSERT(zio->io_child_type != ZIO_CHILD_DDT); 1740 1741 if (zio->io_bp_override) { 1742 blkptr_t *bp = zio->io_bp; 1743 zio_prop_t *zp = &zio->io_prop; 1744 1745 ASSERT(BP_GET_LOGICAL_BIRTH(bp) != zio->io_txg); 1746 1747 *bp = *zio->io_bp_override; 1748 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 1749 1750 if (zp->zp_brtwrite) 1751 return (zio); 1752 1753 ASSERT(!BP_GET_DEDUP(zio->io_bp_override)); 1754 1755 if (BP_IS_EMBEDDED(bp)) 1756 return (zio); 1757 1758 /* 1759 * If we've been overridden and nopwrite is set then 1760 * set the flag accordingly to indicate that a nopwrite 1761 * has already occurred. 1762 */ 1763 if (!BP_IS_HOLE(bp) && zp->zp_nopwrite) { 1764 ASSERT(!zp->zp_dedup); 1765 ASSERT3U(BP_GET_CHECKSUM(bp), ==, zp->zp_checksum); 1766 zio->io_flags |= ZIO_FLAG_NOPWRITE; 1767 return (zio); 1768 } 1769 1770 ASSERT(!zp->zp_nopwrite); 1771 1772 if (BP_IS_HOLE(bp) || !zp->zp_dedup) 1773 return (zio); 1774 1775 ASSERT((zio_checksum_table[zp->zp_checksum].ci_flags & 1776 ZCHECKSUM_FLAG_DEDUP) || zp->zp_dedup_verify); 1777 1778 if (BP_GET_CHECKSUM(bp) == zp->zp_checksum && 1779 !zp->zp_encrypt) { 1780 BP_SET_DEDUP(bp, 1); 1781 zio->io_pipeline |= ZIO_STAGE_DDT_WRITE; 1782 return (zio); 1783 } 1784 1785 /* 1786 * We were unable to handle this as an override bp, treat 1787 * it as a regular write I/O. 1788 */ 1789 zio->io_bp_override = NULL; 1790 *bp = zio->io_bp_orig; 1791 zio->io_pipeline = zio->io_orig_pipeline; 1792 } 1793 1794 return (zio); 1795 } 1796 1797 static zio_t * 1798 zio_write_compress(zio_t *zio) 1799 { 1800 spa_t *spa = zio->io_spa; 1801 zio_prop_t *zp = &zio->io_prop; 1802 enum zio_compress compress = zp->zp_compress; 1803 blkptr_t *bp = zio->io_bp; 1804 uint64_t lsize = zio->io_lsize; 1805 uint64_t psize = zio->io_size; 1806 uint32_t pass = 1; 1807 1808 /* 1809 * If our children haven't all reached the ready stage, 1810 * wait for them and then repeat this pipeline stage. 
1811 */ 1812 if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT | 1813 ZIO_CHILD_GANG_BIT, ZIO_WAIT_READY)) { 1814 return (NULL); 1815 } 1816 1817 if (!IO_IS_ALLOCATING(zio)) 1818 return (zio); 1819 1820 if (zio->io_children_ready != NULL) { 1821 /* 1822 * Now that all our children are ready, run the callback 1823 * associated with this zio in case it wants to modify the 1824 * data to be written. 1825 */ 1826 ASSERT3U(zp->zp_level, >, 0); 1827 zio->io_children_ready(zio); 1828 } 1829 1830 ASSERT(zio->io_child_type != ZIO_CHILD_DDT); 1831 ASSERT(zio->io_bp_override == NULL); 1832 1833 if (!BP_IS_HOLE(bp) && BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg) { 1834 /* 1835 * We're rewriting an existing block, which means we're 1836 * working on behalf of spa_sync(). For spa_sync() to 1837 * converge, it must eventually be the case that we don't 1838 * have to allocate new blocks. But compression changes 1839 * the blocksize, which forces a reallocate, and makes 1840 * convergence take longer. Therefore, after the first 1841 * few passes, stop compressing to ensure convergence. 1842 */ 1843 pass = spa_sync_pass(spa); 1844 1845 ASSERT(zio->io_txg == spa_syncing_txg(spa)); 1846 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 1847 ASSERT(!BP_GET_DEDUP(bp)); 1848 1849 if (pass >= zfs_sync_pass_dont_compress) 1850 compress = ZIO_COMPRESS_OFF; 1851 1852 /* Make sure someone doesn't change their mind on overwrites */ 1853 ASSERT(BP_IS_EMBEDDED(bp) || BP_IS_GANG(bp) || 1854 MIN(zp->zp_copies, spa_max_replication(spa)) 1855 == BP_GET_NDVAS(bp)); 1856 } 1857 1858 /* If it's a compressed write that is not raw, compress the buffer. */ 1859 if (compress != ZIO_COMPRESS_OFF && 1860 !(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) { 1861 void *cbuf = NULL; 1862 if (abd_cmp_zero(zio->io_abd, lsize) == 0) 1863 psize = 0; 1864 else if (compress == ZIO_COMPRESS_EMPTY) 1865 psize = lsize; 1866 else 1867 psize = zio_compress_data(compress, zio->io_abd, &cbuf, 1868 lsize, zp->zp_complevel); 1869 if (psize == 0) { 1870 compress = ZIO_COMPRESS_OFF; 1871 } else if (psize >= lsize) { 1872 compress = ZIO_COMPRESS_OFF; 1873 if (cbuf != NULL) 1874 zio_buf_free(cbuf, lsize); 1875 } else if (!zp->zp_dedup && !zp->zp_encrypt && 1876 psize <= BPE_PAYLOAD_SIZE && 1877 zp->zp_level == 0 && !DMU_OT_HAS_FILL(zp->zp_type) && 1878 spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA)) { 1879 encode_embedded_bp_compressed(bp, 1880 cbuf, compress, lsize, psize); 1881 BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA); 1882 BP_SET_TYPE(bp, zio->io_prop.zp_type); 1883 BP_SET_LEVEL(bp, zio->io_prop.zp_level); 1884 zio_buf_free(cbuf, lsize); 1885 BP_SET_LOGICAL_BIRTH(bp, zio->io_txg); 1886 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 1887 ASSERT(spa_feature_is_active(spa, 1888 SPA_FEATURE_EMBEDDED_DATA)); 1889 return (zio); 1890 } else { 1891 /* 1892 * Round compressed size up to the minimum allocation 1893 * size of the smallest-ashift device, and zero the 1894 * tail. This ensures that the compressed size of the 1895 * BP (and thus compressratio property) are correct, 1896 * in that we charge for the padding used to fill out 1897 * the last sector. 
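 * For example, on a pool whose vdevs are all ashift=12, spa_min_alloc
 * and spa_gcd_alloc are both 4K, so a 16K block that compresses to
 * 5000 bytes is padded out to 8K: the tail is zeroed and the bp
 * records an 8K psize.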
1898 */ 1899 size_t rounded = (size_t)zio_roundup_alloc_size(spa, 1900 psize); 1901 if (rounded >= lsize) { 1902 compress = ZIO_COMPRESS_OFF; 1903 zio_buf_free(cbuf, lsize); 1904 psize = lsize; 1905 } else { 1906 abd_t *cdata = abd_get_from_buf(cbuf, lsize); 1907 abd_take_ownership_of_buf(cdata, B_TRUE); 1908 abd_zero_off(cdata, psize, rounded - psize); 1909 psize = rounded; 1910 zio_push_transform(zio, cdata, 1911 psize, lsize, NULL); 1912 } 1913 } 1914 1915 /* 1916 * We were unable to handle this as an override bp, treat 1917 * it as a regular write I/O. 1918 */ 1919 zio->io_bp_override = NULL; 1920 *bp = zio->io_bp_orig; 1921 zio->io_pipeline = zio->io_orig_pipeline; 1922 1923 } else if ((zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) != 0 && 1924 zp->zp_type == DMU_OT_DNODE) { 1925 /* 1926 * The DMU actually relies on the zio layer's compression 1927 * to free metadnode blocks that have had all contained 1928 * dnodes freed. As a result, even when doing a raw 1929 * receive, we must check whether the block can be compressed 1930 * to a hole. 1931 */ 1932 if (abd_cmp_zero(zio->io_abd, lsize) == 0) { 1933 psize = 0; 1934 compress = ZIO_COMPRESS_OFF; 1935 } else { 1936 psize = lsize; 1937 } 1938 } else if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS && 1939 !(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) { 1940 /* 1941 * If we are raw receiving an encrypted dataset we should not 1942 * take this codepath because it will change the on-disk block 1943 * and decryption will fail. 1944 */ 1945 size_t rounded = MIN((size_t)zio_roundup_alloc_size(spa, psize), 1946 lsize); 1947 1948 if (rounded != psize) { 1949 abd_t *cdata = abd_alloc_linear(rounded, B_TRUE); 1950 abd_zero_off(cdata, psize, rounded - psize); 1951 abd_copy_off(cdata, zio->io_abd, 0, 0, psize); 1952 psize = rounded; 1953 zio_push_transform(zio, cdata, 1954 psize, rounded, NULL); 1955 } 1956 } else { 1957 ASSERT3U(psize, !=, 0); 1958 } 1959 1960 /* 1961 * The final pass of spa_sync() must be all rewrites, but the first 1962 * few passes offer a trade-off: allocating blocks defers convergence, 1963 * but newly allocated blocks are sequential, so they can be written 1964 * to disk faster. Therefore, we allow the first few passes of 1965 * spa_sync() to allocate new blocks, but force rewrites after that. 1966 * There should only be a handful of blocks after pass 1 in any case. 
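 * Concretely, the rewrite path below is taken only when the block was
 * born in this txg, its physical size is unchanged, and the current
 * sync pass is at least zfs_sync_pass_rewrite; otherwise the bp is
 * zeroed and a fresh allocation is performed.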
1967 */ 1968 if (!BP_IS_HOLE(bp) && BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg && 1969 BP_GET_PSIZE(bp) == psize && 1970 pass >= zfs_sync_pass_rewrite) { 1971 VERIFY3U(psize, !=, 0); 1972 enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES; 1973 1974 zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages; 1975 zio->io_flags |= ZIO_FLAG_IO_REWRITE; 1976 } else { 1977 BP_ZERO(bp); 1978 zio->io_pipeline = ZIO_WRITE_PIPELINE; 1979 } 1980 1981 if (psize == 0) { 1982 if (BP_GET_LOGICAL_BIRTH(&zio->io_bp_orig) != 0 && 1983 spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) { 1984 BP_SET_LSIZE(bp, lsize); 1985 BP_SET_TYPE(bp, zp->zp_type); 1986 BP_SET_LEVEL(bp, zp->zp_level); 1987 BP_SET_BIRTH(bp, zio->io_txg, 0); 1988 } 1989 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 1990 } else { 1991 ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER); 1992 BP_SET_LSIZE(bp, lsize); 1993 BP_SET_TYPE(bp, zp->zp_type); 1994 BP_SET_LEVEL(bp, zp->zp_level); 1995 BP_SET_PSIZE(bp, psize); 1996 BP_SET_COMPRESS(bp, compress); 1997 BP_SET_CHECKSUM(bp, zp->zp_checksum); 1998 BP_SET_DEDUP(bp, zp->zp_dedup); 1999 BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER); 2000 if (zp->zp_dedup) { 2001 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2002 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE)); 2003 ASSERT(!zp->zp_encrypt || 2004 DMU_OT_IS_ENCRYPTED(zp->zp_type)); 2005 zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE; 2006 } 2007 if (zp->zp_nopwrite) { 2008 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2009 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE)); 2010 zio->io_pipeline |= ZIO_STAGE_NOP_WRITE; 2011 } 2012 } 2013 return (zio); 2014 } 2015 2016 static zio_t * 2017 zio_free_bp_init(zio_t *zio) 2018 { 2019 blkptr_t *bp = zio->io_bp; 2020 2021 if (zio->io_child_type == ZIO_CHILD_LOGICAL) { 2022 if (BP_GET_DEDUP(bp)) 2023 zio->io_pipeline = ZIO_DDT_FREE_PIPELINE; 2024 } 2025 2026 ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy); 2027 2028 return (zio); 2029 } 2030 2031 /* 2032 * ========================================================================== 2033 * Execute the I/O pipeline 2034 * ========================================================================== 2035 */ 2036 2037 static void 2038 zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline) 2039 { 2040 spa_t *spa = zio->io_spa; 2041 zio_type_t t = zio->io_type; 2042 2043 /* 2044 * If we're a config writer or a probe, the normal issue and 2045 * interrupt threads may all be blocked waiting for the config lock. 2046 * In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL. 2047 */ 2048 if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE)) 2049 t = ZIO_TYPE_NULL; 2050 2051 /* 2052 * A similar issue exists for the L2ARC write thread until L2ARC 2.0. 2053 */ 2054 if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux) 2055 t = ZIO_TYPE_NULL; 2056 2057 /* 2058 * If this is a high priority I/O, then use the high priority taskq if 2059 * available or cut the line otherwise. 
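 * (The high-priority variant of each taskq type occupies the next slot
 * of spa_zio_taskq[][], which is why q is simply incremented when that
 * taskq has been configured.)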
2060 */ 2061 if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE) { 2062 if (spa->spa_zio_taskq[t][q + 1].stqs_count != 0) 2063 q++; 2064 else 2065 cutinline = B_TRUE; 2066 } 2067 2068 ASSERT3U(q, <, ZIO_TASKQ_TYPES); 2069 2070 spa_taskq_dispatch(spa, t, q, zio_execute, zio, cutinline); 2071 } 2072 2073 static boolean_t 2074 zio_taskq_member(zio_t *zio, zio_taskq_type_t q) 2075 { 2076 spa_t *spa = zio->io_spa; 2077 2078 taskq_t *tq = taskq_of_curthread(); 2079 2080 for (zio_type_t t = 0; t < ZIO_TYPES; t++) { 2081 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q]; 2082 uint_t i; 2083 for (i = 0; i < tqs->stqs_count; i++) { 2084 if (tqs->stqs_taskq[i] == tq) 2085 return (B_TRUE); 2086 } 2087 } 2088 2089 return (B_FALSE); 2090 } 2091 2092 static zio_t * 2093 zio_issue_async(zio_t *zio) 2094 { 2095 ASSERT((zio->io_type != ZIO_TYPE_WRITE) || ZIO_HAS_ALLOCATOR(zio)); 2096 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE); 2097 return (NULL); 2098 } 2099 2100 void 2101 zio_interrupt(void *zio) 2102 { 2103 zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE); 2104 } 2105 2106 void 2107 zio_delay_interrupt(zio_t *zio) 2108 { 2109 /* 2110 * The timeout_generic() function isn't defined in userspace, so 2111 * rather than trying to implement the function, the zio delay 2112 * functionality has been disabled for userspace builds. 2113 */ 2114 2115 #ifdef _KERNEL 2116 /* 2117 * If io_target_timestamp is zero, then no delay has been registered 2118 * for this IO, thus jump to the end of this function and "skip" the 2119 * delay; issuing it directly to the zio layer. 2120 */ 2121 if (zio->io_target_timestamp != 0) { 2122 hrtime_t now = gethrtime(); 2123 2124 if (now >= zio->io_target_timestamp) { 2125 /* 2126 * This IO has already taken longer than the target 2127 * delay to complete, so we don't want to delay it 2128 * any longer; we "miss" the delay and issue it 2129 * directly to the zio layer. This is likely due to 2130 * the target latency being set to a value less than 2131 * the underlying hardware can satisfy (e.g. delay 2132 * set to 1ms, but the disks take 10ms to complete an 2133 * IO request). 2134 */ 2135 2136 DTRACE_PROBE2(zio__delay__miss, zio_t *, zio, 2137 hrtime_t, now); 2138 2139 zio_interrupt(zio); 2140 } else { 2141 taskqid_t tid; 2142 hrtime_t diff = zio->io_target_timestamp - now; 2143 clock_t expire_at_tick = ddi_get_lbolt() + 2144 NSEC_TO_TICK(diff); 2145 2146 DTRACE_PROBE3(zio__delay__hit, zio_t *, zio, 2147 hrtime_t, now, hrtime_t, diff); 2148 2149 if (NSEC_TO_TICK(diff) == 0) { 2150 /* Our delay is less than a jiffy - just spin */ 2151 zfs_sleep_until(zio->io_target_timestamp); 2152 zio_interrupt(zio); 2153 } else { 2154 /* 2155 * Use taskq_dispatch_delay() in the place of 2156 * OpenZFS's timeout_generic(). 2157 */ 2158 tid = taskq_dispatch_delay(system_taskq, 2159 zio_interrupt, zio, TQ_NOSLEEP, 2160 expire_at_tick); 2161 if (tid == TASKQID_INVALID) { 2162 /* 2163 * Couldn't allocate a task. Just 2164 * finish the zio without a delay. 2165 */ 2166 zio_interrupt(zio); 2167 } 2168 } 2169 } 2170 return; 2171 } 2172 #endif 2173 DTRACE_PROBE1(zio__delay__skip, zio_t *, zio); 2174 zio_interrupt(zio); 2175 } 2176 2177 static void 2178 zio_deadman_impl(zio_t *pio, int ziodepth) 2179 { 2180 zio_t *cio, *cio_next; 2181 zio_link_t *zl = NULL; 2182 vdev_t *vd = pio->io_vd; 2183 2184 if (zio_deadman_log_all || (vd != NULL && vd->vdev_ops->vdev_op_leaf)) { 2185 vdev_queue_t *vq = vd ? 
&vd->vdev_queue : NULL; 2186 zbookmark_phys_t *zb = &pio->io_bookmark; 2187 uint64_t delta = gethrtime() - pio->io_timestamp; 2188 uint64_t failmode = spa_get_deadman_failmode(pio->io_spa); 2189 2190 zfs_dbgmsg("slow zio[%d]: zio=%px timestamp=%llu " 2191 "delta=%llu queued=%llu io=%llu " 2192 "path=%s " 2193 "last=%llu type=%d " 2194 "priority=%d flags=0x%llx stage=0x%x " 2195 "pipeline=0x%x pipeline-trace=0x%x " 2196 "objset=%llu object=%llu " 2197 "level=%llu blkid=%llu " 2198 "offset=%llu size=%llu " 2199 "error=%d", 2200 ziodepth, pio, pio->io_timestamp, 2201 (u_longlong_t)delta, pio->io_delta, pio->io_delay, 2202 vd ? vd->vdev_path : "NULL", 2203 vq ? vq->vq_io_complete_ts : 0, pio->io_type, 2204 pio->io_priority, (u_longlong_t)pio->io_flags, 2205 pio->io_stage, pio->io_pipeline, pio->io_pipeline_trace, 2206 (u_longlong_t)zb->zb_objset, (u_longlong_t)zb->zb_object, 2207 (u_longlong_t)zb->zb_level, (u_longlong_t)zb->zb_blkid, 2208 (u_longlong_t)pio->io_offset, (u_longlong_t)pio->io_size, 2209 pio->io_error); 2210 (void) zfs_ereport_post(FM_EREPORT_ZFS_DEADMAN, 2211 pio->io_spa, vd, zb, pio, 0); 2212 2213 if (failmode == ZIO_FAILURE_MODE_CONTINUE && 2214 taskq_empty_ent(&pio->io_tqent)) { 2215 zio_interrupt(pio); 2216 } 2217 } 2218 2219 mutex_enter(&pio->io_lock); 2220 for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) { 2221 cio_next = zio_walk_children(pio, &zl); 2222 zio_deadman_impl(cio, ziodepth + 1); 2223 } 2224 mutex_exit(&pio->io_lock); 2225 } 2226 2227 /* 2228 * Log the critical information describing this zio and all of its children 2229 * using the zfs_dbgmsg() interface then post deadman event for the ZED. 2230 */ 2231 void 2232 zio_deadman(zio_t *pio, const char *tag) 2233 { 2234 spa_t *spa = pio->io_spa; 2235 char *name = spa_name(spa); 2236 2237 if (!zfs_deadman_enabled || spa_suspended(spa)) 2238 return; 2239 2240 zio_deadman_impl(pio, 0); 2241 2242 switch (spa_get_deadman_failmode(spa)) { 2243 case ZIO_FAILURE_MODE_WAIT: 2244 zfs_dbgmsg("%s waiting for hung I/O to pool '%s'", tag, name); 2245 break; 2246 2247 case ZIO_FAILURE_MODE_CONTINUE: 2248 zfs_dbgmsg("%s restarting hung I/O for pool '%s'", tag, name); 2249 break; 2250 2251 case ZIO_FAILURE_MODE_PANIC: 2252 fm_panic("%s determined I/O to pool '%s' is hung.", tag, name); 2253 break; 2254 } 2255 } 2256 2257 /* 2258 * Execute the I/O pipeline until one of the following occurs: 2259 * (1) the I/O completes; (2) the pipeline stalls waiting for 2260 * dependent child I/Os; (3) the I/O issues, so we're waiting 2261 * for an I/O completion interrupt; (4) the I/O is delegated by 2262 * vdev-level caching or aggregation; (5) the I/O is deferred 2263 * due to vdev-level queueing; (6) the I/O is handed off to 2264 * another thread. In all cases, the pipeline stops whenever 2265 * there's no CPU work; it never burns a thread in cv_wait_io(). 2266 * 2267 * There's no locking on io_stage because there's no legitimate way 2268 * for multiple threads to be attempting to process the same I/O. 2269 */ 2270 static zio_pipe_stage_t *zio_pipeline[]; 2271 2272 /* 2273 * zio_execute() is a wrapper around the static function 2274 * __zio_execute() so that we can force __zio_execute() to be 2275 * inlined. This reduces stack overhead which is important 2276 * because __zio_execute() is called recursively in several zio 2277 * code paths. zio_execute() itself cannot be inlined because 2278 * it is externally visible. 
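 * zio_execute() also brackets the pipeline with spl_fstrans_mark() /
 * spl_fstrans_unmark() so that memory allocations made by pipeline
 * stages cannot recurse into the filesystem during reclaim.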
2279 */ 2280 void 2281 zio_execute(void *zio) 2282 { 2283 fstrans_cookie_t cookie; 2284 2285 cookie = spl_fstrans_mark(); 2286 __zio_execute(zio); 2287 spl_fstrans_unmark(cookie); 2288 } 2289 2290 /* 2291 * Used to determine if in the current context the stack is sized large 2292 * enough to allow zio_execute() to be called recursively. A minimum 2293 * stack size of 16K is required to avoid needing to re-dispatch the zio. 2294 */ 2295 static boolean_t 2296 zio_execute_stack_check(zio_t *zio) 2297 { 2298 #if !defined(HAVE_LARGE_STACKS) 2299 dsl_pool_t *dp = spa_get_dsl(zio->io_spa); 2300 2301 /* Executing in txg_sync_thread() context. */ 2302 if (dp && curthread == dp->dp_tx.tx_sync_thread) 2303 return (B_TRUE); 2304 2305 /* Pool initialization outside of zio_taskq context. */ 2306 if (dp && spa_is_initializing(dp->dp_spa) && 2307 !zio_taskq_member(zio, ZIO_TASKQ_ISSUE) && 2308 !zio_taskq_member(zio, ZIO_TASKQ_ISSUE_HIGH)) 2309 return (B_TRUE); 2310 #else 2311 (void) zio; 2312 #endif /* HAVE_LARGE_STACKS */ 2313 2314 return (B_FALSE); 2315 } 2316 2317 __attribute__((always_inline)) 2318 static inline void 2319 __zio_execute(zio_t *zio) 2320 { 2321 ASSERT3U(zio->io_queued_timestamp, >, 0); 2322 2323 while (zio->io_stage < ZIO_STAGE_DONE) { 2324 enum zio_stage pipeline = zio->io_pipeline; 2325 enum zio_stage stage = zio->io_stage; 2326 2327 zio->io_executor = curthread; 2328 2329 ASSERT(!MUTEX_HELD(&zio->io_lock)); 2330 ASSERT(ISP2(stage)); 2331 ASSERT(zio->io_stall == NULL); 2332 2333 do { 2334 stage <<= 1; 2335 } while ((stage & pipeline) == 0); 2336 2337 ASSERT(stage <= ZIO_STAGE_DONE); 2338 2339 /* 2340 * If we are in interrupt context and this pipeline stage 2341 * will grab a config lock that is held across I/O, 2342 * or may wait for an I/O that needs an interrupt thread 2343 * to complete, issue async to avoid deadlock. 2344 * 2345 * For VDEV_IO_START, we cut in line so that the io will 2346 * be sent to disk promptly. 2347 */ 2348 if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL && 2349 zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) { 2350 boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ? 2351 zio_requeue_io_start_cut_in_line : B_FALSE; 2352 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut); 2353 return; 2354 } 2355 2356 /* 2357 * If the current context doesn't have large enough stacks 2358 * the zio must be issued asynchronously to prevent overflow. 2359 */ 2360 if (zio_execute_stack_check(zio)) { 2361 boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ? 2362 zio_requeue_io_start_cut_in_line : B_FALSE; 2363 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut); 2364 return; 2365 } 2366 2367 zio->io_stage = stage; 2368 zio->io_pipeline_trace |= zio->io_stage; 2369 2370 /* 2371 * The zio pipeline stage returns the next zio to execute 2372 * (typically the same as this one), or NULL if we should 2373 * stop. 2374 */ 2375 zio = zio_pipeline[highbit64(stage) - 1](zio); 2376 2377 if (zio == NULL) 2378 return; 2379 } 2380 } 2381 2382 2383 /* 2384 * ========================================================================== 2385 * Initiate I/O, either sync or async 2386 * ========================================================================== 2387 */ 2388 int 2389 zio_wait(zio_t *zio) 2390 { 2391 /* 2392 * Some routines, like zio_free_sync(), may return a NULL zio 2393 * to avoid the performance overhead of creating and then destroying 2394 * an unneeded zio. For the callers' simplicity, we accept a NULL 2395 * zio and ignore it. 
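 * While waiting for completion we wake up periodically so the deadman
 * can check for (and log) hung child I/Os, which is why the loop below
 * uses cv_timedwait_io() rather than a plain cv_wait().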
2396 */ 2397 if (zio == NULL) 2398 return (0); 2399 2400 long timeout = MSEC_TO_TICK(zfs_deadman_ziotime_ms); 2401 int error; 2402 2403 ASSERT3S(zio->io_stage, ==, ZIO_STAGE_OPEN); 2404 ASSERT3P(zio->io_executor, ==, NULL); 2405 2406 zio->io_waiter = curthread; 2407 ASSERT0(zio->io_queued_timestamp); 2408 zio->io_queued_timestamp = gethrtime(); 2409 2410 if (zio->io_type == ZIO_TYPE_WRITE) { 2411 spa_select_allocator(zio); 2412 } 2413 __zio_execute(zio); 2414 2415 mutex_enter(&zio->io_lock); 2416 while (zio->io_executor != NULL) { 2417 error = cv_timedwait_io(&zio->io_cv, &zio->io_lock, 2418 ddi_get_lbolt() + timeout); 2419 2420 if (zfs_deadman_enabled && error == -1 && 2421 gethrtime() - zio->io_queued_timestamp > 2422 spa_deadman_ziotime(zio->io_spa)) { 2423 mutex_exit(&zio->io_lock); 2424 timeout = MSEC_TO_TICK(zfs_deadman_checktime_ms); 2425 zio_deadman(zio, FTAG); 2426 mutex_enter(&zio->io_lock); 2427 } 2428 } 2429 mutex_exit(&zio->io_lock); 2430 2431 error = zio->io_error; 2432 zio_destroy(zio); 2433 2434 return (error); 2435 } 2436 2437 void 2438 zio_nowait(zio_t *zio) 2439 { 2440 /* 2441 * See comment in zio_wait(). 2442 */ 2443 if (zio == NULL) 2444 return; 2445 2446 ASSERT3P(zio->io_executor, ==, NULL); 2447 2448 if (zio->io_child_type == ZIO_CHILD_LOGICAL && 2449 list_is_empty(&zio->io_parent_list)) { 2450 zio_t *pio; 2451 2452 /* 2453 * This is a logical async I/O with no parent to wait for it. 2454 * We add it to the spa_async_root_zio "Godfather" I/O which 2455 * will ensure they complete prior to unloading the pool. 2456 */ 2457 spa_t *spa = zio->io_spa; 2458 pio = spa->spa_async_zio_root[CPU_SEQID_UNSTABLE]; 2459 2460 zio_add_child(pio, zio); 2461 } 2462 2463 ASSERT0(zio->io_queued_timestamp); 2464 zio->io_queued_timestamp = gethrtime(); 2465 if (zio->io_type == ZIO_TYPE_WRITE) { 2466 spa_select_allocator(zio); 2467 } 2468 __zio_execute(zio); 2469 } 2470 2471 /* 2472 * ========================================================================== 2473 * Reexecute, cancel, or suspend/resume failed I/O 2474 * ========================================================================== 2475 */ 2476 2477 static void 2478 zio_reexecute(void *arg) 2479 { 2480 zio_t *pio = arg; 2481 zio_t *cio, *cio_next, *gio; 2482 2483 ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL); 2484 ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN); 2485 ASSERT(pio->io_gang_leader == NULL); 2486 ASSERT(pio->io_gang_tree == NULL); 2487 2488 mutex_enter(&pio->io_lock); 2489 pio->io_flags = pio->io_orig_flags; 2490 pio->io_stage = pio->io_orig_stage; 2491 pio->io_pipeline = pio->io_orig_pipeline; 2492 pio->io_reexecute = 0; 2493 pio->io_flags |= ZIO_FLAG_REEXECUTED; 2494 pio->io_pipeline_trace = 0; 2495 pio->io_error = 0; 2496 pio->io_state[ZIO_WAIT_READY] = (pio->io_stage >= ZIO_STAGE_READY) || 2497 (pio->io_pipeline & ZIO_STAGE_READY) == 0; 2498 pio->io_state[ZIO_WAIT_DONE] = (pio->io_stage >= ZIO_STAGE_DONE); 2499 zio_link_t *zl = NULL; 2500 while ((gio = zio_walk_parents(pio, &zl)) != NULL) { 2501 for (int w = 0; w < ZIO_WAIT_TYPES; w++) { 2502 gio->io_children[pio->io_child_type][w] += 2503 !pio->io_state[w]; 2504 } 2505 } 2506 for (int c = 0; c < ZIO_CHILD_TYPES; c++) 2507 pio->io_child_error[c] = 0; 2508 2509 if (IO_IS_ALLOCATING(pio)) 2510 BP_ZERO(pio->io_bp); 2511 2512 /* 2513 * As we reexecute pio's children, new children could be created. 2514 * New children go to the head of pio's io_child_list, however, 2515 * so we will (correctly) not reexecute them. 
The key is that 2516 * the remainder of pio's io_child_list, from 'cio_next' onward, 2517 * cannot be affected by any side effects of reexecuting 'cio'. 2518 */ 2519 zl = NULL; 2520 for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) { 2521 cio_next = zio_walk_children(pio, &zl); 2522 mutex_exit(&pio->io_lock); 2523 zio_reexecute(cio); 2524 mutex_enter(&pio->io_lock); 2525 } 2526 mutex_exit(&pio->io_lock); 2527 2528 /* 2529 * Now that all children have been reexecuted, execute the parent. 2530 * We don't reexecute "The Godfather" I/O here as it's the 2531 * responsibility of the caller to wait on it. 2532 */ 2533 if (!(pio->io_flags & ZIO_FLAG_GODFATHER)) { 2534 pio->io_queued_timestamp = gethrtime(); 2535 __zio_execute(pio); 2536 } 2537 } 2538 2539 void 2540 zio_suspend(spa_t *spa, zio_t *zio, zio_suspend_reason_t reason) 2541 { 2542 if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC) 2543 fm_panic("Pool '%s' has encountered an uncorrectable I/O " 2544 "failure and the failure mode property for this pool " 2545 "is set to panic.", spa_name(spa)); 2546 2547 if (reason != ZIO_SUSPEND_MMP) { 2548 cmn_err(CE_WARN, "Pool '%s' has encountered an uncorrectable " 2549 "I/O failure and has been suspended.\n", spa_name(spa)); 2550 } 2551 2552 (void) zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL, 2553 NULL, NULL, 0); 2554 2555 mutex_enter(&spa->spa_suspend_lock); 2556 2557 if (spa->spa_suspend_zio_root == NULL) 2558 spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL, 2559 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 2560 ZIO_FLAG_GODFATHER); 2561 2562 spa->spa_suspended = reason; 2563 2564 if (zio != NULL) { 2565 ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER)); 2566 ASSERT(zio != spa->spa_suspend_zio_root); 2567 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2568 ASSERT(zio_unique_parent(zio) == NULL); 2569 ASSERT(zio->io_stage == ZIO_STAGE_DONE); 2570 zio_add_child(spa->spa_suspend_zio_root, zio); 2571 } 2572 2573 mutex_exit(&spa->spa_suspend_lock); 2574 } 2575 2576 int 2577 zio_resume(spa_t *spa) 2578 { 2579 zio_t *pio; 2580 2581 /* 2582 * Reexecute all previously suspended i/o. 2583 */ 2584 mutex_enter(&spa->spa_suspend_lock); 2585 spa->spa_suspended = ZIO_SUSPEND_NONE; 2586 cv_broadcast(&spa->spa_suspend_cv); 2587 pio = spa->spa_suspend_zio_root; 2588 spa->spa_suspend_zio_root = NULL; 2589 mutex_exit(&spa->spa_suspend_lock); 2590 2591 if (pio == NULL) 2592 return (0); 2593 2594 zio_reexecute(pio); 2595 return (zio_wait(pio)); 2596 } 2597 2598 void 2599 zio_resume_wait(spa_t *spa) 2600 { 2601 mutex_enter(&spa->spa_suspend_lock); 2602 while (spa_suspended(spa)) 2603 cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock); 2604 mutex_exit(&spa->spa_suspend_lock); 2605 } 2606 2607 /* 2608 * ========================================================================== 2609 * Gang blocks. 2610 * 2611 * A gang block is a collection of small blocks that looks to the DMU 2612 * like one large block. When zio_dva_allocate() cannot find a block 2613 * of the requested size, due to either severe fragmentation or the pool 2614 * being nearly full, it calls zio_write_gang_block() to construct the 2615 * block from smaller fragments. 2616 * 2617 * A gang block consists of a gang header (zio_gbh_phys_t) and up to 2618 * three (SPA_GBH_NBLKPTRS) gang members. The gang header is just like 2619 * an indirect block: it's an array of block pointers. It consumes 2620 * only one sector and hence is allocatable regardless of fragmentation. 
2621 * The gang header's bps point to its gang members, which hold the data. 2622 * 2623 * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg> 2624 * as the verifier to ensure uniqueness of the SHA256 checksum. 2625 * Critically, the gang block bp's blk_cksum is the checksum of the data, 2626 * not the gang header. This ensures that data block signatures (needed for 2627 * deduplication) are independent of how the block is physically stored. 2628 * 2629 * Gang blocks can be nested: a gang member may itself be a gang block. 2630 * Thus every gang block is a tree in which root and all interior nodes are 2631 * gang headers, and the leaves are normal blocks that contain user data. 2632 * The root of the gang tree is called the gang leader. 2633 * 2634 * To perform any operation (read, rewrite, free, claim) on a gang block, 2635 * zio_gang_assemble() first assembles the gang tree (minus data leaves) 2636 * in the io_gang_tree field of the original logical i/o by recursively 2637 * reading the gang leader and all gang headers below it. This yields 2638 * an in-core tree containing the contents of every gang header and the 2639 * bps for every constituent of the gang block. 2640 * 2641 * With the gang tree now assembled, zio_gang_issue() just walks the gang tree 2642 * and invokes a callback on each bp. To free a gang block, zio_gang_issue() 2643 * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp. 2644 * zio_claim_gang() provides a similarly trivial wrapper for zio_claim(). 2645 * zio_read_gang() is a wrapper around zio_read() that omits reading gang 2646 * headers, since we already have those in io_gang_tree. zio_rewrite_gang() 2647 * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite() 2648 * of the gang header plus zio_checksum_compute() of the data to update the 2649 * gang header's blk_cksum as described above. 2650 * 2651 * The two-phase assemble/issue model solves the problem of partial failure -- 2652 * what if you'd freed part of a gang block but then couldn't read the 2653 * gang header for another part? Assembling the entire gang tree first 2654 * ensures that all the necessary gang header I/O has succeeded before 2655 * starting the actual work of free, claim, or write. Once the gang tree 2656 * is assembled, free and claim are in-memory operations that cannot fail. 2657 * 2658 * In the event that a gang write fails, zio_dva_unallocate() walks the 2659 * gang tree to immediately free (i.e. insert back into the space map) 2660 * everything we've allocated. This ensures that we don't get ENOSPC 2661 * errors during repeated suspend/resume cycles due to a flaky device. 2662 * 2663 * Gang rewrites only happen during sync-to-convergence. If we can't assemble 2664 * the gang tree, we won't modify the block, so we can safely defer the free 2665 * (knowing that the block is still intact). If we *can* assemble the gang 2666 * tree, then even if some of the rewrites fail, zio_dva_unallocate() will free 2667 * each constituent bp and we can allocate a new block on the next sync pass. 2668 * 2669 * In all cases, the gang tree allows complete recovery from partial failure. 
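 * The callback invoked for each bp is selected from the
 * zio_gang_issue_func[] table below, indexed by the gang leader's
 * io_type.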
2670 * ========================================================================== 2671 */ 2672 2673 static void 2674 zio_gang_issue_func_done(zio_t *zio) 2675 { 2676 abd_free(zio->io_abd); 2677 } 2678 2679 static zio_t * 2680 zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 2681 uint64_t offset) 2682 { 2683 if (gn != NULL) 2684 return (pio); 2685 2686 return (zio_read(pio, pio->io_spa, bp, abd_get_offset(data, offset), 2687 BP_GET_PSIZE(bp), zio_gang_issue_func_done, 2688 NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), 2689 &pio->io_bookmark)); 2690 } 2691 2692 static zio_t * 2693 zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 2694 uint64_t offset) 2695 { 2696 zio_t *zio; 2697 2698 if (gn != NULL) { 2699 abd_t *gbh_abd = 2700 abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE); 2701 zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp, 2702 gbh_abd, SPA_GANGBLOCKSIZE, zio_gang_issue_func_done, NULL, 2703 pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), 2704 &pio->io_bookmark); 2705 /* 2706 * As we rewrite each gang header, the pipeline will compute 2707 * a new gang block header checksum for it; but no one will 2708 * compute a new data checksum, so we do that here. The one 2709 * exception is the gang leader: the pipeline already computed 2710 * its data checksum because that stage precedes gang assembly. 2711 * (Presently, nothing actually uses interior data checksums; 2712 * this is just good hygiene.) 2713 */ 2714 if (gn != pio->io_gang_leader->io_gang_tree) { 2715 abd_t *buf = abd_get_offset(data, offset); 2716 2717 zio_checksum_compute(zio, BP_GET_CHECKSUM(bp), 2718 buf, BP_GET_PSIZE(bp)); 2719 2720 abd_free(buf); 2721 } 2722 /* 2723 * If we are here to damage data for testing purposes, 2724 * leave the GBH alone so that we can detect the damage. 
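 * (Clearing the vdev I/O stages below means this gang header rewrite
 * never reaches the device, so the induced damage remains in place
 * for a later read to detect.)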
2725 */ 2726 if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE) 2727 zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES; 2728 } else { 2729 zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp, 2730 abd_get_offset(data, offset), BP_GET_PSIZE(bp), 2731 zio_gang_issue_func_done, NULL, pio->io_priority, 2732 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 2733 } 2734 2735 return (zio); 2736 } 2737 2738 static zio_t * 2739 zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 2740 uint64_t offset) 2741 { 2742 (void) gn, (void) data, (void) offset; 2743 2744 zio_t *zio = zio_free_sync(pio, pio->io_spa, pio->io_txg, bp, 2745 ZIO_GANG_CHILD_FLAGS(pio)); 2746 if (zio == NULL) { 2747 zio = zio_null(pio, pio->io_spa, 2748 NULL, NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio)); 2749 } 2750 return (zio); 2751 } 2752 2753 static zio_t * 2754 zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 2755 uint64_t offset) 2756 { 2757 (void) gn, (void) data, (void) offset; 2758 return (zio_claim(pio, pio->io_spa, pio->io_txg, bp, 2759 NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio))); 2760 } 2761 2762 static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = { 2763 NULL, 2764 zio_read_gang, 2765 zio_rewrite_gang, 2766 zio_free_gang, 2767 zio_claim_gang, 2768 NULL 2769 }; 2770 2771 static void zio_gang_tree_assemble_done(zio_t *zio); 2772 2773 static zio_gang_node_t * 2774 zio_gang_node_alloc(zio_gang_node_t **gnpp) 2775 { 2776 zio_gang_node_t *gn; 2777 2778 ASSERT(*gnpp == NULL); 2779 2780 gn = kmem_zalloc(sizeof (*gn), KM_SLEEP); 2781 gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE); 2782 *gnpp = gn; 2783 2784 return (gn); 2785 } 2786 2787 static void 2788 zio_gang_node_free(zio_gang_node_t **gnpp) 2789 { 2790 zio_gang_node_t *gn = *gnpp; 2791 2792 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) 2793 ASSERT(gn->gn_child[g] == NULL); 2794 2795 zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE); 2796 kmem_free(gn, sizeof (*gn)); 2797 *gnpp = NULL; 2798 } 2799 2800 static void 2801 zio_gang_tree_free(zio_gang_node_t **gnpp) 2802 { 2803 zio_gang_node_t *gn = *gnpp; 2804 2805 if (gn == NULL) 2806 return; 2807 2808 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) 2809 zio_gang_tree_free(&gn->gn_child[g]); 2810 2811 zio_gang_node_free(gnpp); 2812 } 2813 2814 static void 2815 zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp) 2816 { 2817 zio_gang_node_t *gn = zio_gang_node_alloc(gnpp); 2818 abd_t *gbh_abd = abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE); 2819 2820 ASSERT(gio->io_gang_leader == gio); 2821 ASSERT(BP_IS_GANG(bp)); 2822 2823 zio_nowait(zio_read(gio, gio->io_spa, bp, gbh_abd, SPA_GANGBLOCKSIZE, 2824 zio_gang_tree_assemble_done, gn, gio->io_priority, 2825 ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark)); 2826 } 2827 2828 static void 2829 zio_gang_tree_assemble_done(zio_t *zio) 2830 { 2831 zio_t *gio = zio->io_gang_leader; 2832 zio_gang_node_t *gn = zio->io_private; 2833 blkptr_t *bp = zio->io_bp; 2834 2835 ASSERT(gio == zio_unique_parent(zio)); 2836 ASSERT(list_is_empty(&zio->io_child_list)); 2837 2838 if (zio->io_error) 2839 return; 2840 2841 /* this ABD was created from a linear buf in zio_gang_tree_assemble */ 2842 if (BP_SHOULD_BYTESWAP(bp)) 2843 byteswap_uint64_array(abd_to_buf(zio->io_abd), zio->io_size); 2844 2845 ASSERT3P(abd_to_buf(zio->io_abd), ==, gn->gn_gbh); 2846 ASSERT(zio->io_size == SPA_GANGBLOCKSIZE); 2847 ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC); 2848 2849 abd_free(zio->io_abd); 2850 2851 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 2852 blkptr_t *gbp = 
&gn->gn_gbh->zg_blkptr[g]; 2853 if (!BP_IS_GANG(gbp)) 2854 continue; 2855 zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]); 2856 } 2857 } 2858 2859 static void 2860 zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, abd_t *data, 2861 uint64_t offset) 2862 { 2863 zio_t *gio = pio->io_gang_leader; 2864 zio_t *zio; 2865 2866 ASSERT(BP_IS_GANG(bp) == !!gn); 2867 ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp)); 2868 ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree); 2869 2870 /* 2871 * If you're a gang header, your data is in gn->gn_gbh. 2872 * If you're a gang member, your data is in 'data' and gn == NULL. 2873 */ 2874 zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data, offset); 2875 2876 if (gn != NULL) { 2877 ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC); 2878 2879 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 2880 blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g]; 2881 if (BP_IS_HOLE(gbp)) 2882 continue; 2883 zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data, 2884 offset); 2885 offset += BP_GET_PSIZE(gbp); 2886 } 2887 } 2888 2889 if (gn == gio->io_gang_tree) 2890 ASSERT3U(gio->io_size, ==, offset); 2891 2892 if (zio != pio) 2893 zio_nowait(zio); 2894 } 2895 2896 static zio_t * 2897 zio_gang_assemble(zio_t *zio) 2898 { 2899 blkptr_t *bp = zio->io_bp; 2900 2901 ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL); 2902 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 2903 2904 zio->io_gang_leader = zio; 2905 2906 zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree); 2907 2908 return (zio); 2909 } 2910 2911 static zio_t * 2912 zio_gang_issue(zio_t *zio) 2913 { 2914 blkptr_t *bp = zio->io_bp; 2915 2916 if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT, ZIO_WAIT_DONE)) { 2917 return (NULL); 2918 } 2919 2920 ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio); 2921 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 2922 2923 if (zio->io_child_error[ZIO_CHILD_GANG] == 0) 2924 zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_abd, 2925 0); 2926 else 2927 zio_gang_tree_free(&zio->io_gang_tree); 2928 2929 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 2930 2931 return (zio); 2932 } 2933 2934 static void 2935 zio_gang_inherit_allocator(zio_t *pio, zio_t *cio) 2936 { 2937 cio->io_allocator = pio->io_allocator; 2938 } 2939 2940 static void 2941 zio_write_gang_member_ready(zio_t *zio) 2942 { 2943 zio_t *pio = zio_unique_parent(zio); 2944 dva_t *cdva = zio->io_bp->blk_dva; 2945 dva_t *pdva = pio->io_bp->blk_dva; 2946 uint64_t asize; 2947 zio_t *gio __maybe_unused = zio->io_gang_leader; 2948 2949 if (BP_IS_HOLE(zio->io_bp)) 2950 return; 2951 2952 ASSERT(BP_IS_HOLE(&zio->io_bp_orig)); 2953 2954 ASSERT(zio->io_child_type == ZIO_CHILD_GANG); 2955 ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies); 2956 ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp)); 2957 ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp)); 2958 VERIFY3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp)); 2959 2960 mutex_enter(&pio->io_lock); 2961 for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) { 2962 ASSERT(DVA_GET_GANG(&pdva[d])); 2963 asize = DVA_GET_ASIZE(&pdva[d]); 2964 asize += DVA_GET_ASIZE(&cdva[d]); 2965 DVA_SET_ASIZE(&pdva[d], asize); 2966 } 2967 mutex_exit(&pio->io_lock); 2968 } 2969 2970 static void 2971 zio_write_gang_done(zio_t *zio) 2972 { 2973 /* 2974 * The io_abd field will be NULL for a zio with no data. 
The io_flags 2975 * will initially have the ZIO_FLAG_NODATA bit flag set, but we can't 2976 * check for it here as it is cleared in zio_ready. 2977 */ 2978 if (zio->io_abd != NULL) 2979 abd_free(zio->io_abd); 2980 } 2981 2982 static zio_t * 2983 zio_write_gang_block(zio_t *pio, metaslab_class_t *mc) 2984 { 2985 spa_t *spa = pio->io_spa; 2986 blkptr_t *bp = pio->io_bp; 2987 zio_t *gio = pio->io_gang_leader; 2988 zio_t *zio; 2989 zio_gang_node_t *gn, **gnpp; 2990 zio_gbh_phys_t *gbh; 2991 abd_t *gbh_abd; 2992 uint64_t txg = pio->io_txg; 2993 uint64_t resid = pio->io_size; 2994 uint64_t lsize; 2995 int copies = gio->io_prop.zp_copies; 2996 zio_prop_t zp; 2997 int error; 2998 boolean_t has_data = !(pio->io_flags & ZIO_FLAG_NODATA); 2999 3000 /* 3001 * If one copy was requested, store 2 copies of the GBH, so that we 3002 * can still traverse all the data (e.g. to free or scrub) even if a 3003 * block is damaged. Note that we can't store 3 copies of the GBH in 3004 * all cases, e.g. with encryption, which uses DVA[2] for the IV+salt. 3005 */ 3006 int gbh_copies = copies; 3007 if (gbh_copies == 1) { 3008 gbh_copies = MIN(2, spa_max_replication(spa)); 3009 } 3010 3011 ASSERT(ZIO_HAS_ALLOCATOR(pio)); 3012 int flags = METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER; 3013 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 3014 ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 3015 ASSERT(has_data); 3016 3017 flags |= METASLAB_ASYNC_ALLOC; 3018 VERIFY(zfs_refcount_held(&mc->mc_allocator[pio->io_allocator]. 3019 mca_alloc_slots, pio)); 3020 3021 /* 3022 * The logical zio has already placed a reservation for 3023 * 'copies' allocation slots but gang blocks may require 3024 * additional copies. These additional copies 3025 * (i.e. gbh_copies - copies) are guaranteed to succeed 3026 * since metaslab_class_throttle_reserve() always allows 3027 * additional reservations for gang blocks. 3028 */ 3029 VERIFY(metaslab_class_throttle_reserve(mc, gbh_copies - copies, 3030 pio->io_allocator, pio, flags)); 3031 } 3032 3033 error = metaslab_alloc(spa, mc, SPA_GANGBLOCKSIZE, 3034 bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, flags, 3035 &pio->io_alloc_list, pio, pio->io_allocator); 3036 if (error) { 3037 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 3038 ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 3039 ASSERT(has_data); 3040 3041 /* 3042 * If we failed to allocate the gang block header then 3043 * we remove any additional allocation reservations that 3044 * we placed here. The original reservation will 3045 * be removed when the logical I/O goes to the ready 3046 * stage. 3047 */ 3048 metaslab_class_throttle_unreserve(mc, 3049 gbh_copies - copies, pio->io_allocator, pio); 3050 } 3051 3052 pio->io_error = error; 3053 return (pio); 3054 } 3055 3056 if (pio == gio) { 3057 gnpp = &gio->io_gang_tree; 3058 } else { 3059 gnpp = pio->io_private; 3060 ASSERT(pio->io_ready == zio_write_gang_member_ready); 3061 } 3062 3063 gn = zio_gang_node_alloc(gnpp); 3064 gbh = gn->gn_gbh; 3065 memset(gbh, 0, SPA_GANGBLOCKSIZE); 3066 gbh_abd = abd_get_from_buf(gbh, SPA_GANGBLOCKSIZE); 3067 3068 /* 3069 * Create the gang header. 3070 */ 3071 zio = zio_rewrite(pio, spa, txg, bp, gbh_abd, SPA_GANGBLOCKSIZE, 3072 zio_write_gang_done, NULL, pio->io_priority, 3073 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 3074 3075 zio_gang_inherit_allocator(pio, zio); 3076 3077 /* 3078 * Create and nowait the gang children. 
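 * Each child gets a roughly even share of the remaining residual,
 * rounded up to SPA_MINBLOCKSIZE, so earlier children may be slightly
 * larger and the final child absorbs whatever is left.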
3079 */ 3080 for (int g = 0; resid != 0; resid -= lsize, g++) { 3081 lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g), 3082 SPA_MINBLOCKSIZE); 3083 ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid); 3084 3085 zp.zp_checksum = gio->io_prop.zp_checksum; 3086 zp.zp_compress = ZIO_COMPRESS_OFF; 3087 zp.zp_complevel = gio->io_prop.zp_complevel; 3088 zp.zp_type = zp.zp_storage_type = DMU_OT_NONE; 3089 zp.zp_level = 0; 3090 zp.zp_copies = gio->io_prop.zp_copies; 3091 zp.zp_dedup = B_FALSE; 3092 zp.zp_dedup_verify = B_FALSE; 3093 zp.zp_nopwrite = B_FALSE; 3094 zp.zp_encrypt = gio->io_prop.zp_encrypt; 3095 zp.zp_byteorder = gio->io_prop.zp_byteorder; 3096 memset(zp.zp_salt, 0, ZIO_DATA_SALT_LEN); 3097 memset(zp.zp_iv, 0, ZIO_DATA_IV_LEN); 3098 memset(zp.zp_mac, 0, ZIO_DATA_MAC_LEN); 3099 3100 zio_t *cio = zio_write(zio, spa, txg, &gbh->zg_blkptr[g], 3101 has_data ? abd_get_offset(pio->io_abd, pio->io_size - 3102 resid) : NULL, lsize, lsize, &zp, 3103 zio_write_gang_member_ready, NULL, 3104 zio_write_gang_done, &gn->gn_child[g], pio->io_priority, 3105 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 3106 3107 zio_gang_inherit_allocator(zio, cio); 3108 3109 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 3110 ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 3111 ASSERT(has_data); 3112 3113 /* 3114 * Gang children won't throttle but we should 3115 * account for their work, so reserve an allocation 3116 * slot for them here. 3117 */ 3118 VERIFY(metaslab_class_throttle_reserve(mc, 3119 zp.zp_copies, cio->io_allocator, cio, flags)); 3120 } 3121 zio_nowait(cio); 3122 } 3123 3124 /* 3125 * Set pio's pipeline to just wait for zio to finish. 3126 */ 3127 pio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 3128 3129 zio_nowait(zio); 3130 3131 return (pio); 3132 } 3133 3134 /* 3135 * The zio_nop_write stage in the pipeline determines if allocating a 3136 * new bp is necessary. The nopwrite feature can handle writes in 3137 * either syncing or open context (i.e. zil writes) and as a result is 3138 * mutually exclusive with dedup. 3139 * 3140 * By leveraging a cryptographically secure checksum, such as SHA256, we 3141 * can compare the checksums of the new data and the old to determine if 3142 * allocating a new block is required. Note that our requirements for 3143 * cryptographic strength are fairly weak: there can't be any accidental 3144 * hash collisions, but we don't need to be secure against intentional 3145 * (malicious) collisions. To trigger a nopwrite, you have to be able 3146 * to write the file to begin with, and triggering an incorrect (hash 3147 * collision) nopwrite is no worse than simply writing to the file. 3148 * That said, there are no known attacks against the checksum algorithms 3149 * used for nopwrite, assuming that the salt and the checksums 3150 * themselves remain secret. 3151 */ 3152 static zio_t * 3153 zio_nop_write(zio_t *zio) 3154 { 3155 blkptr_t *bp = zio->io_bp; 3156 blkptr_t *bp_orig = &zio->io_bp_orig; 3157 zio_prop_t *zp = &zio->io_prop; 3158 3159 ASSERT(BP_IS_HOLE(bp)); 3160 ASSERT(BP_GET_LEVEL(bp) == 0); 3161 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE)); 3162 ASSERT(zp->zp_nopwrite); 3163 ASSERT(!zp->zp_dedup); 3164 ASSERT(zio->io_bp_override == NULL); 3165 ASSERT(IO_IS_ALLOCATING(zio)); 3166 3167 /* 3168 * Check to see if the original bp and the new bp have matching 3169 * characteristics (i.e. same checksum, compression algorithms, etc). 3170 * If they don't then just continue with the pipeline which will 3171 * allocate a new bp. 
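 * Encrypted blocks never qualify: rewriting the same plaintext uses a
 * fresh IV/salt, so the new on-disk checksum will not match the
 * original's and the checksum comparison further below could never
 * succeed.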
3172 */ 3173 if (BP_IS_HOLE(bp_orig) || 3174 !(zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_flags & 3175 ZCHECKSUM_FLAG_NOPWRITE) || 3176 BP_IS_ENCRYPTED(bp) || BP_IS_ENCRYPTED(bp_orig) || 3177 BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) || 3178 BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) || 3179 BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) || 3180 zp->zp_copies != BP_GET_NDVAS(bp_orig)) 3181 return (zio); 3182 3183 /* 3184 * If the checksums match then reset the pipeline so that we 3185 * avoid allocating a new bp and issuing any I/O. 3186 */ 3187 if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) { 3188 ASSERT(zio_checksum_table[zp->zp_checksum].ci_flags & 3189 ZCHECKSUM_FLAG_NOPWRITE); 3190 ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig)); 3191 ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig)); 3192 ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF); 3193 ASSERT3U(bp->blk_prop, ==, bp_orig->blk_prop); 3194 3195 /* 3196 * If we're overwriting a block that is currently on an 3197 * indirect vdev, then ignore the nopwrite request and 3198 * allow a new block to be allocated on a concrete vdev. 3199 */ 3200 spa_config_enter(zio->io_spa, SCL_VDEV, FTAG, RW_READER); 3201 for (int d = 0; d < BP_GET_NDVAS(bp_orig); d++) { 3202 vdev_t *tvd = vdev_lookup_top(zio->io_spa, 3203 DVA_GET_VDEV(&bp_orig->blk_dva[d])); 3204 if (tvd->vdev_ops == &vdev_indirect_ops) { 3205 spa_config_exit(zio->io_spa, SCL_VDEV, FTAG); 3206 return (zio); 3207 } 3208 } 3209 spa_config_exit(zio->io_spa, SCL_VDEV, FTAG); 3210 3211 *bp = *bp_orig; 3212 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 3213 zio->io_flags |= ZIO_FLAG_NOPWRITE; 3214 } 3215 3216 return (zio); 3217 } 3218 3219 /* 3220 * ========================================================================== 3221 * Block Reference Table 3222 * ========================================================================== 3223 */ 3224 static zio_t * 3225 zio_brt_free(zio_t *zio) 3226 { 3227 blkptr_t *bp; 3228 3229 bp = zio->io_bp; 3230 3231 if (BP_GET_LEVEL(bp) > 0 || 3232 BP_IS_METADATA(bp) || 3233 !brt_maybe_exists(zio->io_spa, bp)) { 3234 return (zio); 3235 } 3236 3237 if (!brt_entry_decref(zio->io_spa, bp)) { 3238 /* 3239 * This isn't the last reference, so we cannot free 3240 * the data yet. 
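 * The BRT reference was already dropped by brt_entry_decref() above;
 * collapsing to ZIO_INTERLOCK_PIPELINE skips the remaining free stages
 * so the block's DVAs stay allocated for the other references.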
3241 */ 3242 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 3243 } 3244 3245 return (zio); 3246 } 3247 3248 /* 3249 * ========================================================================== 3250 * Dedup 3251 * ========================================================================== 3252 */ 3253 static void 3254 zio_ddt_child_read_done(zio_t *zio) 3255 { 3256 blkptr_t *bp = zio->io_bp; 3257 ddt_entry_t *dde = zio->io_private; 3258 ddt_phys_t *ddp; 3259 zio_t *pio = zio_unique_parent(zio); 3260 3261 mutex_enter(&pio->io_lock); 3262 ddp = ddt_phys_select(dde, bp); 3263 if (zio->io_error == 0) 3264 ddt_phys_clear(ddp); /* this ddp doesn't need repair */ 3265 3266 if (zio->io_error == 0 && dde->dde_repair_abd == NULL) 3267 dde->dde_repair_abd = zio->io_abd; 3268 else 3269 abd_free(zio->io_abd); 3270 mutex_exit(&pio->io_lock); 3271 } 3272 3273 static zio_t * 3274 zio_ddt_read_start(zio_t *zio) 3275 { 3276 blkptr_t *bp = zio->io_bp; 3277 3278 ASSERT(BP_GET_DEDUP(bp)); 3279 ASSERT(BP_GET_PSIZE(bp) == zio->io_size); 3280 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 3281 3282 if (zio->io_child_error[ZIO_CHILD_DDT]) { 3283 ddt_t *ddt = ddt_select(zio->io_spa, bp); 3284 ddt_entry_t *dde = ddt_repair_start(ddt, bp); 3285 ddt_phys_t *ddp = dde->dde_phys; 3286 ddt_phys_t *ddp_self = ddt_phys_select(dde, bp); 3287 blkptr_t blk; 3288 3289 ASSERT(zio->io_vsd == NULL); 3290 zio->io_vsd = dde; 3291 3292 if (ddp_self == NULL) 3293 return (zio); 3294 3295 for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { 3296 if (ddp->ddp_phys_birth == 0 || ddp == ddp_self) 3297 continue; 3298 ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp, 3299 &blk); 3300 zio_nowait(zio_read(zio, zio->io_spa, &blk, 3301 abd_alloc_for_io(zio->io_size, B_TRUE), 3302 zio->io_size, zio_ddt_child_read_done, dde, 3303 zio->io_priority, ZIO_DDT_CHILD_FLAGS(zio) | 3304 ZIO_FLAG_DONT_PROPAGATE, &zio->io_bookmark)); 3305 } 3306 return (zio); 3307 } 3308 3309 zio_nowait(zio_read(zio, zio->io_spa, bp, 3310 zio->io_abd, zio->io_size, NULL, NULL, zio->io_priority, 3311 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark)); 3312 3313 return (zio); 3314 } 3315 3316 static zio_t * 3317 zio_ddt_read_done(zio_t *zio) 3318 { 3319 blkptr_t *bp = zio->io_bp; 3320 3321 if (zio_wait_for_children(zio, ZIO_CHILD_DDT_BIT, ZIO_WAIT_DONE)) { 3322 return (NULL); 3323 } 3324 3325 ASSERT(BP_GET_DEDUP(bp)); 3326 ASSERT(BP_GET_PSIZE(bp) == zio->io_size); 3327 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 3328 3329 if (zio->io_child_error[ZIO_CHILD_DDT]) { 3330 ddt_t *ddt = ddt_select(zio->io_spa, bp); 3331 ddt_entry_t *dde = zio->io_vsd; 3332 if (ddt == NULL) { 3333 ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE); 3334 return (zio); 3335 } 3336 if (dde == NULL) { 3337 zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1; 3338 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE); 3339 return (NULL); 3340 } 3341 if (dde->dde_repair_abd != NULL) { 3342 abd_copy(zio->io_abd, dde->dde_repair_abd, 3343 zio->io_size); 3344 zio->io_child_error[ZIO_CHILD_DDT] = 0; 3345 } 3346 ddt_repair_done(ddt, dde); 3347 zio->io_vsd = NULL; 3348 } 3349 3350 ASSERT(zio->io_vsd == NULL); 3351 3352 return (zio); 3353 } 3354 3355 static boolean_t 3356 zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde) 3357 { 3358 spa_t *spa = zio->io_spa; 3359 boolean_t do_raw = !!(zio->io_flags & ZIO_FLAG_RAW); 3360 3361 ASSERT(!(zio->io_bp_override && do_raw)); 3362 3363 /* 3364 * Note: we compare the original data, not the transformed data, 3365 * because when zio->io_bp is an override bp, we will not have 
3366 * pushed the I/O transforms. That's an important optimization 3367 * because otherwise we'd compress/encrypt all dmu_sync() data twice. 3368 * However, we should never get a raw, override zio so in these 3369 * cases we can compare the io_abd directly. This is useful because 3370 * it allows us to do dedup verification even if we don't have access 3371 * to the original data (for instance, if the encryption keys aren't 3372 * loaded). 3373 */ 3374 3375 for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { 3376 zio_t *lio = dde->dde_lead_zio[p]; 3377 3378 if (lio != NULL && do_raw) { 3379 return (lio->io_size != zio->io_size || 3380 abd_cmp(zio->io_abd, lio->io_abd) != 0); 3381 } else if (lio != NULL) { 3382 return (lio->io_orig_size != zio->io_orig_size || 3383 abd_cmp(zio->io_orig_abd, lio->io_orig_abd) != 0); 3384 } 3385 } 3386 3387 for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { 3388 ddt_phys_t *ddp = &dde->dde_phys[p]; 3389 3390 if (ddp->ddp_phys_birth != 0 && do_raw) { 3391 blkptr_t blk = *zio->io_bp; 3392 uint64_t psize; 3393 abd_t *tmpabd; 3394 int error; 3395 3396 ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth); 3397 psize = BP_GET_PSIZE(&blk); 3398 3399 if (psize != zio->io_size) 3400 return (B_TRUE); 3401 3402 ddt_exit(ddt); 3403 3404 tmpabd = abd_alloc_for_io(psize, B_TRUE); 3405 3406 error = zio_wait(zio_read(NULL, spa, &blk, tmpabd, 3407 psize, NULL, NULL, ZIO_PRIORITY_SYNC_READ, 3408 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 3409 ZIO_FLAG_RAW, &zio->io_bookmark)); 3410 3411 if (error == 0) { 3412 if (abd_cmp(tmpabd, zio->io_abd) != 0) 3413 error = SET_ERROR(ENOENT); 3414 } 3415 3416 abd_free(tmpabd); 3417 ddt_enter(ddt); 3418 return (error != 0); 3419 } else if (ddp->ddp_phys_birth != 0) { 3420 arc_buf_t *abuf = NULL; 3421 arc_flags_t aflags = ARC_FLAG_WAIT; 3422 blkptr_t blk = *zio->io_bp; 3423 int error; 3424 3425 ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth); 3426 3427 if (BP_GET_LSIZE(&blk) != zio->io_orig_size) 3428 return (B_TRUE); 3429 3430 ddt_exit(ddt); 3431 3432 error = arc_read(NULL, spa, &blk, 3433 arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ, 3434 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 3435 &aflags, &zio->io_bookmark); 3436 3437 if (error == 0) { 3438 if (abd_cmp_buf(zio->io_orig_abd, abuf->b_data, 3439 zio->io_orig_size) != 0) 3440 error = SET_ERROR(ENOENT); 3441 arc_buf_destroy(abuf, &abuf); 3442 } 3443 3444 ddt_enter(ddt); 3445 return (error != 0); 3446 } 3447 } 3448 3449 return (B_FALSE); 3450 } 3451 3452 static void 3453 zio_ddt_child_write_ready(zio_t *zio) 3454 { 3455 int p = zio->io_prop.zp_copies; 3456 ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp); 3457 ddt_entry_t *dde = zio->io_private; 3458 ddt_phys_t *ddp = &dde->dde_phys[p]; 3459 zio_t *pio; 3460 3461 if (zio->io_error) 3462 return; 3463 3464 ddt_enter(ddt); 3465 3466 ASSERT(dde->dde_lead_zio[p] == zio); 3467 3468 ddt_phys_fill(ddp, zio->io_bp); 3469 3470 zio_link_t *zl = NULL; 3471 while ((pio = zio_walk_parents(zio, &zl)) != NULL) 3472 ddt_bp_fill(ddp, pio->io_bp, zio->io_txg); 3473 3474 ddt_exit(ddt); 3475 } 3476 3477 static void 3478 zio_ddt_child_write_done(zio_t *zio) 3479 { 3480 int p = zio->io_prop.zp_copies; 3481 ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp); 3482 ddt_entry_t *dde = zio->io_private; 3483 ddt_phys_t *ddp = &dde->dde_phys[p]; 3484 3485 ddt_enter(ddt); 3486 3487 ASSERT(ddp->ddp_refcnt == 0); 3488 ASSERT(dde->dde_lead_zio[p] == zio); 3489 dde->dde_lead_zio[p] = NULL; 3490 3491 if (zio->io_error == 0) { 3492 zio_link_t *zl = NULL; 3493 while 
(zio_walk_parents(zio, &zl) != NULL) 3494 ddt_phys_addref(ddp); 3495 } else { 3496 ddt_phys_clear(ddp); 3497 } 3498 3499 ddt_exit(ddt); 3500 } 3501 3502 static zio_t * 3503 zio_ddt_write(zio_t *zio) 3504 { 3505 spa_t *spa = zio->io_spa; 3506 blkptr_t *bp = zio->io_bp; 3507 uint64_t txg = zio->io_txg; 3508 zio_prop_t *zp = &zio->io_prop; 3509 int p = zp->zp_copies; 3510 zio_t *cio = NULL; 3511 ddt_t *ddt = ddt_select(spa, bp); 3512 ddt_entry_t *dde; 3513 ddt_phys_t *ddp; 3514 3515 ASSERT(BP_GET_DEDUP(bp)); 3516 ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum); 3517 ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override); 3518 ASSERT(!(zio->io_bp_override && (zio->io_flags & ZIO_FLAG_RAW))); 3519 3520 ddt_enter(ddt); 3521 dde = ddt_lookup(ddt, bp, B_TRUE); 3522 if (dde == NULL) { 3523 /* DDT size is over its quota so no new entries */ 3524 zp->zp_dedup = B_FALSE; 3525 BP_SET_DEDUP(bp, B_FALSE); 3526 if (zio->io_bp_override == NULL) 3527 zio->io_pipeline = ZIO_WRITE_PIPELINE; 3528 ddt_exit(ddt); 3529 return (zio); 3530 } 3531 ddp = &dde->dde_phys[p]; 3532 3533 if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) { 3534 /* 3535 * If we're using a weak checksum, upgrade to a strong checksum 3536 * and try again. If we're already using a strong checksum, 3537 * we can't resolve it, so just convert to an ordinary write. 3538 * (And automatically e-mail a paper to Nature?) 3539 */ 3540 if (!(zio_checksum_table[zp->zp_checksum].ci_flags & 3541 ZCHECKSUM_FLAG_DEDUP)) { 3542 zp->zp_checksum = spa_dedup_checksum(spa); 3543 zio_pop_transforms(zio); 3544 zio->io_stage = ZIO_STAGE_OPEN; 3545 BP_ZERO(bp); 3546 } else { 3547 zp->zp_dedup = B_FALSE; 3548 BP_SET_DEDUP(bp, B_FALSE); 3549 } 3550 ASSERT(!BP_GET_DEDUP(bp)); 3551 zio->io_pipeline = ZIO_WRITE_PIPELINE; 3552 ddt_exit(ddt); 3553 return (zio); 3554 } 3555 3556 if (ddp->ddp_phys_birth != 0 || dde->dde_lead_zio[p] != NULL) { 3557 if (ddp->ddp_phys_birth != 0) 3558 ddt_bp_fill(ddp, bp, txg); 3559 if (dde->dde_lead_zio[p] != NULL) 3560 zio_add_child(zio, dde->dde_lead_zio[p]); 3561 else 3562 ddt_phys_addref(ddp); 3563 } else if (zio->io_bp_override) { 3564 ASSERT(BP_GET_LOGICAL_BIRTH(bp) == txg); 3565 ASSERT(BP_EQUAL(bp, zio->io_bp_override)); 3566 ddt_phys_fill(ddp, bp); 3567 ddt_phys_addref(ddp); 3568 } else { 3569 cio = zio_write(zio, spa, txg, bp, zio->io_orig_abd, 3570 zio->io_orig_size, zio->io_orig_size, zp, 3571 zio_ddt_child_write_ready, NULL, 3572 zio_ddt_child_write_done, dde, zio->io_priority, 3573 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark); 3574 3575 zio_push_transform(cio, zio->io_abd, zio->io_size, 0, NULL); 3576 dde->dde_lead_zio[p] = cio; 3577 } 3578 3579 ddt_exit(ddt); 3580 3581 zio_nowait(cio); 3582 3583 return (zio); 3584 } 3585 3586 static ddt_entry_t *freedde; /* for debugging */ 3587 3588 static zio_t * 3589 zio_ddt_free(zio_t *zio) 3590 { 3591 spa_t *spa = zio->io_spa; 3592 blkptr_t *bp = zio->io_bp; 3593 ddt_t *ddt = ddt_select(spa, bp); 3594 ddt_entry_t *dde; 3595 ddt_phys_t *ddp; 3596 3597 ASSERT(BP_GET_DEDUP(bp)); 3598 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 3599 3600 ddt_enter(ddt); 3601 freedde = dde = ddt_lookup(ddt, bp, B_TRUE); 3602 if (dde) { 3603 ddp = ddt_phys_select(dde, bp); 3604 if (ddp) 3605 ddt_phys_decref(ddp); 3606 } 3607 ddt_exit(ddt); 3608 3609 return (zio); 3610 } 3611 3612 /* 3613 * ========================================================================== 3614 * Allocate and free blocks 3615 * ========================================================================== 3616 */ 3617 3618 static zio_t * 
3619 zio_io_to_allocate(spa_t *spa, int allocator) 3620 { 3621 zio_t *zio; 3622 3623 ASSERT(MUTEX_HELD(&spa->spa_allocs[allocator].spaa_lock)); 3624 3625 zio = avl_first(&spa->spa_allocs[allocator].spaa_tree); 3626 if (zio == NULL) 3627 return (NULL); 3628 3629 ASSERT(IO_IS_ALLOCATING(zio)); 3630 ASSERT(ZIO_HAS_ALLOCATOR(zio)); 3631 3632 /* 3633 * Try to place a reservation for this zio. If we're unable to 3634 * reserve then we throttle. 3635 */ 3636 ASSERT3U(zio->io_allocator, ==, allocator); 3637 if (!metaslab_class_throttle_reserve(zio->io_metaslab_class, 3638 zio->io_prop.zp_copies, allocator, zio, 0)) { 3639 return (NULL); 3640 } 3641 3642 avl_remove(&spa->spa_allocs[allocator].spaa_tree, zio); 3643 ASSERT3U(zio->io_stage, <, ZIO_STAGE_DVA_ALLOCATE); 3644 3645 return (zio); 3646 } 3647 3648 static zio_t * 3649 zio_dva_throttle(zio_t *zio) 3650 { 3651 spa_t *spa = zio->io_spa; 3652 zio_t *nio; 3653 metaslab_class_t *mc; 3654 3655 /* locate an appropriate allocation class */ 3656 mc = spa_preferred_class(spa, zio); 3657 3658 if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE || 3659 !mc->mc_alloc_throttle_enabled || 3660 zio->io_child_type == ZIO_CHILD_GANG || 3661 zio->io_flags & ZIO_FLAG_NODATA) { 3662 return (zio); 3663 } 3664 3665 ASSERT(zio->io_type == ZIO_TYPE_WRITE); 3666 ASSERT(ZIO_HAS_ALLOCATOR(zio)); 3667 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 3668 ASSERT3U(zio->io_queued_timestamp, >, 0); 3669 ASSERT(zio->io_stage == ZIO_STAGE_DVA_THROTTLE); 3670 3671 int allocator = zio->io_allocator; 3672 zio->io_metaslab_class = mc; 3673 mutex_enter(&spa->spa_allocs[allocator].spaa_lock); 3674 avl_add(&spa->spa_allocs[allocator].spaa_tree, zio); 3675 nio = zio_io_to_allocate(spa, allocator); 3676 mutex_exit(&spa->spa_allocs[allocator].spaa_lock); 3677 return (nio); 3678 } 3679 3680 static void 3681 zio_allocate_dispatch(spa_t *spa, int allocator) 3682 { 3683 zio_t *zio; 3684 3685 mutex_enter(&spa->spa_allocs[allocator].spaa_lock); 3686 zio = zio_io_to_allocate(spa, allocator); 3687 mutex_exit(&spa->spa_allocs[allocator].spaa_lock); 3688 if (zio == NULL) 3689 return; 3690 3691 ASSERT3U(zio->io_stage, ==, ZIO_STAGE_DVA_THROTTLE); 3692 ASSERT0(zio->io_error); 3693 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_TRUE); 3694 } 3695 3696 static zio_t * 3697 zio_dva_allocate(zio_t *zio) 3698 { 3699 spa_t *spa = zio->io_spa; 3700 metaslab_class_t *mc; 3701 blkptr_t *bp = zio->io_bp; 3702 int error; 3703 int flags = 0; 3704 3705 if (zio->io_gang_leader == NULL) { 3706 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 3707 zio->io_gang_leader = zio; 3708 } 3709 3710 ASSERT(BP_IS_HOLE(bp)); 3711 ASSERT0(BP_GET_NDVAS(bp)); 3712 ASSERT3U(zio->io_prop.zp_copies, >, 0); 3713 ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa)); 3714 ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp)); 3715 3716 if (zio->io_flags & ZIO_FLAG_NODATA) 3717 flags |= METASLAB_DONT_THROTTLE; 3718 if (zio->io_flags & ZIO_FLAG_GANG_CHILD) 3719 flags |= METASLAB_GANG_CHILD; 3720 if (zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE) 3721 flags |= METASLAB_ASYNC_ALLOC; 3722 3723 /* 3724 * if not already chosen, locate an appropriate allocation class 3725 */ 3726 mc = zio->io_metaslab_class; 3727 if (mc == NULL) { 3728 mc = spa_preferred_class(spa, zio); 3729 zio->io_metaslab_class = mc; 3730 } 3731 3732 /* 3733 * Try allocating the block in the usual metaslab class. 3734 * If that's full, allocate it in the normal class. 
3735 * If that's full, allocate as a gang block, 3736 * and if all are full, the allocation fails (which shouldn't happen). 3737 * 3738 * Note that we do not fall back on embedded slog (ZIL) space, to 3739 * preserve unfragmented slog space, which is critical for decent 3740 * sync write performance. If a log allocation fails, we will fall 3741 * back to spa_sync() which is abysmal for performance. 3742 */ 3743 ASSERT(ZIO_HAS_ALLOCATOR(zio)); 3744 error = metaslab_alloc(spa, mc, zio->io_size, bp, 3745 zio->io_prop.zp_copies, zio->io_txg, NULL, flags, 3746 &zio->io_alloc_list, zio, zio->io_allocator); 3747 3748 /* 3749 * Fallback to normal class when an alloc class is full 3750 */ 3751 if (error == ENOSPC && mc != spa_normal_class(spa)) { 3752 /* 3753 * When the dedup or special class is spilling into the normal 3754 * class, there can still be significant space available due 3755 * to deferred frees that are in-flight. We track the txg when 3756 * this occurred and back off adding new DDT entries for a few 3757 * txgs to allow the free blocks to be processed. 3758 */ 3759 if ((mc == spa_dedup_class(spa) || (spa_special_has_ddt(spa) && 3760 mc == spa_special_class(spa))) && 3761 spa->spa_dedup_class_full_txg != zio->io_txg) { 3762 spa->spa_dedup_class_full_txg = zio->io_txg; 3763 zfs_dbgmsg("%s[%d]: %s class spilling, req size %d, " 3764 "%llu allocated of %llu", 3765 spa_name(spa), (int)zio->io_txg, 3766 mc == spa_dedup_class(spa) ? "dedup" : "special", 3767 (int)zio->io_size, 3768 (u_longlong_t)metaslab_class_get_alloc(mc), 3769 (u_longlong_t)metaslab_class_get_space(mc)); 3770 } 3771 3772 /* 3773 * If throttling, transfer reservation over to normal class. 3774 * The io_allocator slot can remain the same even though we 3775 * are switching classes. 
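 * (The normal-class reservation taken below uses METASLAB_MUST_RESERVE and is VERIFY'd, so the transfer itself cannot fail.)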
3776 */ 3777 if (mc->mc_alloc_throttle_enabled && 3778 (zio->io_flags & ZIO_FLAG_IO_ALLOCATING)) { 3779 metaslab_class_throttle_unreserve(mc, 3780 zio->io_prop.zp_copies, zio->io_allocator, zio); 3781 zio->io_flags &= ~ZIO_FLAG_IO_ALLOCATING; 3782 3783 VERIFY(metaslab_class_throttle_reserve( 3784 spa_normal_class(spa), 3785 zio->io_prop.zp_copies, zio->io_allocator, zio, 3786 flags | METASLAB_MUST_RESERVE)); 3787 } 3788 zio->io_metaslab_class = mc = spa_normal_class(spa); 3789 if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) { 3790 zfs_dbgmsg("%s: metaslab allocation failure, " 3791 "trying normal class: zio %px, size %llu, error %d", 3792 spa_name(spa), zio, (u_longlong_t)zio->io_size, 3793 error); 3794 } 3795 3796 error = metaslab_alloc(spa, mc, zio->io_size, bp, 3797 zio->io_prop.zp_copies, zio->io_txg, NULL, flags, 3798 &zio->io_alloc_list, zio, zio->io_allocator); 3799 } 3800 3801 if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE) { 3802 if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) { 3803 zfs_dbgmsg("%s: metaslab allocation failure, " 3804 "trying ganging: zio %px, size %llu, error %d", 3805 spa_name(spa), zio, (u_longlong_t)zio->io_size, 3806 error); 3807 } 3808 return (zio_write_gang_block(zio, mc)); 3809 } 3810 if (error != 0) { 3811 if (error != ENOSPC || 3812 (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC)) { 3813 zfs_dbgmsg("%s: metaslab allocation failure: zio %px, " 3814 "size %llu, error %d", 3815 spa_name(spa), zio, (u_longlong_t)zio->io_size, 3816 error); 3817 } 3818 zio->io_error = error; 3819 } 3820 3821 return (zio); 3822 } 3823 3824 static zio_t * 3825 zio_dva_free(zio_t *zio) 3826 { 3827 metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE); 3828 3829 return (zio); 3830 } 3831 3832 static zio_t * 3833 zio_dva_claim(zio_t *zio) 3834 { 3835 int error; 3836 3837 error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg); 3838 if (error) 3839 zio->io_error = error; 3840 3841 return (zio); 3842 } 3843 3844 /* 3845 * Undo an allocation. This is used by zio_done() when an I/O fails 3846 * and we want to give back the block we just allocated. 3847 * This handles both normal blocks and gang blocks. 3848 */ 3849 static void 3850 zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp) 3851 { 3852 ASSERT(BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg || BP_IS_HOLE(bp)); 3853 ASSERT(zio->io_bp_override == NULL); 3854 3855 if (!BP_IS_HOLE(bp)) { 3856 metaslab_free(zio->io_spa, bp, BP_GET_LOGICAL_BIRTH(bp), 3857 B_TRUE); 3858 } 3859 3860 if (gn != NULL) { 3861 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 3862 zio_dva_unallocate(zio, gn->gn_child[g], 3863 &gn->gn_gbh->zg_blkptr[g]); 3864 } 3865 } 3866 } 3867 3868 /* 3869 * Try to allocate an intent log block. Return 0 on success, errno on failure. 3870 */ 3871 int 3872 zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg, blkptr_t *new_bp, 3873 uint64_t size, boolean_t *slog) 3874 { 3875 int error = 1; 3876 zio_alloc_list_t io_alloc_list; 3877 3878 ASSERT(txg > spa_syncing_txg(spa)); 3879 3880 metaslab_trace_init(&io_alloc_list); 3881 3882 /* 3883 * Block pointer fields are useful to metaslabs for stats and debugging. 3884 * Fill in the obvious ones before calling into metaslab_alloc(). 
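 * The values set here are provisional; the remaining bp fields are filled in below once the allocation succeeds.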
3885 */ 3886 BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG); 3887 BP_SET_PSIZE(new_bp, size); 3888 BP_SET_LEVEL(new_bp, 0); 3889 3890 /* 3891 * When allocating a zil block, we don't have information about 3892 * the final destination of the block except the objset it's part 3893 * of, so we just hash the objset ID to pick the allocator to get 3894 * some parallelism. 3895 */ 3896 int flags = METASLAB_ZIL; 3897 int allocator = (uint_t)cityhash4(0, 0, 0, 3898 os->os_dsl_dataset->ds_object) % spa->spa_alloc_count; 3899 error = metaslab_alloc(spa, spa_log_class(spa), size, new_bp, 1, 3900 txg, NULL, flags, &io_alloc_list, NULL, allocator); 3901 *slog = (error == 0); 3902 if (error != 0) { 3903 error = metaslab_alloc(spa, spa_embedded_log_class(spa), size, 3904 new_bp, 1, txg, NULL, flags, 3905 &io_alloc_list, NULL, allocator); 3906 } 3907 if (error != 0) { 3908 error = metaslab_alloc(spa, spa_normal_class(spa), size, 3909 new_bp, 1, txg, NULL, flags, 3910 &io_alloc_list, NULL, allocator); 3911 } 3912 metaslab_trace_fini(&io_alloc_list); 3913 3914 if (error == 0) { 3915 BP_SET_LSIZE(new_bp, size); 3916 BP_SET_PSIZE(new_bp, size); 3917 BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF); 3918 BP_SET_CHECKSUM(new_bp, 3919 spa_version(spa) >= SPA_VERSION_SLIM_ZIL 3920 ? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG); 3921 BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG); 3922 BP_SET_LEVEL(new_bp, 0); 3923 BP_SET_DEDUP(new_bp, 0); 3924 BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER); 3925 3926 /* 3927 * encrypted blocks will require an IV and salt. We generate 3928 * these now since we will not be rewriting the bp at 3929 * rewrite time. 3930 */ 3931 if (os->os_encrypted) { 3932 uint8_t iv[ZIO_DATA_IV_LEN]; 3933 uint8_t salt[ZIO_DATA_SALT_LEN]; 3934 3935 BP_SET_CRYPT(new_bp, B_TRUE); 3936 VERIFY0(spa_crypt_get_salt(spa, 3937 dmu_objset_id(os), salt)); 3938 VERIFY0(zio_crypt_generate_iv(iv)); 3939 3940 zio_crypt_encode_params_bp(new_bp, salt, iv); 3941 } 3942 } else { 3943 zfs_dbgmsg("%s: zil block allocation failure: " 3944 "size %llu, error %d", spa_name(spa), (u_longlong_t)size, 3945 error); 3946 } 3947 3948 return (error); 3949 } 3950 3951 /* 3952 * ========================================================================== 3953 * Read and write to physical devices 3954 * ========================================================================== 3955 */ 3956 3957 /* 3958 * Issue an I/O to the underlying vdev. Typically the issue pipeline 3959 * stops after this stage and will resume upon I/O completion. 3960 * However, there are instances where the vdev layer may need to 3961 * continue the pipeline when an I/O was not issued. Since the I/O 3962 * that was sent to the vdev layer might be different than the one 3963 * currently active in the pipeline (see vdev_queue_io()), we explicitly 3964 * force the underlying vdev layers to call either zio_execute() or 3965 * zio_interrupt() to ensure that the pipeline continues with the correct I/O. 3966 */ 3967 static zio_t * 3968 zio_vdev_io_start(zio_t *zio) 3969 { 3970 vdev_t *vd = zio->io_vd; 3971 uint64_t align; 3972 spa_t *spa = zio->io_spa; 3973 3974 zio->io_delay = 0; 3975 3976 ASSERT(zio->io_error == 0); 3977 ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0); 3978 3979 if (vd == NULL) { 3980 if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) 3981 spa_config_enter(spa, SCL_ZIO, zio, RW_READER); 3982 3983 /* 3984 * The mirror_ops handle multiple DVAs in a single BP. 
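 * (With no io_vd, the zio is addressed to the BP's DVAs rather than a single vdev, so it is handed to the mirror ops, which issue child I/Os to each DVA.)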
3985 */ 3986 vdev_mirror_ops.vdev_op_io_start(zio); 3987 return (NULL); 3988 } 3989 3990 ASSERT3P(zio->io_logical, !=, zio); 3991 if (zio->io_type == ZIO_TYPE_WRITE) { 3992 ASSERT(spa->spa_trust_config); 3993 3994 /* 3995 * Note: the code can handle other kinds of writes, 3996 * but we don't expect them. 3997 */ 3998 if (zio->io_vd->vdev_noalloc) { 3999 ASSERT(zio->io_flags & 4000 (ZIO_FLAG_PHYSICAL | ZIO_FLAG_SELF_HEAL | 4001 ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE)); 4002 } 4003 } 4004 4005 align = 1ULL << vd->vdev_top->vdev_ashift; 4006 4007 if (!(zio->io_flags & ZIO_FLAG_PHYSICAL) && 4008 P2PHASE(zio->io_size, align) != 0) { 4009 /* Transform logical writes to be a full physical block size. */ 4010 uint64_t asize = P2ROUNDUP(zio->io_size, align); 4011 abd_t *abuf = abd_alloc_sametype(zio->io_abd, asize); 4012 ASSERT(vd == vd->vdev_top); 4013 if (zio->io_type == ZIO_TYPE_WRITE) { 4014 abd_copy(abuf, zio->io_abd, zio->io_size); 4015 abd_zero_off(abuf, zio->io_size, asize - zio->io_size); 4016 } 4017 zio_push_transform(zio, abuf, asize, asize, zio_subblock); 4018 } 4019 4020 /* 4021 * If this is not a physical io, make sure that it is properly aligned 4022 * before proceeding. 4023 */ 4024 if (!(zio->io_flags & ZIO_FLAG_PHYSICAL)) { 4025 ASSERT0(P2PHASE(zio->io_offset, align)); 4026 ASSERT0(P2PHASE(zio->io_size, align)); 4027 } else { 4028 /* 4029 * For physical writes, we allow 512b aligned writes and assume 4030 * the device will perform a read-modify-write as necessary. 4031 */ 4032 ASSERT0(P2PHASE(zio->io_offset, SPA_MINBLOCKSIZE)); 4033 ASSERT0(P2PHASE(zio->io_size, SPA_MINBLOCKSIZE)); 4034 } 4035 4036 VERIFY(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa)); 4037 4038 /* 4039 * If this is a repair I/O, and there's no self-healing involved -- 4040 * that is, we're just resilvering what we expect to resilver -- 4041 * then don't do the I/O unless zio's txg is actually in vd's DTL. 4042 * This prevents spurious resilvering. 4043 * 4044 * There are a few ways that we can end up creating these spurious 4045 * resilver i/os: 4046 * 4047 * 1. A resilver i/o will be issued if any DVA in the BP has a 4048 * dirty DTL. The mirror code will issue resilver writes to 4049 * each DVA, including the one(s) that are not on vdevs with dirty 4050 * DTLs. 4051 * 4052 * 2. With nested replication, which happens when we have a 4053 * "replacing" or "spare" vdev that's a child of a mirror or raidz. 4054 * For example, given mirror(replacing(A+B), C), it's likely that 4055 * only A is out of date (it's the new device). In this case, we'll 4056 * read from C, then use the data to resilver A+B -- but we don't 4057 * actually want to resilver B, just A. The top-level mirror has no 4058 * way to know this, so instead we just discard unnecessary repairs 4059 * as we work our way down the vdev tree. 4060 * 4061 * 3. ZTEST also creates mirrors of mirrors, mirrors of raidz, etc. 4062 * The same logic applies to any form of nested replication: ditto 4063 * + mirror, RAID-Z + replacing, etc. 4064 * 4065 * However, indirect vdevs point off to other vdevs which may have 4066 * DTL's, so we never bypass them. The child i/os on concrete vdevs 4067 * will be properly bypassed instead. 4068 * 4069 * Leaf DTL_PARTIAL can be empty when a legitimate write comes from 4070 * a dRAID spare vdev. For example, when a dRAID spare is first 4071 * used, its spare blocks need to be written to but the leaf vdev's 4072 * of such blocks can have empty DTL_PARTIAL. 
4073 * 4074 * There seemed no clean way to allow such writes while bypassing 4075 * spurious ones. At this point, just avoid all bypassing for dRAID 4076 * for correctness. 4077 */ 4078 if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) && 4079 !(zio->io_flags & ZIO_FLAG_SELF_HEAL) && 4080 zio->io_txg != 0 && /* not a delegated i/o */ 4081 vd->vdev_ops != &vdev_indirect_ops && 4082 vd->vdev_top->vdev_ops != &vdev_draid_ops && 4083 !vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) { 4084 ASSERT(zio->io_type == ZIO_TYPE_WRITE); 4085 zio_vdev_io_bypass(zio); 4086 return (zio); 4087 } 4088 4089 /* 4090 * Select the next best leaf I/O to process. Distributed spares are 4091 * excluded since they dispatch the I/O directly to a leaf vdev after 4092 * applying the dRAID mapping. 4093 */ 4094 if (vd->vdev_ops->vdev_op_leaf && 4095 vd->vdev_ops != &vdev_draid_spare_ops && 4096 (zio->io_type == ZIO_TYPE_READ || 4097 zio->io_type == ZIO_TYPE_WRITE || 4098 zio->io_type == ZIO_TYPE_TRIM)) { 4099 4100 if (zio_handle_device_injection(vd, zio, ENOSYS) != 0) { 4101 /* 4102 * "no-op" injections return success, but do no actual 4103 * work. Just skip the remaining vdev stages. 4104 */ 4105 zio_vdev_io_bypass(zio); 4106 zio_interrupt(zio); 4107 return (NULL); 4108 } 4109 4110 if ((zio = vdev_queue_io(zio)) == NULL) 4111 return (NULL); 4112 4113 if (!vdev_accessible(vd, zio)) { 4114 zio->io_error = SET_ERROR(ENXIO); 4115 zio_interrupt(zio); 4116 return (NULL); 4117 } 4118 zio->io_delay = gethrtime(); 4119 } 4120 4121 vd->vdev_ops->vdev_op_io_start(zio); 4122 return (NULL); 4123 } 4124 4125 static zio_t * 4126 zio_vdev_io_done(zio_t *zio) 4127 { 4128 vdev_t *vd = zio->io_vd; 4129 vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops; 4130 boolean_t unexpected_error = B_FALSE; 4131 4132 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) { 4133 return (NULL); 4134 } 4135 4136 ASSERT(zio->io_type == ZIO_TYPE_READ || 4137 zio->io_type == ZIO_TYPE_WRITE || 4138 zio->io_type == ZIO_TYPE_FLUSH || 4139 zio->io_type == ZIO_TYPE_TRIM); 4140 4141 if (zio->io_delay) 4142 zio->io_delay = gethrtime() - zio->io_delay; 4143 4144 if (vd != NULL && vd->vdev_ops->vdev_op_leaf && 4145 vd->vdev_ops != &vdev_draid_spare_ops) { 4146 if (zio->io_type != ZIO_TYPE_FLUSH) 4147 vdev_queue_io_done(zio); 4148 4149 if (zio_injection_enabled && zio->io_error == 0) 4150 zio->io_error = zio_handle_device_injections(vd, zio, 4151 EIO, EILSEQ); 4152 4153 if (zio_injection_enabled && zio->io_error == 0) 4154 zio->io_error = zio_handle_label_injection(zio, EIO); 4155 4156 if (zio->io_error && zio->io_type != ZIO_TYPE_FLUSH && 4157 zio->io_type != ZIO_TYPE_TRIM) { 4158 if (!vdev_accessible(vd, zio)) { 4159 zio->io_error = SET_ERROR(ENXIO); 4160 } else { 4161 unexpected_error = B_TRUE; 4162 } 4163 } 4164 } 4165 4166 ops->vdev_op_io_done(zio); 4167 4168 if (unexpected_error && vd->vdev_remove_wanted == B_FALSE) 4169 VERIFY(vdev_probe(vd, zio) == NULL); 4170 4171 return (zio); 4172 } 4173 4174 /* 4175 * This function is used to change the priority of an existing zio that is 4176 * currently in-flight. This is used by the arc to upgrade priority in the 4177 * event that a demand read is made for a block that is currently queued 4178 * as a scrub or async read IO. Otherwise, the high priority read request 4179 * would end up having to wait for the lower priority IO. 
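 * The new priority is applied recursively to all of the zio's children, so queued leaf I/Os are re-prioritized as well.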
4180 */ 4181 void 4182 zio_change_priority(zio_t *pio, zio_priority_t priority) 4183 { 4184 zio_t *cio, *cio_next; 4185 zio_link_t *zl = NULL; 4186 4187 ASSERT3U(priority, <, ZIO_PRIORITY_NUM_QUEUEABLE); 4188 4189 if (pio->io_vd != NULL && pio->io_vd->vdev_ops->vdev_op_leaf) { 4190 vdev_queue_change_io_priority(pio, priority); 4191 } else { 4192 pio->io_priority = priority; 4193 } 4194 4195 mutex_enter(&pio->io_lock); 4196 for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) { 4197 cio_next = zio_walk_children(pio, &zl); 4198 zio_change_priority(cio, priority); 4199 } 4200 mutex_exit(&pio->io_lock); 4201 } 4202 4203 /* 4204 * For non-raidz ZIOs, we can just copy aside the bad data read from the 4205 * disk, and use that to finish the checksum ereport later. 4206 */ 4207 static void 4208 zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr, 4209 const abd_t *good_buf) 4210 { 4211 /* no processing needed */ 4212 zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE); 4213 } 4214 4215 void 4216 zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr) 4217 { 4218 void *abd = abd_alloc_sametype(zio->io_abd, zio->io_size); 4219 4220 abd_copy(abd, zio->io_abd, zio->io_size); 4221 4222 zcr->zcr_cbinfo = zio->io_size; 4223 zcr->zcr_cbdata = abd; 4224 zcr->zcr_finish = zio_vsd_default_cksum_finish; 4225 zcr->zcr_free = zio_abd_free; 4226 } 4227 4228 static zio_t * 4229 zio_vdev_io_assess(zio_t *zio) 4230 { 4231 vdev_t *vd = zio->io_vd; 4232 4233 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) { 4234 return (NULL); 4235 } 4236 4237 if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) 4238 spa_config_exit(zio->io_spa, SCL_ZIO, zio); 4239 4240 if (zio->io_vsd != NULL) { 4241 zio->io_vsd_ops->vsd_free(zio); 4242 zio->io_vsd = NULL; 4243 } 4244 4245 if (zio_injection_enabled && zio->io_error == 0) 4246 zio->io_error = zio_handle_fault_injection(zio, EIO); 4247 4248 /* 4249 * If the I/O failed, determine whether we should attempt to retry it. 4250 * 4251 * On retry, we cut in line in the issue queue, since we don't want 4252 * compression/checksumming/etc. work to prevent our (cheap) IO reissue. 4253 */ 4254 if (zio->io_error && vd == NULL && 4255 !(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) { 4256 ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE)); /* not a leaf */ 4257 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS)); /* not a leaf */ 4258 zio->io_error = 0; 4259 zio->io_flags |= ZIO_FLAG_IO_RETRY | ZIO_FLAG_DONT_AGGREGATE; 4260 zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1; 4261 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, 4262 zio_requeue_io_start_cut_in_line); 4263 return (NULL); 4264 } 4265 4266 /* 4267 * If we got an error on a leaf device, convert it to ENXIO 4268 * if the device is not accessible at all. 4269 */ 4270 if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf && 4271 !vdev_accessible(vd, zio)) 4272 zio->io_error = SET_ERROR(ENXIO); 4273 4274 /* 4275 * If we can't write to an interior vdev (mirror or RAID-Z), 4276 * set vdev_cant_write so that we stop trying to allocate from it. 4277 */ 4278 if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE && 4279 vd != NULL && !vd->vdev_ops->vdev_op_leaf) { 4280 vdev_dbgmsg(vd, "zio_vdev_io_assess(zio=%px) setting " 4281 "cant_write=TRUE due to write failure with ENXIO", 4282 zio); 4283 vd->vdev_cant_write = B_TRUE; 4284 } 4285 4286 /* 4287 * If a cache flush returns ENOTSUP or ENOTTY, we know that no future 4288 * attempts will ever succeed. 
In this case we set a persistent 4289 * boolean flag so that we don't bother with it in the future. 4290 */ 4291 if ((zio->io_error == ENOTSUP || zio->io_error == ENOTTY) && 4292 zio->io_type == ZIO_TYPE_FLUSH && vd != NULL) 4293 vd->vdev_nowritecache = B_TRUE; 4294 4295 if (zio->io_error) 4296 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 4297 4298 return (zio); 4299 } 4300 4301 void 4302 zio_vdev_io_reissue(zio_t *zio) 4303 { 4304 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); 4305 ASSERT(zio->io_error == 0); 4306 4307 zio->io_stage >>= 1; 4308 } 4309 4310 void 4311 zio_vdev_io_redone(zio_t *zio) 4312 { 4313 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE); 4314 4315 zio->io_stage >>= 1; 4316 } 4317 4318 void 4319 zio_vdev_io_bypass(zio_t *zio) 4320 { 4321 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); 4322 ASSERT(zio->io_error == 0); 4323 4324 zio->io_flags |= ZIO_FLAG_IO_BYPASS; 4325 zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1; 4326 } 4327 4328 /* 4329 * ========================================================================== 4330 * Encrypt and store encryption parameters 4331 * ========================================================================== 4332 */ 4333 4334 4335 /* 4336 * This function is used for ZIO_STAGE_ENCRYPT. It is responsible for 4337 * managing the storage of encryption parameters and passing them to the 4338 * lower-level encryption functions. 4339 */ 4340 static zio_t * 4341 zio_encrypt(zio_t *zio) 4342 { 4343 zio_prop_t *zp = &zio->io_prop; 4344 spa_t *spa = zio->io_spa; 4345 blkptr_t *bp = zio->io_bp; 4346 uint64_t psize = BP_GET_PSIZE(bp); 4347 uint64_t dsobj = zio->io_bookmark.zb_objset; 4348 dmu_object_type_t ot = BP_GET_TYPE(bp); 4349 void *enc_buf = NULL; 4350 abd_t *eabd = NULL; 4351 uint8_t salt[ZIO_DATA_SALT_LEN]; 4352 uint8_t iv[ZIO_DATA_IV_LEN]; 4353 uint8_t mac[ZIO_DATA_MAC_LEN]; 4354 boolean_t no_crypt = B_FALSE; 4355 4356 /* the root zio already encrypted the data */ 4357 if (zio->io_child_type == ZIO_CHILD_GANG) 4358 return (zio); 4359 4360 /* only ZIL blocks are re-encrypted on rewrite */ 4361 if (!IO_IS_ALLOCATING(zio) && ot != DMU_OT_INTENT_LOG) 4362 return (zio); 4363 4364 if (!(zp->zp_encrypt || BP_IS_ENCRYPTED(bp))) { 4365 BP_SET_CRYPT(bp, B_FALSE); 4366 return (zio); 4367 } 4368 4369 /* if we are doing raw encryption set the provided encryption params */ 4370 if (zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) { 4371 ASSERT0(BP_GET_LEVEL(bp)); 4372 BP_SET_CRYPT(bp, B_TRUE); 4373 BP_SET_BYTEORDER(bp, zp->zp_byteorder); 4374 if (ot != DMU_OT_OBJSET) 4375 zio_crypt_encode_mac_bp(bp, zp->zp_mac); 4376 4377 /* dnode blocks must be written out in the provided byteorder */ 4378 if (zp->zp_byteorder != ZFS_HOST_BYTEORDER && 4379 ot == DMU_OT_DNODE) { 4380 void *bswap_buf = zio_buf_alloc(psize); 4381 abd_t *babd = abd_get_from_buf(bswap_buf, psize); 4382 4383 ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF); 4384 abd_copy_to_buf(bswap_buf, zio->io_abd, psize); 4385 dmu_ot_byteswap[DMU_OT_BYTESWAP(ot)].ob_func(bswap_buf, 4386 psize); 4387 4388 abd_take_ownership_of_buf(babd, B_TRUE); 4389 zio_push_transform(zio, babd, psize, psize, NULL); 4390 } 4391 4392 if (DMU_OT_IS_ENCRYPTED(ot)) 4393 zio_crypt_encode_params_bp(bp, zp->zp_salt, zp->zp_iv); 4394 return (zio); 4395 } 4396 4397 /* indirect blocks only maintain a cksum of the lower level MACs */ 4398 if (BP_GET_LEVEL(bp) > 0) { 4399 BP_SET_CRYPT(bp, B_TRUE); 4400 VERIFY0(zio_crypt_do_indirect_mac_checksum_abd(B_TRUE, 4401 zio->io_orig_abd, BP_GET_LSIZE(bp), BP_SHOULD_BYTESWAP(bp), 4402 mac)); 4403 
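/* Record the computed MAC of the lower-level block pointers in the bp. */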
zio_crypt_encode_mac_bp(bp, mac); 4404 return (zio); 4405 } 4406 4407 /* 4408 * Objset blocks are a special case since they have 2 256-bit MACs 4409 * embedded within them. 4410 */ 4411 if (ot == DMU_OT_OBJSET) { 4412 ASSERT0(DMU_OT_IS_ENCRYPTED(ot)); 4413 ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF); 4414 BP_SET_CRYPT(bp, B_TRUE); 4415 VERIFY0(spa_do_crypt_objset_mac_abd(B_TRUE, spa, dsobj, 4416 zio->io_abd, psize, BP_SHOULD_BYTESWAP(bp))); 4417 return (zio); 4418 } 4419 4420 /* unencrypted object types are only authenticated with a MAC */ 4421 if (!DMU_OT_IS_ENCRYPTED(ot)) { 4422 BP_SET_CRYPT(bp, B_TRUE); 4423 VERIFY0(spa_do_crypt_mac_abd(B_TRUE, spa, dsobj, 4424 zio->io_abd, psize, mac)); 4425 zio_crypt_encode_mac_bp(bp, mac); 4426 return (zio); 4427 } 4428 4429 /* 4430 * Later passes of sync-to-convergence may decide to rewrite data 4431 * in place to avoid more disk reallocations. This presents a problem 4432 * for encryption because this constitutes rewriting the new data with 4433 * the same encryption key and IV. However, this only applies to blocks 4434 * in the MOS (particularly the spacemaps) and we do not encrypt the 4435 * MOS. We assert that the zio is allocating or an intent log write 4436 * to enforce this. 4437 */ 4438 ASSERT(IO_IS_ALLOCATING(zio) || ot == DMU_OT_INTENT_LOG); 4439 ASSERT(BP_GET_LEVEL(bp) == 0 || ot == DMU_OT_INTENT_LOG); 4440 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_ENCRYPTION)); 4441 ASSERT3U(psize, !=, 0); 4442 4443 enc_buf = zio_buf_alloc(psize); 4444 eabd = abd_get_from_buf(enc_buf, psize); 4445 abd_take_ownership_of_buf(eabd, B_TRUE); 4446 4447 /* 4448 * For an explanation of what encryption parameters are stored 4449 * where, see the block comment in zio_crypt.c. 4450 */ 4451 if (ot == DMU_OT_INTENT_LOG) { 4452 zio_crypt_decode_params_bp(bp, salt, iv); 4453 } else { 4454 BP_SET_CRYPT(bp, B_TRUE); 4455 } 4456 4457 /* Perform the encryption. This should not fail */ 4458 VERIFY0(spa_do_crypt_abd(B_TRUE, spa, &zio->io_bookmark, 4459 BP_GET_TYPE(bp), BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp), 4460 salt, iv, mac, psize, zio->io_abd, eabd, &no_crypt)); 4461 4462 /* encode encryption metadata into the bp */ 4463 if (ot == DMU_OT_INTENT_LOG) { 4464 /* 4465 * ZIL blocks store the MAC in the embedded checksum, so the 4466 * transform must always be applied. 4467 */ 4468 zio_crypt_encode_mac_zil(enc_buf, mac); 4469 zio_push_transform(zio, eabd, psize, psize, NULL); 4470 } else { 4471 BP_SET_CRYPT(bp, B_TRUE); 4472 zio_crypt_encode_params_bp(bp, salt, iv); 4473 zio_crypt_encode_mac_bp(bp, mac); 4474 4475 if (no_crypt) { 4476 ASSERT3U(ot, ==, DMU_OT_DNODE); 4477 abd_free(eabd); 4478 } else { 4479 zio_push_transform(zio, eabd, psize, psize, NULL); 4480 } 4481 } 4482 4483 return (zio); 4484 } 4485 4486 /* 4487 * ========================================================================== 4488 * Generate and verify checksums 4489 * ========================================================================== 4490 */ 4491 static zio_t * 4492 zio_checksum_generate(zio_t *zio) 4493 { 4494 blkptr_t *bp = zio->io_bp; 4495 enum zio_checksum checksum; 4496 4497 if (bp == NULL) { 4498 /* 4499 * This is zio_write_phys(). 4500 * We're either generating a label checksum, or none at all. 
4501 */ 4502 checksum = zio->io_prop.zp_checksum; 4503 4504 if (checksum == ZIO_CHECKSUM_OFF) 4505 return (zio); 4506 4507 ASSERT(checksum == ZIO_CHECKSUM_LABEL); 4508 } else { 4509 if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) { 4510 ASSERT(!IO_IS_ALLOCATING(zio)); 4511 checksum = ZIO_CHECKSUM_GANG_HEADER; 4512 } else { 4513 checksum = BP_GET_CHECKSUM(bp); 4514 } 4515 } 4516 4517 zio_checksum_compute(zio, checksum, zio->io_abd, zio->io_size); 4518 4519 return (zio); 4520 } 4521 4522 static zio_t * 4523 zio_checksum_verify(zio_t *zio) 4524 { 4525 zio_bad_cksum_t info; 4526 blkptr_t *bp = zio->io_bp; 4527 int error; 4528 4529 ASSERT(zio->io_vd != NULL); 4530 4531 if (bp == NULL) { 4532 /* 4533 * This is zio_read_phys(). 4534 * We're either verifying a label checksum, or nothing at all. 4535 */ 4536 if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF) 4537 return (zio); 4538 4539 ASSERT3U(zio->io_prop.zp_checksum, ==, ZIO_CHECKSUM_LABEL); 4540 } 4541 4542 if ((error = zio_checksum_error(zio, &info)) != 0) { 4543 zio->io_error = error; 4544 if (error == ECKSUM && 4545 !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) { 4546 mutex_enter(&zio->io_vd->vdev_stat_lock); 4547 zio->io_vd->vdev_stat.vs_checksum_errors++; 4548 mutex_exit(&zio->io_vd->vdev_stat_lock); 4549 (void) zfs_ereport_start_checksum(zio->io_spa, 4550 zio->io_vd, &zio->io_bookmark, zio, 4551 zio->io_offset, zio->io_size, &info); 4552 } 4553 } 4554 4555 return (zio); 4556 } 4557 4558 /* 4559 * Called by RAID-Z to ensure we don't compute the checksum twice. 4560 */ 4561 void 4562 zio_checksum_verified(zio_t *zio) 4563 { 4564 zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY; 4565 } 4566 4567 /* 4568 * ========================================================================== 4569 * Error rank. Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other. 4570 * An error of 0 indicates success. ENXIO indicates whole-device failure, 4571 * which may be transient (e.g. unplugged) or permanent. ECKSUM and EIO 4572 * indicate errors that are specific to one I/O, and most likely permanent. 4573 * Any other error is presumed to be worse because we weren't expecting it. 4574 * ========================================================================== 4575 */ 4576 int 4577 zio_worst_error(int e1, int e2) 4578 { 4579 static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO }; 4580 int r1, r2; 4581 4582 for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++) 4583 if (e1 == zio_error_rank[r1]) 4584 break; 4585 4586 for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++) 4587 if (e2 == zio_error_rank[r2]) 4588 break; 4589 4590 return (r1 > r2 ?
e1 : e2); 4591 } 4592 4593 /* 4594 * ========================================================================== 4595 * I/O completion 4596 * ========================================================================== 4597 */ 4598 static zio_t * 4599 zio_ready(zio_t *zio) 4600 { 4601 blkptr_t *bp = zio->io_bp; 4602 zio_t *pio, *pio_next; 4603 zio_link_t *zl = NULL; 4604 4605 if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT | 4606 ZIO_CHILD_GANG_BIT | ZIO_CHILD_DDT_BIT, ZIO_WAIT_READY)) { 4607 return (NULL); 4608 } 4609 4610 if (zio->io_ready) { 4611 ASSERT(IO_IS_ALLOCATING(zio)); 4612 ASSERT(BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg || 4613 BP_IS_HOLE(bp) || (zio->io_flags & ZIO_FLAG_NOPWRITE)); 4614 ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0); 4615 4616 zio->io_ready(zio); 4617 } 4618 4619 #ifdef ZFS_DEBUG 4620 if (bp != NULL && bp != &zio->io_bp_copy) 4621 zio->io_bp_copy = *bp; 4622 #endif 4623 4624 if (zio->io_error != 0) { 4625 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 4626 4627 if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 4628 ASSERT(IO_IS_ALLOCATING(zio)); 4629 ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 4630 ASSERT(zio->io_metaslab_class != NULL); 4631 ASSERT(ZIO_HAS_ALLOCATOR(zio)); 4632 4633 /* 4634 * We were unable to allocate anything, unreserve and 4635 * issue the next I/O to allocate. 4636 */ 4637 metaslab_class_throttle_unreserve( 4638 zio->io_metaslab_class, zio->io_prop.zp_copies, 4639 zio->io_allocator, zio); 4640 zio_allocate_dispatch(zio->io_spa, zio->io_allocator); 4641 } 4642 } 4643 4644 mutex_enter(&zio->io_lock); 4645 zio->io_state[ZIO_WAIT_READY] = 1; 4646 pio = zio_walk_parents(zio, &zl); 4647 mutex_exit(&zio->io_lock); 4648 4649 /* 4650 * As we notify zio's parents, new parents could be added. 4651 * New parents go to the head of zio's io_parent_list, however, 4652 * so we will (correctly) not notify them. The remainder of zio's 4653 * io_parent_list, from 'pio_next' onward, cannot change because 4654 * all parents must wait for us to be done before they can be done. 4655 */ 4656 for (; pio != NULL; pio = pio_next) { 4657 pio_next = zio_walk_parents(zio, &zl); 4658 zio_notify_parent(pio, zio, ZIO_WAIT_READY, NULL); 4659 } 4660 4661 if (zio->io_flags & ZIO_FLAG_NODATA) { 4662 if (bp != NULL && BP_IS_GANG(bp)) { 4663 zio->io_flags &= ~ZIO_FLAG_NODATA; 4664 } else { 4665 ASSERT((uintptr_t)zio->io_abd < SPA_MAXBLOCKSIZE); 4666 zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES; 4667 } 4668 } 4669 4670 if (zio_injection_enabled && 4671 zio->io_spa->spa_syncing_txg == zio->io_txg) 4672 zio_handle_ignored_writes(zio); 4673 4674 return (zio); 4675 } 4676 4677 /* 4678 * Update the allocation throttle accounting. 
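 * Called from zio_done() for vdev children of allocating async writes; it releases the throttle reservation and dispatches the next queued allocation via zio_allocate_dispatch().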
4679 */ 4680 static void 4681 zio_dva_throttle_done(zio_t *zio) 4682 { 4683 zio_t *lio __maybe_unused = zio->io_logical; 4684 zio_t *pio = zio_unique_parent(zio); 4685 vdev_t *vd = zio->io_vd; 4686 int flags = METASLAB_ASYNC_ALLOC; 4687 4688 ASSERT3P(zio->io_bp, !=, NULL); 4689 ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE); 4690 ASSERT3U(zio->io_priority, ==, ZIO_PRIORITY_ASYNC_WRITE); 4691 ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV); 4692 ASSERT(vd != NULL); 4693 ASSERT3P(vd, ==, vd->vdev_top); 4694 ASSERT(zio_injection_enabled || !(zio->io_flags & ZIO_FLAG_IO_RETRY)); 4695 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR)); 4696 ASSERT(zio->io_flags & ZIO_FLAG_IO_ALLOCATING); 4697 ASSERT(!(lio->io_flags & ZIO_FLAG_IO_REWRITE)); 4698 ASSERT(!(lio->io_orig_flags & ZIO_FLAG_NODATA)); 4699 4700 /* 4701 * Parents of gang children can have two flavors -- ones that 4702 * allocated the gang header (will have ZIO_FLAG_IO_REWRITE set) 4703 * and ones that allocated the constituent blocks. The allocation 4704 * throttle needs to know the allocating parent zio so we must find 4705 * it here. 4706 */ 4707 if (pio->io_child_type == ZIO_CHILD_GANG) { 4708 /* 4709 * If our parent is a rewrite gang child then our grandparent 4710 * would have been the one that performed the allocation. 4711 */ 4712 if (pio->io_flags & ZIO_FLAG_IO_REWRITE) 4713 pio = zio_unique_parent(pio); 4714 flags |= METASLAB_GANG_CHILD; 4715 } 4716 4717 ASSERT(IO_IS_ALLOCATING(pio)); 4718 ASSERT(ZIO_HAS_ALLOCATOR(pio)); 4719 ASSERT3P(zio, !=, zio->io_logical); 4720 ASSERT(zio->io_logical != NULL); 4721 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR)); 4722 ASSERT0(zio->io_flags & ZIO_FLAG_NOPWRITE); 4723 ASSERT(zio->io_metaslab_class != NULL); 4724 4725 mutex_enter(&pio->io_lock); 4726 metaslab_group_alloc_decrement(zio->io_spa, vd->vdev_id, pio, flags, 4727 pio->io_allocator, B_TRUE); 4728 mutex_exit(&pio->io_lock); 4729 4730 metaslab_class_throttle_unreserve(zio->io_metaslab_class, 1, 4731 pio->io_allocator, pio); 4732 4733 /* 4734 * Call into the pipeline to see if there is more work that 4735 * needs to be done. If there is work to be done it will be 4736 * dispatched to another taskq thread. 4737 */ 4738 zio_allocate_dispatch(zio->io_spa, pio->io_allocator); 4739 } 4740 4741 static zio_t * 4742 zio_done(zio_t *zio) 4743 { 4744 /* 4745 * Always attempt to keep stack usage minimal here since 4746 * we can be called recursively up to 19 levels deep. 4747 */ 4748 const uint64_t psize = zio->io_size; 4749 zio_t *pio, *pio_next; 4750 zio_link_t *zl = NULL; 4751 4752 /* 4753 * If our children haven't all completed, 4754 * wait for them and then repeat this pipeline stage. 4755 */ 4756 if (zio_wait_for_children(zio, ZIO_CHILD_ALL_BITS, ZIO_WAIT_DONE)) { 4757 return (NULL); 4758 } 4759 4760 /* 4761 * If the allocation throttle is enabled, then update the accounting. 4762 * We only track child I/Os that are part of an allocating async 4763 * write. We must do this since the allocation is performed 4764 * by the logical I/O but the actual write is done by child I/Os. 4765 */ 4766 if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING && 4767 zio->io_child_type == ZIO_CHILD_VDEV) { 4768 ASSERT(zio->io_metaslab_class != NULL); 4769 ASSERT(zio->io_metaslab_class->mc_alloc_throttle_enabled); 4770 zio_dva_throttle_done(zio); 4771 } 4772 4773 /* 4774 * If the allocation throttle is enabled, verify that 4775 * we have decremented the refcounts for every I/O that was throttled. 
4776 */ 4777 if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 4778 ASSERT(zio->io_type == ZIO_TYPE_WRITE); 4779 ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 4780 ASSERT(zio->io_bp != NULL); 4781 ASSERT(ZIO_HAS_ALLOCATOR(zio)); 4782 4783 metaslab_group_alloc_verify(zio->io_spa, zio->io_bp, zio, 4784 zio->io_allocator); 4785 VERIFY(zfs_refcount_not_held(&zio->io_metaslab_class-> 4786 mc_allocator[zio->io_allocator].mca_alloc_slots, zio)); 4787 } 4788 4789 4790 for (int c = 0; c < ZIO_CHILD_TYPES; c++) 4791 for (int w = 0; w < ZIO_WAIT_TYPES; w++) 4792 ASSERT(zio->io_children[c][w] == 0); 4793 4794 if (zio->io_bp != NULL && !BP_IS_EMBEDDED(zio->io_bp)) { 4795 ASSERT(zio->io_bp->blk_pad[0] == 0); 4796 ASSERT(zio->io_bp->blk_pad[1] == 0); 4797 ASSERT(memcmp(zio->io_bp, &zio->io_bp_copy, 4798 sizeof (blkptr_t)) == 0 || 4799 (zio->io_bp == zio_unique_parent(zio)->io_bp)); 4800 if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(zio->io_bp) && 4801 zio->io_bp_override == NULL && 4802 !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) { 4803 ASSERT3U(zio->io_prop.zp_copies, <=, 4804 BP_GET_NDVAS(zio->io_bp)); 4805 ASSERT(BP_COUNT_GANG(zio->io_bp) == 0 || 4806 (BP_COUNT_GANG(zio->io_bp) == 4807 BP_GET_NDVAS(zio->io_bp))); 4808 } 4809 if (zio->io_flags & ZIO_FLAG_NOPWRITE) 4810 VERIFY(BP_EQUAL(zio->io_bp, &zio->io_bp_orig)); 4811 } 4812 4813 /* 4814 * If there were child vdev/gang/ddt errors, they apply to us now. 4815 */ 4816 zio_inherit_child_errors(zio, ZIO_CHILD_VDEV); 4817 zio_inherit_child_errors(zio, ZIO_CHILD_GANG); 4818 zio_inherit_child_errors(zio, ZIO_CHILD_DDT); 4819 4820 /* 4821 * If the I/O on the transformed data was successful, generate any 4822 * checksum reports now while we still have the transformed data. 4823 */ 4824 if (zio->io_error == 0) { 4825 while (zio->io_cksum_report != NULL) { 4826 zio_cksum_report_t *zcr = zio->io_cksum_report; 4827 uint64_t align = zcr->zcr_align; 4828 uint64_t asize = P2ROUNDUP(psize, align); 4829 abd_t *adata = zio->io_abd; 4830 4831 if (adata != NULL && asize != psize) { 4832 adata = abd_alloc(asize, B_TRUE); 4833 abd_copy(adata, zio->io_abd, psize); 4834 abd_zero_off(adata, psize, asize - psize); 4835 } 4836 4837 zio->io_cksum_report = zcr->zcr_next; 4838 zcr->zcr_next = NULL; 4839 zcr->zcr_finish(zcr, adata); 4840 zfs_ereport_free_checksum(zcr); 4841 4842 if (adata != NULL && asize != psize) 4843 abd_free(adata); 4844 } 4845 } 4846 4847 zio_pop_transforms(zio); /* note: may set zio->io_error */ 4848 4849 vdev_stat_update(zio, psize); 4850 4851 /* 4852 * If this I/O is attached to a particular vdev and is slow, taking longer 4853 * than zio_slow_io_ms (30 seconds by default) to complete, post an error 4854 * describing the I/O delay. We ignore these errors if the device is currently unavailable. 4855 */ 4856 if (zio->io_delay >= MSEC2NSEC(zio_slow_io_ms)) { 4857 if (zio->io_vd != NULL && !vdev_is_dead(zio->io_vd)) { 4858 /* 4859 * We want to only increment our slow IO counters if 4860 * the IO is valid (i.e. not if the drive is removed). 4861 * 4862 * zfs_ereport_post() will also do these checks, but 4863 * it can also ratelimit and have other failures, so we 4864 * need to increment the slow_io counters independent 4865 * of it.
4866 */ 4867 if (zfs_ereport_is_valid(FM_EREPORT_ZFS_DELAY, 4868 zio->io_spa, zio->io_vd, zio)) { 4869 mutex_enter(&zio->io_vd->vdev_stat_lock); 4870 zio->io_vd->vdev_stat.vs_slow_ios++; 4871 mutex_exit(&zio->io_vd->vdev_stat_lock); 4872 4873 (void) zfs_ereport_post(FM_EREPORT_ZFS_DELAY, 4874 zio->io_spa, zio->io_vd, &zio->io_bookmark, 4875 zio, 0); 4876 } 4877 } 4878 } 4879 4880 if (zio->io_error) { 4881 /* 4882 * If this I/O is attached to a particular vdev, 4883 * generate an error message describing the I/O failure 4884 * at the block level. We ignore these errors if the 4885 * device is currently unavailable. 4886 */ 4887 if (zio->io_error != ECKSUM && zio->io_vd != NULL && 4888 !vdev_is_dead(zio->io_vd)) { 4889 int ret = zfs_ereport_post(FM_EREPORT_ZFS_IO, 4890 zio->io_spa, zio->io_vd, &zio->io_bookmark, zio, 0); 4891 if (ret != EALREADY) { 4892 mutex_enter(&zio->io_vd->vdev_stat_lock); 4893 if (zio->io_type == ZIO_TYPE_READ) 4894 zio->io_vd->vdev_stat.vs_read_errors++; 4895 else if (zio->io_type == ZIO_TYPE_WRITE) 4896 zio->io_vd->vdev_stat.vs_write_errors++; 4897 mutex_exit(&zio->io_vd->vdev_stat_lock); 4898 } 4899 } 4900 4901 if ((zio->io_error == EIO || !(zio->io_flags & 4902 (ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) && 4903 zio == zio->io_logical) { 4904 /* 4905 * For logical I/O requests, tell the SPA to log the 4906 * error and generate a logical data ereport. 4907 */ 4908 spa_log_error(zio->io_spa, &zio->io_bookmark, 4909 BP_GET_LOGICAL_BIRTH(zio->io_bp)); 4910 (void) zfs_ereport_post(FM_EREPORT_ZFS_DATA, 4911 zio->io_spa, NULL, &zio->io_bookmark, zio, 0); 4912 } 4913 } 4914 4915 if (zio->io_error && zio == zio->io_logical) { 4916 /* 4917 * Determine whether zio should be reexecuted. This will 4918 * propagate all the way to the root via zio_notify_parent(). 4919 */ 4920 ASSERT(zio->io_vd == NULL && zio->io_bp != NULL); 4921 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 4922 4923 if (IO_IS_ALLOCATING(zio) && 4924 !(zio->io_flags & ZIO_FLAG_CANFAIL)) { 4925 if (zio->io_error != ENOSPC) 4926 zio->io_reexecute |= ZIO_REEXECUTE_NOW; 4927 else 4928 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 4929 } 4930 4931 if ((zio->io_type == ZIO_TYPE_READ || 4932 zio->io_type == ZIO_TYPE_FREE) && 4933 !(zio->io_flags & ZIO_FLAG_SCAN_THREAD) && 4934 zio->io_error == ENXIO && 4935 spa_load_state(zio->io_spa) == SPA_LOAD_NONE && 4936 spa_get_failmode(zio->io_spa) != ZIO_FAILURE_MODE_CONTINUE) 4937 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 4938 4939 if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute) 4940 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 4941 4942 /* 4943 * Here is a possibly good place to attempt to do 4944 * either combinatorial reconstruction or error correction 4945 * based on checksums. It also might be a good place 4946 * to send out preliminary ereports before we suspend 4947 * processing. 4948 */ 4949 } 4950 4951 /* 4952 * If there were logical child errors, they apply to us now. 4953 * We defer this until now to avoid conflating logical child 4954 * errors with errors that happened to the zio itself when 4955 * updating vdev stats and reporting FMA events above. 
4956 */ 4957 zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL); 4958 4959 if ((zio->io_error || zio->io_reexecute) && 4960 IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio && 4961 !(zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE))) 4962 zio_dva_unallocate(zio, zio->io_gang_tree, zio->io_bp); 4963 4964 zio_gang_tree_free(&zio->io_gang_tree); 4965 4966 /* 4967 * Godfather I/Os should never suspend. 4968 */ 4969 if ((zio->io_flags & ZIO_FLAG_GODFATHER) && 4970 (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) 4971 zio->io_reexecute &= ~ZIO_REEXECUTE_SUSPEND; 4972 4973 if (zio->io_reexecute) { 4974 /* 4975 * This is a logical I/O that wants to reexecute. 4976 * 4977 * Reexecute is top-down. When an i/o fails, if it's not 4978 * the root, it simply notifies its parent and sticks around. 4979 * The parent, seeing that it still has children in zio_done(), 4980 * does the same. This percolates all the way up to the root. 4981 * The root i/o will reexecute or suspend the entire tree. 4982 * 4983 * This approach ensures that zio_reexecute() honors 4984 * all the original i/o dependency relationships, e.g. 4985 * parents not executing until children are ready. 4986 */ 4987 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 4988 4989 zio->io_gang_leader = NULL; 4990 4991 mutex_enter(&zio->io_lock); 4992 zio->io_state[ZIO_WAIT_DONE] = 1; 4993 mutex_exit(&zio->io_lock); 4994 4995 /* 4996 * "The Godfather" I/O monitors its children but is 4997 * not a true parent to them. It will track them through 4998 * the pipeline but severs its ties whenever they get into 4999 * trouble (e.g. suspended). This allows "The Godfather" 5000 * I/O to return status without blocking. 5001 */ 5002 zl = NULL; 5003 for (pio = zio_walk_parents(zio, &zl); pio != NULL; 5004 pio = pio_next) { 5005 zio_link_t *remove_zl = zl; 5006 pio_next = zio_walk_parents(zio, &zl); 5007 5008 if ((pio->io_flags & ZIO_FLAG_GODFATHER) && 5009 (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) { 5010 zio_remove_child(pio, zio, remove_zl); 5011 /* 5012 * This is a rare code path, so we don't 5013 * bother with "next_to_execute". 5014 */ 5015 zio_notify_parent(pio, zio, ZIO_WAIT_DONE, 5016 NULL); 5017 } 5018 } 5019 5020 if ((pio = zio_unique_parent(zio)) != NULL) { 5021 /* 5022 * We're not a root i/o, so there's nothing to do 5023 * but notify our parent. Don't propagate errors 5024 * upward since we haven't permanently failed yet. 5025 */ 5026 ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER)); 5027 zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE; 5028 /* 5029 * This is a rare code path, so we don't bother with 5030 * "next_to_execute". 5031 */ 5032 zio_notify_parent(pio, zio, ZIO_WAIT_DONE, NULL); 5033 } else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) { 5034 /* 5035 * We'd fail again if we reexecuted now, so suspend 5036 * until conditions improve (e.g. device comes online). 5037 */ 5038 zio_suspend(zio->io_spa, zio, ZIO_SUSPEND_IOERR); 5039 } else { 5040 /* 5041 * Reexecution is potentially a huge amount of work. 5042 * Hand it off to the otherwise-unused claim taskq. 5043 */ 5044 spa_taskq_dispatch(zio->io_spa, 5045 ZIO_TYPE_CLAIM, ZIO_TASKQ_ISSUE, 5046 zio_reexecute, zio, B_FALSE); 5047 } 5048 return (NULL); 5049 } 5050 5051 ASSERT(list_is_empty(&zio->io_child_list)); 5052 ASSERT(zio->io_reexecute == 0); 5053 ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL)); 5054 5055 /* 5056 * Report any checksum errors, since the I/O is complete. 
5057 */ 5058 while (zio->io_cksum_report != NULL) { 5059 zio_cksum_report_t *zcr = zio->io_cksum_report; 5060 zio->io_cksum_report = zcr->zcr_next; 5061 zcr->zcr_next = NULL; 5062 zcr->zcr_finish(zcr, NULL); 5063 zfs_ereport_free_checksum(zcr); 5064 } 5065 5066 /* 5067 * It is the responsibility of the done callback to ensure that this 5068 * particular zio is no longer discoverable for adoption, and as 5069 * such, cannot acquire any new parents. 5070 */ 5071 if (zio->io_done) 5072 zio->io_done(zio); 5073 5074 mutex_enter(&zio->io_lock); 5075 zio->io_state[ZIO_WAIT_DONE] = 1; 5076 mutex_exit(&zio->io_lock); 5077 5078 /* 5079 * We are done executing this zio. We may want to execute a parent 5080 * next. See the comment in zio_notify_parent(). 5081 */ 5082 zio_t *next_to_execute = NULL; 5083 zl = NULL; 5084 for (pio = zio_walk_parents(zio, &zl); pio != NULL; pio = pio_next) { 5085 zio_link_t *remove_zl = zl; 5086 pio_next = zio_walk_parents(zio, &zl); 5087 zio_remove_child(pio, zio, remove_zl); 5088 zio_notify_parent(pio, zio, ZIO_WAIT_DONE, &next_to_execute); 5089 } 5090 5091 if (zio->io_waiter != NULL) { 5092 mutex_enter(&zio->io_lock); 5093 zio->io_executor = NULL; 5094 cv_broadcast(&zio->io_cv); 5095 mutex_exit(&zio->io_lock); 5096 } else { 5097 zio_destroy(zio); 5098 } 5099 5100 return (next_to_execute); 5101 } 5102 5103 /* 5104 * ========================================================================== 5105 * I/O pipeline definition 5106 * ========================================================================== 5107 */ 5108 static zio_pipe_stage_t *zio_pipeline[] = { 5109 NULL, 5110 zio_read_bp_init, 5111 zio_write_bp_init, 5112 zio_free_bp_init, 5113 zio_issue_async, 5114 zio_write_compress, 5115 zio_encrypt, 5116 zio_checksum_generate, 5117 zio_nop_write, 5118 zio_brt_free, 5119 zio_ddt_read_start, 5120 zio_ddt_read_done, 5121 zio_ddt_write, 5122 zio_ddt_free, 5123 zio_gang_assemble, 5124 zio_gang_issue, 5125 zio_dva_throttle, 5126 zio_dva_allocate, 5127 zio_dva_free, 5128 zio_dva_claim, 5129 zio_ready, 5130 zio_vdev_io_start, 5131 zio_vdev_io_done, 5132 zio_vdev_io_assess, 5133 zio_checksum_verify, 5134 zio_done 5135 }; 5136 5137 5138 5139 5140 /* 5141 * Compare two zbookmark_phys_t's to see which we would reach first in a 5142 * pre-order traversal of the object tree. 5143 * 5144 * This is simple in every case aside from the meta-dnode object. For all other 5145 * objects, we traverse them in order (object 1 before object 2, and so on). 5146 * However, all of these objects are traversed while traversing object 0, since 5147 * the data it points to is the list of objects. Thus, we need to convert to a 5148 * canonical representation so we can compare meta-dnode bookmarks to 5149 * non-meta-dnode bookmarks. 5150 * 5151 * We do this by calculating "equivalents" for each field of the zbookmark. 5152 * zbookmarks outside of the meta-dnode use their own object and level, and 5153 * calculate the level 0 equivalent (the first L0 blkid that is contained in the 5154 * blocks this bookmark refers to) by multiplying their blkid by their span 5155 * (the number of L0 blocks contained within one block at their level). 5156 * zbookmarks inside the meta-dnode calculate their object equivalent 5157 * (which is L0equiv * dnodes per data block), use 0 for their L0equiv, and use 5158 * level + 1<<31 (any value larger than a level could ever be) for their level. 
5159 * This causes them to always compare before a bookmark in their object 5160 * equivalent, compare appropriately to bookmarks in other objects, and to 5161 * compare appropriately to other bookmarks in the meta-dnode. 5162 */ 5163 int 5164 zbookmark_compare(uint16_t dbss1, uint8_t ibs1, uint16_t dbss2, uint8_t ibs2, 5165 const zbookmark_phys_t *zb1, const zbookmark_phys_t *zb2) 5166 { 5167 /* 5168 * These variables represent the "equivalent" values for the zbookmark, 5169 * after converting zbookmarks inside the meta dnode to their 5170 * normal-object equivalents. 5171 */ 5172 uint64_t zb1obj, zb2obj; 5173 uint64_t zb1L0, zb2L0; 5174 uint64_t zb1level, zb2level; 5175 5176 if (zb1->zb_object == zb2->zb_object && 5177 zb1->zb_level == zb2->zb_level && 5178 zb1->zb_blkid == zb2->zb_blkid) 5179 return (0); 5180 5181 IMPLY(zb1->zb_level > 0, ibs1 >= SPA_MINBLOCKSHIFT); 5182 IMPLY(zb2->zb_level > 0, ibs2 >= SPA_MINBLOCKSHIFT); 5183 5184 /* 5185 * BP_SPANB calculates the span in blocks. 5186 */ 5187 zb1L0 = (zb1->zb_blkid) * BP_SPANB(ibs1, zb1->zb_level); 5188 zb2L0 = (zb2->zb_blkid) * BP_SPANB(ibs2, zb2->zb_level); 5189 5190 if (zb1->zb_object == DMU_META_DNODE_OBJECT) { 5191 zb1obj = zb1L0 * (dbss1 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT)); 5192 zb1L0 = 0; 5193 zb1level = zb1->zb_level + COMPARE_META_LEVEL; 5194 } else { 5195 zb1obj = zb1->zb_object; 5196 zb1level = zb1->zb_level; 5197 } 5198 5199 if (zb2->zb_object == DMU_META_DNODE_OBJECT) { 5200 zb2obj = zb2L0 * (dbss2 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT)); 5201 zb2L0 = 0; 5202 zb2level = zb2->zb_level + COMPARE_META_LEVEL; 5203 } else { 5204 zb2obj = zb2->zb_object; 5205 zb2level = zb2->zb_level; 5206 } 5207 5208 /* Now that we have a canonical representation, do the comparison. */ 5209 if (zb1obj != zb2obj) 5210 return (zb1obj < zb2obj ? -1 : 1); 5211 else if (zb1L0 != zb2L0) 5212 return (zb1L0 < zb2L0 ? -1 : 1); 5213 else if (zb1level != zb2level) 5214 return (zb1level > zb2level ? -1 : 1); 5215 /* 5216 * This can (theoretically) happen if the bookmarks have the same object 5217 * and level, but different blkids, if the block sizes are not the same. 5218 * There is presently no way to change the indirect block sizes 5219 */ 5220 return (0); 5221 } 5222 5223 /* 5224 * This function checks the following: given that last_block is the place that 5225 * our traversal stopped last time, does that guarantee that we've visited 5226 * every node under subtree_root? Therefore, we can't just use the raw output 5227 * of zbookmark_compare. We have to pass in a modified version of 5228 * subtree_root; by incrementing the block id, and then checking whether 5229 * last_block is before or equal to that, we can tell whether or not having 5230 * visited last_block implies that all of subtree_root's children have been 5231 * visited. 5232 */ 5233 boolean_t 5234 zbookmark_subtree_completed(const dnode_phys_t *dnp, 5235 const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block) 5236 { 5237 zbookmark_phys_t mod_zb = *subtree_root; 5238 mod_zb.zb_blkid++; 5239 ASSERT0(last_block->zb_level); 5240 5241 /* The objset_phys_t isn't before anything. */ 5242 if (dnp == NULL) 5243 return (B_FALSE); 5244 5245 /* 5246 * We pass in 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT) for the 5247 * data block size in sectors, because that variable is only used if 5248 * the bookmark refers to a block in the meta-dnode. 
Since we don't 5249 * know without examining it what object it refers to, and there's no 5250 * harm in passing in this value in other cases, we always pass it in. 5251 * 5252 * We pass in 0 for the indirect block size shift because zb2 must be 5253 * level 0. The indirect block size is only used to calculate the span 5254 * of the bookmark, but since the bookmark must be level 0, the span is 5255 * always 1, so the math works out. 5256 * 5257 * If you make changes to how the zbookmark_compare code works, be sure 5258 * to make sure that this code still works afterwards. 5259 */ 5260 return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift, 5261 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, &mod_zb, 5262 last_block) <= 0); 5263 } 5264 5265 /* 5266 * This function is similar to zbookmark_subtree_completed(), but returns true 5267 * if subtree_root is equal or ahead of last_block, i.e. still to be done. 5268 */ 5269 boolean_t 5270 zbookmark_subtree_tbd(const dnode_phys_t *dnp, 5271 const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block) 5272 { 5273 ASSERT0(last_block->zb_level); 5274 if (dnp == NULL) 5275 return (B_FALSE); 5276 return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift, 5277 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, subtree_root, 5278 last_block) >= 0); 5279 } 5280 5281 EXPORT_SYMBOL(zio_type_name); 5282 EXPORT_SYMBOL(zio_buf_alloc); 5283 EXPORT_SYMBOL(zio_data_buf_alloc); 5284 EXPORT_SYMBOL(zio_buf_free); 5285 EXPORT_SYMBOL(zio_data_buf_free); 5286 5287 ZFS_MODULE_PARAM(zfs_zio, zio_, slow_io_ms, INT, ZMOD_RW, 5288 "Max I/O completion time (milliseconds) before marking it as slow"); 5289 5290 ZFS_MODULE_PARAM(zfs_zio, zio_, requeue_io_start_cut_in_line, INT, ZMOD_RW, 5291 "Prioritize requeued I/O"); 5292 5293 ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_deferred_free, UINT, ZMOD_RW, 5294 "Defer frees starting in this pass"); 5295 5296 ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_dont_compress, UINT, ZMOD_RW, 5297 "Don't compress starting in this pass"); 5298 5299 ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_rewrite, UINT, ZMOD_RW, 5300 "Rewrite new bps starting in this pass"); 5301 5302 ZFS_MODULE_PARAM(zfs_zio, zio_, dva_throttle_enabled, INT, ZMOD_RW, 5303 "Throttle block allocations in the ZIO pipeline"); 5304 5305 ZFS_MODULE_PARAM(zfs_zio, zio_, deadman_log_all, INT, ZMOD_RW, 5306 "Log all slow ZIOs, not just those with vdevs"); 5307