1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 23 * Copyright (c) 2011, 2016 by Delphix. All rights reserved. 24 * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved. 25 * Copyright (c) 2014 Integros [integros.com] 26 */ 27 28 #include <sys/sysmacros.h> 29 #include <sys/zfs_context.h> 30 #include <sys/fm/fs/zfs.h> 31 #include <sys/spa.h> 32 #include <sys/txg.h> 33 #include <sys/spa_impl.h> 34 #include <sys/vdev_impl.h> 35 #include <sys/zio_impl.h> 36 #include <sys/zio_compress.h> 37 #include <sys/zio_checksum.h> 38 #include <sys/dmu_objset.h> 39 #include <sys/arc.h> 40 #include <sys/ddt.h> 41 #include <sys/blkptr.h> 42 #include <sys/zfeature.h> 43 #include <sys/metaslab_impl.h> 44 45 /* 46 * ========================================================================== 47 * I/O type descriptions 48 * ========================================================================== 49 */ 50 const char *zio_type_name[ZIO_TYPES] = { 51 "zio_null", "zio_read", "zio_write", "zio_free", "zio_claim", 52 "zio_ioctl" 53 }; 54 55 boolean_t zio_dva_throttle_enabled = B_TRUE; 56 57 /* 58 * ========================================================================== 59 * I/O kmem caches 60 * ========================================================================== 61 */ 62 kmem_cache_t *zio_cache; 63 kmem_cache_t *zio_link_cache; 64 kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT]; 65 kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT]; 66 67 #ifdef _KERNEL 68 extern vmem_t *zio_alloc_arena; 69 #endif 70 71 #define ZIO_PIPELINE_CONTINUE 0x100 72 #define ZIO_PIPELINE_STOP 0x101 73 74 #define BP_SPANB(indblkshift, level) \ 75 (((uint64_t)1) << ((level) * ((indblkshift) - SPA_BLKPTRSHIFT))) 76 #define COMPARE_META_LEVEL 0x80000000ul 77 /* 78 * The following actions directly effect the spa's sync-to-convergence logic. 79 * The values below define the sync pass when we start performing the action. 80 * Care should be taken when changing these values as they directly impact 81 * spa_sync() performance. Tuning these values may introduce subtle performance 82 * pathologies and should only be done in the context of performance analysis. 83 * These tunables will eventually be removed and replaced with #defines once 84 * enough analysis has been done to determine optimal values. 85 * 86 * The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that 87 * regular blocks are not deferred. 88 */ 89 int zfs_sync_pass_deferred_free = 2; /* defer frees starting in this pass */ 90 int zfs_sync_pass_dont_compress = 5; /* don't compress starting in this pass */ 91 int zfs_sync_pass_rewrite = 2; /* rewrite new bps starting in this pass */ 92 93 /* 94 * An allocating zio is one that either currently has the DVA allocate 95 * stage set or will have it later in its lifetime. 96 */ 97 #define IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE) 98 99 boolean_t zio_requeue_io_start_cut_in_line = B_TRUE; 100 101 #ifdef ZFS_DEBUG 102 int zio_buf_debug_limit = 16384; 103 #else 104 int zio_buf_debug_limit = 0; 105 #endif 106 107 static void zio_taskq_dispatch(zio_t *, zio_taskq_type_t, boolean_t); 108 109 void 110 zio_init(void) 111 { 112 size_t c; 113 vmem_t *data_alloc_arena = NULL; 114 115 #ifdef _KERNEL 116 data_alloc_arena = zio_alloc_arena; 117 #endif 118 zio_cache = kmem_cache_create("zio_cache", 119 sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0); 120 zio_link_cache = kmem_cache_create("zio_link_cache", 121 sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0); 122 123 /* 124 * For small buffers, we want a cache for each multiple of 125 * SPA_MINBLOCKSIZE. For larger buffers, we want a cache 126 * for each quarter-power of 2. 127 */ 128 for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) { 129 size_t size = (c + 1) << SPA_MINBLOCKSHIFT; 130 size_t p2 = size; 131 size_t align = 0; 132 size_t cflags = (size > zio_buf_debug_limit) ? KMC_NODEBUG : 0; 133 134 while (!ISP2(p2)) 135 p2 &= p2 - 1; 136 137 #ifndef _KERNEL 138 /* 139 * If we are using watchpoints, put each buffer on its own page, 140 * to eliminate the performance overhead of trapping to the 141 * kernel when modifying a non-watched buffer that shares the 142 * page with a watched buffer. 143 */ 144 if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE)) 145 continue; 146 #endif 147 if (size <= 4 * SPA_MINBLOCKSIZE) { 148 align = SPA_MINBLOCKSIZE; 149 } else if (IS_P2ALIGNED(size, p2 >> 2)) { 150 align = MIN(p2 >> 2, PAGESIZE); 151 } 152 153 if (align != 0) { 154 char name[36]; 155 (void) sprintf(name, "zio_buf_%lu", (ulong_t)size); 156 zio_buf_cache[c] = kmem_cache_create(name, size, 157 align, NULL, NULL, NULL, NULL, NULL, cflags); 158 159 /* 160 * Since zio_data bufs do not appear in crash dumps, we 161 * pass KMC_NOTOUCH so that no allocator metadata is 162 * stored with the buffers. 163 */ 164 (void) sprintf(name, "zio_data_buf_%lu", (ulong_t)size); 165 zio_data_buf_cache[c] = kmem_cache_create(name, size, 166 align, NULL, NULL, NULL, NULL, data_alloc_arena, 167 cflags | KMC_NOTOUCH); 168 } 169 } 170 171 while (--c != 0) { 172 ASSERT(zio_buf_cache[c] != NULL); 173 if (zio_buf_cache[c - 1] == NULL) 174 zio_buf_cache[c - 1] = zio_buf_cache[c]; 175 176 ASSERT(zio_data_buf_cache[c] != NULL); 177 if (zio_data_buf_cache[c - 1] == NULL) 178 zio_data_buf_cache[c - 1] = zio_data_buf_cache[c]; 179 } 180 181 zio_inject_init(); 182 } 183 184 void 185 zio_fini(void) 186 { 187 size_t c; 188 kmem_cache_t *last_cache = NULL; 189 kmem_cache_t *last_data_cache = NULL; 190 191 for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) { 192 if (zio_buf_cache[c] != last_cache) { 193 last_cache = zio_buf_cache[c]; 194 kmem_cache_destroy(zio_buf_cache[c]); 195 } 196 zio_buf_cache[c] = NULL; 197 198 if (zio_data_buf_cache[c] != last_data_cache) { 199 last_data_cache = zio_data_buf_cache[c]; 200 kmem_cache_destroy(zio_data_buf_cache[c]); 201 } 202 zio_data_buf_cache[c] = NULL; 203 } 204 205 kmem_cache_destroy(zio_link_cache); 206 kmem_cache_destroy(zio_cache); 207 208 zio_inject_fini(); 209 } 210 211 /* 212 * ========================================================================== 213 * Allocate and free I/O buffers 214 * ========================================================================== 215 */ 216 217 /* 218 * Use zio_buf_alloc to allocate ZFS metadata. This data will appear in a 219 * crashdump if the kernel panics, so use it judiciously. Obviously, it's 220 * useful to inspect ZFS metadata, but if possible, we should avoid keeping 221 * excess / transient data in-core during a crashdump. 222 */ 223 void * 224 zio_buf_alloc(size_t size) 225 { 226 size_t c = (size - 1) >> SPA_MINBLOCKSHIFT; 227 228 VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT); 229 230 return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE)); 231 } 232 233 /* 234 * Use zio_data_buf_alloc to allocate data. The data will not appear in a 235 * crashdump if the kernel panics. This exists so that we will limit the amount 236 * of ZFS data that shows up in a kernel crashdump. (Thus reducing the amount 237 * of kernel heap dumped to disk when the kernel panics) 238 */ 239 void * 240 zio_data_buf_alloc(size_t size) 241 { 242 size_t c = (size - 1) >> SPA_MINBLOCKSHIFT; 243 244 VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT); 245 246 return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE)); 247 } 248 249 void 250 zio_buf_free(void *buf, size_t size) 251 { 252 size_t c = (size - 1) >> SPA_MINBLOCKSHIFT; 253 254 VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT); 255 256 kmem_cache_free(zio_buf_cache[c], buf); 257 } 258 259 void 260 zio_data_buf_free(void *buf, size_t size) 261 { 262 size_t c = (size - 1) >> SPA_MINBLOCKSHIFT; 263 264 VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT); 265 266 kmem_cache_free(zio_data_buf_cache[c], buf); 267 } 268 269 /* 270 * ========================================================================== 271 * Push and pop I/O transform buffers 272 * ========================================================================== 273 */ 274 void 275 zio_push_transform(zio_t *zio, void *data, uint64_t size, uint64_t bufsize, 276 zio_transform_func_t *transform) 277 { 278 zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP); 279 280 zt->zt_orig_data = zio->io_data; 281 zt->zt_orig_size = zio->io_size; 282 zt->zt_bufsize = bufsize; 283 zt->zt_transform = transform; 284 285 zt->zt_next = zio->io_transform_stack; 286 zio->io_transform_stack = zt; 287 288 zio->io_data = data; 289 zio->io_size = size; 290 } 291 292 void 293 zio_pop_transforms(zio_t *zio) 294 { 295 zio_transform_t *zt; 296 297 while ((zt = zio->io_transform_stack) != NULL) { 298 if (zt->zt_transform != NULL) 299 zt->zt_transform(zio, 300 zt->zt_orig_data, zt->zt_orig_size); 301 302 if (zt->zt_bufsize != 0) 303 zio_buf_free(zio->io_data, zt->zt_bufsize); 304 305 zio->io_data = zt->zt_orig_data; 306 zio->io_size = zt->zt_orig_size; 307 zio->io_transform_stack = zt->zt_next; 308 309 kmem_free(zt, sizeof (zio_transform_t)); 310 } 311 } 312 313 /* 314 * ========================================================================== 315 * I/O transform callbacks for subblocks and decompression 316 * ========================================================================== 317 */ 318 static void 319 zio_subblock(zio_t *zio, void *data, uint64_t size) 320 { 321 ASSERT(zio->io_size > size); 322 323 if (zio->io_type == ZIO_TYPE_READ) 324 bcopy(zio->io_data, data, size); 325 } 326 327 static void 328 zio_decompress(zio_t *zio, void *data, uint64_t size) 329 { 330 if (zio->io_error == 0 && 331 zio_decompress_data(BP_GET_COMPRESS(zio->io_bp), 332 zio->io_data, data, zio->io_size, size) != 0) 333 zio->io_error = SET_ERROR(EIO); 334 } 335 336 /* 337 * ========================================================================== 338 * I/O parent/child relationships and pipeline interlocks 339 * ========================================================================== 340 */ 341 zio_t * 342 zio_walk_parents(zio_t *cio, zio_link_t **zl) 343 { 344 list_t *pl = &cio->io_parent_list; 345 346 *zl = (*zl == NULL) ? list_head(pl) : list_next(pl, *zl); 347 if (*zl == NULL) 348 return (NULL); 349 350 ASSERT((*zl)->zl_child == cio); 351 return ((*zl)->zl_parent); 352 } 353 354 zio_t * 355 zio_walk_children(zio_t *pio, zio_link_t **zl) 356 { 357 list_t *cl = &pio->io_child_list; 358 359 *zl = (*zl == NULL) ? list_head(cl) : list_next(cl, *zl); 360 if (*zl == NULL) 361 return (NULL); 362 363 ASSERT((*zl)->zl_parent == pio); 364 return ((*zl)->zl_child); 365 } 366 367 zio_t * 368 zio_unique_parent(zio_t *cio) 369 { 370 zio_link_t *zl = NULL; 371 zio_t *pio = zio_walk_parents(cio, &zl); 372 373 VERIFY3P(zio_walk_parents(cio, &zl), ==, NULL); 374 return (pio); 375 } 376 377 void 378 zio_add_child(zio_t *pio, zio_t *cio) 379 { 380 zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP); 381 382 /* 383 * Logical I/Os can have logical, gang, or vdev children. 384 * Gang I/Os can have gang or vdev children. 385 * Vdev I/Os can only have vdev children. 386 * The following ASSERT captures all of these constraints. 387 */ 388 ASSERT(cio->io_child_type <= pio->io_child_type); 389 390 zl->zl_parent = pio; 391 zl->zl_child = cio; 392 393 mutex_enter(&cio->io_lock); 394 mutex_enter(&pio->io_lock); 395 396 ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0); 397 398 for (int w = 0; w < ZIO_WAIT_TYPES; w++) 399 pio->io_children[cio->io_child_type][w] += !cio->io_state[w]; 400 401 list_insert_head(&pio->io_child_list, zl); 402 list_insert_head(&cio->io_parent_list, zl); 403 404 pio->io_child_count++; 405 cio->io_parent_count++; 406 407 mutex_exit(&pio->io_lock); 408 mutex_exit(&cio->io_lock); 409 } 410 411 static void 412 zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl) 413 { 414 ASSERT(zl->zl_parent == pio); 415 ASSERT(zl->zl_child == cio); 416 417 mutex_enter(&cio->io_lock); 418 mutex_enter(&pio->io_lock); 419 420 list_remove(&pio->io_child_list, zl); 421 list_remove(&cio->io_parent_list, zl); 422 423 pio->io_child_count--; 424 cio->io_parent_count--; 425 426 mutex_exit(&pio->io_lock); 427 mutex_exit(&cio->io_lock); 428 429 kmem_cache_free(zio_link_cache, zl); 430 } 431 432 static boolean_t 433 zio_wait_for_children(zio_t *zio, enum zio_child child, enum zio_wait_type wait) 434 { 435 uint64_t *countp = &zio->io_children[child][wait]; 436 boolean_t waiting = B_FALSE; 437 438 mutex_enter(&zio->io_lock); 439 ASSERT(zio->io_stall == NULL); 440 if (*countp != 0) { 441 zio->io_stage >>= 1; 442 ASSERT3U(zio->io_stage, !=, ZIO_STAGE_OPEN); 443 zio->io_stall = countp; 444 waiting = B_TRUE; 445 } 446 mutex_exit(&zio->io_lock); 447 448 return (waiting); 449 } 450 451 static void 452 zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait) 453 { 454 uint64_t *countp = &pio->io_children[zio->io_child_type][wait]; 455 int *errorp = &pio->io_child_error[zio->io_child_type]; 456 457 mutex_enter(&pio->io_lock); 458 if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE)) 459 *errorp = zio_worst_error(*errorp, zio->io_error); 460 pio->io_reexecute |= zio->io_reexecute; 461 ASSERT3U(*countp, >, 0); 462 463 (*countp)--; 464 465 if (*countp == 0 && pio->io_stall == countp) { 466 zio_taskq_type_t type = 467 pio->io_stage < ZIO_STAGE_VDEV_IO_START ? ZIO_TASKQ_ISSUE : 468 ZIO_TASKQ_INTERRUPT; 469 pio->io_stall = NULL; 470 mutex_exit(&pio->io_lock); 471 /* 472 * Dispatch the parent zio in its own taskq so that 473 * the child can continue to make progress. This also 474 * prevents overflowing the stack when we have deeply nested 475 * parent-child relationships. 476 */ 477 zio_taskq_dispatch(pio, type, B_FALSE); 478 } else { 479 mutex_exit(&pio->io_lock); 480 } 481 } 482 483 static void 484 zio_inherit_child_errors(zio_t *zio, enum zio_child c) 485 { 486 if (zio->io_child_error[c] != 0 && zio->io_error == 0) 487 zio->io_error = zio->io_child_error[c]; 488 } 489 490 int 491 zio_timestamp_compare(const void *x1, const void *x2) 492 { 493 const zio_t *z1 = x1; 494 const zio_t *z2 = x2; 495 496 if (z1->io_queued_timestamp < z2->io_queued_timestamp) 497 return (-1); 498 if (z1->io_queued_timestamp > z2->io_queued_timestamp) 499 return (1); 500 501 if (z1->io_offset < z2->io_offset) 502 return (-1); 503 if (z1->io_offset > z2->io_offset) 504 return (1); 505 506 if (z1 < z2) 507 return (-1); 508 if (z1 > z2) 509 return (1); 510 511 return (0); 512 } 513 514 /* 515 * ========================================================================== 516 * Create the various types of I/O (read, write, free, etc) 517 * ========================================================================== 518 */ 519 static zio_t * 520 zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp, 521 void *data, uint64_t size, zio_done_func_t *done, void *private, 522 zio_type_t type, zio_priority_t priority, enum zio_flag flags, 523 vdev_t *vd, uint64_t offset, const zbookmark_phys_t *zb, 524 enum zio_stage stage, enum zio_stage pipeline) 525 { 526 zio_t *zio; 527 528 ASSERT3U(size, <=, SPA_MAXBLOCKSIZE); 529 ASSERT(P2PHASE(size, SPA_MINBLOCKSIZE) == 0); 530 ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0); 531 532 ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER)); 533 ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER)); 534 ASSERT(vd || stage == ZIO_STAGE_OPEN); 535 536 zio = kmem_cache_alloc(zio_cache, KM_SLEEP); 537 bzero(zio, sizeof (zio_t)); 538 539 mutex_init(&zio->io_lock, NULL, MUTEX_DEFAULT, NULL); 540 cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL); 541 542 list_create(&zio->io_parent_list, sizeof (zio_link_t), 543 offsetof(zio_link_t, zl_parent_node)); 544 list_create(&zio->io_child_list, sizeof (zio_link_t), 545 offsetof(zio_link_t, zl_child_node)); 546 547 if (vd != NULL) 548 zio->io_child_type = ZIO_CHILD_VDEV; 549 else if (flags & ZIO_FLAG_GANG_CHILD) 550 zio->io_child_type = ZIO_CHILD_GANG; 551 else if (flags & ZIO_FLAG_DDT_CHILD) 552 zio->io_child_type = ZIO_CHILD_DDT; 553 else 554 zio->io_child_type = ZIO_CHILD_LOGICAL; 555 556 if (bp != NULL) { 557 zio->io_bp = (blkptr_t *)bp; 558 zio->io_bp_copy = *bp; 559 zio->io_bp_orig = *bp; 560 if (type != ZIO_TYPE_WRITE || 561 zio->io_child_type == ZIO_CHILD_DDT) 562 zio->io_bp = &zio->io_bp_copy; /* so caller can free */ 563 if (zio->io_child_type == ZIO_CHILD_LOGICAL) 564 zio->io_logical = zio; 565 if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp)) 566 pipeline |= ZIO_GANG_STAGES; 567 } 568 569 zio->io_spa = spa; 570 zio->io_txg = txg; 571 zio->io_done = done; 572 zio->io_private = private; 573 zio->io_type = type; 574 zio->io_priority = priority; 575 zio->io_vd = vd; 576 zio->io_offset = offset; 577 zio->io_orig_data = zio->io_data = data; 578 zio->io_orig_size = zio->io_size = size; 579 zio->io_orig_flags = zio->io_flags = flags; 580 zio->io_orig_stage = zio->io_stage = stage; 581 zio->io_orig_pipeline = zio->io_pipeline = pipeline; 582 zio->io_pipeline_trace = ZIO_STAGE_OPEN; 583 584 zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY); 585 zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE); 586 587 if (zb != NULL) 588 zio->io_bookmark = *zb; 589 590 if (pio != NULL) { 591 if (zio->io_logical == NULL) 592 zio->io_logical = pio->io_logical; 593 if (zio->io_child_type == ZIO_CHILD_GANG) 594 zio->io_gang_leader = pio->io_gang_leader; 595 zio_add_child(pio, zio); 596 } 597 598 return (zio); 599 } 600 601 static void 602 zio_destroy(zio_t *zio) 603 { 604 list_destroy(&zio->io_parent_list); 605 list_destroy(&zio->io_child_list); 606 mutex_destroy(&zio->io_lock); 607 cv_destroy(&zio->io_cv); 608 kmem_cache_free(zio_cache, zio); 609 } 610 611 zio_t * 612 zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done, 613 void *private, enum zio_flag flags) 614 { 615 zio_t *zio; 616 617 zio = zio_create(pio, spa, 0, NULL, NULL, 0, done, private, 618 ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL, 619 ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE); 620 621 return (zio); 622 } 623 624 zio_t * 625 zio_root(spa_t *spa, zio_done_func_t *done, void *private, enum zio_flag flags) 626 { 627 return (zio_null(NULL, spa, NULL, done, private, flags)); 628 } 629 630 void 631 zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp) 632 { 633 if (!DMU_OT_IS_VALID(BP_GET_TYPE(bp))) { 634 zfs_panic_recover("blkptr at %p has invalid TYPE %llu", 635 bp, (longlong_t)BP_GET_TYPE(bp)); 636 } 637 if (BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS || 638 BP_GET_CHECKSUM(bp) <= ZIO_CHECKSUM_ON) { 639 zfs_panic_recover("blkptr at %p has invalid CHECKSUM %llu", 640 bp, (longlong_t)BP_GET_CHECKSUM(bp)); 641 } 642 if (BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS || 643 BP_GET_COMPRESS(bp) <= ZIO_COMPRESS_ON) { 644 zfs_panic_recover("blkptr at %p has invalid COMPRESS %llu", 645 bp, (longlong_t)BP_GET_COMPRESS(bp)); 646 } 647 if (BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE) { 648 zfs_panic_recover("blkptr at %p has invalid LSIZE %llu", 649 bp, (longlong_t)BP_GET_LSIZE(bp)); 650 } 651 if (BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE) { 652 zfs_panic_recover("blkptr at %p has invalid PSIZE %llu", 653 bp, (longlong_t)BP_GET_PSIZE(bp)); 654 } 655 656 if (BP_IS_EMBEDDED(bp)) { 657 if (BPE_GET_ETYPE(bp) > NUM_BP_EMBEDDED_TYPES) { 658 zfs_panic_recover("blkptr at %p has invalid ETYPE %llu", 659 bp, (longlong_t)BPE_GET_ETYPE(bp)); 660 } 661 } 662 663 /* 664 * Pool-specific checks. 665 * 666 * Note: it would be nice to verify that the blk_birth and 667 * BP_PHYSICAL_BIRTH() are not too large. However, spa_freeze() 668 * allows the birth time of log blocks (and dmu_sync()-ed blocks 669 * that are in the log) to be arbitrarily large. 670 */ 671 for (int i = 0; i < BP_GET_NDVAS(bp); i++) { 672 uint64_t vdevid = DVA_GET_VDEV(&bp->blk_dva[i]); 673 if (vdevid >= spa->spa_root_vdev->vdev_children) { 674 zfs_panic_recover("blkptr at %p DVA %u has invalid " 675 "VDEV %llu", 676 bp, i, (longlong_t)vdevid); 677 continue; 678 } 679 vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid]; 680 if (vd == NULL) { 681 zfs_panic_recover("blkptr at %p DVA %u has invalid " 682 "VDEV %llu", 683 bp, i, (longlong_t)vdevid); 684 continue; 685 } 686 if (vd->vdev_ops == &vdev_hole_ops) { 687 zfs_panic_recover("blkptr at %p DVA %u has hole " 688 "VDEV %llu", 689 bp, i, (longlong_t)vdevid); 690 continue; 691 } 692 if (vd->vdev_ops == &vdev_missing_ops) { 693 /* 694 * "missing" vdevs are valid during import, but we 695 * don't have their detailed info (e.g. asize), so 696 * we can't perform any more checks on them. 697 */ 698 continue; 699 } 700 uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]); 701 uint64_t asize = DVA_GET_ASIZE(&bp->blk_dva[i]); 702 if (BP_IS_GANG(bp)) 703 asize = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE); 704 if (offset + asize > vd->vdev_asize) { 705 zfs_panic_recover("blkptr at %p DVA %u has invalid " 706 "OFFSET %llu", 707 bp, i, (longlong_t)offset); 708 } 709 } 710 } 711 712 zio_t * 713 zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, 714 void *data, uint64_t size, zio_done_func_t *done, void *private, 715 zio_priority_t priority, enum zio_flag flags, const zbookmark_phys_t *zb) 716 { 717 zio_t *zio; 718 719 zfs_blkptr_verify(spa, bp); 720 721 zio = zio_create(pio, spa, BP_PHYSICAL_BIRTH(bp), bp, 722 data, size, done, private, 723 ZIO_TYPE_READ, priority, flags, NULL, 0, zb, 724 ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ? 725 ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE); 726 727 return (zio); 728 } 729 730 zio_t * 731 zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, 732 void *data, uint64_t size, const zio_prop_t *zp, 733 zio_done_func_t *ready, zio_done_func_t *children_ready, 734 zio_done_func_t *physdone, zio_done_func_t *done, 735 void *private, zio_priority_t priority, enum zio_flag flags, 736 const zbookmark_phys_t *zb) 737 { 738 zio_t *zio; 739 740 ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF && 741 zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS && 742 zp->zp_compress >= ZIO_COMPRESS_OFF && 743 zp->zp_compress < ZIO_COMPRESS_FUNCTIONS && 744 DMU_OT_IS_VALID(zp->zp_type) && 745 zp->zp_level < 32 && 746 zp->zp_copies > 0 && 747 zp->zp_copies <= spa_max_replication(spa)); 748 749 zio = zio_create(pio, spa, txg, bp, data, size, done, private, 750 ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb, 751 ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ? 752 ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE); 753 754 zio->io_ready = ready; 755 zio->io_children_ready = children_ready; 756 zio->io_physdone = physdone; 757 zio->io_prop = *zp; 758 759 /* 760 * Data can be NULL if we are going to call zio_write_override() to 761 * provide the already-allocated BP. But we may need the data to 762 * verify a dedup hit (if requested). In this case, don't try to 763 * dedup (just take the already-allocated BP verbatim). 764 */ 765 if (data == NULL && zio->io_prop.zp_dedup_verify) { 766 zio->io_prop.zp_dedup = zio->io_prop.zp_dedup_verify = B_FALSE; 767 } 768 769 return (zio); 770 } 771 772 zio_t * 773 zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, void *data, 774 uint64_t size, zio_done_func_t *done, void *private, 775 zio_priority_t priority, enum zio_flag flags, zbookmark_phys_t *zb) 776 { 777 zio_t *zio; 778 779 zio = zio_create(pio, spa, txg, bp, data, size, done, private, 780 ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_IO_REWRITE, NULL, 0, zb, 781 ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE); 782 783 return (zio); 784 } 785 786 void 787 zio_write_override(zio_t *zio, blkptr_t *bp, int copies, boolean_t nopwrite) 788 { 789 ASSERT(zio->io_type == ZIO_TYPE_WRITE); 790 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 791 ASSERT(zio->io_stage == ZIO_STAGE_OPEN); 792 ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa)); 793 794 /* 795 * We must reset the io_prop to match the values that existed 796 * when the bp was first written by dmu_sync() keeping in mind 797 * that nopwrite and dedup are mutually exclusive. 798 */ 799 zio->io_prop.zp_dedup = nopwrite ? B_FALSE : zio->io_prop.zp_dedup; 800 zio->io_prop.zp_nopwrite = nopwrite; 801 zio->io_prop.zp_copies = copies; 802 zio->io_bp_override = bp; 803 } 804 805 void 806 zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp) 807 { 808 809 /* 810 * The check for EMBEDDED is a performance optimization. We 811 * process the free here (by ignoring it) rather than 812 * putting it on the list and then processing it in zio_free_sync(). 813 */ 814 if (BP_IS_EMBEDDED(bp)) 815 return; 816 metaslab_check_free(spa, bp); 817 818 /* 819 * Frees that are for the currently-syncing txg, are not going to be 820 * deferred, and which will not need to do a read (i.e. not GANG or 821 * DEDUP), can be processed immediately. Otherwise, put them on the 822 * in-memory list for later processing. 823 */ 824 if (BP_IS_GANG(bp) || BP_GET_DEDUP(bp) || 825 txg != spa->spa_syncing_txg || 826 spa_sync_pass(spa) >= zfs_sync_pass_deferred_free) { 827 bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp); 828 } else { 829 VERIFY0(zio_wait(zio_free_sync(NULL, spa, txg, bp, 0))); 830 } 831 } 832 833 zio_t * 834 zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp, 835 enum zio_flag flags) 836 { 837 zio_t *zio; 838 enum zio_stage stage = ZIO_FREE_PIPELINE; 839 840 ASSERT(!BP_IS_HOLE(bp)); 841 ASSERT(spa_syncing_txg(spa) == txg); 842 ASSERT(spa_sync_pass(spa) < zfs_sync_pass_deferred_free); 843 844 if (BP_IS_EMBEDDED(bp)) 845 return (zio_null(pio, spa, NULL, NULL, NULL, 0)); 846 847 metaslab_check_free(spa, bp); 848 arc_freed(spa, bp); 849 850 /* 851 * GANG and DEDUP blocks can induce a read (for the gang block header, 852 * or the DDT), so issue them asynchronously so that this thread is 853 * not tied up. 854 */ 855 if (BP_IS_GANG(bp) || BP_GET_DEDUP(bp)) 856 stage |= ZIO_STAGE_ISSUE_ASYNC; 857 858 zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp), 859 NULL, NULL, ZIO_TYPE_FREE, ZIO_PRIORITY_NOW, flags, 860 NULL, 0, NULL, ZIO_STAGE_OPEN, stage); 861 862 return (zio); 863 } 864 865 zio_t * 866 zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp, 867 zio_done_func_t *done, void *private, enum zio_flag flags) 868 { 869 zio_t *zio; 870 871 dprintf_bp(bp, "claiming in txg %llu", txg); 872 873 if (BP_IS_EMBEDDED(bp)) 874 return (zio_null(pio, spa, NULL, NULL, NULL, 0)); 875 876 /* 877 * A claim is an allocation of a specific block. Claims are needed 878 * to support immediate writes in the intent log. The issue is that 879 * immediate writes contain committed data, but in a txg that was 880 * *not* committed. Upon opening the pool after an unclean shutdown, 881 * the intent log claims all blocks that contain immediate write data 882 * so that the SPA knows they're in use. 883 * 884 * All claims *must* be resolved in the first txg -- before the SPA 885 * starts allocating blocks -- so that nothing is allocated twice. 886 * If txg == 0 we just verify that the block is claimable. 887 */ 888 ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <, spa_first_txg(spa)); 889 ASSERT(txg == spa_first_txg(spa) || txg == 0); 890 ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa)); /* zdb(1M) */ 891 892 zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp), 893 done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW, flags, 894 NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE); 895 ASSERT0(zio->io_queued_timestamp); 896 897 return (zio); 898 } 899 900 zio_t * 901 zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd, 902 zio_done_func_t *done, void *private, enum zio_flag flags) 903 { 904 zio_t *zio; 905 int c; 906 907 if (vd->vdev_children == 0) { 908 zio = zio_create(pio, spa, 0, NULL, NULL, 0, done, private, 909 ZIO_TYPE_IOCTL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL, 910 ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE); 911 912 zio->io_cmd = cmd; 913 } else { 914 zio = zio_null(pio, spa, NULL, NULL, NULL, flags); 915 916 for (c = 0; c < vd->vdev_children; c++) 917 zio_nowait(zio_ioctl(zio, spa, vd->vdev_child[c], cmd, 918 done, private, flags)); 919 } 920 921 return (zio); 922 } 923 924 zio_t * 925 zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size, 926 void *data, int checksum, zio_done_func_t *done, void *private, 927 zio_priority_t priority, enum zio_flag flags, boolean_t labels) 928 { 929 zio_t *zio; 930 931 ASSERT(vd->vdev_children == 0); 932 ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE || 933 offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE); 934 ASSERT3U(offset + size, <=, vd->vdev_psize); 935 936 zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, done, private, 937 ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL, vd, offset, 938 NULL, ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE); 939 940 zio->io_prop.zp_checksum = checksum; 941 942 return (zio); 943 } 944 945 zio_t * 946 zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size, 947 void *data, int checksum, zio_done_func_t *done, void *private, 948 zio_priority_t priority, enum zio_flag flags, boolean_t labels) 949 { 950 zio_t *zio; 951 952 ASSERT(vd->vdev_children == 0); 953 ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE || 954 offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE); 955 ASSERT3U(offset + size, <=, vd->vdev_psize); 956 957 zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, done, private, 958 ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL, vd, offset, 959 NULL, ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE); 960 961 zio->io_prop.zp_checksum = checksum; 962 963 if (zio_checksum_table[checksum].ci_flags & ZCHECKSUM_FLAG_EMBEDDED) { 964 /* 965 * zec checksums are necessarily destructive -- they modify 966 * the end of the write buffer to hold the verifier/checksum. 967 * Therefore, we must make a local copy in case the data is 968 * being written to multiple places in parallel. 969 */ 970 void *wbuf = zio_buf_alloc(size); 971 bcopy(data, wbuf, size); 972 zio_push_transform(zio, wbuf, size, size, NULL); 973 } 974 975 return (zio); 976 } 977 978 /* 979 * Create a child I/O to do some work for us. 980 */ 981 zio_t * 982 zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset, 983 void *data, uint64_t size, int type, zio_priority_t priority, 984 enum zio_flag flags, zio_done_func_t *done, void *private) 985 { 986 enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE; 987 zio_t *zio; 988 989 ASSERT(vd->vdev_parent == 990 (pio->io_vd ? pio->io_vd : pio->io_spa->spa_root_vdev)); 991 992 if (type == ZIO_TYPE_READ && bp != NULL) { 993 /* 994 * If we have the bp, then the child should perform the 995 * checksum and the parent need not. This pushes error 996 * detection as close to the leaves as possible and 997 * eliminates redundant checksums in the interior nodes. 998 */ 999 pipeline |= ZIO_STAGE_CHECKSUM_VERIFY; 1000 pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY; 1001 } 1002 1003 if (vd->vdev_children == 0) 1004 offset += VDEV_LABEL_START_SIZE; 1005 1006 flags |= ZIO_VDEV_CHILD_FLAGS(pio) | ZIO_FLAG_DONT_PROPAGATE; 1007 1008 /* 1009 * If we've decided to do a repair, the write is not speculative -- 1010 * even if the original read was. 1011 */ 1012 if (flags & ZIO_FLAG_IO_REPAIR) 1013 flags &= ~ZIO_FLAG_SPECULATIVE; 1014 1015 /* 1016 * If we're creating a child I/O that is not associated with a 1017 * top-level vdev, then the child zio is not an allocating I/O. 1018 * If this is a retried I/O then we ignore it since we will 1019 * have already processed the original allocating I/O. 1020 */ 1021 if (flags & ZIO_FLAG_IO_ALLOCATING && 1022 (vd != vd->vdev_top || (flags & ZIO_FLAG_IO_RETRY))) { 1023 metaslab_class_t *mc = spa_normal_class(pio->io_spa); 1024 1025 ASSERT(mc->mc_alloc_throttle_enabled); 1026 ASSERT(type == ZIO_TYPE_WRITE); 1027 ASSERT(priority == ZIO_PRIORITY_ASYNC_WRITE); 1028 ASSERT(!(flags & ZIO_FLAG_IO_REPAIR)); 1029 ASSERT(!(pio->io_flags & ZIO_FLAG_IO_REWRITE) || 1030 pio->io_child_type == ZIO_CHILD_GANG); 1031 1032 flags &= ~ZIO_FLAG_IO_ALLOCATING; 1033 } 1034 1035 zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size, 1036 done, private, type, priority, flags, vd, offset, &pio->io_bookmark, 1037 ZIO_STAGE_VDEV_IO_START >> 1, pipeline); 1038 ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV); 1039 1040 zio->io_physdone = pio->io_physdone; 1041 if (vd->vdev_ops->vdev_op_leaf && zio->io_logical != NULL) 1042 zio->io_logical->io_phys_children++; 1043 1044 return (zio); 1045 } 1046 1047 zio_t * 1048 zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, void *data, uint64_t size, 1049 int type, zio_priority_t priority, enum zio_flag flags, 1050 zio_done_func_t *done, void *private) 1051 { 1052 zio_t *zio; 1053 1054 ASSERT(vd->vdev_ops->vdev_op_leaf); 1055 1056 zio = zio_create(NULL, vd->vdev_spa, 0, NULL, 1057 data, size, done, private, type, priority, 1058 flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_DELEGATED, 1059 vd, offset, NULL, 1060 ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE); 1061 1062 return (zio); 1063 } 1064 1065 void 1066 zio_flush(zio_t *zio, vdev_t *vd) 1067 { 1068 zio_nowait(zio_ioctl(zio, zio->io_spa, vd, DKIOCFLUSHWRITECACHE, 1069 NULL, NULL, 1070 ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY)); 1071 } 1072 1073 void 1074 zio_shrink(zio_t *zio, uint64_t size) 1075 { 1076 ASSERT(zio->io_executor == NULL); 1077 ASSERT(zio->io_orig_size == zio->io_size); 1078 ASSERT(size <= zio->io_size); 1079 1080 /* 1081 * We don't shrink for raidz because of problems with the 1082 * reconstruction when reading back less than the block size. 1083 * Note, BP_IS_RAIDZ() assumes no compression. 1084 */ 1085 ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF); 1086 if (!BP_IS_RAIDZ(zio->io_bp)) 1087 zio->io_orig_size = zio->io_size = size; 1088 } 1089 1090 /* 1091 * ========================================================================== 1092 * Prepare to read and write logical blocks 1093 * ========================================================================== 1094 */ 1095 1096 static int 1097 zio_read_bp_init(zio_t *zio) 1098 { 1099 blkptr_t *bp = zio->io_bp; 1100 1101 if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF && 1102 zio->io_child_type == ZIO_CHILD_LOGICAL && 1103 !(zio->io_flags & ZIO_FLAG_RAW)) { 1104 uint64_t psize = 1105 BP_IS_EMBEDDED(bp) ? BPE_GET_PSIZE(bp) : BP_GET_PSIZE(bp); 1106 void *cbuf = zio_buf_alloc(psize); 1107 1108 zio_push_transform(zio, cbuf, psize, psize, zio_decompress); 1109 } 1110 1111 if (BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA) { 1112 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 1113 decode_embedded_bp_compressed(bp, zio->io_data); 1114 } else { 1115 ASSERT(!BP_IS_EMBEDDED(bp)); 1116 } 1117 1118 if (!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) && BP_GET_LEVEL(bp) == 0) 1119 zio->io_flags |= ZIO_FLAG_DONT_CACHE; 1120 1121 if (BP_GET_TYPE(bp) == DMU_OT_DDT_ZAP) 1122 zio->io_flags |= ZIO_FLAG_DONT_CACHE; 1123 1124 if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL) 1125 zio->io_pipeline = ZIO_DDT_READ_PIPELINE; 1126 1127 return (ZIO_PIPELINE_CONTINUE); 1128 } 1129 1130 static int 1131 zio_write_bp_init(zio_t *zio) 1132 { 1133 if (!IO_IS_ALLOCATING(zio)) 1134 return (ZIO_PIPELINE_CONTINUE); 1135 1136 ASSERT(zio->io_child_type != ZIO_CHILD_DDT); 1137 1138 if (zio->io_bp_override) { 1139 blkptr_t *bp = zio->io_bp; 1140 zio_prop_t *zp = &zio->io_prop; 1141 1142 ASSERT(bp->blk_birth != zio->io_txg); 1143 ASSERT(BP_GET_DEDUP(zio->io_bp_override) == 0); 1144 1145 *bp = *zio->io_bp_override; 1146 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 1147 1148 if (BP_IS_EMBEDDED(bp)) 1149 return (ZIO_PIPELINE_CONTINUE); 1150 1151 /* 1152 * If we've been overridden and nopwrite is set then 1153 * set the flag accordingly to indicate that a nopwrite 1154 * has already occurred. 1155 */ 1156 if (!BP_IS_HOLE(bp) && zp->zp_nopwrite) { 1157 ASSERT(!zp->zp_dedup); 1158 ASSERT3U(BP_GET_CHECKSUM(bp), ==, zp->zp_checksum); 1159 zio->io_flags |= ZIO_FLAG_NOPWRITE; 1160 return (ZIO_PIPELINE_CONTINUE); 1161 } 1162 1163 ASSERT(!zp->zp_nopwrite); 1164 1165 if (BP_IS_HOLE(bp) || !zp->zp_dedup) 1166 return (ZIO_PIPELINE_CONTINUE); 1167 1168 ASSERT((zio_checksum_table[zp->zp_checksum].ci_flags & 1169 ZCHECKSUM_FLAG_DEDUP) || zp->zp_dedup_verify); 1170 1171 if (BP_GET_CHECKSUM(bp) == zp->zp_checksum) { 1172 BP_SET_DEDUP(bp, 1); 1173 zio->io_pipeline |= ZIO_STAGE_DDT_WRITE; 1174 return (ZIO_PIPELINE_CONTINUE); 1175 } 1176 1177 /* 1178 * We were unable to handle this as an override bp, treat 1179 * it as a regular write I/O. 1180 */ 1181 zio->io_bp_override = NULL; 1182 *bp = zio->io_bp_orig; 1183 zio->io_pipeline = zio->io_orig_pipeline; 1184 } 1185 1186 return (ZIO_PIPELINE_CONTINUE); 1187 } 1188 1189 static int 1190 zio_write_compress(zio_t *zio) 1191 { 1192 spa_t *spa = zio->io_spa; 1193 zio_prop_t *zp = &zio->io_prop; 1194 enum zio_compress compress = zp->zp_compress; 1195 blkptr_t *bp = zio->io_bp; 1196 uint64_t lsize = zio->io_size; 1197 uint64_t psize = lsize; 1198 int pass = 1; 1199 1200 /* 1201 * If our children haven't all reached the ready stage, 1202 * wait for them and then repeat this pipeline stage. 1203 */ 1204 if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_READY) || 1205 zio_wait_for_children(zio, ZIO_CHILD_LOGICAL, ZIO_WAIT_READY)) 1206 return (ZIO_PIPELINE_STOP); 1207 1208 if (!IO_IS_ALLOCATING(zio)) 1209 return (ZIO_PIPELINE_CONTINUE); 1210 1211 if (zio->io_children_ready != NULL) { 1212 /* 1213 * Now that all our children are ready, run the callback 1214 * associated with this zio in case it wants to modify the 1215 * data to be written. 1216 */ 1217 ASSERT3U(zp->zp_level, >, 0); 1218 zio->io_children_ready(zio); 1219 } 1220 1221 ASSERT(zio->io_child_type != ZIO_CHILD_DDT); 1222 ASSERT(zio->io_bp_override == NULL); 1223 1224 if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg) { 1225 /* 1226 * We're rewriting an existing block, which means we're 1227 * working on behalf of spa_sync(). For spa_sync() to 1228 * converge, it must eventually be the case that we don't 1229 * have to allocate new blocks. But compression changes 1230 * the blocksize, which forces a reallocate, and makes 1231 * convergence take longer. Therefore, after the first 1232 * few passes, stop compressing to ensure convergence. 1233 */ 1234 pass = spa_sync_pass(spa); 1235 1236 ASSERT(zio->io_txg == spa_syncing_txg(spa)); 1237 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 1238 ASSERT(!BP_GET_DEDUP(bp)); 1239 1240 if (pass >= zfs_sync_pass_dont_compress) 1241 compress = ZIO_COMPRESS_OFF; 1242 1243 /* Make sure someone doesn't change their mind on overwrites */ 1244 ASSERT(BP_IS_EMBEDDED(bp) || MIN(zp->zp_copies + BP_IS_GANG(bp), 1245 spa_max_replication(spa)) == BP_GET_NDVAS(bp)); 1246 } 1247 1248 if (compress != ZIO_COMPRESS_OFF) { 1249 void *cbuf = zio_buf_alloc(lsize); 1250 psize = zio_compress_data(compress, zio->io_data, cbuf, lsize); 1251 if (psize == 0 || psize == lsize) { 1252 compress = ZIO_COMPRESS_OFF; 1253 zio_buf_free(cbuf, lsize); 1254 } else if (!zp->zp_dedup && psize <= BPE_PAYLOAD_SIZE && 1255 zp->zp_level == 0 && !DMU_OT_HAS_FILL(zp->zp_type) && 1256 spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA)) { 1257 encode_embedded_bp_compressed(bp, 1258 cbuf, compress, lsize, psize); 1259 BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA); 1260 BP_SET_TYPE(bp, zio->io_prop.zp_type); 1261 BP_SET_LEVEL(bp, zio->io_prop.zp_level); 1262 zio_buf_free(cbuf, lsize); 1263 bp->blk_birth = zio->io_txg; 1264 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 1265 ASSERT(spa_feature_is_active(spa, 1266 SPA_FEATURE_EMBEDDED_DATA)); 1267 return (ZIO_PIPELINE_CONTINUE); 1268 } else { 1269 /* 1270 * Round up compressed size up to the ashift 1271 * of the smallest-ashift device, and zero the tail. 1272 * This ensures that the compressed size of the BP 1273 * (and thus compressratio property) are correct, 1274 * in that we charge for the padding used to fill out 1275 * the last sector. 1276 */ 1277 ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT); 1278 size_t rounded = (size_t)P2ROUNDUP(psize, 1279 1ULL << spa->spa_min_ashift); 1280 if (rounded >= lsize) { 1281 compress = ZIO_COMPRESS_OFF; 1282 zio_buf_free(cbuf, lsize); 1283 psize = lsize; 1284 } else { 1285 bzero((char *)cbuf + psize, rounded - psize); 1286 psize = rounded; 1287 zio_push_transform(zio, cbuf, 1288 psize, lsize, NULL); 1289 } 1290 } 1291 1292 /* 1293 * We were unable to handle this as an override bp, treat 1294 * it as a regular write I/O. 1295 */ 1296 zio->io_bp_override = NULL; 1297 *bp = zio->io_bp_orig; 1298 zio->io_pipeline = zio->io_orig_pipeline; 1299 } 1300 1301 /* 1302 * The final pass of spa_sync() must be all rewrites, but the first 1303 * few passes offer a trade-off: allocating blocks defers convergence, 1304 * but newly allocated blocks are sequential, so they can be written 1305 * to disk faster. Therefore, we allow the first few passes of 1306 * spa_sync() to allocate new blocks, but force rewrites after that. 1307 * There should only be a handful of blocks after pass 1 in any case. 1308 */ 1309 if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg && 1310 BP_GET_PSIZE(bp) == psize && 1311 pass >= zfs_sync_pass_rewrite) { 1312 ASSERT(psize != 0); 1313 enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES; 1314 zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages; 1315 zio->io_flags |= ZIO_FLAG_IO_REWRITE; 1316 } else { 1317 BP_ZERO(bp); 1318 zio->io_pipeline = ZIO_WRITE_PIPELINE; 1319 } 1320 1321 if (psize == 0) { 1322 if (zio->io_bp_orig.blk_birth != 0 && 1323 spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) { 1324 BP_SET_LSIZE(bp, lsize); 1325 BP_SET_TYPE(bp, zp->zp_type); 1326 BP_SET_LEVEL(bp, zp->zp_level); 1327 BP_SET_BIRTH(bp, zio->io_txg, 0); 1328 } 1329 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 1330 } else { 1331 ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER); 1332 BP_SET_LSIZE(bp, lsize); 1333 BP_SET_TYPE(bp, zp->zp_type); 1334 BP_SET_LEVEL(bp, zp->zp_level); 1335 BP_SET_PSIZE(bp, psize); 1336 BP_SET_COMPRESS(bp, compress); 1337 BP_SET_CHECKSUM(bp, zp->zp_checksum); 1338 BP_SET_DEDUP(bp, zp->zp_dedup); 1339 BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER); 1340 if (zp->zp_dedup) { 1341 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 1342 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE)); 1343 zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE; 1344 } 1345 if (zp->zp_nopwrite) { 1346 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 1347 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE)); 1348 zio->io_pipeline |= ZIO_STAGE_NOP_WRITE; 1349 } 1350 } 1351 return (ZIO_PIPELINE_CONTINUE); 1352 } 1353 1354 static int 1355 zio_free_bp_init(zio_t *zio) 1356 { 1357 blkptr_t *bp = zio->io_bp; 1358 1359 if (zio->io_child_type == ZIO_CHILD_LOGICAL) { 1360 if (BP_GET_DEDUP(bp)) 1361 zio->io_pipeline = ZIO_DDT_FREE_PIPELINE; 1362 } 1363 1364 return (ZIO_PIPELINE_CONTINUE); 1365 } 1366 1367 /* 1368 * ========================================================================== 1369 * Execute the I/O pipeline 1370 * ========================================================================== 1371 */ 1372 1373 static void 1374 zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline) 1375 { 1376 spa_t *spa = zio->io_spa; 1377 zio_type_t t = zio->io_type; 1378 int flags = (cutinline ? TQ_FRONT : 0); 1379 1380 /* 1381 * If we're a config writer or a probe, the normal issue and 1382 * interrupt threads may all be blocked waiting for the config lock. 1383 * In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL. 1384 */ 1385 if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE)) 1386 t = ZIO_TYPE_NULL; 1387 1388 /* 1389 * A similar issue exists for the L2ARC write thread until L2ARC 2.0. 1390 */ 1391 if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux) 1392 t = ZIO_TYPE_NULL; 1393 1394 /* 1395 * If this is a high priority I/O, then use the high priority taskq if 1396 * available. 1397 */ 1398 if (zio->io_priority == ZIO_PRIORITY_NOW && 1399 spa->spa_zio_taskq[t][q + 1].stqs_count != 0) 1400 q++; 1401 1402 ASSERT3U(q, <, ZIO_TASKQ_TYPES); 1403 1404 /* 1405 * NB: We are assuming that the zio can only be dispatched 1406 * to a single taskq at a time. It would be a grievous error 1407 * to dispatch the zio to another taskq at the same time. 1408 */ 1409 ASSERT(zio->io_tqent.tqent_next == NULL); 1410 spa_taskq_dispatch_ent(spa, t, q, (task_func_t *)zio_execute, zio, 1411 flags, &zio->io_tqent); 1412 } 1413 1414 static boolean_t 1415 zio_taskq_member(zio_t *zio, zio_taskq_type_t q) 1416 { 1417 kthread_t *executor = zio->io_executor; 1418 spa_t *spa = zio->io_spa; 1419 1420 for (zio_type_t t = 0; t < ZIO_TYPES; t++) { 1421 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q]; 1422 uint_t i; 1423 for (i = 0; i < tqs->stqs_count; i++) { 1424 if (taskq_member(tqs->stqs_taskq[i], executor)) 1425 return (B_TRUE); 1426 } 1427 } 1428 1429 return (B_FALSE); 1430 } 1431 1432 static int 1433 zio_issue_async(zio_t *zio) 1434 { 1435 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE); 1436 1437 return (ZIO_PIPELINE_STOP); 1438 } 1439 1440 void 1441 zio_interrupt(zio_t *zio) 1442 { 1443 zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE); 1444 } 1445 1446 void 1447 zio_delay_interrupt(zio_t *zio) 1448 { 1449 /* 1450 * The timeout_generic() function isn't defined in userspace, so 1451 * rather than trying to implement the function, the zio delay 1452 * functionality has been disabled for userspace builds. 1453 */ 1454 1455 #ifdef _KERNEL 1456 /* 1457 * If io_target_timestamp is zero, then no delay has been registered 1458 * for this IO, thus jump to the end of this function and "skip" the 1459 * delay; issuing it directly to the zio layer. 1460 */ 1461 if (zio->io_target_timestamp != 0) { 1462 hrtime_t now = gethrtime(); 1463 1464 if (now >= zio->io_target_timestamp) { 1465 /* 1466 * This IO has already taken longer than the target 1467 * delay to complete, so we don't want to delay it 1468 * any longer; we "miss" the delay and issue it 1469 * directly to the zio layer. This is likely due to 1470 * the target latency being set to a value less than 1471 * the underlying hardware can satisfy (e.g. delay 1472 * set to 1ms, but the disks take 10ms to complete an 1473 * IO request). 1474 */ 1475 1476 DTRACE_PROBE2(zio__delay__miss, zio_t *, zio, 1477 hrtime_t, now); 1478 1479 zio_interrupt(zio); 1480 } else { 1481 hrtime_t diff = zio->io_target_timestamp - now; 1482 1483 DTRACE_PROBE3(zio__delay__hit, zio_t *, zio, 1484 hrtime_t, now, hrtime_t, diff); 1485 1486 (void) timeout_generic(CALLOUT_NORMAL, 1487 (void (*)(void *))zio_interrupt, zio, diff, 1, 0); 1488 } 1489 1490 return; 1491 } 1492 #endif 1493 1494 DTRACE_PROBE1(zio__delay__skip, zio_t *, zio); 1495 zio_interrupt(zio); 1496 } 1497 1498 /* 1499 * Execute the I/O pipeline until one of the following occurs: 1500 * 1501 * (1) the I/O completes 1502 * (2) the pipeline stalls waiting for dependent child I/Os 1503 * (3) the I/O issues, so we're waiting for an I/O completion interrupt 1504 * (4) the I/O is delegated by vdev-level caching or aggregation 1505 * (5) the I/O is deferred due to vdev-level queueing 1506 * (6) the I/O is handed off to another thread. 1507 * 1508 * In all cases, the pipeline stops whenever there's no CPU work; it never 1509 * burns a thread in cv_wait(). 1510 * 1511 * There's no locking on io_stage because there's no legitimate way 1512 * for multiple threads to be attempting to process the same I/O. 1513 */ 1514 static zio_pipe_stage_t *zio_pipeline[]; 1515 1516 void 1517 zio_execute(zio_t *zio) 1518 { 1519 zio->io_executor = curthread; 1520 1521 ASSERT3U(zio->io_queued_timestamp, >, 0); 1522 1523 while (zio->io_stage < ZIO_STAGE_DONE) { 1524 enum zio_stage pipeline = zio->io_pipeline; 1525 enum zio_stage stage = zio->io_stage; 1526 int rv; 1527 1528 ASSERT(!MUTEX_HELD(&zio->io_lock)); 1529 ASSERT(ISP2(stage)); 1530 ASSERT(zio->io_stall == NULL); 1531 1532 do { 1533 stage <<= 1; 1534 } while ((stage & pipeline) == 0); 1535 1536 ASSERT(stage <= ZIO_STAGE_DONE); 1537 1538 /* 1539 * If we are in interrupt context and this pipeline stage 1540 * will grab a config lock that is held across I/O, 1541 * or may wait for an I/O that needs an interrupt thread 1542 * to complete, issue async to avoid deadlock. 1543 * 1544 * For VDEV_IO_START, we cut in line so that the io will 1545 * be sent to disk promptly. 1546 */ 1547 if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL && 1548 zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) { 1549 boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ? 1550 zio_requeue_io_start_cut_in_line : B_FALSE; 1551 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut); 1552 return; 1553 } 1554 1555 zio->io_stage = stage; 1556 zio->io_pipeline_trace |= zio->io_stage; 1557 rv = zio_pipeline[highbit64(stage) - 1](zio); 1558 1559 if (rv == ZIO_PIPELINE_STOP) 1560 return; 1561 1562 ASSERT(rv == ZIO_PIPELINE_CONTINUE); 1563 } 1564 } 1565 1566 /* 1567 * ========================================================================== 1568 * Initiate I/O, either sync or async 1569 * ========================================================================== 1570 */ 1571 int 1572 zio_wait(zio_t *zio) 1573 { 1574 int error; 1575 1576 ASSERT(zio->io_stage == ZIO_STAGE_OPEN); 1577 ASSERT(zio->io_executor == NULL); 1578 1579 zio->io_waiter = curthread; 1580 ASSERT0(zio->io_queued_timestamp); 1581 zio->io_queued_timestamp = gethrtime(); 1582 1583 zio_execute(zio); 1584 1585 mutex_enter(&zio->io_lock); 1586 while (zio->io_executor != NULL) 1587 cv_wait(&zio->io_cv, &zio->io_lock); 1588 mutex_exit(&zio->io_lock); 1589 1590 error = zio->io_error; 1591 zio_destroy(zio); 1592 1593 return (error); 1594 } 1595 1596 void 1597 zio_nowait(zio_t *zio) 1598 { 1599 ASSERT(zio->io_executor == NULL); 1600 1601 if (zio->io_child_type == ZIO_CHILD_LOGICAL && 1602 zio_unique_parent(zio) == NULL) { 1603 /* 1604 * This is a logical async I/O with no parent to wait for it. 1605 * We add it to the spa_async_root_zio "Godfather" I/O which 1606 * will ensure they complete prior to unloading the pool. 1607 */ 1608 spa_t *spa = zio->io_spa; 1609 1610 zio_add_child(spa->spa_async_zio_root[CPU_SEQID], zio); 1611 } 1612 1613 ASSERT0(zio->io_queued_timestamp); 1614 zio->io_queued_timestamp = gethrtime(); 1615 zio_execute(zio); 1616 } 1617 1618 /* 1619 * ========================================================================== 1620 * Reexecute or suspend/resume failed I/O 1621 * ========================================================================== 1622 */ 1623 1624 static void 1625 zio_reexecute(zio_t *pio) 1626 { 1627 zio_t *cio, *cio_next; 1628 1629 ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL); 1630 ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN); 1631 ASSERT(pio->io_gang_leader == NULL); 1632 ASSERT(pio->io_gang_tree == NULL); 1633 1634 pio->io_flags = pio->io_orig_flags; 1635 pio->io_stage = pio->io_orig_stage; 1636 pio->io_pipeline = pio->io_orig_pipeline; 1637 pio->io_reexecute = 0; 1638 pio->io_flags |= ZIO_FLAG_REEXECUTED; 1639 pio->io_pipeline_trace = 0; 1640 pio->io_error = 0; 1641 for (int w = 0; w < ZIO_WAIT_TYPES; w++) 1642 pio->io_state[w] = 0; 1643 for (int c = 0; c < ZIO_CHILD_TYPES; c++) 1644 pio->io_child_error[c] = 0; 1645 1646 if (IO_IS_ALLOCATING(pio)) 1647 BP_ZERO(pio->io_bp); 1648 1649 /* 1650 * As we reexecute pio's children, new children could be created. 1651 * New children go to the head of pio's io_child_list, however, 1652 * so we will (correctly) not reexecute them. The key is that 1653 * the remainder of pio's io_child_list, from 'cio_next' onward, 1654 * cannot be affected by any side effects of reexecuting 'cio'. 1655 */ 1656 zio_link_t *zl = NULL; 1657 for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) { 1658 cio_next = zio_walk_children(pio, &zl); 1659 mutex_enter(&pio->io_lock); 1660 for (int w = 0; w < ZIO_WAIT_TYPES; w++) 1661 pio->io_children[cio->io_child_type][w]++; 1662 mutex_exit(&pio->io_lock); 1663 zio_reexecute(cio); 1664 } 1665 1666 /* 1667 * Now that all children have been reexecuted, execute the parent. 1668 * We don't reexecute "The Godfather" I/O here as it's the 1669 * responsibility of the caller to wait on him. 1670 */ 1671 if (!(pio->io_flags & ZIO_FLAG_GODFATHER)) { 1672 pio->io_queued_timestamp = gethrtime(); 1673 zio_execute(pio); 1674 } 1675 } 1676 1677 void 1678 zio_suspend(spa_t *spa, zio_t *zio) 1679 { 1680 if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC) 1681 fm_panic("Pool '%s' has encountered an uncorrectable I/O " 1682 "failure and the failure mode property for this pool " 1683 "is set to panic.", spa_name(spa)); 1684 1685 zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL, NULL, 0, 0); 1686 1687 mutex_enter(&spa->spa_suspend_lock); 1688 1689 if (spa->spa_suspend_zio_root == NULL) 1690 spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL, 1691 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 1692 ZIO_FLAG_GODFATHER); 1693 1694 spa->spa_suspended = B_TRUE; 1695 1696 if (zio != NULL) { 1697 ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER)); 1698 ASSERT(zio != spa->spa_suspend_zio_root); 1699 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 1700 ASSERT(zio_unique_parent(zio) == NULL); 1701 ASSERT(zio->io_stage == ZIO_STAGE_DONE); 1702 zio_add_child(spa->spa_suspend_zio_root, zio); 1703 } 1704 1705 mutex_exit(&spa->spa_suspend_lock); 1706 } 1707 1708 int 1709 zio_resume(spa_t *spa) 1710 { 1711 zio_t *pio; 1712 1713 /* 1714 * Reexecute all previously suspended i/o. 1715 */ 1716 mutex_enter(&spa->spa_suspend_lock); 1717 spa->spa_suspended = B_FALSE; 1718 cv_broadcast(&spa->spa_suspend_cv); 1719 pio = spa->spa_suspend_zio_root; 1720 spa->spa_suspend_zio_root = NULL; 1721 mutex_exit(&spa->spa_suspend_lock); 1722 1723 if (pio == NULL) 1724 return (0); 1725 1726 zio_reexecute(pio); 1727 return (zio_wait(pio)); 1728 } 1729 1730 void 1731 zio_resume_wait(spa_t *spa) 1732 { 1733 mutex_enter(&spa->spa_suspend_lock); 1734 while (spa_suspended(spa)) 1735 cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock); 1736 mutex_exit(&spa->spa_suspend_lock); 1737 } 1738 1739 /* 1740 * ========================================================================== 1741 * Gang blocks. 1742 * 1743 * A gang block is a collection of small blocks that looks to the DMU 1744 * like one large block. When zio_dva_allocate() cannot find a block 1745 * of the requested size, due to either severe fragmentation or the pool 1746 * being nearly full, it calls zio_write_gang_block() to construct the 1747 * block from smaller fragments. 1748 * 1749 * A gang block consists of a gang header (zio_gbh_phys_t) and up to 1750 * three (SPA_GBH_NBLKPTRS) gang members. The gang header is just like 1751 * an indirect block: it's an array of block pointers. It consumes 1752 * only one sector and hence is allocatable regardless of fragmentation. 1753 * The gang header's bps point to its gang members, which hold the data. 1754 * 1755 * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg> 1756 * as the verifier to ensure uniqueness of the SHA256 checksum. 1757 * Critically, the gang block bp's blk_cksum is the checksum of the data, 1758 * not the gang header. This ensures that data block signatures (needed for 1759 * deduplication) are independent of how the block is physically stored. 1760 * 1761 * Gang blocks can be nested: a gang member may itself be a gang block. 1762 * Thus every gang block is a tree in which root and all interior nodes are 1763 * gang headers, and the leaves are normal blocks that contain user data. 1764 * The root of the gang tree is called the gang leader. 1765 * 1766 * To perform any operation (read, rewrite, free, claim) on a gang block, 1767 * zio_gang_assemble() first assembles the gang tree (minus data leaves) 1768 * in the io_gang_tree field of the original logical i/o by recursively 1769 * reading the gang leader and all gang headers below it. This yields 1770 * an in-core tree containing the contents of every gang header and the 1771 * bps for every constituent of the gang block. 1772 * 1773 * With the gang tree now assembled, zio_gang_issue() just walks the gang tree 1774 * and invokes a callback on each bp. To free a gang block, zio_gang_issue() 1775 * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp. 1776 * zio_claim_gang() provides a similarly trivial wrapper for zio_claim(). 1777 * zio_read_gang() is a wrapper around zio_read() that omits reading gang 1778 * headers, since we already have those in io_gang_tree. zio_rewrite_gang() 1779 * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite() 1780 * of the gang header plus zio_checksum_compute() of the data to update the 1781 * gang header's blk_cksum as described above. 1782 * 1783 * The two-phase assemble/issue model solves the problem of partial failure -- 1784 * what if you'd freed part of a gang block but then couldn't read the 1785 * gang header for another part? Assembling the entire gang tree first 1786 * ensures that all the necessary gang header I/O has succeeded before 1787 * starting the actual work of free, claim, or write. Once the gang tree 1788 * is assembled, free and claim are in-memory operations that cannot fail. 1789 * 1790 * In the event that a gang write fails, zio_dva_unallocate() walks the 1791 * gang tree to immediately free (i.e. insert back into the space map) 1792 * everything we've allocated. This ensures that we don't get ENOSPC 1793 * errors during repeated suspend/resume cycles due to a flaky device. 1794 * 1795 * Gang rewrites only happen during sync-to-convergence. If we can't assemble 1796 * the gang tree, we won't modify the block, so we can safely defer the free 1797 * (knowing that the block is still intact). If we *can* assemble the gang 1798 * tree, then even if some of the rewrites fail, zio_dva_unallocate() will free 1799 * each constituent bp and we can allocate a new block on the next sync pass. 1800 * 1801 * In all cases, the gang tree allows complete recovery from partial failure. 1802 * ========================================================================== 1803 */ 1804 1805 static zio_t * 1806 zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data) 1807 { 1808 if (gn != NULL) 1809 return (pio); 1810 1811 return (zio_read(pio, pio->io_spa, bp, data, BP_GET_PSIZE(bp), 1812 NULL, NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), 1813 &pio->io_bookmark)); 1814 } 1815 1816 zio_t * 1817 zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data) 1818 { 1819 zio_t *zio; 1820 1821 if (gn != NULL) { 1822 zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp, 1823 gn->gn_gbh, SPA_GANGBLOCKSIZE, NULL, NULL, pio->io_priority, 1824 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 1825 /* 1826 * As we rewrite each gang header, the pipeline will compute 1827 * a new gang block header checksum for it; but no one will 1828 * compute a new data checksum, so we do that here. The one 1829 * exception is the gang leader: the pipeline already computed 1830 * its data checksum because that stage precedes gang assembly. 1831 * (Presently, nothing actually uses interior data checksums; 1832 * this is just good hygiene.) 1833 */ 1834 if (gn != pio->io_gang_leader->io_gang_tree) { 1835 zio_checksum_compute(zio, BP_GET_CHECKSUM(bp), 1836 data, BP_GET_PSIZE(bp)); 1837 } 1838 /* 1839 * If we are here to damage data for testing purposes, 1840 * leave the GBH alone so that we can detect the damage. 1841 */ 1842 if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE) 1843 zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES; 1844 } else { 1845 zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp, 1846 data, BP_GET_PSIZE(bp), NULL, NULL, pio->io_priority, 1847 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 1848 } 1849 1850 return (zio); 1851 } 1852 1853 /* ARGSUSED */ 1854 zio_t * 1855 zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data) 1856 { 1857 return (zio_free_sync(pio, pio->io_spa, pio->io_txg, bp, 1858 ZIO_GANG_CHILD_FLAGS(pio))); 1859 } 1860 1861 /* ARGSUSED */ 1862 zio_t * 1863 zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data) 1864 { 1865 return (zio_claim(pio, pio->io_spa, pio->io_txg, bp, 1866 NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio))); 1867 } 1868 1869 static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = { 1870 NULL, 1871 zio_read_gang, 1872 zio_rewrite_gang, 1873 zio_free_gang, 1874 zio_claim_gang, 1875 NULL 1876 }; 1877 1878 static void zio_gang_tree_assemble_done(zio_t *zio); 1879 1880 static zio_gang_node_t * 1881 zio_gang_node_alloc(zio_gang_node_t **gnpp) 1882 { 1883 zio_gang_node_t *gn; 1884 1885 ASSERT(*gnpp == NULL); 1886 1887 gn = kmem_zalloc(sizeof (*gn), KM_SLEEP); 1888 gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE); 1889 *gnpp = gn; 1890 1891 return (gn); 1892 } 1893 1894 static void 1895 zio_gang_node_free(zio_gang_node_t **gnpp) 1896 { 1897 zio_gang_node_t *gn = *gnpp; 1898 1899 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) 1900 ASSERT(gn->gn_child[g] == NULL); 1901 1902 zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE); 1903 kmem_free(gn, sizeof (*gn)); 1904 *gnpp = NULL; 1905 } 1906 1907 static void 1908 zio_gang_tree_free(zio_gang_node_t **gnpp) 1909 { 1910 zio_gang_node_t *gn = *gnpp; 1911 1912 if (gn == NULL) 1913 return; 1914 1915 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) 1916 zio_gang_tree_free(&gn->gn_child[g]); 1917 1918 zio_gang_node_free(gnpp); 1919 } 1920 1921 static void 1922 zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp) 1923 { 1924 zio_gang_node_t *gn = zio_gang_node_alloc(gnpp); 1925 1926 ASSERT(gio->io_gang_leader == gio); 1927 ASSERT(BP_IS_GANG(bp)); 1928 1929 zio_nowait(zio_read(gio, gio->io_spa, bp, gn->gn_gbh, 1930 SPA_GANGBLOCKSIZE, zio_gang_tree_assemble_done, gn, 1931 gio->io_priority, ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark)); 1932 } 1933 1934 static void 1935 zio_gang_tree_assemble_done(zio_t *zio) 1936 { 1937 zio_t *gio = zio->io_gang_leader; 1938 zio_gang_node_t *gn = zio->io_private; 1939 blkptr_t *bp = zio->io_bp; 1940 1941 ASSERT(gio == zio_unique_parent(zio)); 1942 ASSERT(zio->io_child_count == 0); 1943 1944 if (zio->io_error) 1945 return; 1946 1947 if (BP_SHOULD_BYTESWAP(bp)) 1948 byteswap_uint64_array(zio->io_data, zio->io_size); 1949 1950 ASSERT(zio->io_data == gn->gn_gbh); 1951 ASSERT(zio->io_size == SPA_GANGBLOCKSIZE); 1952 ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC); 1953 1954 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 1955 blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g]; 1956 if (!BP_IS_GANG(gbp)) 1957 continue; 1958 zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]); 1959 } 1960 } 1961 1962 static void 1963 zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, void *data) 1964 { 1965 zio_t *gio = pio->io_gang_leader; 1966 zio_t *zio; 1967 1968 ASSERT(BP_IS_GANG(bp) == !!gn); 1969 ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp)); 1970 ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree); 1971 1972 /* 1973 * If you're a gang header, your data is in gn->gn_gbh. 1974 * If you're a gang member, your data is in 'data' and gn == NULL. 1975 */ 1976 zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data); 1977 1978 if (gn != NULL) { 1979 ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC); 1980 1981 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 1982 blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g]; 1983 if (BP_IS_HOLE(gbp)) 1984 continue; 1985 zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data); 1986 data = (char *)data + BP_GET_PSIZE(gbp); 1987 } 1988 } 1989 1990 if (gn == gio->io_gang_tree) 1991 ASSERT3P((char *)gio->io_data + gio->io_size, ==, data); 1992 1993 if (zio != pio) 1994 zio_nowait(zio); 1995 } 1996 1997 static int 1998 zio_gang_assemble(zio_t *zio) 1999 { 2000 blkptr_t *bp = zio->io_bp; 2001 2002 ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL); 2003 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 2004 2005 zio->io_gang_leader = zio; 2006 2007 zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree); 2008 2009 return (ZIO_PIPELINE_CONTINUE); 2010 } 2011 2012 static int 2013 zio_gang_issue(zio_t *zio) 2014 { 2015 blkptr_t *bp = zio->io_bp; 2016 2017 if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_DONE)) 2018 return (ZIO_PIPELINE_STOP); 2019 2020 ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio); 2021 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 2022 2023 if (zio->io_child_error[ZIO_CHILD_GANG] == 0) 2024 zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_data); 2025 else 2026 zio_gang_tree_free(&zio->io_gang_tree); 2027 2028 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 2029 2030 return (ZIO_PIPELINE_CONTINUE); 2031 } 2032 2033 static void 2034 zio_write_gang_member_ready(zio_t *zio) 2035 { 2036 zio_t *pio = zio_unique_parent(zio); 2037 zio_t *gio = zio->io_gang_leader; 2038 dva_t *cdva = zio->io_bp->blk_dva; 2039 dva_t *pdva = pio->io_bp->blk_dva; 2040 uint64_t asize; 2041 2042 if (BP_IS_HOLE(zio->io_bp)) 2043 return; 2044 2045 ASSERT(BP_IS_HOLE(&zio->io_bp_orig)); 2046 2047 ASSERT(zio->io_child_type == ZIO_CHILD_GANG); 2048 ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies); 2049 ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp)); 2050 ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp)); 2051 ASSERT3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp)); 2052 2053 mutex_enter(&pio->io_lock); 2054 for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) { 2055 ASSERT(DVA_GET_GANG(&pdva[d])); 2056 asize = DVA_GET_ASIZE(&pdva[d]); 2057 asize += DVA_GET_ASIZE(&cdva[d]); 2058 DVA_SET_ASIZE(&pdva[d], asize); 2059 } 2060 mutex_exit(&pio->io_lock); 2061 } 2062 2063 static int 2064 zio_write_gang_block(zio_t *pio) 2065 { 2066 spa_t *spa = pio->io_spa; 2067 metaslab_class_t *mc = spa_normal_class(spa); 2068 blkptr_t *bp = pio->io_bp; 2069 zio_t *gio = pio->io_gang_leader; 2070 zio_t *zio; 2071 zio_gang_node_t *gn, **gnpp; 2072 zio_gbh_phys_t *gbh; 2073 uint64_t txg = pio->io_txg; 2074 uint64_t resid = pio->io_size; 2075 uint64_t lsize; 2076 int copies = gio->io_prop.zp_copies; 2077 int gbh_copies = MIN(copies + 1, spa_max_replication(spa)); 2078 zio_prop_t zp; 2079 int error; 2080 2081 int flags = METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER; 2082 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 2083 ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 2084 ASSERT(!(pio->io_flags & ZIO_FLAG_NODATA)); 2085 2086 flags |= METASLAB_ASYNC_ALLOC; 2087 VERIFY(refcount_held(&mc->mc_alloc_slots, pio)); 2088 2089 /* 2090 * The logical zio has already placed a reservation for 2091 * 'copies' allocation slots but gang blocks may require 2092 * additional copies. These additional copies 2093 * (i.e. gbh_copies - copies) are guaranteed to succeed 2094 * since metaslab_class_throttle_reserve() always allows 2095 * additional reservations for gang blocks. 2096 */ 2097 VERIFY(metaslab_class_throttle_reserve(mc, gbh_copies - copies, 2098 pio, flags)); 2099 } 2100 2101 error = metaslab_alloc(spa, mc, SPA_GANGBLOCKSIZE, 2102 bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, flags, pio); 2103 if (error) { 2104 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 2105 ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 2106 ASSERT(!(pio->io_flags & ZIO_FLAG_NODATA)); 2107 2108 /* 2109 * If we failed to allocate the gang block header then 2110 * we remove any additional allocation reservations that 2111 * we placed here. The original reservation will 2112 * be removed when the logical I/O goes to the ready 2113 * stage. 2114 */ 2115 metaslab_class_throttle_unreserve(mc, 2116 gbh_copies - copies, pio); 2117 } 2118 pio->io_error = error; 2119 return (ZIO_PIPELINE_CONTINUE); 2120 } 2121 2122 if (pio == gio) { 2123 gnpp = &gio->io_gang_tree; 2124 } else { 2125 gnpp = pio->io_private; 2126 ASSERT(pio->io_ready == zio_write_gang_member_ready); 2127 } 2128 2129 gn = zio_gang_node_alloc(gnpp); 2130 gbh = gn->gn_gbh; 2131 bzero(gbh, SPA_GANGBLOCKSIZE); 2132 2133 /* 2134 * Create the gang header. 2135 */ 2136 zio = zio_rewrite(pio, spa, txg, bp, gbh, SPA_GANGBLOCKSIZE, NULL, NULL, 2137 pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 2138 2139 /* 2140 * Create and nowait the gang children. 2141 */ 2142 for (int g = 0; resid != 0; resid -= lsize, g++) { 2143 lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g), 2144 SPA_MINBLOCKSIZE); 2145 ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid); 2146 2147 zp.zp_checksum = gio->io_prop.zp_checksum; 2148 zp.zp_compress = ZIO_COMPRESS_OFF; 2149 zp.zp_type = DMU_OT_NONE; 2150 zp.zp_level = 0; 2151 zp.zp_copies = gio->io_prop.zp_copies; 2152 zp.zp_dedup = B_FALSE; 2153 zp.zp_dedup_verify = B_FALSE; 2154 zp.zp_nopwrite = B_FALSE; 2155 2156 zio_t *cio = zio_write(zio, spa, txg, &gbh->zg_blkptr[g], 2157 (char *)pio->io_data + (pio->io_size - resid), lsize, &zp, 2158 zio_write_gang_member_ready, NULL, NULL, NULL, 2159 &gn->gn_child[g], pio->io_priority, 2160 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 2161 2162 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 2163 ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 2164 ASSERT(!(pio->io_flags & ZIO_FLAG_NODATA)); 2165 2166 /* 2167 * Gang children won't throttle but we should 2168 * account for their work, so reserve an allocation 2169 * slot for them here. 2170 */ 2171 VERIFY(metaslab_class_throttle_reserve(mc, 2172 zp.zp_copies, cio, flags)); 2173 } 2174 zio_nowait(cio); 2175 } 2176 2177 /* 2178 * Set pio's pipeline to just wait for zio to finish. 2179 */ 2180 pio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 2181 2182 zio_nowait(zio); 2183 2184 return (ZIO_PIPELINE_CONTINUE); 2185 } 2186 2187 /* 2188 * The zio_nop_write stage in the pipeline determines if allocating a 2189 * new bp is necessary. The nopwrite feature can handle writes in 2190 * either syncing or open context (i.e. zil writes) and as a result is 2191 * mutually exclusive with dedup. 2192 * 2193 * By leveraging a cryptographically secure checksum, such as SHA256, we 2194 * can compare the checksums of the new data and the old to determine if 2195 * allocating a new block is required. Note that our requirements for 2196 * cryptographic strength are fairly weak: there can't be any accidental 2197 * hash collisions, but we don't need to be secure against intentional 2198 * (malicious) collisions. To trigger a nopwrite, you have to be able 2199 * to write the file to begin with, and triggering an incorrect (hash 2200 * collision) nopwrite is no worse than simply writing to the file. 2201 * That said, there are no known attacks against the checksum algorithms 2202 * used for nopwrite, assuming that the salt and the checksums 2203 * themselves remain secret. 2204 */ 2205 static int 2206 zio_nop_write(zio_t *zio) 2207 { 2208 blkptr_t *bp = zio->io_bp; 2209 blkptr_t *bp_orig = &zio->io_bp_orig; 2210 zio_prop_t *zp = &zio->io_prop; 2211 2212 ASSERT(BP_GET_LEVEL(bp) == 0); 2213 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE)); 2214 ASSERT(zp->zp_nopwrite); 2215 ASSERT(!zp->zp_dedup); 2216 ASSERT(zio->io_bp_override == NULL); 2217 ASSERT(IO_IS_ALLOCATING(zio)); 2218 2219 /* 2220 * Check to see if the original bp and the new bp have matching 2221 * characteristics (i.e. same checksum, compression algorithms, etc). 2222 * If they don't then just continue with the pipeline which will 2223 * allocate a new bp. 2224 */ 2225 if (BP_IS_HOLE(bp_orig) || 2226 !(zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_flags & 2227 ZCHECKSUM_FLAG_NOPWRITE) || 2228 BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) || 2229 BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) || 2230 BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) || 2231 zp->zp_copies != BP_GET_NDVAS(bp_orig)) 2232 return (ZIO_PIPELINE_CONTINUE); 2233 2234 /* 2235 * If the checksums match then reset the pipeline so that we 2236 * avoid allocating a new bp and issuing any I/O. 2237 */ 2238 if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) { 2239 ASSERT(zio_checksum_table[zp->zp_checksum].ci_flags & 2240 ZCHECKSUM_FLAG_NOPWRITE); 2241 ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig)); 2242 ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig)); 2243 ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF); 2244 ASSERT(bcmp(&bp->blk_prop, &bp_orig->blk_prop, 2245 sizeof (uint64_t)) == 0); 2246 2247 *bp = *bp_orig; 2248 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 2249 zio->io_flags |= ZIO_FLAG_NOPWRITE; 2250 } 2251 2252 return (ZIO_PIPELINE_CONTINUE); 2253 } 2254 2255 /* 2256 * ========================================================================== 2257 * Dedup 2258 * ========================================================================== 2259 */ 2260 static void 2261 zio_ddt_child_read_done(zio_t *zio) 2262 { 2263 blkptr_t *bp = zio->io_bp; 2264 ddt_entry_t *dde = zio->io_private; 2265 ddt_phys_t *ddp; 2266 zio_t *pio = zio_unique_parent(zio); 2267 2268 mutex_enter(&pio->io_lock); 2269 ddp = ddt_phys_select(dde, bp); 2270 if (zio->io_error == 0) 2271 ddt_phys_clear(ddp); /* this ddp doesn't need repair */ 2272 if (zio->io_error == 0 && dde->dde_repair_data == NULL) 2273 dde->dde_repair_data = zio->io_data; 2274 else 2275 zio_buf_free(zio->io_data, zio->io_size); 2276 mutex_exit(&pio->io_lock); 2277 } 2278 2279 static int 2280 zio_ddt_read_start(zio_t *zio) 2281 { 2282 blkptr_t *bp = zio->io_bp; 2283 2284 ASSERT(BP_GET_DEDUP(bp)); 2285 ASSERT(BP_GET_PSIZE(bp) == zio->io_size); 2286 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2287 2288 if (zio->io_child_error[ZIO_CHILD_DDT]) { 2289 ddt_t *ddt = ddt_select(zio->io_spa, bp); 2290 ddt_entry_t *dde = ddt_repair_start(ddt, bp); 2291 ddt_phys_t *ddp = dde->dde_phys; 2292 ddt_phys_t *ddp_self = ddt_phys_select(dde, bp); 2293 blkptr_t blk; 2294 2295 ASSERT(zio->io_vsd == NULL); 2296 zio->io_vsd = dde; 2297 2298 if (ddp_self == NULL) 2299 return (ZIO_PIPELINE_CONTINUE); 2300 2301 for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { 2302 if (ddp->ddp_phys_birth == 0 || ddp == ddp_self) 2303 continue; 2304 ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp, 2305 &blk); 2306 zio_nowait(zio_read(zio, zio->io_spa, &blk, 2307 zio_buf_alloc(zio->io_size), zio->io_size, 2308 zio_ddt_child_read_done, dde, zio->io_priority, 2309 ZIO_DDT_CHILD_FLAGS(zio) | ZIO_FLAG_DONT_PROPAGATE, 2310 &zio->io_bookmark)); 2311 } 2312 return (ZIO_PIPELINE_CONTINUE); 2313 } 2314 2315 zio_nowait(zio_read(zio, zio->io_spa, bp, 2316 zio->io_data, zio->io_size, NULL, NULL, zio->io_priority, 2317 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark)); 2318 2319 return (ZIO_PIPELINE_CONTINUE); 2320 } 2321 2322 static int 2323 zio_ddt_read_done(zio_t *zio) 2324 { 2325 blkptr_t *bp = zio->io_bp; 2326 2327 if (zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_DONE)) 2328 return (ZIO_PIPELINE_STOP); 2329 2330 ASSERT(BP_GET_DEDUP(bp)); 2331 ASSERT(BP_GET_PSIZE(bp) == zio->io_size); 2332 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2333 2334 if (zio->io_child_error[ZIO_CHILD_DDT]) { 2335 ddt_t *ddt = ddt_select(zio->io_spa, bp); 2336 ddt_entry_t *dde = zio->io_vsd; 2337 if (ddt == NULL) { 2338 ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE); 2339 return (ZIO_PIPELINE_CONTINUE); 2340 } 2341 if (dde == NULL) { 2342 zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1; 2343 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE); 2344 return (ZIO_PIPELINE_STOP); 2345 } 2346 if (dde->dde_repair_data != NULL) { 2347 bcopy(dde->dde_repair_data, zio->io_data, zio->io_size); 2348 zio->io_child_error[ZIO_CHILD_DDT] = 0; 2349 } 2350 ddt_repair_done(ddt, dde); 2351 zio->io_vsd = NULL; 2352 } 2353 2354 ASSERT(zio->io_vsd == NULL); 2355 2356 return (ZIO_PIPELINE_CONTINUE); 2357 } 2358 2359 static boolean_t 2360 zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde) 2361 { 2362 spa_t *spa = zio->io_spa; 2363 2364 /* 2365 * Note: we compare the original data, not the transformed data, 2366 * because when zio->io_bp is an override bp, we will not have 2367 * pushed the I/O transforms. That's an important optimization 2368 * because otherwise we'd compress/encrypt all dmu_sync() data twice. 2369 */ 2370 for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { 2371 zio_t *lio = dde->dde_lead_zio[p]; 2372 2373 if (lio != NULL) { 2374 return (lio->io_orig_size != zio->io_orig_size || 2375 bcmp(zio->io_orig_data, lio->io_orig_data, 2376 zio->io_orig_size) != 0); 2377 } 2378 } 2379 2380 for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { 2381 ddt_phys_t *ddp = &dde->dde_phys[p]; 2382 2383 if (ddp->ddp_phys_birth != 0) { 2384 arc_buf_t *abuf = NULL; 2385 arc_flags_t aflags = ARC_FLAG_WAIT; 2386 blkptr_t blk = *zio->io_bp; 2387 int error; 2388 2389 ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth); 2390 2391 ddt_exit(ddt); 2392 2393 error = arc_read(NULL, spa, &blk, 2394 arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ, 2395 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 2396 &aflags, &zio->io_bookmark); 2397 2398 if (error == 0) { 2399 if (arc_buf_size(abuf) != zio->io_orig_size || 2400 bcmp(abuf->b_data, zio->io_orig_data, 2401 zio->io_orig_size) != 0) 2402 error = SET_ERROR(EEXIST); 2403 arc_buf_destroy(abuf, &abuf); 2404 } 2405 2406 ddt_enter(ddt); 2407 return (error != 0); 2408 } 2409 } 2410 2411 return (B_FALSE); 2412 } 2413 2414 static void 2415 zio_ddt_child_write_ready(zio_t *zio) 2416 { 2417 int p = zio->io_prop.zp_copies; 2418 ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp); 2419 ddt_entry_t *dde = zio->io_private; 2420 ddt_phys_t *ddp = &dde->dde_phys[p]; 2421 zio_t *pio; 2422 2423 if (zio->io_error) 2424 return; 2425 2426 ddt_enter(ddt); 2427 2428 ASSERT(dde->dde_lead_zio[p] == zio); 2429 2430 ddt_phys_fill(ddp, zio->io_bp); 2431 2432 zio_link_t *zl = NULL; 2433 while ((pio = zio_walk_parents(zio, &zl)) != NULL) 2434 ddt_bp_fill(ddp, pio->io_bp, zio->io_txg); 2435 2436 ddt_exit(ddt); 2437 } 2438 2439 static void 2440 zio_ddt_child_write_done(zio_t *zio) 2441 { 2442 int p = zio->io_prop.zp_copies; 2443 ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp); 2444 ddt_entry_t *dde = zio->io_private; 2445 ddt_phys_t *ddp = &dde->dde_phys[p]; 2446 2447 ddt_enter(ddt); 2448 2449 ASSERT(ddp->ddp_refcnt == 0); 2450 ASSERT(dde->dde_lead_zio[p] == zio); 2451 dde->dde_lead_zio[p] = NULL; 2452 2453 if (zio->io_error == 0) { 2454 zio_link_t *zl = NULL; 2455 while (zio_walk_parents(zio, &zl) != NULL) 2456 ddt_phys_addref(ddp); 2457 } else { 2458 ddt_phys_clear(ddp); 2459 } 2460 2461 ddt_exit(ddt); 2462 } 2463 2464 static void 2465 zio_ddt_ditto_write_done(zio_t *zio) 2466 { 2467 int p = DDT_PHYS_DITTO; 2468 zio_prop_t *zp = &zio->io_prop; 2469 blkptr_t *bp = zio->io_bp; 2470 ddt_t *ddt = ddt_select(zio->io_spa, bp); 2471 ddt_entry_t *dde = zio->io_private; 2472 ddt_phys_t *ddp = &dde->dde_phys[p]; 2473 ddt_key_t *ddk = &dde->dde_key; 2474 2475 ddt_enter(ddt); 2476 2477 ASSERT(ddp->ddp_refcnt == 0); 2478 ASSERT(dde->dde_lead_zio[p] == zio); 2479 dde->dde_lead_zio[p] = NULL; 2480 2481 if (zio->io_error == 0) { 2482 ASSERT(ZIO_CHECKSUM_EQUAL(bp->blk_cksum, ddk->ddk_cksum)); 2483 ASSERT(zp->zp_copies < SPA_DVAS_PER_BP); 2484 ASSERT(zp->zp_copies == BP_GET_NDVAS(bp) - BP_IS_GANG(bp)); 2485 if (ddp->ddp_phys_birth != 0) 2486 ddt_phys_free(ddt, ddk, ddp, zio->io_txg); 2487 ddt_phys_fill(ddp, bp); 2488 } 2489 2490 ddt_exit(ddt); 2491 } 2492 2493 static int 2494 zio_ddt_write(zio_t *zio) 2495 { 2496 spa_t *spa = zio->io_spa; 2497 blkptr_t *bp = zio->io_bp; 2498 uint64_t txg = zio->io_txg; 2499 zio_prop_t *zp = &zio->io_prop; 2500 int p = zp->zp_copies; 2501 int ditto_copies; 2502 zio_t *cio = NULL; 2503 zio_t *dio = NULL; 2504 ddt_t *ddt = ddt_select(spa, bp); 2505 ddt_entry_t *dde; 2506 ddt_phys_t *ddp; 2507 2508 ASSERT(BP_GET_DEDUP(bp)); 2509 ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum); 2510 ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override); 2511 2512 ddt_enter(ddt); 2513 dde = ddt_lookup(ddt, bp, B_TRUE); 2514 ddp = &dde->dde_phys[p]; 2515 2516 if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) { 2517 /* 2518 * If we're using a weak checksum, upgrade to a strong checksum 2519 * and try again. If we're already using a strong checksum, 2520 * we can't resolve it, so just convert to an ordinary write. 2521 * (And automatically e-mail a paper to Nature?) 2522 */ 2523 if (!(zio_checksum_table[zp->zp_checksum].ci_flags & 2524 ZCHECKSUM_FLAG_DEDUP)) { 2525 zp->zp_checksum = spa_dedup_checksum(spa); 2526 zio_pop_transforms(zio); 2527 zio->io_stage = ZIO_STAGE_OPEN; 2528 BP_ZERO(bp); 2529 } else { 2530 zp->zp_dedup = B_FALSE; 2531 } 2532 zio->io_pipeline = ZIO_WRITE_PIPELINE; 2533 ddt_exit(ddt); 2534 return (ZIO_PIPELINE_CONTINUE); 2535 } 2536 2537 ditto_copies = ddt_ditto_copies_needed(ddt, dde, ddp); 2538 ASSERT(ditto_copies < SPA_DVAS_PER_BP); 2539 2540 if (ditto_copies > ddt_ditto_copies_present(dde) && 2541 dde->dde_lead_zio[DDT_PHYS_DITTO] == NULL) { 2542 zio_prop_t czp = *zp; 2543 2544 czp.zp_copies = ditto_copies; 2545 2546 /* 2547 * If we arrived here with an override bp, we won't have run 2548 * the transform stack, so we won't have the data we need to 2549 * generate a child i/o. So, toss the override bp and restart. 2550 * This is safe, because using the override bp is just an 2551 * optimization; and it's rare, so the cost doesn't matter. 2552 */ 2553 if (zio->io_bp_override) { 2554 zio_pop_transforms(zio); 2555 zio->io_stage = ZIO_STAGE_OPEN; 2556 zio->io_pipeline = ZIO_WRITE_PIPELINE; 2557 zio->io_bp_override = NULL; 2558 BP_ZERO(bp); 2559 ddt_exit(ddt); 2560 return (ZIO_PIPELINE_CONTINUE); 2561 } 2562 2563 dio = zio_write(zio, spa, txg, bp, zio->io_orig_data, 2564 zio->io_orig_size, &czp, NULL, NULL, 2565 NULL, zio_ddt_ditto_write_done, dde, zio->io_priority, 2566 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark); 2567 2568 zio_push_transform(dio, zio->io_data, zio->io_size, 0, NULL); 2569 dde->dde_lead_zio[DDT_PHYS_DITTO] = dio; 2570 } 2571 2572 if (ddp->ddp_phys_birth != 0 || dde->dde_lead_zio[p] != NULL) { 2573 if (ddp->ddp_phys_birth != 0) 2574 ddt_bp_fill(ddp, bp, txg); 2575 if (dde->dde_lead_zio[p] != NULL) 2576 zio_add_child(zio, dde->dde_lead_zio[p]); 2577 else 2578 ddt_phys_addref(ddp); 2579 } else if (zio->io_bp_override) { 2580 ASSERT(bp->blk_birth == txg); 2581 ASSERT(BP_EQUAL(bp, zio->io_bp_override)); 2582 ddt_phys_fill(ddp, bp); 2583 ddt_phys_addref(ddp); 2584 } else { 2585 cio = zio_write(zio, spa, txg, bp, zio->io_orig_data, 2586 zio->io_orig_size, zp, 2587 zio_ddt_child_write_ready, NULL, NULL, 2588 zio_ddt_child_write_done, dde, zio->io_priority, 2589 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark); 2590 2591 zio_push_transform(cio, zio->io_data, zio->io_size, 0, NULL); 2592 dde->dde_lead_zio[p] = cio; 2593 } 2594 2595 ddt_exit(ddt); 2596 2597 if (cio) 2598 zio_nowait(cio); 2599 if (dio) 2600 zio_nowait(dio); 2601 2602 return (ZIO_PIPELINE_CONTINUE); 2603 } 2604 2605 ddt_entry_t *freedde; /* for debugging */ 2606 2607 static int 2608 zio_ddt_free(zio_t *zio) 2609 { 2610 spa_t *spa = zio->io_spa; 2611 blkptr_t *bp = zio->io_bp; 2612 ddt_t *ddt = ddt_select(spa, bp); 2613 ddt_entry_t *dde; 2614 ddt_phys_t *ddp; 2615 2616 ASSERT(BP_GET_DEDUP(bp)); 2617 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2618 2619 ddt_enter(ddt); 2620 freedde = dde = ddt_lookup(ddt, bp, B_TRUE); 2621 ddp = ddt_phys_select(dde, bp); 2622 ddt_phys_decref(ddp); 2623 ddt_exit(ddt); 2624 2625 return (ZIO_PIPELINE_CONTINUE); 2626 } 2627 2628 /* 2629 * ========================================================================== 2630 * Allocate and free blocks 2631 * ========================================================================== 2632 */ 2633 2634 static zio_t * 2635 zio_io_to_allocate(spa_t *spa) 2636 { 2637 zio_t *zio; 2638 2639 ASSERT(MUTEX_HELD(&spa->spa_alloc_lock)); 2640 2641 zio = avl_first(&spa->spa_alloc_tree); 2642 if (zio == NULL) 2643 return (NULL); 2644 2645 ASSERT(IO_IS_ALLOCATING(zio)); 2646 2647 /* 2648 * Try to place a reservation for this zio. If we're unable to 2649 * reserve then we throttle. 2650 */ 2651 if (!metaslab_class_throttle_reserve(spa_normal_class(spa), 2652 zio->io_prop.zp_copies, zio, 0)) { 2653 return (NULL); 2654 } 2655 2656 avl_remove(&spa->spa_alloc_tree, zio); 2657 ASSERT3U(zio->io_stage, <, ZIO_STAGE_DVA_ALLOCATE); 2658 2659 return (zio); 2660 } 2661 2662 static int 2663 zio_dva_throttle(zio_t *zio) 2664 { 2665 spa_t *spa = zio->io_spa; 2666 zio_t *nio; 2667 2668 if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE || 2669 !spa_normal_class(zio->io_spa)->mc_alloc_throttle_enabled || 2670 zio->io_child_type == ZIO_CHILD_GANG || 2671 zio->io_flags & ZIO_FLAG_NODATA) { 2672 return (ZIO_PIPELINE_CONTINUE); 2673 } 2674 2675 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 2676 2677 ASSERT3U(zio->io_queued_timestamp, >, 0); 2678 ASSERT(zio->io_stage == ZIO_STAGE_DVA_THROTTLE); 2679 2680 mutex_enter(&spa->spa_alloc_lock); 2681 2682 ASSERT(zio->io_type == ZIO_TYPE_WRITE); 2683 avl_add(&spa->spa_alloc_tree, zio); 2684 2685 nio = zio_io_to_allocate(zio->io_spa); 2686 mutex_exit(&spa->spa_alloc_lock); 2687 2688 if (nio == zio) 2689 return (ZIO_PIPELINE_CONTINUE); 2690 2691 if (nio != NULL) { 2692 ASSERT3U(nio->io_queued_timestamp, <=, 2693 zio->io_queued_timestamp); 2694 ASSERT(nio->io_stage == ZIO_STAGE_DVA_THROTTLE); 2695 /* 2696 * We are passing control to a new zio so make sure that 2697 * it is processed by a different thread. We do this to 2698 * avoid stack overflows that can occur when parents are 2699 * throttled and children are making progress. We allow 2700 * it to go to the head of the taskq since it's already 2701 * been waiting. 2702 */ 2703 zio_taskq_dispatch(nio, ZIO_TASKQ_ISSUE, B_TRUE); 2704 } 2705 return (ZIO_PIPELINE_STOP); 2706 } 2707 2708 void 2709 zio_allocate_dispatch(spa_t *spa) 2710 { 2711 zio_t *zio; 2712 2713 mutex_enter(&spa->spa_alloc_lock); 2714 zio = zio_io_to_allocate(spa); 2715 mutex_exit(&spa->spa_alloc_lock); 2716 if (zio == NULL) 2717 return; 2718 2719 ASSERT3U(zio->io_stage, ==, ZIO_STAGE_DVA_THROTTLE); 2720 ASSERT0(zio->io_error); 2721 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_TRUE); 2722 } 2723 2724 static int 2725 zio_dva_allocate(zio_t *zio) 2726 { 2727 spa_t *spa = zio->io_spa; 2728 metaslab_class_t *mc = spa_normal_class(spa); 2729 blkptr_t *bp = zio->io_bp; 2730 int error; 2731 int flags = 0; 2732 2733 if (zio->io_gang_leader == NULL) { 2734 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 2735 zio->io_gang_leader = zio; 2736 } 2737 2738 ASSERT(BP_IS_HOLE(bp)); 2739 ASSERT0(BP_GET_NDVAS(bp)); 2740 ASSERT3U(zio->io_prop.zp_copies, >, 0); 2741 ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa)); 2742 ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp)); 2743 2744 if (zio->io_flags & ZIO_FLAG_NODATA) { 2745 flags |= METASLAB_DONT_THROTTLE; 2746 } 2747 if (zio->io_flags & ZIO_FLAG_GANG_CHILD) { 2748 flags |= METASLAB_GANG_CHILD; 2749 } 2750 if (zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE) { 2751 flags |= METASLAB_ASYNC_ALLOC; 2752 } 2753 2754 error = metaslab_alloc(spa, mc, zio->io_size, bp, 2755 zio->io_prop.zp_copies, zio->io_txg, NULL, flags, zio); 2756 2757 if (error != 0) { 2758 spa_dbgmsg(spa, "%s: metaslab allocation failure: zio %p, " 2759 "size %llu, error %d", spa_name(spa), zio, zio->io_size, 2760 error); 2761 if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE) 2762 return (zio_write_gang_block(zio)); 2763 zio->io_error = error; 2764 } 2765 2766 return (ZIO_PIPELINE_CONTINUE); 2767 } 2768 2769 static int 2770 zio_dva_free(zio_t *zio) 2771 { 2772 metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE); 2773 2774 return (ZIO_PIPELINE_CONTINUE); 2775 } 2776 2777 static int 2778 zio_dva_claim(zio_t *zio) 2779 { 2780 int error; 2781 2782 error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg); 2783 if (error) 2784 zio->io_error = error; 2785 2786 return (ZIO_PIPELINE_CONTINUE); 2787 } 2788 2789 /* 2790 * Undo an allocation. This is used by zio_done() when an I/O fails 2791 * and we want to give back the block we just allocated. 2792 * This handles both normal blocks and gang blocks. 2793 */ 2794 static void 2795 zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp) 2796 { 2797 ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp)); 2798 ASSERT(zio->io_bp_override == NULL); 2799 2800 if (!BP_IS_HOLE(bp)) 2801 metaslab_free(zio->io_spa, bp, bp->blk_birth, B_TRUE); 2802 2803 if (gn != NULL) { 2804 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 2805 zio_dva_unallocate(zio, gn->gn_child[g], 2806 &gn->gn_gbh->zg_blkptr[g]); 2807 } 2808 } 2809 } 2810 2811 /* 2812 * Try to allocate an intent log block. Return 0 on success, errno on failure. 2813 */ 2814 int 2815 zio_alloc_zil(spa_t *spa, uint64_t txg, blkptr_t *new_bp, blkptr_t *old_bp, 2816 uint64_t size, boolean_t use_slog) 2817 { 2818 int error = 1; 2819 2820 ASSERT(txg > spa_syncing_txg(spa)); 2821 2822 if (use_slog) { 2823 error = metaslab_alloc(spa, spa_log_class(spa), size, 2824 new_bp, 1, txg, old_bp, METASLAB_HINTBP_AVOID, NULL); 2825 } 2826 2827 if (error) { 2828 error = metaslab_alloc(spa, spa_normal_class(spa), size, 2829 new_bp, 1, txg, old_bp, METASLAB_HINTBP_AVOID, NULL); 2830 } 2831 2832 if (error == 0) { 2833 BP_SET_LSIZE(new_bp, size); 2834 BP_SET_PSIZE(new_bp, size); 2835 BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF); 2836 BP_SET_CHECKSUM(new_bp, 2837 spa_version(spa) >= SPA_VERSION_SLIM_ZIL 2838 ? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG); 2839 BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG); 2840 BP_SET_LEVEL(new_bp, 0); 2841 BP_SET_DEDUP(new_bp, 0); 2842 BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER); 2843 } 2844 2845 return (error); 2846 } 2847 2848 /* 2849 * Free an intent log block. 2850 */ 2851 void 2852 zio_free_zil(spa_t *spa, uint64_t txg, blkptr_t *bp) 2853 { 2854 ASSERT(BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG); 2855 ASSERT(!BP_IS_GANG(bp)); 2856 2857 zio_free(spa, txg, bp); 2858 } 2859 2860 /* 2861 * ========================================================================== 2862 * Read and write to physical devices 2863 * ========================================================================== 2864 */ 2865 2866 2867 /* 2868 * Issue an I/O to the underlying vdev. Typically the issue pipeline 2869 * stops after this stage and will resume upon I/O completion. 2870 * However, there are instances where the vdev layer may need to 2871 * continue the pipeline when an I/O was not issued. Since the I/O 2872 * that was sent to the vdev layer might be different than the one 2873 * currently active in the pipeline (see vdev_queue_io()), we explicitly 2874 * force the underlying vdev layers to call either zio_execute() or 2875 * zio_interrupt() to ensure that the pipeline continues with the correct I/O. 2876 */ 2877 static int 2878 zio_vdev_io_start(zio_t *zio) 2879 { 2880 vdev_t *vd = zio->io_vd; 2881 uint64_t align; 2882 spa_t *spa = zio->io_spa; 2883 2884 ASSERT(zio->io_error == 0); 2885 ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0); 2886 2887 if (vd == NULL) { 2888 if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) 2889 spa_config_enter(spa, SCL_ZIO, zio, RW_READER); 2890 2891 /* 2892 * The mirror_ops handle multiple DVAs in a single BP. 2893 */ 2894 vdev_mirror_ops.vdev_op_io_start(zio); 2895 return (ZIO_PIPELINE_STOP); 2896 } 2897 2898 ASSERT3P(zio->io_logical, !=, zio); 2899 2900 /* 2901 * We keep track of time-sensitive I/Os so that the scan thread 2902 * can quickly react to certain workloads. In particular, we care 2903 * about non-scrubbing, top-level reads and writes with the following 2904 * characteristics: 2905 * - synchronous writes of user data to non-slog devices 2906 * - any reads of user data 2907 * When these conditions are met, adjust the timestamp of spa_last_io 2908 * which allows the scan thread to adjust its workload accordingly. 2909 */ 2910 if (!(zio->io_flags & ZIO_FLAG_SCAN_THREAD) && zio->io_bp != NULL && 2911 vd == vd->vdev_top && !vd->vdev_islog && 2912 zio->io_bookmark.zb_objset != DMU_META_OBJSET && 2913 zio->io_txg != spa_syncing_txg(spa)) { 2914 uint64_t old = spa->spa_last_io; 2915 uint64_t new = ddi_get_lbolt64(); 2916 if (old != new) 2917 (void) atomic_cas_64(&spa->spa_last_io, old, new); 2918 } 2919 2920 align = 1ULL << vd->vdev_top->vdev_ashift; 2921 2922 if (!(zio->io_flags & ZIO_FLAG_PHYSICAL) && 2923 P2PHASE(zio->io_size, align) != 0) { 2924 /* Transform logical writes to be a full physical block size. */ 2925 uint64_t asize = P2ROUNDUP(zio->io_size, align); 2926 char *abuf = zio_buf_alloc(asize); 2927 ASSERT(vd == vd->vdev_top); 2928 if (zio->io_type == ZIO_TYPE_WRITE) { 2929 bcopy(zio->io_data, abuf, zio->io_size); 2930 bzero(abuf + zio->io_size, asize - zio->io_size); 2931 } 2932 zio_push_transform(zio, abuf, asize, asize, zio_subblock); 2933 } 2934 2935 /* 2936 * If this is not a physical io, make sure that it is properly aligned 2937 * before proceeding. 2938 */ 2939 if (!(zio->io_flags & ZIO_FLAG_PHYSICAL)) { 2940 ASSERT0(P2PHASE(zio->io_offset, align)); 2941 ASSERT0(P2PHASE(zio->io_size, align)); 2942 } else { 2943 /* 2944 * For physical writes, we allow 512b aligned writes and assume 2945 * the device will perform a read-modify-write as necessary. 2946 */ 2947 ASSERT0(P2PHASE(zio->io_offset, SPA_MINBLOCKSIZE)); 2948 ASSERT0(P2PHASE(zio->io_size, SPA_MINBLOCKSIZE)); 2949 } 2950 2951 VERIFY(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa)); 2952 2953 /* 2954 * If this is a repair I/O, and there's no self-healing involved -- 2955 * that is, we're just resilvering what we expect to resilver -- 2956 * then don't do the I/O unless zio's txg is actually in vd's DTL. 2957 * This prevents spurious resilvering with nested replication. 2958 * For example, given a mirror of mirrors, (A+B)+(C+D), if only 2959 * A is out of date, we'll read from C+D, then use the data to 2960 * resilver A+B -- but we don't actually want to resilver B, just A. 2961 * The top-level mirror has no way to know this, so instead we just 2962 * discard unnecessary repairs as we work our way down the vdev tree. 2963 * The same logic applies to any form of nested replication: 2964 * ditto + mirror, RAID-Z + replacing, etc. This covers them all. 2965 */ 2966 if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) && 2967 !(zio->io_flags & ZIO_FLAG_SELF_HEAL) && 2968 zio->io_txg != 0 && /* not a delegated i/o */ 2969 !vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) { 2970 ASSERT(zio->io_type == ZIO_TYPE_WRITE); 2971 zio_vdev_io_bypass(zio); 2972 return (ZIO_PIPELINE_CONTINUE); 2973 } 2974 2975 if (vd->vdev_ops->vdev_op_leaf && 2976 (zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE)) { 2977 2978 if (zio->io_type == ZIO_TYPE_READ && vdev_cache_read(zio)) 2979 return (ZIO_PIPELINE_CONTINUE); 2980 2981 if ((zio = vdev_queue_io(zio)) == NULL) 2982 return (ZIO_PIPELINE_STOP); 2983 2984 if (!vdev_accessible(vd, zio)) { 2985 zio->io_error = SET_ERROR(ENXIO); 2986 zio_interrupt(zio); 2987 return (ZIO_PIPELINE_STOP); 2988 } 2989 } 2990 2991 vd->vdev_ops->vdev_op_io_start(zio); 2992 return (ZIO_PIPELINE_STOP); 2993 } 2994 2995 static int 2996 zio_vdev_io_done(zio_t *zio) 2997 { 2998 vdev_t *vd = zio->io_vd; 2999 vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops; 3000 boolean_t unexpected_error = B_FALSE; 3001 3002 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE)) 3003 return (ZIO_PIPELINE_STOP); 3004 3005 ASSERT(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE); 3006 3007 if (vd != NULL && vd->vdev_ops->vdev_op_leaf) { 3008 3009 vdev_queue_io_done(zio); 3010 3011 if (zio->io_type == ZIO_TYPE_WRITE) 3012 vdev_cache_write(zio); 3013 3014 if (zio_injection_enabled && zio->io_error == 0) 3015 zio->io_error = zio_handle_device_injection(vd, 3016 zio, EIO); 3017 3018 if (zio_injection_enabled && zio->io_error == 0) 3019 zio->io_error = zio_handle_label_injection(zio, EIO); 3020 3021 if (zio->io_error) { 3022 if (!vdev_accessible(vd, zio)) { 3023 zio->io_error = SET_ERROR(ENXIO); 3024 } else { 3025 unexpected_error = B_TRUE; 3026 } 3027 } 3028 } 3029 3030 ops->vdev_op_io_done(zio); 3031 3032 if (unexpected_error) 3033 VERIFY(vdev_probe(vd, zio) == NULL); 3034 3035 return (ZIO_PIPELINE_CONTINUE); 3036 } 3037 3038 /* 3039 * For non-raidz ZIOs, we can just copy aside the bad data read from the 3040 * disk, and use that to finish the checksum ereport later. 3041 */ 3042 static void 3043 zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr, 3044 const void *good_buf) 3045 { 3046 /* no processing needed */ 3047 zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE); 3048 } 3049 3050 /*ARGSUSED*/ 3051 void 3052 zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr, void *ignored) 3053 { 3054 void *buf = zio_buf_alloc(zio->io_size); 3055 3056 bcopy(zio->io_data, buf, zio->io_size); 3057 3058 zcr->zcr_cbinfo = zio->io_size; 3059 zcr->zcr_cbdata = buf; 3060 zcr->zcr_finish = zio_vsd_default_cksum_finish; 3061 zcr->zcr_free = zio_buf_free; 3062 } 3063 3064 static int 3065 zio_vdev_io_assess(zio_t *zio) 3066 { 3067 vdev_t *vd = zio->io_vd; 3068 3069 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE)) 3070 return (ZIO_PIPELINE_STOP); 3071 3072 if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) 3073 spa_config_exit(zio->io_spa, SCL_ZIO, zio); 3074 3075 if (zio->io_vsd != NULL) { 3076 zio->io_vsd_ops->vsd_free(zio); 3077 zio->io_vsd = NULL; 3078 } 3079 3080 if (zio_injection_enabled && zio->io_error == 0) 3081 zio->io_error = zio_handle_fault_injection(zio, EIO); 3082 3083 /* 3084 * If the I/O failed, determine whether we should attempt to retry it. 3085 * 3086 * On retry, we cut in line in the issue queue, since we don't want 3087 * compression/checksumming/etc. work to prevent our (cheap) IO reissue. 3088 */ 3089 if (zio->io_error && vd == NULL && 3090 !(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) { 3091 ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE)); /* not a leaf */ 3092 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS)); /* not a leaf */ 3093 zio->io_error = 0; 3094 zio->io_flags |= ZIO_FLAG_IO_RETRY | 3095 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE; 3096 zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1; 3097 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, 3098 zio_requeue_io_start_cut_in_line); 3099 return (ZIO_PIPELINE_STOP); 3100 } 3101 3102 /* 3103 * If we got an error on a leaf device, convert it to ENXIO 3104 * if the device is not accessible at all. 3105 */ 3106 if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf && 3107 !vdev_accessible(vd, zio)) 3108 zio->io_error = SET_ERROR(ENXIO); 3109 3110 /* 3111 * If we can't write to an interior vdev (mirror or RAID-Z), 3112 * set vdev_cant_write so that we stop trying to allocate from it. 3113 */ 3114 if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE && 3115 vd != NULL && !vd->vdev_ops->vdev_op_leaf) { 3116 vd->vdev_cant_write = B_TRUE; 3117 } 3118 3119 /* 3120 * If a cache flush returns ENOTSUP or ENOTTY, we know that no future 3121 * attempts will ever succeed. In this case we set a persistent bit so 3122 * that we don't bother with it in the future. 3123 */ 3124 if ((zio->io_error == ENOTSUP || zio->io_error == ENOTTY) && 3125 zio->io_type == ZIO_TYPE_IOCTL && 3126 zio->io_cmd == DKIOCFLUSHWRITECACHE && vd != NULL) 3127 vd->vdev_nowritecache = B_TRUE; 3128 3129 if (zio->io_error) 3130 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 3131 3132 if (vd != NULL && vd->vdev_ops->vdev_op_leaf && 3133 zio->io_physdone != NULL) { 3134 ASSERT(!(zio->io_flags & ZIO_FLAG_DELEGATED)); 3135 ASSERT(zio->io_child_type == ZIO_CHILD_VDEV); 3136 zio->io_physdone(zio->io_logical); 3137 } 3138 3139 return (ZIO_PIPELINE_CONTINUE); 3140 } 3141 3142 void 3143 zio_vdev_io_reissue(zio_t *zio) 3144 { 3145 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); 3146 ASSERT(zio->io_error == 0); 3147 3148 zio->io_stage >>= 1; 3149 } 3150 3151 void 3152 zio_vdev_io_redone(zio_t *zio) 3153 { 3154 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE); 3155 3156 zio->io_stage >>= 1; 3157 } 3158 3159 void 3160 zio_vdev_io_bypass(zio_t *zio) 3161 { 3162 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); 3163 ASSERT(zio->io_error == 0); 3164 3165 zio->io_flags |= ZIO_FLAG_IO_BYPASS; 3166 zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1; 3167 } 3168 3169 /* 3170 * ========================================================================== 3171 * Generate and verify checksums 3172 * ========================================================================== 3173 */ 3174 static int 3175 zio_checksum_generate(zio_t *zio) 3176 { 3177 blkptr_t *bp = zio->io_bp; 3178 enum zio_checksum checksum; 3179 3180 if (bp == NULL) { 3181 /* 3182 * This is zio_write_phys(). 3183 * We're either generating a label checksum, or none at all. 3184 */ 3185 checksum = zio->io_prop.zp_checksum; 3186 3187 if (checksum == ZIO_CHECKSUM_OFF) 3188 return (ZIO_PIPELINE_CONTINUE); 3189 3190 ASSERT(checksum == ZIO_CHECKSUM_LABEL); 3191 } else { 3192 if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) { 3193 ASSERT(!IO_IS_ALLOCATING(zio)); 3194 checksum = ZIO_CHECKSUM_GANG_HEADER; 3195 } else { 3196 checksum = BP_GET_CHECKSUM(bp); 3197 } 3198 } 3199 3200 zio_checksum_compute(zio, checksum, zio->io_data, zio->io_size); 3201 3202 return (ZIO_PIPELINE_CONTINUE); 3203 } 3204 3205 static int 3206 zio_checksum_verify(zio_t *zio) 3207 { 3208 zio_bad_cksum_t info; 3209 blkptr_t *bp = zio->io_bp; 3210 int error; 3211 3212 ASSERT(zio->io_vd != NULL); 3213 3214 if (bp == NULL) { 3215 /* 3216 * This is zio_read_phys(). 3217 * We're either verifying a label checksum, or nothing at all. 3218 */ 3219 if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF) 3220 return (ZIO_PIPELINE_CONTINUE); 3221 3222 ASSERT(zio->io_prop.zp_checksum == ZIO_CHECKSUM_LABEL); 3223 } 3224 3225 if ((error = zio_checksum_error(zio, &info)) != 0) { 3226 zio->io_error = error; 3227 if (error == ECKSUM && 3228 !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) { 3229 zfs_ereport_start_checksum(zio->io_spa, 3230 zio->io_vd, zio, zio->io_offset, 3231 zio->io_size, NULL, &info); 3232 } 3233 } 3234 3235 return (ZIO_PIPELINE_CONTINUE); 3236 } 3237 3238 /* 3239 * Called by RAID-Z to ensure we don't compute the checksum twice. 3240 */ 3241 void 3242 zio_checksum_verified(zio_t *zio) 3243 { 3244 zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY; 3245 } 3246 3247 /* 3248 * ========================================================================== 3249 * Error rank. Error are ranked in the order 0, ENXIO, ECKSUM, EIO, other. 3250 * An error of 0 indicates success. ENXIO indicates whole-device failure, 3251 * which may be transient (e.g. unplugged) or permament. ECKSUM and EIO 3252 * indicate errors that are specific to one I/O, and most likely permanent. 3253 * Any other error is presumed to be worse because we weren't expecting it. 3254 * ========================================================================== 3255 */ 3256 int 3257 zio_worst_error(int e1, int e2) 3258 { 3259 static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO }; 3260 int r1, r2; 3261 3262 for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++) 3263 if (e1 == zio_error_rank[r1]) 3264 break; 3265 3266 for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++) 3267 if (e2 == zio_error_rank[r2]) 3268 break; 3269 3270 return (r1 > r2 ? e1 : e2); 3271 } 3272 3273 /* 3274 * ========================================================================== 3275 * I/O completion 3276 * ========================================================================== 3277 */ 3278 static int 3279 zio_ready(zio_t *zio) 3280 { 3281 blkptr_t *bp = zio->io_bp; 3282 zio_t *pio, *pio_next; 3283 zio_link_t *zl = NULL; 3284 3285 if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_READY) || 3286 zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_READY)) 3287 return (ZIO_PIPELINE_STOP); 3288 3289 if (zio->io_ready) { 3290 ASSERT(IO_IS_ALLOCATING(zio)); 3291 ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp) || 3292 (zio->io_flags & ZIO_FLAG_NOPWRITE)); 3293 ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0); 3294 3295 zio->io_ready(zio); 3296 } 3297 3298 if (bp != NULL && bp != &zio->io_bp_copy) 3299 zio->io_bp_copy = *bp; 3300 3301 if (zio->io_error != 0) { 3302 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 3303 3304 if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 3305 ASSERT(IO_IS_ALLOCATING(zio)); 3306 ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 3307 /* 3308 * We were unable to allocate anything, unreserve and 3309 * issue the next I/O to allocate. 3310 */ 3311 metaslab_class_throttle_unreserve( 3312 spa_normal_class(zio->io_spa), 3313 zio->io_prop.zp_copies, zio); 3314 zio_allocate_dispatch(zio->io_spa); 3315 } 3316 } 3317 3318 mutex_enter(&zio->io_lock); 3319 zio->io_state[ZIO_WAIT_READY] = 1; 3320 pio = zio_walk_parents(zio, &zl); 3321 mutex_exit(&zio->io_lock); 3322 3323 /* 3324 * As we notify zio's parents, new parents could be added. 3325 * New parents go to the head of zio's io_parent_list, however, 3326 * so we will (correctly) not notify them. The remainder of zio's 3327 * io_parent_list, from 'pio_next' onward, cannot change because 3328 * all parents must wait for us to be done before they can be done. 3329 */ 3330 for (; pio != NULL; pio = pio_next) { 3331 pio_next = zio_walk_parents(zio, &zl); 3332 zio_notify_parent(pio, zio, ZIO_WAIT_READY); 3333 } 3334 3335 if (zio->io_flags & ZIO_FLAG_NODATA) { 3336 if (BP_IS_GANG(bp)) { 3337 zio->io_flags &= ~ZIO_FLAG_NODATA; 3338 } else { 3339 ASSERT((uintptr_t)zio->io_data < SPA_MAXBLOCKSIZE); 3340 zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES; 3341 } 3342 } 3343 3344 if (zio_injection_enabled && 3345 zio->io_spa->spa_syncing_txg == zio->io_txg) 3346 zio_handle_ignored_writes(zio); 3347 3348 return (ZIO_PIPELINE_CONTINUE); 3349 } 3350 3351 /* 3352 * Update the allocation throttle accounting. 3353 */ 3354 static void 3355 zio_dva_throttle_done(zio_t *zio) 3356 { 3357 zio_t *lio = zio->io_logical; 3358 zio_t *pio = zio_unique_parent(zio); 3359 vdev_t *vd = zio->io_vd; 3360 int flags = METASLAB_ASYNC_ALLOC; 3361 3362 ASSERT3P(zio->io_bp, !=, NULL); 3363 ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE); 3364 ASSERT3U(zio->io_priority, ==, ZIO_PRIORITY_ASYNC_WRITE); 3365 ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV); 3366 ASSERT(vd != NULL); 3367 ASSERT3P(vd, ==, vd->vdev_top); 3368 ASSERT(!(zio->io_flags & (ZIO_FLAG_IO_REPAIR | ZIO_FLAG_IO_RETRY))); 3369 ASSERT(zio->io_flags & ZIO_FLAG_IO_ALLOCATING); 3370 ASSERT(!(lio->io_flags & ZIO_FLAG_IO_REWRITE)); 3371 ASSERT(!(lio->io_orig_flags & ZIO_FLAG_NODATA)); 3372 3373 /* 3374 * Parents of gang children can have two flavors -- ones that 3375 * allocated the gang header (will have ZIO_FLAG_IO_REWRITE set) 3376 * and ones that allocated the constituent blocks. The allocation 3377 * throttle needs to know the allocating parent zio so we must find 3378 * it here. 3379 */ 3380 if (pio->io_child_type == ZIO_CHILD_GANG) { 3381 /* 3382 * If our parent is a rewrite gang child then our grandparent 3383 * would have been the one that performed the allocation. 3384 */ 3385 if (pio->io_flags & ZIO_FLAG_IO_REWRITE) 3386 pio = zio_unique_parent(pio); 3387 flags |= METASLAB_GANG_CHILD; 3388 } 3389 3390 ASSERT(IO_IS_ALLOCATING(pio)); 3391 ASSERT3P(zio, !=, zio->io_logical); 3392 ASSERT(zio->io_logical != NULL); 3393 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR)); 3394 ASSERT0(zio->io_flags & ZIO_FLAG_NOPWRITE); 3395 3396 mutex_enter(&pio->io_lock); 3397 metaslab_group_alloc_decrement(zio->io_spa, vd->vdev_id, pio, flags); 3398 mutex_exit(&pio->io_lock); 3399 3400 metaslab_class_throttle_unreserve(spa_normal_class(zio->io_spa), 3401 1, pio); 3402 3403 /* 3404 * Call into the pipeline to see if there is more work that 3405 * needs to be done. If there is work to be done it will be 3406 * dispatched to another taskq thread. 3407 */ 3408 zio_allocate_dispatch(zio->io_spa); 3409 } 3410 3411 static int 3412 zio_done(zio_t *zio) 3413 { 3414 spa_t *spa = zio->io_spa; 3415 zio_t *lio = zio->io_logical; 3416 blkptr_t *bp = zio->io_bp; 3417 vdev_t *vd = zio->io_vd; 3418 uint64_t psize = zio->io_size; 3419 zio_t *pio, *pio_next; 3420 metaslab_class_t *mc = spa_normal_class(spa); 3421 zio_link_t *zl = NULL; 3422 3423 /* 3424 * If our children haven't all completed, 3425 * wait for them and then repeat this pipeline stage. 3426 */ 3427 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE) || 3428 zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_DONE) || 3429 zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_DONE) || 3430 zio_wait_for_children(zio, ZIO_CHILD_LOGICAL, ZIO_WAIT_DONE)) 3431 return (ZIO_PIPELINE_STOP); 3432 3433 /* 3434 * If the allocation throttle is enabled, then update the accounting. 3435 * We only track child I/Os that are part of an allocating async 3436 * write. We must do this since the allocation is performed 3437 * by the logical I/O but the actual write is done by child I/Os. 3438 */ 3439 if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING && 3440 zio->io_child_type == ZIO_CHILD_VDEV) { 3441 ASSERT(mc->mc_alloc_throttle_enabled); 3442 zio_dva_throttle_done(zio); 3443 } 3444 3445 /* 3446 * If the allocation throttle is enabled, verify that 3447 * we have decremented the refcounts for every I/O that was throttled. 3448 */ 3449 if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 3450 ASSERT(zio->io_type == ZIO_TYPE_WRITE); 3451 ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 3452 ASSERT(bp != NULL); 3453 metaslab_group_alloc_verify(spa, zio->io_bp, zio); 3454 VERIFY(refcount_not_held(&mc->mc_alloc_slots, zio)); 3455 } 3456 3457 for (int c = 0; c < ZIO_CHILD_TYPES; c++) 3458 for (int w = 0; w < ZIO_WAIT_TYPES; w++) 3459 ASSERT(zio->io_children[c][w] == 0); 3460 3461 if (bp != NULL && !BP_IS_EMBEDDED(bp)) { 3462 ASSERT(bp->blk_pad[0] == 0); 3463 ASSERT(bp->blk_pad[1] == 0); 3464 ASSERT(bcmp(bp, &zio->io_bp_copy, sizeof (blkptr_t)) == 0 || 3465 (bp == zio_unique_parent(zio)->io_bp)); 3466 if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(bp) && 3467 zio->io_bp_override == NULL && 3468 !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) { 3469 ASSERT(!BP_SHOULD_BYTESWAP(bp)); 3470 ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(bp)); 3471 ASSERT(BP_COUNT_GANG(bp) == 0 || 3472 (BP_COUNT_GANG(bp) == BP_GET_NDVAS(bp))); 3473 } 3474 if (zio->io_flags & ZIO_FLAG_NOPWRITE) 3475 VERIFY(BP_EQUAL(bp, &zio->io_bp_orig)); 3476 } 3477 3478 /* 3479 * If there were child vdev/gang/ddt errors, they apply to us now. 3480 */ 3481 zio_inherit_child_errors(zio, ZIO_CHILD_VDEV); 3482 zio_inherit_child_errors(zio, ZIO_CHILD_GANG); 3483 zio_inherit_child_errors(zio, ZIO_CHILD_DDT); 3484 3485 /* 3486 * If the I/O on the transformed data was successful, generate any 3487 * checksum reports now while we still have the transformed data. 3488 */ 3489 if (zio->io_error == 0) { 3490 while (zio->io_cksum_report != NULL) { 3491 zio_cksum_report_t *zcr = zio->io_cksum_report; 3492 uint64_t align = zcr->zcr_align; 3493 uint64_t asize = P2ROUNDUP(psize, align); 3494 char *abuf = zio->io_data; 3495 3496 if (asize != psize) { 3497 abuf = zio_buf_alloc(asize); 3498 bcopy(zio->io_data, abuf, psize); 3499 bzero(abuf + psize, asize - psize); 3500 } 3501 3502 zio->io_cksum_report = zcr->zcr_next; 3503 zcr->zcr_next = NULL; 3504 zcr->zcr_finish(zcr, abuf); 3505 zfs_ereport_free_checksum(zcr); 3506 3507 if (asize != psize) 3508 zio_buf_free(abuf, asize); 3509 } 3510 } 3511 3512 zio_pop_transforms(zio); /* note: may set zio->io_error */ 3513 3514 vdev_stat_update(zio, psize); 3515 3516 if (zio->io_error) { 3517 /* 3518 * If this I/O is attached to a particular vdev, 3519 * generate an error message describing the I/O failure 3520 * at the block level. We ignore these errors if the 3521 * device is currently unavailable. 3522 */ 3523 if (zio->io_error != ECKSUM && vd != NULL && !vdev_is_dead(vd)) 3524 zfs_ereport_post(FM_EREPORT_ZFS_IO, spa, vd, zio, 0, 0); 3525 3526 if ((zio->io_error == EIO || !(zio->io_flags & 3527 (ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) && 3528 zio == lio) { 3529 /* 3530 * For logical I/O requests, tell the SPA to log the 3531 * error and generate a logical data ereport. 3532 */ 3533 spa_log_error(spa, zio); 3534 zfs_ereport_post(FM_EREPORT_ZFS_DATA, spa, NULL, zio, 3535 0, 0); 3536 } 3537 } 3538 3539 if (zio->io_error && zio == lio) { 3540 /* 3541 * Determine whether zio should be reexecuted. This will 3542 * propagate all the way to the root via zio_notify_parent(). 3543 */ 3544 ASSERT(vd == NULL && bp != NULL); 3545 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 3546 3547 if (IO_IS_ALLOCATING(zio) && 3548 !(zio->io_flags & ZIO_FLAG_CANFAIL)) { 3549 if (zio->io_error != ENOSPC) 3550 zio->io_reexecute |= ZIO_REEXECUTE_NOW; 3551 else 3552 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 3553 } 3554 3555 if ((zio->io_type == ZIO_TYPE_READ || 3556 zio->io_type == ZIO_TYPE_FREE) && 3557 !(zio->io_flags & ZIO_FLAG_SCAN_THREAD) && 3558 zio->io_error == ENXIO && 3559 spa_load_state(spa) == SPA_LOAD_NONE && 3560 spa_get_failmode(spa) != ZIO_FAILURE_MODE_CONTINUE) 3561 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 3562 3563 if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute) 3564 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 3565 3566 /* 3567 * Here is a possibly good place to attempt to do 3568 * either combinatorial reconstruction or error correction 3569 * based on checksums. It also might be a good place 3570 * to send out preliminary ereports before we suspend 3571 * processing. 3572 */ 3573 } 3574 3575 /* 3576 * If there were logical child errors, they apply to us now. 3577 * We defer this until now to avoid conflating logical child 3578 * errors with errors that happened to the zio itself when 3579 * updating vdev stats and reporting FMA events above. 3580 */ 3581 zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL); 3582 3583 if ((zio->io_error || zio->io_reexecute) && 3584 IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio && 3585 !(zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE))) 3586 zio_dva_unallocate(zio, zio->io_gang_tree, bp); 3587 3588 zio_gang_tree_free(&zio->io_gang_tree); 3589 3590 /* 3591 * Godfather I/Os should never suspend. 3592 */ 3593 if ((zio->io_flags & ZIO_FLAG_GODFATHER) && 3594 (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) 3595 zio->io_reexecute = 0; 3596 3597 if (zio->io_reexecute) { 3598 /* 3599 * This is a logical I/O that wants to reexecute. 3600 * 3601 * Reexecute is top-down. When an i/o fails, if it's not 3602 * the root, it simply notifies its parent and sticks around. 3603 * The parent, seeing that it still has children in zio_done(), 3604 * does the same. This percolates all the way up to the root. 3605 * The root i/o will reexecute or suspend the entire tree. 3606 * 3607 * This approach ensures that zio_reexecute() honors 3608 * all the original i/o dependency relationships, e.g. 3609 * parents not executing until children are ready. 3610 */ 3611 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 3612 3613 zio->io_gang_leader = NULL; 3614 3615 mutex_enter(&zio->io_lock); 3616 zio->io_state[ZIO_WAIT_DONE] = 1; 3617 mutex_exit(&zio->io_lock); 3618 3619 /* 3620 * "The Godfather" I/O monitors its children but is 3621 * not a true parent to them. It will track them through 3622 * the pipeline but severs its ties whenever they get into 3623 * trouble (e.g. suspended). This allows "The Godfather" 3624 * I/O to return status without blocking. 3625 */ 3626 zl = NULL; 3627 for (pio = zio_walk_parents(zio, &zl); pio != NULL; 3628 pio = pio_next) { 3629 zio_link_t *remove_zl = zl; 3630 pio_next = zio_walk_parents(zio, &zl); 3631 3632 if ((pio->io_flags & ZIO_FLAG_GODFATHER) && 3633 (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) { 3634 zio_remove_child(pio, zio, remove_zl); 3635 zio_notify_parent(pio, zio, ZIO_WAIT_DONE); 3636 } 3637 } 3638 3639 if ((pio = zio_unique_parent(zio)) != NULL) { 3640 /* 3641 * We're not a root i/o, so there's nothing to do 3642 * but notify our parent. Don't propagate errors 3643 * upward since we haven't permanently failed yet. 3644 */ 3645 ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER)); 3646 zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE; 3647 zio_notify_parent(pio, zio, ZIO_WAIT_DONE); 3648 } else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) { 3649 /* 3650 * We'd fail again if we reexecuted now, so suspend 3651 * until conditions improve (e.g. device comes online). 3652 */ 3653 zio_suspend(spa, zio); 3654 } else { 3655 /* 3656 * Reexecution is potentially a huge amount of work. 3657 * Hand it off to the otherwise-unused claim taskq. 3658 */ 3659 ASSERT(zio->io_tqent.tqent_next == NULL); 3660 spa_taskq_dispatch_ent(spa, ZIO_TYPE_CLAIM, 3661 ZIO_TASKQ_ISSUE, (task_func_t *)zio_reexecute, zio, 3662 0, &zio->io_tqent); 3663 } 3664 return (ZIO_PIPELINE_STOP); 3665 } 3666 3667 ASSERT(zio->io_child_count == 0); 3668 ASSERT(zio->io_reexecute == 0); 3669 ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL)); 3670 3671 /* 3672 * Report any checksum errors, since the I/O is complete. 3673 */ 3674 while (zio->io_cksum_report != NULL) { 3675 zio_cksum_report_t *zcr = zio->io_cksum_report; 3676 zio->io_cksum_report = zcr->zcr_next; 3677 zcr->zcr_next = NULL; 3678 zcr->zcr_finish(zcr, NULL); 3679 zfs_ereport_free_checksum(zcr); 3680 } 3681 3682 /* 3683 * It is the responsibility of the done callback to ensure that this 3684 * particular zio is no longer discoverable for adoption, and as 3685 * such, cannot acquire any new parents. 3686 */ 3687 if (zio->io_done) 3688 zio->io_done(zio); 3689 3690 mutex_enter(&zio->io_lock); 3691 zio->io_state[ZIO_WAIT_DONE] = 1; 3692 mutex_exit(&zio->io_lock); 3693 3694 zl = NULL; 3695 for (pio = zio_walk_parents(zio, &zl); pio != NULL; pio = pio_next) { 3696 zio_link_t *remove_zl = zl; 3697 pio_next = zio_walk_parents(zio, &zl); 3698 zio_remove_child(pio, zio, remove_zl); 3699 zio_notify_parent(pio, zio, ZIO_WAIT_DONE); 3700 } 3701 3702 if (zio->io_waiter != NULL) { 3703 mutex_enter(&zio->io_lock); 3704 zio->io_executor = NULL; 3705 cv_broadcast(&zio->io_cv); 3706 mutex_exit(&zio->io_lock); 3707 } else { 3708 zio_destroy(zio); 3709 } 3710 3711 return (ZIO_PIPELINE_STOP); 3712 } 3713 3714 /* 3715 * ========================================================================== 3716 * I/O pipeline definition 3717 * ========================================================================== 3718 */ 3719 static zio_pipe_stage_t *zio_pipeline[] = { 3720 NULL, 3721 zio_read_bp_init, 3722 zio_write_bp_init, 3723 zio_free_bp_init, 3724 zio_issue_async, 3725 zio_write_compress, 3726 zio_checksum_generate, 3727 zio_nop_write, 3728 zio_ddt_read_start, 3729 zio_ddt_read_done, 3730 zio_ddt_write, 3731 zio_ddt_free, 3732 zio_gang_assemble, 3733 zio_gang_issue, 3734 zio_dva_throttle, 3735 zio_dva_allocate, 3736 zio_dva_free, 3737 zio_dva_claim, 3738 zio_ready, 3739 zio_vdev_io_start, 3740 zio_vdev_io_done, 3741 zio_vdev_io_assess, 3742 zio_checksum_verify, 3743 zio_done 3744 }; 3745 3746 3747 3748 3749 /* 3750 * Compare two zbookmark_phys_t's to see which we would reach first in a 3751 * pre-order traversal of the object tree. 3752 * 3753 * This is simple in every case aside from the meta-dnode object. For all other 3754 * objects, we traverse them in order (object 1 before object 2, and so on). 3755 * However, all of these objects are traversed while traversing object 0, since 3756 * the data it points to is the list of objects. Thus, we need to convert to a 3757 * canonical representation so we can compare meta-dnode bookmarks to 3758 * non-meta-dnode bookmarks. 3759 * 3760 * We do this by calculating "equivalents" for each field of the zbookmark. 3761 * zbookmarks outside of the meta-dnode use their own object and level, and 3762 * calculate the level 0 equivalent (the first L0 blkid that is contained in the 3763 * blocks this bookmark refers to) by multiplying their blkid by their span 3764 * (the number of L0 blocks contained within one block at their level). 3765 * zbookmarks inside the meta-dnode calculate their object equivalent 3766 * (which is L0equiv * dnodes per data block), use 0 for their L0equiv, and use 3767 * level + 1<<31 (any value larger than a level could ever be) for their level. 3768 * This causes them to always compare before a bookmark in their object 3769 * equivalent, compare appropriately to bookmarks in other objects, and to 3770 * compare appropriately to other bookmarks in the meta-dnode. 3771 */ 3772 int 3773 zbookmark_compare(uint16_t dbss1, uint8_t ibs1, uint16_t dbss2, uint8_t ibs2, 3774 const zbookmark_phys_t *zb1, const zbookmark_phys_t *zb2) 3775 { 3776 /* 3777 * These variables represent the "equivalent" values for the zbookmark, 3778 * after converting zbookmarks inside the meta dnode to their 3779 * normal-object equivalents. 3780 */ 3781 uint64_t zb1obj, zb2obj; 3782 uint64_t zb1L0, zb2L0; 3783 uint64_t zb1level, zb2level; 3784 3785 if (zb1->zb_object == zb2->zb_object && 3786 zb1->zb_level == zb2->zb_level && 3787 zb1->zb_blkid == zb2->zb_blkid) 3788 return (0); 3789 3790 /* 3791 * BP_SPANB calculates the span in blocks. 3792 */ 3793 zb1L0 = (zb1->zb_blkid) * BP_SPANB(ibs1, zb1->zb_level); 3794 zb2L0 = (zb2->zb_blkid) * BP_SPANB(ibs2, zb2->zb_level); 3795 3796 if (zb1->zb_object == DMU_META_DNODE_OBJECT) { 3797 zb1obj = zb1L0 * (dbss1 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT)); 3798 zb1L0 = 0; 3799 zb1level = zb1->zb_level + COMPARE_META_LEVEL; 3800 } else { 3801 zb1obj = zb1->zb_object; 3802 zb1level = zb1->zb_level; 3803 } 3804 3805 if (zb2->zb_object == DMU_META_DNODE_OBJECT) { 3806 zb2obj = zb2L0 * (dbss2 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT)); 3807 zb2L0 = 0; 3808 zb2level = zb2->zb_level + COMPARE_META_LEVEL; 3809 } else { 3810 zb2obj = zb2->zb_object; 3811 zb2level = zb2->zb_level; 3812 } 3813 3814 /* Now that we have a canonical representation, do the comparison. */ 3815 if (zb1obj != zb2obj) 3816 return (zb1obj < zb2obj ? -1 : 1); 3817 else if (zb1L0 != zb2L0) 3818 return (zb1L0 < zb2L0 ? -1 : 1); 3819 else if (zb1level != zb2level) 3820 return (zb1level > zb2level ? -1 : 1); 3821 /* 3822 * This can (theoretically) happen if the bookmarks have the same object 3823 * and level, but different blkids, if the block sizes are not the same. 3824 * There is presently no way to change the indirect block sizes 3825 */ 3826 return (0); 3827 } 3828 3829 /* 3830 * This function checks the following: given that last_block is the place that 3831 * our traversal stopped last time, does that guarantee that we've visited 3832 * every node under subtree_root? Therefore, we can't just use the raw output 3833 * of zbookmark_compare. We have to pass in a modified version of 3834 * subtree_root; by incrementing the block id, and then checking whether 3835 * last_block is before or equal to that, we can tell whether or not having 3836 * visited last_block implies that all of subtree_root's children have been 3837 * visited. 3838 */ 3839 boolean_t 3840 zbookmark_subtree_completed(const dnode_phys_t *dnp, 3841 const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block) 3842 { 3843 zbookmark_phys_t mod_zb = *subtree_root; 3844 mod_zb.zb_blkid++; 3845 ASSERT(last_block->zb_level == 0); 3846 3847 /* The objset_phys_t isn't before anything. */ 3848 if (dnp == NULL) 3849 return (B_FALSE); 3850 3851 /* 3852 * We pass in 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT) for the 3853 * data block size in sectors, because that variable is only used if 3854 * the bookmark refers to a block in the meta-dnode. Since we don't 3855 * know without examining it what object it refers to, and there's no 3856 * harm in passing in this value in other cases, we always pass it in. 3857 * 3858 * We pass in 0 for the indirect block size shift because zb2 must be 3859 * level 0. The indirect block size is only used to calculate the span 3860 * of the bookmark, but since the bookmark must be level 0, the span is 3861 * always 1, so the math works out. 3862 * 3863 * If you make changes to how the zbookmark_compare code works, be sure 3864 * to make sure that this code still works afterwards. 3865 */ 3866 return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift, 3867 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, &mod_zb, 3868 last_block) <= 0); 3869 } 3870