1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 23 * Copyright (c) 2012 by Delphix. All rights reserved. 24 * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved. 25 */ 26 27 #include <sys/zfs_context.h> 28 #include <sys/fm/fs/zfs.h> 29 #include <sys/spa.h> 30 #include <sys/txg.h> 31 #include <sys/spa_impl.h> 32 #include <sys/vdev_impl.h> 33 #include <sys/zio_impl.h> 34 #include <sys/zio_compress.h> 35 #include <sys/zio_checksum.h> 36 #include <sys/dmu_objset.h> 37 #include <sys/arc.h> 38 #include <sys/ddt.h> 39 40 /* 41 * ========================================================================== 42 * I/O priority table 43 * ========================================================================== 44 */ 45 uint8_t zio_priority_table[ZIO_PRIORITY_TABLE_SIZE] = { 46 0, /* ZIO_PRIORITY_NOW */ 47 0, /* ZIO_PRIORITY_SYNC_READ */ 48 0, /* ZIO_PRIORITY_SYNC_WRITE */ 49 0, /* ZIO_PRIORITY_LOG_WRITE */ 50 1, /* ZIO_PRIORITY_CACHE_FILL */ 51 1, /* ZIO_PRIORITY_AGG */ 52 4, /* ZIO_PRIORITY_FREE */ 53 4, /* ZIO_PRIORITY_ASYNC_WRITE */ 54 6, /* ZIO_PRIORITY_ASYNC_READ */ 55 10, /* ZIO_PRIORITY_RESILVER */ 56 20, /* ZIO_PRIORITY_SCRUB */ 57 2, /* ZIO_PRIORITY_DDT_PREFETCH */ 58 }; 59 60 /* 61 * ========================================================================== 62 * I/O type descriptions 63 * ========================================================================== 64 */ 65 char *zio_type_name[ZIO_TYPES] = { 66 "zio_null", "zio_read", "zio_write", "zio_free", "zio_claim", 67 "zio_ioctl" 68 }; 69 70 /* 71 * ========================================================================== 72 * I/O kmem caches 73 * ========================================================================== 74 */ 75 kmem_cache_t *zio_cache; 76 kmem_cache_t *zio_link_cache; 77 kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT]; 78 kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT]; 79 80 #ifdef _KERNEL 81 extern vmem_t *zio_alloc_arena; 82 #endif 83 extern int zfs_mg_alloc_failures; 84 85 /* 86 * The following actions directly effect the spa's sync-to-convergence logic. 87 * The values below define the sync pass when we start performing the action. 88 * Care should be taken when changing these values as they directly impact 89 * spa_sync() performance. Tuning these values may introduce subtle performance 90 * pathologies and should only be done in the context of performance analysis. 91 * These tunables will eventually be removed and replaced with #defines once 92 * enough analysis has been done to determine optimal values. 93 * 94 * The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that 95 * regular blocks are not deferred. 96 */ 97 int zfs_sync_pass_deferred_free = 2; /* defer frees starting in this pass */ 98 int zfs_sync_pass_dont_compress = 5; /* don't compress starting in this pass */ 99 int zfs_sync_pass_rewrite = 2; /* rewrite new bps starting in this pass */ 100 101 /* 102 * An allocating zio is one that either currently has the DVA allocate 103 * stage set or will have it later in its lifetime. 104 */ 105 #define IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE) 106 107 boolean_t zio_requeue_io_start_cut_in_line = B_TRUE; 108 109 #ifdef ZFS_DEBUG 110 int zio_buf_debug_limit = 16384; 111 #else 112 int zio_buf_debug_limit = 0; 113 #endif 114 115 void 116 zio_init(void) 117 { 118 size_t c; 119 vmem_t *data_alloc_arena = NULL; 120 121 #ifdef _KERNEL 122 data_alloc_arena = zio_alloc_arena; 123 #endif 124 zio_cache = kmem_cache_create("zio_cache", 125 sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0); 126 zio_link_cache = kmem_cache_create("zio_link_cache", 127 sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0); 128 129 /* 130 * For small buffers, we want a cache for each multiple of 131 * SPA_MINBLOCKSIZE. For medium-size buffers, we want a cache 132 * for each quarter-power of 2. For large buffers, we want 133 * a cache for each multiple of PAGESIZE. 134 */ 135 for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) { 136 size_t size = (c + 1) << SPA_MINBLOCKSHIFT; 137 size_t p2 = size; 138 size_t align = 0; 139 size_t cflags = (size > zio_buf_debug_limit) ? KMC_NODEBUG : 0; 140 141 while (p2 & (p2 - 1)) 142 p2 &= p2 - 1; 143 144 #ifndef _KERNEL 145 /* 146 * If we are using watchpoints, put each buffer on its own page, 147 * to eliminate the performance overhead of trapping to the 148 * kernel when modifying a non-watched buffer that shares the 149 * page with a watched buffer. 150 */ 151 if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE)) 152 continue; 153 #endif 154 if (size <= 4 * SPA_MINBLOCKSIZE) { 155 align = SPA_MINBLOCKSIZE; 156 } else if (IS_P2ALIGNED(size, PAGESIZE)) { 157 align = PAGESIZE; 158 } else if (IS_P2ALIGNED(size, p2 >> 2)) { 159 align = p2 >> 2; 160 } 161 162 if (align != 0) { 163 char name[36]; 164 (void) sprintf(name, "zio_buf_%lu", (ulong_t)size); 165 zio_buf_cache[c] = kmem_cache_create(name, size, 166 align, NULL, NULL, NULL, NULL, NULL, cflags); 167 168 /* 169 * Since zio_data bufs do not appear in crash dumps, we 170 * pass KMC_NOTOUCH so that no allocator metadata is 171 * stored with the buffers. 172 */ 173 (void) sprintf(name, "zio_data_buf_%lu", (ulong_t)size); 174 zio_data_buf_cache[c] = kmem_cache_create(name, size, 175 align, NULL, NULL, NULL, NULL, data_alloc_arena, 176 cflags | KMC_NOTOUCH); 177 } 178 } 179 180 while (--c != 0) { 181 ASSERT(zio_buf_cache[c] != NULL); 182 if (zio_buf_cache[c - 1] == NULL) 183 zio_buf_cache[c - 1] = zio_buf_cache[c]; 184 185 ASSERT(zio_data_buf_cache[c] != NULL); 186 if (zio_data_buf_cache[c - 1] == NULL) 187 zio_data_buf_cache[c - 1] = zio_data_buf_cache[c]; 188 } 189 190 /* 191 * The zio write taskqs have 1 thread per cpu, allow 1/2 of the taskqs 192 * to fail 3 times per txg or 8 failures, whichever is greater. 193 */ 194 zfs_mg_alloc_failures = MAX((3 * max_ncpus / 2), 8); 195 196 zio_inject_init(); 197 } 198 199 void 200 zio_fini(void) 201 { 202 size_t c; 203 kmem_cache_t *last_cache = NULL; 204 kmem_cache_t *last_data_cache = NULL; 205 206 for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) { 207 if (zio_buf_cache[c] != last_cache) { 208 last_cache = zio_buf_cache[c]; 209 kmem_cache_destroy(zio_buf_cache[c]); 210 } 211 zio_buf_cache[c] = NULL; 212 213 if (zio_data_buf_cache[c] != last_data_cache) { 214 last_data_cache = zio_data_buf_cache[c]; 215 kmem_cache_destroy(zio_data_buf_cache[c]); 216 } 217 zio_data_buf_cache[c] = NULL; 218 } 219 220 kmem_cache_destroy(zio_link_cache); 221 kmem_cache_destroy(zio_cache); 222 223 zio_inject_fini(); 224 } 225 226 /* 227 * ========================================================================== 228 * Allocate and free I/O buffers 229 * ========================================================================== 230 */ 231 232 /* 233 * Use zio_buf_alloc to allocate ZFS metadata. This data will appear in a 234 * crashdump if the kernel panics, so use it judiciously. Obviously, it's 235 * useful to inspect ZFS metadata, but if possible, we should avoid keeping 236 * excess / transient data in-core during a crashdump. 237 */ 238 void * 239 zio_buf_alloc(size_t size) 240 { 241 size_t c = (size - 1) >> SPA_MINBLOCKSHIFT; 242 243 ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT); 244 245 return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE)); 246 } 247 248 /* 249 * Use zio_data_buf_alloc to allocate data. The data will not appear in a 250 * crashdump if the kernel panics. This exists so that we will limit the amount 251 * of ZFS data that shows up in a kernel crashdump. (Thus reducing the amount 252 * of kernel heap dumped to disk when the kernel panics) 253 */ 254 void * 255 zio_data_buf_alloc(size_t size) 256 { 257 size_t c = (size - 1) >> SPA_MINBLOCKSHIFT; 258 259 ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT); 260 261 return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE)); 262 } 263 264 void 265 zio_buf_free(void *buf, size_t size) 266 { 267 size_t c = (size - 1) >> SPA_MINBLOCKSHIFT; 268 269 ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT); 270 271 kmem_cache_free(zio_buf_cache[c], buf); 272 } 273 274 void 275 zio_data_buf_free(void *buf, size_t size) 276 { 277 size_t c = (size - 1) >> SPA_MINBLOCKSHIFT; 278 279 ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT); 280 281 kmem_cache_free(zio_data_buf_cache[c], buf); 282 } 283 284 /* 285 * ========================================================================== 286 * Push and pop I/O transform buffers 287 * ========================================================================== 288 */ 289 static void 290 zio_push_transform(zio_t *zio, void *data, uint64_t size, uint64_t bufsize, 291 zio_transform_func_t *transform) 292 { 293 zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP); 294 295 zt->zt_orig_data = zio->io_data; 296 zt->zt_orig_size = zio->io_size; 297 zt->zt_bufsize = bufsize; 298 zt->zt_transform = transform; 299 300 zt->zt_next = zio->io_transform_stack; 301 zio->io_transform_stack = zt; 302 303 zio->io_data = data; 304 zio->io_size = size; 305 } 306 307 static void 308 zio_pop_transforms(zio_t *zio) 309 { 310 zio_transform_t *zt; 311 312 while ((zt = zio->io_transform_stack) != NULL) { 313 if (zt->zt_transform != NULL) 314 zt->zt_transform(zio, 315 zt->zt_orig_data, zt->zt_orig_size); 316 317 if (zt->zt_bufsize != 0) 318 zio_buf_free(zio->io_data, zt->zt_bufsize); 319 320 zio->io_data = zt->zt_orig_data; 321 zio->io_size = zt->zt_orig_size; 322 zio->io_transform_stack = zt->zt_next; 323 324 kmem_free(zt, sizeof (zio_transform_t)); 325 } 326 } 327 328 /* 329 * ========================================================================== 330 * I/O transform callbacks for subblocks and decompression 331 * ========================================================================== 332 */ 333 static void 334 zio_subblock(zio_t *zio, void *data, uint64_t size) 335 { 336 ASSERT(zio->io_size > size); 337 338 if (zio->io_type == ZIO_TYPE_READ) 339 bcopy(zio->io_data, data, size); 340 } 341 342 static void 343 zio_decompress(zio_t *zio, void *data, uint64_t size) 344 { 345 if (zio->io_error == 0 && 346 zio_decompress_data(BP_GET_COMPRESS(zio->io_bp), 347 zio->io_data, data, zio->io_size, size) != 0) 348 zio->io_error = EIO; 349 } 350 351 /* 352 * ========================================================================== 353 * I/O parent/child relationships and pipeline interlocks 354 * ========================================================================== 355 */ 356 /* 357 * NOTE - Callers to zio_walk_parents() and zio_walk_children must 358 * continue calling these functions until they return NULL. 359 * Otherwise, the next caller will pick up the list walk in 360 * some indeterminate state. (Otherwise every caller would 361 * have to pass in a cookie to keep the state represented by 362 * io_walk_link, which gets annoying.) 363 */ 364 zio_t * 365 zio_walk_parents(zio_t *cio) 366 { 367 zio_link_t *zl = cio->io_walk_link; 368 list_t *pl = &cio->io_parent_list; 369 370 zl = (zl == NULL) ? list_head(pl) : list_next(pl, zl); 371 cio->io_walk_link = zl; 372 373 if (zl == NULL) 374 return (NULL); 375 376 ASSERT(zl->zl_child == cio); 377 return (zl->zl_parent); 378 } 379 380 zio_t * 381 zio_walk_children(zio_t *pio) 382 { 383 zio_link_t *zl = pio->io_walk_link; 384 list_t *cl = &pio->io_child_list; 385 386 zl = (zl == NULL) ? list_head(cl) : list_next(cl, zl); 387 pio->io_walk_link = zl; 388 389 if (zl == NULL) 390 return (NULL); 391 392 ASSERT(zl->zl_parent == pio); 393 return (zl->zl_child); 394 } 395 396 zio_t * 397 zio_unique_parent(zio_t *cio) 398 { 399 zio_t *pio = zio_walk_parents(cio); 400 401 VERIFY(zio_walk_parents(cio) == NULL); 402 return (pio); 403 } 404 405 void 406 zio_add_child(zio_t *pio, zio_t *cio) 407 { 408 zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP); 409 410 /* 411 * Logical I/Os can have logical, gang, or vdev children. 412 * Gang I/Os can have gang or vdev children. 413 * Vdev I/Os can only have vdev children. 414 * The following ASSERT captures all of these constraints. 415 */ 416 ASSERT(cio->io_child_type <= pio->io_child_type); 417 418 zl->zl_parent = pio; 419 zl->zl_child = cio; 420 421 mutex_enter(&cio->io_lock); 422 mutex_enter(&pio->io_lock); 423 424 ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0); 425 426 for (int w = 0; w < ZIO_WAIT_TYPES; w++) 427 pio->io_children[cio->io_child_type][w] += !cio->io_state[w]; 428 429 list_insert_head(&pio->io_child_list, zl); 430 list_insert_head(&cio->io_parent_list, zl); 431 432 pio->io_child_count++; 433 cio->io_parent_count++; 434 435 mutex_exit(&pio->io_lock); 436 mutex_exit(&cio->io_lock); 437 } 438 439 static void 440 zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl) 441 { 442 ASSERT(zl->zl_parent == pio); 443 ASSERT(zl->zl_child == cio); 444 445 mutex_enter(&cio->io_lock); 446 mutex_enter(&pio->io_lock); 447 448 list_remove(&pio->io_child_list, zl); 449 list_remove(&cio->io_parent_list, zl); 450 451 pio->io_child_count--; 452 cio->io_parent_count--; 453 454 mutex_exit(&pio->io_lock); 455 mutex_exit(&cio->io_lock); 456 457 kmem_cache_free(zio_link_cache, zl); 458 } 459 460 static boolean_t 461 zio_wait_for_children(zio_t *zio, enum zio_child child, enum zio_wait_type wait) 462 { 463 uint64_t *countp = &zio->io_children[child][wait]; 464 boolean_t waiting = B_FALSE; 465 466 mutex_enter(&zio->io_lock); 467 ASSERT(zio->io_stall == NULL); 468 if (*countp != 0) { 469 zio->io_stage >>= 1; 470 zio->io_stall = countp; 471 waiting = B_TRUE; 472 } 473 mutex_exit(&zio->io_lock); 474 475 return (waiting); 476 } 477 478 static void 479 zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait) 480 { 481 uint64_t *countp = &pio->io_children[zio->io_child_type][wait]; 482 int *errorp = &pio->io_child_error[zio->io_child_type]; 483 484 mutex_enter(&pio->io_lock); 485 if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE)) 486 *errorp = zio_worst_error(*errorp, zio->io_error); 487 pio->io_reexecute |= zio->io_reexecute; 488 ASSERT3U(*countp, >, 0); 489 if (--*countp == 0 && pio->io_stall == countp) { 490 pio->io_stall = NULL; 491 mutex_exit(&pio->io_lock); 492 zio_execute(pio); 493 } else { 494 mutex_exit(&pio->io_lock); 495 } 496 } 497 498 static void 499 zio_inherit_child_errors(zio_t *zio, enum zio_child c) 500 { 501 if (zio->io_child_error[c] != 0 && zio->io_error == 0) 502 zio->io_error = zio->io_child_error[c]; 503 } 504 505 /* 506 * ========================================================================== 507 * Create the various types of I/O (read, write, free, etc) 508 * ========================================================================== 509 */ 510 static zio_t * 511 zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp, 512 void *data, uint64_t size, zio_done_func_t *done, void *private, 513 zio_type_t type, int priority, enum zio_flag flags, 514 vdev_t *vd, uint64_t offset, const zbookmark_t *zb, 515 enum zio_stage stage, enum zio_stage pipeline) 516 { 517 zio_t *zio; 518 519 ASSERT3U(size, <=, SPA_MAXBLOCKSIZE); 520 ASSERT(P2PHASE(size, SPA_MINBLOCKSIZE) == 0); 521 ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0); 522 523 ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER)); 524 ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER)); 525 ASSERT(vd || stage == ZIO_STAGE_OPEN); 526 527 zio = kmem_cache_alloc(zio_cache, KM_SLEEP); 528 bzero(zio, sizeof (zio_t)); 529 530 mutex_init(&zio->io_lock, NULL, MUTEX_DEFAULT, NULL); 531 cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL); 532 533 list_create(&zio->io_parent_list, sizeof (zio_link_t), 534 offsetof(zio_link_t, zl_parent_node)); 535 list_create(&zio->io_child_list, sizeof (zio_link_t), 536 offsetof(zio_link_t, zl_child_node)); 537 538 if (vd != NULL) 539 zio->io_child_type = ZIO_CHILD_VDEV; 540 else if (flags & ZIO_FLAG_GANG_CHILD) 541 zio->io_child_type = ZIO_CHILD_GANG; 542 else if (flags & ZIO_FLAG_DDT_CHILD) 543 zio->io_child_type = ZIO_CHILD_DDT; 544 else 545 zio->io_child_type = ZIO_CHILD_LOGICAL; 546 547 if (bp != NULL) { 548 zio->io_bp = (blkptr_t *)bp; 549 zio->io_bp_copy = *bp; 550 zio->io_bp_orig = *bp; 551 if (type != ZIO_TYPE_WRITE || 552 zio->io_child_type == ZIO_CHILD_DDT) 553 zio->io_bp = &zio->io_bp_copy; /* so caller can free */ 554 if (zio->io_child_type == ZIO_CHILD_LOGICAL) 555 zio->io_logical = zio; 556 if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp)) 557 pipeline |= ZIO_GANG_STAGES; 558 } 559 560 zio->io_spa = spa; 561 zio->io_txg = txg; 562 zio->io_done = done; 563 zio->io_private = private; 564 zio->io_type = type; 565 zio->io_priority = priority; 566 zio->io_vd = vd; 567 zio->io_offset = offset; 568 zio->io_orig_data = zio->io_data = data; 569 zio->io_orig_size = zio->io_size = size; 570 zio->io_orig_flags = zio->io_flags = flags; 571 zio->io_orig_stage = zio->io_stage = stage; 572 zio->io_orig_pipeline = zio->io_pipeline = pipeline; 573 574 zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY); 575 zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE); 576 577 if (zb != NULL) 578 zio->io_bookmark = *zb; 579 580 if (pio != NULL) { 581 if (zio->io_logical == NULL) 582 zio->io_logical = pio->io_logical; 583 if (zio->io_child_type == ZIO_CHILD_GANG) 584 zio->io_gang_leader = pio->io_gang_leader; 585 zio_add_child(pio, zio); 586 } 587 588 return (zio); 589 } 590 591 static void 592 zio_destroy(zio_t *zio) 593 { 594 list_destroy(&zio->io_parent_list); 595 list_destroy(&zio->io_child_list); 596 mutex_destroy(&zio->io_lock); 597 cv_destroy(&zio->io_cv); 598 kmem_cache_free(zio_cache, zio); 599 } 600 601 zio_t * 602 zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done, 603 void *private, enum zio_flag flags) 604 { 605 zio_t *zio; 606 607 zio = zio_create(pio, spa, 0, NULL, NULL, 0, done, private, 608 ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL, 609 ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE); 610 611 return (zio); 612 } 613 614 zio_t * 615 zio_root(spa_t *spa, zio_done_func_t *done, void *private, enum zio_flag flags) 616 { 617 return (zio_null(NULL, spa, NULL, done, private, flags)); 618 } 619 620 zio_t * 621 zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, 622 void *data, uint64_t size, zio_done_func_t *done, void *private, 623 int priority, enum zio_flag flags, const zbookmark_t *zb) 624 { 625 zio_t *zio; 626 627 zio = zio_create(pio, spa, BP_PHYSICAL_BIRTH(bp), bp, 628 data, size, done, private, 629 ZIO_TYPE_READ, priority, flags, NULL, 0, zb, 630 ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ? 631 ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE); 632 633 return (zio); 634 } 635 636 zio_t * 637 zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, 638 void *data, uint64_t size, const zio_prop_t *zp, 639 zio_done_func_t *ready, zio_done_func_t *done, void *private, 640 int priority, enum zio_flag flags, const zbookmark_t *zb) 641 { 642 zio_t *zio; 643 644 ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF && 645 zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS && 646 zp->zp_compress >= ZIO_COMPRESS_OFF && 647 zp->zp_compress < ZIO_COMPRESS_FUNCTIONS && 648 DMU_OT_IS_VALID(zp->zp_type) && 649 zp->zp_level < 32 && 650 zp->zp_copies > 0 && 651 zp->zp_copies <= spa_max_replication(spa) && 652 zp->zp_dedup <= 1 && 653 zp->zp_dedup_verify <= 1); 654 655 zio = zio_create(pio, spa, txg, bp, data, size, done, private, 656 ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb, 657 ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ? 658 ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE); 659 660 zio->io_ready = ready; 661 zio->io_prop = *zp; 662 663 return (zio); 664 } 665 666 zio_t * 667 zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, void *data, 668 uint64_t size, zio_done_func_t *done, void *private, int priority, 669 enum zio_flag flags, zbookmark_t *zb) 670 { 671 zio_t *zio; 672 673 zio = zio_create(pio, spa, txg, bp, data, size, done, private, 674 ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb, 675 ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE); 676 677 return (zio); 678 } 679 680 void 681 zio_write_override(zio_t *zio, blkptr_t *bp, int copies) 682 { 683 ASSERT(zio->io_type == ZIO_TYPE_WRITE); 684 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 685 ASSERT(zio->io_stage == ZIO_STAGE_OPEN); 686 ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa)); 687 688 zio->io_prop.zp_copies = copies; 689 zio->io_bp_override = bp; 690 } 691 692 void 693 zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp) 694 { 695 bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp); 696 } 697 698 zio_t * 699 zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp, 700 enum zio_flag flags) 701 { 702 zio_t *zio; 703 704 dprintf_bp(bp, "freeing in txg %llu, pass %u", 705 (longlong_t)txg, spa->spa_sync_pass); 706 707 ASSERT(!BP_IS_HOLE(bp)); 708 ASSERT(spa_syncing_txg(spa) == txg); 709 ASSERT(spa_sync_pass(spa) < zfs_sync_pass_deferred_free); 710 711 zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp), 712 NULL, NULL, ZIO_TYPE_FREE, ZIO_PRIORITY_FREE, flags, 713 NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_FREE_PIPELINE); 714 715 return (zio); 716 } 717 718 zio_t * 719 zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp, 720 zio_done_func_t *done, void *private, enum zio_flag flags) 721 { 722 zio_t *zio; 723 724 /* 725 * A claim is an allocation of a specific block. Claims are needed 726 * to support immediate writes in the intent log. The issue is that 727 * immediate writes contain committed data, but in a txg that was 728 * *not* committed. Upon opening the pool after an unclean shutdown, 729 * the intent log claims all blocks that contain immediate write data 730 * so that the SPA knows they're in use. 731 * 732 * All claims *must* be resolved in the first txg -- before the SPA 733 * starts allocating blocks -- so that nothing is allocated twice. 734 * If txg == 0 we just verify that the block is claimable. 735 */ 736 ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <, spa_first_txg(spa)); 737 ASSERT(txg == spa_first_txg(spa) || txg == 0); 738 ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa)); /* zdb(1M) */ 739 740 zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp), 741 done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW, flags, 742 NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE); 743 744 return (zio); 745 } 746 747 zio_t * 748 zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd, 749 zio_done_func_t *done, void *private, int priority, enum zio_flag flags) 750 { 751 zio_t *zio; 752 int c; 753 754 if (vd->vdev_children == 0) { 755 zio = zio_create(pio, spa, 0, NULL, NULL, 0, done, private, 756 ZIO_TYPE_IOCTL, priority, flags, vd, 0, NULL, 757 ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE); 758 759 zio->io_cmd = cmd; 760 } else { 761 zio = zio_null(pio, spa, NULL, NULL, NULL, flags); 762 763 for (c = 0; c < vd->vdev_children; c++) 764 zio_nowait(zio_ioctl(zio, spa, vd->vdev_child[c], cmd, 765 done, private, priority, flags)); 766 } 767 768 return (zio); 769 } 770 771 zio_t * 772 zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size, 773 void *data, int checksum, zio_done_func_t *done, void *private, 774 int priority, enum zio_flag flags, boolean_t labels) 775 { 776 zio_t *zio; 777 778 ASSERT(vd->vdev_children == 0); 779 ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE || 780 offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE); 781 ASSERT3U(offset + size, <=, vd->vdev_psize); 782 783 zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, done, private, 784 ZIO_TYPE_READ, priority, flags, vd, offset, NULL, 785 ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE); 786 787 zio->io_prop.zp_checksum = checksum; 788 789 return (zio); 790 } 791 792 zio_t * 793 zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size, 794 void *data, int checksum, zio_done_func_t *done, void *private, 795 int priority, enum zio_flag flags, boolean_t labels) 796 { 797 zio_t *zio; 798 799 ASSERT(vd->vdev_children == 0); 800 ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE || 801 offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE); 802 ASSERT3U(offset + size, <=, vd->vdev_psize); 803 804 zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, done, private, 805 ZIO_TYPE_WRITE, priority, flags, vd, offset, NULL, 806 ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE); 807 808 zio->io_prop.zp_checksum = checksum; 809 810 if (zio_checksum_table[checksum].ci_eck) { 811 /* 812 * zec checksums are necessarily destructive -- they modify 813 * the end of the write buffer to hold the verifier/checksum. 814 * Therefore, we must make a local copy in case the data is 815 * being written to multiple places in parallel. 816 */ 817 void *wbuf = zio_buf_alloc(size); 818 bcopy(data, wbuf, size); 819 zio_push_transform(zio, wbuf, size, size, NULL); 820 } 821 822 return (zio); 823 } 824 825 /* 826 * Create a child I/O to do some work for us. 827 */ 828 zio_t * 829 zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset, 830 void *data, uint64_t size, int type, int priority, enum zio_flag flags, 831 zio_done_func_t *done, void *private) 832 { 833 enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE; 834 zio_t *zio; 835 836 ASSERT(vd->vdev_parent == 837 (pio->io_vd ? pio->io_vd : pio->io_spa->spa_root_vdev)); 838 839 if (type == ZIO_TYPE_READ && bp != NULL) { 840 /* 841 * If we have the bp, then the child should perform the 842 * checksum and the parent need not. This pushes error 843 * detection as close to the leaves as possible and 844 * eliminates redundant checksums in the interior nodes. 845 */ 846 pipeline |= ZIO_STAGE_CHECKSUM_VERIFY; 847 pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY; 848 } 849 850 if (vd->vdev_children == 0) 851 offset += VDEV_LABEL_START_SIZE; 852 853 flags |= ZIO_VDEV_CHILD_FLAGS(pio) | ZIO_FLAG_DONT_PROPAGATE; 854 855 /* 856 * If we've decided to do a repair, the write is not speculative -- 857 * even if the original read was. 858 */ 859 if (flags & ZIO_FLAG_IO_REPAIR) 860 flags &= ~ZIO_FLAG_SPECULATIVE; 861 862 zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size, 863 done, private, type, priority, flags, vd, offset, &pio->io_bookmark, 864 ZIO_STAGE_VDEV_IO_START >> 1, pipeline); 865 866 return (zio); 867 } 868 869 zio_t * 870 zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, void *data, uint64_t size, 871 int type, int priority, enum zio_flag flags, 872 zio_done_func_t *done, void *private) 873 { 874 zio_t *zio; 875 876 ASSERT(vd->vdev_ops->vdev_op_leaf); 877 878 zio = zio_create(NULL, vd->vdev_spa, 0, NULL, 879 data, size, done, private, type, priority, 880 flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY, 881 vd, offset, NULL, 882 ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE); 883 884 return (zio); 885 } 886 887 void 888 zio_flush(zio_t *zio, vdev_t *vd) 889 { 890 zio_nowait(zio_ioctl(zio, zio->io_spa, vd, DKIOCFLUSHWRITECACHE, 891 NULL, NULL, ZIO_PRIORITY_NOW, 892 ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY)); 893 } 894 895 void 896 zio_shrink(zio_t *zio, uint64_t size) 897 { 898 ASSERT(zio->io_executor == NULL); 899 ASSERT(zio->io_orig_size == zio->io_size); 900 ASSERT(size <= zio->io_size); 901 902 /* 903 * We don't shrink for raidz because of problems with the 904 * reconstruction when reading back less than the block size. 905 * Note, BP_IS_RAIDZ() assumes no compression. 906 */ 907 ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF); 908 if (!BP_IS_RAIDZ(zio->io_bp)) 909 zio->io_orig_size = zio->io_size = size; 910 } 911 912 /* 913 * ========================================================================== 914 * Prepare to read and write logical blocks 915 * ========================================================================== 916 */ 917 918 static int 919 zio_read_bp_init(zio_t *zio) 920 { 921 blkptr_t *bp = zio->io_bp; 922 923 if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF && 924 zio->io_child_type == ZIO_CHILD_LOGICAL && 925 !(zio->io_flags & ZIO_FLAG_RAW)) { 926 uint64_t psize = BP_GET_PSIZE(bp); 927 void *cbuf = zio_buf_alloc(psize); 928 929 zio_push_transform(zio, cbuf, psize, psize, zio_decompress); 930 } 931 932 if (!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) && BP_GET_LEVEL(bp) == 0) 933 zio->io_flags |= ZIO_FLAG_DONT_CACHE; 934 935 if (BP_GET_TYPE(bp) == DMU_OT_DDT_ZAP) 936 zio->io_flags |= ZIO_FLAG_DONT_CACHE; 937 938 if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL) 939 zio->io_pipeline = ZIO_DDT_READ_PIPELINE; 940 941 return (ZIO_PIPELINE_CONTINUE); 942 } 943 944 static int 945 zio_write_bp_init(zio_t *zio) 946 { 947 spa_t *spa = zio->io_spa; 948 zio_prop_t *zp = &zio->io_prop; 949 enum zio_compress compress = zp->zp_compress; 950 blkptr_t *bp = zio->io_bp; 951 uint64_t lsize = zio->io_size; 952 uint64_t psize = lsize; 953 int pass = 1; 954 955 /* 956 * If our children haven't all reached the ready stage, 957 * wait for them and then repeat this pipeline stage. 958 */ 959 if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_READY) || 960 zio_wait_for_children(zio, ZIO_CHILD_LOGICAL, ZIO_WAIT_READY)) 961 return (ZIO_PIPELINE_STOP); 962 963 if (!IO_IS_ALLOCATING(zio)) 964 return (ZIO_PIPELINE_CONTINUE); 965 966 ASSERT(zio->io_child_type != ZIO_CHILD_DDT); 967 968 if (zio->io_bp_override) { 969 ASSERT(bp->blk_birth != zio->io_txg); 970 ASSERT(BP_GET_DEDUP(zio->io_bp_override) == 0); 971 972 *bp = *zio->io_bp_override; 973 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 974 975 if (BP_IS_HOLE(bp) || !zp->zp_dedup) 976 return (ZIO_PIPELINE_CONTINUE); 977 978 ASSERT(zio_checksum_table[zp->zp_checksum].ci_dedup || 979 zp->zp_dedup_verify); 980 981 if (BP_GET_CHECKSUM(bp) == zp->zp_checksum) { 982 BP_SET_DEDUP(bp, 1); 983 zio->io_pipeline |= ZIO_STAGE_DDT_WRITE; 984 return (ZIO_PIPELINE_CONTINUE); 985 } 986 zio->io_bp_override = NULL; 987 BP_ZERO(bp); 988 } 989 990 if (bp->blk_birth == zio->io_txg) { 991 /* 992 * We're rewriting an existing block, which means we're 993 * working on behalf of spa_sync(). For spa_sync() to 994 * converge, it must eventually be the case that we don't 995 * have to allocate new blocks. But compression changes 996 * the blocksize, which forces a reallocate, and makes 997 * convergence take longer. Therefore, after the first 998 * few passes, stop compressing to ensure convergence. 999 */ 1000 pass = spa_sync_pass(spa); 1001 1002 ASSERT(zio->io_txg == spa_syncing_txg(spa)); 1003 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 1004 ASSERT(!BP_GET_DEDUP(bp)); 1005 1006 if (pass >= zfs_sync_pass_dont_compress) 1007 compress = ZIO_COMPRESS_OFF; 1008 1009 /* Make sure someone doesn't change their mind on overwrites */ 1010 ASSERT(MIN(zp->zp_copies + BP_IS_GANG(bp), 1011 spa_max_replication(spa)) == BP_GET_NDVAS(bp)); 1012 } 1013 1014 if (compress != ZIO_COMPRESS_OFF) { 1015 void *cbuf = zio_buf_alloc(lsize); 1016 psize = zio_compress_data(compress, zio->io_data, cbuf, lsize); 1017 if (psize == 0 || psize == lsize) { 1018 compress = ZIO_COMPRESS_OFF; 1019 zio_buf_free(cbuf, lsize); 1020 } else { 1021 ASSERT(psize < lsize); 1022 zio_push_transform(zio, cbuf, psize, lsize, NULL); 1023 } 1024 } 1025 1026 /* 1027 * The final pass of spa_sync() must be all rewrites, but the first 1028 * few passes offer a trade-off: allocating blocks defers convergence, 1029 * but newly allocated blocks are sequential, so they can be written 1030 * to disk faster. Therefore, we allow the first few passes of 1031 * spa_sync() to allocate new blocks, but force rewrites after that. 1032 * There should only be a handful of blocks after pass 1 in any case. 1033 */ 1034 if (bp->blk_birth == zio->io_txg && BP_GET_PSIZE(bp) == psize && 1035 pass >= zfs_sync_pass_rewrite) { 1036 ASSERT(psize != 0); 1037 enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES; 1038 zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages; 1039 zio->io_flags |= ZIO_FLAG_IO_REWRITE; 1040 } else { 1041 BP_ZERO(bp); 1042 zio->io_pipeline = ZIO_WRITE_PIPELINE; 1043 } 1044 1045 if (psize == 0) { 1046 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 1047 } else { 1048 ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER); 1049 BP_SET_LSIZE(bp, lsize); 1050 BP_SET_PSIZE(bp, psize); 1051 BP_SET_COMPRESS(bp, compress); 1052 BP_SET_CHECKSUM(bp, zp->zp_checksum); 1053 BP_SET_TYPE(bp, zp->zp_type); 1054 BP_SET_LEVEL(bp, zp->zp_level); 1055 BP_SET_DEDUP(bp, zp->zp_dedup); 1056 BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER); 1057 if (zp->zp_dedup) { 1058 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 1059 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE)); 1060 zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE; 1061 } 1062 } 1063 1064 return (ZIO_PIPELINE_CONTINUE); 1065 } 1066 1067 static int 1068 zio_free_bp_init(zio_t *zio) 1069 { 1070 blkptr_t *bp = zio->io_bp; 1071 1072 if (zio->io_child_type == ZIO_CHILD_LOGICAL) { 1073 if (BP_GET_DEDUP(bp)) 1074 zio->io_pipeline = ZIO_DDT_FREE_PIPELINE; 1075 } 1076 1077 return (ZIO_PIPELINE_CONTINUE); 1078 } 1079 1080 /* 1081 * ========================================================================== 1082 * Execute the I/O pipeline 1083 * ========================================================================== 1084 */ 1085 1086 static void 1087 zio_taskq_dispatch(zio_t *zio, enum zio_taskq_type q, boolean_t cutinline) 1088 { 1089 spa_t *spa = zio->io_spa; 1090 zio_type_t t = zio->io_type; 1091 int flags = (cutinline ? TQ_FRONT : 0); 1092 1093 /* 1094 * If we're a config writer or a probe, the normal issue and 1095 * interrupt threads may all be blocked waiting for the config lock. 1096 * In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL. 1097 */ 1098 if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE)) 1099 t = ZIO_TYPE_NULL; 1100 1101 /* 1102 * A similar issue exists for the L2ARC write thread until L2ARC 2.0. 1103 */ 1104 if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux) 1105 t = ZIO_TYPE_NULL; 1106 1107 /* 1108 * If this is a high priority I/O, then use the high priority taskq. 1109 */ 1110 if (zio->io_priority == ZIO_PRIORITY_NOW && 1111 spa->spa_zio_taskq[t][q + 1] != NULL) 1112 q++; 1113 1114 ASSERT3U(q, <, ZIO_TASKQ_TYPES); 1115 1116 /* 1117 * NB: We are assuming that the zio can only be dispatched 1118 * to a single taskq at a time. It would be a grievous error 1119 * to dispatch the zio to another taskq at the same time. 1120 */ 1121 ASSERT(zio->io_tqent.tqent_next == NULL); 1122 taskq_dispatch_ent(spa->spa_zio_taskq[t][q], 1123 (task_func_t *)zio_execute, zio, flags, &zio->io_tqent); 1124 } 1125 1126 static boolean_t 1127 zio_taskq_member(zio_t *zio, enum zio_taskq_type q) 1128 { 1129 kthread_t *executor = zio->io_executor; 1130 spa_t *spa = zio->io_spa; 1131 1132 for (zio_type_t t = 0; t < ZIO_TYPES; t++) 1133 if (taskq_member(spa->spa_zio_taskq[t][q], executor)) 1134 return (B_TRUE); 1135 1136 return (B_FALSE); 1137 } 1138 1139 static int 1140 zio_issue_async(zio_t *zio) 1141 { 1142 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE); 1143 1144 return (ZIO_PIPELINE_STOP); 1145 } 1146 1147 void 1148 zio_interrupt(zio_t *zio) 1149 { 1150 zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE); 1151 } 1152 1153 /* 1154 * Execute the I/O pipeline until one of the following occurs: 1155 * (1) the I/O completes; (2) the pipeline stalls waiting for 1156 * dependent child I/Os; (3) the I/O issues, so we're waiting 1157 * for an I/O completion interrupt; (4) the I/O is delegated by 1158 * vdev-level caching or aggregation; (5) the I/O is deferred 1159 * due to vdev-level queueing; (6) the I/O is handed off to 1160 * another thread. In all cases, the pipeline stops whenever 1161 * there's no CPU work; it never burns a thread in cv_wait(). 1162 * 1163 * There's no locking on io_stage because there's no legitimate way 1164 * for multiple threads to be attempting to process the same I/O. 1165 */ 1166 static zio_pipe_stage_t *zio_pipeline[]; 1167 1168 void 1169 zio_execute(zio_t *zio) 1170 { 1171 zio->io_executor = curthread; 1172 1173 while (zio->io_stage < ZIO_STAGE_DONE) { 1174 enum zio_stage pipeline = zio->io_pipeline; 1175 enum zio_stage stage = zio->io_stage; 1176 int rv; 1177 1178 ASSERT(!MUTEX_HELD(&zio->io_lock)); 1179 ASSERT(ISP2(stage)); 1180 ASSERT(zio->io_stall == NULL); 1181 1182 do { 1183 stage <<= 1; 1184 } while ((stage & pipeline) == 0); 1185 1186 ASSERT(stage <= ZIO_STAGE_DONE); 1187 1188 /* 1189 * If we are in interrupt context and this pipeline stage 1190 * will grab a config lock that is held across I/O, 1191 * or may wait for an I/O that needs an interrupt thread 1192 * to complete, issue async to avoid deadlock. 1193 * 1194 * For VDEV_IO_START, we cut in line so that the io will 1195 * be sent to disk promptly. 1196 */ 1197 if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL && 1198 zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) { 1199 boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ? 1200 zio_requeue_io_start_cut_in_line : B_FALSE; 1201 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut); 1202 return; 1203 } 1204 1205 zio->io_stage = stage; 1206 rv = zio_pipeline[highbit(stage) - 1](zio); 1207 1208 if (rv == ZIO_PIPELINE_STOP) 1209 return; 1210 1211 ASSERT(rv == ZIO_PIPELINE_CONTINUE); 1212 } 1213 } 1214 1215 /* 1216 * ========================================================================== 1217 * Initiate I/O, either sync or async 1218 * ========================================================================== 1219 */ 1220 int 1221 zio_wait(zio_t *zio) 1222 { 1223 int error; 1224 1225 ASSERT(zio->io_stage == ZIO_STAGE_OPEN); 1226 ASSERT(zio->io_executor == NULL); 1227 1228 zio->io_waiter = curthread; 1229 1230 zio_execute(zio); 1231 1232 mutex_enter(&zio->io_lock); 1233 while (zio->io_executor != NULL) 1234 cv_wait(&zio->io_cv, &zio->io_lock); 1235 mutex_exit(&zio->io_lock); 1236 1237 error = zio->io_error; 1238 zio_destroy(zio); 1239 1240 return (error); 1241 } 1242 1243 void 1244 zio_nowait(zio_t *zio) 1245 { 1246 ASSERT(zio->io_executor == NULL); 1247 1248 if (zio->io_child_type == ZIO_CHILD_LOGICAL && 1249 zio_unique_parent(zio) == NULL) { 1250 /* 1251 * This is a logical async I/O with no parent to wait for it. 1252 * We add it to the spa_async_root_zio "Godfather" I/O which 1253 * will ensure they complete prior to unloading the pool. 1254 */ 1255 spa_t *spa = zio->io_spa; 1256 1257 zio_add_child(spa->spa_async_zio_root, zio); 1258 } 1259 1260 zio_execute(zio); 1261 } 1262 1263 /* 1264 * ========================================================================== 1265 * Reexecute or suspend/resume failed I/O 1266 * ========================================================================== 1267 */ 1268 1269 static void 1270 zio_reexecute(zio_t *pio) 1271 { 1272 zio_t *cio, *cio_next; 1273 1274 ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL); 1275 ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN); 1276 ASSERT(pio->io_gang_leader == NULL); 1277 ASSERT(pio->io_gang_tree == NULL); 1278 1279 pio->io_flags = pio->io_orig_flags; 1280 pio->io_stage = pio->io_orig_stage; 1281 pio->io_pipeline = pio->io_orig_pipeline; 1282 pio->io_reexecute = 0; 1283 pio->io_error = 0; 1284 for (int w = 0; w < ZIO_WAIT_TYPES; w++) 1285 pio->io_state[w] = 0; 1286 for (int c = 0; c < ZIO_CHILD_TYPES; c++) 1287 pio->io_child_error[c] = 0; 1288 1289 if (IO_IS_ALLOCATING(pio)) 1290 BP_ZERO(pio->io_bp); 1291 1292 /* 1293 * As we reexecute pio's children, new children could be created. 1294 * New children go to the head of pio's io_child_list, however, 1295 * so we will (correctly) not reexecute them. The key is that 1296 * the remainder of pio's io_child_list, from 'cio_next' onward, 1297 * cannot be affected by any side effects of reexecuting 'cio'. 1298 */ 1299 for (cio = zio_walk_children(pio); cio != NULL; cio = cio_next) { 1300 cio_next = zio_walk_children(pio); 1301 mutex_enter(&pio->io_lock); 1302 for (int w = 0; w < ZIO_WAIT_TYPES; w++) 1303 pio->io_children[cio->io_child_type][w]++; 1304 mutex_exit(&pio->io_lock); 1305 zio_reexecute(cio); 1306 } 1307 1308 /* 1309 * Now that all children have been reexecuted, execute the parent. 1310 * We don't reexecute "The Godfather" I/O here as it's the 1311 * responsibility of the caller to wait on him. 1312 */ 1313 if (!(pio->io_flags & ZIO_FLAG_GODFATHER)) 1314 zio_execute(pio); 1315 } 1316 1317 void 1318 zio_suspend(spa_t *spa, zio_t *zio) 1319 { 1320 if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC) 1321 fm_panic("Pool '%s' has encountered an uncorrectable I/O " 1322 "failure and the failure mode property for this pool " 1323 "is set to panic.", spa_name(spa)); 1324 1325 zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL, NULL, 0, 0); 1326 1327 mutex_enter(&spa->spa_suspend_lock); 1328 1329 if (spa->spa_suspend_zio_root == NULL) 1330 spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL, 1331 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 1332 ZIO_FLAG_GODFATHER); 1333 1334 spa->spa_suspended = B_TRUE; 1335 1336 if (zio != NULL) { 1337 ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER)); 1338 ASSERT(zio != spa->spa_suspend_zio_root); 1339 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 1340 ASSERT(zio_unique_parent(zio) == NULL); 1341 ASSERT(zio->io_stage == ZIO_STAGE_DONE); 1342 zio_add_child(spa->spa_suspend_zio_root, zio); 1343 } 1344 1345 mutex_exit(&spa->spa_suspend_lock); 1346 } 1347 1348 int 1349 zio_resume(spa_t *spa) 1350 { 1351 zio_t *pio; 1352 1353 /* 1354 * Reexecute all previously suspended i/o. 1355 */ 1356 mutex_enter(&spa->spa_suspend_lock); 1357 spa->spa_suspended = B_FALSE; 1358 cv_broadcast(&spa->spa_suspend_cv); 1359 pio = spa->spa_suspend_zio_root; 1360 spa->spa_suspend_zio_root = NULL; 1361 mutex_exit(&spa->spa_suspend_lock); 1362 1363 if (pio == NULL) 1364 return (0); 1365 1366 zio_reexecute(pio); 1367 return (zio_wait(pio)); 1368 } 1369 1370 void 1371 zio_resume_wait(spa_t *spa) 1372 { 1373 mutex_enter(&spa->spa_suspend_lock); 1374 while (spa_suspended(spa)) 1375 cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock); 1376 mutex_exit(&spa->spa_suspend_lock); 1377 } 1378 1379 /* 1380 * ========================================================================== 1381 * Gang blocks. 1382 * 1383 * A gang block is a collection of small blocks that looks to the DMU 1384 * like one large block. When zio_dva_allocate() cannot find a block 1385 * of the requested size, due to either severe fragmentation or the pool 1386 * being nearly full, it calls zio_write_gang_block() to construct the 1387 * block from smaller fragments. 1388 * 1389 * A gang block consists of a gang header (zio_gbh_phys_t) and up to 1390 * three (SPA_GBH_NBLKPTRS) gang members. The gang header is just like 1391 * an indirect block: it's an array of block pointers. It consumes 1392 * only one sector and hence is allocatable regardless of fragmentation. 1393 * The gang header's bps point to its gang members, which hold the data. 1394 * 1395 * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg> 1396 * as the verifier to ensure uniqueness of the SHA256 checksum. 1397 * Critically, the gang block bp's blk_cksum is the checksum of the data, 1398 * not the gang header. This ensures that data block signatures (needed for 1399 * deduplication) are independent of how the block is physically stored. 1400 * 1401 * Gang blocks can be nested: a gang member may itself be a gang block. 1402 * Thus every gang block is a tree in which root and all interior nodes are 1403 * gang headers, and the leaves are normal blocks that contain user data. 1404 * The root of the gang tree is called the gang leader. 1405 * 1406 * To perform any operation (read, rewrite, free, claim) on a gang block, 1407 * zio_gang_assemble() first assembles the gang tree (minus data leaves) 1408 * in the io_gang_tree field of the original logical i/o by recursively 1409 * reading the gang leader and all gang headers below it. This yields 1410 * an in-core tree containing the contents of every gang header and the 1411 * bps for every constituent of the gang block. 1412 * 1413 * With the gang tree now assembled, zio_gang_issue() just walks the gang tree 1414 * and invokes a callback on each bp. To free a gang block, zio_gang_issue() 1415 * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp. 1416 * zio_claim_gang() provides a similarly trivial wrapper for zio_claim(). 1417 * zio_read_gang() is a wrapper around zio_read() that omits reading gang 1418 * headers, since we already have those in io_gang_tree. zio_rewrite_gang() 1419 * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite() 1420 * of the gang header plus zio_checksum_compute() of the data to update the 1421 * gang header's blk_cksum as described above. 1422 * 1423 * The two-phase assemble/issue model solves the problem of partial failure -- 1424 * what if you'd freed part of a gang block but then couldn't read the 1425 * gang header for another part? Assembling the entire gang tree first 1426 * ensures that all the necessary gang header I/O has succeeded before 1427 * starting the actual work of free, claim, or write. Once the gang tree 1428 * is assembled, free and claim are in-memory operations that cannot fail. 1429 * 1430 * In the event that a gang write fails, zio_dva_unallocate() walks the 1431 * gang tree to immediately free (i.e. insert back into the space map) 1432 * everything we've allocated. This ensures that we don't get ENOSPC 1433 * errors during repeated suspend/resume cycles due to a flaky device. 1434 * 1435 * Gang rewrites only happen during sync-to-convergence. If we can't assemble 1436 * the gang tree, we won't modify the block, so we can safely defer the free 1437 * (knowing that the block is still intact). If we *can* assemble the gang 1438 * tree, then even if some of the rewrites fail, zio_dva_unallocate() will free 1439 * each constituent bp and we can allocate a new block on the next sync pass. 1440 * 1441 * In all cases, the gang tree allows complete recovery from partial failure. 1442 * ========================================================================== 1443 */ 1444 1445 static zio_t * 1446 zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data) 1447 { 1448 if (gn != NULL) 1449 return (pio); 1450 1451 return (zio_read(pio, pio->io_spa, bp, data, BP_GET_PSIZE(bp), 1452 NULL, NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), 1453 &pio->io_bookmark)); 1454 } 1455 1456 zio_t * 1457 zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data) 1458 { 1459 zio_t *zio; 1460 1461 if (gn != NULL) { 1462 zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp, 1463 gn->gn_gbh, SPA_GANGBLOCKSIZE, NULL, NULL, pio->io_priority, 1464 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 1465 /* 1466 * As we rewrite each gang header, the pipeline will compute 1467 * a new gang block header checksum for it; but no one will 1468 * compute a new data checksum, so we do that here. The one 1469 * exception is the gang leader: the pipeline already computed 1470 * its data checksum because that stage precedes gang assembly. 1471 * (Presently, nothing actually uses interior data checksums; 1472 * this is just good hygiene.) 1473 */ 1474 if (gn != pio->io_gang_leader->io_gang_tree) { 1475 zio_checksum_compute(zio, BP_GET_CHECKSUM(bp), 1476 data, BP_GET_PSIZE(bp)); 1477 } 1478 /* 1479 * If we are here to damage data for testing purposes, 1480 * leave the GBH alone so that we can detect the damage. 1481 */ 1482 if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE) 1483 zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES; 1484 } else { 1485 zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp, 1486 data, BP_GET_PSIZE(bp), NULL, NULL, pio->io_priority, 1487 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 1488 } 1489 1490 return (zio); 1491 } 1492 1493 /* ARGSUSED */ 1494 zio_t * 1495 zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data) 1496 { 1497 return (zio_free_sync(pio, pio->io_spa, pio->io_txg, bp, 1498 ZIO_GANG_CHILD_FLAGS(pio))); 1499 } 1500 1501 /* ARGSUSED */ 1502 zio_t * 1503 zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data) 1504 { 1505 return (zio_claim(pio, pio->io_spa, pio->io_txg, bp, 1506 NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio))); 1507 } 1508 1509 static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = { 1510 NULL, 1511 zio_read_gang, 1512 zio_rewrite_gang, 1513 zio_free_gang, 1514 zio_claim_gang, 1515 NULL 1516 }; 1517 1518 static void zio_gang_tree_assemble_done(zio_t *zio); 1519 1520 static zio_gang_node_t * 1521 zio_gang_node_alloc(zio_gang_node_t **gnpp) 1522 { 1523 zio_gang_node_t *gn; 1524 1525 ASSERT(*gnpp == NULL); 1526 1527 gn = kmem_zalloc(sizeof (*gn), KM_SLEEP); 1528 gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE); 1529 *gnpp = gn; 1530 1531 return (gn); 1532 } 1533 1534 static void 1535 zio_gang_node_free(zio_gang_node_t **gnpp) 1536 { 1537 zio_gang_node_t *gn = *gnpp; 1538 1539 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) 1540 ASSERT(gn->gn_child[g] == NULL); 1541 1542 zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE); 1543 kmem_free(gn, sizeof (*gn)); 1544 *gnpp = NULL; 1545 } 1546 1547 static void 1548 zio_gang_tree_free(zio_gang_node_t **gnpp) 1549 { 1550 zio_gang_node_t *gn = *gnpp; 1551 1552 if (gn == NULL) 1553 return; 1554 1555 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) 1556 zio_gang_tree_free(&gn->gn_child[g]); 1557 1558 zio_gang_node_free(gnpp); 1559 } 1560 1561 static void 1562 zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp) 1563 { 1564 zio_gang_node_t *gn = zio_gang_node_alloc(gnpp); 1565 1566 ASSERT(gio->io_gang_leader == gio); 1567 ASSERT(BP_IS_GANG(bp)); 1568 1569 zio_nowait(zio_read(gio, gio->io_spa, bp, gn->gn_gbh, 1570 SPA_GANGBLOCKSIZE, zio_gang_tree_assemble_done, gn, 1571 gio->io_priority, ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark)); 1572 } 1573 1574 static void 1575 zio_gang_tree_assemble_done(zio_t *zio) 1576 { 1577 zio_t *gio = zio->io_gang_leader; 1578 zio_gang_node_t *gn = zio->io_private; 1579 blkptr_t *bp = zio->io_bp; 1580 1581 ASSERT(gio == zio_unique_parent(zio)); 1582 ASSERT(zio->io_child_count == 0); 1583 1584 if (zio->io_error) 1585 return; 1586 1587 if (BP_SHOULD_BYTESWAP(bp)) 1588 byteswap_uint64_array(zio->io_data, zio->io_size); 1589 1590 ASSERT(zio->io_data == gn->gn_gbh); 1591 ASSERT(zio->io_size == SPA_GANGBLOCKSIZE); 1592 ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC); 1593 1594 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 1595 blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g]; 1596 if (!BP_IS_GANG(gbp)) 1597 continue; 1598 zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]); 1599 } 1600 } 1601 1602 static void 1603 zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, void *data) 1604 { 1605 zio_t *gio = pio->io_gang_leader; 1606 zio_t *zio; 1607 1608 ASSERT(BP_IS_GANG(bp) == !!gn); 1609 ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp)); 1610 ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree); 1611 1612 /* 1613 * If you're a gang header, your data is in gn->gn_gbh. 1614 * If you're a gang member, your data is in 'data' and gn == NULL. 1615 */ 1616 zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data); 1617 1618 if (gn != NULL) { 1619 ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC); 1620 1621 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 1622 blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g]; 1623 if (BP_IS_HOLE(gbp)) 1624 continue; 1625 zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data); 1626 data = (char *)data + BP_GET_PSIZE(gbp); 1627 } 1628 } 1629 1630 if (gn == gio->io_gang_tree) 1631 ASSERT3P((char *)gio->io_data + gio->io_size, ==, data); 1632 1633 if (zio != pio) 1634 zio_nowait(zio); 1635 } 1636 1637 static int 1638 zio_gang_assemble(zio_t *zio) 1639 { 1640 blkptr_t *bp = zio->io_bp; 1641 1642 ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL); 1643 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 1644 1645 zio->io_gang_leader = zio; 1646 1647 zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree); 1648 1649 return (ZIO_PIPELINE_CONTINUE); 1650 } 1651 1652 static int 1653 zio_gang_issue(zio_t *zio) 1654 { 1655 blkptr_t *bp = zio->io_bp; 1656 1657 if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_DONE)) 1658 return (ZIO_PIPELINE_STOP); 1659 1660 ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio); 1661 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 1662 1663 if (zio->io_child_error[ZIO_CHILD_GANG] == 0) 1664 zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_data); 1665 else 1666 zio_gang_tree_free(&zio->io_gang_tree); 1667 1668 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 1669 1670 return (ZIO_PIPELINE_CONTINUE); 1671 } 1672 1673 static void 1674 zio_write_gang_member_ready(zio_t *zio) 1675 { 1676 zio_t *pio = zio_unique_parent(zio); 1677 zio_t *gio = zio->io_gang_leader; 1678 dva_t *cdva = zio->io_bp->blk_dva; 1679 dva_t *pdva = pio->io_bp->blk_dva; 1680 uint64_t asize; 1681 1682 if (BP_IS_HOLE(zio->io_bp)) 1683 return; 1684 1685 ASSERT(BP_IS_HOLE(&zio->io_bp_orig)); 1686 1687 ASSERT(zio->io_child_type == ZIO_CHILD_GANG); 1688 ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies); 1689 ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp)); 1690 ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp)); 1691 ASSERT3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp)); 1692 1693 mutex_enter(&pio->io_lock); 1694 for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) { 1695 ASSERT(DVA_GET_GANG(&pdva[d])); 1696 asize = DVA_GET_ASIZE(&pdva[d]); 1697 asize += DVA_GET_ASIZE(&cdva[d]); 1698 DVA_SET_ASIZE(&pdva[d], asize); 1699 } 1700 mutex_exit(&pio->io_lock); 1701 } 1702 1703 static int 1704 zio_write_gang_block(zio_t *pio) 1705 { 1706 spa_t *spa = pio->io_spa; 1707 blkptr_t *bp = pio->io_bp; 1708 zio_t *gio = pio->io_gang_leader; 1709 zio_t *zio; 1710 zio_gang_node_t *gn, **gnpp; 1711 zio_gbh_phys_t *gbh; 1712 uint64_t txg = pio->io_txg; 1713 uint64_t resid = pio->io_size; 1714 uint64_t lsize; 1715 int copies = gio->io_prop.zp_copies; 1716 int gbh_copies = MIN(copies + 1, spa_max_replication(spa)); 1717 zio_prop_t zp; 1718 int error; 1719 1720 error = metaslab_alloc(spa, spa_normal_class(spa), SPA_GANGBLOCKSIZE, 1721 bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, 1722 METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER); 1723 if (error) { 1724 pio->io_error = error; 1725 return (ZIO_PIPELINE_CONTINUE); 1726 } 1727 1728 if (pio == gio) { 1729 gnpp = &gio->io_gang_tree; 1730 } else { 1731 gnpp = pio->io_private; 1732 ASSERT(pio->io_ready == zio_write_gang_member_ready); 1733 } 1734 1735 gn = zio_gang_node_alloc(gnpp); 1736 gbh = gn->gn_gbh; 1737 bzero(gbh, SPA_GANGBLOCKSIZE); 1738 1739 /* 1740 * Create the gang header. 1741 */ 1742 zio = zio_rewrite(pio, spa, txg, bp, gbh, SPA_GANGBLOCKSIZE, NULL, NULL, 1743 pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 1744 1745 /* 1746 * Create and nowait the gang children. 1747 */ 1748 for (int g = 0; resid != 0; resid -= lsize, g++) { 1749 lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g), 1750 SPA_MINBLOCKSIZE); 1751 ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid); 1752 1753 zp.zp_checksum = gio->io_prop.zp_checksum; 1754 zp.zp_compress = ZIO_COMPRESS_OFF; 1755 zp.zp_type = DMU_OT_NONE; 1756 zp.zp_level = 0; 1757 zp.zp_copies = gio->io_prop.zp_copies; 1758 zp.zp_dedup = 0; 1759 zp.zp_dedup_verify = 0; 1760 1761 zio_nowait(zio_write(zio, spa, txg, &gbh->zg_blkptr[g], 1762 (char *)pio->io_data + (pio->io_size - resid), lsize, &zp, 1763 zio_write_gang_member_ready, NULL, &gn->gn_child[g], 1764 pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), 1765 &pio->io_bookmark)); 1766 } 1767 1768 /* 1769 * Set pio's pipeline to just wait for zio to finish. 1770 */ 1771 pio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 1772 1773 zio_nowait(zio); 1774 1775 return (ZIO_PIPELINE_CONTINUE); 1776 } 1777 1778 /* 1779 * ========================================================================== 1780 * Dedup 1781 * ========================================================================== 1782 */ 1783 static void 1784 zio_ddt_child_read_done(zio_t *zio) 1785 { 1786 blkptr_t *bp = zio->io_bp; 1787 ddt_entry_t *dde = zio->io_private; 1788 ddt_phys_t *ddp; 1789 zio_t *pio = zio_unique_parent(zio); 1790 1791 mutex_enter(&pio->io_lock); 1792 ddp = ddt_phys_select(dde, bp); 1793 if (zio->io_error == 0) 1794 ddt_phys_clear(ddp); /* this ddp doesn't need repair */ 1795 if (zio->io_error == 0 && dde->dde_repair_data == NULL) 1796 dde->dde_repair_data = zio->io_data; 1797 else 1798 zio_buf_free(zio->io_data, zio->io_size); 1799 mutex_exit(&pio->io_lock); 1800 } 1801 1802 static int 1803 zio_ddt_read_start(zio_t *zio) 1804 { 1805 blkptr_t *bp = zio->io_bp; 1806 1807 ASSERT(BP_GET_DEDUP(bp)); 1808 ASSERT(BP_GET_PSIZE(bp) == zio->io_size); 1809 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 1810 1811 if (zio->io_child_error[ZIO_CHILD_DDT]) { 1812 ddt_t *ddt = ddt_select(zio->io_spa, bp); 1813 ddt_entry_t *dde = ddt_repair_start(ddt, bp); 1814 ddt_phys_t *ddp = dde->dde_phys; 1815 ddt_phys_t *ddp_self = ddt_phys_select(dde, bp); 1816 blkptr_t blk; 1817 1818 ASSERT(zio->io_vsd == NULL); 1819 zio->io_vsd = dde; 1820 1821 if (ddp_self == NULL) 1822 return (ZIO_PIPELINE_CONTINUE); 1823 1824 for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { 1825 if (ddp->ddp_phys_birth == 0 || ddp == ddp_self) 1826 continue; 1827 ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp, 1828 &blk); 1829 zio_nowait(zio_read(zio, zio->io_spa, &blk, 1830 zio_buf_alloc(zio->io_size), zio->io_size, 1831 zio_ddt_child_read_done, dde, zio->io_priority, 1832 ZIO_DDT_CHILD_FLAGS(zio) | ZIO_FLAG_DONT_PROPAGATE, 1833 &zio->io_bookmark)); 1834 } 1835 return (ZIO_PIPELINE_CONTINUE); 1836 } 1837 1838 zio_nowait(zio_read(zio, zio->io_spa, bp, 1839 zio->io_data, zio->io_size, NULL, NULL, zio->io_priority, 1840 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark)); 1841 1842 return (ZIO_PIPELINE_CONTINUE); 1843 } 1844 1845 static int 1846 zio_ddt_read_done(zio_t *zio) 1847 { 1848 blkptr_t *bp = zio->io_bp; 1849 1850 if (zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_DONE)) 1851 return (ZIO_PIPELINE_STOP); 1852 1853 ASSERT(BP_GET_DEDUP(bp)); 1854 ASSERT(BP_GET_PSIZE(bp) == zio->io_size); 1855 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 1856 1857 if (zio->io_child_error[ZIO_CHILD_DDT]) { 1858 ddt_t *ddt = ddt_select(zio->io_spa, bp); 1859 ddt_entry_t *dde = zio->io_vsd; 1860 if (ddt == NULL) { 1861 ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE); 1862 return (ZIO_PIPELINE_CONTINUE); 1863 } 1864 if (dde == NULL) { 1865 zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1; 1866 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE); 1867 return (ZIO_PIPELINE_STOP); 1868 } 1869 if (dde->dde_repair_data != NULL) { 1870 bcopy(dde->dde_repair_data, zio->io_data, zio->io_size); 1871 zio->io_child_error[ZIO_CHILD_DDT] = 0; 1872 } 1873 ddt_repair_done(ddt, dde); 1874 zio->io_vsd = NULL; 1875 } 1876 1877 ASSERT(zio->io_vsd == NULL); 1878 1879 return (ZIO_PIPELINE_CONTINUE); 1880 } 1881 1882 static boolean_t 1883 zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde) 1884 { 1885 spa_t *spa = zio->io_spa; 1886 1887 /* 1888 * Note: we compare the original data, not the transformed data, 1889 * because when zio->io_bp is an override bp, we will not have 1890 * pushed the I/O transforms. That's an important optimization 1891 * because otherwise we'd compress/encrypt all dmu_sync() data twice. 1892 */ 1893 for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { 1894 zio_t *lio = dde->dde_lead_zio[p]; 1895 1896 if (lio != NULL) { 1897 return (lio->io_orig_size != zio->io_orig_size || 1898 bcmp(zio->io_orig_data, lio->io_orig_data, 1899 zio->io_orig_size) != 0); 1900 } 1901 } 1902 1903 for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { 1904 ddt_phys_t *ddp = &dde->dde_phys[p]; 1905 1906 if (ddp->ddp_phys_birth != 0) { 1907 arc_buf_t *abuf = NULL; 1908 uint32_t aflags = ARC_WAIT; 1909 blkptr_t blk = *zio->io_bp; 1910 int error; 1911 1912 ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth); 1913 1914 ddt_exit(ddt); 1915 1916 error = arc_read_nolock(NULL, spa, &blk, 1917 arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ, 1918 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 1919 &aflags, &zio->io_bookmark); 1920 1921 if (error == 0) { 1922 if (arc_buf_size(abuf) != zio->io_orig_size || 1923 bcmp(abuf->b_data, zio->io_orig_data, 1924 zio->io_orig_size) != 0) 1925 error = EEXIST; 1926 VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1); 1927 } 1928 1929 ddt_enter(ddt); 1930 return (error != 0); 1931 } 1932 } 1933 1934 return (B_FALSE); 1935 } 1936 1937 static void 1938 zio_ddt_child_write_ready(zio_t *zio) 1939 { 1940 int p = zio->io_prop.zp_copies; 1941 ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp); 1942 ddt_entry_t *dde = zio->io_private; 1943 ddt_phys_t *ddp = &dde->dde_phys[p]; 1944 zio_t *pio; 1945 1946 if (zio->io_error) 1947 return; 1948 1949 ddt_enter(ddt); 1950 1951 ASSERT(dde->dde_lead_zio[p] == zio); 1952 1953 ddt_phys_fill(ddp, zio->io_bp); 1954 1955 while ((pio = zio_walk_parents(zio)) != NULL) 1956 ddt_bp_fill(ddp, pio->io_bp, zio->io_txg); 1957 1958 ddt_exit(ddt); 1959 } 1960 1961 static void 1962 zio_ddt_child_write_done(zio_t *zio) 1963 { 1964 int p = zio->io_prop.zp_copies; 1965 ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp); 1966 ddt_entry_t *dde = zio->io_private; 1967 ddt_phys_t *ddp = &dde->dde_phys[p]; 1968 1969 ddt_enter(ddt); 1970 1971 ASSERT(ddp->ddp_refcnt == 0); 1972 ASSERT(dde->dde_lead_zio[p] == zio); 1973 dde->dde_lead_zio[p] = NULL; 1974 1975 if (zio->io_error == 0) { 1976 while (zio_walk_parents(zio) != NULL) 1977 ddt_phys_addref(ddp); 1978 } else { 1979 ddt_phys_clear(ddp); 1980 } 1981 1982 ddt_exit(ddt); 1983 } 1984 1985 static void 1986 zio_ddt_ditto_write_done(zio_t *zio) 1987 { 1988 int p = DDT_PHYS_DITTO; 1989 zio_prop_t *zp = &zio->io_prop; 1990 blkptr_t *bp = zio->io_bp; 1991 ddt_t *ddt = ddt_select(zio->io_spa, bp); 1992 ddt_entry_t *dde = zio->io_private; 1993 ddt_phys_t *ddp = &dde->dde_phys[p]; 1994 ddt_key_t *ddk = &dde->dde_key; 1995 1996 ddt_enter(ddt); 1997 1998 ASSERT(ddp->ddp_refcnt == 0); 1999 ASSERT(dde->dde_lead_zio[p] == zio); 2000 dde->dde_lead_zio[p] = NULL; 2001 2002 if (zio->io_error == 0) { 2003 ASSERT(ZIO_CHECKSUM_EQUAL(bp->blk_cksum, ddk->ddk_cksum)); 2004 ASSERT(zp->zp_copies < SPA_DVAS_PER_BP); 2005 ASSERT(zp->zp_copies == BP_GET_NDVAS(bp) - BP_IS_GANG(bp)); 2006 if (ddp->ddp_phys_birth != 0) 2007 ddt_phys_free(ddt, ddk, ddp, zio->io_txg); 2008 ddt_phys_fill(ddp, bp); 2009 } 2010 2011 ddt_exit(ddt); 2012 } 2013 2014 static int 2015 zio_ddt_write(zio_t *zio) 2016 { 2017 spa_t *spa = zio->io_spa; 2018 blkptr_t *bp = zio->io_bp; 2019 uint64_t txg = zio->io_txg; 2020 zio_prop_t *zp = &zio->io_prop; 2021 int p = zp->zp_copies; 2022 int ditto_copies; 2023 zio_t *cio = NULL; 2024 zio_t *dio = NULL; 2025 ddt_t *ddt = ddt_select(spa, bp); 2026 ddt_entry_t *dde; 2027 ddt_phys_t *ddp; 2028 2029 ASSERT(BP_GET_DEDUP(bp)); 2030 ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum); 2031 ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override); 2032 2033 ddt_enter(ddt); 2034 dde = ddt_lookup(ddt, bp, B_TRUE); 2035 ddp = &dde->dde_phys[p]; 2036 2037 if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) { 2038 /* 2039 * If we're using a weak checksum, upgrade to a strong checksum 2040 * and try again. If we're already using a strong checksum, 2041 * we can't resolve it, so just convert to an ordinary write. 2042 * (And automatically e-mail a paper to Nature?) 2043 */ 2044 if (!zio_checksum_table[zp->zp_checksum].ci_dedup) { 2045 zp->zp_checksum = spa_dedup_checksum(spa); 2046 zio_pop_transforms(zio); 2047 zio->io_stage = ZIO_STAGE_OPEN; 2048 BP_ZERO(bp); 2049 } else { 2050 zp->zp_dedup = 0; 2051 } 2052 zio->io_pipeline = ZIO_WRITE_PIPELINE; 2053 ddt_exit(ddt); 2054 return (ZIO_PIPELINE_CONTINUE); 2055 } 2056 2057 ditto_copies = ddt_ditto_copies_needed(ddt, dde, ddp); 2058 ASSERT(ditto_copies < SPA_DVAS_PER_BP); 2059 2060 if (ditto_copies > ddt_ditto_copies_present(dde) && 2061 dde->dde_lead_zio[DDT_PHYS_DITTO] == NULL) { 2062 zio_prop_t czp = *zp; 2063 2064 czp.zp_copies = ditto_copies; 2065 2066 /* 2067 * If we arrived here with an override bp, we won't have run 2068 * the transform stack, so we won't have the data we need to 2069 * generate a child i/o. So, toss the override bp and restart. 2070 * This is safe, because using the override bp is just an 2071 * optimization; and it's rare, so the cost doesn't matter. 2072 */ 2073 if (zio->io_bp_override) { 2074 zio_pop_transforms(zio); 2075 zio->io_stage = ZIO_STAGE_OPEN; 2076 zio->io_pipeline = ZIO_WRITE_PIPELINE; 2077 zio->io_bp_override = NULL; 2078 BP_ZERO(bp); 2079 ddt_exit(ddt); 2080 return (ZIO_PIPELINE_CONTINUE); 2081 } 2082 2083 dio = zio_write(zio, spa, txg, bp, zio->io_orig_data, 2084 zio->io_orig_size, &czp, NULL, 2085 zio_ddt_ditto_write_done, dde, zio->io_priority, 2086 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark); 2087 2088 zio_push_transform(dio, zio->io_data, zio->io_size, 0, NULL); 2089 dde->dde_lead_zio[DDT_PHYS_DITTO] = dio; 2090 } 2091 2092 if (ddp->ddp_phys_birth != 0 || dde->dde_lead_zio[p] != NULL) { 2093 if (ddp->ddp_phys_birth != 0) 2094 ddt_bp_fill(ddp, bp, txg); 2095 if (dde->dde_lead_zio[p] != NULL) 2096 zio_add_child(zio, dde->dde_lead_zio[p]); 2097 else 2098 ddt_phys_addref(ddp); 2099 } else if (zio->io_bp_override) { 2100 ASSERT(bp->blk_birth == txg); 2101 ASSERT(BP_EQUAL(bp, zio->io_bp_override)); 2102 ddt_phys_fill(ddp, bp); 2103 ddt_phys_addref(ddp); 2104 } else { 2105 cio = zio_write(zio, spa, txg, bp, zio->io_orig_data, 2106 zio->io_orig_size, zp, zio_ddt_child_write_ready, 2107 zio_ddt_child_write_done, dde, zio->io_priority, 2108 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark); 2109 2110 zio_push_transform(cio, zio->io_data, zio->io_size, 0, NULL); 2111 dde->dde_lead_zio[p] = cio; 2112 } 2113 2114 ddt_exit(ddt); 2115 2116 if (cio) 2117 zio_nowait(cio); 2118 if (dio) 2119 zio_nowait(dio); 2120 2121 return (ZIO_PIPELINE_CONTINUE); 2122 } 2123 2124 ddt_entry_t *freedde; /* for debugging */ 2125 2126 static int 2127 zio_ddt_free(zio_t *zio) 2128 { 2129 spa_t *spa = zio->io_spa; 2130 blkptr_t *bp = zio->io_bp; 2131 ddt_t *ddt = ddt_select(spa, bp); 2132 ddt_entry_t *dde; 2133 ddt_phys_t *ddp; 2134 2135 ASSERT(BP_GET_DEDUP(bp)); 2136 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2137 2138 ddt_enter(ddt); 2139 freedde = dde = ddt_lookup(ddt, bp, B_TRUE); 2140 ddp = ddt_phys_select(dde, bp); 2141 ddt_phys_decref(ddp); 2142 ddt_exit(ddt); 2143 2144 return (ZIO_PIPELINE_CONTINUE); 2145 } 2146 2147 /* 2148 * ========================================================================== 2149 * Allocate and free blocks 2150 * ========================================================================== 2151 */ 2152 static int 2153 zio_dva_allocate(zio_t *zio) 2154 { 2155 spa_t *spa = zio->io_spa; 2156 metaslab_class_t *mc = spa_normal_class(spa); 2157 blkptr_t *bp = zio->io_bp; 2158 int error; 2159 int flags = 0; 2160 2161 if (zio->io_gang_leader == NULL) { 2162 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 2163 zio->io_gang_leader = zio; 2164 } 2165 2166 ASSERT(BP_IS_HOLE(bp)); 2167 ASSERT0(BP_GET_NDVAS(bp)); 2168 ASSERT3U(zio->io_prop.zp_copies, >, 0); 2169 ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa)); 2170 ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp)); 2171 2172 /* 2173 * The dump device does not support gang blocks so allocation on 2174 * behalf of the dump device (i.e. ZIO_FLAG_NODATA) must avoid 2175 * the "fast" gang feature. 2176 */ 2177 flags |= (zio->io_flags & ZIO_FLAG_NODATA) ? METASLAB_GANG_AVOID : 0; 2178 flags |= (zio->io_flags & ZIO_FLAG_GANG_CHILD) ? 2179 METASLAB_GANG_CHILD : 0; 2180 error = metaslab_alloc(spa, mc, zio->io_size, bp, 2181 zio->io_prop.zp_copies, zio->io_txg, NULL, flags); 2182 2183 if (error) { 2184 spa_dbgmsg(spa, "%s: metaslab allocation failure: zio %p, " 2185 "size %llu, error %d", spa_name(spa), zio, zio->io_size, 2186 error); 2187 if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE) 2188 return (zio_write_gang_block(zio)); 2189 zio->io_error = error; 2190 } 2191 2192 return (ZIO_PIPELINE_CONTINUE); 2193 } 2194 2195 static int 2196 zio_dva_free(zio_t *zio) 2197 { 2198 metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE); 2199 2200 return (ZIO_PIPELINE_CONTINUE); 2201 } 2202 2203 static int 2204 zio_dva_claim(zio_t *zio) 2205 { 2206 int error; 2207 2208 error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg); 2209 if (error) 2210 zio->io_error = error; 2211 2212 return (ZIO_PIPELINE_CONTINUE); 2213 } 2214 2215 /* 2216 * Undo an allocation. This is used by zio_done() when an I/O fails 2217 * and we want to give back the block we just allocated. 2218 * This handles both normal blocks and gang blocks. 2219 */ 2220 static void 2221 zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp) 2222 { 2223 ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp)); 2224 ASSERT(zio->io_bp_override == NULL); 2225 2226 if (!BP_IS_HOLE(bp)) 2227 metaslab_free(zio->io_spa, bp, bp->blk_birth, B_TRUE); 2228 2229 if (gn != NULL) { 2230 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 2231 zio_dva_unallocate(zio, gn->gn_child[g], 2232 &gn->gn_gbh->zg_blkptr[g]); 2233 } 2234 } 2235 } 2236 2237 /* 2238 * Try to allocate an intent log block. Return 0 on success, errno on failure. 2239 */ 2240 int 2241 zio_alloc_zil(spa_t *spa, uint64_t txg, blkptr_t *new_bp, blkptr_t *old_bp, 2242 uint64_t size, boolean_t use_slog) 2243 { 2244 int error = 1; 2245 2246 ASSERT(txg > spa_syncing_txg(spa)); 2247 2248 /* 2249 * ZIL blocks are always contiguous (i.e. not gang blocks) so we 2250 * set the METASLAB_GANG_AVOID flag so that they don't "fast gang" 2251 * when allocating them. 2252 */ 2253 if (use_slog) { 2254 error = metaslab_alloc(spa, spa_log_class(spa), size, 2255 new_bp, 1, txg, old_bp, 2256 METASLAB_HINTBP_AVOID | METASLAB_GANG_AVOID); 2257 } 2258 2259 if (error) { 2260 error = metaslab_alloc(spa, spa_normal_class(spa), size, 2261 new_bp, 1, txg, old_bp, 2262 METASLAB_HINTBP_AVOID | METASLAB_GANG_AVOID); 2263 } 2264 2265 if (error == 0) { 2266 BP_SET_LSIZE(new_bp, size); 2267 BP_SET_PSIZE(new_bp, size); 2268 BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF); 2269 BP_SET_CHECKSUM(new_bp, 2270 spa_version(spa) >= SPA_VERSION_SLIM_ZIL 2271 ? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG); 2272 BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG); 2273 BP_SET_LEVEL(new_bp, 0); 2274 BP_SET_DEDUP(new_bp, 0); 2275 BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER); 2276 } 2277 2278 return (error); 2279 } 2280 2281 /* 2282 * Free an intent log block. 2283 */ 2284 void 2285 zio_free_zil(spa_t *spa, uint64_t txg, blkptr_t *bp) 2286 { 2287 ASSERT(BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG); 2288 ASSERT(!BP_IS_GANG(bp)); 2289 2290 zio_free(spa, txg, bp); 2291 } 2292 2293 /* 2294 * ========================================================================== 2295 * Read and write to physical devices 2296 * ========================================================================== 2297 */ 2298 static int 2299 zio_vdev_io_start(zio_t *zio) 2300 { 2301 vdev_t *vd = zio->io_vd; 2302 uint64_t align; 2303 spa_t *spa = zio->io_spa; 2304 2305 ASSERT(zio->io_error == 0); 2306 ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0); 2307 2308 if (vd == NULL) { 2309 if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) 2310 spa_config_enter(spa, SCL_ZIO, zio, RW_READER); 2311 2312 /* 2313 * The mirror_ops handle multiple DVAs in a single BP. 2314 */ 2315 return (vdev_mirror_ops.vdev_op_io_start(zio)); 2316 } 2317 2318 /* 2319 * We keep track of time-sensitive I/Os so that the scan thread 2320 * can quickly react to certain workloads. In particular, we care 2321 * about non-scrubbing, top-level reads and writes with the following 2322 * characteristics: 2323 * - synchronous writes of user data to non-slog devices 2324 * - any reads of user data 2325 * When these conditions are met, adjust the timestamp of spa_last_io 2326 * which allows the scan thread to adjust its workload accordingly. 2327 */ 2328 if (!(zio->io_flags & ZIO_FLAG_SCAN_THREAD) && zio->io_bp != NULL && 2329 vd == vd->vdev_top && !vd->vdev_islog && 2330 zio->io_bookmark.zb_objset != DMU_META_OBJSET && 2331 zio->io_txg != spa_syncing_txg(spa)) { 2332 uint64_t old = spa->spa_last_io; 2333 uint64_t new = ddi_get_lbolt64(); 2334 if (old != new) 2335 (void) atomic_cas_64(&spa->spa_last_io, old, new); 2336 } 2337 2338 align = 1ULL << vd->vdev_top->vdev_ashift; 2339 2340 if (P2PHASE(zio->io_size, align) != 0) { 2341 uint64_t asize = P2ROUNDUP(zio->io_size, align); 2342 char *abuf = zio_buf_alloc(asize); 2343 ASSERT(vd == vd->vdev_top); 2344 if (zio->io_type == ZIO_TYPE_WRITE) { 2345 bcopy(zio->io_data, abuf, zio->io_size); 2346 bzero(abuf + zio->io_size, asize - zio->io_size); 2347 } 2348 zio_push_transform(zio, abuf, asize, asize, zio_subblock); 2349 } 2350 2351 ASSERT(P2PHASE(zio->io_offset, align) == 0); 2352 ASSERT(P2PHASE(zio->io_size, align) == 0); 2353 VERIFY(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa)); 2354 2355 /* 2356 * If this is a repair I/O, and there's no self-healing involved -- 2357 * that is, we're just resilvering what we expect to resilver -- 2358 * then don't do the I/O unless zio's txg is actually in vd's DTL. 2359 * This prevents spurious resilvering with nested replication. 2360 * For example, given a mirror of mirrors, (A+B)+(C+D), if only 2361 * A is out of date, we'll read from C+D, then use the data to 2362 * resilver A+B -- but we don't actually want to resilver B, just A. 2363 * The top-level mirror has no way to know this, so instead we just 2364 * discard unnecessary repairs as we work our way down the vdev tree. 2365 * The same logic applies to any form of nested replication: 2366 * ditto + mirror, RAID-Z + replacing, etc. This covers them all. 2367 */ 2368 if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) && 2369 !(zio->io_flags & ZIO_FLAG_SELF_HEAL) && 2370 zio->io_txg != 0 && /* not a delegated i/o */ 2371 !vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) { 2372 ASSERT(zio->io_type == ZIO_TYPE_WRITE); 2373 zio_vdev_io_bypass(zio); 2374 return (ZIO_PIPELINE_CONTINUE); 2375 } 2376 2377 if (vd->vdev_ops->vdev_op_leaf && 2378 (zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE)) { 2379 2380 if (zio->io_type == ZIO_TYPE_READ && vdev_cache_read(zio) == 0) 2381 return (ZIO_PIPELINE_CONTINUE); 2382 2383 if ((zio = vdev_queue_io(zio)) == NULL) 2384 return (ZIO_PIPELINE_STOP); 2385 2386 if (!vdev_accessible(vd, zio)) { 2387 zio->io_error = ENXIO; 2388 zio_interrupt(zio); 2389 return (ZIO_PIPELINE_STOP); 2390 } 2391 } 2392 2393 return (vd->vdev_ops->vdev_op_io_start(zio)); 2394 } 2395 2396 static int 2397 zio_vdev_io_done(zio_t *zio) 2398 { 2399 vdev_t *vd = zio->io_vd; 2400 vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops; 2401 boolean_t unexpected_error = B_FALSE; 2402 2403 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE)) 2404 return (ZIO_PIPELINE_STOP); 2405 2406 ASSERT(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE); 2407 2408 if (vd != NULL && vd->vdev_ops->vdev_op_leaf) { 2409 2410 vdev_queue_io_done(zio); 2411 2412 if (zio->io_type == ZIO_TYPE_WRITE) 2413 vdev_cache_write(zio); 2414 2415 if (zio_injection_enabled && zio->io_error == 0) 2416 zio->io_error = zio_handle_device_injection(vd, 2417 zio, EIO); 2418 2419 if (zio_injection_enabled && zio->io_error == 0) 2420 zio->io_error = zio_handle_label_injection(zio, EIO); 2421 2422 if (zio->io_error) { 2423 if (!vdev_accessible(vd, zio)) { 2424 zio->io_error = ENXIO; 2425 } else { 2426 unexpected_error = B_TRUE; 2427 } 2428 } 2429 } 2430 2431 ops->vdev_op_io_done(zio); 2432 2433 if (unexpected_error) 2434 VERIFY(vdev_probe(vd, zio) == NULL); 2435 2436 return (ZIO_PIPELINE_CONTINUE); 2437 } 2438 2439 /* 2440 * For non-raidz ZIOs, we can just copy aside the bad data read from the 2441 * disk, and use that to finish the checksum ereport later. 2442 */ 2443 static void 2444 zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr, 2445 const void *good_buf) 2446 { 2447 /* no processing needed */ 2448 zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE); 2449 } 2450 2451 /*ARGSUSED*/ 2452 void 2453 zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr, void *ignored) 2454 { 2455 void *buf = zio_buf_alloc(zio->io_size); 2456 2457 bcopy(zio->io_data, buf, zio->io_size); 2458 2459 zcr->zcr_cbinfo = zio->io_size; 2460 zcr->zcr_cbdata = buf; 2461 zcr->zcr_finish = zio_vsd_default_cksum_finish; 2462 zcr->zcr_free = zio_buf_free; 2463 } 2464 2465 static int 2466 zio_vdev_io_assess(zio_t *zio) 2467 { 2468 vdev_t *vd = zio->io_vd; 2469 2470 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE)) 2471 return (ZIO_PIPELINE_STOP); 2472 2473 if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) 2474 spa_config_exit(zio->io_spa, SCL_ZIO, zio); 2475 2476 if (zio->io_vsd != NULL) { 2477 zio->io_vsd_ops->vsd_free(zio); 2478 zio->io_vsd = NULL; 2479 } 2480 2481 if (zio_injection_enabled && zio->io_error == 0) 2482 zio->io_error = zio_handle_fault_injection(zio, EIO); 2483 2484 /* 2485 * If the I/O failed, determine whether we should attempt to retry it. 2486 * 2487 * On retry, we cut in line in the issue queue, since we don't want 2488 * compression/checksumming/etc. work to prevent our (cheap) IO reissue. 2489 */ 2490 if (zio->io_error && vd == NULL && 2491 !(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) { 2492 ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE)); /* not a leaf */ 2493 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS)); /* not a leaf */ 2494 zio->io_error = 0; 2495 zio->io_flags |= ZIO_FLAG_IO_RETRY | 2496 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE; 2497 zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1; 2498 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, 2499 zio_requeue_io_start_cut_in_line); 2500 return (ZIO_PIPELINE_STOP); 2501 } 2502 2503 /* 2504 * If we got an error on a leaf device, convert it to ENXIO 2505 * if the device is not accessible at all. 2506 */ 2507 if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf && 2508 !vdev_accessible(vd, zio)) 2509 zio->io_error = ENXIO; 2510 2511 /* 2512 * If we can't write to an interior vdev (mirror or RAID-Z), 2513 * set vdev_cant_write so that we stop trying to allocate from it. 2514 */ 2515 if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE && 2516 vd != NULL && !vd->vdev_ops->vdev_op_leaf) 2517 vd->vdev_cant_write = B_TRUE; 2518 2519 if (zio->io_error) 2520 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 2521 2522 return (ZIO_PIPELINE_CONTINUE); 2523 } 2524 2525 void 2526 zio_vdev_io_reissue(zio_t *zio) 2527 { 2528 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); 2529 ASSERT(zio->io_error == 0); 2530 2531 zio->io_stage >>= 1; 2532 } 2533 2534 void 2535 zio_vdev_io_redone(zio_t *zio) 2536 { 2537 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE); 2538 2539 zio->io_stage >>= 1; 2540 } 2541 2542 void 2543 zio_vdev_io_bypass(zio_t *zio) 2544 { 2545 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); 2546 ASSERT(zio->io_error == 0); 2547 2548 zio->io_flags |= ZIO_FLAG_IO_BYPASS; 2549 zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1; 2550 } 2551 2552 /* 2553 * ========================================================================== 2554 * Generate and verify checksums 2555 * ========================================================================== 2556 */ 2557 static int 2558 zio_checksum_generate(zio_t *zio) 2559 { 2560 blkptr_t *bp = zio->io_bp; 2561 enum zio_checksum checksum; 2562 2563 if (bp == NULL) { 2564 /* 2565 * This is zio_write_phys(). 2566 * We're either generating a label checksum, or none at all. 2567 */ 2568 checksum = zio->io_prop.zp_checksum; 2569 2570 if (checksum == ZIO_CHECKSUM_OFF) 2571 return (ZIO_PIPELINE_CONTINUE); 2572 2573 ASSERT(checksum == ZIO_CHECKSUM_LABEL); 2574 } else { 2575 if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) { 2576 ASSERT(!IO_IS_ALLOCATING(zio)); 2577 checksum = ZIO_CHECKSUM_GANG_HEADER; 2578 } else { 2579 checksum = BP_GET_CHECKSUM(bp); 2580 } 2581 } 2582 2583 zio_checksum_compute(zio, checksum, zio->io_data, zio->io_size); 2584 2585 return (ZIO_PIPELINE_CONTINUE); 2586 } 2587 2588 static int 2589 zio_checksum_verify(zio_t *zio) 2590 { 2591 zio_bad_cksum_t info; 2592 blkptr_t *bp = zio->io_bp; 2593 int error; 2594 2595 ASSERT(zio->io_vd != NULL); 2596 2597 if (bp == NULL) { 2598 /* 2599 * This is zio_read_phys(). 2600 * We're either verifying a label checksum, or nothing at all. 2601 */ 2602 if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF) 2603 return (ZIO_PIPELINE_CONTINUE); 2604 2605 ASSERT(zio->io_prop.zp_checksum == ZIO_CHECKSUM_LABEL); 2606 } 2607 2608 if ((error = zio_checksum_error(zio, &info)) != 0) { 2609 zio->io_error = error; 2610 if (!(zio->io_flags & ZIO_FLAG_SPECULATIVE)) { 2611 zfs_ereport_start_checksum(zio->io_spa, 2612 zio->io_vd, zio, zio->io_offset, 2613 zio->io_size, NULL, &info); 2614 } 2615 } 2616 2617 return (ZIO_PIPELINE_CONTINUE); 2618 } 2619 2620 /* 2621 * Called by RAID-Z to ensure we don't compute the checksum twice. 2622 */ 2623 void 2624 zio_checksum_verified(zio_t *zio) 2625 { 2626 zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY; 2627 } 2628 2629 /* 2630 * ========================================================================== 2631 * Error rank. Error are ranked in the order 0, ENXIO, ECKSUM, EIO, other. 2632 * An error of 0 indictes success. ENXIO indicates whole-device failure, 2633 * which may be transient (e.g. unplugged) or permament. ECKSUM and EIO 2634 * indicate errors that are specific to one I/O, and most likely permanent. 2635 * Any other error is presumed to be worse because we weren't expecting it. 2636 * ========================================================================== 2637 */ 2638 int 2639 zio_worst_error(int e1, int e2) 2640 { 2641 static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO }; 2642 int r1, r2; 2643 2644 for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++) 2645 if (e1 == zio_error_rank[r1]) 2646 break; 2647 2648 for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++) 2649 if (e2 == zio_error_rank[r2]) 2650 break; 2651 2652 return (r1 > r2 ? e1 : e2); 2653 } 2654 2655 /* 2656 * ========================================================================== 2657 * I/O completion 2658 * ========================================================================== 2659 */ 2660 static int 2661 zio_ready(zio_t *zio) 2662 { 2663 blkptr_t *bp = zio->io_bp; 2664 zio_t *pio, *pio_next; 2665 2666 if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_READY) || 2667 zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_READY)) 2668 return (ZIO_PIPELINE_STOP); 2669 2670 if (zio->io_ready) { 2671 ASSERT(IO_IS_ALLOCATING(zio)); 2672 ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp)); 2673 ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0); 2674 2675 zio->io_ready(zio); 2676 } 2677 2678 if (bp != NULL && bp != &zio->io_bp_copy) 2679 zio->io_bp_copy = *bp; 2680 2681 if (zio->io_error) 2682 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 2683 2684 mutex_enter(&zio->io_lock); 2685 zio->io_state[ZIO_WAIT_READY] = 1; 2686 pio = zio_walk_parents(zio); 2687 mutex_exit(&zio->io_lock); 2688 2689 /* 2690 * As we notify zio's parents, new parents could be added. 2691 * New parents go to the head of zio's io_parent_list, however, 2692 * so we will (correctly) not notify them. The remainder of zio's 2693 * io_parent_list, from 'pio_next' onward, cannot change because 2694 * all parents must wait for us to be done before they can be done. 2695 */ 2696 for (; pio != NULL; pio = pio_next) { 2697 pio_next = zio_walk_parents(zio); 2698 zio_notify_parent(pio, zio, ZIO_WAIT_READY); 2699 } 2700 2701 if (zio->io_flags & ZIO_FLAG_NODATA) { 2702 if (BP_IS_GANG(bp)) { 2703 zio->io_flags &= ~ZIO_FLAG_NODATA; 2704 } else { 2705 ASSERT((uintptr_t)zio->io_data < SPA_MAXBLOCKSIZE); 2706 zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES; 2707 } 2708 } 2709 2710 if (zio_injection_enabled && 2711 zio->io_spa->spa_syncing_txg == zio->io_txg) 2712 zio_handle_ignored_writes(zio); 2713 2714 return (ZIO_PIPELINE_CONTINUE); 2715 } 2716 2717 static int 2718 zio_done(zio_t *zio) 2719 { 2720 spa_t *spa = zio->io_spa; 2721 zio_t *lio = zio->io_logical; 2722 blkptr_t *bp = zio->io_bp; 2723 vdev_t *vd = zio->io_vd; 2724 uint64_t psize = zio->io_size; 2725 zio_t *pio, *pio_next; 2726 2727 /* 2728 * If our children haven't all completed, 2729 * wait for them and then repeat this pipeline stage. 2730 */ 2731 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE) || 2732 zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_DONE) || 2733 zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_DONE) || 2734 zio_wait_for_children(zio, ZIO_CHILD_LOGICAL, ZIO_WAIT_DONE)) 2735 return (ZIO_PIPELINE_STOP); 2736 2737 for (int c = 0; c < ZIO_CHILD_TYPES; c++) 2738 for (int w = 0; w < ZIO_WAIT_TYPES; w++) 2739 ASSERT(zio->io_children[c][w] == 0); 2740 2741 if (bp != NULL) { 2742 ASSERT(bp->blk_pad[0] == 0); 2743 ASSERT(bp->blk_pad[1] == 0); 2744 ASSERT(bcmp(bp, &zio->io_bp_copy, sizeof (blkptr_t)) == 0 || 2745 (bp == zio_unique_parent(zio)->io_bp)); 2746 if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(bp) && 2747 zio->io_bp_override == NULL && 2748 !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) { 2749 ASSERT(!BP_SHOULD_BYTESWAP(bp)); 2750 ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(bp)); 2751 ASSERT(BP_COUNT_GANG(bp) == 0 || 2752 (BP_COUNT_GANG(bp) == BP_GET_NDVAS(bp))); 2753 } 2754 } 2755 2756 /* 2757 * If there were child vdev/gang/ddt errors, they apply to us now. 2758 */ 2759 zio_inherit_child_errors(zio, ZIO_CHILD_VDEV); 2760 zio_inherit_child_errors(zio, ZIO_CHILD_GANG); 2761 zio_inherit_child_errors(zio, ZIO_CHILD_DDT); 2762 2763 /* 2764 * If the I/O on the transformed data was successful, generate any 2765 * checksum reports now while we still have the transformed data. 2766 */ 2767 if (zio->io_error == 0) { 2768 while (zio->io_cksum_report != NULL) { 2769 zio_cksum_report_t *zcr = zio->io_cksum_report; 2770 uint64_t align = zcr->zcr_align; 2771 uint64_t asize = P2ROUNDUP(psize, align); 2772 char *abuf = zio->io_data; 2773 2774 if (asize != psize) { 2775 abuf = zio_buf_alloc(asize); 2776 bcopy(zio->io_data, abuf, psize); 2777 bzero(abuf + psize, asize - psize); 2778 } 2779 2780 zio->io_cksum_report = zcr->zcr_next; 2781 zcr->zcr_next = NULL; 2782 zcr->zcr_finish(zcr, abuf); 2783 zfs_ereport_free_checksum(zcr); 2784 2785 if (asize != psize) 2786 zio_buf_free(abuf, asize); 2787 } 2788 } 2789 2790 zio_pop_transforms(zio); /* note: may set zio->io_error */ 2791 2792 vdev_stat_update(zio, psize); 2793 2794 if (zio->io_error) { 2795 /* 2796 * If this I/O is attached to a particular vdev, 2797 * generate an error message describing the I/O failure 2798 * at the block level. We ignore these errors if the 2799 * device is currently unavailable. 2800 */ 2801 if (zio->io_error != ECKSUM && vd != NULL && !vdev_is_dead(vd)) 2802 zfs_ereport_post(FM_EREPORT_ZFS_IO, spa, vd, zio, 0, 0); 2803 2804 if ((zio->io_error == EIO || !(zio->io_flags & 2805 (ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) && 2806 zio == lio) { 2807 /* 2808 * For logical I/O requests, tell the SPA to log the 2809 * error and generate a logical data ereport. 2810 */ 2811 spa_log_error(spa, zio); 2812 zfs_ereport_post(FM_EREPORT_ZFS_DATA, spa, NULL, zio, 2813 0, 0); 2814 } 2815 } 2816 2817 if (zio->io_error && zio == lio) { 2818 /* 2819 * Determine whether zio should be reexecuted. This will 2820 * propagate all the way to the root via zio_notify_parent(). 2821 */ 2822 ASSERT(vd == NULL && bp != NULL); 2823 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2824 2825 if (IO_IS_ALLOCATING(zio) && 2826 !(zio->io_flags & ZIO_FLAG_CANFAIL)) { 2827 if (zio->io_error != ENOSPC) 2828 zio->io_reexecute |= ZIO_REEXECUTE_NOW; 2829 else 2830 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 2831 } 2832 2833 if ((zio->io_type == ZIO_TYPE_READ || 2834 zio->io_type == ZIO_TYPE_FREE) && 2835 !(zio->io_flags & ZIO_FLAG_SCAN_THREAD) && 2836 zio->io_error == ENXIO && 2837 spa_load_state(spa) == SPA_LOAD_NONE && 2838 spa_get_failmode(spa) != ZIO_FAILURE_MODE_CONTINUE) 2839 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 2840 2841 if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute) 2842 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 2843 2844 /* 2845 * Here is a possibly good place to attempt to do 2846 * either combinatorial reconstruction or error correction 2847 * based on checksums. It also might be a good place 2848 * to send out preliminary ereports before we suspend 2849 * processing. 2850 */ 2851 } 2852 2853 /* 2854 * If there were logical child errors, they apply to us now. 2855 * We defer this until now to avoid conflating logical child 2856 * errors with errors that happened to the zio itself when 2857 * updating vdev stats and reporting FMA events above. 2858 */ 2859 zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL); 2860 2861 if ((zio->io_error || zio->io_reexecute) && 2862 IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio && 2863 !(zio->io_flags & ZIO_FLAG_IO_REWRITE)) 2864 zio_dva_unallocate(zio, zio->io_gang_tree, bp); 2865 2866 zio_gang_tree_free(&zio->io_gang_tree); 2867 2868 /* 2869 * Godfather I/Os should never suspend. 2870 */ 2871 if ((zio->io_flags & ZIO_FLAG_GODFATHER) && 2872 (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) 2873 zio->io_reexecute = 0; 2874 2875 if (zio->io_reexecute) { 2876 /* 2877 * This is a logical I/O that wants to reexecute. 2878 * 2879 * Reexecute is top-down. When an i/o fails, if it's not 2880 * the root, it simply notifies its parent and sticks around. 2881 * The parent, seeing that it still has children in zio_done(), 2882 * does the same. This percolates all the way up to the root. 2883 * The root i/o will reexecute or suspend the entire tree. 2884 * 2885 * This approach ensures that zio_reexecute() honors 2886 * all the original i/o dependency relationships, e.g. 2887 * parents not executing until children are ready. 2888 */ 2889 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2890 2891 zio->io_gang_leader = NULL; 2892 2893 mutex_enter(&zio->io_lock); 2894 zio->io_state[ZIO_WAIT_DONE] = 1; 2895 mutex_exit(&zio->io_lock); 2896 2897 /* 2898 * "The Godfather" I/O monitors its children but is 2899 * not a true parent to them. It will track them through 2900 * the pipeline but severs its ties whenever they get into 2901 * trouble (e.g. suspended). This allows "The Godfather" 2902 * I/O to return status without blocking. 2903 */ 2904 for (pio = zio_walk_parents(zio); pio != NULL; pio = pio_next) { 2905 zio_link_t *zl = zio->io_walk_link; 2906 pio_next = zio_walk_parents(zio); 2907 2908 if ((pio->io_flags & ZIO_FLAG_GODFATHER) && 2909 (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) { 2910 zio_remove_child(pio, zio, zl); 2911 zio_notify_parent(pio, zio, ZIO_WAIT_DONE); 2912 } 2913 } 2914 2915 if ((pio = zio_unique_parent(zio)) != NULL) { 2916 /* 2917 * We're not a root i/o, so there's nothing to do 2918 * but notify our parent. Don't propagate errors 2919 * upward since we haven't permanently failed yet. 2920 */ 2921 ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER)); 2922 zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE; 2923 zio_notify_parent(pio, zio, ZIO_WAIT_DONE); 2924 } else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) { 2925 /* 2926 * We'd fail again if we reexecuted now, so suspend 2927 * until conditions improve (e.g. device comes online). 2928 */ 2929 zio_suspend(spa, zio); 2930 } else { 2931 /* 2932 * Reexecution is potentially a huge amount of work. 2933 * Hand it off to the otherwise-unused claim taskq. 2934 */ 2935 ASSERT(zio->io_tqent.tqent_next == NULL); 2936 taskq_dispatch_ent( 2937 spa->spa_zio_taskq[ZIO_TYPE_CLAIM][ZIO_TASKQ_ISSUE], 2938 (task_func_t *)zio_reexecute, zio, 0, 2939 &zio->io_tqent); 2940 } 2941 return (ZIO_PIPELINE_STOP); 2942 } 2943 2944 ASSERT(zio->io_child_count == 0); 2945 ASSERT(zio->io_reexecute == 0); 2946 ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL)); 2947 2948 /* 2949 * Report any checksum errors, since the I/O is complete. 2950 */ 2951 while (zio->io_cksum_report != NULL) { 2952 zio_cksum_report_t *zcr = zio->io_cksum_report; 2953 zio->io_cksum_report = zcr->zcr_next; 2954 zcr->zcr_next = NULL; 2955 zcr->zcr_finish(zcr, NULL); 2956 zfs_ereport_free_checksum(zcr); 2957 } 2958 2959 /* 2960 * It is the responsibility of the done callback to ensure that this 2961 * particular zio is no longer discoverable for adoption, and as 2962 * such, cannot acquire any new parents. 2963 */ 2964 if (zio->io_done) 2965 zio->io_done(zio); 2966 2967 mutex_enter(&zio->io_lock); 2968 zio->io_state[ZIO_WAIT_DONE] = 1; 2969 mutex_exit(&zio->io_lock); 2970 2971 for (pio = zio_walk_parents(zio); pio != NULL; pio = pio_next) { 2972 zio_link_t *zl = zio->io_walk_link; 2973 pio_next = zio_walk_parents(zio); 2974 zio_remove_child(pio, zio, zl); 2975 zio_notify_parent(pio, zio, ZIO_WAIT_DONE); 2976 } 2977 2978 if (zio->io_waiter != NULL) { 2979 mutex_enter(&zio->io_lock); 2980 zio->io_executor = NULL; 2981 cv_broadcast(&zio->io_cv); 2982 mutex_exit(&zio->io_lock); 2983 } else { 2984 zio_destroy(zio); 2985 } 2986 2987 return (ZIO_PIPELINE_STOP); 2988 } 2989 2990 /* 2991 * ========================================================================== 2992 * I/O pipeline definition 2993 * ========================================================================== 2994 */ 2995 static zio_pipe_stage_t *zio_pipeline[] = { 2996 NULL, 2997 zio_read_bp_init, 2998 zio_free_bp_init, 2999 zio_issue_async, 3000 zio_write_bp_init, 3001 zio_checksum_generate, 3002 zio_ddt_read_start, 3003 zio_ddt_read_done, 3004 zio_ddt_write, 3005 zio_ddt_free, 3006 zio_gang_assemble, 3007 zio_gang_issue, 3008 zio_dva_allocate, 3009 zio_dva_free, 3010 zio_dva_claim, 3011 zio_ready, 3012 zio_vdev_io_start, 3013 zio_vdev_io_done, 3014 zio_vdev_io_assess, 3015 zio_checksum_verify, 3016 zio_done 3017 }; 3018 3019 /* dnp is the dnode for zb1->zb_object */ 3020 boolean_t 3021 zbookmark_is_before(const dnode_phys_t *dnp, const zbookmark_t *zb1, 3022 const zbookmark_t *zb2) 3023 { 3024 uint64_t zb1nextL0, zb2thisobj; 3025 3026 ASSERT(zb1->zb_objset == zb2->zb_objset); 3027 ASSERT(zb2->zb_level == 0); 3028 3029 /* 3030 * A bookmark in the deadlist is considered to be after 3031 * everything else. 3032 */ 3033 if (zb2->zb_object == DMU_DEADLIST_OBJECT) 3034 return (B_TRUE); 3035 3036 /* The objset_phys_t isn't before anything. */ 3037 if (dnp == NULL) 3038 return (B_FALSE); 3039 3040 zb1nextL0 = (zb1->zb_blkid + 1) << 3041 ((zb1->zb_level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT)); 3042 3043 zb2thisobj = zb2->zb_object ? zb2->zb_object : 3044 zb2->zb_blkid << (DNODE_BLOCK_SHIFT - DNODE_SHIFT); 3045 3046 if (zb1->zb_object == DMU_META_DNODE_OBJECT) { 3047 uint64_t nextobj = zb1nextL0 * 3048 (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT) >> DNODE_SHIFT; 3049 return (nextobj <= zb2thisobj); 3050 } 3051 3052 if (zb1->zb_object < zb2thisobj) 3053 return (B_TRUE); 3054 if (zb1->zb_object > zb2thisobj) 3055 return (B_FALSE); 3056 if (zb2->zb_object == DMU_META_DNODE_OBJECT) 3057 return (B_FALSE); 3058 return (zb1nextL0 <= zb2->zb_blkid); 3059 } 3060