/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include <sys/zfeature.h>

/*
 * ==========================================================================
 * I/O type descriptions
 * ==========================================================================
 */
const char *zio_type_name[ZIO_TYPES] = {
	"zio_null", "zio_read", "zio_write", "zio_free", "zio_claim",
	"zio_ioctl"
};

/*
 * ==========================================================================
 * I/O kmem caches
 * ==========================================================================
 */
kmem_cache_t *zio_cache;
kmem_cache_t *zio_link_cache;
kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];

#ifdef _KERNEL
extern vmem_t *zio_alloc_arena;
#endif

/*
 * The following actions directly affect the spa's sync-to-convergence logic.
 * The values below define the sync pass when we start performing the action.
 * Care should be taken when changing these values as they directly impact
 * spa_sync() performance. Tuning these values may introduce subtle performance
 * pathologies and should only be done in the context of performance analysis.
 * These tunables will eventually be removed and replaced with #defines once
 * enough analysis has been done to determine optimal values.
 *
 * The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that
 * regular blocks are not deferred.
 */
int zfs_sync_pass_deferred_free = 2; /* defer frees starting in this pass */
int zfs_sync_pass_dont_compress = 5; /* don't compress starting in this pass */
int zfs_sync_pass_rewrite = 2; /* rewrite new bps starting in this pass */
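
/*
 * Illustration only (not a new code path): consumers gate behavior on the
 * current sync pass with comparisons of this form, exactly as
 * zio_write_bp_init() and zio_free() do later in this file:
 *
 *	if (spa_sync_pass(spa) >= zfs_sync_pass_dont_compress)
 *		compress = ZIO_COMPRESS_OFF;
 */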

/*
 * An allocating zio is one that either currently has the DVA allocate
 * stage set or will have it later in its lifetime.
 */
#define	IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)

boolean_t zio_requeue_io_start_cut_in_line = B_TRUE;

#ifdef ZFS_DEBUG
int zio_buf_debug_limit = 16384;
#else
int zio_buf_debug_limit = 0;
#endif

void
zio_init(void)
{
	size_t c;
	vmem_t *data_alloc_arena = NULL;

#ifdef _KERNEL
	data_alloc_arena = zio_alloc_arena;
#endif
	zio_cache = kmem_cache_create("zio_cache",
	    sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
	zio_link_cache = kmem_cache_create("zio_link_cache",
	    sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	/*
	 * For small buffers, we want a cache for each multiple of
	 * SPA_MINBLOCKSIZE. For medium-size buffers, we want a cache
	 * for each quarter-power of 2. For large buffers, we want
	 * a cache for each multiple of PAGESIZE.
	 */
	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
		size_t p2 = size;
		size_t align = 0;
		size_t cflags = (size > zio_buf_debug_limit) ? KMC_NODEBUG : 0;

		while (p2 & (p2 - 1))
			p2 &= p2 - 1;

#ifndef _KERNEL
		/*
		 * If we are using watchpoints, put each buffer on its own page,
		 * to eliminate the performance overhead of trapping to the
		 * kernel when modifying a non-watched buffer that shares the
		 * page with a watched buffer.
		 */
		if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE))
			continue;
#endif
		if (size <= 4 * SPA_MINBLOCKSIZE) {
			align = SPA_MINBLOCKSIZE;
		} else if (IS_P2ALIGNED(size, PAGESIZE)) {
			align = PAGESIZE;
		} else if (IS_P2ALIGNED(size, p2 >> 2)) {
			align = p2 >> 2;
		}

		if (align != 0) {
			char name[36];
			(void) sprintf(name, "zio_buf_%lu", (ulong_t)size);
			zio_buf_cache[c] = kmem_cache_create(name, size,
			    align, NULL, NULL, NULL, NULL, NULL, cflags);

			/*
			 * Since zio_data bufs do not appear in crash dumps, we
			 * pass KMC_NOTOUCH so that no allocator metadata is
			 * stored with the buffers.
			 */
			(void) sprintf(name, "zio_data_buf_%lu", (ulong_t)size);
			zio_data_buf_cache[c] = kmem_cache_create(name, size,
			    align, NULL, NULL, NULL, NULL, data_alloc_arena,
			    cflags | KMC_NOTOUCH);
		}
	}

	while (--c != 0) {
		ASSERT(zio_buf_cache[c] != NULL);
		if (zio_buf_cache[c - 1] == NULL)
			zio_buf_cache[c - 1] = zio_buf_cache[c];

		ASSERT(zio_data_buf_cache[c] != NULL);
		if (zio_data_buf_cache[c - 1] == NULL)
			zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
	}

	zio_inject_init();
}
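
/*
 * Worked example of the sizing rules above (illustrative only, assuming a
 * 4K PAGESIZE): for size = 14336 (28 << SPA_MINBLOCKSHIFT), the largest
 * power of two not exceeding it is p2 = 8192, so the quarter-power-of-2
 * alignment is p2 >> 2 = 2048. Since 14336 is a multiple of 2048 but not
 * of PAGESIZE, the loop creates "zio_buf_14336" with 2K alignment. Sizes
 * that earn no alignment get no cache of their own; the backfill loop at
 * the end of zio_init() points them at the next larger cache instead.
 */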

void
zio_fini(void)
{
	size_t c;
	kmem_cache_t *last_cache = NULL;
	kmem_cache_t *last_data_cache = NULL;

	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		if (zio_buf_cache[c] != last_cache) {
			last_cache = zio_buf_cache[c];
			kmem_cache_destroy(zio_buf_cache[c]);
		}
		zio_buf_cache[c] = NULL;

		if (zio_data_buf_cache[c] != last_data_cache) {
			last_data_cache = zio_data_buf_cache[c];
			kmem_cache_destroy(zio_data_buf_cache[c]);
		}
		zio_data_buf_cache[c] = NULL;
	}

	kmem_cache_destroy(zio_link_cache);
	kmem_cache_destroy(zio_cache);

	zio_inject_fini();
}

/*
 * ==========================================================================
 * Allocate and free I/O buffers
 * ==========================================================================
 */

/*
 * Use zio_buf_alloc to allocate ZFS metadata. This data will appear in a
 * crashdump if the kernel panics, so use it judiciously. Obviously, it's
 * useful to inspect ZFS metadata, but if possible, we should avoid keeping
 * excess / transient data in-core during a crashdump.
 */
void *
zio_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE));
}

/*
 * Use zio_data_buf_alloc to allocate data. The data will not appear in a
 * crashdump if the kernel panics. This exists so that we limit the amount
 * of ZFS data that shows up in a kernel crashdump, thus reducing the amount
 * of kernel heap dumped to disk when the kernel panics.
 */
void *
zio_data_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE));
}

void
zio_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	kmem_cache_free(zio_buf_cache[c], buf);
}

void
zio_data_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	kmem_cache_free(zio_data_buf_cache[c], buf);
}
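
/*
 * A minimal usage sketch (hypothetical caller, not part of this file).
 * The size passed to the free function must match the size passed to the
 * matching alloc, since both use it to select the kmem cache:
 *
 *	void *buf = zio_buf_alloc(SPA_MAXBLOCKSIZE);
 *	... fill in metadata ...
 *	zio_buf_free(buf, SPA_MAXBLOCKSIZE);
 */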

/*
 * ==========================================================================
 * Push and pop I/O transform buffers
 * ==========================================================================
 */
static void
zio_push_transform(zio_t *zio, void *data, uint64_t size, uint64_t bufsize,
    zio_transform_func_t *transform)
{
	zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);

	zt->zt_orig_data = zio->io_data;
	zt->zt_orig_size = zio->io_size;
	zt->zt_bufsize = bufsize;
	zt->zt_transform = transform;

	zt->zt_next = zio->io_transform_stack;
	zio->io_transform_stack = zt;

	zio->io_data = data;
	zio->io_size = size;
}

static void
zio_pop_transforms(zio_t *zio)
{
	zio_transform_t *zt;

	while ((zt = zio->io_transform_stack) != NULL) {
		if (zt->zt_transform != NULL)
			zt->zt_transform(zio,
			    zt->zt_orig_data, zt->zt_orig_size);

		if (zt->zt_bufsize != 0)
			zio_buf_free(zio->io_data, zt->zt_bufsize);

		zio->io_data = zt->zt_orig_data;
		zio->io_size = zt->zt_orig_size;
		zio->io_transform_stack = zt->zt_next;

		kmem_free(zt, sizeof (zio_transform_t));
	}
}

/*
 * ==========================================================================
 * I/O transform callbacks for subblocks and decompression
 * ==========================================================================
 */
static void
zio_subblock(zio_t *zio, void *data, uint64_t size)
{
	ASSERT(zio->io_size > size);

	if (zio->io_type == ZIO_TYPE_READ)
		bcopy(zio->io_data, data, size);
}

static void
zio_decompress(zio_t *zio, void *data, uint64_t size)
{
	if (zio->io_error == 0 &&
	    zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
	    zio->io_data, data, zio->io_size, size) != 0)
		zio->io_error = SET_ERROR(EIO);
}

/*
 * ==========================================================================
 * I/O parent/child relationships and pipeline interlocks
 * ==========================================================================
 */
/*
 * NOTE - Callers to zio_walk_parents() and zio_walk_children() must
 *        continue calling these functions until they return NULL.
 *        Otherwise, the next caller will pick up the list walk in
 *        some indeterminate state. (Otherwise every caller would
 *        have to pass in a cookie to keep the state represented by
 *        io_walk_link, which gets annoying.)
 */
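
/*
 * The canonical walk, per the NOTE above -- always run the walker to NULL.
 * zio_reexecute() below uses exactly this shape:
 *
 *	zio_t *cio, *cio_next;
 *	for (cio = zio_walk_children(pio); cio != NULL; cio = cio_next) {
 *		cio_next = zio_walk_children(pio);
 *		... operate on cio ...
 *	}
 */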
zio_t *
zio_walk_parents(zio_t *cio)
{
	zio_link_t *zl = cio->io_walk_link;
	list_t *pl = &cio->io_parent_list;

	zl = (zl == NULL) ? list_head(pl) : list_next(pl, zl);
	cio->io_walk_link = zl;

	if (zl == NULL)
		return (NULL);

	ASSERT(zl->zl_child == cio);
	return (zl->zl_parent);
}

zio_t *
zio_walk_children(zio_t *pio)
{
	zio_link_t *zl = pio->io_walk_link;
	list_t *cl = &pio->io_child_list;

	zl = (zl == NULL) ? list_head(cl) : list_next(cl, zl);
	pio->io_walk_link = zl;

	if (zl == NULL)
		return (NULL);

	ASSERT(zl->zl_parent == pio);
	return (zl->zl_child);
}

zio_t *
zio_unique_parent(zio_t *cio)
{
	zio_t *pio = zio_walk_parents(cio);

	VERIFY(zio_walk_parents(cio) == NULL);
	return (pio);
}

void
zio_add_child(zio_t *pio, zio_t *cio)
{
	zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);

	/*
	 * Logical I/Os can have logical, gang, or vdev children.
	 * Gang I/Os can have gang or vdev children.
	 * Vdev I/Os can only have vdev children.
	 * The following ASSERT captures all of these constraints.
	 */
	ASSERT(cio->io_child_type <= pio->io_child_type);

	zl->zl_parent = pio;
	zl->zl_child = cio;

	mutex_enter(&cio->io_lock);
	mutex_enter(&pio->io_lock);

	ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);

	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
		pio->io_children[cio->io_child_type][w] += !cio->io_state[w];

	list_insert_head(&pio->io_child_list, zl);
	list_insert_head(&cio->io_parent_list, zl);

	pio->io_child_count++;
	cio->io_parent_count++;

	mutex_exit(&pio->io_lock);
	mutex_exit(&cio->io_lock);
}

static void
zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
{
	ASSERT(zl->zl_parent == pio);
	ASSERT(zl->zl_child == cio);

	mutex_enter(&cio->io_lock);
	mutex_enter(&pio->io_lock);

	list_remove(&pio->io_child_list, zl);
	list_remove(&cio->io_parent_list, zl);

	pio->io_child_count--;
	cio->io_parent_count--;

	mutex_exit(&pio->io_lock);
	mutex_exit(&cio->io_lock);

	kmem_cache_free(zio_link_cache, zl);
}

static boolean_t
zio_wait_for_children(zio_t *zio, enum zio_child child, enum zio_wait_type wait)
{
	uint64_t *countp = &zio->io_children[child][wait];
	boolean_t waiting = B_FALSE;

	mutex_enter(&zio->io_lock);
	ASSERT(zio->io_stall == NULL);
	if (*countp != 0) {
		zio->io_stage >>= 1;
		zio->io_stall = countp;
		waiting = B_TRUE;
	}
	mutex_exit(&zio->io_lock);

	return (waiting);
}

static void
zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait)
{
	uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
	int *errorp = &pio->io_child_error[zio->io_child_type];

	mutex_enter(&pio->io_lock);
	if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
		*errorp = zio_worst_error(*errorp, zio->io_error);
	pio->io_reexecute |= zio->io_reexecute;
	ASSERT3U(*countp, >, 0);

	(*countp)--;

	if (*countp == 0 && pio->io_stall == countp) {
		pio->io_stall = NULL;
		mutex_exit(&pio->io_lock);
		zio_execute(pio);
	} else {
		mutex_exit(&pio->io_lock);
	}
}

static void
zio_inherit_child_errors(zio_t *zio, enum zio_child c)
{
	if (zio->io_child_error[c] != 0 && zio->io_error == 0)
		zio->io_error = zio->io_child_error[c];
}

/*
 * ==========================================================================
 * Create the various types of I/O (read, write, free, etc)
 * ==========================================================================
 */
static zio_t *
zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    void *data, uint64_t size, zio_done_func_t *done, void *private,
    zio_type_t type, zio_priority_t priority, enum zio_flag flags,
    vdev_t *vd, uint64_t offset, const zbookmark_t *zb,
    enum zio_stage stage, enum zio_stage pipeline)
{
	zio_t *zio;

	ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
	ASSERT(P2PHASE(size, SPA_MINBLOCKSIZE) == 0);
	ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);

	ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
	ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
	ASSERT(vd || stage == ZIO_STAGE_OPEN);

	zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
	bzero(zio, sizeof (zio_t));

	mutex_init(&zio->io_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);

	list_create(&zio->io_parent_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_parent_node));
	list_create(&zio->io_child_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_child_node));

	if (vd != NULL)
		zio->io_child_type = ZIO_CHILD_VDEV;
	else if (flags & ZIO_FLAG_GANG_CHILD)
		zio->io_child_type = ZIO_CHILD_GANG;
	else if (flags & ZIO_FLAG_DDT_CHILD)
		zio->io_child_type = ZIO_CHILD_DDT;
	else
		zio->io_child_type = ZIO_CHILD_LOGICAL;

	if (bp != NULL) {
		zio->io_bp = (blkptr_t *)bp;
		zio->io_bp_copy = *bp;
		zio->io_bp_orig = *bp;
		if (type != ZIO_TYPE_WRITE ||
		    zio->io_child_type == ZIO_CHILD_DDT)
			zio->io_bp = &zio->io_bp_copy;	/* so caller can free */
		if (zio->io_child_type == ZIO_CHILD_LOGICAL)
			zio->io_logical = zio;
		if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
			pipeline |= ZIO_GANG_STAGES;
	}

	zio->io_spa = spa;
	zio->io_txg = txg;
	zio->io_done = done;
	zio->io_private = private;
	zio->io_type = type;
	zio->io_priority = priority;
	zio->io_vd = vd;
	zio->io_offset = offset;
	zio->io_orig_data = zio->io_data = data;
	zio->io_orig_size = zio->io_size = size;
	zio->io_orig_flags = zio->io_flags = flags;
	zio->io_orig_stage = zio->io_stage = stage;
	zio->io_orig_pipeline = zio->io_pipeline = pipeline;

	zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY);
	zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);

	if (zb != NULL)
		zio->io_bookmark = *zb;

	if (pio != NULL) {
		if (zio->io_logical == NULL)
			zio->io_logical = pio->io_logical;
		if (zio->io_child_type == ZIO_CHILD_GANG)
			zio->io_gang_leader = pio->io_gang_leader;
		zio_add_child(pio, zio);
	}

	return (zio);
}

static void
zio_destroy(zio_t *zio)
{
	list_destroy(&zio->io_parent_list);
	list_destroy(&zio->io_child_list);
	mutex_destroy(&zio->io_lock);
	cv_destroy(&zio->io_cv);
	kmem_cache_free(zio_cache, zio);
}

zio_t *
zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done,
    void *private, enum zio_flag flags)
{
	zio_t *zio;

	zio = zio_create(pio, spa, 0, NULL, NULL, 0, done, private,
	    ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
	    ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE);

	return (zio);
}

zio_t *
zio_root(spa_t *spa, zio_done_func_t *done, void *private, enum zio_flag flags)
{
	return (zio_null(NULL, spa, NULL, done, private, flags));
}
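
/*
 * A minimal sketch of the common fan-out pattern (hypothetical caller with
 * made-up 'bp', 'buf', 'size', and 'zb' variables; not part of this file):
 * create a root zio, hang asynchronous children off it, then wait once for
 * the whole tree to complete:
 *
 *	zio_t *rio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
 *	zio_nowait(zio_read(rio, spa, bp, buf, size, NULL, NULL,
 *	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, zb));
 *	... more zio_nowait() children ...
 *	error = zio_wait(rio);
 */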

zio_t *
zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
    void *data, uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, const zbookmark_t *zb)
{
	zio_t *zio;

	zio = zio_create(pio, spa, BP_PHYSICAL_BIRTH(bp), bp,
	    data, size, done, private,
	    ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE);

	return (zio);
}

zio_t *
zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    void *data, uint64_t size, const zio_prop_t *zp,
    zio_done_func_t *ready, zio_done_func_t *physdone, zio_done_func_t *done,
    void *private,
    zio_priority_t priority, enum zio_flag flags, const zbookmark_t *zb)
{
	zio_t *zio;

	ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF &&
	    zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS &&
	    zp->zp_compress >= ZIO_COMPRESS_OFF &&
	    zp->zp_compress < ZIO_COMPRESS_FUNCTIONS &&
	    DMU_OT_IS_VALID(zp->zp_type) &&
	    zp->zp_level < 32 &&
	    zp->zp_copies > 0 &&
	    zp->zp_copies <= spa_max_replication(spa));

	zio = zio_create(pio, spa, txg, bp, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE);

	zio->io_ready = ready;
	zio->io_physdone = physdone;
	zio->io_prop = *zp;

	return (zio);
}

zio_t *
zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, void *data,
    uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, zbookmark_t *zb)
{
	zio_t *zio;

	zio = zio_create(pio, spa, txg, bp, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);

	return (zio);
}

void
zio_write_override(zio_t *zio, blkptr_t *bp, int copies, boolean_t nopwrite)
{
	ASSERT(zio->io_type == ZIO_TYPE_WRITE);
	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
	ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
	ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa));

	/*
	 * We must reset the io_prop to match the values that existed
	 * when the bp was first written by dmu_sync(), keeping in mind
	 * that nopwrite and dedup are mutually exclusive.
	 */
	zio->io_prop.zp_dedup = nopwrite ? B_FALSE : zio->io_prop.zp_dedup;
	zio->io_prop.zp_nopwrite = nopwrite;
	zio->io_prop.zp_copies = copies;
	zio->io_bp_override = bp;
}

void
zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
{
	metaslab_check_free(spa, bp);

	/*
	 * Frees that are for the currently-syncing txg, are not going to be
	 * deferred, and which will not need to do a read (i.e. not GANG or
	 * DEDUP), can be processed immediately. Otherwise, put them on the
	 * in-memory list for later processing.
	 */
	if (BP_IS_GANG(bp) || BP_GET_DEDUP(bp) ||
	    txg != spa->spa_syncing_txg ||
	    spa_sync_pass(spa) >= zfs_sync_pass_deferred_free) {
		bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
	} else {
		VERIFY0(zio_wait(zio_free_sync(NULL, spa, txg, bp, 0)));
	}
}

zio_t *
zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    enum zio_flag flags)
{
	zio_t *zio;
	enum zio_stage stage = ZIO_FREE_PIPELINE;

	dprintf_bp(bp, "freeing in txg %llu, pass %u",
	    (longlong_t)txg, spa->spa_sync_pass);

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(spa_syncing_txg(spa) == txg);
	ASSERT(spa_sync_pass(spa) < zfs_sync_pass_deferred_free);

	metaslab_check_free(spa, bp);
	arc_freed(spa, bp);

	/*
	 * GANG and DEDUP blocks can induce a read (for the gang block header,
	 * or the DDT), so issue them asynchronously so that this thread is
	 * not tied up.
	 */
	if (BP_IS_GANG(bp) || BP_GET_DEDUP(bp))
		stage |= ZIO_STAGE_ISSUE_ASYNC;

	zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
	    NULL, NULL, ZIO_TYPE_FREE, ZIO_PRIORITY_NOW, flags,
	    NULL, 0, NULL, ZIO_STAGE_OPEN, stage);

	return (zio);
}

zio_t *
zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    zio_done_func_t *done, void *private, enum zio_flag flags)
{
	zio_t *zio;

	/*
	 * A claim is an allocation of a specific block. Claims are needed
	 * to support immediate writes in the intent log. The issue is that
	 * immediate writes contain committed data, but in a txg that was
	 * *not* committed. Upon opening the pool after an unclean shutdown,
	 * the intent log claims all blocks that contain immediate write data
	 * so that the SPA knows they're in use.
	 *
	 * All claims *must* be resolved in the first txg -- before the SPA
	 * starts allocating blocks -- so that nothing is allocated twice.
	 * If txg == 0 we just verify that the block is claimable.
	 */
	ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <, spa_first_txg(spa));
	ASSERT(txg == spa_first_txg(spa) || txg == 0);
	ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa));	/* zdb(1M) */

	zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
	    done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW, flags,
	    NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);

	return (zio);
}

zio_t *
zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd,
    zio_done_func_t *done, void *private, enum zio_flag flags)
{
	zio_t *zio;
	int c;

	if (vd->vdev_children == 0) {
		zio = zio_create(pio, spa, 0, NULL, NULL, 0, done, private,
		    ZIO_TYPE_IOCTL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
		    ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE);

		zio->io_cmd = cmd;
	} else {
		zio = zio_null(pio, spa, NULL, NULL, NULL, flags);

		for (c = 0; c < vd->vdev_children; c++)
			zio_nowait(zio_ioctl(zio, spa, vd->vdev_child[c], cmd,
			    done, private, flags));
	}

	return (zio);
}

zio_t *
zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    void *data, int checksum, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT(vd->vdev_children == 0);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, done, private,
	    ZIO_TYPE_READ, priority, flags, vd, offset, NULL,
	    ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	return (zio);
}

zio_t *
zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    void *data, int checksum, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT(vd->vdev_children == 0);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags, vd, offset, NULL,
	    ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	if (zio_checksum_table[checksum].ci_eck) {
		/*
		 * zec checksums are necessarily destructive -- they modify
		 * the end of the write buffer to hold the verifier/checksum.
		 * Therefore, we must make a local copy in case the data is
		 * being written to multiple places in parallel.
		 */
		void *wbuf = zio_buf_alloc(size);
		bcopy(data, wbuf, size);
		zio_push_transform(zio, wbuf, size, size, NULL);
	}

	return (zio);
}

/*
 * Create a child I/O to do some work for us.
 */
zio_t *
zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
    void *data, uint64_t size, int type, zio_priority_t priority,
    enum zio_flag flags, zio_done_func_t *done, void *private)
{
	enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE;
	zio_t *zio;

	ASSERT(vd->vdev_parent ==
	    (pio->io_vd ? pio->io_vd : pio->io_spa->spa_root_vdev));

	if (type == ZIO_TYPE_READ && bp != NULL) {
		/*
		 * If we have the bp, then the child should perform the
		 * checksum and the parent need not. This pushes error
		 * detection as close to the leaves as possible and
		 * eliminates redundant checksums in the interior nodes.
		 */
		pipeline |= ZIO_STAGE_CHECKSUM_VERIFY;
		pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
	}

	if (vd->vdev_children == 0)
		offset += VDEV_LABEL_START_SIZE;

	flags |= ZIO_VDEV_CHILD_FLAGS(pio) | ZIO_FLAG_DONT_PROPAGATE;

	/*
	 * If we've decided to do a repair, the write is not speculative --
	 * even if the original read was.
	 */
	if (flags & ZIO_FLAG_IO_REPAIR)
		flags &= ~ZIO_FLAG_SPECULATIVE;

	zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size,
	    done, private, type, priority, flags, vd, offset, &pio->io_bookmark,
	    ZIO_STAGE_VDEV_IO_START >> 1, pipeline);

	zio->io_physdone = pio->io_physdone;
	if (vd->vdev_ops->vdev_op_leaf && zio->io_logical != NULL)
		zio->io_logical->io_phys_children++;

	return (zio);
}

zio_t *
zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, void *data, uint64_t size,
    int type, zio_priority_t priority, enum zio_flag flags,
    zio_done_func_t *done, void *private)
{
	zio_t *zio;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	zio = zio_create(NULL, vd->vdev_spa, 0, NULL,
	    data, size, done, private, type, priority,
	    flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_DELEGATED,
	    vd, offset, NULL,
	    ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE);

	return (zio);
}

void
zio_flush(zio_t *zio, vdev_t *vd)
{
	zio_nowait(zio_ioctl(zio, zio->io_spa, vd, DKIOCFLUSHWRITECACHE,
	    NULL, NULL,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY));
}

void
zio_shrink(zio_t *zio, uint64_t size)
{
	ASSERT(zio->io_executor == NULL);
	ASSERT(zio->io_orig_size == zio->io_size);
	ASSERT(size <= zio->io_size);

	/*
	 * We don't shrink for raidz because of problems with the
	 * reconstruction when reading back less than the block size.
	 * Note, BP_IS_RAIDZ() assumes no compression.
	 */
	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
	if (!BP_IS_RAIDZ(zio->io_bp))
		zio->io_orig_size = zio->io_size = size;
}

/*
 * ==========================================================================
 * Prepare to read and write logical blocks
 * ==========================================================================
 */

static int
zio_read_bp_init(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
	    zio->io_child_type == ZIO_CHILD_LOGICAL &&
	    !(zio->io_flags & ZIO_FLAG_RAW)) {
		uint64_t psize = BP_GET_PSIZE(bp);
		void *cbuf = zio_buf_alloc(psize);

		zio_push_transform(zio, cbuf, psize, psize, zio_decompress);
	}

	if (!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) && BP_GET_LEVEL(bp) == 0)
		zio->io_flags |= ZIO_FLAG_DONT_CACHE;

	if (BP_GET_TYPE(bp) == DMU_OT_DDT_ZAP)
		zio->io_flags |= ZIO_FLAG_DONT_CACHE;

	if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL)
		zio->io_pipeline = ZIO_DDT_READ_PIPELINE;

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_write_bp_init(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	zio_prop_t *zp = &zio->io_prop;
	enum zio_compress compress = zp->zp_compress;
	blkptr_t *bp = zio->io_bp;
	uint64_t lsize = zio->io_size;
	uint64_t psize = lsize;
	int pass = 1;

	/*
	 * If our children haven't all reached the ready stage,
	 * wait for them and then repeat this pipeline stage.
	 */
	if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_READY) ||
	    zio_wait_for_children(zio, ZIO_CHILD_LOGICAL, ZIO_WAIT_READY))
		return (ZIO_PIPELINE_STOP);

	if (!IO_IS_ALLOCATING(zio))
		return (ZIO_PIPELINE_CONTINUE);

	ASSERT(zio->io_child_type != ZIO_CHILD_DDT);

	if (zio->io_bp_override) {
		ASSERT(bp->blk_birth != zio->io_txg);
		ASSERT(BP_GET_DEDUP(zio->io_bp_override) == 0);

		*bp = *zio->io_bp_override;
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

		/*
		 * If we've been overridden and nopwrite is set then
		 * set the flag accordingly to indicate that a nopwrite
		 * has already occurred.
		 */
		if (!BP_IS_HOLE(bp) && zp->zp_nopwrite) {
			ASSERT(!zp->zp_dedup);
			zio->io_flags |= ZIO_FLAG_NOPWRITE;
			return (ZIO_PIPELINE_CONTINUE);
		}

		ASSERT(!zp->zp_nopwrite);

		if (BP_IS_HOLE(bp) || !zp->zp_dedup)
			return (ZIO_PIPELINE_CONTINUE);

		ASSERT(zio_checksum_table[zp->zp_checksum].ci_dedup ||
		    zp->zp_dedup_verify);

		if (BP_GET_CHECKSUM(bp) == zp->zp_checksum) {
			BP_SET_DEDUP(bp, 1);
			zio->io_pipeline |= ZIO_STAGE_DDT_WRITE;
			return (ZIO_PIPELINE_CONTINUE);
		}
		zio->io_bp_override = NULL;
		BP_ZERO(bp);
	}

	if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg) {
		/*
		 * We're rewriting an existing block, which means we're
		 * working on behalf of spa_sync(). For spa_sync() to
		 * converge, it must eventually be the case that we don't
		 * have to allocate new blocks. But compression changes
		 * the blocksize, which forces a reallocate, and makes
		 * convergence take longer. Therefore, after the first
		 * few passes, stop compressing to ensure convergence.
		 */
		pass = spa_sync_pass(spa);

		ASSERT(zio->io_txg == spa_syncing_txg(spa));
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
		ASSERT(!BP_GET_DEDUP(bp));

		if (pass >= zfs_sync_pass_dont_compress)
			compress = ZIO_COMPRESS_OFF;

		/* Make sure someone doesn't change their mind on overwrites */
		ASSERT(MIN(zp->zp_copies + BP_IS_GANG(bp),
		    spa_max_replication(spa)) == BP_GET_NDVAS(bp));
	}

	if (compress != ZIO_COMPRESS_OFF) {
		void *cbuf = zio_buf_alloc(lsize);
		psize = zio_compress_data(compress, zio->io_data, cbuf, lsize);
		if (psize == 0 || psize == lsize) {
			compress = ZIO_COMPRESS_OFF;
			zio_buf_free(cbuf, lsize);
		} else {
			ASSERT(psize < lsize);
			zio_push_transform(zio, cbuf, psize, lsize, NULL);
		}
	}

	/*
	 * The final pass of spa_sync() must be all rewrites, but the first
	 * few passes offer a trade-off: allocating blocks defers convergence,
	 * but newly allocated blocks are sequential, so they can be written
	 * to disk faster. Therefore, we allow the first few passes of
	 * spa_sync() to allocate new blocks, but force rewrites after that.
	 * There should only be a handful of blocks after pass 1 in any case.
	 */
	if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg &&
	    BP_GET_PSIZE(bp) == psize &&
	    pass >= zfs_sync_pass_rewrite) {
		ASSERT(psize != 0);
		enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;
		zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages;
		zio->io_flags |= ZIO_FLAG_IO_REWRITE;
	} else {
		BP_ZERO(bp);
		zio->io_pipeline = ZIO_WRITE_PIPELINE;
	}

	if (psize == 0) {
		if (zio->io_bp_orig.blk_birth != 0 &&
		    spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
			BP_SET_LSIZE(bp, lsize);
			BP_SET_TYPE(bp, zp->zp_type);
			BP_SET_LEVEL(bp, zp->zp_level);
			BP_SET_BIRTH(bp, zio->io_txg, 0);
		}
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
	} else {
		ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER);
		BP_SET_LSIZE(bp, lsize);
		BP_SET_TYPE(bp, zp->zp_type);
		BP_SET_LEVEL(bp, zp->zp_level);
		BP_SET_PSIZE(bp, psize);
		BP_SET_COMPRESS(bp, compress);
		BP_SET_CHECKSUM(bp, zp->zp_checksum);
		BP_SET_DEDUP(bp, zp->zp_dedup);
		BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
		if (zp->zp_dedup) {
			ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
			ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
			zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE;
		}
		if (zp->zp_nopwrite) {
			ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
			ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
			zio->io_pipeline |= ZIO_STAGE_NOP_WRITE;
		}
	}

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_free_bp_init(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	if (zio->io_child_type == ZIO_CHILD_LOGICAL) {
		if (BP_GET_DEDUP(bp))
			zio->io_pipeline = ZIO_DDT_FREE_PIPELINE;
	}

	return (ZIO_PIPELINE_CONTINUE);
}

/*
 * ==========================================================================
 * Execute the I/O pipeline
 * ==========================================================================
 */

static void
zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline)
{
	spa_t *spa = zio->io_spa;
	zio_type_t t = zio->io_type;
	int flags = (cutinline ? TQ_FRONT : 0);

	/*
	 * If we're a config writer or a probe, the normal issue and
	 * interrupt threads may all be blocked waiting for the config lock.
	 * In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL.
	 */
	if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE))
		t = ZIO_TYPE_NULL;

	/*
	 * A similar issue exists for the L2ARC write thread until L2ARC 2.0.
	 */
	if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux)
		t = ZIO_TYPE_NULL;

	/*
	 * If this is a high priority I/O, then use the high priority taskq if
	 * available.
	 */
	if (zio->io_priority == ZIO_PRIORITY_NOW &&
	    spa->spa_zio_taskq[t][q + 1].stqs_count != 0)
		q++;

	ASSERT3U(q, <, ZIO_TASKQ_TYPES);

	/*
	 * NB: We are assuming that the zio can only be dispatched
	 * to a single taskq at a time. It would be a grievous error
	 * to dispatch the zio to another taskq at the same time.
	 */
	ASSERT(zio->io_tqent.tqent_next == NULL);
	spa_taskq_dispatch_ent(spa, t, q, (task_func_t *)zio_execute, zio,
	    flags, &zio->io_tqent);
}

static boolean_t
zio_taskq_member(zio_t *zio, zio_taskq_type_t q)
{
	kthread_t *executor = zio->io_executor;
	spa_t *spa = zio->io_spa;

	for (zio_type_t t = 0; t < ZIO_TYPES; t++) {
		spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
		uint_t i;
		for (i = 0; i < tqs->stqs_count; i++) {
			if (taskq_member(tqs->stqs_taskq[i], executor))
				return (B_TRUE);
		}
	}

	return (B_FALSE);
}

static int
zio_issue_async(zio_t *zio)
{
	zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);

	return (ZIO_PIPELINE_STOP);
}

void
zio_interrupt(zio_t *zio)
{
	zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE);
}

/*
 * Execute the I/O pipeline until one of the following occurs:
 *
 *	(1) the I/O completes
 *	(2) the pipeline stalls waiting for dependent child I/Os
 *	(3) the I/O issues, so we're waiting for an I/O completion interrupt
 *	(4) the I/O is delegated by vdev-level caching or aggregation
 *	(5) the I/O is deferred due to vdev-level queueing
 *	(6) the I/O is handed off to another thread.
 *
 * In all cases, the pipeline stops whenever there's no CPU work; it never
 * burns a thread in cv_wait().
 *
 * There's no locking on io_stage because there's no legitimate way
 * for multiple threads to be attempting to process the same I/O.
 */
static zio_pipe_stage_t *zio_pipeline[];

void
zio_execute(zio_t *zio)
{
	zio->io_executor = curthread;

	while (zio->io_stage < ZIO_STAGE_DONE) {
		enum zio_stage pipeline = zio->io_pipeline;
		enum zio_stage stage = zio->io_stage;
		int rv;

		ASSERT(!MUTEX_HELD(&zio->io_lock));
		ASSERT(ISP2(stage));
		ASSERT(zio->io_stall == NULL);

		do {
			stage <<= 1;
		} while ((stage & pipeline) == 0);

		ASSERT(stage <= ZIO_STAGE_DONE);

		/*
		 * If we are in interrupt context and this pipeline stage
		 * will grab a config lock that is held across I/O,
		 * or may wait for an I/O that needs an interrupt thread
		 * to complete, issue async to avoid deadlock.
		 *
		 * For VDEV_IO_START, we cut in line so that the io will
		 * be sent to disk promptly.
		 */
		if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
		    zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
			boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
			    zio_requeue_io_start_cut_in_line : B_FALSE;
			zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
			return;
		}

		zio->io_stage = stage;
		rv = zio_pipeline[highbit64(stage) - 1](zio);

		if (rv == ZIO_PIPELINE_STOP)
			return;

		ASSERT(rv == ZIO_PIPELINE_CONTINUE);
	}
}
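
/*
 * A note on the stage arithmetic above (descriptive, not new behavior):
 * io_stage and io_pipeline are bitmasks of ZIO_STAGE_* values, each a
 * distinct power of two. Shifting stage left until (stage & pipeline) is
 * nonzero advances to the next stage this particular pipeline actually
 * includes, skipping the rest; zio_pipeline[highbit64(stage) - 1] then
 * maps that single set bit back to the handler's index in the stage table.
 */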

/*
 * ==========================================================================
 * Initiate I/O, either sync or async
 * ==========================================================================
 */
int
zio_wait(zio_t *zio)
{
	int error;

	ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
	ASSERT(zio->io_executor == NULL);

	zio->io_waiter = curthread;

	zio_execute(zio);

	mutex_enter(&zio->io_lock);
	while (zio->io_executor != NULL)
		cv_wait(&zio->io_cv, &zio->io_lock);
	mutex_exit(&zio->io_lock);

	error = zio->io_error;
	zio_destroy(zio);

	return (error);
}

void
zio_nowait(zio_t *zio)
{
	ASSERT(zio->io_executor == NULL);

	if (zio->io_child_type == ZIO_CHILD_LOGICAL &&
	    zio_unique_parent(zio) == NULL) {
		/*
		 * This is a logical async I/O with no parent to wait for it.
		 * We add it to the spa_async_root_zio "Godfather" I/O, which
		 * will ensure it completes prior to unloading the pool.
		 */
		spa_t *spa = zio->io_spa;

		zio_add_child(spa->spa_async_zio_root, zio);
	}

	zio_execute(zio);
}

/*
 * ==========================================================================
 * Reexecute or suspend/resume failed I/O
 * ==========================================================================
 */

static void
zio_reexecute(zio_t *pio)
{
	zio_t *cio, *cio_next;

	ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL);
	ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN);
	ASSERT(pio->io_gang_leader == NULL);
	ASSERT(pio->io_gang_tree == NULL);

	pio->io_flags = pio->io_orig_flags;
	pio->io_stage = pio->io_orig_stage;
	pio->io_pipeline = pio->io_orig_pipeline;
	pio->io_reexecute = 0;
	pio->io_flags |= ZIO_FLAG_REEXECUTED;
	pio->io_error = 0;
	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
		pio->io_state[w] = 0;
	for (int c = 0; c < ZIO_CHILD_TYPES; c++)
		pio->io_child_error[c] = 0;

	if (IO_IS_ALLOCATING(pio))
		BP_ZERO(pio->io_bp);

	/*
	 * As we reexecute pio's children, new children could be created.
	 * New children go to the head of pio's io_child_list, however,
	 * so we will (correctly) not reexecute them. The key is that
	 * the remainder of pio's io_child_list, from 'cio_next' onward,
	 * cannot be affected by any side effects of reexecuting 'cio'.
	 */
	for (cio = zio_walk_children(pio); cio != NULL; cio = cio_next) {
		cio_next = zio_walk_children(pio);
		mutex_enter(&pio->io_lock);
		for (int w = 0; w < ZIO_WAIT_TYPES; w++)
			pio->io_children[cio->io_child_type][w]++;
		mutex_exit(&pio->io_lock);
		zio_reexecute(cio);
	}

	/*
	 * Now that all children have been reexecuted, execute the parent.
	 * We don't reexecute "The Godfather" I/O here as it's the
	 * responsibility of the caller to wait on him.
	 */
	if (!(pio->io_flags & ZIO_FLAG_GODFATHER))
		zio_execute(pio);
}

void
zio_suspend(spa_t *spa, zio_t *zio)
{
	if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC)
		fm_panic("Pool '%s' has encountered an uncorrectable I/O "
		    "failure and the failure mode property for this pool "
		    "is set to panic.", spa_name(spa));

	zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL, NULL, 0, 0);

	mutex_enter(&spa->spa_suspend_lock);

	if (spa->spa_suspend_zio_root == NULL)
		spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL,
		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
		    ZIO_FLAG_GODFATHER);

	spa->spa_suspended = B_TRUE;

	if (zio != NULL) {
		ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
		ASSERT(zio != spa->spa_suspend_zio_root);
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
		ASSERT(zio_unique_parent(zio) == NULL);
		ASSERT(zio->io_stage == ZIO_STAGE_DONE);
		zio_add_child(spa->spa_suspend_zio_root, zio);
	}

	mutex_exit(&spa->spa_suspend_lock);
}

int
zio_resume(spa_t *spa)
{
	zio_t *pio;

	/*
	 * Reexecute all previously suspended i/o.
	 */
	mutex_enter(&spa->spa_suspend_lock);
	spa->spa_suspended = B_FALSE;
	cv_broadcast(&spa->spa_suspend_cv);
	pio = spa->spa_suspend_zio_root;
	spa->spa_suspend_zio_root = NULL;
	mutex_exit(&spa->spa_suspend_lock);

	if (pio == NULL)
		return (0);

	zio_reexecute(pio);
	return (zio_wait(pio));
}

void
zio_resume_wait(spa_t *spa)
{
	mutex_enter(&spa->spa_suspend_lock);
	while (spa_suspended(spa))
		cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock);
	mutex_exit(&spa->spa_suspend_lock);
}

/*
 * ==========================================================================
 * Gang blocks.
 *
 * A gang block is a collection of small blocks that looks to the DMU
 * like one large block. When zio_dva_allocate() cannot find a block
 * of the requested size, due to either severe fragmentation or the pool
 * being nearly full, it calls zio_write_gang_block() to construct the
 * block from smaller fragments.
 *
 * A gang block consists of a gang header (zio_gbh_phys_t) and up to
 * three (SPA_GBH_NBLKPTRS) gang members. The gang header is just like
 * an indirect block: it's an array of block pointers. It consumes
 * only one sector and hence is allocatable regardless of fragmentation.
 * The gang header's bps point to its gang members, which hold the data.
 *
 * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg>
 * as the verifier to ensure uniqueness of the SHA256 checksum.
 * Critically, the gang block bp's blk_cksum is the checksum of the data,
 * not the gang header. This ensures that data block signatures (needed for
 * deduplication) are independent of how the block is physically stored.
 *
 * Gang blocks can be nested: a gang member may itself be a gang block.
 * Thus every gang block is a tree in which root and all interior nodes are
 * gang headers, and the leaves are normal blocks that contain user data.
 * The root of the gang tree is called the gang leader.
 *
 * To perform any operation (read, rewrite, free, claim) on a gang block,
 * zio_gang_assemble() first assembles the gang tree (minus data leaves)
 * in the io_gang_tree field of the original logical i/o by recursively
 * reading the gang leader and all gang headers below it. This yields
 * an in-core tree containing the contents of every gang header and the
 * bps for every constituent of the gang block.
 *
 * With the gang tree now assembled, zio_gang_issue() just walks the gang tree
 * and invokes a callback on each bp. To free a gang block, zio_gang_issue()
 * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp.
 * zio_claim_gang() provides a similarly trivial wrapper for zio_claim().
 * zio_read_gang() is a wrapper around zio_read() that omits reading gang
 * headers, since we already have those in io_gang_tree. zio_rewrite_gang()
 * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite()
 * of the gang header plus zio_checksum_compute() of the data to update the
 * gang header's blk_cksum as described above.
 *
 * The two-phase assemble/issue model solves the problem of partial failure --
 * what if you'd freed part of a gang block but then couldn't read the
 * gang header for another part? Assembling the entire gang tree first
 * ensures that all the necessary gang header I/O has succeeded before
 * starting the actual work of free, claim, or write. Once the gang tree
 * is assembled, free and claim are in-memory operations that cannot fail.
 *
 * In the event that a gang write fails, zio_dva_unallocate() walks the
 * gang tree to immediately free (i.e. insert back into the space map)
 * everything we've allocated. This ensures that we don't get ENOSPC
 * errors during repeated suspend/resume cycles due to a flaky device.
 *
 * Gang rewrites only happen during sync-to-convergence. If we can't assemble
 * the gang tree, we won't modify the block, so we can safely defer the free
 * (knowing that the block is still intact). If we *can* assemble the gang
 * tree, then even if some of the rewrites fail, zio_dva_unallocate() will free
 * each constituent bp and we can allocate a new block on the next sync pass.
 *
 * In all cases, the gang tree allows complete recovery from partial failure.
 * ==========================================================================
 */
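
/*
 * An illustrative (not to scale) gang tree, per the description above:
 * interior nodes are gang headers, leaves are ordinary data blocks, and
 * the topmost bp is the gang leader.
 *
 *			  gang leader bp
 *				|
 *			   gang header
 *			  /     |     \
 *		   data bp  data bp  gang header
 *				      /      \
 *				  data bp  data bp
 */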

static zio_t *
zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data)
{
	if (gn != NULL)
		return (pio);

	return (zio_read(pio, pio->io_spa, bp, data, BP_GET_PSIZE(bp),
	    NULL, NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
	    &pio->io_bookmark));
}

zio_t *
zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data)
{
	zio_t *zio;

	if (gn != NULL) {
		zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
		    gn->gn_gbh, SPA_GANGBLOCKSIZE, NULL, NULL, pio->io_priority,
		    ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
		/*
		 * As we rewrite each gang header, the pipeline will compute
		 * a new gang block header checksum for it; but no one will
		 * compute a new data checksum, so we do that here. The one
		 * exception is the gang leader: the pipeline already computed
		 * its data checksum because that stage precedes gang assembly.
		 * (Presently, nothing actually uses interior data checksums;
		 * this is just good hygiene.)
		 */
		if (gn != pio->io_gang_leader->io_gang_tree) {
			zio_checksum_compute(zio, BP_GET_CHECKSUM(bp),
			    data, BP_GET_PSIZE(bp));
		}
		/*
		 * If we are here to damage data for testing purposes,
		 * leave the GBH alone so that we can detect the damage.
		 */
		if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE)
			zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
	} else {
		zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
		    data, BP_GET_PSIZE(bp), NULL, NULL, pio->io_priority,
		    ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
	}

	return (zio);
}

/* ARGSUSED */
zio_t *
zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data)
{
	return (zio_free_sync(pio, pio->io_spa, pio->io_txg, bp,
	    ZIO_GANG_CHILD_FLAGS(pio)));
}

/* ARGSUSED */
zio_t *
zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data)
{
	return (zio_claim(pio, pio->io_spa, pio->io_txg, bp,
	    NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio)));
}

static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = {
	NULL,
	zio_read_gang,
	zio_rewrite_gang,
	zio_free_gang,
	zio_claim_gang,
	NULL
};

static void zio_gang_tree_assemble_done(zio_t *zio);

static zio_gang_node_t *
zio_gang_node_alloc(zio_gang_node_t **gnpp)
{
	zio_gang_node_t *gn;

	ASSERT(*gnpp == NULL);

	gn = kmem_zalloc(sizeof (*gn), KM_SLEEP);
	gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE);
	*gnpp = gn;

	return (gn);
}

static void
zio_gang_node_free(zio_gang_node_t **gnpp)
{
	zio_gang_node_t *gn = *gnpp;

	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
		ASSERT(gn->gn_child[g] == NULL);

	zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE);
	kmem_free(gn, sizeof (*gn));
	*gnpp = NULL;
}

static void
zio_gang_tree_free(zio_gang_node_t **gnpp)
{
	zio_gang_node_t *gn = *gnpp;

	if (gn == NULL)
		return;

	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
		zio_gang_tree_free(&gn->gn_child[g]);

	zio_gang_node_free(gnpp);
}

static void
zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp)
{
	zio_gang_node_t *gn = zio_gang_node_alloc(gnpp);

	ASSERT(gio->io_gang_leader == gio);
	ASSERT(BP_IS_GANG(bp));

	zio_nowait(zio_read(gio, gio->io_spa, bp, gn->gn_gbh,
	    SPA_GANGBLOCKSIZE, zio_gang_tree_assemble_done, gn,
	    gio->io_priority, ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark));
}

static void
zio_gang_tree_assemble_done(zio_t *zio)
{
	zio_t *gio = zio->io_gang_leader;
	zio_gang_node_t *gn = zio->io_private;
	blkptr_t *bp = zio->io_bp;

	ASSERT(gio == zio_unique_parent(zio));
	ASSERT(zio->io_child_count == 0);

	if (zio->io_error)
		return;

	if (BP_SHOULD_BYTESWAP(bp))
		byteswap_uint64_array(zio->io_data, zio->io_size);

	ASSERT(zio->io_data == gn->gn_gbh);
	ASSERT(zio->io_size == SPA_GANGBLOCKSIZE);
	ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);

	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
		blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
		if (!BP_IS_GANG(gbp))
			continue;
		zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]);
	}
}

static void
zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, void *data)
{
	zio_t *gio = pio->io_gang_leader;
	zio_t *zio;

	ASSERT(BP_IS_GANG(bp) == !!gn);
	ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp));
	ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree);

	/*
	 * If you're a gang header, your data is in gn->gn_gbh.
	 * If you're a gang member, your data is in 'data' and gn == NULL.
	 */
	zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data);

	if (gn != NULL) {
		ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);

		for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
			blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
			if (BP_IS_HOLE(gbp))
				continue;
			zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data);
			data = (char *)data + BP_GET_PSIZE(gbp);
		}
	}

	if (gn == gio->io_gang_tree)
		ASSERT3P((char *)gio->io_data + gio->io_size, ==, data);

	if (zio != pio)
		zio_nowait(zio);
}

static int
zio_gang_assemble(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL);
	ASSERT(zio->io_child_type > ZIO_CHILD_GANG);

	zio->io_gang_leader = zio;

	zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree);

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_gang_issue(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_DONE))
		return (ZIO_PIPELINE_STOP);

	ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio);
	ASSERT(zio->io_child_type > ZIO_CHILD_GANG);

	if (zio->io_child_error[ZIO_CHILD_GANG] == 0)
		zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_data);
	else
		zio_gang_tree_free(&zio->io_gang_tree);

	zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

	return (ZIO_PIPELINE_CONTINUE);
}

static void
zio_write_gang_member_ready(zio_t *zio)
{
	zio_t *pio = zio_unique_parent(zio);
	zio_t *gio = zio->io_gang_leader;
	dva_t *cdva = zio->io_bp->blk_dva;
	dva_t *pdva = pio->io_bp->blk_dva;
	uint64_t asize;

	if (BP_IS_HOLE(zio->io_bp))
		return;

	ASSERT(BP_IS_HOLE(&zio->io_bp_orig));

	ASSERT(zio->io_child_type == ZIO_CHILD_GANG);
	ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies);
	ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp));
	ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp));
	ASSERT3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp));

	mutex_enter(&pio->io_lock);
	for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) {
		ASSERT(DVA_GET_GANG(&pdva[d]));
		asize = DVA_GET_ASIZE(&pdva[d]);
		asize += DVA_GET_ASIZE(&cdva[d]);
		DVA_SET_ASIZE(&pdva[d], asize);
	}
	mutex_exit(&pio->io_lock);
}

static int
zio_write_gang_block(zio_t *pio)
{
	spa_t *spa = pio->io_spa;
	blkptr_t *bp = pio->io_bp;
	zio_t *gio = pio->io_gang_leader;
	zio_t *zio;
	zio_gang_node_t *gn, **gnpp;
	zio_gbh_phys_t *gbh;
	uint64_t txg = pio->io_txg;
	uint64_t resid = pio->io_size;
	uint64_t lsize;
	int copies = gio->io_prop.zp_copies;
	int gbh_copies = MIN(copies + 1, spa_max_replication(spa));
	zio_prop_t zp;
	int error;

	error = metaslab_alloc(spa, spa_normal_class(spa), SPA_GANGBLOCKSIZE,
	    bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp,
	    METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER);
	if (error) {
		pio->io_error = error;
		return (ZIO_PIPELINE_CONTINUE);
	}

	if (pio == gio) {
		gnpp = &gio->io_gang_tree;
	} else {
		gnpp = pio->io_private;
		ASSERT(pio->io_ready == zio_write_gang_member_ready);
	}

	gn = zio_gang_node_alloc(gnpp);
	gbh = gn->gn_gbh;
	bzero(gbh, SPA_GANGBLOCKSIZE);

	/*
	 * Create the gang header.
	 */
	zio = zio_rewrite(pio, spa, txg, bp, gbh, SPA_GANGBLOCKSIZE, NULL, NULL,
	    pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);

	/*
	 * Create and nowait the gang children.
	 */
	for (int g = 0; resid != 0; resid -= lsize, g++) {
		lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g),
		    SPA_MINBLOCKSIZE);
		ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid);

		zp.zp_checksum = gio->io_prop.zp_checksum;
		zp.zp_compress = ZIO_COMPRESS_OFF;
		zp.zp_type = DMU_OT_NONE;
		zp.zp_level = 0;
		zp.zp_copies = gio->io_prop.zp_copies;
		zp.zp_dedup = B_FALSE;
		zp.zp_dedup_verify = B_FALSE;
		zp.zp_nopwrite = B_FALSE;

		zio_nowait(zio_write(zio, spa, txg, &gbh->zg_blkptr[g],
		    (char *)pio->io_data + (pio->io_size - resid), lsize, &zp,
		    zio_write_gang_member_ready, NULL, NULL, &gn->gn_child[g],
		    pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
		    &pio->io_bookmark));
	}

	/*
	 * Set pio's pipeline to just wait for zio to finish.
	 */
	pio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

	zio_nowait(zio);

	return (ZIO_PIPELINE_CONTINUE);
}

/*
 * The zio_nop_write stage in the pipeline determines if allocating
 * a new bp is necessary. By leveraging a cryptographically secure checksum,
 * such as SHA256, we can compare the checksums of the new data and the old
 * to determine if allocating a new block is required. The nopwrite
 * feature can handle writes in either syncing or open context (i.e. zil
 * writes) and as a result is mutually exclusive with dedup.
 */
static int
zio_nop_write(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	blkptr_t *bp_orig = &zio->io_bp_orig;
	zio_prop_t *zp = &zio->io_prop;

	ASSERT(BP_GET_LEVEL(bp) == 0);
	ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
	ASSERT(zp->zp_nopwrite);
	ASSERT(!zp->zp_dedup);
	ASSERT(zio->io_bp_override == NULL);
	ASSERT(IO_IS_ALLOCATING(zio));

	/*
	 * Check to see if the original bp and the new bp have matching
	 * characteristics (i.e. same checksum, compression algorithms, etc).
	 * If they don't then just continue with the pipeline which will
	 * allocate a new bp.
	 */
	if (BP_IS_HOLE(bp_orig) ||
	    !zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_dedup ||
	    BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) ||
	    BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) ||
	    BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) ||
	    zp->zp_copies != BP_GET_NDVAS(bp_orig))
		return (ZIO_PIPELINE_CONTINUE);

	/*
	 * If the checksums match then reset the pipeline so that we
	 * avoid allocating a new bp and issuing any I/O.
	 */
	if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) {
		ASSERT(zio_checksum_table[zp->zp_checksum].ci_dedup);
		ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig));
		ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig));
		ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF);
		ASSERT(bcmp(&bp->blk_prop, &bp_orig->blk_prop,
		    sizeof (uint64_t)) == 0);

		*bp = *bp_orig;
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
		zio->io_flags |= ZIO_FLAG_NOPWRITE;
	}

	return (ZIO_PIPELINE_CONTINUE);
}
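
/*
 * In short (an illustrative restatement of the checks above): a nopwrite
 * is taken only when the old and new bps share a dedup-capable
 * (collision-resistant) checksum algorithm, the same compression, dedup
 * setting, and number of copies, and the checksums themselves match --
 * i.e. the bytes on disk would be identical, so the old block is reused
 * and no write I/O is issued at all.
 */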

/*
 * The zio_nop_write stage in the pipeline determines if allocating
 * a new bp is necessary. By leveraging a cryptographically secure checksum,
 * such as SHA256, we can compare the checksums of the new data and the old
 * to determine if allocating a new block is required. The nopwrite
 * feature can handle writes in either syncing or open context (i.e. zil
 * writes) and as a result is mutually exclusive with dedup.
 */
static int
zio_nop_write(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	blkptr_t *bp_orig = &zio->io_bp_orig;
	zio_prop_t *zp = &zio->io_prop;

	ASSERT(BP_GET_LEVEL(bp) == 0);
	ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
	ASSERT(zp->zp_nopwrite);
	ASSERT(!zp->zp_dedup);
	ASSERT(zio->io_bp_override == NULL);
	ASSERT(IO_IS_ALLOCATING(zio));

	/*
	 * Check to see if the original bp and the new bp have matching
	 * characteristics (i.e. same checksum, compression algorithms, etc).
	 * If they don't then just continue with the pipeline which will
	 * allocate a new bp.
	 */
	if (BP_IS_HOLE(bp_orig) ||
	    !zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_dedup ||
	    BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) ||
	    BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) ||
	    BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) ||
	    zp->zp_copies != BP_GET_NDVAS(bp_orig))
		return (ZIO_PIPELINE_CONTINUE);

	/*
	 * If the checksums match then reset the pipeline so that we
	 * avoid allocating a new bp and issuing any I/O.
	 */
	if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) {
		ASSERT(zio_checksum_table[zp->zp_checksum].ci_dedup);
		ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig));
		ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig));
		ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF);
		ASSERT(bcmp(&bp->blk_prop, &bp_orig->blk_prop,
		    sizeof (uint64_t)) == 0);

		*bp = *bp_orig;
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
		zio->io_flags |= ZIO_FLAG_NOPWRITE;
	}

	return (ZIO_PIPELINE_CONTINUE);
}
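
/*
 * Illustrative sketch (guarded out of compilation): the nop-write decision
 * reduced to its essence -- skip the write when the checksum stored for
 * the old block equals the checksum of the new data.  fnv1a64() is a toy
 * stand-in for the strong (e.g. SHA256) checksum that nopwrite actually
 * requires; a weak hash like this would be unsafe in practice.
 */
#if 0
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t
fnv1a64(const void *buf, size_t len)
{
	const unsigned char *p = buf;
	uint64_t h = 14695981039346656037ULL;	/* FNV offset basis */

	while (len-- != 0) {
		h ^= *p++;
		h *= 1099511628211ULL;		/* FNV prime */
	}
	return (h);
}

int
main(void)
{
	char old_data[512] = "same contents";
	char new_data[512] = "same contents";
	uint64_t stored_cksum = fnv1a64(old_data, sizeof (old_data));

	if (fnv1a64(new_data, sizeof (new_data)) == stored_cksum)
		printf("nop-write: reuse the old block pointer\n");
	else
		printf("allocate and write a new block\n");
	return (0);
}
#endif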

/*
 * ==========================================================================
 * Dedup
 * ==========================================================================
 */
static void
zio_ddt_child_read_done(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	ddt_entry_t *dde = zio->io_private;
	ddt_phys_t *ddp;
	zio_t *pio = zio_unique_parent(zio);

	mutex_enter(&pio->io_lock);
	ddp = ddt_phys_select(dde, bp);
	if (zio->io_error == 0)
		ddt_phys_clear(ddp);	/* this ddp doesn't need repair */
	if (zio->io_error == 0 && dde->dde_repair_data == NULL)
		dde->dde_repair_data = zio->io_data;
	else
		zio_buf_free(zio->io_data, zio->io_size);
	mutex_exit(&pio->io_lock);
}

static int
zio_ddt_read_start(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	ASSERT(BP_GET_DEDUP(bp));
	ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);

	if (zio->io_child_error[ZIO_CHILD_DDT]) {
		ddt_t *ddt = ddt_select(zio->io_spa, bp);
		ddt_entry_t *dde = ddt_repair_start(ddt, bp);
		ddt_phys_t *ddp = dde->dde_phys;
		ddt_phys_t *ddp_self = ddt_phys_select(dde, bp);
		blkptr_t blk;

		ASSERT(zio->io_vsd == NULL);
		zio->io_vsd = dde;

		if (ddp_self == NULL)
			return (ZIO_PIPELINE_CONTINUE);

		for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
			if (ddp->ddp_phys_birth == 0 || ddp == ddp_self)
				continue;
			ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp,
			    &blk);
			zio_nowait(zio_read(zio, zio->io_spa, &blk,
			    zio_buf_alloc(zio->io_size), zio->io_size,
			    zio_ddt_child_read_done, dde, zio->io_priority,
			    ZIO_DDT_CHILD_FLAGS(zio) | ZIO_FLAG_DONT_PROPAGATE,
			    &zio->io_bookmark));
		}
		return (ZIO_PIPELINE_CONTINUE);
	}

	zio_nowait(zio_read(zio, zio->io_spa, bp,
	    zio->io_data, zio->io_size, NULL, NULL, zio->io_priority,
	    ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark));

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_ddt_read_done(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	if (zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_DONE))
		return (ZIO_PIPELINE_STOP);

	ASSERT(BP_GET_DEDUP(bp));
	ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);

	if (zio->io_child_error[ZIO_CHILD_DDT]) {
		ddt_t *ddt = ddt_select(zio->io_spa, bp);
		ddt_entry_t *dde = zio->io_vsd;
		if (ddt == NULL) {
			ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE);
			return (ZIO_PIPELINE_CONTINUE);
		}
		if (dde == NULL) {
			zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1;
			zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
			return (ZIO_PIPELINE_STOP);
		}
		if (dde->dde_repair_data != NULL) {
			bcopy(dde->dde_repair_data, zio->io_data, zio->io_size);
			zio->io_child_error[ZIO_CHILD_DDT] = 0;
		}
		ddt_repair_done(ddt, dde);
		zio->io_vsd = NULL;
	}

	ASSERT(zio->io_vsd == NULL);

	return (ZIO_PIPELINE_CONTINUE);
}

static boolean_t
zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde)
{
	spa_t *spa = zio->io_spa;

	/*
	 * Note: we compare the original data, not the transformed data,
	 * because when zio->io_bp is an override bp, we will not have
	 * pushed the I/O transforms. That's an important optimization
	 * because otherwise we'd compress/encrypt all dmu_sync() data twice.
	 */
	for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
		zio_t *lio = dde->dde_lead_zio[p];

		if (lio != NULL) {
			return (lio->io_orig_size != zio->io_orig_size ||
			    bcmp(zio->io_orig_data, lio->io_orig_data,
			    zio->io_orig_size) != 0);
		}
	}

	for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
		ddt_phys_t *ddp = &dde->dde_phys[p];

		if (ddp->ddp_phys_birth != 0) {
			arc_buf_t *abuf = NULL;
			uint32_t aflags = ARC_WAIT;
			blkptr_t blk = *zio->io_bp;
			int error;

			ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth);

			ddt_exit(ddt);

			error = arc_read(NULL, spa, &blk,
			    arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ,
			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
			    &aflags, &zio->io_bookmark);

			if (error == 0) {
				if (arc_buf_size(abuf) != zio->io_orig_size ||
				    bcmp(abuf->b_data, zio->io_orig_data,
				    zio->io_orig_size) != 0)
					error = SET_ERROR(EEXIST);
				VERIFY(arc_buf_remove_ref(abuf, &abuf));
			}

			ddt_enter(ddt);
			return (error != 0);
		}
	}

	return (B_FALSE);
}
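
/*
 * Illustrative sketch (guarded out of compilation): dedup-verify in
 * miniature.  Two buffers may only deduplicate against each other if both
 * the sizes and the bytes match; a checksum match alone (a possible hash
 * collision) is not trusted.  ddt_would_collide() is a hypothetical name
 * for illustration, not a real DDT interface.
 */
#if 0
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Return true if 'new_data' must NOT share a block with 'stored'. */
static bool
ddt_would_collide(const void *stored, size_t stored_size,
    const void *new_data, size_t new_size)
{
	return (stored_size != new_size ||
	    memcmp(stored, new_data, new_size) != 0);
}
#endif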

static void
zio_ddt_child_write_ready(zio_t *zio)
{
	int p = zio->io_prop.zp_copies;
	ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
	ddt_entry_t *dde = zio->io_private;
	ddt_phys_t *ddp = &dde->dde_phys[p];
	zio_t *pio;

	if (zio->io_error)
		return;

	ddt_enter(ddt);

	ASSERT(dde->dde_lead_zio[p] == zio);

	ddt_phys_fill(ddp, zio->io_bp);

	while ((pio = zio_walk_parents(zio)) != NULL)
		ddt_bp_fill(ddp, pio->io_bp, zio->io_txg);

	ddt_exit(ddt);
}

static void
zio_ddt_child_write_done(zio_t *zio)
{
	int p = zio->io_prop.zp_copies;
	ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
	ddt_entry_t *dde = zio->io_private;
	ddt_phys_t *ddp = &dde->dde_phys[p];

	ddt_enter(ddt);

	ASSERT(ddp->ddp_refcnt == 0);
	ASSERT(dde->dde_lead_zio[p] == zio);
	dde->dde_lead_zio[p] = NULL;

	if (zio->io_error == 0) {
		while (zio_walk_parents(zio) != NULL)
			ddt_phys_addref(ddp);
	} else {
		ddt_phys_clear(ddp);
	}

	ddt_exit(ddt);
}

static void
zio_ddt_ditto_write_done(zio_t *zio)
{
	int p = DDT_PHYS_DITTO;
	zio_prop_t *zp = &zio->io_prop;
	blkptr_t *bp = zio->io_bp;
	ddt_t *ddt = ddt_select(zio->io_spa, bp);
	ddt_entry_t *dde = zio->io_private;
	ddt_phys_t *ddp = &dde->dde_phys[p];
	ddt_key_t *ddk = &dde->dde_key;

	ddt_enter(ddt);

	ASSERT(ddp->ddp_refcnt == 0);
	ASSERT(dde->dde_lead_zio[p] == zio);
	dde->dde_lead_zio[p] = NULL;

	if (zio->io_error == 0) {
		ASSERT(ZIO_CHECKSUM_EQUAL(bp->blk_cksum, ddk->ddk_cksum));
		ASSERT(zp->zp_copies < SPA_DVAS_PER_BP);
		ASSERT(zp->zp_copies == BP_GET_NDVAS(bp) - BP_IS_GANG(bp));
		if (ddp->ddp_phys_birth != 0)
			ddt_phys_free(ddt, ddk, ddp, zio->io_txg);
		ddt_phys_fill(ddp, bp);
	}

	ddt_exit(ddt);
}

static int
zio_ddt_write(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	blkptr_t *bp = zio->io_bp;
	uint64_t txg = zio->io_txg;
	zio_prop_t *zp = &zio->io_prop;
	int p = zp->zp_copies;
	int ditto_copies;
	zio_t *cio = NULL;
	zio_t *dio = NULL;
	ddt_t *ddt = ddt_select(spa, bp);
	ddt_entry_t *dde;
	ddt_phys_t *ddp;

	ASSERT(BP_GET_DEDUP(bp));
	ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum);
	ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override);

	ddt_enter(ddt);
	dde = ddt_lookup(ddt, bp, B_TRUE);
	ddp = &dde->dde_phys[p];

	if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) {
		/*
		 * If we're using a weak checksum, upgrade to a strong checksum
		 * and try again. If we're already using a strong checksum,
		 * we can't resolve it, so just convert to an ordinary write.
		 * (And automatically e-mail a paper to Nature?)
		 */
		if (!zio_checksum_table[zp->zp_checksum].ci_dedup) {
			zp->zp_checksum = spa_dedup_checksum(spa);
			zio_pop_transforms(zio);
			zio->io_stage = ZIO_STAGE_OPEN;
			BP_ZERO(bp);
		} else {
			zp->zp_dedup = B_FALSE;
		}
		zio->io_pipeline = ZIO_WRITE_PIPELINE;
		ddt_exit(ddt);
		return (ZIO_PIPELINE_CONTINUE);
	}

	ditto_copies = ddt_ditto_copies_needed(ddt, dde, ddp);
	ASSERT(ditto_copies < SPA_DVAS_PER_BP);

	if (ditto_copies > ddt_ditto_copies_present(dde) &&
	    dde->dde_lead_zio[DDT_PHYS_DITTO] == NULL) {
		zio_prop_t czp = *zp;

		czp.zp_copies = ditto_copies;

		/*
		 * If we arrived here with an override bp, we won't have run
		 * the transform stack, so we won't have the data we need to
		 * generate a child i/o. So, toss the override bp and restart.
		 * This is safe, because using the override bp is just an
		 * optimization; and it's rare, so the cost doesn't matter.
		 */
		if (zio->io_bp_override) {
			zio_pop_transforms(zio);
			zio->io_stage = ZIO_STAGE_OPEN;
			zio->io_pipeline = ZIO_WRITE_PIPELINE;
			zio->io_bp_override = NULL;
			BP_ZERO(bp);
			ddt_exit(ddt);
			return (ZIO_PIPELINE_CONTINUE);
		}

		dio = zio_write(zio, spa, txg, bp, zio->io_orig_data,
		    zio->io_orig_size, &czp, NULL, NULL,
		    zio_ddt_ditto_write_done, dde, zio->io_priority,
		    ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark);

		zio_push_transform(dio, zio->io_data, zio->io_size, 0, NULL);
		dde->dde_lead_zio[DDT_PHYS_DITTO] = dio;
	}

	if (ddp->ddp_phys_birth != 0 || dde->dde_lead_zio[p] != NULL) {
		if (ddp->ddp_phys_birth != 0)
			ddt_bp_fill(ddp, bp, txg);
		if (dde->dde_lead_zio[p] != NULL)
			zio_add_child(zio, dde->dde_lead_zio[p]);
		else
			ddt_phys_addref(ddp);
	} else if (zio->io_bp_override) {
		ASSERT(bp->blk_birth == txg);
		ASSERT(BP_EQUAL(bp, zio->io_bp_override));
		ddt_phys_fill(ddp, bp);
		ddt_phys_addref(ddp);
	} else {
		cio = zio_write(zio, spa, txg, bp, zio->io_orig_data,
		    zio->io_orig_size, zp, zio_ddt_child_write_ready, NULL,
		    zio_ddt_child_write_done, dde, zio->io_priority,
		    ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark);

		zio_push_transform(cio, zio->io_data, zio->io_size, 0, NULL);
		dde->dde_lead_zio[p] = cio;
	}

	ddt_exit(ddt);

	if (cio)
		zio_nowait(cio);
	if (dio)
		zio_nowait(dio);

	return (ZIO_PIPELINE_CONTINUE);
}
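
/*
 * Illustrative sketch (guarded out of compilation): the core bookkeeping
 * of a dedup write.  A table entry keyed by checksum either already has a
 * physical block (bump its refcount) or needs one written (store it with
 * refcount 1).  The tiny table and the names ddt_slot_t/ddt_write() are
 * hypothetical simplifications; the toy hashing ignores collisions that
 * the real DDT of course handles.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define	DDT_SLOTS	16

typedef struct ddt_slot {
	uint64_t	cksum;		/* 0 => empty slot */
	uint64_t	block;		/* "allocated" block id */
	uint64_t	refcnt;
} ddt_slot_t;

static ddt_slot_t table[DDT_SLOTS];
static uint64_t next_block = 1;

/* Return the block id backing data with checksum 'cksum'. */
static uint64_t
ddt_write(uint64_t cksum)
{
	ddt_slot_t *s = &table[cksum % DDT_SLOTS];

	if (s->cksum == cksum) {
		s->refcnt++;		/* existing copy: just addref */
	} else {
		s->cksum = cksum;	/* new entry: really write it */
		s->block = next_block++;
		s->refcnt = 1;
	}
	return (s->block);
}

int
main(void)
{
	/* Both writes of checksum 42 land on the same block. */
	printf("block %llu\n", (unsigned long long)ddt_write(42));
	printf("block %llu\n", (unsigned long long)ddt_write(42));
	return (0);
}
#endif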

ddt_entry_t *freedde;			/* for debugging */

static int
zio_ddt_free(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	blkptr_t *bp = zio->io_bp;
	ddt_t *ddt = ddt_select(spa, bp);
	ddt_entry_t *dde;
	ddt_phys_t *ddp;

	ASSERT(BP_GET_DEDUP(bp));
	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);

	ddt_enter(ddt);
	freedde = dde = ddt_lookup(ddt, bp, B_TRUE);
	ddp = ddt_phys_select(dde, bp);
	ddt_phys_decref(ddp);
	ddt_exit(ddt);

	return (ZIO_PIPELINE_CONTINUE);
}

/*
 * ==========================================================================
 * Allocate and free blocks
 * ==========================================================================
 */
static int
zio_dva_allocate(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	metaslab_class_t *mc = spa_normal_class(spa);
	blkptr_t *bp = zio->io_bp;
	int error;
	int flags = 0;

	if (zio->io_gang_leader == NULL) {
		ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
		zio->io_gang_leader = zio;
	}

	ASSERT(BP_IS_HOLE(bp));
	ASSERT0(BP_GET_NDVAS(bp));
	ASSERT3U(zio->io_prop.zp_copies, >, 0);
	ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa));
	ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp));

	/*
	 * The dump device does not support gang blocks so allocation on
	 * behalf of the dump device (i.e. ZIO_FLAG_NODATA) must avoid
	 * the "fast" gang feature.
	 */
	flags |= (zio->io_flags & ZIO_FLAG_NODATA) ? METASLAB_GANG_AVOID : 0;
	flags |= (zio->io_flags & ZIO_FLAG_GANG_CHILD) ?
	    METASLAB_GANG_CHILD : 0;
	error = metaslab_alloc(spa, mc, zio->io_size, bp,
	    zio->io_prop.zp_copies, zio->io_txg, NULL, flags);

	if (error) {
		spa_dbgmsg(spa, "%s: metaslab allocation failure: zio %p, "
		    "size %llu, error %d", spa_name(spa), zio, zio->io_size,
		    error);
		if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE)
			return (zio_write_gang_block(zio));
		zio->io_error = error;
	}

	return (ZIO_PIPELINE_CONTINUE);
}
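
/*
 * Illustrative sketch (guarded out of compilation): the fallback above as
 * a bare control-flow pattern -- try one contiguous allocation, and only
 * if that fails with ENOSPC (and the request is still splittable) retry
 * it as several smaller pieces.  The toy allocator state and the names
 * alloc_contig()/alloc_block() are hypothetical.
 */
#if 0
#include <errno.h>
#include <stddef.h>
#include <stdio.h>

#define	MIN_PIECE	512

static size_t largest_free_run = 4096;	/* toy fragmentation state */

static int
alloc_contig(size_t size)
{
	return (size <= largest_free_run ? 0 : ENOSPC);
}

static int
alloc_block(size_t size)
{
	int error = alloc_contig(size);

	if (error == ENOSPC && size > MIN_PIECE) {
		/* "gang" the request: carve it into smaller pieces */
		size_t pieces = (size + largest_free_run - 1) /
		    largest_free_run;
		printf("split %zu bytes into %zu pieces\n", size, pieces);
		return (0);
	}
	return (error);
}

int
main(void)
{
	return (alloc_block(128 * 1024));
}
#endif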

static int
zio_dva_free(zio_t *zio)
{
	metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE);

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_dva_claim(zio_t *zio)
{
	int error;

	error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg);
	if (error)
		zio->io_error = error;

	return (ZIO_PIPELINE_CONTINUE);
}

/*
 * Undo an allocation. This is used by zio_done() when an I/O fails
 * and we want to give back the block we just allocated.
 * This handles both normal blocks and gang blocks.
 */
static void
zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp)
{
	ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp));
	ASSERT(zio->io_bp_override == NULL);

	if (!BP_IS_HOLE(bp))
		metaslab_free(zio->io_spa, bp, bp->blk_birth, B_TRUE);

	if (gn != NULL) {
		for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
			zio_dva_unallocate(zio, gn->gn_child[g],
			    &gn->gn_gbh->zg_blkptr[g]);
		}
	}
}

/*
 * Try to allocate an intent log block. Return 0 on success, errno on failure.
 */
int
zio_alloc_zil(spa_t *spa, uint64_t txg, blkptr_t *new_bp, blkptr_t *old_bp,
    uint64_t size, boolean_t use_slog)
{
	int error = 1;

	ASSERT(txg > spa_syncing_txg(spa));

	/*
	 * ZIL blocks are always contiguous (i.e. not gang blocks) so we
	 * set the METASLAB_GANG_AVOID flag so that they don't "fast gang"
	 * when allocating them.
	 */
	if (use_slog) {
		error = metaslab_alloc(spa, spa_log_class(spa), size,
		    new_bp, 1, txg, old_bp,
		    METASLAB_HINTBP_AVOID | METASLAB_GANG_AVOID);
	}

	if (error) {
		error = metaslab_alloc(spa, spa_normal_class(spa), size,
		    new_bp, 1, txg, old_bp,
		    METASLAB_HINTBP_AVOID);
	}

	if (error == 0) {
		BP_SET_LSIZE(new_bp, size);
		BP_SET_PSIZE(new_bp, size);
		BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF);
		BP_SET_CHECKSUM(new_bp,
		    spa_version(spa) >= SPA_VERSION_SLIM_ZIL
		    ? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG);
		BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
		BP_SET_LEVEL(new_bp, 0);
		BP_SET_DEDUP(new_bp, 0);
		BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER);
	}

	return (error);
}

/*
 * Free an intent log block.
 */
void
zio_free_zil(spa_t *spa, uint64_t txg, blkptr_t *bp)
{
	ASSERT(BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG);
	ASSERT(!BP_IS_GANG(bp));

	zio_free(spa, txg, bp);
}

/*
 * ==========================================================================
 * Read and write to physical devices
 * ==========================================================================
 */
static int
zio_vdev_io_start(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	uint64_t align;
	spa_t *spa = zio->io_spa;

	ASSERT(zio->io_error == 0);
	ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0);

	if (vd == NULL) {
		if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
			spa_config_enter(spa, SCL_ZIO, zio, RW_READER);

		/*
		 * The mirror_ops handle multiple DVAs in a single BP.
		 */
		return (vdev_mirror_ops.vdev_op_io_start(zio));
	}

	/*
	 * We keep track of time-sensitive I/Os so that the scan thread
	 * can quickly react to certain workloads. In particular, we care
	 * about non-scrubbing, top-level reads and writes with the following
	 * characteristics:
	 *	- synchronous writes of user data to non-slog devices
	 *	- any reads of user data
	 * When these conditions are met, adjust the timestamp of spa_last_io
	 * which allows the scan thread to adjust its workload accordingly.
	 */
	if (!(zio->io_flags & ZIO_FLAG_SCAN_THREAD) && zio->io_bp != NULL &&
	    vd == vd->vdev_top && !vd->vdev_islog &&
	    zio->io_bookmark.zb_objset != DMU_META_OBJSET &&
	    zio->io_txg != spa_syncing_txg(spa)) {
		uint64_t old = spa->spa_last_io;
		uint64_t new = ddi_get_lbolt64();
		if (old != new)
			(void) atomic_cas_64(&spa->spa_last_io, old, new);
	}

	align = 1ULL << vd->vdev_top->vdev_ashift;

	if (P2PHASE(zio->io_size, align) != 0) {
		uint64_t asize = P2ROUNDUP(zio->io_size, align);
		char *abuf = zio_buf_alloc(asize);
		ASSERT(vd == vd->vdev_top);
		if (zio->io_type == ZIO_TYPE_WRITE) {
			bcopy(zio->io_data, abuf, zio->io_size);
			bzero(abuf + zio->io_size, asize - zio->io_size);
		}
		zio_push_transform(zio, abuf, asize, asize, zio_subblock);
	}

	ASSERT(P2PHASE(zio->io_offset, align) == 0);
	ASSERT(P2PHASE(zio->io_size, align) == 0);
	VERIFY(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa));

	/*
	 * If this is a repair I/O, and there's no self-healing involved --
	 * that is, we're just resilvering what we expect to resilver --
	 * then don't do the I/O unless zio's txg is actually in vd's DTL.
	 * This prevents spurious resilvering with nested replication.
	 * For example, given a mirror of mirrors, (A+B)+(C+D), if only
	 * A is out of date, we'll read from C+D, then use the data to
	 * resilver A+B -- but we don't actually want to resilver B, just A.
	 * The top-level mirror has no way to know this, so instead we just
	 * discard unnecessary repairs as we work our way down the vdev tree.
	 * The same logic applies to any form of nested replication:
	 * ditto + mirror, RAID-Z + replacing, etc. This covers them all.
	 */
	if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) &&
	    !(zio->io_flags & ZIO_FLAG_SELF_HEAL) &&
	    zio->io_txg != 0 &&	/* not a delegated i/o */
	    !vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) {
		ASSERT(zio->io_type == ZIO_TYPE_WRITE);
		zio_vdev_io_bypass(zio);
		return (ZIO_PIPELINE_CONTINUE);
	}

	if (vd->vdev_ops->vdev_op_leaf &&
	    (zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE)) {

		if (zio->io_type == ZIO_TYPE_READ && vdev_cache_read(zio))
			return (ZIO_PIPELINE_CONTINUE);

		if ((zio = vdev_queue_io(zio)) == NULL)
			return (ZIO_PIPELINE_STOP);

		if (!vdev_accessible(vd, zio)) {
			zio->io_error = SET_ERROR(ENXIO);
			zio_interrupt(zio);
			return (ZIO_PIPELINE_STOP);
		}
	}

	return (vd->vdev_ops->vdev_op_io_start(zio));
}
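
/*
 * Illustrative sketch (guarded out of compilation): the P2 alignment
 * arithmetic used above.  For a power-of-two 'align', P2PHASE gives the
 * offset within an aligned chunk and P2ROUNDUP rounds a size up to the
 * next boundary -- exactly how an undersized I/O gets padded out to the
 * vdev's ashift.  The _X macros are local stand-ins for the sysmacros.
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define	P2PHASE_X(x, a)		((x) & ((a) - 1))
#define	P2ROUNDUP_X(x, a)	(-(-(x) & -(a)))

int
main(void)
{
	uint64_t align = 1ULL << 12;		/* ashift = 12 => 4K */
	uint64_t size = 6656;			/* 6.5K, not 4K-aligned */

	assert(P2PHASE_X(size, align) != 0);	/* needs padding */
	assert(P2ROUNDUP_X(size, align) == 8192);
	assert(P2PHASE_X(P2ROUNDUP_X(size, align), align) == 0);
	return (0);
}
#endif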

static int
zio_vdev_io_done(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops;
	boolean_t unexpected_error = B_FALSE;

	if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE))
		return (ZIO_PIPELINE_STOP);

	ASSERT(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE);

	if (vd != NULL && vd->vdev_ops->vdev_op_leaf) {

		vdev_queue_io_done(zio);

		if (zio->io_type == ZIO_TYPE_WRITE)
			vdev_cache_write(zio);

		if (zio_injection_enabled && zio->io_error == 0)
			zio->io_error = zio_handle_device_injection(vd,
			    zio, EIO);

		if (zio_injection_enabled && zio->io_error == 0)
			zio->io_error = zio_handle_label_injection(zio, EIO);

		if (zio->io_error) {
			if (!vdev_accessible(vd, zio)) {
				zio->io_error = SET_ERROR(ENXIO);
			} else {
				unexpected_error = B_TRUE;
			}
		}
	}

	ops->vdev_op_io_done(zio);

	if (unexpected_error)
		VERIFY(vdev_probe(vd, zio) == NULL);

	return (ZIO_PIPELINE_CONTINUE);
}

/*
 * For non-raidz ZIOs, we can just copy aside the bad data read from the
 * disk, and use that to finish the checksum ereport later.
 */
static void
zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr,
    const void *good_buf)
{
	/* no processing needed */
	zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE);
}

/*ARGSUSED*/
void
zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr, void *ignored)
{
	void *buf = zio_buf_alloc(zio->io_size);

	bcopy(zio->io_data, buf, zio->io_size);

	zcr->zcr_cbinfo = zio->io_size;
	zcr->zcr_cbdata = buf;
	zcr->zcr_finish = zio_vsd_default_cksum_finish;
	zcr->zcr_free = zio_buf_free;
}

static int
zio_vdev_io_assess(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;

	if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE))
		return (ZIO_PIPELINE_STOP);

	if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
		spa_config_exit(zio->io_spa, SCL_ZIO, zio);

	if (zio->io_vsd != NULL) {
		zio->io_vsd_ops->vsd_free(zio);
		zio->io_vsd = NULL;
	}

	if (zio_injection_enabled && zio->io_error == 0)
		zio->io_error = zio_handle_fault_injection(zio, EIO);

	/*
	 * If the I/O failed, determine whether we should attempt to retry it.
	 *
	 * On retry, we cut in line in the issue queue, since we don't want
	 * compression/checksumming/etc. work to prevent our (cheap) IO reissue.
	 */
	if (zio->io_error && vd == NULL &&
	    !(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) {
		ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE));	/* not a leaf */
		ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS));	/* not a leaf */
		zio->io_error = 0;
		zio->io_flags |= ZIO_FLAG_IO_RETRY |
		    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE;
		zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1;
		zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE,
		    zio_requeue_io_start_cut_in_line);
		return (ZIO_PIPELINE_STOP);
	}

	/*
	 * If we got an error on a leaf device, convert it to ENXIO
	 * if the device is not accessible at all.
	 */
	if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf &&
	    !vdev_accessible(vd, zio))
		zio->io_error = SET_ERROR(ENXIO);

	/*
	 * If we can't write to an interior vdev (mirror or RAID-Z),
	 * set vdev_cant_write so that we stop trying to allocate from it.
	 */
	if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE &&
	    vd != NULL && !vd->vdev_ops->vdev_op_leaf) {
		vd->vdev_cant_write = B_TRUE;
	}

	if (zio->io_error)
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

	if (vd != NULL && vd->vdev_ops->vdev_op_leaf &&
	    zio->io_physdone != NULL) {
		ASSERT(!(zio->io_flags & ZIO_FLAG_DELEGATED));
		ASSERT(zio->io_child_type == ZIO_CHILD_VDEV);
		zio->io_physdone(zio->io_logical);
	}

	return (ZIO_PIPELINE_CONTINUE);
}

void
zio_vdev_io_reissue(zio_t *zio)
{
	ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
	ASSERT(zio->io_error == 0);

	zio->io_stage >>= 1;
}

void
zio_vdev_io_redone(zio_t *zio)
{
	ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE);

	zio->io_stage >>= 1;
}

void
zio_vdev_io_bypass(zio_t *zio)
{
	ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
	ASSERT(zio->io_error == 0);

	zio->io_flags |= ZIO_FLAG_IO_BYPASS;
	zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1;
}
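
/*
 * Illustrative sketch (guarded out of compilation): pipeline stages are
 * one-hot bits, so "stage >>= 1" in the helpers above backs the cursor up
 * by one stage, and the dispatcher advances by shifting left until it
 * hits a bit that is set in the pipeline mask.  The stage names and the
 * advance loop below are simplified stand-ins, not the real zio_execute().
 */
#if 0
#include <assert.h>
#include <stdint.h>

enum {
	ST_OPEN		= 1 << 0,
	ST_ISSUE	= 1 << 1,
	ST_DONE		= 1 << 2
};

static uint32_t
next_stage(uint32_t stage, uint32_t pipeline)
{
	do {
		stage <<= 1;			/* advance one-hot cursor */
	} while ((stage & pipeline) == 0);	/* skip unselected stages */
	return (stage);
}

int
main(void)
{
	uint32_t pipeline = ST_OPEN | ST_DONE;	/* ISSUE not selected */

	assert(next_stage(ST_OPEN, pipeline) == ST_DONE);
	/* Backing up with >> 1 makes the same stage run again: */
	assert(next_stage(ST_DONE >> 1, pipeline) == ST_DONE);
	return (0);
}
#endif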

/*
 * ==========================================================================
 * Generate and verify checksums
 * ==========================================================================
 */
static int
zio_checksum_generate(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	enum zio_checksum checksum;

	if (bp == NULL) {
		/*
		 * This is zio_write_phys().
		 * We're either generating a label checksum, or none at all.
		 */
		checksum = zio->io_prop.zp_checksum;

		if (checksum == ZIO_CHECKSUM_OFF)
			return (ZIO_PIPELINE_CONTINUE);

		ASSERT(checksum == ZIO_CHECKSUM_LABEL);
	} else {
		if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) {
			ASSERT(!IO_IS_ALLOCATING(zio));
			checksum = ZIO_CHECKSUM_GANG_HEADER;
		} else {
			checksum = BP_GET_CHECKSUM(bp);
		}
	}

	zio_checksum_compute(zio, checksum, zio->io_data, zio->io_size);

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_checksum_verify(zio_t *zio)
{
	zio_bad_cksum_t info;
	blkptr_t *bp = zio->io_bp;
	int error;

	ASSERT(zio->io_vd != NULL);

	if (bp == NULL) {
		/*
		 * This is zio_read_phys().
		 * We're either verifying a label checksum, or nothing at all.
		 */
		if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF)
			return (ZIO_PIPELINE_CONTINUE);

		ASSERT(zio->io_prop.zp_checksum == ZIO_CHECKSUM_LABEL);
	}

	if ((error = zio_checksum_error(zio, &info)) != 0) {
		zio->io_error = error;
		if (!(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
			zfs_ereport_start_checksum(zio->io_spa,
			    zio->io_vd, zio, zio->io_offset,
			    zio->io_size, NULL, &info);
		}
	}

	return (ZIO_PIPELINE_CONTINUE);
}

/*
 * Called by RAID-Z to ensure we don't compute the checksum twice.
 */
void
zio_checksum_verified(zio_t *zio)
{
	zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
}

/*
 * ==========================================================================
 * Error rank. Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other.
 * An error of 0 indicates success. ENXIO indicates whole-device failure,
 * which may be transient (e.g. unplugged) or permanent. ECKSUM and EIO
 * indicate errors that are specific to one I/O, and most likely permanent.
 * Any other error is presumed to be worse because we weren't expecting it.
 * ==========================================================================
 */
int
zio_worst_error(int e1, int e2)
{
	static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO };
	int r1, r2;

	for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++)
		if (e1 == zio_error_rank[r1])
			break;

	for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++)
		if (e2 == zio_error_rank[r2])
			break;

	return (r1 > r2 ? e1 : e2);
}
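
/*
 * Illustrative sketch (guarded out of compilation): how the ranking above
 * behaves.  Anything not in the rank table falls off the end of the loop
 * and is treated as worst of all.  This links against zio_worst_error()
 * defined above when extracted into a standalone test.
 */
#if 0
#include <assert.h>
#include <errno.h>

extern int zio_worst_error(int, int);

int
main(void)
{
	assert(zio_worst_error(0, ENXIO) == ENXIO);	/* any error > ok */
	assert(zio_worst_error(ENXIO, ECKSUM) == ECKSUM);
	assert(zio_worst_error(ECKSUM, EIO) == EIO);
	assert(zio_worst_error(EIO, EINVAL) == EINVAL);	/* unexpected: worst */
	return (0);
}
#endif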

/*
 * ==========================================================================
 * I/O completion
 * ==========================================================================
 */
static int
zio_ready(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	zio_t *pio, *pio_next;

	if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_READY) ||
	    zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_READY))
		return (ZIO_PIPELINE_STOP);

	if (zio->io_ready) {
		ASSERT(IO_IS_ALLOCATING(zio));
		ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp) ||
		    (zio->io_flags & ZIO_FLAG_NOPWRITE));
		ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0);

		zio->io_ready(zio);
	}

	if (bp != NULL && bp != &zio->io_bp_copy)
		zio->io_bp_copy = *bp;

	if (zio->io_error)
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

	mutex_enter(&zio->io_lock);
	zio->io_state[ZIO_WAIT_READY] = 1;
	pio = zio_walk_parents(zio);
	mutex_exit(&zio->io_lock);

	/*
	 * As we notify zio's parents, new parents could be added.
	 * New parents go to the head of zio's io_parent_list, however,
	 * so we will (correctly) not notify them. The remainder of zio's
	 * io_parent_list, from 'pio_next' onward, cannot change because
	 * all parents must wait for us to be done before they can be done.
	 */
	for (; pio != NULL; pio = pio_next) {
		pio_next = zio_walk_parents(zio);
		zio_notify_parent(pio, zio, ZIO_WAIT_READY);
	}

	if (zio->io_flags & ZIO_FLAG_NODATA) {
		if (BP_IS_GANG(bp)) {
			zio->io_flags &= ~ZIO_FLAG_NODATA;
		} else {
			ASSERT((uintptr_t)zio->io_data < SPA_MAXBLOCKSIZE);
			zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
		}
	}

	if (zio_injection_enabled &&
	    zio->io_spa->spa_syncing_txg == zio->io_txg)
		zio_handle_ignored_writes(zio);

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	zio_t *lio = zio->io_logical;
	blkptr_t *bp = zio->io_bp;
	vdev_t *vd = zio->io_vd;
	uint64_t psize = zio->io_size;
	zio_t *pio, *pio_next;

	/*
	 * If our children haven't all completed,
	 * wait for them and then repeat this pipeline stage.
	 */
	if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE) ||
	    zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_DONE) ||
	    zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_DONE) ||
	    zio_wait_for_children(zio, ZIO_CHILD_LOGICAL, ZIO_WAIT_DONE))
		return (ZIO_PIPELINE_STOP);

	for (int c = 0; c < ZIO_CHILD_TYPES; c++)
		for (int w = 0; w < ZIO_WAIT_TYPES; w++)
			ASSERT(zio->io_children[c][w] == 0);

	if (bp != NULL) {
		ASSERT(bp->blk_pad[0] == 0);
		ASSERT(bp->blk_pad[1] == 0);
		ASSERT(bcmp(bp, &zio->io_bp_copy, sizeof (blkptr_t)) == 0 ||
		    (bp == zio_unique_parent(zio)->io_bp));
		if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(bp) &&
		    zio->io_bp_override == NULL &&
		    !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) {
			ASSERT(!BP_SHOULD_BYTESWAP(bp));
			ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(bp));
			ASSERT(BP_COUNT_GANG(bp) == 0 ||
			    (BP_COUNT_GANG(bp) == BP_GET_NDVAS(bp)));
		}
		if (zio->io_flags & ZIO_FLAG_NOPWRITE)
			VERIFY(BP_EQUAL(bp, &zio->io_bp_orig));
	}

	/*
	 * If there were child vdev/gang/ddt errors, they apply to us now.
	 */
	zio_inherit_child_errors(zio, ZIO_CHILD_VDEV);
	zio_inherit_child_errors(zio, ZIO_CHILD_GANG);
	zio_inherit_child_errors(zio, ZIO_CHILD_DDT);

	/*
	 * If the I/O on the transformed data was successful, generate any
	 * checksum reports now while we still have the transformed data.
	 */
	if (zio->io_error == 0) {
		while (zio->io_cksum_report != NULL) {
			zio_cksum_report_t *zcr = zio->io_cksum_report;
			uint64_t align = zcr->zcr_align;
			uint64_t asize = P2ROUNDUP(psize, align);
			char *abuf = zio->io_data;

			if (asize != psize) {
				abuf = zio_buf_alloc(asize);
				bcopy(zio->io_data, abuf, psize);
				bzero(abuf + psize, asize - psize);
			}

			zio->io_cksum_report = zcr->zcr_next;
			zcr->zcr_next = NULL;
			zcr->zcr_finish(zcr, abuf);
			zfs_ereport_free_checksum(zcr);

			if (asize != psize)
				zio_buf_free(abuf, asize);
		}
	}

	zio_pop_transforms(zio);	/* note: may set zio->io_error */

	vdev_stat_update(zio, psize);

	if (zio->io_error) {
		/*
		 * If this I/O is attached to a particular vdev,
		 * generate an error message describing the I/O failure
		 * at the block level. We ignore these errors if the
		 * device is currently unavailable.
		 */
		if (zio->io_error != ECKSUM && vd != NULL && !vdev_is_dead(vd))
			zfs_ereport_post(FM_EREPORT_ZFS_IO, spa, vd, zio, 0, 0);

		if ((zio->io_error == EIO || !(zio->io_flags &
		    (ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) &&
		    zio == lio) {
			/*
			 * For logical I/O requests, tell the SPA to log the
			 * error and generate a logical data ereport.
			 */
			spa_log_error(spa, zio);
			zfs_ereport_post(FM_EREPORT_ZFS_DATA, spa, NULL, zio,
			    0, 0);
		}
	}

	if (zio->io_error && zio == lio) {
		/*
		 * Determine whether zio should be reexecuted. This will
		 * propagate all the way to the root via zio_notify_parent().
		 */
		ASSERT(vd == NULL && bp != NULL);
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);

		if (IO_IS_ALLOCATING(zio) &&
		    !(zio->io_flags & ZIO_FLAG_CANFAIL)) {
			if (zio->io_error != ENOSPC)
				zio->io_reexecute |= ZIO_REEXECUTE_NOW;
			else
				zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
		}

		if ((zio->io_type == ZIO_TYPE_READ ||
		    zio->io_type == ZIO_TYPE_FREE) &&
		    !(zio->io_flags & ZIO_FLAG_SCAN_THREAD) &&
		    zio->io_error == ENXIO &&
		    spa_load_state(spa) == SPA_LOAD_NONE &&
		    spa_get_failmode(spa) != ZIO_FAILURE_MODE_CONTINUE)
			zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;

		if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute)
			zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;

		/*
		 * Here is a possibly good place to attempt to do
		 * either combinatorial reconstruction or error correction
		 * based on checksums. It also might be a good place
		 * to send out preliminary ereports before we suspend
		 * processing.
		 */
	}

	/*
	 * If there were logical child errors, they apply to us now.
	 * We defer this until now to avoid conflating logical child
	 * errors with errors that happened to the zio itself when
	 * updating vdev stats and reporting FMA events above.
	 */
	zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL);

	if ((zio->io_error || zio->io_reexecute) &&
	    IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio &&
	    !(zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)))
		zio_dva_unallocate(zio, zio->io_gang_tree, bp);

	zio_gang_tree_free(&zio->io_gang_tree);

	/*
	 * Godfather I/Os should never suspend.
	 */
	if ((zio->io_flags & ZIO_FLAG_GODFATHER) &&
	    (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND))
		zio->io_reexecute = 0;

	if (zio->io_reexecute) {
		/*
		 * This is a logical I/O that wants to reexecute.
		 *
		 * Reexecute is top-down. When an i/o fails, if it's not
		 * the root, it simply notifies its parent and sticks around.
		 * The parent, seeing that it still has children in zio_done(),
		 * does the same. This percolates all the way up to the root.
		 * The root i/o will reexecute or suspend the entire tree.
		 *
		 * This approach ensures that zio_reexecute() honors
		 * all the original i/o dependency relationships, e.g.
		 * parents not executing until children are ready.
		 */
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);

		zio->io_gang_leader = NULL;

		mutex_enter(&zio->io_lock);
		zio->io_state[ZIO_WAIT_DONE] = 1;
		mutex_exit(&zio->io_lock);

		/*
		 * "The Godfather" I/O monitors its children but is
		 * not a true parent to them. It will track them through
		 * the pipeline but severs its ties whenever they get into
		 * trouble (e.g. suspended). This allows "The Godfather"
		 * I/O to return status without blocking.
		 */
		for (pio = zio_walk_parents(zio); pio != NULL; pio = pio_next) {
			zio_link_t *zl = zio->io_walk_link;
			pio_next = zio_walk_parents(zio);

			if ((pio->io_flags & ZIO_FLAG_GODFATHER) &&
			    (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) {
				zio_remove_child(pio, zio, zl);
				zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
			}
		}

		if ((pio = zio_unique_parent(zio)) != NULL) {
			/*
			 * We're not a root i/o, so there's nothing to do
			 * but notify our parent. Don't propagate errors
			 * upward since we haven't permanently failed yet.
			 */
			ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
			zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE;
			zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
		} else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) {
			/*
			 * We'd fail again if we reexecuted now, so suspend
			 * until conditions improve (e.g. device comes online).
			 */
			zio_suspend(spa, zio);
		} else {
			/*
			 * Reexecution is potentially a huge amount of work.
			 * Hand it off to the otherwise-unused claim taskq.
			 */
			ASSERT(zio->io_tqent.tqent_next == NULL);
			spa_taskq_dispatch_ent(spa, ZIO_TYPE_CLAIM,
			    ZIO_TASKQ_ISSUE, (task_func_t *)zio_reexecute, zio,
			    0, &zio->io_tqent);
		}
		return (ZIO_PIPELINE_STOP);
	}

	ASSERT(zio->io_child_count == 0);
	ASSERT(zio->io_reexecute == 0);
	ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL));

	/*
	 * Report any checksum errors, since the I/O is complete.
	 */
	while (zio->io_cksum_report != NULL) {
		zio_cksum_report_t *zcr = zio->io_cksum_report;
		zio->io_cksum_report = zcr->zcr_next;
		zcr->zcr_next = NULL;
		zcr->zcr_finish(zcr, NULL);
		zfs_ereport_free_checksum(zcr);
	}

	/*
	 * It is the responsibility of the done callback to ensure that this
	 * particular zio is no longer discoverable for adoption, and as
	 * such, cannot acquire any new parents.
	 */
	if (zio->io_done)
		zio->io_done(zio);

	mutex_enter(&zio->io_lock);
	zio->io_state[ZIO_WAIT_DONE] = 1;
	mutex_exit(&zio->io_lock);

	for (pio = zio_walk_parents(zio); pio != NULL; pio = pio_next) {
		zio_link_t *zl = zio->io_walk_link;
		pio_next = zio_walk_parents(zio);
		zio_remove_child(pio, zio, zl);
		zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
	}

	if (zio->io_waiter != NULL) {
		mutex_enter(&zio->io_lock);
		zio->io_executor = NULL;
		cv_broadcast(&zio->io_cv);
		mutex_exit(&zio->io_lock);
	} else {
		zio_destroy(zio);
	}

	return (ZIO_PIPELINE_STOP);
}

/*
 * ==========================================================================
 * I/O pipeline definition
 * ==========================================================================
 */
static zio_pipe_stage_t *zio_pipeline[] = {
	NULL,
	zio_read_bp_init,
	zio_free_bp_init,
	zio_issue_async,
	zio_write_bp_init,
	zio_checksum_generate,
	zio_nop_write,
	zio_ddt_read_start,
	zio_ddt_read_done,
	zio_ddt_write,
	zio_ddt_free,
	zio_gang_assemble,
	zio_gang_issue,
	zio_dva_allocate,
	zio_dva_free,
	zio_dva_claim,
	zio_ready,
	zio_vdev_io_start,
	zio_vdev_io_done,
	zio_vdev_io_assess,
	zio_checksum_verify,
	zio_done
};

/* dnp is the dnode for zb1->zb_object */
boolean_t
zbookmark_is_before(const dnode_phys_t *dnp, const zbookmark_t *zb1,
    const zbookmark_t *zb2)
{
	uint64_t zb1nextL0, zb2thisobj;

	ASSERT(zb1->zb_objset == zb2->zb_objset);
	ASSERT(zb2->zb_level == 0);

	/*
	 * A bookmark in the deadlist is considered to be after
	 * everything else.
	 */
	if (zb2->zb_object == DMU_DEADLIST_OBJECT)
		return (B_TRUE);

	/* The objset_phys_t isn't before anything. */
	if (dnp == NULL)
		return (B_FALSE);

	zb1nextL0 = (zb1->zb_blkid + 1) <<
	    ((zb1->zb_level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT));

	zb2thisobj = zb2->zb_object ? zb2->zb_object :
	    zb2->zb_blkid << (DNODE_BLOCK_SHIFT - DNODE_SHIFT);

	if (zb1->zb_object == DMU_META_DNODE_OBJECT) {
		uint64_t nextobj = zb1nextL0 *
		    (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT) >> DNODE_SHIFT;
		return (nextobj <= zb2thisobj);
	}

	if (zb1->zb_object < zb2thisobj)
		return (B_TRUE);
	if (zb1->zb_object > zb2thisobj)
		return (B_FALSE);
	if (zb2->zb_object == DMU_META_DNODE_OBJECT)
		return (B_FALSE);
	return (zb1nextL0 <= zb2->zb_blkid);
}
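
/*
 * Illustrative sketch (guarded out of compilation): the zb1nextL0
 * computation above.  A level-N bookmark at blkid B covers L0 blocks
 * [B << (N * epb_shift), (B + 1) << (N * epb_shift)), where epb_shift is
 * log2 of the number of block pointers per indirect block.  The sample
 * values below (epb_shift = 7, i.e. a 16K indirect block holding 128
 * 128-byte blkptrs) are assumptions for illustration, not constants
 * taken from this file.
 */
#if 0
#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint64_t epb_shift = 7;		/* indblkshift 14 - blkptrshift 7 */
	uint64_t level = 2, blkid = 3;

	/* First L0 block NOT covered by bookmark (level, blkid): */
	uint64_t next_l0 = (blkid + 1) << (level * epb_shift);

	assert(next_l0 == 4ULL << 14);	/* covers L0 blocks 49152..65535 */
	return (0);
}
#endif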