/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright (c) 2017, Intel Corporation.
 */

#include <sys/sysmacros.h>
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include <sys/blkptr.h>
#include <sys/zfeature.h>
#include <sys/dsl_scan.h>
#include <sys/metaslab_impl.h>
#include <sys/abd.h>
#include <sys/cityhash.h>

/*
 * ==========================================================================
 * I/O type descriptions
 * ==========================================================================
 */
const char *zio_type_name[ZIO_TYPES] = {
	"zio_null", "zio_read", "zio_write", "zio_free", "zio_claim",
	"zio_ioctl"
};

boolean_t zio_dva_throttle_enabled = B_TRUE;

/*
 * ==========================================================================
 * I/O kmem caches
 * ==========================================================================
 */
kmem_cache_t *zio_cache;
kmem_cache_t *zio_link_cache;
kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];

#ifdef _KERNEL
extern vmem_t *zio_alloc_arena;
#endif

#define	ZIO_PIPELINE_CONTINUE		0x100
#define	ZIO_PIPELINE_STOP		0x101

#define	BP_SPANB(indblkshift, level) \
	(((uint64_t)1) << ((level) * ((indblkshift) - SPA_BLKPTRSHIFT)))
#define	COMPARE_META_LEVEL	0x80000000ul
/*
 * The following actions directly affect the spa's sync-to-convergence logic.
 * The values below define the sync pass when we start performing the action.
 * Care should be taken when changing these values as they directly impact
 * spa_sync() performance.  Tuning these values may introduce subtle
 * performance pathologies and should only be done in the context of
 * performance analysis.  These tunables will eventually be removed and
 * replaced with #defines once enough analysis has been done to determine
 * optimal values.
 *
 * The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that
 * regular blocks are not deferred.
 */
int zfs_sync_pass_deferred_free = 2; /* defer frees starting in this pass */
int zfs_sync_pass_dont_compress = 5; /* don't compress starting in this pass */
int zfs_sync_pass_rewrite = 2; /* rewrite new bps starting in this pass */
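
/*
 * Illustrative sketch (not part of the pipeline): how these tunables gate
 * a free of a plain (non-gang, non-dedup) block in the currently-syncing
 * txg.  With zfs_sync_pass_deferred_free == 2, zio_free() below executes
 * the free immediately in pass 1 but appends it to the in-memory list
 * from pass 2 onward:
 *
 *	if (spa_sync_pass(spa) >= zfs_sync_pass_deferred_free)
 *		bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
 *	else
 *		VERIFY0(zio_wait(zio_free_sync(NULL, spa, txg, bp, 0)));
 */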

/*
 * An allocating zio is one that either currently has the DVA allocate
 * stage set or will have it later in its lifetime.
 */
#define	IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)

boolean_t zio_requeue_io_start_cut_in_line = B_TRUE;

#ifdef ZFS_DEBUG
int zio_buf_debug_limit = 16384;
#else
int zio_buf_debug_limit = 0;
#endif

static void zio_taskq_dispatch(zio_t *, zio_taskq_type_t, boolean_t);

void
zio_init(void)
{
	size_t c;
	vmem_t *data_alloc_arena = NULL;

#ifdef _KERNEL
	data_alloc_arena = zio_alloc_arena;
#endif
	zio_cache = kmem_cache_create("zio_cache",
	    sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
	zio_link_cache = kmem_cache_create("zio_link_cache",
	    sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	/*
	 * For small buffers, we want a cache for each multiple of
	 * SPA_MINBLOCKSIZE.  For larger buffers, we want a cache
	 * for each quarter-power of 2.
	 */
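	/*
	 * Worked example (illustrative): for a 12K buffer, the largest
	 * power of 2 not exceeding it is p2 = 8K, so the quarter-power
	 * step is p2 >> 2 = 2K.  12K is 2K-aligned, so a zio_buf_12288
	 * cache is created with 2K alignment.  A 12.5K (25 << 9) buffer
	 * is not 2K-aligned, so no cache is created for it; the fix-up
	 * loop after the allocation loop points its slot at the next
	 * larger cache instead.
	 */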
	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
		size_t p2 = size;
		size_t align = 0;
		size_t cflags = (size > zio_buf_debug_limit) ? KMC_NODEBUG : 0;

		while (!ISP2(p2))
			p2 &= p2 - 1;

#ifndef _KERNEL
		/*
		 * If we are using watchpoints, put each buffer on its own
		 * page, to eliminate the performance overhead of trapping
		 * to the kernel when modifying a non-watched buffer that
		 * shares the page with a watched buffer.
		 */
		if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE))
			continue;
#endif
		if (size <= 4 * SPA_MINBLOCKSIZE) {
			align = SPA_MINBLOCKSIZE;
		} else if (IS_P2ALIGNED(size, p2 >> 2)) {
			align = MIN(p2 >> 2, PAGESIZE);
		}

		if (align != 0) {
			char name[36];
			(void) sprintf(name, "zio_buf_%lu", (ulong_t)size);
			zio_buf_cache[c] = kmem_cache_create(name, size,
			    align, NULL, NULL, NULL, NULL, NULL, cflags);

			/*
			 * Since zio_data bufs do not appear in crash dumps,
			 * we pass KMC_NOTOUCH so that no allocator metadata
			 * is stored with the buffers.
			 */
			(void) sprintf(name, "zio_data_buf_%lu",
			    (ulong_t)size);
			zio_data_buf_cache[c] = kmem_cache_create(name, size,
			    align, NULL, NULL, NULL, NULL, data_alloc_arena,
			    cflags | KMC_NOTOUCH);
		}
	}

	while (--c != 0) {
		ASSERT(zio_buf_cache[c] != NULL);
		if (zio_buf_cache[c - 1] == NULL)
			zio_buf_cache[c - 1] = zio_buf_cache[c];

		ASSERT(zio_data_buf_cache[c] != NULL);
		if (zio_data_buf_cache[c - 1] == NULL)
			zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
	}

	zio_inject_init();
}

void
zio_fini(void)
{
	size_t c;
	kmem_cache_t *last_cache = NULL;
	kmem_cache_t *last_data_cache = NULL;

	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		if (zio_buf_cache[c] != last_cache) {
			last_cache = zio_buf_cache[c];
			kmem_cache_destroy(zio_buf_cache[c]);
		}
		zio_buf_cache[c] = NULL;

		if (zio_data_buf_cache[c] != last_data_cache) {
			last_data_cache = zio_data_buf_cache[c];
			kmem_cache_destroy(zio_data_buf_cache[c]);
		}
		zio_data_buf_cache[c] = NULL;
	}

	kmem_cache_destroy(zio_link_cache);
	kmem_cache_destroy(zio_cache);

	zio_inject_fini();
}

/*
 * ==========================================================================
 * Allocate and free I/O buffers
 * ==========================================================================
 */

/*
 * Use zio_buf_alloc to allocate ZFS metadata.  This data will appear in a
 * crashdump if the kernel panics, so use it judiciously.  Obviously, it's
 * useful to inspect ZFS metadata, but if possible, we should avoid keeping
 * excess / transient data in-core during a crashdump.
 */
void *
zio_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE));
}
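
/*
 * Illustrative usage sketch: with SPA_MINBLOCKSHIFT == 9, a 3K metadata
 * buffer maps to cache index c = (3072 - 1) >> 9 = 5, i.e. the
 * zio_buf_3072 cache, and must be freed with the same size it was
 * allocated with:
 *
 *	void *buf = zio_buf_alloc(3072);
 *	...
 *	zio_buf_free(buf, 3072);
 */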

/*
 * Use zio_data_buf_alloc to allocate data.  The data will not appear in a
 * crashdump if the kernel panics.  This exists so that we will limit the
 * amount of ZFS data that shows up in a kernel crashdump, thus reducing
 * the amount of kernel heap dumped to disk when the kernel panics.
 */
void *
zio_data_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE));
}

void
zio_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	kmem_cache_free(zio_buf_cache[c], buf);
}

void
zio_data_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	kmem_cache_free(zio_data_buf_cache[c], buf);
}

/*
 * ==========================================================================
 * Push and pop I/O transform buffers
 * ==========================================================================
 */
void
zio_push_transform(zio_t *zio, abd_t *data, uint64_t size, uint64_t bufsize,
    zio_transform_func_t *transform)
{
	zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);

	/*
	 * Ensure that anyone expecting this zio to contain a linear ABD isn't
	 * going to get a nasty surprise when they try to access the data.
	 */
	IMPLY(abd_is_linear(zio->io_abd), abd_is_linear(data));

	zt->zt_orig_abd = zio->io_abd;
	zt->zt_orig_size = zio->io_size;
	zt->zt_bufsize = bufsize;
	zt->zt_transform = transform;

	zt->zt_next = zio->io_transform_stack;
	zio->io_transform_stack = zt;

	zio->io_abd = data;
	zio->io_size = size;
}

void
zio_pop_transforms(zio_t *zio)
{
	zio_transform_t *zt;

	while ((zt = zio->io_transform_stack) != NULL) {
		if (zt->zt_transform != NULL)
			zt->zt_transform(zio,
			    zt->zt_orig_abd, zt->zt_orig_size);

		if (zt->zt_bufsize != 0)
			abd_free(zio->io_abd);

		zio->io_abd = zt->zt_orig_abd;
		zio->io_size = zt->zt_orig_size;
		zio->io_transform_stack = zt->zt_next;

		kmem_free(zt, sizeof (zio_transform_t));
	}
}
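
/*
 * Illustrative sketch: zio_decompress() below is installed through this
 * mechanism.  A read of a compressed block pushes a psize-sized ABD onto
 * the transform stack, and when the transforms are popped the callback
 * inflates the data back into the caller's original buffer:
 *
 *	zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
 *	    psize, psize, zio_decompress);
 */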

/*
 * ==========================================================================
 * I/O transform callbacks for subblocks and decompression
 * ==========================================================================
 */
static void
zio_subblock(zio_t *zio, abd_t *data, uint64_t size)
{
	ASSERT(zio->io_size > size);

	if (zio->io_type == ZIO_TYPE_READ)
		abd_copy(data, zio->io_abd, size);
}

static void
zio_decompress(zio_t *zio, abd_t *data, uint64_t size)
{
	if (zio->io_error == 0) {
		void *tmp = abd_borrow_buf(data, size);
		int ret = zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
		    zio->io_abd, tmp, zio->io_size, size);
		abd_return_buf_copy(data, tmp, size);

		if (ret != 0)
			zio->io_error = SET_ERROR(EIO);
	}
}

/*
 * ==========================================================================
 * I/O parent/child relationships and pipeline interlocks
 * ==========================================================================
 */
zio_t *
zio_walk_parents(zio_t *cio, zio_link_t **zl)
{
	list_t *pl = &cio->io_parent_list;

	*zl = (*zl == NULL) ? list_head(pl) : list_next(pl, *zl);
	if (*zl == NULL)
		return (NULL);

	ASSERT((*zl)->zl_child == cio);
	return ((*zl)->zl_parent);
}

zio_t *
zio_walk_children(zio_t *pio, zio_link_t **zl)
{
	list_t *cl = &pio->io_child_list;

	ASSERT(MUTEX_HELD(&pio->io_lock));

	*zl = (*zl == NULL) ? list_head(cl) : list_next(cl, *zl);
	if (*zl == NULL)
		return (NULL);

	ASSERT((*zl)->zl_parent == pio);
	return ((*zl)->zl_child);
}

zio_t *
zio_unique_parent(zio_t *cio)
{
	zio_link_t *zl = NULL;
	zio_t *pio = zio_walk_parents(cio, &zl);

	VERIFY3P(zio_walk_parents(cio, &zl), ==, NULL);
	return (pio);
}
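
/*
 * Illustrative iteration sketch: both walkers take a zio_link_t cursor
 * that starts at NULL, so visiting every parent of a child looks like:
 *
 *	zio_link_t *zl = NULL;
 *	zio_t *pio;
 *	while ((pio = zio_walk_parents(cio, &zl)) != NULL)
 *		...;
 *
 * zio_unique_parent() above is exactly this pattern, verifying that the
 * second call returns NULL.
 */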
This also 496 * prevents overflowing the stack when we have deeply nested 497 * parent-child relationships. 498 */ 499 zio_taskq_dispatch(pio, type, B_FALSE); 500 } else { 501 mutex_exit(&pio->io_lock); 502 } 503 } 504 505 static void 506 zio_inherit_child_errors(zio_t *zio, enum zio_child c) 507 { 508 if (zio->io_child_error[c] != 0 && zio->io_error == 0) 509 zio->io_error = zio->io_child_error[c]; 510 } 511 512 int 513 zio_bookmark_compare(const void *x1, const void *x2) 514 { 515 const zio_t *z1 = x1; 516 const zio_t *z2 = x2; 517 518 if (z1->io_bookmark.zb_objset < z2->io_bookmark.zb_objset) 519 return (-1); 520 if (z1->io_bookmark.zb_objset > z2->io_bookmark.zb_objset) 521 return (1); 522 523 if (z1->io_bookmark.zb_object < z2->io_bookmark.zb_object) 524 return (-1); 525 if (z1->io_bookmark.zb_object > z2->io_bookmark.zb_object) 526 return (1); 527 528 if (z1->io_bookmark.zb_level < z2->io_bookmark.zb_level) 529 return (-1); 530 if (z1->io_bookmark.zb_level > z2->io_bookmark.zb_level) 531 return (1); 532 533 if (z1->io_bookmark.zb_blkid < z2->io_bookmark.zb_blkid) 534 return (-1); 535 if (z1->io_bookmark.zb_blkid > z2->io_bookmark.zb_blkid) 536 return (1); 537 538 if (z1 < z2) 539 return (-1); 540 if (z1 > z2) 541 return (1); 542 543 return (0); 544 } 545 546 /* 547 * ========================================================================== 548 * Create the various types of I/O (read, write, free, etc) 549 * ========================================================================== 550 */ 551 static zio_t * 552 zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp, 553 abd_t *data, uint64_t lsize, uint64_t psize, zio_done_func_t *done, 554 void *private, zio_type_t type, zio_priority_t priority, 555 enum zio_flag flags, vdev_t *vd, uint64_t offset, 556 const zbookmark_phys_t *zb, enum zio_stage stage, enum zio_stage pipeline) 557 { 558 zio_t *zio; 559 560 ASSERT3U(psize, <=, SPA_MAXBLOCKSIZE); 561 ASSERT(P2PHASE(psize, SPA_MINBLOCKSIZE) == 0); 562 ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0); 563 564 ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER)); 565 ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER)); 566 ASSERT(vd || stage == ZIO_STAGE_OPEN); 567 568 IMPLY(lsize != psize, (flags & ZIO_FLAG_RAW) != 0); 569 570 zio = kmem_cache_alloc(zio_cache, KM_SLEEP); 571 bzero(zio, sizeof (zio_t)); 572 573 mutex_init(&zio->io_lock, NULL, MUTEX_DEFAULT, NULL); 574 cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL); 575 576 list_create(&zio->io_parent_list, sizeof (zio_link_t), 577 offsetof(zio_link_t, zl_parent_node)); 578 list_create(&zio->io_child_list, sizeof (zio_link_t), 579 offsetof(zio_link_t, zl_child_node)); 580 metaslab_trace_init(&zio->io_alloc_list); 581 582 if (vd != NULL) 583 zio->io_child_type = ZIO_CHILD_VDEV; 584 else if (flags & ZIO_FLAG_GANG_CHILD) 585 zio->io_child_type = ZIO_CHILD_GANG; 586 else if (flags & ZIO_FLAG_DDT_CHILD) 587 zio->io_child_type = ZIO_CHILD_DDT; 588 else 589 zio->io_child_type = ZIO_CHILD_LOGICAL; 590 591 if (bp != NULL) { 592 zio->io_bp = (blkptr_t *)bp; 593 zio->io_bp_copy = *bp; 594 zio->io_bp_orig = *bp; 595 if (type != ZIO_TYPE_WRITE || 596 zio->io_child_type == ZIO_CHILD_DDT) 597 zio->io_bp = &zio->io_bp_copy; /* so caller can free */ 598 if (zio->io_child_type == ZIO_CHILD_LOGICAL) 599 zio->io_logical = zio; 600 if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp)) 601 pipeline |= ZIO_GANG_STAGES; 602 } 603 604 zio->io_spa = spa; 605 zio->io_txg = txg; 606 zio->io_done = done; 607 zio->io_private = private; 608 

/*
 * ==========================================================================
 * Create the various types of I/O (read, write, free, etc)
 * ==========================================================================
 */
static zio_t *
zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    abd_t *data, uint64_t lsize, uint64_t psize, zio_done_func_t *done,
    void *private, zio_type_t type, zio_priority_t priority,
    enum zio_flag flags, vdev_t *vd, uint64_t offset,
    const zbookmark_phys_t *zb, enum zio_stage stage, enum zio_stage pipeline)
{
	zio_t *zio;

	ASSERT3U(psize, <=, SPA_MAXBLOCKSIZE);
	ASSERT(P2PHASE(psize, SPA_MINBLOCKSIZE) == 0);
	ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);

	ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
	ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
	ASSERT(vd || stage == ZIO_STAGE_OPEN);

	IMPLY(lsize != psize, (flags & ZIO_FLAG_RAW) != 0);

	zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
	bzero(zio, sizeof (zio_t));

	mutex_init(&zio->io_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);

	list_create(&zio->io_parent_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_parent_node));
	list_create(&zio->io_child_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_child_node));
	metaslab_trace_init(&zio->io_alloc_list);

	if (vd != NULL)
		zio->io_child_type = ZIO_CHILD_VDEV;
	else if (flags & ZIO_FLAG_GANG_CHILD)
		zio->io_child_type = ZIO_CHILD_GANG;
	else if (flags & ZIO_FLAG_DDT_CHILD)
		zio->io_child_type = ZIO_CHILD_DDT;
	else
		zio->io_child_type = ZIO_CHILD_LOGICAL;

	if (bp != NULL) {
		zio->io_bp = (blkptr_t *)bp;
		zio->io_bp_copy = *bp;
		zio->io_bp_orig = *bp;
		if (type != ZIO_TYPE_WRITE ||
		    zio->io_child_type == ZIO_CHILD_DDT)
			zio->io_bp = &zio->io_bp_copy;	/* so caller can free */
		if (zio->io_child_type == ZIO_CHILD_LOGICAL)
			zio->io_logical = zio;
		if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
			pipeline |= ZIO_GANG_STAGES;
	}

	zio->io_spa = spa;
	zio->io_txg = txg;
	zio->io_done = done;
	zio->io_private = private;
	zio->io_type = type;
	zio->io_priority = priority;
	zio->io_vd = vd;
	zio->io_offset = offset;
	zio->io_orig_abd = zio->io_abd = data;
	zio->io_orig_size = zio->io_size = psize;
	zio->io_lsize = lsize;
	zio->io_orig_flags = zio->io_flags = flags;
	zio->io_orig_stage = zio->io_stage = stage;
	zio->io_orig_pipeline = zio->io_pipeline = pipeline;
	zio->io_pipeline_trace = ZIO_STAGE_OPEN;

	zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY);
	zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);

	if (zb != NULL)
		zio->io_bookmark = *zb;

	if (pio != NULL) {
		if (zio->io_metaslab_class == NULL)
			zio->io_metaslab_class = pio->io_metaslab_class;
		if (zio->io_logical == NULL)
			zio->io_logical = pio->io_logical;
		if (zio->io_child_type == ZIO_CHILD_GANG)
			zio->io_gang_leader = pio->io_gang_leader;
		zio_add_child(pio, zio);
	}

	return (zio);
}

static void
zio_destroy(zio_t *zio)
{
	metaslab_trace_fini(&zio->io_alloc_list);
	list_destroy(&zio->io_parent_list);
	list_destroy(&zio->io_child_list);
	mutex_destroy(&zio->io_lock);
	cv_destroy(&zio->io_cv);
	kmem_cache_free(zio_cache, zio);
}

zio_t *
zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done,
    void *private, enum zio_flag flags)
{
	zio_t *zio;

	zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
	    ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
	    ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE);

	return (zio);
}

zio_t *
zio_root(spa_t *spa, zio_done_func_t *done, void *private, enum zio_flag flags)
{
	return (zio_null(NULL, spa, NULL, done, private, flags));
}
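
/*
 * Illustrative usage sketch: a root zio collects children so the caller
 * can wait for all of them at once:
 *
 *	zio_t *rio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
 *	zio_nowait(zio_read(rio, spa, bp, abd, size, NULL, NULL,
 *	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, zb));
 *	error = zio_wait(rio);
 */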

void
zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp)
{
	if (!DMU_OT_IS_VALID(BP_GET_TYPE(bp))) {
		zfs_panic_recover("blkptr at %p has invalid TYPE %llu",
		    bp, (longlong_t)BP_GET_TYPE(bp));
	}
	if (BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS ||
	    BP_GET_CHECKSUM(bp) <= ZIO_CHECKSUM_ON) {
		zfs_panic_recover("blkptr at %p has invalid CHECKSUM %llu",
		    bp, (longlong_t)BP_GET_CHECKSUM(bp));
	}
	if (BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS ||
	    BP_GET_COMPRESS(bp) <= ZIO_COMPRESS_ON) {
		zfs_panic_recover("blkptr at %p has invalid COMPRESS %llu",
		    bp, (longlong_t)BP_GET_COMPRESS(bp));
	}
	if (BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE) {
		zfs_panic_recover("blkptr at %p has invalid LSIZE %llu",
		    bp, (longlong_t)BP_GET_LSIZE(bp));
	}
	if (BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE) {
		zfs_panic_recover("blkptr at %p has invalid PSIZE %llu",
		    bp, (longlong_t)BP_GET_PSIZE(bp));
	}

	if (BP_IS_EMBEDDED(bp)) {
		if (BPE_GET_ETYPE(bp) > NUM_BP_EMBEDDED_TYPES) {
			zfs_panic_recover("blkptr at %p has invalid ETYPE %llu",
			    bp, (longlong_t)BPE_GET_ETYPE(bp));
		}
	}

	/*
	 * Do not verify individual DVAs if the config is not trusted.  This
	 * will be done once the zio is executed in vdev_mirror_map_alloc.
	 */
	if (!spa->spa_trust_config)
		return;

	/*
	 * Pool-specific checks.
	 *
	 * Note: it would be nice to verify that the blk_birth and
	 * BP_PHYSICAL_BIRTH() are not too large.  However, spa_freeze()
	 * allows the birth time of log blocks (and dmu_sync()-ed blocks
	 * that are in the log) to be arbitrarily large.
	 */
	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
		uint64_t vdevid = DVA_GET_VDEV(&bp->blk_dva[i]);
		if (vdevid >= spa->spa_root_vdev->vdev_children) {
			zfs_panic_recover("blkptr at %p DVA %u has invalid "
			    "VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
		if (vd == NULL) {
			zfs_panic_recover("blkptr at %p DVA %u has invalid "
			    "VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		if (vd->vdev_ops == &vdev_hole_ops) {
			zfs_panic_recover("blkptr at %p DVA %u has hole "
			    "VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		if (vd->vdev_ops == &vdev_missing_ops) {
			/*
			 * "missing" vdevs are valid during import, but we
			 * don't have their detailed info (e.g. asize), so
			 * we can't perform any more checks on them.
			 */
			continue;
		}
		uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
		uint64_t asize = DVA_GET_ASIZE(&bp->blk_dva[i]);
		if (BP_IS_GANG(bp))
			asize = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
		if (offset + asize > vd->vdev_asize) {
			zfs_panic_recover("blkptr at %p DVA %u has invalid "
			    "OFFSET %llu",
			    bp, i, (longlong_t)offset);
		}
	}
}

boolean_t
zfs_dva_valid(spa_t *spa, const dva_t *dva, const blkptr_t *bp)
{
	uint64_t vdevid = DVA_GET_VDEV(dva);

	if (vdevid >= spa->spa_root_vdev->vdev_children)
		return (B_FALSE);

	vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
	if (vd == NULL)
		return (B_FALSE);

	if (vd->vdev_ops == &vdev_hole_ops)
		return (B_FALSE);

	if (vd->vdev_ops == &vdev_missing_ops)
		return (B_FALSE);

	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t asize = DVA_GET_ASIZE(dva);

	if (BP_IS_GANG(bp))
		asize = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
	if (offset + asize > vd->vdev_asize)
		return (B_FALSE);

	return (B_TRUE);
}

zio_t *
zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
    abd_t *data, uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, const zbookmark_phys_t *zb)
{
	zio_t *zio;

	zfs_blkptr_verify(spa, bp);

	zio = zio_create(pio, spa, BP_PHYSICAL_BIRTH(bp), bp,
	    data, size, size, done, private,
	    ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE);

	return (zio);
}

zio_t *
zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    abd_t *data, uint64_t lsize, uint64_t psize, const zio_prop_t *zp,
    zio_done_func_t *ready, zio_done_func_t *children_ready,
    zio_done_func_t *physdone, zio_done_func_t *done,
    void *private, zio_priority_t priority, enum zio_flag flags,
    const zbookmark_phys_t *zb)
{
	zio_t *zio;

	ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF &&
	    zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS &&
	    zp->zp_compress >= ZIO_COMPRESS_OFF &&
	    zp->zp_compress < ZIO_COMPRESS_FUNCTIONS &&
	    DMU_OT_IS_VALID(zp->zp_type) &&
	    zp->zp_level < 32 &&
	    zp->zp_copies > 0 &&
	    zp->zp_copies <= spa_max_replication(spa));

	zio = zio_create(pio, spa, txg, bp, data, lsize, psize, done, private,
	    ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE);

	zio->io_ready = ready;
	zio->io_children_ready = children_ready;
	zio->io_physdone = physdone;
	zio->io_prop = *zp;

	/*
	 * Data can be NULL if we are going to call zio_write_override() to
	 * provide the already-allocated BP.  But we may need the data to
	 * verify a dedup hit (if requested).  In this case, don't try to
	 * dedup (just take the already-allocated BP verbatim).
	 */
	if (data == NULL && zio->io_prop.zp_dedup_verify) {
		zio->io_prop.zp_dedup = zio->io_prop.zp_dedup_verify = B_FALSE;
	}

	return (zio);
}

zio_t *
zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, abd_t *data,
    uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, zbookmark_phys_t *zb)
{
	zio_t *zio;

	zio = zio_create(pio, spa, txg, bp, data, size, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_IO_REWRITE, NULL, 0, zb,
	    ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);

	return (zio);
}

void
zio_write_override(zio_t *zio, blkptr_t *bp, int copies, boolean_t nopwrite)
{
	ASSERT(zio->io_type == ZIO_TYPE_WRITE);
	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
	ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
	ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa));

	/*
	 * We must reset the io_prop to match the values that existed
	 * when the bp was first written by dmu_sync(), keeping in mind
	 * that nopwrite and dedup are mutually exclusive.
	 */
	zio->io_prop.zp_dedup = nopwrite ? B_FALSE : zio->io_prop.zp_dedup;
	zio->io_prop.zp_nopwrite = nopwrite;
	zio->io_prop.zp_copies = copies;
	zio->io_bp_override = bp;
}
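
/*
 * Illustrative sketch (hedged): dmu_sync()-style callers create the
 * write first, possibly with NULL data, and then pin the block pointer
 * that was already written:
 *
 *	zio_t *zio = zio_write(pio, spa, txg, bp, data, size, size, &zp,
 *	    ready_cb, NULL, NULL, done_cb, arg, ZIO_PRIORITY_SYNC_WRITE,
 *	    ZIO_FLAG_CANFAIL, zb);
 *	zio_write_override(zio, &existing_bp, copies, nopwrite);
 *
 * (ready_cb, done_cb, arg, and existing_bp are placeholders.)
 */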

void
zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
{
	zfs_blkptr_verify(spa, bp);

	/*
	 * The check for EMBEDDED is a performance optimization.  We
	 * process the free here (by ignoring it) rather than
	 * putting it on the list and then processing it in zio_free_sync().
	 */
	if (BP_IS_EMBEDDED(bp))
		return;
	metaslab_check_free(spa, bp);

	/*
	 * Frees that are for the currently-syncing txg, are not going to be
	 * deferred, and which will not need to do a read (i.e. not GANG or
	 * DEDUP), can be processed immediately.  Otherwise, put them on the
	 * in-memory list for later processing.
	 */
	if (BP_IS_GANG(bp) || BP_GET_DEDUP(bp) ||
	    txg != spa->spa_syncing_txg ||
	    spa_sync_pass(spa) >= zfs_sync_pass_deferred_free) {
		bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
	} else {
		VERIFY0(zio_wait(zio_free_sync(NULL, spa, txg, bp, 0)));
	}
}

zio_t *
zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    enum zio_flag flags)
{
	zio_t *zio;
	enum zio_stage stage = ZIO_FREE_PIPELINE;

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(spa_syncing_txg(spa) == txg);
	ASSERT(spa_sync_pass(spa) < zfs_sync_pass_deferred_free);

	if (BP_IS_EMBEDDED(bp))
		return (zio_null(pio, spa, NULL, NULL, NULL, 0));

	metaslab_check_free(spa, bp);
	arc_freed(spa, bp);
	dsl_scan_freed(spa, bp);

	/*
	 * GANG and DEDUP blocks can induce a read (for the gang block
	 * header, or the DDT), so issue them asynchronously so that this
	 * thread is not tied up.
	 */
	if (BP_IS_GANG(bp) || BP_GET_DEDUP(bp))
		stage |= ZIO_STAGE_ISSUE_ASYNC;

	zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
	    BP_GET_PSIZE(bp), NULL, NULL, ZIO_TYPE_FREE, ZIO_PRIORITY_NOW,
	    flags, NULL, 0, NULL, ZIO_STAGE_OPEN, stage);

	return (zio);
}

zio_t *
zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    zio_done_func_t *done, void *private, enum zio_flag flags)
{
	zio_t *zio;

	zfs_blkptr_verify(spa, bp);

	if (BP_IS_EMBEDDED(bp))
		return (zio_null(pio, spa, NULL, NULL, NULL, 0));

	/*
	 * A claim is an allocation of a specific block.  Claims are needed
	 * to support immediate writes in the intent log.  The issue is that
	 * immediate writes contain committed data, but in a txg that was
	 * *not* committed.  Upon opening the pool after an unclean shutdown,
	 * the intent log claims all blocks that contain immediate write data
	 * so that the SPA knows they're in use.
	 *
	 * All claims *must* be resolved in the first txg -- before the SPA
	 * starts allocating blocks -- so that nothing is allocated twice.
	 * If txg == 0 we just verify that the block is claimable.
	 */
	ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <,
	    spa_min_claim_txg(spa));
	ASSERT(txg == spa_min_claim_txg(spa) || txg == 0);
	ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa));	/* zdb(1M) */

	zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
	    BP_GET_PSIZE(bp), done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW,
	    flags, NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);
	ASSERT0(zio->io_queued_timestamp);

	return (zio);
}

zio_t *
zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd,
    zio_done_func_t *done, void *private, enum zio_flag flags)
{
	zio_t *zio;
	int c;

	if (vd->vdev_children == 0) {
		zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
		    ZIO_TYPE_IOCTL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
		    ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE);

		zio->io_cmd = cmd;
	} else {
		zio = zio_null(pio, spa, NULL, NULL, NULL, flags);

		for (c = 0; c < vd->vdev_children; c++)
			zio_nowait(zio_ioctl(zio, spa, vd->vdev_child[c], cmd,
			    done, private, flags));
	}

	return (zio);
}

zio_t *
zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    abd_t *data, int checksum, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT(vd->vdev_children == 0);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
	    private, ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL, vd,
	    offset, NULL, ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	return (zio);
}

zio_t *
zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    abd_t *data, int checksum, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT(vd->vdev_children == 0);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
	    private, ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL, vd,
	    offset, NULL, ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	if (zio_checksum_table[checksum].ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
		/*
		 * zec checksums are necessarily destructive -- they modify
		 * the end of the write buffer to hold the verifier/checksum.
		 * Therefore, we must make a local copy in case the data is
		 * being written to multiple places in parallel.
		 */
		abd_t *wbuf = abd_alloc_sametype(data, size);
		abd_copy(wbuf, data, size);

		zio_push_transform(zio, wbuf, size, size, NULL);
	}

	return (zio);
}
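
/*
 * Illustrative sketch (hedged): label I/O is a typical zio_read_phys()
 * caller; it addresses the vdev by physical offset, with no block
 * pointer, in the style of vdev_label.c:
 *
 *	zio_nowait(zio_read_phys(rio, vd, offset, VDEV_PHYS_SIZE, abd,
 *	    ZIO_CHECKSUM_LABEL, done_cb, arg, ZIO_PRIORITY_SYNC_READ,
 *	    ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL |
 *	    ZIO_FLAG_SPECULATIVE, B_TRUE));
 *
 * (done_cb and arg are placeholders.)
 */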

/*
 * Create a child I/O to do some work for us.
 */
zio_t *
zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
    abd_t *data, uint64_t size, int type, zio_priority_t priority,
    enum zio_flag flags, zio_done_func_t *done, void *private)
{
	enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE;
	zio_t *zio;

	/*
	 * vdev child I/Os do not propagate their error to the parent.
	 * Therefore, for correct operation the caller *must* check for
	 * and handle the error in the child i/o's done callback.
	 * The only exceptions are i/os that we don't care about
	 * (OPTIONAL or REPAIR).
	 */
	ASSERT((flags & ZIO_FLAG_OPTIONAL) || (flags & ZIO_FLAG_IO_REPAIR) ||
	    done != NULL);

	if (type == ZIO_TYPE_READ && bp != NULL) {
		/*
		 * If we have the bp, then the child should perform the
		 * checksum and the parent need not.  This pushes error
		 * detection as close to the leaves as possible and
		 * eliminates redundant checksums in the interior nodes.
		 */
		pipeline |= ZIO_STAGE_CHECKSUM_VERIFY;
		pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
	}

	if (vd->vdev_ops->vdev_op_leaf) {
		ASSERT0(vd->vdev_children);
		offset += VDEV_LABEL_START_SIZE;
	}

	flags |= ZIO_VDEV_CHILD_FLAGS(pio);

	/*
	 * If we've decided to do a repair, the write is not speculative --
	 * even if the original read was.
	 */
	if (flags & ZIO_FLAG_IO_REPAIR)
		flags &= ~ZIO_FLAG_SPECULATIVE;

	/*
	 * If we're creating a child I/O that is not associated with a
	 * top-level vdev, then the child zio is not an allocating I/O.
	 * If this is a retried I/O then we ignore it since we will
	 * have already processed the original allocating I/O.
	 */
	if (flags & ZIO_FLAG_IO_ALLOCATING &&
	    (vd != vd->vdev_top || (flags & ZIO_FLAG_IO_RETRY))) {
		ASSERT(pio->io_metaslab_class != NULL);
		ASSERT(pio->io_metaslab_class->mc_alloc_throttle_enabled);
		ASSERT(type == ZIO_TYPE_WRITE);
		ASSERT(priority == ZIO_PRIORITY_ASYNC_WRITE);
		ASSERT(!(flags & ZIO_FLAG_IO_REPAIR));
		ASSERT(!(pio->io_flags & ZIO_FLAG_IO_REWRITE) ||
		    pio->io_child_type == ZIO_CHILD_GANG);

		flags &= ~ZIO_FLAG_IO_ALLOCATING;
	}

	zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size, size,
	    done, private, type, priority, flags, vd, offset, &pio->io_bookmark,
	    ZIO_STAGE_VDEV_IO_START >> 1, pipeline);
	ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);

	zio->io_physdone = pio->io_physdone;
	if (vd->vdev_ops->vdev_op_leaf && zio->io_logical != NULL)
		zio->io_logical->io_phys_children++;

	return (zio);
}

zio_t *
zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, abd_t *data, uint64_t size,
    zio_type_t type, zio_priority_t priority, enum zio_flag flags,
    zio_done_func_t *done, void *private)
{
	zio_t *zio;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	zio = zio_create(NULL, vd->vdev_spa, 0, NULL,
	    data, size, size, done, private, type, priority,
	    flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_DELEGATED,
	    vd, offset, NULL,
	    ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE);

	return (zio);
}

void
zio_flush(zio_t *zio, vdev_t *vd)
{
	zio_nowait(zio_ioctl(zio, zio->io_spa, vd, DKIOCFLUSHWRITECACHE,
	    NULL, NULL,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY));
}
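
/*
 * Illustrative usage sketch (hedged): a typical caller parents the cache
 * flush to a root zio it later waits on; flush errors are ignored by
 * design, since the flags above include ZIO_FLAG_DONT_PROPAGATE:
 *
 *	zio_flush(root_zio, vd);
 *	...
 *	(void) zio_wait(root_zio);
 */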

void
zio_shrink(zio_t *zio, uint64_t size)
{
	ASSERT3P(zio->io_executor, ==, NULL);
	ASSERT3P(zio->io_orig_size, ==, zio->io_size);
	ASSERT3U(size, <=, zio->io_size);

	/*
	 * We don't shrink for raidz because of problems with the
	 * reconstruction when reading back less than the block size.
	 * Note, BP_IS_RAIDZ() assumes no compression.
	 */
	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
	if (!BP_IS_RAIDZ(zio->io_bp)) {
		/* we are not doing a raw write */
		ASSERT3U(zio->io_size, ==, zio->io_lsize);
		zio->io_orig_size = zio->io_size = zio->io_lsize = size;
	}
}

/*
 * ==========================================================================
 * Prepare to read and write logical blocks
 * ==========================================================================
 */

static int
zio_read_bp_init(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);

	if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
	    zio->io_child_type == ZIO_CHILD_LOGICAL &&
	    !(zio->io_flags & ZIO_FLAG_RAW)) {
		uint64_t psize =
		    BP_IS_EMBEDDED(bp) ? BPE_GET_PSIZE(bp) : BP_GET_PSIZE(bp);
		zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
		    psize, psize, zio_decompress);
	}

	if (BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA) {
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

		int psize = BPE_GET_PSIZE(bp);
		void *data = abd_borrow_buf(zio->io_abd, psize);
		decode_embedded_bp_compressed(bp, data);
		abd_return_buf_copy(zio->io_abd, data, psize);
	} else {
		ASSERT(!BP_IS_EMBEDDED(bp));
		ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
	}

	if (!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) && BP_GET_LEVEL(bp) == 0)
		zio->io_flags |= ZIO_FLAG_DONT_CACHE;

	if (BP_GET_TYPE(bp) == DMU_OT_DDT_ZAP)
		zio->io_flags |= ZIO_FLAG_DONT_CACHE;

	if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL)
		zio->io_pipeline = ZIO_DDT_READ_PIPELINE;

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_write_bp_init(zio_t *zio)
{
	if (!IO_IS_ALLOCATING(zio))
		return (ZIO_PIPELINE_CONTINUE);

	ASSERT(zio->io_child_type != ZIO_CHILD_DDT);

	if (zio->io_bp_override) {
		blkptr_t *bp = zio->io_bp;
		zio_prop_t *zp = &zio->io_prop;

		ASSERT(bp->blk_birth != zio->io_txg);
		ASSERT(BP_GET_DEDUP(zio->io_bp_override) == 0);

		*bp = *zio->io_bp_override;
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

		if (BP_IS_EMBEDDED(bp))
			return (ZIO_PIPELINE_CONTINUE);

		/*
		 * If we've been overridden and nopwrite is set then
		 * set the flag accordingly to indicate that a nopwrite
		 * has already occurred.
		 */
		if (!BP_IS_HOLE(bp) && zp->zp_nopwrite) {
			ASSERT(!zp->zp_dedup);
			ASSERT3U(BP_GET_CHECKSUM(bp), ==, zp->zp_checksum);
			zio->io_flags |= ZIO_FLAG_NOPWRITE;
			return (ZIO_PIPELINE_CONTINUE);
		}

		ASSERT(!zp->zp_nopwrite);

		if (BP_IS_HOLE(bp) || !zp->zp_dedup)
			return (ZIO_PIPELINE_CONTINUE);

		ASSERT((zio_checksum_table[zp->zp_checksum].ci_flags &
		    ZCHECKSUM_FLAG_DEDUP) || zp->zp_dedup_verify);

		if (BP_GET_CHECKSUM(bp) == zp->zp_checksum) {
			BP_SET_DEDUP(bp, 1);
			zio->io_pipeline |= ZIO_STAGE_DDT_WRITE;
			return (ZIO_PIPELINE_CONTINUE);
		}

		/*
		 * We were unable to handle this as an override bp, treat
		 * it as a regular write I/O.
		 */
		zio->io_bp_override = NULL;
		*bp = zio->io_bp_orig;
		zio->io_pipeline = zio->io_orig_pipeline;
	}

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_write_compress(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	zio_prop_t *zp = &zio->io_prop;
	enum zio_compress compress = zp->zp_compress;
	blkptr_t *bp = zio->io_bp;
	uint64_t lsize = zio->io_lsize;
	uint64_t psize = zio->io_size;
	int pass = 1;

	EQUIV(lsize != psize, (zio->io_flags & ZIO_FLAG_RAW) != 0);

	/*
	 * If our children haven't all reached the ready stage,
	 * wait for them and then repeat this pipeline stage.
	 */
	if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
	    ZIO_CHILD_GANG_BIT, ZIO_WAIT_READY)) {
		return (ZIO_PIPELINE_STOP);
	}

	if (!IO_IS_ALLOCATING(zio))
		return (ZIO_PIPELINE_CONTINUE);

	if (zio->io_children_ready != NULL) {
		/*
		 * Now that all our children are ready, run the callback
		 * associated with this zio in case it wants to modify the
		 * data to be written.
		 */
		ASSERT3U(zp->zp_level, >, 0);
		zio->io_children_ready(zio);
	}

	ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
	ASSERT(zio->io_bp_override == NULL);

	if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg) {
		/*
		 * We're rewriting an existing block, which means we're
		 * working on behalf of spa_sync().  For spa_sync() to
		 * converge, it must eventually be the case that we don't
		 * have to allocate new blocks.  But compression changes
		 * the blocksize, which forces a reallocate, and makes
		 * convergence take longer.  Therefore, after the first
		 * few passes, stop compressing to ensure convergence.
		 */
		pass = spa_sync_pass(spa);

		ASSERT(zio->io_txg == spa_syncing_txg(spa));
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
		ASSERT(!BP_GET_DEDUP(bp));

		if (pass >= zfs_sync_pass_dont_compress)
			compress = ZIO_COMPRESS_OFF;

		/* Make sure someone doesn't change their mind on overwrites */
		ASSERT(BP_IS_EMBEDDED(bp) || MIN(zp->zp_copies + BP_IS_GANG(bp),
		    spa_max_replication(spa)) == BP_GET_NDVAS(bp));
	}

	/* If it's a compressed write that is not raw, compress the buffer. */
	if (compress != ZIO_COMPRESS_OFF && psize == lsize) {
		void *cbuf = zio_buf_alloc(lsize);
		psize = zio_compress_data(compress, zio->io_abd, cbuf, lsize);
		if (psize == 0 || psize == lsize) {
			compress = ZIO_COMPRESS_OFF;
			zio_buf_free(cbuf, lsize);
		} else if (!zp->zp_dedup && psize <= BPE_PAYLOAD_SIZE &&
		    zp->zp_level == 0 && !DMU_OT_HAS_FILL(zp->zp_type) &&
		    spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA)) {
			encode_embedded_bp_compressed(bp,
			    cbuf, compress, lsize, psize);
			BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA);
			BP_SET_TYPE(bp, zio->io_prop.zp_type);
			BP_SET_LEVEL(bp, zio->io_prop.zp_level);
			zio_buf_free(cbuf, lsize);
			bp->blk_birth = zio->io_txg;
			zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
			ASSERT(spa_feature_is_active(spa,
			    SPA_FEATURE_EMBEDDED_DATA));
			return (ZIO_PIPELINE_CONTINUE);
		} else {
			/*
			 * Round the compressed size up to the ashift
			 * of the smallest-ashift device, and zero the tail.
			 * This ensures that the compressed size of the BP
			 * (and thus compressratio property) are correct,
			 * in that we charge for the padding used to fill out
			 * the last sector.
			 */
			ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT);
			size_t rounded = (size_t)P2ROUNDUP(psize,
			    1ULL << spa->spa_min_ashift);
			if (rounded >= lsize) {
				compress = ZIO_COMPRESS_OFF;
				zio_buf_free(cbuf, lsize);
				psize = lsize;
			} else {
				abd_t *cdata = abd_get_from_buf(cbuf, lsize);
				abd_take_ownership_of_buf(cdata, B_TRUE);
				abd_zero_off(cdata, psize, rounded - psize);
				psize = rounded;
				zio_push_transform(zio, cdata,
				    psize, lsize, NULL);
			}
		}

		/*
		 * We were unable to handle this as an override bp, treat
		 * it as a regular write I/O.
		 */
		zio->io_bp_override = NULL;
		*bp = zio->io_bp_orig;
		zio->io_pipeline = zio->io_orig_pipeline;
	} else {
		ASSERT3U(psize, !=, 0);
	}

	/*
	 * The final pass of spa_sync() must be all rewrites, but the first
	 * few passes offer a trade-off: allocating blocks defers convergence,
	 * but newly allocated blocks are sequential, so they can be written
	 * to disk faster.  Therefore, we allow the first few passes of
	 * spa_sync() to allocate new blocks, but force rewrites after that.
	 * There should only be a handful of blocks after pass 1 in any case.
	 */
	if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg &&
	    BP_GET_PSIZE(bp) == psize &&
	    pass >= zfs_sync_pass_rewrite) {
		VERIFY3U(psize, !=, 0);
		enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;

		zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages;
		zio->io_flags |= ZIO_FLAG_IO_REWRITE;
	} else {
		BP_ZERO(bp);
		zio->io_pipeline = ZIO_WRITE_PIPELINE;
	}

	if (psize == 0) {
		if (zio->io_bp_orig.blk_birth != 0 &&
		    spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
			BP_SET_LSIZE(bp, lsize);
			BP_SET_TYPE(bp, zp->zp_type);
			BP_SET_LEVEL(bp, zp->zp_level);
			BP_SET_BIRTH(bp, zio->io_txg, 0);
		}
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
	} else {
		ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER);
		BP_SET_LSIZE(bp, lsize);
		BP_SET_TYPE(bp, zp->zp_type);
		BP_SET_LEVEL(bp, zp->zp_level);
		BP_SET_PSIZE(bp, psize);
		BP_SET_COMPRESS(bp, compress);
		BP_SET_CHECKSUM(bp, zp->zp_checksum);
		BP_SET_DEDUP(bp, zp->zp_dedup);
		BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
		if (zp->zp_dedup) {
			ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
			ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
			zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE;
		}
		if (zp->zp_nopwrite) {
			ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
			ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
			zio->io_pipeline |= ZIO_STAGE_NOP_WRITE;
		}
	}
	return (ZIO_PIPELINE_CONTINUE);
}
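
/*
 * Worked example (illustrative) for the ashift rounding in
 * zio_write_compress() above: with spa_min_ashift == 12 (4K sectors),
 * a 130K block that compresses to 9000 bytes is padded to
 * P2ROUNDUP(9000, 4096) == 12288; the tail 3288 bytes are zeroed and
 * psize becomes 12K.  Had it compressed only to 129K, the rounded size
 * would reach lsize and compression would be abandoned.
 */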

static int
zio_free_bp_init(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	if (zio->io_child_type == ZIO_CHILD_LOGICAL) {
		if (BP_GET_DEDUP(bp))
			zio->io_pipeline = ZIO_DDT_FREE_PIPELINE;
	}

	ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);

	return (ZIO_PIPELINE_CONTINUE);
}

/*
 * ==========================================================================
 * Execute the I/O pipeline
 * ==========================================================================
 */

static void
zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline)
{
	spa_t *spa = zio->io_spa;
	zio_type_t t = zio->io_type;
	int flags = (cutinline ? TQ_FRONT : 0);

	/*
	 * If we're a config writer or a probe, the normal issue and
	 * interrupt threads may all be blocked waiting for the config lock.
	 * In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL.
	 */
	if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE))
		t = ZIO_TYPE_NULL;

	/*
	 * A similar issue exists for the L2ARC write thread until L2ARC 2.0.
	 */
	if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux)
		t = ZIO_TYPE_NULL;

	/*
	 * If this is a high priority I/O, then use the high priority taskq if
	 * available.
	 */
	if ((zio->io_priority == ZIO_PRIORITY_NOW ||
	    zio->io_priority == ZIO_PRIORITY_SYNC_WRITE) &&
	    spa->spa_zio_taskq[t][q + 1].stqs_count != 0)
		q++;

	ASSERT3U(q, <, ZIO_TASKQ_TYPES);

	/*
	 * NB: We are assuming that the zio can only be dispatched
	 * to a single taskq at a time.  It would be a grievous error
	 * to dispatch the zio to another taskq at the same time.
	 */
	ASSERT(zio->io_tqent.tqent_next == NULL);
	spa_taskq_dispatch_ent(spa, t, q, (task_func_t *)zio_execute, zio,
	    flags, &zio->io_tqent);
}

static boolean_t
zio_taskq_member(zio_t *zio, zio_taskq_type_t q)
{
	kthread_t *executor = zio->io_executor;
	spa_t *spa = zio->io_spa;

	for (zio_type_t t = 0; t < ZIO_TYPES; t++) {
		spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
		uint_t i;
		for (i = 0; i < tqs->stqs_count; i++) {
			if (taskq_member(tqs->stqs_taskq[i], executor))
				return (B_TRUE);
		}
	}

	return (B_FALSE);
}

static int
zio_issue_async(zio_t *zio)
{
	zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);

	return (ZIO_PIPELINE_STOP);
}

void
zio_interrupt(zio_t *zio)
{
	zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE);
}

void
zio_delay_interrupt(zio_t *zio)
{
	/*
	 * The timeout_generic() function isn't defined in userspace, so
	 * rather than trying to implement the function, the zio delay
	 * functionality has been disabled for userspace builds.
	 */

#ifdef _KERNEL
	/*
	 * If io_target_timestamp is zero, then no delay has been registered
	 * for this IO, so jump to the end of this function and "skip" the
	 * delay, issuing the IO directly to the zio layer.
	 */
	if (zio->io_target_timestamp != 0) {
		hrtime_t now = gethrtime();

		if (now >= zio->io_target_timestamp) {
			/*
			 * This IO has already taken longer than the target
			 * delay to complete, so we don't want to delay it
			 * any longer; we "miss" the delay and issue it
			 * directly to the zio layer.  This is likely due to
			 * the target latency being set to a value less than
			 * the underlying hardware can satisfy (e.g. delay
			 * set to 1ms, but the disks take 10ms to complete an
			 * IO request).
			 */

			DTRACE_PROBE2(zio__delay__miss, zio_t *, zio,
			    hrtime_t, now);

			zio_interrupt(zio);
		} else {
			hrtime_t diff = zio->io_target_timestamp - now;

			DTRACE_PROBE3(zio__delay__hit, zio_t *, zio,
			    hrtime_t, now, hrtime_t, diff);

			(void) timeout_generic(CALLOUT_NORMAL,
			    (void (*)(void *))zio_interrupt, zio, diff, 1, 0);
		}

		return;
	}
#endif

	DTRACE_PROBE1(zio__delay__skip, zio_t *, zio);
	zio_interrupt(zio);
}

/*
 * Execute the I/O pipeline until one of the following occurs:
 *
 *	(1) the I/O completes
 *	(2) the pipeline stalls waiting for dependent child I/Os
 *	(3) the I/O issues, so we're waiting for an I/O completion interrupt
 *	(4) the I/O is delegated by vdev-level caching or aggregation
 *	(5) the I/O is deferred due to vdev-level queueing
 *	(6) the I/O is handed off to another thread.
 *
 * In all cases, the pipeline stops whenever there's no CPU work; it never
 * burns a thread in cv_wait().
 *
 * There's no locking on io_stage because there's no legitimate way
 * for multiple threads to be attempting to process the same I/O.
 */
static zio_pipe_stage_t *zio_pipeline[];

void
zio_execute(zio_t *zio)
{
	zio->io_executor = curthread;

	ASSERT3U(zio->io_queued_timestamp, >, 0);

	while (zio->io_stage < ZIO_STAGE_DONE) {
		enum zio_stage pipeline = zio->io_pipeline;
		enum zio_stage stage = zio->io_stage;
		int rv;

		ASSERT(!MUTEX_HELD(&zio->io_lock));
		ASSERT(ISP2(stage));
		ASSERT(zio->io_stall == NULL);

		do {
			stage <<= 1;
		} while ((stage & pipeline) == 0);

		ASSERT(stage <= ZIO_STAGE_DONE);

		/*
		 * If we are in interrupt context and this pipeline stage
		 * will grab a config lock that is held across I/O,
		 * or may wait for an I/O that needs an interrupt thread
		 * to complete, issue async to avoid deadlock.
		 *
		 * For VDEV_IO_START, we cut in line so that the io will
		 * be sent to disk promptly.
		 */
		if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
		    zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
			boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
			    zio_requeue_io_start_cut_in_line : B_FALSE;
			zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
			return;
		}

		zio->io_stage = stage;
		zio->io_pipeline_trace |= zio->io_stage;
		rv = zio_pipeline[highbit64(stage) - 1](zio);

		if (rv == ZIO_PIPELINE_STOP)
			return;

		ASSERT(rv == ZIO_PIPELINE_CONTINUE);
	}
}

/*
 * ==========================================================================
 * Initiate I/O, either sync or async
 * ==========================================================================
 */
int
zio_wait(zio_t *zio)
{
	int error;

	ASSERT3P(zio->io_stage, ==, ZIO_STAGE_OPEN);
	ASSERT3P(zio->io_executor, ==, NULL);

	zio->io_waiter = curthread;
	ASSERT0(zio->io_queued_timestamp);
	zio->io_queued_timestamp = gethrtime();

	zio_execute(zio);

	mutex_enter(&zio->io_lock);
	while (zio->io_executor != NULL)
		cv_wait(&zio->io_cv, &zio->io_lock);
	mutex_exit(&zio->io_lock);

	error = zio->io_error;
	zio_destroy(zio);

	return (error);
}

void
zio_nowait(zio_t *zio)
{
	ASSERT3P(zio->io_executor, ==, NULL);

	if (zio->io_child_type == ZIO_CHILD_LOGICAL &&
	    zio_unique_parent(zio) == NULL) {
		/*
		 * This is a logical async I/O with no parent to wait for it.
		 * We add it to the spa_async_root_zio "Godfather" I/O which
		 * will ensure it completes prior to unloading the pool.
		 */
		spa_t *spa = zio->io_spa;

		zio_add_child(spa->spa_async_zio_root[CPU_SEQID], zio);
	}

	ASSERT0(zio->io_queued_timestamp);
	zio->io_queued_timestamp = gethrtime();
	zio_execute(zio);
}

/*
 * ==========================================================================
 * Reexecute, cancel, or suspend/resume failed I/O
 * ==========================================================================
 */

static void
zio_reexecute(zio_t *pio)
{
	zio_t *cio, *cio_next;

	ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL);
	ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN);
	ASSERT(pio->io_gang_leader == NULL);
	ASSERT(pio->io_gang_tree == NULL);

	pio->io_flags = pio->io_orig_flags;
	pio->io_stage = pio->io_orig_stage;
	pio->io_pipeline = pio->io_orig_pipeline;
	pio->io_reexecute = 0;
	pio->io_flags |= ZIO_FLAG_REEXECUTED;
	pio->io_pipeline_trace = 0;
	pio->io_error = 0;
	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
		pio->io_state[w] = 0;
	for (int c = 0; c < ZIO_CHILD_TYPES; c++)
		pio->io_child_error[c] = 0;

	if (IO_IS_ALLOCATING(pio))
		BP_ZERO(pio->io_bp);

	/*
	 * As we reexecute pio's children, new children could be created.
	 * New children go to the head of pio's io_child_list, however,
	 * so we will (correctly) not reexecute them.  The key is that
	 * the remainder of pio's io_child_list, from 'cio_next' onward,
	 * cannot be affected by any side effects of reexecuting 'cio'.
	 */
	zio_link_t *zl = NULL;
	mutex_enter(&pio->io_lock);
	for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
		cio_next = zio_walk_children(pio, &zl);
		for (int w = 0; w < ZIO_WAIT_TYPES; w++)
			pio->io_children[cio->io_child_type][w]++;
		mutex_exit(&pio->io_lock);
		zio_reexecute(cio);
		mutex_enter(&pio->io_lock);
	}
	mutex_exit(&pio->io_lock);

	/*
	 * Now that all children have been reexecuted, execute the parent.
	 * We don't reexecute "The Godfather" I/O here as it's the
	 * responsibility of the caller to wait on it.
	 */
	if (!(pio->io_flags & ZIO_FLAG_GODFATHER)) {
		pio->io_queued_timestamp = gethrtime();
		zio_execute(pio);
	}
}

void
zio_suspend(spa_t *spa, zio_t *zio, zio_suspend_reason_t reason)
{
	if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC)
		fm_panic("Pool '%s' has encountered an uncorrectable I/O "
		    "failure and the failure mode property for this pool "
		    "is set to panic.", spa_name(spa));

	zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL, NULL, 0, 0);

	mutex_enter(&spa->spa_suspend_lock);

	if (spa->spa_suspend_zio_root == NULL)
		spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL,
		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
		    ZIO_FLAG_GODFATHER);

	spa->spa_suspended = reason;

	if (zio != NULL) {
		ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
		ASSERT(zio != spa->spa_suspend_zio_root);
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
		ASSERT(zio_unique_parent(zio) == NULL);
		ASSERT(zio->io_stage == ZIO_STAGE_DONE);
		zio_add_child(spa->spa_suspend_zio_root, zio);
	}

	mutex_exit(&spa->spa_suspend_lock);
}

int
zio_resume(spa_t *spa)
{
	zio_t *pio;

	/*
	 * Reexecute all previously suspended i/o.
	 */
1827 */ 1828 mutex_enter(&spa->spa_suspend_lock); 1829 spa->spa_suspended = ZIO_SUSPEND_NONE; 1830 cv_broadcast(&spa->spa_suspend_cv); 1831 pio = spa->spa_suspend_zio_root; 1832 spa->spa_suspend_zio_root = NULL; 1833 mutex_exit(&spa->spa_suspend_lock); 1834 1835 if (pio == NULL) 1836 return (0); 1837 1838 zio_reexecute(pio); 1839 return (zio_wait(pio)); 1840 } 1841 1842 void 1843 zio_resume_wait(spa_t *spa) 1844 { 1845 mutex_enter(&spa->spa_suspend_lock); 1846 while (spa_suspended(spa)) 1847 cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock); 1848 mutex_exit(&spa->spa_suspend_lock); 1849 } 1850 1851 /* 1852 * ========================================================================== 1853 * Gang blocks. 1854 * 1855 * A gang block is a collection of small blocks that looks to the DMU 1856 * like one large block. When zio_dva_allocate() cannot find a block 1857 * of the requested size, due to either severe fragmentation or the pool 1858 * being nearly full, it calls zio_write_gang_block() to construct the 1859 * block from smaller fragments. 1860 * 1861 * A gang block consists of a gang header (zio_gbh_phys_t) and up to 1862 * three (SPA_GBH_NBLKPTRS) gang members. The gang header is just like 1863 * an indirect block: it's an array of block pointers. It consumes 1864 * only one sector and hence is allocatable regardless of fragmentation. 1865 * The gang header's bps point to its gang members, which hold the data. 1866 * 1867 * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg> 1868 * as the verifier to ensure uniqueness of the SHA256 checksum. 1869 * Critically, the gang block bp's blk_cksum is the checksum of the data, 1870 * not the gang header. This ensures that data block signatures (needed for 1871 * deduplication) are independent of how the block is physically stored. 1872 * 1873 * Gang blocks can be nested: a gang member may itself be a gang block. 1874 * Thus every gang block is a tree in which root and all interior nodes are 1875 * gang headers, and the leaves are normal blocks that contain user data. 1876 * The root of the gang tree is called the gang leader. 1877 * 1878 * To perform any operation (read, rewrite, free, claim) on a gang block, 1879 * zio_gang_assemble() first assembles the gang tree (minus data leaves) 1880 * in the io_gang_tree field of the original logical i/o by recursively 1881 * reading the gang leader and all gang headers below it. This yields 1882 * an in-core tree containing the contents of every gang header and the 1883 * bps for every constituent of the gang block. 1884 * 1885 * With the gang tree now assembled, zio_gang_issue() just walks the gang tree 1886 * and invokes a callback on each bp. To free a gang block, zio_gang_issue() 1887 * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp. 1888 * zio_claim_gang() provides a similarly trivial wrapper for zio_claim(). 1889 * zio_read_gang() is a wrapper around zio_read() that omits reading gang 1890 * headers, since we already have those in io_gang_tree. zio_rewrite_gang() 1891 * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite() 1892 * of the gang header plus zio_checksum_compute() of the data to update the 1893 * gang header's blk_cksum as described above. 1894 * 1895 * The two-phase assemble/issue model solves the problem of partial failure -- 1896 * what if you'd freed part of a gang block but then couldn't read the 1897 * gang header for another part? 
Assembling the entire gang tree first 1898 * ensures that all the necessary gang header I/O has succeeded before 1899 * starting the actual work of free, claim, or write. Once the gang tree 1900 * is assembled, free and claim are in-memory operations that cannot fail. 1901 * 1902 * In the event that a gang write fails, zio_dva_unallocate() walks the 1903 * gang tree to immediately free (i.e. insert back into the space map) 1904 * everything we've allocated. This ensures that we don't get ENOSPC 1905 * errors during repeated suspend/resume cycles due to a flaky device. 1906 * 1907 * Gang rewrites only happen during sync-to-convergence. If we can't assemble 1908 * the gang tree, we won't modify the block, so we can safely defer the free 1909 * (knowing that the block is still intact). If we *can* assemble the gang 1910 * tree, then even if some of the rewrites fail, zio_dva_unallocate() will free 1911 * each constituent bp and we can allocate a new block on the next sync pass. 1912 * 1913 * In all cases, the gang tree allows complete recovery from partial failure. 1914 * ========================================================================== 1915 */ 1916 1917 static void 1918 zio_gang_issue_func_done(zio_t *zio) 1919 { 1920 abd_put(zio->io_abd); 1921 } 1922 1923 static zio_t * 1924 zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 1925 uint64_t offset) 1926 { 1927 if (gn != NULL) 1928 return (pio); 1929 1930 return (zio_read(pio, pio->io_spa, bp, abd_get_offset(data, offset), 1931 BP_GET_PSIZE(bp), zio_gang_issue_func_done, 1932 NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), 1933 &pio->io_bookmark)); 1934 } 1935 1936 static zio_t * 1937 zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 1938 uint64_t offset) 1939 { 1940 zio_t *zio; 1941 1942 if (gn != NULL) { 1943 abd_t *gbh_abd = 1944 abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE); 1945 zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp, 1946 gbh_abd, SPA_GANGBLOCKSIZE, zio_gang_issue_func_done, NULL, 1947 pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), 1948 &pio->io_bookmark); 1949 /* 1950 * As we rewrite each gang header, the pipeline will compute 1951 * a new gang block header checksum for it; but no one will 1952 * compute a new data checksum, so we do that here. The one 1953 * exception is the gang leader: the pipeline already computed 1954 * its data checksum because that stage precedes gang assembly. 1955 * (Presently, nothing actually uses interior data checksums; 1956 * this is just good hygiene.) 1957 */ 1958 if (gn != pio->io_gang_leader->io_gang_tree) { 1959 abd_t *buf = abd_get_offset(data, offset); 1960 1961 zio_checksum_compute(zio, BP_GET_CHECKSUM(bp), 1962 buf, BP_GET_PSIZE(bp)); 1963 1964 abd_put(buf); 1965 } 1966 /* 1967 * If we are here to damage data for testing purposes, 1968 * leave the GBH alone so that we can detect the damage. 
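*
* (Clearing ZIO_VDEV_IO_STAGES below strips the vdev stages from this
* rewrite, so the new header checksum is computed but nothing is
* written to disk and the induced damage remains in place for the test
* to find.)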
1969 */ 1970 if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE) 1971 zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES; 1972 } else { 1973 zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp, 1974 abd_get_offset(data, offset), BP_GET_PSIZE(bp), 1975 zio_gang_issue_func_done, NULL, pio->io_priority, 1976 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 1977 } 1978 1979 return (zio); 1980 } 1981 1982 /* ARGSUSED */ 1983 static zio_t * 1984 zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 1985 uint64_t offset) 1986 { 1987 return (zio_free_sync(pio, pio->io_spa, pio->io_txg, bp, 1988 ZIO_GANG_CHILD_FLAGS(pio))); 1989 } 1990 1991 /* ARGSUSED */ 1992 static zio_t * 1993 zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 1994 uint64_t offset) 1995 { 1996 return (zio_claim(pio, pio->io_spa, pio->io_txg, bp, 1997 NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio))); 1998 } 1999 2000 static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = { 2001 NULL, 2002 zio_read_gang, 2003 zio_rewrite_gang, 2004 zio_free_gang, 2005 zio_claim_gang, 2006 NULL 2007 }; 2008 2009 static void zio_gang_tree_assemble_done(zio_t *zio); 2010 2011 static zio_gang_node_t * 2012 zio_gang_node_alloc(zio_gang_node_t **gnpp) 2013 { 2014 zio_gang_node_t *gn; 2015 2016 ASSERT(*gnpp == NULL); 2017 2018 gn = kmem_zalloc(sizeof (*gn), KM_SLEEP); 2019 gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE); 2020 *gnpp = gn; 2021 2022 return (gn); 2023 } 2024 2025 static void 2026 zio_gang_node_free(zio_gang_node_t **gnpp) 2027 { 2028 zio_gang_node_t *gn = *gnpp; 2029 2030 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) 2031 ASSERT(gn->gn_child[g] == NULL); 2032 2033 zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE); 2034 kmem_free(gn, sizeof (*gn)); 2035 *gnpp = NULL; 2036 } 2037 2038 static void 2039 zio_gang_tree_free(zio_gang_node_t **gnpp) 2040 { 2041 zio_gang_node_t *gn = *gnpp; 2042 2043 if (gn == NULL) 2044 return; 2045 2046 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) 2047 zio_gang_tree_free(&gn->gn_child[g]); 2048 2049 zio_gang_node_free(gnpp); 2050 } 2051 2052 static void 2053 zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp) 2054 { 2055 zio_gang_node_t *gn = zio_gang_node_alloc(gnpp); 2056 abd_t *gbh_abd = abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE); 2057 2058 ASSERT(gio->io_gang_leader == gio); 2059 ASSERT(BP_IS_GANG(bp)); 2060 2061 zio_nowait(zio_read(gio, gio->io_spa, bp, gbh_abd, SPA_GANGBLOCKSIZE, 2062 zio_gang_tree_assemble_done, gn, gio->io_priority, 2063 ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark)); 2064 } 2065 2066 static void 2067 zio_gang_tree_assemble_done(zio_t *zio) 2068 { 2069 zio_t *gio = zio->io_gang_leader; 2070 zio_gang_node_t *gn = zio->io_private; 2071 blkptr_t *bp = zio->io_bp; 2072 2073 ASSERT(gio == zio_unique_parent(zio)); 2074 ASSERT(zio->io_child_count == 0); 2075 2076 if (zio->io_error) 2077 return; 2078 2079 /* this ABD was created from a linear buf in zio_gang_tree_assemble */ 2080 if (BP_SHOULD_BYTESWAP(bp)) 2081 byteswap_uint64_array(abd_to_buf(zio->io_abd), zio->io_size); 2082 2083 ASSERT3P(abd_to_buf(zio->io_abd), ==, gn->gn_gbh); 2084 ASSERT(zio->io_size == SPA_GANGBLOCKSIZE); 2085 ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC); 2086 2087 abd_put(zio->io_abd); 2088 2089 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 2090 blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g]; 2091 if (!BP_IS_GANG(gbp)) 2092 continue; 2093 zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]); 2094 } 2095 } 2096 2097 static void 2098 zio_gang_tree_issue(zio_t *pio, 
zio_gang_node_t *gn, blkptr_t *bp, abd_t *data, 2099 uint64_t offset) 2100 { 2101 zio_t *gio = pio->io_gang_leader; 2102 zio_t *zio; 2103 2104 ASSERT(BP_IS_GANG(bp) == !!gn); 2105 ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp)); 2106 ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree); 2107 2108 /* 2109 * If you're a gang header, your data is in gn->gn_gbh. 2110 * If you're a gang member, your data is in 'data' and gn == NULL. 2111 */ 2112 zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data, offset); 2113 2114 if (gn != NULL) { 2115 ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC); 2116 2117 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 2118 blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g]; 2119 if (BP_IS_HOLE(gbp)) 2120 continue; 2121 zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data, 2122 offset); 2123 offset += BP_GET_PSIZE(gbp); 2124 } 2125 } 2126 2127 if (gn == gio->io_gang_tree) 2128 ASSERT3U(gio->io_size, ==, offset); 2129 2130 if (zio != pio) 2131 zio_nowait(zio); 2132 } 2133 2134 static int 2135 zio_gang_assemble(zio_t *zio) 2136 { 2137 blkptr_t *bp = zio->io_bp; 2138 2139 ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL); 2140 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 2141 2142 zio->io_gang_leader = zio; 2143 2144 zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree); 2145 2146 return (ZIO_PIPELINE_CONTINUE); 2147 } 2148 2149 static int 2150 zio_gang_issue(zio_t *zio) 2151 { 2152 blkptr_t *bp = zio->io_bp; 2153 2154 if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT, ZIO_WAIT_DONE)) { 2155 return (ZIO_PIPELINE_STOP); 2156 } 2157 2158 ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio); 2159 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 2160 2161 if (zio->io_child_error[ZIO_CHILD_GANG] == 0) 2162 zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_abd, 2163 0); 2164 else 2165 zio_gang_tree_free(&zio->io_gang_tree); 2166 2167 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 2168 2169 return (ZIO_PIPELINE_CONTINUE); 2170 } 2171 2172 static void 2173 zio_write_gang_member_ready(zio_t *zio) 2174 { 2175 zio_t *pio = zio_unique_parent(zio); 2176 zio_t *gio = zio->io_gang_leader; 2177 dva_t *cdva = zio->io_bp->blk_dva; 2178 dva_t *pdva = pio->io_bp->blk_dva; 2179 uint64_t asize; 2180 2181 if (BP_IS_HOLE(zio->io_bp)) 2182 return; 2183 2184 ASSERT(BP_IS_HOLE(&zio->io_bp_orig)); 2185 2186 ASSERT(zio->io_child_type == ZIO_CHILD_GANG); 2187 ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies); 2188 ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp)); 2189 ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp)); 2190 ASSERT3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp)); 2191 2192 mutex_enter(&pio->io_lock); 2193 for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) { 2194 ASSERT(DVA_GET_GANG(&pdva[d])); 2195 asize = DVA_GET_ASIZE(&pdva[d]); 2196 asize += DVA_GET_ASIZE(&cdva[d]); 2197 DVA_SET_ASIZE(&pdva[d], asize); 2198 } 2199 mutex_exit(&pio->io_lock); 2200 } 2201 2202 static void 2203 zio_write_gang_done(zio_t *zio) 2204 { 2205 /* 2206 * The io_abd field will be NULL for a zio with no data. The io_flags 2207 * will initially have the ZIO_FLAG_NODATA bit flag set, but we can't 2208 * check for it here as it is cleared in zio_ready. 
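*
* (Such data-less gang members are created by zio_write_gang_block()
* with a NULL abd when the parent carries ZIO_FLAG_NODATA, so the NULL
* test below is the reliable discriminator.)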
2209 */ 2210 if (zio->io_abd != NULL) 2211 abd_put(zio->io_abd); 2212 } 2213 2214 static int 2215 zio_write_gang_block(zio_t *pio) 2216 { 2217 spa_t *spa = pio->io_spa; 2218 metaslab_class_t *mc = spa_normal_class(spa); 2219 blkptr_t *bp = pio->io_bp; 2220 zio_t *gio = pio->io_gang_leader; 2221 zio_t *zio; 2222 zio_gang_node_t *gn, **gnpp; 2223 zio_gbh_phys_t *gbh; 2224 abd_t *gbh_abd; 2225 uint64_t txg = pio->io_txg; 2226 uint64_t resid = pio->io_size; 2227 uint64_t lsize; 2228 int copies = gio->io_prop.zp_copies; 2229 int gbh_copies = MIN(copies + 1, spa_max_replication(spa)); 2230 zio_prop_t zp; 2231 int error; 2232 boolean_t has_data = !(pio->io_flags & ZIO_FLAG_NODATA); 2233 2234 int flags = METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER; 2235 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 2236 ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 2237 ASSERT(has_data); 2238 2239 flags |= METASLAB_ASYNC_ALLOC; 2240 VERIFY(zfs_refcount_held(&mc->mc_alloc_slots[pio->io_allocator], 2241 pio)); 2242 2243 /* 2244 * The logical zio has already placed a reservation for 2245 * 'copies' allocation slots but gang blocks may require 2246 * additional copies. These additional copies 2247 * (i.e. gbh_copies - copies) are guaranteed to succeed 2248 * since metaslab_class_throttle_reserve() always allows 2249 * additional reservations for gang blocks. 2250 */ 2251 VERIFY(metaslab_class_throttle_reserve(mc, gbh_copies - copies, 2252 pio->io_allocator, pio, flags)); 2253 } 2254 2255 error = metaslab_alloc(spa, mc, SPA_GANGBLOCKSIZE, 2256 bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, flags, 2257 &pio->io_alloc_list, pio, pio->io_allocator); 2258 if (error) { 2259 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 2260 ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 2261 ASSERT(has_data); 2262 2263 /* 2264 * If we failed to allocate the gang block header then 2265 * we remove any additional allocation reservations that 2266 * we placed here. The original reservation will 2267 * be removed when the logical I/O goes to the ready 2268 * stage. 2269 */ 2270 metaslab_class_throttle_unreserve(mc, 2271 gbh_copies - copies, pio->io_allocator, pio); 2272 } 2273 pio->io_error = error; 2274 return (ZIO_PIPELINE_CONTINUE); 2275 } 2276 2277 if (pio == gio) { 2278 gnpp = &gio->io_gang_tree; 2279 } else { 2280 gnpp = pio->io_private; 2281 ASSERT(pio->io_ready == zio_write_gang_member_ready); 2282 } 2283 2284 gn = zio_gang_node_alloc(gnpp); 2285 gbh = gn->gn_gbh; 2286 bzero(gbh, SPA_GANGBLOCKSIZE); 2287 gbh_abd = abd_get_from_buf(gbh, SPA_GANGBLOCKSIZE); 2288 2289 /* 2290 * Create the gang header. 2291 */ 2292 zio = zio_rewrite(pio, spa, txg, bp, gbh_abd, SPA_GANGBLOCKSIZE, 2293 zio_write_gang_done, NULL, pio->io_priority, 2294 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 2295 2296 /* 2297 * Create and nowait the gang children. 2298 */ 2299 for (int g = 0; resid != 0; resid -= lsize, g++) { 2300 lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g), 2301 SPA_MINBLOCKSIZE); 2302 ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid); 2303 2304 zp.zp_checksum = gio->io_prop.zp_checksum; 2305 zp.zp_compress = ZIO_COMPRESS_OFF; 2306 zp.zp_type = DMU_OT_NONE; 2307 zp.zp_level = 0; 2308 zp.zp_copies = gio->io_prop.zp_copies; 2309 zp.zp_dedup = B_FALSE; 2310 zp.zp_dedup_verify = B_FALSE; 2311 zp.zp_nopwrite = B_FALSE; 2312 2313 zio_t *cio = zio_write(zio, spa, txg, &gbh->zg_blkptr[g], 2314 has_data ? 
abd_get_offset(pio->io_abd, pio->io_size - 2315 resid) : NULL, lsize, lsize, &zp, 2316 zio_write_gang_member_ready, NULL, NULL, 2317 zio_write_gang_done, &gn->gn_child[g], pio->io_priority, 2318 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 2319 2320 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 2321 ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 2322 ASSERT(has_data); 2323 2324 /* 2325 * Gang children won't throttle but we should 2326 * account for their work, so reserve an allocation 2327 * slot for them here. 2328 */ 2329 VERIFY(metaslab_class_throttle_reserve(mc, 2330 zp.zp_copies, cio->io_allocator, cio, flags)); 2331 } 2332 zio_nowait(cio); 2333 } 2334 2335 /* 2336 * Set pio's pipeline to just wait for zio to finish. 2337 */ 2338 pio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 2339 2340 zio_nowait(zio); 2341 2342 return (ZIO_PIPELINE_CONTINUE); 2343 } 2344 2345 /* 2346 * The zio_nop_write stage in the pipeline determines if allocating a 2347 * new bp is necessary. The nopwrite feature can handle writes in 2348 * either syncing or open context (i.e. zil writes) and as a result is 2349 * mutually exclusive with dedup. 2350 * 2351 * By leveraging a cryptographically secure checksum, such as SHA256, we 2352 * can compare the checksums of the new data and the old to determine if 2353 * allocating a new block is required. Note that our requirements for 2354 * cryptographic strength are fairly weak: there can't be any accidental 2355 * hash collisions, but we don't need to be secure against intentional 2356 * (malicious) collisions. To trigger a nopwrite, you have to be able 2357 * to write the file to begin with, and triggering an incorrect (hash 2358 * collision) nopwrite is no worse than simply writing to the file. 2359 * That said, there are no known attacks against the checksum algorithms 2360 * used for nopwrite, assuming that the salt and the checksums 2361 * themselves remain secret. 2362 */ 2363 static int 2364 zio_nop_write(zio_t *zio) 2365 { 2366 blkptr_t *bp = zio->io_bp; 2367 blkptr_t *bp_orig = &zio->io_bp_orig; 2368 zio_prop_t *zp = &zio->io_prop; 2369 2370 ASSERT(BP_GET_LEVEL(bp) == 0); 2371 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE)); 2372 ASSERT(zp->zp_nopwrite); 2373 ASSERT(!zp->zp_dedup); 2374 ASSERT(zio->io_bp_override == NULL); 2375 ASSERT(IO_IS_ALLOCATING(zio)); 2376 2377 /* 2378 * Check to see if the original bp and the new bp have matching 2379 * characteristics (i.e. same checksum, compression algorithms, etc). 2380 * If they don't then just continue with the pipeline which will 2381 * allocate a new bp. 2382 */ 2383 if (BP_IS_HOLE(bp_orig) || 2384 !(zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_flags & 2385 ZCHECKSUM_FLAG_NOPWRITE) || 2386 BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) || 2387 BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) || 2388 BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) || 2389 zp->zp_copies != BP_GET_NDVAS(bp_orig)) 2390 return (ZIO_PIPELINE_CONTINUE); 2391 2392 /* 2393 * If the checksums match then reset the pipeline so that we 2394 * avoid allocating a new bp and issuing any I/O. 
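*
* (Checksum equality below is taken as proof that the data is
* unchanged; this is the same trust model dedup relies on, and it is
* why only ZCHECKSUM_FLAG_NOPWRITE algorithms qualify above.)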
2395 */ 2396 if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) { 2397 ASSERT(zio_checksum_table[zp->zp_checksum].ci_flags & 2398 ZCHECKSUM_FLAG_NOPWRITE); 2399 ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig)); 2400 ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig)); 2401 ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF); 2402 ASSERT(bcmp(&bp->blk_prop, &bp_orig->blk_prop, 2403 sizeof (uint64_t)) == 0); 2404 2405 *bp = *bp_orig; 2406 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 2407 zio->io_flags |= ZIO_FLAG_NOPWRITE; 2408 } 2409 2410 return (ZIO_PIPELINE_CONTINUE); 2411 } 2412 2413 /* 2414 * ========================================================================== 2415 * Dedup 2416 * ========================================================================== 2417 */ 2418 static void 2419 zio_ddt_child_read_done(zio_t *zio) 2420 { 2421 blkptr_t *bp = zio->io_bp; 2422 ddt_entry_t *dde = zio->io_private; 2423 ddt_phys_t *ddp; 2424 zio_t *pio = zio_unique_parent(zio); 2425 2426 mutex_enter(&pio->io_lock); 2427 ddp = ddt_phys_select(dde, bp); 2428 if (zio->io_error == 0) 2429 ddt_phys_clear(ddp); /* this ddp doesn't need repair */ 2430 2431 if (zio->io_error == 0 && dde->dde_repair_abd == NULL) 2432 dde->dde_repair_abd = zio->io_abd; 2433 else 2434 abd_free(zio->io_abd); 2435 mutex_exit(&pio->io_lock); 2436 } 2437 2438 static int 2439 zio_ddt_read_start(zio_t *zio) 2440 { 2441 blkptr_t *bp = zio->io_bp; 2442 2443 ASSERT(BP_GET_DEDUP(bp)); 2444 ASSERT(BP_GET_PSIZE(bp) == zio->io_size); 2445 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2446 2447 if (zio->io_child_error[ZIO_CHILD_DDT]) { 2448 ddt_t *ddt = ddt_select(zio->io_spa, bp); 2449 ddt_entry_t *dde = ddt_repair_start(ddt, bp); 2450 ddt_phys_t *ddp = dde->dde_phys; 2451 ddt_phys_t *ddp_self = ddt_phys_select(dde, bp); 2452 blkptr_t blk; 2453 2454 ASSERT(zio->io_vsd == NULL); 2455 zio->io_vsd = dde; 2456 2457 if (ddp_self == NULL) 2458 return (ZIO_PIPELINE_CONTINUE); 2459 2460 for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { 2461 if (ddp->ddp_phys_birth == 0 || ddp == ddp_self) 2462 continue; 2463 ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp, 2464 &blk); 2465 zio_nowait(zio_read(zio, zio->io_spa, &blk, 2466 abd_alloc_for_io(zio->io_size, B_TRUE), 2467 zio->io_size, zio_ddt_child_read_done, dde, 2468 zio->io_priority, ZIO_DDT_CHILD_FLAGS(zio) | 2469 ZIO_FLAG_DONT_PROPAGATE, &zio->io_bookmark)); 2470 } 2471 return (ZIO_PIPELINE_CONTINUE); 2472 } 2473 2474 zio_nowait(zio_read(zio, zio->io_spa, bp, 2475 zio->io_abd, zio->io_size, NULL, NULL, zio->io_priority, 2476 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark)); 2477 2478 return (ZIO_PIPELINE_CONTINUE); 2479 } 2480 2481 static int 2482 zio_ddt_read_done(zio_t *zio) 2483 { 2484 blkptr_t *bp = zio->io_bp; 2485 2486 if (zio_wait_for_children(zio, ZIO_CHILD_DDT_BIT, ZIO_WAIT_DONE)) { 2487 return (ZIO_PIPELINE_STOP); 2488 } 2489 2490 ASSERT(BP_GET_DEDUP(bp)); 2491 ASSERT(BP_GET_PSIZE(bp) == zio->io_size); 2492 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2493 2494 if (zio->io_child_error[ZIO_CHILD_DDT]) { 2495 ddt_t *ddt = ddt_select(zio->io_spa, bp); 2496 ddt_entry_t *dde = zio->io_vsd; 2497 if (ddt == NULL) { 2498 ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE); 2499 return (ZIO_PIPELINE_CONTINUE); 2500 } 2501 if (dde == NULL) { 2502 zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1; 2503 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE); 2504 return (ZIO_PIPELINE_STOP); 2505 } 2506 if (dde->dde_repair_abd != NULL) { 2507 abd_copy(zio->io_abd, dde->dde_repair_abd, 2508 
zio->io_size); 2509 zio->io_child_error[ZIO_CHILD_DDT] = 0; 2510 } 2511 ddt_repair_done(ddt, dde); 2512 zio->io_vsd = NULL; 2513 } 2514 2515 ASSERT(zio->io_vsd == NULL); 2516 2517 return (ZIO_PIPELINE_CONTINUE); 2518 } 2519 2520 static boolean_t 2521 zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde) 2522 { 2523 spa_t *spa = zio->io_spa; 2524 boolean_t do_raw = (zio->io_flags & ZIO_FLAG_RAW); 2525 2526 /* We should never get a raw, override zio */ 2527 ASSERT(!(zio->io_bp_override && do_raw)); 2528 2529 /* 2530 * Note: we compare the original data, not the transformed data, 2531 * because when zio->io_bp is an override bp, we will not have 2532 * pushed the I/O transforms. That's an important optimization 2533 * because otherwise we'd compress/encrypt all dmu_sync() data twice. 2534 */ 2535 for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { 2536 zio_t *lio = dde->dde_lead_zio[p]; 2537 2538 if (lio != NULL) { 2539 return (lio->io_orig_size != zio->io_orig_size || 2540 abd_cmp(zio->io_orig_abd, lio->io_orig_abd, 2541 zio->io_orig_size) != 0); 2542 } 2543 } 2544 2545 for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { 2546 ddt_phys_t *ddp = &dde->dde_phys[p]; 2547 2548 if (ddp->ddp_phys_birth != 0) { 2549 arc_buf_t *abuf = NULL; 2550 arc_flags_t aflags = ARC_FLAG_WAIT; 2551 int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE; 2552 blkptr_t blk = *zio->io_bp; 2553 int error; 2554 2555 ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth); 2556 2557 ddt_exit(ddt); 2558 2559 /* 2560 * Intuitively, it would make more sense to compare 2561 * io_abd than io_orig_abd in the raw case since you 2562 * don't want to look at any transformations that have 2563 * happened to the data. However, for raw I/Os the 2564 * data will actually be the same in io_abd and 2565 * io_orig_abd, so all we have to do is issue this as 2566 * a raw ARC read. 
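*
* (The assertions below spell this out: for a raw zio no transform has
* been pushed and io_abd matches io_orig_abd, so comparing io_orig_abd
* against the raw ARC copy is an exact comparison.)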
2567 */ 2568 if (do_raw) { 2569 zio_flags |= ZIO_FLAG_RAW; 2570 ASSERT3U(zio->io_size, ==, zio->io_orig_size); 2571 ASSERT0(abd_cmp(zio->io_abd, zio->io_orig_abd, 2572 zio->io_size)); 2573 ASSERT3P(zio->io_transform_stack, ==, NULL); 2574 } 2575 2576 error = arc_read(NULL, spa, &blk, 2577 arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ, 2578 zio_flags, &aflags, &zio->io_bookmark); 2579 2580 if (error == 0) { 2581 if (arc_buf_size(abuf) != zio->io_orig_size || 2582 abd_cmp_buf(zio->io_orig_abd, abuf->b_data, 2583 zio->io_orig_size) != 0) 2584 error = SET_ERROR(EEXIST); 2585 arc_buf_destroy(abuf, &abuf); 2586 } 2587 2588 ddt_enter(ddt); 2589 return (error != 0); 2590 } 2591 } 2592 2593 return (B_FALSE); 2594 } 2595 2596 static void 2597 zio_ddt_child_write_ready(zio_t *zio) 2598 { 2599 int p = zio->io_prop.zp_copies; 2600 ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp); 2601 ddt_entry_t *dde = zio->io_private; 2602 ddt_phys_t *ddp = &dde->dde_phys[p]; 2603 zio_t *pio; 2604 2605 if (zio->io_error) 2606 return; 2607 2608 ddt_enter(ddt); 2609 2610 ASSERT(dde->dde_lead_zio[p] == zio); 2611 2612 ddt_phys_fill(ddp, zio->io_bp); 2613 2614 zio_link_t *zl = NULL; 2615 while ((pio = zio_walk_parents(zio, &zl)) != NULL) 2616 ddt_bp_fill(ddp, pio->io_bp, zio->io_txg); 2617 2618 ddt_exit(ddt); 2619 } 2620 2621 static void 2622 zio_ddt_child_write_done(zio_t *zio) 2623 { 2624 int p = zio->io_prop.zp_copies; 2625 ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp); 2626 ddt_entry_t *dde = zio->io_private; 2627 ddt_phys_t *ddp = &dde->dde_phys[p]; 2628 2629 ddt_enter(ddt); 2630 2631 ASSERT(ddp->ddp_refcnt == 0); 2632 ASSERT(dde->dde_lead_zio[p] == zio); 2633 dde->dde_lead_zio[p] = NULL; 2634 2635 if (zio->io_error == 0) { 2636 zio_link_t *zl = NULL; 2637 while (zio_walk_parents(zio, &zl) != NULL) 2638 ddt_phys_addref(ddp); 2639 } else { 2640 ddt_phys_clear(ddp); 2641 } 2642 2643 ddt_exit(ddt); 2644 } 2645 2646 static void 2647 zio_ddt_ditto_write_done(zio_t *zio) 2648 { 2649 int p = DDT_PHYS_DITTO; 2650 zio_prop_t *zp = &zio->io_prop; 2651 blkptr_t *bp = zio->io_bp; 2652 ddt_t *ddt = ddt_select(zio->io_spa, bp); 2653 ddt_entry_t *dde = zio->io_private; 2654 ddt_phys_t *ddp = &dde->dde_phys[p]; 2655 ddt_key_t *ddk = &dde->dde_key; 2656 2657 ddt_enter(ddt); 2658 2659 ASSERT(ddp->ddp_refcnt == 0); 2660 ASSERT(dde->dde_lead_zio[p] == zio); 2661 dde->dde_lead_zio[p] = NULL; 2662 2663 if (zio->io_error == 0) { 2664 ASSERT(ZIO_CHECKSUM_EQUAL(bp->blk_cksum, ddk->ddk_cksum)); 2665 ASSERT(zp->zp_copies < SPA_DVAS_PER_BP); 2666 ASSERT(zp->zp_copies == BP_GET_NDVAS(bp) - BP_IS_GANG(bp)); 2667 if (ddp->ddp_phys_birth != 0) 2668 ddt_phys_free(ddt, ddk, ddp, zio->io_txg); 2669 ddt_phys_fill(ddp, bp); 2670 } 2671 2672 ddt_exit(ddt); 2673 } 2674 2675 static int 2676 zio_ddt_write(zio_t *zio) 2677 { 2678 spa_t *spa = zio->io_spa; 2679 blkptr_t *bp = zio->io_bp; 2680 uint64_t txg = zio->io_txg; 2681 zio_prop_t *zp = &zio->io_prop; 2682 int p = zp->zp_copies; 2683 int ditto_copies; 2684 zio_t *cio = NULL; 2685 zio_t *dio = NULL; 2686 ddt_t *ddt = ddt_select(spa, bp); 2687 ddt_entry_t *dde; 2688 ddt_phys_t *ddp; 2689 2690 ASSERT(BP_GET_DEDUP(bp)); 2691 ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum); 2692 ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override); 2693 ASSERT(!(zio->io_bp_override && (zio->io_flags & ZIO_FLAG_RAW))); 2694 2695 ddt_enter(ddt); 2696 dde = ddt_lookup(ddt, bp, B_TRUE); 2697 ddp = &dde->dde_phys[p]; 2698 2699 if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) { 2700 /* 2701 * If we're using a weak 
checksum, upgrade to a strong checksum 2702 * and try again. If we're already using a strong checksum, 2703 * we can't resolve it, so just convert to an ordinary write. 2704 * (And automatically e-mail a paper to Nature?) 2705 */ 2706 if (!(zio_checksum_table[zp->zp_checksum].ci_flags & 2707 ZCHECKSUM_FLAG_DEDUP)) { 2708 zp->zp_checksum = spa_dedup_checksum(spa); 2709 zio_pop_transforms(zio); 2710 zio->io_stage = ZIO_STAGE_OPEN; 2711 BP_ZERO(bp); 2712 } else { 2713 zp->zp_dedup = B_FALSE; 2714 BP_SET_DEDUP(bp, B_FALSE); 2715 } 2716 ASSERT(!BP_GET_DEDUP(bp)); 2717 zio->io_pipeline = ZIO_WRITE_PIPELINE; 2718 ddt_exit(ddt); 2719 return (ZIO_PIPELINE_CONTINUE); 2720 } 2721 2722 ditto_copies = ddt_ditto_copies_needed(ddt, dde, ddp); 2723 ASSERT(ditto_copies < SPA_DVAS_PER_BP); 2724 2725 if (ditto_copies > ddt_ditto_copies_present(dde) && 2726 dde->dde_lead_zio[DDT_PHYS_DITTO] == NULL) { 2727 zio_prop_t czp = *zp; 2728 2729 czp.zp_copies = ditto_copies; 2730 2731 /* 2732 * If we arrived here with an override bp, we won't have run 2733 * the transform stack, so we won't have the data we need to 2734 * generate a child i/o. So, toss the override bp and restart. 2735 * This is safe, because using the override bp is just an 2736 * optimization; and it's rare, so the cost doesn't matter. 2737 */ 2738 if (zio->io_bp_override) { 2739 zio_pop_transforms(zio); 2740 zio->io_stage = ZIO_STAGE_OPEN; 2741 zio->io_pipeline = ZIO_WRITE_PIPELINE; 2742 zio->io_bp_override = NULL; 2743 BP_ZERO(bp); 2744 ddt_exit(ddt); 2745 return (ZIO_PIPELINE_CONTINUE); 2746 } 2747 2748 dio = zio_write(zio, spa, txg, bp, zio->io_orig_abd, 2749 zio->io_orig_size, zio->io_orig_size, &czp, NULL, NULL, 2750 NULL, zio_ddt_ditto_write_done, dde, zio->io_priority, 2751 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark); 2752 2753 zio_push_transform(dio, zio->io_abd, zio->io_size, 0, NULL); 2754 dde->dde_lead_zio[DDT_PHYS_DITTO] = dio; 2755 } 2756 2757 if (ddp->ddp_phys_birth != 0 || dde->dde_lead_zio[p] != NULL) { 2758 if (ddp->ddp_phys_birth != 0) 2759 ddt_bp_fill(ddp, bp, txg); 2760 if (dde->dde_lead_zio[p] != NULL) 2761 zio_add_child(zio, dde->dde_lead_zio[p]); 2762 else 2763 ddt_phys_addref(ddp); 2764 } else if (zio->io_bp_override) { 2765 ASSERT(bp->blk_birth == txg); 2766 ASSERT(BP_EQUAL(bp, zio->io_bp_override)); 2767 ddt_phys_fill(ddp, bp); 2768 ddt_phys_addref(ddp); 2769 } else { 2770 cio = zio_write(zio, spa, txg, bp, zio->io_orig_abd, 2771 zio->io_orig_size, zio->io_orig_size, zp, 2772 zio_ddt_child_write_ready, NULL, NULL, 2773 zio_ddt_child_write_done, dde, zio->io_priority, 2774 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark); 2775 2776 zio_push_transform(cio, zio->io_abd, zio->io_size, 0, NULL); 2777 dde->dde_lead_zio[p] = cio; 2778 } 2779 2780 ddt_exit(ddt); 2781 2782 if (cio) 2783 zio_nowait(cio); 2784 if (dio) 2785 zio_nowait(dio); 2786 2787 return (ZIO_PIPELINE_CONTINUE); 2788 } 2789 2790 ddt_entry_t *freedde; /* for debugging */ 2791 2792 static int 2793 zio_ddt_free(zio_t *zio) 2794 { 2795 spa_t *spa = zio->io_spa; 2796 blkptr_t *bp = zio->io_bp; 2797 ddt_t *ddt = ddt_select(spa, bp); 2798 ddt_entry_t *dde; 2799 ddt_phys_t *ddp; 2800 2801 ASSERT(BP_GET_DEDUP(bp)); 2802 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2803 2804 ddt_enter(ddt); 2805 freedde = dde = ddt_lookup(ddt, bp, B_TRUE); 2806 ddp = ddt_phys_select(dde, bp); 2807 ddt_phys_decref(ddp); 2808 ddt_exit(ddt); 2809 2810 return (ZIO_PIPELINE_CONTINUE); 2811 } 2812 2813 /* 2814 * ========================================================================== 2815 * 
Allocate and free blocks 2816 * ========================================================================== 2817 */ 2818 2819 static zio_t * 2820 zio_io_to_allocate(spa_t *spa, int allocator) 2821 { 2822 zio_t *zio; 2823 2824 ASSERT(MUTEX_HELD(&spa->spa_alloc_locks[allocator])); 2825 2826 zio = avl_first(&spa->spa_alloc_trees[allocator]); 2827 if (zio == NULL) 2828 return (NULL); 2829 2830 ASSERT(IO_IS_ALLOCATING(zio)); 2831 2832 /* 2833 * Try to place a reservation for this zio. If we're unable to 2834 * reserve then we throttle. 2835 */ 2836 ASSERT3U(zio->io_allocator, ==, allocator); 2837 if (!metaslab_class_throttle_reserve(zio->io_metaslab_class, 2838 zio->io_prop.zp_copies, zio->io_allocator, zio, 0)) { 2839 return (NULL); 2840 } 2841 2842 avl_remove(&spa->spa_alloc_trees[allocator], zio); 2843 ASSERT3U(zio->io_stage, <, ZIO_STAGE_DVA_ALLOCATE); 2844 2845 return (zio); 2846 } 2847 2848 static int 2849 zio_dva_throttle(zio_t *zio) 2850 { 2851 spa_t *spa = zio->io_spa; 2852 zio_t *nio; 2853 metaslab_class_t *mc; 2854 2855 /* locate an appropriate allocation class */ 2856 mc = spa_preferred_class(spa, zio->io_size, zio->io_prop.zp_type, 2857 zio->io_prop.zp_level, zio->io_prop.zp_zpl_smallblk); 2858 2859 if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE || 2860 !mc->mc_alloc_throttle_enabled || 2861 zio->io_child_type == ZIO_CHILD_GANG || 2862 zio->io_flags & ZIO_FLAG_NODATA) { 2863 return (ZIO_PIPELINE_CONTINUE); 2864 } 2865 2866 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 2867 2868 ASSERT3U(zio->io_queued_timestamp, >, 0); 2869 ASSERT(zio->io_stage == ZIO_STAGE_DVA_THROTTLE); 2870 2871 zbookmark_phys_t *bm = &zio->io_bookmark; 2872 /* 2873 * We want to try to use as many allocators as possible to help improve 2874 * performance, but we also want logically adjacent IOs to be physically 2875 * adjacent to improve sequential read performance. We chunk each object 2876 * into 2^20 block regions, and then hash based on the objset, object, 2877 * level, and region to accomplish both of these goals. 2878 */ 2879 zio->io_allocator = cityhash4(bm->zb_objset, bm->zb_object, 2880 bm->zb_level, bm->zb_blkid >> 20) % spa->spa_alloc_count; 2881 mutex_enter(&spa->spa_alloc_locks[zio->io_allocator]); 2882 ASSERT(zio->io_type == ZIO_TYPE_WRITE); 2883 zio->io_metaslab_class = mc; 2884 avl_add(&spa->spa_alloc_trees[zio->io_allocator], zio); 2885 nio = zio_io_to_allocate(spa, zio->io_allocator); 2886 mutex_exit(&spa->spa_alloc_locks[zio->io_allocator]); 2887 2888 if (nio == zio) 2889 return (ZIO_PIPELINE_CONTINUE); 2890 2891 if (nio != NULL) { 2892 ASSERT(nio->io_stage == ZIO_STAGE_DVA_THROTTLE); 2893 /* 2894 * We are passing control to a new zio so make sure that 2895 * it is processed by a different thread. We do this to 2896 * avoid stack overflows that can occur when parents are 2897 * throttled and children are making progress. We allow 2898 * it to go to the head of the taskq since it's already 2899 * been waiting. 
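*
* (The B_TRUE below is the cut-in-line argument to
* zio_taskq_dispatch(), queueing nio at the head of the issue taskq
* rather than the tail.)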
2900 */ 2901 zio_taskq_dispatch(nio, ZIO_TASKQ_ISSUE, B_TRUE); 2902 } 2903 return (ZIO_PIPELINE_STOP); 2904 } 2905 2906 static void 2907 zio_allocate_dispatch(spa_t *spa, int allocator) 2908 { 2909 zio_t *zio; 2910 2911 mutex_enter(&spa->spa_alloc_locks[allocator]); 2912 zio = zio_io_to_allocate(spa, allocator); 2913 mutex_exit(&spa->spa_alloc_locks[allocator]); 2914 if (zio == NULL) 2915 return; 2916 2917 ASSERT3U(zio->io_stage, ==, ZIO_STAGE_DVA_THROTTLE); 2918 ASSERT0(zio->io_error); 2919 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_TRUE); 2920 } 2921 2922 static int 2923 zio_dva_allocate(zio_t *zio) 2924 { 2925 spa_t *spa = zio->io_spa; 2926 metaslab_class_t *mc; 2927 blkptr_t *bp = zio->io_bp; 2928 int error; 2929 int flags = 0; 2930 2931 if (zio->io_gang_leader == NULL) { 2932 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 2933 zio->io_gang_leader = zio; 2934 } 2935 2936 ASSERT(BP_IS_HOLE(bp)); 2937 ASSERT0(BP_GET_NDVAS(bp)); 2938 ASSERT3U(zio->io_prop.zp_copies, >, 0); 2939 ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa)); 2940 ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp)); 2941 2942 if (zio->io_flags & ZIO_FLAG_NODATA) 2943 flags |= METASLAB_DONT_THROTTLE; 2944 if (zio->io_flags & ZIO_FLAG_GANG_CHILD) 2945 flags |= METASLAB_GANG_CHILD; 2946 if (zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE) 2947 flags |= METASLAB_ASYNC_ALLOC; 2948 2949 /* 2950 * if not already chosen, locate an appropriate allocation class 2951 */ 2952 mc = zio->io_metaslab_class; 2953 if (mc == NULL) { 2954 mc = spa_preferred_class(spa, zio->io_size, 2955 zio->io_prop.zp_type, zio->io_prop.zp_level, 2956 zio->io_prop.zp_zpl_smallblk); 2957 zio->io_metaslab_class = mc; 2958 } 2959 2960 error = metaslab_alloc(spa, mc, zio->io_size, bp, 2961 zio->io_prop.zp_copies, zio->io_txg, NULL, flags, 2962 &zio->io_alloc_list, zio, zio->io_allocator); 2963 2964 /* 2965 * Fallback to normal class when an alloc class is full 2966 */ 2967 if (error == ENOSPC && mc != spa_normal_class(spa)) { 2968 /* 2969 * If throttling, transfer reservation over to normal class. 2970 * The io_allocator slot can remain the same even though we 2971 * are switching classes. 
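*
* (The transfer below is an unreserve on the old class followed by a
* METASLAB_MUST_RESERVE reserve on the normal class, so the VERIFY is
* expected to always succeed.)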
2972 */ 2973 if (mc->mc_alloc_throttle_enabled && 2974 (zio->io_flags & ZIO_FLAG_IO_ALLOCATING)) { 2975 metaslab_class_throttle_unreserve(mc, 2976 zio->io_prop.zp_copies, zio->io_allocator, zio); 2977 zio->io_flags &= ~ZIO_FLAG_IO_ALLOCATING; 2978 2979 mc = spa_normal_class(spa); 2980 VERIFY(metaslab_class_throttle_reserve(mc, 2981 zio->io_prop.zp_copies, zio->io_allocator, zio, 2982 flags | METASLAB_MUST_RESERVE)); 2983 } else { 2984 mc = spa_normal_class(spa); 2985 } 2986 zio->io_metaslab_class = mc; 2987 2988 error = metaslab_alloc(spa, mc, zio->io_size, bp, 2989 zio->io_prop.zp_copies, zio->io_txg, NULL, flags, 2990 &zio->io_alloc_list, zio, zio->io_allocator); 2991 } 2992 2993 if (error != 0) { 2994 zfs_dbgmsg("%s: metaslab allocation failure: zio %p, " 2995 "size %llu, error %d", spa_name(spa), zio, zio->io_size, 2996 error); 2997 if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE) 2998 return (zio_write_gang_block(zio)); 2999 zio->io_error = error; 3000 } 3001 3002 return (ZIO_PIPELINE_CONTINUE); 3003 } 3004 3005 static int 3006 zio_dva_free(zio_t *zio) 3007 { 3008 metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE); 3009 3010 return (ZIO_PIPELINE_CONTINUE); 3011 } 3012 3013 static int 3014 zio_dva_claim(zio_t *zio) 3015 { 3016 int error; 3017 3018 error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg); 3019 if (error) 3020 zio->io_error = error; 3021 3022 return (ZIO_PIPELINE_CONTINUE); 3023 } 3024 3025 /* 3026 * Undo an allocation. This is used by zio_done() when an I/O fails 3027 * and we want to give back the block we just allocated. 3028 * This handles both normal blocks and gang blocks. 3029 */ 3030 static void 3031 zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp) 3032 { 3033 ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp)); 3034 ASSERT(zio->io_bp_override == NULL); 3035 3036 if (!BP_IS_HOLE(bp)) 3037 metaslab_free(zio->io_spa, bp, bp->blk_birth, B_TRUE); 3038 3039 if (gn != NULL) { 3040 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 3041 zio_dva_unallocate(zio, gn->gn_child[g], 3042 &gn->gn_gbh->zg_blkptr[g]); 3043 } 3044 } 3045 } 3046 3047 /* 3048 * Try to allocate an intent log block. Return 0 on success, errno on failure. 3049 */ 3050 int 3051 zio_alloc_zil(spa_t *spa, uint64_t objset, uint64_t txg, blkptr_t *new_bp, 3052 blkptr_t *old_bp, uint64_t size, boolean_t *slog) 3053 { 3054 int error = 1; 3055 zio_alloc_list_t io_alloc_list; 3056 3057 ASSERT(txg > spa_syncing_txg(spa)); 3058 3059 metaslab_trace_init(&io_alloc_list); 3060 3061 /* 3062 * Block pointer fields are useful to metaslabs for stats and debugging. 3063 * Fill in the obvious ones before calling into metaslab_alloc(). 3064 */ 3065 BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG); 3066 BP_SET_PSIZE(new_bp, size); 3067 BP_SET_LEVEL(new_bp, 0); 3068 3069 /* 3070 * When allocating a zil block, we don't have information about 3071 * the final destination of the block except the objset it's part 3072 * of, so we just hash the objset ID to pick the allocator to get 3073 * some parallelism. 
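*
* A sketch of the selection below:
*
*	allocator = cityhash4(0, 0, 0, objset) % spa->spa_alloc_count;
*
* so all ZIL blocks of one objset share an allocator, while distinct
* objsets spread across the available allocators.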
3074 */ 3075 error = metaslab_alloc(spa, spa_log_class(spa), size, new_bp, 1, 3076 txg, old_bp, METASLAB_HINTBP_AVOID, &io_alloc_list, NULL, 3077 cityhash4(0, 0, 0, objset) % spa->spa_alloc_count); 3078 if (error == 0) { 3079 *slog = TRUE; 3080 } else { 3081 error = metaslab_alloc(spa, spa_normal_class(spa), size, 3082 new_bp, 1, txg, old_bp, METASLAB_HINTBP_AVOID, 3083 &io_alloc_list, NULL, cityhash4(0, 0, 0, objset) % 3084 spa->spa_alloc_count); 3085 if (error == 0) 3086 *slog = FALSE; 3087 } 3088 metaslab_trace_fini(&io_alloc_list); 3089 3090 if (error == 0) { 3091 BP_SET_LSIZE(new_bp, size); 3092 BP_SET_PSIZE(new_bp, size); 3093 BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF); 3094 BP_SET_CHECKSUM(new_bp, 3095 spa_version(spa) >= SPA_VERSION_SLIM_ZIL 3096 ? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG); 3097 BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG); 3098 BP_SET_LEVEL(new_bp, 0); 3099 BP_SET_DEDUP(new_bp, 0); 3100 BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER); 3101 } else { 3102 zfs_dbgmsg("%s: zil block allocation failure: " 3103 "size %llu, error %d", spa_name(spa), size, error); 3104 } 3105 3106 return (error); 3107 } 3108 3109 /* 3110 * ========================================================================== 3111 * Read and write to physical devices 3112 * ========================================================================== 3113 */ 3114 3115 3116 /* 3117 * Issue an I/O to the underlying vdev. Typically the issue pipeline 3118 * stops after this stage and will resume upon I/O completion. 3119 * However, there are instances where the vdev layer may need to 3120 * continue the pipeline when an I/O was not issued. Since the I/O 3121 * that was sent to the vdev layer might be different than the one 3122 * currently active in the pipeline (see vdev_queue_io()), we explicitly 3123 * force the underlying vdev layers to call either zio_execute() or 3124 * zio_interrupt() to ensure that the pipeline continues with the correct I/O. 3125 */ 3126 static int 3127 zio_vdev_io_start(zio_t *zio) 3128 { 3129 vdev_t *vd = zio->io_vd; 3130 uint64_t align; 3131 spa_t *spa = zio->io_spa; 3132 3133 ASSERT(zio->io_error == 0); 3134 ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0); 3135 3136 if (vd == NULL) { 3137 if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) 3138 spa_config_enter(spa, SCL_ZIO, zio, RW_READER); 3139 3140 /* 3141 * The mirror_ops handle multiple DVAs in a single BP. 3142 */ 3143 vdev_mirror_ops.vdev_op_io_start(zio); 3144 return (ZIO_PIPELINE_STOP); 3145 } 3146 3147 ASSERT3P(zio->io_logical, !=, zio); 3148 if (zio->io_type == ZIO_TYPE_WRITE) { 3149 ASSERT(spa->spa_trust_config); 3150 3151 /* 3152 * Note: the code can handle other kinds of writes, 3153 * but we don't expect them. 3154 */ 3155 if (zio->io_vd->vdev_removing) { 3156 ASSERT(zio->io_flags & 3157 (ZIO_FLAG_PHYSICAL | ZIO_FLAG_SELF_HEAL | 3158 ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE)); 3159 } 3160 } 3161 3162 align = 1ULL << vd->vdev_top->vdev_ashift; 3163 3164 if (!(zio->io_flags & ZIO_FLAG_PHYSICAL) && 3165 P2PHASE(zio->io_size, align) != 0) { 3166 /* Transform logical writes to be a full physical block size. 
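*
* (Worked example, assuming ashift=12: a 2560-byte logical write gets
* align = 4096, so asize = P2ROUNDUP(2560, 4096) = 4096 and the write
* path below zero-fills the 1536-byte tail.)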
*/ 3167 uint64_t asize = P2ROUNDUP(zio->io_size, align); 3168 abd_t *abuf = abd_alloc_sametype(zio->io_abd, asize); 3169 ASSERT(vd == vd->vdev_top); 3170 if (zio->io_type == ZIO_TYPE_WRITE) { 3171 abd_copy(abuf, zio->io_abd, zio->io_size); 3172 abd_zero_off(abuf, zio->io_size, asize - zio->io_size); 3173 } 3174 zio_push_transform(zio, abuf, asize, asize, zio_subblock); 3175 } 3176 3177 /* 3178 * If this is not a physical io, make sure that it is properly aligned 3179 * before proceeding. 3180 */ 3181 if (!(zio->io_flags & ZIO_FLAG_PHYSICAL)) { 3182 ASSERT0(P2PHASE(zio->io_offset, align)); 3183 ASSERT0(P2PHASE(zio->io_size, align)); 3184 } else { 3185 /* 3186 * For physical writes, we allow 512b aligned writes and assume 3187 * the device will perform a read-modify-write as necessary. 3188 */ 3189 ASSERT0(P2PHASE(zio->io_offset, SPA_MINBLOCKSIZE)); 3190 ASSERT0(P2PHASE(zio->io_size, SPA_MINBLOCKSIZE)); 3191 } 3192 3193 VERIFY(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa)); 3194 3195 /* 3196 * If this is a repair I/O, and there's no self-healing involved -- 3197 * that is, we're just resilvering what we expect to resilver -- 3198 * then don't do the I/O unless zio's txg is actually in vd's DTL. 3199 * This prevents spurious resilvering. 3200 * 3201 * There are a few ways that we can end up creating these spurious 3202 * resilver i/os: 3203 * 3204 * 1. A resilver i/o will be issued if any DVA in the BP has a 3205 * dirty DTL. The mirror code will issue resilver writes to 3206 * each DVA, including the one(s) that are not on vdevs with dirty 3207 * DTLs. 3208 * 3209 * 2. With nested replication, which happens when we have a 3210 * "replacing" or "spare" vdev that's a child of a mirror or raidz. 3211 * For example, given mirror(replacing(A+B), C), it's likely that 3212 * only A is out of date (it's the new device). In this case, we'll 3213 * read from C, then use the data to resilver A+B -- but we don't 3214 * actually want to resilver B, just A. The top-level mirror has no 3215 * way to know this, so instead we just discard unnecessary repairs 3216 * as we work our way down the vdev tree. 3217 * 3218 * 3. ZTEST also creates mirrors of mirrors, mirrors of raidz, etc. 3219 * The same logic applies to any form of nested replication: ditto 3220 * + mirror, RAID-Z + replacing, etc. 3221 * 3222 * However, indirect vdevs point off to other vdevs which may have 3223 * DTL's, so we never bypass them. The child i/os on concrete vdevs 3224 * will be properly bypassed instead. 
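*
* (Putting it together, the test below bypasses only when every one of
* the following holds: the zio is a repair write, it is not
* self-healing, io_txg is nonzero, the vdev is concrete, and the
* vdev's DTL says that txg needs no repair.)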
3225 */ 3226 if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) && 3227 !(zio->io_flags & ZIO_FLAG_SELF_HEAL) && 3228 zio->io_txg != 0 && /* not a delegated i/o */ 3229 vd->vdev_ops != &vdev_indirect_ops && 3230 !vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) { 3231 ASSERT(zio->io_type == ZIO_TYPE_WRITE); 3232 zio_vdev_io_bypass(zio); 3233 return (ZIO_PIPELINE_CONTINUE); 3234 } 3235 3236 if (vd->vdev_ops->vdev_op_leaf && 3237 (zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE)) { 3238 3239 if (zio->io_type == ZIO_TYPE_READ && vdev_cache_read(zio)) 3240 return (ZIO_PIPELINE_CONTINUE); 3241 3242 if ((zio = vdev_queue_io(zio)) == NULL) 3243 return (ZIO_PIPELINE_STOP); 3244 3245 if (!vdev_accessible(vd, zio)) { 3246 zio->io_error = SET_ERROR(ENXIO); 3247 zio_interrupt(zio); 3248 return (ZIO_PIPELINE_STOP); 3249 } 3250 } 3251 3252 vd->vdev_ops->vdev_op_io_start(zio); 3253 return (ZIO_PIPELINE_STOP); 3254 } 3255 3256 static int 3257 zio_vdev_io_done(zio_t *zio) 3258 { 3259 vdev_t *vd = zio->io_vd; 3260 vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops; 3261 boolean_t unexpected_error = B_FALSE; 3262 3263 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) { 3264 return (ZIO_PIPELINE_STOP); 3265 } 3266 3267 ASSERT(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE); 3268 3269 if (vd != NULL && vd->vdev_ops->vdev_op_leaf) { 3270 3271 vdev_queue_io_done(zio); 3272 3273 if (zio->io_type == ZIO_TYPE_WRITE) 3274 vdev_cache_write(zio); 3275 3276 if (zio_injection_enabled && zio->io_error == 0) 3277 zio->io_error = zio_handle_device_injection(vd, 3278 zio, EIO); 3279 3280 if (zio_injection_enabled && zio->io_error == 0) 3281 zio->io_error = zio_handle_label_injection(zio, EIO); 3282 3283 if (zio->io_error) { 3284 if (!vdev_accessible(vd, zio)) { 3285 zio->io_error = SET_ERROR(ENXIO); 3286 } else { 3287 unexpected_error = B_TRUE; 3288 } 3289 } 3290 } 3291 3292 ops->vdev_op_io_done(zio); 3293 3294 if (unexpected_error) 3295 VERIFY(vdev_probe(vd, zio) == NULL); 3296 3297 return (ZIO_PIPELINE_CONTINUE); 3298 } 3299 3300 /* 3301 * This function is used to change the priority of an existing zio that is 3302 * currently in-flight. This is used by the arc to upgrade priority in the 3303 * event that a demand read is made for a block that is currently queued 3304 * as a scrub or async read IO. Otherwise, the high priority read request 3305 * would end up having to wait for the lower priority IO. 3306 */ 3307 void 3308 zio_change_priority(zio_t *pio, zio_priority_t priority) 3309 { 3310 zio_t *cio, *cio_next; 3311 zio_link_t *zl = NULL; 3312 3313 ASSERT3U(priority, <, ZIO_PRIORITY_NUM_QUEUEABLE); 3314 3315 if (pio->io_vd != NULL && pio->io_vd->vdev_ops->vdev_op_leaf) { 3316 vdev_queue_change_io_priority(pio, priority); 3317 } else { 3318 pio->io_priority = priority; 3319 } 3320 3321 mutex_enter(&pio->io_lock); 3322 for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) { 3323 cio_next = zio_walk_children(pio, &zl); 3324 zio_change_priority(cio, priority); 3325 } 3326 mutex_exit(&pio->io_lock); 3327 } 3328 3329 /* 3330 * For non-raidz ZIOs, we can just copy aside the bad data read from the 3331 * disk, and use that to finish the checksum ereport later. 
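*
* (RAID-Z is assumed to supply its own vsd ops for this, since
* producing the "good" data there requires reconstruction from parity;
* the default path below only needs to preserve the bad copy.)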
3332 */ 3333 static void 3334 zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr, 3335 const void *good_buf) 3336 { 3337 /* no processing needed */ 3338 zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE); 3339 } 3340 3341 /*ARGSUSED*/ 3342 void 3343 zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr, void *ignored) 3344 { 3345 void *buf = zio_buf_alloc(zio->io_size); 3346 3347 abd_copy_to_buf(buf, zio->io_abd, zio->io_size); 3348 3349 zcr->zcr_cbinfo = zio->io_size; 3350 zcr->zcr_cbdata = buf; 3351 zcr->zcr_finish = zio_vsd_default_cksum_finish; 3352 zcr->zcr_free = zio_buf_free; 3353 } 3354 3355 static int 3356 zio_vdev_io_assess(zio_t *zio) 3357 { 3358 vdev_t *vd = zio->io_vd; 3359 3360 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) { 3361 return (ZIO_PIPELINE_STOP); 3362 } 3363 3364 if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) 3365 spa_config_exit(zio->io_spa, SCL_ZIO, zio); 3366 3367 if (zio->io_vsd != NULL) { 3368 zio->io_vsd_ops->vsd_free(zio); 3369 zio->io_vsd = NULL; 3370 } 3371 3372 if (zio_injection_enabled && zio->io_error == 0) 3373 zio->io_error = zio_handle_fault_injection(zio, EIO); 3374 3375 /* 3376 * If the I/O failed, determine whether we should attempt to retry it. 3377 * 3378 * On retry, we cut in line in the issue queue, since we don't want 3379 * compression/checksumming/etc. work to prevent our (cheap) IO reissue. 3380 */ 3381 if (zio->io_error && vd == NULL && 3382 !(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) { 3383 ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE)); /* not a leaf */ 3384 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS)); /* not a leaf */ 3385 zio->io_error = 0; 3386 zio->io_flags |= ZIO_FLAG_IO_RETRY | 3387 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE; 3388 zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1; 3389 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, 3390 zio_requeue_io_start_cut_in_line); 3391 return (ZIO_PIPELINE_STOP); 3392 } 3393 3394 /* 3395 * If we got an error on a leaf device, convert it to ENXIO 3396 * if the device is not accessible at all. 3397 */ 3398 if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf && 3399 !vdev_accessible(vd, zio)) 3400 zio->io_error = SET_ERROR(ENXIO); 3401 3402 /* 3403 * If we can't write to an interior vdev (mirror or RAID-Z), 3404 * set vdev_cant_write so that we stop trying to allocate from it. 3405 */ 3406 if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE && 3407 vd != NULL && !vd->vdev_ops->vdev_op_leaf) { 3408 vd->vdev_cant_write = B_TRUE; 3409 } 3410 3411 /* 3412 * If a cache flush returns ENOTSUP or ENOTTY, we know that no future 3413 * attempts will ever succeed. In this case we set a persistent bit so 3414 * that we don't bother with it in the future. 
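*
* (The persistent bit is vd->vdev_nowritecache, set below when a
* DKIOCFLUSHWRITECACHE ioctl fails with ENOTSUP or ENOTTY.)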
3415 */
3416 if ((zio->io_error == ENOTSUP || zio->io_error == ENOTTY) &&
3417 zio->io_type == ZIO_TYPE_IOCTL &&
3418 zio->io_cmd == DKIOCFLUSHWRITECACHE && vd != NULL)
3419 vd->vdev_nowritecache = B_TRUE;
3420
3421 if (zio->io_error)
3422 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
3423
3424 if (vd != NULL && vd->vdev_ops->vdev_op_leaf &&
3425 zio->io_physdone != NULL) {
3426 ASSERT(!(zio->io_flags & ZIO_FLAG_DELEGATED));
3427 ASSERT(zio->io_child_type == ZIO_CHILD_VDEV);
3428 zio->io_physdone(zio->io_logical);
3429 }
3430
3431 return (ZIO_PIPELINE_CONTINUE);
3432 }
3433
3434 void
3435 zio_vdev_io_reissue(zio_t *zio)
3436 {
3437 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
3438 ASSERT(zio->io_error == 0);
3439
3440 zio->io_stage >>= 1;
3441 }
3442
3443 void
3444 zio_vdev_io_redone(zio_t *zio)
3445 {
3446 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE);
3447
3448 zio->io_stage >>= 1;
3449 }
3450
3451 void
3452 zio_vdev_io_bypass(zio_t *zio)
3453 {
3454 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
3455 ASSERT(zio->io_error == 0);
3456
3457 zio->io_flags |= ZIO_FLAG_IO_BYPASS;
3458 zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1;
3459 }
3460
3461 /*
3462 * ==========================================================================
3463 * Generate and verify checksums
3464 * ==========================================================================
3465 */
3466 static int
3467 zio_checksum_generate(zio_t *zio)
3468 {
3469 blkptr_t *bp = zio->io_bp;
3470 enum zio_checksum checksum;
3471
3472 if (bp == NULL) {
3473 /*
3474 * This is zio_write_phys().
3475 * We're either generating a label checksum, or none at all.
3476 */
3477 checksum = zio->io_prop.zp_checksum;
3478
3479 if (checksum == ZIO_CHECKSUM_OFF)
3480 return (ZIO_PIPELINE_CONTINUE);
3481
3482 ASSERT(checksum == ZIO_CHECKSUM_LABEL);
3483 } else {
3484 if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) {
3485 ASSERT(!IO_IS_ALLOCATING(zio));
3486 checksum = ZIO_CHECKSUM_GANG_HEADER;
3487 } else {
3488 checksum = BP_GET_CHECKSUM(bp);
3489 }
3490 }
3491
3492 zio_checksum_compute(zio, checksum, zio->io_abd, zio->io_size);
3493
3494 return (ZIO_PIPELINE_CONTINUE);
3495 }
3496
3497 static int
3498 zio_checksum_verify(zio_t *zio)
3499 {
3500 zio_bad_cksum_t info;
3501 blkptr_t *bp = zio->io_bp;
3502 int error;
3503
3504 ASSERT(zio->io_vd != NULL);
3505
3506 if (bp == NULL) {
3507 /*
3508 * This is zio_read_phys().
3509 * We're either verifying a label checksum, or nothing at all.
3510 */
3511 if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF)
3512 return (ZIO_PIPELINE_CONTINUE);
3513
3514 ASSERT(zio->io_prop.zp_checksum == ZIO_CHECKSUM_LABEL);
3515 }
3516
3517 if ((error = zio_checksum_error(zio, &info)) != 0) {
3518 zio->io_error = error;
3519 if (error == ECKSUM &&
3520 !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
3521 zfs_ereport_start_checksum(zio->io_spa,
3522 zio->io_vd, zio, zio->io_offset,
3523 zio->io_size, NULL, &info);
3524 }
3525 }
3526
3527 return (ZIO_PIPELINE_CONTINUE);
3528 }
3529
3530 /*
3531 * Called by RAID-Z to ensure we don't compute the checksum twice.
3532 */
3533 void
3534 zio_checksum_verified(zio_t *zio)
3535 {
3536 zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
3537 }
3538
3539 /*
3540 * ==========================================================================
3541 * Error rank. Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other.
3542 * An error of 0 indicates success. ENXIO indicates whole-device failure,
3543 * which may be transient (e.g. unplugged) or permanent.
ECKSUM and EIO 3544 * indicate errors that are specific to one I/O, and most likely permanent. 3545 * Any other error is presumed to be worse because we weren't expecting it. 3546 * ========================================================================== 3547 */ 3548 int 3549 zio_worst_error(int e1, int e2) 3550 { 3551 static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO }; 3552 int r1, r2; 3553 3554 for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++) 3555 if (e1 == zio_error_rank[r1]) 3556 break; 3557 3558 for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++) 3559 if (e2 == zio_error_rank[r2]) 3560 break; 3561 3562 return (r1 > r2 ? e1 : e2); 3563 } 3564 3565 /* 3566 * ========================================================================== 3567 * I/O completion 3568 * ========================================================================== 3569 */ 3570 static int 3571 zio_ready(zio_t *zio) 3572 { 3573 blkptr_t *bp = zio->io_bp; 3574 zio_t *pio, *pio_next; 3575 zio_link_t *zl = NULL; 3576 3577 if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT | ZIO_CHILD_DDT_BIT, 3578 ZIO_WAIT_READY)) { 3579 return (ZIO_PIPELINE_STOP); 3580 } 3581 3582 if (zio->io_ready) { 3583 ASSERT(IO_IS_ALLOCATING(zio)); 3584 ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp) || 3585 (zio->io_flags & ZIO_FLAG_NOPWRITE)); 3586 ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0); 3587 3588 zio->io_ready(zio); 3589 } 3590 3591 if (bp != NULL && bp != &zio->io_bp_copy) 3592 zio->io_bp_copy = *bp; 3593 3594 if (zio->io_error != 0) { 3595 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 3596 3597 if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 3598 ASSERT(IO_IS_ALLOCATING(zio)); 3599 ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 3600 ASSERT(zio->io_metaslab_class != NULL); 3601 3602 /* 3603 * We were unable to allocate anything, unreserve and 3604 * issue the next I/O to allocate. 3605 */ 3606 metaslab_class_throttle_unreserve( 3607 zio->io_metaslab_class, zio->io_prop.zp_copies, 3608 zio->io_allocator, zio); 3609 zio_allocate_dispatch(zio->io_spa, zio->io_allocator); 3610 } 3611 } 3612 3613 mutex_enter(&zio->io_lock); 3614 zio->io_state[ZIO_WAIT_READY] = 1; 3615 pio = zio_walk_parents(zio, &zl); 3616 mutex_exit(&zio->io_lock); 3617 3618 /* 3619 * As we notify zio's parents, new parents could be added. 3620 * New parents go to the head of zio's io_parent_list, however, 3621 * so we will (correctly) not notify them. The remainder of zio's 3622 * io_parent_list, from 'pio_next' onward, cannot change because 3623 * all parents must wait for us to be done before they can be done. 3624 */ 3625 for (; pio != NULL; pio = pio_next) { 3626 pio_next = zio_walk_parents(zio, &zl); 3627 zio_notify_parent(pio, zio, ZIO_WAIT_READY); 3628 } 3629 3630 if (zio->io_flags & ZIO_FLAG_NODATA) { 3631 if (BP_IS_GANG(bp)) { 3632 zio->io_flags &= ~ZIO_FLAG_NODATA; 3633 } else { 3634 ASSERT((uintptr_t)zio->io_abd < SPA_MAXBLOCKSIZE); 3635 zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES; 3636 } 3637 } 3638 3639 if (zio_injection_enabled && 3640 zio->io_spa->spa_syncing_txg == zio->io_txg) 3641 zio_handle_ignored_writes(zio); 3642 3643 return (ZIO_PIPELINE_CONTINUE); 3644 } 3645 3646 /* 3647 * Update the allocation throttle accounting. 
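*
* (In short: each completed allocating child write returns one
* reservation slot via metaslab_class_throttle_unreserve() and then
* calls zio_allocate_dispatch() so that a zio queued in
* zio_dva_throttle() can claim the freed slot.)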

/*
 * ==========================================================================
 * I/O completion
 * ==========================================================================
 */
static int
zio_ready(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	zio_t *pio, *pio_next;
	zio_link_t *zl = NULL;

	if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT | ZIO_CHILD_DDT_BIT,
	    ZIO_WAIT_READY)) {
		return (ZIO_PIPELINE_STOP);
	}

	if (zio->io_ready) {
		ASSERT(IO_IS_ALLOCATING(zio));
		ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp) ||
		    (zio->io_flags & ZIO_FLAG_NOPWRITE));
		ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0);

		zio->io_ready(zio);
	}

	if (bp != NULL && bp != &zio->io_bp_copy)
		zio->io_bp_copy = *bp;

	if (zio->io_error != 0) {
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

		if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
			ASSERT(IO_IS_ALLOCATING(zio));
			ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
			ASSERT(zio->io_metaslab_class != NULL);

			/*
			 * We were unable to allocate anything, unreserve and
			 * issue the next I/O to allocate.
			 */
			metaslab_class_throttle_unreserve(
			    zio->io_metaslab_class, zio->io_prop.zp_copies,
			    zio->io_allocator, zio);
			zio_allocate_dispatch(zio->io_spa, zio->io_allocator);
		}
	}

	mutex_enter(&zio->io_lock);
	zio->io_state[ZIO_WAIT_READY] = 1;
	pio = zio_walk_parents(zio, &zl);
	mutex_exit(&zio->io_lock);

	/*
	 * As we notify zio's parents, new parents could be added.
	 * New parents go to the head of zio's io_parent_list, however,
	 * so we will (correctly) not notify them.  The remainder of zio's
	 * io_parent_list, from 'pio_next' onward, cannot change because
	 * all parents must wait for us to be done before they can be done.
	 */
	for (; pio != NULL; pio = pio_next) {
		pio_next = zio_walk_parents(zio, &zl);
		zio_notify_parent(pio, zio, ZIO_WAIT_READY);
	}

	if (zio->io_flags & ZIO_FLAG_NODATA) {
		if (BP_IS_GANG(bp)) {
			zio->io_flags &= ~ZIO_FLAG_NODATA;
		} else {
			ASSERT((uintptr_t)zio->io_abd < SPA_MAXBLOCKSIZE);
			zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
		}
	}

	if (zio_injection_enabled &&
	    zio->io_spa->spa_syncing_txg == zio->io_txg)
		zio_handle_ignored_writes(zio);

	return (ZIO_PIPELINE_CONTINUE);
}

/*
 * Update the allocation throttle accounting.
 */
static void
zio_dva_throttle_done(zio_t *zio)
{
	zio_t *lio = zio->io_logical;
	zio_t *pio = zio_unique_parent(zio);
	vdev_t *vd = zio->io_vd;
	int flags = METASLAB_ASYNC_ALLOC;

	ASSERT3P(zio->io_bp, !=, NULL);
	ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
	ASSERT3U(zio->io_priority, ==, ZIO_PRIORITY_ASYNC_WRITE);
	ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
	ASSERT(vd != NULL);
	ASSERT3P(vd, ==, vd->vdev_top);
	ASSERT(!(zio->io_flags & (ZIO_FLAG_IO_REPAIR | ZIO_FLAG_IO_RETRY)));
	ASSERT(zio->io_flags & ZIO_FLAG_IO_ALLOCATING);
	ASSERT(!(lio->io_flags & ZIO_FLAG_IO_REWRITE));
	ASSERT(!(lio->io_orig_flags & ZIO_FLAG_NODATA));

	/*
	 * Parents of gang children can have two flavors -- ones that
	 * allocated the gang header (will have ZIO_FLAG_IO_REWRITE set)
	 * and ones that allocated the constituent blocks.  The allocation
	 * throttle needs to know the allocating parent zio so we must find
	 * it here.
	 */
	if (pio->io_child_type == ZIO_CHILD_GANG) {
		/*
		 * If our parent is a rewrite gang child then our grandparent
		 * would have been the one that performed the allocation.
		 */
		if (pio->io_flags & ZIO_FLAG_IO_REWRITE)
			pio = zio_unique_parent(pio);
		flags |= METASLAB_GANG_CHILD;
	}

	ASSERT(IO_IS_ALLOCATING(pio));
	ASSERT3P(zio, !=, zio->io_logical);
	ASSERT(zio->io_logical != NULL);
	ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
	ASSERT0(zio->io_flags & ZIO_FLAG_NOPWRITE);
	ASSERT(zio->io_metaslab_class != NULL);

	mutex_enter(&pio->io_lock);
	metaslab_group_alloc_decrement(zio->io_spa, vd->vdev_id, pio, flags,
	    pio->io_allocator, B_TRUE);
	mutex_exit(&pio->io_lock);

	metaslab_class_throttle_unreserve(zio->io_metaslab_class, 1,
	    pio->io_allocator, pio);

	/*
	 * Call into the pipeline to see if there is more work that
	 * needs to be done.  If there is work to be done it will be
	 * dispatched to another taskq thread.
	 */
	zio_allocate_dispatch(zio->io_spa, pio->io_allocator);
}
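
/*
 * A minimal sketch of the reservation lifecycle that the function above
 * completes (hypothetical and compiled out; ZIO_PIPELINE_EXAMPLES is
 * not a real build flag).  zio_dva_throttle() reserves throttle slots
 * before allocation, and completion of the write releases them and
 * kicks the next queued allocation.  This collapses into one function
 * what the pipeline spreads across zio_dva_throttle(), zio_ready() and
 * zio_dva_throttle_done(); the real code releases one slot per vdev
 * child rather than all copies at once.
 */
#ifdef ZIO_PIPELINE_EXAMPLES
static void
example_throttle_lifecycle(zio_t *zio)
{
	metaslab_class_t *mc = zio->io_metaslab_class;

	/* issue side: one slot per DVA copy */
	if (metaslab_class_throttle_reserve(mc, zio->io_prop.zp_copies,
	    zio->io_allocator, zio, 0)) {
		/* ... allocation and the physical writes happen here ... */

		/* completion side: release, then let the next waiter run */
		metaslab_class_throttle_unreserve(mc, zio->io_prop.zp_copies,
		    zio->io_allocator, zio);
		zio_allocate_dispatch(zio->io_spa, zio->io_allocator);
	}
}
#endif	/* ZIO_PIPELINE_EXAMPLES */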

static int
zio_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	zio_t *lio = zio->io_logical;
	blkptr_t *bp = zio->io_bp;
	vdev_t *vd = zio->io_vd;
	uint64_t psize = zio->io_size;
	zio_t *pio, *pio_next;
	zio_link_t *zl = NULL;

	/*
	 * If our children haven't all completed,
	 * wait for them and then repeat this pipeline stage.
	 */
	if (zio_wait_for_children(zio, ZIO_CHILD_ALL_BITS, ZIO_WAIT_DONE)) {
		return (ZIO_PIPELINE_STOP);
	}

	/*
	 * If the allocation throttle is enabled, then update the accounting.
	 * We only track child I/Os that are part of an allocating async
	 * write.  We must do this since the allocation is performed
	 * by the logical I/O but the actual write is done by child I/Os.
	 */
	if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING &&
	    zio->io_child_type == ZIO_CHILD_VDEV) {
		ASSERT(zio->io_metaslab_class != NULL);
		ASSERT(zio->io_metaslab_class->mc_alloc_throttle_enabled);
		zio_dva_throttle_done(zio);
	}

	/*
	 * If the allocation throttle is enabled, verify that
	 * we have decremented the refcounts for every I/O that was throttled.
	 */
	if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
		ASSERT(zio->io_type == ZIO_TYPE_WRITE);
		ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
		ASSERT(bp != NULL);

		metaslab_group_alloc_verify(spa, zio->io_bp, zio,
		    zio->io_allocator);
		VERIFY(zfs_refcount_not_held(
		    &zio->io_metaslab_class->mc_alloc_slots[zio->io_allocator],
		    zio));
	}

	for (int c = 0; c < ZIO_CHILD_TYPES; c++)
		for (int w = 0; w < ZIO_WAIT_TYPES; w++)
			ASSERT(zio->io_children[c][w] == 0);

	if (bp != NULL && !BP_IS_EMBEDDED(bp)) {
		ASSERT(bp->blk_pad[0] == 0);
		ASSERT(bp->blk_pad[1] == 0);
		ASSERT(bcmp(bp, &zio->io_bp_copy, sizeof (blkptr_t)) == 0 ||
		    (bp == zio_unique_parent(zio)->io_bp));
		if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(bp) &&
		    zio->io_bp_override == NULL &&
		    !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) {
			ASSERT(!BP_SHOULD_BYTESWAP(bp));
			ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(bp));
			ASSERT(BP_COUNT_GANG(bp) == 0 ||
			    (BP_COUNT_GANG(bp) == BP_GET_NDVAS(bp)));
		}
		if (zio->io_flags & ZIO_FLAG_NOPWRITE)
			VERIFY(BP_EQUAL(bp, &zio->io_bp_orig));
	}

	/*
	 * If there were child vdev/gang/ddt errors, they apply to us now.
	 */
	zio_inherit_child_errors(zio, ZIO_CHILD_VDEV);
	zio_inherit_child_errors(zio, ZIO_CHILD_GANG);
	zio_inherit_child_errors(zio, ZIO_CHILD_DDT);

	/*
	 * If the I/O on the transformed data was successful, generate any
	 * checksum reports now while we still have the transformed data.
	 */
	if (zio->io_error == 0) {
		while (zio->io_cksum_report != NULL) {
			zio_cksum_report_t *zcr = zio->io_cksum_report;
			uint64_t align = zcr->zcr_align;
			uint64_t asize = P2ROUNDUP(psize, align);
			char *abuf = NULL;
			abd_t *adata = zio->io_abd;

			if (asize != psize) {
				adata = abd_alloc_linear(asize, B_TRUE);
				abd_copy(adata, zio->io_abd, psize);
				abd_zero_off(adata, psize, asize - psize);
			}

			if (adata != NULL)
				abuf = abd_borrow_buf_copy(adata, asize);

			zio->io_cksum_report = zcr->zcr_next;
			zcr->zcr_next = NULL;
			zcr->zcr_finish(zcr, abuf);
			zfs_ereport_free_checksum(zcr);

			if (adata != NULL)
				abd_return_buf(adata, abuf, asize);

			if (asize != psize)
				abd_free(adata);
		}
	}

	zio_pop_transforms(zio);	/* note: may set zio->io_error */

	vdev_stat_update(zio, psize);

	if (zio->io_error) {
		/*
		 * If this I/O is attached to a particular vdev,
		 * generate an error message describing the I/O failure
		 * at the block level.  We ignore these errors if the
		 * device is currently unavailable.
		 */
		if (zio->io_error != ECKSUM && vd != NULL && !vdev_is_dead(vd))
			zfs_ereport_post(FM_EREPORT_ZFS_IO, spa, vd, zio, 0, 0);

		if ((zio->io_error == EIO || !(zio->io_flags &
		    (ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) &&
		    zio == lio) {
			/*
			 * For logical I/O requests, tell the SPA to log the
			 * error and generate a logical data ereport.
			 */
			spa_log_error(spa, zio);
			zfs_ereport_post(FM_EREPORT_ZFS_DATA, spa, NULL, zio,
			    0, 0);
		}
	}

	if (zio->io_error && zio == lio) {
		/*
		 * Determine whether zio should be reexecuted.  This will
		 * propagate all the way to the root via zio_notify_parent().
		 */
		ASSERT(vd == NULL && bp != NULL);
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);

		if (IO_IS_ALLOCATING(zio) &&
		    !(zio->io_flags & ZIO_FLAG_CANFAIL)) {
			if (zio->io_error != ENOSPC)
				zio->io_reexecute |= ZIO_REEXECUTE_NOW;
			else
				zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
		}

		if ((zio->io_type == ZIO_TYPE_READ ||
		    zio->io_type == ZIO_TYPE_FREE) &&
		    !(zio->io_flags & ZIO_FLAG_SCAN_THREAD) &&
		    zio->io_error == ENXIO &&
		    spa_load_state(spa) == SPA_LOAD_NONE &&
		    spa_get_failmode(spa) != ZIO_FAILURE_MODE_CONTINUE)
			zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;

		if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute)
			zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;

		/*
		 * Here is a possibly good place to attempt to do
		 * either combinatorial reconstruction or error correction
		 * based on checksums.  It also might be a good place
		 * to send out preliminary ereports before we suspend
		 * processing.
		 */
	}

	/*
	 * If there were logical child errors, they apply to us now.
	 * We defer this until now to avoid conflating logical child
	 * errors with errors that happened to the zio itself when
	 * updating vdev stats and reporting FMA events above.
	 */
	zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL);

	if ((zio->io_error || zio->io_reexecute) &&
	    IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio &&
	    !(zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)))
		zio_dva_unallocate(zio, zio->io_gang_tree, bp);

	zio_gang_tree_free(&zio->io_gang_tree);

	/*
	 * Godfather I/Os should never suspend.
	 */
	if ((zio->io_flags & ZIO_FLAG_GODFATHER) &&
	    (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND))
		zio->io_reexecute = 0;

	if (zio->io_reexecute) {
		/*
		 * This is a logical I/O that wants to reexecute.
		 *
		 * Reexecute is top-down.  When an i/o fails, if it's not
		 * the root, it simply notifies its parent and sticks around.
		 * The parent, seeing that it still has children in zio_done(),
		 * does the same.  This percolates all the way up to the root.
		 * The root i/o will reexecute or suspend the entire tree.
		 *
		 * This approach ensures that zio_reexecute() honors
		 * all the original i/o dependency relationships, e.g.
		 * parents not executing until children are ready.
		 */
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);

		zio->io_gang_leader = NULL;

		mutex_enter(&zio->io_lock);
		zio->io_state[ZIO_WAIT_DONE] = 1;
		mutex_exit(&zio->io_lock);

		/*
		 * "The Godfather" I/O monitors its children but is
		 * not a true parent to them.  It will track them through
		 * the pipeline but severs its ties whenever they get into
		 * trouble (e.g. suspended).  This allows "The Godfather"
		 * I/O to return status without blocking.
		 */
		zl = NULL;
		for (pio = zio_walk_parents(zio, &zl); pio != NULL;
		    pio = pio_next) {
			zio_link_t *remove_zl = zl;
			pio_next = zio_walk_parents(zio, &zl);

			if ((pio->io_flags & ZIO_FLAG_GODFATHER) &&
			    (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) {
				zio_remove_child(pio, zio, remove_zl);
				zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
			}
		}

		if ((pio = zio_unique_parent(zio)) != NULL) {
			/*
			 * We're not a root i/o, so there's nothing to do
			 * but notify our parent.  Don't propagate errors
			 * upward since we haven't permanently failed yet.
			 */
			ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
			zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE;
			zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
		} else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) {
			/*
			 * We'd fail again if we reexecuted now, so suspend
			 * until conditions improve (e.g. device comes online).
			 */
			zio_suspend(zio->io_spa, zio, ZIO_SUSPEND_IOERR);
		} else {
			/*
			 * Reexecution is potentially a huge amount of work.
			 * Hand it off to the otherwise-unused claim taskq.
			 */
			ASSERT(zio->io_tqent.tqent_next == NULL);
			spa_taskq_dispatch_ent(spa, ZIO_TYPE_CLAIM,
			    ZIO_TASKQ_ISSUE, (task_func_t *)zio_reexecute, zio,
			    0, &zio->io_tqent);
		}
		return (ZIO_PIPELINE_STOP);
	}

	ASSERT(zio->io_child_count == 0);
	ASSERT(zio->io_reexecute == 0);
	ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL));

	/*
	 * Report any checksum errors, since the I/O is complete.
	 */
	while (zio->io_cksum_report != NULL) {
		zio_cksum_report_t *zcr = zio->io_cksum_report;
		zio->io_cksum_report = zcr->zcr_next;
		zcr->zcr_next = NULL;
		zcr->zcr_finish(zcr, NULL);
		zfs_ereport_free_checksum(zcr);
	}

	/*
	 * It is the responsibility of the done callback to ensure that this
	 * particular zio is no longer discoverable for adoption, and as
	 * such, cannot acquire any new parents.
	 */
	if (zio->io_done)
		zio->io_done(zio);

	mutex_enter(&zio->io_lock);
	zio->io_state[ZIO_WAIT_DONE] = 1;
	mutex_exit(&zio->io_lock);

	zl = NULL;
	for (pio = zio_walk_parents(zio, &zl); pio != NULL; pio = pio_next) {
		zio_link_t *remove_zl = zl;
		pio_next = zio_walk_parents(zio, &zl);
		zio_remove_child(pio, zio, remove_zl);
		zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
	}

	if (zio->io_waiter != NULL) {
		mutex_enter(&zio->io_lock);
		zio->io_executor = NULL;
		cv_broadcast(&zio->io_cv);
		mutex_exit(&zio->io_lock);
	} else {
		zio_destroy(zio);
	}

	return (ZIO_PIPELINE_STOP);
}

/*
 * ==========================================================================
 * I/O pipeline definition
 * ==========================================================================
 */
static zio_pipe_stage_t *zio_pipeline[] = {
	NULL,
	zio_read_bp_init,
	zio_write_bp_init,
	zio_free_bp_init,
	zio_issue_async,
	zio_write_compress,
	zio_checksum_generate,
	zio_nop_write,
	zio_ddt_read_start,
	zio_ddt_read_done,
	zio_ddt_write,
	zio_ddt_free,
	zio_gang_assemble,
	zio_gang_issue,
	zio_dva_throttle,
	zio_dva_allocate,
	zio_dva_free,
	zio_dva_claim,
	zio_ready,
	zio_vdev_io_start,
	zio_vdev_io_done,
	zio_vdev_io_assess,
	zio_checksum_verify,
	zio_done
};
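
/*
 * Sketch of how a one-hot stage bit selects its handler from the table
 * above (hypothetical and compiled out).  Stage N in the zio_stage enum
 * is bit (1 << N), so the table index is simply the bit position; this
 * is why zio_pipeline[] must stay in exactly the same order as the
 * enum.
 */
#ifdef ZIO_PIPELINE_EXAMPLES
static int
example_run_stage(zio_t *zio)
{
	ASSERT(ISP2(zio->io_stage));
	return (zio_pipeline[highbit64(zio->io_stage) - 1](zio));
}
#endif	/* ZIO_PIPELINE_EXAMPLES */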

/*
 * Compare two zbookmark_phys_t's to see which we would reach first in a
 * pre-order traversal of the object tree.
 *
 * This is simple in every case aside from the meta-dnode object.  For all
 * other objects, we traverse them in order (object 1 before object 2, and
 * so on).  However, all of these objects are traversed while traversing
 * object 0, since the data it points to is the list of objects.  Thus, we
 * need to convert to a canonical representation so we can compare
 * meta-dnode bookmarks to non-meta-dnode bookmarks.
 *
 * We do this by calculating "equivalents" for each field of the zbookmark.
 * zbookmarks outside of the meta-dnode use their own object and level, and
 * calculate the level 0 equivalent (the first L0 blkid that is contained
 * in the blocks this bookmark refers to) by multiplying their blkid by
 * their span (the number of L0 blocks contained within one block at their
 * level).  zbookmarks inside the meta-dnode calculate their object
 * equivalent (which is L0equiv * dnodes per data block), use 0 for their
 * L0equiv, and use level + (1 << 31) (any value larger than a level could
 * ever be) for their level.  This causes them to always compare before a
 * bookmark in their object equivalent, compare appropriately to bookmarks
 * in other objects, and to compare appropriately to other bookmarks in the
 * meta-dnode.
 */
int
zbookmark_compare(uint16_t dbss1, uint8_t ibs1, uint16_t dbss2, uint8_t ibs2,
    const zbookmark_phys_t *zb1, const zbookmark_phys_t *zb2)
{
	/*
	 * These variables represent the "equivalent" values for the
	 * zbookmark, after converting zbookmarks inside the meta dnode
	 * to their normal-object equivalents.
	 */
	uint64_t zb1obj, zb2obj;
	uint64_t zb1L0, zb2L0;
	uint64_t zb1level, zb2level;

	if (zb1->zb_object == zb2->zb_object &&
	    zb1->zb_level == zb2->zb_level &&
	    zb1->zb_blkid == zb2->zb_blkid)
		return (0);

	/*
	 * BP_SPANB calculates the span in blocks.
	 */
	zb1L0 = (zb1->zb_blkid) * BP_SPANB(ibs1, zb1->zb_level);
	zb2L0 = (zb2->zb_blkid) * BP_SPANB(ibs2, zb2->zb_level);

	if (zb1->zb_object == DMU_META_DNODE_OBJECT) {
		zb1obj = zb1L0 * (dbss1 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
		zb1L0 = 0;
		zb1level = zb1->zb_level + COMPARE_META_LEVEL;
	} else {
		zb1obj = zb1->zb_object;
		zb1level = zb1->zb_level;
	}

	if (zb2->zb_object == DMU_META_DNODE_OBJECT) {
		zb2obj = zb2L0 * (dbss2 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
		zb2L0 = 0;
		zb2level = zb2->zb_level + COMPARE_META_LEVEL;
	} else {
		zb2obj = zb2->zb_object;
		zb2level = zb2->zb_level;
	}

	/* Now that we have a canonical representation, do the comparison. */
	if (zb1obj != zb2obj)
		return (zb1obj < zb2obj ? -1 : 1);
	else if (zb1L0 != zb2L0)
		return (zb1L0 < zb2L0 ? -1 : 1);
	else if (zb1level != zb2level)
		return (zb1level > zb2level ? -1 : 1);

	/*
	 * This can (theoretically) happen if the bookmarks have the same
	 * object and level, but different blkids, if the block sizes are
	 * not the same.  There is presently no way to change the indirect
	 * block sizes.
	 */
	return (0);
}
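
/*
 * A worked example of the canonicalization above (a hypothetical,
 * compiled-out sketch; ZIO_PIPELINE_EXAMPLES is not a real build flag).
 * Dnode blocks are 16K, i.e. 32 512-byte sectors, and each dnode is
 * 512 bytes, so L0 blkid 2 of the meta-dnode holds dnodes 64-95.  Its
 * object equivalent is therefore 2 * 32 == 64, and it must sort before
 * any bookmark in object 70.
 */
#ifdef ZIO_PIPELINE_EXAMPLES
static void
example_zbookmark_order(void)
{
	/* fields: (objset, object, level, blkid) */
	zbookmark_phys_t meta = { 0, DMU_META_DNODE_OBJECT, 0, 2 };
	zbookmark_phys_t in_obj70 = { 0, 70, 0, 5 };

	/* dbss = 32 sectors (16K dnode blocks), ibs = 17 (128K) */
	ASSERT3S(zbookmark_compare(32, 17, 32, 17, &meta, &in_obj70),
	    ==, -1);
}
#endif	/* ZIO_PIPELINE_EXAMPLES */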

/*
 * This function answers the question: if last_block is the place that our
 * traversal stopped last time, does that guarantee that we've visited
 * every node under subtree_root?  The raw output of zbookmark_compare is
 * not enough to answer this, so we pass in a modified version of
 * subtree_root: by incrementing the block id, and then checking whether
 * last_block is at or past that point, we can tell whether or not having
 * visited last_block implies that all of subtree_root's children have
 * been visited.
 */
boolean_t
zbookmark_subtree_completed(const dnode_phys_t *dnp,
    const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
{
	zbookmark_phys_t mod_zb = *subtree_root;
	mod_zb.zb_blkid++;
	ASSERT(last_block->zb_level == 0);

	/* The objset_phys_t isn't before anything. */
	if (dnp == NULL)
		return (B_FALSE);

	/*
	 * We pass in 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT) for the
	 * data block size in sectors, because that variable is only used if
	 * the bookmark refers to a block in the meta-dnode.  Since we don't
	 * know without examining it what object it refers to, and there's
	 * no harm in passing in this value in other cases, we always pass
	 * it in.
	 *
	 * We pass in 0 for the indirect block size shift because zb2 must
	 * be level 0.  The indirect block size is only used to calculate
	 * the span of the bookmark, but since the bookmark must be level 0,
	 * the span is always 1, so the math works out.
	 *
	 * If you make changes to how the zbookmark_compare code works, be
	 * sure to verify that this code still works afterwards.
	 */
	return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
	    1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, &mod_zb,
	    last_block) <= 0);
}
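
/*
 * A worked example (hypothetical, compiled out): with 128K indirect
 * blocks (dn_indblkshift == 17), one L1 block points to
 * 1 << (17 - SPA_BLKPTRSHIFT) == 1024 L0 blocks.  The subtree rooted at
 * (object 5, level 1, blkid 0) therefore covers L0 blkids 0-1023: a
 * traversal that stopped at L0 blkid 1024 has provably finished it,
 * while one that stopped at blkid 1023 does not yet satisfy the check.
 */
#ifdef ZIO_PIPELINE_EXAMPLES
static void
example_subtree_completed(const dnode_phys_t *dnp)
{
	/* assumes dnp->dn_indblkshift == 17 */
	zbookmark_phys_t root = { 0, 5, 1, 0 };
	zbookmark_phys_t at_1023 = { 0, 5, 0, 1023 };
	zbookmark_phys_t at_1024 = { 0, 5, 0, 1024 };

	ASSERT(!zbookmark_subtree_completed(dnp, &root, &at_1023));
	ASSERT(zbookmark_subtree_completed(dnp, &root, &at_1024));
}
#endif	/* ZIO_PIPELINE_EXAMPLES */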