/*
 * CDDL HEADER START
 *
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2014, 2017 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/metaslab.h>
#include <sys/refcount.h>
#include <sys/dmu.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_synctask.h>
#include <sys/zap.h>
#include <sys/abd.h>
#include <sys/zthr.h>

/*
 * An indirect vdev corresponds to a vdev that has been removed.  Since
 * we cannot rewrite block pointers of snapshots, etc., we keep a
 * mapping from old location on the removed device to the new location
 * on another device in the pool and use this mapping whenever we need
 * to access the DVA.  Unfortunately, this mapping did not respect
 * logical block boundaries when it was first created, and so a DVA on
 * this indirect vdev may be "split" into multiple sections that each
 * map to a different location.  As a consequence, not all DVAs can be
 * translated to an equivalent new DVA.  Instead we must provide a
 * "vdev_remap" operation that executes a callback on each contiguous
 * segment of the new location.  This function is used in multiple ways:
 *
 *  - i/os to this vdev use the callback to determine where the
 *    data is now located, and issue child i/os for each segment's new
 *    location.
 *
 *  - frees and claims to this vdev use the callback to free or claim
 *    each mapped segment.  (Note that we don't actually need to claim
 *    log blocks on indirect vdevs, because we don't allocate to
 *    removing vdevs.  However, zdb uses zio_claim() for its leak
 *    detection.)
 */
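/*
 * Illustrative sketch (hypothetical names, not part of the interfaces
 * defined in this file): a vdev_remap callback simply receives each
 * contiguous segment of the new location.  A caller that only wanted to
 * total up the size of the concrete segments backing a DVA might pass a
 * callback like the following, ignoring segments that are themselves on
 * indirect vdevs (see the callback guidance above vdev_indirect_remap()
 * below):
 *
 *	static void
 *	sum_concrete_sizes_cb(uint64_t split_offset, vdev_t *vd,
 *	    uint64_t offset, uint64_t size, void *arg)
 *	{
 *		uint64_t *total = arg;
 *		if (vd->vdev_ops != &vdev_indirect_ops)
 *			*total += size;
 *	}
 *
 *	uint64_t total = 0;
 *	vdev_indirect_remap(vd, dva_offset, dva_asize,
 *	    sum_concrete_sizes_cb, &total);
 */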
/*
 * "Big theory statement" for how we mark blocks obsolete.
 *
 * When a block on an indirect vdev is freed or remapped, a section of
 * that vdev's mapping may no longer be referenced (aka "obsolete").  We
 * keep track of how much of each mapping entry is obsolete.  When
 * an entry becomes completely obsolete, we can remove it, thus reducing
 * the memory used by the mapping.  The complete picture of obsolescence
 * is given by the following data structures, described below:
 *  - the entry-specific obsolete count
 *  - the vdev-specific obsolete spacemap
 *  - the pool-specific obsolete bpobj
 *
 * == On disk data structures used ==
 *
 * We track the obsolete space for the pool using several objects.  Each
 * of these objects is created on demand and freed when no longer
 * needed, and is assumed to be empty if it does not exist.
 * SPA_FEATURE_OBSOLETE_COUNTS includes the count of these objects.
 *
 *  - Each vic_mapping_object (associated with an indirect vdev) can
 *    have a vimp_counts_object.  This is an array of uint32_t's
 *    with the same number of entries as the vic_mapping_object.  When
 *    the mapping is condensed, entries from the vic_obsolete_sm_object
 *    (see below) are folded into the counts.  Therefore, each
 *    obsolete_counts entry tells us the number of bytes in the
 *    corresponding mapping entry that were not referenced when the
 *    mapping was last condensed.
 *
 *  - Each indirect or removing vdev can have a vic_obsolete_sm_object.
 *    This is a space map containing an alloc entry for every DVA that
 *    has been obsoleted since the last time this indirect vdev was
 *    condensed.  We use this object in order to improve performance
 *    when marking a DVA as obsolete.  Instead of modifying an arbitrary
 *    offset of the vimp_counts_object, we only need to append an entry
 *    to the end of this object.  When a DVA becomes obsolete, it is
 *    added to the obsolete space map.  This happens when the DVA is
 *    freed, remapped and not referenced by a snapshot, or the last
 *    snapshot referencing it is destroyed.
 *
 *  - Each dataset can have a ds_remap_deadlist object.  This is a
 *    deadlist object containing all blocks that were remapped in this
 *    dataset but referenced in a previous snapshot.  Blocks can *only*
 *    appear on this list if they were remapped (dsl_dataset_block_remapped);
 *    blocks that were killed in a head dataset are put on the normal
 *    ds_deadlist and marked obsolete when they are freed.
 *
 *  - The pool can have a dp_obsolete_bpobj.  This is a list of blocks
 *    in the pool that need to be marked obsolete.  When a snapshot is
 *    destroyed, we move some of the ds_remap_deadlist to the obsolete
 *    bpobj (see dsl_destroy_snapshot_handle_remaps()).  We then
 *    asynchronously process the obsolete bpobj, moving its entries to
 *    the specific vdevs' obsolete space maps.
 *
 * == Summary of how we mark blocks as obsolete ==
 *
 *  - When freeing a block: if any DVA is on an indirect vdev, append to
 *    vic_obsolete_sm_object.
 *  - When remapping a block, add dva to ds_remap_deadlist (if prev snap
 *    references; otherwise append to vic_obsolete_sm_object).
 *  - When freeing a snapshot: move parts of ds_remap_deadlist to
 *    dp_obsolete_bpobj (same algorithm as ds_deadlist).
 *  - When syncing the spa: process dp_obsolete_bpobj, moving ranges to
 *    individual vdev's vic_obsolete_sm_object.
 */
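/*
 * Worked example (hypothetical sizes, for illustration only): suppose a
 * single mapping entry covers 1MB that was copied off a removed vdev.
 * If 384KB of that range is later freed, an alloc entry for those 384KB
 * is appended to the vdev's obsolete space map.  At the next condense,
 * that space map is folded into the entry's obsolete count (384K).  If
 * the remaining 640KB is freed later, the entry's obsolete count reaches
 * its mapped length, so the entry is dropped entirely from the new
 * mapping written by the following condense.
 */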
/*
 * "Big theory statement" for how we condense indirect vdevs.
 *
 * Condensing an indirect vdev's mapping is the process of determining
 * the precise counts of obsolete space for each mapping entry (by
 * integrating the obsolete spacemap into the obsolete counts) and
 * writing out a new mapping that contains only referenced entries.
 *
 * We condense a vdev when we expect the mapping to shrink (see
 * vdev_indirect_should_condense()), but only perform one condense at a
 * time to limit the memory usage.  In addition, we use a separate
 * open-context thread (spa_condense_indirect_thread) to incrementally
 * create the new mapping object in a way that minimizes the impact on
 * the rest of the system.
 *
 * == Generating a new mapping ==
 *
 * To generate a new mapping, we follow these steps:
 *
 * 1. Save the old obsolete space map and create a new mapping object
 *    (see spa_condense_indirect_start_sync()).  This initializes the
 *    spa_condensing_indirect_phys with the "previous obsolete space map",
 *    which is now read only.  Newly obsolete DVAs will be added to a
 *    new (initially empty) obsolete space map, and will not be
 *    considered as part of this condense operation.
 *
 * 2. Construct in memory the precise counts of obsolete space for each
 *    mapping entry, by incorporating the obsolete space map into the
 *    counts.  (See vdev_indirect_mapping_load_obsolete_{counts,spacemap}().)
 *
 * 3. Iterate through each mapping entry, writing to the new mapping any
 *    entries that are not completely obsolete (i.e. which don't have
 *    obsolete count == mapping length).  (See
 *    spa_condense_indirect_generate_new_mapping().)
 *
 * 4. Destroy the old mapping object and switch over to the new one
 *    (spa_condense_indirect_complete_sync).
 *
 * == Restarting from failure ==
 *
 * To restart the condense when we import/open the pool, we must start
 * at the 2nd step above: reconstruct the precise counts in memory,
 * based on the space map + counts.  Then in the 3rd step, we start
 * iterating where we left off: at vimp_max_offset of the new mapping
 * object.
 */

boolean_t zfs_condense_indirect_vdevs_enable = B_TRUE;

/*
 * Condense if at least this percent of the bytes in the mapping is
 * obsolete.  With the default of 25%, the amount of space mapped
 * will be reduced to 1% of its original size after at most 16
 * condenses (since 0.75^16 is approximately 0.01).  Higher values will
 * condense less often (causing less i/o); lower values will reduce the
 * mapping size more quickly.
 */
int zfs_indirect_condense_obsolete_pct = 25;

/*
 * Condense if the obsolete space map takes up more than this amount of
 * space on disk (logically).  This limits the amount of disk space
 * consumed by the obsolete space map; the default of 1GB is small enough
 * that we typically don't mind "wasting" it.
 */
uint64_t zfs_condense_max_obsolete_bytes = 1024 * 1024 * 1024;

/*
 * Don't bother condensing if the mapping uses less than this amount of
 * memory.  The default of 128KB is considered a "trivial" amount of
 * memory and not worth reducing.
 */
uint64_t zfs_condense_min_mapping_bytes = 128 * 1024;

/*
 * This is used by the test suite so that it can ensure that certain
 * actions happen while in the middle of a condense (which might otherwise
 * complete too quickly).  If used to reduce the performance impact of
 * condensing in production, a maximum value of 1 should be sufficient.
 */
int zfs_condense_indirect_commit_entry_delay_ticks = 0;

/*
 * If a split block contains more than this many segments, consider it too
 * computationally expensive to check all (2^num_segments) possible
 * combinations.  Instead, try at most 2^_segments_max randomly-selected
 * combinations.
 *
 * This is reasonable if only a few segment copies are damaged and the
 * majority of segment copies are good.  This allows all the segment copies
 * to participate fairly in the reconstruction and prevents the repeated
 * use of one bad copy.
 */
int zfs_reconstruct_indirect_segments_max = 10;
/*
 * The indirect_child_t represents the vdev that we will read from, when we
 * need to read all copies of the data (e.g. for scrub or reconstruction).
 * For plain (non-mirror) top-level vdevs (i.e. is_vdev is not a mirror),
 * ic_vdev is the same as is_vdev.  However, for mirror top-level vdevs,
 * ic_vdev is a child of the mirror.
 */
typedef struct indirect_child {
	abd_t *ic_data;
	vdev_t *ic_vdev;
} indirect_child_t;

/*
 * The indirect_split_t represents one mapped segment of an i/o to the
 * indirect vdev.  For non-split (contiguously-mapped) blocks, there will be
 * only one indirect_split_t, with is_split_offset==0 and is_size==io_size.
 * For split blocks, there will be several of these.
 */
typedef struct indirect_split {
	list_node_t is_node; /* link on iv_splits */

	/*
	 * is_split_offset is the offset into the i/o.
	 * This is the sum of the previous splits' is_size's.
	 */
	uint64_t is_split_offset;

	vdev_t *is_vdev; /* top-level vdev */
	uint64_t is_target_offset; /* offset on is_vdev */
	uint64_t is_size;
	int is_children; /* number of entries in is_child[] */

	/*
	 * is_good_child is the child that we are currently using to
	 * attempt reconstruction.
	 */
	int is_good_child;

	indirect_child_t is_child[1]; /* variable-length */
} indirect_split_t;

/*
 * The indirect_vsd_t is associated with each i/o to the indirect vdev.
 * It is the "Vdev-Specific Data" in the zio_t's io_vsd.
 */
typedef struct indirect_vsd {
	boolean_t iv_split_block;
	boolean_t iv_reconstruct;

	list_t iv_splits; /* list of indirect_split_t's */
} indirect_vsd_t;
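/*
 * Illustrative sketch (hypothetical sizes): a 32KB read whose DVA was
 * split by the removal mapping might produce two indirect_split_t's on
 * iv_splits.  The first could cover bytes [0, 20K) of the i/o
 * (is_split_offset == 0, is_size == 20K) and point at a plain top-level
 * vdev (is_children == 1); the second covers bytes [20K, 32K)
 * (is_split_offset == 20K, is_size == 12K) and points at a 2-way mirror
 * (is_children == 2, one indirect_child_t per mirror child).  The sum of
 * all is_size's equals the original io_size.
 */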
static void
vdev_indirect_map_free(zio_t *zio)
{
	indirect_vsd_t *iv = zio->io_vsd;

	indirect_split_t *is;
	while ((is = list_head(&iv->iv_splits)) != NULL) {
		for (int c = 0; c < is->is_children; c++) {
			indirect_child_t *ic = &is->is_child[c];
			if (ic->ic_data != NULL)
				abd_free(ic->ic_data);
		}
		list_remove(&iv->iv_splits, is);
		kmem_free(is,
		    offsetof(indirect_split_t, is_child[is->is_children]));
	}
	kmem_free(iv, sizeof (*iv));
}

static const zio_vsd_ops_t vdev_indirect_vsd_ops = {
	vdev_indirect_map_free,
	zio_vsd_default_cksum_report
};

/*
 * Mark the given offset and size as being obsolete.
 */
void
vdev_indirect_mark_obsolete(vdev_t *vd, uint64_t offset, uint64_t size)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT3U(vd->vdev_indirect_config.vic_mapping_object, !=, 0);
	ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops);
	ASSERT(size > 0);
	VERIFY(vdev_indirect_mapping_entry_for_offset(
	    vd->vdev_indirect_mapping, offset) != NULL);

	if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
		mutex_enter(&vd->vdev_obsolete_lock);
		range_tree_add(vd->vdev_obsolete_segments, offset, size);
		mutex_exit(&vd->vdev_obsolete_lock);
		vdev_dirty(vd, 0, NULL, spa_syncing_txg(spa));
	}
}

/*
 * Mark the DVA vdev_id:offset:size as being obsolete in the given tx.  This
 * wrapper is provided because the DMU does not know about vdev_t's and
 * cannot directly call vdev_indirect_mark_obsolete.
 */
void
spa_vdev_indirect_mark_obsolete(spa_t *spa, uint64_t vdev_id, uint64_t offset,
    uint64_t size, dmu_tx_t *tx)
{
	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
	ASSERT(dmu_tx_is_syncing(tx));

	/* The DMU can only remap indirect vdevs. */
	ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
	vdev_indirect_mark_obsolete(vd, offset, size);
}

static spa_condensing_indirect_t *
spa_condensing_indirect_create(spa_t *spa)
{
	spa_condensing_indirect_phys_t *scip =
	    &spa->spa_condensing_indirect_phys;
	spa_condensing_indirect_t *sci = kmem_zalloc(sizeof (*sci), KM_SLEEP);
	objset_t *mos = spa->spa_meta_objset;

	for (int i = 0; i < TXG_SIZE; i++) {
		list_create(&sci->sci_new_mapping_entries[i],
		    sizeof (vdev_indirect_mapping_entry_t),
		    offsetof(vdev_indirect_mapping_entry_t, vime_node));
	}

	sci->sci_new_mapping =
	    vdev_indirect_mapping_open(mos, scip->scip_next_mapping_object);

	return (sci);
}

static void
spa_condensing_indirect_destroy(spa_condensing_indirect_t *sci)
{
	for (int i = 0; i < TXG_SIZE; i++)
		list_destroy(&sci->sci_new_mapping_entries[i]);

	if (sci->sci_new_mapping != NULL)
		vdev_indirect_mapping_close(sci->sci_new_mapping);

	kmem_free(sci, sizeof (*sci));
}

boolean_t
vdev_indirect_should_condense(vdev_t *vd)
{
	vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
	spa_t *spa = vd->vdev_spa;

	ASSERT(dsl_pool_sync_context(spa->spa_dsl_pool));

	if (!zfs_condense_indirect_vdevs_enable)
		return (B_FALSE);

	/*
	 * We can only condense one indirect vdev at a time.
	 */
	if (spa->spa_condensing_indirect != NULL)
		return (B_FALSE);

	if (spa_shutting_down(spa))
		return (B_FALSE);

	/*
	 * The mapping object size must not change while we are
	 * condensing, so we can only condense indirect vdevs
	 * (not vdevs that are still in the middle of being removed).
	 */
	if (vd->vdev_ops != &vdev_indirect_ops)
		return (B_FALSE);

	/*
	 * If nothing new has been marked obsolete, there is no
	 * point in condensing.
	 */
	if (vd->vdev_obsolete_sm == NULL) {
		ASSERT0(vdev_obsolete_sm_object(vd));
		return (B_FALSE);
	}

	ASSERT(vd->vdev_obsolete_sm != NULL);

	ASSERT3U(vdev_obsolete_sm_object(vd), ==,
	    space_map_object(vd->vdev_obsolete_sm));

	uint64_t bytes_mapped = vdev_indirect_mapping_bytes_mapped(vim);
	uint64_t bytes_obsolete = space_map_allocated(vd->vdev_obsolete_sm);
	uint64_t mapping_size = vdev_indirect_mapping_size(vim);
	uint64_t obsolete_sm_size = space_map_length(vd->vdev_obsolete_sm);

	ASSERT3U(bytes_obsolete, <=, bytes_mapped);

	/*
	 * If a high percentage of the bytes that are mapped have become
	 * obsolete, condense (unless the mapping is already small enough).
	 * This has a good chance of reducing the amount of memory used
	 * by the mapping.
	 */
	if (bytes_obsolete * 100 / bytes_mapped >=
	    zfs_indirect_condense_obsolete_pct &&
	    mapping_size > zfs_condense_min_mapping_bytes) {
		zfs_dbgmsg("should condense vdev %llu because obsolete "
		    "spacemap covers %d%% of %lluMB mapping",
		    (u_longlong_t)vd->vdev_id,
		    (int)(bytes_obsolete * 100 / bytes_mapped),
		    (u_longlong_t)bytes_mapped / 1024 / 1024);
		return (B_TRUE);
	}

	/*
	 * If the obsolete space map takes up too much space on disk,
	 * condense in order to free up this disk space.
	 */
	if (obsolete_sm_size >= zfs_condense_max_obsolete_bytes) {
		zfs_dbgmsg("should condense vdev %llu because obsolete sm "
		    "length %lluMB >= max size %lluMB",
		    (u_longlong_t)vd->vdev_id,
		    (u_longlong_t)obsolete_sm_size / 1024 / 1024,
		    (u_longlong_t)zfs_condense_max_obsolete_bytes /
		    1024 / 1024);
		return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * This sync task completes (finishes) a condense, deleting the old
 * mapping and replacing it with the new one.
 */
static void
spa_condense_indirect_complete_sync(void *arg, dmu_tx_t *tx)
{
	spa_condensing_indirect_t *sci = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	spa_condensing_indirect_phys_t *scip =
	    &spa->spa_condensing_indirect_phys;
	vdev_t *vd = vdev_lookup_top(spa, scip->scip_vdev);
	vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
	objset_t *mos = spa->spa_meta_objset;
	vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
	uint64_t old_count = vdev_indirect_mapping_num_entries(old_mapping);
	uint64_t new_count =
	    vdev_indirect_mapping_num_entries(sci->sci_new_mapping);

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
	ASSERT3P(sci, ==, spa->spa_condensing_indirect);
	for (int i = 0; i < TXG_SIZE; i++) {
		ASSERT(list_is_empty(&sci->sci_new_mapping_entries[i]));
	}
	ASSERT(vic->vic_mapping_object != 0);
	ASSERT3U(vd->vdev_id, ==, scip->scip_vdev);
	ASSERT(scip->scip_next_mapping_object != 0);
	ASSERT(scip->scip_prev_obsolete_sm_object != 0);

	/*
	 * Reset vdev_indirect_mapping to refer to the new object.
	 */
	rw_enter(&vd->vdev_indirect_rwlock, RW_WRITER);
	vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
	vd->vdev_indirect_mapping = sci->sci_new_mapping;
	rw_exit(&vd->vdev_indirect_rwlock);

	sci->sci_new_mapping = NULL;
	vdev_indirect_mapping_free(mos, vic->vic_mapping_object, tx);
	vic->vic_mapping_object = scip->scip_next_mapping_object;
	scip->scip_next_mapping_object = 0;

	space_map_free_obj(mos, scip->scip_prev_obsolete_sm_object, tx);
	spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
	scip->scip_prev_obsolete_sm_object = 0;

	scip->scip_vdev = 0;

	VERIFY0(zap_remove(mos, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_CONDENSING_INDIRECT, tx));
	spa_condensing_indirect_destroy(spa->spa_condensing_indirect);
	spa->spa_condensing_indirect = NULL;

	zfs_dbgmsg("finished condense of vdev %llu in txg %llu: "
	    "new mapping object %llu has %llu entries "
	    "(was %llu entries)",
	    vd->vdev_id, dmu_tx_get_txg(tx), vic->vic_mapping_object,
	    new_count, old_count);

	vdev_config_dirty(spa->spa_root_vdev);
}

/*
 * This sync task appends entries to the new mapping object.
 */
static void
spa_condense_indirect_commit_sync(void *arg, dmu_tx_t *tx)
{
	spa_condensing_indirect_t *sci = arg;
	uint64_t txg = dmu_tx_get_txg(tx);
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT3P(sci, ==, spa->spa_condensing_indirect);

	vdev_indirect_mapping_add_entries(sci->sci_new_mapping,
	    &sci->sci_new_mapping_entries[txg & TXG_MASK], tx);
	ASSERT(list_is_empty(&sci->sci_new_mapping_entries[txg & TXG_MASK]));
}

/*
 * Open-context function to add one entry to the new mapping.  The new
 * entry will be remembered and written from syncing context.
 */
static void
spa_condense_indirect_commit_entry(spa_t *spa,
    vdev_indirect_mapping_entry_phys_t *vimep, uint32_t count)
{
	spa_condensing_indirect_t *sci = spa->spa_condensing_indirect;

	ASSERT3U(count, <, DVA_GET_ASIZE(&vimep->vimep_dst));

	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	dmu_tx_hold_space(tx, sizeof (*vimep) + sizeof (count));
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
	int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;

	/*
	 * If we are the first entry committed this txg, kick off the sync
	 * task to write to the MOS on our behalf.
	 */
	if (list_is_empty(&sci->sci_new_mapping_entries[txgoff])) {
		dsl_sync_task_nowait(dmu_tx_pool(tx),
		    spa_condense_indirect_commit_sync, sci,
		    0, ZFS_SPACE_CHECK_NONE, tx);
	}

	vdev_indirect_mapping_entry_t *vime =
	    kmem_alloc(sizeof (*vime), KM_SLEEP);
	vime->vime_mapping = *vimep;
	vime->vime_obsolete_count = count;
	list_insert_tail(&sci->sci_new_mapping_entries[txgoff], vime);

	dmu_tx_commit(tx);
}

static void
spa_condense_indirect_generate_new_mapping(vdev_t *vd,
    uint32_t *obsolete_counts, uint64_t start_index, zthr_t *zthr)
{
	spa_t *spa = vd->vdev_spa;
	uint64_t mapi = start_index;
	vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
	uint64_t old_num_entries =
	    vdev_indirect_mapping_num_entries(old_mapping);

	ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
	ASSERT3U(vd->vdev_id, ==, spa->spa_condensing_indirect_phys.scip_vdev);

	zfs_dbgmsg("starting condense of vdev %llu from index %llu",
	    (u_longlong_t)vd->vdev_id,
	    (u_longlong_t)mapi);

	while (mapi < old_num_entries) {

		if (zthr_iscancelled(zthr)) {
			zfs_dbgmsg("pausing condense of vdev %llu "
			    "at index %llu", (u_longlong_t)vd->vdev_id,
			    (u_longlong_t)mapi);
			break;
		}

		vdev_indirect_mapping_entry_phys_t *entry =
		    &old_mapping->vim_entries[mapi];
		uint64_t entry_size = DVA_GET_ASIZE(&entry->vimep_dst);
		ASSERT3U(obsolete_counts[mapi], <=, entry_size);
		if (obsolete_counts[mapi] < entry_size) {
			spa_condense_indirect_commit_entry(spa, entry,
			    obsolete_counts[mapi]);

			/*
			 * This delay may be requested for testing, debugging,
			 * or performance reasons.
			 */
			delay(zfs_condense_indirect_commit_entry_delay_ticks);
		}

		mapi++;
	}
}

/* ARGSUSED */
static boolean_t
spa_condense_indirect_thread_check(void *arg, zthr_t *zthr)
{
	spa_t *spa = arg;

	return (spa->spa_condensing_indirect != NULL);
}

/* ARGSUSED */
static int
spa_condense_indirect_thread(void *arg, zthr_t *zthr)
{
	spa_t *spa = arg;
	vdev_t *vd;

	ASSERT3P(spa->spa_condensing_indirect, !=, NULL);
	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	vd = vdev_lookup_top(spa, spa->spa_condensing_indirect_phys.scip_vdev);
	ASSERT3P(vd, !=, NULL);
	spa_config_exit(spa, SCL_VDEV, FTAG);

	spa_condensing_indirect_t *sci = spa->spa_condensing_indirect;
	spa_condensing_indirect_phys_t *scip =
	    &spa->spa_condensing_indirect_phys;
	uint32_t *counts;
	uint64_t start_index;
	vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
	space_map_t *prev_obsolete_sm = NULL;

	ASSERT3U(vd->vdev_id, ==, scip->scip_vdev);
	ASSERT(scip->scip_next_mapping_object != 0);
	ASSERT(scip->scip_prev_obsolete_sm_object != 0);
	ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);

	for (int i = 0; i < TXG_SIZE; i++) {
		/*
		 * The list must start out empty in order for the
		 * _commit_sync() sync task to be properly registered
		 * on the first call to _commit_entry(); so it's wise
		 * to double check and ensure we actually are starting
		 * with empty lists.
		 */
		ASSERT(list_is_empty(&sci->sci_new_mapping_entries[i]));
	}

	VERIFY0(space_map_open(&prev_obsolete_sm, spa->spa_meta_objset,
	    scip->scip_prev_obsolete_sm_object, 0, vd->vdev_asize, 0));
	space_map_update(prev_obsolete_sm);
	counts = vdev_indirect_mapping_load_obsolete_counts(old_mapping);
	if (prev_obsolete_sm != NULL) {
		vdev_indirect_mapping_load_obsolete_spacemap(old_mapping,
		    counts, prev_obsolete_sm);
	}
	space_map_close(prev_obsolete_sm);

	/*
	 * Generate new mapping.  Determine what index to continue from
	 * based on the max offset that we've already written in the
	 * new mapping.
	 */
	uint64_t max_offset =
	    vdev_indirect_mapping_max_offset(sci->sci_new_mapping);
	if (max_offset == 0) {
		/* We haven't written anything to the new mapping yet. */
		start_index = 0;
	} else {
		/*
		 * Pick up from where we left off. _entry_for_offset()
		 * returns a pointer into the vim_entries array. If
		 * max_offset is greater than any of the mappings
		 * contained in the table NULL will be returned and
		 * that indicates we've exhausted our iteration of the
		 * old_mapping.
		 */

		vdev_indirect_mapping_entry_phys_t *entry =
		    vdev_indirect_mapping_entry_for_offset_or_next(old_mapping,
		    max_offset);

		if (entry == NULL) {
			/*
			 * We've already written the whole new mapping.
			 * This special value will cause us to skip the
			 * generate_new_mapping step and just do the sync
			 * task to complete the condense.
			 */
			start_index = UINT64_MAX;
		} else {
			start_index = entry - old_mapping->vim_entries;
			ASSERT3U(start_index, <,
			    vdev_indirect_mapping_num_entries(old_mapping));
		}
	}

	spa_condense_indirect_generate_new_mapping(vd, counts,
	    start_index, zthr);

	vdev_indirect_mapping_free_obsolete_counts(old_mapping, counts);

	/*
	 * If the zthr has received a cancellation signal while running
	 * in generate_new_mapping() or at any point after that, then bail
	 * early.  We don't want to complete the condense if the spa is
	 * shutting down.
	 */
	if (zthr_iscancelled(zthr))
		return (0);

	VERIFY0(dsl_sync_task(spa_name(spa), NULL,
	    spa_condense_indirect_complete_sync, sci, 0,
	    ZFS_SPACE_CHECK_EXTRA_RESERVED));

	return (0);
}

/*
 * Sync task to begin the condensing process.
 */
void
spa_condense_indirect_start_sync(vdev_t *vd, dmu_tx_t *tx)
{
	spa_t *spa = vd->vdev_spa;
	spa_condensing_indirect_phys_t *scip =
	    &spa->spa_condensing_indirect_phys;

	ASSERT0(scip->scip_next_mapping_object);
	ASSERT0(scip->scip_prev_obsolete_sm_object);
	ASSERT0(scip->scip_vdev);
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_OBSOLETE_COUNTS));
	ASSERT(vdev_indirect_mapping_num_entries(vd->vdev_indirect_mapping));

	uint64_t obsolete_sm_obj = vdev_obsolete_sm_object(vd);
	ASSERT(obsolete_sm_obj != 0);

	scip->scip_vdev = vd->vdev_id;
	scip->scip_next_mapping_object =
	    vdev_indirect_mapping_alloc(spa->spa_meta_objset, tx);

	scip->scip_prev_obsolete_sm_object = obsolete_sm_obj;

	/*
	 * We don't need to allocate a new space map object, since
	 * vdev_indirect_sync_obsolete will allocate one when needed.
	 */
	space_map_close(vd->vdev_obsolete_sm);
	vd->vdev_obsolete_sm = NULL;
	VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, tx));

	VERIFY0(zap_add(spa->spa_dsl_pool->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_CONDENSING_INDIRECT, sizeof (uint64_t),
	    sizeof (*scip) / sizeof (uint64_t), scip, tx));

	ASSERT3P(spa->spa_condensing_indirect, ==, NULL);
	spa->spa_condensing_indirect = spa_condensing_indirect_create(spa);

	zfs_dbgmsg("starting condense of vdev %llu in txg %llu: "
	    "posm=%llu nm=%llu",
	    vd->vdev_id, dmu_tx_get_txg(tx),
	    (u_longlong_t)scip->scip_prev_obsolete_sm_object,
	    (u_longlong_t)scip->scip_next_mapping_object);

	zthr_wakeup(spa->spa_condense_zthr);
}
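/*
 * Descriptive summary of the condense control flow (no new interfaces,
 * just tying the functions above to the "big theory statement"):
 * spa_condense_indirect_start_sync() runs in syncing context, records the
 * previous obsolete space map and the new mapping object in
 * spa_condensing_indirect_phys, and wakes spa_condense_zthr.
 * spa_condense_indirect_thread() then runs in open context, rebuilds the
 * precise obsolete counts, and commits surviving entries via
 * spa_condense_indirect_commit_entry(), registering one
 * spa_condense_indirect_commit_sync() sync task per txg.  Finally,
 * spa_condense_indirect_complete_sync() swaps in the new mapping.  If the
 * pool is exported mid-way, spa_condense_init() reloads the state on
 * import and the zthr resumes from the last committed offset.
 */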
/*
 * Sync to the given vdev's obsolete space map any segments that are no longer
 * referenced as of the given txg.
 *
 * If the obsolete space map doesn't exist yet, create and open it.
 */
void
vdev_indirect_sync_obsolete(vdev_t *vd, dmu_tx_t *tx)
{
	spa_t *spa = vd->vdev_spa;
	vdev_indirect_config_t *vic = &vd->vdev_indirect_config;

	ASSERT3U(vic->vic_mapping_object, !=, 0);
	ASSERT(range_tree_space(vd->vdev_obsolete_segments) > 0);
	ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops);
	ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS));

	if (vdev_obsolete_sm_object(vd) == 0) {
		uint64_t obsolete_sm_object =
		    space_map_alloc(spa->spa_meta_objset,
		    vdev_standard_sm_blksz, tx);

		ASSERT(vd->vdev_top_zap != 0);
		VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
		    VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM,
		    sizeof (obsolete_sm_object), 1, &obsolete_sm_object, tx));
		ASSERT3U(vdev_obsolete_sm_object(vd), !=, 0);

		spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
		VERIFY0(space_map_open(&vd->vdev_obsolete_sm,
		    spa->spa_meta_objset, obsolete_sm_object,
		    0, vd->vdev_asize, 0));
		space_map_update(vd->vdev_obsolete_sm);
	}

	ASSERT(vd->vdev_obsolete_sm != NULL);
	ASSERT3U(vdev_obsolete_sm_object(vd), ==,
	    space_map_object(vd->vdev_obsolete_sm));

	space_map_write(vd->vdev_obsolete_sm,
	    vd->vdev_obsolete_segments, SM_ALLOC, SM_NO_VDEVID, tx);
	space_map_update(vd->vdev_obsolete_sm);
	range_tree_vacate(vd->vdev_obsolete_segments, NULL, NULL);
}

int
spa_condense_init(spa_t *spa)
{
	int error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_CONDENSING_INDIRECT, sizeof (uint64_t),
	    sizeof (spa->spa_condensing_indirect_phys) / sizeof (uint64_t),
	    &spa->spa_condensing_indirect_phys);
	if (error == 0) {
		if (spa_writeable(spa)) {
			spa->spa_condensing_indirect =
			    spa_condensing_indirect_create(spa);
		}
		return (0);
	} else if (error == ENOENT) {
		return (0);
	} else {
		return (error);
	}
}

void
spa_condense_fini(spa_t *spa)
{
	if (spa->spa_condensing_indirect != NULL) {
		spa_condensing_indirect_destroy(spa->spa_condensing_indirect);
		spa->spa_condensing_indirect = NULL;
	}
}

void
spa_start_indirect_condensing_thread(spa_t *spa)
{
	ASSERT3P(spa->spa_condense_zthr, ==, NULL);
	spa->spa_condense_zthr = zthr_create(spa_condense_indirect_thread_check,
	    spa_condense_indirect_thread, spa);
}

/*
 * Gets the obsolete spacemap object from the vdev's ZAP.
 * Returns the spacemap object, or 0 if it wasn't in the ZAP or the ZAP
 * doesn't exist yet.
 */
int
vdev_obsolete_sm_object(vdev_t *vd)
{
	ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
	if (vd->vdev_top_zap == 0) {
		return (0);
	}

	uint64_t sm_obj = 0;
	int err = zap_lookup(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, sizeof (sm_obj), 1, &sm_obj);

	ASSERT(err == 0 || err == ENOENT);

	return (sm_obj);
}

boolean_t
vdev_obsolete_counts_are_precise(vdev_t *vd)
{
	ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
	if (vd->vdev_top_zap == 0) {
		return (B_FALSE);
	}

	uint64_t val = 0;
	int err = zap_lookup(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, sizeof (val), 1, &val);

	ASSERT(err == 0 || err == ENOENT);

	return (val != 0);
}

/* ARGSUSED */
static void
vdev_indirect_close(vdev_t *vd)
{
}

/* ARGSUSED */
static int
vdev_indirect_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
    uint64_t *ashift)
{
	*psize = *max_psize = vd->vdev_asize +
	    VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
	*ashift = vd->vdev_ashift;
	return (0);
}

typedef struct remap_segment {
	vdev_t *rs_vd;
	uint64_t rs_offset;
	uint64_t rs_asize;
	uint64_t rs_split_offset;
	list_node_t rs_node;
} remap_segment_t;

remap_segment_t *
rs_alloc(vdev_t *vd, uint64_t offset, uint64_t asize, uint64_t split_offset)
{
	remap_segment_t *rs = kmem_alloc(sizeof (remap_segment_t), KM_SLEEP);
	rs->rs_vd = vd;
	rs->rs_offset = offset;
	rs->rs_asize = asize;
	rs->rs_split_offset = split_offset;
	return (rs);
}

/*
 * Given an indirect vdev and an extent on that vdev, it duplicates the
 * physical entries of the indirect mapping that correspond to the extent
 * to a new array and returns a pointer to it.  In addition, copied_entries
 * is populated with the number of mapping entries that were duplicated.
 *
 * Note that the function assumes that the caller holds vdev_indirect_rwlock.
 * This ensures that the mapping won't change due to condensing as we
 * copy over its contents.
 *
 * Finally, since we are doing an allocation, it is up to the caller to
 * free the array allocated in this function.
 */
vdev_indirect_mapping_entry_phys_t *
vdev_indirect_mapping_duplicate_adjacent_entries(vdev_t *vd, uint64_t offset,
    uint64_t asize, uint64_t *copied_entries)
{
	vdev_indirect_mapping_entry_phys_t *duplicate_mappings = NULL;
	vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
	uint64_t entries = 0;

	ASSERT(RW_READ_HELD(&vd->vdev_indirect_rwlock));

	vdev_indirect_mapping_entry_phys_t *first_mapping =
	    vdev_indirect_mapping_entry_for_offset(vim, offset);
	ASSERT3P(first_mapping, !=, NULL);

	vdev_indirect_mapping_entry_phys_t *m = first_mapping;
	while (asize > 0) {
		uint64_t size = DVA_GET_ASIZE(&m->vimep_dst);

		ASSERT3U(offset, >=, DVA_MAPPING_GET_SRC_OFFSET(m));
		ASSERT3U(offset, <, DVA_MAPPING_GET_SRC_OFFSET(m) + size);

		uint64_t inner_offset = offset - DVA_MAPPING_GET_SRC_OFFSET(m);
		uint64_t inner_size = MIN(asize, size - inner_offset);

		offset += inner_size;
		asize -= inner_size;
		entries++;
		m++;
	}

	size_t copy_length = entries * sizeof (*first_mapping);
	duplicate_mappings = kmem_alloc(copy_length, KM_SLEEP);
	bcopy(first_mapping, duplicate_mappings, copy_length);
	*copied_entries = entries;

	return (duplicate_mappings);
}

/*
 * Goes through the relevant indirect mappings until it hits a concrete vdev
 * and issues the callback.  On the way to the concrete vdev, if any other
 * indirect vdevs are encountered, then the callback will also be called on
 * each of those indirect vdevs.  For example, if the segment is mapped to
 * segment A on indirect vdev 1, and then segment A on indirect vdev 1 is
 * mapped to segment B on concrete vdev 2, then the callback will be called on
 * both vdev 1 and vdev 2.
 *
 * While the callback passed to vdev_indirect_remap() is called on every vdev
 * the function encounters, certain callbacks only care about concrete vdevs.
 * These types of callbacks should return immediately and explicitly when they
 * are called on an indirect vdev.
 *
 * Because there is a possibility that a DVA section in the indirect device
 * has been split into multiple sections in our mapping, we keep track
 * of the relevant contiguous segments of the new location (remap_segment_t)
 * in a stack.  This way we can call the callback for each of the new sections
 * created by a single section of the indirect device.  Note though, that in
 * this scenario the callbacks in each split block won't occur in-order in
 * terms of offset, so callers should not make any assumptions about that.
 *
 * For callbacks that don't handle split blocks and immediately return when
 * they encounter them (as is the case for remap_blkptr_cb), the caller can
 * assume that its callback will be applied from the first indirect vdev
 * encountered to the last one and then the concrete vdev, in that order.
 */
static void
vdev_indirect_remap(vdev_t *vd, uint64_t offset, uint64_t asize,
    void (*func)(uint64_t, vdev_t *, uint64_t, uint64_t, void *), void *arg)
{
	list_t stack;
	spa_t *spa = vd->vdev_spa;

	list_create(&stack, sizeof (remap_segment_t),
	    offsetof(remap_segment_t, rs_node));

	for (remap_segment_t *rs = rs_alloc(vd, offset, asize, 0);
	    rs != NULL; rs = list_remove_head(&stack)) {
		vdev_t *v = rs->rs_vd;
		uint64_t num_entries = 0;

		ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
		ASSERT(rs->rs_asize > 0);

		/*
		 * Note: As this function can be called from open context
		 * (e.g. zio_read()), we need the following rwlock to
		 * prevent the mapping from being changed by condensing.
		 *
		 * So we grab the lock and we make a copy of the entries
		 * that are relevant to the extent that we are working on.
		 * Once that is done, we drop the lock and iterate over
		 * our copy of the mapping.  Once we are done with the
		 * remap segment and we free it, we also free our copy of
		 * the indirect mapping entries that are relevant to it.
		 *
		 * This way we don't need to wait until the function is
		 * finished with a segment, to condense it.  In addition, we
		 * don't need a recursive rwlock for the case that a call to
		 * vdev_indirect_remap() needs to call itself (through the
		 * codepath of its callback) for the same vdev in the middle
		 * of its execution.
		 */
		rw_enter(&v->vdev_indirect_rwlock, RW_READER);
		vdev_indirect_mapping_t *vim = v->vdev_indirect_mapping;
		ASSERT3P(vim, !=, NULL);

		vdev_indirect_mapping_entry_phys_t *mapping =
		    vdev_indirect_mapping_duplicate_adjacent_entries(v,
		    rs->rs_offset, rs->rs_asize, &num_entries);
		ASSERT3P(mapping, !=, NULL);
		ASSERT3U(num_entries, >, 0);
		rw_exit(&v->vdev_indirect_rwlock);

		for (uint64_t i = 0; i < num_entries; i++) {
			/*
			 * Note: the vdev_indirect_mapping can not change
			 * while we are running.  It only changes while the
			 * removal is in progress, and then only from syncing
			 * context.  While a removal is in progress, this
			 * function is only called for frees, which also only
			 * happen from syncing context.
			 */
			vdev_indirect_mapping_entry_phys_t *m = &mapping[i];

			ASSERT3P(m, !=, NULL);
			ASSERT3U(rs->rs_asize, >, 0);

			uint64_t size = DVA_GET_ASIZE(&m->vimep_dst);
			uint64_t dst_offset = DVA_GET_OFFSET(&m->vimep_dst);
			uint64_t dst_vdev = DVA_GET_VDEV(&m->vimep_dst);

			ASSERT3U(rs->rs_offset, >=,
			    DVA_MAPPING_GET_SRC_OFFSET(m));
			ASSERT3U(rs->rs_offset, <,
			    DVA_MAPPING_GET_SRC_OFFSET(m) + size);
			ASSERT3U(dst_vdev, !=, v->vdev_id);

			uint64_t inner_offset = rs->rs_offset -
			    DVA_MAPPING_GET_SRC_OFFSET(m);
			uint64_t inner_size =
			    MIN(rs->rs_asize, size - inner_offset);

			vdev_t *dst_v = vdev_lookup_top(spa, dst_vdev);
			ASSERT3P(dst_v, !=, NULL);

			if (dst_v->vdev_ops == &vdev_indirect_ops) {
				list_insert_head(&stack,
				    rs_alloc(dst_v, dst_offset + inner_offset,
				    inner_size, rs->rs_split_offset));
			}

			if ((zfs_flags & ZFS_DEBUG_INDIRECT_REMAP) &&
			    IS_P2ALIGNED(inner_size, 2 * SPA_MINBLOCKSIZE)) {
				/*
				 * Note: This clause exists solely for testing
				 * purposes.  We use it to ensure that split
				 * blocks work and that the callbacks using
				 * them yield the same result if issued in
				 * reverse order.
				 */
				uint64_t inner_half = inner_size / 2;

				func(rs->rs_split_offset + inner_half, dst_v,
				    dst_offset + inner_offset + inner_half,
				    inner_half, arg);

				func(rs->rs_split_offset, dst_v,
				    dst_offset + inner_offset,
				    inner_half, arg);
			} else {
				func(rs->rs_split_offset, dst_v,
				    dst_offset + inner_offset,
				    inner_size, arg);
			}

			rs->rs_offset += inner_size;
			rs->rs_asize -= inner_size;
			rs->rs_split_offset += inner_size;
		}
		VERIFY0(rs->rs_asize);

		kmem_free(mapping, num_entries * sizeof (*mapping));
		kmem_free(rs, sizeof (remap_segment_t));
	}
	list_destroy(&stack);
}

static void
vdev_indirect_child_io_done(zio_t *zio)
{
	zio_t *pio = zio->io_private;

	mutex_enter(&pio->io_lock);
	pio->io_error = zio_worst_error(pio->io_error, zio->io_error);
	mutex_exit(&pio->io_lock);

	abd_put(zio->io_abd);
}

/*
 * This is a callback for vdev_indirect_remap() which allocates an
 * indirect_split_t for each split segment and adds it to iv_splits.
 */
static void
vdev_indirect_gather_splits(uint64_t split_offset, vdev_t *vd, uint64_t offset,
    uint64_t size, void *arg)
{
	zio_t *zio = arg;
	indirect_vsd_t *iv = zio->io_vsd;

	ASSERT3P(vd, !=, NULL);

	if (vd->vdev_ops == &vdev_indirect_ops)
		return;

	int n = 1;
	if (vd->vdev_ops == &vdev_mirror_ops)
		n = vd->vdev_children;

	indirect_split_t *is =
	    kmem_zalloc(offsetof(indirect_split_t, is_child[n]), KM_SLEEP);

	is->is_children = n;
	is->is_size = size;
	is->is_split_offset = split_offset;
	is->is_target_offset = offset;
	is->is_vdev = vd;

	/*
	 * Note that we only consider multiple copies of the data for
	 * *mirror* vdevs.  We don't for "replacing" or "spare" vdevs, even
	 * though they use the same ops as mirror, because there's only one
	 * "good" copy under the replacing/spare.
	 */
	if (vd->vdev_ops == &vdev_mirror_ops) {
		for (int i = 0; i < n; i++) {
			is->is_child[i].ic_vdev = vd->vdev_child[i];
		}
	} else {
		is->is_child[0].ic_vdev = vd;
	}

	list_insert_tail(&iv->iv_splits, is);
}

static void
vdev_indirect_read_split_done(zio_t *zio)
{
	indirect_child_t *ic = zio->io_private;

	if (zio->io_error != 0) {
		/*
		 * Clear ic_data to indicate that we do not have data for this
		 * child.
		 */
		abd_free(ic->ic_data);
		ic->ic_data = NULL;
	}
}

/*
 * Issue reads for all copies (mirror children) of all splits.
 */
static void
vdev_indirect_read_all(zio_t *zio)
{
	indirect_vsd_t *iv = zio->io_vsd;

	for (indirect_split_t *is = list_head(&iv->iv_splits);
	    is != NULL; is = list_next(&iv->iv_splits, is)) {
		for (int i = 0; i < is->is_children; i++) {
			indirect_child_t *ic = &is->is_child[i];

			if (!vdev_readable(ic->ic_vdev))
				continue;

			/*
			 * Note, we may read from a child whose DTL
			 * indicates that the data may not be present here.
			 * While this might result in a few i/os that will
			 * likely return incorrect data, it simplifies the
			 * code since we can treat scrub and resilver
			 * identically.  (The incorrect data will be
			 * detected and ignored when we verify the
			 * checksum.)
			 */

			ic->ic_data = abd_alloc_sametype(zio->io_abd,
			    is->is_size);

			zio_nowait(zio_vdev_child_io(zio, NULL,
			    ic->ic_vdev, is->is_target_offset, ic->ic_data,
			    is->is_size, zio->io_type, zio->io_priority, 0,
			    vdev_indirect_read_split_done, ic));
		}
	}
	iv->iv_reconstruct = B_TRUE;
}

static void
vdev_indirect_io_start(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	indirect_vsd_t *iv = kmem_zalloc(sizeof (*iv), KM_SLEEP);
	list_create(&iv->iv_splits,
	    sizeof (indirect_split_t), offsetof(indirect_split_t, is_node));

	zio->io_vsd = iv;
	zio->io_vsd_ops = &vdev_indirect_vsd_ops;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
	if (zio->io_type != ZIO_TYPE_READ) {
		ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
		/*
		 * Note: this code can handle other kinds of writes,
		 * but we don't expect them.
		 */
		ASSERT((zio->io_flags & (ZIO_FLAG_SELF_HEAL |
		    ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE)) != 0);
	}

	vdev_indirect_remap(zio->io_vd, zio->io_offset, zio->io_size,
	    vdev_indirect_gather_splits, zio);

	indirect_split_t *first = list_head(&iv->iv_splits);
	if (first->is_size == zio->io_size) {
		/*
		 * This is not a split block; we are pointing to the entire
		 * data, which will checksum the same as the original data.
		 * Pass the BP down so that the child i/o can verify the
		 * checksum, and try a different location if available
		 * (e.g. on a mirror).
		 *
		 * While this special case could be handled the same as the
		 * general (split block) case, doing it this way ensures
		 * that the vast majority of blocks on indirect vdevs
		 * (which are not split) are handled identically to blocks
		 * on non-indirect vdevs.  This allows us to be less strict
		 * about performance in the general (but rare) case.
		 */
		ASSERT0(first->is_split_offset);
		ASSERT3P(list_next(&iv->iv_splits, first), ==, NULL);
		zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
		    first->is_vdev, first->is_target_offset,
		    abd_get_offset(zio->io_abd, 0),
		    zio->io_size, zio->io_type, zio->io_priority, 0,
		    vdev_indirect_child_io_done, zio));
	} else {
		iv->iv_split_block = B_TRUE;
		if (zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER)) {
			/*
			 * Read all copies.  Note that for simplicity,
			 * we don't bother consulting the DTL in the
			 * resilver case.
			 */
			vdev_indirect_read_all(zio);
		} else {
			/*
			 * Read one copy of each split segment, from the
			 * top-level vdev.  Since we don't know the
			 * checksum of each split individually, the child
			 * zio can't ensure that we get the right data.
			 * E.g. if it's a mirror, it will just read from a
			 * random (healthy) leaf vdev.  We have to verify
			 * the checksum in vdev_indirect_io_done().
			 */
			for (indirect_split_t *is = list_head(&iv->iv_splits);
			    is != NULL; is = list_next(&iv->iv_splits, is)) {
				zio_nowait(zio_vdev_child_io(zio, NULL,
				    is->is_vdev, is->is_target_offset,
				    abd_get_offset(zio->io_abd,
				    is->is_split_offset),
				    is->is_size, zio->io_type,
				    zio->io_priority, 0,
				    vdev_indirect_child_io_done, zio));
			}
		}
	}

	zio_execute(zio);
}

/*
 * Report a checksum error for a child.
 */
static void
vdev_indirect_checksum_error(zio_t *zio,
    indirect_split_t *is, indirect_child_t *ic)
{
	vdev_t *vd = ic->ic_vdev;

	if (zio->io_flags & ZIO_FLAG_SPECULATIVE)
		return;

	mutex_enter(&vd->vdev_stat_lock);
	vd->vdev_stat.vs_checksum_errors++;
	mutex_exit(&vd->vdev_stat_lock);

	zio_bad_cksum_t zbc = { 0 };
	void *bad_buf = abd_borrow_buf_copy(ic->ic_data, is->is_size);
	abd_t *good_abd = is->is_child[is->is_good_child].ic_data;
	void *good_buf = abd_borrow_buf_copy(good_abd, is->is_size);
	zfs_ereport_post_checksum(zio->io_spa, vd, zio,
	    is->is_target_offset, is->is_size, good_buf, bad_buf, &zbc);
	abd_return_buf(ic->ic_data, bad_buf, is->is_size);
	abd_return_buf(good_abd, good_buf, is->is_size);
}

/*
 * Issue repair i/os for any incorrect copies.  We do this by comparing
 * each split segment's correct data (is_good_child's ic_data) with each
 * other copy of the data.  If they differ, then we overwrite the bad data
 * with the good copy.  Note that we do this without regard for the DTL's,
 * which simplifies this code and also issues the optimal number of writes
 * (based on which copies actually read bad data, as opposed to which we
 * think might be wrong).  For the same reason, we always use
 * ZIO_FLAG_SELF_HEAL, to bypass the DTL check in zio_vdev_io_start().
 */
static void
vdev_indirect_repair(zio_t *zio)
{
	indirect_vsd_t *iv = zio->io_vsd;

	enum zio_flag flags = ZIO_FLAG_IO_REPAIR;

	if (!(zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER)))
		flags |= ZIO_FLAG_SELF_HEAL;

	if (!spa_writeable(zio->io_spa))
		return;

	for (indirect_split_t *is = list_head(&iv->iv_splits);
	    is != NULL; is = list_next(&iv->iv_splits, is)) {
		indirect_child_t *good_child = &is->is_child[is->is_good_child];

		for (int c = 0; c < is->is_children; c++) {
			indirect_child_t *ic = &is->is_child[c];
			if (ic == good_child)
				continue;
			if (ic->ic_data == NULL)
				continue;
			if (abd_cmp(good_child->ic_data, ic->ic_data,
			    is->is_size) == 0)
				continue;

			zio_nowait(zio_vdev_child_io(zio, NULL,
			    ic->ic_vdev, is->is_target_offset,
			    good_child->ic_data, is->is_size,
			    ZIO_TYPE_WRITE, ZIO_PRIORITY_ASYNC_WRITE,
			    ZIO_FLAG_IO_REPAIR | ZIO_FLAG_SELF_HEAL,
			    NULL, NULL));

			vdev_indirect_checksum_error(zio, is, ic);
		}
	}
}

/*
 * Report checksum errors on all children that we read from.
 */
static void
vdev_indirect_all_checksum_errors(zio_t *zio)
{
	indirect_vsd_t *iv = zio->io_vsd;

	if (zio->io_flags & ZIO_FLAG_SPECULATIVE)
		return;

	for (indirect_split_t *is = list_head(&iv->iv_splits);
	    is != NULL; is = list_next(&iv->iv_splits, is)) {
		for (int c = 0; c < is->is_children; c++) {
			indirect_child_t *ic = &is->is_child[c];

			if (ic->ic_data == NULL)
				continue;

			vdev_t *vd = ic->ic_vdev;

			mutex_enter(&vd->vdev_stat_lock);
			vd->vdev_stat.vs_checksum_errors++;
			mutex_exit(&vd->vdev_stat_lock);

			zfs_ereport_post_checksum(zio->io_spa, vd, zio,
			    is->is_target_offset, is->is_size,
			    NULL, NULL, NULL);
		}
	}
}

/*
 * This function is called when we have read all copies of the data and need
 * to try to find a combination of copies that gives us the right checksum.
 *
 * If we pointed to any mirror vdevs, this effectively does the job of the
 * mirror.  The mirror vdev code can't do its own job because we don't know
 * the checksum of each split segment individually.  We have to try every
 * combination of copies of split segments, until we find one that checksums
 * correctly.  (Or until we have tried all combinations, or have tried
 * 2^zfs_reconstruct_indirect_segments_max combinations.  In these cases we
 * set io_error to ECKSUM to propagate the error up to the user.)
 *
 * For example, if we have 3 segments in the split, and each points to a
 * 2-way mirror, we will have the following pieces of data:
 *
 *       |     mirror child
 * split |     [0]        [1]
 * ======|=====================
 *   A   |  data_A_0   data_A_1
 *   B   |  data_B_0   data_B_1
 *   C   |  data_C_0   data_C_1
 *
 * We will try the following (mirror children)^(number of splits) (2^3=8)
 * combinations, which is similar to bitwise-little-endian counting in
 * binary.  In general each "digit" corresponds to a split segment, and the
 * base of each digit is is_children, which can be different for each
 * digit.
 *
 * "low bit"           "high bit"
 *     v                   v
 * data_A_0 data_B_0 data_C_0
 * data_A_1 data_B_0 data_C_0
 * data_A_0 data_B_1 data_C_0
 * data_A_1 data_B_1 data_C_0
 * data_A_0 data_B_0 data_C_1
 * data_A_1 data_B_0 data_C_1
 * data_A_0 data_B_1 data_C_1
 * data_A_1 data_B_1 data_C_1
 *
 * Note that the split segments may be on the same or different top-level
 * vdevs.  In either case, we try lots of combinations (see
 * zfs_reconstruct_indirect_segments_max).  This ensures that if a mirror has
 * small silent errors on all of its children, we can still reconstruct the
 * correct data, as long as those errors are at sufficiently-separated
 * offsets (specifically, separated by the largest block size - default of
 * 128KB, but up to 16MB).
 */
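/*
 * Supplementary example (hypothetical split, not from the table above):
 * with two split segments where the first maps to a plain vdev
 * (is_children == 1) and the second to a 3-way mirror (is_children == 3),
 * the deterministic walk below visits 1 * 3 = 3 combinations:
 *
 *   data_A_0 data_B_0
 *   data_A_0 data_B_1
 *   data_A_0 data_B_2
 *
 * i.e. the first "digit" never advances because its base is 1, while the
 * second digit cycles through its three children before the walk ends.
 */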
static void
vdev_indirect_reconstruct_io_done(zio_t *zio)
{
	indirect_vsd_t *iv = zio->io_vsd;
	uint64_t attempts = 0;
	uint64_t attempts_max = 1ULL << zfs_reconstruct_indirect_segments_max;
	int segments = 0;

	for (indirect_split_t *is = list_head(&iv->iv_splits);
	    is != NULL; is = list_next(&iv->iv_splits, is))
		segments++;

	for (;;) {
		/* copy data from splits to main zio */
		int ret;
		for (indirect_split_t *is = list_head(&iv->iv_splits);
		    is != NULL; is = list_next(&iv->iv_splits, is)) {

			/*
			 * If this child failed, its ic_data will be NULL.
			 * Skip this combination.
			 */
			if (is->is_child[is->is_good_child].ic_data == NULL) {
				ret = EIO;
				goto next;
			}

			abd_copy_off(zio->io_abd,
			    is->is_child[is->is_good_child].ic_data,
			    is->is_split_offset, 0, is->is_size);
		}

		/* See if this checksum matches. */
		zio_bad_cksum_t zbc;
		ret = zio_checksum_error(zio, &zbc);
		if (ret == 0) {
			/* Found a matching checksum.  Issue repair i/os. */
			vdev_indirect_repair(zio);
			zio_checksum_verified(zio);
			return;
		}

		/*
		 * Checksum failed; try a different combination of split
		 * children.
		 */
		boolean_t more;
next:
		more = B_FALSE;
		if (segments <= zfs_reconstruct_indirect_segments_max) {
			/*
			 * There are relatively few segments, so
			 * deterministically check all combinations.  We do
			 * this by adding one to the first split's
			 * good_child.  If it overflows, then "carry over" to
			 * the next split (like counting in base is_children,
			 * but each digit can have a different base).
			 */
			for (indirect_split_t *is = list_head(&iv->iv_splits);
			    is != NULL; is = list_next(&iv->iv_splits, is)) {
				is->is_good_child++;
				if (is->is_good_child < is->is_children) {
					more = B_TRUE;
					break;
				}
				is->is_good_child = 0;
			}
		} else if (++attempts < attempts_max) {
			/*
			 * There are too many combinations to try all of them
			 * in a reasonable amount of time, so try a fixed
			 * number of random combinations, after which we'll
			 * consider the block unrecoverable.
			 */
			for (indirect_split_t *is = list_head(&iv->iv_splits);
			    is != NULL; is = list_next(&iv->iv_splits, is)) {
				is->is_good_child =
				    spa_get_random(is->is_children);
			}
			more = B_TRUE;
		}
		if (!more) {
			/* All combinations failed. */
			zio->io_error = ret;
			vdev_indirect_all_checksum_errors(zio);
			zio_checksum_verified(zio);
			return;
		}
	}
}

static void
vdev_indirect_io_done(zio_t *zio)
{
	indirect_vsd_t *iv = zio->io_vsd;

	if (iv->iv_reconstruct) {
		/*
		 * We have read all copies of the data (e.g. from mirrors),
		 * either because this was a scrub/resilver, or because the
		 * one-copy read didn't checksum correctly.
		 */
		vdev_indirect_reconstruct_io_done(zio);
		return;
	}

	if (!iv->iv_split_block) {
		/*
		 * This was not a split block, so we passed the BP down,
		 * and the checksum was handled by the (one) child zio.
		 */
		return;
	}

	zio_bad_cksum_t zbc;
	int ret = zio_checksum_error(zio, &zbc);
	if (ret == 0) {
		zio_checksum_verified(zio);
		return;
	}

	/*
	 * The checksum didn't match.  Read all copies of all splits, and
	 * then we will try to reconstruct.  The next time
	 * vdev_indirect_io_done() is called, iv_reconstruct will be set.
	 */
	vdev_indirect_read_all(zio);

	zio_vdev_io_redone(zio);
}

vdev_ops_t vdev_indirect_ops = {
	vdev_indirect_open,
	vdev_indirect_close,
	vdev_default_asize,
	vdev_indirect_io_start,
	vdev_indirect_io_done,
	NULL,
	NULL,
	NULL,
	vdev_indirect_remap,
	NULL,
	VDEV_TYPE_INDIRECT,	/* name of this vdev type */
	B_FALSE			/* leaf vdev */
};