1 /* 2 * CDDL HEADER START 3 * 4 * This file and its contents are supplied under the terms of the 5 * Common Development and Distribution License ("CDDL"), version 1.0. 6 * You may only use this file in accordance with the terms of version 7 * 1.0 of the CDDL. 8 * 9 * A full copy of the text of the CDDL should have accompanied this 10 * source. A copy of the CDDL is also available via the Internet at 11 * http://www.illumos.org/license/CDDL. 12 * 13 * CDDL HEADER END 14 */ 15 16 /* 17 * Copyright (c) 2014, 2017 by Delphix. All rights reserved. 18 * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved. 19 * Copyright (c) 2014, 2020 by Delphix. All rights reserved. 20 */ 21 22 #include <sys/zfs_context.h> 23 #include <sys/spa.h> 24 #include <sys/spa_impl.h> 25 #include <sys/vdev_impl.h> 26 #include <sys/fs/zfs.h> 27 #include <sys/zio.h> 28 #include <sys/zio_checksum.h> 29 #include <sys/metaslab.h> 30 #include <sys/dmu.h> 31 #include <sys/vdev_indirect_mapping.h> 32 #include <sys/dmu_tx.h> 33 #include <sys/dsl_synctask.h> 34 #include <sys/zap.h> 35 #include <sys/abd.h> 36 #include <sys/zthr.h> 37 38 /* 39 * An indirect vdev corresponds to a vdev that has been removed. Since 40 * we cannot rewrite block pointers of snapshots, etc., we keep a 41 * mapping from old location on the removed device to the new location 42 * on another device in the pool and use this mapping whenever we need 43 * to access the DVA. Unfortunately, this mapping did not respect 44 * logical block boundaries when it was first created, and so a DVA on 45 * this indirect vdev may be "split" into multiple sections that each 46 * map to a different location. As a consequence, not all DVAs can be 47 * translated to an equivalent new DVA. Instead we must provide a 48 * "vdev_remap" operation that executes a callback on each contiguous 49 * segment of the new location. This function is used in multiple ways: 50 * 51 * - i/os to this vdev use the callback to determine where the 52 * data is now located, and issue child i/os for each segment's new 53 * location. 54 * 55 * - frees and claims to this vdev use the callback to free or claim 56 * each mapped segment. (Note that we don't actually need to claim 57 * log blocks on indirect vdevs, because we don't allocate to 58 * removing vdevs. However, zdb uses zio_claim() for its leak 59 * detection.) 60 */ 61 62 /* 63 * "Big theory statement" for how we mark blocks obsolete. 64 * 65 * When a block on an indirect vdev is freed or remapped, a section of 66 * that vdev's mapping may no longer be referenced (aka "obsolete"). We 67 * keep track of how much of each mapping entry is obsolete. When 68 * an entry becomes completely obsolete, we can remove it, thus reducing 69 * the memory used by the mapping. The complete picture of obsolescence 70 * is given by the following data structures, described below: 71 * - the entry-specific obsolete count 72 * - the vdev-specific obsolete spacemap 73 * - the pool-specific obsolete bpobj 74 * 75 * == On disk data structures used == 76 * 77 * We track the obsolete space for the pool using several objects. Each 78 * of these objects is created on demand and freed when no longer 79 * needed, and is assumed to be empty if it does not exist. 80 * SPA_FEATURE_OBSOLETE_COUNTS includes the count of these objects. 81 * 82 * - Each vic_mapping_object (associated with an indirect vdev) can 83 * have a vimp_counts_object. This is an array of uint32_t's 84 * with the same number of entries as the vic_mapping_object. 
When 85 * the mapping is condensed, entries from the vic_obsolete_sm_object 86 * (see below) are folded into the counts. Therefore, each 87 * obsolete_counts entry tells us the number of bytes in the 88 * corresponding mapping entry that were not referenced when the 89 * mapping was last condensed. 90 * 91 * - Each indirect or removing vdev can have a vic_obsolete_sm_object. 92 * This is a space map containing an alloc entry for every DVA that 93 * has been obsoleted since the last time this indirect vdev was 94 * condensed. We use this object in order to improve performance 95 * when marking a DVA as obsolete. Instead of modifying an arbitrary 96 * offset of the vimp_counts_object, we only need to append an entry 97 * to the end of this object. When a DVA becomes obsolete, it is 98 * added to the obsolete space map. This happens when the DVA is 99 * freed, remapped and not referenced by a snapshot, or the last 100 * snapshot referencing it is destroyed. 101 * 102 * - Each dataset can have a ds_remap_deadlist object. This is a 103 * deadlist object containing all blocks that were remapped in this 104 * dataset but referenced in a previous snapshot. Blocks can *only* 105 * appear on this list if they were remapped (dsl_dataset_block_remapped); 106 * blocks that were killed in a head dataset are put on the normal 107 * ds_deadlist and marked obsolete when they are freed. 108 * 109 * - The pool can have a dp_obsolete_bpobj. This is a list of blocks 110 * in the pool that need to be marked obsolete. When a snapshot is 111 * destroyed, we move some of the ds_remap_deadlist to the obsolete 112 * bpobj (see dsl_destroy_snapshot_handle_remaps()). We then 113 * asynchronously process the obsolete bpobj, moving its entries to 114 * the specific vdevs' obsolete space maps. 115 * 116 * == Summary of how we mark blocks as obsolete == 117 * 118 * - When freeing a block: if any DVA is on an indirect vdev, append to 119 * vic_obsolete_sm_object. 120 * - When remapping a block, add dva to ds_remap_deadlist (if prev snap 121 * references; otherwise append to vic_obsolete_sm_object). 122 * - When freeing a snapshot: move parts of ds_remap_deadlist to 123 * dp_obsolete_bpobj (same algorithm as ds_deadlist). 124 * - When syncing the spa: process dp_obsolete_bpobj, moving ranges to 125 * individual vdev's vic_obsolete_sm_object. 126 */ 127 128 /* 129 * "Big theory statement" for how we condense indirect vdevs. 130 * 131 * Condensing an indirect vdev's mapping is the process of determining 132 * the precise counts of obsolete space for each mapping entry (by 133 * integrating the obsolete spacemap into the obsolete counts) and 134 * writing out a new mapping that contains only referenced entries. 135 * 136 * We condense a vdev when we expect the mapping to shrink (see 137 * vdev_indirect_should_condense()), but only perform one condense at a 138 * time to limit the memory usage. In addition, we use a separate 139 * open-context thread (spa_condense_indirect_thread) to incrementally 140 * create the new mapping object in a way that minimizes the impact on 141 * the rest of the system. 142 * 143 * == Generating a new mapping == 144 * 145 * To generate a new mapping, we follow these steps: 146 * 147 * 1. Save the old obsolete space map and create a new mapping object 148 * (see spa_condense_indirect_start_sync()). This initializes the 149 * spa_condensing_indirect_phys with the "previous obsolete space map", 150 * which is now read only. 
Newly obsolete DVAs will be added to a
 * new (initially empty) obsolete space map, and will not be
 * considered as part of this condense operation.
 *
 * 2. Construct in memory the precise counts of obsolete space for each
 * mapping entry, by incorporating the obsolete space map into the
 * counts.  (See vdev_indirect_mapping_load_obsolete_{counts,spacemap}().)
 *
 * 3. Iterate through each mapping entry, writing to the new mapping any
 * entries that are not completely obsolete (i.e. which don't have
 * obsolete count == mapping length).  (See
 * spa_condense_indirect_generate_new_mapping().)
 *
 * 4. Destroy the old mapping object and switch over to the new one
 * (spa_condense_indirect_complete_sync).
 *
 * == Restarting from failure ==
 *
 * To restart the condense when we import/open the pool, we must start
 * at the 2nd step above: reconstruct the precise counts in memory,
 * based on the space map + counts.  Then in the 3rd step, we start
 * iterating where we left off: at vimp_max_offset of the new mapping
 * object.
 */

int zfs_condense_indirect_vdevs_enable = B_TRUE;

/*
 * Condense if at least this percent of the bytes in the mapping is
 * obsolete.  With the default of 25%, the amount of space mapped
 * will be reduced to 1% of its original size after at most 16
 * condenses.  Higher values will condense less often (causing less
 * i/o); lower values will reduce the mapping size more quickly.
 */
int zfs_indirect_condense_obsolete_pct = 25;

/*
 * Condense if the obsolete space map takes up more than this amount of
 * space on disk (logically).  This limits the amount of disk space
 * consumed by the obsolete space map; the default of 1GB is small enough
 * that we typically don't mind "wasting" it.
 */
unsigned long zfs_condense_max_obsolete_bytes = 1024 * 1024 * 1024;

/*
 * Don't bother condensing if the mapping uses less than this amount of
 * memory.  The default of 128KB is considered a "trivial" amount of
 * memory and not worth reducing.
 */
unsigned long zfs_condense_min_mapping_bytes = 128 * 1024;

/*
 * This is used by the test suite so that it can ensure that certain
 * actions happen while in the middle of a condense (which might otherwise
 * complete too quickly).  If used to reduce the performance impact of
 * condensing in production, a maximum value of 1 should be sufficient.
 */
int zfs_condense_indirect_commit_entry_delay_ms = 0;

/*
 * If an indirect split block contains more than this many possible unique
 * combinations when being reconstructed, consider it too computationally
 * expensive to check them all.  Instead, try at most this many
 * randomly-selected combinations each time the block is accessed.  This
 * allows all segment copies to participate fairly in the reconstruction
 * when all combinations cannot be checked and prevents repeated use of
 * one bad copy.
 */
int zfs_reconstruct_indirect_combinations_max = 4096;

/*
 * Enable to simulate damaged segments and validate reconstruction.  This
 * is intentionally not exposed as a module parameter.
 */
unsigned long zfs_reconstruct_indirect_damage_fraction = 0;

/*
 * The indirect_child_t represents the vdev that we will read from, when we
 * need to read all copies of the data (e.g. for scrub or reconstruction).
 * For plain (non-mirror) top-level vdevs (i.e.
is_vdev is not a mirror), 229 * ic_vdev is the same as is_vdev. However, for mirror top-level vdevs, 230 * ic_vdev is a child of the mirror. 231 */ 232 typedef struct indirect_child { 233 abd_t *ic_data; 234 vdev_t *ic_vdev; 235 236 /* 237 * ic_duplicate is NULL when the ic_data contents are unique, when it 238 * is determined to be a duplicate it references the primary child. 239 */ 240 struct indirect_child *ic_duplicate; 241 list_node_t ic_node; /* node on is_unique_child */ 242 int ic_error; /* set when a child does not contain the data */ 243 } indirect_child_t; 244 245 /* 246 * The indirect_split_t represents one mapped segment of an i/o to the 247 * indirect vdev. For non-split (contiguously-mapped) blocks, there will be 248 * only one indirect_split_t, with is_split_offset==0 and is_size==io_size. 249 * For split blocks, there will be several of these. 250 */ 251 typedef struct indirect_split { 252 list_node_t is_node; /* link on iv_splits */ 253 254 /* 255 * is_split_offset is the offset into the i/o. 256 * This is the sum of the previous splits' is_size's. 257 */ 258 uint64_t is_split_offset; 259 260 vdev_t *is_vdev; /* top-level vdev */ 261 uint64_t is_target_offset; /* offset on is_vdev */ 262 uint64_t is_size; 263 int is_children; /* number of entries in is_child[] */ 264 int is_unique_children; /* number of entries in is_unique_child */ 265 list_t is_unique_child; 266 267 /* 268 * is_good_child is the child that we are currently using to 269 * attempt reconstruction. 270 */ 271 indirect_child_t *is_good_child; 272 273 indirect_child_t is_child[1]; /* variable-length */ 274 } indirect_split_t; 275 276 /* 277 * The indirect_vsd_t is associated with each i/o to the indirect vdev. 278 * It is the "Vdev-Specific Data" in the zio_t's io_vsd. 279 */ 280 typedef struct indirect_vsd { 281 boolean_t iv_split_block; 282 boolean_t iv_reconstruct; 283 uint64_t iv_unique_combinations; 284 uint64_t iv_attempts; 285 uint64_t iv_attempts_max; 286 287 list_t iv_splits; /* list of indirect_split_t's */ 288 } indirect_vsd_t; 289 290 static void 291 vdev_indirect_map_free(zio_t *zio) 292 { 293 indirect_vsd_t *iv = zio->io_vsd; 294 295 indirect_split_t *is; 296 while ((is = list_head(&iv->iv_splits)) != NULL) { 297 for (int c = 0; c < is->is_children; c++) { 298 indirect_child_t *ic = &is->is_child[c]; 299 if (ic->ic_data != NULL) 300 abd_free(ic->ic_data); 301 } 302 list_remove(&iv->iv_splits, is); 303 304 indirect_child_t *ic; 305 while ((ic = list_head(&is->is_unique_child)) != NULL) 306 list_remove(&is->is_unique_child, ic); 307 308 list_destroy(&is->is_unique_child); 309 310 kmem_free(is, 311 offsetof(indirect_split_t, is_child[is->is_children])); 312 } 313 kmem_free(iv, sizeof (*iv)); 314 } 315 316 static const zio_vsd_ops_t vdev_indirect_vsd_ops = { 317 .vsd_free = vdev_indirect_map_free, 318 }; 319 320 /* 321 * Mark the given offset and size as being obsolete. 
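 *
 * The offset and size passed in are in the address space of the removed
 * (indirect) vdev.  The range is accumulated in vd->vdev_obsolete_segments
 * here and only written out to the on-disk obsolete space map later, by
 * vdev_indirect_sync_obsolete(), when the vdev is synced.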
322 */ 323 void 324 vdev_indirect_mark_obsolete(vdev_t *vd, uint64_t offset, uint64_t size) 325 { 326 spa_t *spa = vd->vdev_spa; 327 328 ASSERT3U(vd->vdev_indirect_config.vic_mapping_object, !=, 0); 329 ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops); 330 ASSERT(size > 0); 331 VERIFY(vdev_indirect_mapping_entry_for_offset( 332 vd->vdev_indirect_mapping, offset) != NULL); 333 334 if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) { 335 mutex_enter(&vd->vdev_obsolete_lock); 336 range_tree_add(vd->vdev_obsolete_segments, offset, size); 337 mutex_exit(&vd->vdev_obsolete_lock); 338 vdev_dirty(vd, 0, NULL, spa_syncing_txg(spa)); 339 } 340 } 341 342 /* 343 * Mark the DVA vdev_id:offset:size as being obsolete in the given tx. This 344 * wrapper is provided because the DMU does not know about vdev_t's and 345 * cannot directly call vdev_indirect_mark_obsolete. 346 */ 347 void 348 spa_vdev_indirect_mark_obsolete(spa_t *spa, uint64_t vdev_id, uint64_t offset, 349 uint64_t size, dmu_tx_t *tx) 350 { 351 vdev_t *vd = vdev_lookup_top(spa, vdev_id); 352 ASSERT(dmu_tx_is_syncing(tx)); 353 354 /* The DMU can only remap indirect vdevs. */ 355 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops); 356 vdev_indirect_mark_obsolete(vd, offset, size); 357 } 358 359 static spa_condensing_indirect_t * 360 spa_condensing_indirect_create(spa_t *spa) 361 { 362 spa_condensing_indirect_phys_t *scip = 363 &spa->spa_condensing_indirect_phys; 364 spa_condensing_indirect_t *sci = kmem_zalloc(sizeof (*sci), KM_SLEEP); 365 objset_t *mos = spa->spa_meta_objset; 366 367 for (int i = 0; i < TXG_SIZE; i++) { 368 list_create(&sci->sci_new_mapping_entries[i], 369 sizeof (vdev_indirect_mapping_entry_t), 370 offsetof(vdev_indirect_mapping_entry_t, vime_node)); 371 } 372 373 sci->sci_new_mapping = 374 vdev_indirect_mapping_open(mos, scip->scip_next_mapping_object); 375 376 return (sci); 377 } 378 379 static void 380 spa_condensing_indirect_destroy(spa_condensing_indirect_t *sci) 381 { 382 for (int i = 0; i < TXG_SIZE; i++) 383 list_destroy(&sci->sci_new_mapping_entries[i]); 384 385 if (sci->sci_new_mapping != NULL) 386 vdev_indirect_mapping_close(sci->sci_new_mapping); 387 388 kmem_free(sci, sizeof (*sci)); 389 } 390 391 boolean_t 392 vdev_indirect_should_condense(vdev_t *vd) 393 { 394 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping; 395 spa_t *spa = vd->vdev_spa; 396 397 ASSERT(dsl_pool_sync_context(spa->spa_dsl_pool)); 398 399 if (!zfs_condense_indirect_vdevs_enable) 400 return (B_FALSE); 401 402 /* 403 * We can only condense one indirect vdev at a time. 404 */ 405 if (spa->spa_condensing_indirect != NULL) 406 return (B_FALSE); 407 408 if (spa_shutting_down(spa)) 409 return (B_FALSE); 410 411 /* 412 * The mapping object size must not change while we are 413 * condensing, so we can only condense indirect vdevs 414 * (not vdevs that are still in the middle of being removed). 415 */ 416 if (vd->vdev_ops != &vdev_indirect_ops) 417 return (B_FALSE); 418 419 /* 420 * If nothing new has been marked obsolete, there is no 421 * point in condensing. 
422 */ 423 uint64_t obsolete_sm_obj __maybe_unused; 424 ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_obj)); 425 if (vd->vdev_obsolete_sm == NULL) { 426 ASSERT0(obsolete_sm_obj); 427 return (B_FALSE); 428 } 429 430 ASSERT(vd->vdev_obsolete_sm != NULL); 431 432 ASSERT3U(obsolete_sm_obj, ==, space_map_object(vd->vdev_obsolete_sm)); 433 434 uint64_t bytes_mapped = vdev_indirect_mapping_bytes_mapped(vim); 435 uint64_t bytes_obsolete = space_map_allocated(vd->vdev_obsolete_sm); 436 uint64_t mapping_size = vdev_indirect_mapping_size(vim); 437 uint64_t obsolete_sm_size = space_map_length(vd->vdev_obsolete_sm); 438 439 ASSERT3U(bytes_obsolete, <=, bytes_mapped); 440 441 /* 442 * If a high percentage of the bytes that are mapped have become 443 * obsolete, condense (unless the mapping is already small enough). 444 * This has a good chance of reducing the amount of memory used 445 * by the mapping. 446 */ 447 if (bytes_obsolete * 100 / bytes_mapped >= 448 zfs_indirect_condense_obsolete_pct && 449 mapping_size > zfs_condense_min_mapping_bytes) { 450 zfs_dbgmsg("should condense vdev %llu because obsolete " 451 "spacemap covers %d%% of %lluMB mapping", 452 (u_longlong_t)vd->vdev_id, 453 (int)(bytes_obsolete * 100 / bytes_mapped), 454 (u_longlong_t)bytes_mapped / 1024 / 1024); 455 return (B_TRUE); 456 } 457 458 /* 459 * If the obsolete space map takes up too much space on disk, 460 * condense in order to free up this disk space. 461 */ 462 if (obsolete_sm_size >= zfs_condense_max_obsolete_bytes) { 463 zfs_dbgmsg("should condense vdev %llu because obsolete sm " 464 "length %lluMB >= max size %lluMB", 465 (u_longlong_t)vd->vdev_id, 466 (u_longlong_t)obsolete_sm_size / 1024 / 1024, 467 (u_longlong_t)zfs_condense_max_obsolete_bytes / 468 1024 / 1024); 469 return (B_TRUE); 470 } 471 472 return (B_FALSE); 473 } 474 475 /* 476 * This sync task completes (finishes) a condense, deleting the old 477 * mapping and replacing it with the new one. 478 */ 479 static void 480 spa_condense_indirect_complete_sync(void *arg, dmu_tx_t *tx) 481 { 482 spa_condensing_indirect_t *sci = arg; 483 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 484 spa_condensing_indirect_phys_t *scip = 485 &spa->spa_condensing_indirect_phys; 486 vdev_t *vd = vdev_lookup_top(spa, scip->scip_vdev); 487 vdev_indirect_config_t *vic = &vd->vdev_indirect_config; 488 objset_t *mos = spa->spa_meta_objset; 489 vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping; 490 uint64_t old_count = vdev_indirect_mapping_num_entries(old_mapping); 491 uint64_t new_count = 492 vdev_indirect_mapping_num_entries(sci->sci_new_mapping); 493 494 ASSERT(dmu_tx_is_syncing(tx)); 495 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops); 496 ASSERT3P(sci, ==, spa->spa_condensing_indirect); 497 for (int i = 0; i < TXG_SIZE; i++) { 498 ASSERT(list_is_empty(&sci->sci_new_mapping_entries[i])); 499 } 500 ASSERT(vic->vic_mapping_object != 0); 501 ASSERT3U(vd->vdev_id, ==, scip->scip_vdev); 502 ASSERT(scip->scip_next_mapping_object != 0); 503 ASSERT(scip->scip_prev_obsolete_sm_object != 0); 504 505 /* 506 * Reset vdev_indirect_mapping to refer to the new object. 
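	 * Taking vdev_indirect_rwlock as writer makes this swap safe:
	 * readers such as vdev_indirect_remap() copy the entries they need
	 * while holding the lock as reader, so once we hold it as writer no
	 * reader can still be looking at the old mapping we are closing.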
507 */ 508 rw_enter(&vd->vdev_indirect_rwlock, RW_WRITER); 509 vdev_indirect_mapping_close(vd->vdev_indirect_mapping); 510 vd->vdev_indirect_mapping = sci->sci_new_mapping; 511 rw_exit(&vd->vdev_indirect_rwlock); 512 513 sci->sci_new_mapping = NULL; 514 vdev_indirect_mapping_free(mos, vic->vic_mapping_object, tx); 515 vic->vic_mapping_object = scip->scip_next_mapping_object; 516 scip->scip_next_mapping_object = 0; 517 518 space_map_free_obj(mos, scip->scip_prev_obsolete_sm_object, tx); 519 spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx); 520 scip->scip_prev_obsolete_sm_object = 0; 521 522 scip->scip_vdev = 0; 523 524 VERIFY0(zap_remove(mos, DMU_POOL_DIRECTORY_OBJECT, 525 DMU_POOL_CONDENSING_INDIRECT, tx)); 526 spa_condensing_indirect_destroy(spa->spa_condensing_indirect); 527 spa->spa_condensing_indirect = NULL; 528 529 zfs_dbgmsg("finished condense of vdev %llu in txg %llu: " 530 "new mapping object %llu has %llu entries " 531 "(was %llu entries)", 532 vd->vdev_id, dmu_tx_get_txg(tx), vic->vic_mapping_object, 533 new_count, old_count); 534 535 vdev_config_dirty(spa->spa_root_vdev); 536 } 537 538 /* 539 * This sync task appends entries to the new mapping object. 540 */ 541 static void 542 spa_condense_indirect_commit_sync(void *arg, dmu_tx_t *tx) 543 { 544 spa_condensing_indirect_t *sci = arg; 545 uint64_t txg = dmu_tx_get_txg(tx); 546 spa_t *spa __maybe_unused = dmu_tx_pool(tx)->dp_spa; 547 548 ASSERT(dmu_tx_is_syncing(tx)); 549 ASSERT3P(sci, ==, spa->spa_condensing_indirect); 550 551 vdev_indirect_mapping_add_entries(sci->sci_new_mapping, 552 &sci->sci_new_mapping_entries[txg & TXG_MASK], tx); 553 ASSERT(list_is_empty(&sci->sci_new_mapping_entries[txg & TXG_MASK])); 554 } 555 556 /* 557 * Open-context function to add one entry to the new mapping. The new 558 * entry will be remembered and written from syncing context. 559 */ 560 static void 561 spa_condense_indirect_commit_entry(spa_t *spa, 562 vdev_indirect_mapping_entry_phys_t *vimep, uint32_t count) 563 { 564 spa_condensing_indirect_t *sci = spa->spa_condensing_indirect; 565 566 ASSERT3U(count, <, DVA_GET_ASIZE(&vimep->vimep_dst)); 567 568 dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); 569 dmu_tx_hold_space(tx, sizeof (*vimep) + sizeof (count)); 570 VERIFY0(dmu_tx_assign(tx, TXG_WAIT)); 571 int txgoff = dmu_tx_get_txg(tx) & TXG_MASK; 572 573 /* 574 * If we are the first entry committed this txg, kick off the sync 575 * task to write to the MOS on our behalf. 
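	 * The sync task is registered at most once per txg; when it runs it
	 * drains everything that has accumulated on
	 * sci_new_mapping_entries[txg & TXG_MASK] for that txg (see
	 * spa_condense_indirect_commit_sync() above).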
576 */ 577 if (list_is_empty(&sci->sci_new_mapping_entries[txgoff])) { 578 dsl_sync_task_nowait(dmu_tx_pool(tx), 579 spa_condense_indirect_commit_sync, sci, tx); 580 } 581 582 vdev_indirect_mapping_entry_t *vime = 583 kmem_alloc(sizeof (*vime), KM_SLEEP); 584 vime->vime_mapping = *vimep; 585 vime->vime_obsolete_count = count; 586 list_insert_tail(&sci->sci_new_mapping_entries[txgoff], vime); 587 588 dmu_tx_commit(tx); 589 } 590 591 static void 592 spa_condense_indirect_generate_new_mapping(vdev_t *vd, 593 uint32_t *obsolete_counts, uint64_t start_index, zthr_t *zthr) 594 { 595 spa_t *spa = vd->vdev_spa; 596 uint64_t mapi = start_index; 597 vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping; 598 uint64_t old_num_entries = 599 vdev_indirect_mapping_num_entries(old_mapping); 600 601 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops); 602 ASSERT3U(vd->vdev_id, ==, spa->spa_condensing_indirect_phys.scip_vdev); 603 604 zfs_dbgmsg("starting condense of vdev %llu from index %llu", 605 (u_longlong_t)vd->vdev_id, 606 (u_longlong_t)mapi); 607 608 while (mapi < old_num_entries) { 609 610 if (zthr_iscancelled(zthr)) { 611 zfs_dbgmsg("pausing condense of vdev %llu " 612 "at index %llu", (u_longlong_t)vd->vdev_id, 613 (u_longlong_t)mapi); 614 break; 615 } 616 617 vdev_indirect_mapping_entry_phys_t *entry = 618 &old_mapping->vim_entries[mapi]; 619 uint64_t entry_size = DVA_GET_ASIZE(&entry->vimep_dst); 620 ASSERT3U(obsolete_counts[mapi], <=, entry_size); 621 if (obsolete_counts[mapi] < entry_size) { 622 spa_condense_indirect_commit_entry(spa, entry, 623 obsolete_counts[mapi]); 624 625 /* 626 * This delay may be requested for testing, debugging, 627 * or performance reasons. 628 */ 629 hrtime_t now = gethrtime(); 630 hrtime_t sleep_until = now + MSEC2NSEC( 631 zfs_condense_indirect_commit_entry_delay_ms); 632 zfs_sleep_until(sleep_until); 633 } 634 635 mapi++; 636 } 637 } 638 639 /* ARGSUSED */ 640 static boolean_t 641 spa_condense_indirect_thread_check(void *arg, zthr_t *zthr) 642 { 643 spa_t *spa = arg; 644 645 return (spa->spa_condensing_indirect != NULL); 646 } 647 648 /* ARGSUSED */ 649 static void 650 spa_condense_indirect_thread(void *arg, zthr_t *zthr) 651 { 652 spa_t *spa = arg; 653 vdev_t *vd; 654 655 ASSERT3P(spa->spa_condensing_indirect, !=, NULL); 656 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 657 vd = vdev_lookup_top(spa, spa->spa_condensing_indirect_phys.scip_vdev); 658 ASSERT3P(vd, !=, NULL); 659 spa_config_exit(spa, SCL_VDEV, FTAG); 660 661 spa_condensing_indirect_t *sci = spa->spa_condensing_indirect; 662 spa_condensing_indirect_phys_t *scip = 663 &spa->spa_condensing_indirect_phys; 664 uint32_t *counts; 665 uint64_t start_index; 666 vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping; 667 space_map_t *prev_obsolete_sm = NULL; 668 669 ASSERT3U(vd->vdev_id, ==, scip->scip_vdev); 670 ASSERT(scip->scip_next_mapping_object != 0); 671 ASSERT(scip->scip_prev_obsolete_sm_object != 0); 672 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops); 673 674 for (int i = 0; i < TXG_SIZE; i++) { 675 /* 676 * The list must start out empty in order for the 677 * _commit_sync() sync task to be properly registered 678 * on the first call to _commit_entry(); so it's wise 679 * to double check and ensure we actually are starting 680 * with empty lists. 
681 */ 682 ASSERT(list_is_empty(&sci->sci_new_mapping_entries[i])); 683 } 684 685 VERIFY0(space_map_open(&prev_obsolete_sm, spa->spa_meta_objset, 686 scip->scip_prev_obsolete_sm_object, 0, vd->vdev_asize, 0)); 687 counts = vdev_indirect_mapping_load_obsolete_counts(old_mapping); 688 if (prev_obsolete_sm != NULL) { 689 vdev_indirect_mapping_load_obsolete_spacemap(old_mapping, 690 counts, prev_obsolete_sm); 691 } 692 space_map_close(prev_obsolete_sm); 693 694 /* 695 * Generate new mapping. Determine what index to continue from 696 * based on the max offset that we've already written in the 697 * new mapping. 698 */ 699 uint64_t max_offset = 700 vdev_indirect_mapping_max_offset(sci->sci_new_mapping); 701 if (max_offset == 0) { 702 /* We haven't written anything to the new mapping yet. */ 703 start_index = 0; 704 } else { 705 /* 706 * Pick up from where we left off. _entry_for_offset() 707 * returns a pointer into the vim_entries array. If 708 * max_offset is greater than any of the mappings 709 * contained in the table NULL will be returned and 710 * that indicates we've exhausted our iteration of the 711 * old_mapping. 712 */ 713 714 vdev_indirect_mapping_entry_phys_t *entry = 715 vdev_indirect_mapping_entry_for_offset_or_next(old_mapping, 716 max_offset); 717 718 if (entry == NULL) { 719 /* 720 * We've already written the whole new mapping. 721 * This special value will cause us to skip the 722 * generate_new_mapping step and just do the sync 723 * task to complete the condense. 724 */ 725 start_index = UINT64_MAX; 726 } else { 727 start_index = entry - old_mapping->vim_entries; 728 ASSERT3U(start_index, <, 729 vdev_indirect_mapping_num_entries(old_mapping)); 730 } 731 } 732 733 spa_condense_indirect_generate_new_mapping(vd, counts, 734 start_index, zthr); 735 736 vdev_indirect_mapping_free_obsolete_counts(old_mapping, counts); 737 738 /* 739 * If the zthr has received a cancellation signal while running 740 * in generate_new_mapping() or at any point after that, then bail 741 * early. We don't want to complete the condense if the spa is 742 * shutting down. 743 */ 744 if (zthr_iscancelled(zthr)) 745 return; 746 747 VERIFY0(dsl_sync_task(spa_name(spa), NULL, 748 spa_condense_indirect_complete_sync, sci, 0, 749 ZFS_SPACE_CHECK_EXTRA_RESERVED)); 750 } 751 752 /* 753 * Sync task to begin the condensing process. 754 */ 755 void 756 spa_condense_indirect_start_sync(vdev_t *vd, dmu_tx_t *tx) 757 { 758 spa_t *spa = vd->vdev_spa; 759 spa_condensing_indirect_phys_t *scip = 760 &spa->spa_condensing_indirect_phys; 761 762 ASSERT0(scip->scip_next_mapping_object); 763 ASSERT0(scip->scip_prev_obsolete_sm_object); 764 ASSERT0(scip->scip_vdev); 765 ASSERT(dmu_tx_is_syncing(tx)); 766 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops); 767 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_OBSOLETE_COUNTS)); 768 ASSERT(vdev_indirect_mapping_num_entries(vd->vdev_indirect_mapping)); 769 770 uint64_t obsolete_sm_obj; 771 VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_obj)); 772 ASSERT3U(obsolete_sm_obj, !=, 0); 773 774 scip->scip_vdev = vd->vdev_id; 775 scip->scip_next_mapping_object = 776 vdev_indirect_mapping_alloc(spa->spa_meta_objset, tx); 777 778 scip->scip_prev_obsolete_sm_object = obsolete_sm_obj; 779 780 /* 781 * We don't need to allocate a new space map object, since 782 * vdev_indirect_sync_obsolete will allocate one when needed. 
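	 * Closing the current obsolete space map and deleting its ZAP
	 * pointer means that DVAs obsoleted from now on will go to a fresh
	 * space map, while the old object (saved above as
	 * scip_prev_obsolete_sm_object) remains read-only input to this
	 * condense, as described in the "big theory statement" above.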
783 */ 784 space_map_close(vd->vdev_obsolete_sm); 785 vd->vdev_obsolete_sm = NULL; 786 VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap, 787 VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, tx)); 788 789 VERIFY0(zap_add(spa->spa_dsl_pool->dp_meta_objset, 790 DMU_POOL_DIRECTORY_OBJECT, 791 DMU_POOL_CONDENSING_INDIRECT, sizeof (uint64_t), 792 sizeof (*scip) / sizeof (uint64_t), scip, tx)); 793 794 ASSERT3P(spa->spa_condensing_indirect, ==, NULL); 795 spa->spa_condensing_indirect = spa_condensing_indirect_create(spa); 796 797 zfs_dbgmsg("starting condense of vdev %llu in txg %llu: " 798 "posm=%llu nm=%llu", 799 vd->vdev_id, dmu_tx_get_txg(tx), 800 (u_longlong_t)scip->scip_prev_obsolete_sm_object, 801 (u_longlong_t)scip->scip_next_mapping_object); 802 803 zthr_wakeup(spa->spa_condense_zthr); 804 } 805 806 /* 807 * Sync to the given vdev's obsolete space map any segments that are no longer 808 * referenced as of the given txg. 809 * 810 * If the obsolete space map doesn't exist yet, create and open it. 811 */ 812 void 813 vdev_indirect_sync_obsolete(vdev_t *vd, dmu_tx_t *tx) 814 { 815 spa_t *spa = vd->vdev_spa; 816 vdev_indirect_config_t *vic __maybe_unused = &vd->vdev_indirect_config; 817 818 ASSERT3U(vic->vic_mapping_object, !=, 0); 819 ASSERT(range_tree_space(vd->vdev_obsolete_segments) > 0); 820 ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops); 821 ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)); 822 823 uint64_t obsolete_sm_object; 824 VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object)); 825 if (obsolete_sm_object == 0) { 826 obsolete_sm_object = space_map_alloc(spa->spa_meta_objset, 827 zfs_vdev_standard_sm_blksz, tx); 828 829 ASSERT(vd->vdev_top_zap != 0); 830 VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap, 831 VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, 832 sizeof (obsolete_sm_object), 1, &obsolete_sm_object, tx)); 833 ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_object)); 834 ASSERT3U(obsolete_sm_object, !=, 0); 835 836 spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx); 837 VERIFY0(space_map_open(&vd->vdev_obsolete_sm, 838 spa->spa_meta_objset, obsolete_sm_object, 839 0, vd->vdev_asize, 0)); 840 } 841 842 ASSERT(vd->vdev_obsolete_sm != NULL); 843 ASSERT3U(obsolete_sm_object, ==, 844 space_map_object(vd->vdev_obsolete_sm)); 845 846 space_map_write(vd->vdev_obsolete_sm, 847 vd->vdev_obsolete_segments, SM_ALLOC, SM_NO_VDEVID, tx); 848 range_tree_vacate(vd->vdev_obsolete_segments, NULL, NULL); 849 } 850 851 int 852 spa_condense_init(spa_t *spa) 853 { 854 int error = zap_lookup(spa->spa_meta_objset, 855 DMU_POOL_DIRECTORY_OBJECT, 856 DMU_POOL_CONDENSING_INDIRECT, sizeof (uint64_t), 857 sizeof (spa->spa_condensing_indirect_phys) / sizeof (uint64_t), 858 &spa->spa_condensing_indirect_phys); 859 if (error == 0) { 860 if (spa_writeable(spa)) { 861 spa->spa_condensing_indirect = 862 spa_condensing_indirect_create(spa); 863 } 864 return (0); 865 } else if (error == ENOENT) { 866 return (0); 867 } else { 868 return (error); 869 } 870 } 871 872 void 873 spa_condense_fini(spa_t *spa) 874 { 875 if (spa->spa_condensing_indirect != NULL) { 876 spa_condensing_indirect_destroy(spa->spa_condensing_indirect); 877 spa->spa_condensing_indirect = NULL; 878 } 879 } 880 881 void 882 spa_start_indirect_condensing_thread(spa_t *spa) 883 { 884 ASSERT3P(spa->spa_condense_zthr, ==, NULL); 885 spa->spa_condense_zthr = zthr_create("z_indirect_condense", 886 spa_condense_indirect_thread_check, 887 spa_condense_indirect_thread, spa); 888 } 889 890 /* 891 * Gets the 
obsolete spacemap object from the vdev's ZAP.  On success sm_obj
 * will contain either the obsolete spacemap object or zero if none exists.
 * All other errors are returned to the caller.
 */
int
vdev_obsolete_sm_object(vdev_t *vd, uint64_t *sm_obj)
{
	ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));

	if (vd->vdev_top_zap == 0) {
		*sm_obj = 0;
		return (0);
	}

	int error = zap_lookup(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, sizeof (uint64_t), 1, sm_obj);
	if (error == ENOENT) {
		*sm_obj = 0;
		error = 0;
	}

	return (error);
}

/*
 * Gets the "obsolete counts are precise" entry from the vdev's ZAP.
 * On success are_precise will be set to reflect whether the obsolete
 * counts are precise.  All other errors are returned to the caller.
 */
int
vdev_obsolete_counts_are_precise(vdev_t *vd, boolean_t *are_precise)
{
	ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));

	if (vd->vdev_top_zap == 0) {
		*are_precise = B_FALSE;
		return (0);
	}

	uint64_t val = 0;
	int error = zap_lookup(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, sizeof (val), 1, &val);
	if (error == 0) {
		*are_precise = (val != 0);
	} else if (error == ENOENT) {
		*are_precise = B_FALSE;
		error = 0;
	}

	return (error);
}

/* ARGSUSED */
static void
vdev_indirect_close(vdev_t *vd)
{
}

/* ARGSUSED */
static int
vdev_indirect_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
    uint64_t *logical_ashift, uint64_t *physical_ashift)
{
	*psize = *max_psize = vd->vdev_asize +
	    VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
	*logical_ashift = vd->vdev_ashift;
	*physical_ashift = vd->vdev_physical_ashift;
	return (0);
}

typedef struct remap_segment {
	vdev_t *rs_vd;
	uint64_t rs_offset;
	uint64_t rs_asize;
	uint64_t rs_split_offset;
	list_node_t rs_node;
} remap_segment_t;

static remap_segment_t *
rs_alloc(vdev_t *vd, uint64_t offset, uint64_t asize, uint64_t split_offset)
{
	remap_segment_t *rs = kmem_alloc(sizeof (remap_segment_t), KM_SLEEP);
	rs->rs_vd = vd;
	rs->rs_offset = offset;
	rs->rs_asize = asize;
	rs->rs_split_offset = split_offset;
	return (rs);
}

/*
 * Given an indirect vdev and an extent on that vdev, this function duplicates
 * the physical entries of the indirect mapping that correspond to the extent
 * to a new array and returns a pointer to it.  In addition, copied_entries
 * is populated with the number of mapping entries that were duplicated.
 *
 * Note that the function assumes that the caller holds vdev_indirect_rwlock.
 * This ensures that the mapping won't change due to condensing as we
 * copy over its contents.
 *
 * Finally, since we are doing an allocation, it is up to the caller to
 * free the array allocated in this function.
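 *
 * As a sketch of the expected usage (modeled on vdev_indirect_remap()
 * below), a caller pairs the allocation with a matching kmem_free():
 *
 *	uint64_t num_entries;
 *	vdev_indirect_mapping_entry_phys_t *mapping;
 *
 *	rw_enter(&vd->vdev_indirect_rwlock, RW_READER);
 *	mapping = vdev_indirect_mapping_duplicate_adjacent_entries(vd,
 *	    offset, asize, &num_entries);
 *	rw_exit(&vd->vdev_indirect_rwlock);
 *	...
 *	kmem_free(mapping, num_entries * sizeof (*mapping));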
992 */ 993 static vdev_indirect_mapping_entry_phys_t * 994 vdev_indirect_mapping_duplicate_adjacent_entries(vdev_t *vd, uint64_t offset, 995 uint64_t asize, uint64_t *copied_entries) 996 { 997 vdev_indirect_mapping_entry_phys_t *duplicate_mappings = NULL; 998 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping; 999 uint64_t entries = 0; 1000 1001 ASSERT(RW_READ_HELD(&vd->vdev_indirect_rwlock)); 1002 1003 vdev_indirect_mapping_entry_phys_t *first_mapping = 1004 vdev_indirect_mapping_entry_for_offset(vim, offset); 1005 ASSERT3P(first_mapping, !=, NULL); 1006 1007 vdev_indirect_mapping_entry_phys_t *m = first_mapping; 1008 while (asize > 0) { 1009 uint64_t size = DVA_GET_ASIZE(&m->vimep_dst); 1010 1011 ASSERT3U(offset, >=, DVA_MAPPING_GET_SRC_OFFSET(m)); 1012 ASSERT3U(offset, <, DVA_MAPPING_GET_SRC_OFFSET(m) + size); 1013 1014 uint64_t inner_offset = offset - DVA_MAPPING_GET_SRC_OFFSET(m); 1015 uint64_t inner_size = MIN(asize, size - inner_offset); 1016 1017 offset += inner_size; 1018 asize -= inner_size; 1019 entries++; 1020 m++; 1021 } 1022 1023 size_t copy_length = entries * sizeof (*first_mapping); 1024 duplicate_mappings = kmem_alloc(copy_length, KM_SLEEP); 1025 bcopy(first_mapping, duplicate_mappings, copy_length); 1026 *copied_entries = entries; 1027 1028 return (duplicate_mappings); 1029 } 1030 1031 /* 1032 * Goes through the relevant indirect mappings until it hits a concrete vdev 1033 * and issues the callback. On the way to the concrete vdev, if any other 1034 * indirect vdevs are encountered, then the callback will also be called on 1035 * each of those indirect vdevs. For example, if the segment is mapped to 1036 * segment A on indirect vdev 1, and then segment A on indirect vdev 1 is 1037 * mapped to segment B on concrete vdev 2, then the callback will be called on 1038 * both vdev 1 and vdev 2. 1039 * 1040 * While the callback passed to vdev_indirect_remap() is called on every vdev 1041 * the function encounters, certain callbacks only care about concrete vdevs. 1042 * These types of callbacks should return immediately and explicitly when they 1043 * are called on an indirect vdev. 1044 * 1045 * Because there is a possibility that a DVA section in the indirect device 1046 * has been split into multiple sections in our mapping, we keep track 1047 * of the relevant contiguous segments of the new location (remap_segment_t) 1048 * in a stack. This way we can call the callback for each of the new sections 1049 * created by a single section of the indirect device. Note though, that in 1050 * this scenario the callbacks in each split block won't occur in-order in 1051 * terms of offset, so callers should not make any assumptions about that. 1052 * 1053 * For callbacks that don't handle split blocks and immediately return when 1054 * they encounter them (as is the case for remap_blkptr_cb), the caller can 1055 * assume that its callback will be applied from the first indirect vdev 1056 * encountered to the last one and then the concrete vdev, in that order. 
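 *
 * As an illustration only (this callback is hypothetical; the callback
 * actually used in this file is vdev_indirect_gather_splits()), a callback
 * matching the required signature that only acts on concrete vdevs could
 * look like:
 *
 *	static void
 *	count_concrete_segments_cb(uint64_t split_offset, vdev_t *vd,
 *	    uint64_t offset, uint64_t size, void *arg)
 *	{
 *		if (vd->vdev_ops == &vdev_indirect_ops)
 *			return;
 *		(*(uint64_t *)arg)++;
 *	}
 *
 *	uint64_t nsegs = 0;
 *	vdev_indirect_remap(vd, offset, asize,
 *	    count_concrete_segments_cb, &nsegs);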
 */
static void
vdev_indirect_remap(vdev_t *vd, uint64_t offset, uint64_t asize,
    void (*func)(uint64_t, vdev_t *, uint64_t, uint64_t, void *), void *arg)
{
	list_t stack;
	spa_t *spa = vd->vdev_spa;

	list_create(&stack, sizeof (remap_segment_t),
	    offsetof(remap_segment_t, rs_node));

	for (remap_segment_t *rs = rs_alloc(vd, offset, asize, 0);
	    rs != NULL; rs = list_remove_head(&stack)) {
		vdev_t *v = rs->rs_vd;
		uint64_t num_entries = 0;

		ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
		ASSERT(rs->rs_asize > 0);

		/*
		 * Note: As this function can be called from open context
		 * (e.g. zio_read()), we need the following rwlock to
		 * prevent the mapping from being changed by condensing.
		 *
		 * So we grab the lock and we make a copy of the entries
		 * that are relevant to the extent that we are working on.
		 * Once that is done, we drop the lock and iterate over
		 * our copy of the mapping.  Once we are done with the
		 * remap segment and we free it, we also free our copy
		 * of the indirect mapping entries that are relevant to it.
		 *
		 * This way we don't need to wait until the function is
		 * finished with a segment, to condense it.  In addition, we
		 * don't need a recursive rwlock for the case that a call to
		 * vdev_indirect_remap() needs to call itself (through the
		 * codepath of its callback) for the same vdev in the middle
		 * of its execution.
		 */
		rw_enter(&v->vdev_indirect_rwlock, RW_READER);
		ASSERT3P(v->vdev_indirect_mapping, !=, NULL);

		vdev_indirect_mapping_entry_phys_t *mapping =
		    vdev_indirect_mapping_duplicate_adjacent_entries(v,
		    rs->rs_offset, rs->rs_asize, &num_entries);
		ASSERT3P(mapping, !=, NULL);
		ASSERT3U(num_entries, >, 0);
		rw_exit(&v->vdev_indirect_rwlock);

		for (uint64_t i = 0; i < num_entries; i++) {
			/*
			 * Note: the vdev_indirect_mapping can not change
			 * while we are running.  It only changes while the
			 * removal is in progress, and then only from syncing
			 * context.  While a removal is in progress, this
			 * function is only called for frees, which also only
			 * happen from syncing context.
			 */
			vdev_indirect_mapping_entry_phys_t *m = &mapping[i];

			ASSERT3P(m, !=, NULL);
			ASSERT3U(rs->rs_asize, >, 0);

			uint64_t size = DVA_GET_ASIZE(&m->vimep_dst);
			uint64_t dst_offset = DVA_GET_OFFSET(&m->vimep_dst);
			uint64_t dst_vdev = DVA_GET_VDEV(&m->vimep_dst);

			ASSERT3U(rs->rs_offset, >=,
			    DVA_MAPPING_GET_SRC_OFFSET(m));
			ASSERT3U(rs->rs_offset, <,
			    DVA_MAPPING_GET_SRC_OFFSET(m) + size);
			ASSERT3U(dst_vdev, !=, v->vdev_id);

			uint64_t inner_offset = rs->rs_offset -
			    DVA_MAPPING_GET_SRC_OFFSET(m);
			uint64_t inner_size =
			    MIN(rs->rs_asize, size - inner_offset);

			vdev_t *dst_v = vdev_lookup_top(spa, dst_vdev);
			ASSERT3P(dst_v, !=, NULL);

			if (dst_v->vdev_ops == &vdev_indirect_ops) {
				list_insert_head(&stack,
				    rs_alloc(dst_v, dst_offset + inner_offset,
				    inner_size, rs->rs_split_offset));

			}

			if ((zfs_flags & ZFS_DEBUG_INDIRECT_REMAP) &&
			    IS_P2ALIGNED(inner_size, 2 * SPA_MINBLOCKSIZE)) {
				/*
				 * Note: This clause exists solely for
				 * testing purposes.  We use it to ensure that
				 * split blocks work and that the callbacks
				 * using them yield the same result if issued
				 * in reverse order.
1152 */ 1153 uint64_t inner_half = inner_size / 2; 1154 1155 func(rs->rs_split_offset + inner_half, dst_v, 1156 dst_offset + inner_offset + inner_half, 1157 inner_half, arg); 1158 1159 func(rs->rs_split_offset, dst_v, 1160 dst_offset + inner_offset, 1161 inner_half, arg); 1162 } else { 1163 func(rs->rs_split_offset, dst_v, 1164 dst_offset + inner_offset, 1165 inner_size, arg); 1166 } 1167 1168 rs->rs_offset += inner_size; 1169 rs->rs_asize -= inner_size; 1170 rs->rs_split_offset += inner_size; 1171 } 1172 VERIFY0(rs->rs_asize); 1173 1174 kmem_free(mapping, num_entries * sizeof (*mapping)); 1175 kmem_free(rs, sizeof (remap_segment_t)); 1176 } 1177 list_destroy(&stack); 1178 } 1179 1180 static void 1181 vdev_indirect_child_io_done(zio_t *zio) 1182 { 1183 zio_t *pio = zio->io_private; 1184 1185 mutex_enter(&pio->io_lock); 1186 pio->io_error = zio_worst_error(pio->io_error, zio->io_error); 1187 mutex_exit(&pio->io_lock); 1188 1189 abd_free(zio->io_abd); 1190 } 1191 1192 /* 1193 * This is a callback for vdev_indirect_remap() which allocates an 1194 * indirect_split_t for each split segment and adds it to iv_splits. 1195 */ 1196 static void 1197 vdev_indirect_gather_splits(uint64_t split_offset, vdev_t *vd, uint64_t offset, 1198 uint64_t size, void *arg) 1199 { 1200 zio_t *zio = arg; 1201 indirect_vsd_t *iv = zio->io_vsd; 1202 1203 ASSERT3P(vd, !=, NULL); 1204 1205 if (vd->vdev_ops == &vdev_indirect_ops) 1206 return; 1207 1208 int n = 1; 1209 if (vd->vdev_ops == &vdev_mirror_ops) 1210 n = vd->vdev_children; 1211 1212 indirect_split_t *is = 1213 kmem_zalloc(offsetof(indirect_split_t, is_child[n]), KM_SLEEP); 1214 1215 is->is_children = n; 1216 is->is_size = size; 1217 is->is_split_offset = split_offset; 1218 is->is_target_offset = offset; 1219 is->is_vdev = vd; 1220 list_create(&is->is_unique_child, sizeof (indirect_child_t), 1221 offsetof(indirect_child_t, ic_node)); 1222 1223 /* 1224 * Note that we only consider multiple copies of the data for 1225 * *mirror* vdevs. We don't for "replacing" or "spare" vdevs, even 1226 * though they use the same ops as mirror, because there's only one 1227 * "good" copy under the replacing/spare. 1228 */ 1229 if (vd->vdev_ops == &vdev_mirror_ops) { 1230 for (int i = 0; i < n; i++) { 1231 is->is_child[i].ic_vdev = vd->vdev_child[i]; 1232 list_link_init(&is->is_child[i].ic_node); 1233 } 1234 } else { 1235 is->is_child[0].ic_vdev = vd; 1236 } 1237 1238 list_insert_tail(&iv->iv_splits, is); 1239 } 1240 1241 static void 1242 vdev_indirect_read_split_done(zio_t *zio) 1243 { 1244 indirect_child_t *ic = zio->io_private; 1245 1246 if (zio->io_error != 0) { 1247 /* 1248 * Clear ic_data to indicate that we do not have data for this 1249 * child. 1250 */ 1251 abd_free(ic->ic_data); 1252 ic->ic_data = NULL; 1253 } 1254 } 1255 1256 /* 1257 * Issue reads for all copies (mirror children) of all splits. 1258 */ 1259 static void 1260 vdev_indirect_read_all(zio_t *zio) 1261 { 1262 indirect_vsd_t *iv = zio->io_vsd; 1263 1264 ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ); 1265 1266 for (indirect_split_t *is = list_head(&iv->iv_splits); 1267 is != NULL; is = list_next(&iv->iv_splits, is)) { 1268 for (int i = 0; i < is->is_children; i++) { 1269 indirect_child_t *ic = &is->is_child[i]; 1270 1271 if (!vdev_readable(ic->ic_vdev)) 1272 continue; 1273 1274 /* 1275 * If a child is missing the data, set ic_error. Used 1276 * in vdev_indirect_repair(). We perform the read 1277 * nevertheless which provides the opportunity to 1278 * reconstruct the split block if at all possible. 
1279 */ 1280 if (vdev_dtl_contains(ic->ic_vdev, DTL_MISSING, 1281 zio->io_txg, 1)) 1282 ic->ic_error = SET_ERROR(ESTALE); 1283 1284 ic->ic_data = abd_alloc_sametype(zio->io_abd, 1285 is->is_size); 1286 ic->ic_duplicate = NULL; 1287 1288 zio_nowait(zio_vdev_child_io(zio, NULL, 1289 ic->ic_vdev, is->is_target_offset, ic->ic_data, 1290 is->is_size, zio->io_type, zio->io_priority, 0, 1291 vdev_indirect_read_split_done, ic)); 1292 } 1293 } 1294 iv->iv_reconstruct = B_TRUE; 1295 } 1296 1297 static void 1298 vdev_indirect_io_start(zio_t *zio) 1299 { 1300 spa_t *spa __maybe_unused = zio->io_spa; 1301 indirect_vsd_t *iv = kmem_zalloc(sizeof (*iv), KM_SLEEP); 1302 list_create(&iv->iv_splits, 1303 sizeof (indirect_split_t), offsetof(indirect_split_t, is_node)); 1304 1305 zio->io_vsd = iv; 1306 zio->io_vsd_ops = &vdev_indirect_vsd_ops; 1307 1308 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0); 1309 if (zio->io_type != ZIO_TYPE_READ) { 1310 ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE); 1311 /* 1312 * Note: this code can handle other kinds of writes, 1313 * but we don't expect them. 1314 */ 1315 ASSERT((zio->io_flags & (ZIO_FLAG_SELF_HEAL | 1316 ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE)) != 0); 1317 } 1318 1319 vdev_indirect_remap(zio->io_vd, zio->io_offset, zio->io_size, 1320 vdev_indirect_gather_splits, zio); 1321 1322 indirect_split_t *first = list_head(&iv->iv_splits); 1323 if (first->is_size == zio->io_size) { 1324 /* 1325 * This is not a split block; we are pointing to the entire 1326 * data, which will checksum the same as the original data. 1327 * Pass the BP down so that the child i/o can verify the 1328 * checksum, and try a different location if available 1329 * (e.g. on a mirror). 1330 * 1331 * While this special case could be handled the same as the 1332 * general (split block) case, doing it this way ensures 1333 * that the vast majority of blocks on indirect vdevs 1334 * (which are not split) are handled identically to blocks 1335 * on non-indirect vdevs. This allows us to be less strict 1336 * about performance in the general (but rare) case. 1337 */ 1338 ASSERT0(first->is_split_offset); 1339 ASSERT3P(list_next(&iv->iv_splits, first), ==, NULL); 1340 zio_nowait(zio_vdev_child_io(zio, zio->io_bp, 1341 first->is_vdev, first->is_target_offset, 1342 abd_get_offset(zio->io_abd, 0), 1343 zio->io_size, zio->io_type, zio->io_priority, 0, 1344 vdev_indirect_child_io_done, zio)); 1345 } else { 1346 iv->iv_split_block = B_TRUE; 1347 if (zio->io_type == ZIO_TYPE_READ && 1348 zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER)) { 1349 /* 1350 * Read all copies. Note that for simplicity, 1351 * we don't bother consulting the DTL in the 1352 * resilver case. 1353 */ 1354 vdev_indirect_read_all(zio); 1355 } else { 1356 /* 1357 * If this is a read zio, we read one copy of each 1358 * split segment, from the top-level vdev. Since 1359 * we don't know the checksum of each split 1360 * individually, the child zio can't ensure that 1361 * we get the right data. E.g. if it's a mirror, 1362 * it will just read from a random (healthy) leaf 1363 * vdev. We have to verify the checksum in 1364 * vdev_indirect_io_done(). 1365 * 1366 * For write zios, the vdev code will ensure we write 1367 * to all children. 
1368 */ 1369 for (indirect_split_t *is = list_head(&iv->iv_splits); 1370 is != NULL; is = list_next(&iv->iv_splits, is)) { 1371 zio_nowait(zio_vdev_child_io(zio, NULL, 1372 is->is_vdev, is->is_target_offset, 1373 abd_get_offset(zio->io_abd, 1374 is->is_split_offset), is->is_size, 1375 zio->io_type, zio->io_priority, 0, 1376 vdev_indirect_child_io_done, zio)); 1377 } 1378 1379 } 1380 } 1381 1382 zio_execute(zio); 1383 } 1384 1385 /* 1386 * Report a checksum error for a child. 1387 */ 1388 static void 1389 vdev_indirect_checksum_error(zio_t *zio, 1390 indirect_split_t *is, indirect_child_t *ic) 1391 { 1392 vdev_t *vd = ic->ic_vdev; 1393 1394 if (zio->io_flags & ZIO_FLAG_SPECULATIVE) 1395 return; 1396 1397 mutex_enter(&vd->vdev_stat_lock); 1398 vd->vdev_stat.vs_checksum_errors++; 1399 mutex_exit(&vd->vdev_stat_lock); 1400 1401 zio_bad_cksum_t zbc = {{{ 0 }}}; 1402 abd_t *bad_abd = ic->ic_data; 1403 abd_t *good_abd = is->is_good_child->ic_data; 1404 (void) zfs_ereport_post_checksum(zio->io_spa, vd, NULL, zio, 1405 is->is_target_offset, is->is_size, good_abd, bad_abd, &zbc); 1406 } 1407 1408 /* 1409 * Issue repair i/os for any incorrect copies. We do this by comparing 1410 * each split segment's correct data (is_good_child's ic_data) with each 1411 * other copy of the data. If they differ, then we overwrite the bad data 1412 * with the good copy. The DTL is checked in vdev_indirect_read_all() and 1413 * if a vdev is missing a copy of the data we set ic_error and the read is 1414 * performed. This provides the opportunity to reconstruct the split block 1415 * if at all possible. ic_error is checked here and if set it suppresses 1416 * incrementing the checksum counter. Aside from this DTLs are not checked, 1417 * which simplifies this code and also issues the optimal number of writes 1418 * (based on which copies actually read bad data, as opposed to which we 1419 * think might be wrong). For the same reason, we always use 1420 * ZIO_FLAG_SELF_HEAL, to bypass the DTL check in zio_vdev_io_start(). 1421 */ 1422 static void 1423 vdev_indirect_repair(zio_t *zio) 1424 { 1425 indirect_vsd_t *iv = zio->io_vsd; 1426 1427 enum zio_flag flags = ZIO_FLAG_IO_REPAIR; 1428 1429 if (!(zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER))) 1430 flags |= ZIO_FLAG_SELF_HEAL; 1431 1432 if (!spa_writeable(zio->io_spa)) 1433 return; 1434 1435 for (indirect_split_t *is = list_head(&iv->iv_splits); 1436 is != NULL; is = list_next(&iv->iv_splits, is)) { 1437 for (int c = 0; c < is->is_children; c++) { 1438 indirect_child_t *ic = &is->is_child[c]; 1439 if (ic == is->is_good_child) 1440 continue; 1441 if (ic->ic_data == NULL) 1442 continue; 1443 if (ic->ic_duplicate == is->is_good_child) 1444 continue; 1445 1446 zio_nowait(zio_vdev_child_io(zio, NULL, 1447 ic->ic_vdev, is->is_target_offset, 1448 is->is_good_child->ic_data, is->is_size, 1449 ZIO_TYPE_WRITE, ZIO_PRIORITY_ASYNC_WRITE, 1450 ZIO_FLAG_IO_REPAIR | ZIO_FLAG_SELF_HEAL, 1451 NULL, NULL)); 1452 1453 /* 1454 * If ic_error is set the current child does not have 1455 * a copy of the data, so suppress incrementing the 1456 * checksum counter. 1457 */ 1458 if (ic->ic_error == ESTALE) 1459 continue; 1460 1461 vdev_indirect_checksum_error(zio, is, ic); 1462 } 1463 } 1464 } 1465 1466 /* 1467 * Report checksum errors on all children that we read from. 
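 *
 * This is used when reconstruction of a split block fails outright; in that
 * case we don't know which copy was bad, so every child we actually read
 * data from gets an ereport and has its checksum error count bumped.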
 */
static void
vdev_indirect_all_checksum_errors(zio_t *zio)
{
	indirect_vsd_t *iv = zio->io_vsd;

	if (zio->io_flags & ZIO_FLAG_SPECULATIVE)
		return;

	for (indirect_split_t *is = list_head(&iv->iv_splits);
	    is != NULL; is = list_next(&iv->iv_splits, is)) {
		for (int c = 0; c < is->is_children; c++) {
			indirect_child_t *ic = &is->is_child[c];

			if (ic->ic_data == NULL)
				continue;

			vdev_t *vd = ic->ic_vdev;

			(void) zfs_ereport_post_checksum(zio->io_spa, vd,
			    NULL, zio, is->is_target_offset, is->is_size,
			    NULL, NULL, NULL);
			mutex_enter(&vd->vdev_stat_lock);
			vd->vdev_stat.vs_checksum_errors++;
			mutex_exit(&vd->vdev_stat_lock);
		}
	}
}

/*
 * Copy data from all the splits to a main zio and then validate the checksum.
 * If the checksum is successfully validated return success.
 */
static int
vdev_indirect_splits_checksum_validate(indirect_vsd_t *iv, zio_t *zio)
{
	zio_bad_cksum_t zbc;

	for (indirect_split_t *is = list_head(&iv->iv_splits);
	    is != NULL; is = list_next(&iv->iv_splits, is)) {

		ASSERT3P(is->is_good_child->ic_data, !=, NULL);
		ASSERT3P(is->is_good_child->ic_duplicate, ==, NULL);

		abd_copy_off(zio->io_abd, is->is_good_child->ic_data,
		    is->is_split_offset, 0, is->is_size);
	}

	return (zio_checksum_error(zio, &zbc));
}

/*
 * There are relatively few possible combinations making it feasible to
 * deterministically check them all.  We do this by setting the good_child
 * to the next unique split version.  If we reach the end of the list then
 * "carry over" to the next unique split version (like counting in base
 * is_unique_children, but each digit can have a different base).
 */
static int
vdev_indirect_splits_enumerate_all(indirect_vsd_t *iv, zio_t *zio)
{
	boolean_t more = B_TRUE;

	iv->iv_attempts = 0;

	for (indirect_split_t *is = list_head(&iv->iv_splits);
	    is != NULL; is = list_next(&iv->iv_splits, is))
		is->is_good_child = list_head(&is->is_unique_child);

	while (more == B_TRUE) {
		iv->iv_attempts++;
		more = B_FALSE;

		if (vdev_indirect_splits_checksum_validate(iv, zio) == 0)
			return (0);

		for (indirect_split_t *is = list_head(&iv->iv_splits);
		    is != NULL; is = list_next(&iv->iv_splits, is)) {
			is->is_good_child = list_next(&is->is_unique_child,
			    is->is_good_child);
			if (is->is_good_child != NULL) {
				more = B_TRUE;
				break;
			}

			is->is_good_child = list_head(&is->is_unique_child);
		}
	}

	ASSERT3S(iv->iv_attempts, <=, iv->iv_unique_combinations);

	return (SET_ERROR(ECKSUM));
}

/*
 * There are too many combinations to try all of them in a reasonable amount
 * of time.  So try a fixed number of random combinations from the unique
 * split versions, after which we'll consider the block unrecoverable.
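 *
 * As a worked example with the default
 * zfs_reconstruct_indirect_combinations_max of 4096: a block split into 12
 * segments, each with 2 unique copies, has 2^12 = 4096 unique combinations
 * and is still enumerated exhaustively, while 13 such segments (2^13 = 8192
 * combinations) fall back to trying 4096 randomly-selected combinations.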
 */
static int
vdev_indirect_splits_enumerate_randomly(indirect_vsd_t *iv, zio_t *zio)
{
	iv->iv_attempts = 0;

	while (iv->iv_attempts < iv->iv_attempts_max) {
		iv->iv_attempts++;

		for (indirect_split_t *is = list_head(&iv->iv_splits);
		    is != NULL; is = list_next(&iv->iv_splits, is)) {
			indirect_child_t *ic = list_head(&is->is_unique_child);
			int children = is->is_unique_children;

			for (int i = spa_get_random(children); i > 0; i--)
				ic = list_next(&is->is_unique_child, ic);

			ASSERT3P(ic, !=, NULL);
			is->is_good_child = ic;
		}

		if (vdev_indirect_splits_checksum_validate(iv, zio) == 0)
			return (0);
	}

	return (SET_ERROR(ECKSUM));
}

/*
 * This is a validation function for reconstruction.  It randomly selects
 * a good combination, if one can be found, and then it intentionally
 * damages all other segment copies by zeroing them.  This forces the
 * reconstruction algorithm to locate the one remaining known good copy.
 */
static int
vdev_indirect_splits_damage(indirect_vsd_t *iv, zio_t *zio)
{
	int error;

	/* Presume all the copies are unique for initial selection. */
	for (indirect_split_t *is = list_head(&iv->iv_splits);
	    is != NULL; is = list_next(&iv->iv_splits, is)) {
		is->is_unique_children = 0;

		for (int i = 0; i < is->is_children; i++) {
			indirect_child_t *ic = &is->is_child[i];
			if (ic->ic_data != NULL) {
				is->is_unique_children++;
				list_insert_tail(&is->is_unique_child, ic);
			}
		}

		if (list_is_empty(&is->is_unique_child)) {
			error = SET_ERROR(EIO);
			goto out;
		}
	}

	/*
	 * Set each is_good_child to a randomly-selected child which
	 * is known to contain validated data.
	 */
	error = vdev_indirect_splits_enumerate_randomly(iv, zio);
	if (error)
		goto out;

	/*
	 * Damage all but the known good copy by zeroing it.  This will
	 * result in two or fewer unique copies per indirect_split_t.
	 * Both may need to be checked in order to reconstruct the block.
	 * Set iv->iv_attempts_max such that all unique combinations will
	 * be enumerated, but limit the damage to at most 12 indirect splits.
	 */
	iv->iv_attempts_max = 1;

	for (indirect_split_t *is = list_head(&iv->iv_splits);
	    is != NULL; is = list_next(&iv->iv_splits, is)) {
		for (int c = 0; c < is->is_children; c++) {
			indirect_child_t *ic = &is->is_child[c];

			if (ic == is->is_good_child)
				continue;
			if (ic->ic_data == NULL)
				continue;

			abd_zero(ic->ic_data, abd_get_size(ic->ic_data));
		}

		iv->iv_attempts_max *= 2;
		if (iv->iv_attempts_max >= (1ULL << 12)) {
			iv->iv_attempts_max = UINT64_MAX;
			break;
		}
	}

out:
	/* Empty the unique children lists so they can be reconstructed. */
	for (indirect_split_t *is = list_head(&iv->iv_splits);
	    is != NULL; is = list_next(&iv->iv_splits, is)) {
		indirect_child_t *ic;
		while ((ic = list_head(&is->is_unique_child)) != NULL)
			list_remove(&is->is_unique_child, ic);

		is->is_unique_children = 0;
	}

	return (error);
}

/*
 * This function is called when we have read all copies of the data and need
 * to try to find a combination of copies that gives us the right checksum.

/*
 * This function is called when we have read all copies of the data and need
 * to try to find a combination of copies that gives us the right checksum.
 *
 * If we pointed to any mirror vdevs, this effectively does the job of the
 * mirror.  The mirror vdev code can't do its own job because we don't know
 * the checksum of each split segment individually.
 *
 * We have to try every unique combination of copies of split segments, until
 * we find one that checksums correctly.  Duplicate segment copies are first
 * identified and later skipped during reconstruction.  This optimization
 * reduces the search space and ensures that of the remaining combinations
 * at most one is correct.
 *
 * When the total number of combinations is small, they can all be checked.
 * For example, if we have 3 segments in the split, and each points to a
 * 2-way mirror with unique copies, we will have the following pieces of data:
 *
 *       |     mirror child
 * split |     [0]        [1]
 * ======|=====================
 *   A   |  data_A_0   data_A_1
 *   B   |  data_B_0   data_B_1
 *   C   |  data_C_0   data_C_1
 *
 * We will try the following (mirror children)^(number of splits) (2^3=8)
 * combinations, which is similar to bitwise-little-endian counting in
 * binary.  In general each "digit" corresponds to a split segment, and the
 * base of each digit is is_unique_children, which can be different for each
 * digit.
 *
 *     "low bit"        "high bit"
 *         v                 v
 *      data_A_0  data_B_0  data_C_0
 *      data_A_1  data_B_0  data_C_0
 *      data_A_0  data_B_1  data_C_0
 *      data_A_1  data_B_1  data_C_0
 *      data_A_0  data_B_0  data_C_1
 *      data_A_1  data_B_0  data_C_1
 *      data_A_0  data_B_1  data_C_1
 *      data_A_1  data_B_1  data_C_1
 *
 * Note that the split segments may be on the same or different top-level
 * vdevs.  In either case, we may need to try lots of combinations (see
 * zfs_reconstruct_indirect_combinations_max).  This ensures that if a mirror
 * has small silent errors on all of its children, we can still reconstruct
 * the correct data, as long as those errors are at sufficiently separated
 * offsets (specifically, separated by the largest block size - default of
 * 128KB, but up to 16MB).
 */
static void
vdev_indirect_reconstruct_io_done(zio_t *zio)
{
	indirect_vsd_t *iv = zio->io_vsd;
	boolean_t known_good = B_FALSE;
	int error;

	iv->iv_unique_combinations = 1;
	iv->iv_attempts_max = UINT64_MAX;

	if (zfs_reconstruct_indirect_combinations_max > 0)
		iv->iv_attempts_max = zfs_reconstruct_indirect_combinations_max;

	/*
	 * If zfs_reconstruct_indirect_damage_fraction is nonzero, then one
	 * in every zfs_reconstruct_indirect_damage_fraction blocks will be
	 * damaged, in order to validate reconstruction when there are split
	 * segments with damaged copies.  known_good will be TRUE when
	 * reconstruction is known to be possible.
	 */
	if (zfs_reconstruct_indirect_damage_fraction != 0 &&
	    spa_get_random(zfs_reconstruct_indirect_damage_fraction) == 0)
		known_good = (vdev_indirect_splits_damage(iv, zio) == 0);

	/*
	 * Determine the unique children for each split segment and add them
	 * to the is_unique_child list.  By restricting reconstruction
	 * to these children, only unique combinations will be considered.
	 * This can vastly reduce the search space when there are a large
	 * number of indirect splits.
	 */
	for (indirect_split_t *is = list_head(&iv->iv_splits);
	    is != NULL; is = list_next(&iv->iv_splits, is)) {
		is->is_unique_children = 0;

		for (int i = 0; i < is->is_children; i++) {
			indirect_child_t *ic_i = &is->is_child[i];

			if (ic_i->ic_data == NULL ||
			    ic_i->ic_duplicate != NULL)
				continue;

			for (int j = i + 1; j < is->is_children; j++) {
				indirect_child_t *ic_j = &is->is_child[j];

				if (ic_j->ic_data == NULL ||
				    ic_j->ic_duplicate != NULL)
					continue;

				if (abd_cmp(ic_i->ic_data, ic_j->ic_data) == 0)
					ic_j->ic_duplicate = ic_i;
			}

			is->is_unique_children++;
			list_insert_tail(&is->is_unique_child, ic_i);
		}

		/* Reconstruction is impossible, no valid children */
		EQUIV(list_is_empty(&is->is_unique_child),
		    is->is_unique_children == 0);
		if (list_is_empty(&is->is_unique_child)) {
			zio->io_error = EIO;
			vdev_indirect_all_checksum_errors(zio);
			zio_checksum_verified(zio);
			return;
		}

		iv->iv_unique_combinations *= is->is_unique_children;
	}

	if (iv->iv_unique_combinations <= iv->iv_attempts_max)
		error = vdev_indirect_splits_enumerate_all(iv, zio);
	else
		error = vdev_indirect_splits_enumerate_randomly(iv, zio);

	if (error != 0) {
		/* All attempted combinations failed. */
		ASSERT3B(known_good, ==, B_FALSE);
		zio->io_error = error;
		vdev_indirect_all_checksum_errors(zio);
	} else {
		/*
		 * The checksum has been successfully validated.  Issue
		 * repair I/Os to any copies of splits which don't match
		 * the validated version.
		 */
		ASSERT0(vdev_indirect_splits_checksum_validate(iv, zio));
		vdev_indirect_repair(zio);
		zio_checksum_verified(zio);
	}
}
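
/*
 * Illustrative sketch (hypothetical helper, not part of this file): the
 * duplicate detection above boils down to a pairwise comparison that marks
 * every later identical copy as a duplicate of its first occurrence, so the
 * enumeration only has to consider distinct payloads.  Over a plain array
 * of equally-sized ABDs it would look like:
 *
 *	static uint_t
 *	mark_duplicates(abd_t **copy, abd_t **dup_of, uint_t ncopies)
 *	{
 *		uint_t nunique = 0;
 *
 *		for (uint_t i = 0; i < ncopies; i++) {
 *			if (copy[i] == NULL || dup_of[i] != NULL)
 *				continue;
 *			for (uint_t j = i + 1; j < ncopies; j++) {
 *				if (copy[j] != NULL && dup_of[j] == NULL &&
 *				    abd_cmp(copy[i], copy[j]) == 0)
 *					dup_of[j] = copy[i];
 *			}
 *			nunique++;
 *		}
 *		return (nunique);
 *	}
 *
 * As in the real code, a copy counts as unique only if it exists and has not
 * already been matched to an earlier copy; everything else is skipped during
 * enumeration.
 */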

static void
vdev_indirect_io_done(zio_t *zio)
{
	indirect_vsd_t *iv = zio->io_vsd;

	if (iv->iv_reconstruct) {
		/*
		 * We have read all copies of the data (e.g. from mirrors),
		 * either because this was a scrub/resilver, or because the
		 * one-copy read didn't checksum correctly.
		 */
		vdev_indirect_reconstruct_io_done(zio);
		return;
	}

	if (!iv->iv_split_block) {
		/*
		 * This was not a split block, so we passed the BP down,
		 * and the checksum was handled by the (one) child zio.
		 */
		return;
	}

	zio_bad_cksum_t zbc;
	int ret = zio_checksum_error(zio, &zbc);
	if (ret == 0) {
		zio_checksum_verified(zio);
		return;
	}

	/*
	 * The checksum didn't match.  Read all copies of all splits, and
	 * then we will try to reconstruct.  The next time
	 * vdev_indirect_io_done() is called, iv_reconstruct will be set.
	 */
	vdev_indirect_read_all(zio);

	zio_vdev_io_redone(zio);
}
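
/*
 * To summarize the flow for a split-block read (assuming the usual case of
 * a single-copy first attempt):
 *
 *   pass 1: vdev_indirect_io_done() checksums the assembled block; on a
 *           mismatch it calls vdev_indirect_read_all(), which issues reads
 *           for every copy of every split and causes iv_reconstruct to be
 *           set, and zio_vdev_io_redone() arranges for this done callback
 *           to run again once those reads complete.
 *   pass 2: vdev_indirect_io_done() sees iv_reconstruct and hands the zio
 *           to vdev_indirect_reconstruct_io_done() to search for a
 *           combination of copies that checksums correctly.
 */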

vdev_ops_t vdev_indirect_ops = {
	.vdev_op_init = NULL,
	.vdev_op_fini = NULL,
	.vdev_op_open = vdev_indirect_open,
	.vdev_op_close = vdev_indirect_close,
	.vdev_op_asize = vdev_default_asize,
	.vdev_op_min_asize = vdev_default_min_asize,
	.vdev_op_min_alloc = NULL,
	.vdev_op_io_start = vdev_indirect_io_start,
	.vdev_op_io_done = vdev_indirect_io_done,
	.vdev_op_state_change = NULL,
	.vdev_op_need_resilver = NULL,
	.vdev_op_hold = NULL,
	.vdev_op_rele = NULL,
	.vdev_op_remap = vdev_indirect_remap,
	.vdev_op_xlate = NULL,
	.vdev_op_rebuild_asize = NULL,
	.vdev_op_metaslab_init = NULL,
	.vdev_op_config_generate = NULL,
	.vdev_op_nparity = NULL,
	.vdev_op_ndisks = NULL,
	.vdev_op_type = VDEV_TYPE_INDIRECT,	/* name of this vdev type */
	.vdev_op_leaf = B_FALSE			/* not a leaf vdev */
};

EXPORT_SYMBOL(spa_condense_fini);
EXPORT_SYMBOL(spa_start_indirect_condensing_thread);
EXPORT_SYMBOL(spa_condense_indirect_start_sync);
EXPORT_SYMBOL(spa_condense_init);
EXPORT_SYMBOL(spa_vdev_indirect_mark_obsolete);
EXPORT_SYMBOL(vdev_indirect_mark_obsolete);
EXPORT_SYMBOL(vdev_indirect_should_condense);
EXPORT_SYMBOL(vdev_indirect_sync_obsolete);
EXPORT_SYMBOL(vdev_obsolete_counts_are_precise);
EXPORT_SYMBOL(vdev_obsolete_sm_object);

/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, indirect_vdevs_enable, INT, ZMOD_RW,
	"Whether to attempt condensing indirect vdev mappings");

ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, min_mapping_bytes, ULONG, ZMOD_RW,
	"Don't bother condensing if the mapping uses less than this amount of "
	"memory");

ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, max_obsolete_bytes, ULONG, ZMOD_RW,
	"Minimum size obsolete spacemap to attempt condensing");

ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, indirect_commit_entry_delay_ms, INT, ZMOD_RW,
	"Used by tests to ensure certain actions happen in the middle of a "
	"condense. A maximum value of 1 should be sufficient.");

ZFS_MODULE_PARAM(zfs_reconstruct, zfs_reconstruct_, indirect_combinations_max, INT, ZMOD_RW,
	"Maximum number of combinations when reconstructing split segments");
/* END CSTYLED */