xref: /illumos-gate/usr/src/uts/common/fs/zfs/vdev_indirect.c (revision 698f4ab6008be205f4362675967638572eef4f21)
1 /*
2  * CDDL HEADER START
3  *
4  * This file and its contents are supplied under the terms of the
5  * Common Development and Distribution License ("CDDL"), version 1.0.
6  * You may only use this file in accordance with the terms of version
7  * 1.0 of the CDDL.
8  *
9  * A full copy of the text of the CDDL should have accompanied this
10  * source.  A copy of the CDDL is also available via the Internet at
11  * http://www.illumos.org/license/CDDL.
12  *
13  * CDDL HEADER END
14  */
15 
16 /*
17  * Copyright (c) 2014, 2019 by Delphix. All rights reserved.
18  */
19 
20 #include <sys/zfs_context.h>
21 #include <sys/spa.h>
22 #include <sys/spa_impl.h>
23 #include <sys/vdev_impl.h>
24 #include <sys/fs/zfs.h>
25 #include <sys/zio.h>
26 #include <sys/zio_checksum.h>
27 #include <sys/metaslab.h>
28 #include <sys/refcount.h>
29 #include <sys/dmu.h>
30 #include <sys/vdev_indirect_mapping.h>
31 #include <sys/dmu_tx.h>
32 #include <sys/dsl_synctask.h>
33 #include <sys/zap.h>
34 #include <sys/abd.h>
35 #include <sys/zthr.h>
36 
37 /*
38  * An indirect vdev corresponds to a vdev that has been removed.  Since
39  * we cannot rewrite block pointers of snapshots, etc., we keep a
40  * mapping from old location on the removed device to the new location
41  * on another device in the pool and use this mapping whenever we need
42  * to access the DVA.  Unfortunately, this mapping did not respect
43  * logical block boundaries when it was first created, and so a DVA on
44  * this indirect vdev may be "split" into multiple sections that each
45  * map to a different location.  As a consequence, not all DVAs can be
46  * translated to an equivalent new DVA.  Instead we must provide a
47  * "vdev_remap" operation that executes a callback on each contiguous
48  * segment of the new location.  This function is used in multiple ways:
49  *
50  *  - i/os to this vdev use the callback to determine where the
51  *    data is now located, and issue child i/os for each segment's new
52  *    location.
53  *
54  *  - frees and claims to this vdev use the callback to free or claim
55  *    each mapped segment.  (Note that we don't actually need to claim
56  *    log blocks on indirect vdevs, because we don't allocate to
57  *    removing vdevs.  However, zdb uses zio_claim() for its leak
58  *    detection.)
59  */
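/*
 * Illustrative sketch (not part of this driver): a minimal callback for
 * vdev_indirect_remap(), which implements the "vdev_remap" operation
 * described above.  The callback is invoked once per contiguous segment
 * of the new location.  The callback name and its use here are
 * hypothetical; see vdev_indirect_gather_splits() below for a real
 * consumer.
 *
 *	static void
 *	count_segments_cb(uint64_t split_offset, vdev_t *vd, uint64_t offset,
 *	    uint64_t size, void *arg)
 *	{
 *		uint64_t *nsegs = arg;
 *		(*nsegs)++;
 *	}
 *
 *	uint64_t nsegs = 0;
 *	vdev_indirect_remap(vd, dva_offset, dva_asize, count_segments_cb,
 *	    &nsegs);
 */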
60 
61 /*
62  * "Big theory statement" for how we mark blocks obsolete.
63  *
64  * When a block on an indirect vdev is freed or remapped, a section of
65  * that vdev's mapping may no longer be referenced (aka "obsolete").  We
66  * keep track of how much of each mapping entry is obsolete.  When
67  * an entry becomes completely obsolete, we can remove it, thus reducing
68  * the memory used by the mapping.  The complete picture of obsolescence
69  * is given by the following data structures, described below:
70  *  - the entry-specific obsolete count
71  *  - the vdev-specific obsolete spacemap
72  *  - the pool-specific obsolete bpobj
73  *
74  * == On disk data structures used ==
75  *
76  * We track the obsolete space for the pool using several objects.  Each
77  * of these objects is created on demand and freed when no longer
78  * needed, and is assumed to be empty if it does not exist.
79  * The SPA_FEATURE_OBSOLETE_COUNTS refcount tracks how many such objects exist.
80  *
81  *  - Each vic_mapping_object (associated with an indirect vdev) can
82  *    have a vimp_counts_object.  This is an array of uint32_t's
83  *    with the same number of entries as the vic_mapping_object.  When
84  *    the mapping is condensed, entries from the vic_obsolete_sm_object
85  *    (see below) are folded into the counts.  Therefore, each
86  *    obsolete_counts entry tells us the number of bytes in the
87  *    corresponding mapping entry that were not referenced when the
88  *    mapping was last condensed.
89  *
90  *  - Each indirect or removing vdev can have a vic_obsolete_sm_object.
91  *    This is a space map containing an alloc entry for every DVA that
92  *    has been obsoleted since the last time this indirect vdev was
93  *    condensed.  We use this object in order to improve performance
94  *    when marking a DVA as obsolete.  Instead of modifying an arbitrary
95  *    offset of the vimp_counts_object, we only need to append an entry
96  *    to the end of this object.  When a DVA becomes obsolete, it is
97  *    added to the obsolete space map.  This happens when the DVA is
98  *    freed, remapped and not referenced by a snapshot, or the last
99  *    snapshot referencing it is destroyed.
100  *
101  *  - Each dataset can have a ds_remap_deadlist object.  This is a
102  *    deadlist object containing all blocks that were remapped in this
103  *    dataset but referenced in a previous snapshot.  Blocks can *only*
104  *    appear on this list if they were remapped (dsl_dataset_block_remapped);
105  *    blocks that were killed in a head dataset are put on the normal
106  *    ds_deadlist and marked obsolete when they are freed.
107  *
108  *  - The pool can have a dp_obsolete_bpobj.  This is a list of blocks
109  *    in the pool that need to be marked obsolete.  When a snapshot is
110  *    destroyed, we move some of the ds_remap_deadlist to the obsolete
111  *    bpobj (see dsl_destroy_snapshot_handle_remaps()).  We then
112  *    asynchronously process the obsolete bpobj, moving its entries to
113  *    the specific vdevs' obsolete space maps.
114  *
115  * == Summary of how we mark blocks as obsolete ==
116  *
117  * - When freeing a block: if any DVA is on an indirect vdev, append to
118  *   vic_obsolete_sm_object.
119  * - When remapping a block, add the dva to ds_remap_deadlist (if a prev
120  *   snapshot references it; otherwise append to vic_obsolete_sm_object).
121  * - When freeing a snapshot: move parts of ds_remap_deadlist to
122  *   dp_obsolete_bpobj (same algorithm as ds_deadlist).
123  * - When syncing the spa: process dp_obsolete_bpobj, moving ranges to
124  *   individual vdev's vic_obsolete_sm_object.
125  */
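/*
 * The flow above, condensed (a simplification; the names refer to the real
 * on-disk objects):
 *
 *	free block on indirect vdev ---------------> vic_obsolete_sm_object
 *	remap block, no snapshot reference --------> vic_obsolete_sm_object
 *	remap block, prior snapshot reference -----> ds_remap_deadlist
 *	snapshot destroy: ds_remap_deadlist -------> dp_obsolete_bpobj
 *	spa sync: dp_obsolete_bpobj ---------------> vic_obsolete_sm_object
 *	condense: vic_obsolete_sm_object ----------> vimp_counts_object
 */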
126 
127 /*
128  * "Big theory statement" for how we condense indirect vdevs.
129  *
130  * Condensing an indirect vdev's mapping is the process of determining
131  * the precise counts of obsolete space for each mapping entry (by
132  * integrating the obsolete spacemap into the obsolete counts) and
133  * writing out a new mapping that contains only referenced entries.
134  *
135  * We condense a vdev when we expect the mapping to shrink (see
136  * vdev_indirect_should_condense()), but only perform one condense at a
137  * time to limit the memory usage.  In addition, we use a separate
138  * open-context thread (spa_condense_indirect_thread) to incrementally
139  * create the new mapping object in a way that minimizes the impact on
140  * the rest of the system.
141  *
142  * == Generating a new mapping ==
143  *
144  * To generate a new mapping, we follow these steps:
145  *
146  * 1. Save the old obsolete space map and create a new mapping object
147  *    (see spa_condense_indirect_start_sync()).  This initializes the
148  *    spa_condensing_indirect_phys with the "previous obsolete space map",
149  *    which is now read only.  Newly obsolete DVAs will be added to a
150  *    new (initially empty) obsolete space map, and will not be
151  *    considered as part of this condense operation.
152  *
153  * 2. Construct in memory the precise counts of obsolete space for each
154  *    mapping entry, by incorporating the obsolete space map into the
155  *    counts.  (See vdev_indirect_mapping_load_obsolete_{counts,spacemap}().)
156  *
157  * 3. Iterate through each mapping entry, writing to the new mapping any
158  *    entries that are not completely obsolete (i.e. which don't have
159  *    obsolete count == mapping length).  (See
160  *    spa_condense_indirect_generate_new_mapping().)
161  *
162  * 4. Destroy the old mapping object and switch over to the new one
163  *    (spa_condense_indirect_complete_sync).
164  *
165  * == Restarting from failure ==
166  *
167  * To restart the condense when we import/open the pool, we must start
168  * at the 2nd step above: reconstruct the precise counts in memory,
169  * based on the space map + counts.  Then in the 3rd step, we start
170  * iterating where we left off: at vimp_max_offset of the new mapping
171  * object.
172  */
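/*
 * Sketch of the restart computation described above, condensed from
 * spa_condense_indirect_thread() below (a simplified excerpt, not
 * additional logic):
 *
 *	max_offset = vdev_indirect_mapping_max_offset(new_mapping);
 *	if (max_offset == 0) {
 *		start_index = 0;
 *	} else {
 *		entry = vdev_indirect_mapping_entry_for_offset_or_next(
 *		    old_mapping, max_offset);
 *		start_index = (entry == NULL) ? UINT64_MAX :
 *		    entry - old_mapping->vim_entries;
 *	}
 */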
173 
174 boolean_t zfs_condense_indirect_vdevs_enable = B_TRUE;
175 
176 /*
177  * Condense if at least this percent of the bytes in the mapping is
178  * obsolete.  With the default of 25%, the amount of space mapped
179  * will be reduced to 1% of its original size after at most 16
180  * condenses.  Higher values will condense less often (causing less
181  * i/o); lower values will reduce the mapping size more quickly.
182  */
183 int zfs_indirect_condense_obsolete_pct = 25;
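/*
 * Worked example for the default above: each condense removes at least
 * 25% of the mapped bytes, so after N condenses at most 0.75^N of the
 * original mapping remains; 0.75^16 ~= 0.01, i.e. roughly 1% after at
 * most 16 condenses.
 */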
184 
185 /*
186  * Condense if the obsolete space map takes up more than this amount of
187  * space on disk (logically).  This limits the amount of disk space
188  * consumed by the obsolete space map; the default of 1GB is small enough
189  * that we typically don't mind "wasting" it.
190  */
191 uint64_t zfs_condense_max_obsolete_bytes = 1024 * 1024 * 1024;
192 
193 /*
194  * Don't bother condensing if the mapping uses less than this amount of
195  * memory.  The default of 128KB is considered a "trivial" amount of
196  * memory and not worth reducing.
197  */
198 uint64_t zfs_condense_min_mapping_bytes = 128 * 1024;
199 
200 /*
201  * This is used by the test suite so that it can ensure that certain
202  * actions happen while in the middle of a condense (which might otherwise
203  * complete too quickly).  If used to reduce the performance impact of
204  * condensing in production, a maximum value of 1 should be sufficient.
205  */
206 int zfs_condense_indirect_commit_entry_delay_ticks = 0;
207 
208 /*
209  * If an indirect split block contains more than this many possible unique
210  * combinations when being reconstructed, consider it too computationally
211  * expensive to check them all.  Instead, try at most this many randomly-
212  * selected combinations each time the block is accessed.  This allows all
213  * segment copies to participate fairly in the reconstruction when all
214  * combinations cannot be checked and prevents repeated use of one bad copy.
215  */
216 int zfs_reconstruct_indirect_combinations_max = 256;
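/*
 * Worked example for the limit above (hypothetical split counts): a block
 * split into 3 segments with 4 unique copies each has 4^3 = 64 possible
 * combinations, so all of them are checked.  A block split into 4 segments
 * with 5 unique copies each has 5^4 = 625 > 256 combinations, so only
 * randomly-selected combinations are tried.
 */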
217 
218 
219 /*
220  * Enable to simulate damaged segments and validate reconstruction.
221  * Used by ztest
222  */
223 unsigned long zfs_reconstruct_indirect_damage_fraction = 0;
224 
225 /*
226  * The indirect_child_t represents the vdev that we will read from, when we
227  * need to read all copies of the data (e.g. for scrub or reconstruction).
228  * For plain (non-mirror) top-level vdevs (i.e. is_vdev is not a mirror),
229  * ic_vdev is the same as is_vdev.  However, for mirror top-level vdevs,
230  * ic_vdev is a child of the mirror.
231  */
232 typedef struct indirect_child {
233 	abd_t *ic_data;
234 	vdev_t *ic_vdev;
235 
236 	/*
237 	 * ic_duplicate is NULL when the ic_data contents are unique; when it
238 	 * is determined to be a duplicate, it references the primary child.
239 	 */
240 	struct indirect_child *ic_duplicate;
241 	list_node_t ic_node; /* node on is_unique_child */
242 } indirect_child_t;
243 
244 /*
245  * The indirect_split_t represents one mapped segment of an i/o to the
246  * indirect vdev. For non-split (contiguously-mapped) blocks, there will be
247  * only one indirect_split_t, with is_split_offset==0 and is_size==io_size.
248  * For split blocks, there will be several of these.
249  */
250 typedef struct indirect_split {
251 	list_node_t is_node; /* link on iv_splits */
252 
253 	/*
254 	 * is_split_offset is the offset into the i/o.
255 	 * This is the sum of the previous splits' is_size's.
256 	 */
257 	uint64_t is_split_offset;
258 
259 	vdev_t *is_vdev; /* top-level vdev */
260 	uint64_t is_target_offset; /* offset on is_vdev */
261 	uint64_t is_size;
262 	int is_children; /* number of entries in is_child[] */
263 	int is_unique_children; /* number of entries in is_unique_child */
264 	list_t is_unique_child;
265 
266 	/*
267 	 * is_good_child is the child that we are currently using to
268 	 * attempt reconstruction.
269 	 */
270 	indirect_child_t *is_good_child;
271 
272 	indirect_child_t is_child[1]; /* variable-length */
273 } indirect_split_t;
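/*
 * Illustrative example (hypothetical sizes): a 48KB i/o whose DVA is mapped
 * as a 16KB segment followed by a 32KB segment yields two indirect_split_t's:
 *
 *	split 0: is_split_offset = 0,    is_size = 16KB
 *	split 1: is_split_offset = 16KB, is_size = 32KB
 *
 * i.e. each is_split_offset is the sum of the preceding splits' is_size's,
 * and the is_size's sum to the i/o size.
 */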
274 
275 /*
276  * The indirect_vsd_t is associated with each i/o to the indirect vdev.
277  * It is the "Vdev-Specific Data" in the zio_t's io_vsd.
278  */
279 typedef struct indirect_vsd {
280 	boolean_t iv_split_block;
281 	boolean_t iv_reconstruct;
282 	uint64_t iv_unique_combinations;
283 	uint64_t iv_attempts;
284 	uint64_t iv_attempts_max;
285 
286 	list_t iv_splits; /* list of indirect_split_t's */
287 } indirect_vsd_t;
288 
289 static void
290 vdev_indirect_map_free(zio_t *zio)
291 {
292 	indirect_vsd_t *iv = zio->io_vsd;
293 
294 	indirect_split_t *is;
295 	while ((is = list_head(&iv->iv_splits)) != NULL) {
296 		for (int c = 0; c < is->is_children; c++) {
297 			indirect_child_t *ic = &is->is_child[c];
298 			if (ic->ic_data != NULL)
299 				abd_free(ic->ic_data);
300 		}
301 		list_remove(&iv->iv_splits, is);
302 
303 		indirect_child_t *ic;
304 		while ((ic = list_head(&is->is_unique_child)) != NULL)
305 			list_remove(&is->is_unique_child, ic);
306 
307 		list_destroy(&is->is_unique_child);
308 
309 		kmem_free(is,
310 		    offsetof(indirect_split_t, is_child[is->is_children]));
311 	}
312 	kmem_free(iv, sizeof (*iv));
313 }
314 
315 static const zio_vsd_ops_t vdev_indirect_vsd_ops = {
316 	vdev_indirect_map_free,
317 	zio_vsd_default_cksum_report
318 };
319 /*
320  * Mark the given offset and size as being obsolete.
321  */
322 void
323 vdev_indirect_mark_obsolete(vdev_t *vd, uint64_t offset, uint64_t size)
324 {
325 	spa_t *spa = vd->vdev_spa;
326 
327 	ASSERT3U(vd->vdev_indirect_config.vic_mapping_object, !=, 0);
328 	ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops);
329 	ASSERT(size > 0);
330 	VERIFY(vdev_indirect_mapping_entry_for_offset(
331 	    vd->vdev_indirect_mapping, offset) != NULL);
332 
333 	if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
334 		mutex_enter(&vd->vdev_obsolete_lock);
335 		range_tree_add(vd->vdev_obsolete_segments, offset, size);
336 		mutex_exit(&vd->vdev_obsolete_lock);
337 		vdev_dirty(vd, 0, NULL, spa_syncing_txg(spa));
338 	}
339 }
340 
341 /*
342  * Mark the DVA vdev_id:offset:size as being obsolete in the given tx. This
343  * wrapper is provided because the DMU does not know about vdev_t's and
344  * cannot directly call vdev_indirect_mark_obsolete.
345  */
346 void
347 spa_vdev_indirect_mark_obsolete(spa_t *spa, uint64_t vdev_id, uint64_t offset,
348     uint64_t size, dmu_tx_t *tx)
349 {
350 	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
351 	ASSERT(dmu_tx_is_syncing(tx));
352 
353 	/* The DMU can only remap indirect vdevs. */
354 	ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
355 	vdev_indirect_mark_obsolete(vd, offset, size);
356 }
357 
358 static spa_condensing_indirect_t *
359 spa_condensing_indirect_create(spa_t *spa)
360 {
361 	spa_condensing_indirect_phys_t *scip =
362 	    &spa->spa_condensing_indirect_phys;
363 	spa_condensing_indirect_t *sci = kmem_zalloc(sizeof (*sci), KM_SLEEP);
364 	objset_t *mos = spa->spa_meta_objset;
365 
366 	for (int i = 0; i < TXG_SIZE; i++) {
367 		list_create(&sci->sci_new_mapping_entries[i],
368 		    sizeof (vdev_indirect_mapping_entry_t),
369 		    offsetof(vdev_indirect_mapping_entry_t, vime_node));
370 	}
371 
372 	sci->sci_new_mapping =
373 	    vdev_indirect_mapping_open(mos, scip->scip_next_mapping_object);
374 
375 	return (sci);
376 }
377 
378 static void
379 spa_condensing_indirect_destroy(spa_condensing_indirect_t *sci)
380 {
381 	for (int i = 0; i < TXG_SIZE; i++)
382 		list_destroy(&sci->sci_new_mapping_entries[i]);
383 
384 	if (sci->sci_new_mapping != NULL)
385 		vdev_indirect_mapping_close(sci->sci_new_mapping);
386 
387 	kmem_free(sci, sizeof (*sci));
388 }
389 
390 boolean_t
391 vdev_indirect_should_condense(vdev_t *vd)
392 {
393 	vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
394 	spa_t *spa = vd->vdev_spa;
395 
396 	ASSERT(dsl_pool_sync_context(spa->spa_dsl_pool));
397 
398 	if (!zfs_condense_indirect_vdevs_enable)
399 		return (B_FALSE);
400 
401 	/*
402 	 * We can only condense one indirect vdev at a time.
403 	 */
404 	if (spa->spa_condensing_indirect != NULL)
405 		return (B_FALSE);
406 
407 	if (spa_shutting_down(spa))
408 		return (B_FALSE);
409 
410 	/*
411 	 * The mapping object size must not change while we are
412 	 * condensing, so we can only condense indirect vdevs
413 	 * (not vdevs that are still in the middle of being removed).
414 	 */
415 	if (vd->vdev_ops != &vdev_indirect_ops)
416 		return (B_FALSE);
417 
418 	/*
419 	 * If nothing new has been marked obsolete, there is no
420 	 * point in condensing.
421 	 */
422 	if (vd->vdev_obsolete_sm == NULL) {
423 		ASSERT0(vdev_obsolete_sm_object(vd));
424 		return (B_FALSE);
425 	}
426 
427 	ASSERT(vd->vdev_obsolete_sm != NULL);
428 
429 	ASSERT3U(vdev_obsolete_sm_object(vd), ==,
430 	    space_map_object(vd->vdev_obsolete_sm));
431 
432 	uint64_t bytes_mapped = vdev_indirect_mapping_bytes_mapped(vim);
433 	uint64_t bytes_obsolete = space_map_allocated(vd->vdev_obsolete_sm);
434 	uint64_t mapping_size = vdev_indirect_mapping_size(vim);
435 	uint64_t obsolete_sm_size = space_map_length(vd->vdev_obsolete_sm);
436 
437 	ASSERT3U(bytes_obsolete, <=, bytes_mapped);
438 
439 	/*
440 	 * If a high percentage of the bytes that are mapped have become
441 	 * obsolete, condense (unless the mapping is already small enough).
442 	 * This has a good chance of reducing the amount of memory used
443 	 * by the mapping.
444 	 */
445 	if (bytes_obsolete * 100 / bytes_mapped >=
446 	    zfs_indirect_condense_obsolete_pct &&
447 	    mapping_size > zfs_condense_min_mapping_bytes) {
448 		zfs_dbgmsg("should condense vdev %llu because obsolete "
449 		    "spacemap covers %d%% of %lluMB mapping",
450 		    (u_longlong_t)vd->vdev_id,
451 		    (int)(bytes_obsolete * 100 / bytes_mapped),
452 		    (u_longlong_t)bytes_mapped / 1024 / 1024);
453 		return (B_TRUE);
454 	}
455 
456 	/*
457 	 * If the obsolete space map takes up too much space on disk,
458 	 * condense in order to free up this disk space.
459 	 */
460 	if (obsolete_sm_size >= zfs_condense_max_obsolete_bytes) {
461 		zfs_dbgmsg("should condense vdev %llu because obsolete sm "
462 		    "length %lluMB >= max size %lluMB",
463 		    (u_longlong_t)vd->vdev_id,
464 		    (u_longlong_t)obsolete_sm_size / 1024 / 1024,
465 		    (u_longlong_t)zfs_condense_max_obsolete_bytes /
466 		    1024 / 1024);
467 		return (B_TRUE);
468 	}
469 
470 	return (B_FALSE);
471 }
472 
473 /*
474  * This sync task completes (finishes) a condense, deleting the old
475  * mapping and replacing it with the new one.
476  */
477 static void
478 spa_condense_indirect_complete_sync(void *arg, dmu_tx_t *tx)
479 {
480 	spa_condensing_indirect_t *sci = arg;
481 	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
482 	spa_condensing_indirect_phys_t *scip =
483 	    &spa->spa_condensing_indirect_phys;
484 	vdev_t *vd = vdev_lookup_top(spa, scip->scip_vdev);
485 	vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
486 	objset_t *mos = spa->spa_meta_objset;
487 	vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
488 	uint64_t old_count = vdev_indirect_mapping_num_entries(old_mapping);
489 	uint64_t new_count =
490 	    vdev_indirect_mapping_num_entries(sci->sci_new_mapping);
491 
492 	ASSERT(dmu_tx_is_syncing(tx));
493 	ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
494 	ASSERT3P(sci, ==, spa->spa_condensing_indirect);
495 	for (int i = 0; i < TXG_SIZE; i++) {
496 		ASSERT(list_is_empty(&sci->sci_new_mapping_entries[i]));
497 	}
498 	ASSERT(vic->vic_mapping_object != 0);
499 	ASSERT3U(vd->vdev_id, ==, scip->scip_vdev);
500 	ASSERT(scip->scip_next_mapping_object != 0);
501 	ASSERT(scip->scip_prev_obsolete_sm_object != 0);
502 
503 	/*
504 	 * Reset vdev_indirect_mapping to refer to the new object.
505 	 */
506 	rw_enter(&vd->vdev_indirect_rwlock, RW_WRITER);
507 	vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
508 	vd->vdev_indirect_mapping = sci->sci_new_mapping;
509 	rw_exit(&vd->vdev_indirect_rwlock);
510 
511 	sci->sci_new_mapping = NULL;
512 	vdev_indirect_mapping_free(mos, vic->vic_mapping_object, tx);
513 	vic->vic_mapping_object = scip->scip_next_mapping_object;
514 	scip->scip_next_mapping_object = 0;
515 
516 	space_map_free_obj(mos, scip->scip_prev_obsolete_sm_object, tx);
517 	spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
518 	scip->scip_prev_obsolete_sm_object = 0;
519 
520 	scip->scip_vdev = 0;
521 
522 	VERIFY0(zap_remove(mos, DMU_POOL_DIRECTORY_OBJECT,
523 	    DMU_POOL_CONDENSING_INDIRECT, tx));
524 	spa_condensing_indirect_destroy(spa->spa_condensing_indirect);
525 	spa->spa_condensing_indirect = NULL;
526 
527 	zfs_dbgmsg("finished condense of vdev %llu in txg %llu: "
528 	    "new mapping object %llu has %llu entries "
529 	    "(was %llu entries)",
530 	    vd->vdev_id, dmu_tx_get_txg(tx), vic->vic_mapping_object,
531 	    new_count, old_count);
532 
533 	vdev_config_dirty(spa->spa_root_vdev);
534 }
535 
536 /*
537  * This sync task appends entries to the new mapping object.
538  */
539 static void
540 spa_condense_indirect_commit_sync(void *arg, dmu_tx_t *tx)
541 {
542 	spa_condensing_indirect_t *sci = arg;
543 	uint64_t txg = dmu_tx_get_txg(tx);
544 	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
545 
546 	ASSERT(dmu_tx_is_syncing(tx));
547 	ASSERT3P(sci, ==, spa->spa_condensing_indirect);
548 
549 	vdev_indirect_mapping_add_entries(sci->sci_new_mapping,
550 	    &sci->sci_new_mapping_entries[txg & TXG_MASK], tx);
551 	ASSERT(list_is_empty(&sci->sci_new_mapping_entries[txg & TXG_MASK]));
552 }
553 
554 /*
555  * Open-context function to add one entry to the new mapping.  The new
556  * entry will be remembered and written from syncing context.
557  */
558 static void
559 spa_condense_indirect_commit_entry(spa_t *spa,
560     vdev_indirect_mapping_entry_phys_t *vimep, uint32_t count)
561 {
562 	spa_condensing_indirect_t *sci = spa->spa_condensing_indirect;
563 
564 	ASSERT3U(count, <, DVA_GET_ASIZE(&vimep->vimep_dst));
565 
566 	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
567 	dmu_tx_hold_space(tx, sizeof (*vimep) + sizeof (count));
568 	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
569 	int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
570 
571 	/*
572 	 * If we are the first entry committed this txg, kick off the sync
573 	 * task to write to the MOS on our behalf.
574 	 */
575 	if (list_is_empty(&sci->sci_new_mapping_entries[txgoff])) {
576 		dsl_sync_task_nowait(dmu_tx_pool(tx),
577 		    spa_condense_indirect_commit_sync, sci,
578 		    0, ZFS_SPACE_CHECK_NONE, tx);
579 	}
580 
581 	vdev_indirect_mapping_entry_t *vime =
582 	    kmem_alloc(sizeof (*vime), KM_SLEEP);
583 	vime->vime_mapping = *vimep;
584 	vime->vime_obsolete_count = count;
585 	list_insert_tail(&sci->sci_new_mapping_entries[txgoff], vime);
586 
587 	dmu_tx_commit(tx);
588 }
589 
590 static void
591 spa_condense_indirect_generate_new_mapping(vdev_t *vd,
592     uint32_t *obsolete_counts, uint64_t start_index, zthr_t *zthr)
593 {
594 	spa_t *spa = vd->vdev_spa;
595 	uint64_t mapi = start_index;
596 	vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
597 	uint64_t old_num_entries =
598 	    vdev_indirect_mapping_num_entries(old_mapping);
599 
600 	ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
601 	ASSERT3U(vd->vdev_id, ==, spa->spa_condensing_indirect_phys.scip_vdev);
602 
603 	zfs_dbgmsg("starting condense of vdev %llu from index %llu",
604 	    (u_longlong_t)vd->vdev_id,
605 	    (u_longlong_t)mapi);
606 
607 	while (mapi < old_num_entries) {
608 
609 		if (zthr_iscancelled(zthr)) {
610 			zfs_dbgmsg("pausing condense of vdev %llu "
611 			    "at index %llu", (u_longlong_t)vd->vdev_id,
612 			    (u_longlong_t)mapi);
613 			break;
614 		}
615 
616 		vdev_indirect_mapping_entry_phys_t *entry =
617 		    &old_mapping->vim_entries[mapi];
618 		uint64_t entry_size = DVA_GET_ASIZE(&entry->vimep_dst);
619 		ASSERT3U(obsolete_counts[mapi], <=, entry_size);
620 		if (obsolete_counts[mapi] < entry_size) {
621 			spa_condense_indirect_commit_entry(spa, entry,
622 			    obsolete_counts[mapi]);
623 
624 			/*
625 			 * This delay may be requested for testing, debugging,
626 			 * or performance reasons.
627 			 */
628 			delay(zfs_condense_indirect_commit_entry_delay_ticks);
629 		}
630 
631 		mapi++;
632 	}
633 }
634 
635 /* ARGSUSED */
636 static boolean_t
637 spa_condense_indirect_thread_check(void *arg, zthr_t *zthr)
638 {
639 	spa_t *spa = arg;
640 
641 	return (spa->spa_condensing_indirect != NULL);
642 }
643 
644 /* ARGSUSED */
645 static void
646 spa_condense_indirect_thread(void *arg, zthr_t *zthr)
647 {
648 	spa_t *spa = arg;
649 	vdev_t *vd;
650 
651 	ASSERT3P(spa->spa_condensing_indirect, !=, NULL);
652 	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
653 	vd = vdev_lookup_top(spa, spa->spa_condensing_indirect_phys.scip_vdev);
654 	ASSERT3P(vd, !=, NULL);
655 	spa_config_exit(spa, SCL_VDEV, FTAG);
656 
657 	spa_condensing_indirect_t *sci = spa->spa_condensing_indirect;
658 	spa_condensing_indirect_phys_t *scip =
659 	    &spa->spa_condensing_indirect_phys;
660 	uint32_t *counts;
661 	uint64_t start_index;
662 	vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
663 	space_map_t *prev_obsolete_sm = NULL;
664 
665 	ASSERT3U(vd->vdev_id, ==, scip->scip_vdev);
666 	ASSERT(scip->scip_next_mapping_object != 0);
667 	ASSERT(scip->scip_prev_obsolete_sm_object != 0);
668 	ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
669 
670 	for (int i = 0; i < TXG_SIZE; i++) {
671 		/*
672 		 * The list must start out empty in order for the
673 		 * _commit_sync() sync task to be properly registered
674 		 * on the first call to _commit_entry(); so it's wise
675 		 * to double check and ensure we actually are starting
676 		 * with empty lists.
677 		 */
678 		ASSERT(list_is_empty(&sci->sci_new_mapping_entries[i]));
679 	}
680 
681 	VERIFY0(space_map_open(&prev_obsolete_sm, spa->spa_meta_objset,
682 	    scip->scip_prev_obsolete_sm_object, 0, vd->vdev_asize, 0));
683 	counts = vdev_indirect_mapping_load_obsolete_counts(old_mapping);
684 	if (prev_obsolete_sm != NULL) {
685 		vdev_indirect_mapping_load_obsolete_spacemap(old_mapping,
686 		    counts, prev_obsolete_sm);
687 	}
688 	space_map_close(prev_obsolete_sm);
689 
690 	/*
691 	 * Generate new mapping.  Determine what index to continue from
692 	 * based on the max offset that we've already written in the
693 	 * new mapping.
694 	 */
695 	uint64_t max_offset =
696 	    vdev_indirect_mapping_max_offset(sci->sci_new_mapping);
697 	if (max_offset == 0) {
698 		/* We haven't written anything to the new mapping yet. */
699 		start_index = 0;
700 	} else {
701 		/*
702 		 * Pick up from where we left off.  The call to
703 		 * _entry_for_offset_or_next() returns a pointer into
704 		 * the vim_entries array.  If max_offset is greater than
705 		 * any of the mappings contained in the table, NULL will
706 		 * be returned, indicating that we've exhausted our
707 		 * iteration of the old_mapping.
708 		 */
709 
710 		vdev_indirect_mapping_entry_phys_t *entry =
711 		    vdev_indirect_mapping_entry_for_offset_or_next(old_mapping,
712 		    max_offset);
713 
714 		if (entry == NULL) {
715 			/*
716 			 * We've already written the whole new mapping.
717 			 * This special value will cause us to skip the
718 			 * generate_new_mapping step and just do the sync
719 			 * task to complete the condense.
720 			 */
721 			start_index = UINT64_MAX;
722 		} else {
723 			start_index = entry - old_mapping->vim_entries;
724 			ASSERT3U(start_index, <,
725 			    vdev_indirect_mapping_num_entries(old_mapping));
726 		}
727 	}
728 
729 	spa_condense_indirect_generate_new_mapping(vd, counts,
730 	    start_index, zthr);
731 
732 	vdev_indirect_mapping_free_obsolete_counts(old_mapping, counts);
733 
734 	/*
735 	 * If the zthr has received a cancellation signal while running
736 	 * in generate_new_mapping() or at any point after that, then bail
737 	 * early. We don't want to complete the condense if the spa is
738 	 * shutting down.
739 	 */
740 	if (zthr_iscancelled(zthr))
741 		return;
742 
743 	VERIFY0(dsl_sync_task(spa_name(spa), NULL,
744 	    spa_condense_indirect_complete_sync, sci, 0,
745 	    ZFS_SPACE_CHECK_EXTRA_RESERVED));
746 }
747 
748 /*
749  * Sync task to begin the condensing process.
750  */
751 void
752 spa_condense_indirect_start_sync(vdev_t *vd, dmu_tx_t *tx)
753 {
754 	spa_t *spa = vd->vdev_spa;
755 	spa_condensing_indirect_phys_t *scip =
756 	    &spa->spa_condensing_indirect_phys;
757 
758 	ASSERT0(scip->scip_next_mapping_object);
759 	ASSERT0(scip->scip_prev_obsolete_sm_object);
760 	ASSERT0(scip->scip_vdev);
761 	ASSERT(dmu_tx_is_syncing(tx));
762 	ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
763 	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_OBSOLETE_COUNTS));
764 	ASSERT(vdev_indirect_mapping_num_entries(vd->vdev_indirect_mapping));
765 
766 	uint64_t obsolete_sm_obj = vdev_obsolete_sm_object(vd);
767 	ASSERT(obsolete_sm_obj != 0);
768 
769 	scip->scip_vdev = vd->vdev_id;
770 	scip->scip_next_mapping_object =
771 	    vdev_indirect_mapping_alloc(spa->spa_meta_objset, tx);
772 
773 	scip->scip_prev_obsolete_sm_object = obsolete_sm_obj;
774 
775 	/*
776 	 * We don't need to allocate a new space map object, since
777 	 * vdev_indirect_sync_obsolete will allocate one when needed.
778 	 */
779 	space_map_close(vd->vdev_obsolete_sm);
780 	vd->vdev_obsolete_sm = NULL;
781 	VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
782 	    VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, tx));
783 
784 	VERIFY0(zap_add(spa->spa_dsl_pool->dp_meta_objset,
785 	    DMU_POOL_DIRECTORY_OBJECT,
786 	    DMU_POOL_CONDENSING_INDIRECT, sizeof (uint64_t),
787 	    sizeof (*scip) / sizeof (uint64_t), scip, tx));
788 
789 	ASSERT3P(spa->spa_condensing_indirect, ==, NULL);
790 	spa->spa_condensing_indirect = spa_condensing_indirect_create(spa);
791 
792 	zfs_dbgmsg("starting condense of vdev %llu in txg %llu: "
793 	    "posm=%llu nm=%llu",
794 	    vd->vdev_id, dmu_tx_get_txg(tx),
795 	    (u_longlong_t)scip->scip_prev_obsolete_sm_object,
796 	    (u_longlong_t)scip->scip_next_mapping_object);
797 
798 	zthr_wakeup(spa->spa_condense_zthr);
799 }
800 
801 /*
802  * Sync to the given vdev's obsolete space map any segments that are no longer
803  * referenced as of the given txg.
804  *
805  * If the obsolete space map doesn't exist yet, create and open it.
806  */
807 void
808 vdev_indirect_sync_obsolete(vdev_t *vd, dmu_tx_t *tx)
809 {
810 	spa_t *spa = vd->vdev_spa;
811 	vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
812 
813 	ASSERT3U(vic->vic_mapping_object, !=, 0);
814 	ASSERT(range_tree_space(vd->vdev_obsolete_segments) > 0);
815 	ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops);
816 	ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS));
817 
818 	if (vdev_obsolete_sm_object(vd) == 0) {
819 		uint64_t obsolete_sm_object =
820 		    space_map_alloc(spa->spa_meta_objset,
821 		    zfs_vdev_standard_sm_blksz, tx);
822 
823 		ASSERT(vd->vdev_top_zap != 0);
824 		VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
825 		    VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM,
826 		    sizeof (obsolete_sm_object), 1, &obsolete_sm_object, tx));
827 		ASSERT3U(vdev_obsolete_sm_object(vd), !=, 0);
828 
829 		spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
830 		VERIFY0(space_map_open(&vd->vdev_obsolete_sm,
831 		    spa->spa_meta_objset, obsolete_sm_object,
832 		    0, vd->vdev_asize, 0));
833 	}
834 
835 	ASSERT(vd->vdev_obsolete_sm != NULL);
836 	ASSERT3U(vdev_obsolete_sm_object(vd), ==,
837 	    space_map_object(vd->vdev_obsolete_sm));
838 
839 	space_map_write(vd->vdev_obsolete_sm,
840 	    vd->vdev_obsolete_segments, SM_ALLOC, SM_NO_VDEVID, tx);
841 	range_tree_vacate(vd->vdev_obsolete_segments, NULL, NULL);
842 }
843 
844 int
845 spa_condense_init(spa_t *spa)
846 {
847 	int error = zap_lookup(spa->spa_meta_objset,
848 	    DMU_POOL_DIRECTORY_OBJECT,
849 	    DMU_POOL_CONDENSING_INDIRECT, sizeof (uint64_t),
850 	    sizeof (spa->spa_condensing_indirect_phys) / sizeof (uint64_t),
851 	    &spa->spa_condensing_indirect_phys);
852 	if (error == 0) {
853 		if (spa_writeable(spa)) {
854 			spa->spa_condensing_indirect =
855 			    spa_condensing_indirect_create(spa);
856 		}
857 		return (0);
858 	} else if (error == ENOENT) {
859 		return (0);
860 	} else {
861 		return (error);
862 	}
863 }
864 
865 void
866 spa_condense_fini(spa_t *spa)
867 {
868 	if (spa->spa_condensing_indirect != NULL) {
869 		spa_condensing_indirect_destroy(spa->spa_condensing_indirect);
870 		spa->spa_condensing_indirect = NULL;
871 	}
872 }
873 
874 void
875 spa_start_indirect_condensing_thread(spa_t *spa)
876 {
877 	ASSERT3P(spa->spa_condense_zthr, ==, NULL);
878 	spa->spa_condense_zthr = zthr_create(spa_condense_indirect_thread_check,
879 	    spa_condense_indirect_thread, spa);
880 }
881 
882 /*
883  * Gets the obsolete spacemap object from the vdev's ZAP.
884  * Returns the spacemap object, or 0 if it wasn't in the ZAP or the ZAP doesn't
885  * exist yet.
886  */
887 int
888 vdev_obsolete_sm_object(vdev_t *vd)
889 {
890 	ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
891 	if (vd->vdev_top_zap == 0) {
892 		return (0);
893 	}
894 
895 	uint64_t sm_obj = 0;
896 	int err = zap_lookup(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
897 	    VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, sizeof (sm_obj), 1, &sm_obj);
898 
899 	ASSERT(err == 0 || err == ENOENT);
900 
901 	return (sm_obj);
902 }
903 
904 boolean_t
905 vdev_obsolete_counts_are_precise(vdev_t *vd)
906 {
907 	ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
908 	if (vd->vdev_top_zap == 0) {
909 		return (B_FALSE);
910 	}
911 
912 	uint64_t val = 0;
913 	int err = zap_lookup(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
914 	    VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, sizeof (val), 1, &val);
915 
916 	ASSERT(err == 0 || err == ENOENT);
917 
918 	return (val != 0);
919 }
920 
921 /* ARGSUSED */
922 static void
923 vdev_indirect_close(vdev_t *vd)
924 {
925 }
926 
927 /* ARGSUSED */
928 static int
929 vdev_indirect_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
930     uint64_t *ashift)
931 {
932 	*psize = *max_psize = vd->vdev_asize +
933 	    VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
934 	*ashift = vd->vdev_ashift;
935 	return (0);
936 }
937 
938 typedef struct remap_segment {
939 	vdev_t *rs_vd;
940 	uint64_t rs_offset;
941 	uint64_t rs_asize;
942 	uint64_t rs_split_offset;
943 	list_node_t rs_node;
944 } remap_segment_t;
945 
946 remap_segment_t *
947 rs_alloc(vdev_t *vd, uint64_t offset, uint64_t asize, uint64_t split_offset)
948 {
949 	remap_segment_t *rs = kmem_alloc(sizeof (remap_segment_t), KM_SLEEP);
950 	rs->rs_vd = vd;
951 	rs->rs_offset = offset;
952 	rs->rs_asize = asize;
953 	rs->rs_split_offset = split_offset;
954 	return (rs);
955 }
956 
957 /*
958  * Given an indirect vdev and an extent on that vdev, it duplicates the
959  * physical entries of the indirect mapping that correspond to the extent
960  * to a new array and returns a pointer to it. In addition, copied_entries
961  * is populated with the number of mapping entries that were duplicated.
962  *
963  * Note that the function assumes that the caller holds vdev_indirect_rwlock.
964  * This ensures that the mapping won't change due to condensing as we
965  * copy over its contents.
966  *
967  * Finally, since we are doing an allocation, it is up to the caller to
968  * free the array allocated in this function.
969  */
970 vdev_indirect_mapping_entry_phys_t *
971 vdev_indirect_mapping_duplicate_adjacent_entries(vdev_t *vd, uint64_t offset,
972     uint64_t asize, uint64_t *copied_entries)
973 {
974 	vdev_indirect_mapping_entry_phys_t *duplicate_mappings = NULL;
975 	vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
976 	uint64_t entries = 0;
977 
978 	ASSERT(RW_READ_HELD(&vd->vdev_indirect_rwlock));
979 
980 	vdev_indirect_mapping_entry_phys_t *first_mapping =
981 	    vdev_indirect_mapping_entry_for_offset(vim, offset);
982 	ASSERT3P(first_mapping, !=, NULL);
983 
984 	vdev_indirect_mapping_entry_phys_t *m = first_mapping;
985 	while (asize > 0) {
986 		uint64_t size = DVA_GET_ASIZE(&m->vimep_dst);
987 
988 		ASSERT3U(offset, >=, DVA_MAPPING_GET_SRC_OFFSET(m));
989 		ASSERT3U(offset, <, DVA_MAPPING_GET_SRC_OFFSET(m) + size);
990 
991 		uint64_t inner_offset = offset - DVA_MAPPING_GET_SRC_OFFSET(m);
992 		uint64_t inner_size = MIN(asize, size - inner_offset);
993 
994 		offset += inner_size;
995 		asize -= inner_size;
996 		entries++;
997 		m++;
998 	}
999 
1000 	size_t copy_length = entries * sizeof (*first_mapping);
1001 	duplicate_mappings = kmem_alloc(copy_length, KM_SLEEP);
1002 	bcopy(first_mapping, duplicate_mappings, copy_length);
1003 	*copied_entries = entries;
1004 
1005 	return (duplicate_mappings);
1006 }
1007 
1008 /*
1009  * Goes through the relevant indirect mappings until it hits a concrete vdev
1010  * and issues the callback. On the way to the concrete vdev, if any other
1011  * indirect vdevs are encountered, then the callback will also be called on
1012  * each of those indirect vdevs. For example, if the segment is mapped to
1013  * segment A on indirect vdev 1, and then segment A on indirect vdev 1 is
1014  * mapped to segment B on concrete vdev 2, then the callback will be called on
1015  * both vdev 1 and vdev 2.
1016  *
1017  * While the callback passed to vdev_indirect_remap() is called on every vdev
1018  * the function encounters, certain callbacks only care about concrete vdevs.
1019  * These types of callbacks should return immediately and explicitly when they
1020  * are called on an indirect vdev.
1021  *
1022  * Because there is a possibility that a DVA section in the indirect device
1023  * has been split into multiple sections in our mapping, we keep track
1024  * of the relevant contiguous segments of the new location (remap_segment_t)
1025  * in a stack. This way we can call the callback for each of the new sections
1026  * created by a single section of the indirect device. Note though, that in
1027  * this scenario the callbacks in each split block won't occur in-order in
1028  * terms of offset, so callers should not make any assumptions about that.
1029  *
1030  * For callbacks that don't handle split blocks and immediately return when
1031  * they encounter them (as is the case for remap_blkptr_cb), the caller can
1032  * assume that its callback will be applied from the first indirect vdev
1033  * encountered to the last one and then the concrete vdev, in that order.
1034  */
1035 static void
1036 vdev_indirect_remap(vdev_t *vd, uint64_t offset, uint64_t asize,
1037     void (*func)(uint64_t, vdev_t *, uint64_t, uint64_t, void *), void *arg)
1038 {
1039 	list_t stack;
1040 	spa_t *spa = vd->vdev_spa;
1041 
1042 	list_create(&stack, sizeof (remap_segment_t),
1043 	    offsetof(remap_segment_t, rs_node));
1044 
1045 	for (remap_segment_t *rs = rs_alloc(vd, offset, asize, 0);
1046 	    rs != NULL; rs = list_remove_head(&stack)) {
1047 		vdev_t *v = rs->rs_vd;
1048 		uint64_t num_entries = 0;
1049 
1050 		ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
1051 		ASSERT(rs->rs_asize > 0);
1052 
1053 		/*
1054 		 * Note: As this function can be called from open context
1055 		 * (e.g. zio_read()), we need the following rwlock to
1056 		 * prevent the mapping from being changed by condensing.
1057 		 *
1058 		 * So we grab the lock and we make a copy of the entries
1059 		 * that are relevant to the extent that we are working on.
1060 		 * Once that is done, we drop the lock and iterate over
1061 		 * our copy of the mapping.  Once we are done with the
1062 		 * remap segment and we free it, we also free our copy of
1063 		 * the indirect mapping entries that are relevant to it.
1064 		 *
1065 		 * This way we don't need to wait until the function is
1066 		 * finished with a segment, to condense it. In addition, we
1067 		 * don't need a recursive rwlock for the case that a call to
1068 		 * vdev_indirect_remap() needs to call itself (through the
1069 		 * codepath of its callback) for the same vdev in the middle
1070 		 * of its execution.
1071 		 */
1072 		rw_enter(&v->vdev_indirect_rwlock, RW_READER);
1073 		vdev_indirect_mapping_t *vim = v->vdev_indirect_mapping;
1074 		ASSERT3P(vim, !=, NULL);
1075 
1076 		vdev_indirect_mapping_entry_phys_t *mapping =
1077 		    vdev_indirect_mapping_duplicate_adjacent_entries(v,
1078 		    rs->rs_offset, rs->rs_asize, &num_entries);
1079 		ASSERT3P(mapping, !=, NULL);
1080 		ASSERT3U(num_entries, >, 0);
1081 		rw_exit(&v->vdev_indirect_rwlock);
1082 
1083 		for (uint64_t i = 0; i < num_entries; i++) {
1084 			/*
1085 			 * Note: the vdev_indirect_mapping can not change
1086 			 * while we are running.  It only changes while the
1087 			 * removal is in progress, and then only from syncing
1088 			 * context. While a removal is in progress, this
1089 			 * function is only called for frees, which also only
1090 			 * happen from syncing context.
1091 			 */
1092 			vdev_indirect_mapping_entry_phys_t *m = &mapping[i];
1093 
1094 			ASSERT3P(m, !=, NULL);
1095 			ASSERT3U(rs->rs_asize, >, 0);
1096 
1097 			uint64_t size = DVA_GET_ASIZE(&m->vimep_dst);
1098 			uint64_t dst_offset = DVA_GET_OFFSET(&m->vimep_dst);
1099 			uint64_t dst_vdev = DVA_GET_VDEV(&m->vimep_dst);
1100 
1101 			ASSERT3U(rs->rs_offset, >=,
1102 			    DVA_MAPPING_GET_SRC_OFFSET(m));
1103 			ASSERT3U(rs->rs_offset, <,
1104 			    DVA_MAPPING_GET_SRC_OFFSET(m) + size);
1105 			ASSERT3U(dst_vdev, !=, v->vdev_id);
1106 
1107 			uint64_t inner_offset = rs->rs_offset -
1108 			    DVA_MAPPING_GET_SRC_OFFSET(m);
1109 			uint64_t inner_size =
1110 			    MIN(rs->rs_asize, size - inner_offset);
1111 
1112 			vdev_t *dst_v = vdev_lookup_top(spa, dst_vdev);
1113 			ASSERT3P(dst_v, !=, NULL);
1114 
1115 			if (dst_v->vdev_ops == &vdev_indirect_ops) {
1116 				list_insert_head(&stack,
1117 				    rs_alloc(dst_v, dst_offset + inner_offset,
1118 				    inner_size, rs->rs_split_offset));
1119 
1120 			}
1121 
1122 			if ((zfs_flags & ZFS_DEBUG_INDIRECT_REMAP) &&
1123 			    IS_P2ALIGNED(inner_size, 2 * SPA_MINBLOCKSIZE)) {
1124 				/*
1125 				 * Note: This clause exists solely for
1126 				 * testing purposes. We use it to ensure that
1127 				 * split blocks work and that the callbacks
1128 				 * using them yield the same result if issued
1129 				 * in reverse order.
1130 				 */
1131 				uint64_t inner_half = inner_size / 2;
1132 
1133 				func(rs->rs_split_offset + inner_half, dst_v,
1134 				    dst_offset + inner_offset + inner_half,
1135 				    inner_half, arg);
1136 
1137 				func(rs->rs_split_offset, dst_v,
1138 				    dst_offset + inner_offset,
1139 				    inner_half, arg);
1140 			} else {
1141 				func(rs->rs_split_offset, dst_v,
1142 				    dst_offset + inner_offset,
1143 				    inner_size, arg);
1144 			}
1145 
1146 			rs->rs_offset += inner_size;
1147 			rs->rs_asize -= inner_size;
1148 			rs->rs_split_offset += inner_size;
1149 		}
1150 		VERIFY0(rs->rs_asize);
1151 
1152 		kmem_free(mapping, num_entries * sizeof (*mapping));
1153 		kmem_free(rs, sizeof (remap_segment_t));
1154 	}
1155 	list_destroy(&stack);
1156 }
1157 
1158 static void
1159 vdev_indirect_child_io_done(zio_t *zio)
1160 {
1161 	zio_t *pio = zio->io_private;
1162 
1163 	mutex_enter(&pio->io_lock);
1164 	pio->io_error = zio_worst_error(pio->io_error, zio->io_error);
1165 	mutex_exit(&pio->io_lock);
1166 
1167 	abd_put(zio->io_abd);
1168 }
1169 
1170 /*
1171  * This is a callback for vdev_indirect_remap() which allocates an
1172  * indirect_split_t for each split segment and adds it to iv_splits.
1173  */
1174 static void
1175 vdev_indirect_gather_splits(uint64_t split_offset, vdev_t *vd, uint64_t offset,
1176     uint64_t size, void *arg)
1177 {
1178 	zio_t *zio = arg;
1179 	indirect_vsd_t *iv = zio->io_vsd;
1180 
1181 	ASSERT3P(vd, !=, NULL);
1182 
1183 	if (vd->vdev_ops == &vdev_indirect_ops)
1184 		return;
1185 
1186 	int n = 1;
1187 	if (vd->vdev_ops == &vdev_mirror_ops)
1188 		n = vd->vdev_children;
1189 
1190 	indirect_split_t *is =
1191 	    kmem_zalloc(offsetof(indirect_split_t, is_child[n]), KM_SLEEP);
1192 
1193 	is->is_children = n;
1194 	is->is_size = size;
1195 	is->is_split_offset = split_offset;
1196 	is->is_target_offset = offset;
1197 	is->is_vdev = vd;
1198 	list_create(&is->is_unique_child, sizeof (indirect_child_t),
1199 	    offsetof(indirect_child_t, ic_node));
1200 
1201 	/*
1202 	 * Note that we only consider multiple copies of the data for
1203 	 * *mirror* vdevs.  We don't for "replacing" or "spare" vdevs, even
1204 	 * though they use the same ops as mirror, because there's only one
1205 	 * "good" copy under the replacing/spare.
1206 	 */
1207 	if (vd->vdev_ops == &vdev_mirror_ops) {
1208 		for (int i = 0; i < n; i++) {
1209 			is->is_child[i].ic_vdev = vd->vdev_child[i];
1210 			list_link_init(&is->is_child[i].ic_node);
1211 		}
1212 	} else {
1213 		is->is_child[0].ic_vdev = vd;
1214 	}
1215 
1216 	list_insert_tail(&iv->iv_splits, is);
1217 }
1218 
1219 static void
1220 vdev_indirect_read_split_done(zio_t *zio)
1221 {
1222 	indirect_child_t *ic = zio->io_private;
1223 
1224 	if (zio->io_error != 0) {
1225 		/*
1226 		 * Clear ic_data to indicate that we do not have data for this
1227 		 * child.
1228 		 */
1229 		abd_free(ic->ic_data);
1230 		ic->ic_data = NULL;
1231 	}
1232 }
1233 
1234 /*
1235  * Issue reads for all copies (mirror children) of all splits.
1236  */
1237 static void
1238 vdev_indirect_read_all(zio_t *zio)
1239 {
1240 	indirect_vsd_t *iv = zio->io_vsd;
1241 
1242 	ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
1243 
1244 	for (indirect_split_t *is = list_head(&iv->iv_splits);
1245 	    is != NULL; is = list_next(&iv->iv_splits, is)) {
1246 		for (int i = 0; i < is->is_children; i++) {
1247 			indirect_child_t *ic = &is->is_child[i];
1248 
1249 			if (!vdev_readable(ic->ic_vdev))
1250 				continue;
1251 
1252 			/*
1253 			 * Note, we may read from a child whose DTL
1254 			 * indicates that the data may not be present here.
1255 			 * While this might result in a few i/os that will
1256 			 * likely return incorrect data, it simplifies the
1257 			 * code since we can treat scrub and resilver
1258 			 * identically.  (The incorrect data will be
1259 			 * detected and ignored when we verify the
1260 			 * checksum.)
1261 			 */
1262 
1263 			ic->ic_data = abd_alloc_sametype(zio->io_abd,
1264 			    is->is_size);
1265 			ic->ic_duplicate = NULL;
1266 
1267 			zio_nowait(zio_vdev_child_io(zio, NULL,
1268 			    ic->ic_vdev, is->is_target_offset, ic->ic_data,
1269 			    is->is_size, zio->io_type, zio->io_priority, 0,
1270 			    vdev_indirect_read_split_done, ic));
1271 		}
1272 	}
1273 	iv->iv_reconstruct = B_TRUE;
1274 }
1275 
1276 static void
1277 vdev_indirect_io_start(zio_t *zio)
1278 {
1279 	spa_t *spa = zio->io_spa;
1280 	indirect_vsd_t *iv = kmem_zalloc(sizeof (*iv), KM_SLEEP);
1281 	list_create(&iv->iv_splits,
1282 	    sizeof (indirect_split_t), offsetof(indirect_split_t, is_node));
1283 
1284 	zio->io_vsd = iv;
1285 	zio->io_vsd_ops = &vdev_indirect_vsd_ops;
1286 
1287 	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
1288 	if (zio->io_type != ZIO_TYPE_READ) {
1289 		ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
1290 		/*
1291 		 * Note: this code can handle other kinds of writes,
1292 		 * but we don't expect them.
1293 		 */
1294 		ASSERT((zio->io_flags & (ZIO_FLAG_SELF_HEAL |
1295 		    ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE)) != 0);
1296 	}
1297 
1298 	vdev_indirect_remap(zio->io_vd, zio->io_offset, zio->io_size,
1299 	    vdev_indirect_gather_splits, zio);
1300 
1301 	indirect_split_t *first = list_head(&iv->iv_splits);
1302 	if (first->is_size == zio->io_size) {
1303 		/*
1304 		 * This is not a split block; we are pointing to the entire
1305 		 * data, which will checksum the same as the original data.
1306 		 * Pass the BP down so that the child i/o can verify the
1307 		 * checksum, and try a different location if available
1308 		 * (e.g. on a mirror).
1309 		 *
1310 		 * While this special case could be handled the same as the
1311 		 * general (split block) case, doing it this way ensures
1312 		 * that the vast majority of blocks on indirect vdevs
1313 		 * (which are not split) are handled identically to blocks
1314 		 * on non-indirect vdevs.  This allows us to be less strict
1315 		 * about performance in the general (but rare) case.
1316 		 */
1317 		ASSERT0(first->is_split_offset);
1318 		ASSERT3P(list_next(&iv->iv_splits, first), ==, NULL);
1319 		zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
1320 		    first->is_vdev, first->is_target_offset,
1321 		    abd_get_offset(zio->io_abd, 0),
1322 		    zio->io_size, zio->io_type, zio->io_priority, 0,
1323 		    vdev_indirect_child_io_done, zio));
1324 	} else {
1325 		iv->iv_split_block = B_TRUE;
1326 		if (zio->io_type == ZIO_TYPE_READ &&
1327 		    zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER)) {
1328 			/*
1329 			 * Read all copies.  Note that for simplicity,
1330 			 * we don't bother consulting the DTL in the
1331 			 * resilver case.
1332 			 */
1333 			vdev_indirect_read_all(zio);
1334 		} else {
1335 			/*
1336 			 * If this is a read zio, we read one copy of each
1337 			 * split segment, from the top-level vdev.  Since
1338 			 * we don't know the checksum of each split
1339 			 * individually, the child zio can't ensure that
1340 			 * we get the right data. E.g. if it's a mirror,
1341 			 * it will just read from a random (healthy) leaf
1342 			 * vdev. We have to verify the checksum in
1343 			 * vdev_indirect_io_done().
1344 			 *
1345 			 * For write zios, the vdev code will ensure we write
1346 			 * to all children.
1347 			 */
1348 			for (indirect_split_t *is = list_head(&iv->iv_splits);
1349 			    is != NULL; is = list_next(&iv->iv_splits, is)) {
1350 				zio_nowait(zio_vdev_child_io(zio, NULL,
1351 				    is->is_vdev, is->is_target_offset,
1352 				    abd_get_offset(zio->io_abd,
1353 				    is->is_split_offset),
1354 				    is->is_size, zio->io_type,
1355 				    zio->io_priority, 0,
1356 				    vdev_indirect_child_io_done, zio));
1357 			}
1358 		}
1359 	}
1360 
1361 	zio_execute(zio);
1362 }
1363 
1364 /*
1365  * Report a checksum error for a child.
1366  */
1367 static void
1368 vdev_indirect_checksum_error(zio_t *zio,
1369     indirect_split_t *is, indirect_child_t *ic)
1370 {
1371 	vdev_t *vd = ic->ic_vdev;
1372 
1373 	if (zio->io_flags & ZIO_FLAG_SPECULATIVE)
1374 		return;
1375 
1376 	mutex_enter(&vd->vdev_stat_lock);
1377 	vd->vdev_stat.vs_checksum_errors++;
1378 	mutex_exit(&vd->vdev_stat_lock);
1379 
1380 	zio_bad_cksum_t zbc = { 0 };
1381 	void *bad_buf = abd_borrow_buf_copy(ic->ic_data, is->is_size);
1382 	abd_t *good_abd = is->is_good_child->ic_data;
1383 	void *good_buf = abd_borrow_buf_copy(good_abd, is->is_size);
1384 	zfs_ereport_post_checksum(zio->io_spa, vd, &zio->io_bookmark, zio,
1385 	    is->is_target_offset, is->is_size, good_buf, bad_buf, &zbc);
1386 	abd_return_buf(ic->ic_data, bad_buf, is->is_size);
1387 	abd_return_buf(good_abd, good_buf, is->is_size);
1388 }
1389 
1390 /*
1391  * Issue repair i/os for any incorrect copies.  We do this by comparing
1392  * each split segment's correct data (is_good_child's ic_data) with each
1393  * other copy of the data.  If they differ, then we overwrite the bad data
1394  * with the good copy.  Note that we do this without regard for the DTL's,
1395  * which simplifies this code and also issues the optimal number of writes
1396  * (based on which copies actually read bad data, as opposed to which we
1397  * think might be wrong).  For the same reason, we always use
1398  * ZIO_FLAG_SELF_HEAL, to bypass the DTL check in zio_vdev_io_start().
1399  */
1400 static void
1401 vdev_indirect_repair(zio_t *zio)
1402 {
1403 	indirect_vsd_t *iv = zio->io_vsd;
1404 
1410 	if (!spa_writeable(zio->io_spa))
1411 		return;
1412 
1413 	for (indirect_split_t *is = list_head(&iv->iv_splits);
1414 	    is != NULL; is = list_next(&iv->iv_splits, is)) {
1415 		for (int c = 0; c < is->is_children; c++) {
1416 			indirect_child_t *ic = &is->is_child[c];
1417 			if (ic == is->is_good_child)
1418 				continue;
1419 			if (ic->ic_data == NULL)
1420 				continue;
1421 			if (ic->ic_duplicate == is->is_good_child)
1422 				continue;
1423 
1424 			zio_nowait(zio_vdev_child_io(zio, NULL,
1425 			    ic->ic_vdev, is->is_target_offset,
1426 			    is->is_good_child->ic_data, is->is_size,
1427 			    ZIO_TYPE_WRITE, ZIO_PRIORITY_ASYNC_WRITE,
1428 			    ZIO_FLAG_IO_REPAIR | ZIO_FLAG_SELF_HEAL,
1429 			    NULL, NULL));
1430 
1431 			vdev_indirect_checksum_error(zio, is, ic);
1432 		}
1433 	}
1434 }
1435 
1436 /*
1437  * Report checksum errors on all children that we read from.
1438  */
1439 static void
1440 vdev_indirect_all_checksum_errors(zio_t *zio)
1441 {
1442 	indirect_vsd_t *iv = zio->io_vsd;
1443 
1444 	if (zio->io_flags & ZIO_FLAG_SPECULATIVE)
1445 		return;
1446 
1447 	for (indirect_split_t *is = list_head(&iv->iv_splits);
1448 	    is != NULL; is = list_next(&iv->iv_splits, is)) {
1449 		for (int c = 0; c < is->is_children; c++) {
1450 			indirect_child_t *ic = &is->is_child[c];
1451 
1452 			if (ic->ic_data == NULL)
1453 				continue;
1454 
1455 			vdev_t *vd = ic->ic_vdev;
1456 
1457 			mutex_enter(&vd->vdev_stat_lock);
1458 			vd->vdev_stat.vs_checksum_errors++;
1459 			mutex_exit(&vd->vdev_stat_lock);
1460 
1461 			zfs_ereport_post_checksum(zio->io_spa, vd,
1462 			    &zio->io_bookmark, zio, is->is_target_offset,
1463 			    is->is_size, NULL, NULL, NULL);
1464 		}
1465 	}
1466 }
1467 
1468 /*
1469  * Copy data from all the splits to the main zio, then validate the
1470  * checksum.  If the checksum is successfully validated, return success.
1471  */
1472 static int
1473 vdev_indirect_splits_checksum_validate(indirect_vsd_t *iv, zio_t *zio)
1474 {
1475 	zio_bad_cksum_t zbc;
1476 
1477 	for (indirect_split_t *is = list_head(&iv->iv_splits);
1478 	    is != NULL; is = list_next(&iv->iv_splits, is)) {
1479 
1480 		ASSERT3P(is->is_good_child->ic_data, !=, NULL);
1481 		ASSERT3P(is->is_good_child->ic_duplicate, ==, NULL);
1482 
1483 		abd_copy_off(zio->io_abd, is->is_good_child->ic_data,
1484 		    is->is_split_offset, 0, is->is_size);
1485 	}
1486 
1487 	return (zio_checksum_error(zio, &zbc));
1488 }
1489 
1490 /*
1491  * There are relatively few possible combinations making it feasible to
1492  * deterministically check them all.  We do this by setting the good_child
1493  * to the next unique split version.  If we reach the end of the list then
1494  * "carry over" to the next unique split version (like counting in base
1495  * is_unique_children, but each digit can have a different base).
1496  */
1497 static int
1498 vdev_indirect_splits_enumerate_all(indirect_vsd_t *iv, zio_t *zio)
1499 {
1500 	boolean_t more = B_TRUE;
1501 
1502 	iv->iv_attempts = 0;
1503 
1504 	for (indirect_split_t *is = list_head(&iv->iv_splits);
1505 	    is != NULL; is = list_next(&iv->iv_splits, is))
1506 		is->is_good_child = list_head(&is->is_unique_child);
1507 
1508 	while (more == B_TRUE) {
1509 		iv->iv_attempts++;
1510 		more = B_FALSE;
1511 
1512 		if (vdev_indirect_splits_checksum_validate(iv, zio) == 0)
1513 			return (0);
1514 
1515 		for (indirect_split_t *is = list_head(&iv->iv_splits);
1516 		    is != NULL; is = list_next(&iv->iv_splits, is)) {
1517 			is->is_good_child = list_next(&is->is_unique_child,
1518 			    is->is_good_child);
1519 			if (is->is_good_child != NULL) {
1520 				more = B_TRUE;
1521 				break;
1522 			}
1523 
1524 			is->is_good_child = list_head(&is->is_unique_child);
1525 		}
1526 	}
1527 
1528 	ASSERT3S(iv->iv_attempts, <=, iv->iv_unique_combinations);
1529 
1530 	return (SET_ERROR(ECKSUM));
1531 }
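/*
 * Illustrative example of the enumeration above (hypothetical counts): with
 * two splits whose is_unique_children are 2 and 3, the good-child indices
 * are tried in the order (0,0), (1,0), (0,1), (1,1), (0,2), (1,2), for
 * 2 * 3 = 6 combinations in total, matching iv_unique_combinations.
 */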
1532 
1533 /*
1534  * There are too many combinations to try all of them in a reasonable amount
1535  * of time.  So try a fixed number of random combinations from the unique
1536  * split versions, after which we'll consider the block unrecoverable.
1537  */
1538 static int
1539 vdev_indirect_splits_enumerate_randomly(indirect_vsd_t *iv, zio_t *zio)
1540 {
1541 	iv->iv_attempts = 0;
1542 
1543 	while (iv->iv_attempts < iv->iv_attempts_max) {
1544 		iv->iv_attempts++;
1545 
1546 		for (indirect_split_t *is = list_head(&iv->iv_splits);
1547 		    is != NULL; is = list_next(&iv->iv_splits, is)) {
1548 			indirect_child_t *ic = list_head(&is->is_unique_child);
1549 			int children = is->is_unique_children;
1550 
1551 			for (int i = spa_get_random(children); i > 0; i--)
1552 				ic = list_next(&is->is_unique_child, ic);
1553 
1554 			ASSERT3P(ic, !=, NULL);
1555 			is->is_good_child = ic;
1556 		}
1557 
1558 		if (vdev_indirect_splits_checksum_validate(iv, zio) == 0)
1559 			return (0);
1560 	}
1561 
1562 	return (SET_ERROR(ECKSUM));
1563 }
1564 
1565 /*
1566  * This is a validation function for reconstruction.  It randomly selects
1567  * a good combination, if one can be found, and then it intentionally
1568  * damages all other segment copies by zeroing them.  This forces the
1569  * reconstruction algorithm to locate the one remaining known good copy.
1570  */
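/*
 * For example, if a split has readable copies { c0, c1, c2 } (hypothetical
 * names) and c1 ends up selected as the good child, then c0 and c2 are
 * zeroed, leaving c1 as the only copy from which the block can be rebuilt.
 */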
1571 static int
1572 vdev_indirect_splits_damage(indirect_vsd_t *iv, zio_t *zio)
1573 {
1574 	/* Presume all the copies are unique for initial selection. */
1575 	for (indirect_split_t *is = list_head(&iv->iv_splits);
1576 	    is != NULL; is = list_next(&iv->iv_splits, is)) {
1577 		is->is_unique_children = 0;
1578 
1579 		for (int i = 0; i < is->is_children; i++) {
1580 			indirect_child_t *ic = &is->is_child[i];
1581 			if (ic->ic_data != NULL) {
1582 				is->is_unique_children++;
1583 				list_insert_tail(&is->is_unique_child, ic);
1584 			}
1585 		}
1586 	}
1587 
1588 	/*
1589 	 * Set each is_good_child to a randomly-selected child which
1590 	 * is known to contain validated data.
1591 	 */
1592 	int error = vdev_indirect_splits_enumerate_randomly(iv, zio);
1593 	if (error)
1594 		goto out;
1595 
1596 	/*
1597 	 * Damage all but the known good copy of each split by zeroing the others.
1598 	 * This will result in two or fewer unique copies per indirect_split_t.
1599 	 * Both may need to be checked in order to reconstruct the block.  Set
1600 	 * iv->iv_attempts_max such that all unique combinations will be
1601 	 * enumerated, but limit the damage to at most 16 indirect splits.
1602 	 */
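	/*
	 * For example, with 3 indirect splits iv_attempts_max grows to
	 * 2^3 = 8, enough to enumerate every good-vs-zeroed combination
	 * for those splits.
	 */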
1603 	iv->iv_attempts_max = 1;
1604 
1605 	for (indirect_split_t *is = list_head(&iv->iv_splits);
1606 	    is != NULL; is = list_next(&iv->iv_splits, is)) {
1607 		for (int c = 0; c < is->is_children; c++) {
1608 			indirect_child_t *ic = &is->is_child[c];
1609 
1610 			if (ic == is->is_good_child)
1611 				continue;
1612 			if (ic->ic_data == NULL)
1613 				continue;
1614 
1615 			abd_zero(ic->ic_data, ic->ic_data->abd_size);
1616 		}
1617 
1618 		iv->iv_attempts_max *= 2;
1619 		if (iv->iv_attempts_max > (1ULL << 16)) {
1620 			iv->iv_attempts_max = UINT64_MAX;
1621 			break;
1622 		}
1623 	}
1624 
1625 out:
1626 	/* Empty the unique children lists so they can be reconstructed. */
1627 	for (indirect_split_t *is = list_head(&iv->iv_splits);
1628 	    is != NULL; is = list_next(&iv->iv_splits, is)) {
1629 		indirect_child_t *ic;
1630 		while ((ic = list_head(&is->is_unique_child)) != NULL)
1631 			list_remove(&is->is_unique_child, ic);
1632 
1633 		is->is_unique_children = 0;
1634 	}
1635 
1636 	return (error);
1637 }
1638 
1639 /*
1640  * This function is called when we have read all copies of the data and need
1641  * to try to find a combination of copies that gives us the right checksum.
1642  *
1643  * If we pointed to any mirror vdevs, this effectively does the job of the
1644  * mirror.  The mirror vdev code can't do its own job because we don't know
1645  * the checksum of each split segment individually.
1646  *
1647  * We have to try every unique combination of copies of split segments, until
1648  * we find one that checksums correctly.  Duplicate segment copies are first
1649  * identified and latter skipped during reconstruction.  This optimization
1650  * reduces the search space and ensures that of the remaining combinations
1651  * at most one is correct.
1652  *
1653  * When the total number of combinations is small they can all be checked.
1654  * For example, if we have 3 segments in the split, and each points to a
1655  * 2-way mirror with unique copies, we will have the following pieces of data:
1656  *
1657  *       |     mirror child
1658  * split |     [0]        [1]
1659  * ======|=====================
1660  *   A   |  data_A_0   data_A_1
1661  *   B   |  data_B_0   data_B_1
1662  *   C   |  data_C_0   data_C_1
1663  *
1664  * We will try the following (mirror children)^(number of splits) (2^3=8)
1665  * combinations, which is similar to bitwise-little-endian counting in
1666  * binary.  In general each "digit" corresponds to a split segment, and the
1667  * base of each digit is is_children, which can be different for each
1668  * digit.
1669  *
1670  * "low bit"        "high bit"
1671  *        v                 v
1672  * data_A_0 data_B_0 data_C_0
1673  * data_A_1 data_B_0 data_C_0
1674  * data_A_0 data_B_1 data_C_0
1675  * data_A_1 data_B_1 data_C_0
1676  * data_A_0 data_B_0 data_C_1
1677  * data_A_1 data_B_0 data_C_1
1678  * data_A_0 data_B_1 data_C_1
1679  * data_A_1 data_B_1 data_C_1
1680  *
1681  * Note that the split segments may be on the same or different top-level
1682  * vdevs. In either case, we may need to try lots of combinations (see
1683  * zfs_reconstruct_indirect_combinations_max).  This ensures that if a mirror
1684  * has small silent errors on all of its children, we can still reconstruct
1685  * the correct data, as long as those errors are at sufficiently-separated
1686  * offsets (specifically, separated by the largest block size - default of
1687  * 128KB, but up to 16MB).
1688  */
1689 static void
1690 vdev_indirect_reconstruct_io_done(zio_t *zio)
1691 {
1692 	indirect_vsd_t *iv = zio->io_vsd;
1693 	boolean_t known_good = B_FALSE;
1694 	int error;
1695 
1696 	iv->iv_unique_combinations = 1;
1697 	iv->iv_attempts_max = UINT64_MAX;
1698 
1699 	if (zfs_reconstruct_indirect_combinations_max > 0)
1700 		iv->iv_attempts_max = zfs_reconstruct_indirect_combinations_max;
1701 
1702 	/*
1703 	 * If nonzero, 1 in every x blocks will be damaged, in order to validate
1704 	 * reconstruction when there are split segments with damaged copies.
1705 	 * known_good will be B_TRUE when reconstruction is known to be possible.
1706 	 */
1707 	if (zfs_reconstruct_indirect_damage_fraction != 0 &&
1708 	    spa_get_random(zfs_reconstruct_indirect_damage_fraction) == 0)
1709 		known_good = (vdev_indirect_splits_damage(iv, zio) == 0);
1710 
1711 	/*
1712 	 * Determine the unique children for a split segment and add them
1713 	 * to the is_unique_child list.  By restricting reconstruction
1714 	 * to these children, only unique combinations will be considered.
1715 	 * This can vastly reduce the search space when there are a large
1716 	 * number of indirect splits.
1717 	 */
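	/*
	 * For instance, a block split across 4 segments that each map to a
	 * 3-way mirror has 3^4 = 81 raw combinations; if two of the three
	 * copies of every segment happen to be identical, only 2^4 = 16
	 * unique combinations remain to be checked.
	 */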
1718 	for (indirect_split_t *is = list_head(&iv->iv_splits);
1719 	    is != NULL; is = list_next(&iv->iv_splits, is)) {
1720 		is->is_unique_children = 0;
1721 
1722 		for (int i = 0; i < is->is_children; i++) {
1723 			indirect_child_t *ic_i = &is->is_child[i];
1724 
1725 			if (ic_i->ic_data == NULL ||
1726 			    ic_i->ic_duplicate != NULL)
1727 				continue;
1728 
1729 			for (int j = i + 1; j < is->is_children; j++) {
1730 				indirect_child_t *ic_j = &is->is_child[j];
1731 
1732 				if (ic_j->ic_data == NULL ||
1733 				    ic_j->ic_duplicate != NULL)
1734 					continue;
1735 
1736 				if (abd_cmp(ic_i->ic_data, ic_j->ic_data,
1737 				    is->is_size) == 0) {
1738 					ic_j->ic_duplicate = ic_i;
1739 				}
1740 			}
1741 
1742 			is->is_unique_children++;
1743 			list_insert_tail(&is->is_unique_child, ic_i);
1744 		}
1745 
1746 		/* Reconstruction is impossible if a split has no valid children. */
1747 		EQUIV(list_is_empty(&is->is_unique_child),
1748 		    is->is_unique_children == 0);
1749 		if (list_is_empty(&is->is_unique_child)) {
1750 			zio->io_error = EIO;
1751 			vdev_indirect_all_checksum_errors(zio);
1752 			zio_checksum_verified(zio);
1753 			return;
1754 		}
1755 
1756 		iv->iv_unique_combinations *= is->is_unique_children;
1757 	}
1758 
1759 	if (iv->iv_unique_combinations <= iv->iv_attempts_max)
1760 		error = vdev_indirect_splits_enumerate_all(iv, zio);
1761 	else
1762 		error = vdev_indirect_splits_enumerate_randomly(iv, zio);
1763 
1764 	if (error != 0) {
1765 		/* All attempted combinations failed. */
1766 		ASSERT3B(known_good, ==, B_FALSE);
1767 		zio->io_error = error;
1768 		vdev_indirect_all_checksum_errors(zio);
1769 	} else {
1770 		/*
1771 		 * The checksum has been successfully validated.  Issue
1772 		 * repair I/Os to any copies of splits which don't match
1773 		 * the validated version.
1774 		 */
1775 		ASSERT0(vdev_indirect_splits_checksum_validate(iv, zio));
1776 		vdev_indirect_repair(zio);
1777 		zio_checksum_verified(zio);
1778 	}
1779 }
1780 
1781 static void
1782 vdev_indirect_io_done(zio_t *zio)
1783 {
1784 	indirect_vsd_t *iv = zio->io_vsd;
1785 
1786 	if (iv->iv_reconstruct) {
1787 		/*
1788 		 * We have read all copies of the data (e.g. from mirrors),
1789 		 * either because this was a scrub/resilver, or because the
1790 		 * one-copy read didn't checksum correctly.
1791 		 */
1792 		vdev_indirect_reconstruct_io_done(zio);
1793 		return;
1794 	}
1795 
1796 	if (!iv->iv_split_block) {
1797 		/*
1798 		 * This was not a split block, so we passed the BP down,
1799 		 * and the checksum was handled by the (one) child zio.
1800 		 */
1801 		return;
1802 	}
1803 
1804 	zio_bad_cksum_t zbc;
1805 	int ret = zio_checksum_error(zio, &zbc);
1806 	if (ret == 0) {
1807 		zio_checksum_verified(zio);
1808 		return;
1809 	}
1810 
1811 	/*
1812 	 * The checksum didn't match.  Read all copies of all splits, and
1813 	 * then we will try to reconstruct.  The next time
1814 	 * vdev_indirect_io_done() is called, iv_reconstruct will be set.
1815 	 */
1816 	vdev_indirect_read_all(zio);
1817 
1818 	zio_vdev_io_redone(zio);
1819 }
1820 
1821 vdev_ops_t vdev_indirect_ops = {
1822 	.vdev_op_open = vdev_indirect_open,
1823 	.vdev_op_close = vdev_indirect_close,
1824 	.vdev_op_asize = vdev_default_asize,
1825 	.vdev_op_io_start = vdev_indirect_io_start,
1826 	.vdev_op_io_done = vdev_indirect_io_done,
1827 	.vdev_op_state_change = NULL,
1828 	.vdev_op_need_resilver = NULL,
1829 	.vdev_op_hold = NULL,
1830 	.vdev_op_rele = NULL,
1831 	.vdev_op_remap = vdev_indirect_remap,
1832 	.vdev_op_xlate = NULL,
1833 	.vdev_op_type = VDEV_TYPE_INDIRECT,	/* name of this vdev type */
1834 	.vdev_op_leaf = B_FALSE			/* leaf vdev */
1835 };
1836