xref: /freebsd/sys/contrib/openzfs/module/zfs/vdev_indirect.c (revision 61145dc2b94f12f6a47344fb9aac702321880e43)
1 // SPDX-License-Identifier: CDDL-1.0
2 /*
3  * CDDL HEADER START
4  *
5  * This file and its contents are supplied under the terms of the
6  * Common Development and Distribution License ("CDDL"), version 1.0.
7  * You may only use this file in accordance with the terms of version
8  * 1.0 of the CDDL.
9  *
10  * A full copy of the text of the CDDL should have accompanied this
11  * source.  A copy of the CDDL is also available via the Internet at
12  * http://www.illumos.org/license/CDDL.
13  *
14  * CDDL HEADER END
15  */
16 
17 /*
18  * Copyright (c) 2014, 2017 by Delphix. All rights reserved.
19  * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
20  * Copyright (c) 2014, 2020 by Delphix. All rights reserved.
21  */
22 
23 #include <sys/zfs_context.h>
24 #include <sys/spa.h>
25 #include <sys/spa_impl.h>
26 #include <sys/vdev_impl.h>
27 #include <sys/fs/zfs.h>
28 #include <sys/zio.h>
29 #include <sys/zio_checksum.h>
30 #include <sys/metaslab.h>
31 #include <sys/dmu.h>
32 #include <sys/vdev_indirect_mapping.h>
33 #include <sys/dmu_tx.h>
34 #include <sys/dsl_synctask.h>
35 #include <sys/zap.h>
36 #include <sys/abd.h>
37 #include <sys/zthr.h>
38 #include <sys/fm/fs/zfs.h>
39 
40 /*
41  * An indirect vdev corresponds to a vdev that has been removed.  Since
42  * we cannot rewrite block pointers of snapshots, etc., we keep a
43  * mapping from old location on the removed device to the new location
44  * on another device in the pool and use this mapping whenever we need
45  * to access the DVA.  Unfortunately, this mapping did not respect
46  * logical block boundaries when it was first created, and so a DVA on
47  * this indirect vdev may be "split" into multiple sections that each
48  * map to a different location.  As a consequence, not all DVAs can be
49  * translated to an equivalent new DVA.  Instead we must provide a
50  * "vdev_remap" operation that executes a callback on each contiguous
51  * segment of the new location.  This function is used in multiple ways:
52  *
53  *  - I/Os to this vdev use the callback to determine where the
54  *    data is now located, and issue child I/Os for each segment's new
55  *    location.
56  *
57  *  - frees and claims to this vdev use the callback to free or claim
58  *    each mapped segment.  (Note that we don't actually need to claim
59  *    log blocks on indirect vdevs, because we don't allocate to
60  *    removing vdevs.  However, zdb uses zio_claim() for its leak
61  *    detection.)
62  */
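
/*
 * Illustrative sketch (not part of this driver, compiled out): a minimal
 * vdev_indirect_remap() callback that only totals the size of each contiguous
 * segment of the new location.  The parameter order matches the func argument
 * of vdev_indirect_remap() defined later in this file; the helper name is
 * hypothetical.
 */
#if 0
static void
example_sum_segments_cb(uint64_t split_offset, vdev_t *vd, uint64_t offset,
    uint64_t size, void *arg)
{
	uint64_t *total = arg;

	(void) split_offset;
	(void) vd;
	(void) offset;
	*total += size;	/* accumulate this contiguous segment's length */
}
#endif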
63 
64 /*
65  * "Big theory statement" for how we mark blocks obsolete.
66  *
67  * When a block on an indirect vdev is freed or remapped, a section of
68  * that vdev's mapping may no longer be referenced (aka "obsolete").  We
69  * keep track of how much of each mapping entry is obsolete.  When
70  * an entry becomes completely obsolete, we can remove it, thus reducing
71  * the memory used by the mapping.  The complete picture of obsolescence
72  * is given by the following data structures, described below:
73  *  - the entry-specific obsolete count
74  *  - the vdev-specific obsolete spacemap
75  *  - the pool-specific obsolete bpobj
76  *
77  * == On disk data structures used ==
78  *
79  * We track the obsolete space for the pool using several objects.  Each
80  * of these objects is created on demand and freed when no longer
81  * needed, and is assumed to be empty if it does not exist.
82  * SPA_FEATURE_OBSOLETE_COUNTS includes the count of these objects.
83  *
84  *  - Each vic_mapping_object (associated with an indirect vdev) can
85  *    have a vimp_counts_object.  This is an array of uint32_t's
86  *    with the same number of entries as the vic_mapping_object.  When
87  *    the mapping is condensed, entries from the vic_obsolete_sm_object
88  *    (see below) are folded into the counts.  Therefore, each
89  *    obsolete_counts entry tells us the number of bytes in the
90  *    corresponding mapping entry that were not referenced when the
91  *    mapping was last condensed.
92  *
93  *  - Each indirect or removing vdev can have a vic_obsolete_sm_object.
94  *    This is a space map containing an alloc entry for every DVA that
95  *    has been obsoleted since the last time this indirect vdev was
96  *    condensed.  We use this object in order to improve performance
97  *    when marking a DVA as obsolete.  Instead of modifying an arbitrary
98  *    offset of the vimp_counts_object, we only need to append an entry
99  *    to the end of this object.  When a DVA becomes obsolete, it is
100  *    added to the obsolete space map.  This happens when the DVA is
101  *    freed, remapped and not referenced by a snapshot, or the last
102  *    snapshot referencing it is destroyed.
103  *
104  *  - Each dataset can have a ds_remap_deadlist object.  This is a
105  *    deadlist object containing all blocks that were remapped in this
106  *    dataset but referenced in a previous snapshot.  Blocks can *only*
107  *    appear on this list if they were remapped (dsl_dataset_block_remapped);
108  *    blocks that were killed in a head dataset are put on the normal
109  *    ds_deadlist and marked obsolete when they are freed.
110  *
111  *  - The pool can have a dp_obsolete_bpobj.  This is a list of blocks
112  *    in the pool that need to be marked obsolete.  When a snapshot is
113  *    destroyed, we move some of the ds_remap_deadlist to the obsolete
114  *    bpobj (see dsl_destroy_snapshot_handle_remaps()).  We then
115  *    asynchronously process the obsolete bpobj, moving its entries to
116  *    the specific vdevs' obsolete space maps.
117  *
118  * == Summary of how we mark blocks as obsolete ==
119  *
120  * - When freeing a block: if any DVA is on an indirect vdev, append to
121  *   vic_obsolete_sm_object.
122  * - When remapping a block, add dva to ds_remap_deadlist (if prev snap
123  *   references; otherwise append to vic_obsolete_sm_object).
124  * - When freeing a snapshot: move parts of ds_remap_deadlist to
125  *   dp_obsolete_bpobj (same algorithm as ds_deadlist).
126  * - When syncing the spa: process dp_obsolete_bpobj, moving ranges to
127  *   individual vdev's vic_obsolete_sm_object.
128  */
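
/*
 * Illustrative sketch (not part of this driver, compiled out): how a freed
 * DVA that lands on an indirect vdev feeds the tracking described above.
 * The helper name is hypothetical; spa_vdev_indirect_mark_obsolete() is
 * defined later in this file and must be called from syncing context.
 */
#if 0
static void
example_mark_freed_dva_obsolete(spa_t *spa, const dva_t *dva, dmu_tx_t *tx)
{
	vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));

	if (vd->vdev_ops == &vdev_indirect_ops) {
		/* Ends up as an append to the vdev's obsolete space map. */
		spa_vdev_indirect_mark_obsolete(spa, DVA_GET_VDEV(dva),
		    DVA_GET_OFFSET(dva), DVA_GET_ASIZE(dva), tx);
	}
}
#endif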
129 
130 /*
131  * "Big theory statement" for how we condense indirect vdevs.
132  *
133  * Condensing an indirect vdev's mapping is the process of determining
134  * the precise counts of obsolete space for each mapping entry (by
135  * integrating the obsolete spacemap into the obsolete counts) and
136  * writing out a new mapping that contains only referenced entries.
137  *
138  * We condense a vdev when we expect the mapping to shrink (see
139  * vdev_indirect_should_condense()), but only perform one condense at a
140  * time to limit the memory usage.  In addition, we use a separate
141  * open-context thread (spa_condense_indirect_thread) to incrementally
142  * create the new mapping object in a way that minimizes the impact on
143  * the rest of the system.
144  *
145  * == Generating a new mapping ==
146  *
147  * To generate a new mapping, we follow these steps:
148  *
149  * 1. Save the old obsolete space map and create a new mapping object
150  *    (see spa_condense_indirect_start_sync()).  This initializes the
151  *    spa_condensing_indirect_phys with the "previous obsolete space map",
152  *    which is now read only.  Newly obsolete DVAs will be added to a
153  *    new (initially empty) obsolete space map, and will not be
154  *    considered as part of this condense operation.
155  *
156  * 2. Construct in memory the precise counts of obsolete space for each
157  *    mapping entry, by incorporating the obsolete space map into the
158  *    counts.  (See vdev_indirect_mapping_load_obsolete_{counts,spacemap}().)
159  *
160  * 3. Iterate through each mapping entry, writing to the new mapping any
161  *    entries that are not completely obsolete (i.e. which don't have
162  *    obsolete count == mapping length).  (See
163  *    spa_condense_indirect_generate_new_mapping().)
164  *
165  * 4. Destroy the old mapping object and switch over to the new one
166  *    (spa_condense_indirect_complete_sync).
167  *
168  * == Restarting from failure ==
169  *
170  * To restart the condense when we import/open the pool, we must start
171  * at the 2nd step above: reconstruct the precise counts in memory,
172  * based on the space map + counts.  Then in the 3rd step, we start
173  * iterating where we left off: at vimp_max_offset of the new mapping
174  * object.
175  */
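
/*
 * Illustrative sketch (not part of this driver, compiled out): the filter at
 * the heart of step 3 above.  An entry is carried over to the new mapping
 * only while some of its mapped bytes are still referenced; fully obsolete
 * entries are dropped.  The helper name is hypothetical; the real loop is in
 * spa_condense_indirect_generate_new_mapping().
 */
#if 0
static boolean_t
example_entry_still_referenced(vdev_indirect_mapping_entry_phys_t *vimep,
    uint32_t obsolete_count)
{
	/* The obsolete count can never exceed the entry's mapped length. */
	return (obsolete_count < DVA_GET_ASIZE(&vimep->vimep_dst));
}
#endif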
176 
177 static int zfs_condense_indirect_vdevs_enable = B_TRUE;
178 
179 /*
180  * Condense if at least this percent of the bytes in the mapping is
181  * obsolete.  With the default of 25%, the amount of space mapped
182  * will be reduced to 1% of its original size after at most 16
183  * condenses.  Higher values will condense less often (causing less
184  * i/o); lower values will reduce the mapping size more quickly.
185  */
186 static uint_t zfs_condense_indirect_obsolete_pct = 25;
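
/*
 * Worked check of the percentage above: each condense removes at least 25%
 * of the mapped bytes, so after n condenses at most 0.75^n of the mapping
 * remains; 0.75^16 ~= 0.010, i.e. roughly 1% of the original size.
 */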
187 
188 /*
189  * Condense if the obsolete space map takes up more than this amount of
190  * space on disk (logically).  This limits the amount of disk space
191  * consumed by the obsolete space map; the default of 1GB is small enough
192  * that we typically don't mind "wasting" it.
193  */
194 static uint64_t zfs_condense_max_obsolete_bytes = 1024 * 1024 * 1024;
195 
196 /*
197  * Don't bother condensing if the mapping uses less than this amount of
198  * memory.  The default of 128KB is considered a "trivial" amount of
199  * memory and not worth reducing.
200  */
201 static uint64_t zfs_condense_min_mapping_bytes = 128 * 1024;
202 
203 /*
204  * This is used by the test suite so that it can ensure that certain
205  * actions happen while in the middle of a condense (which might otherwise
206  * complete too quickly).  If used to reduce the performance impact of
207  * condensing in production, a maximum value of 1 should be sufficient.
208  */
209 static uint_t zfs_condense_indirect_commit_entry_delay_ms = 0;
210 
211 /*
212  * If an indirect split block contains more than this many possible unique
213  * combinations when being reconstructed, consider it too computationally
214  * expensive to check them all. Instead, try at most 100 randomly-selected
215  * combinations each time the block is accessed.  This allows all segment
216  * copies to participate fairly in the reconstruction when all combinations
217  * cannot be checked and prevents repeated use of one bad copy.
218  */
219 uint_t zfs_reconstruct_indirect_combinations_max = 4096;
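
/*
 * Worked example for the limit above: a block split into 3 segments, each
 * stored on a 2-way mirror, has at most 2 * 2 * 2 = 8 unique combinations
 * and is checked exhaustively.  With 2-way mirrors, only blocks split into
 * more than 12 segments exceed the default limit (2^12 = 4096) and fall
 * back to trying random combinations.
 */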
220 
221 /*
222  * Enable to simulate damaged segments and validate reconstruction.  This
223  * is intentionally not exposed as a module parameter.
224  */
225 unsigned long zfs_reconstruct_indirect_damage_fraction = 0;
226 
227 /*
228  * The indirect_child_t represents the vdev that we will read from, when we
229  * need to read all copies of the data (e.g. for scrub or reconstruction).
230  * For plain (non-mirror) top-level vdevs (i.e. is_vdev is not a mirror),
231  * ic_vdev is the same as is_vdev.  However, for mirror top-level vdevs,
232  * ic_vdev is a child of the mirror.
233  */
234 typedef struct indirect_child {
235 	abd_t *ic_data;
236 	vdev_t *ic_vdev;
237 
238 	/*
239 	 * ic_duplicate is NULL when the ic_data contents are unique; when the
240 	 * data is determined to be a duplicate, it references the primary child.
241 	 */
242 	struct indirect_child *ic_duplicate;
243 	list_node_t ic_node; /* node on is_unique_child */
244 	int ic_error; /* set when a child does not contain the data */
245 } indirect_child_t;
246 
247 /*
248  * The indirect_split_t represents one mapped segment of an i/o to the
249  * indirect vdev. For non-split (contiguously-mapped) blocks, there will be
250  * only one indirect_split_t, with is_split_offset==0 and is_size==io_size.
251  * For split blocks, there will be several of these.
252  */
253 typedef struct indirect_split {
254 	list_node_t is_node; /* link on iv_splits */
255 
256 	/*
257 	 * is_split_offset is the offset into the i/o.
258 	 * This is the sum of the previous splits' is_size's.
259 	 */
260 	uint64_t is_split_offset;
261 
262 	vdev_t *is_vdev; /* top-level vdev */
263 	uint64_t is_target_offset; /* offset on is_vdev */
264 	uint64_t is_size;
265 	int is_children; /* number of entries in is_child[] */
266 	int is_unique_children; /* number of entries in is_unique_child */
267 	list_t is_unique_child;
268 
269 	/*
270 	 * is_good_child is the child that we are currently using to
271 	 * attempt reconstruction.
272 	 */
273 	indirect_child_t *is_good_child;
274 
275 	indirect_child_t is_child[];
276 } indirect_split_t;
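
/*
 * Worked example of the layout above: a 24KB i/o whose mapping was split into
 * segments of 8KB, 4KB and 12KB produces three indirect_split_t's with
 * (is_split_offset, is_size) of (0, 8KB), (8KB, 4KB) and (12KB, 12KB); each
 * is_split_offset is the sum of the preceding segments' is_size's.
 */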
277 
278 /*
279  * The indirect_vsd_t is associated with each i/o to the indirect vdev.
280  * It is the "Vdev-Specific Data" in the zio_t's io_vsd.
281  */
282 typedef struct indirect_vsd {
283 	boolean_t iv_split_block;
284 	boolean_t iv_reconstruct;
285 	uint64_t iv_unique_combinations;
286 	uint64_t iv_attempts;
287 	uint64_t iv_attempts_max;
288 
289 	list_t iv_splits; /* list of indirect_split_t's */
290 } indirect_vsd_t;
291 
292 static void
293 vdev_indirect_map_free(zio_t *zio)
294 {
295 	indirect_vsd_t *iv = zio->io_vsd;
296 
297 	indirect_split_t *is;
298 	while ((is = list_remove_head(&iv->iv_splits)) != NULL) {
299 		for (int c = 0; c < is->is_children; c++) {
300 			indirect_child_t *ic = &is->is_child[c];
301 			if (ic->ic_data != NULL)
302 				abd_free(ic->ic_data);
303 		}
304 
305 		indirect_child_t *ic;
306 		while ((ic = list_remove_head(&is->is_unique_child)) != NULL)
307 			;
308 
309 		list_destroy(&is->is_unique_child);
310 
311 		kmem_free(is,
312 		    offsetof(indirect_split_t, is_child[is->is_children]));
313 	}
314 	kmem_free(iv, sizeof (*iv));
315 }
316 
317 static const zio_vsd_ops_t vdev_indirect_vsd_ops = {
318 	.vsd_free = vdev_indirect_map_free,
319 };
320 
321 /*
322  * Mark the given offset and size as being obsolete.
323  */
324 void
325 vdev_indirect_mark_obsolete(vdev_t *vd, uint64_t offset, uint64_t size)
326 {
327 	spa_t *spa = vd->vdev_spa;
328 
329 	ASSERT3U(vd->vdev_indirect_config.vic_mapping_object, !=, 0);
330 	ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops);
331 	ASSERT(size > 0);
332 	VERIFY(vdev_indirect_mapping_entry_for_offset(
333 	    vd->vdev_indirect_mapping, offset) != NULL);
334 
335 	if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
336 		mutex_enter(&vd->vdev_obsolete_lock);
337 		zfs_range_tree_add(vd->vdev_obsolete_segments, offset, size);
338 		mutex_exit(&vd->vdev_obsolete_lock);
339 		vdev_dirty(vd, 0, NULL, spa_syncing_txg(spa));
340 	}
341 }
342 
343 /*
344  * Mark the DVA vdev_id:offset:size as being obsolete in the given tx. This
345  * wrapper is provided because the DMU does not know about vdev_t's and
346  * cannot directly call vdev_indirect_mark_obsolete.
347  */
348 void
349 spa_vdev_indirect_mark_obsolete(spa_t *spa, uint64_t vdev_id, uint64_t offset,
350     uint64_t size, dmu_tx_t *tx)
351 {
352 	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
353 	ASSERT(dmu_tx_is_syncing(tx));
354 
355 	/* The DMU can only remap indirect vdevs. */
356 	ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
357 	vdev_indirect_mark_obsolete(vd, offset, size);
358 }
359 
360 static spa_condensing_indirect_t *
361 spa_condensing_indirect_create(spa_t *spa)
362 {
363 	spa_condensing_indirect_phys_t *scip =
364 	    &spa->spa_condensing_indirect_phys;
365 	spa_condensing_indirect_t *sci = kmem_zalloc(sizeof (*sci), KM_SLEEP);
366 	objset_t *mos = spa->spa_meta_objset;
367 
368 	for (int i = 0; i < TXG_SIZE; i++) {
369 		list_create(&sci->sci_new_mapping_entries[i],
370 		    sizeof (vdev_indirect_mapping_entry_t),
371 		    offsetof(vdev_indirect_mapping_entry_t, vime_node));
372 	}
373 
374 	sci->sci_new_mapping =
375 	    vdev_indirect_mapping_open(mos, scip->scip_next_mapping_object);
376 
377 	return (sci);
378 }
379 
380 static void
381 spa_condensing_indirect_destroy(spa_condensing_indirect_t *sci)
382 {
383 	for (int i = 0; i < TXG_SIZE; i++)
384 		list_destroy(&sci->sci_new_mapping_entries[i]);
385 
386 	if (sci->sci_new_mapping != NULL)
387 		vdev_indirect_mapping_close(sci->sci_new_mapping);
388 
389 	kmem_free(sci, sizeof (*sci));
390 }
391 
392 boolean_t
393 vdev_indirect_should_condense(vdev_t *vd)
394 {
395 	vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
396 	spa_t *spa = vd->vdev_spa;
397 
398 	ASSERT(dsl_pool_sync_context(spa->spa_dsl_pool));
399 
400 	if (!zfs_condense_indirect_vdevs_enable)
401 		return (B_FALSE);
402 
403 	/*
404 	 * We can only condense one indirect vdev at a time.
405 	 */
406 	if (spa->spa_condensing_indirect != NULL)
407 		return (B_FALSE);
408 
409 	if (spa_shutting_down(spa))
410 		return (B_FALSE);
411 
412 	/*
413 	 * The mapping object size must not change while we are
414 	 * condensing, so we can only condense indirect vdevs
415 	 * (not vdevs that are still in the middle of being removed).
416 	 */
417 	if (vd->vdev_ops != &vdev_indirect_ops)
418 		return (B_FALSE);
419 
420 	/*
421 	 * If nothing new has been marked obsolete, there is no
422 	 * point in condensing.
423 	 */
424 	uint64_t obsolete_sm_obj __maybe_unused;
425 	ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_obj));
426 	if (vd->vdev_obsolete_sm == NULL) {
427 		ASSERT0(obsolete_sm_obj);
428 		return (B_FALSE);
429 	}
430 
431 	ASSERT(vd->vdev_obsolete_sm != NULL);
432 
433 	ASSERT3U(obsolete_sm_obj, ==, space_map_object(vd->vdev_obsolete_sm));
434 
435 	uint64_t bytes_mapped = vdev_indirect_mapping_bytes_mapped(vim);
436 	uint64_t bytes_obsolete = space_map_allocated(vd->vdev_obsolete_sm);
437 	uint64_t mapping_size = vdev_indirect_mapping_size(vim);
438 	uint64_t obsolete_sm_size = space_map_length(vd->vdev_obsolete_sm);
439 
440 	ASSERT3U(bytes_obsolete, <=, bytes_mapped);
441 
442 	/*
443 	 * If a high percentage of the bytes that are mapped have become
444 	 * obsolete, condense (unless the mapping is already small enough).
445 	 * This has a good chance of reducing the amount of memory used
446 	 * by the mapping.
447 	 */
448 	if (bytes_obsolete * 100 / bytes_mapped >=
449 	    zfs_condense_indirect_obsolete_pct &&
450 	    mapping_size > zfs_condense_min_mapping_bytes) {
451 		zfs_dbgmsg("should condense vdev %llu because obsolete "
452 		    "spacemap covers %d%% of %lluMB mapping",
453 		    (u_longlong_t)vd->vdev_id,
454 		    (int)(bytes_obsolete * 100 / bytes_mapped),
455 		    (u_longlong_t)bytes_mapped / 1024 / 1024);
456 		return (B_TRUE);
457 	}
458 
459 	/*
460 	 * If the obsolete space map takes up too much space on disk,
461 	 * condense in order to free up this disk space.
462 	 */
463 	if (obsolete_sm_size >= zfs_condense_max_obsolete_bytes) {
464 		zfs_dbgmsg("should condense vdev %llu because obsolete sm "
465 		    "length %lluMB >= max size %lluMB",
466 		    (u_longlong_t)vd->vdev_id,
467 		    (u_longlong_t)obsolete_sm_size / 1024 / 1024,
468 		    (u_longlong_t)zfs_condense_max_obsolete_bytes /
469 		    1024 / 1024);
470 		return (B_TRUE);
471 	}
472 
473 	return (B_FALSE);
474 }
475 
476 /*
477  * This sync task completes (finishes) a condense, deleting the old
478  * mapping and replacing it with the new one.
479  */
480 static void
481 spa_condense_indirect_complete_sync(void *arg, dmu_tx_t *tx)
482 {
483 	spa_condensing_indirect_t *sci = arg;
484 	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
485 	spa_condensing_indirect_phys_t *scip =
486 	    &spa->spa_condensing_indirect_phys;
487 	vdev_t *vd = vdev_lookup_top(spa, scip->scip_vdev);
488 	vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
489 	objset_t *mos = spa->spa_meta_objset;
490 	vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
491 	uint64_t old_count = vdev_indirect_mapping_num_entries(old_mapping);
492 	uint64_t new_count =
493 	    vdev_indirect_mapping_num_entries(sci->sci_new_mapping);
494 
495 	ASSERT(dmu_tx_is_syncing(tx));
496 	ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
497 	ASSERT3P(sci, ==, spa->spa_condensing_indirect);
498 	for (int i = 0; i < TXG_SIZE; i++) {
499 		ASSERT(list_is_empty(&sci->sci_new_mapping_entries[i]));
500 	}
501 	ASSERT(vic->vic_mapping_object != 0);
502 	ASSERT3U(vd->vdev_id, ==, scip->scip_vdev);
503 	ASSERT(scip->scip_next_mapping_object != 0);
504 	ASSERT(scip->scip_prev_obsolete_sm_object != 0);
505 
506 	/*
507 	 * Reset vdev_indirect_mapping to refer to the new object.
508 	 */
509 	rw_enter(&vd->vdev_indirect_rwlock, RW_WRITER);
510 	vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
511 	vd->vdev_indirect_mapping = sci->sci_new_mapping;
512 	rw_exit(&vd->vdev_indirect_rwlock);
513 
514 	sci->sci_new_mapping = NULL;
515 	vdev_indirect_mapping_free(mos, vic->vic_mapping_object, tx);
516 	vic->vic_mapping_object = scip->scip_next_mapping_object;
517 	scip->scip_next_mapping_object = 0;
518 
519 	space_map_free_obj(mos, scip->scip_prev_obsolete_sm_object, tx);
520 	spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
521 	scip->scip_prev_obsolete_sm_object = 0;
522 
523 	scip->scip_vdev = 0;
524 
525 	VERIFY0(zap_remove(mos, DMU_POOL_DIRECTORY_OBJECT,
526 	    DMU_POOL_CONDENSING_INDIRECT, tx));
527 	spa_condensing_indirect_destroy(spa->spa_condensing_indirect);
528 	spa->spa_condensing_indirect = NULL;
529 
530 	zfs_dbgmsg("finished condense of vdev %llu in txg %llu: "
531 	    "new mapping object %llu has %llu entries "
532 	    "(was %llu entries)",
533 	    (u_longlong_t)vd->vdev_id, (u_longlong_t)dmu_tx_get_txg(tx),
534 	    (u_longlong_t)vic->vic_mapping_object,
535 	    (u_longlong_t)new_count, (u_longlong_t)old_count);
536 
537 	vdev_config_dirty(spa->spa_root_vdev);
538 }
539 
540 /*
541  * This sync task appends entries to the new mapping object.
542  */
543 static void
544 spa_condense_indirect_commit_sync(void *arg, dmu_tx_t *tx)
545 {
546 	spa_condensing_indirect_t *sci = arg;
547 	uint64_t txg = dmu_tx_get_txg(tx);
548 	spa_t *spa __maybe_unused = dmu_tx_pool(tx)->dp_spa;
549 
550 	ASSERT(dmu_tx_is_syncing(tx));
551 	ASSERT3P(sci, ==, spa->spa_condensing_indirect);
552 
553 	vdev_indirect_mapping_add_entries(sci->sci_new_mapping,
554 	    &sci->sci_new_mapping_entries[txg & TXG_MASK], tx);
555 	ASSERT(list_is_empty(&sci->sci_new_mapping_entries[txg & TXG_MASK]));
556 }
557 
558 /*
559  * Open-context function to add one entry to the new mapping.  The new
560  * entry will be remembered and written from syncing context.
561  */
562 static void
563 spa_condense_indirect_commit_entry(spa_t *spa,
564     vdev_indirect_mapping_entry_phys_t *vimep, uint32_t count)
565 {
566 	spa_condensing_indirect_t *sci = spa->spa_condensing_indirect;
567 
568 	ASSERT3U(count, <, DVA_GET_ASIZE(&vimep->vimep_dst));
569 
570 	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
571 	dmu_tx_hold_space(tx, sizeof (*vimep) + sizeof (count));
572 	VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT));
573 	int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
574 
575 	/*
576 	 * If we are the first entry committed this txg, kick off the sync
577 	 * task to write to the MOS on our behalf.
578 	 */
579 	if (list_is_empty(&sci->sci_new_mapping_entries[txgoff])) {
580 		dsl_sync_task_nowait(dmu_tx_pool(tx),
581 		    spa_condense_indirect_commit_sync, sci, tx);
582 	}
583 
584 	vdev_indirect_mapping_entry_t *vime =
585 	    kmem_alloc(sizeof (*vime), KM_SLEEP);
586 	vime->vime_mapping = *vimep;
587 	vime->vime_obsolete_count = count;
588 	list_insert_tail(&sci->sci_new_mapping_entries[txgoff], vime);
589 
590 	dmu_tx_commit(tx);
591 }
592 
593 static void
594 spa_condense_indirect_generate_new_mapping(vdev_t *vd,
595     uint32_t *obsolete_counts, uint64_t start_index, zthr_t *zthr)
596 {
597 	spa_t *spa = vd->vdev_spa;
598 	uint64_t mapi = start_index;
599 	vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
600 	uint64_t old_num_entries =
601 	    vdev_indirect_mapping_num_entries(old_mapping);
602 
603 	ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
604 	ASSERT3U(vd->vdev_id, ==, spa->spa_condensing_indirect_phys.scip_vdev);
605 
606 	zfs_dbgmsg("starting condense of vdev %llu from index %llu",
607 	    (u_longlong_t)vd->vdev_id,
608 	    (u_longlong_t)mapi);
609 
610 	while (mapi < old_num_entries) {
611 
612 		if (zthr_iscancelled(zthr)) {
613 			zfs_dbgmsg("pausing condense of vdev %llu "
614 			    "at index %llu", (u_longlong_t)vd->vdev_id,
615 			    (u_longlong_t)mapi);
616 			break;
617 		}
618 
619 		vdev_indirect_mapping_entry_phys_t *entry =
620 		    &old_mapping->vim_entries[mapi];
621 		uint64_t entry_size = DVA_GET_ASIZE(&entry->vimep_dst);
622 		ASSERT3U(obsolete_counts[mapi], <=, entry_size);
623 		if (obsolete_counts[mapi] < entry_size) {
624 			spa_condense_indirect_commit_entry(spa, entry,
625 			    obsolete_counts[mapi]);
626 
627 			/*
628 			 * This delay may be requested for testing, debugging,
629 			 * or performance reasons.
630 			 */
631 			hrtime_t now = gethrtime();
632 			hrtime_t sleep_until = now + MSEC2NSEC(
633 			    zfs_condense_indirect_commit_entry_delay_ms);
634 			zfs_sleep_until(sleep_until);
635 		}
636 
637 		mapi++;
638 	}
639 }
640 
641 static boolean_t
642 spa_condense_indirect_thread_check(void *arg, zthr_t *zthr)
643 {
644 	(void) zthr;
645 	spa_t *spa = arg;
646 
647 	return (spa->spa_condensing_indirect != NULL);
648 }
649 
650 static void
651 spa_condense_indirect_thread(void *arg, zthr_t *zthr)
652 {
653 	spa_t *spa = arg;
654 	vdev_t *vd;
655 
656 	ASSERT3P(spa->spa_condensing_indirect, !=, NULL);
657 	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
658 	vd = vdev_lookup_top(spa, spa->spa_condensing_indirect_phys.scip_vdev);
659 	ASSERT3P(vd, !=, NULL);
660 	spa_config_exit(spa, SCL_VDEV, FTAG);
661 
662 	spa_condensing_indirect_t *sci = spa->spa_condensing_indirect;
663 	spa_condensing_indirect_phys_t *scip =
664 	    &spa->spa_condensing_indirect_phys;
665 	uint32_t *counts;
666 	uint64_t start_index;
667 	vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
668 	space_map_t *prev_obsolete_sm = NULL;
669 
670 	ASSERT3U(vd->vdev_id, ==, scip->scip_vdev);
671 	ASSERT(scip->scip_next_mapping_object != 0);
672 	ASSERT(scip->scip_prev_obsolete_sm_object != 0);
673 	ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
674 
675 	for (int i = 0; i < TXG_SIZE; i++) {
676 		/*
677 		 * The list must start out empty in order for the
678 		 * _commit_sync() sync task to be properly registered
679 		 * on the first call to _commit_entry(); so it's wise
680 		 * to double check and ensure we actually are starting
681 		 * with empty lists.
682 		 */
683 		ASSERT(list_is_empty(&sci->sci_new_mapping_entries[i]));
684 	}
685 
686 	VERIFY0(space_map_open(&prev_obsolete_sm, spa->spa_meta_objset,
687 	    scip->scip_prev_obsolete_sm_object, 0, vd->vdev_asize, 0));
688 	counts = vdev_indirect_mapping_load_obsolete_counts(old_mapping);
689 	if (prev_obsolete_sm != NULL) {
690 		vdev_indirect_mapping_load_obsolete_spacemap(old_mapping,
691 		    counts, prev_obsolete_sm);
692 	}
693 	space_map_close(prev_obsolete_sm);
694 
695 	/*
696 	 * Generate new mapping.  Determine what index to continue from
697 	 * based on the max offset that we've already written in the
698 	 * new mapping.
699 	 */
700 	uint64_t max_offset =
701 	    vdev_indirect_mapping_max_offset(sci->sci_new_mapping);
702 	if (max_offset == 0) {
703 		/* We haven't written anything to the new mapping yet. */
704 		start_index = 0;
705 	} else {
706 		/*
707 		 * Pick up from where we left off. _entry_for_offset()
708 		 * returns a pointer into the vim_entries array. If
709 		 * max_offset is greater than any of the mappings
710 		 * contained in the table  NULL will be returned and
711 		 * contained in the table, NULL will be returned and
712 		 * old_mapping.
713 		 */
714 
715 		vdev_indirect_mapping_entry_phys_t *entry =
716 		    vdev_indirect_mapping_entry_for_offset_or_next(old_mapping,
717 		    max_offset);
718 
719 		if (entry == NULL) {
720 			/*
721 			 * We've already written the whole new mapping.
722 			 * This special value will cause us to skip the
723 			 * generate_new_mapping step and just do the sync
724 			 * task to complete the condense.
725 			 */
726 			start_index = UINT64_MAX;
727 		} else {
728 			start_index = entry - old_mapping->vim_entries;
729 			ASSERT3U(start_index, <,
730 			    vdev_indirect_mapping_num_entries(old_mapping));
731 		}
732 	}
733 
734 	spa_condense_indirect_generate_new_mapping(vd, counts,
735 	    start_index, zthr);
736 
737 	vdev_indirect_mapping_free_obsolete_counts(old_mapping, counts);
738 
739 	/*
740 	 * If the zthr has received a cancellation signal while running
741 	 * in generate_new_mapping() or at any point after that, then bail
742 	 * early. We don't want to complete the condense if the spa is
743 	 * shutting down.
744 	 */
745 	if (zthr_iscancelled(zthr))
746 		return;
747 
748 	VERIFY0(dsl_sync_task(spa_name(spa), NULL,
749 	    spa_condense_indirect_complete_sync, sci, 0,
750 	    ZFS_SPACE_CHECK_EXTRA_RESERVED));
751 }
752 
753 /*
754  * Sync task to begin the condensing process.
755  */
756 void
757 spa_condense_indirect_start_sync(vdev_t *vd, dmu_tx_t *tx)
758 {
759 	spa_t *spa = vd->vdev_spa;
760 	spa_condensing_indirect_phys_t *scip =
761 	    &spa->spa_condensing_indirect_phys;
762 
763 	ASSERT0(scip->scip_next_mapping_object);
764 	ASSERT0(scip->scip_prev_obsolete_sm_object);
765 	ASSERT0(scip->scip_vdev);
766 	ASSERT(dmu_tx_is_syncing(tx));
767 	ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
768 	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_OBSOLETE_COUNTS));
769 	ASSERT(vdev_indirect_mapping_num_entries(vd->vdev_indirect_mapping));
770 
771 	uint64_t obsolete_sm_obj;
772 	VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_obj));
773 	ASSERT3U(obsolete_sm_obj, !=, 0);
774 
775 	scip->scip_vdev = vd->vdev_id;
776 	scip->scip_next_mapping_object =
777 	    vdev_indirect_mapping_alloc(spa->spa_meta_objset, tx);
778 
779 	scip->scip_prev_obsolete_sm_object = obsolete_sm_obj;
780 
781 	/*
782 	 * We don't need to allocate a new space map object, since
783 	 * vdev_indirect_sync_obsolete will allocate one when needed.
784 	 */
785 	space_map_close(vd->vdev_obsolete_sm);
786 	vd->vdev_obsolete_sm = NULL;
787 	VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
788 	    VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, tx));
789 
790 	VERIFY0(zap_add(spa->spa_dsl_pool->dp_meta_objset,
791 	    DMU_POOL_DIRECTORY_OBJECT,
792 	    DMU_POOL_CONDENSING_INDIRECT, sizeof (uint64_t),
793 	    sizeof (*scip) / sizeof (uint64_t), scip, tx));
794 
795 	ASSERT3P(spa->spa_condensing_indirect, ==, NULL);
796 	spa->spa_condensing_indirect = spa_condensing_indirect_create(spa);
797 
798 	zfs_dbgmsg("starting condense of vdev %llu in txg %llu: "
799 	    "posm=%llu nm=%llu",
800 	    (u_longlong_t)vd->vdev_id, (u_longlong_t)dmu_tx_get_txg(tx),
801 	    (u_longlong_t)scip->scip_prev_obsolete_sm_object,
802 	    (u_longlong_t)scip->scip_next_mapping_object);
803 
804 	zthr_wakeup(spa->spa_condense_zthr);
805 }
806 
807 /*
808  * Sync to the given vdev's obsolete space map any segments that are no longer
809  * referenced as of the given txg.
810  *
811  * If the obsolete space map doesn't exist yet, create and open it.
812  */
813 void
814 vdev_indirect_sync_obsolete(vdev_t *vd, dmu_tx_t *tx)
815 {
816 	spa_t *spa = vd->vdev_spa;
817 	vdev_indirect_config_t *vic __maybe_unused = &vd->vdev_indirect_config;
818 
819 	ASSERT3U(vic->vic_mapping_object, !=, 0);
820 	ASSERT(zfs_range_tree_space(vd->vdev_obsolete_segments) > 0);
821 	ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops);
822 	ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS));
823 
824 	uint64_t obsolete_sm_object;
825 	VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
826 	if (obsolete_sm_object == 0) {
827 		obsolete_sm_object = space_map_alloc(spa->spa_meta_objset,
828 		    zfs_vdev_standard_sm_blksz, tx);
829 
830 		ASSERT(vd->vdev_top_zap != 0);
831 		VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
832 		    VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM,
833 		    sizeof (obsolete_sm_object), 1, &obsolete_sm_object, tx));
834 		ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
835 		ASSERT3U(obsolete_sm_object, !=, 0);
836 
837 		spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
838 		VERIFY0(space_map_open(&vd->vdev_obsolete_sm,
839 		    spa->spa_meta_objset, obsolete_sm_object,
840 		    0, vd->vdev_asize, 0));
841 	}
842 
843 	ASSERT(vd->vdev_obsolete_sm != NULL);
844 	ASSERT3U(obsolete_sm_object, ==,
845 	    space_map_object(vd->vdev_obsolete_sm));
846 
847 	space_map_write(vd->vdev_obsolete_sm,
848 	    vd->vdev_obsolete_segments, SM_ALLOC, SM_NO_VDEVID, tx);
849 	zfs_range_tree_vacate(vd->vdev_obsolete_segments, NULL, NULL);
850 }
851 
852 int
853 spa_condense_init(spa_t *spa)
854 {
855 	int error = zap_lookup(spa->spa_meta_objset,
856 	    DMU_POOL_DIRECTORY_OBJECT,
857 	    DMU_POOL_CONDENSING_INDIRECT, sizeof (uint64_t),
858 	    sizeof (spa->spa_condensing_indirect_phys) / sizeof (uint64_t),
859 	    &spa->spa_condensing_indirect_phys);
860 	if (error == 0) {
861 		if (spa_writeable(spa)) {
862 			spa->spa_condensing_indirect =
863 			    spa_condensing_indirect_create(spa);
864 		}
865 		return (0);
866 	} else if (error == ENOENT) {
867 		return (0);
868 	} else {
869 		return (error);
870 	}
871 }
872 
873 void
874 spa_condense_fini(spa_t *spa)
875 {
876 	if (spa->spa_condensing_indirect != NULL) {
877 		spa_condensing_indirect_destroy(spa->spa_condensing_indirect);
878 		spa->spa_condensing_indirect = NULL;
879 	}
880 }
881 
882 void
883 spa_start_indirect_condensing_thread(spa_t *spa)
884 {
885 	ASSERT3P(spa->spa_condense_zthr, ==, NULL);
886 	spa->spa_condense_zthr = zthr_create("z_indirect_condense",
887 	    spa_condense_indirect_thread_check,
888 	    spa_condense_indirect_thread, spa, minclsyspri);
889 }
890 
891 /*
892  * Gets the obsolete spacemap object from the vdev's ZAP.  On success sm_obj
893  * will contain either the obsolete spacemap object or zero if none exists.
894  * All other errors are returned to the caller.
895  */
896 int
897 vdev_obsolete_sm_object(vdev_t *vd, uint64_t *sm_obj)
898 {
899 	ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
900 
901 	if (vd->vdev_top_zap == 0) {
902 		*sm_obj = 0;
903 		return (0);
904 	}
905 
906 	int error = zap_lookup(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
907 	    VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, sizeof (uint64_t), 1, sm_obj);
908 	if (error == ENOENT) {
909 		*sm_obj = 0;
910 		error = 0;
911 	}
912 
913 	return (error);
914 }
915 
916 /*
917  * Gets the "obsolete counts are precise" flag from the vdev's ZAP.  On
918  * success, are_precise will be set to reflect whether the counts are precise.
919  * All other errors are returned to the caller.
920  */
921 int
922 vdev_obsolete_counts_are_precise(vdev_t *vd, boolean_t *are_precise)
923 {
924 	ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
925 
926 	if (vd->vdev_top_zap == 0) {
927 		*are_precise = B_FALSE;
928 		return (0);
929 	}
930 
931 	uint64_t val = 0;
932 	int error = zap_lookup(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
933 	    VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, sizeof (val), 1, &val);
934 	if (error == 0) {
935 		*are_precise = (val != 0);
936 	} else if (error == ENOENT) {
937 		*are_precise = B_FALSE;
938 		error = 0;
939 	}
940 
941 	return (error);
942 }
943 
944 static void
945 vdev_indirect_close(vdev_t *vd)
946 {
947 	(void) vd;
948 }
949 
950 static int
951 vdev_indirect_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
952     uint64_t *logical_ashift, uint64_t *physical_ashift)
953 {
954 	*psize = *max_psize = vd->vdev_asize +
955 	    VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
956 	*logical_ashift = vd->vdev_ashift;
957 	*physical_ashift = vd->vdev_physical_ashift;
958 	return (0);
959 }
960 
961 typedef struct remap_segment {
962 	vdev_t *rs_vd;
963 	uint64_t rs_offset;
964 	uint64_t rs_asize;
965 	uint64_t rs_split_offset;
966 	list_node_t rs_node;
967 } remap_segment_t;
968 
969 static remap_segment_t *
970 rs_alloc(vdev_t *vd, uint64_t offset, uint64_t asize, uint64_t split_offset)
971 {
972 	remap_segment_t *rs = kmem_alloc(sizeof (remap_segment_t), KM_SLEEP);
973 	rs->rs_vd = vd;
974 	rs->rs_offset = offset;
975 	rs->rs_asize = asize;
976 	rs->rs_split_offset = split_offset;
977 	return (rs);
978 }
979 
980 /*
981  * Given an indirect vdev and an extent on that vdev, it duplicates the
982  * physical entries of the indirect mapping that correspond to the extent
983  * to a new array and returns a pointer to it. In addition, copied_entries
984  * is populated with the number of mapping entries that were duplicated.
985  *
986  * Note that the function assumes that the caller holds vdev_indirect_rwlock.
987  * This ensures that the mapping won't change due to condensing as we
988  * copy over its contents.
989  *
990  * Finally, since we are doing an allocation, it is up to the caller to
991  * free the array allocated in this function.
992  */
993 static vdev_indirect_mapping_entry_phys_t *
994 vdev_indirect_mapping_duplicate_adjacent_entries(vdev_t *vd, uint64_t offset,
995     uint64_t asize, uint64_t *copied_entries)
996 {
997 	vdev_indirect_mapping_entry_phys_t *duplicate_mappings = NULL;
998 	vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
999 	uint64_t entries = 0;
1000 
1001 	ASSERT(RW_READ_HELD(&vd->vdev_indirect_rwlock));
1002 
1003 	vdev_indirect_mapping_entry_phys_t *first_mapping =
1004 	    vdev_indirect_mapping_entry_for_offset(vim, offset);
1005 	ASSERT3P(first_mapping, !=, NULL);
1006 
1007 	vdev_indirect_mapping_entry_phys_t *m = first_mapping;
1008 	while (asize > 0) {
1009 		uint64_t size = DVA_GET_ASIZE(&m->vimep_dst);
1010 
1011 		ASSERT3U(offset, >=, DVA_MAPPING_GET_SRC_OFFSET(m));
1012 		ASSERT3U(offset, <, DVA_MAPPING_GET_SRC_OFFSET(m) + size);
1013 
1014 		uint64_t inner_offset = offset - DVA_MAPPING_GET_SRC_OFFSET(m);
1015 		uint64_t inner_size = MIN(asize, size - inner_offset);
1016 
1017 		offset += inner_size;
1018 		asize -= inner_size;
1019 		entries++;
1020 		m++;
1021 	}
1022 
1023 	size_t copy_length = entries * sizeof (*first_mapping);
1024 	duplicate_mappings = kmem_alloc(copy_length, KM_SLEEP);
1025 	memcpy(duplicate_mappings, first_mapping, copy_length);
1026 	*copied_entries = entries;
1027 
1028 	return (duplicate_mappings);
1029 }
1030 
1031 /*
1032  * Goes through the relevant indirect mappings until it hits a concrete vdev
1033  * and issues the callback. On the way to the concrete vdev, if any other
1034  * indirect vdevs are encountered, then the callback will also be called on
1035  * each of those indirect vdevs. For example, if the segment is mapped to
1036  * segment A on indirect vdev 1, and then segment A on indirect vdev 1 is
1037  * mapped to segment B on concrete vdev 2, then the callback will be called on
1038  * both vdev 1 and vdev 2.
1039  *
1040  * While the callback passed to vdev_indirect_remap() is called on every vdev
1041  * the function encounters, certain callbacks only care about concrete vdevs.
1042  * These types of callbacks should return immediately and explicitly when they
1043  * are called on an indirect vdev.
1044  *
1045  * Because there is a possibility that a DVA section in the indirect device
1046  * has been split into multiple sections in our mapping, we keep track
1047  * of the relevant contiguous segments of the new location (remap_segment_t)
1048  * in a stack. This way we can call the callback for each of the new sections
1049  * created by a single section of the indirect device. Note though, that in
1050  * this scenario the callbacks in each split block won't occur in-order in
1051  * terms of offset, so callers should not make any assumptions about that.
1052  *
1053  * For callbacks that don't handle split blocks and immediately return when
1054  * they encounter them (as is the case for remap_blkptr_cb), the caller can
1055  * assume that its callback will be applied from the first indirect vdev
1056  * encountered to the last one and then the concrete vdev, in that order.
1057  */
1058 static void
1059 vdev_indirect_remap(vdev_t *vd, uint64_t offset, uint64_t asize,
1060     void (*func)(uint64_t, vdev_t *, uint64_t, uint64_t, void *), void *arg)
1061 {
1062 	list_t stack;
1063 	spa_t *spa = vd->vdev_spa;
1064 
1065 	list_create(&stack, sizeof (remap_segment_t),
1066 	    offsetof(remap_segment_t, rs_node));
1067 
1068 	for (remap_segment_t *rs = rs_alloc(vd, offset, asize, 0);
1069 	    rs != NULL; rs = list_remove_head(&stack)) {
1070 		vdev_t *v = rs->rs_vd;
1071 		uint64_t num_entries = 0;
1072 
1073 		ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
1074 		ASSERT(rs->rs_asize > 0);
1075 
1076 		/*
1077 		 * Note: As this function can be called from open context
1078 		 * (e.g. zio_read()), we need the following rwlock to
1079 		 * prevent the mapping from being changed by condensing.
1080 		 *
1081 		 * So we grab the lock and we make a copy of the entries
1082 		 * that are relevant to the extent that we are working on.
1083 		 * Once that is done, we drop the lock and iterate over
1084 		 * our copy of the mapping. Once we are done with the
1085 		 * remap segment and free it, we also free our copy
1086 		 * of the indirect mapping entries that are relevant to it.
1087 		 *
1088 		 * This way we don't need to wait until the function is
1089 		 * finished with a segment, to condense it. In addition, we
1090 		 * don't need a recursive rwlock for the case that a call to
1091 		 * vdev_indirect_remap() needs to call itself (through the
1092 		 * codepath of its callback) for the same vdev in the middle
1093 		 * of its execution.
1094 		 */
1095 		rw_enter(&v->vdev_indirect_rwlock, RW_READER);
1096 		ASSERT3P(v->vdev_indirect_mapping, !=, NULL);
1097 
1098 		vdev_indirect_mapping_entry_phys_t *mapping =
1099 		    vdev_indirect_mapping_duplicate_adjacent_entries(v,
1100 		    rs->rs_offset, rs->rs_asize, &num_entries);
1101 		ASSERT3P(mapping, !=, NULL);
1102 		ASSERT3U(num_entries, >, 0);
1103 		rw_exit(&v->vdev_indirect_rwlock);
1104 
1105 		for (uint64_t i = 0; i < num_entries; i++) {
1106 			/*
1107 			 * Note: the vdev_indirect_mapping can not change
1108 			 * while we are running.  It only changes while the
1109 			 * removal is in progress, and then only from syncing
1110 			 * context. While a removal is in progress, this
1111 			 * function is only called for frees, which also only
1112 			 * happen from syncing context.
1113 			 */
1114 			vdev_indirect_mapping_entry_phys_t *m = &mapping[i];
1115 
1116 			ASSERT3P(m, !=, NULL);
1117 			ASSERT3U(rs->rs_asize, >, 0);
1118 
1119 			uint64_t size = DVA_GET_ASIZE(&m->vimep_dst);
1120 			uint64_t dst_offset = DVA_GET_OFFSET(&m->vimep_dst);
1121 			uint64_t dst_vdev = DVA_GET_VDEV(&m->vimep_dst);
1122 
1123 			ASSERT3U(rs->rs_offset, >=,
1124 			    DVA_MAPPING_GET_SRC_OFFSET(m));
1125 			ASSERT3U(rs->rs_offset, <,
1126 			    DVA_MAPPING_GET_SRC_OFFSET(m) + size);
1127 			ASSERT3U(dst_vdev, !=, v->vdev_id);
1128 
1129 			uint64_t inner_offset = rs->rs_offset -
1130 			    DVA_MAPPING_GET_SRC_OFFSET(m);
1131 			uint64_t inner_size =
1132 			    MIN(rs->rs_asize, size - inner_offset);
1133 
1134 			vdev_t *dst_v = vdev_lookup_top(spa, dst_vdev);
1135 			ASSERT3P(dst_v, !=, NULL);
1136 
1137 			if (dst_v->vdev_ops == &vdev_indirect_ops) {
1138 				list_insert_head(&stack,
1139 				    rs_alloc(dst_v, dst_offset + inner_offset,
1140 				    inner_size, rs->rs_split_offset));
1141 
1142 			}
1143 
1144 			if ((zfs_flags & ZFS_DEBUG_INDIRECT_REMAP) &&
1145 			    IS_P2ALIGNED(inner_size, 2 * SPA_MINBLOCKSIZE)) {
1146 				/*
1147 				 * Note: This clause exists solely for
1148 				 * testing purposes. We use it to ensure that
1149 				 * split blocks work and that the callbacks
1150 				 * using them yield the same result if issued
1151 				 * in reverse order.
1152 				 */
1153 				uint64_t inner_half = inner_size / 2;
1154 
1155 				func(rs->rs_split_offset + inner_half, dst_v,
1156 				    dst_offset + inner_offset + inner_half,
1157 				    inner_half, arg);
1158 
1159 				func(rs->rs_split_offset, dst_v,
1160 				    dst_offset + inner_offset,
1161 				    inner_half, arg);
1162 			} else {
1163 				func(rs->rs_split_offset, dst_v,
1164 				    dst_offset + inner_offset,
1165 				    inner_size, arg);
1166 			}
1167 
1168 			rs->rs_offset += inner_size;
1169 			rs->rs_asize -= inner_size;
1170 			rs->rs_split_offset += inner_size;
1171 		}
1172 		VERIFY0(rs->rs_asize);
1173 
1174 		kmem_free(mapping, num_entries * sizeof (*mapping));
1175 		kmem_free(rs, sizeof (remap_segment_t));
1176 	}
1177 	list_destroy(&stack);
1178 }
1179 
1180 static void
1181 vdev_indirect_child_io_done(zio_t *zio)
1182 {
1183 	zio_t *pio = zio->io_private;
1184 
1185 	mutex_enter(&pio->io_lock);
1186 	pio->io_error = zio_worst_error(pio->io_error, zio->io_error);
1187 	mutex_exit(&pio->io_lock);
1188 
1189 	abd_free(zio->io_abd);
1190 }
1191 
1192 /*
1193  * This is a callback for vdev_indirect_remap() which allocates an
1194  * indirect_split_t for each split segment and adds it to iv_splits.
1195  */
1196 static void
1197 vdev_indirect_gather_splits(uint64_t split_offset, vdev_t *vd, uint64_t offset,
1198     uint64_t size, void *arg)
1199 {
1200 	zio_t *zio = arg;
1201 	indirect_vsd_t *iv = zio->io_vsd;
1202 
1203 	ASSERT3P(vd, !=, NULL);
1204 
1205 	if (vd->vdev_ops == &vdev_indirect_ops)
1206 		return;
1207 
1208 	int n = 1;
1209 	if (vd->vdev_ops == &vdev_mirror_ops)
1210 		n = vd->vdev_children;
1211 
1212 	indirect_split_t *is =
1213 	    kmem_zalloc(offsetof(indirect_split_t, is_child[n]), KM_SLEEP);
1214 
1215 	is->is_children = n;
1216 	is->is_size = size;
1217 	is->is_split_offset = split_offset;
1218 	is->is_target_offset = offset;
1219 	is->is_vdev = vd;
1220 	list_create(&is->is_unique_child, sizeof (indirect_child_t),
1221 	    offsetof(indirect_child_t, ic_node));
1222 
1223 	/*
1224 	 * Note that we only consider multiple copies of the data for
1225 	 * *mirror* vdevs.  We don't for "replacing" or "spare" vdevs, even
1226 	 * though they use the same ops as mirror, because there's only one
1227 	 * "good" copy under the replacing/spare.
1228 	 */
1229 	if (vd->vdev_ops == &vdev_mirror_ops) {
1230 		for (int i = 0; i < n; i++) {
1231 			is->is_child[i].ic_vdev = vd->vdev_child[i];
1232 			list_link_init(&is->is_child[i].ic_node);
1233 		}
1234 	} else {
1235 		is->is_child[0].ic_vdev = vd;
1236 	}
1237 
1238 	list_insert_tail(&iv->iv_splits, is);
1239 }
1240 
1241 static void
1242 vdev_indirect_read_split_done(zio_t *zio)
1243 {
1244 	indirect_child_t *ic = zio->io_private;
1245 
1246 	if (zio->io_error != 0) {
1247 		/*
1248 		 * Clear ic_data to indicate that we do not have data for this
1249 		 * child.
1250 		 */
1251 		abd_free(ic->ic_data);
1252 		ic->ic_data = NULL;
1253 	}
1254 }
1255 
1256 /*
1257  * Issue reads for all copies (mirror children) of all splits.
1258  */
1259 static void
1260 vdev_indirect_read_all(zio_t *zio)
1261 {
1262 	indirect_vsd_t *iv = zio->io_vsd;
1263 
1264 	ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
1265 
1266 	for (indirect_split_t *is = list_head(&iv->iv_splits);
1267 	    is != NULL; is = list_next(&iv->iv_splits, is)) {
1268 		for (int i = 0; i < is->is_children; i++) {
1269 			indirect_child_t *ic = &is->is_child[i];
1270 
1271 			if (!vdev_readable(ic->ic_vdev))
1272 				continue;
1273 
1274 			/*
1275 			 * If a child is missing the data, set ic_error. Used
1276 			 * in vdev_indirect_repair(). We perform the read
1277 			 * nevertheless which provides the opportunity to
1278 			 * reconstruct the split block if at all possible.
1279 			 */
1280 			if (vdev_dtl_contains(ic->ic_vdev, DTL_MISSING,
1281 			    zio->io_txg, 1))
1282 				ic->ic_error = SET_ERROR(ESTALE);
1283 
1284 			ic->ic_data = abd_alloc_sametype(zio->io_abd,
1285 			    is->is_size);
1286 			ic->ic_duplicate = NULL;
1287 
1288 			zio_nowait(zio_vdev_child_io(zio, NULL,
1289 			    ic->ic_vdev, is->is_target_offset, ic->ic_data,
1290 			    is->is_size, zio->io_type, zio->io_priority, 0,
1291 			    vdev_indirect_read_split_done, ic));
1292 		}
1293 	}
1294 	iv->iv_reconstruct = B_TRUE;
1295 }
1296 
1297 static void
1298 vdev_indirect_io_start(zio_t *zio)
1299 {
1300 	spa_t *spa __maybe_unused = zio->io_spa;
1301 	indirect_vsd_t *iv = kmem_zalloc(sizeof (*iv), KM_SLEEP);
1302 	list_create(&iv->iv_splits,
1303 	    sizeof (indirect_split_t), offsetof(indirect_split_t, is_node));
1304 
1305 	zio->io_vsd = iv;
1306 	zio->io_vsd_ops = &vdev_indirect_vsd_ops;
1307 
1308 	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
1309 	if (zio->io_type != ZIO_TYPE_READ) {
1310 		ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
1311 		/*
1312 		 * Note: this code can handle other kinds of writes,
1313 		 * but we don't expect them.
1314 		 */
1315 		ASSERT((zio->io_flags & (ZIO_FLAG_SELF_HEAL |
1316 		    ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE)) != 0);
1317 	}
1318 
1319 	vdev_indirect_remap(zio->io_vd, zio->io_offset, zio->io_size,
1320 	    vdev_indirect_gather_splits, zio);
1321 
1322 	indirect_split_t *first = list_head(&iv->iv_splits);
1323 	ASSERT3P(first, !=, NULL);
1324 	if (first->is_size == zio->io_size) {
1325 		/*
1326 		 * This is not a split block; we are pointing to the entire
1327 		 * data, which will checksum the same as the original data.
1328 		 * Pass the BP down so that the child i/o can verify the
1329 		 * checksum, and try a different location if available
1330 		 * (e.g. on a mirror).
1331 		 *
1332 		 * While this special case could be handled the same as the
1333 		 * general (split block) case, doing it this way ensures
1334 		 * that the vast majority of blocks on indirect vdevs
1335 		 * (which are not split) are handled identically to blocks
1336 		 * on non-indirect vdevs.  This allows us to be less strict
1337 		 * about performance in the general (but rare) case.
1338 		 */
1339 		ASSERT0(first->is_split_offset);
1340 		ASSERT3P(list_next(&iv->iv_splits, first), ==, NULL);
1341 		zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
1342 		    first->is_vdev, first->is_target_offset,
1343 		    abd_get_offset(zio->io_abd, 0),
1344 		    zio->io_size, zio->io_type, zio->io_priority, 0,
1345 		    vdev_indirect_child_io_done, zio));
1346 	} else {
1347 		iv->iv_split_block = B_TRUE;
1348 		if (zio->io_type == ZIO_TYPE_READ &&
1349 		    zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER)) {
1350 			/*
1351 			 * Read all copies.  Note that for simplicity,
1352 			 * we don't bother consulting the DTL in the
1353 			 * resilver case.
1354 			 */
1355 			vdev_indirect_read_all(zio);
1356 		} else {
1357 			/*
1358 			 * If this is a read zio, we read one copy of each
1359 			 * split segment, from the top-level vdev.  Since
1360 			 * we don't know the checksum of each split
1361 			 * individually, the child zio can't ensure that
1362 			 * we get the right data. E.g. if it's a mirror,
1363 			 * it will just read from a random (healthy) leaf
1364 			 * vdev. We have to verify the checksum in
1365 			 * vdev_indirect_io_done().
1366 			 *
1367 			 * For write zios, the vdev code will ensure we write
1368 			 * to all children.
1369 			 */
1370 			for (indirect_split_t *is = list_head(&iv->iv_splits);
1371 			    is != NULL; is = list_next(&iv->iv_splits, is)) {
1372 				zio_nowait(zio_vdev_child_io(zio, NULL,
1373 				    is->is_vdev, is->is_target_offset,
1374 				    abd_get_offset_size(zio->io_abd,
1375 				    is->is_split_offset, is->is_size),
1376 				    is->is_size, zio->io_type,
1377 				    zio->io_priority, 0,
1378 				    vdev_indirect_child_io_done, zio));
1379 			}
1380 
1381 		}
1382 	}
1383 
1384 	zio_execute(zio);
1385 }
1386 
1387 /*
1388  * Report a checksum error for a child.
1389  */
1390 static void
1391 vdev_indirect_checksum_error(zio_t *zio,
1392     indirect_split_t *is, indirect_child_t *ic)
1393 {
1394 	vdev_t *vd = ic->ic_vdev;
1395 
1396 	if (zio->io_flags & ZIO_FLAG_SPECULATIVE)
1397 		return;
1398 
1399 	mutex_enter(&vd->vdev_stat_lock);
1400 	vd->vdev_stat.vs_checksum_errors++;
1401 	mutex_exit(&vd->vdev_stat_lock);
1402 
1403 	zio_bad_cksum_t zbc = { 0 };
1404 	abd_t *bad_abd = ic->ic_data;
1405 	abd_t *good_abd = is->is_good_child->ic_data;
1406 	(void) zfs_ereport_post_checksum(zio->io_spa, vd, NULL, zio,
1407 	    is->is_target_offset, is->is_size, good_abd, bad_abd, &zbc);
1408 }
1409 
1410 /*
1411  * Issue repair i/os for any incorrect copies.  We do this by comparing
1412  * each split segment's correct data (is_good_child's ic_data) with each
1413  * other copy of the data.  If they differ, then we overwrite the bad data
1414  * with the good copy.  The DTL is checked in vdev_indirect_read_all() and
1415  * if a vdev is missing a copy of the data we set ic_error and the read is
1416  * performed. This provides the opportunity to reconstruct the split block
1417  * if at all possible. ic_error is checked here and, if set, it suppresses
1418  * incrementing the checksum counter. Aside from this, DTLs are not checked,
1419  * which simplifies this code and also issues the optimal number of writes
1420  * (based on which copies actually read bad data, as opposed to which we
1421  * think might be wrong).  For the same reason, we always use
1422  * ZIO_FLAG_SELF_HEAL, to bypass the DTL check in zio_vdev_io_start().
1423  */
1424 static void
1425 vdev_indirect_repair(zio_t *zio)
1426 {
1427 	indirect_vsd_t *iv = zio->io_vsd;
1428 
1429 	if (!spa_writeable(zio->io_spa))
1430 		return;
1431 
1432 	for (indirect_split_t *is = list_head(&iv->iv_splits);
1433 	    is != NULL; is = list_next(&iv->iv_splits, is)) {
1434 		for (int c = 0; c < is->is_children; c++) {
1435 			indirect_child_t *ic = &is->is_child[c];
1436 			if (ic == is->is_good_child)
1437 				continue;
1438 			if (ic->ic_data == NULL)
1439 				continue;
1440 			if (ic->ic_duplicate == is->is_good_child)
1441 				continue;
1442 
1443 			zio_nowait(zio_vdev_child_io(zio, NULL,
1444 			    ic->ic_vdev, is->is_target_offset,
1445 			    is->is_good_child->ic_data, is->is_size,
1446 			    ZIO_TYPE_WRITE, ZIO_PRIORITY_ASYNC_WRITE,
1447 			    ZIO_FLAG_IO_REPAIR | ZIO_FLAG_SELF_HEAL,
1448 			    NULL, NULL));
1449 
1450 			/*
1451 			 * If ic_error is set the current child does not have
1452 			 * a copy of the data, so suppress incrementing the
1453 			 * checksum counter.
1454 			 */
1455 			if (ic->ic_error == ESTALE)
1456 				continue;
1457 
1458 			vdev_indirect_checksum_error(zio, is, ic);
1459 		}
1460 	}
1461 }
1462 
1463 /*
1464  * Report checksum errors on all children that we read from.
1465  */
1466 static void
1467 vdev_indirect_all_checksum_errors(zio_t *zio)
1468 {
1469 	indirect_vsd_t *iv = zio->io_vsd;
1470 
1471 	if (zio->io_flags & ZIO_FLAG_SPECULATIVE)
1472 		return;
1473 
1474 	for (indirect_split_t *is = list_head(&iv->iv_splits);
1475 	    is != NULL; is = list_next(&iv->iv_splits, is)) {
1476 		for (int c = 0; c < is->is_children; c++) {
1477 			indirect_child_t *ic = &is->is_child[c];
1478 
1479 			if (ic->ic_data == NULL)
1480 				continue;
1481 
1482 			vdev_t *vd = ic->ic_vdev;
1483 
1484 			mutex_enter(&vd->vdev_stat_lock);
1485 			vd->vdev_stat.vs_checksum_errors++;
1486 			mutex_exit(&vd->vdev_stat_lock);
1487 			(void) zfs_ereport_post_checksum(zio->io_spa, vd,
1488 			    NULL, zio, is->is_target_offset, is->is_size,
1489 			    NULL, NULL, NULL);
1490 		}
1491 	}
1492 }
1493 
1494 /*
1495  * Copy data from all the splits to the main zio, then validate the checksum.
1496  * If the checksum validates successfully, return success.
1497  */
1498 static int
1499 vdev_indirect_splits_checksum_validate(indirect_vsd_t *iv, zio_t *zio)
1500 {
1501 	zio_bad_cksum_t zbc;
1502 
1503 	for (indirect_split_t *is = list_head(&iv->iv_splits);
1504 	    is != NULL; is = list_next(&iv->iv_splits, is)) {
1505 
1506 		ASSERT3P(is->is_good_child->ic_data, !=, NULL);
1507 		ASSERT3P(is->is_good_child->ic_duplicate, ==, NULL);
1508 
1509 		abd_copy_off(zio->io_abd, is->is_good_child->ic_data,
1510 		    is->is_split_offset, 0, is->is_size);
1511 	}
1512 
1513 	return (zio_checksum_error(zio, &zbc));
1514 }
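
/*
 * Example (illustrative): for a block split into segments A and B, the loop
 * above copies A's is_good_child->ic_data into io_abd at A's is_split_offset
 * and B's into io_abd at B's is_split_offset, so zio_checksum_error()
 * verifies the reassembled block as a whole rather than each segment
 * individually.
 */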
1515 
1516 /*
1517  * There are relatively few possible combinations, making it feasible to
1518  * deterministically check them all.  We do this by advancing good_child
1519  * to the next unique version of the first split; when a split's list is
1520  * exhausted, wrap it around and "carry over" into the next split (like
1521  * counting where each digit's base is that split's is_unique_children).
1522  */
1523 static int
1524 vdev_indirect_splits_enumerate_all(indirect_vsd_t *iv, zio_t *zio)
1525 {
1526 	boolean_t more = B_TRUE;
1527 
1528 	iv->iv_attempts = 0;
1529 
1530 	for (indirect_split_t *is = list_head(&iv->iv_splits);
1531 	    is != NULL; is = list_next(&iv->iv_splits, is))
1532 		is->is_good_child = list_head(&is->is_unique_child);
1533 
1534 	while (more == B_TRUE) {
1535 		iv->iv_attempts++;
1536 		more = B_FALSE;
1537 
1538 		if (vdev_indirect_splits_checksum_validate(iv, zio) == 0)
1539 			return (0);
1540 
1541 		for (indirect_split_t *is = list_head(&iv->iv_splits);
1542 		    is != NULL; is = list_next(&iv->iv_splits, is)) {
1543 			is->is_good_child = list_next(&is->is_unique_child,
1544 			    is->is_good_child);
1545 			if (is->is_good_child != NULL) {
1546 				more = B_TRUE;
1547 				break;
1548 			}
1549 
1550 			is->is_good_child = list_head(&is->is_unique_child);
1551 		}
1552 	}
1553 
1554 	ASSERT3S(iv->iv_attempts, <=, iv->iv_unique_combinations);
1555 
1556 	return (SET_ERROR(ECKSUM));
1557 }
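
/*
 * Example (illustrative): with three splits holding 2, 1, and 3 unique
 * children, the selections advance in "odometer" order, carrying into the
 * next split when the previous one wraps:
 *
 *   (0,0,0) (1,0,0) (0,0,1) (1,0,1) (0,0,2) (1,0,2)
 *
 * for 2 * 1 * 3 = 6 attempts, so iv_attempts never exceeds
 * iv_unique_combinations.
 */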
1558 
1559 /*
1560  * There are too many combinations to try all of them in a reasonable amount
1561  * of time.  So try a fixed number of random combinations from the unique
1562  * split versions, after which we'll consider the block unrecoverable.
1563  */
1564 static int
1565 vdev_indirect_splits_enumerate_randomly(indirect_vsd_t *iv, zio_t *zio)
1566 {
1567 	iv->iv_attempts = 0;
1568 
1569 	while (iv->iv_attempts < iv->iv_attempts_max) {
1570 		iv->iv_attempts++;
1571 
1572 		for (indirect_split_t *is = list_head(&iv->iv_splits);
1573 		    is != NULL; is = list_next(&iv->iv_splits, is)) {
1574 			indirect_child_t *ic = list_head(&is->is_unique_child);
1575 			int children = is->is_unique_children;
1576 
1577 			for (int i = random_in_range(children); i > 0; i--)
1578 				ic = list_next(&is->is_unique_child, ic);
1579 
1580 			ASSERT3P(ic, !=, NULL);
1581 			is->is_good_child = ic;
1582 		}
1583 
1584 		if (vdev_indirect_splits_checksum_validate(iv, zio) == 0)
1585 			return (0);
1586 	}
1587 
1588 	return (SET_ERROR(ECKSUM));
1589 }
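
/*
 * Note (illustrative): each attempt above picks the good child for every
 * split independently and uniformly at random, so the same combination may
 * be tried more than once; iv_attempts_max bounds the total work rather than
 * guaranteeing coverage of the whole search space.
 */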
1590 
1591 /*
1592  * This is a validation function for reconstruction.  It randomly selects
1593  * a good combination, if one can be found, and then it intentionally
1594  * damages all other segment copies by zeroing them.  This forces the
1595  * reconstruction algorithm to locate the one remaining known good copy.
1596  */
1597 static int
1598 vdev_indirect_splits_damage(indirect_vsd_t *iv, zio_t *zio)
1599 {
1600 	int error;
1601 
1602 	/* Presume all the copies are unique for initial selection. */
1603 	for (indirect_split_t *is = list_head(&iv->iv_splits);
1604 	    is != NULL; is = list_next(&iv->iv_splits, is)) {
1605 		is->is_unique_children = 0;
1606 
1607 		for (int i = 0; i < is->is_children; i++) {
1608 			indirect_child_t *ic = &is->is_child[i];
1609 			if (ic->ic_data != NULL) {
1610 				is->is_unique_children++;
1611 				list_insert_tail(&is->is_unique_child, ic);
1612 			}
1613 		}
1614 
1615 		if (list_is_empty(&is->is_unique_child)) {
1616 			error = SET_ERROR(EIO);
1617 			goto out;
1618 		}
1619 	}
1620 
1621 	/*
1622 	 * Set each is_good_child to a randomly-selected child which
1623 	 * is known to contain validated data.
1624 	 */
1625 	error = vdev_indirect_splits_enumerate_randomly(iv, zio);
1626 	if (error)
1627 		goto out;
1628 
1629 	/*
1630 	 * Damage every copy other than the known good one by zeroing it.
1631 	 * This leaves at most two unique copies per indirect split, both
1632 	 * of which may need to be checked to reconstruct the block.  Set
1633 	 * iv->iv_attempts_max such that all unique combinations will be
1634 	 * enumerated, but limit the damage to at most 12 indirect splits.
1635 	 */
1636 	iv->iv_attempts_max = 1;
1637 
1638 	for (indirect_split_t *is = list_head(&iv->iv_splits);
1639 	    is != NULL; is = list_next(&iv->iv_splits, is)) {
1640 		for (int c = 0; c < is->is_children; c++) {
1641 			indirect_child_t *ic = &is->is_child[c];
1642 
1643 			if (ic == is->is_good_child)
1644 				continue;
1645 			if (ic->ic_data == NULL)
1646 				continue;
1647 
1648 			abd_zero(ic->ic_data, abd_get_size(ic->ic_data));
1649 		}
1650 
1651 		iv->iv_attempts_max *= 2;
1652 		if (iv->iv_attempts_max >= (1ULL << 12)) {
1653 			iv->iv_attempts_max = UINT64_MAX;
1654 			break;
1655 		}
1656 	}
1657 
1658 out:
1659 	/* Empty the unique children lists so they can be reconstructed. */
1660 	for (indirect_split_t *is = list_head(&iv->iv_splits);
1661 	    is != NULL; is = list_next(&iv->iv_splits, is)) {
1662 		indirect_child_t *ic;
1663 		while ((ic = list_remove_head(&is->is_unique_child)) != NULL)
1664 			;
1665 
1666 		is->is_unique_children = 0;
1667 	}
1668 
1669 	return (error);
1670 }
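
/*
 * Example (illustrative): each damaged split retains at most two distinct
 * versions (the preserved good copy and the zeroed copies), so N damaged
 * splits require at most 2^N attempts to reconstruct.  The doubling of
 * iv_attempts_max above tracks that bound; once it reaches 2^12 the limit
 * becomes effectively unbounded and any remaining splits are left intact.
 */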
1671 
1672 /*
1673  * This function is called when we have read all copies of the data and need
1674  * to try to find a combination of copies that gives us the right checksum.
1675  *
1676  * If we pointed to any mirror vdevs, this effectively does the job of the
1677  * mirror.  The mirror vdev code can't do its own job because we don't know
1678  * the checksum of each split segment individually.
1679  *
1680  * We have to try every unique combination of copies of split segments, until
1681  * we find one that checksums correctly.  Duplicate segment copies are first
1682  * identified and later skipped during reconstruction.  This optimization
1683  * reduces the search space and ensures that of the remaining combinations
1684  * at most one is correct.
1685  *
1686  * When the total number of combinations is small they can all be checked.
1687  * For example, if we have 3 segments in the split, and each points to a
1688  * 2-way mirror with unique copies, we will have the following pieces of data:
1689  *
1690  *       |     mirror child
1691  * split |     [0]        [1]
1692  * ======|=====================
1693  *   A   |  data_A_0   data_A_1
1694  *   B   |  data_B_0   data_B_1
1695  *   C   |  data_C_0   data_C_1
1696  *
1697  * We will try the following (mirror children)^(number of splits) (2^3=8)
1698  * combinations, which is similar to bitwise-little-endian counting in
1699  * binary.  In general each "digit" corresponds to a split segment, and the
1700  * base of each digit is is_children, which can be different for each
1701  * digit.
1702  *
1703  * "low bit"        "high bit"
1704  *        v                 v
1705  * data_A_0 data_B_0 data_C_0
1706  * data_A_1 data_B_0 data_C_0
1707  * data_A_0 data_B_1 data_C_0
1708  * data_A_1 data_B_1 data_C_0
1709  * data_A_0 data_B_0 data_C_1
1710  * data_A_1 data_B_0 data_C_1
1711  * data_A_0 data_B_1 data_C_1
1712  * data_A_1 data_B_1 data_C_1
1713  *
1714  * Note that the split segments may be on the same or different top-level
1715  * vdevs. In either case, we may need to try lots of combinations (see
1716  * zfs_reconstruct_indirect_combinations_max).  This ensures that if a mirror
1717  * has small silent errors on all of its children, we can still reconstruct
1718  * the correct data, as long as those errors are at sufficiently-separated
1719  * offsets (specifically, separated by the largest block size - default of
1720  * 128KB, but up to 16MB).
1721  */
1722 static void
1723 vdev_indirect_reconstruct_io_done(zio_t *zio)
1724 {
1725 	indirect_vsd_t *iv = zio->io_vsd;
1726 	boolean_t known_good = B_FALSE;
1727 	int error;
1728 
1729 	iv->iv_unique_combinations = 1;
1730 	iv->iv_attempts_max = UINT64_MAX;
1731 
1732 	if (zfs_reconstruct_indirect_combinations_max > 0)
1733 		iv->iv_attempts_max = zfs_reconstruct_indirect_combinations_max;
1734 
1735 	/*
1736 	 * If nonzero, every 1/x blocks will be damaged, in order to validate
1737 	 * reconstruction when there are split segments with damaged copies.
1738 	 * known_good will be B_TRUE when reconstruction is known to be possible.
1739 	 */
1740 	if (zfs_reconstruct_indirect_damage_fraction != 0 &&
1741 	    random_in_range(zfs_reconstruct_indirect_damage_fraction) == 0)
1742 		known_good = (vdev_indirect_splits_damage(iv, zio) == 0);
1743 
1744 	/*
1745 	 * Determine the unique children for each split segment and add them
1746 	 * to the is_unique_child list.  By restricting reconstruction
1747 	 * to these children, only unique combinations will be considered.
1748 	 * This can vastly reduce the search space when there are a large
1749 	 * number of indirect splits.
1750 	 */
1751 	for (indirect_split_t *is = list_head(&iv->iv_splits);
1752 	    is != NULL; is = list_next(&iv->iv_splits, is)) {
1753 		is->is_unique_children = 0;
1754 
1755 		for (int i = 0; i < is->is_children; i++) {
1756 			indirect_child_t *ic_i = &is->is_child[i];
1757 
1758 			if (ic_i->ic_data == NULL ||
1759 			    ic_i->ic_duplicate != NULL)
1760 				continue;
1761 
1762 			for (int j = i + 1; j < is->is_children; j++) {
1763 				indirect_child_t *ic_j = &is->is_child[j];
1764 
1765 				if (ic_j->ic_data == NULL ||
1766 				    ic_j->ic_duplicate != NULL)
1767 					continue;
1768 
1769 				if (abd_cmp(ic_i->ic_data, ic_j->ic_data) == 0)
1770 					ic_j->ic_duplicate = ic_i;
1771 			}
1772 
1773 			is->is_unique_children++;
1774 			list_insert_tail(&is->is_unique_child, ic_i);
1775 		}
1776 
1777 		/* Reconstruction is impossible, no valid children */
1778 		EQUIV(list_is_empty(&is->is_unique_child),
1779 		    is->is_unique_children == 0);
1780 		if (list_is_empty(&is->is_unique_child)) {
1781 			zio->io_error = EIO;
1782 			vdev_indirect_all_checksum_errors(zio);
1783 			zio_checksum_verified(zio);
1784 			return;
1785 		}
1786 
1787 		iv->iv_unique_combinations *= is->is_unique_children;
1788 	}
1789 
1790 	if (iv->iv_unique_combinations <= iv->iv_attempts_max)
1791 		error = vdev_indirect_splits_enumerate_all(iv, zio);
1792 	else
1793 		error = vdev_indirect_splits_enumerate_randomly(iv, zio);
1794 
1795 	if (error != 0) {
1796 		/* All attempted combinations failed. */
1797 		ASSERT3B(known_good, ==, B_FALSE);
1798 		zio->io_error = error;
1799 		vdev_indirect_all_checksum_errors(zio);
1800 	} else {
1801 		/*
1802 		 * The checksum has been successfully validated.  Issue
1803 		 * repair I/Os to any copies of splits which don't match
1804 		 * the validated version.
1805 		 */
1806 		ASSERT0(vdev_indirect_splits_checksum_validate(iv, zio));
1807 		vdev_indirect_repair(zio);
1808 		zio_checksum_verified(zio);
1809 	}
1810 }
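
/*
 * Example (illustrative): if the three splits of a block end up with 2, 1,
 * and 3 unique children after de-duplication, iv_unique_combinations is
 * 2 * 1 * 3 = 6.  Assuming that does not exceed iv_attempts_max (derived
 * from zfs_reconstruct_indirect_combinations_max), the combinations are
 * enumerated exhaustively; a larger product falls back to random sampling
 * via vdev_indirect_splits_enumerate_randomly().
 */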
1811 
1812 static void
1813 vdev_indirect_io_done(zio_t *zio)
1814 {
1815 	indirect_vsd_t *iv = zio->io_vsd;
1816 
1817 	if (iv->iv_reconstruct) {
1818 		/*
1819 		 * We have read all copies of the data (e.g. from mirrors),
1820 		 * either because this was a scrub/resilver, or because the
1821 		 * one-copy read didn't checksum correctly.
1822 		 */
1823 		vdev_indirect_reconstruct_io_done(zio);
1824 		return;
1825 	}
1826 
1827 	if (!iv->iv_split_block) {
1828 		/*
1829 		 * This was not a split block, so we passed the BP down,
1830 		 * and the checksum was handled by the (one) child zio.
1831 		 */
1832 		return;
1833 	}
1834 
1835 	zio_bad_cksum_t zbc;
1836 	int ret = zio_checksum_error(zio, &zbc);
1837 	/*
1838 	 * Any Direct I/O read that has a checksum error must be treated as
1839 	 * suspicious as the contents of the buffer could be getting
1840 	 * manipulated while the I/O is taking place. The checksum verify error
1841 	 * will be reported to the top-level VDEV.
1842 	 */
1843 	if (zio->io_flags & ZIO_FLAG_DIO_READ && ret == ECKSUM) {
1844 		zio->io_error = ret;
1845 		zio->io_flags |= ZIO_FLAG_DIO_CHKSUM_ERR;
1846 		zio_dio_chksum_verify_error_report(zio);
1847 		ret = 0;
1848 	}
1849 
1850 	if (ret == 0) {
1851 		zio_checksum_verified(zio);
1852 		return;
1853 	}
1854 
1855 	/*
1856 	 * The checksum didn't match.  Read all copies of all splits, and
1857 	 * then we will try to reconstruct.  The next time
1858 	 * vdev_indirect_io_done() is called, iv_reconstruct will be set.
1859 	 */
1860 	vdev_indirect_read_all(zio);
1861 
1862 	zio_vdev_io_redone(zio);
1863 }
1864 
1865 vdev_ops_t vdev_indirect_ops = {
1866 	.vdev_op_init = NULL,
1867 	.vdev_op_fini = NULL,
1868 	.vdev_op_open = vdev_indirect_open,
1869 	.vdev_op_close = vdev_indirect_close,
1870 	.vdev_op_asize = vdev_default_asize,
1871 	.vdev_op_min_asize = vdev_default_min_asize,
1872 	.vdev_op_min_alloc = NULL,
1873 	.vdev_op_io_start = vdev_indirect_io_start,
1874 	.vdev_op_io_done = vdev_indirect_io_done,
1875 	.vdev_op_state_change = NULL,
1876 	.vdev_op_need_resilver = NULL,
1877 	.vdev_op_hold = NULL,
1878 	.vdev_op_rele = NULL,
1879 	.vdev_op_remap = vdev_indirect_remap,
1880 	.vdev_op_xlate = NULL,
1881 	.vdev_op_rebuild_asize = NULL,
1882 	.vdev_op_metaslab_init = NULL,
1883 	.vdev_op_config_generate = NULL,
1884 	.vdev_op_nparity = NULL,
1885 	.vdev_op_ndisks = NULL,
1886 	.vdev_op_type = VDEV_TYPE_INDIRECT,	/* name of this vdev type */
1887 	.vdev_op_leaf = B_FALSE			/* leaf vdev */
1888 };
1889 
1890 EXPORT_SYMBOL(spa_condense_fini);
1891 EXPORT_SYMBOL(spa_start_indirect_condensing_thread);
1892 EXPORT_SYMBOL(spa_condense_indirect_start_sync);
1893 EXPORT_SYMBOL(spa_condense_init);
1894 EXPORT_SYMBOL(spa_vdev_indirect_mark_obsolete);
1895 EXPORT_SYMBOL(vdev_indirect_mark_obsolete);
1896 EXPORT_SYMBOL(vdev_indirect_should_condense);
1897 EXPORT_SYMBOL(vdev_indirect_sync_obsolete);
1898 EXPORT_SYMBOL(vdev_obsolete_counts_are_precise);
1899 EXPORT_SYMBOL(vdev_obsolete_sm_object);
1900 
1901 ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, indirect_vdevs_enable, INT,
1902 	ZMOD_RW, "Whether to attempt condensing indirect vdev mappings");
1903 
1904 ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, indirect_obsolete_pct, UINT,
1905 	ZMOD_RW,
1906 	"Minimum obsolete percent of bytes in the mapping "
1907 	"to attempt condensing");
1908 
1909 ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, min_mapping_bytes, U64, ZMOD_RW,
1910 	"Don't bother condensing if the mapping uses less than this amount of "
1911 	"memory");
1912 
1913 ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, max_obsolete_bytes, U64,
1914 	ZMOD_RW,
1915 	"Minimum size obsolete spacemap to attempt condensing");
1916 
1917 ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, indirect_commit_entry_delay_ms,
1918 	UINT, ZMOD_RW,
1919 	"Used by tests to ensure certain actions happen in the middle of a "
1920 	"condense. A maximum value of 1 should be sufficient.");
1921 
1922 ZFS_MODULE_PARAM(zfs_reconstruct, zfs_reconstruct_, indirect_combinations_max,
1923 	UINT, ZMOD_RW,
1924 	"Maximum number of combinations when reconstructing split segments");
1925