xref: /freebsd/sys/contrib/openzfs/module/zfs/vdev_mirror.c (revision 071ab5a1f3cbfd29c8fbec27f7e619418adaf074)
// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_scan.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_draid.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>

/*
 * Vdev mirror kstats
 */
static kstat_t *mirror_ksp = NULL;

typedef struct mirror_stats {
	kstat_named_t vdev_mirror_stat_rotating_linear;
	kstat_named_t vdev_mirror_stat_rotating_offset;
	kstat_named_t vdev_mirror_stat_rotating_seek;
	kstat_named_t vdev_mirror_stat_non_rotating_linear;
	kstat_named_t vdev_mirror_stat_non_rotating_seek;

	kstat_named_t vdev_mirror_stat_preferred_found;
	kstat_named_t vdev_mirror_stat_preferred_not_found;
} mirror_stats_t;

static mirror_stats_t mirror_stats = {
	/* New I/O follows directly the last I/O */
	{ "rotating_linear",			KSTAT_DATA_UINT64 },
	/* New I/O is within zfs_vdev_mirror_rotating_seek_offset of the last */
	{ "rotating_offset",			KSTAT_DATA_UINT64 },
	/* New I/O requires random seek */
	{ "rotating_seek",			KSTAT_DATA_UINT64 },
	/* New I/O follows directly the last I/O (nonrot) */
	{ "non_rotating_linear",		KSTAT_DATA_UINT64 },
	/* New I/O requires random seek (nonrot) */
	{ "non_rotating_seek",			KSTAT_DATA_UINT64 },
	/* Preferred child vdev found */
	{ "preferred_found",			KSTAT_DATA_UINT64 },
	/* Preferred child vdev not found or equal load */
	{ "preferred_not_found",		KSTAT_DATA_UINT64 },

};

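/*
 * Helpers for updating the counters above.  MIRROR_INCR() uses
 * atomic_add_64() since child I/Os may complete concurrently on
 * different CPUs.
 */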
#define	MIRROR_STAT(stat)		(mirror_stats.stat.value.ui64)
#define	MIRROR_INCR(stat, val)		atomic_add_64(&MIRROR_STAT(stat), val)
#define	MIRROR_BUMP(stat)		MIRROR_INCR(stat, 1)

void
vdev_mirror_stat_init(void)
{
	mirror_ksp = kstat_create("zfs", 0, "vdev_mirror_stats",
	    "misc", KSTAT_TYPE_NAMED,
	    sizeof (mirror_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
	if (mirror_ksp != NULL) {
		mirror_ksp->ks_data = &mirror_stats;
		kstat_install(mirror_ksp);
	}
}

void
vdev_mirror_stat_fini(void)
{
	if (mirror_ksp != NULL) {
		kstat_delete(mirror_ksp);
		mirror_ksp = NULL;
	}
}

/*
 * Virtual device vector for mirroring.
 */
typedef struct mirror_child {
	vdev_t		*mc_vd;
	abd_t		*mc_abd;
	uint64_t	mc_offset;
	int		mc_error;
	int		mc_load;
	uint8_t		mc_tried;
	uint8_t		mc_skipped;
	uint8_t		mc_speculative;
	uint8_t		mc_rebuilding;
} mirror_child_t;

typedef struct mirror_map {
	int		*mm_preferred;
	int		mm_preferred_cnt;
	int		mm_children;
	boolean_t	mm_resilvering;
	boolean_t	mm_rebuilding;
	boolean_t	mm_root;
	mirror_child_t	mm_child[];
} mirror_map_t;

static const int vdev_mirror_shift = 21;

/*
 * The load configuration settings below are tuned by default for
 * the case where all devices are of the same rotational type.
 *
 * If there is a mixture of rotating and non-rotating media, setting
 * zfs_vdev_mirror_non_rotating_seek_inc to 0 may well provide better results
 * as it will direct more reads to the non-rotating vdevs, which are likely
 * to offer higher performance.
 */

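/*
 * Worked example with the default tunables below: a rotating child with
 * three I/Os already queued that would need a full seek is scored
 * 3 + zfs_vdev_mirror_rotating_seek_inc = 8, while an idle child whose
 * last I/O ended at the requested offset is scored 0 + rotating_inc = 0,
 * so vdev_mirror_child_select() prefers the idle child.
 */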
/* Rotating media load calculation configuration. */
static int zfs_vdev_mirror_rotating_inc = 0;
static int zfs_vdev_mirror_rotating_seek_inc = 5;
static int zfs_vdev_mirror_rotating_seek_offset = 1 * 1024 * 1024;

/* Non-rotating media load calculation configuration. */
static int zfs_vdev_mirror_non_rotating_inc = 0;
static int zfs_vdev_mirror_non_rotating_seek_inc = 1;

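/*
 * A mirror_map_t is allocated as a single buffer: the fixed header, the
 * flexible array of mm_children mirror_child_t entries, and finally an
 * array of mm_children ints which backs mm_preferred.
 */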
static inline size_t
vdev_mirror_map_size(int children)
{
	return (offsetof(mirror_map_t, mm_child[children]) +
	    sizeof (int) * children);
}

static inline mirror_map_t *
vdev_mirror_map_alloc(int children, boolean_t resilvering, boolean_t root)
{
	mirror_map_t *mm;

	mm = kmem_zalloc(vdev_mirror_map_size(children), KM_SLEEP);
	mm->mm_children = children;
	mm->mm_resilvering = resilvering;
	mm->mm_root = root;
	mm->mm_preferred = (int *)((uintptr_t)mm +
	    offsetof(mirror_map_t, mm_child[children]));

	return (mm);
}

static void
vdev_mirror_map_free(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;

	kmem_free(mm, vdev_mirror_map_size(mm->mm_children));
}

static const zio_vsd_ops_t vdev_mirror_vsd_ops = {
	.vsd_free = vdev_mirror_map_free,
};

static int
vdev_mirror_load(mirror_map_t *mm, vdev_t *vd, uint64_t zio_offset)
{
	uint64_t last_offset;
	int64_t offset_diff;
	int load;

	/* All DVAs have equal weight at the root. */
	if (mm->mm_root)
		return (INT_MAX);

	/*
	 * We don't return INT_MAX if the device is resilvering (i.e.
	 * vdev_resilver_txg != 0) because, when tested, overall performance
	 * was slightly worse with that special case than without it.
	 */

	/* Fix zio_offset for leaf vdevs */
	if (vd->vdev_ops->vdev_op_leaf)
		zio_offset += VDEV_LABEL_START_SIZE;

	/* Standard load based on pending queue length. */
	load = vdev_queue_length(vd);
	last_offset = vdev_queue_last_offset(vd);

	if (vd->vdev_nonrot) {
		/* Non-rotating media. */
		if (last_offset == zio_offset) {
			MIRROR_BUMP(vdev_mirror_stat_non_rotating_linear);
			return (load + zfs_vdev_mirror_non_rotating_inc);
		}

		/*
		 * Apply a seek penalty even for non-rotating devices as
		 * sequential I/O's can be aggregated into fewer operations on
		 * the device, thus avoiding unnecessary per-command overhead
		 * and boosting performance.
		 */
		MIRROR_BUMP(vdev_mirror_stat_non_rotating_seek);
		return (load + zfs_vdev_mirror_non_rotating_seek_inc);
	}

	/* Rotating media I/O's which directly follow the last I/O. */
	if (last_offset == zio_offset) {
		MIRROR_BUMP(vdev_mirror_stat_rotating_linear);
		return (load + zfs_vdev_mirror_rotating_inc);
	}

	/*
	 * Apply half the seek increment to I/O's within seek offset
	 * of the last I/O issued to this vdev as they should incur less
	 * of a seek penalty.
	 */
	offset_diff = (int64_t)(last_offset - zio_offset);
	if (ABS(offset_diff) < zfs_vdev_mirror_rotating_seek_offset) {
		MIRROR_BUMP(vdev_mirror_stat_rotating_offset);
		return (load + (zfs_vdev_mirror_rotating_seek_inc / 2));
	}

	/* Apply the full seek increment to all other I/O's. */
	MIRROR_BUMP(vdev_mirror_stat_rotating_seek);
	return (load + zfs_vdev_mirror_rotating_seek_inc);
}

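/*
 * Returns B_TRUE if this vdev, or any leaf vdev beneath it, is in the
 * middle of a sequential rebuild (vdev_rebuild_txg != 0).
 */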
static boolean_t
vdev_mirror_rebuilding(vdev_t *vd)
{
	if (vd->vdev_ops->vdev_op_leaf && vd->vdev_rebuild_txg)
		return (B_TRUE);

	for (int i = 0; i < vd->vdev_children; i++) {
		if (vdev_mirror_rebuilding(vd->vdev_child[i])) {
			return (B_TRUE);
		}
	}

	return (B_FALSE);
}

/*
 * Avoid inlining the function to keep vdev_mirror_io_start(), which
 * is this function's only caller, as small as possible on the stack.
 */
noinline static mirror_map_t *
vdev_mirror_map_init(zio_t *zio)
{
	mirror_map_t *mm = NULL;
	mirror_child_t *mc;
	vdev_t *vd = zio->io_vd;
	int c;

	if (vd == NULL) {
		dva_t *dva = zio->io_bp->blk_dva;
		spa_t *spa = zio->io_spa;
		dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
		dva_t dva_copy[SPA_DVAS_PER_BP];

		/*
		 * The sequential scrub code sorts and issues all DVAs
		 * of a bp separately. Each of these IOs includes all
		 * original DVA copies so that repairs can be performed
		 * in the event of an error, but we only actually want
		 * to check the first DVA since the others will be
		 * checked by their respective sorted IOs. Only if we
		 * hit an error will we try all DVAs upon retrying.
		 *
		 * Note: This check is safe even if the user switches
		 * from a legacy scrub to a sequential one in the middle
		 * of processing, since scn_is_sorted isn't updated until
		 * all outstanding IOs from the previous scrub pass
		 * complete.
		 */
		if ((zio->io_flags & ZIO_FLAG_SCRUB) &&
		    !(zio->io_flags & ZIO_FLAG_IO_RETRY) &&
		    dsl_scan_scrubbing(spa->spa_dsl_pool) &&
		    scn->scn_is_sorted) {
			c = 1;
		} else {
			c = BP_GET_NDVAS(zio->io_bp);
		}

		/*
		 * If the pool cannot be written to, then infer that some
		 * DVAs might be invalid or point to vdevs that do not exist.
		 * We skip them.
		 */
		if (!spa_writeable(spa)) {
			ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
			int j = 0;
			for (int i = 0; i < c; i++) {
				if (zfs_dva_valid(spa, &dva[i], zio->io_bp))
					dva_copy[j++] = dva[i];
			}
			if (j == 0) {
				zio->io_vsd = NULL;
				zio->io_error = ENXIO;
				return (NULL);
			}
			if (j < c) {
				dva = dva_copy;
				c = j;
			}
		}

		mm = vdev_mirror_map_alloc(c, B_FALSE, B_TRUE);
		for (c = 0; c < mm->mm_children; c++) {
			mc = &mm->mm_child[c];

			mc->mc_vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[c]));
			mc->mc_offset = DVA_GET_OFFSET(&dva[c]);
			if (mc->mc_vd == NULL) {
				kmem_free(mm, vdev_mirror_map_size(
				    mm->mm_children));
				zio->io_vsd = NULL;
				zio->io_error = ENXIO;
				return (NULL);
			}
		}
	} else {
		/*
		 * If we are resilvering, then we should handle scrub reads
		 * differently; we shouldn't issue them to the resilvering
		 * device because it might not have those blocks.
		 *
		 * We are resilvering iff:
		 * 1) We are a replacing vdev (i.e. our name is "replacing-1"
		 *    or "spare-1" or something like that), and
		 * 2) The pool is currently being resilvered.
		 *
		 * We cannot simply check vd->vdev_resilver_txg, because it's
		 * not set in this path.
		 *
		 * Nor can we just check our vdev_ops; there are cases (such as
		 * when a user types "zpool replace pool odev spare_dev" and
		 * spare_dev is in the spare list, or when a spare device is
		 * automatically used to replace a DEGRADED device) when
		 * resilvering is complete but both the original vdev and the
		 * spare vdev remain in the pool.  That behavior is intentional.
		 * It helps implement the policy that a spare should be
		 * automatically removed from the pool after the user replaces
		 * the device that originally failed.
		 *
		 * If a spa load is in progress, then spa_dsl_pool may be
		 * uninitialized.  But we shouldn't be resilvering during a spa
		 * load anyway.
		 */
		boolean_t replacing = (vd->vdev_ops == &vdev_replacing_ops ||
		    vd->vdev_ops == &vdev_spare_ops) &&
		    spa_load_state(vd->vdev_spa) == SPA_LOAD_NONE &&
		    dsl_scan_resilvering(vd->vdev_spa->spa_dsl_pool);
		mm = vdev_mirror_map_alloc(vd->vdev_children, replacing,
		    B_FALSE);
		for (c = 0; c < mm->mm_children; c++) {
			mc = &mm->mm_child[c];
			mc->mc_vd = vd->vdev_child[c];
			mc->mc_offset = zio->io_offset;

			if (vdev_mirror_rebuilding(mc->mc_vd))
				mm->mm_rebuilding = mc->mc_rebuilding = B_TRUE;
		}
	}

	return (mm);
}

static int
vdev_mirror_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
    uint64_t *logical_ashift, uint64_t *physical_ashift)
{
	int numerrors = 0;
	int lasterror = 0;

	if (vd->vdev_children == 0) {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (SET_ERROR(EINVAL));
	}

	vdev_open_children(vd);

	for (int c = 0; c < vd->vdev_children; c++) {
		vdev_t *cvd = vd->vdev_child[c];

		if (cvd->vdev_open_error) {
			lasterror = cvd->vdev_open_error;
			numerrors++;
			continue;
		}

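		/*
		 * Note the MIN(x - 1, y - 1) + 1 idiom: a value of 0 wraps
		 * to UINT64_MAX when 1 is subtracted, so a zero (unset)
		 * running size is always replaced by the first healthy
		 * child's size and later children can only shrink it.
		 */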
		*asize = MIN(*asize - 1, cvd->vdev_asize - 1) + 1;
		*max_asize = MIN(*max_asize - 1, cvd->vdev_max_asize - 1) + 1;
		*logical_ashift = MAX(*logical_ashift, cvd->vdev_ashift);
	}
	for (int c = 0; c < vd->vdev_children; c++) {
		vdev_t *cvd = vd->vdev_child[c];

		if (cvd->vdev_open_error)
			continue;
		*physical_ashift = vdev_best_ashift(*logical_ashift,
		    *physical_ashift, cvd->vdev_physical_ashift);
	}

	if (numerrors == vd->vdev_children) {
		if (vdev_children_are_offline(vd))
			vd->vdev_stat.vs_aux = VDEV_AUX_CHILDREN_OFFLINE;
		else
			vd->vdev_stat.vs_aux = VDEV_AUX_NO_REPLICAS;
		return (lasterror);
	}

	return (0);
}

static void
vdev_mirror_close(vdev_t *vd)
{
	for (int c = 0; c < vd->vdev_children; c++)
		vdev_close(vd->vdev_child[c]);
}

static void
vdev_mirror_child_done(zio_t *zio)
{
	mirror_child_t *mc = zio->io_private;

	mc->mc_error = zio->io_error;
	mc->mc_tried = 1;
	mc->mc_skipped = 0;
}

/*
 * Check the other, lower-index DVAs to see if they're on the same
 * vdev as the child we picked.  If they are, use them since they
 * are likely to have been allocated from the primary metaslab in
 * use at the time, and hence are more likely to have locality with
 * single-copy data.
 */
static int
vdev_mirror_dva_select(zio_t *zio, int p)
{
	dva_t *dva = zio->io_bp->blk_dva;
	mirror_map_t *mm = zio->io_vsd;
	int preferred;
	int c;

	preferred = mm->mm_preferred[p];
	for (p--; p >= 0; p--) {
		c = mm->mm_preferred[p];
		if (DVA_GET_VDEV(&dva[c]) == DVA_GET_VDEV(&dva[preferred]))
			preferred = c;
	}
	return (preferred);
}

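/*
 * Choose one of the equally loaded children recorded in mm_preferred[].
 */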
static int
vdev_mirror_preferred_child_randomize(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;
	int p;

	if (mm->mm_root) {
		p = random_in_range(mm->mm_preferred_cnt);
		return (vdev_mirror_dva_select(zio, p));
	}

	/*
	 * To ensure we don't always favour the first matching vdev,
	 * which could lead to wear leveling issues on SSDs, we
	 * use the I/O offset as a pseudo random seed into the vdevs
	 * which have the lowest load.
	 */
	p = (zio->io_offset >> vdev_mirror_shift) % mm->mm_preferred_cnt;
	return (mm->mm_preferred[p]);
}

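/*
 * dRAID-aware helpers: when the child sits below a dRAID top-level vdev,
 * readability and missing-data checks must consult the dRAID mapping for
 * the specific offset rather than only the leaf vdev's state.
 */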
static boolean_t
vdev_mirror_child_readable(mirror_child_t *mc)
{
	vdev_t *vd = mc->mc_vd;

	if (vd->vdev_top != NULL && vd->vdev_top->vdev_ops == &vdev_draid_ops)
		return (vdev_draid_readable(vd, mc->mc_offset));
	else
		return (vdev_readable(vd));
}

static boolean_t
vdev_mirror_child_missing(mirror_child_t *mc, uint64_t txg, uint64_t size)
{
	vdev_t *vd = mc->mc_vd;

	if (vd->vdev_top != NULL && vd->vdev_top->vdev_ops == &vdev_draid_ops)
		return (vdev_draid_missing(vd, mc->mc_offset, txg, size));
	else
		return (vdev_dtl_contains(vd, DTL_MISSING, txg, size));
}

/*
 * Try to find a vdev whose DTL doesn't contain the block we want to read,
 * preferring vdevs based on their determined load. If we can't, try the
 * read on any vdev we haven't already tried.
 *
 * Distributed spares are an exception to the above load rule. They are
 * always preferred in order to detect gaps in the distributed spare which
 * are created when another disk in the dRAID fails. In order to restore
 * redundancy those gaps must be read to trigger the required repair IO.
 */
static int
vdev_mirror_child_select(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;
	uint64_t txg = zio->io_txg;
	int c, lowest_load;

	ASSERT(zio->io_bp == NULL || BP_GET_BIRTH(zio->io_bp) == txg);

	lowest_load = INT_MAX;
	mm->mm_preferred_cnt = 0;
	for (c = 0; c < mm->mm_children; c++) {
		mirror_child_t *mc;

		mc = &mm->mm_child[c];
		if (mc->mc_tried || mc->mc_skipped)
			continue;

		if (mc->mc_vd == NULL ||
		    !vdev_mirror_child_readable(mc)) {
			mc->mc_error = SET_ERROR(ENXIO);
			mc->mc_tried = 1;	/* don't even try */
			mc->mc_skipped = 1;
			continue;
		}

		if (vdev_mirror_child_missing(mc, txg, 1)) {
			mc->mc_error = SET_ERROR(ESTALE);
			mc->mc_skipped = 1;
			mc->mc_speculative = 1;
			continue;
		}

		if (mc->mc_vd->vdev_ops == &vdev_draid_spare_ops) {
			mm->mm_preferred[0] = c;
			mm->mm_preferred_cnt = 1;
			break;
		}

		mc->mc_load = vdev_mirror_load(mm, mc->mc_vd, mc->mc_offset);
		if (mc->mc_load > lowest_load)
			continue;

		if (mc->mc_load < lowest_load) {
			lowest_load = mc->mc_load;
			mm->mm_preferred_cnt = 0;
		}
		mm->mm_preferred[mm->mm_preferred_cnt] = c;
		mm->mm_preferred_cnt++;
	}

	if (mm->mm_preferred_cnt == 1) {
		MIRROR_BUMP(vdev_mirror_stat_preferred_found);
		return (mm->mm_preferred[0]);
	}

	if (mm->mm_preferred_cnt > 1) {
		MIRROR_BUMP(vdev_mirror_stat_preferred_not_found);
		return (vdev_mirror_preferred_child_randomize(zio));
	}

	/*
	 * Every device is either missing or has this txg in its DTL.
	 * Look for any child we haven't already tried before giving up.
	 */
	for (c = 0; c < mm->mm_children; c++) {
		if (!mm->mm_child[c].mc_tried)
			return (c);
	}

	/*
	 * Every child failed.  There's no place left to look.
	 */
	return (-1);
}

static void
vdev_mirror_io_start(zio_t *zio)
{
	mirror_map_t *mm;
	mirror_child_t *mc;
	int c, children;

	mm = vdev_mirror_map_init(zio);
	zio->io_vsd = mm;
	zio->io_vsd_ops = &vdev_mirror_vsd_ops;

	if (mm == NULL) {
		ASSERT(!spa_trust_config(zio->io_spa));
		ASSERT(zio->io_type == ZIO_TYPE_READ);
		zio_execute(zio);
		return;
	}

	if (zio->io_type == ZIO_TYPE_READ) {
		if ((zio->io_flags & ZIO_FLAG_SCRUB) && !mm->mm_resilvering) {
			/*
			 * For scrubbing reads we need to issue reads to all
			 * children.  One child can reuse the parent buffer,
			 * but for the others we have to allocate separate
			 * ones to verify checksums if io_bp is non-NULL, or
			 * to compare them in vdev_mirror_io_done() otherwise.
			 */
			boolean_t first = B_TRUE;
			for (c = 0; c < mm->mm_children; c++) {
				mc = &mm->mm_child[c];

				/* Don't issue ZIOs to offline children */
				if (!vdev_mirror_child_readable(mc)) {
					mc->mc_error = SET_ERROR(ENXIO);
					mc->mc_tried = 1;
					mc->mc_skipped = 1;
					continue;
				}

				mc->mc_abd = first ? zio->io_abd :
				    abd_alloc_sametype(zio->io_abd,
				    zio->io_size);
				zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
				    mc->mc_vd, mc->mc_offset, mc->mc_abd,
				    zio->io_size, zio->io_type,
				    zio->io_priority, 0,
				    vdev_mirror_child_done, mc));
				first = B_FALSE;
			}
			zio_execute(zio);
			return;
		}
		/*
		 * For normal reads just pick one child.
		 */
		c = vdev_mirror_child_select(zio);
		children = (c >= 0);
	} else {
		ASSERT(zio->io_type == ZIO_TYPE_WRITE);

		/*
		 * Writes go to all children.
		 */
		c = 0;
		children = mm->mm_children;
	}

	while (children--) {
		mc = &mm->mm_child[c];
		c++;

		/*
		 * When sequentially resilvering, only issue write repair
		 * IOs to the vdev which is being rebuilt since performance
		 * is limited by the slowest child.  This is an issue for
		 * faster replacement devices such as distributed spares.
		 */
		if ((zio->io_priority == ZIO_PRIORITY_REBUILD) &&
		    (zio->io_flags & ZIO_FLAG_IO_REPAIR) &&
		    !(zio->io_flags & ZIO_FLAG_SCRUB) &&
		    mm->mm_rebuilding && !mc->mc_rebuilding) {
			continue;
		}

		zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
		    mc->mc_vd, mc->mc_offset, zio->io_abd, zio->io_size,
		    zio->io_type, zio->io_priority, 0,
		    vdev_mirror_child_done, mc));
	}

	zio_execute(zio);
}

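/*
 * Return the most severe child error, giving non-speculative errors (real
 * I/O failures) precedence over speculative ones (children skipped because
 * their DTL indicated the data may be missing).
 */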
static int
vdev_mirror_worst_error(mirror_map_t *mm)
{
	int error[2] = { 0, 0 };

	for (int c = 0; c < mm->mm_children; c++) {
		mirror_child_t *mc = &mm->mm_child[c];
		int s = mc->mc_speculative;
		error[s] = zio_worst_error(error[s], mc->mc_error);
	}

	return (error[0] ? error[0] : error[1]);
}

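/*
 * Completion handler: tally good and failed children, retry the read from
 * another child if no good copy was obtained, reconcile the extra copies
 * read by a scrub, and issue repair writes to children that returned
 * errors or whose DTL says the block may be missing.
 */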
static void
vdev_mirror_io_done(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;
	mirror_child_t *mc;
	int c;
	int good_copies = 0;
	int unexpected_errors = 0;
	int last_good_copy = -1;

	if (mm == NULL)
		return;

	for (c = 0; c < mm->mm_children; c++) {
		mc = &mm->mm_child[c];

		if (mc->mc_error) {
			if (!mc->mc_skipped)
				unexpected_errors++;
		} else if (mc->mc_tried) {
			last_good_copy = c;
			good_copies++;
		}
	}

	if (zio->io_type == ZIO_TYPE_WRITE) {
		/*
		 * XXX -- for now, treat partial writes as success.
		 *
		 * Now that we support write reallocation, it would be better
		 * to treat partial failure as real failure unless there are
		 * no non-degraded top-level vdevs left, and not update DTLs
		 * if we intend to reallocate.
		 */
		if (good_copies != mm->mm_children) {
			/*
			 * Always require at least one good copy.
			 *
			 * For ditto blocks (io_vd == NULL), require
			 * all copies to be good.
			 *
			 * XXX -- for replacing vdevs, there's no great answer.
			 * If the old device is really dead, we may not even
			 * be able to access it -- so we only want to
			 * require good writes to the new device.  But if
			 * the new device turns out to be flaky, we want
			 * to be able to detach it -- which requires all
			 * writes to the old device to have succeeded.
			 */
			if (good_copies == 0 || zio->io_vd == NULL)
				zio->io_error = vdev_mirror_worst_error(mm);
		}
		return;
	}

	ASSERT(zio->io_type == ZIO_TYPE_READ);

	/*
	 * Any Direct I/O read that has a checksum error must be treated as
	 * suspicious as the contents of the buffer could be getting
	 * manipulated while the I/O is taking place. The checksum verify
	 * error will be reported to the top-level Mirror VDEV.
	 *
	 * There will be no attempt at reading any additional data copies. If
	 * the buffer is still being manipulated while attempting to read
	 * from another child, there exists a possibility that the checksum
	 * could be verified as valid. However, the buffer contents could
	 * again get manipulated after verifying the checksum. This would
	 * lead to bad data being written out during self healing.
	 */
	if ((zio->io_flags & ZIO_FLAG_DIO_READ) &&
	    (zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR)) {
		zio_dio_chksum_verify_error_report(zio);
		zio->io_error = vdev_mirror_worst_error(mm);
		ASSERT3U(zio->io_error, ==, ECKSUM);
		return;
	}

	/*
	 * If we don't have a good copy yet, keep trying other children.
	 */
	if (good_copies == 0 && (c = vdev_mirror_child_select(zio)) != -1) {
		ASSERT(c >= 0 && c < mm->mm_children);
		mc = &mm->mm_child[c];
		zio_vdev_io_redone(zio);
		zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
		    mc->mc_vd, mc->mc_offset, zio->io_abd, zio->io_size,
		    ZIO_TYPE_READ, zio->io_priority, 0,
		    vdev_mirror_child_done, mc));
		return;
	}

	if (zio->io_flags & ZIO_FLAG_SCRUB && !mm->mm_resilvering) {
		abd_t *best_abd = NULL;
		if (last_good_copy >= 0)
			best_abd = mm->mm_child[last_good_copy].mc_abd;

		/*
		 * If we're scrubbing but don't have a BP available (because
		 * this vdev is under a raidz or draid vdev) then the best we
		 * can do is compare all of the copies read.  If they're not
		 * identical then return a checksum error and the most likely
		 * correct data.  The raidz code will issue a repair I/O if
		 * possible.
		 */
		if (zio->io_bp == NULL) {
			ASSERT(zio->io_vd->vdev_ops == &vdev_replacing_ops ||
			    zio->io_vd->vdev_ops == &vdev_spare_ops);

			abd_t *pref_abd = NULL;
			for (c = 0; c < last_good_copy; c++) {
				mc = &mm->mm_child[c];
				if (mc->mc_error || !mc->mc_tried)
					continue;

				if (abd_cmp(mc->mc_abd, best_abd) != 0)
					zio->io_error = SET_ERROR(ECKSUM);

				/*
				 * The distributed spare is always preferred
				 * by vdev_mirror_child_select() so it's
				 * considered to be the best candidate.
				 */
				if (pref_abd == NULL &&
				    mc->mc_vd->vdev_ops ==
				    &vdev_draid_spare_ops)
					pref_abd = mc->mc_abd;

				/*
				 * In the absence of a preferred copy, use
				 * the parent pointer to avoid a memory copy.
				 */
				if (mc->mc_abd == zio->io_abd)
					best_abd = mc->mc_abd;
			}
			if (pref_abd)
				best_abd = pref_abd;
		} else {

			/*
			 * If we have a BP available, then checksums are
			 * already verified and we just need a buffer
			 * with valid data, preferring parent one to
			 * avoid a memory copy.
			 */
			for (c = 0; c < last_good_copy; c++) {
				mc = &mm->mm_child[c];
				if (mc->mc_error || !mc->mc_tried)
					continue;
				if (mc->mc_abd == zio->io_abd) {
					best_abd = mc->mc_abd;
					break;
				}
			}
		}

		if (best_abd && best_abd != zio->io_abd)
			abd_copy(zio->io_abd, best_abd, zio->io_size);
		for (c = 0; c < mm->mm_children; c++) {
			mc = &mm->mm_child[c];
			if (mc->mc_abd != zio->io_abd)
				abd_free(mc->mc_abd);
			mc->mc_abd = NULL;
		}
	}

	if (good_copies == 0) {
		zio->io_error = vdev_mirror_worst_error(mm);
		ASSERT(zio->io_error != 0);
	}

	if (good_copies && spa_writeable(zio->io_spa) &&
	    (unexpected_errors ||
	    (zio->io_flags & ZIO_FLAG_RESILVER) ||
	    ((zio->io_flags & ZIO_FLAG_SCRUB) && mm->mm_resilvering))) {
		/*
		 * Use the good data we have in hand to repair damaged children.
		 */
		for (c = 0; c < mm->mm_children; c++) {
			/*
			 * Don't rewrite known good children.
			 * Not only is it unnecessary, it could
			 * actually be harmful: if the system lost
			 * power while rewriting the only good copy,
			 * there would be no good copies left!
			 */
			mc = &mm->mm_child[c];

			if (mc->mc_error == 0) {
				vdev_ops_t *ops = mc->mc_vd->vdev_ops;

				if (mc->mc_tried)
					continue;
				/*
				 * We didn't try this child.  We need to
				 * repair it if:
				 * 1. it's a scrub (in which case we have
				 * tried everything that was healthy)
				 *  - or -
				 * 2. it's an indirect or distributed spare
				 * vdev (in which case it could point to any
				 * other vdev, which might have a bad DTL)
				 *  - or -
				 * 3. the DTL indicates that this data is
				 * missing from this vdev
				 */
				if (!(zio->io_flags & ZIO_FLAG_SCRUB) &&
				    ops != &vdev_indirect_ops &&
				    ops != &vdev_draid_spare_ops &&
				    !vdev_dtl_contains(mc->mc_vd, DTL_PARTIAL,
				    zio->io_txg, 1))
					continue;
				mc->mc_error = SET_ERROR(ESTALE);
			}

			zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
			    mc->mc_vd, mc->mc_offset,
			    zio->io_abd, zio->io_size, ZIO_TYPE_WRITE,
			    zio->io_priority == ZIO_PRIORITY_REBUILD ?
			    ZIO_PRIORITY_REBUILD : ZIO_PRIORITY_ASYNC_WRITE,
			    ZIO_FLAG_IO_REPAIR | (unexpected_errors ?
			    ZIO_FLAG_SELF_HEAL : 0), NULL, NULL));
		}
	}
}

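/*
 * Propagate child fault and degrade counts into the mirror's own state.
 */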
static void
vdev_mirror_state_change(vdev_t *vd, int faulted, int degraded)
{
	if (faulted == vd->vdev_children) {
		if (vdev_children_are_offline(vd)) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_OFFLINE,
			    VDEV_AUX_CHILDREN_OFFLINE);
		} else {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_NO_REPLICAS);
		}
	} else if (degraded + faulted != 0) {
		vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE);
	} else {
		vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY, VDEV_AUX_NONE);
	}
}

/*
 * Return the maximum asize for a rebuild zio in the provided range.
 */
static uint64_t
vdev_mirror_rebuild_asize(vdev_t *vd, uint64_t start, uint64_t asize,
    uint64_t max_segment)
{
	(void) start;

	uint64_t psize = MIN(P2ROUNDUP(max_segment, 1 << vd->vdev_ashift),
	    SPA_MAXBLOCKSIZE);

	return (MIN(asize, vdev_psize_to_asize(vd, psize)));
}

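/*
 * Replacing and spare vdevs are implemented as thin variants of a mirror:
 * the three vdev_ops tables below share the same open, close, I/O, and
 * state-change callbacks and differ only in their vdev type name.
 */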
vdev_ops_t vdev_mirror_ops = {
	.vdev_op_init = NULL,
	.vdev_op_fini = NULL,
	.vdev_op_open = vdev_mirror_open,
	.vdev_op_close = vdev_mirror_close,
	.vdev_op_psize_to_asize = vdev_default_asize,
	.vdev_op_asize_to_psize = vdev_default_psize,
	.vdev_op_min_asize = vdev_default_min_asize,
	.vdev_op_min_alloc = NULL,
	.vdev_op_io_start = vdev_mirror_io_start,
	.vdev_op_io_done = vdev_mirror_io_done,
	.vdev_op_state_change = vdev_mirror_state_change,
	.vdev_op_need_resilver = vdev_default_need_resilver,
	.vdev_op_hold = NULL,
	.vdev_op_rele = NULL,
	.vdev_op_remap = NULL,
	.vdev_op_xlate = vdev_default_xlate,
	.vdev_op_rebuild_asize = vdev_mirror_rebuild_asize,
	.vdev_op_metaslab_init = NULL,
	.vdev_op_config_generate = NULL,
	.vdev_op_nparity = NULL,
	.vdev_op_ndisks = NULL,
	.vdev_op_type = VDEV_TYPE_MIRROR,	/* name of this vdev type */
	.vdev_op_leaf = B_FALSE			/* not a leaf vdev */
};

vdev_ops_t vdev_replacing_ops = {
	.vdev_op_init = NULL,
	.vdev_op_fini = NULL,
	.vdev_op_open = vdev_mirror_open,
	.vdev_op_close = vdev_mirror_close,
	.vdev_op_psize_to_asize = vdev_default_asize,
	.vdev_op_asize_to_psize = vdev_default_psize,
	.vdev_op_min_asize = vdev_default_min_asize,
	.vdev_op_min_alloc = NULL,
	.vdev_op_io_start = vdev_mirror_io_start,
	.vdev_op_io_done = vdev_mirror_io_done,
	.vdev_op_state_change = vdev_mirror_state_change,
	.vdev_op_need_resilver = vdev_default_need_resilver,
	.vdev_op_hold = NULL,
	.vdev_op_rele = NULL,
	.vdev_op_remap = NULL,
	.vdev_op_xlate = vdev_default_xlate,
	.vdev_op_rebuild_asize = vdev_mirror_rebuild_asize,
	.vdev_op_metaslab_init = NULL,
	.vdev_op_config_generate = NULL,
	.vdev_op_nparity = NULL,
	.vdev_op_ndisks = NULL,
	.vdev_op_type = VDEV_TYPE_REPLACING,	/* name of this vdev type */
	.vdev_op_leaf = B_FALSE			/* not a leaf vdev */
};

vdev_ops_t vdev_spare_ops = {
	.vdev_op_init = NULL,
	.vdev_op_fini = NULL,
	.vdev_op_open = vdev_mirror_open,
	.vdev_op_close = vdev_mirror_close,
	.vdev_op_psize_to_asize = vdev_default_asize,
	.vdev_op_asize_to_psize = vdev_default_psize,
	.vdev_op_min_asize = vdev_default_min_asize,
	.vdev_op_min_alloc = NULL,
	.vdev_op_io_start = vdev_mirror_io_start,
	.vdev_op_io_done = vdev_mirror_io_done,
	.vdev_op_state_change = vdev_mirror_state_change,
	.vdev_op_need_resilver = vdev_default_need_resilver,
	.vdev_op_hold = NULL,
	.vdev_op_rele = NULL,
	.vdev_op_remap = NULL,
	.vdev_op_xlate = vdev_default_xlate,
	.vdev_op_rebuild_asize = vdev_mirror_rebuild_asize,
	.vdev_op_metaslab_init = NULL,
	.vdev_op_config_generate = NULL,
	.vdev_op_nparity = NULL,
	.vdev_op_ndisks = NULL,
	.vdev_op_type = VDEV_TYPE_SPARE,	/* name of this vdev type */
	.vdev_op_leaf = B_FALSE			/* not a leaf vdev */
};

ZFS_MODULE_PARAM(zfs_vdev_mirror, zfs_vdev_mirror_, rotating_inc, INT, ZMOD_RW,
	"Rotating media load increment for non-seeking I/Os");

ZFS_MODULE_PARAM(zfs_vdev_mirror, zfs_vdev_mirror_, rotating_seek_inc, INT,
	ZMOD_RW, "Rotating media load increment for seeking I/Os");

ZFS_MODULE_PARAM(zfs_vdev_mirror, zfs_vdev_mirror_, rotating_seek_offset, INT,
	ZMOD_RW,
	"Offset in bytes from the last I/O which triggers "
	"a reduced rotating media seek increment");

ZFS_MODULE_PARAM(zfs_vdev_mirror, zfs_vdev_mirror_, non_rotating_inc, INT,
	ZMOD_RW, "Non-rotating media load increment for non-seeking I/Os");

ZFS_MODULE_PARAM(zfs_vdev_mirror, zfs_vdev_mirror_, non_rotating_seek_inc, INT,
	ZMOD_RW, "Non-rotating media load increment for seeking I/Os");