xref: /freebsd/sys/contrib/openzfs/module/zfs/vdev_draid.c (revision d8fbbd371ca11d9ad4b29b9d3a316885a5da0b15)
1 // SPDX-License-Identifier: CDDL-1.0
2 /*
3  * CDDL HEADER START
4  *
5  * The contents of this file are subject to the terms of the
6  * Common Development and Distribution License (the "License").
7  * You may not use this file except in compliance with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or https://opensource.org/licenses/CDDL-1.0.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright (c) 2018 Intel Corporation.
24  * Copyright (c) 2020 by Lawrence Livermore National Security, LLC.
25  * Copyright (c) 2025, Klara, Inc.
26  * Copyright (c) 2026, Seagate Technology, LLC.
27  * Copyright (c) 2026, Wasabi Technologies, Inc.
28  */
29 
30 #include <sys/zfs_context.h>
31 #include <sys/spa.h>
32 #include <sys/spa_impl.h>
33 #include <sys/vdev_impl.h>
34 #include <sys/vdev_draid.h>
35 #include <sys/vdev_raidz.h>
36 #include <sys/vdev_rebuild.h>
37 #include <sys/abd.h>
38 #include <sys/zio.h>
39 #include <sys/nvpair.h>
40 #include <sys/zio_checksum.h>
41 #include <sys/fs/zfs.h>
42 #include <sys/fm/fs/zfs.h>
43 #include <zfs_fletcher.h>
44 
45 #ifdef ZFS_DEBUG
46 #include <sys/vdev.h>	/* For vdev_xlate() in vdev_draid_io_verify() */
47 #endif
48 
49 /*
50  * dRAID is a distributed spare implementation for ZFS. A dRAID vdev is
51  * comprised of multiple raidz redundancy groups which are spread over the
52  * dRAID children. To ensure an even distribution, and avoid hot spots, a
53  * permutation mapping is applied to the order of the dRAID children.
54  * This mixing effectively distributes the parity columns evenly over all
55  * of the disks in the dRAID.
56  *
57  * This is beneficial because it means that when resilvering, all of the
58  * disks can participate, thereby increasing the available IOPS and
59  * bandwidth. Furthermore, by reserving a small fraction of each child's
60  * total capacity, virtual distributed spare disks can be created. These
61  * spares similarly benefit from the performance gains of spanning all of
62  * the children. The consequence is that resilvering to a distributed
63  * spare can substantially reduce the time required to restore full
64  * parity to a pool with a failed disk.
65  *
66  * === dRAID group layout ===
67  *
68  * First, let's define a "row" in the configuration to be a 16M chunk from
69  * each physical drive at the same offset. This is the minimum allowable
70  * size since it must be possible to store a full 16M block when there is
71  * only a single data column. Next, we define a "group" to be a set of
72  * sequential disks containing both the parity and data columns. We allow
73  * groups to span multiple rows in order to align any group size to any
74  * number of physical drives. Finally, a "slice" is comprised of the rows
75  * which contain the target number of groups. The permutation mappings
76  * are applied in a round robin fashion to each slice.
77  *
78  * Given D+P drives in a group (including parity drives) and C-S physical
79  * drives (not including the spare drives), we can distribute the groups
80  * across R rows without remainder by selecting the least common multiple
81  * of D+P and C-S as the number of groups; i.e. ngroups = LCM(D+P, C-S).
82  *
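 * As a worked example using the configuration described below: D+P = 9 and
 * C-S = 12, so LCM(9, 12) = 36 disk slots per slice; spread over rows of
 * 12 disks this gives 36 / 12 = 3 rows per slice holding 36 / 9 = 4 groups.
 *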
83  * In the example below, there are C=14 physical drives in the configuration
84  * with S=2 drives worth of spare capacity. Each group has a width of 9
85  * which includes D=8 data and P=1 parity drive. There are 4 groups and
86  * 3 rows per slice.  Each group has a size of 144M (16M * 9) and a slice
87  * size is 576M (144M * 4). When allocating from a dRAID each group is
88  * filled before moving on to the next as shown in slice0 below.
89  *
90  *             data disks (8 data + 1 parity)          spares (2)
91  *     +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
92  *  ^  | 2 | 6 | 1 | 11| 4 | 0 | 7 | 10| 8 | 9 | 13| 5 | 12| 3 | device map 0
93  *  |  +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
94  *  |  |              group 0              |  group 1..|       |
95  *  |  +-----------------------------------+-----------+-------|
96  *  |  | 0   1   2   3   4   5   6   7   8 | 36  37  38|       |  r
97  *  |  | 9   10  11  12  13  14  15  16  17| 45  46  47|       |  o
98  *  |  | 18  19  20  21  22  23  24  25  26| 54  55  56|       |  w
99  *     | 27  28  29  30  31  32  33  34  35| 63  64  65|       |  0
100  *  s  +-----------------------+-----------------------+-------+
101  *  l  |       ..group 1       |        group 2..      |       |
102  *  i  +-----------------------+-----------------------+-------+
103  *  c  | 39  40  41  42  43  44| 72  73  74  75  76  77|       |  r
104  *  e  | 48  49  50  51  52  53| 81  82  83  84  85  86|       |  o
105  *  0  | 57  58  59  60  61  62| 90  91  92  93  94  95|       |  w
106  *     | 66  67  68  69  70  71| 99 100 101 102 103 104|       |  1
107  *  |  +-----------+-----------+-----------------------+-------+
108  *  |  |..group 2  |            group 3                |       |
109  *  |  +-----------+-----------+-----------------------+-------+
110  *  |  | 78  79  80|108 109 110 111 112 113 114 115 116|       |  r
111  *  |  | 87  88  89|117 118 119 120 121 122 123 124 125|       |  o
112  *  |  | 96  97  98|126 127 128 129 130 131 132 133 134|       |  w
113  *  v  |105 106 107|135 136 137 138 139 140 141 142 143|       |  2
114  *     +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
115  *     | 9 | 11| 12| 2 | 4 | 1 | 3 | 0 | 10| 13| 8 | 5 | 6 | 7 | device map 1
116  *  s  +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
117  *  l  |              group 4              |  group 5..|       | row 3
118  *  i  +-----------------------+-----------+-----------+-------|
119  *  c  |       ..group 5       |        group 6..      |       | row 4
120  *  e  +-----------+-----------+-----------------------+-------+
121  *  1  |..group 6  |            group 7                |       | row 5
122  *     +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
123  *     | 3 | 5 | 10| 8 | 6 | 11| 12| 0 | 2 | 4 | 7 | 1 | 9 | 13| device map 2
124  *  s  +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
125  *  l  |              group 8              |  group 9..|       | row 6
126  *  i  +-----------------------------------------------+-------|
127  *  c  |       ..group 9       |        group 10..     |       | row 7
128  *  e  +-----------------------+-----------------------+-------+
129  *  2  |..group 10 |            group 11               |       | row 8
130  *     +-----------+-----------------------------------+-------+
131  *
132  * This layout has several advantages over requiring that each row contain
133  * a whole number of groups.
134  *
135  * 1. The group count is not a relevant parameter when defining a dRAID
136  *    layout. Only the group width is needed, and *all* groups will have
137  *    the desired size.
138  *
139  * 2. All possible group widths (<= physical disk count) can be supported.
140  *
141  * 3. The logic within vdev_draid.c is simplified when the group width is
142  *    the same for all groups (although some of the logic around computing
143  *    permutation numbers and drive offsets is more complicated).
144  *
145  * === dRAID failure domains ===
146  *
147  * If we put several slices alongside in a row and configure each disk in
148  * a slice to be from a different failure domain (for example an enclosure),
149  * we can then tolerate the failure of a whole domain -- at most one device
150  * will fail in each slice in this case. We call the column of such slices
151  * a "failure group", and the row with such slices alongside a "big width
152  * row", its width being a multiple of the child count (W = C*n).
153  *
154  * Here's an example of configuration with 7 failure domains and two
155  * failure groups:
156  *
157  *         7 C disks in each slice, 2 slices in big 14 W rows
158  *      +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
159  *      | 1 | 7 | 3 | 9 | 11| 5 | 13| 6 | 10| 4 | 8 | 0 | 12| 2 | device map 0
160  *   s  +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
161  *   l  |    group 0    |  gr1..| S |    group 3    | gr4.. | S | row 0
162  *   c  +-------+-------+-------+---+-------+-------+-------+---+
163  *  0,1 | ..gr1 |    group 2    | S | ..gr4 |   group 5     | S | row 1
164  *      +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
165  *      | 2 | 10| 12| 7 | 8 | 13| 11| 1 | 5 | 4 | 6 | 3 | 9 | 0 | device map 1
166  *   s  +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
167  *   l  |    group 6    |  gr7..| S |    group 9    |gr10.. | S | row 2
168  *   c  +-------+-------+-------+---+---------------+-------+---+
169  *  2,3 | ..gr7 |    group 8    | S |..gr10 |   group 11    | S | row 3
170  *      +-------+---------------+---+-------+---------------+---+
171  *            failure group 0            failure group 1
172  *
173  * In practice, there may be many more failure groups. And in theory, the
174  * width of the big rows could be much larger than the current limit of 255
175  * imposed on the number of children. But we keep the same limit for now
176  * for the sake of implementation simplicity.
177  *
178  * In order to preserve fast sequential resilvering in case of a disk failure,
179  * all failure groups must share all disks among themselves, and this is
180  * achieved by shuffling the disks between the groups. However, only the
181  * i-th disks of each group are shuffled among themselves, i.e. the disks
182  * from the same failure domain (enclosure); they are then shuffled within
183  * each group. Thus, no more than one disk from any failure domain can appear
184  * in any failure group as a result of this shuffling. In the above example,
185  * you won't find any tuple of (0, 7) or (1, 8) or (2, 9) or ... (6, 13)
186  * mapped to the same slice. This is done in vdev_draid_shuffle_perms().
187  *
188  * Spare disks are evenly distributed among failure groups, and they are
189  * shared by all groups.  However, to support domain failure, we cannot have
190  * more than (nparity - 1) failed disks in any group, regardless of whether
191  * they are rebuilt to dRAID spares or not (the blocks of those spares can be
192  * mapped to disks from the failed domain (enclosure), and we cannot tolerate
193  * more than nparity failures in any failure group).
194  *
195  *
196  * N.B. The following array describes all valid dRAID permutation maps.
197  * Each row is used to generate a permutation map for a different number
198  * of children from a unique seed. The seeds were generated and carefully
199  * evaluated by the 'draid' utility in order to provide balanced mappings.
200  * In addition to the seed a checksum of the in-memory mapping is stored
201  * for verification.
202  *
203  * The imbalance ratio of a given failure (e.g. 5 disks wide, child 3 failed,
204  * with a given permutation map) is the ratio of the amounts of I/O that will
205  * be sent to the least and most busy disks when resilvering. The average
206  * imbalance ratio (of a given number of disks and permutation map) is the
207  * average of the ratios of all possible single and double disk failures.
208  *
209  * In order to achieve a low imbalance ratio the number of permutations in
210  * the mapping must be significantly larger than the number of children.
211  * For dRAID the number of permutations has been limited to 512 to minimize
212  * the map size. This does result in a gradually increasing imbalance ratio
213  * as seen in the table below. Increasing the number of permutations for
214  * larger child counts would reduce the imbalance ratio. However, in practice
215  * when there are a large number of children each child is responsible for
216  * fewer total IOs so it's less of a concern.
217  *
218  * Note these values are hard coded and must never be changed.  Existing
219  * pools depend on the same mapping always being generated in order to
220  * read and write from the correct locations.  Any change would make
221  * existing pools completely inaccessible.
222  */
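/*
 * Each entry below is { dm_children, dm_nperms, dm_seed, dm_checksum },
 * with the map's average imbalance ratio given in the trailing comment.
 */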
223 static const draid_map_t draid_maps[VDEV_DRAID_MAX_MAPS] = {
224 	{   2, 256, 0x89ef3dabbcc7de37, 0x00000000433d433d },	/* 1.000 */
225 	{   3, 256, 0x89a57f3de98121b4, 0x00000000bcd8b7b5 },	/* 1.000 */
226 	{   4, 256, 0xc9ea9ec82340c885, 0x00000001819d7c69 },	/* 1.000 */
227 	{   5, 256, 0xf46733b7f4d47dfd, 0x00000002a1648d74 },	/* 1.010 */
228 	{   6, 256, 0x88c3c62d8585b362, 0x00000003d3b0c2c4 },	/* 1.031 */
229 	{   7, 256, 0x3a65d809b4d1b9d5, 0x000000055c4183ee },	/* 1.043 */
230 	{   8, 256, 0xe98930e3c5d2e90a, 0x00000006edfb0329 },	/* 1.059 */
231 	{   9, 256, 0x5a5430036b982ccb, 0x00000008ceaf6934 },	/* 1.056 */
232 	{  10, 256, 0x92bf389e9eadac74, 0x0000000b26668c09 },	/* 1.072 */
233 	{  11, 256, 0x74ccebf1dcf3ae80, 0x0000000dd691358c },	/* 1.083 */
234 	{  12, 256, 0x8847e41a1a9f5671, 0x00000010a0c63c8e },	/* 1.097 */
235 	{  13, 256, 0x7481b56debf0e637, 0x0000001424121fe4 },	/* 1.100 */
236 	{  14, 256, 0x559b8c44065f8967, 0x00000016ab2ff079 },	/* 1.121 */
237 	{  15, 256, 0x34c49545a2ee7f01, 0x0000001a6028efd6 },	/* 1.103 */
238 	{  16, 256, 0xb85f4fa81a7698f7, 0x0000001e95ff5e66 },	/* 1.111 */
239 	{  17, 256, 0x6353e47b7e47aba0, 0x00000021a81fa0fe },	/* 1.133 */
240 	{  18, 256, 0xaa549746b1cbb81c, 0x00000026f02494c9 },	/* 1.131 */
241 	{  19, 256, 0x892e343f2f31d690, 0x00000029eb392835 },	/* 1.130 */
242 	{  20, 256, 0x76914824db98cc3f, 0x0000003004f31a7c },	/* 1.141 */
243 	{  21, 256, 0x4b3cbabf9cfb1d0f, 0x00000036363a2408 },	/* 1.139 */
244 	{  22, 256, 0xf45c77abb4f035d4, 0x00000038dd0f3e84 },	/* 1.150 */
245 	{  23, 256, 0x5e18bd7f3fd4baf4, 0x0000003f0660391f },	/* 1.174 */
246 	{  24, 256, 0xa7b3a4d285d6503b, 0x000000443dfc9ff6 },	/* 1.168 */
247 	{  25, 256, 0x56ac7dd967521f5a, 0x0000004b03a87eb7 },	/* 1.180 */
248 	{  26, 256, 0x3a42dfda4eb880f7, 0x000000522c719bba },	/* 1.226 */
249 	{  27, 256, 0xd200d2fc6b54bf60, 0x0000005760b4fdf5 },	/* 1.228 */
250 	{  28, 256, 0xc52605bbd486c546, 0x0000005e00d8f74c },	/* 1.217 */
251 	{  29, 256, 0xc761779e63cd762f, 0x00000067be3cd85c },	/* 1.239 */
252 	{  30, 256, 0xca577b1e07f85ca5, 0x0000006f5517f3e4 },	/* 1.238 */
253 	{  31, 256, 0xfd50a593c518b3d4, 0x0000007370e7778f },	/* 1.273 */
254 	{  32, 512, 0xc6c87ba5b042650b, 0x000000f7eb08a156 },	/* 1.191 */
255 	{  33, 512, 0xc3880d0c9d458304, 0x0000010734b5d160 },	/* 1.199 */
256 	{  34, 512, 0xe920927e4d8b2c97, 0x00000118c1edbce0 },	/* 1.195 */
257 	{  35, 512, 0x8da7fcda87bde316, 0x0000012a3e9f9110 },	/* 1.201 */
258 	{  36, 512, 0xcf09937491514a29, 0x0000013bd6a24bef },	/* 1.194 */
259 	{  37, 512, 0x9b5abbf345cbd7cc, 0x0000014b9d90fac3 },	/* 1.237 */
260 	{  38, 512, 0x506312a44668d6a9, 0x0000015e1b5f6148 },	/* 1.242 */
261 	{  39, 512, 0x71659ede62b4755f, 0x00000173ef029bcd },	/* 1.231 */
262 	{  40, 512, 0xa7fde73fb74cf2d7, 0x000001866fb72748 },	/* 1.233 */
263 	{  41, 512, 0x19e8b461a1dea1d3, 0x000001a046f76b23 },	/* 1.271 */
264 	{  42, 512, 0x031c9b868cc3e976, 0x000001afa64c49d3 },	/* 1.263 */
265 	{  43, 512, 0xbaa5125faa781854, 0x000001c76789e278 },	/* 1.270 */
266 	{  44, 512, 0x4ed55052550d721b, 0x000001d800ccd8eb },	/* 1.281 */
267 	{  45, 512, 0x0fd63ddbdff90677, 0x000001f08ad59ed2 },	/* 1.282 */
268 	{  46, 512, 0x36d66546de7fdd6f, 0x000002016f09574b },	/* 1.286 */
269 	{  47, 512, 0x99f997e7eafb69d7, 0x0000021e42e47cb6 },	/* 1.329 */
270 	{  48, 512, 0xbecd9c2571312c5d, 0x000002320fe2872b },	/* 1.286 */
271 	{  49, 512, 0xd97371329e488a32, 0x0000024cd73f2ca7 },	/* 1.322 */
272 	{  50, 512, 0x30e9b136670749ee, 0x000002681c83b0e0 },	/* 1.335 */
273 	{  51, 512, 0x11ad6bc8f47aaeb4, 0x0000027e9261b5d5 },	/* 1.305 */
274 	{  52, 512, 0x68e445300af432c1, 0x0000029aa0eb7dbf },	/* 1.330 */
275 	{  53, 512, 0x910fb561657ea98c, 0x000002b3dca04853 },	/* 1.365 */
276 	{  54, 512, 0xd619693d8ce5e7a5, 0x000002cc280e9c97 },	/* 1.334 */
277 	{  55, 512, 0x24e281f564dbb60a, 0x000002e9fa842713 },	/* 1.364 */
278 	{  56, 512, 0x947a7d3bdaab44c5, 0x000003046680f72e },	/* 1.374 */
279 	{  57, 512, 0x2d44fec9c093e0de, 0x00000324198ba810 },	/* 1.363 */
280 	{  58, 512, 0x87743c272d29bb4c, 0x0000033ec48c9ac9 },	/* 1.401 */
281 	{  59, 512, 0x96aa3b6f67f5d923, 0x0000034faead902c },	/* 1.392 */
282 	{  60, 512, 0x94a4f1faf520b0d3, 0x0000037d713ab005 },	/* 1.360 */
283 	{  61, 512, 0xb13ed3a272f711a2, 0x00000397368f3cbd },	/* 1.396 */
284 	{  62, 512, 0x3b1b11805fa4a64a, 0x000003b8a5e2840c },	/* 1.453 */
285 	{  63, 512, 0x4c74caad9172ba71, 0x000003d4be280290 },	/* 1.437 */
286 	{  64, 512, 0x035ff643923dd29e, 0x000003fad6c355e1 },	/* 1.402 */
287 	{  65, 512, 0x768e9171b11abd3c, 0x0000040eb07fed20 },	/* 1.459 */
288 	{  66, 512, 0x75880e6f78a13ddd, 0x000004433d6acf14 },	/* 1.423 */
289 	{  67, 512, 0x910b9714f698a877, 0x00000451ea65d5db },	/* 1.447 */
290 	{  68, 512, 0x87f5db6f9fdcf5c7, 0x000004732169e3f7 },	/* 1.450 */
291 	{  69, 512, 0x836d4968fbaa3706, 0x000004954068a380 },	/* 1.455 */
292 	{  70, 512, 0xc567d73a036421ab, 0x000004bd7cb7bd3d },	/* 1.463 */
293 	{  71, 512, 0x619df40f240b8fed, 0x000004e376c2e972 },	/* 1.463 */
294 	{  72, 512, 0x42763a680d5bed8e, 0x000005084275c680 },	/* 1.452 */
295 	{  73, 512, 0x5866f064b3230431, 0x0000052906f2c9ab },	/* 1.498 */
296 	{  74, 512, 0x9fa08548b1621a44, 0x0000054708019247 },	/* 1.526 */
297 	{  75, 512, 0xb6053078ce0fc303, 0x00000572cc5c72b0 },	/* 1.491 */
298 	{  76, 512, 0x4a7aad7bf3890923, 0x0000058e987bc8e9 },	/* 1.470 */
299 	{  77, 512, 0xe165613fd75b5a53, 0x000005c20473a211 },	/* 1.527 */
300 	{  78, 512, 0x3ff154ac878163a6, 0x000005d659194bf3 },	/* 1.509 */
301 	{  79, 512, 0x24b93ade0aa8a532, 0x0000060a201c4f8e },	/* 1.569 */
302 	{  80, 512, 0xc18e2d14cd9bb554, 0x0000062c55cfe48c },	/* 1.555 */
303 	{  81, 512, 0x98cc78302feb58b6, 0x0000066656a07194 },	/* 1.509 */
304 	{  82, 512, 0xc6c5fd5a2abc0543, 0x0000067cff94fbf8 },	/* 1.596 */
305 	{  83, 512, 0xa7962f514acbba21, 0x000006ab7b5afa2e },	/* 1.568 */
306 	{  84, 512, 0xba02545069ddc6dc, 0x000006d19861364f },	/* 1.541 */
307 	{  85, 512, 0x447c73192c35073e, 0x000006fce315ce35 },	/* 1.623 */
308 	{  86, 512, 0x48beef9e2d42b0c2, 0x00000720a8e38b6b },	/* 1.620 */
309 	{  87, 512, 0x4874cf98541a35e0, 0x00000758382a2273 },	/* 1.597 */
310 	{  88, 512, 0xad4cf8333a31127a, 0x00000781e1651b1b },	/* 1.575 */
311 	{  89, 512, 0x47ae4859d57888c1, 0x000007b27edbe5bc },	/* 1.627 */
312 	{  90, 512, 0x06f7723cfe5d1891, 0x000007dc2a96d8eb },	/* 1.596 */
313 	{  91, 512, 0xd4e44218d660576d, 0x0000080ac46f02d5 },	/* 1.622 */
314 	{  92, 512, 0x7066702b0d5be1f2, 0x00000832c96d154e },	/* 1.695 */
315 	{  93, 512, 0x011209b4f9e11fb9, 0x0000085eefda104c },	/* 1.605 */
316 	{  94, 512, 0x47ffba30a0b35708, 0x00000899badc32dc },	/* 1.625 */
317 	{  95, 512, 0x1a95a6ac4538aaa8, 0x000008b6b69a42b2 },	/* 1.687 */
318 	{  96, 512, 0xbda2b239bb2008eb, 0x000008f22d2de38a },	/* 1.621 */
319 	{  97, 512, 0x7ffa0bea90355c6c, 0x0000092e5b23b816 },	/* 1.699 */
320 	{  98, 512, 0x1d56ba34be426795, 0x0000094f482e5d1b },	/* 1.688 */
321 	{  99, 512, 0x0aa89d45c502e93d, 0x00000977d94a98ce },	/* 1.642 */
322 	{ 100, 512, 0x54369449f6857774, 0x000009c06c9b34cc },	/* 1.683 */
323 	{ 101, 512, 0xf7d4dd8445b46765, 0x000009e5dc542259 },	/* 1.755 */
324 	{ 102, 512, 0xfa8866312f169469, 0x00000a16b54eae93 },	/* 1.692 */
325 	{ 103, 512, 0xd8a5aea08aef3ff9, 0x00000a381d2cbfe7 },	/* 1.747 */
326 	{ 104, 512, 0x66bcd2c3d5f9ef0e, 0x00000a8191817be7 },	/* 1.751 */
327 	{ 105, 512, 0x3fb13a47a012ec81, 0x00000ab562b9a254 },	/* 1.751 */
328 	{ 106, 512, 0x43100f01c9e5e3ca, 0x00000aeee84c185f },	/* 1.726 */
329 	{ 107, 512, 0xca09c50ccee2d054, 0x00000b1c359c047d },	/* 1.788 */
330 	{ 108, 512, 0xd7176732ac503f9b, 0x00000b578bc52a73 },	/* 1.740 */
331 	{ 109, 512, 0xed206e51f8d9422d, 0x00000b8083e0d960 },	/* 1.780 */
332 	{ 110, 512, 0x17ead5dc6ba0dcd6, 0x00000bcfb1a32ca8 },	/* 1.836 */
333 	{ 111, 512, 0x5f1dc21e38a969eb, 0x00000c0171becdd6 },	/* 1.778 */
334 	{ 112, 512, 0xddaa973de33ec528, 0x00000c3edaba4b95 },	/* 1.831 */
335 	{ 113, 512, 0x2a5eccd7735a3630, 0x00000c630664e7df },	/* 1.825 */
336 	{ 114, 512, 0xafcccee5c0b71446, 0x00000cb65392f6e4 },	/* 1.826 */
337 	{ 115, 512, 0x8fa30c5e7b147e27, 0x00000cd4db391e55 },	/* 1.843 */
338 	{ 116, 512, 0x5afe0711fdfafd82, 0x00000d08cb4ec35d },	/* 1.826 */
339 	{ 117, 512, 0x533a6090238afd4c, 0x00000d336f115d1b },	/* 1.803 */
340 	{ 118, 512, 0x90cf11b595e39a84, 0x00000d8e041c2048 },	/* 1.857 */
341 	{ 119, 512, 0x0d61a3b809444009, 0x00000dcb798afe35 },	/* 1.877 */
342 	{ 120, 512, 0x7f34da0f54b0d114, 0x00000df3922664e1 },	/* 1.849 */
343 	{ 121, 512, 0xa52258d5b72f6551, 0x00000e4d37a9872d },	/* 1.867 */
344 	{ 122, 512, 0xc1de54d7672878db, 0x00000e6583a94cf6 },	/* 1.978 */
345 	{ 123, 512, 0x1d03354316a414ab, 0x00000ebffc50308d },	/* 1.947 */
346 	{ 124, 512, 0xcebdcc377665412c, 0x00000edee1997cea },	/* 1.865 */
347 	{ 125, 512, 0x4ddd4c04b1a12344, 0x00000f21d64b373f },	/* 1.881 */
348 	{ 126, 512, 0x64fc8f94e3973658, 0x00000f8f87a8896b },	/* 1.882 */
349 	{ 127, 512, 0x68765f78034a334e, 0x00000fb8fe62197e },	/* 1.867 */
350 	{ 128, 512, 0xaf36b871a303e816, 0x00000fec6f3afb1e },	/* 1.972 */
351 	{ 129, 512, 0x2a4cbf73866c3a28, 0x00001027febfe4e5 },	/* 1.896 */
352 	{ 130, 512, 0x9cb128aacdcd3b2f, 0x0000106aa8ac569d },	/* 1.965 */
353 	{ 131, 512, 0x5511d41c55869124, 0x000010bbd755ddf1 },	/* 1.963 */
354 	{ 132, 512, 0x42f92461937f284a, 0x000010fb8bceb3b5 },	/* 1.925 */
355 	{ 133, 512, 0xe2d89a1cf6f1f287, 0x0000114cf5331e34 },	/* 1.862 */
356 	{ 134, 512, 0xdc631a038956200e, 0x0000116428d2adc5 },	/* 2.042 */
357 	{ 135, 512, 0xb2e5ac222cd236be, 0x000011ca88e4d4d2 },	/* 1.935 */
358 	{ 136, 512, 0xbc7d8236655d88e7, 0x000011e39cb94e66 },	/* 2.005 */
359 	{ 137, 512, 0x073e02d88d2d8e75, 0x0000123136c7933c },	/* 2.041 */
360 	{ 138, 512, 0x3ddb9c3873166be0, 0x00001280e4ec6d52 },	/* 1.997 */
361 	{ 139, 512, 0x7d3b1a845420e1b5, 0x000012c2e7cd6a44 },	/* 1.996 */
362 	{ 140, 512, 0x60102308aa7b2a6c, 0x000012fc490e6c7d },	/* 2.053 */
363 	{ 141, 512, 0xdb22bb2f9eb894aa, 0x00001343f5a85a1a },	/* 1.971 */
364 	{ 142, 512, 0xd853f879a13b1606, 0x000013bb7d5f9048 },	/* 2.018 */
365 	{ 143, 512, 0x001620a03f804b1d, 0x000013e74cc794fd },	/* 1.961 */
366 	{ 144, 512, 0xfdb52dda76fbf667, 0x00001442d2f22480 },	/* 2.046 */
367 	{ 145, 512, 0xa9160110f66e24ff, 0x0000144b899f9dbb },	/* 1.968 */
368 	{ 146, 512, 0x77306a30379ae03b, 0x000014cb98eb1f81 },	/* 2.143 */
369 	{ 147, 512, 0x14f5985d2752319d, 0x000014feab821fc9 },	/* 2.064 */
370 	{ 148, 512, 0xa4b8ff11de7863f8, 0x0000154a0e60b9c9 },	/* 2.023 */
371 	{ 149, 512, 0x44b345426455c1b3, 0x000015999c3c569c },	/* 2.136 */
372 	{ 150, 512, 0x272677826049b46c, 0x000015c9697f4b92 },	/* 2.063 */
373 	{ 151, 512, 0x2f9216e2cd74fe40, 0x0000162b1f7bbd39 },	/* 1.974 */
374 	{ 152, 512, 0x706ae3e763ad8771, 0x00001661371c55e1 },	/* 2.210 */
375 	{ 153, 512, 0xf7fd345307c2480e, 0x000016e251f28b6a },	/* 2.006 */
376 	{ 154, 512, 0x6e94e3d26b3139eb, 0x000016f2429bb8c6 },	/* 2.193 */
377 	{ 155, 512, 0x5458bbfbb781fcba, 0x0000173efdeca1b9 },	/* 2.163 */
378 	{ 156, 512, 0xa80e2afeccd93b33, 0x000017bfdcb78adc },	/* 2.046 */
379 	{ 157, 512, 0x1e4ccbb22796cf9d, 0x00001826fdcc39c9 },	/* 2.084 */
380 	{ 158, 512, 0x8fba4b676aaa3663, 0x00001841a1379480 },	/* 2.264 */
381 	{ 159, 512, 0xf82b843814b315fa, 0x000018886e19b8a3 },	/* 2.074 */
382 	{ 160, 512, 0x7f21e920ecf753a3, 0x0000191812ca0ea7 },	/* 2.282 */
383 	{ 161, 512, 0x48bb8ea2c4caa620, 0x0000192f310faccf },	/* 2.148 */
384 	{ 162, 512, 0x5cdb652b4952c91b, 0x0000199e1d7437c7 },	/* 2.355 */
385 	{ 163, 512, 0x6ac1ba6f78c06cd4, 0x000019cd11f82c70 },	/* 2.164 */
386 	{ 164, 512, 0x9faf5f9ca2669a56, 0x00001a18d5431f6a },	/* 2.393 */
387 	{ 165, 512, 0xaa57e9383eb01194, 0x00001a9e7d253d85 },	/* 2.178 */
388 	{ 166, 512, 0x896967bf495c34d2, 0x00001afb8319b9fc },	/* 2.334 */
389 	{ 167, 512, 0xdfad5f05de225f1b, 0x00001b3a59c3093b },	/* 2.266 */
390 	{ 168, 512, 0xfd299a99f9f2abdd, 0x00001bb6f1a10799 },	/* 2.304 */
391 	{ 169, 512, 0xdda239e798fe9fd4, 0x00001bfae0c9692d },	/* 2.218 */
392 	{ 170, 512, 0x5fca670414a32c3e, 0x00001c22129dbcff },	/* 2.377 */
393 	{ 171, 512, 0x1bb8934314b087de, 0x00001c955db36cd0 },	/* 2.155 */
394 	{ 172, 512, 0xd96394b4b082200d, 0x00001cfc8619b7e6 },	/* 2.404 */
395 	{ 173, 512, 0xb612a7735b1c8cbc, 0x00001d303acdd585 },	/* 2.205 */
396 	{ 174, 512, 0x28e7430fe5875fe1, 0x00001d7ed5b3697d },	/* 2.359 */
397 	{ 175, 512, 0x5038e89efdd981b9, 0x00001dc40ec35c59 },	/* 2.158 */
398 	{ 176, 512, 0x075fd78f1d14db7c, 0x00001e31c83b4a2b },	/* 2.614 */
399 	{ 177, 512, 0xc50fafdb5021be15, 0x00001e7cdac82fbc },	/* 2.239 */
400 	{ 178, 512, 0xe6dc7572ce7b91c7, 0x00001edd8bb454fc },	/* 2.493 */
401 	{ 179, 512, 0x21f7843e7beda537, 0x00001f3a8e019d6c },	/* 2.327 */
402 	{ 180, 512, 0xc83385e20b43ec82, 0x00001f70735ec137 },	/* 2.231 */
403 	{ 181, 512, 0xca818217dddb21fd, 0x0000201ca44c5a3c },	/* 2.237 */
404 	{ 182, 512, 0xe6035defea48f933, 0x00002038e3346658 },	/* 2.691 */
405 	{ 183, 512, 0x47262a4f953dac5a, 0x000020c2e554314e },	/* 2.170 */
406 	{ 184, 512, 0xe24c7246260873ea, 0x000021197e618d64 },	/* 2.600 */
407 	{ 185, 512, 0xeef6b57c9b58e9e1, 0x0000217ea48ecddc },	/* 2.391 */
408 	{ 186, 512, 0x2becd3346e386142, 0x000021c496d4a5f9 },	/* 2.677 */
409 	{ 187, 512, 0x63c6207bdf3b40a3, 0x0000220e0f2eec0c },	/* 2.410 */
410 	{ 188, 512, 0x3056ce8989767d4b, 0x0000228eb76cd137 },	/* 2.776 */
411 	{ 189, 512, 0x91af61c307cee780, 0x000022e17e2ea501 },	/* 2.266 */
412 	{ 190, 512, 0xda359da225f6d54f, 0x00002358a2debc19 },	/* 2.717 */
413 	{ 191, 512, 0x0a5f7a2a55607ba0, 0x0000238a79dac18c },	/* 2.474 */
414 	{ 192, 512, 0x27bb75bf5224638a, 0x00002403a58e2351 },	/* 2.673 */
415 	{ 193, 512, 0x1ebfdb94630f5d0f, 0x00002492a10cb339 },	/* 2.420 */
416 	{ 194, 512, 0x6eae5e51d9c5f6fb, 0x000024ce4bf98715 },	/* 2.898 */
417 	{ 195, 512, 0x08d903b4daedc2e0, 0x0000250d1e15886c },	/* 2.363 */
418 	{ 196, 512, 0xc722a2f7fa7cd686, 0x0000258a99ed0c9e },	/* 2.747 */
419 	{ 197, 512, 0x8f71faf0e54e361d, 0x000025dee11976f5 },	/* 2.531 */
420 	{ 198, 512, 0x87f64695c91a54e7, 0x0000264e00a43da0 },	/* 2.707 */
421 	{ 199, 512, 0xc719cbac2c336b92, 0x000026d327277ac1 },	/* 2.315 */
422 	{ 200, 512, 0xe7e647afaf771ade, 0x000027523a5c44bf },	/* 3.012 */
423 	{ 201, 512, 0x12d4b5c38ce8c946, 0x0000273898432545 },	/* 2.378 */
424 	{ 202, 512, 0xf2e0cd4067bdc94a, 0x000027e47bb2c935 },	/* 2.969 */
425 	{ 203, 512, 0x21b79f14d6d947d3, 0x0000281e64977f0d },	/* 2.594 */
426 	{ 204, 512, 0x515093f952f18cd6, 0x0000289691a473fd },	/* 2.763 */
427 	{ 205, 512, 0xd47b160a1b1022c8, 0x00002903e8b52411 },	/* 2.457 */
428 	{ 206, 512, 0xc02fc96684715a16, 0x0000297515608601 },	/* 3.057 */
429 	{ 207, 512, 0xef51e68efba72ed0, 0x000029ef73604804 },	/* 2.590 */
430 	{ 208, 512, 0x9e3be6e5448b4f33, 0x00002a2846ed074b },	/* 3.047 */
431 	{ 209, 512, 0x81d446c6d5fec063, 0x00002a92ca693455 },	/* 2.676 */
432 	{ 210, 512, 0xff215de8224e57d5, 0x00002b2271fe3729 },	/* 2.993 */
433 	{ 211, 512, 0xe2524d9ba8f69796, 0x00002b64b99c3ba2 },	/* 2.457 */
434 	{ 212, 512, 0xf6b28e26097b7e4b, 0x00002bd768b6e068 },	/* 3.182 */
435 	{ 213, 512, 0x893a487f30ce1644, 0x00002c67f722b4b2 },	/* 2.563 */
436 	{ 214, 512, 0x386566c3fc9871df, 0x00002cc1cf8b4037 },	/* 3.025 */
437 	{ 215, 512, 0x1e0ed78edf1f558a, 0x00002d3948d36c7f },	/* 2.730 */
438 	{ 216, 512, 0xe3bc20c31e61f113, 0x00002d6d6b12e025 },	/* 3.036 */
439 	{ 217, 512, 0xd6c3ad2e23021882, 0x00002deff7572241 },	/* 2.722 */
440 	{ 218, 512, 0xb4a9f95cf0f69c5a, 0x00002e67d537aa36 },	/* 3.356 */
441 	{ 219, 512, 0x6e98ed6f6c38e82f, 0x00002e9720626789 },	/* 2.697 */
442 	{ 220, 512, 0x2e01edba33fddac7, 0x00002f407c6b0198 },	/* 2.979 */
443 	{ 221, 512, 0x559d02e1f5f57ccc, 0x00002fb6a5ab4f24 },	/* 2.858 */
444 	{ 222, 512, 0xac18f5a916adcd8e, 0x0000304ae1c5c57e },	/* 3.258 */
445 	{ 223, 512, 0x15789fbaddb86f4b, 0x0000306f6e019c78 },	/* 2.693 */
446 	{ 224, 512, 0xf4a9c36d5bc4c408, 0x000030da40434213 },	/* 3.259 */
447 	{ 225, 512, 0xf640f90fd2727f44, 0x00003189ed37b90c },	/* 2.733 */
448 	{ 226, 512, 0xb5313d390d61884a, 0x000031e152616b37 },	/* 3.235 */
449 	{ 227, 512, 0x4bae6b3ce9160939, 0x0000321f40aeac42 },	/* 2.983 */
450 	{ 228, 512, 0x838c34480f1a66a1, 0x000032f389c0f78e },	/* 3.308 */
451 	{ 229, 512, 0xb1c4a52c8e3d6060, 0x0000330062a40284 },	/* 2.715 */
452 	{ 230, 512, 0xe0f1110c6d0ed822, 0x0000338be435644f },	/* 3.540 */
453 	{ 231, 512, 0x9f1a8ccdcea68d4b, 0x000034045a4e97e1 },	/* 2.779 */
454 	{ 232, 512, 0x3261ed62223f3099, 0x000034702cfc401c },	/* 3.084 */
455 	{ 233, 512, 0xf2191e2311022d65, 0x00003509dd19c9fc },	/* 2.987 */
456 	{ 234, 512, 0xf102a395c2033abc, 0x000035654dc96fae },	/* 3.341 */
457 	{ 235, 512, 0x11fe378f027906b6, 0x000035b5193b0264 },	/* 2.793 */
458 	{ 236, 512, 0xf777f2c026b337aa, 0x000036704f5d9297 },	/* 3.518 */
459 	{ 237, 512, 0x1b04e9c2ee143f32, 0x000036dfbb7af218 },	/* 2.962 */
460 	{ 238, 512, 0x2fcec95266f9352c, 0x00003785c8df24a9 },	/* 3.196 */
461 	{ 239, 512, 0xfe2b0e47e427dd85, 0x000037cbdf5da729 },	/* 2.914 */
462 	{ 240, 512, 0x72b49bf2225f6c6d, 0x0000382227c15855 },	/* 3.408 */
463 	{ 241, 512, 0x50486b43df7df9c7, 0x0000389b88be6453 },	/* 2.903 */
464 	{ 242, 512, 0x5192a3e53181c8ab, 0x000038ddf3d67263 },	/* 3.778 */
465 	{ 243, 512, 0xe9f5d8365296fd5e, 0x0000399f1c6c9e9c },	/* 3.026 */
466 	{ 244, 512, 0xc740263f0301efa8, 0x00003a147146512d },	/* 3.347 */
467 	{ 245, 512, 0x23cd0f2b5671e67d, 0x00003ab10bcc0d9d },	/* 3.212 */
468 	{ 246, 512, 0x002ccc7e5cd41390, 0x00003ad6cd14a6c0 },	/* 3.482 */
469 	{ 247, 512, 0x9aafb3c02544b31b, 0x00003b8cb8779fb0 },	/* 3.146 */
470 	{ 248, 512, 0x72ba07a78b121999, 0x00003c24142a5a3f },	/* 3.626 */
471 	{ 249, 512, 0x3d784aa58edfc7b4, 0x00003cd084817d99 },	/* 2.952 */
472 	{ 250, 512, 0xaab750424d8004af, 0x00003d506a8e098e },	/* 3.463 */
473 	{ 251, 512, 0x84403fcf8e6b5ca2, 0x00003d4c54c2aec4 },	/* 3.131 */
474 	{ 252, 512, 0x71eb7455ec98e207, 0x00003e655715cf2c },	/* 3.538 */
475 	{ 253, 512, 0xd752b4f19301595b, 0x00003ecd7b2ca5ac },	/* 2.974 */
476 	{ 254, 512, 0xc4674129750499de, 0x00003e99e86d3e95 },	/* 3.843 */
477 	{ 255, 512, 0x9772baff5cd12ef5, 0x00003f895c019841 },	/* 3.088 */
478 };
479 
480 /*
481  * Verify the map is valid. Each device index must appear exactly
482  * once in every row, and the permutation array checksum must match.
483  */
484 static int
485 verify_perms(uint8_t *perms, uint64_t children, uint64_t nperms,
486     uint64_t checksum)
487 {
488 	int countssz = sizeof (uint16_t) * children;
489 	uint16_t *counts = kmem_zalloc(countssz, KM_SLEEP);
490 
491 	for (int i = 0; i < nperms; i++) {
492 		for (int j = 0; j < children; j++) {
493 			uint8_t val = perms[(i * children) + j];
494 
495 			if (val >= children || counts[val] != i) {
496 				kmem_free(counts, countssz);
497 				return (EINVAL);
498 			}
499 
500 			counts[val]++;
501 		}
502 	}
503 
504 	if (checksum != 0) {
505 		int permssz = sizeof (uint8_t) * children * nperms;
506 		zio_cksum_t cksum;
507 
508 		fletcher_4_native_varsize(perms, permssz, &cksum);
509 
510 		if (checksum != cksum.zc_word[0]) {
511 			kmem_free(counts, countssz);
512 			return (ECKSUM);
513 		}
514 	}
515 
516 	kmem_free(counts, countssz);
517 
518 	return (0);
519 }
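
/*
 * For illustration: with children = 4, a valid row is any permutation such
 * as { 2, 0, 3, 1 }.  A row like { 2, 0, 2, 1 } fails the check above
 * because index 2 repeats (counts[2] already equals i + 1 at the second
 * occurrence) and index 3 never appears in the row.
 */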
520 
521 /*
522  * Generate the permutation array for the draid_map_t.  These maps control
523  * the placement of all data in a dRAID.  Therefore it's critical that the
524  * seed always generates the same mapping.  We provide our own pseudo-random
525  * number generator for this purpose.
526  */
527 int
528 vdev_draid_generate_perms(const draid_map_t *map, uint8_t **permsp)
529 {
530 	VERIFY3U(map->dm_children, >=, VDEV_DRAID_MIN_CHILDREN);
531 	VERIFY3U(map->dm_children, <=, VDEV_DRAID_MAX_CHILDREN);
532 	VERIFY3U(map->dm_seed, !=, 0);
533 	VERIFY3U(map->dm_nperms, !=, 0);
534 	VERIFY0P(map->dm_perms);
535 
536 #ifdef _KERNEL
537 	/*
538 	 * The kernel code always provides both a map_seed and checksum.
539 	 * Only the tests/zfs-tests/cmd/draid/draid.c utility will provide
540 	 * a zero checksum when generating new candidate maps.
541 	 */
542 	VERIFY3U(map->dm_checksum, !=, 0);
543 #endif
544 	uint64_t children = map->dm_children;
545 	uint64_t nperms = map->dm_nperms;
546 	int rowsz = sizeof (uint8_t) * children;
547 	int permssz = rowsz * nperms;
548 	uint8_t *perms;
549 
550 	/* Allocate the permutation array */
551 	perms = vmem_alloc(permssz, KM_SLEEP);
552 
553 	/* Setup an initial row with a known pattern */
554 	uint8_t *initial_row = kmem_alloc(rowsz, KM_SLEEP);
555 	for (int i = 0; i < children; i++)
556 		initial_row[i] = i;
557 
558 	uint64_t draid_seed[2] = { VDEV_DRAID_SEED, map->dm_seed };
559 	uint8_t *current_row, *previous_row = initial_row;
560 
561 	/*
562 	 * Perform a Fisher-Yates shuffle of each row using the previous
563 	 * row as the starting point.  An initial_row with known pattern
564 	 * is used as the input for the first row.
565 	 */
566 	for (int i = 0; i < nperms; i++) {
567 		current_row = &perms[i * children];
568 		memcpy(current_row, previous_row, rowsz);
569 
570 		for (int j = children - 1; j > 0; j--) {
571 			uint64_t k = vdev_draid_rand(draid_seed) % (j + 1);
572 			uint8_t val = current_row[j];
573 			current_row[j] = current_row[k];
574 			current_row[k] = val;
575 		}
576 
577 		previous_row = current_row;
578 	}
579 
580 	kmem_free(initial_row, rowsz);
581 
582 	int error = verify_perms(perms, children, nperms, map->dm_checksum);
583 	if (error) {
584 		vmem_free(perms, permssz);
585 		return (error);
586 	}
587 
588 	*permsp = perms;
589 
590 	return (0);
591 }
592 
593 static void
594 vdev_draid_swap_perms(uint8_t *perms, uint64_t i, uint64_t j)
595 {
596 	uint8_t val = perms[i];
597 
598 	perms[i] = perms[j];
599 	perms[j] = val;
600 }
601 
602 /*
603  * Shuffle every i-th disk among the slices that lie alongside in a big width
604  * row, raising the disk indices in each subsequent slice of the row
605  * accordingly. The input to this function is the array of finished
606  * permutations from vdev_draid_generate_perms(), so to correctly shuffle the
607  * i-th disks we must first locate them and build a map of their positions.
608  *
609  * Note: the same Fisher-Yates shuffle algorithm is used as in
610  * vdev_draid_generate_perms().
611  */
612 static void
613 vdev_draid_shuffle_perms(const draid_map_t *map, uint8_t *perms, uint64_t width)
614 {
615 	uint64_t cn = map->dm_children;
616 	if (width <= cn)
617 		return;
618 
619 	uint64_t n = width / cn;
620 	uint64_t nperms = map->dm_nperms / n * n;
621 
622 	VERIFY3U(width, >=, VDEV_DRAID_MIN_CHILDREN);
623 	VERIFY3U(width, <=, VDEV_DRAID_MAX_CHILDREN);
624 	ASSERT0(width % cn);
625 
626 	uint64_t draid_seed[2] = { VDEV_DRAID_SEED, map->dm_seed };
627 
628 	uint8_t *cmap = kmem_alloc(n, KM_SLEEP);
629 
630 	for (int i = 0; i < nperms; i += n) {
631 		for (int j = 0; j < cn; j++) {
632 
633 			/* locate position of the same child in other slices */
634 			for (int k = n - 1; k > 0; k--)
635 				for (int l = 0; l < cn; l++)
636 					if (perms[(i+k) * cn + l] ==
637 					    perms[(i+0) * cn + j])
638 						cmap[k] = l;
639 			cmap[0] = j;
640 
641 			/* increase index values for slices on the right */
642 			for (int k = n - 1; k > 0; k--)
643 				perms[(i+k) * cn + cmap[k]] += k * cn;
644 
645 			/* shuffle */
646 			for (int k = n - 1; k > 0; k--) {
647 				int l = vdev_draid_rand(draid_seed) % (k + 1);
648 				if (k == l)
649 					continue;
650 				vdev_draid_swap_perms(perms,
651 				    (i+k) * cn + cmap[k],
652 				    (i+l) * cn + cmap[l]);
653 			}
654 		}
655 	}
656 
657 	kmem_free(cmap, n);
658 }
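
/*
 * For example, with children = 7 and width = 14 (n = 2 slices per big
 * width row, as in the failure domain diagram above), each child of slice
 * 0 is paired with the position of the same child index in slice 1, the
 * slice 1 value is raised by k * cn = 7, and the pair is then swapped
 * pseudo-randomly, so each failure domain contributes at most one disk to
 * any failure group.
 */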
659 
660 /*
661  * Lookup the fixed draid_map_t for the requested number of children.
662  */
663 int
664 vdev_draid_lookup_map(uint64_t children, const draid_map_t **mapp)
665 {
666 	for (int i = 0; i < VDEV_DRAID_MAX_MAPS; i++) {
667 		if (draid_maps[i].dm_children == children) {
668 			*mapp = &draid_maps[i];
669 			return (0);
670 		}
671 	}
672 
673 	return (ENOENT);
674 }
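
/*
 * For illustration only, a minimal sketch of how the helpers above fit
 * together (error handling elided; 'children' is a caller-supplied count):
 *
 *	const draid_map_t *map;
 *	uint8_t *perms;
 *
 *	if (vdev_draid_lookup_map(children, &map) == 0 &&
 *	    vdev_draid_generate_perms(map, &perms) == 0) {
 *		// ... use perms[row * map->dm_children + col] ...
 *		vmem_free(perms, map->dm_children * map->dm_nperms);
 *	}
 */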
675 
676 /*
677  * Lookup the permutation array and iteration id for the provided offset.
678  */
679 static void
680 vdev_draid_get_perm(vdev_draid_config_t *vdc, uint64_t pindex,
681     uint8_t **base, uint64_t *iter)
682 {
683 	uint64_t n = vdc->vdc_width / vdc->vdc_children;
684 	uint64_t ncols = vdc->vdc_children;
685 	uint64_t nperms = (vdc->vdc_nperms / n) * n;
686 	uint64_t poff = pindex % (nperms * ncols);
687 
688 	ASSERT3U(nperms, >=, ncols * n);
689 
690 	*base = vdc->vdc_perms + (poff / (ncols * n)) * (ncols * n);
691 	*iter = (poff % ncols) + (pindex % n) * ncols;
692 }
693 
694 static inline uint64_t
695 vdev_draid_permute_id(vdev_draid_config_t *vdc,
696     uint8_t *base, uint64_t iter, uint64_t index)
697 {
698 	if (vdc->vdc_width > vdc->vdc_children) {
699 		uint64_t off = (iter / vdc->vdc_children) * vdc->vdc_children;
700 		return (base[(index + iter) % vdc->vdc_children + off]);
701 	}
702 
703 	return ((base[index] + iter) % vdc->vdc_children);
704 }
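
/*
 * For example, in the common case (width == children) with device map 0
 * from the layout above as the base row { 2, 6, 1, ... } and iter = 1,
 * index 0 maps to physical child (2 + 1) % 14 = 3.
 */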
705 
706 /*
707  * Return the asize, which is the psize rounded up to a full group width,
708  * including the space consumed by the parity sectors.
709  */
710 static uint64_t
711 vdev_draid_psize_to_asize(vdev_t *vd, uint64_t psize, uint64_t txg)
712 {
713 	(void) txg;
714 	vdev_draid_config_t *vdc = vd->vdev_tsd;
715 	uint64_t ashift = vd->vdev_ashift;
716 
717 	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
718 
719 	uint64_t rows = ((psize - 1) / (vdc->vdc_ndata << ashift)) + 1;
720 	uint64_t asize = (rows * vdc->vdc_groupwidth) << ashift;
721 
722 	ASSERT3U(asize, !=, 0);
723 	ASSERT0(asize % (vdc->vdc_groupwidth));
724 
725 	return (asize);
726 }
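
/*
 * For example, with ashift = 12 (4K sectors), ndata = 8, and
 * groupwidth = 9: a 32K psize fills exactly one row
 * (rows = ((32K - 1) / 32K) + 1 = 1), so asize = 1 * 9 * 4K = 36K.
 * A 4K psize also occupies one row and is likewise rounded up to 36K.
 */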
727 
728 /*
729  * Deflate the asize to the psize; this includes stripping the parity.
730  */
731 uint64_t
732 vdev_draid_asize_to_psize(vdev_t *vd, uint64_t asize, uint64_t txg)
733 {
734 	(void) txg;
735 	vdev_draid_config_t *vdc = vd->vdev_tsd;
736 
737 	ASSERT0(asize % vdc->vdc_groupwidth);
738 
739 	return ((asize / vdc->vdc_groupwidth) * vdc->vdc_ndata);
740 }
741 
742 /*
743  * Convert a logical offset to the corresponding group number.
744  */
745 static uint64_t
746 vdev_draid_offset_to_group(vdev_t *vd, uint64_t offset)
747 {
748 	vdev_draid_config_t *vdc = vd->vdev_tsd;
749 
750 	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
751 
752 	return (offset / vdc->vdc_groupsz);
753 }
754 
755 /*
756  * Convert a group number to the logical starting offset for that group.
757  */
758 static uint64_t
759 vdev_draid_group_to_offset(vdev_t *vd, uint64_t group)
760 {
761 	vdev_draid_config_t *vdc = vd->vdev_tsd;
762 
763 	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
764 
765 	return (group * vdc->vdc_groupsz);
766 }
767 
768 /*
769  * Full stripe writes.  When writing, all columns (D+P) are required.  Parity
770  * is calculated over all the columns, including empty zero filled sectors,
771  * and each is written to disk.  While only the data columns are needed for
772  * a normal read, all of the columns are required for reconstruction when
773  * performing a sequential resilver.
774  *
775  * For "big columns" it's sufficient to map the correct range of the zio ABD.
776  * Partial columns require allocating a gang ABD in order to zero fill the
777  * empty sectors.  When the column is empty a zero filled sector must be
778  * mapped.  In all cases the data ABDs must be the same size as the parity
779  * ABDs (e.g. rc->rc_size == parity_size).
780  */
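
/*
 * For example, with 4K sectors and groupwidth = 9 (8 data + 1 parity), a
 * 20K write provides only 5 of the 8 data sectors in the stripe: data
 * columns 1-5 are "big columns" each mapping one 4K sector of the zio ABD,
 * while data columns 6-8 are empty and receive a zero-filled skip sector,
 * so every column ends up matching the 4K parity size.
 */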
781 static void
782 vdev_draid_map_alloc_write(zio_t *zio, uint64_t abd_offset, raidz_row_t *rr)
783 {
784 	uint64_t skip_size = 1ULL << zio->io_vd->vdev_top->vdev_ashift;
785 	uint64_t parity_size = rr->rr_col[0].rc_size;
786 	uint64_t abd_off = abd_offset;
787 
788 	ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
789 	ASSERT3U(parity_size, ==, abd_get_size(rr->rr_col[0].rc_abd));
790 
791 	for (uint64_t c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
792 		raidz_col_t *rc = &rr->rr_col[c];
793 
794 		if (rc->rc_size == 0) {
795 			/* empty data column (small write), add a skip sector */
796 			ASSERT3U(skip_size, ==, parity_size);
797 			rc->rc_abd = abd_get_zeros(skip_size);
798 		} else if (rc->rc_size == parity_size) {
799 			/* this is a "big column" */
800 			rc->rc_abd = abd_get_offset_struct(&rc->rc_abdstruct,
801 			    zio->io_abd, abd_off, rc->rc_size);
802 		} else {
803 			/* short data column, add a skip sector */
804 			ASSERT3U(rc->rc_size + skip_size, ==, parity_size);
805 			rc->rc_abd = abd_alloc_gang();
806 			abd_gang_add(rc->rc_abd, abd_get_offset_size(
807 			    zio->io_abd, abd_off, rc->rc_size), B_TRUE);
808 			abd_gang_add(rc->rc_abd, abd_get_zeros(skip_size),
809 			    B_TRUE);
810 		}
811 
812 		ASSERT3U(abd_get_size(rc->rc_abd), ==, parity_size);
813 
814 		abd_off += rc->rc_size;
815 		rc->rc_size = parity_size;
816 	}
817 
818 	IMPLY(abd_offset != 0, abd_off == zio->io_size);
819 }
820 
821 /*
822  * Scrub/resilver reads.  In order to store the contents of the skip sectors
823  * an additional ABD is allocated.  The columns are handled in the same way
824  * as a full stripe write except instead of using the zero ABD the newly
825  * allocated skip ABD is used to back the skip sectors.  In all cases the
826  * data ABD must be the same size as the parity ABDs.
827  */
828 static void
829 vdev_draid_map_alloc_scrub(zio_t *zio, uint64_t abd_offset, raidz_row_t *rr)
830 {
831 	uint64_t skip_size = 1ULL << zio->io_vd->vdev_top->vdev_ashift;
832 	uint64_t parity_size = rr->rr_col[0].rc_size;
833 	uint64_t abd_off = abd_offset;
834 	uint64_t skip_off = 0;
835 
836 	ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
837 	ASSERT0P(rr->rr_abd_empty);
838 
839 	if (rr->rr_nempty > 0) {
840 		rr->rr_abd_empty = abd_alloc_linear(rr->rr_nempty * skip_size,
841 		    B_FALSE);
842 	}
843 
844 	for (uint64_t c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
845 		raidz_col_t *rc = &rr->rr_col[c];
846 
847 		if (rc->rc_size == 0) {
848 			/* empty data column (small read), add a skip sector */
849 			ASSERT3U(skip_size, ==, parity_size);
850 			ASSERT3U(rr->rr_nempty, !=, 0);
851 			rc->rc_abd = abd_get_offset_size(rr->rr_abd_empty,
852 			    skip_off, skip_size);
853 			skip_off += skip_size;
854 		} else if (rc->rc_size == parity_size) {
855 			/* this is a "big column" */
856 			rc->rc_abd = abd_get_offset_struct(&rc->rc_abdstruct,
857 			    zio->io_abd, abd_off, rc->rc_size);
858 		} else {
859 			/* short data column, add a skip sector */
860 			ASSERT3U(rc->rc_size + skip_size, ==, parity_size);
861 			ASSERT3U(rr->rr_nempty, !=, 0);
862 			rc->rc_abd = abd_alloc_gang();
863 			abd_gang_add(rc->rc_abd, abd_get_offset_size(
864 			    zio->io_abd, abd_off, rc->rc_size), B_TRUE);
865 			abd_gang_add(rc->rc_abd, abd_get_offset_size(
866 			    rr->rr_abd_empty, skip_off, skip_size), B_TRUE);
867 			skip_off += skip_size;
868 		}
869 
870 		uint64_t abd_size = abd_get_size(rc->rc_abd);
871 		ASSERT3U(abd_size, ==, abd_get_size(rr->rr_col[0].rc_abd));
872 
873 		/*
874 		 * Increase rc_size so the skip ABD is included in subsequent
875 		 * parity calculations.
876 		 */
877 		abd_off += rc->rc_size;
878 		rc->rc_size = abd_size;
879 	}
880 
881 	IMPLY(abd_offset != 0, abd_off == zio->io_size);
882 	ASSERT3U(skip_off, ==, rr->rr_nempty * skip_size);
883 }
884 
885 /*
886  * Normal reads.  In this common case only the columns containing data
887  * are read into the zio ABDs.  Neither the parity columns nor the empty
888  * skip sectors are read unless the checksum fails verification, in which
889  * case vdev_raidz_read_all() will call vdev_draid_map_alloc_empty() to
890  * expand the raid map in order to allow reconstruction using the parity
891  * data and skip sectors.
892  */
893 static void
894 vdev_draid_map_alloc_read(zio_t *zio, uint64_t abd_offset, raidz_row_t *rr)
895 {
896 	uint64_t abd_off = abd_offset;
897 
898 	ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
899 
900 	for (uint64_t c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
901 		raidz_col_t *rc = &rr->rr_col[c];
902 
903 		if (rc->rc_size > 0) {
904 			rc->rc_abd = abd_get_offset_struct(&rc->rc_abdstruct,
905 			    zio->io_abd, abd_off, rc->rc_size);
906 			abd_off += rc->rc_size;
907 		}
908 	}
909 
910 	IMPLY(abd_offset != 0, abd_off == zio->io_size);
911 }
912 
913 /*
914  * Converts a normal "read" raidz_row_t to a "scrub" raidz_row_t. The key
915  * difference is that an ABD is allocated to back skip sectors so they may
916  * be read into memory, verified, and repaired if needed.
917  */
918 void
919 vdev_draid_map_alloc_empty(zio_t *zio, raidz_row_t *rr)
920 {
921 	uint64_t skip_size = 1ULL << zio->io_vd->vdev_top->vdev_ashift;
922 	uint64_t parity_size = rr->rr_col[0].rc_size;
923 	uint64_t skip_off = 0;
924 
925 	ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
926 	ASSERT0P(rr->rr_abd_empty);
927 
928 	if (rr->rr_nempty > 0) {
929 		rr->rr_abd_empty = abd_alloc_linear(rr->rr_nempty * skip_size,
930 		    B_FALSE);
931 	}
932 
933 	for (uint64_t c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
934 		raidz_col_t *rc = &rr->rr_col[c];
935 
936 		if (rc->rc_size == 0) {
937 			/* empty data column (small read), add a skip sector */
938 			ASSERT3U(skip_size, ==, parity_size);
939 			ASSERT3U(rr->rr_nempty, !=, 0);
940 			ASSERT0P(rc->rc_abd);
941 			rc->rc_abd = abd_get_offset_size(rr->rr_abd_empty,
942 			    skip_off, skip_size);
943 			skip_off += skip_size;
944 		} else if (rc->rc_size == parity_size) {
945 			/* this is a "big column", nothing to add */
946 			ASSERT3P(rc->rc_abd, !=, NULL);
947 		} else {
948 			/*
949 			 * short data column, add a skip sector and clear
950 			 * rc_tried to force the entire column to be re-read,
951 			 * thereby including the missing skip sector data,
952 			 * which is needed for reconstruction.
953 			 */
954 			ASSERT3U(rc->rc_size + skip_size, ==, parity_size);
955 			ASSERT3U(rr->rr_nempty, !=, 0);
956 			ASSERT3P(rc->rc_abd, !=, NULL);
957 			ASSERT(!abd_is_gang(rc->rc_abd));
958 			abd_t *read_abd = rc->rc_abd;
959 			rc->rc_abd = abd_alloc_gang();
960 			abd_gang_add(rc->rc_abd, read_abd, B_TRUE);
961 			abd_gang_add(rc->rc_abd, abd_get_offset_size(
962 			    rr->rr_abd_empty, skip_off, skip_size), B_TRUE);
963 			skip_off += skip_size;
964 			rc->rc_tried = 0;
965 		}
966 
967 		/*
968 		 * Increase rc_size so the empty ABD is included in subsequent
969 		 * parity calculations.
970 		 */
971 		rc->rc_size = parity_size;
972 	}
973 
974 	ASSERT3U(skip_off, ==, rr->rr_nempty * skip_size);
975 }
976 
977 /*
978  * Verify that all empty sectors are zero filled before using them to
979  * calculate parity.  Otherwise, silent corruption in an empty sector will
980  * result in bad parity being generated.  That bad parity will then be
981  * considered authoritative and overwrite the good parity on disk.  This
982  * is possible because the checksum is only calculated over the data,
983  * thus it cannot be used to detect damage in empty sectors.
984  */
985 int
986 vdev_draid_map_verify_empty(zio_t *zio, raidz_row_t *rr)
987 {
988 	uint64_t skip_size = 1ULL << zio->io_vd->vdev_top->vdev_ashift;
989 	uint64_t parity_size = rr->rr_col[0].rc_size;
990 	uint64_t skip_off = parity_size - skip_size;
991 	uint64_t empty_off = 0;
992 	int ret = 0;
993 
994 	ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
995 	ASSERT3P(rr->rr_abd_empty, !=, NULL);
996 	ASSERT3U(rr->rr_bigcols, >, 0);
997 
998 	void *zero_buf = kmem_zalloc(skip_size, KM_SLEEP);
999 
1000 	for (int c = rr->rr_bigcols; c < rr->rr_cols; c++) {
1001 		raidz_col_t *rc = &rr->rr_col[c];
1002 
1003 		ASSERT3P(rc->rc_abd, !=, NULL);
1004 		ASSERT3U(rc->rc_size, ==, parity_size);
1005 
1006 		if (abd_cmp_buf_off(rc->rc_abd, zero_buf, skip_off,
1007 		    skip_size) != 0) {
1008 			vdev_raidz_checksum_error(zio, rc, rc->rc_abd);
1009 			abd_zero_off(rc->rc_abd, skip_off, skip_size);
1010 			rc->rc_error = SET_ERROR(ECKSUM);
1011 			ret++;
1012 		}
1013 
1014 		empty_off += skip_size;
1015 	}
1016 
1017 	ASSERT3U(empty_off, ==, abd_get_size(rr->rr_abd_empty));
1018 
1019 	kmem_free(zero_buf, skip_size);
1020 
1021 	return (ret);
1022 }
1023 
1024 /*
1025  * Given a logical address within a dRAID configuration, return the physical
1026  * address on the first drive in the group that this address maps to
1027  * (at position 'start' in permutation number 'perm').
1028  */
1029 static uint64_t
1030 vdev_draid_logical_to_physical(vdev_t *vd, uint64_t logical_offset,
1031     uint64_t *perm, uint64_t *start, uint64_t *ndisks)
1032 {
1033 	vdev_draid_config_t *vdc = vd->vdev_tsd;
1034 
1035 	/* b_offset is the dRAID (parent) sector offset. */
1036 	uint64_t ashift = vd->vdev_top->vdev_ashift;
1037 	uint64_t b_offset = logical_offset >> ashift;
1038 
1039 	/*
1040 	 * The height of a row in units of the vdev's minimum sector size.
1041 	 * This is the amount of data written to each disk of each group
1042 	 * in a given permutation.
1043 	 */
1044 	uint64_t rowheight_sectors = VDEV_DRAID_ROWHEIGHT >> ashift;
1045 
1046 	/*
1047 	 * We cycle through a disk permutation every groupsz * ngroups chunk
1048 	 * of address space. Note that ngroups * groupsz must be a multiple
1049 	 * of the number of data drives (ndisks) in order to guarantee
1050 	 * alignment. So, for example, if our row height is 16MB, our group
1051 	 * size is 10, and there are 13 data drives in the draid, then ngroups
1052 	 * will be 13, we will change permutation every 2.08GB and each
1053 	 * disk will have 160MB of data per chunk.
1054 	 */
1055 	uint64_t groupwidth = vdc->vdc_groupwidth;
1056 	uint64_t ngroups = vdc->vdc_ngroups;
1057 
1058 	uint64_t group = logical_offset / vdc->vdc_groupsz;
1059 	uint64_t fgrps = vdc->vdc_width / vdc->vdc_children;
1060 
1061 	*perm = (group / ngroups) * fgrps;
1062 
1063 	/*
1064 	 * Failure groups starting from index (vdc_nspares % fgrps) have one
1065 	 * less spare, and therefore one more disk counted in ndisks.
1066 	 */
1067 	uint64_t biggies = vdc->vdc_nspares % fgrps;
1068 
1069 	uint64_t poff = 0;
1070 	group %= ngroups;
1071 	uint64_t ngroups1 = ngroups / fgrps;
1072 	if (!biggies || group < biggies * ngroups1)
1073 		poff = group / ngroups1;
1074 	else
1075 		poff = biggies +
1076 		    (group - (biggies * ngroups1)) / (ngroups1 + 1);
1077 	ASSERT3U(poff, <, fgrps);
1078 	*perm += poff;
1079 
1080 	*ndisks = (vdc->vdc_ndisks / fgrps) +
1081 	    (biggies ? ((poff >= biggies) ? 1 : 0) : 0);
1082 
1083 	/* b_offset is the sector offset within a group chunk */
1084 	b_offset = b_offset % (rowheight_sectors * groupwidth);
1085 	ASSERT0(b_offset % groupwidth);
1086 
1087 	/*
1088 	 * Find the starting byte offset on each child vdev:
1089 	 * - within a permutation there are ngroups groups spread over the
1090 	 *   rows, where each row covers a slice portion of the disk
1091 	 * - each permutation has (groupwidth * ngroups) / ndisks rows
1092 	 * - so each permutation covers rows * slice portion of the disk
1093 	 * - so we need to find the row where this IO group target begins
1094 	 */
1095 	uint64_t perm_rows = (groupwidth * ngroups) / vdc->vdc_ndisks;
1096 
1097 	/* Adjust group for our failure group. */
1098 	if (!biggies || poff <= biggies)
1099 		group -= poff * ngroups1;
1100 	else
1101 		group -= (biggies * ngroups1) +
1102 		    (poff - biggies) * (ngroups1 + 1);
1103 
1104 	IMPLY(poff < biggies, group < ngroups1);
1105 	ASSERT3U(group, <=, ngroups1);
1106 
1107 	/*
1108 	 * groupstart is where the group this IO will land in "starts" in
1109 	 * the permutation array.
1110 	 */
1111 	uint64_t groupstart = (group * groupwidth) % *ndisks;
1112 	ASSERT3U(groupstart + groupwidth, <=, *ndisks + groupstart);
1113 	*start = groupstart;
1114 
1115 	/* Adjust ngroups for our failure group. */
1116 	ngroups = ngroups1 + ((biggies && poff >= biggies) ? 1 : 0);
1117 
1118 	ASSERT3U(group, <, ngroups);
1119 
1120 	uint64_t row = ((*perm / fgrps) * perm_rows) +
1121 	    (((group % ngroups) * groupwidth) / *ndisks);
1122 
1123 	return (((rowheight_sectors * row) +
1124 	    (b_offset / groupwidth)) << ashift);
1125 }
1126 
1127 static uint64_t
1128 vdev_draid_map_alloc_row(zio_t *zio, raidz_row_t **rrp, uint64_t io_offset,
1129     uint64_t abd_offset, uint64_t abd_size)
1130 {
1131 	vdev_t *vd = zio->io_vd;
1132 	vdev_draid_config_t *vdc = vd->vdev_tsd;
1133 	uint64_t ashift = vd->vdev_top->vdev_ashift;
1134 	uint64_t io_size = abd_size;
1135 	uint64_t io_asize = vdev_draid_psize_to_asize(vd, io_size, 0);
1136 	uint64_t group = vdev_draid_offset_to_group(vd, io_offset);
1137 	uint64_t start_offset = vdev_draid_group_to_offset(vd, group + 1);
1138 
1139 	/*
1140 	 * Limit the io_size to the space remaining in the group.  A second
1141 	 * row in the raidz_map_t is created for the remainder.
1142 	 */
1143 	if (io_offset + io_asize > start_offset) {
1144 		io_size = vdev_draid_asize_to_psize(vd,
1145 		    start_offset - io_offset, 0);
1146 	}
1147 
1148 	/*
1149 	 * At most a block may span the logical end of one group and the start
1150 	 * of the next group. Therefore, at the end of a group the io_size must
1151 	 * span the group width evenly and the remainder must be aligned to the
1152 	 * start of the next group.
1153 	 */
1154 	IMPLY(abd_offset == 0 && io_size < zio->io_size,
1155 	    (io_asize >> ashift) % vdc->vdc_groupwidth == 0);
1156 	IMPLY(abd_offset != 0,
1157 	    vdev_draid_group_to_offset(vd, group) == io_offset);
1158 
1159 	/* Lookup starting byte offset on each child vdev */
1160 	uint64_t groupstart, perm, ndisks;
1161 	uint64_t physical_offset = vdev_draid_logical_to_physical(vd,
1162 	    io_offset, &perm, &groupstart, &ndisks);
1163 
1164 	/*
1165 	 * If there are fewer than groupwidth drives available after the group
1166 	 * start, the group is going to wrap onto the next row. 'wrap' is the
1167 	 * group disk number that starts on the next row.
1168 	 */
1169 	uint64_t groupwidth = vdc->vdc_groupwidth;
1170 	uint64_t wrap = groupwidth;
1171 
1172 	if (groupstart + groupwidth > ndisks)
1173 		wrap = ndisks - groupstart;
1174 
1175 	/* The io size in units of the vdev's minimum sector size. */
1176 	const uint64_t psize = io_size >> ashift;
1177 
1178 	/*
1179 	 * "Quotient": The number of data sectors for this stripe on all but
1180 	 * the "big column" child vdevs that also contain "remainder" data.
1181 	 */
1182 	uint64_t q = psize / vdc->vdc_ndata;
1183 
1184 	/*
1185 	 * "Remainder": The number of partial stripe data sectors in this I/O.
1186 	 * This will add a sector to some, but not all, child vdevs.
1187 	 */
1188 	uint64_t r = psize - q * vdc->vdc_ndata;
1189 
1190 	/* The number of "big columns" - those which contain remainder data. */
1191 	uint64_t bc = (r == 0 ? 0 : r + vdc->vdc_nparity);
1192 	ASSERT3U(bc, <, groupwidth);
1193 
1194 	/* The total number of data and parity sectors for this I/O. */
1195 	uint64_t tot = psize + (vdc->vdc_nparity * (q + (r == 0 ? 0 : 1)));
1196 
1197 	ASSERT3U(vdc->vdc_nparity, >, 0);
1198 
1199 	raidz_row_t *rr = vdev_raidz_row_alloc(groupwidth, zio);
1200 	rr->rr_bigcols = bc;
1201 	rr->rr_firstdatacol = vdc->vdc_nparity;
1202 #ifdef ZFS_DEBUG
1203 	rr->rr_offset = io_offset;
1204 	rr->rr_size = io_size;
1205 #endif
1206 	*rrp = rr;
1207 
1208 	uint8_t *base;
1209 	uint64_t iter, asize = 0;
1210 	vdev_draid_get_perm(vdc, perm, &base, &iter);
1211 	for (uint64_t i = 0; i < groupwidth; i++) {
1212 		raidz_col_t *rc = &rr->rr_col[i];
1213 		uint64_t c = (groupstart + i) % ndisks;
1214 
1215 		/* increment the offset if we wrap to the next row */
1216 		if (i == wrap)
1217 			physical_offset += VDEV_DRAID_ROWHEIGHT;
1218 
1219 		rc->rc_devidx = vdev_draid_permute_id(vdc, base, iter, c);
1220 		rc->rc_offset = physical_offset;
1221 
1222 		if (q == 0 && i >= bc)
1223 			rc->rc_size = 0;
1224 		else if (i < bc)
1225 			rc->rc_size = (q + 1) << ashift;
1226 		else
1227 			rc->rc_size = q << ashift;
1228 
1229 		asize += rc->rc_size;
1230 	}
1231 
1232 	ASSERT3U(asize, ==, tot << ashift);
1233 	rr->rr_nempty = roundup(tot, groupwidth) - tot;
1234 	IMPLY(bc > 0, rr->rr_nempty == groupwidth - bc);
1235 
1236 	/* Allocate buffers for the parity columns */
1237 	for (uint64_t c = 0; c < rr->rr_firstdatacol; c++) {
1238 		raidz_col_t *rc = &rr->rr_col[c];
1239 		rc->rc_abd = abd_alloc_linear(rc->rc_size, B_FALSE);
1240 	}
1241 
1242 	/*
1243 	 * Map buffers for data columns and allocate/map buffers for skip
1244 	 * sectors.  There are three distinct cases for dRAID which are
1245 	 * required to support sequential rebuild.
1246 	 */
1247 	if (zio->io_type == ZIO_TYPE_WRITE) {
1248 		vdev_draid_map_alloc_write(zio, abd_offset, rr);
1249 	} else if ((rr->rr_nempty > 0) &&
1250 	    (zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER))) {
1251 		vdev_draid_map_alloc_scrub(zio, abd_offset, rr);
1252 	} else {
1253 		ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
1254 		vdev_draid_map_alloc_read(zio, abd_offset, rr);
1255 	}
1256 
1257 	return (io_size);
1258 }
1259 
1260 /*
1261  * Allocate the raidz mapping to be applied to the dRAID I/O.  The parity
1262  * calculations for dRAID are identical to raidz however there are a few
1263  * differences in the layout.
1264  *
1265  * - dRAID always allocates a full stripe width. Any extra sectors due
1266  *   to this padding are zero filled and written to disk. They will be
1267  *   read back during a scrub or repair operation since they are included
1268  *   in the parity calculation. This property enables sequential resilvering.
1269  *
1270  * - When the block at the logical offset spans redundancy groups then two
1271  *   rows are allocated in the raidz_map_t. One row resides at the end of
1272  *   the first group and the other at the start of the following group.
1273  */
1274 static raidz_map_t *
1275 vdev_draid_map_alloc(zio_t *zio)
1276 {
1277 	raidz_row_t *rr[2];
1278 	uint64_t abd_offset = 0;
1279 	uint64_t abd_size = zio->io_size;
1280 	uint64_t io_offset = zio->io_offset;
1281 	uint64_t size;
1282 	int nrows = 1;
1283 
1284 	size = vdev_draid_map_alloc_row(zio, &rr[0], io_offset,
1285 	    abd_offset, abd_size);
1286 	if (size < abd_size) {
1287 		vdev_t *vd = zio->io_vd;
1288 
1289 		io_offset += vdev_draid_psize_to_asize(vd, size, 0);
1290 		abd_offset += size;
1291 		abd_size -= size;
1292 		nrows++;
1293 
1294 		ASSERT3U(io_offset, ==, vdev_draid_group_to_offset(
1295 		    vd, vdev_draid_offset_to_group(vd, io_offset)));
1296 		ASSERT3U(abd_offset, <, zio->io_size);
1297 		ASSERT3U(abd_size, !=, 0);
1298 
1299 		size = vdev_draid_map_alloc_row(zio, &rr[1],
1300 		    io_offset, abd_offset, abd_size);
1301 		VERIFY3U(size, ==, abd_size);
1302 	}
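
	/*
	 * Illustrative example (values assumed): with an 8+2 group and 4K
	 * sectors, a 1M block beginning 512K before the logical end of its
	 * group is split in two: rr[0] maps the first 512K at the end of
	 * that group and rr[1] maps the remaining 512K at the start of the
	 * next group.
	 */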
1303 
1304 	raidz_map_t *rm;
1305 	rm = kmem_zalloc(offsetof(raidz_map_t, rm_row[nrows]), KM_SLEEP);
1306 	rm->rm_ops = vdev_raidz_math_get_ops();
1307 	rm->rm_nrows = nrows;
1308 	rm->rm_row[0] = rr[0];
1309 	if (nrows == 2)
1310 		rm->rm_row[1] = rr[1];
1311 	return (rm);
1312 }
1313 
1314 /*
1315  * Given an offset into a dRAID, return the next group-width-aligned
1316  * offset which can be used to start an allocation.
1317  */
1318 static uint64_t
1319 vdev_draid_get_astart(vdev_t *vd, const uint64_t start)
1320 {
1321 	vdev_draid_config_t *vdc = vd->vdev_tsd;
1322 
1323 	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
1324 
1325 	return (roundup(start, vdc->vdc_groupwidth << vd->vdev_ashift));
1326 }
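
/*
 * For example (values assumed): with groupwidth = 10 and a 4K ashift the
 * allocation alignment is 40K, so a start offset of 100K rounds up to 120K.
 */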
1327 
1328 /*
1329  * Allocatable space for dRAID is (children - nspares) * sizeof(smallest child)
1330  * rounded down to the last full slice.  So each child must provide at least
1331  * 1 / (children - nspares) of its asize rounded up to VDEV_DRAID_ROWHEIGHT.
1332  */
1333 static uint64_t
1334 vdev_draid_min_asize(vdev_t *vd)
1335 {
1336 	vdev_draid_config_t *vdc = vd->vdev_tsd;
1337 
1338 	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
1339 
1340 	return (VDEV_DRAID_REFLOW_RESERVE +
1341 	    DIV_ROUND_UP(DIV_ROUND_UP(vd->vdev_min_asize, vdc->vdc_ndisks),
1342 	    VDEV_DRAID_ROWHEIGHT) * VDEV_DRAID_ROWHEIGHT);
1343 }
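
/*
 * For example (values assumed): with vdev_min_asize = 12G and ndisks = 12,
 * each child must provide DIV_ROUND_UP(DIV_ROUND_UP(12G, 12), 16M) * 16M = 1G
 * plus the 32M reflow reserve described in vdev_draid_open() below.
 */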
1344 
1345 /*
1346  * When using dRAID the minimum allocation size is determined by the number
1347  * of data disks in the redundancy group.  Full stripes are always used.
1348  */
1349 static uint64_t
1350 vdev_draid_min_alloc(vdev_t *vd)
1351 {
1352 	vdev_draid_config_t *vdc = vd->vdev_tsd;
1353 
1354 	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
1355 
1356 	return (vdc->vdc_ndata << vd->vdev_ashift);
1357 }
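
/*
 * For example (values assumed): with ndata = 8 and a 4K ashift the minimum
 * allocation is 8 << 12 = 32K, i.e. one sector from every data column.
 */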
1358 
1359 /*
1360  * Returns false if the txg range exists on any leaf vdev, true otherwise.
1361  *
1362  * A dRAID spare does not fit into the DTL model. While it has child vdevs
1363  * there is no redundancy among them, and the effective child vdev is
1364  * determined by offset. Essentially we do a vdev_dtl_reassess() on the
1365  * fly by replacing a dRAID spare with the child vdev under the offset.
1366  * Note that it is a recursive process because the child vdev can be
1367  * another dRAID spare and so on.
1368  */
1369 boolean_t
1370 vdev_draid_missing(vdev_t *vd, uint64_t physical_offset, uint64_t txg,
1371     uint64_t size)
1372 {
1373 	if (vd->vdev_ops == &vdev_spare_ops ||
1374 	    vd->vdev_ops == &vdev_replacing_ops) {
1375 		/*
1376 		 * Check all of the readable children; if any child
1377 		 * contains the txg range then the data is not missing.
1378 		 */
1379 		for (int c = 0; c < vd->vdev_children; c++) {
1380 			vdev_t *cvd = vd->vdev_child[c];
1381 
1382 			if (!vdev_readable(cvd))
1383 				continue;
1384 
1385 			if (!vdev_draid_missing(cvd, physical_offset,
1386 			    txg, size))
1387 				return (B_FALSE);
1388 		}
1389 
1390 		return (B_TRUE);
1391 	}
1392 
1393 	if (vd->vdev_ops == &vdev_draid_spare_ops) {
1394 		/*
1395 		 * When sequentially resilvering we don't have a proper
1396 		 * txg range so instead we must presume all txgs are
1397 		 * missing on this vdev until the resilver completes.
1398 		 */
1399 		if (vd->vdev_rebuild_txg != 0)
1400 			return (B_TRUE);
1401 
1402 		/*
1403 		 * DTL_MISSING is set for all prior txgs when a resilver
1404 		 * is started in spa_vdev_attach().
1405 		 */
1406 		if (vdev_dtl_contains(vd, DTL_MISSING, txg, size))
1407 			return (B_TRUE);
1408 
1409 		/*
1410 		 * Consult the DTL on the relevant vdev. Either a vdev
1411 		 * leaf or spare/replace mirror child may be returned so
1412 		 * we must recursively call vdev_draid_missing().
1413 		 */
1414 		vd = vdev_draid_spare_get_child(vd, physical_offset);
1415 		if (vd == NULL)
1416 			return (B_TRUE);
1417 
1418 		return (vdev_draid_missing(vd, physical_offset, txg, size));
1419 	}
1420 
1421 	return (vdev_dtl_contains(vd, DTL_MISSING, txg, size));
1422 }
1423 
1424 /*
1425  * Returns true if the txg is only partially replicated on the leaf vdevs.
1426  */
1427 static boolean_t
1428 vdev_draid_partial(vdev_t *vd, uint64_t physical_offset, uint64_t txg,
1429     uint64_t size)
1430 {
1431 	if (vd->vdev_ops == &vdev_spare_ops ||
1432 	    vd->vdev_ops == &vdev_replacing_ops) {
1433 		/*
1434 		 * Check all of the readable children; if any child is
1435 		 * missing the txg range then it is partially replicated.
1436 		 */
1437 		for (int c = 0; c < vd->vdev_children; c++) {
1438 			vdev_t *cvd = vd->vdev_child[c];
1439 
1440 			if (!vdev_readable(cvd))
1441 				continue;
1442 
1443 			if (vdev_draid_partial(cvd, physical_offset, txg, size))
1444 				return (B_TRUE);
1445 		}
1446 
1447 		return (B_FALSE);
1448 	}
1449 
1450 	if (vd->vdev_ops == &vdev_draid_spare_ops) {
1451 		/*
1452 		 * When sequentially resilvering we don't have a proper
1453 		 * txg range so instead we must presume all txgs are
1454 		 * missing on this vdev until the resilver completes.
1455 		 */
1456 		if (vd->vdev_rebuild_txg != 0)
1457 			return (B_TRUE);
1458 
1459 		/*
1460 		 * DTL_MISSING is set for all prior txgs when a resilver
1461 		 * is started in spa_vdev_attach().
1462 		 */
1463 		if (vdev_dtl_contains(vd, DTL_MISSING, txg, size))
1464 			return (B_TRUE);
1465 
1466 		/*
1467 		 * Consult the DTL on the relevant vdev. Either a vdev
1468 		 * leaf or spare/replace mirror child may be returned so
1469 		 * we must recursively call vdev_draid_partial().
1470 		 */
1471 		vd = vdev_draid_spare_get_child(vd, physical_offset);
1472 		if (vd == NULL)
1473 			return (B_TRUE);
1474 
1475 		return (vdev_draid_partial(vd, physical_offset, txg, size));
1476 	}
1477 
1478 	return (vdev_dtl_contains(vd, DTL_MISSING, txg, size));
1479 }
1480 
1481 /*
1482  * Determine if the vdev is readable at the given offset.
1483  */
1484 boolean_t
1485 vdev_draid_readable(vdev_t *vd, uint64_t physical_offset)
1486 {
1487 	if (vd->vdev_ops == &vdev_draid_spare_ops) {
1488 		vd = vdev_draid_spare_get_child(vd, physical_offset);
1489 		if (vd == NULL)
1490 			return (B_FALSE);
1491 	}
1492 
1493 	if (vd->vdev_ops == &vdev_spare_ops ||
1494 	    vd->vdev_ops == &vdev_replacing_ops) {
1495 
1496 		for (int c = 0; c < vd->vdev_children; c++) {
1497 			vdev_t *cvd = vd->vdev_child[c];
1498 
1499 			if (!vdev_readable(cvd))
1500 				continue;
1501 
1502 			if (vdev_draid_readable(cvd, physical_offset))
1503 				return (B_TRUE);
1504 		}
1505 
1506 		return (B_FALSE);
1507 	}
1508 
1509 	return (vdev_readable(vd));
1510 }
1511 
1512 /*
1513  * Returns the first distributed spare found under the provided vdev tree.
1514  */
1515 static vdev_t *
1516 vdev_draid_find_spare(vdev_t *vd)
1517 {
1518 	if (vd->vdev_ops == &vdev_draid_spare_ops)
1519 		return (vd);
1520 
1521 	for (int c = 0; c < vd->vdev_children; c++) {
1522 		vdev_t *svd = vdev_draid_find_spare(vd->vdev_child[c]);
1523 		if (svd != NULL)
1524 			return (svd);
1525 	}
1526 
1527 	return (NULL);
1528 }
1529 
1530 /*
1531  * Returns B_TRUE if the passed in vdev is currently "faulted".
1532  * Faulted, in this context, means that the vdev represents a
1533  * replacing or sparing vdev tree.
1534  */
1535 static boolean_t
1536 vdev_draid_faulted(vdev_t *vd, uint64_t physical_offset)
1537 {
1538 	if (vd->vdev_ops == &vdev_draid_spare_ops) {
1539 		vd = vdev_draid_spare_get_child(vd, physical_offset);
1540 		if (vd == NULL)
1541 			return (B_FALSE);
1542 
1543 		/*
1544 		 * After resolving the distributed spare to a leaf vdev
1545 		 * check the parent to determine if it's "faulted".
1546 		 */
1547 		vd = vd->vdev_parent;
1548 	}
1549 
1550 	return (vd->vdev_ops == &vdev_replacing_ops ||
1551 	    vd->vdev_ops == &vdev_spare_ops);
1552 }
1553 
1554 /*
1555  * Determine if the dRAID block at the logical offset is degraded.
1556  * Used by sequential resilver.
1557  */
1558 static boolean_t
1559 vdev_draid_group_degraded(vdev_t *vd, uint64_t offset)
1560 {
1561 	vdev_draid_config_t *vdc = vd->vdev_tsd;
1562 
1563 	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
1564 	ASSERT3U(vdev_draid_get_astart(vd, offset), ==, offset);
1565 
1566 	uint64_t groupstart, perm, ndisks;
1567 	uint64_t physical_offset = vdev_draid_logical_to_physical(vd,
1568 	    offset, &perm, &groupstart, &ndisks);
1569 
1570 	uint8_t *base;
1571 	uint64_t iter;
1572 	vdev_draid_get_perm(vdc, perm, &base, &iter);
1573 
1574 	for (uint64_t i = 0; i < vdc->vdc_groupwidth; i++) {
1575 		uint64_t c = (groupstart + i) % ndisks;
1576 		uint64_t cid = vdev_draid_permute_id(vdc, base, iter, c);
1577 		vdev_t *cvd = vd->vdev_child[cid];
1578 
1579 		/* Group contains a faulted vdev. */
1580 		if (vdev_draid_faulted(cvd, physical_offset))
1581 			return (B_TRUE);
1582 
1583 		/*
1584 		 * Always check groups with active distributed spares
1585 		 * because any vdev failure in the pool will affect them.
1586 		 */
1587 		if (vdev_draid_find_spare(cvd) != NULL)
1588 			return (B_TRUE);
1589 	}
1590 
1591 	return (B_FALSE);
1592 }
1593 
1594 /*
1595  * Determine if the txg is missing.  Used by healing resilver.
1596  */
1597 static boolean_t
1598 vdev_draid_group_missing(vdev_t *vd, uint64_t offset, uint64_t txg,
1599     uint64_t size)
1600 {
1601 	vdev_draid_config_t *vdc = vd->vdev_tsd;
1602 
1603 	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
1604 	ASSERT3U(vdev_draid_get_astart(vd, offset), ==, offset);
1605 
1606 	uint64_t groupstart, perm, ndisks;
1607 	uint64_t physical_offset = vdev_draid_logical_to_physical(vd,
1608 	    offset, &perm, &groupstart, &ndisks);
1609 
1610 	uint8_t *base;
1611 	uint64_t iter;
1612 	vdev_draid_get_perm(vdc, perm, &base, &iter);
1613 
1614 	for (uint64_t i = 0; i < vdc->vdc_groupwidth; i++) {
1615 		uint64_t c = (groupstart + i) % ndisks;
1616 		uint64_t cid = vdev_draid_permute_id(vdc, base, iter, c);
1617 		vdev_t *cvd = vd->vdev_child[cid];
1618 
1619 		/* Transaction group is known to be partially replicated. */
1620 		if (vdev_draid_partial(cvd, physical_offset, txg, size))
1621 			return (B_TRUE);
1622 	}
1623 
1624 	return (B_FALSE);
1625 }
1626 
1627 /*
1628  * Find the smallest child asize and largest sector size to calculate the
1629  * available capacity.  Distributed spares are ignored since their capacity
1630  * is also based on the minimum child size in the top-level dRAID.
1631  */
1632 static void
1633 vdev_draid_calculate_asize(vdev_t *vd, uint64_t *asizep, uint64_t *max_asizep,
1634     uint64_t *logical_ashiftp, uint64_t *physical_ashiftp)
1635 {
1636 	uint64_t logical_ashift = 0, physical_ashift = 0;
1637 	uint64_t asize = 0, max_asize = 0;
1638 
1639 	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
1640 
1641 	for (int c = 0; c < vd->vdev_children; c++) {
1642 		vdev_t *cvd = vd->vdev_child[c];
1643 
1644 		if (cvd->vdev_ops == &vdev_draid_spare_ops)
1645 			continue;
1646 
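		/*
		 * Note: asize and max_asize start at 0, so (asize - 1)
		 * underflows to UINT64_MAX and the first non-spare child
		 * establishes the initial minimum.
		 */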
1647 		asize = MIN(asize - 1, cvd->vdev_asize - 1) + 1;
1648 		max_asize = MIN(max_asize - 1, cvd->vdev_max_asize - 1) + 1;
1649 		logical_ashift = MAX(logical_ashift, cvd->vdev_ashift);
1650 	}
1651 	for (int c = 0; c < vd->vdev_children; c++) {
1652 		vdev_t *cvd = vd->vdev_child[c];
1653 
1654 		if (cvd->vdev_ops == &vdev_draid_spare_ops)
1655 			continue;
1656 		physical_ashift = vdev_best_ashift(logical_ashift,
1657 		    physical_ashift, cvd->vdev_physical_ashift);
1658 	}
1659 
1660 	*asizep = asize;
1661 	*max_asizep = max_asize;
1662 	*logical_ashiftp = logical_ashift;
1663 	*physical_ashiftp = physical_ashift;
1664 }
1665 
1666 /*
1667  * Open spare vdevs.
1668  */
1669 static boolean_t
1670 vdev_draid_open_spares(vdev_t *vd)
1671 {
1672 	return (vd->vdev_ops == &vdev_draid_spare_ops ||
1673 	    vd->vdev_ops == &vdev_replacing_ops ||
1674 	    vd->vdev_ops == &vdev_spare_ops);
1675 }
1676 
1677 /*
1678  * Open all children, excluding spares.
1679  */
1680 static boolean_t
1681 vdev_draid_open_children(vdev_t *vd)
1682 {
1683 	return (!vdev_draid_open_spares(vd));
1684 }
1685 
1686 /*
1687  * Open a top-level dRAID vdev.
1688  */
1689 static int
1690 vdev_draid_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
1691     uint64_t *logical_ashift, uint64_t *physical_ashift)
1692 {
1693 	vdev_draid_config_t *vdc = vd->vdev_tsd;
1694 	uint64_t nparity = vdc->vdc_nparity;
1695 	int open_errors = 0;
1696 
1697 	if (nparity > VDEV_DRAID_MAXPARITY ||
1698 	    vdc->vdc_children < nparity + 1) {
1699 		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
1700 		return (SET_ERROR(EINVAL));
1701 	}
1702 
1703 	/*
1704 	 * First open the normal children then the distributed spares.  This
1705 	 * ordering is important to ensure the distributed spares calculate
1706 	 * the correct psize in the event that the dRAID vdevs were expanded.
1707 	 */
1708 	vdev_open_children_subset(vd, vdev_draid_open_children);
1709 	vdev_open_children_subset(vd, vdev_draid_open_spares);
1710 
1711 	/*
1712 	 * Verify enough of the children are available to continue.
1713 	 * When several disks fail at the i-th position in each slice of the
1714 	 * big width row (a failure group), they are counted as one failure,
1715 	 * but only if the failure threshold is not reached in any group.
1716 	 */
1717 	boolean_t safe2skip = B_FALSE;
1718 	if (vdc->vdc_width > vdc->vdc_children &&
1719 	    vdev_draid_fail_domain_allowed(vd))
1720 		safe2skip = B_TRUE;
1721 	for (int c = 0; c < vdc->vdc_children; c++) {
1722 		for (int i = c; i < vdc->vdc_width; i += vdc->vdc_children) {
1723 			if (vd->vdev_child[i]->vdev_open_error != 0) {
1724 				if ((++open_errors) > nparity) {
1725 					vd->vdev_stat.vs_aux =
1726 					    VDEV_AUX_NO_REPLICAS;
1727 					return (SET_ERROR(ENXIO));
1728 				}
1729 				if (safe2skip)
1730 					break;
1731 			}
1732 		}
1733 	}
1734 
1735 	/*
1736 	 * Allocatable capacity is the sum of the space on all children less
1737 	 * the number of distributed spares, rounded down to the last full row
1738 	 * and then to the last full group. An additional 32MB of scratch
1739 	 * space is reserved at the end of each child for use by the dRAID
1740 	 * expansion feature.
1741 	 */
1742 	uint64_t child_asize, child_max_asize;
1743 	vdev_draid_calculate_asize(vd, &child_asize, &child_max_asize,
1744 	    logical_ashift, physical_ashift);
1745 
1746 	/*
1747 	 * Should be unreachable since the minimum child size is 64MB, but
1748 	 * we want to make sure an underflow absolutely cannot occur here.
1749 	 */
1750 	if (child_asize < VDEV_DRAID_REFLOW_RESERVE ||
1751 	    child_max_asize < VDEV_DRAID_REFLOW_RESERVE) {
1752 		return (SET_ERROR(ENXIO));
1753 	}
1754 
1755 	child_asize = ((child_asize - VDEV_DRAID_REFLOW_RESERVE) /
1756 	    VDEV_DRAID_ROWHEIGHT) * VDEV_DRAID_ROWHEIGHT;
1757 	child_max_asize = ((child_max_asize - VDEV_DRAID_REFLOW_RESERVE) /
1758 	    VDEV_DRAID_ROWHEIGHT) * VDEV_DRAID_ROWHEIGHT;
1759 
1760 	*asize = (((child_asize * vdc->vdc_ndisks) / vdc->vdc_groupsz) *
1761 	    vdc->vdc_groupsz);
1762 	*max_asize = (((child_max_asize * vdc->vdc_ndisks) / vdc->vdc_groupsz) *
1763 	    vdc->vdc_groupsz);
1764 
1765 	/*
1766 	 * For failure groups with multiple slices in the big width row,
1767 	 * round down to the big slice size.
1768 	 */
1769 	if (vdc->vdc_width > vdc->vdc_children) {
1770 		uint64_t slicesz = vdc->vdc_devslicesz * vdc->vdc_ndisks;
1771 		*asize = (*asize / slicesz) * slicesz;
1772 		*max_asize = (*max_asize / slicesz) * slicesz;
1773 	}
1774 
1775 	return (0);
1776 }
1777 
1778 /*
1779  * Close a top-level dRAID vdev.
1780  */
1781 static void
1782 vdev_draid_close(vdev_t *vd)
1783 {
1784 	for (int c = 0; c < vd->vdev_children; c++) {
1785 		if (vd->vdev_child[c] != NULL)
1786 			vdev_close(vd->vdev_child[c]);
1787 	}
1788 }
1789 
1790 /*
1791  * Return the maximum asize for a rebuild zio in the provided range
1792  * given the following constraints.  A dRAID chunk may not:
1793  *
1794  * - Exceed the maximum allowed block size (SPA_MAXBLOCKSIZE), or
1795  * - Span dRAID redundancy groups.
1796  */
1797 static uint64_t
1798 vdev_draid_rebuild_asize(vdev_t *vd, uint64_t start, uint64_t asize,
1799     uint64_t max_segment)
1800 {
1801 	vdev_draid_config_t *vdc = vd->vdev_tsd;
1802 
1803 	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
1804 
1805 	uint64_t ashift = vd->vdev_ashift;
1806 	uint64_t ndata = vdc->vdc_ndata;
1807 	uint64_t psize = MIN(P2ROUNDUP(max_segment * ndata, 1 << ashift),
1808 	    SPA_MAXBLOCKSIZE);
1809 
1810 	ASSERT3U(vdev_draid_get_astart(vd, start), ==, start);
1811 	ASSERT0(asize % (vdc->vdc_groupwidth << ashift));
1812 
1813 	/* Chunks must evenly span all data columns in the group. */
1814 	psize = (((psize >> ashift) / ndata) * ndata) << ashift;
1815 	uint64_t chunk_size = MIN(asize, vdev_psize_to_asize(vd, psize));
1816 
1817 	/* Reduce the chunk size to the group space remaining. */
1818 	uint64_t group = vdev_draid_offset_to_group(vd, start);
1819 	uint64_t left = vdev_draid_group_to_offset(vd, group + 1) - start;
1820 	chunk_size = MIN(chunk_size, left);
1821 
1822 	ASSERT0(chunk_size % (vdc->vdc_groupwidth << ashift));
1823 	ASSERT3U(vdev_draid_offset_to_group(vd, start), ==,
1824 	    vdev_draid_offset_to_group(vd, start + chunk_size - 1));
1825 
1826 	return (chunk_size);
1827 }
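
/*
 * Illustrative example (values assumed): with max_segment = 1M, ndata = 8,
 * nparity = 2 and 4K sectors, psize = MIN(P2ROUNDUP(1M * 8, 4K), 16M) = 8M.
 * Its 2048 sectors already span the data columns evenly, so the chunk is at
 * most the corresponding 10M asize, further clamped to the space remaining
 * in the group.
 */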
1828 
1829 /*
1830  * Align the start of the metaslab to the group width and slightly reduce
1831  * its size to a multiple of the group width.  Since full stripe writes are
1832  * required by dRAID this space is unallocable.  Furthermore, aligning the
1833  * metaslab start is important for vdev initialize and TRIM which both operate
1834  * on metaslab boundaries which vdev_xlate() expects to be aligned.
1835  */
1836 static void
1837 vdev_draid_metaslab_init(vdev_t *vd, uint64_t *ms_start, uint64_t *ms_size)
1838 {
1839 	vdev_draid_config_t *vdc = vd->vdev_tsd;
1840 
1841 	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
1842 
1843 	uint64_t sz = vdc->vdc_groupwidth << vd->vdev_ashift;
1844 	uint64_t astart = vdev_draid_get_astart(vd, *ms_start);
1845 	uint64_t asize = ((*ms_size - (astart - *ms_start)) / sz) * sz;
1846 
1847 	*ms_start = astart;
1848 	*ms_size = asize;
1849 
1850 	ASSERT0(*ms_start % sz);
1851 	ASSERT0(*ms_size % sz);
1852 }
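
/*
 * For example (values assumed): with groupwidth = 10 and a 4K ashift the
 * alignment unit sz is 40K; a metaslab starting at 100K is advanced to
 * 120K and its size, less the 20K shift, is rounded down to a multiple
 * of 40K.
 */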
1853 
1854 /*
1855  * Add virtual dRAID spares to the list of valid spares. In order to accomplish
1856  * this, the existing array must be freed and reallocated with the additional
1857  * entries.
1858  */
1859 int
1860 vdev_draid_spare_create(nvlist_t *nvroot, vdev_t *vd, uint64_t *ndraidp,
1861     uint64_t *nfgroupp, uint64_t next_vdev_id)
1862 {
1863 	uint64_t draid_nspares = 0;
1864 	uint64_t ndraid = 0;
1865 	uint64_t nfgroup = 0;
1866 	int error;
1867 
1868 	for (uint64_t i = 0; i < vd->vdev_children; i++) {
1869 		vdev_t *cvd = vd->vdev_child[i];
1870 
1871 		if (cvd->vdev_ops == &vdev_draid_ops) {
1872 			vdev_draid_config_t *vdc = cvd->vdev_tsd;
1873 			draid_nspares += vdc->vdc_nspares;
1874 			ndraid++;
1875 			if (vdc->vdc_width > vdc->vdc_children)
1876 				nfgroup++;
1877 		}
1878 	}
1879 
1880 	if (draid_nspares == 0) {
1881 		*ndraidp = ndraid;
1882 		*nfgroupp = nfgroup;
1883 		return (0);
1884 	}
1885 
1886 	nvlist_t **old_spares, **new_spares;
1887 	uint_t old_nspares;
1888 	error = nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1889 	    &old_spares, &old_nspares);
1890 	if (error)
1891 		old_nspares = 0;
1892 
1893 	/* Allocate memory and copy the existing spares. */
1894 	new_spares = kmem_alloc(sizeof (nvlist_t *) *
1895 	    (draid_nspares + old_nspares), KM_SLEEP);
1896 	for (uint_t i = 0; i < old_nspares; i++)
1897 		new_spares[i] = fnvlist_dup(old_spares[i]);
1898 
1899 	/* Add new distributed spares to ZPOOL_CONFIG_SPARES. */
1900 	uint64_t n = old_nspares;
1901 	for (uint64_t vdev_id = 0; vdev_id < vd->vdev_children; vdev_id++) {
1902 		vdev_t *cvd = vd->vdev_child[vdev_id];
1903 		char path[64];
1904 
1905 		if (cvd->vdev_ops != &vdev_draid_ops)
1906 			continue;
1907 
1908 		vdev_draid_config_t *vdc = cvd->vdev_tsd;
1909 		uint64_t nspares = vdc->vdc_nspares;
1910 		uint64_t nparity = vdc->vdc_nparity;
1911 
1912 		for (uint64_t spare_id = 0; spare_id < nspares; spare_id++) {
1913 			memset(path, 0, sizeof (path));
1914 			(void) snprintf(path, sizeof (path) - 1,
1915 			    "%s%llu-%llu-%llu", VDEV_TYPE_DRAID,
1916 			    (u_longlong_t)nparity,
1917 			    (u_longlong_t)next_vdev_id + vdev_id,
1918 			    (u_longlong_t)spare_id);
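
			/*
			 * For example (ids assumed): nparity = 2, a
			 * top-level vdev id of 1 and spare_id = 0 yield
			 * the spare path "draid2-1-0".
			 */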
1919 
1920 			nvlist_t *spare = fnvlist_alloc();
1921 			fnvlist_add_string(spare, ZPOOL_CONFIG_PATH, path);
1922 			fnvlist_add_string(spare, ZPOOL_CONFIG_TYPE,
1923 			    VDEV_TYPE_DRAID_SPARE);
1924 			fnvlist_add_uint64(spare, ZPOOL_CONFIG_TOP_GUID,
1925 			    cvd->vdev_guid);
1926 			fnvlist_add_uint64(spare, ZPOOL_CONFIG_SPARE_ID,
1927 			    spare_id);
1928 			fnvlist_add_uint64(spare, ZPOOL_CONFIG_IS_LOG, 0);
1929 			fnvlist_add_uint64(spare, ZPOOL_CONFIG_IS_SPARE, 1);
1930 			fnvlist_add_uint64(spare, ZPOOL_CONFIG_WHOLE_DISK, 1);
1931 			fnvlist_add_uint64(spare, ZPOOL_CONFIG_ASHIFT,
1932 			    cvd->vdev_ashift);
1933 
1934 			new_spares[n] = spare;
1935 			n++;
1936 		}
1937 	}
1938 
1939 	if (n > 0) {
1940 		(void) nvlist_remove_all(nvroot, ZPOOL_CONFIG_SPARES);
1941 		fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1942 		    (const nvlist_t **)new_spares, n);
1943 	}
1944 
1945 	for (int i = 0; i < n; i++)
1946 		nvlist_free(new_spares[i]);
1947 
1948 	kmem_free(new_spares, sizeof (*new_spares) * n);
1949 	*ndraidp = ndraid;
1950 	*nfgroupp = nfgroup;
1951 
1952 	return (0);
1953 }
1954 
1955 /*
1956  * Determine if any portion of the provided block resides on a child vdev
1957  * with a dirty DTL and therefore needs to be resilvered.
1958  */
1959 static boolean_t
1960 vdev_draid_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize,
1961     uint64_t phys_birth)
1962 {
1963 	uint64_t offset = DVA_GET_OFFSET(dva);
1964 	uint64_t asize = vdev_draid_psize_to_asize(vd, psize, 0);
1965 
1966 	if (phys_birth == TXG_UNKNOWN) {
1967 		/*
1968 		 * Sequential resilver.  There is no meaningful phys_birth
1969 		 * for this block; we can only determine if the block resides
1970 		 * in a degraded group, in which case it must be resilvered.
1971 		 */
1972 		ASSERT3U(vdev_draid_offset_to_group(vd, offset), ==,
1973 		    vdev_draid_offset_to_group(vd, offset + asize - 1));
1974 
1975 		return (vdev_draid_group_degraded(vd, offset));
1976 	} else {
1977 		/*
1978 		 * Healing resilver.  TXGs not in DTL_PARTIAL are intact,
1979 		 * as are blocks in non-degraded groups.
1980 		 */
1981 		if (!vdev_dtl_contains(vd, DTL_PARTIAL, phys_birth, 1))
1982 			return (B_FALSE);
1983 
1984 		if (vdev_draid_group_missing(vd, offset, phys_birth, 1))
1985 			return (B_TRUE);
1986 
1987 		/* The block may span groups in which case check both. */
1988 		if (vdev_draid_offset_to_group(vd, offset) !=
1989 		    vdev_draid_offset_to_group(vd, offset + asize - 1)) {
1990 			if (vdev_draid_group_missing(vd,
1991 			    offset + asize, phys_birth, 1))
1992 				return (B_TRUE);
1993 		}
1994 
1995 		return (B_FALSE);
1996 	}
1997 }
1998 
1999 static boolean_t
2000 vdev_draid_rebuilding(vdev_t *vd)
2001 {
2002 	if (vd->vdev_ops->vdev_op_leaf && vd->vdev_rebuild_txg)
2003 		return (B_TRUE);
2004 
2005 	for (int i = 0; i < vd->vdev_children; i++) {
2006 		if (vdev_draid_rebuilding(vd->vdev_child[i])) {
2007 			return (B_TRUE);
2008 		}
2009 	}
2010 
2011 	return (B_FALSE);
2012 }
2013 
2014 static void
2015 vdev_draid_io_verify(vdev_t *vd, raidz_row_t *rr, int col)
2016 {
2017 #ifdef ZFS_DEBUG
2018 	zfs_range_seg64_t logical_rs, physical_rs, remain_rs;
2019 	logical_rs.rs_start = rr->rr_offset;
2020 	logical_rs.rs_end = logical_rs.rs_start +
2021 	    vdev_draid_psize_to_asize(vd, rr->rr_size, 0);
2022 
2023 	raidz_col_t *rc = &rr->rr_col[col];
2024 	vdev_t *cvd = vd->vdev_child[rc->rc_devidx];
2025 
2026 	vdev_xlate(cvd, &logical_rs, &physical_rs, &remain_rs);
2027 	ASSERT(vdev_xlate_is_empty(&remain_rs));
2028 	ASSERT3U(rc->rc_offset, ==, physical_rs.rs_start);
2029 	ASSERT3U(rc->rc_offset, <, physical_rs.rs_end);
2030 	ASSERT3U(rc->rc_offset + rc->rc_size, ==, physical_rs.rs_end);
2031 #endif
2032 }
2033 
2034 /*
2035  * For write operations:
2036  * 1. Generate the parity data
2037  * 2. Create child zio write operations to each column's vdev, for both
2038  *    data and parity.  A gang ABD is allocated by vdev_draid_map_alloc()
2039  *    if a skip sector needs to be added to a column.
2040  */
2041 static void
2042 vdev_draid_io_start_write(zio_t *zio, raidz_row_t *rr)
2043 {
2044 	vdev_t *vd = zio->io_vd;
2045 	raidz_map_t *rm = zio->io_vsd;
2046 
2047 	vdev_raidz_generate_parity_row(rm, rr);
2048 
2049 	for (int c = 0; c < rr->rr_cols; c++) {
2050 		raidz_col_t *rc = &rr->rr_col[c];
2051 
2052 		/*
2053 		 * Empty columns are zero filled and included in the parity
2054 		 * calculation and therefore must be written.
2055 		 */
2056 		ASSERT3U(rc->rc_size, !=, 0);
2057 
2058 		/* Verify physical to logical translation */
2059 		vdev_draid_io_verify(vd, rr, c);
2060 
2061 		zio_nowait(zio_vdev_child_io(zio, NULL,
2062 		    vd->vdev_child[rc->rc_devidx], rc->rc_offset,
2063 		    rc->rc_abd, rc->rc_size, zio->io_type, zio->io_priority,
2064 		    0, vdev_raidz_child_done, rc));
2065 	}
2066 }
2067 
2068 /*
2069  * For read operations:
2070  * 1. The vdev_draid_map_alloc() function will create a minimal raidz
2071  *    mapping for the read based on the zio->io_flags.  There are two
2072  *    possible mappings: either 1) a normal read, or 2) a scrub/resilver.
2073  * 2. Create the zio read operations.  This will include all parity
2074  *    columns and skip sectors for a scrub/resilver.
2075  */
2076 static void
2077 vdev_draid_io_start_read(zio_t *zio, raidz_row_t *rr)
2078 {
2079 	vdev_t *vd = zio->io_vd;
2080 
2081 	/* Sequential rebuild must do IO at redundancy group boundary. */
2082 	IMPLY(zio->io_priority == ZIO_PRIORITY_REBUILD, rr->rr_nempty == 0);
2083 
2084 	/*
2085 	 * Iterate over the columns in reverse order so that we hit the parity
2086 	 * last.  Any errors along the way will force us to read the parity.
2087 	 * For scrub/resilver IOs which verify skip sectors, a gang ABD will
2088 	 * have been allocated to store them and rc->rc_size is increased.
2089 	 */
2090 	for (int c = rr->rr_cols - 1; c >= 0; c--) {
2091 		raidz_col_t *rc = &rr->rr_col[c];
2092 		vdev_t *cvd = vd->vdev_child[rc->rc_devidx];
2093 
2094 		if (!vdev_draid_readable(cvd, rc->rc_offset)) {
2095 			if (c >= rr->rr_firstdatacol)
2096 				rr->rr_missingdata++;
2097 			else
2098 				rr->rr_missingparity++;
2099 			rc->rc_error = SET_ERROR(ENXIO);
2100 			rc->rc_tried = 1;
2101 			rc->rc_skipped = 1;
2102 			continue;
2103 		}
2104 
2105 		if (vdev_draid_missing(cvd, rc->rc_offset, zio->io_txg, 1)) {
2106 			vdev_t *svd;
2107 
2108 			if (c >= rr->rr_firstdatacol)
2109 				rr->rr_missingdata++;
2110 			else
2111 				rr->rr_missingparity++;
2112 			rc->rc_error = SET_ERROR(ESTALE);
2113 			rc->rc_skipped = 1;
2114 
2115 			/*
2116 			 * If this child has a dRAID spare attached, and that
2117 			 * spare at rc_offset maps to another spare, the repair
2118 			 * would go to that spare, and we want all mirrored
2119 			 * children on it to be updated with the repaired data,
2120 			 * even when we cannot vouch for it during rebuilds
2121 			 * (which don't have checksums). Otherwise, we will see
2122 			 * many checksum errors on those spares during scrub.
2123 			 * The worst that can happen in this case is that we
2124 			 * update the reserved spare column on some device
2125 			 * with unverified data, which is harmless.
2126 			 */
2127 			if ((svd = vdev_draid_find_spare(cvd)) != NULL) {
2128 				svd = vdev_draid_spare_get_child(svd,
2129 				    rc->rc_offset);
2130 				if (svd && (svd->vdev_ops == &vdev_spare_ops ||
2131 				    svd->vdev_ops == &vdev_replacing_ops))
2132 					rc->rc_tgt_is_dspare = 1;
2133 			}
2134 			continue;
2135 		}
2136 
2137 		/*
2138 		 * Empty columns may be read during vdev_draid_io_done().
2139 		 * Only skip them after the readable and missing checks
2140 		 * verify they are available.
2141 		 */
2142 		if (rc->rc_size == 0) {
2143 			rc->rc_skipped = 1;
2144 			continue;
2145 		}
2146 
2147 		if (zio->io_flags & ZIO_FLAG_RESILVER) {
2148 			vdev_t *svd;
2149 
2150 			/*
2151 			 * Repairs need to be constrained to the devices being
2152 			 * rebuilt since without a checksum we cannot verify the
2153 			 * data is actually correct and performing an incorrect
2154 			 * repair could result in locking in the damage and
2155 			 * making the data unrecoverable.
2156 			 */
2157 			if (zio->io_priority == ZIO_PRIORITY_REBUILD &&
2158 			    !vdev_draid_rebuilding(cvd))
2159 				rc->rc_allow_repair = 0;
2160 
2161 			/*
2162 			 * If this child is a distributed spare then the
2163 			 * offset might reside on the vdev being replaced.
2164 			 * In which case this data must be written to the
2165 			 * new device.  Failure to do so would result in
2166 			 * checksum errors when the old device is detached
2167 			 * and the pool is scrubbed.
2168 			 */
2169 			if ((svd = vdev_draid_find_spare(cvd)) != NULL) {
2170 				svd = vdev_draid_spare_get_child(svd,
2171 				    rc->rc_offset);
2172 				if (svd && (svd->vdev_ops == &vdev_spare_ops ||
2173 				    svd->vdev_ops == &vdev_replacing_ops)) {
2174 					rc->rc_force_repair = 1;
2175 
2176 					if (vdev_draid_rebuilding(svd))
2177 						rc->rc_allow_repair = 1;
2178 				}
2179 			}
2180 
2181 			/*
2182 			 * Always issue a repair IO to this child when it's
2183 			 * a spare or replacing vdev with an active rebuild.
2184 			 */
2185 			if ((cvd->vdev_ops == &vdev_spare_ops ||
2186 			    cvd->vdev_ops == &vdev_replacing_ops) &&
2187 			    vdev_draid_rebuilding(cvd)) {
2188 				rc->rc_force_repair = 1;
2189 				rc->rc_allow_repair = 1;
2190 			}
2191 		}
2192 
2193 		if (vdev_sit_out_reads(cvd, zio->io_flags)) {
2194 			rr->rr_outlier_cnt++;
2195 			ASSERT0(rc->rc_latency_outlier);
2196 			rc->rc_latency_outlier = 1;
2197 		}
2198 	}
2199 
2200 	/*
2201 	 * When the row contains a latency outlier and sufficient parity
2202 	 * exists to reconstruct the column data, skip reading the
2203 	 * known slow child vdev as a performance optimization.
2204 	 */
2205 	if (rr->rr_outlier_cnt > 0 &&
2206 	    (rr->rr_firstdatacol - rr->rr_missingparity) >=
2207 	    (rr->rr_missingdata + 1)) {
2208 
2209 		for (int c = rr->rr_cols - 1; c >= rr->rr_firstdatacol; c--) {
2210 			raidz_col_t *rc = &rr->rr_col[c];
2211 
2212 			if (rc->rc_error == 0 && rc->rc_latency_outlier) {
2213 				rr->rr_missingdata++;
2214 				rc->rc_error = SET_ERROR(EAGAIN);
2215 				rc->rc_skipped = 1;
2216 				break;
2217 			}
2218 		}
2219 	}
2220 
2221 	/*
2222 	 * Either a parity or data column is missing; this means a repair
2223 	 * may be attempted by vdev_draid_io_done().  Expand the raid map
2224 	 * to read in empty columns which are needed along with the parity
2225 	 * during reconstruction.
2226 	 */
2227 	if ((rr->rr_missingdata > 0 || rr->rr_missingparity > 0) &&
2228 	    rr->rr_nempty > 0 && rr->rr_abd_empty == NULL) {
2229 		vdev_draid_map_alloc_empty(zio, rr);
2230 	}
2231 
2232 	for (int c = rr->rr_cols - 1; c >= 0; c--) {
2233 		raidz_col_t *rc = &rr->rr_col[c];
2234 		vdev_t *cvd = vd->vdev_child[rc->rc_devidx];
2235 
2236 		if (rc->rc_error || rc->rc_size == 0)
2237 			continue;
2238 
2239 		if (c >= rr->rr_firstdatacol || rr->rr_missingdata > 0 ||
2240 		    (zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER))) {
2241 			zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
2242 			    rc->rc_offset, rc->rc_abd, rc->rc_size,
2243 			    zio->io_type, zio->io_priority, 0,
2244 			    vdev_raidz_child_done, rc));
2245 		}
2246 	}
2247 }
2248 
2249 /*
2250  * Start an IO operation to a dRAID vdev.
2251  */
2252 static void
2253 vdev_draid_io_start(zio_t *zio)
2254 {
2255 	vdev_t *vd __maybe_unused = zio->io_vd;
2256 
2257 	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
2258 	ASSERT3U(zio->io_offset, ==, vdev_draid_get_astart(vd, zio->io_offset));
2259 
2260 	raidz_map_t *rm = vdev_draid_map_alloc(zio);
2261 	zio->io_vsd = rm;
2262 	zio->io_vsd_ops = &vdev_raidz_vsd_ops;
2263 
2264 	if (zio->io_type == ZIO_TYPE_WRITE) {
2265 		for (int i = 0; i < rm->rm_nrows; i++) {
2266 			vdev_draid_io_start_write(zio, rm->rm_row[i]);
2267 		}
2268 	} else {
2269 		ASSERT(zio->io_type == ZIO_TYPE_READ);
2270 
2271 		for (int i = 0; i < rm->rm_nrows; i++) {
2272 			vdev_draid_io_start_read(zio, rm->rm_row[i]);
2273 		}
2274 	}
2275 
2276 	zio_execute(zio);
2277 }
2278 
2279 /*
2280  * Complete an IO operation on a dRAID vdev.  The raidz logic can be applied
2281  * to dRAID since the layout is fully described by the raidz_map_t.
2282  */
2283 static void
2284 vdev_draid_io_done(zio_t *zio)
2285 {
2286 	vdev_raidz_io_done(zio);
2287 }
2288 
2289 static void
2290 vdev_draid_state_change(vdev_t *vd, int faulted, int degraded)
2291 {
2292 	vdev_draid_config_t *vdc = vd->vdev_tsd;
2293 	ASSERT(vd->vdev_ops == &vdev_draid_ops);
2294 
2295 	if (faulted > vdc->vdc_nparity * (vdc->vdc_width / vdc->vdc_children))
2296 		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2297 		    VDEV_AUX_NO_REPLICAS);
2298 	else if (degraded + faulted != 0)
2299 		vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE);
2300 	else
2301 		vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY, VDEV_AUX_NONE);
2302 }
2303 
2304 static void
2305 vdev_draid_xlate(vdev_t *cvd, const zfs_range_seg64_t *logical_rs,
2306     zfs_range_seg64_t *physical_rs, zfs_range_seg64_t *remain_rs)
2307 {
2308 	vdev_t *raidvd = cvd->vdev_parent;
2309 	ASSERT(raidvd->vdev_ops == &vdev_draid_ops);
2310 
2311 	vdev_draid_config_t *vdc = raidvd->vdev_tsd;
2312 	uint64_t ashift = raidvd->vdev_top->vdev_ashift;
2313 
2314 	/* Make sure the offsets are block-aligned */
2315 	ASSERT0(logical_rs->rs_start % (1 << ashift));
2316 	ASSERT0(logical_rs->rs_end % (1 << ashift));
2317 
2318 	uint64_t logical_start = logical_rs->rs_start;
2319 	uint64_t logical_end = logical_rs->rs_end;
2320 
2321 	/*
2322 	 * Unaligned ranges must be skipped. All metaslabs are correctly
2323 	 * aligned so this should not happen, but this case is handled in
2324 	 * case it's needed by future callers.
2325 	 */
2326 	uint64_t astart = vdev_draid_get_astart(raidvd, logical_start);
2327 	if (astart != logical_start) {
2328 		physical_rs->rs_start = logical_start;
2329 		physical_rs->rs_end = logical_start;
2330 		remain_rs->rs_start = MIN(astart, logical_end);
2331 		remain_rs->rs_end = logical_end;
2332 		return;
2333 	}
2334 
2335 	/*
2336 	 * Unlike with mirrors and raidz a dRAID logical range can map
2337 	 * to multiple non-contiguous physical ranges. This is handled by
2338 	 * limiting the size of the logical range to a single group and
2339 	 * setting the remain argument such that it describes the remaining
2340 	 * unmapped logical range. This is stricter than absolutely
2341 	 * necessary but helps simplify the logic below.
2342 	 */
2343 	uint64_t group = vdev_draid_offset_to_group(raidvd, logical_start);
2344 	uint64_t nextstart = vdev_draid_group_to_offset(raidvd, group + 1);
2345 	if (logical_end > nextstart)
2346 		logical_end = nextstart;
2347 
2348 	/* Find the starting offset for each vdev in the group */
2349 	uint64_t perm, groupstart, ndisks;
2350 	uint64_t start = vdev_draid_logical_to_physical(raidvd,
2351 	    logical_start, &perm, &groupstart, &ndisks);
2352 	uint64_t end = start;
2353 
2354 	uint8_t *base;
2355 	uint64_t iter, id;
2356 	vdev_draid_get_perm(vdc, perm, &base, &iter);
2357 
2358 	/*
2359 	 * Check if the passed child falls within the group.  If it does
2360 	 * update the start and end to reflect the physical range.
2361 	 * Otherwise, leave them unmodified which will result in an empty
2362 	 * (zero-length) physical range being returned.
2363 	 */
2364 	for (uint64_t i = 0; i < vdc->vdc_groupwidth; i++) {
2365 		uint64_t c = (groupstart + i) % ndisks;
2366 
2367 		if (c == 0 && i != 0) {
2368 			/* the group wrapped, increment the start */
2369 			start += VDEV_DRAID_ROWHEIGHT;
2370 			end = start;
2371 		}
2372 
2373 		id = vdev_draid_permute_id(vdc, base, iter, c);
2374 		if (id == cvd->vdev_id) {
2375 			uint64_t b_size = (logical_end >> ashift) -
2376 			    (logical_start >> ashift);
2377 			ASSERT3U(b_size, >, 0);
2378 			end = start + ((((b_size - 1) /
2379 			    vdc->vdc_groupwidth) + 1) << ashift);
2380 			break;
2381 		}
2382 	}
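
	/*
	 * Illustrative example (values assumed): with groupwidth = 10 and
	 * 4K sectors, a logical range covering 33 sectors maps on the
	 * matching child to ((33 - 1) / 10 + 1) = 4 sectors, a 16K
	 * physical span.
	 */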
2383 	physical_rs->rs_start = start;
2384 	physical_rs->rs_end = end;
2385 
2386 	/*
2387 	 * Only top-level vdevs are allowed to set remain_rs because
2388 	 * when .vdev_op_xlate() is called for their children the full
2389 	 * logical range is not provided by vdev_xlate().
2390 	 */
2391 	remain_rs->rs_start = logical_end;
2392 	remain_rs->rs_end = logical_rs->rs_end;
2393 
2394 	ASSERT3U(physical_rs->rs_start, <=, logical_start);
2395 	ASSERT3U(physical_rs->rs_end - physical_rs->rs_start, <=,
2396 	    logical_end - logical_start);
2397 }
2398 
2399 /*
2400  * Add dRAID specific fields to the config nvlist.
2401  */
2402 static void
2403 vdev_draid_config_generate(vdev_t *vd, nvlist_t *nv)
2404 {
2405 	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
2406 	vdev_draid_config_t *vdc = vd->vdev_tsd;
2407 
2408 	fnvlist_add_uint64(nv, ZPOOL_CONFIG_NPARITY, vdc->vdc_nparity);
2409 	fnvlist_add_uint64(nv, ZPOOL_CONFIG_DRAID_NDATA, vdc->vdc_ndata);
2410 	fnvlist_add_uint64(nv, ZPOOL_CONFIG_DRAID_NSPARES, vdc->vdc_nspares);
2411 	fnvlist_add_uint64(nv, ZPOOL_CONFIG_DRAID_NGROUPS, vdc->vdc_ngroups);
2412 
2413 	if (spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_DRAID_FAIL_DOMAINS))
2414 		fnvlist_add_uint64(nv, ZPOOL_CONFIG_DRAID_NCHILDREN,
2415 		    vdc->vdc_children);
2416 }
2417 
2418 /*
2419  * Initialize private dRAID specific fields from the nvlist.
2420  */
2421 static int
2422 vdev_draid_init(spa_t *spa, nvlist_t *nv, void **tsd)
2423 {
2424 	(void) spa;
2425 	uint64_t ndata, nparity, nspares, ngroups;
2426 	int error;
2427 
2428 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DRAID_NDATA, &ndata))
2429 		return (SET_ERROR(EINVAL));
2430 
2431 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY, &nparity) ||
2432 	    nparity == 0 || nparity > VDEV_DRAID_MAXPARITY) {
2433 		return (SET_ERROR(EINVAL));
2434 	}
2435 
2436 	uint_t width;
2437 	uint64_t children;
2438 	nvlist_t **child;
2439 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2440 	    &child, &width) != 0 || width == 0) {
2441 		return (SET_ERROR(EINVAL));
2442 	}
2443 
2444 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DRAID_NCHILDREN, &children)) {
2445 		children = width;
2446 		if (children > VDEV_DRAID_MAX_CHILDREN)
2447 			return (SET_ERROR(EINVAL));
2448 	}
2449 
2450 	if (children == 0 || width % children != 0)
2451 		return (SET_ERROR(EINVAL));
2452 
2453 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DRAID_NSPARES, &nspares) ||
2454 	    nspares > 100) {
2455 		return (SET_ERROR(EINVAL));
2456 	}
2457 
2458 	int fgrps = width / children;
2459 	int nspare = nspares / fgrps;
2460 	if (nspares % fgrps)
2461 		nspare++;
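
	/*
	 * For example (values assumed): width = 28 and children = 14 give
	 * fgrps = 2 failure groups, and nspares = 3 rounds up to
	 * nspare = 2 distributed spares per failure group.
	 */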
2462 
2463 	/*
2464 	 * Validate the minimum number of children exist per group for the
2465 	 * specified parity level (draid1 >= 2, draid2 >= 3, draid3 >= 4).
2466 	 */
2467 	if (children < (ndata + nparity + nspare))
2468 		return (SET_ERROR(EINVAL));
2469 
2470 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DRAID_NGROUPS, &ngroups) ||
2471 	    ngroups == 0 || ngroups > VDEV_DRAID_MAX_CHILDREN) {
2472 		return (SET_ERROR(EINVAL));
2473 	}
2474 
2475 	/*
2476 	 * Create the dRAID configuration using the pool nvlist configuration
2477 	 * and the fixed mapping for the correct number of children.
2478 	 */
2479 	vdev_draid_config_t *vdc;
2480 	const draid_map_t *map;
2481 
2482 	error = vdev_draid_lookup_map(children, &map);
2483 	if (error)
2484 		return (SET_ERROR(EINVAL));
2485 
2486 	vdc = kmem_zalloc(sizeof (*vdc), KM_SLEEP);
2487 	vdc->vdc_ndata = ndata;
2488 	vdc->vdc_nparity = nparity;
2489 	vdc->vdc_nspares = nspares;
2490 	vdc->vdc_children = children;
2491 	vdc->vdc_ngroups = ngroups;
2492 	vdc->vdc_width = width;
2493 	vdc->vdc_nperms = map->dm_nperms;
2494 
2495 	error = vdev_draid_generate_perms(map, &vdc->vdc_perms);
2496 	if (error) {
2497 		kmem_free(vdc, sizeof (*vdc));
2498 		return (SET_ERROR(EINVAL));
2499 	}
2500 
2501 	if (width > children)
2502 		vdev_draid_shuffle_perms(map, vdc->vdc_perms, width);
2503 
2504 	/*
2505 	 * Derived constants.
2506 	 */
2507 	vdc->vdc_groupwidth = vdc->vdc_ndata + vdc->vdc_nparity;
2508 	vdc->vdc_ndisks = vdc->vdc_width - vdc->vdc_nspares;
2509 	vdc->vdc_groupsz = vdc->vdc_groupwidth * VDEV_DRAID_ROWHEIGHT;
2510 	vdc->vdc_devslicesz = (vdc->vdc_groupsz * vdc->vdc_ngroups) /
2511 	    vdc->vdc_ndisks;
2512 
2513 	ASSERT3U(vdc->vdc_groupwidth, >=, 2);
2514 	ASSERT3U(vdc->vdc_groupwidth, <=, vdc->vdc_ndisks);
2515 	ASSERT3U(vdc->vdc_groupsz, >=, 2 * VDEV_DRAID_ROWHEIGHT);
2516 	ASSERT3U(vdc->vdc_devslicesz, >=, VDEV_DRAID_ROWHEIGHT);
2517 	ASSERT0(vdc->vdc_devslicesz % VDEV_DRAID_ROWHEIGHT);
2518 	ASSERT3U((vdc->vdc_groupwidth * vdc->vdc_ngroups) %
2519 	    vdc->vdc_ndisks, ==, 0);
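
	/*
	 * Worked example (values assumed): ndata = 8, nparity = 2,
	 * width = children = 14, nspares = 2 and ngroups = 6 give
	 * groupwidth = 10, ndisks = 12, groupsz = 10 * 16M = 160M and
	 * devslicesz = (160M * 6) / 12 = 80M, satisfying the asserts above.
	 */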
2520 
2521 	*tsd = vdc;
2522 
2523 	return (0);
2524 }
2525 
2526 static void
2527 vdev_draid_fini(vdev_t *vd)
2528 {
2529 	vdev_draid_config_t *vdc = vd->vdev_tsd;
2530 
2531 	vmem_free(vdc->vdc_perms, sizeof (uint8_t) *
2532 	    vdc->vdc_children * vdc->vdc_nperms);
2533 	kmem_free(vdc, sizeof (*vdc));
2534 }
2535 
2536 static uint64_t
2537 vdev_draid_nparity(vdev_t *vd)
2538 {
2539 	vdev_draid_config_t *vdc = vd->vdev_tsd;
2540 
2541 	return (vdc->vdc_nparity * (vdc->vdc_width / vdc->vdc_children));
2542 }
2543 
2544 static uint64_t
2545 vdev_draid_ndisks(vdev_t *vd)
2546 {
2547 	vdev_draid_config_t *vdc = vd->vdev_tsd;
2548 
2549 	return (vdc->vdc_ndisks);
2550 }
2551 
2552 vdev_ops_t vdev_draid_ops = {
2553 	.vdev_op_init = vdev_draid_init,
2554 	.vdev_op_fini = vdev_draid_fini,
2555 	.vdev_op_open = vdev_draid_open,
2556 	.vdev_op_close = vdev_draid_close,
2557 	.vdev_op_psize_to_asize = vdev_draid_psize_to_asize,
2558 	.vdev_op_asize_to_psize = vdev_draid_asize_to_psize,
2559 	.vdev_op_min_asize = vdev_draid_min_asize,
2560 	.vdev_op_min_alloc = vdev_draid_min_alloc,
2561 	.vdev_op_io_start = vdev_draid_io_start,
2562 	.vdev_op_io_done = vdev_draid_io_done,
2563 	.vdev_op_state_change = vdev_draid_state_change,
2564 	.vdev_op_need_resilver = vdev_draid_need_resilver,
2565 	.vdev_op_hold = NULL,
2566 	.vdev_op_rele = NULL,
2567 	.vdev_op_remap = NULL,
2568 	.vdev_op_xlate = vdev_draid_xlate,
2569 	.vdev_op_rebuild_asize = vdev_draid_rebuild_asize,
2570 	.vdev_op_metaslab_init = vdev_draid_metaslab_init,
2571 	.vdev_op_config_generate = vdev_draid_config_generate,
2572 	.vdev_op_nparity = vdev_draid_nparity,
2573 	.vdev_op_ndisks = vdev_draid_ndisks,
2574 	.vdev_op_type = VDEV_TYPE_DRAID,
2575 	.vdev_op_leaf = B_FALSE,
2576 };
2577 
2578 
2579 /*
2580  * A dRAID distributed spare is a virtual leaf vdev which is included in the
2581  * parent dRAID configuration.  The last N columns of the dRAID permutation
2582  * table are used to determine on which dRAID children a specific offset
2583  * should be written.  These spare leaf vdevs can only be used to replace
2584  * faulted children in the same dRAID configuration.
2585  */
2586 
2587 /*
2588  * Distributed spare state.  All fields are set when the distributed spare is
2589  * first opened and are immutable.
2590  */
2591 typedef struct {
2592 	vdev_t *vds_draid_vdev;		/* top-level parent dRAID vdev */
2593 	uint64_t vds_top_guid;		/* top-level parent dRAID guid */
2594 	uint64_t vds_spare_id;		/* spare id (0 - vdc->vdc_nspares-1) */
2595 } vdev_draid_spare_t;
2596 
2597 /*
2598  * Returns the parent dRAID vdev to which the distributed spare belongs.
2599  * This may be safely called even when the vdev is not open.
2600  */
2601 vdev_t *
2602 vdev_draid_spare_get_parent(vdev_t *vd)
2603 {
2604 	vdev_draid_spare_t *vds = vd->vdev_tsd;
2605 
2606 	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_spare_ops);
2607 
2608 	if (vds->vds_draid_vdev != NULL)
2609 		return (vds->vds_draid_vdev);
2610 
2611 	return (vdev_lookup_by_guid(vd->vdev_spa->spa_root_vdev,
2612 	    vds->vds_top_guid));
2613 }
2614 
2615 /*
2616  * A dRAID spare is active when it's the child of a vdev using the
2617  * vdev_spare_ops, vdev_replacing_ops or vdev_draid_ops.
2618  */
2619 static boolean_t
2620 vdev_draid_spare_is_active(vdev_t *vd)
2621 {
2622 	vdev_t *pvd = vd->vdev_parent;
2623 
2624 	if (pvd != NULL && (pvd->vdev_ops == &vdev_spare_ops ||
2625 	    pvd->vdev_ops == &vdev_replacing_ops ||
2626 	    pvd->vdev_ops == &vdev_draid_ops)) {
2627 		return (B_TRUE);
2628 	} else {
2629 		return (B_FALSE);
2630 	}
2631 }
2632 
2633 /*
2634  * Given a dRAID distributed spare vdev, returns the physical child vdev
2635  * on which the provided offset resides.  This may involve recursing through
2636  * multiple layers of distributed spares.  Note that offset is relative to
2637  * this vdev.
2638  */
2639 vdev_t *
2640 vdev_draid_spare_get_child(vdev_t *vd, uint64_t physical_offset)
2641 {
2642 	vdev_draid_spare_t *vds = vd->vdev_tsd;
2643 
2644 	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_spare_ops);
2645 
2646 	/* The vdev is closed */
2647 	if (vds->vds_draid_vdev == NULL)
2648 		return (NULL);
2649 
2650 	vdev_t *tvd = vds->vds_draid_vdev;
2651 	vdev_draid_config_t *vdc = tvd->vdev_tsd;
2652 
2653 	uint64_t fgrps = vdc->vdc_width / vdc->vdc_children;
2654 
2655 	ASSERT3P(tvd->vdev_ops, ==, &vdev_draid_ops);
2656 	ASSERT3U(vds->vds_spare_id, <, vdc->vdc_nspares);
2657 
2658 	uint8_t *base;
2659 	uint64_t iter;
2660 	uint64_t perm = (physical_offset / vdc->vdc_devslicesz) * fgrps;
2661 
2662 	/*
2663 	 * Adjust permutation so that it points to the correct slice in the
2664 	 * big width row.
2665 	 */
2666 	perm += vds->vds_spare_id % fgrps;
2667 
2668 	vdev_draid_get_perm(vdc, perm, &base, &iter);
2669 
2670 	uint64_t cid = vdev_draid_permute_id(vdc, base, iter,
2671 	    (vdc->vdc_children - 1) - (vds->vds_spare_id / fgrps));
2672 	vdev_t *cvd = tvd->vdev_child[cid];
2673 
2674 	if (cvd->vdev_ops == &vdev_draid_spare_ops)
2675 		return (vdev_draid_spare_get_child(cvd, physical_offset));
2676 
2677 	return (cvd);
2678 }
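
/*
 * Illustrative example (values assumed): with fgrps = 1, devslicesz = 80M,
 * children = 14 and vds_spare_id = 0, a physical offset of 200M selects
 * perm = 200M / 80M = 2, and the spare resolves to the last permuted
 * column, vdev_draid_permute_id(..., 13), of that permutation.
 */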
2679 
2680 /*
2681  * Returns true if no failure group has reached the failure threshold at
2682  * which an enclosure failure can no longer be tolerated. Used spares are
2683  * counted as failures because, in the event of an enclosure failure, their
2684  * blocks can belong to disks from that enclosure and can be lost.
2685  */
2686 boolean_t
2687 vdev_draid_fail_domain_allowed(vdev_t *vd)
2688 {
2689 	vdev_draid_config_t *vdc = vd->vdev_tsd;
2690 
2691 	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
2692 	ASSERT3P(vdc->vdc_width, >, vdc->vdc_children);
2693 
2694 	int counter = 0;
2695 
2696 	for (int c = 0; c < vdc->vdc_width; c++) {
2697 		vdev_t *cvd = vd->vdev_child[c];
2698 
2699 		if ((c % vdc->vdc_children) == 0)
2700 			counter = 0;
2701 
2702 		if (cvd->vdev_ops == &vdev_spare_ops ||
2703 		    cvd->vdev_ops == &vdev_draid_spare_ops ||
2704 		    !vdev_readable(cvd))
2705 			counter++;
2706 
2707 		if (counter > vdc->vdc_nparity)
2708 			return (B_FALSE);
2709 	}
2710 
2711 	return (B_TRUE);
2712 }
2713 
2714 static void
2715 vdev_draid_spare_close(vdev_t *vd)
2716 {
2717 	vdev_draid_spare_t *vds = vd->vdev_tsd;
2718 	vds->vds_draid_vdev = NULL;
2719 }
2720 
2721 /*
2722  * Opening a dRAID spare device is done by looking up the associated dRAID
2723  * top-level vdev guid from the spare configuration.
2724  */
2725 static int
2726 vdev_draid_spare_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
2727     uint64_t *logical_ashift, uint64_t *physical_ashift)
2728 {
2729 	vdev_draid_spare_t *vds = vd->vdev_tsd;
2730 	vdev_t *rvd = vd->vdev_spa->spa_root_vdev;
2731 	uint64_t asize, max_asize;
2732 
2733 	vdev_t *tvd = vdev_lookup_by_guid(rvd, vds->vds_top_guid);
2734 	if (tvd == NULL) {
2735 		/*
2736 		 * When spa_vdev_add() is labeling new spares the
2737 		 * associated dRAID is not attached to the root vdev
2738 		 * nor does this spare have a parent.  Simulate a valid
2739 		 * device in order to allow the label to be initialized
2740 		 * and the distributed spare added to the configuration.
2741 		 */
2742 		if (vd->vdev_parent == NULL) {
2743 			*psize = *max_psize = SPA_MINDEVSIZE;
2744 			*logical_ashift = *physical_ashift = ASHIFT_MIN;
2745 			return (0);
2746 		}
2747 
2748 		return (SET_ERROR(EINVAL));
2749 	}
2750 
2751 	vdev_draid_config_t *vdc = tvd->vdev_tsd;
2752 	if (tvd->vdev_ops != &vdev_draid_ops || vdc == NULL)
2753 		return (SET_ERROR(EINVAL));
2754 
2755 	if (vds->vds_spare_id >= vdc->vdc_nspares)
2756 		return (SET_ERROR(EINVAL));
2757 
2758 	/*
2759 	 * Neither tvd->vdev_asize nor tvd->vdev_max_asize can be used here
2760 	 * because the caller may be vdev_draid_open() in which case the
2761 	 * values are stale as they haven't yet been updated by vdev_open().
2762 	 * To avoid this always recalculate the dRAID asize and max_asize.
2763 	 */
2764 	vdev_draid_calculate_asize(tvd, &asize, &max_asize,
2765 	    logical_ashift, physical_ashift);
2766 
2767 	*psize = asize + VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
2768 	*max_psize = max_asize + VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
2769 
2770 	vds->vds_draid_vdev = tvd;
2771 	vd->vdev_nonrot = tvd->vdev_nonrot;
2772 
2773 	return (0);
2774 }
2775 
2776 /*
2777  * Completed distributed spare IO.  Store the result in the parent zio
2778  * as if it had performed the operation itself.  Only the first error is
2779  * preserved if there are multiple errors.
2780  */
2781 static void
2782 vdev_draid_spare_child_done(zio_t *zio)
2783 {
2784 	zio_t *pio = zio->io_private;
2785 
2786 	/*
2787 	 * IOs are issued to non-writable vdevs in order to keep their
2788 	 * DTLs accurate.  However, we don't want to propagate the
2789 	 * error in to the distributed spare's DTL.  When resilvering
2790 	 * vdev_draid_need_resilver() will consult the relevant DTL
2791 	 * to determine if the data is missing and must be repaired.
2792 	 */
2793 	if (!vdev_writeable(zio->io_vd))
2794 		return;
2795 
2796 	if (pio->io_error == 0)
2797 		pio->io_error = zio->io_error;
2798 }
2799 
2800 /*
2801  * Returns a valid label nvlist for the distributed spare vdev.  This is
2802  * used to bypass the IO pipeline to avoid the complexity of constructing
2803  * a complete label with a valid checksum to return when read.
2804  */
2805 nvlist_t *
2806 vdev_draid_read_config_spare(vdev_t *vd)
2807 {
2808 	spa_t *spa = vd->vdev_spa;
2809 	spa_aux_vdev_t *sav = &spa->spa_spares;
2810 	uint64_t guid = vd->vdev_guid;
2811 
2812 	nvlist_t *nv = fnvlist_alloc();
2813 	fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 1);
2814 	fnvlist_add_uint64(nv, ZPOOL_CONFIG_CREATE_TXG, vd->vdev_crtxg);
2815 	fnvlist_add_uint64(nv, ZPOOL_CONFIG_VERSION, spa_version(spa));
2816 	fnvlist_add_string(nv, ZPOOL_CONFIG_POOL_NAME, spa_name(spa));
2817 	fnvlist_add_uint64(nv, ZPOOL_CONFIG_POOL_GUID, spa_guid(spa));
2818 	fnvlist_add_uint64(nv, ZPOOL_CONFIG_POOL_TXG, spa->spa_config_txg);
2819 	fnvlist_add_uint64(nv, ZPOOL_CONFIG_TOP_GUID, vd->vdev_top->vdev_guid);
2820 	fnvlist_add_uint64(nv, ZPOOL_CONFIG_POOL_STATE,
2821 	    vdev_draid_spare_is_active(vd) ?
2822 	    POOL_STATE_ACTIVE : POOL_STATE_SPARE);
2823 
2824 	/* Set the vdev guid based on the matching entry in sav_vdevs. */
2825 	for (int i = 0; i < sav->sav_count; i++) {
2826 		if (sav->sav_vdevs[i]->vdev_ops == &vdev_draid_spare_ops &&
2827 		    strcmp(sav->sav_vdevs[i]->vdev_path, vd->vdev_path) == 0) {
2828 			guid = sav->sav_vdevs[i]->vdev_guid;
2829 			break;
2830 		}
2831 	}
2832 
2833 	fnvlist_add_uint64(nv, ZPOOL_CONFIG_GUID, guid);
2834 
2835 	return (nv);
2836 }
2837 
2838 /*
2839  * Handle any flush requested of the distributed spare. All children must be
2840  * flushed.
2841  */
2842 static int
2843 vdev_draid_spare_flush(zio_t *zio)
2844 {
2845 	vdev_t *vd = zio->io_vd;
2846 	int error = 0;
2847 
2848 	for (int c = 0; c < vd->vdev_children; c++) {
2849 		zio_nowait(zio_vdev_child_io(zio, NULL,
2850 		    vd->vdev_child[c], zio->io_offset, zio->io_abd,
2851 		    zio->io_size, zio->io_type, zio->io_priority, 0,
2852 		    vdev_draid_spare_child_done, zio));
2853 	}
2854 
2855 	return (error);
2856 }
2857 
2858 /*
2859  * Initiate an IO to the distributed spare.  For normal IOs this entails using
2860  * the zio->io_offset and permutation table to calculate which child dRAID vdev
2861  * is responsible for the data, then passing the zio along to that child to
2862  * perform the actual IO.  The label ranges are not stored on disk and require
2863  * some special handling which is described below.
2864  */
2865 static void
2866 vdev_draid_spare_io_start(zio_t *zio)
2867 {
2868 	vdev_t *cvd = NULL, *vd = zio->io_vd;
2869 	vdev_draid_spare_t *vds = vd->vdev_tsd;
2870 	uint64_t offset = zio->io_offset - VDEV_LABEL_START_SIZE;
2871 
2872 	/*
2873 	 * If the vdev is closed, it's likely in the REMOVED or FAULTED state.
2874 	 * Nothing to be done here but return failure.
2875 	 */
2876 	if (vds == NULL) {
2877 		zio->io_error = ENXIO;
2878 		zio_interrupt(zio);
2879 		return;
2880 	}
2881 
2882 	switch (zio->io_type) {
2883 	case ZIO_TYPE_FLUSH:
2884 		zio->io_error = vdev_draid_spare_flush(zio);
2885 		break;
2886 
2887 	case ZIO_TYPE_WRITE:
2888 		if (VDEV_OFFSET_IS_LABEL(vd, zio->io_offset)) {
2889 			/*
2890 			 * Accept probe IOs and config writers to simulate the
2891 			 * existence of an on disk label.  vdev_label_sync(),
2892 			 * vdev_uberblock_sync() and vdev_copy_uberblocks()
2893 			 * skip the distributed spares.  This only leaves
2894 			 * vdev_label_init() which is allowed to succeed to
2895 			 * avoid adding special cases to the function.
2896 			 */
2897 			if (zio->io_flags & ZIO_FLAG_PROBE ||
2898 			    zio->io_flags & ZIO_FLAG_CONFIG_WRITER) {
2899 				zio->io_error = 0;
2900 			} else {
2901 				zio->io_error = SET_ERROR(EIO);
2902 			}
2903 		} else {
2904 			cvd = vdev_draid_spare_get_child(vd, offset);
2905 
2906 			if (cvd == NULL) {
2907 				zio->io_error = SET_ERROR(ENXIO);
2908 			} else {
2909 				zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
2910 				    offset, zio->io_abd, zio->io_size,
2911 				    zio->io_type, zio->io_priority, 0,
2912 				    vdev_draid_spare_child_done, zio));
2913 			}
2914 		}
2915 		break;
2916 
2917 	case ZIO_TYPE_READ:
2918 		if (VDEV_OFFSET_IS_LABEL(vd, zio->io_offset)) {
2919 			/*
2920 			 * Accept probe IOs to simulate the existence of a
2921 			 * label.  vdev_label_read_config() bypasses the
2922 			 * pipeline to read the label configuration and
2923 			 * vdev_uberblock_load() skips distributed spares
2924 			 * when attempting to locate the best uberblock.
2925 			 */
2926 			if (zio->io_flags & ZIO_FLAG_PROBE) {
2927 				zio->io_error = 0;
2928 			} else {
2929 				zio->io_error = SET_ERROR(EIO);
2930 			}
2931 		} else {
2932 			cvd = vdev_draid_spare_get_child(vd, offset);
2933 
2934 			if (cvd == NULL || !vdev_readable(cvd)) {
2935 				zio->io_error = SET_ERROR(ENXIO);
2936 			} else {
2937 				zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
2938 				    offset, zio->io_abd, zio->io_size,
2939 				    zio->io_type, zio->io_priority, 0,
2940 				    vdev_draid_spare_child_done, zio));
2941 			}
2942 		}
2943 		break;
2944 
2945 	case ZIO_TYPE_TRIM:
2946 		/* The vdev label ranges are never trimmed */
2947 		ASSERT0(VDEV_OFFSET_IS_LABEL(vd, zio->io_offset));
2948 
2949 		cvd = vdev_draid_spare_get_child(vd, offset);
2950 
2951 		if (cvd == NULL || !cvd->vdev_has_trim) {
2952 			zio->io_error = SET_ERROR(ENXIO);
2953 		} else {
2954 			zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
2955 			    offset, zio->io_abd, zio->io_size,
2956 			    zio->io_type, zio->io_priority, 0,
2957 			    vdev_draid_spare_child_done, zio));
2958 		}
2959 		break;
2960 
2961 	default:
2962 		zio->io_error = SET_ERROR(ENOTSUP);
2963 		break;
2964 	}
2965 
2966 	zio_execute(zio);
2967 }
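
/*
 * Worked example (offsets hypothetical): a 128K read issued to the spare
 * at io_offset L, where L lies past the label ranges, is re-issued to a
 * single child:
 *
 *	uint64_t offset = L - VDEV_LABEL_START_SIZE;
 *	vdev_t *cvd = vdev_draid_spare_get_child(vd, offset);
 *	// cvd is the dRAID child the permutation table maps this spare's
 *	// slot to at 'offset'; the child IO reuses the same translated
 *	// offset along with the original abd, size and priority.
 */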

static void
vdev_draid_spare_io_done(zio_t *zio)
{
	(void) zio;
}

/*
 * Look up the full spare config in spa->spa_spares.sav_config and
 * return the top_guid and spare_id for the named spare.
 */
static int
vdev_draid_spare_lookup(spa_t *spa, nvlist_t *nv, uint64_t *top_guidp,
    uint64_t *spare_idp)
{
	nvlist_t **spares;
	uint_t nspares;
	int error;

	if ((spa->spa_spares.sav_config == NULL) ||
	    (nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) != 0)) {
		return (SET_ERROR(ENOENT));
	}

	const char *spare_name;
	error = nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &spare_name);
	if (error != 0)
		return (SET_ERROR(EINVAL));

	for (int i = 0; i < nspares; i++) {
		nvlist_t *spare = spares[i];
		uint64_t top_guid, spare_id;
		const char *type, *path;

		/* Skip non-distributed spares */
		error = nvlist_lookup_string(spare, ZPOOL_CONFIG_TYPE, &type);
		if (error != 0 || strcmp(type, VDEV_TYPE_DRAID_SPARE) != 0)
			continue;

		/* Skip spares with the wrong name */
		error = nvlist_lookup_string(spare, ZPOOL_CONFIG_PATH, &path);
		if (error != 0 || strcmp(path, spare_name) != 0)
			continue;

		/* Found the matching spare */
		error = nvlist_lookup_uint64(spare,
		    ZPOOL_CONFIG_TOP_GUID, &top_guid);
		if (error == 0) {
			error = nvlist_lookup_uint64(spare,
			    ZPOOL_CONFIG_SPARE_ID, &spare_id);
		}

		if (error != 0) {
			return (SET_ERROR(EINVAL));
		} else {
			*top_guidp = top_guid;
			*spare_idp = spare_id;
			return (0);
		}
	}

	return (SET_ERROR(ENOENT));
}
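
/*
 * For reference, a matching ZPOOL_CONFIG_SPARES entry is expected to
 * look roughly like the following.  The path follows the
 * draid<parity>-<top-level vdev id>-<spare id> naming convention; the
 * values shown are hypothetical:
 *
 *	type:		'dspare'
 *	path:		'draid1-0-0'
 *	top_guid:	<guid of the dRAID top-level vdev>
 *	spare_id:	0
 */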

/*
 * Initialize private dRAID spare specific fields from the nvlist.
 */
static int
vdev_draid_spare_init(spa_t *spa, nvlist_t *nv, void **tsd)
{
	vdev_draid_spare_t *vds;
	uint64_t top_guid = 0;
	uint64_t spare_id;

	/*
	 * In the normal case, check the list of spares stored in the spa
	 * to look up the top_guid and spare_id for the provided spare
	 * config.  When creating a new pool or adding vdevs the spare
	 * list is not yet populated and the values are provided in the
	 * passed config.
	 */
	if (vdev_draid_spare_lookup(spa, nv, &top_guid, &spare_id) != 0) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_TOP_GUID,
		    &top_guid) != 0)
			return (SET_ERROR(EINVAL));

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_SPARE_ID,
		    &spare_id) != 0)
			return (SET_ERROR(EINVAL));
	}

	vds = kmem_alloc(sizeof (vdev_draid_spare_t), KM_SLEEP);
	vds->vds_draid_vdev = NULL;
	vds->vds_top_guid = top_guid;
	vds->vds_spare_id = spare_id;

	*tsd = vds;

	return (0);
}

static void
vdev_draid_spare_fini(vdev_t *vd)
{
	kmem_free(vd->vdev_tsd, sizeof (vdev_draid_spare_t));
}

static void
vdev_draid_spare_config_generate(vdev_t *vd, nvlist_t *nv)
{
	vdev_draid_spare_t *vds = vd->vdev_tsd;

	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_spare_ops);

	fnvlist_add_uint64(nv, ZPOOL_CONFIG_TOP_GUID, vds->vds_top_guid);
	fnvlist_add_uint64(nv, ZPOOL_CONFIG_SPARE_ID, vds->vds_spare_id);
}
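
/*
 * Note that these two keys are exactly the fallback values consumed by
 * vdev_draid_spare_init() when the spa's spare list is not yet
 * populated, e.g. (hypothetical values):
 *
 *	top_guid:	0x9c31...
 *	spare_id:	1
 *
 * This keeps configs generated here parseable during pool creation and
 * 'zpool add', before spa_spares is rebuilt.
 */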

vdev_ops_t vdev_draid_spare_ops = {
	.vdev_op_init = vdev_draid_spare_init,
	.vdev_op_fini = vdev_draid_spare_fini,
	.vdev_op_open = vdev_draid_spare_open,
	.vdev_op_close = vdev_draid_spare_close,
	.vdev_op_psize_to_asize = vdev_default_asize,
	.vdev_op_asize_to_psize = vdev_default_psize,
	.vdev_op_min_asize = vdev_default_min_asize,
	.vdev_op_min_alloc = NULL,
	.vdev_op_io_start = vdev_draid_spare_io_start,
	.vdev_op_io_done = vdev_draid_spare_io_done,
	.vdev_op_state_change = NULL,
	.vdev_op_need_resilver = NULL,
	.vdev_op_hold = NULL,
	.vdev_op_rele = NULL,
	.vdev_op_remap = NULL,
	.vdev_op_xlate = vdev_default_xlate,
	.vdev_op_rebuild_asize = NULL,
	.vdev_op_metaslab_init = NULL,
	.vdev_op_config_generate = vdev_draid_spare_config_generate,
	.vdev_op_nparity = NULL,
	.vdev_op_ndisks = NULL,
	.vdev_op_type = VDEV_TYPE_DRAID_SPARE,
	.vdev_op_leaf = B_TRUE,
};