xref: /freebsd/sys/contrib/openzfs/module/zfs/vdev_draid.c (revision f9590540c524607d22fa7e718c758725c4365375)
// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2018 Intel Corporation.
 * Copyright (c) 2020 by Lawrence Livermore National Security, LLC.
 * Copyright (c) 2025, Klara, Inc.
 * Copyright (c) 2026, Seagate Technology, LLC.
 * Copyright (c) 2026, Wasabi Technologies, Inc.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_draid.h>
#include <sys/vdev_raidz.h>
#include <sys/vdev_rebuild.h>
#include <sys/abd.h>
#include <sys/zio.h>
#include <sys/nvpair.h>
#include <sys/zio_checksum.h>
#include <sys/fs/zfs.h>
#include <sys/fm/fs/zfs.h>
#include <zfs_fletcher.h>

#ifdef ZFS_DEBUG
#include <sys/vdev.h>	/* For vdev_xlate() in vdev_draid_io_verify() */
#endif

/*
 * dRAID is a distributed spare implementation for ZFS. A dRAID vdev is
 * comprised of multiple raidz redundancy groups which are spread over the
 * dRAID children. To ensure an even distribution, and avoid hot spots, a
 * permutation mapping is applied to the order of the dRAID children.
 * This mixing effectively distributes the parity columns evenly over all
 * of the disks in the dRAID.
 *
 * This is beneficial because it means that when resilvering, all of the
 * disks can participate, thereby increasing the available IOPS and
 * bandwidth. Furthermore, by reserving a small fraction of each child's
 * total capacity, virtual distributed spare disks can be created. These
 * spares similarly benefit from the performance gains of spanning all of
 * the children. The consequence is that resilvering to a distributed
 * spare can substantially reduce the time required to restore full parity
 * to a pool with a failed disk.
 *
 * === dRAID group layout ===
 *
 * First, let's define a "row" in the configuration to be a 16M chunk from
 * each physical drive at the same offset. This is the minimum allowable
 * size since it must be possible to store a full 16M block when there is
 * only a single data column. Next, we define a "group" to be a set of
 * sequential disks containing both the parity and data columns. We allow
 * groups to span multiple rows in order to align any group size to any
 * number of physical drives. Finally, a "slice" is comprised of the rows
 * which contain the target number of groups. The permutation mappings
 * are applied in a round-robin fashion to each slice.
 *
 * Given D+P drives in a group (including parity drives) and C-S physical
 * drives (not including the spare drives), we can distribute the groups
 * across R rows without remainder by selecting the least common multiple
 * of D+P and C-S as the number of groups; i.e. ngroups = LCM(D+P, C-S).
 * (A sketch of this computation follows the draid_maps table below.)
 *
 * In the example below, there are C=14 physical drives in the configuration
 * with S=2 drives worth of spare capacity. Each group has a width of 9
 * which includes D=8 data and P=1 parity drive. There are 4 groups and
 * 3 rows per slice.  Each group has a size of 144M (16M * 9) and the slice
 * size is 576M (144M * 4). When allocating from a dRAID each group is
 * filled before moving on to the next as shown in slice0 below.
 *
 *             data disks (8 data + 1 parity)          spares (2)
 *     +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
 *  ^  | 2 | 6 | 1 | 11| 4 | 0 | 7 | 10| 8 | 9 | 13| 5 | 12| 3 | device map 0
 *  |  +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
 *  |  |              group 0              |  group 1..|       |
 *  |  +-----------------------------------+-----------+-------|
 *  |  | 0   1   2   3   4   5   6   7   8 | 36  37  38|       |  r
 *  |  | 9   10  11  12  13  14  15  16  17| 45  46  47|       |  o
 *  |  | 18  19  20  21  22  23  24  25  26| 54  55  56|       |  w
 *     | 27  28  29  30  31  32  33  34  35| 63  64  65|       |  0
 *  s  +-----------------------+-----------------------+-------+
 *  l  |       ..group 1       |        group 2..      |       |
 *  i  +-----------------------+-----------------------+-------+
 *  c  | 39  40  41  42  43  44| 72  73  74  75  76  77|       |  r
 *  e  | 48  49  50  51  52  53| 81  82  83  84  85  86|       |  o
 *  0  | 57  58  59  60  61  62| 90  91  92  93  94  95|       |  w
 *     | 66  67  68  69  70  71| 99 100 101 102 103 104|       |  1
 *  |  +-----------+-----------+-----------------------+-------+
 *  |  |..group 2  |            group 3                |       |
 *  |  +-----------+-----------+-----------------------+-------+
 *  |  | 78  79  80|108 109 110 111 112 113 114 115 116|       |  r
 *  |  | 87  88  89|117 118 119 120 121 122 123 124 125|       |  o
 *  |  | 96  97  98|126 127 128 129 130 131 132 133 134|       |  w
 *  v  |105 106 107|135 136 137 138 139 140 141 142 143|       |  2
 *     +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
 *     | 9 | 11| 12| 2 | 4 | 1 | 3 | 0 | 10| 13| 8 | 5 | 6 | 7 | device map 1
 *  s  +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
 *  l  |              group 4              |  group 5..|       | row 3
 *  i  +-----------------------+-----------+-----------+-------|
 *  c  |       ..group 5       |        group 6..      |       | row 4
 *  e  +-----------+-----------+-----------------------+-------+
 *  1  |..group 6  |            group 7                |       | row 5
 *     +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
 *     | 3 | 5 | 10| 8 | 6 | 11| 12| 0 | 2 | 4 | 7 | 1 | 9 | 13| device map 2
 *  s  +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
 *  l  |              group 8              |  group 9..|       | row 6
 *  i  +-----------------------------------------------+-------|
 *  c  |       ..group 9       |        group 10..     |       | row 7
 *  e  +-----------------------+-----------------------+-------+
 *  2  |..group 10 |            group 11               |       | row 8
 *     +-----------+-----------------------------------+-------+
 *
 * This layout has several advantages over requiring that each row contain
 * a whole number of groups.
 *
 * 1. The group count is not a relevant parameter when defining a dRAID
 *    layout. Only the group width is needed, and *all* groups will have
 *    the desired size.
 *
 * 2. All possible group widths (<= physical disk count) can be supported.
 *
 * 3. The logic within vdev_draid.c is simplified when the group width is
 *    the same for all groups (although some of the logic around computing
 *    permutation numbers and drive offsets is more complicated).
 *
 * === dRAID failure domains ===
 *
 * If we place several slices side by side in a row and configure each disk
 * in a slice to be from a different failure domain (for example, an
 * enclosure), we can then tolerate the failure of a whole domain -- at
 * most one device will fail in each slice in this case. We call the column
 * of such slices a failure group, and the row of such slices placed side
 * by side a "big width row", its width being a multiple of the child
 * count (W = C*n).
 *
 * Here's an example of a configuration with 7 failure domains and two
 * failure groups:
 *
 *         7 C disks in each slice, 2 slices in big 14 W rows
 *      +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
 *      | 1 | 7 | 3 | 9 | 11| 5 | 13| 6 | 10| 4 | 8 | 0 | 12| 2 | device map 0
 *   s  +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
 *   l  |    group 0    |  gr1..| S |    group 3    | gr4.. | S | row 0
 *   c  +-------+-------+-------+---+-------+-------+-------+---+
 *  0,1 | ..gr1 |    group 2    | S | ..gr4 |   group 5     | S | row 1
 *      +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
 *      | 2 | 10| 12| 7 | 8 | 13| 11| 1 | 5 | 4 | 6 | 3 | 9 | 0 | device map 1
 *   s  +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
 *   l  |    group 6    |  gr7..| S |    group 9    |gr10.. | S | row 2
 *   c  +-------+-------+-------+---+---------------+-------+---+
 *  2,3 | ..gr7 |    group 8    | S |..gr10 |   group 11    | S | row 3
 *      +-------+---------------+---+-------+---------------+---+
 *            failure group 0            failure group 1
 *
 * In practice, there may be many more failure groups. And in theory, the
 * width of the big rows could be much larger than the current limit of
 * 255 imposed on the number of children. But for now we keep the same
 * limit for the sake of implementation simplicity.
 *
 * In order to preserve fast sequential resilvering in case of a disk
 * failure, all failure groups must share all disks between themselves,
 * which is achieved by shuffling the disks between the groups. However,
 * only the i-th disks of each group are shuffled among themselves, i.e.
 * the disks from the same failure domain (enclosure). After that, they
 * are shuffled within each group. Thus, no more than one disk from any
 * failure domain can appear in any failure group as a result of this
 * shuffling. In the above example, you won't find any tuple of (0, 7) or
 * (1, 8) or (2, 9) or ... (6, 13) mapped to the same slice. This is done
 * in vdev_draid_shuffle_perms().
 *
 * Spare disks are evenly distributed among failure groups, and they are
 * shared by all groups.  However, to support domain failure, we cannot
 * have more than (nparity - 1) failed disks in any group, regardless of
 * whether they have been rebuilt to draid spares or not (the blocks of
 * those spares can be mapped to the disks from the failed domain
 * (enclosure), and we cannot tolerate more than nparity failures in any
 * failure group).
 *
 *
 * N.B. The following array describes all valid dRAID permutation maps.
 * Each row is used to generate a permutation map for a different number
 * of children from a unique seed. The seeds were generated and carefully
 * evaluated by the 'draid' utility in order to provide balanced mappings.
 * In addition to the seed a checksum of the in-memory mapping is stored
 * for verification.
 *
 * The imbalance ratio of a given failure (e.g. 5 disks wide, child 3 failed,
 * with a given permutation map) is the ratio of the amounts of I/O that will
 * be sent to the least and most busy disks when resilvering. The average
 * imbalance ratio (of a given number of disks and permutation map) is the
 * average of the ratios of all possible single and double disk failures.
 *
 * In order to achieve a low imbalance ratio the number of permutations in
 * the mapping must be significantly larger than the number of children.
 * For dRAID the number of permutations has been limited to 512 to minimize
 * the map size. This does result in a gradually increasing imbalance ratio
 * as seen in the table below. Increasing the number of permutations for
 * larger child counts would reduce the imbalance ratio. However, in practice
 * when there are a large number of children each child is responsible for
 * fewer total IOs so it's less of a concern.
 *
 * Note that these values are hard-coded and must never be changed.
 * Existing pools depend on the same mapping always being generated in
 * order to read and write from the correct locations.  Any change would
 * make existing pools completely inaccessible.
 */
static const draid_map_t draid_maps[VDEV_DRAID_MAX_MAPS] = {
	{   2, 256, 0x89ef3dabbcc7de37, 0x00000000433d433d },	/* 1.000 */
	{   3, 256, 0x89a57f3de98121b4, 0x00000000bcd8b7b5 },	/* 1.000 */
	{   4, 256, 0xc9ea9ec82340c885, 0x00000001819d7c69 },	/* 1.000 */
	{   5, 256, 0xf46733b7f4d47dfd, 0x00000002a1648d74 },	/* 1.010 */
	{   6, 256, 0x88c3c62d8585b362, 0x00000003d3b0c2c4 },	/* 1.031 */
	{   7, 256, 0x3a65d809b4d1b9d5, 0x000000055c4183ee },	/* 1.043 */
	{   8, 256, 0xe98930e3c5d2e90a, 0x00000006edfb0329 },	/* 1.059 */
	{   9, 256, 0x5a5430036b982ccb, 0x00000008ceaf6934 },	/* 1.056 */
	{  10, 256, 0x92bf389e9eadac74, 0x0000000b26668c09 },	/* 1.072 */
	{  11, 256, 0x74ccebf1dcf3ae80, 0x0000000dd691358c },	/* 1.083 */
	{  12, 256, 0x8847e41a1a9f5671, 0x00000010a0c63c8e },	/* 1.097 */
	{  13, 256, 0x7481b56debf0e637, 0x0000001424121fe4 },	/* 1.100 */
	{  14, 256, 0x559b8c44065f8967, 0x00000016ab2ff079 },	/* 1.121 */
	{  15, 256, 0x34c49545a2ee7f01, 0x0000001a6028efd6 },	/* 1.103 */
	{  16, 256, 0xb85f4fa81a7698f7, 0x0000001e95ff5e66 },	/* 1.111 */
	{  17, 256, 0x6353e47b7e47aba0, 0x00000021a81fa0fe },	/* 1.133 */
	{  18, 256, 0xaa549746b1cbb81c, 0x00000026f02494c9 },	/* 1.131 */
	{  19, 256, 0x892e343f2f31d690, 0x00000029eb392835 },	/* 1.130 */
	{  20, 256, 0x76914824db98cc3f, 0x0000003004f31a7c },	/* 1.141 */
	{  21, 256, 0x4b3cbabf9cfb1d0f, 0x00000036363a2408 },	/* 1.139 */
	{  22, 256, 0xf45c77abb4f035d4, 0x00000038dd0f3e84 },	/* 1.150 */
	{  23, 256, 0x5e18bd7f3fd4baf4, 0x0000003f0660391f },	/* 1.174 */
	{  24, 256, 0xa7b3a4d285d6503b, 0x000000443dfc9ff6 },	/* 1.168 */
	{  25, 256, 0x56ac7dd967521f5a, 0x0000004b03a87eb7 },	/* 1.180 */
	{  26, 256, 0x3a42dfda4eb880f7, 0x000000522c719bba },	/* 1.226 */
	{  27, 256, 0xd200d2fc6b54bf60, 0x0000005760b4fdf5 },	/* 1.228 */
	{  28, 256, 0xc52605bbd486c546, 0x0000005e00d8f74c },	/* 1.217 */
	{  29, 256, 0xc761779e63cd762f, 0x00000067be3cd85c },	/* 1.239 */
	{  30, 256, 0xca577b1e07f85ca5, 0x0000006f5517f3e4 },	/* 1.238 */
	{  31, 256, 0xfd50a593c518b3d4, 0x0000007370e7778f },	/* 1.273 */
	{  32, 512, 0xc6c87ba5b042650b, 0x000000f7eb08a156 },	/* 1.191 */
	{  33, 512, 0xc3880d0c9d458304, 0x0000010734b5d160 },	/* 1.199 */
	{  34, 512, 0xe920927e4d8b2c97, 0x00000118c1edbce0 },	/* 1.195 */
	{  35, 512, 0x8da7fcda87bde316, 0x0000012a3e9f9110 },	/* 1.201 */
	{  36, 512, 0xcf09937491514a29, 0x0000013bd6a24bef },	/* 1.194 */
	{  37, 512, 0x9b5abbf345cbd7cc, 0x0000014b9d90fac3 },	/* 1.237 */
	{  38, 512, 0x506312a44668d6a9, 0x0000015e1b5f6148 },	/* 1.242 */
	{  39, 512, 0x71659ede62b4755f, 0x00000173ef029bcd },	/* 1.231 */
	{  40, 512, 0xa7fde73fb74cf2d7, 0x000001866fb72748 },	/* 1.233 */
	{  41, 512, 0x19e8b461a1dea1d3, 0x000001a046f76b23 },	/* 1.271 */
	{  42, 512, 0x031c9b868cc3e976, 0x000001afa64c49d3 },	/* 1.263 */
	{  43, 512, 0xbaa5125faa781854, 0x000001c76789e278 },	/* 1.270 */
	{  44, 512, 0x4ed55052550d721b, 0x000001d800ccd8eb },	/* 1.281 */
	{  45, 512, 0x0fd63ddbdff90677, 0x000001f08ad59ed2 },	/* 1.282 */
	{  46, 512, 0x36d66546de7fdd6f, 0x000002016f09574b },	/* 1.286 */
	{  47, 512, 0x99f997e7eafb69d7, 0x0000021e42e47cb6 },	/* 1.329 */
	{  48, 512, 0xbecd9c2571312c5d, 0x000002320fe2872b },	/* 1.286 */
	{  49, 512, 0xd97371329e488a32, 0x0000024cd73f2ca7 },	/* 1.322 */
	{  50, 512, 0x30e9b136670749ee, 0x000002681c83b0e0 },	/* 1.335 */
	{  51, 512, 0x11ad6bc8f47aaeb4, 0x0000027e9261b5d5 },	/* 1.305 */
	{  52, 512, 0x68e445300af432c1, 0x0000029aa0eb7dbf },	/* 1.330 */
	{  53, 512, 0x910fb561657ea98c, 0x000002b3dca04853 },	/* 1.365 */
	{  54, 512, 0xd619693d8ce5e7a5, 0x000002cc280e9c97 },	/* 1.334 */
	{  55, 512, 0x24e281f564dbb60a, 0x000002e9fa842713 },	/* 1.364 */
	{  56, 512, 0x947a7d3bdaab44c5, 0x000003046680f72e },	/* 1.374 */
	{  57, 512, 0x2d44fec9c093e0de, 0x00000324198ba810 },	/* 1.363 */
	{  58, 512, 0x87743c272d29bb4c, 0x0000033ec48c9ac9 },	/* 1.401 */
	{  59, 512, 0x96aa3b6f67f5d923, 0x0000034faead902c },	/* 1.392 */
	{  60, 512, 0x94a4f1faf520b0d3, 0x0000037d713ab005 },	/* 1.360 */
	{  61, 512, 0xb13ed3a272f711a2, 0x00000397368f3cbd },	/* 1.396 */
	{  62, 512, 0x3b1b11805fa4a64a, 0x000003b8a5e2840c },	/* 1.453 */
	{  63, 512, 0x4c74caad9172ba71, 0x000003d4be280290 },	/* 1.437 */
	{  64, 512, 0x035ff643923dd29e, 0x000003fad6c355e1 },	/* 1.402 */
	{  65, 512, 0x768e9171b11abd3c, 0x0000040eb07fed20 },	/* 1.459 */
	{  66, 512, 0x75880e6f78a13ddd, 0x000004433d6acf14 },	/* 1.423 */
	{  67, 512, 0x910b9714f698a877, 0x00000451ea65d5db },	/* 1.447 */
	{  68, 512, 0x87f5db6f9fdcf5c7, 0x000004732169e3f7 },	/* 1.450 */
	{  69, 512, 0x836d4968fbaa3706, 0x000004954068a380 },	/* 1.455 */
	{  70, 512, 0xc567d73a036421ab, 0x000004bd7cb7bd3d },	/* 1.463 */
	{  71, 512, 0x619df40f240b8fed, 0x000004e376c2e972 },	/* 1.463 */
	{  72, 512, 0x42763a680d5bed8e, 0x000005084275c680 },	/* 1.452 */
	{  73, 512, 0x5866f064b3230431, 0x0000052906f2c9ab },	/* 1.498 */
	{  74, 512, 0x9fa08548b1621a44, 0x0000054708019247 },	/* 1.526 */
	{  75, 512, 0xb6053078ce0fc303, 0x00000572cc5c72b0 },	/* 1.491 */
	{  76, 512, 0x4a7aad7bf3890923, 0x0000058e987bc8e9 },	/* 1.470 */
	{  77, 512, 0xe165613fd75b5a53, 0x000005c20473a211 },	/* 1.527 */
	{  78, 512, 0x3ff154ac878163a6, 0x000005d659194bf3 },	/* 1.509 */
	{  79, 512, 0x24b93ade0aa8a532, 0x0000060a201c4f8e },	/* 1.569 */
	{  80, 512, 0xc18e2d14cd9bb554, 0x0000062c55cfe48c },	/* 1.555 */
	{  81, 512, 0x98cc78302feb58b6, 0x0000066656a07194 },	/* 1.509 */
	{  82, 512, 0xc6c5fd5a2abc0543, 0x0000067cff94fbf8 },	/* 1.596 */
	{  83, 512, 0xa7962f514acbba21, 0x000006ab7b5afa2e },	/* 1.568 */
	{  84, 512, 0xba02545069ddc6dc, 0x000006d19861364f },	/* 1.541 */
	{  85, 512, 0x447c73192c35073e, 0x000006fce315ce35 },	/* 1.623 */
	{  86, 512, 0x48beef9e2d42b0c2, 0x00000720a8e38b6b },	/* 1.620 */
	{  87, 512, 0x4874cf98541a35e0, 0x00000758382a2273 },	/* 1.597 */
	{  88, 512, 0xad4cf8333a31127a, 0x00000781e1651b1b },	/* 1.575 */
	{  89, 512, 0x47ae4859d57888c1, 0x000007b27edbe5bc },	/* 1.627 */
	{  90, 512, 0x06f7723cfe5d1891, 0x000007dc2a96d8eb },	/* 1.596 */
	{  91, 512, 0xd4e44218d660576d, 0x0000080ac46f02d5 },	/* 1.622 */
	{  92, 512, 0x7066702b0d5be1f2, 0x00000832c96d154e },	/* 1.695 */
	{  93, 512, 0x011209b4f9e11fb9, 0x0000085eefda104c },	/* 1.605 */
	{  94, 512, 0x47ffba30a0b35708, 0x00000899badc32dc },	/* 1.625 */
	{  95, 512, 0x1a95a6ac4538aaa8, 0x000008b6b69a42b2 },	/* 1.687 */
	{  96, 512, 0xbda2b239bb2008eb, 0x000008f22d2de38a },	/* 1.621 */
	{  97, 512, 0x7ffa0bea90355c6c, 0x0000092e5b23b816 },	/* 1.699 */
	{  98, 512, 0x1d56ba34be426795, 0x0000094f482e5d1b },	/* 1.688 */
	{  99, 512, 0x0aa89d45c502e93d, 0x00000977d94a98ce },	/* 1.642 */
	{ 100, 512, 0x54369449f6857774, 0x000009c06c9b34cc },	/* 1.683 */
	{ 101, 512, 0xf7d4dd8445b46765, 0x000009e5dc542259 },	/* 1.755 */
	{ 102, 512, 0xfa8866312f169469, 0x00000a16b54eae93 },	/* 1.692 */
	{ 103, 512, 0xd8a5aea08aef3ff9, 0x00000a381d2cbfe7 },	/* 1.747 */
	{ 104, 512, 0x66bcd2c3d5f9ef0e, 0x00000a8191817be7 },	/* 1.751 */
	{ 105, 512, 0x3fb13a47a012ec81, 0x00000ab562b9a254 },	/* 1.751 */
	{ 106, 512, 0x43100f01c9e5e3ca, 0x00000aeee84c185f },	/* 1.726 */
	{ 107, 512, 0xca09c50ccee2d054, 0x00000b1c359c047d },	/* 1.788 */
	{ 108, 512, 0xd7176732ac503f9b, 0x00000b578bc52a73 },	/* 1.740 */
	{ 109, 512, 0xed206e51f8d9422d, 0x00000b8083e0d960 },	/* 1.780 */
	{ 110, 512, 0x17ead5dc6ba0dcd6, 0x00000bcfb1a32ca8 },	/* 1.836 */
	{ 111, 512, 0x5f1dc21e38a969eb, 0x00000c0171becdd6 },	/* 1.778 */
	{ 112, 512, 0xddaa973de33ec528, 0x00000c3edaba4b95 },	/* 1.831 */
	{ 113, 512, 0x2a5eccd7735a3630, 0x00000c630664e7df },	/* 1.825 */
	{ 114, 512, 0xafcccee5c0b71446, 0x00000cb65392f6e4 },	/* 1.826 */
	{ 115, 512, 0x8fa30c5e7b147e27, 0x00000cd4db391e55 },	/* 1.843 */
	{ 116, 512, 0x5afe0711fdfafd82, 0x00000d08cb4ec35d },	/* 1.826 */
	{ 117, 512, 0x533a6090238afd4c, 0x00000d336f115d1b },	/* 1.803 */
	{ 118, 512, 0x90cf11b595e39a84, 0x00000d8e041c2048 },	/* 1.857 */
	{ 119, 512, 0x0d61a3b809444009, 0x00000dcb798afe35 },	/* 1.877 */
	{ 120, 512, 0x7f34da0f54b0d114, 0x00000df3922664e1 },	/* 1.849 */
	{ 121, 512, 0xa52258d5b72f6551, 0x00000e4d37a9872d },	/* 1.867 */
	{ 122, 512, 0xc1de54d7672878db, 0x00000e6583a94cf6 },	/* 1.978 */
	{ 123, 512, 0x1d03354316a414ab, 0x00000ebffc50308d },	/* 1.947 */
	{ 124, 512, 0xcebdcc377665412c, 0x00000edee1997cea },	/* 1.865 */
	{ 125, 512, 0x4ddd4c04b1a12344, 0x00000f21d64b373f },	/* 1.881 */
	{ 126, 512, 0x64fc8f94e3973658, 0x00000f8f87a8896b },	/* 1.882 */
	{ 127, 512, 0x68765f78034a334e, 0x00000fb8fe62197e },	/* 1.867 */
	{ 128, 512, 0xaf36b871a303e816, 0x00000fec6f3afb1e },	/* 1.972 */
	{ 129, 512, 0x2a4cbf73866c3a28, 0x00001027febfe4e5 },	/* 1.896 */
	{ 130, 512, 0x9cb128aacdcd3b2f, 0x0000106aa8ac569d },	/* 1.965 */
	{ 131, 512, 0x5511d41c55869124, 0x000010bbd755ddf1 },	/* 1.963 */
	{ 132, 512, 0x42f92461937f284a, 0x000010fb8bceb3b5 },	/* 1.925 */
	{ 133, 512, 0xe2d89a1cf6f1f287, 0x0000114cf5331e34 },	/* 1.862 */
	{ 134, 512, 0xdc631a038956200e, 0x0000116428d2adc5 },	/* 2.042 */
	{ 135, 512, 0xb2e5ac222cd236be, 0x000011ca88e4d4d2 },	/* 1.935 */
	{ 136, 512, 0xbc7d8236655d88e7, 0x000011e39cb94e66 },	/* 2.005 */
	{ 137, 512, 0x073e02d88d2d8e75, 0x0000123136c7933c },	/* 2.041 */
	{ 138, 512, 0x3ddb9c3873166be0, 0x00001280e4ec6d52 },	/* 1.997 */
	{ 139, 512, 0x7d3b1a845420e1b5, 0x000012c2e7cd6a44 },	/* 1.996 */
	{ 140, 512, 0x60102308aa7b2a6c, 0x000012fc490e6c7d },	/* 2.053 */
	{ 141, 512, 0xdb22bb2f9eb894aa, 0x00001343f5a85a1a },	/* 1.971 */
	{ 142, 512, 0xd853f879a13b1606, 0x000013bb7d5f9048 },	/* 2.018 */
	{ 143, 512, 0x001620a03f804b1d, 0x000013e74cc794fd },	/* 1.961 */
	{ 144, 512, 0xfdb52dda76fbf667, 0x00001442d2f22480 },	/* 2.046 */
	{ 145, 512, 0xa9160110f66e24ff, 0x0000144b899f9dbb },	/* 1.968 */
	{ 146, 512, 0x77306a30379ae03b, 0x000014cb98eb1f81 },	/* 2.143 */
	{ 147, 512, 0x14f5985d2752319d, 0x000014feab821fc9 },	/* 2.064 */
	{ 148, 512, 0xa4b8ff11de7863f8, 0x0000154a0e60b9c9 },	/* 2.023 */
	{ 149, 512, 0x44b345426455c1b3, 0x000015999c3c569c },	/* 2.136 */
	{ 150, 512, 0x272677826049b46c, 0x000015c9697f4b92 },	/* 2.063 */
	{ 151, 512, 0x2f9216e2cd74fe40, 0x0000162b1f7bbd39 },	/* 1.974 */
	{ 152, 512, 0x706ae3e763ad8771, 0x00001661371c55e1 },	/* 2.210 */
	{ 153, 512, 0xf7fd345307c2480e, 0x000016e251f28b6a },	/* 2.006 */
	{ 154, 512, 0x6e94e3d26b3139eb, 0x000016f2429bb8c6 },	/* 2.193 */
	{ 155, 512, 0x5458bbfbb781fcba, 0x0000173efdeca1b9 },	/* 2.163 */
	{ 156, 512, 0xa80e2afeccd93b33, 0x000017bfdcb78adc },	/* 2.046 */
	{ 157, 512, 0x1e4ccbb22796cf9d, 0x00001826fdcc39c9 },	/* 2.084 */
	{ 158, 512, 0x8fba4b676aaa3663, 0x00001841a1379480 },	/* 2.264 */
	{ 159, 512, 0xf82b843814b315fa, 0x000018886e19b8a3 },	/* 2.074 */
	{ 160, 512, 0x7f21e920ecf753a3, 0x0000191812ca0ea7 },	/* 2.282 */
	{ 161, 512, 0x48bb8ea2c4caa620, 0x0000192f310faccf },	/* 2.148 */
	{ 162, 512, 0x5cdb652b4952c91b, 0x0000199e1d7437c7 },	/* 2.355 */
	{ 163, 512, 0x6ac1ba6f78c06cd4, 0x000019cd11f82c70 },	/* 2.164 */
	{ 164, 512, 0x9faf5f9ca2669a56, 0x00001a18d5431f6a },	/* 2.393 */
	{ 165, 512, 0xaa57e9383eb01194, 0x00001a9e7d253d85 },	/* 2.178 */
	{ 166, 512, 0x896967bf495c34d2, 0x00001afb8319b9fc },	/* 2.334 */
	{ 167, 512, 0xdfad5f05de225f1b, 0x00001b3a59c3093b },	/* 2.266 */
	{ 168, 512, 0xfd299a99f9f2abdd, 0x00001bb6f1a10799 },	/* 2.304 */
	{ 169, 512, 0xdda239e798fe9fd4, 0x00001bfae0c9692d },	/* 2.218 */
	{ 170, 512, 0x5fca670414a32c3e, 0x00001c22129dbcff },	/* 2.377 */
	{ 171, 512, 0x1bb8934314b087de, 0x00001c955db36cd0 },	/* 2.155 */
	{ 172, 512, 0xd96394b4b082200d, 0x00001cfc8619b7e6 },	/* 2.404 */
	{ 173, 512, 0xb612a7735b1c8cbc, 0x00001d303acdd585 },	/* 2.205 */
	{ 174, 512, 0x28e7430fe5875fe1, 0x00001d7ed5b3697d },	/* 2.359 */
	{ 175, 512, 0x5038e89efdd981b9, 0x00001dc40ec35c59 },	/* 2.158 */
	{ 176, 512, 0x075fd78f1d14db7c, 0x00001e31c83b4a2b },	/* 2.614 */
	{ 177, 512, 0xc50fafdb5021be15, 0x00001e7cdac82fbc },	/* 2.239 */
	{ 178, 512, 0xe6dc7572ce7b91c7, 0x00001edd8bb454fc },	/* 2.493 */
	{ 179, 512, 0x21f7843e7beda537, 0x00001f3a8e019d6c },	/* 2.327 */
	{ 180, 512, 0xc83385e20b43ec82, 0x00001f70735ec137 },	/* 2.231 */
	{ 181, 512, 0xca818217dddb21fd, 0x0000201ca44c5a3c },	/* 2.237 */
	{ 182, 512, 0xe6035defea48f933, 0x00002038e3346658 },	/* 2.691 */
	{ 183, 512, 0x47262a4f953dac5a, 0x000020c2e554314e },	/* 2.170 */
	{ 184, 512, 0xe24c7246260873ea, 0x000021197e618d64 },	/* 2.600 */
	{ 185, 512, 0xeef6b57c9b58e9e1, 0x0000217ea48ecddc },	/* 2.391 */
	{ 186, 512, 0x2becd3346e386142, 0x000021c496d4a5f9 },	/* 2.677 */
	{ 187, 512, 0x63c6207bdf3b40a3, 0x0000220e0f2eec0c },	/* 2.410 */
	{ 188, 512, 0x3056ce8989767d4b, 0x0000228eb76cd137 },	/* 2.776 */
	{ 189, 512, 0x91af61c307cee780, 0x000022e17e2ea501 },	/* 2.266 */
	{ 190, 512, 0xda359da225f6d54f, 0x00002358a2debc19 },	/* 2.717 */
	{ 191, 512, 0x0a5f7a2a55607ba0, 0x0000238a79dac18c },	/* 2.474 */
	{ 192, 512, 0x27bb75bf5224638a, 0x00002403a58e2351 },	/* 2.673 */
	{ 193, 512, 0x1ebfdb94630f5d0f, 0x00002492a10cb339 },	/* 2.420 */
	{ 194, 512, 0x6eae5e51d9c5f6fb, 0x000024ce4bf98715 },	/* 2.898 */
	{ 195, 512, 0x08d903b4daedc2e0, 0x0000250d1e15886c },	/* 2.363 */
	{ 196, 512, 0xc722a2f7fa7cd686, 0x0000258a99ed0c9e },	/* 2.747 */
	{ 197, 512, 0x8f71faf0e54e361d, 0x000025dee11976f5 },	/* 2.531 */
	{ 198, 512, 0x87f64695c91a54e7, 0x0000264e00a43da0 },	/* 2.707 */
	{ 199, 512, 0xc719cbac2c336b92, 0x000026d327277ac1 },	/* 2.315 */
	{ 200, 512, 0xe7e647afaf771ade, 0x000027523a5c44bf },	/* 3.012 */
	{ 201, 512, 0x12d4b5c38ce8c946, 0x0000273898432545 },	/* 2.378 */
	{ 202, 512, 0xf2e0cd4067bdc94a, 0x000027e47bb2c935 },	/* 2.969 */
	{ 203, 512, 0x21b79f14d6d947d3, 0x0000281e64977f0d },	/* 2.594 */
	{ 204, 512, 0x515093f952f18cd6, 0x0000289691a473fd },	/* 2.763 */
	{ 205, 512, 0xd47b160a1b1022c8, 0x00002903e8b52411 },	/* 2.457 */
	{ 206, 512, 0xc02fc96684715a16, 0x0000297515608601 },	/* 3.057 */
	{ 207, 512, 0xef51e68efba72ed0, 0x000029ef73604804 },	/* 2.590 */
	{ 208, 512, 0x9e3be6e5448b4f33, 0x00002a2846ed074b },	/* 3.047 */
	{ 209, 512, 0x81d446c6d5fec063, 0x00002a92ca693455 },	/* 2.676 */
	{ 210, 512, 0xff215de8224e57d5, 0x00002b2271fe3729 },	/* 2.993 */
	{ 211, 512, 0xe2524d9ba8f69796, 0x00002b64b99c3ba2 },	/* 2.457 */
	{ 212, 512, 0xf6b28e26097b7e4b, 0x00002bd768b6e068 },	/* 3.182 */
	{ 213, 512, 0x893a487f30ce1644, 0x00002c67f722b4b2 },	/* 2.563 */
	{ 214, 512, 0x386566c3fc9871df, 0x00002cc1cf8b4037 },	/* 3.025 */
	{ 215, 512, 0x1e0ed78edf1f558a, 0x00002d3948d36c7f },	/* 2.730 */
	{ 216, 512, 0xe3bc20c31e61f113, 0x00002d6d6b12e025 },	/* 3.036 */
	{ 217, 512, 0xd6c3ad2e23021882, 0x00002deff7572241 },	/* 2.722 */
	{ 218, 512, 0xb4a9f95cf0f69c5a, 0x00002e67d537aa36 },	/* 3.356 */
	{ 219, 512, 0x6e98ed6f6c38e82f, 0x00002e9720626789 },	/* 2.697 */
	{ 220, 512, 0x2e01edba33fddac7, 0x00002f407c6b0198 },	/* 2.979 */
	{ 221, 512, 0x559d02e1f5f57ccc, 0x00002fb6a5ab4f24 },	/* 2.858 */
	{ 222, 512, 0xac18f5a916adcd8e, 0x0000304ae1c5c57e },	/* 3.258 */
	{ 223, 512, 0x15789fbaddb86f4b, 0x0000306f6e019c78 },	/* 2.693 */
	{ 224, 512, 0xf4a9c36d5bc4c408, 0x000030da40434213 },	/* 3.259 */
	{ 225, 512, 0xf640f90fd2727f44, 0x00003189ed37b90c },	/* 2.733 */
	{ 226, 512, 0xb5313d390d61884a, 0x000031e152616b37 },	/* 3.235 */
	{ 227, 512, 0x4bae6b3ce9160939, 0x0000321f40aeac42 },	/* 2.983 */
	{ 228, 512, 0x838c34480f1a66a1, 0x000032f389c0f78e },	/* 3.308 */
	{ 229, 512, 0xb1c4a52c8e3d6060, 0x0000330062a40284 },	/* 2.715 */
	{ 230, 512, 0xe0f1110c6d0ed822, 0x0000338be435644f },	/* 3.540 */
	{ 231, 512, 0x9f1a8ccdcea68d4b, 0x000034045a4e97e1 },	/* 2.779 */
	{ 232, 512, 0x3261ed62223f3099, 0x000034702cfc401c },	/* 3.084 */
	{ 233, 512, 0xf2191e2311022d65, 0x00003509dd19c9fc },	/* 2.987 */
	{ 234, 512, 0xf102a395c2033abc, 0x000035654dc96fae },	/* 3.341 */
	{ 235, 512, 0x11fe378f027906b6, 0x000035b5193b0264 },	/* 2.793 */
	{ 236, 512, 0xf777f2c026b337aa, 0x000036704f5d9297 },	/* 3.518 */
	{ 237, 512, 0x1b04e9c2ee143f32, 0x000036dfbb7af218 },	/* 2.962 */
	{ 238, 512, 0x2fcec95266f9352c, 0x00003785c8df24a9 },	/* 3.196 */
	{ 239, 512, 0xfe2b0e47e427dd85, 0x000037cbdf5da729 },	/* 2.914 */
	{ 240, 512, 0x72b49bf2225f6c6d, 0x0000382227c15855 },	/* 3.408 */
	{ 241, 512, 0x50486b43df7df9c7, 0x0000389b88be6453 },	/* 2.903 */
	{ 242, 512, 0x5192a3e53181c8ab, 0x000038ddf3d67263 },	/* 3.778 */
	{ 243, 512, 0xe9f5d8365296fd5e, 0x0000399f1c6c9e9c },	/* 3.026 */
	{ 244, 512, 0xc740263f0301efa8, 0x00003a147146512d },	/* 3.347 */
	{ 245, 512, 0x23cd0f2b5671e67d, 0x00003ab10bcc0d9d },	/* 3.212 */
	{ 246, 512, 0x002ccc7e5cd41390, 0x00003ad6cd14a6c0 },	/* 3.482 */
	{ 247, 512, 0x9aafb3c02544b31b, 0x00003b8cb8779fb0 },	/* 3.146 */
	{ 248, 512, 0x72ba07a78b121999, 0x00003c24142a5a3f },	/* 3.626 */
	{ 249, 512, 0x3d784aa58edfc7b4, 0x00003cd084817d99 },	/* 2.952 */
	{ 250, 512, 0xaab750424d8004af, 0x00003d506a8e098e },	/* 3.463 */
	{ 251, 512, 0x84403fcf8e6b5ca2, 0x00003d4c54c2aec4 },	/* 3.131 */
	{ 252, 512, 0x71eb7455ec98e207, 0x00003e655715cf2c },	/* 3.538 */
	{ 253, 512, 0xd752b4f19301595b, 0x00003ecd7b2ca5ac },	/* 2.974 */
	{ 254, 512, 0xc4674129750499de, 0x00003e99e86d3e95 },	/* 3.843 */
	{ 255, 512, 0x9772baff5cd12ef5, 0x00003f895c019841 },	/* 3.088 */
};
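
/*
 * Illustrative sketch (disabled; not part of the driver): the group and
 * row accounting from the "dRAID group layout" comment above.  With
 * D+P = 9 and C-S = 12, as in that example, LCM(9, 12) = 36 disk slots
 * per slice, i.e. 36 / 9 = 4 groups spread over 36 / 12 = 3 rows,
 * matching the slice0 diagram.  The draid_sketch_* names are hypothetical
 * and exist only for this sketch.
 */
#if 0
static uint64_t
draid_sketch_gcd(uint64_t a, uint64_t b)
{
	while (b != 0) {
		uint64_t t = b;
		b = a % b;
		a = t;
	}
	return (a);
}

static void
draid_sketch_layout(uint64_t groupwidth, uint64_t ndisks)
{
	/* Disk slots per slice: LCM(D+P, C-S). */
	uint64_t slots = (groupwidth /
	    draid_sketch_gcd(groupwidth, ndisks)) * ndisks;
	uint64_t ngroups = slots / groupwidth;	/* 4 in the example above */
	uint64_t nrows = slots / ndisks;	/* 3 in the example above */

	(void) ngroups;
	(void) nrows;
}
#endif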

/*
 * Verify the map is valid. Each device index must appear exactly
 * once in every row, and the permutation array checksum must match.
 */
static int
verify_perms(uint8_t *perms, uint64_t children, uint64_t nperms,
    uint64_t checksum)
{
	int countssz = sizeof (uint16_t) * children;
	uint16_t *counts = kmem_zalloc(countssz, KM_SLEEP);

	for (int i = 0; i < nperms; i++) {
		for (int j = 0; j < children; j++) {
			uint8_t val = perms[(i * children) + j];

			if (val >= children || counts[val] != i) {
				kmem_free(counts, countssz);
				return (EINVAL);
			}

			counts[val]++;
		}
	}

	if (checksum != 0) {
		int permssz = sizeof (uint8_t) * children * nperms;
		zio_cksum_t cksum;

#if defined(_ZFS_BIG_ENDIAN)
		fletcher_4_byteswap_varsize(perms, permssz, &cksum);
#else
		fletcher_4_native_varsize(perms, permssz, &cksum);
#endif

		if (checksum != cksum.zc_word[0]) {
			kmem_free(counts, countssz);
			return (ECKSUM);
		}
	}

	kmem_free(counts, countssz);

	return (0);
}

/*
 * Generate the permutation array for the draid_map_t.  These maps control
 * the placement of all data in a dRAID.  Therefore it's critical that the
 * seed always generates the same mapping.  We provide our own pseudo-random
 * number generator for this purpose.
 */
int
vdev_draid_generate_perms(const draid_map_t *map, uint8_t **permsp)
{
	VERIFY3U(map->dm_children, >=, VDEV_DRAID_MIN_CHILDREN);
	VERIFY3U(map->dm_children, <=, VDEV_DRAID_MAX_CHILDREN);
	VERIFY3U(map->dm_seed, !=, 0);
	VERIFY3U(map->dm_nperms, !=, 0);
	VERIFY0P(map->dm_perms);

#ifdef _KERNEL
	/*
	 * The kernel code always provides both a map_seed and checksum.
	 * Only the tests/zfs-tests/cmd/draid/draid.c utility will provide
	 * a zero checksum when generating new candidate maps.
	 */
	VERIFY3U(map->dm_checksum, !=, 0);
#endif
	uint64_t children = map->dm_children;
	uint64_t nperms = map->dm_nperms;
	int rowsz = sizeof (uint8_t) * children;
	int permssz = rowsz * nperms;
	uint8_t *perms;

	/* Allocate the permutation array */
	perms = vmem_alloc(permssz, KM_SLEEP);

	/* Setup an initial row with a known pattern */
	uint8_t *initial_row = kmem_alloc(rowsz, KM_SLEEP);
	for (int i = 0; i < children; i++)
		initial_row[i] = i;

	uint64_t draid_seed[2] = { VDEV_DRAID_SEED, map->dm_seed };
	uint8_t *current_row, *previous_row = initial_row;

	/*
	 * Perform a Fisher-Yates shuffle of each row using the previous
	 * row as the starting point.  An initial_row with known pattern
	 * is used as the input for the first row.
	 */
	for (int i = 0; i < nperms; i++) {
		current_row = &perms[i * children];
		memcpy(current_row, previous_row, rowsz);

		for (int j = children - 1; j > 0; j--) {
			uint64_t k = vdev_draid_rand(draid_seed) % (j + 1);
			uint8_t val = current_row[j];
			current_row[j] = current_row[k];
			current_row[k] = val;
		}

		previous_row = current_row;
	}

	kmem_free(initial_row, rowsz);

	int error = verify_perms(perms, children, nperms, map->dm_checksum);
	if (error) {
		vmem_free(perms, permssz);
		return (error);
	}

	*permsp = perms;

	return (0);
}
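
/*
 * Illustrative sketch (disabled): because vdev_draid_generate_perms() is a
 * pure function of the fixed per-map seed, generating the permutations
 * twice for the same child count must produce byte-identical arrays.  This
 * is the determinism property existing pools depend on.  The name
 * draid_sketch_determinism is hypothetical and exists only for this sketch.
 */
#if 0
static void
draid_sketch_determinism(uint64_t children)
{
	const draid_map_t *map;
	uint8_t *a, *b;

	VERIFY0(vdev_draid_lookup_map(children, &map));
	VERIFY0(vdev_draid_generate_perms(map, &a));
	VERIFY0(vdev_draid_generate_perms(map, &b));

	/* Same seed, same Fisher-Yates sequence, same mapping. */
	VERIFY0(memcmp(a, b, map->dm_children * map->dm_nperms));

	vmem_free(a, map->dm_children * map->dm_nperms);
	vmem_free(b, map->dm_children * map->dm_nperms);
}
#endif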

static void
vdev_draid_swap_perms(uint8_t *perms, uint64_t i, uint64_t j)
{
	uint8_t val = perms[i];

	perms[i] = perms[j];
	perms[j] = val;
}

/*
 * Shuffle the i-th disks of the slices that lie side by side in a big
 * width row, increasing the disk indices in each successive slice of the
 * row accordingly. The input to this function is the array of ready
 * permutations from vdev_draid_generate_perms(), so in order to correctly
 * shuffle the i-th disks we first need to locate their positions and
 * build a map of them.
 *
 * Note: the same Fisher-Yates shuffle algorithm is used as in
 * vdev_draid_generate_perms().
 */
static void
vdev_draid_shuffle_perms(const draid_map_t *map, uint8_t *perms, uint64_t width)
{
	uint64_t cn = map->dm_children;

	if (width <= cn)
		return;

	uint64_t n = width / cn;
	uint64_t nperms = map->dm_nperms / n * n;

	VERIFY3U(width, >=, VDEV_DRAID_MIN_CHILDREN);
	VERIFY3U(width, <=, VDEV_DRAID_MAX_CHILDREN);
	ASSERT0(width % cn);

	uint64_t draid_seed[2] = { VDEV_DRAID_SEED, map->dm_seed };

	uint8_t *cmap = kmem_alloc(n, KM_SLEEP);

	for (int i = 0; i < nperms; i += n) {
		for (int j = 0; j < cn; j++) {

			/* locate position of the same child in other slices */
			for (int k = n - 1; k > 0; k--)
				for (int l = 0; l < cn; l++)
					if (perms[(i+k) * cn + l] ==
					    perms[(i+0) * cn + j])
						cmap[k] = l;
			cmap[0] = j;

			/* increase index values for slices on the right */
			for (int k = n - 1; k > 0; k--)
				perms[(i+k) * cn + cmap[k]] += k * cn;

			/* shuffle */
			for (int k = n - 1; k > 0; k--) {
				int l = vdev_draid_rand(draid_seed) % (k + 1);
				if (k == l)
					continue;
				vdev_draid_swap_perms(perms,
				    (i+k) * cn + cmap[k],
				    (i+l) * cn + cmap[l]);
			}
		}
	}

	kmem_free(cmap, n);
}

/*
 * Lookup the fixed draid_map_t for the requested number of children.
 */
int
vdev_draid_lookup_map(uint64_t children, const draid_map_t **mapp)
{
	for (int i = 0; i < VDEV_DRAID_MAX_MAPS; i++) {
		if (draid_maps[i].dm_children == children) {
			*mapp = &draid_maps[i];
			return (0);
		}
	}

	return (ENOENT);
}

/*
 * Lookup the permutation array and iteration id for the provided offset.
 */
static void
vdev_draid_get_perm(vdev_draid_config_t *vdc, uint64_t pindex,
    uint8_t **base, uint64_t *iter)
{
	uint64_t n = vdc->vdc_width / vdc->vdc_children;
	uint64_t ncols = vdc->vdc_children;
	uint64_t nperms = (vdc->vdc_nperms / n) * n;
	uint64_t poff = pindex % (nperms * ncols);

	ASSERT3U(nperms, >=, ncols * n);

	*base = vdc->vdc_perms + (poff / (ncols * n)) * (ncols * n);
	*iter = (poff % ncols) + (pindex % n) * ncols;
}

static inline uint64_t
vdev_draid_permute_id(vdev_draid_config_t *vdc,
    uint8_t *base, uint64_t iter, uint64_t index)
{
	if (vdc->vdc_width > vdc->vdc_children) {
		uint64_t off = (iter / vdc->vdc_children) * vdc->vdc_children;
		return (base[(index + iter) % vdc->vdc_children + off]);
	}

	return ((base[index] + iter) % vdc->vdc_children);
}

/*
 * Return the asize, which is the psize rounded up to a full group width;
 * i.e. the inverse of vdev_draid_asize_to_psize() below.
 */
static uint64_t
vdev_draid_psize_to_asize(vdev_t *vd, uint64_t psize, uint64_t txg)
{
	(void) txg;
	vdev_draid_config_t *vdc = vd->vdev_tsd;
	uint64_t ashift = vd->vdev_ashift;

	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);

	uint64_t rows = ((psize - 1) / (vdc->vdc_ndata << ashift)) + 1;
	uint64_t asize = (rows * vdc->vdc_groupwidth) << ashift;

	ASSERT3U(asize, !=, 0);
	ASSERT0(asize % (vdc->vdc_groupwidth));

	return (asize);
}

/*
 * Deflate the asize to the psize; this includes stripping parity.
 */
uint64_t
vdev_draid_asize_to_psize(vdev_t *vd, uint64_t asize, uint64_t txg)
{
	(void) txg;
	vdev_draid_config_t *vdc = vd->vdev_tsd;

	ASSERT0(asize % vdc->vdc_groupwidth);

	return ((asize / vdc->vdc_groupwidth) * vdc->vdc_ndata);
}
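
/*
 * Illustrative example of the two conversions above, assuming ashift=12
 * (4 KiB sectors) and a draid1:8d group (ndata=8, groupwidth=9): a 32 KiB
 * psize needs rows = ((32K - 1) / (8 * 4K)) + 1 = 1 row, so its asize is
 * 1 * 9 * 4K = 36 KiB (8 data sectors plus 1 parity sector), and
 * converting back yields (36K / 9) * 8 = 32 KiB.  The round trip is only
 * lossless for psizes that fill a whole number of stripes: a 4 KiB psize
 * also allocates a 36 KiB asize, which deflates back to 32 KiB, the
 * difference being the zero filled skip sectors.
 */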

/*
 * Convert a logical offset to the corresponding group number.
 */
static uint64_t
vdev_draid_offset_to_group(vdev_t *vd, uint64_t offset)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;

	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);

	return (offset / vdc->vdc_groupsz);
}

/*
 * Convert a group number to the logical starting offset for that group.
 */
static uint64_t
vdev_draid_group_to_offset(vdev_t *vd, uint64_t group)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;

	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);

	return (group * vdc->vdc_groupsz);
}

/*
 * Full stripe writes.  When writing, all columns (D+P) are required.  Parity
 * is calculated over all the columns, including empty zero filled sectors,
 * and each is written to disk.  While only the data columns are needed for
 * a normal read, all of the columns are required for reconstruction when
 * performing a sequential resilver.
 *
 * For "big columns" it's sufficient to map the correct range of the zio ABD.
 * Partial columns require allocating a gang ABD in order to zero fill the
 * empty sectors.  When the column is empty a zero filled sector must be
 * mapped.  In all cases the data ABDs must be the same size as the parity
 * ABDs (e.g. rc->rc_size == parity_size).
 */
static void
vdev_draid_map_alloc_write(zio_t *zio, uint64_t abd_offset, raidz_row_t *rr)
{
	uint64_t skip_size = 1ULL << zio->io_vd->vdev_top->vdev_ashift;
	uint64_t parity_size = rr->rr_col[0].rc_size;
	uint64_t abd_off = abd_offset;

	ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
	ASSERT3U(parity_size, ==, abd_get_size(rr->rr_col[0].rc_abd));

	for (uint64_t c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
		raidz_col_t *rc = &rr->rr_col[c];

		if (rc->rc_size == 0) {
			/* empty data column (small write), add a skip sector */
			ASSERT3U(skip_size, ==, parity_size);
			rc->rc_abd = abd_get_zeros(skip_size);
		} else if (rc->rc_size == parity_size) {
			/* this is a "big column" */
			rc->rc_abd = abd_get_offset_struct(&rc->rc_abdstruct,
			    zio->io_abd, abd_off, rc->rc_size);
		} else {
			/* short data column, add a skip sector */
			ASSERT3U(rc->rc_size + skip_size, ==, parity_size);
			rc->rc_abd = abd_alloc_gang();
			abd_gang_add(rc->rc_abd, abd_get_offset_size(
			    zio->io_abd, abd_off, rc->rc_size), B_TRUE);
			abd_gang_add(rc->rc_abd, abd_get_zeros(skip_size),
			    B_TRUE);
		}

		ASSERT3U(abd_get_size(rc->rc_abd), ==, parity_size);

		abd_off += rc->rc_size;
		rc->rc_size = parity_size;
	}

	IMPLY(abd_offset != 0, abd_off == zio->io_size);
}
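
/*
 * Illustrative example of the three cases above, assuming ashift=12 and a
 * draid1:8d group (groupwidth=9, nparity=1): a 40 KiB write maps to
 * psize = 10 sectors, giving q=1, r=2, and bc=3 (see
 * vdev_draid_map_alloc_row() below).  Column 0 is the 8 KiB parity
 * column, columns 1-2 are "big columns" mapped directly from the zio ABD
 * (8 KiB each), and columns 3-8 are short columns holding one 4 KiB data
 * sector each, padded to parity_size with a zero filled skip sector via a
 * gang ABD.  The empty column case only arises when q == 0, i.e. for
 * writes smaller than one sector per data column.
 */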

/*
 * Scrub/resilver reads.  In order to store the contents of the skip sectors
 * an additional ABD is allocated.  The columns are handled in the same way
 * as a full stripe write except instead of using the zero ABD the newly
 * allocated skip ABD is used to back the skip sectors.  In all cases the
 * data ABD must be the same size as the parity ABDs.
 */
static void
vdev_draid_map_alloc_scrub(zio_t *zio, uint64_t abd_offset, raidz_row_t *rr)
{
	uint64_t skip_size = 1ULL << zio->io_vd->vdev_top->vdev_ashift;
	uint64_t parity_size = rr->rr_col[0].rc_size;
	uint64_t abd_off = abd_offset;
	uint64_t skip_off = 0;

	ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
	ASSERT0P(rr->rr_abd_empty);

	if (rr->rr_nempty > 0) {
		rr->rr_abd_empty = abd_alloc_linear(rr->rr_nempty * skip_size,
		    B_FALSE);
	}

	for (uint64_t c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
		raidz_col_t *rc = &rr->rr_col[c];

		if (rc->rc_size == 0) {
			/* empty data column (small read), add a skip sector */
			ASSERT3U(skip_size, ==, parity_size);
			ASSERT3U(rr->rr_nempty, !=, 0);
			rc->rc_abd = abd_get_offset_size(rr->rr_abd_empty,
			    skip_off, skip_size);
			skip_off += skip_size;
		} else if (rc->rc_size == parity_size) {
			/* this is a "big column" */
			rc->rc_abd = abd_get_offset_struct(&rc->rc_abdstruct,
			    zio->io_abd, abd_off, rc->rc_size);
		} else {
			/* short data column, add a skip sector */
			ASSERT3U(rc->rc_size + skip_size, ==, parity_size);
			ASSERT3U(rr->rr_nempty, !=, 0);
			rc->rc_abd = abd_alloc_gang();
			abd_gang_add(rc->rc_abd, abd_get_offset_size(
			    zio->io_abd, abd_off, rc->rc_size), B_TRUE);
			abd_gang_add(rc->rc_abd, abd_get_offset_size(
			    rr->rr_abd_empty, skip_off, skip_size), B_TRUE);
			skip_off += skip_size;
		}

		uint64_t abd_size = abd_get_size(rc->rc_abd);
		ASSERT3U(abd_size, ==, abd_get_size(rr->rr_col[0].rc_abd));

		/*
		 * Increase rc_size so the skip ABD is included in subsequent
		 * parity calculations.
		 */
		abd_off += rc->rc_size;
		rc->rc_size = abd_size;
	}

	IMPLY(abd_offset != 0, abd_off == zio->io_size);
	ASSERT3U(skip_off, ==, rr->rr_nempty * skip_size);
}

/*
 * Normal reads.  In this common case only the columns containing data
 * are read in to the zio ABDs.  Neither the parity columns nor the empty
 * skip sectors are read unless the checksum fails verification, in which
 * case vdev_raidz_read_all() will call vdev_draid_map_alloc_empty() to
 * expand the raid map in order to allow reconstruction using the parity
 * data and skip sectors.
 */
static void
vdev_draid_map_alloc_read(zio_t *zio, uint64_t abd_offset, raidz_row_t *rr)
{
	uint64_t abd_off = abd_offset;

	ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);

	for (uint64_t c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
		raidz_col_t *rc = &rr->rr_col[c];

		if (rc->rc_size > 0) {
			rc->rc_abd = abd_get_offset_struct(&rc->rc_abdstruct,
			    zio->io_abd, abd_off, rc->rc_size);
			abd_off += rc->rc_size;
		}
	}

	IMPLY(abd_offset != 0, abd_off == zio->io_size);
}

/*
 * Converts a normal "read" raidz_row_t to a "scrub" raidz_row_t. The key
 * difference is that an ABD is allocated to back skip sectors so they may
 * be read in to memory, verified, and repaired if needed.
 */
void
vdev_draid_map_alloc_empty(zio_t *zio, raidz_row_t *rr)
{
	uint64_t skip_size = 1ULL << zio->io_vd->vdev_top->vdev_ashift;
	uint64_t parity_size = rr->rr_col[0].rc_size;
	uint64_t skip_off = 0;

	ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
	ASSERT0P(rr->rr_abd_empty);

	if (rr->rr_nempty > 0) {
		rr->rr_abd_empty = abd_alloc_linear(rr->rr_nempty * skip_size,
		    B_FALSE);
	}

	for (uint64_t c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
		raidz_col_t *rc = &rr->rr_col[c];

		if (rc->rc_size == 0) {
			/* empty data column (small read), add a skip sector */
			ASSERT3U(skip_size, ==, parity_size);
			ASSERT3U(rr->rr_nempty, !=, 0);
			ASSERT0P(rc->rc_abd);
			rc->rc_abd = abd_get_offset_size(rr->rr_abd_empty,
			    skip_off, skip_size);
			skip_off += skip_size;
		} else if (rc->rc_size == parity_size) {
			/* this is a "big column", nothing to add */
			ASSERT3P(rc->rc_abd, !=, NULL);
		} else {
			/*
			 * short data column, add a skip sector and clear
			 * rc_tried to force the entire column to be re-read
			 * thereby including the missing skip sector data
			 * which is needed for reconstruction.
			 */
			ASSERT3U(rc->rc_size + skip_size, ==, parity_size);
			ASSERT3U(rr->rr_nempty, !=, 0);
			ASSERT3P(rc->rc_abd, !=, NULL);
			ASSERT(!abd_is_gang(rc->rc_abd));
			abd_t *read_abd = rc->rc_abd;
			rc->rc_abd = abd_alloc_gang();
			abd_gang_add(rc->rc_abd, read_abd, B_TRUE);
			abd_gang_add(rc->rc_abd, abd_get_offset_size(
			    rr->rr_abd_empty, skip_off, skip_size), B_TRUE);
			skip_off += skip_size;
			rc->rc_tried = 0;
		}

		/*
		 * Increase rc_size so the empty ABD is included in subsequent
		 * parity calculations.
		 */
		rc->rc_size = parity_size;
	}

	ASSERT3U(skip_off, ==, rr->rr_nempty * skip_size);
}

/*
 * Verify that all empty sectors are zero filled before using them to
 * calculate parity.  Otherwise, silent corruption in an empty sector will
 * result in bad parity being generated.  That bad parity will then be
 * considered authoritative and overwrite the good parity on disk.  This
 * is possible because the checksum is only calculated over the data,
 * thus it cannot be used to detect damage in empty sectors.
 */
int
vdev_draid_map_verify_empty(zio_t *zio, raidz_row_t *rr)
{
	uint64_t skip_size = 1ULL << zio->io_vd->vdev_top->vdev_ashift;
	uint64_t parity_size = rr->rr_col[0].rc_size;
	uint64_t skip_off = parity_size - skip_size;
	uint64_t empty_off = 0;
	int ret = 0;

	ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
	ASSERT3P(rr->rr_abd_empty, !=, NULL);
	ASSERT3U(rr->rr_bigcols, >, 0);

	void *zero_buf = kmem_zalloc(skip_size, KM_SLEEP);

	for (int c = rr->rr_bigcols; c < rr->rr_cols; c++) {
		raidz_col_t *rc = &rr->rr_col[c];

		ASSERT3P(rc->rc_abd, !=, NULL);
		ASSERT3U(rc->rc_size, ==, parity_size);

		if (abd_cmp_buf_off(rc->rc_abd, zero_buf, skip_off,
		    skip_size) != 0) {
			vdev_raidz_checksum_error(zio, rc, rc->rc_abd);
			abd_zero_off(rc->rc_abd, skip_off, skip_size);
			rc->rc_error = SET_ERROR(ECKSUM);
			ret++;
		}

		empty_off += skip_size;
	}

	ASSERT3U(empty_off, ==, abd_get_size(rr->rr_abd_empty));

	kmem_free(zero_buf, skip_size);

	return (ret);
}

/*
 * Given a logical address within a dRAID configuration, return the physical
 * address on the first drive in the group that this address maps to
 * (at position 'start' in permutation number 'perm').
 */
static uint64_t
vdev_draid_logical_to_physical(vdev_t *vd, uint64_t logical_offset,
    uint64_t *perm, uint64_t *start, uint64_t *ndisks)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;

	/* b is the dRAID (parent) sector offset. */
	uint64_t ashift = vd->vdev_top->vdev_ashift;
	uint64_t b_offset = logical_offset >> ashift;

	/*
	 * The height of a row in units of the vdev's minimum sector size.
	 * This is the amount of data written to each disk of each group
	 * in a given permutation.
	 */
	uint64_t rowheight_sectors = VDEV_DRAID_ROWHEIGHT >> ashift;

	/*
	 * We cycle through a disk permutation every groupsz * ngroups chunk
	 * of address space. Note that ngroups * groupsz must be a multiple
	 * of the number of data drives (ndisks) in order to guarantee
	 * alignment. So, for example, if our row height is 16MB, our group
	 * size is 10, and there are 13 data drives in the draid, then ngroups
	 * will be 13, we will change permutation every 2.08GB and each
	 * disk will have 160MB of data per chunk.
	 */
	uint64_t groupwidth = vdc->vdc_groupwidth;
	uint64_t ngroups = vdc->vdc_ngroups;

	uint64_t group = logical_offset / vdc->vdc_groupsz;
	uint64_t fgrps = vdc->vdc_width / vdc->vdc_children;

	*perm = (group / ngroups) * fgrps;

	/*
	 * Failure groups starting from (vdc_nspares % fgrps) have one less
	 * spare, so they have one more disk in ndisks.
	 */
	uint64_t biggies = vdc->vdc_nspares % fgrps;

	uint64_t poff = 0;
	group %= ngroups;
	uint64_t ngroups1 = ngroups / fgrps;
	if (!biggies || group < biggies * ngroups1)
		poff = group / ngroups1;
	else
		poff = biggies +
		    (group - (biggies * ngroups1)) / (ngroups1 + 1);
	ASSERT3U(poff, <, fgrps);
	*perm += poff;

	*ndisks = (vdc->vdc_ndisks / fgrps) +
	    (biggies ? ((poff >= biggies) ? 1 : 0) : 0);

	/* b_offset is the sector offset within a group chunk */
	b_offset = b_offset % (rowheight_sectors * groupwidth);
	ASSERT0(b_offset % groupwidth);

	/*
	 * Find the starting byte offset on each child vdev:
	 * - within a permutation there are ngroups groups spread over the
	 *   rows, where each row covers a slice portion of the disk
	 * - each permutation has (groupwidth * ngroups) / ndisks rows
	 * - so each permutation covers rows * slice portion of the disk
	 * - so we need to find the row where this IO group target begins
	 */
	uint64_t perm_rows = (groupwidth * ngroups) / vdc->vdc_ndisks;

	/* Adjust group for our failure group. */
	if (!biggies || poff <= biggies)
		group -= poff * ngroups1;
	else
		group -= (biggies * ngroups1) +
		    (poff - biggies) * (ngroups1 + 1);

	IMPLY(poff < biggies, group < ngroups1);
	ASSERT3U(group, <=, ngroups1);

	/*
	 * groupstart is where the group this IO will land in "starts" in
	 * the permutation array.
	 */
	uint64_t groupstart = (group * groupwidth) % *ndisks;
	ASSERT3U(groupstart + groupwidth, <=, *ndisks + groupstart);
	*start = groupstart;

	/* Adjust ngroups for our failure group. */
	ngroups = ngroups1 + ((biggies && poff >= biggies) ? 1 : 0);

	ASSERT3U(group, <, ngroups);

	uint64_t row = ((*perm / fgrps) * perm_rows) +
	    (((group % ngroups) * groupwidth) / *ndisks);

	return (((rowheight_sectors * row) +
	    (b_offset / groupwidth)) << ashift);
}
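
/*
 * Illustrative worked example for the simple case of a single failure
 * group (width == children), using the configuration from the layout
 * diagram at the top of this file: children=14, nspares=2 (ndisks=12),
 * groupwidth=9, ngroups=4, and ashift=12, so rowheight_sectors =
 * 16M >> 12 = 4096.  For the start of group 2 (logical_offset =
 * 2 * 144M = 288M): perm = 0, groupstart = (2 * 9) % 12 = 6, and
 * row = (2 * 9) / 12 = 1, so the function returns 4096 << 12 = 16M.
 * That is, group 2 begins at permuted disk 6, one 16M row into each
 * child, exactly as drawn in the slice0 diagram.
 */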
1130 
1131 static uint64_t
vdev_draid_map_alloc_row(zio_t * zio,raidz_row_t ** rrp,uint64_t io_offset,uint64_t abd_offset,uint64_t abd_size)1132 vdev_draid_map_alloc_row(zio_t *zio, raidz_row_t **rrp, uint64_t io_offset,
1133     uint64_t abd_offset, uint64_t abd_size)
1134 {
1135 	vdev_t *vd = zio->io_vd;
1136 	vdev_draid_config_t *vdc = vd->vdev_tsd;
1137 	uint64_t ashift = vd->vdev_top->vdev_ashift;
1138 	uint64_t io_size = abd_size;
1139 	uint64_t io_asize = vdev_draid_psize_to_asize(vd, io_size, 0);
1140 	uint64_t group = vdev_draid_offset_to_group(vd, io_offset);
1141 	uint64_t start_offset = vdev_draid_group_to_offset(vd, group + 1);
1142 
1143 	/*
1144 	 * Limit the io_size to the space remaining in the group.  A second
1145 	 * row in the raidz_map_t is created for the remainder.
1146 	 */
1147 	if (io_offset + io_asize > start_offset) {
1148 		io_size = vdev_draid_asize_to_psize(vd,
1149 		    start_offset - io_offset, 0);
1150 	}
1151 
1152 	/*
1153 	 * At most a block may span the logical end of one group and the start
1154 	 * of the next group. Therefore, at the end of a group the io_size must
1155 	 * span the group width evenly and the remainder must be aligned to the
1156 	 * start of the next group.
1157 	 */
1158 	IMPLY(abd_offset == 0 && io_size < zio->io_size,
1159 	    (io_asize >> ashift) % vdc->vdc_groupwidth == 0);
1160 	IMPLY(abd_offset != 0,
1161 	    vdev_draid_group_to_offset(vd, group) == io_offset);
1162 
1163 	/* Lookup starting byte offset on each child vdev */
1164 	uint64_t groupstart, perm, ndisks;
1165 	uint64_t physical_offset = vdev_draid_logical_to_physical(vd,
1166 	    io_offset, &perm, &groupstart, &ndisks);
1167 
1168 	/*
1169 	 * If there is less than groupwidth drives available after the group
1170 	 * start, the group is going to wrap onto the next row. 'wrap' is the
1171 	 * group disk number that starts on the next row.
1172 	 */
1173 	uint64_t groupwidth = vdc->vdc_groupwidth;
1174 	uint64_t wrap = groupwidth;
1175 
1176 	if (groupstart + groupwidth > ndisks)
1177 		wrap = ndisks - groupstart;
1178 
1179 	/* The io size in units of the vdev's minimum sector size. */
1180 	const uint64_t psize = io_size >> ashift;
1181 
1182 	/*
1183 	 * "Quotient": The number of data sectors for this stripe on all but
1184 	 * the "big column" child vdevs that also contain "remainder" data.
1185 	 */
1186 	uint64_t q = psize / vdc->vdc_ndata;
1187 
1188 	/*
1189 	 * "Remainder": The number of partial stripe data sectors in this I/O.
1190 	 * This will add a sector to some, but not all, child vdevs.
1191 	 */
1192 	uint64_t r = psize - q * vdc->vdc_ndata;
1193 
1194 	/* The number of "big columns" - those which contain remainder data. */
1195 	uint64_t bc = (r == 0 ? 0 : r + vdc->vdc_nparity);
1196 	ASSERT3U(bc, <, groupwidth);
1197 
1198 	/* The total number of data and parity sectors for this I/O. */
1199 	uint64_t tot = psize + (vdc->vdc_nparity * (q + (r == 0 ? 0 : 1)));
1200 
1201 	ASSERT3U(vdc->vdc_nparity, >, 0);
1202 
1203 	raidz_row_t *rr = vdev_raidz_row_alloc(groupwidth, zio);
1204 	rr->rr_bigcols = bc;
1205 	rr->rr_firstdatacol = vdc->vdc_nparity;
1206 #ifdef ZFS_DEBUG
1207 	rr->rr_offset = io_offset;
1208 	rr->rr_size = io_size;
1209 #endif
1210 	*rrp = rr;
1211 
1212 	uint8_t *base;
1213 	uint64_t iter, asize = 0;
1214 	vdev_draid_get_perm(vdc, perm, &base, &iter);
1215 	for (uint64_t i = 0; i < groupwidth; i++) {
1216 		raidz_col_t *rc = &rr->rr_col[i];
1217 		uint64_t c = (groupstart + i) % ndisks;
1218 
1219 		/* increment the offset if we wrap to the next row */
1220 		if (i == wrap)
1221 			physical_offset += VDEV_DRAID_ROWHEIGHT;
1222 
1223 		rc->rc_devidx = vdev_draid_permute_id(vdc, base, iter, c);
1224 		rc->rc_offset = physical_offset;
1225 
1226 		if (q == 0 && i >= bc)
1227 			rc->rc_size = 0;
1228 		else if (i < bc)
1229 			rc->rc_size = (q + 1) << ashift;
1230 		else
1231 			rc->rc_size = q << ashift;
1232 
1233 		asize += rc->rc_size;
1234 	}
1235 
1236 	ASSERT3U(asize, ==, tot << ashift);
1237 	rr->rr_nempty = roundup(tot, groupwidth) - tot;
1238 	IMPLY(bc > 0, rr->rr_nempty == groupwidth - bc);
1239 
1240 	/* Allocate buffers for the parity columns */
1241 	for (uint64_t c = 0; c < rr->rr_firstdatacol; c++) {
1242 		raidz_col_t *rc = &rr->rr_col[c];
1243 		rc->rc_abd = abd_alloc_linear(rc->rc_size, B_FALSE);
1244 	}
1245 
1246 	/*
1247 	 * Map buffers for data columns and allocate/map buffers for skip
1248 	 * sectors.  There are three distinct cases for dRAID which are
1249 	 * required to support sequential rebuild.
1250 	 */
1251 	if (zio->io_type == ZIO_TYPE_WRITE) {
1252 		vdev_draid_map_alloc_write(zio, abd_offset, rr);
1253 	} else if ((rr->rr_nempty > 0) &&
1254 	    (zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER))) {
1255 		vdev_draid_map_alloc_scrub(zio, abd_offset, rr);
1256 	} else {
1257 		ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
1258 		vdev_draid_map_alloc_read(zio, abd_offset, rr);
1259 	}
1260 
1261 	return (io_size);
1262 }
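
/*
 * As an illustration of the row math above (values assumed, e.g. a
 * draid2:8d layout: ndata = 8, nparity = 2, groupwidth = 10, and
 * ashift = 12 for 4KiB sectors), consider an 88KiB write:
 *
 *	psize = 90112 >> 12 = 22 sectors
 *	q     = 22 / 8      = 2   (sectors on every column)
 *	r     = 22 - 2 * 8  = 6   (partial stripe data sectors)
 *	bc    = 6 + 2       = 8   (big columns, including parity)
 *	tot   = 22 + 2 * (2 + 1) = 28
 *
 * Columns 0-7 receive q + 1 = 3 sectors and columns 8-9 receive q = 2,
 * for 8 * 3 + 2 * 2 = 28 = tot.  rr_nempty = roundup(28, 10) - 28 = 2,
 * matching groupwidth - bc, so two skip sectors pad the I/O out to
 * three full stripes.
 */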
1263 
1264 /*
1265  * Allocate the raidz mapping to be applied to the dRAID I/O.  The parity
1266  * calculations for dRAID are identical to raidz however there are a few
1267  * differences in the layout.
1268  *
1269  * - dRAID always allocates a full stripe width. Any extra sectors due to
1270  *   this padding are zero filled and written to disk. They will be read
1271  *   back during a scrub or repair operation since they are included in
1272  *   the parity calculation. This property enables sequential resilvering.
1273  *
1274  * - When the block at the logical offset spans redundancy groups then two
1275  *   rows are allocated in the raidz_map_t. One row resides at the end of
1276  *   the first group and the other at the start of the following group.
1277  */
1278 static raidz_map_t *
1279 vdev_draid_map_alloc(zio_t *zio)
1280 {
1281 	raidz_row_t *rr[2];
1282 	uint64_t abd_offset = 0;
1283 	uint64_t abd_size = zio->io_size;
1284 	uint64_t io_offset = zio->io_offset;
1285 	uint64_t size;
1286 	int nrows = 1;
1287 
1288 	size = vdev_draid_map_alloc_row(zio, &rr[0], io_offset,
1289 	    abd_offset, abd_size);
1290 	if (size < abd_size) {
1291 		vdev_t *vd = zio->io_vd;
1292 
1293 		io_offset += vdev_draid_psize_to_asize(vd, size, 0);
1294 		abd_offset += size;
1295 		abd_size -= size;
1296 		nrows++;
1297 
1298 		ASSERT3U(io_offset, ==, vdev_draid_group_to_offset(
1299 		    vd, vdev_draid_offset_to_group(vd, io_offset)));
1300 		ASSERT3U(abd_offset, <, zio->io_size);
1301 		ASSERT3U(abd_size, !=, 0);
1302 
1303 		size = vdev_draid_map_alloc_row(zio, &rr[1],
1304 		    io_offset, abd_offset, abd_size);
1305 		VERIFY3U(size, ==, abd_size);
1306 	}
1307 
1308 	raidz_map_t *rm;
1309 	rm = kmem_zalloc(offsetof(raidz_map_t, rm_row[nrows]), KM_SLEEP);
1310 	rm->rm_ops = vdev_raidz_math_get_ops();
1311 	rm->rm_nrows = nrows;
1312 	rm->rm_row[0] = rr[0];
1313 	if (nrows == 2)
1314 		rm->rm_row[1] = rr[1];
1315 	return (rm);
1316 }
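
/*
 * To sketch the two-row case: assuming a group boundary at logical
 * offset G and an I/O whose asize crosses it, the first
 * vdev_draid_map_alloc_row() call maps only the full stripes that fit
 * before G into rr[0], and the second call maps the remainder into
 * rr[1] starting exactly at G, which the ASSERTs above confirm is a
 * group-aligned offset.
 */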
1317 
1318 /*
1319  * Given an offset into a dRAID return the next group width aligned offset
1320  * which can be used to start an allocation.
1321  */
1322 static uint64_t
1323 vdev_draid_get_astart(vdev_t *vd, const uint64_t start)
1324 {
1325 	vdev_draid_config_t *vdc = vd->vdev_tsd;
1326 
1327 	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
1328 
1329 	return (roundup(start, vdc->vdc_groupwidth << vd->vdev_ashift));
1330 }
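
/*
 * For example (values assumed): with groupwidth = 10 and ashift = 12
 * the allocation alignment is 10 << 12 = 40KiB, so a start offset of
 * 100KiB is rounded up to the next allocatable offset at 120KiB.
 */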
1331 
1332 /*
1333  * Allocatable space for dRAID is (children - nspares) * sizeof(smallest child)
1334  * rounded down to the last full slice.  So each child must provide at least
1335  * 1 / (children - nspares) of its asize rounded up to VDEV_DRAID_ROWHEIGHT.
1336  */
1337 static uint64_t
1338 vdev_draid_min_asize(vdev_t *vd)
1339 {
1340 	vdev_draid_config_t *vdc = vd->vdev_tsd;
1341 
1342 	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
1343 
1344 	return (VDEV_DRAID_REFLOW_RESERVE +
1345 	    DIV_ROUND_UP(DIV_ROUND_UP(vd->vdev_min_asize, vdc->vdc_ndisks),
1346 	    VDEV_DRAID_ROWHEIGHT) * VDEV_DRAID_ROWHEIGHT);
1347 }
1348 
1349 /*
1350  * When using dRAID the minimum allocation size is determined by the number
1351  * of data disks in the redundancy group.  Full stripes are always used.
1352  */
1353 static uint64_t
1354 vdev_draid_min_alloc(vdev_t *vd)
1355 {
1356 	vdev_draid_config_t *vdc = vd->vdev_tsd;
1357 
1358 	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
1359 
1360 	return (vdc->vdc_ndata << vd->vdev_ashift);
1361 }
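
/*
 * For example (values assumed): with 8 data disks per group and
 * ashift = 12 the minimum allocation is 8 << 12 = 32KiB, the data
 * portion of exactly one full stripe.
 */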
1362 
1363 /*
1364  * Returns false if the txg range exists on any leaf vdev, true otherwise.
1365  *
1366  * A dRAID spare does not fit into the DTL model. While it has child vdevs
1367  * there is no redundancy among them, and the effective child vdev is
1368  * determined by offset. Essentially we do a vdev_dtl_reassess() on the
1369  * fly by replacing a dRAID spare with the child vdev under the offset.
1370  * Note that it is a recursive process because the child vdev can be
1371  * another dRAID spare and so on.
1372  */
1373 boolean_t
1374 vdev_draid_missing(vdev_t *vd, uint64_t physical_offset, uint64_t txg,
1375     uint64_t size)
1376 {
1377 	if (vd->vdev_ops == &vdev_spare_ops ||
1378 	    vd->vdev_ops == &vdev_replacing_ops) {
1379 		/*
1380 		 * Check all of the readable children: if any child
1381 		 * contains the txg range, the data is not missing.
1382 		 */
1383 		for (int c = 0; c < vd->vdev_children; c++) {
1384 			vdev_t *cvd = vd->vdev_child[c];
1385 
1386 			if (!vdev_readable(cvd))
1387 				continue;
1388 
1389 			if (!vdev_draid_missing(cvd, physical_offset,
1390 			    txg, size))
1391 				return (B_FALSE);
1392 		}
1393 
1394 		return (B_TRUE);
1395 	}
1396 
1397 	if (vd->vdev_ops == &vdev_draid_spare_ops) {
1398 		/*
1399 		 * When sequentially resilvering we don't have a proper
1400 		 * txg range so instead we must presume all txgs are
1401 		 * missing on this vdev until the resilver completes.
1402 		 */
1403 		if (vd->vdev_rebuild_txg != 0)
1404 			return (B_TRUE);
1405 
1406 		/*
1407 		 * DTL_MISSING is set for all prior txgs when a resilver
1408 		 * is started in spa_vdev_attach().
1409 		 */
1410 		if (vdev_dtl_contains(vd, DTL_MISSING, txg, size))
1411 			return (B_TRUE);
1412 
1413 		/*
1414 		 * Consult the DTL on the relevant vdev. Either a vdev
1415 		 * leaf or spare/replace mirror child may be returned so
1416 		 * we must recursively call vdev_draid_missing().
1417 		 */
1418 		vd = vdev_draid_spare_get_child(vd, physical_offset);
1419 		if (vd == NULL)
1420 			return (B_TRUE);
1421 
1422 		return (vdev_draid_missing(vd, physical_offset, txg, size));
1423 	}
1424 
1425 	return (vdev_dtl_contains(vd, DTL_MISSING, txg, size));
1426 }
1427 
1428 /*
1429  * Returns true if the txg is only partially replicated on the leaf vdevs.
1430  */
1431 static boolean_t
1432 vdev_draid_partial(vdev_t *vd, uint64_t physical_offset, uint64_t txg,
1433     uint64_t size)
1434 {
1435 	if (vd->vdev_ops == &vdev_spare_ops ||
1436 	    vd->vdev_ops == &vdev_replacing_ops) {
1437 		/*
1438 		 * Check all of the readable children: if any child is
1439 		 * missing the txg range then it is partially replicated.
1440 		 */
1441 		for (int c = 0; c < vd->vdev_children; c++) {
1442 			vdev_t *cvd = vd->vdev_child[c];
1443 
1444 			if (!vdev_readable(cvd))
1445 				continue;
1446 
1447 			if (vdev_draid_partial(cvd, physical_offset, txg, size))
1448 				return (B_TRUE);
1449 		}
1450 
1451 		return (B_FALSE);
1452 	}
1453 
1454 	if (vd->vdev_ops == &vdev_draid_spare_ops) {
1455 		/*
1456 		 * When sequentially resilvering we don't have a proper
1457 		 * txg range so instead we must presume all txgs are
1458 		 * missing on this vdev until the resilver completes.
1459 		 */
1460 		if (vd->vdev_rebuild_txg != 0)
1461 			return (B_TRUE);
1462 
1463 		/*
1464 		 * DTL_MISSING is set for all prior txgs when a resilver
1465 		 * is started in spa_vdev_attach().
1466 		 */
1467 		if (vdev_dtl_contains(vd, DTL_MISSING, txg, size))
1468 			return (B_TRUE);
1469 
1470 		/*
1471 		 * Consult the DTL on the relevant vdev. Either a vdev
1472 		 * leaf or spare/replace mirror child may be returned so
1473 		 * we must recursively call vdev_draid_partial().
1474 		 */
1475 		vd = vdev_draid_spare_get_child(vd, physical_offset);
1476 		if (vd == NULL)
1477 			return (B_TRUE);
1478 
1479 		return (vdev_draid_partial(vd, physical_offset, txg, size));
1480 	}
1481 
1482 	return (vdev_dtl_contains(vd, DTL_MISSING, txg, size));
1483 }
1484 
1485 /*
1486  * Determine if the vdev is readable at the given offset.
1487  */
1488 boolean_t
1489 vdev_draid_readable(vdev_t *vd, uint64_t physical_offset)
1490 {
1491 	if (vd->vdev_ops == &vdev_draid_spare_ops) {
1492 		vd = vdev_draid_spare_get_child(vd, physical_offset);
1493 		if (vd == NULL)
1494 			return (B_FALSE);
1495 	}
1496 
1497 	if (vd->vdev_ops == &vdev_spare_ops ||
1498 	    vd->vdev_ops == &vdev_replacing_ops) {
1499 
1500 		for (int c = 0; c < vd->vdev_children; c++) {
1501 			vdev_t *cvd = vd->vdev_child[c];
1502 
1503 			if (!vdev_readable(cvd))
1504 				continue;
1505 
1506 			if (vdev_draid_readable(cvd, physical_offset))
1507 				return (B_TRUE);
1508 		}
1509 
1510 		return (B_FALSE);
1511 	}
1512 
1513 	return (vdev_readable(vd));
1514 }
1515 
1516 /*
1517  * Returns the first distributed spare found under the provided vdev tree.
1518  */
1519 static vdev_t *
1520 vdev_draid_find_spare(vdev_t *vd)
1521 {
1522 	if (vd->vdev_ops == &vdev_draid_spare_ops)
1523 		return (vd);
1524 
1525 	for (int c = 0; c < vd->vdev_children; c++) {
1526 		vdev_t *svd = vdev_draid_find_spare(vd->vdev_child[c]);
1527 		if (svd != NULL)
1528 			return (svd);
1529 	}
1530 
1531 	return (NULL);
1532 }
1533 
1534 /*
1535  * Returns B_TRUE if the passed in vdev is currently "faulted".
1536  * Faulted, in this context, means that the vdev represents a
1537  * replacing or sparing vdev tree.
1538  */
1539 static boolean_t
1540 vdev_draid_faulted(vdev_t *vd, uint64_t physical_offset)
1541 {
1542 	if (vd->vdev_ops == &vdev_draid_spare_ops) {
1543 		vd = vdev_draid_spare_get_child(vd, physical_offset);
1544 		if (vd == NULL)
1545 			return (B_FALSE);
1546 
1547 		/*
1548 		 * After resolving the distributed spare to a leaf vdev
1549 		 * check the parent to determine if it's "faulted".
1550 		 */
1551 		vd = vd->vdev_parent;
1552 	}
1553 
1554 	return (vd->vdev_ops == &vdev_replacing_ops ||
1555 	    vd->vdev_ops == &vdev_spare_ops);
1556 }
1557 
1558 /*
1559  * Determine if the dRAID block at the logical offset is degraded.
1560  * Used by sequential resilver.
1561  */
1562 static boolean_t
1563 vdev_draid_group_degraded(vdev_t *vd, uint64_t offset)
1564 {
1565 	vdev_draid_config_t *vdc = vd->vdev_tsd;
1566 
1567 	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
1568 	ASSERT3U(vdev_draid_get_astart(vd, offset), ==, offset);
1569 
1570 	uint64_t groupstart, perm, ndisks;
1571 	uint64_t physical_offset = vdev_draid_logical_to_physical(vd,
1572 	    offset, &perm, &groupstart, &ndisks);
1573 
1574 	uint8_t *base;
1575 	uint64_t iter;
1576 	vdev_draid_get_perm(vdc, perm, &base, &iter);
1577 
1578 	for (uint64_t i = 0; i < vdc->vdc_groupwidth; i++) {
1579 		uint64_t c = (groupstart + i) % ndisks;
1580 		uint64_t cid = vdev_draid_permute_id(vdc, base, iter, c);
1581 		vdev_t *cvd = vd->vdev_child[cid];
1582 
1583 		/* Group contains a faulted vdev. */
1584 		if (vdev_draid_faulted(cvd, physical_offset))
1585 			return (B_TRUE);
1586 
1587 		/*
1588 		 * Always check groups with active distributed spares
1589 		 * because any vdev failure in the pool will affect them.
1590 		 */
1591 		if (vdev_draid_find_spare(cvd) != NULL)
1592 			return (B_TRUE);
1593 	}
1594 
1595 	return (B_FALSE);
1596 }
1597 
1598 /*
1599  * Determine if the txg is missing.  Used by healing resilver.
1600  */
1601 static boolean_t
1602 vdev_draid_group_missing(vdev_t *vd, uint64_t offset, uint64_t txg,
1603     uint64_t size)
1604 {
1605 	vdev_draid_config_t *vdc = vd->vdev_tsd;
1606 
1607 	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
1608 	ASSERT3U(vdev_draid_get_astart(vd, offset), ==, offset);
1609 
1610 	uint64_t groupstart, perm, ndisks;
1611 	uint64_t physical_offset = vdev_draid_logical_to_physical(vd,
1612 	    offset, &perm, &groupstart, &ndisks);
1613 
1614 	uint8_t *base;
1615 	uint64_t iter;
1616 	vdev_draid_get_perm(vdc, perm, &base, &iter);
1617 
1618 	for (uint64_t i = 0; i < vdc->vdc_groupwidth; i++) {
1619 		uint64_t c = (groupstart + i) % ndisks;
1620 		uint64_t cid = vdev_draid_permute_id(vdc, base, iter, c);
1621 		vdev_t *cvd = vd->vdev_child[cid];
1622 
1623 		/* Transaction group is known to be partially replicated. */
1624 		if (vdev_draid_partial(cvd, physical_offset, txg, size))
1625 			return (B_TRUE);
1626 	}
1627 
1628 	return (B_FALSE);
1629 }
1630 
1631 /*
1632  * Find the smallest child asize and largest sector size to calculate the
1633  * available capacity.  Distributed spares are ignored since their capacity
1634  * is also based on the minimum child size in the top-level dRAID.
1635  */
1636 static void
1637 vdev_draid_calculate_asize(vdev_t *vd, uint64_t *asizep, uint64_t *max_asizep,
1638     uint64_t *logical_ashiftp, uint64_t *physical_ashiftp)
1639 {
1640 	uint64_t logical_ashift = 0, physical_ashift = 0;
1641 	uint64_t asize = 0, max_asize = 0;
1642 
1643 	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
1644 
1645 	for (int c = 0; c < vd->vdev_children; c++) {
1646 		vdev_t *cvd = vd->vdev_child[c];
1647 
1648 		if (cvd->vdev_ops == &vdev_draid_spare_ops)
1649 			continue;
1650 
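		/*
		 * asize and max_asize start at 0; the unsigned wrap in
		 * MIN(0 - 1, x - 1) + 1 lets the first non-spare child seed
		 * the minimum, after which this tracks the smallest child
		 * asize and max_asize across all children.
		 */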
1651 		asize = MIN(asize - 1, cvd->vdev_asize - 1) + 1;
1652 		max_asize = MIN(max_asize - 1, cvd->vdev_max_asize - 1) + 1;
1653 		logical_ashift = MAX(logical_ashift, cvd->vdev_ashift);
1654 	}
1655 	for (int c = 0; c < vd->vdev_children; c++) {
1656 		vdev_t *cvd = vd->vdev_child[c];
1657 
1658 		if (cvd->vdev_ops == &vdev_draid_spare_ops)
1659 			continue;
1660 		physical_ashift = vdev_best_ashift(logical_ashift,
1661 		    physical_ashift, cvd->vdev_physical_ashift);
1662 	}
1663 
1664 	*asizep = asize;
1665 	*max_asizep = max_asize;
1666 	*logical_ashiftp = logical_ashift;
1667 	*physical_ashiftp = physical_ashift;
1668 }
1669 
1670 /*
1671  * Open spare vdevs.
1672  */
1673 static boolean_t
1674 vdev_draid_open_spares(vdev_t *vd)
1675 {
1676 	return (vd->vdev_ops == &vdev_draid_spare_ops ||
1677 	    vd->vdev_ops == &vdev_replacing_ops ||
1678 	    vd->vdev_ops == &vdev_spare_ops);
1679 }
1680 
1681 /*
1682  * Open all children, excluding spares.
1683  */
1684 static boolean_t
1685 vdev_draid_open_children(vdev_t *vd)
1686 {
1687 	return (!vdev_draid_open_spares(vd));
1688 }
1689 
1690 /*
1691  * Open a top-level dRAID vdev.
1692  */
1693 static int
1694 vdev_draid_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
1695     uint64_t *logical_ashift, uint64_t *physical_ashift)
1696 {
1697 	vdev_draid_config_t *vdc = vd->vdev_tsd;
1698 	uint64_t nparity = vdc->vdc_nparity;
1699 	int open_errors = 0;
1700 
1701 	if (nparity > VDEV_DRAID_MAXPARITY ||
1702 	    vdc->vdc_children < nparity + 1) {
1703 		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
1704 		return (SET_ERROR(EINVAL));
1705 	}
1706 
1707 	/*
1708 	 * First open the normal children then the distributed spares.  This
1709 	 * ordering is important to ensure the distributed spares calculate
1710 	 * the correct psize in the event that the dRAID vdevs were expanded.
1711 	 */
1712 	vdev_open_children_subset(vd, vdev_draid_open_children);
1713 	vdev_open_children_subset(vd, vdev_draid_open_spares);
1714 
1715 	/*
1716 	 * Verify enough of the children are available to continue.
1717 	 * If several disks have failed at the i-th position of each slice in
1718 	 * the big width row (a failure group), they are counted as a single
1719 	 * failure, but only if the failure threshold is not reached in any group.
1720 	 */
1721 	boolean_t safe2skip = B_FALSE;
1722 	if (vdc->vdc_width > vdc->vdc_children &&
1723 	    vdev_draid_fail_domain_allowed(vd))
1724 		safe2skip = B_TRUE;
1725 	for (int c = 0; c < vdc->vdc_children; c++) {
1726 		for (int i = c; i < vdc->vdc_width; i += vdc->vdc_children) {
1727 			if (vd->vdev_child[i]->vdev_open_error != 0) {
1728 				if ((++open_errors) > nparity) {
1729 					vd->vdev_stat.vs_aux =
1730 					    VDEV_AUX_NO_REPLICAS;
1731 					return (SET_ERROR(ENXIO));
1732 				}
1733 				if (safe2skip)
1734 					break;
1735 			}
1736 		}
1737 	}
1738 
1739 	/*
1740 	 * Allocatable capacity is the sum of the space on all children less
1741 	 * the number of distributed spares, rounded down to the last full row
1742 	 * and then to the last full group. An additional 32MB of scratch
1743 	 * space is reserved at the end of each child for use by the dRAID
1744 	 * expansion feature.
1745 	 */
1746 	uint64_t child_asize, child_max_asize;
1747 	vdev_draid_calculate_asize(vd, &child_asize, &child_max_asize,
1748 	    logical_ashift, physical_ashift);
1749 
1750 	/*
1751 	 * Should be unreachable since the minimum child size is 64MB, but
1752 	 * we want to make sure an underflow absolutely cannot occur here.
1753 	 */
1754 	if (child_asize < VDEV_DRAID_REFLOW_RESERVE ||
1755 	    child_max_asize < VDEV_DRAID_REFLOW_RESERVE) {
1756 		return (SET_ERROR(ENXIO));
1757 	}
1758 
1759 	child_asize = ((child_asize - VDEV_DRAID_REFLOW_RESERVE) /
1760 	    VDEV_DRAID_ROWHEIGHT) * VDEV_DRAID_ROWHEIGHT;
1761 	child_max_asize = ((child_max_asize - VDEV_DRAID_REFLOW_RESERVE) /
1762 	    VDEV_DRAID_ROWHEIGHT) * VDEV_DRAID_ROWHEIGHT;
1763 
1764 	*asize = (((child_asize * vdc->vdc_ndisks) / vdc->vdc_groupsz) *
1765 	    vdc->vdc_groupsz);
1766 	*max_asize = (((child_max_asize * vdc->vdc_ndisks) / vdc->vdc_groupsz) *
1767 	    vdc->vdc_groupsz);
1768 
1769 	/*
1770 	 * For failure groups with multiple slices in the big width row,
1771 	 * round down to the big slice size.
1772 	 */
1773 	if (vdc->vdc_width > vdc->vdc_children) {
1774 		uint64_t slicesz = vdc->vdc_devslicesz * vdc->vdc_ndisks;
1775 		*asize = (*asize / slicesz) * slicesz;
1776 		*max_asize = (*max_asize / slicesz) * slicesz;
1777 	}
1778 
1779 	return (0);
1780 }
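
/*
 * A hypothetical example of the rounding above: with 23 children and
 * 2 distributed spares (ndisks = 21), groupwidth = 10 gives
 * groupsz = 160MiB.  A child_asize of 1616MiB (101 rows) yields
 * (1616MiB * 21) / 160MiB = 212 full groups, so asize rounds down to
 * 212 * 160MiB = 33920MiB and the remaining 16MiB is unallocatable.
 */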
1781 
1782 /*
1783  * Close a top-level dRAID vdev.
1784  */
1785 static void
1786 vdev_draid_close(vdev_t *vd)
1787 {
1788 	for (int c = 0; c < vd->vdev_children; c++) {
1789 		if (vd->vdev_child[c] != NULL)
1790 			vdev_close(vd->vdev_child[c]);
1791 	}
1792 }
1793 
1794 /*
1795  * Return the maximum asize for a rebuild zio in the provided range
1796  * given the following constraints.  A dRAID chunk may not:
1797  *
1798  * - Exceed the maximum allowed block size (SPA_MAXBLOCKSIZE), or
1799  * - Span dRAID redundancy groups.
1800  */
1801 static uint64_t
1802 vdev_draid_rebuild_asize(vdev_t *vd, uint64_t start, uint64_t asize,
1803     uint64_t max_segment)
1804 {
1805 	vdev_draid_config_t *vdc = vd->vdev_tsd;
1806 
1807 	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
1808 
1809 	uint64_t ashift = vd->vdev_ashift;
1810 	uint64_t ndata = vdc->vdc_ndata;
1811 	uint64_t psize = MIN(P2ROUNDUP(max_segment * ndata, 1 << ashift),
1812 	    SPA_MAXBLOCKSIZE);
1813 
1814 	ASSERT3U(vdev_draid_get_astart(vd, start), ==, start);
1815 	ASSERT0(asize % (vdc->vdc_groupwidth << ashift));
1816 
1817 	/* Chunks must evenly span all data columns in the group. */
1818 	psize = (((psize >> ashift) / ndata) * ndata) << ashift;
1819 	uint64_t chunk_size = MIN(asize, vdev_psize_to_asize(vd, psize));
1820 
1821 	/* Reduce the chunk size to the group space remaining. */
1822 	uint64_t group = vdev_draid_offset_to_group(vd, start);
1823 	uint64_t left = vdev_draid_group_to_offset(vd, group + 1) - start;
1824 	chunk_size = MIN(chunk_size, left);
1825 
1826 	ASSERT0(chunk_size % (vdc->vdc_groupwidth << ashift));
1827 	ASSERT3U(vdev_draid_offset_to_group(vd, start), ==,
1828 	    vdev_draid_offset_to_group(vd, start + chunk_size - 1));
1829 
1830 	return (chunk_size);
1831 }
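
/*
 * A worked example with assumed values (ndata = 8, nparity = 2,
 * ashift = 12, max_segment = 1MiB): psize = MIN(P2ROUNDUP(1MiB * 8,
 * 4KiB), SPA_MAXBLOCKSIZE) = 8MiB, already a multiple of 8 data
 * sectors, which inflates to a 10MiB asize at groupwidth / ndata.
 * The returned chunk is the smallest of that, the caller's asize, and
 * the space left before the next group boundary.
 */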
1832 
1833 /*
1834  * Align the start of the metaslab to the group width and slightly reduce
1835  * its size to a multiple of the group width.  Since full stripe writes are
1836  * required by dRAID this space is unallocable.  Furthermore, aligning the
1837  * metaslab start is important for vdev initialize and TRIM which both operate
1838  * on metaslab boundaries which vdev_xlate() expects to be aligned.
1839  */
1840 static void
1841 vdev_draid_metaslab_init(vdev_t *vd, uint64_t *ms_start, uint64_t *ms_size)
1842 {
1843 	vdev_draid_config_t *vdc = vd->vdev_tsd;
1844 
1845 	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
1846 
1847 	uint64_t sz = vdc->vdc_groupwidth << vd->vdev_ashift;
1848 	uint64_t astart = vdev_draid_get_astart(vd, *ms_start);
1849 	uint64_t asize = ((*ms_size - (astart - *ms_start)) / sz) * sz;
1850 
1851 	*ms_start = astart;
1852 	*ms_size = asize;
1853 
1854 	ASSERT0(*ms_start % sz);
1855 	ASSERT0(*ms_size % sz);
1856 }
1857 
1858 /*
1859  * Add virtual dRAID spares to the list of valid spares. In order to accomplish
1860  * this the existing array must be freed and reallocated with the additional
1861  * entries.
1862  */
1863 int
1864 vdev_draid_spare_create(nvlist_t *nvroot, vdev_t *vd, uint64_t *ndraidp,
1865     uint64_t *nfgroupp, uint64_t next_vdev_id)
1866 {
1867 	uint64_t draid_nspares = 0;
1868 	uint64_t ndraid = 0;
1869 	uint64_t nfgroup = 0;
1870 	int error;
1871 
1872 	for (uint64_t i = 0; i < vd->vdev_children; i++) {
1873 		vdev_t *cvd = vd->vdev_child[i];
1874 
1875 		if (cvd->vdev_ops == &vdev_draid_ops) {
1876 			vdev_draid_config_t *vdc = cvd->vdev_tsd;
1877 			draid_nspares += vdc->vdc_nspares;
1878 			ndraid++;
1879 			if (vdc->vdc_width > vdc->vdc_children)
1880 				nfgroup++;
1881 		}
1882 	}
1883 
1884 	if (draid_nspares == 0) {
1885 		*ndraidp = ndraid;
1886 		*nfgroupp = nfgroup;
1887 		return (0);
1888 	}
1889 
1890 	nvlist_t **old_spares, **new_spares;
1891 	uint_t old_nspares;
1892 	error = nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1893 	    &old_spares, &old_nspares);
1894 	if (error)
1895 		old_nspares = 0;
1896 
1897 	/* Allocate memory and copy the existing spares. */
1898 	new_spares = kmem_alloc(sizeof (nvlist_t *) *
1899 	    (draid_nspares + old_nspares), KM_SLEEP);
1900 	for (uint_t i = 0; i < old_nspares; i++)
1901 		new_spares[i] = fnvlist_dup(old_spares[i]);
1902 
1903 	/* Add new distributed spares to ZPOOL_CONFIG_SPARES. */
1904 	uint64_t n = old_nspares;
1905 	for (uint64_t vdev_id = 0; vdev_id < vd->vdev_children; vdev_id++) {
1906 		vdev_t *cvd = vd->vdev_child[vdev_id];
1907 		char path[64];
1908 
1909 		if (cvd->vdev_ops != &vdev_draid_ops)
1910 			continue;
1911 
1912 		vdev_draid_config_t *vdc = cvd->vdev_tsd;
1913 		uint64_t nspares = vdc->vdc_nspares;
1914 		uint64_t nparity = vdc->vdc_nparity;
1915 
1916 		for (uint64_t spare_id = 0; spare_id < nspares; spare_id++) {
1917 			memset(path, 0, sizeof (path));
1918 			(void) snprintf(path, sizeof (path) - 1,
1919 			    "%s%llu-%llu-%llu", VDEV_TYPE_DRAID,
1920 			    (u_longlong_t)nparity,
1921 			    (u_longlong_t)next_vdev_id + vdev_id,
1922 			    (u_longlong_t)spare_id);
1923 
1924 			nvlist_t *spare = fnvlist_alloc();
1925 			fnvlist_add_string(spare, ZPOOL_CONFIG_PATH, path);
1926 			fnvlist_add_string(spare, ZPOOL_CONFIG_TYPE,
1927 			    VDEV_TYPE_DRAID_SPARE);
1928 			fnvlist_add_uint64(spare, ZPOOL_CONFIG_TOP_GUID,
1929 			    cvd->vdev_guid);
1930 			fnvlist_add_uint64(spare, ZPOOL_CONFIG_SPARE_ID,
1931 			    spare_id);
1932 			fnvlist_add_uint64(spare, ZPOOL_CONFIG_IS_LOG, 0);
1933 			fnvlist_add_uint64(spare, ZPOOL_CONFIG_IS_SPARE, 1);
1934 			fnvlist_add_uint64(spare, ZPOOL_CONFIG_WHOLE_DISK, 1);
1935 			fnvlist_add_uint64(spare, ZPOOL_CONFIG_ASHIFT,
1936 			    cvd->vdev_ashift);
1937 
1938 			new_spares[n] = spare;
1939 			n++;
1940 		}
1941 	}
1942 
1943 	if (n > 0) {
1944 		(void) nvlist_remove_all(nvroot, ZPOOL_CONFIG_SPARES);
1945 		fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1946 		    (const nvlist_t **)new_spares, n);
1947 	}
1948 
1949 	for (int i = 0; i < n; i++)
1950 		nvlist_free(new_spares[i]);
1951 
1952 	kmem_free(new_spares, sizeof (*new_spares) * n);
1953 	*ndraidp = ndraid;
1954 	*nfgroupp = nfgroup;
1955 
1956 	return (0);
1957 }
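
/*
 * The path constructed above follows the draid<parity>-<vdev>-<spare>
 * naming convention.  For instance, assuming nparity = 2, a top-level
 * vdev id of 3, and spare_id 1, the distributed spare is named
 * "draid2-3-1".
 */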
1958 
1959 /*
1960  * Determine if any portion of the provided block resides on a child vdev
1961  * with a dirty DTL and therefore needs to be resilvered.
1962  */
1963 static boolean_t
1964 vdev_draid_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize,
1965     uint64_t phys_birth)
1966 {
1967 	uint64_t offset = DVA_GET_OFFSET(dva);
1968 	uint64_t asize = vdev_draid_psize_to_asize(vd, psize, 0);
1969 
1970 	if (phys_birth == TXG_UNKNOWN) {
1971 		/*
1972 		 * Sequential resilver.  There is no meaningful phys_birth
1973 		 * for this block; we can only determine if the block resides
1974 		 * in a degraded group, in which case it must be resilvered.
1975 		 */
1976 		ASSERT3U(vdev_draid_offset_to_group(vd, offset), ==,
1977 		    vdev_draid_offset_to_group(vd, offset + asize - 1));
1978 
1979 		return (vdev_draid_group_degraded(vd, offset));
1980 	} else {
1981 		/*
1982 		 * Healing resilver.  TXGs not in DTL_PARTIAL are intact,
1983 		 * as are blocks in non-degraded groups.
1984 		 */
1985 		if (!vdev_dtl_contains(vd, DTL_PARTIAL, phys_birth, 1))
1986 			return (B_FALSE);
1987 
1988 		if (vdev_draid_group_missing(vd, offset, phys_birth, 1))
1989 			return (B_TRUE);
1990 
1991 		/* The block may span groups in which case check both. */
1992 		if (vdev_draid_offset_to_group(vd, offset) !=
1993 		    vdev_draid_offset_to_group(vd, offset + asize - 1)) {
1994 			if (vdev_draid_group_missing(vd,
1995 			    offset + asize, phys_birth, 1))
1996 				return (B_TRUE);
1997 		}
1998 
1999 		return (B_FALSE);
2000 	}
2001 }
2002 
2003 static boolean_t
2004 vdev_draid_rebuilding(vdev_t *vd)
2005 {
2006 	if (vd->vdev_ops->vdev_op_leaf && vd->vdev_rebuild_txg)
2007 		return (B_TRUE);
2008 
2009 	for (int i = 0; i < vd->vdev_children; i++) {
2010 		if (vdev_draid_rebuilding(vd->vdev_child[i])) {
2011 			return (B_TRUE);
2012 		}
2013 	}
2014 
2015 	return (B_FALSE);
2016 }
2017 
2018 static void
2019 vdev_draid_io_verify(vdev_t *vd, raidz_row_t *rr, int col)
2020 {
2021 #ifdef ZFS_DEBUG
2022 	zfs_range_seg64_t logical_rs, physical_rs, remain_rs;
2023 	logical_rs.rs_start = rr->rr_offset;
2024 	logical_rs.rs_end = logical_rs.rs_start +
2025 	    vdev_draid_psize_to_asize(vd, rr->rr_size, 0);
2026 
2027 	raidz_col_t *rc = &rr->rr_col[col];
2028 	vdev_t *cvd = vd->vdev_child[rc->rc_devidx];
2029 
2030 	vdev_xlate(cvd, &logical_rs, &physical_rs, &remain_rs);
2031 	ASSERT(vdev_xlate_is_empty(&remain_rs));
2032 	ASSERT3U(rc->rc_offset, ==, physical_rs.rs_start);
2033 	ASSERT3U(rc->rc_offset, <, physical_rs.rs_end);
2034 	ASSERT3U(rc->rc_offset + rc->rc_size, ==, physical_rs.rs_end);
2035 #endif
2036 }
2037 
2038 /*
2039  * For write operations:
2040  * 1. Generate the parity data
2041  * 2. Create child zio write operations to each column's vdev, for both
2042  *    data and parity.  A gang ABD is allocated by vdev_draid_map_alloc()
2043  *    if a skip sector needs to be added to a column.
2044  */
2045 static void
2046 vdev_draid_io_start_write(zio_t *zio, raidz_row_t *rr)
2047 {
2048 	vdev_t *vd = zio->io_vd;
2049 	raidz_map_t *rm = zio->io_vsd;
2050 
2051 	vdev_raidz_generate_parity_row(rm, rr);
2052 
2053 	for (int c = 0; c < rr->rr_cols; c++) {
2054 		raidz_col_t *rc = &rr->rr_col[c];
2055 
2056 		/*
2057 		 * Empty columns are zero filled and included in the parity
2058 		 * calculation and therefore must be written.
2059 		 */
2060 		ASSERT3U(rc->rc_size, !=, 0);
2061 
2062 		/* Verify physical to logical translation */
2063 		vdev_draid_io_verify(vd, rr, c);
2064 
2065 		zio_nowait(zio_vdev_child_io(zio, NULL,
2066 		    vd->vdev_child[rc->rc_devidx], rc->rc_offset,
2067 		    rc->rc_abd, rc->rc_size, zio->io_type, zio->io_priority,
2068 		    0, vdev_raidz_child_done, rc));
2069 	}
2070 }
2071 
2072 /*
2073  * For read operations:
2074  * 1. The vdev_draid_map_alloc() function will create a minimal raidz
2075  *    mapping for the read based on the zio->io_flags.  There are two
2076  *    possible mappings either 1) a normal read, or 2) a scrub/resilver.
2077  * 2. Create the zio read operations.  This will include all parity
2078  *    columns and skip sectors for a scrub/resilver.
2079  */
2080 static void
2081 vdev_draid_io_start_read(zio_t *zio, raidz_row_t *rr)
2082 {
2083 	vdev_t *vd = zio->io_vd;
2084 
2085 	/* Sequential rebuild must do IO at redundancy group boundaries. */
2086 	IMPLY(zio->io_priority == ZIO_PRIORITY_REBUILD, rr->rr_nempty == 0);
2087 
2088 	/*
2089 	 * Iterate over the columns in reverse order so that we hit the parity
2090 	 * last.  Any errors along the way will force us to read the parity.
2091 	 * For scrub/resilver IOs which verify skip sectors, a gang ABD will
2092 	 * have been allocated to store them and rc->rc_size is increased.
2093 	 */
2094 	for (int c = rr->rr_cols - 1; c >= 0; c--) {
2095 		raidz_col_t *rc = &rr->rr_col[c];
2096 		vdev_t *cvd = vd->vdev_child[rc->rc_devidx];
2097 
2098 		if (!vdev_draid_readable(cvd, rc->rc_offset)) {
2099 			if (c >= rr->rr_firstdatacol)
2100 				rr->rr_missingdata++;
2101 			else
2102 				rr->rr_missingparity++;
2103 			rc->rc_error = SET_ERROR(ENXIO);
2104 			rc->rc_tried = 1;
2105 			rc->rc_skipped = 1;
2106 			continue;
2107 		}
2108 
2109 		if (vdev_draid_missing(cvd, rc->rc_offset, zio->io_txg, 1)) {
2110 			vdev_t *svd;
2111 
2112 			if (c >= rr->rr_firstdatacol)
2113 				rr->rr_missingdata++;
2114 			else
2115 				rr->rr_missingparity++;
2116 			rc->rc_error = SET_ERROR(ESTALE);
2117 			rc->rc_skipped = 1;
2118 
2119 			/*
2120 			 * If this child has a dRAID spare attached, and that
2121 			 * spare at rc_offset maps to another spare, the repair
2122 			 * will go to that spare, and we want all mirrored
2123 			 * children on it to be updated with the repaired data,
2124 			 * even when we cannot vouch for it during rebuilds
2125 			 * (which don't have checksums). Otherwise, we will see
2126 			 * a lot of checksum errors on that spare during scrub.
2127 			 * The worst that can happen in this case is that we
2128 			 * update the reserved spare column on some device
2129 			 * with unverified data, which is harmless.
2130 			 */
2131 			if ((svd = vdev_draid_find_spare(cvd)) != NULL) {
2132 				svd = vdev_draid_spare_get_child(svd,
2133 				    rc->rc_offset);
2134 				if (svd && (svd->vdev_ops == &vdev_spare_ops ||
2135 				    svd->vdev_ops == &vdev_replacing_ops))
2136 					rc->rc_tgt_is_dspare = 1;
2137 			}
2138 			continue;
2139 		}
2140 
2141 		/*
2142 		 * Empty columns may be read during vdev_draid_io_done().
2143 		 * Only skip them after the readable and missing checks
2144 		 * verify they are available.
2145 		 */
2146 		if (rc->rc_size == 0) {
2147 			rc->rc_skipped = 1;
2148 			continue;
2149 		}
2150 
2151 		if (zio->io_flags & ZIO_FLAG_RESILVER) {
2152 			vdev_t *svd;
2153 
2154 			/*
2155 			 * Repairs need to be constrained to the devices being
2156 			 * rebuilt since without a checksum we cannot verify the
2157 			 * data is actually correct and performing an incorrect
2158 			 * repair could result in locking in the damage and
2159 			 * making the data unrecoverable.
2160 			 */
2161 			if (zio->io_priority == ZIO_PRIORITY_REBUILD &&
2162 			    !vdev_draid_rebuilding(cvd))
2163 				rc->rc_allow_repair = 0;
2164 
2165 			/*
2166 			 * If this child is a distributed spare then the
2167 			 * offset might reside on the vdev being replaced.
2168 			 * In which case this data must be written to the
2169 			 * new device.  Failure to do so would result in
2170 			 * checksum errors when the old device is detached
2171 			 * and the pool is scrubbed.
2172 			 */
2173 			if ((svd = vdev_draid_find_spare(cvd)) != NULL) {
2174 				svd = vdev_draid_spare_get_child(svd,
2175 				    rc->rc_offset);
2176 				if (svd && (svd->vdev_ops == &vdev_spare_ops ||
2177 				    svd->vdev_ops == &vdev_replacing_ops)) {
2178 					rc->rc_force_repair = 1;
2179 
2180 					if (vdev_draid_rebuilding(svd))
2181 						rc->rc_allow_repair = 1;
2182 				}
2183 			}
2184 
2185 			/*
2186 			 * Always issue a repair IO to this child when it's
2187 			 * a spare or replacing vdev with an active rebuild.
2188 			 */
2189 			if ((cvd->vdev_ops == &vdev_spare_ops ||
2190 			    cvd->vdev_ops == &vdev_replacing_ops) &&
2191 			    vdev_draid_rebuilding(cvd)) {
2192 				rc->rc_force_repair = 1;
2193 				rc->rc_allow_repair = 1;
2194 			}
2195 		}
2196 
2197 		if (vdev_sit_out_reads(cvd, zio->io_flags)) {
2198 			rr->rr_outlier_cnt++;
2199 			ASSERT0(rc->rc_latency_outlier);
2200 			rc->rc_latency_outlier = 1;
2201 		}
2202 	}
2203 
2204 	/*
2205 	 * When the row contains a latency outlier and sufficient parity
2206 	 * exists to reconstruct the column data, then skip reading the
2207 	 * known slow child vdev as a performance optimization.
2208 	 */
2209 	if (rr->rr_outlier_cnt > 0 &&
2210 	    (rr->rr_firstdatacol - rr->rr_missingparity) >=
2211 	    (rr->rr_missingdata + 1)) {
2212 
2213 		for (int c = rr->rr_cols - 1; c >= rr->rr_firstdatacol; c--) {
2214 			raidz_col_t *rc = &rr->rr_col[c];
2215 
2216 			if (rc->rc_error == 0 && rc->rc_latency_outlier) {
2217 				rr->rr_missingdata++;
2218 				rc->rc_error = SET_ERROR(EAGAIN);
2219 				rc->rc_skipped = 1;
2220 				break;
2221 			}
2222 		}
2223 	}
2224 
2225 	/*
2226 	 * Either a parity or data column is missing; this means a repair
2227 	 * may be attempted by vdev_draid_io_done().  Expand the raid map
2228 	 * to read in empty columns which are needed along with the parity
2229 	 * during reconstruction.
2230 	 */
2231 	if ((rr->rr_missingdata > 0 || rr->rr_missingparity > 0) &&
2232 	    rr->rr_nempty > 0 && rr->rr_abd_empty == NULL) {
2233 		vdev_draid_map_alloc_empty(zio, rr);
2234 	}
2235 
2236 	for (int c = rr->rr_cols - 1; c >= 0; c--) {
2237 		raidz_col_t *rc = &rr->rr_col[c];
2238 		vdev_t *cvd = vd->vdev_child[rc->rc_devidx];
2239 
2240 		if (rc->rc_error || rc->rc_size == 0)
2241 			continue;
2242 
2243 		if (c >= rr->rr_firstdatacol || rr->rr_missingdata > 0 ||
2244 		    (zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER))) {
2245 			zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
2246 			    rc->rc_offset, rc->rc_abd, rc->rc_size,
2247 			    zio->io_type, zio->io_priority, 0,
2248 			    vdev_raidz_child_done, rc));
2249 		}
2250 	}
2251 }
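
/*
 * To illustrate the outlier-skip condition above with assumed numbers:
 * in a draid2 row (rr_firstdatacol = 2) with both parity columns
 * readable (rr_missingparity = 0) and one data column already missing,
 * 2 - 0 >= 1 + 1 holds, so one slow child may also be skipped and
 * reconstructed from parity; with two data columns missing, 2 >= 3
 * fails and the slow child must be read.
 */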
2252 
2253 /*
2254  * Start an IO operation to a dRAID vdev.
2255  */
2256 static void
2257 vdev_draid_io_start(zio_t *zio)
2258 {
2259 	vdev_t *vd __maybe_unused = zio->io_vd;
2260 
2261 	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
2262 	ASSERT3U(zio->io_offset, ==, vdev_draid_get_astart(vd, zio->io_offset));
2263 
2264 	raidz_map_t *rm = vdev_draid_map_alloc(zio);
2265 	zio->io_vsd = rm;
2266 	zio->io_vsd_ops = &vdev_raidz_vsd_ops;
2267 
2268 	if (zio->io_type == ZIO_TYPE_WRITE) {
2269 		for (int i = 0; i < rm->rm_nrows; i++) {
2270 			vdev_draid_io_start_write(zio, rm->rm_row[i]);
2271 		}
2272 	} else {
2273 		ASSERT(zio->io_type == ZIO_TYPE_READ);
2274 
2275 		for (int i = 0; i < rm->rm_nrows; i++) {
2276 			vdev_draid_io_start_read(zio, rm->rm_row[i]);
2277 		}
2278 	}
2279 
2280 	zio_execute(zio);
2281 }
2282 
2283 /*
2284  * Complete an IO operation on a dRAID vdev.  The raidz logic can be applied
2285  * to dRAID since the layout is fully described by the raidz_map_t.
2286  */
2287 static void
2288 vdev_draid_io_done(zio_t *zio)
2289 {
2290 	vdev_raidz_io_done(zio);
2291 }
2292 
2293 static void
2294 vdev_draid_state_change(vdev_t *vd, int faulted, int degraded)
2295 {
2296 	vdev_draid_config_t *vdc = vd->vdev_tsd;
2297 	ASSERT(vd->vdev_ops == &vdev_draid_ops);
2298 
2299 	if (faulted > vdc->vdc_nparity * (vdc->vdc_width / vdc->vdc_children))
2300 		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2301 		    VDEV_AUX_NO_REPLICAS);
2302 	else if (degraded + faulted != 0)
2303 		vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE);
2304 	else
2305 		vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY, VDEV_AUX_NONE);
2306 }
2307 
2308 static void
2309 vdev_draid_xlate(vdev_t *cvd, const zfs_range_seg64_t *logical_rs,
2310     zfs_range_seg64_t *physical_rs, zfs_range_seg64_t *remain_rs)
2311 {
2312 	vdev_t *raidvd = cvd->vdev_parent;
2313 	ASSERT(raidvd->vdev_ops == &vdev_draid_ops);
2314 
2315 	vdev_draid_config_t *vdc = raidvd->vdev_tsd;
2316 	uint64_t ashift = raidvd->vdev_top->vdev_ashift;
2317 
2318 	/* Make sure the offsets are block-aligned */
2319 	ASSERT0(logical_rs->rs_start % (1 << ashift));
2320 	ASSERT0(logical_rs->rs_end % (1 << ashift));
2321 
2322 	uint64_t logical_start = logical_rs->rs_start;
2323 	uint64_t logical_end = logical_rs->rs_end;
2324 
2325 	/*
2326 	 * Unaligned ranges must be skipped. All metaslabs are correctly
2327 	 * aligned so this should not happen, but this case is handled in
2328 	 * case it's needed by future callers.
2329 	 */
2330 	uint64_t astart = vdev_draid_get_astart(raidvd, logical_start);
2331 	if (astart != logical_start) {
2332 		physical_rs->rs_start = logical_start;
2333 		physical_rs->rs_end = logical_start;
2334 		remain_rs->rs_start = MIN(astart, logical_end);
2335 		remain_rs->rs_end = logical_end;
2336 		return;
2337 	}
2338 
2339 	/*
2340 	 * Unlike with mirrors and raidz a dRAID logical range can map
2341 	 * to multiple non-contiguous physical ranges. This is handled by
2342 	 * limiting the size of the logical range to a single group and
2343 	 * setting the remain argument such that it describes the remaining
2344 	 * unmapped logical range. This is stricter than absolutely
2345 	 * necessary but helps simplify the logic below.
2346 	 */
2347 	uint64_t group = vdev_draid_offset_to_group(raidvd, logical_start);
2348 	uint64_t nextstart = vdev_draid_group_to_offset(raidvd, group + 1);
2349 	if (logical_end > nextstart)
2350 		logical_end = nextstart;
2351 
2352 	/* Find the starting offset for each vdev in the group */
2353 	uint64_t perm, groupstart, ndisks;
2354 	uint64_t start = vdev_draid_logical_to_physical(raidvd,
2355 	    logical_start, &perm, &groupstart, &ndisks);
2356 	uint64_t end = start;
2357 
2358 	uint8_t *base;
2359 	uint64_t iter, id;
2360 	vdev_draid_get_perm(vdc, perm, &base, &iter);
2361 
2362 	/*
2363 	 * Check if the passed child falls within the group.  If it does
2364 	 * update the start and end to reflect the physical range.
2365 	 * Otherwise, leave them unmodified which will result in an empty
2366 	 * (zero-length) physical range being returned.
2367 	 */
2368 	for (uint64_t i = 0; i < vdc->vdc_groupwidth; i++) {
2369 		uint64_t c = (groupstart + i) % ndisks;
2370 
2371 		if (c == 0 && i != 0) {
2372 			/* the group wrapped, increment the start */
2373 			start += VDEV_DRAID_ROWHEIGHT;
2374 			end = start;
2375 		}
2376 
2377 		id = vdev_draid_permute_id(vdc, base, iter, c);
2378 		if (id == cvd->vdev_id) {
2379 			uint64_t b_size = (logical_end >> ashift) -
2380 			    (logical_start >> ashift);
2381 			ASSERT3U(b_size, >, 0);
2382 			end = start + ((((b_size - 1) /
2383 			    vdc->vdc_groupwidth) + 1) << ashift);
2384 			break;
2385 		}
2386 	}
2387 	physical_rs->rs_start = start;
2388 	physical_rs->rs_end = end;
2389 
2390 	/*
2391 	 * Only top-level vdevs are allowed to set remain_rs because
2392 	 * when .vdev_op_xlate() is called for their children the full
2393 	 * logical range is not provided by vdev_xlate().
2394 	 */
2395 	remain_rs->rs_start = logical_end;
2396 	remain_rs->rs_end = logical_rs->rs_end;
2397 
2398 	ASSERT3U(physical_rs->rs_start, <=, logical_start);
2399 	ASSERT3U(physical_rs->rs_end - physical_rs->rs_start, <=,
2400 	    logical_end - logical_start);
2401 }
2402 
2403 /*
2404  * Add dRAID specific fields to the config nvlist.
2405  */
2406 static void
2407 vdev_draid_config_generate(vdev_t *vd, nvlist_t *nv)
2408 {
2409 	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
2410 	vdev_draid_config_t *vdc = vd->vdev_tsd;
2411 
2412 	fnvlist_add_uint64(nv, ZPOOL_CONFIG_NPARITY, vdc->vdc_nparity);
2413 	fnvlist_add_uint64(nv, ZPOOL_CONFIG_DRAID_NDATA, vdc->vdc_ndata);
2414 	fnvlist_add_uint64(nv, ZPOOL_CONFIG_DRAID_NSPARES, vdc->vdc_nspares);
2415 	fnvlist_add_uint64(nv, ZPOOL_CONFIG_DRAID_NGROUPS, vdc->vdc_ngroups);
2416 
2417 	if (spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_DRAID_FAIL_DOMAINS))
2418 		fnvlist_add_uint64(nv, ZPOOL_CONFIG_DRAID_NCHILDREN,
2419 		    vdc->vdc_children);
2420 }
2421 
2422 /*
2423  * Initialize private dRAID specific fields from the nvlist.
2424  */
2425 static int
2426 vdev_draid_init(spa_t *spa, nvlist_t *nv, void **tsd)
2427 {
2428 	(void) spa;
2429 	uint64_t ndata, nparity, nspares, ngroups;
2430 	int error;
2431 
2432 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DRAID_NDATA, &ndata))
2433 		return (SET_ERROR(EINVAL));
2434 
2435 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY, &nparity) ||
2436 	    nparity == 0 || nparity > VDEV_DRAID_MAXPARITY) {
2437 		return (SET_ERROR(EINVAL));
2438 	}
2439 
2440 	uint_t width;
2441 	uint64_t children;
2442 	nvlist_t **child;
2443 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2444 	    &child, &width) != 0 || width == 0) {
2445 		return (SET_ERROR(EINVAL));
2446 	}
2447 
2448 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DRAID_NCHILDREN, &children)) {
2449 		children = width;
2450 		if (children > VDEV_DRAID_MAX_CHILDREN)
2451 			return (SET_ERROR(EINVAL));
2452 	}
2453 
2454 	if (children == 0 || width % children != 0)
2455 		return (SET_ERROR(EINVAL));
2456 
2457 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DRAID_NSPARES, &nspares) ||
2458 	    nspares > 100) {
2459 		return (SET_ERROR(EINVAL));
2460 	}
2461 
2462 	int fgrps = width / children;
2463 	int nspare = nspares / fgrps;
2464 	if (nspares % fgrps)
2465 		nspare++;
2466 
2467 	/*
2468 	 * Validate the minimum number of children exist per group for the
2469 	 * specified parity level (draid1 >= 2, draid2 >= 3, draid3 >= 4).
2470 	 */
2471 	if (children < (ndata + nparity + nspare))
2472 		return (SET_ERROR(EINVAL));
2473 
2474 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DRAID_NGROUPS, &ngroups) ||
2475 	    ngroups == 0 || ngroups > VDEV_DRAID_MAX_CHILDREN) {
2476 		return (SET_ERROR(EINVAL));
2477 	}
2478 
2479 	/*
2480 	 * Create the dRAID configuration using the pool nvlist configuration
2481 	 * and the fixed mapping for the correct number of children.
2482 	 */
2483 	vdev_draid_config_t *vdc;
2484 	const draid_map_t *map;
2485 
2486 	error = vdev_draid_lookup_map(children, &map);
2487 	if (error)
2488 		return (SET_ERROR(EINVAL));
2489 
2490 	vdc = kmem_zalloc(sizeof (*vdc), KM_SLEEP);
2491 	vdc->vdc_ndata = ndata;
2492 	vdc->vdc_nparity = nparity;
2493 	vdc->vdc_nspares = nspares;
2494 	vdc->vdc_children = children;
2495 	vdc->vdc_ngroups = ngroups;
2496 	vdc->vdc_width = width;
2497 	vdc->vdc_nperms = map->dm_nperms;
2498 
2499 	error = vdev_draid_generate_perms(map, &vdc->vdc_perms);
2500 	if (error) {
2501 		kmem_free(vdc, sizeof (*vdc));
2502 		return (SET_ERROR(EINVAL));
2503 	}
2504 
2505 	if (width > children)
2506 		vdev_draid_shuffle_perms(map, vdc->vdc_perms, width);
2507 
2508 	/*
2509 	 * Derived constants.
2510 	 */
2511 	vdc->vdc_groupwidth = vdc->vdc_ndata + vdc->vdc_nparity;
2512 	vdc->vdc_ndisks = vdc->vdc_width - vdc->vdc_nspares;
2513 	vdc->vdc_groupsz = vdc->vdc_groupwidth * VDEV_DRAID_ROWHEIGHT;
2514 	vdc->vdc_devslicesz = (vdc->vdc_groupsz * vdc->vdc_ngroups) /
2515 	    vdc->vdc_ndisks;
2516 
2517 	ASSERT3U(vdc->vdc_groupwidth, >=, 2);
2518 	ASSERT3U(vdc->vdc_groupwidth, <=, vdc->vdc_ndisks);
2519 	ASSERT3U(vdc->vdc_groupsz, >=, 2 * VDEV_DRAID_ROWHEIGHT);
2520 	ASSERT3U(vdc->vdc_devslicesz, >=, VDEV_DRAID_ROWHEIGHT);
2521 	ASSERT0(vdc->vdc_devslicesz % VDEV_DRAID_ROWHEIGHT);
2522 	ASSERT3U((vdc->vdc_groupwidth * vdc->vdc_ngroups) %
2523 	    vdc->vdc_ndisks, ==, 0);
2524 
2525 	*tsd = vdc;
2526 
2527 	return (0);
2528 }
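
/*
 * A sketch of the derived constants for an assumed draid2:8d:2s:23c
 * layout: groupwidth = 8 + 2 = 10, ndisks = 23 - 2 = 21, and
 * groupsz = 10 * 16MiB = 160MiB.  Since gcd(10, 21) = 1 the smallest
 * ngroups satisfying the last ASSERT is 21, giving
 * devslicesz = (160MiB * 21) / 21 = 160MiB, i.e. ten 16MiB rows per
 * disk per slice.
 */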
2529 
2530 static void
2531 vdev_draid_fini(vdev_t *vd)
2532 {
2533 	vdev_draid_config_t *vdc = vd->vdev_tsd;
2534 
2535 	vmem_free(vdc->vdc_perms, sizeof (uint8_t) *
2536 	    vdc->vdc_children * vdc->vdc_nperms);
2537 	kmem_free(vdc, sizeof (*vdc));
2538 }
2539 
2540 static uint64_t
2541 vdev_draid_nparity(vdev_t *vd)
2542 {
2543 	vdev_draid_config_t *vdc = vd->vdev_tsd;
2544 
2545 	return (vdc->vdc_nparity * (vdc->vdc_width / vdc->vdc_children));
2546 }
2547 
2548 static uint64_t
2549 vdev_draid_ndisks(vdev_t *vd)
2550 {
2551 	vdev_draid_config_t *vdc = vd->vdev_tsd;
2552 
2553 	return (vdc->vdc_ndisks);
2554 }
2555 
2556 vdev_ops_t vdev_draid_ops = {
2557 	.vdev_op_init = vdev_draid_init,
2558 	.vdev_op_fini = vdev_draid_fini,
2559 	.vdev_op_open = vdev_draid_open,
2560 	.vdev_op_close = vdev_draid_close,
2561 	.vdev_op_psize_to_asize = vdev_draid_psize_to_asize,
2562 	.vdev_op_asize_to_psize = vdev_draid_asize_to_psize,
2563 	.vdev_op_min_asize = vdev_draid_min_asize,
2564 	.vdev_op_min_alloc = vdev_draid_min_alloc,
2565 	.vdev_op_io_start = vdev_draid_io_start,
2566 	.vdev_op_io_done = vdev_draid_io_done,
2567 	.vdev_op_state_change = vdev_draid_state_change,
2568 	.vdev_op_need_resilver = vdev_draid_need_resilver,
2569 	.vdev_op_hold = NULL,
2570 	.vdev_op_rele = NULL,
2571 	.vdev_op_remap = NULL,
2572 	.vdev_op_xlate = vdev_draid_xlate,
2573 	.vdev_op_rebuild_asize = vdev_draid_rebuild_asize,
2574 	.vdev_op_metaslab_init = vdev_draid_metaslab_init,
2575 	.vdev_op_config_generate = vdev_draid_config_generate,
2576 	.vdev_op_nparity = vdev_draid_nparity,
2577 	.vdev_op_ndisks = vdev_draid_ndisks,
2578 	.vdev_op_type = VDEV_TYPE_DRAID,
2579 	.vdev_op_leaf = B_FALSE,
2580 };
2581 
2582 
2583 /*
2584  * A dRAID distributed spare is a virtual leaf vdev which is included in the
2585  * parent dRAID configuration.  The last N columns of the dRAID permutation
2586  * table are used to determine on which dRAID children a specific offset
2587  * should be written.  These spare leaf vdevs can only be used to replace
2588  * faulted children in the same dRAID configuration.
2589  */
2590 
2591 /*
2592  * Distributed spare state.  All fields are set when the distributed spare is
2593  * first opened and are immutable.
2594  */
2595 typedef struct {
2596 	vdev_t *vds_draid_vdev;		/* top-level parent dRAID vdev */
2597 	uint64_t vds_top_guid;		/* top-level parent dRAID guid */
2598 	uint64_t vds_spare_id;		/* spare id (0 - vdc->vdc_nspares-1) */
2599 } vdev_draid_spare_t;
2600 
2601 /*
2602  * Returns the parent dRAID vdev to which the distributed spare belongs.
2603  * This may be safely called even when the vdev is not open.
2604  */
2605 vdev_t *
2606 vdev_draid_spare_get_parent(vdev_t *vd)
2607 {
2608 	vdev_draid_spare_t *vds = vd->vdev_tsd;
2609 
2610 	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_spare_ops);
2611 
2612 	if (vds->vds_draid_vdev != NULL)
2613 		return (vds->vds_draid_vdev);
2614 
2615 	return (vdev_lookup_by_guid(vd->vdev_spa->spa_root_vdev,
2616 	    vds->vds_top_guid));
2617 }
2618 
2619 /*
2620  * A dRAID spare is active when it's the child of a vdev using the
2621  * vdev_spare_ops, vdev_replacing_ops or vdev_draid_ops.
2622  */
2623 static boolean_t
2624 vdev_draid_spare_is_active(vdev_t *vd)
2625 {
2626 	vdev_t *pvd = vd->vdev_parent;
2627 
2628 	if (pvd != NULL && (pvd->vdev_ops == &vdev_spare_ops ||
2629 	    pvd->vdev_ops == &vdev_replacing_ops ||
2630 	    pvd->vdev_ops == &vdev_draid_ops)) {
2631 		return (B_TRUE);
2632 	} else {
2633 		return (B_FALSE);
2634 	}
2635 }
2636 
2637 /*
2638  * Given a dRAID distributed spare vdev, returns the physical child vdev
2639  * on which the provided offset resides.  This may involve recursing through
2640  * multiple layers of distributed spares.  Note that offset is relative to
2641  * this vdev.
2642  */
2643 vdev_t *
2644 vdev_draid_spare_get_child(vdev_t *vd, uint64_t physical_offset)
2645 {
2646 	vdev_draid_spare_t *vds = vd->vdev_tsd;
2647 
2648 	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_spare_ops);
2649 
2650 	/* The vdev is closed */
2651 	if (vds->vds_draid_vdev == NULL)
2652 		return (NULL);
2653 
2654 	vdev_t *tvd = vds->vds_draid_vdev;
2655 	vdev_draid_config_t *vdc = tvd->vdev_tsd;
2656 
2657 	uint64_t fgrps = vdc->vdc_width / vdc->vdc_children;
2658 
2659 	ASSERT3P(tvd->vdev_ops, ==, &vdev_draid_ops);
2660 	ASSERT3U(vds->vds_spare_id, <, vdc->vdc_nspares);
2661 
2662 	uint8_t *base;
2663 	uint64_t iter;
2664 	uint64_t perm = (physical_offset / vdc->vdc_devslicesz) * fgrps;
2665 
2666 	/*
2667 	 * Adjust permutation so that it points to the correct slice in the
2668 	 * big width row.
2669 	 */
2670 	perm += vds->vds_spare_id % fgrps;
2671 
2672 	vdev_draid_get_perm(vdc, perm, &base, &iter);
2673 
2674 	uint64_t cid = vdev_draid_permute_id(vdc, base, iter,
2675 	    (vdc->vdc_children - 1) - (vds->vds_spare_id / fgrps));
2676 	vdev_t *cvd = tvd->vdev_child[cid];
2677 
2678 	if (cvd->vdev_ops == &vdev_draid_spare_ops)
2679 		return (vdev_draid_spare_get_child(cvd, physical_offset));
2680 
2681 	return (cvd);
2682 }
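
/*
 * For example (values assumed): with 23 children, no failure groups
 * (fgrps = 1), and devslicesz = 160MiB, a physical offset of 500MiB
 * selects permutation 3, and spare id 0 resolves to whichever child
 * that permutation places in the last column, i.e.
 * vdev_draid_permute_id(..., 23 - 1 - 0).
 */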
2683 
2684 /*
2685  * Returns true if no failure group has reached the failure threshold
2686  * beyond which an enclosure failure can no longer be tolerated. Used
2687  * spares are counted as failures because, in the event of an enclosure
2688  * failure, their blocks may belong to disks from that enclosure and be lost.
2689  */
2690 boolean_t
2691 vdev_draid_fail_domain_allowed(vdev_t *vd)
2692 {
2693 	vdev_draid_config_t *vdc = vd->vdev_tsd;
2694 
2695 	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
2696 	ASSERT3P(vdc->vdc_width, >, vdc->vdc_children);
2697 
2698 	int counter = 0;
2699 
2700 	for (int c = 0; c < vdc->vdc_width; c++) {
2701 		vdev_t *cvd = vd->vdev_child[c];
2702 
2703 		if ((c % vdc->vdc_children) == 0)
2704 			counter = 0;
2705 
2706 		if (cvd->vdev_ops == &vdev_spare_ops ||
2707 		    cvd->vdev_ops == &vdev_draid_spare_ops ||
2708 		    !vdev_readable(cvd))
2709 			counter++;
2710 
2711 		if (counter > vdc->vdc_nparity)
2712 			return (B_FALSE);
2713 	}
2714 
2715 	return (B_TRUE);
2716 }
2717 
2718 static void
2719 vdev_draid_spare_close(vdev_t *vd)
2720 {
2721 	vdev_draid_spare_t *vds = vd->vdev_tsd;
2722 	vds->vds_draid_vdev = NULL;
2723 }
2724 
2725 /*
2726  * Opening a dRAID spare device is done by looking up the associated dRAID
2727  * top-level vdev guid from the spare configuration.
2728  */
2729 static int
2730 vdev_draid_spare_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
2731     uint64_t *logical_ashift, uint64_t *physical_ashift)
2732 {
2733 	vdev_draid_spare_t *vds = vd->vdev_tsd;
2734 	vdev_t *rvd = vd->vdev_spa->spa_root_vdev;
2735 	uint64_t asize, max_asize;
2736 
2737 	vdev_t *tvd = vdev_lookup_by_guid(rvd, vds->vds_top_guid);
2738 	if (tvd == NULL) {
2739 		/*
2740 		 * When spa_vdev_add() is labeling new spares the
2741 		 * associated dRAID is not attached to the root vdev
2742 		 * nor does this spare have a parent.  Simulate a valid
2743 		 * device in order to allow the label to be initialized
2744 		 * and the distributed spare added to the configuration.
2745 		 */
2746 		if (vd->vdev_parent == NULL) {
2747 			*psize = *max_psize = SPA_MINDEVSIZE;
2748 			*logical_ashift = *physical_ashift = ASHIFT_MIN;
2749 			return (0);
2750 		}
2751 
2752 		return (SET_ERROR(EINVAL));
2753 	}
2754 
2755 	vdev_draid_config_t *vdc = tvd->vdev_tsd;
2756 	if (tvd->vdev_ops != &vdev_draid_ops || vdc == NULL)
2757 		return (SET_ERROR(EINVAL));
2758 
2759 	if (vds->vds_spare_id >= vdc->vdc_nspares)
2760 		return (SET_ERROR(EINVAL));
2761 
2762 	/*
2763 	 * Neither tvd->vdev_asize nor tvd->vdev_max_asize can be used here
2764 	 * because the caller may be vdev_draid_open() in which case the
2765 	 * values are stale as they haven't yet been updated by vdev_open().
2766 	 * To avoid this always recalculate the dRAID asize and max_asize.
2767 	 */
2768 	vdev_draid_calculate_asize(tvd, &asize, &max_asize,
2769 	    logical_ashift, physical_ashift);
2770 
2771 	*psize = asize + VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
2772 	*max_psize = max_asize + VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
2773 
2774 	vds->vds_draid_vdev = tvd;
2775 	vd->vdev_nonrot = tvd->vdev_nonrot;
2776 
2777 	return (0);
2778 }
2779 
2780 /*
2781  * Completed distributed spare IO.  Store the result in the parent zio
2782  * as if it had performed the operation itself.  Only the first error is
2783  * preserved if there are multiple errors.
2784  */
2785 static void
2786 vdev_draid_spare_child_done(zio_t *zio)
2787 {
2788 	zio_t *pio = zio->io_private;
2789 
2790 	/*
2791 	 * IOs are issued to non-writable vdevs in order to keep their
2792 	 * DTLs accurate.  However, we don't want to propagate the
2793 	 * error into the distributed spare's DTL.  When resilvering
2794 	 * vdev_draid_need_resilver() will consult the relevant DTL
2795 	 * to determine if the data is missing and must be repaired.
2796 	 */
2797 	if (!vdev_writeable(zio->io_vd))
2798 		return;
2799 
2800 	if (pio->io_error == 0)
2801 		pio->io_error = zio->io_error;
2802 }
2803 
2804 /*
2805  * Returns a valid label nvlist for the distributed spare vdev.  This is
2806  * used to bypass the IO pipeline to avoid the complexity of constructing
2807  * a complete label with valid checksum to return when read.
2808  */
2809 nvlist_t *
2810 vdev_draid_read_config_spare(vdev_t *vd)
2811 {
2812 	spa_t *spa = vd->vdev_spa;
2813 	spa_aux_vdev_t *sav = &spa->spa_spares;
2814 	uint64_t guid = vd->vdev_guid;
2815 
2816 	nvlist_t *nv = fnvlist_alloc();
2817 	fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 1);
2818 	fnvlist_add_uint64(nv, ZPOOL_CONFIG_CREATE_TXG, vd->vdev_crtxg);
2819 	fnvlist_add_uint64(nv, ZPOOL_CONFIG_VERSION, spa_version(spa));
2820 	fnvlist_add_string(nv, ZPOOL_CONFIG_POOL_NAME, spa_name(spa));
2821 	fnvlist_add_uint64(nv, ZPOOL_CONFIG_POOL_GUID, spa_guid(spa));
2822 	fnvlist_add_uint64(nv, ZPOOL_CONFIG_POOL_TXG, spa->spa_config_txg);
2823 	fnvlist_add_uint64(nv, ZPOOL_CONFIG_TOP_GUID, vd->vdev_top->vdev_guid);
2824 	fnvlist_add_uint64(nv, ZPOOL_CONFIG_POOL_STATE,
2825 	    vdev_draid_spare_is_active(vd) ?
2826 	    POOL_STATE_ACTIVE : POOL_STATE_SPARE);
2827 
2828 	/* Set the vdev guid based on the vdev list in sav_count. */
2829 	for (int i = 0; i < sav->sav_count; i++) {
2830 		if (sav->sav_vdevs[i]->vdev_ops == &vdev_draid_spare_ops &&
2831 		    strcmp(sav->sav_vdevs[i]->vdev_path, vd->vdev_path) == 0) {
2832 			guid = sav->sav_vdevs[i]->vdev_guid;
2833 			break;
2834 		}
2835 	}
2836 
2837 	fnvlist_add_uint64(nv, ZPOOL_CONFIG_GUID, guid);
2838 
2839 	return (nv);
2840 }
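
/*
 * For illustration, the returned label nvlist has roughly the following
 * shape; the values shown are hypothetical:
 *
 *	is_spare:   1
 *	create_txg: 4
 *	version:    5000
 *	name:       "tank"
 *	pool_guid:  <spa_guid(spa)>
 *	txg:        <spa->spa_config_txg>
 *	top_guid:   <guid of the dRAID top-level vdev>
 *	state:      POOL_STATE_ACTIVE or POOL_STATE_SPARE
 *	guid:       <guid of the matching entry in spa_spares>
 */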

/*
 * Handle any flush requested of the distributed spare. All children must be
 * flushed.
 */
static int
vdev_draid_spare_flush(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	int error = 0;

	for (int c = 0; c < vd->vdev_children; c++) {
		zio_nowait(zio_vdev_child_io(zio, NULL,
		    vd->vdev_child[c], zio->io_offset, zio->io_abd,
		    zio->io_size, zio->io_type, zio->io_priority, 0,
		    vdev_draid_spare_child_done, zio));
	}

	return (error);
}
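
/*
 * Note: the return value here is always zero; any per-child flush failure
 * is reported asynchronously through vdev_draid_spare_child_done(), which
 * records the first error in the parent zio.
 */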

/*
 * Initiate an IO to the distributed spare.  For normal IOs this entails
 * using the zio->io_offset and the permutation table to calculate which
 * child dRAID vdev is responsible for the data, then passing the zio
 * along to that child to perform the actual IO.  The label ranges are not
 * stored on disk and require some special handling which is described
 * below.
 */
static void
vdev_draid_spare_io_start(zio_t *zio)
{
	vdev_t *cvd = NULL, *vd = zio->io_vd;
	vdev_draid_spare_t *vds = vd->vdev_tsd;
	uint64_t offset = zio->io_offset - VDEV_LABEL_START_SIZE;

	/*
	 * If the vdev is closed, it's likely in the REMOVED or FAULTED state.
	 * Nothing to be done here but return failure.
	 */
	if (vds == NULL) {
		zio->io_error = ENXIO;
		zio_interrupt(zio);
		return;
	}

	switch (zio->io_type) {
	case ZIO_TYPE_FLUSH:
		zio->io_error = vdev_draid_spare_flush(zio);
		break;

	case ZIO_TYPE_WRITE:
		if (VDEV_OFFSET_IS_LABEL(vd, zio->io_offset)) {
			/*
			 * Accept probe IOs and config writers to simulate the
			 * existence of an on-disk label.  vdev_label_sync(),
			 * vdev_uberblock_sync() and vdev_copy_uberblocks()
			 * skip the distributed spares.  This only leaves
			 * vdev_label_init() which is allowed to succeed to
			 * avoid adding special cases to the function.
			 */
			if (zio->io_flags & ZIO_FLAG_PROBE ||
			    zio->io_flags & ZIO_FLAG_CONFIG_WRITER) {
				zio->io_error = 0;
			} else {
				zio->io_error = SET_ERROR(EIO);
			}
		} else {
			cvd = vdev_draid_spare_get_child(vd, offset);

			if (cvd == NULL) {
				zio->io_error = SET_ERROR(ENXIO);
			} else {
				zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
				    offset, zio->io_abd, zio->io_size,
				    zio->io_type, zio->io_priority, 0,
				    vdev_draid_spare_child_done, zio));
			}
		}
		break;

	case ZIO_TYPE_READ:
		if (VDEV_OFFSET_IS_LABEL(vd, zio->io_offset)) {
			/*
			 * Accept probe IOs to simulate the existence of a
			 * label.  vdev_label_read_config() bypasses the
			 * pipeline to read the label configuration and
			 * vdev_uberblock_load() skips distributed spares
			 * when attempting to locate the best uberblock.
			 */
			if (zio->io_flags & ZIO_FLAG_PROBE) {
				zio->io_error = 0;
			} else {
				zio->io_error = SET_ERROR(EIO);
			}
		} else {
			cvd = vdev_draid_spare_get_child(vd, offset);

			if (cvd == NULL || !vdev_readable(cvd)) {
				zio->io_error = SET_ERROR(ENXIO);
			} else {
				zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
				    offset, zio->io_abd, zio->io_size,
				    zio->io_type, zio->io_priority, 0,
				    vdev_draid_spare_child_done, zio));
			}
		}
		break;

	case ZIO_TYPE_TRIM:
		/* The vdev label ranges are never trimmed */
		ASSERT0(VDEV_OFFSET_IS_LABEL(vd, zio->io_offset));

		cvd = vdev_draid_spare_get_child(vd, offset);

		if (cvd == NULL || !cvd->vdev_has_trim) {
			zio->io_error = SET_ERROR(ENXIO);
		} else {
			zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
			    offset, zio->io_abd, zio->io_size,
			    zio->io_type, zio->io_priority, 0,
			    vdev_draid_spare_child_done, zio));
		}
		break;

	default:
		zio->io_error = SET_ERROR(ENOTSUP);
		break;
	}

	zio_execute(zio);
}
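
/*
 * Worked example, assuming the standard 4 MiB VDEV_LABEL_START_SIZE: a
 * 16 KiB read issued to the spare at zio->io_offset = 0x401000
 * (4 MiB + 4 KiB) is forwarded at the translated offset 0x1000 to
 * whichever child vdev_draid_spare_get_child() selects for that offset.
 * A read within the first 4 MiB falls in the label range and is answered
 * (or rejected) locally without ever reaching a child.
 */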

static void
vdev_draid_spare_io_done(zio_t *zio)
{
	(void) zio;
}

/*
 * Look up the full spare config in spa->spa_spares.sav_config and
 * return the top_guid and spare_id for the named spare.
 */
static int
vdev_draid_spare_lookup(spa_t *spa, nvlist_t *nv, uint64_t *top_guidp,
    uint64_t *spare_idp)
{
	nvlist_t **spares;
	uint_t nspares;
	int error;

	if ((spa->spa_spares.sav_config == NULL) ||
	    (nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) != 0)) {
		return (SET_ERROR(ENOENT));
	}

	const char *spare_name;
	error = nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &spare_name);
	if (error != 0)
		return (SET_ERROR(EINVAL));

	for (int i = 0; i < nspares; i++) {
		nvlist_t *spare = spares[i];
		uint64_t top_guid, spare_id;
		const char *type, *path;

		/* Skip non-distributed spares */
		error = nvlist_lookup_string(spare, ZPOOL_CONFIG_TYPE, &type);
		if (error != 0 || strcmp(type, VDEV_TYPE_DRAID_SPARE) != 0)
			continue;

		/* Skip spares with the wrong name */
		error = nvlist_lookup_string(spare, ZPOOL_CONFIG_PATH, &path);
		if (error != 0 || strcmp(path, spare_name) != 0)
			continue;

		/* Found the matching spare */
		error = nvlist_lookup_uint64(spare,
		    ZPOOL_CONFIG_TOP_GUID, &top_guid);
		if (error == 0) {
			error = nvlist_lookup_uint64(spare,
			    ZPOOL_CONFIG_SPARE_ID, &spare_id);
		}

		if (error != 0) {
			return (SET_ERROR(EINVAL));
		} else {
			*top_guidp = top_guid;
			*spare_idp = spare_id;
			return (0);
		}
	}

	return (SET_ERROR(ENOENT));
}
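
/*
 * For reference, a typical matching ZPOOL_CONFIG_SPARES entry looks
 * roughly like the following; the path uses the usual dRAID spare naming
 * of draid<parity>-<top-level-id>-<spare-id>:
 *
 *	type:     "dspare"
 *	path:     "draid1-0-0"
 *	top_guid: <guid of the owning dRAID top-level vdev>
 *	spare_id: 0
 */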

/*
 * Initialize private dRAID spare specific fields from the nvlist.
 */
static int
vdev_draid_spare_init(spa_t *spa, nvlist_t *nv, void **tsd)
{
	vdev_draid_spare_t *vds;
	uint64_t top_guid = 0;
	uint64_t spare_id;

	/*
	 * In the normal case check the list of spares stored in the spa
	 * to look up the top_guid and spare_id for the provided spare
	 * config.  When creating a new pool or adding vdevs the spare
	 * list is not yet populated and the values are provided in the
	 * passed config.
	 */
	if (vdev_draid_spare_lookup(spa, nv, &top_guid, &spare_id) != 0) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_TOP_GUID,
		    &top_guid) != 0)
			return (SET_ERROR(EINVAL));

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_SPARE_ID,
		    &spare_id) != 0)
			return (SET_ERROR(EINVAL));
	}

	vds = kmem_alloc(sizeof (vdev_draid_spare_t), KM_SLEEP);
	vds->vds_draid_vdev = NULL;
	vds->vds_top_guid = top_guid;
	vds->vds_spare_id = spare_id;

	*tsd = vds;

	return (0);
}

static void
vdev_draid_spare_fini(vdev_t *vd)
{
	kmem_free(vd->vdev_tsd, sizeof (vdev_draid_spare_t));
}

static void
vdev_draid_spare_config_generate(vdev_t *vd, nvlist_t *nv)
{
	vdev_draid_spare_t *vds = vd->vdev_tsd;

	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_spare_ops);

	fnvlist_add_uint64(nv, ZPOOL_CONFIG_TOP_GUID, vds->vds_top_guid);
	fnvlist_add_uint64(nv, ZPOOL_CONFIG_SPARE_ID, vds->vds_spare_id);
}
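
/*
 * Note: these two values round-trip through the config; they are exactly
 * what vdev_draid_spare_init() falls back to reading when the spare list
 * in spa_spares has not yet been populated (pool creation and vdev add).
 */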

vdev_ops_t vdev_draid_spare_ops = {
	.vdev_op_init = vdev_draid_spare_init,
	.vdev_op_fini = vdev_draid_spare_fini,
	.vdev_op_open = vdev_draid_spare_open,
	.vdev_op_close = vdev_draid_spare_close,
	.vdev_op_psize_to_asize = vdev_default_asize,
	.vdev_op_asize_to_psize = vdev_default_psize,
	.vdev_op_min_asize = vdev_default_min_asize,
	.vdev_op_min_alloc = NULL,
	.vdev_op_io_start = vdev_draid_spare_io_start,
	.vdev_op_io_done = vdev_draid_spare_io_done,
	.vdev_op_state_change = NULL,
	.vdev_op_need_resilver = NULL,
	.vdev_op_hold = NULL,
	.vdev_op_rele = NULL,
	.vdev_op_remap = NULL,
	.vdev_op_xlate = vdev_default_xlate,
	.vdev_op_rebuild_asize = NULL,
	.vdev_op_metaslab_init = NULL,
	.vdev_op_config_generate = vdev_draid_spare_config_generate,
	.vdev_op_nparity = NULL,
	.vdev_op_ndisks = NULL,
	.vdev_op_type = VDEV_TYPE_DRAID_SPARE,
	.vdev_op_leaf = B_TRUE,
};
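
/*
 * Note: although all of its IO is forwarded to the dRAID children, the
 * distributed spare registers as a leaf vdev (vdev_op_leaf = B_TRUE) and
 * relies on the vdev_default_* helpers for sizing and range translation;
 * it contributes no resilver, remap, or metaslab behavior of its own.
 */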