// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2018 Intel Corporation.
 * Copyright (c) 2020 by Lawrence Livermore National Security, LLC.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_draid.h>
#include <sys/vdev_raidz.h>
#include <sys/vdev_rebuild.h>
#include <sys/abd.h>
#include <sys/zio.h>
#include <sys/nvpair.h>
#include <sys/zio_checksum.h>
#include <sys/fs/zfs.h>
#include <sys/fm/fs/zfs.h>
#include <zfs_fletcher.h>

#ifdef ZFS_DEBUG
#include <sys/vdev.h>	/* For vdev_xlate() in vdev_draid_io_verify() */
#endif
/*
 * dRAID is a distributed spare implementation for ZFS. A dRAID vdev is
 * comprised of multiple raidz redundancy groups which are spread over the
 * dRAID children. To ensure an even distribution, and avoid hot spots, a
 * permutation mapping is applied to the order of the dRAID children.
 * This mixing effectively distributes the parity columns evenly over all
 * of the disks in the dRAID.
 *
 * This is beneficial because it means when resilvering all of the disks
 * can participate, thereby increasing the available IOPS and bandwidth.
 * Furthermore, by reserving a small fraction of each child's total capacity
 * virtual distributed spare disks can be created. These spares similarly
 * benefit from the performance gains of spanning all of the children. The
 * consequence of this is that resilvering to a distributed spare can
 * substantially reduce the time required to restore full parity to a pool
 * with a failed disk.
 *
 * === dRAID group layout ===
 *
 * First, let's define a "row" in the configuration to be a 16M chunk from
 * each physical drive at the same offset. This is the minimum allowable
 * size since it must be possible to store a full 16M block when there is
 * only a single data column. Next, we define a "group" to be a set of
 * sequential disks containing both the parity and data columns. We allow
 * groups to span multiple rows in order to align any group size to any
 * number of physical drives. Finally, a "slice" is comprised of the rows
 * which contain the target number of groups. The permutation mappings
 * are applied in a round-robin fashion to each slice.
 *
 * Given D+P drives in a group (including parity drives) and C-S physical
 * drives (not including the spare drives), we can distribute the groups
 * across R rows without remainder by choosing the number of groups such
 * that a slice holds LCM(D+P, C-S) drive slots; i.e.
 * ngroups = LCM(D+P, C-S) / (D+P).
 *
 * In the example below, there are C=14 physical drives in the configuration
 * with S=2 drives worth of spare capacity. Each group has a width of 9
 * which includes D=8 data and P=1 parity drive. There are 4 groups and
 * 3 rows per slice. Each group has a size of 144M (16M * 9) and the slice
 * size is 576M (144M * 4). When allocating from a dRAID each group is
 * filled before moving on to the next as shown in slice 0 below.
 *
 *            data disks (8 data + 1 parity)          spares (2)
 *     +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
 *  ^  | 2 | 6 | 1 | 11| 4 | 0 | 7 | 10| 8 | 9 | 13| 5 | 12| 3 | device map 0
 *  |  +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
 *  |  |              group 0              | group 1.. |       |
 *  |  +-----------------------------------+-----------+-------|
 *  |  | 0   1   2   3   4   5   6   7   8 | 36  37  38|       |   r
 *  |  | 9   10  11  12  13  14  15  16  17| 45  46  47|       |   o
 *  |  | 18  19  20  21  22  23  24  25  26| 54  55  56|       |   w
 *  |  | 27  28  29  30  31  32  33  34  35| 63  64  65|       | 0
 *  s  +-----------------------+-----------------------+-------+
 *  l  |      ..group 1        |        group 2..      |       |
 *  i  +-----------------------+-----------------------+-------+
 *  c  | 39  40  41  42  43  44| 72  73  74  75  76  77|       |   r
 *  e  | 48  49  50  51  52  53| 81  82  83  84  85  86|       |   o
 *  0  | 57  58  59  60  61  62| 90  91  92  93  94  95|       |   w
 *  |  | 66  67  68  69  70  71| 99 100 101 102 103 104|       | 1
 *  |  +-----------+-----------+-----------------------+-------+
 *  |  |..group 2  |            group 3                |       |
 *  |  +-----------+-----------------------------------+-------+
 *  |  | 78  79  80|108 109 110 111 112 113 114 115 116|       |   r
 *  |  | 87  88  89|117 118 119 120 121 122 123 124 125|       |   o
 *  |  | 96  97  98|126 127 128 129 130 131 132 133 134|       |   w
 *  v  |105 106 107|135 136 137 138 139 140 141 142 143|       | 2
 *     +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
 *     | 9 | 11| 12| 2 | 4 | 1 | 3 | 0 | 10| 13| 8 | 5 | 6 | 7 | device map 1
 *  s  +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
 *  l  |              group 4              | group 5.. |       | row 3
 *  i  +-----------------------+-----------+-----------+-------|
 *  c  |      ..group 5        |        group 6..      |       | row 4
 *  e  +-----------+-----------+-----------------------+-------+
 *  1  |..group 6  |            group 7                |       | row 5
 *     +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
 *     | 3 | 5 | 10| 8 | 6 | 11| 12| 0 | 2 | 4 | 7 | 1 | 9 | 13| device map 2
 *  s  +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
 *  l  |              group 8              | group 9.. |       | row 6
 *  i  +-----------------------------------------------+-------|
 *  c  |      ..group 9        |       group 10..      |       | row 7
 *  e  +-----------------------+-----------------------+-------+
 *  2  |..group 10 |           group 11                |       | row 8
 *     +-----------+-----------------------------------+-------+
 *
 * This layout has several advantages over requiring that each row contain
 * a whole number of groups.
 *
 * 1. The group count is not a relevant parameter when defining a dRAID
 *    layout. Only the group width is needed, and *all* groups will have
 *    the desired size.
 *
 * 2. All possible group widths (<= physical disk count) can be supported.
 *
 * 3. The logic within vdev_draid.c is simplified when the group width is
 *    the same for all groups (although some of the logic around computing
 *    permutation numbers and drive offsets is more complicated).
 *
 * N.B. The following array describes all valid dRAID permutation maps.
 * Each row is used to generate a permutation map for a different number
 * of children from a unique seed. The seeds were generated and carefully
 * evaluated by the 'draid' utility in order to provide balanced mappings.
 * In addition to the seed a checksum of the in-memory mapping is stored
 * for verification.
 *
 * The imbalance ratio of a given failure (e.g. 5 disks wide, child 3 failed,
 * with a given permutation map) is the ratio of the amounts of I/O that will
 * be sent to the least and most busy disks when resilvering. The average
 * imbalance ratio (of a given number of disks and permutation map) is the
 * average of the ratios of all possible single and double disk failures.
 *
 * In order to achieve a low imbalance ratio the number of permutations in
 * the mapping must be significantly larger than the number of children.
 * For dRAID the number of permutations has been limited to 512 to minimize
 * the map size. This does result in a gradually increasing imbalance ratio
 * as seen in the table below. Increasing the number of permutations for
 * larger child counts would reduce the imbalance ratio. However, in practice
 * when there are a large number of children each child is responsible for
 * fewer total IOs so it's less of a concern.
 *
 * Note these values are hard coded and must never be changed. Existing
 * pools depend on the same mapping always being generated in order to
 * read and write from the correct locations. Any change would make
 * existing pools completely inaccessible.
 */
static const draid_map_t draid_maps[VDEV_DRAID_MAX_MAPS] = {
	{   2, 256, 0x89ef3dabbcc7de37, 0x00000000433d433d },	/* 1.000 */
	{   3, 256, 0x89a57f3de98121b4, 0x00000000bcd8b7b5 },	/* 1.000 */
	{   4, 256, 0xc9ea9ec82340c885, 0x00000001819d7c69 },	/* 1.000 */
	{   5, 256, 0xf46733b7f4d47dfd, 0x00000002a1648d74 },	/* 1.010 */
	{   6, 256, 0x88c3c62d8585b362, 0x00000003d3b0c2c4 },	/* 1.031 */
	{   7, 256, 0x3a65d809b4d1b9d5, 0x000000055c4183ee },	/* 1.043 */
	{   8, 256, 0xe98930e3c5d2e90a, 0x00000006edfb0329 },	/* 1.059 */
	{   9, 256, 0x5a5430036b982ccb, 0x00000008ceaf6934 },	/* 1.056 */
	{  10, 256, 0x92bf389e9eadac74, 0x0000000b26668c09 },	/* 1.072 */
	{  11, 256, 0x74ccebf1dcf3ae80, 0x0000000dd691358c },	/* 1.083 */
	{  12, 256, 0x8847e41a1a9f5671, 0x00000010a0c63c8e },	/* 1.097 */
	{  13, 256, 0x7481b56debf0e637, 0x0000001424121fe4 },	/* 1.100 */
	{  14, 256, 0x559b8c44065f8967, 0x00000016ab2ff079 },	/* 1.121 */
	{  15, 256, 0x34c49545a2ee7f01, 0x0000001a6028efd6 },	/* 1.103 */
	{  16, 256, 0xb85f4fa81a7698f7, 0x0000001e95ff5e66 },	/* 1.111 */
	{  17, 256, 0x6353e47b7e47aba0, 0x00000021a81fa0fe },	/* 1.133 */
	{  18, 256, 0xaa549746b1cbb81c, 0x00000026f02494c9 },	/* 1.131 */
	{  19, 256, 0x892e343f2f31d690, 0x00000029eb392835 },	/* 1.130 */
	{  20, 256, 0x76914824db98cc3f, 0x0000003004f31a7c },	/* 1.141 */
	{  21, 256, 0x4b3cbabf9cfb1d0f, 0x00000036363a2408 },	/* 1.139 */
	{  22, 256, 0xf45c77abb4f035d4, 0x00000038dd0f3e84 },	/* 1.150 */
	{  23, 256, 0x5e18bd7f3fd4baf4, 0x0000003f0660391f },	/* 1.174 */
	{  24, 256, 0xa7b3a4d285d6503b, 0x000000443dfc9ff6 },	/* 1.168 */
	{  25, 256, 0x56ac7dd967521f5a, 0x0000004b03a87eb7 },	/* 1.180 */
	{  26, 256, 0x3a42dfda4eb880f7, 0x000000522c719bba },	/* 1.226 */
	{  27, 256, 0xd200d2fc6b54bf60, 0x0000005760b4fdf5 },	/* 1.228 */
	{  28, 256, 0xc52605bbd486c546, 0x0000005e00d8f74c },	/* 1.217 */
	{  29, 256, 0xc761779e63cd762f, 0x00000067be3cd85c },	/* 1.239 */
	{  30, 256, 0xca577b1e07f85ca5, 0x0000006f5517f3e4 },	/* 1.238 */
	{  31, 256, 0xfd50a593c518b3d4, 0x0000007370e7778f },	/* 1.273 */
	{  32, 512, 0xc6c87ba5b042650b, 0x000000f7eb08a156 },	/* 1.191 */
	{  33, 512, 0xc3880d0c9d458304, 0x0000010734b5d160 },	/* 1.199 */
	{  34, 512, 0xe920927e4d8b2c97, 0x00000118c1edbce0 },	/* 1.195 */
	{  35, 512, 0x8da7fcda87bde316, 0x0000012a3e9f9110 },	/* 1.201 */
	{  36, 512, 0xcf09937491514a29, 0x0000013bd6a24bef },	/* 1.194 */
	{  37, 512, 0x9b5abbf345cbd7cc, 0x0000014b9d90fac3 },	/* 1.237 */
	{  38, 512, 0x506312a44668d6a9, 0x0000015e1b5f6148 },	/* 1.242 */
	{  39, 512, 0x71659ede62b4755f, 0x00000173ef029bcd },	/* 1.231 */
	{  40, 512, 0xa7fde73fb74cf2d7, 0x000001866fb72748 },	/* 1.233 */
	{  41, 512, 0x19e8b461a1dea1d3, 0x000001a046f76b23 },	/* 1.271 */
	{  42, 512, 0x031c9b868cc3e976, 0x000001afa64c49d3 },	/* 1.263 */
	{  43, 512, 0xbaa5125faa781854, 0x000001c76789e278 },	/* 1.270 */
	{  44, 512, 0x4ed55052550d721b, 0x000001d800ccd8eb },	/* 1.281 */
	{  45, 512, 0x0fd63ddbdff90677, 0x000001f08ad59ed2 },	/* 1.282 */
	{  46, 512, 0x36d66546de7fdd6f, 0x000002016f09574b },	/* 1.286 */
	{  47, 512, 0x99f997e7eafb69d7, 0x0000021e42e47cb6 },	/* 1.329 */
	{  48, 512, 0xbecd9c2571312c5d, 0x000002320fe2872b },	/* 1.286 */
	{  49, 512, 0xd97371329e488a32, 0x0000024cd73f2ca7 },	/* 1.322 */
	{  50, 512, 0x30e9b136670749ee, 0x000002681c83b0e0 },	/* 1.335 */
	{  51, 512, 0x11ad6bc8f47aaeb4, 0x0000027e9261b5d5 },	/* 1.305 */
	{  52, 512, 0x68e445300af432c1, 0x0000029aa0eb7dbf },	/* 1.330 */
	{  53, 512, 0x910fb561657ea98c, 0x000002b3dca04853 },	/* 1.365 */
	{  54, 512, 0xd619693d8ce5e7a5, 0x000002cc280e9c97 },	/* 1.334 */
	{  55, 512, 0x24e281f564dbb60a, 0x000002e9fa842713 },	/* 1.364 */
	{  56, 512, 0x947a7d3bdaab44c5, 0x000003046680f72e },	/* 1.374 */
	{  57, 512, 0x2d44fec9c093e0de, 0x00000324198ba810 },	/* 1.363 */
	{  58, 512, 0x87743c272d29bb4c, 0x0000033ec48c9ac9 },	/* 1.401 */
	{  59, 512, 0x96aa3b6f67f5d923, 0x0000034faead902c },	/* 1.392 */
	{  60, 512, 0x94a4f1faf520b0d3, 0x0000037d713ab005 },	/* 1.360 */
	{  61, 512, 0xb13ed3a272f711a2, 0x00000397368f3cbd },	/* 1.396 */
	{  62, 512, 0x3b1b11805fa4a64a, 0x000003b8a5e2840c },	/* 1.453 */
	{  63, 512, 0x4c74caad9172ba71, 0x000003d4be280290 },	/* 1.437 */
	{  64, 512, 0x035ff643923dd29e, 0x000003fad6c355e1 },	/* 1.402 */
	{  65, 512, 0x768e9171b11abd3c, 0x0000040eb07fed20 },	/* 1.459 */
	{  66, 512, 0x75880e6f78a13ddd, 0x000004433d6acf14 },	/* 1.423 */
	{  67, 512, 0x910b9714f698a877, 0x00000451ea65d5db },	/* 1.447 */
	{  68, 512, 0x87f5db6f9fdcf5c7, 0x000004732169e3f7 },	/* 1.450 */
	{  69, 512, 0x836d4968fbaa3706, 0x000004954068a380 },	/* 1.455 */
	{  70, 512, 0xc567d73a036421ab, 0x000004bd7cb7bd3d },	/* 1.463 */
	{  71, 512, 0x619df40f240b8fed, 0x000004e376c2e972 },	/* 1.463 */
	{  72, 512, 0x42763a680d5bed8e, 0x000005084275c680 },	/* 1.452 */
	{  73, 512, 0x5866f064b3230431, 0x0000052906f2c9ab },	/* 1.498 */
	{  74, 512, 0x9fa08548b1621a44, 0x0000054708019247 },	/* 1.526 */
	{  75, 512, 0xb6053078ce0fc303, 0x00000572cc5c72b0 },	/* 1.491 */
	{  76, 512, 0x4a7aad7bf3890923, 0x0000058e987bc8e9 },	/* 1.470 */
	{  77, 512, 0xe165613fd75b5a53, 0x000005c20473a211 },	/* 1.527 */
	{  78, 512, 0x3ff154ac878163a6, 0x000005d659194bf3 },	/* 1.509 */
	{  79, 512, 0x24b93ade0aa8a532, 0x0000060a201c4f8e },	/* 1.569 */
	{  80, 512, 0xc18e2d14cd9bb554, 0x0000062c55cfe48c },	/* 1.555 */
	{  81, 512, 0x98cc78302feb58b6, 0x0000066656a07194 },	/* 1.509 */
	{  82, 512, 0xc6c5fd5a2abc0543, 0x0000067cff94fbf8 },	/* 1.596 */
	{  83, 512, 0xa7962f514acbba21, 0x000006ab7b5afa2e },	/* 1.568 */
	{  84, 512, 0xba02545069ddc6dc, 0x000006d19861364f },	/* 1.541 */
	{  85, 512, 0x447c73192c35073e, 0x000006fce315ce35 },	/* 1.623 */
	{  86, 512, 0x48beef9e2d42b0c2, 0x00000720a8e38b6b },	/* 1.620 */
	{  87, 512, 0x4874cf98541a35e0, 0x00000758382a2273 },	/* 1.597 */
	{  88, 512, 0xad4cf8333a31127a, 0x00000781e1651b1b },	/* 1.575 */
	{  89, 512, 0x47ae4859d57888c1, 0x000007b27edbe5bc },	/* 1.627 */
	{  90, 512, 0x06f7723cfe5d1891, 0x000007dc2a96d8eb },	/* 1.596 */
	{  91, 512, 0xd4e44218d660576d, 0x0000080ac46f02d5 },	/* 1.622 */
	{  92, 512, 0x7066702b0d5be1f2, 0x00000832c96d154e },	/* 1.695 */
	{  93, 512, 0x011209b4f9e11fb9, 0x0000085eefda104c },	/* 1.605 */
	{  94, 512, 0x47ffba30a0b35708, 0x00000899badc32dc },	/* 1.625 */
	{  95, 512, 0x1a95a6ac4538aaa8, 0x000008b6b69a42b2 },	/* 1.687 */
	{  96, 512, 0xbda2b239bb2008eb, 0x000008f22d2de38a },	/* 1.621 */
	{  97, 512, 0x7ffa0bea90355c6c, 0x0000092e5b23b816 },	/* 1.699 */
	{  98, 512, 0x1d56ba34be426795, 0x0000094f482e5d1b },	/* 1.688 */
	{  99, 512, 0x0aa89d45c502e93d, 0x00000977d94a98ce },	/* 1.642 */
	{ 100, 512, 0x54369449f6857774, 0x000009c06c9b34cc },	/* 1.683 */
	{ 101, 512, 0xf7d4dd8445b46765, 0x000009e5dc542259 },	/* 1.755 */
	{ 102, 512, 0xfa8866312f169469, 0x00000a16b54eae93 },	/* 1.692 */
	{ 103, 512, 0xd8a5aea08aef3ff9, 0x00000a381d2cbfe7 },	/* 1.747 */
	{ 104, 512, 0x66bcd2c3d5f9ef0e, 0x00000a8191817be7 },	/* 1.751 */
	{ 105, 512, 0x3fb13a47a012ec81, 0x00000ab562b9a254 },	/* 1.751 */
	{ 106, 512, 0x43100f01c9e5e3ca, 0x00000aeee84c185f },	/* 1.726 */
	{ 107, 512, 0xca09c50ccee2d054, 0x00000b1c359c047d },	/* 1.788 */
	{ 108, 512, 0xd7176732ac503f9b, 0x00000b578bc52a73 },	/* 1.740 */
	{ 109, 512, 0xed206e51f8d9422d, 0x00000b8083e0d960 },	/* 1.780 */
	{ 110, 512, 0x17ead5dc6ba0dcd6, 0x00000bcfb1a32ca8 },	/* 1.836 */
	{ 111, 512, 0x5f1dc21e38a969eb, 0x00000c0171becdd6 },	/* 1.778 */
	{ 112, 512, 0xddaa973de33ec528, 0x00000c3edaba4b95 },	/* 1.831 */
	{ 113, 512, 0x2a5eccd7735a3630, 0x00000c630664e7df },	/* 1.825 */
	{ 114, 512, 0xafcccee5c0b71446, 0x00000cb65392f6e4 },	/* 1.826 */
	{ 115, 512, 0x8fa30c5e7b147e27, 0x00000cd4db391e55 },	/* 1.843 */
	{ 116, 512, 0x5afe0711fdfafd82, 0x00000d08cb4ec35d },	/* 1.826 */
	{ 117, 512, 0x533a6090238afd4c, 0x00000d336f115d1b },	/* 1.803 */
	{ 118, 512, 0x90cf11b595e39a84, 0x00000d8e041c2048 },	/* 1.857 */
	{ 119, 512, 0x0d61a3b809444009, 0x00000dcb798afe35 },	/* 1.877 */
	{ 120, 512, 0x7f34da0f54b0d114, 0x00000df3922664e1 },	/* 1.849 */
	{ 121, 512, 0xa52258d5b72f6551, 0x00000e4d37a9872d },	/* 1.867 */
	{ 122, 512, 0xc1de54d7672878db, 0x00000e6583a94cf6 },	/* 1.978 */
	{ 123, 512, 0x1d03354316a414ab, 0x00000ebffc50308d },	/* 1.947 */
	{ 124, 512, 0xcebdcc377665412c, 0x00000edee1997cea },	/* 1.865 */
	{ 125, 512, 0x4ddd4c04b1a12344, 0x00000f21d64b373f },	/* 1.881 */
	{ 126, 512, 0x64fc8f94e3973658, 0x00000f8f87a8896b },	/* 1.882 */
	{ 127, 512, 0x68765f78034a334e, 0x00000fb8fe62197e },	/* 1.867 */
	{ 128, 512, 0xaf36b871a303e816, 0x00000fec6f3afb1e },	/* 1.972 */
	{ 129, 512, 0x2a4cbf73866c3a28, 0x00001027febfe4e5 },	/* 1.896 */
	{ 130, 512, 0x9cb128aacdcd3b2f, 0x0000106aa8ac569d },	/* 1.965 */
	{ 131, 512, 0x5511d41c55869124, 0x000010bbd755ddf1 },	/* 1.963 */
	{ 132, 512, 0x42f92461937f284a, 0x000010fb8bceb3b5 },	/* 1.925 */
	{ 133, 512, 0xe2d89a1cf6f1f287, 0x0000114cf5331e34 },	/* 1.862 */
	{ 134, 512, 0xdc631a038956200e, 0x0000116428d2adc5 },	/* 2.042 */
	{ 135, 512, 0xb2e5ac222cd236be, 0x000011ca88e4d4d2 },	/* 1.935 */
	{ 136, 512, 0xbc7d8236655d88e7, 0x000011e39cb94e66 },	/* 2.005 */
	{ 137, 512, 0x073e02d88d2d8e75, 0x0000123136c7933c },	/* 2.041 */
	{ 138, 512, 0x3ddb9c3873166be0, 0x00001280e4ec6d52 },	/* 1.997 */
	{ 139, 512, 0x7d3b1a845420e1b5, 0x000012c2e7cd6a44 },	/* 1.996 */
	{ 140, 512, 0x60102308aa7b2a6c, 0x000012fc490e6c7d },	/* 2.053 */
	{ 141, 512, 0xdb22bb2f9eb894aa, 0x00001343f5a85a1a },	/* 1.971 */
	{ 142, 512, 0xd853f879a13b1606, 0x000013bb7d5f9048 },	/* 2.018 */
	{ 143, 512, 0x001620a03f804b1d, 0x000013e74cc794fd },	/* 1.961 */
	{ 144, 512, 0xfdb52dda76fbf667, 0x00001442d2f22480 },	/* 2.046 */
	{ 145, 512, 0xa9160110f66e24ff, 0x0000144b899f9dbb },	/* 1.968 */
	{ 146, 512, 0x77306a30379ae03b, 0x000014cb98eb1f81 },	/* 2.143 */
	{ 147, 512, 0x14f5985d2752319d, 0x000014feab821fc9 },	/* 2.064 */
	{ 148, 512, 0xa4b8ff11de7863f8, 0x0000154a0e60b9c9 },	/* 2.023 */
	{ 149, 512, 0x44b345426455c1b3, 0x000015999c3c569c },	/* 2.136 */
	{ 150, 512, 0x272677826049b46c, 0x000015c9697f4b92 },	/* 2.063 */
	{ 151, 512, 0x2f9216e2cd74fe40, 0x0000162b1f7bbd39 },	/* 1.974 */
	{ 152, 512, 0x706ae3e763ad8771, 0x00001661371c55e1 },	/* 2.210 */
	{ 153, 512, 0xf7fd345307c2480e, 0x000016e251f28b6a },	/* 2.006 */
	{ 154, 512, 0x6e94e3d26b3139eb, 0x000016f2429bb8c6 },	/* 2.193 */
	{ 155, 512, 0x5458bbfbb781fcba, 0x0000173efdeca1b9 },	/* 2.163 */
	{ 156, 512, 0xa80e2afeccd93b33, 0x000017bfdcb78adc },	/* 2.046 */
	{ 157, 512, 0x1e4ccbb22796cf9d, 0x00001826fdcc39c9 },	/* 2.084 */
	{ 158, 512, 0x8fba4b676aaa3663, 0x00001841a1379480 },	/* 2.264 */
	{ 159, 512, 0xf82b843814b315fa, 0x000018886e19b8a3 },	/* 2.074 */
	{ 160, 512, 0x7f21e920ecf753a3, 0x0000191812ca0ea7 },	/* 2.282 */
	{ 161, 512, 0x48bb8ea2c4caa620, 0x0000192f310faccf },	/* 2.148 */
	{ 162, 512, 0x5cdb652b4952c91b, 0x0000199e1d7437c7 },	/* 2.355 */
	{ 163, 512, 0x6ac1ba6f78c06cd4, 0x000019cd11f82c70 },	/* 2.164 */
	{ 164, 512, 0x9faf5f9ca2669a56, 0x00001a18d5431f6a },	/* 2.393 */
	{ 165, 512, 0xaa57e9383eb01194, 0x00001a9e7d253d85 },	/* 2.178 */
	{ 166, 512, 0x896967bf495c34d2, 0x00001afb8319b9fc },	/* 2.334 */
	{ 167, 512, 0xdfad5f05de225f1b, 0x00001b3a59c3093b },	/* 2.266 */
	{ 168, 512, 0xfd299a99f9f2abdd, 0x00001bb6f1a10799 },	/* 2.304 */
	{ 169, 512, 0xdda239e798fe9fd4, 0x00001bfae0c9692d },	/* 2.218 */
	{ 170, 512, 0x5fca670414a32c3e, 0x00001c22129dbcff },	/* 2.377 */
	{ 171, 512, 0x1bb8934314b087de, 0x00001c955db36cd0 },	/* 2.155 */
	{ 172, 512, 0xd96394b4b082200d, 0x00001cfc8619b7e6 },	/* 2.404 */
	{ 173, 512, 0xb612a7735b1c8cbc, 0x00001d303acdd585 },	/* 2.205 */
	{ 174, 512, 0x28e7430fe5875fe1, 0x00001d7ed5b3697d },	/* 2.359 */
	{ 175, 512, 0x5038e89efdd981b9, 0x00001dc40ec35c59 },	/* 2.158 */
	{ 176, 512, 0x075fd78f1d14db7c, 0x00001e31c83b4a2b },	/* 2.614 */
	{ 177, 512, 0xc50fafdb5021be15, 0x00001e7cdac82fbc },	/* 2.239 */
	{ 178, 512, 0xe6dc7572ce7b91c7, 0x00001edd8bb454fc },	/* 2.493 */
	{ 179, 512, 0x21f7843e7beda537, 0x00001f3a8e019d6c },	/* 2.327 */
	{ 180, 512, 0xc83385e20b43ec82, 0x00001f70735ec137 },	/* 2.231 */
	{ 181, 512, 0xca818217dddb21fd, 0x0000201ca44c5a3c },	/* 2.237 */
	{ 182, 512, 0xe6035defea48f933, 0x00002038e3346658 },	/* 2.691 */
	{ 183, 512, 0x47262a4f953dac5a, 0x000020c2e554314e },	/* 2.170 */
	{ 184, 512, 0xe24c7246260873ea, 0x000021197e618d64 },	/* 2.600 */
	{ 185, 512, 0xeef6b57c9b58e9e1, 0x0000217ea48ecddc },	/* 2.391 */
	{ 186, 512, 0x2becd3346e386142, 0x000021c496d4a5f9 },	/* 2.677 */
	{ 187, 512, 0x63c6207bdf3b40a3, 0x0000220e0f2eec0c },	/* 2.410 */
	{ 188, 512, 0x3056ce8989767d4b, 0x0000228eb76cd137 },	/* 2.776 */
	{ 189, 512, 0x91af61c307cee780, 0x000022e17e2ea501 },	/* 2.266 */
	{ 190, 512, 0xda359da225f6d54f, 0x00002358a2debc19 },	/* 2.717 */
	{ 191, 512, 0x0a5f7a2a55607ba0, 0x0000238a79dac18c },	/* 2.474 */
	{ 192, 512, 0x27bb75bf5224638a, 0x00002403a58e2351 },	/* 2.673 */
	{ 193, 512, 0x1ebfdb94630f5d0f, 0x00002492a10cb339 },	/* 2.420 */
	{ 194, 512, 0x6eae5e51d9c5f6fb, 0x000024ce4bf98715 },	/* 2.898 */
	{ 195, 512, 0x08d903b4daedc2e0, 0x0000250d1e15886c },	/* 2.363 */
	{ 196, 512, 0xc722a2f7fa7cd686, 0x0000258a99ed0c9e },	/* 2.747 */
	{ 197, 512, 0x8f71faf0e54e361d, 0x000025dee11976f5 },	/* 2.531 */
	{ 198, 512, 0x87f64695c91a54e7, 0x0000264e00a43da0 },	/* 2.707 */
	{ 199, 512, 0xc719cbac2c336b92, 0x000026d327277ac1 },	/* 2.315 */
	{ 200, 512, 0xe7e647afaf771ade, 0x000027523a5c44bf },	/* 3.012 */
	{ 201, 512, 0x12d4b5c38ce8c946, 0x0000273898432545 },	/* 2.378 */
	{ 202, 512, 0xf2e0cd4067bdc94a, 0x000027e47bb2c935 },	/* 2.969 */
	{ 203, 512, 0x21b79f14d6d947d3, 0x0000281e64977f0d },	/* 2.594 */
	{ 204, 512, 0x515093f952f18cd6, 0x0000289691a473fd },	/* 2.763 */
	{ 205, 512, 0xd47b160a1b1022c8, 0x00002903e8b52411 },	/* 2.457 */
	{ 206, 512, 0xc02fc96684715a16, 0x0000297515608601 },	/* 3.057 */
	{ 207, 512, 0xef51e68efba72ed0, 0x000029ef73604804 },	/* 2.590 */
	{ 208, 512, 0x9e3be6e5448b4f33, 0x00002a2846ed074b },	/* 3.047 */
	{ 209, 512, 0x81d446c6d5fec063, 0x00002a92ca693455 },	/* 2.676 */
	{ 210, 512, 0xff215de8224e57d5, 0x00002b2271fe3729 },	/* 2.993 */
	{ 211, 512, 0xe2524d9ba8f69796, 0x00002b64b99c3ba2 },	/* 2.457 */
	{ 212, 512, 0xf6b28e26097b7e4b, 0x00002bd768b6e068 },	/* 3.182 */
	{ 213, 512, 0x893a487f30ce1644, 0x00002c67f722b4b2 },	/* 2.563 */
	{ 214, 512, 0x386566c3fc9871df, 0x00002cc1cf8b4037 },	/* 3.025 */
	{ 215, 512, 0x1e0ed78edf1f558a, 0x00002d3948d36c7f },	/* 2.730 */
	{ 216, 512, 0xe3bc20c31e61f113, 0x00002d6d6b12e025 },	/* 3.036 */
	{ 217, 512, 0xd6c3ad2e23021882, 0x00002deff7572241 },	/* 2.722 */
	{ 218, 512, 0xb4a9f95cf0f69c5a, 0x00002e67d537aa36 },	/* 3.356 */
	{ 219, 512, 0x6e98ed6f6c38e82f, 0x00002e9720626789 },	/* 2.697 */
	{ 220, 512, 0x2e01edba33fddac7, 0x00002f407c6b0198 },	/* 2.979 */
	{ 221, 512, 0x559d02e1f5f57ccc, 0x00002fb6a5ab4f24 },	/* 2.858 */
	{ 222, 512, 0xac18f5a916adcd8e, 0x0000304ae1c5c57e },	/* 3.258 */
	{ 223, 512, 0x15789fbaddb86f4b, 0x0000306f6e019c78 },	/* 2.693 */
	{ 224, 512, 0xf4a9c36d5bc4c408, 0x000030da40434213 },	/* 3.259 */
	{ 225, 512, 0xf640f90fd2727f44, 0x00003189ed37b90c },	/* 2.733 */
	{ 226, 512, 0xb5313d390d61884a, 0x000031e152616b37 },	/* 3.235 */
	{ 227, 512, 0x4bae6b3ce9160939, 0x0000321f40aeac42 },	/* 2.983 */
	{ 228, 512, 0x838c34480f1a66a1, 0x000032f389c0f78e },	/* 3.308 */
	{ 229, 512, 0xb1c4a52c8e3d6060, 0x0000330062a40284 },	/* 2.715 */
	{ 230, 512, 0xe0f1110c6d0ed822, 0x0000338be435644f },	/* 3.540 */
	{ 231, 512, 0x9f1a8ccdcea68d4b, 0x000034045a4e97e1 },	/* 2.779 */
	{ 232, 512, 0x3261ed62223f3099, 0x000034702cfc401c },	/* 3.084 */
	{ 233, 512, 0xf2191e2311022d65, 0x00003509dd19c9fc },	/* 2.987 */
	{ 234, 512, 0xf102a395c2033abc, 0x000035654dc96fae },	/* 3.341 */
	{ 235, 512, 0x11fe378f027906b6, 0x000035b5193b0264 },	/* 2.793 */
	{ 236, 512, 0xf777f2c026b337aa, 0x000036704f5d9297 },	/* 3.518 */
	{ 237, 512, 0x1b04e9c2ee143f32, 0x000036dfbb7af218 },	/* 2.962 */
	{ 238, 512, 0x2fcec95266f9352c, 0x00003785c8df24a9 },	/* 3.196 */
	{ 239, 512, 0xfe2b0e47e427dd85, 0x000037cbdf5da729 },	/* 2.914 */
	{ 240, 512, 0x72b49bf2225f6c6d, 0x0000382227c15855 },	/* 3.408 */
	{ 241, 512, 0x50486b43df7df9c7, 0x0000389b88be6453 },	/* 2.903 */
	{ 242, 512, 0x5192a3e53181c8ab, 0x000038ddf3d67263 },	/* 3.778 */
	{ 243, 512, 0xe9f5d8365296fd5e, 0x0000399f1c6c9e9c },	/* 3.026 */
	{ 244, 512, 0xc740263f0301efa8, 0x00003a147146512d },	/* 3.347 */
	{ 245, 512, 0x23cd0f2b5671e67d, 0x00003ab10bcc0d9d },	/* 3.212 */
	{ 246, 512, 0x002ccc7e5cd41390, 0x00003ad6cd14a6c0 },	/* 3.482 */
	{ 247, 512, 0x9aafb3c02544b31b, 0x00003b8cb8779fb0 },	/* 3.146 */
	{ 248, 512, 0x72ba07a78b121999, 0x00003c24142a5a3f },	/* 3.626 */
	{ 249, 512, 0x3d784aa58edfc7b4, 0x00003cd084817d99 },	/* 2.952 */
	{ 250, 512, 0xaab750424d8004af, 0x00003d506a8e098e },	/* 3.463 */
	{ 251, 512, 0x84403fcf8e6b5ca2, 0x00003d4c54c2aec4 },	/* 3.131 */
	{ 252, 512, 0x71eb7455ec98e207, 0x00003e655715cf2c },	/* 3.538 */
	{ 253, 512, 0xd752b4f19301595b, 0x00003ecd7b2ca5ac },	/* 2.974 */
	{ 254, 512, 0xc4674129750499de, 0x00003e99e86d3e95 },	/* 3.843 */
	{ 255, 512, 0x9772baff5cd12ef5, 0x00003f895c019841 },	/* 3.088 */
};
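
/*
 * Worked example of the group layout math described in the block comment
 * above, using the documented C=14, S=2, D=8, P=1 configuration (the
 * numbers below are illustrative and restate the diagram, they are not
 * derived from this table):
 *
 *	groupwidth  = D + P = 9		disks per group
 *	ndisks      = C - S = 12	disks available for groups
 *	slice slots = LCM(9, 12) = 36
 *	ngroups     = 36 / 9 = 4	groups per slice
 *	rows        = 36 / 12 = 3	rows per slice
 *	groupsz     = 16M * 9 = 144M	slicesz = 144M * 4 = 576M
 */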

/*
 * Verify the map is valid. Each device index must appear exactly
 * once in every row, and the permutation array checksum must match.
 */
static int
verify_perms(uint8_t *perms, uint64_t children, uint64_t nperms,
    uint64_t checksum)
{
	int countssz = sizeof (uint16_t) * children;
	uint16_t *counts = kmem_zalloc(countssz, KM_SLEEP);

	for (int i = 0; i < nperms; i++) {
		for (int j = 0; j < children; j++) {
			uint8_t val = perms[(i * children) + j];

			if (val >= children || counts[val] != i) {
				kmem_free(counts, countssz);
				return (EINVAL);
			}

			counts[val]++;
		}
	}

	if (checksum != 0) {
		int permssz = sizeof (uint8_t) * children * nperms;
		zio_cksum_t cksum;

		fletcher_4_native_varsize(perms, permssz, &cksum);

		if (checksum != cksum.zc_word[0]) {
			kmem_free(counts, countssz);
			return (ECKSUM);
		}
	}

	kmem_free(counts, countssz);

	return (0);
}
466
467 /*
468 * Generate the permutation array for the draid_map_t. These maps control
469 * the placement of all data in a dRAID. Therefore it's critical that the
470 * seed always generates the same mapping. We provide our own pseudo-random
471 * number generator for this purpose.
472 */
473 int
vdev_draid_generate_perms(const draid_map_t * map,uint8_t ** permsp)474 vdev_draid_generate_perms(const draid_map_t *map, uint8_t **permsp)
475 {
476 VERIFY3U(map->dm_children, >=, VDEV_DRAID_MIN_CHILDREN);
477 VERIFY3U(map->dm_children, <=, VDEV_DRAID_MAX_CHILDREN);
478 VERIFY3U(map->dm_seed, !=, 0);
479 VERIFY3U(map->dm_nperms, !=, 0);
480 VERIFY3P(map->dm_perms, ==, NULL);
481
482 #ifdef _KERNEL
483 /*
484 * The kernel code always provides both a map_seed and checksum.
485 * Only the tests/zfs-tests/cmd/draid/draid.c utility will provide
486 * a zero checksum when generating new candidate maps.
487 */
488 VERIFY3U(map->dm_checksum, !=, 0);
489 #endif
490 uint64_t children = map->dm_children;
491 uint64_t nperms = map->dm_nperms;
492 int rowsz = sizeof (uint8_t) * children;
493 int permssz = rowsz * nperms;
494 uint8_t *perms;
495
496 /* Allocate the permutation array */
497 perms = vmem_alloc(permssz, KM_SLEEP);
498
499 /* Setup an initial row with a known pattern */
500 uint8_t *initial_row = kmem_alloc(rowsz, KM_SLEEP);
501 for (int i = 0; i < children; i++)
502 initial_row[i] = i;
503
504 uint64_t draid_seed[2] = { VDEV_DRAID_SEED, map->dm_seed };
505 uint8_t *current_row, *previous_row = initial_row;
506
507 /*
508 * Perform a Fisher-Yates shuffle of each row using the previous
509 * row as the starting point. An initial_row with known pattern
510 * is used as the input for the first row.
511 */
512 for (int i = 0; i < nperms; i++) {
513 current_row = &perms[i * children];
514 memcpy(current_row, previous_row, rowsz);
515
516 for (int j = children - 1; j > 0; j--) {
517 uint64_t k = vdev_draid_rand(draid_seed) % (j + 1);
518 uint8_t val = current_row[j];
519 current_row[j] = current_row[k];
520 current_row[k] = val;
521 }
522
523 previous_row = current_row;
524 }
525
526 kmem_free(initial_row, rowsz);
527
528 int error = verify_perms(perms, children, nperms, map->dm_checksum);
529 if (error) {
530 vmem_free(perms, permssz);
531 return (error);
532 }
533
534 *permsp = perms;
535
536 return (0);
537 }

/*
 * Lookup the fixed draid_map_t for the requested number of children.
 */
int
vdev_draid_lookup_map(uint64_t children, const draid_map_t **mapp)
{
	for (int i = 0; i < VDEV_DRAID_MAX_MAPS; i++) {
		if (draid_maps[i].dm_children == children) {
			*mapp = &draid_maps[i];
			return (0);
		}
	}

	return (ENOENT);
}
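
/*
 * Example usage of the two functions above (a sketch only, not code that
 * is compiled here; error handling elided):
 *
 *	const draid_map_t *map;
 *	uint8_t *perms;
 *
 *	VERIFY0(vdev_draid_lookup_map(children, &map));
 *	VERIFY0(vdev_draid_generate_perms(map, &perms));
 *	// use map->dm_nperms rows of 'children' device indexes ...
 *	vmem_free(perms, sizeof (uint8_t) * children * map->dm_nperms);
 */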

/*
 * Lookup the permutation array and iteration id for the provided offset.
 */
static void
vdev_draid_get_perm(vdev_draid_config_t *vdc, uint64_t pindex,
    uint8_t **base, uint64_t *iter)
{
	uint64_t ncols = vdc->vdc_children;
	uint64_t poff = pindex % (vdc->vdc_nperms * ncols);

	*base = vdc->vdc_perms + (poff / ncols) * ncols;
	*iter = poff % ncols;
}

static inline uint64_t
vdev_draid_permute_id(vdev_draid_config_t *vdc,
    uint8_t *base, uint64_t iter, uint64_t index)
{
	return ((base[index] + iter) % vdc->vdc_children);
}
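
/*
 * Worked example of the permutation lookup (illustrative): with
 * children = 14 and nperms = 512, pindex = 1000 gives
 * poff = 1000 % (512 * 14) = 1000, so 'base' is row 1000 / 14 = 71
 * of the permutation array and iter = 1000 % 14 = 6. The device index
 * for column 'index' is then (base[index] + 6) % 14; the iter term
 * rotates each stored permutation into 14 distinct mappings.
 */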

/*
 * Return the asize, which is the psize rounded up to a full group width;
 * i.e. this is dRAID's psize-to-asize conversion.
 */
static uint64_t
vdev_draid_asize(vdev_t *vd, uint64_t psize, uint64_t txg)
{
	(void) txg;
	vdev_draid_config_t *vdc = vd->vdev_tsd;
	uint64_t ashift = vd->vdev_ashift;

	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);

	uint64_t rows = ((psize - 1) / (vdc->vdc_ndata << ashift)) + 1;
	uint64_t asize = (rows * vdc->vdc_groupwidth) << ashift;

	ASSERT3U(asize, !=, 0);
	ASSERT3U(asize % (vdc->vdc_groupwidth), ==, 0);

	return (asize);
}
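
/*
 * Worked example (illustrative): with ashift = 12, ndata = 8, and
 * groupwidth = 9 (8d+1p), a psize of 128K occupies
 * rows = ((131072 - 1) / (8 << 12)) + 1 = 4 full stripes, so
 * asize = (4 * 9) << 12 = 144K. The inverse below gives
 * vdev_draid_asize_to_psize(144K) = (36 / 9) * 8 sectors = 128K.
 */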

/*
 * Deflate the asize to the psize; this includes stripping parity.
 */
uint64_t
vdev_draid_asize_to_psize(vdev_t *vd, uint64_t asize)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;

	ASSERT0(asize % vdc->vdc_groupwidth);

	return ((asize / vdc->vdc_groupwidth) * vdc->vdc_ndata);
}

/*
 * Convert a logical offset to the corresponding group number.
 */
static uint64_t
vdev_draid_offset_to_group(vdev_t *vd, uint64_t offset)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;

	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);

	return (offset / vdc->vdc_groupsz);
}

/*
 * Convert a group number to the logical starting offset for that group.
 */
static uint64_t
vdev_draid_group_to_offset(vdev_t *vd, uint64_t group)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;

	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);

	return (group * vdc->vdc_groupsz);
}

/*
 * Full stripe writes. When writing, all columns (D+P) are required. Parity
 * is calculated over all the columns, including empty zero filled sectors,
 * and each is written to disk. While only the data columns are needed for
 * a normal read, all of the columns are required for reconstruction when
 * performing a sequential resilver.
 *
 * For "big columns" it's sufficient to map the correct range of the zio ABD.
 * Partial columns require allocating a gang ABD in order to zero fill the
 * empty sectors. When the column is empty a zero filled sector must be
 * mapped. In all cases the data ABDs must be the same size as the parity
 * ABDs (e.g. rc->rc_size == parity_size).
 */
static void
vdev_draid_map_alloc_write(zio_t *zio, uint64_t abd_offset, raidz_row_t *rr)
{
	uint64_t skip_size = 1ULL << zio->io_vd->vdev_top->vdev_ashift;
	uint64_t parity_size = rr->rr_col[0].rc_size;
	uint64_t abd_off = abd_offset;

	ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
	ASSERT3U(parity_size, ==, abd_get_size(rr->rr_col[0].rc_abd));

	for (uint64_t c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
		raidz_col_t *rc = &rr->rr_col[c];

		if (rc->rc_size == 0) {
			/* empty data column (small write), add a skip sector */
			ASSERT3U(skip_size, ==, parity_size);
			rc->rc_abd = abd_get_zeros(skip_size);
		} else if (rc->rc_size == parity_size) {
			/* this is a "big column" */
			rc->rc_abd = abd_get_offset_struct(&rc->rc_abdstruct,
			    zio->io_abd, abd_off, rc->rc_size);
		} else {
			/* short data column, add a skip sector */
			ASSERT3U(rc->rc_size + skip_size, ==, parity_size);
			rc->rc_abd = abd_alloc_gang();
			abd_gang_add(rc->rc_abd, abd_get_offset_size(
			    zio->io_abd, abd_off, rc->rc_size), B_TRUE);
			abd_gang_add(rc->rc_abd, abd_get_zeros(skip_size),
			    B_TRUE);
		}

		ASSERT3U(abd_get_size(rc->rc_abd), ==, parity_size);

		abd_off += rc->rc_size;
		rc->rc_size = parity_size;
	}

	IMPLY(abd_offset != 0, abd_off == zio->io_size);
}
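
/*
 * Write mapping example (illustrative): for a 52K block (13 sectors at
 * ashift = 12) in an 8d+1p group, q = 1, r = 5, and bc = 6, so
 * parity_size is 8K. Data columns 1-5 are "big columns" mapped directly
 * from the zio ABD, while columns 6-8 become gang ABDs of 4K of data
 * plus a 4K zero-filled skip sector. Empty columns (rc_size == 0) only
 * arise when q == 0, i.e. for blocks smaller than one stripe of data.
 */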

/*
 * Scrub/resilver reads. In order to store the contents of the skip sectors
 * an additional ABD is allocated. The columns are handled in the same way
 * as a full stripe write except instead of using the zero ABD the newly
 * allocated skip ABD is used to back the skip sectors. In all cases the
 * data ABD must be the same size as the parity ABDs.
 */
static void
vdev_draid_map_alloc_scrub(zio_t *zio, uint64_t abd_offset, raidz_row_t *rr)
{
	uint64_t skip_size = 1ULL << zio->io_vd->vdev_top->vdev_ashift;
	uint64_t parity_size = rr->rr_col[0].rc_size;
	uint64_t abd_off = abd_offset;
	uint64_t skip_off = 0;

	ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
	ASSERT3P(rr->rr_abd_empty, ==, NULL);

	if (rr->rr_nempty > 0) {
		rr->rr_abd_empty = abd_alloc_linear(rr->rr_nempty * skip_size,
		    B_FALSE);
	}

	for (uint64_t c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
		raidz_col_t *rc = &rr->rr_col[c];

		if (rc->rc_size == 0) {
			/* empty data column (small read), add a skip sector */
			ASSERT3U(skip_size, ==, parity_size);
			ASSERT3U(rr->rr_nempty, !=, 0);
			rc->rc_abd = abd_get_offset_size(rr->rr_abd_empty,
			    skip_off, skip_size);
			skip_off += skip_size;
		} else if (rc->rc_size == parity_size) {
			/* this is a "big column" */
			rc->rc_abd = abd_get_offset_struct(&rc->rc_abdstruct,
			    zio->io_abd, abd_off, rc->rc_size);
		} else {
			/* short data column, add a skip sector */
			ASSERT3U(rc->rc_size + skip_size, ==, parity_size);
			ASSERT3U(rr->rr_nempty, !=, 0);
			rc->rc_abd = abd_alloc_gang();
			abd_gang_add(rc->rc_abd, abd_get_offset_size(
			    zio->io_abd, abd_off, rc->rc_size), B_TRUE);
			abd_gang_add(rc->rc_abd, abd_get_offset_size(
			    rr->rr_abd_empty, skip_off, skip_size), B_TRUE);
			skip_off += skip_size;
		}

		uint64_t abd_size = abd_get_size(rc->rc_abd);
		ASSERT3U(abd_size, ==, abd_get_size(rr->rr_col[0].rc_abd));

		/*
		 * Increase rc_size so the skip ABD is included in subsequent
		 * parity calculations.
		 */
		abd_off += rc->rc_size;
		rc->rc_size = abd_size;
	}

	IMPLY(abd_offset != 0, abd_off == zio->io_size);
	ASSERT3U(skip_off, ==, rr->rr_nempty * skip_size);
}

/*
 * Normal reads. In this common case only the columns containing data
 * are read into the zio ABDs. Neither the parity columns nor the empty
 * skip sectors are read unless the checksum fails verification, in which
 * case vdev_raidz_read_all() will call vdev_draid_map_alloc_empty() to
 * expand the raid map in order to allow reconstruction using the parity
 * data and skip sectors.
 */
static void
vdev_draid_map_alloc_read(zio_t *zio, uint64_t abd_offset, raidz_row_t *rr)
{
	uint64_t abd_off = abd_offset;

	ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);

	for (uint64_t c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
		raidz_col_t *rc = &rr->rr_col[c];

		if (rc->rc_size > 0) {
			rc->rc_abd = abd_get_offset_struct(&rc->rc_abdstruct,
			    zio->io_abd, abd_off, rc->rc_size);
			abd_off += rc->rc_size;
		}
	}

	IMPLY(abd_offset != 0, abd_off == zio->io_size);
}

/*
 * Converts a normal "read" raidz_row_t to a "scrub" raidz_row_t. The key
 * difference is that an ABD is allocated to back skip sectors so they may
 * be read into memory, verified, and repaired if needed.
 */
void
vdev_draid_map_alloc_empty(zio_t *zio, raidz_row_t *rr)
{
	uint64_t skip_size = 1ULL << zio->io_vd->vdev_top->vdev_ashift;
	uint64_t parity_size = rr->rr_col[0].rc_size;
	uint64_t skip_off = 0;

	ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
	ASSERT3P(rr->rr_abd_empty, ==, NULL);

	if (rr->rr_nempty > 0) {
		rr->rr_abd_empty = abd_alloc_linear(rr->rr_nempty * skip_size,
		    B_FALSE);
	}

	for (uint64_t c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
		raidz_col_t *rc = &rr->rr_col[c];

		if (rc->rc_size == 0) {
			/* empty data column (small read), add a skip sector */
			ASSERT3U(skip_size, ==, parity_size);
			ASSERT3U(rr->rr_nempty, !=, 0);
			ASSERT3P(rc->rc_abd, ==, NULL);
			rc->rc_abd = abd_get_offset_size(rr->rr_abd_empty,
			    skip_off, skip_size);
			skip_off += skip_size;
		} else if (rc->rc_size == parity_size) {
			/* this is a "big column", nothing to add */
			ASSERT3P(rc->rc_abd, !=, NULL);
		} else {
			/*
			 * Short data column, add a skip sector and clear
			 * rc_tried to force the entire column to be re-read,
			 * thereby including the missing skip sector data
			 * which is needed for reconstruction.
			 */
			ASSERT3U(rc->rc_size + skip_size, ==, parity_size);
			ASSERT3U(rr->rr_nempty, !=, 0);
			ASSERT3P(rc->rc_abd, !=, NULL);
			ASSERT(!abd_is_gang(rc->rc_abd));
			abd_t *read_abd = rc->rc_abd;
			rc->rc_abd = abd_alloc_gang();
			abd_gang_add(rc->rc_abd, read_abd, B_TRUE);
			abd_gang_add(rc->rc_abd, abd_get_offset_size(
			    rr->rr_abd_empty, skip_off, skip_size), B_TRUE);
			skip_off += skip_size;
			rc->rc_tried = 0;
		}

		/*
		 * Increase rc_size so the empty ABD is included in subsequent
		 * parity calculations.
		 */
		rc->rc_size = parity_size;
	}

	ASSERT3U(skip_off, ==, rr->rr_nempty * skip_size);
}

/*
 * Verify that all empty sectors are zero filled before using them to
 * calculate parity. Otherwise, silent corruption in an empty sector will
 * result in bad parity being generated. That bad parity will then be
 * considered authoritative and overwrite the good parity on disk. This
 * is possible because the checksum is only calculated over the data,
 * thus it cannot be used to detect damage in empty sectors.
 */
int
vdev_draid_map_verify_empty(zio_t *zio, raidz_row_t *rr)
{
	uint64_t skip_size = 1ULL << zio->io_vd->vdev_top->vdev_ashift;
	uint64_t parity_size = rr->rr_col[0].rc_size;
	uint64_t skip_off = parity_size - skip_size;
	uint64_t empty_off = 0;
	int ret = 0;

	ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
	ASSERT3P(rr->rr_abd_empty, !=, NULL);
	ASSERT3U(rr->rr_bigcols, >, 0);

	void *zero_buf = kmem_zalloc(skip_size, KM_SLEEP);

	for (int c = rr->rr_bigcols; c < rr->rr_cols; c++) {
		raidz_col_t *rc = &rr->rr_col[c];

		ASSERT3P(rc->rc_abd, !=, NULL);
		ASSERT3U(rc->rc_size, ==, parity_size);

		if (abd_cmp_buf_off(rc->rc_abd, zero_buf, skip_off,
		    skip_size) != 0) {
			vdev_raidz_checksum_error(zio, rc, rc->rc_abd);
			abd_zero_off(rc->rc_abd, skip_off, skip_size);
			rc->rc_error = SET_ERROR(ECKSUM);
			ret++;
		}

		empty_off += skip_size;
	}

	ASSERT3U(empty_off, ==, abd_get_size(rr->rr_abd_empty));

	kmem_free(zero_buf, skip_size);

	return (ret);
}

/*
 * Given a logical address within a dRAID configuration, return the physical
 * address on the first drive in the group that this address maps to
 * (at position 'start' in permutation number 'perm').
 */
static uint64_t
vdev_draid_logical_to_physical(vdev_t *vd, uint64_t logical_offset,
    uint64_t *perm, uint64_t *start)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;

	/* b_offset is the dRAID (parent) sector offset. */
	uint64_t ashift = vd->vdev_top->vdev_ashift;
	uint64_t b_offset = logical_offset >> ashift;

	/*
	 * The height of a row in units of the vdev's minimum sector size.
	 * This is the amount of data written to each disk of each group
	 * in a given permutation.
	 */
	uint64_t rowheight_sectors = VDEV_DRAID_ROWHEIGHT >> ashift;

	/*
	 * We cycle through a disk permutation every groupsz * ngroups chunk
	 * of address space. Note that ngroups * groupsz must be a multiple
	 * of the number of data drives (ndisks) in order to guarantee
	 * alignment. So, for example, if our row height is 16MB, our group
	 * size is 10, and there are 13 data drives in the draid, then ngroups
	 * will be 13, we will change permutation every 2.08GB, and each
	 * disk will have 160MB of data per chunk.
	 */
	uint64_t groupwidth = vdc->vdc_groupwidth;
	uint64_t ngroups = vdc->vdc_ngroups;
	uint64_t ndisks = vdc->vdc_ndisks;

	/*
	 * groupstart is where the group this IO will land in "starts" in
	 * the permutation array.
	 */
	uint64_t group = logical_offset / vdc->vdc_groupsz;
	uint64_t groupstart = (group * groupwidth) % ndisks;
	ASSERT3U(groupstart + groupwidth, <=, ndisks + groupstart);
	*start = groupstart;

	/* b_offset is the sector offset within a group chunk */
	b_offset = b_offset % (rowheight_sectors * groupwidth);
	ASSERT0(b_offset % groupwidth);

	/*
	 * Find the starting byte offset on each child vdev:
	 * - within a permutation there are ngroups groups spread over the
	 *   rows, where each row covers a slice portion of the disk
	 * - each permutation has (groupwidth * ngroups) / ndisks rows
	 * - so each permutation covers rows * slice portion of the disk
	 * - so we need to find the row where this IO group target begins
	 */
	*perm = group / ngroups;
	uint64_t row = (*perm * ((groupwidth * ngroups) / ndisks)) +
	    (((group % ngroups) * groupwidth) / ndisks);

	return (((rowheight_sectors * row) +
	    (b_offset / groupwidth)) << ashift);
}
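
/*
 * Worked example (illustrative), using the documented 14-drive layout
 * (groupwidth = 9, ndisks = 12, ngroups = 4): for an offset in group 5,
 * groupstart = (5 * 9) % 12 = 9, perm = 5 / 4 = 1, and
 * row = 1 * ((9 * 4) / 12) + ((5 % 4) * 9) / 12 = 3. This matches the
 * diagram at the top of this file, where group 5 begins on row 3 at
 * position 9 of device map 1.
 */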

static uint64_t
vdev_draid_map_alloc_row(zio_t *zio, raidz_row_t **rrp, uint64_t io_offset,
    uint64_t abd_offset, uint64_t abd_size)
{
	vdev_t *vd = zio->io_vd;
	vdev_draid_config_t *vdc = vd->vdev_tsd;
	uint64_t ashift = vd->vdev_top->vdev_ashift;
	uint64_t io_size = abd_size;
	uint64_t io_asize = vdev_draid_asize(vd, io_size, 0);
	uint64_t group = vdev_draid_offset_to_group(vd, io_offset);
	uint64_t start_offset = vdev_draid_group_to_offset(vd, group + 1);

	/*
	 * Limit the io_size to the space remaining in the group. A second
	 * row in the raidz_map_t is created for the remainder.
	 */
	if (io_offset + io_asize > start_offset) {
		io_size = vdev_draid_asize_to_psize(vd,
		    start_offset - io_offset);
	}

	/*
	 * At most a block may span the logical end of one group and the start
	 * of the next group. Therefore, at the end of a group the io_size must
	 * span the group width evenly and the remainder must be aligned to the
	 * start of the next group.
	 */
	IMPLY(abd_offset == 0 && io_size < zio->io_size,
	    (io_asize >> ashift) % vdc->vdc_groupwidth == 0);
	IMPLY(abd_offset != 0,
	    vdev_draid_group_to_offset(vd, group) == io_offset);

	/* Lookup starting byte offset on each child vdev */
	uint64_t groupstart, perm;
	uint64_t physical_offset = vdev_draid_logical_to_physical(vd,
	    io_offset, &perm, &groupstart);

	/*
	 * If there are fewer than groupwidth drives available after the group
	 * start, the group is going to wrap onto the next row. 'wrap' is the
	 * group disk number that starts on the next row.
	 */
	uint64_t ndisks = vdc->vdc_ndisks;
	uint64_t groupwidth = vdc->vdc_groupwidth;
	uint64_t wrap = groupwidth;

	if (groupstart + groupwidth > ndisks)
		wrap = ndisks - groupstart;

	/* The io size in units of the vdev's minimum sector size. */
	const uint64_t psize = io_size >> ashift;

	/*
	 * "Quotient": The number of data sectors for this stripe on all but
	 * the "big column" child vdevs that also contain "remainder" data.
	 */
	uint64_t q = psize / vdc->vdc_ndata;

	/*
	 * "Remainder": The number of partial stripe data sectors in this I/O.
	 * This will add a sector to some, but not all, child vdevs.
	 */
	uint64_t r = psize - q * vdc->vdc_ndata;

	/* The number of "big columns" - those which contain remainder data. */
	uint64_t bc = (r == 0 ? 0 : r + vdc->vdc_nparity);
	ASSERT3U(bc, <, groupwidth);

	/* The total number of data and parity sectors for this I/O. */
	uint64_t tot = psize + (vdc->vdc_nparity * (q + (r == 0 ? 0 : 1)));
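
	/*
	 * Worked example (illustrative): for psize = 10 sectors with
	 * ndata = 8, nparity = 1, and groupwidth = 9: q = 1, r = 2,
	 * bc = 3, and tot = 10 + 1 * 2 = 12. The parity column and the
	 * two big data columns get q + 1 = 2 sectors, the remaining six
	 * data columns get q = 1 sector, and rr_nempty below works out
	 * to roundup(12, 9) - 12 = 6 = groupwidth - bc skip sectors.
	 */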

	ASSERT3U(vdc->vdc_nparity, >, 0);

	raidz_row_t *rr = vdev_raidz_row_alloc(groupwidth, zio);
	rr->rr_bigcols = bc;
	rr->rr_firstdatacol = vdc->vdc_nparity;
#ifdef ZFS_DEBUG
	rr->rr_offset = io_offset;
	rr->rr_size = io_size;
#endif
	*rrp = rr;

	uint8_t *base;
	uint64_t iter, asize = 0;
	vdev_draid_get_perm(vdc, perm, &base, &iter);
	for (uint64_t i = 0; i < groupwidth; i++) {
		raidz_col_t *rc = &rr->rr_col[i];
		uint64_t c = (groupstart + i) % ndisks;

		/* increment the offset if we wrap to the next row */
		if (i == wrap)
			physical_offset += VDEV_DRAID_ROWHEIGHT;

		rc->rc_devidx = vdev_draid_permute_id(vdc, base, iter, c);
		rc->rc_offset = physical_offset;

		if (q == 0 && i >= bc)
			rc->rc_size = 0;
		else if (i < bc)
			rc->rc_size = (q + 1) << ashift;
		else
			rc->rc_size = q << ashift;

		asize += rc->rc_size;
	}

	ASSERT3U(asize, ==, tot << ashift);
	rr->rr_nempty = roundup(tot, groupwidth) - tot;
	IMPLY(bc > 0, rr->rr_nempty == groupwidth - bc);

	/* Allocate buffers for the parity columns */
	for (uint64_t c = 0; c < rr->rr_firstdatacol; c++) {
		raidz_col_t *rc = &rr->rr_col[c];
		rc->rc_abd = abd_alloc_linear(rc->rc_size, B_FALSE);
	}

	/*
	 * Map buffers for data columns and allocate/map buffers for skip
	 * sectors. There are three distinct cases for dRAID which are
	 * required to support sequential rebuild.
	 */
	if (zio->io_type == ZIO_TYPE_WRITE) {
		vdev_draid_map_alloc_write(zio, abd_offset, rr);
	} else if ((rr->rr_nempty > 0) &&
	    (zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER))) {
		vdev_draid_map_alloc_scrub(zio, abd_offset, rr);
	} else {
		ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
		vdev_draid_map_alloc_read(zio, abd_offset, rr);
	}

	return (io_size);
}

/*
 * Allocate the raidz mapping to be applied to the dRAID I/O. The parity
 * calculations for dRAID are identical to raidz however there are a few
 * differences in the layout.
 *
 * - dRAID always allocates a full stripe width. Any extra sectors due
 *   to this padding are zero filled and written to disk. They will be
 *   read back during a scrub or repair operation since they are included
 *   in the parity calculation. This property enables sequential
 *   resilvering.
 *
 * - When the block at the logical offset spans redundancy groups then two
 *   rows are allocated in the raidz_map_t. One row resides at the end of
 *   the first group and the other at the start of the following group.
 */
static raidz_map_t *
vdev_draid_map_alloc(zio_t *zio)
{
	raidz_row_t *rr[2];
	uint64_t abd_offset = 0;
	uint64_t abd_size = zio->io_size;
	uint64_t io_offset = zio->io_offset;
	uint64_t size;
	int nrows = 1;

	size = vdev_draid_map_alloc_row(zio, &rr[0], io_offset,
	    abd_offset, abd_size);
	if (size < abd_size) {
		vdev_t *vd = zio->io_vd;

		io_offset += vdev_draid_asize(vd, size, 0);
		abd_offset += size;
		abd_size -= size;
		nrows++;

		ASSERT3U(io_offset, ==, vdev_draid_group_to_offset(
		    vd, vdev_draid_offset_to_group(vd, io_offset)));
		ASSERT3U(abd_offset, <, zio->io_size);
		ASSERT3U(abd_size, !=, 0);

		size = vdev_draid_map_alloc_row(zio, &rr[1],
		    io_offset, abd_offset, abd_size);
		VERIFY3U(size, ==, abd_size);
	}

	raidz_map_t *rm;
	rm = kmem_zalloc(offsetof(raidz_map_t, rm_row[nrows]), KM_SLEEP);
	rm->rm_ops = vdev_raidz_math_get_ops();
	rm->rm_nrows = nrows;
	rm->rm_row[0] = rr[0];
	if (nrows == 2)
		rm->rm_row[1] = rr[1];
	return (rm);
}
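
/*
 * Spanning example (illustrative): with groupwidth = 9, ndata = 8, and
 * ashift = 12, a 128K block written 36K before the end of a group is
 * split by the code above. Row 0 receives the last full stripe of the
 * first group (asize 36K, psize 32K) and row 1 receives the remaining
 * 96K starting exactly at the next group's offset, satisfying the
 * alignment asserts in vdev_draid_map_alloc_row().
 */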

/*
 * Given an offset into a dRAID return the next group width aligned offset
 * which can be used to start an allocation.
 */
static uint64_t
vdev_draid_get_astart(vdev_t *vd, const uint64_t start)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;

	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);

	return (roundup(start, vdc->vdc_groupwidth << vd->vdev_ashift));
}
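
/*
 * For example (illustrative): with groupwidth = 9 and ashift = 12 the
 * allocation alignment is 9 << 12 = 36K, so a start offset of 100K is
 * rounded up to the next group boundary at 108K.
 */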

/*
 * Allocatable space for dRAID is (children - nspares) * sizeof(smallest child)
 * rounded down to the last full slice. So each child must provide at least
 * 1 / (children - nspares) of its asize.
 */
static uint64_t
vdev_draid_min_asize(vdev_t *vd)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;

	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);

	return (VDEV_DRAID_REFLOW_RESERVE +
	    (vd->vdev_min_asize + vdc->vdc_ndisks - 1) / (vdc->vdc_ndisks));
}

/*
 * When using dRAID the minimum allocation size is determined by the number
 * of data disks in the redundancy group. Full stripes are always used.
 */
static uint64_t
vdev_draid_min_alloc(vdev_t *vd)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;

	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);

	return (vdc->vdc_ndata << vd->vdev_ashift);
}
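
/*
 * For example (illustrative): a dRAID with 8 data disks per group and
 * ashift = 12 has a minimum allocation size of 8 << 12 = 32K; smaller
 * blocks are padded out to a full stripe as described above.
 */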

/*
 * Returns true if the txg range does not exist on any leaf vdev.
 *
 * A dRAID spare does not fit into the DTL model. While it has child vdevs
 * there is no redundancy among them, and the effective child vdev is
 * determined by offset. Essentially we do a vdev_dtl_reassess() on the
 * fly by replacing a dRAID spare with the child vdev under the offset.
 * Note that it is a recursive process because the child vdev can be
 * another dRAID spare and so on.
 */
boolean_t
vdev_draid_missing(vdev_t *vd, uint64_t physical_offset, uint64_t txg,
    uint64_t size)
{
	if (vd->vdev_ops == &vdev_spare_ops ||
	    vd->vdev_ops == &vdev_replacing_ops) {
		/*
		 * Check all of the readable children; if any child
		 * contains the txg range then the data is not missing.
		 */
		for (int c = 0; c < vd->vdev_children; c++) {
			vdev_t *cvd = vd->vdev_child[c];

			if (!vdev_readable(cvd))
				continue;

			if (!vdev_draid_missing(cvd, physical_offset,
			    txg, size))
				return (B_FALSE);
		}

		return (B_TRUE);
	}

	if (vd->vdev_ops == &vdev_draid_spare_ops) {
		/*
		 * When sequentially resilvering we don't have a proper
		 * txg range so instead we must presume all txgs are
		 * missing on this vdev until the resilver completes.
		 */
		if (vd->vdev_rebuild_txg != 0)
			return (B_TRUE);

		/*
		 * DTL_MISSING is set for all prior txgs when a resilver
		 * is started in spa_vdev_attach().
		 */
		if (vdev_dtl_contains(vd, DTL_MISSING, txg, size))
			return (B_TRUE);

		/*
		 * Consult the DTL on the relevant vdev. Either a vdev
		 * leaf or spare/replace mirror child may be returned so
		 * we must recursively call vdev_draid_missing().
		 */
		vd = vdev_draid_spare_get_child(vd, physical_offset);
		if (vd == NULL)
			return (B_TRUE);

		return (vdev_draid_missing(vd, physical_offset,
		    txg, size));
	}

	return (vdev_dtl_contains(vd, DTL_MISSING, txg, size));
}

/*
 * Returns true if the txg is only partially replicated on the leaf vdevs.
 */
static boolean_t
vdev_draid_partial(vdev_t *vd, uint64_t physical_offset, uint64_t txg,
    uint64_t size)
{
	if (vd->vdev_ops == &vdev_spare_ops ||
	    vd->vdev_ops == &vdev_replacing_ops) {
		/*
		 * Check all of the readable children; if any child is
		 * missing the txg range then it is partially replicated.
		 */
		for (int c = 0; c < vd->vdev_children; c++) {
			vdev_t *cvd = vd->vdev_child[c];

			if (!vdev_readable(cvd))
				continue;

			if (vdev_draid_partial(cvd, physical_offset, txg, size))
				return (B_TRUE);
		}

		return (B_FALSE);
	}

	if (vd->vdev_ops == &vdev_draid_spare_ops) {
		/*
		 * When sequentially resilvering we don't have a proper
		 * txg range so instead we must presume all txgs are
		 * missing on this vdev until the resilver completes.
		 */
		if (vd->vdev_rebuild_txg != 0)
			return (B_TRUE);

		/*
		 * DTL_MISSING is set for all prior txgs when a resilver
		 * is started in spa_vdev_attach().
		 */
		if (vdev_dtl_contains(vd, DTL_MISSING, txg, size))
			return (B_TRUE);

		/*
		 * Consult the DTL on the relevant vdev. Either a vdev
		 * leaf or spare/replace mirror child may be returned so
		 * we must recursively call vdev_draid_partial().
		 */
		vd = vdev_draid_spare_get_child(vd, physical_offset);
		if (vd == NULL)
			return (B_TRUE);

		return (vdev_draid_partial(vd, physical_offset, txg, size));
	}

	return (vdev_dtl_contains(vd, DTL_MISSING, txg, size));
}

/*
 * Determine if the vdev is readable at the given offset.
 */
boolean_t
vdev_draid_readable(vdev_t *vd, uint64_t physical_offset)
{
	if (vd->vdev_ops == &vdev_draid_spare_ops) {
		vd = vdev_draid_spare_get_child(vd, physical_offset);
		if (vd == NULL)
			return (B_FALSE);
	}

	if (vd->vdev_ops == &vdev_spare_ops ||
	    vd->vdev_ops == &vdev_replacing_ops) {

		for (int c = 0; c < vd->vdev_children; c++) {
			vdev_t *cvd = vd->vdev_child[c];

			if (!vdev_readable(cvd))
				continue;

			if (vdev_draid_readable(cvd, physical_offset))
				return (B_TRUE);
		}

		return (B_FALSE);
	}

	return (vdev_readable(vd));
}
1342
/*
 * Returns the first distributed spare found under the provided vdev tree.
 */
static vdev_t *
vdev_draid_find_spare(vdev_t *vd)
{
	if (vd->vdev_ops == &vdev_draid_spare_ops)
		return (vd);

	for (int c = 0; c < vd->vdev_children; c++) {
		vdev_t *svd = vdev_draid_find_spare(vd->vdev_child[c]);
		if (svd != NULL)
			return (svd);
	}

	return (NULL);
}

/*
 * Returns B_TRUE if the passed in vdev is currently "faulted".
 * Faulted, in this context, means that the vdev represents a
 * replacing or sparing vdev tree.
 */
static boolean_t
vdev_draid_faulted(vdev_t *vd, uint64_t physical_offset)
{
	if (vd->vdev_ops == &vdev_draid_spare_ops) {
		vd = vdev_draid_spare_get_child(vd, physical_offset);
		if (vd == NULL)
			return (B_FALSE);

		/*
		 * After resolving the distributed spare to a leaf vdev
		 * check the parent to determine if it's "faulted".
		 */
		vd = vd->vdev_parent;
	}

	return (vd->vdev_ops == &vdev_replacing_ops ||
	    vd->vdev_ops == &vdev_spare_ops);
}

/*
 * Determine if the dRAID block at the logical offset is degraded.
 * Used by sequential resilver.
 */
static boolean_t
vdev_draid_group_degraded(vdev_t *vd, uint64_t offset)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;

	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
	ASSERT3U(vdev_draid_get_astart(vd, offset), ==, offset);

	uint64_t groupstart, perm;
	uint64_t physical_offset = vdev_draid_logical_to_physical(vd,
	    offset, &perm, &groupstart);

	uint8_t *base;
	uint64_t iter;
	vdev_draid_get_perm(vdc, perm, &base, &iter);

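	/*
	 * Walk the vdc_groupwidth consecutive logical columns which make
	 * up this group, starting at groupstart, mapping each through the
	 * permutation to the child vdev which stores that column.
	 */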
	for (uint64_t i = 0; i < vdc->vdc_groupwidth; i++) {
		uint64_t c = (groupstart + i) % vdc->vdc_ndisks;
		uint64_t cid = vdev_draid_permute_id(vdc, base, iter, c);
		vdev_t *cvd = vd->vdev_child[cid];

		/* Group contains a faulted vdev. */
		if (vdev_draid_faulted(cvd, physical_offset))
			return (B_TRUE);

		/*
		 * Always check groups with active distributed spares
		 * because any vdev failure in the pool will affect them.
		 */
		if (vdev_draid_find_spare(cvd) != NULL)
			return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Determine if the txg is missing. Used by healing resilver.
 */
static boolean_t
vdev_draid_group_missing(vdev_t *vd, uint64_t offset, uint64_t txg,
    uint64_t size)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;

	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
	ASSERT3U(vdev_draid_get_astart(vd, offset), ==, offset);

	uint64_t groupstart, perm;
	uint64_t physical_offset = vdev_draid_logical_to_physical(vd,
	    offset, &perm, &groupstart);

	uint8_t *base;
	uint64_t iter;
	vdev_draid_get_perm(vdc, perm, &base, &iter);

	for (uint64_t i = 0; i < vdc->vdc_groupwidth; i++) {
		uint64_t c = (groupstart + i) % vdc->vdc_ndisks;
		uint64_t cid = vdev_draid_permute_id(vdc, base, iter, c);
		vdev_t *cvd = vd->vdev_child[cid];

		/* Transaction group is known to be partially replicated. */
		if (vdev_draid_partial(cvd, physical_offset, txg, size))
			return (B_TRUE);

		/*
		 * Always check groups with active distributed spares
		 * because any vdev failure in the pool will affect them.
		 */
		if (vdev_draid_find_spare(cvd) != NULL)
			return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Find the smallest child asize and largest sector size to calculate the
 * available capacity. Distributed spares are ignored since their capacity
 * is also based on the minimum child size in the top-level dRAID.
 */
static void
vdev_draid_calculate_asize(vdev_t *vd, uint64_t *asizep, uint64_t *max_asizep,
    uint64_t *logical_ashiftp, uint64_t *physical_ashiftp)
{
	uint64_t logical_ashift = 0, physical_ashift = 0;
	uint64_t asize = 0, max_asize = 0;

	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);

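	/*
	 * The MIN(x - 1, y - 1) + 1 idiom below relies on unsigned
	 * wraparound: asize starts at 0, so asize - 1 wraps to UINT64_MAX
	 * and the first non-spare child's size is always selected. The
	 * net effect is computing the minimum asize over all children
	 * without a special case for the initial iteration.
	 */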
	for (int c = 0; c < vd->vdev_children; c++) {
		vdev_t *cvd = vd->vdev_child[c];

		if (cvd->vdev_ops == &vdev_draid_spare_ops)
			continue;

		asize = MIN(asize - 1, cvd->vdev_asize - 1) + 1;
		max_asize = MIN(max_asize - 1, cvd->vdev_max_asize - 1) + 1;
		logical_ashift = MAX(logical_ashift, cvd->vdev_ashift);
	}
	for (int c = 0; c < vd->vdev_children; c++) {
		vdev_t *cvd = vd->vdev_child[c];

		if (cvd->vdev_ops == &vdev_draid_spare_ops)
			continue;
		physical_ashift = vdev_best_ashift(logical_ashift,
		    physical_ashift, cvd->vdev_physical_ashift);
	}

	*asizep = asize;
	*max_asizep = max_asize;
	*logical_ashiftp = logical_ashift;
	*physical_ashiftp = physical_ashift;
}

/*
 * Open spare vdevs.
 */
static boolean_t
vdev_draid_open_spares(vdev_t *vd)
{
	return (vd->vdev_ops == &vdev_draid_spare_ops ||
	    vd->vdev_ops == &vdev_replacing_ops ||
	    vd->vdev_ops == &vdev_spare_ops);
}

/*
 * Open all children, excluding spares.
 */
static boolean_t
vdev_draid_open_children(vdev_t *vd)
{
	return (!vdev_draid_open_spares(vd));
}

/*
 * Open a top-level dRAID vdev.
 */
static int
vdev_draid_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
    uint64_t *logical_ashift, uint64_t *physical_ashift)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;
	uint64_t nparity = vdc->vdc_nparity;
	int open_errors = 0;

	if (nparity > VDEV_DRAID_MAXPARITY ||
	    vd->vdev_children < nparity + 1) {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (SET_ERROR(EINVAL));
	}

	/*
	 * First open the normal children then the distributed spares. This
	 * ordering is important to ensure the distributed spares calculate
	 * the correct psize in the event that the dRAID vdevs were expanded.
	 */
	vdev_open_children_subset(vd, vdev_draid_open_children);
	vdev_open_children_subset(vd, vdev_draid_open_spares);

	/* Verify enough of the children are available to continue. */
	for (int c = 0; c < vd->vdev_children; c++) {
		if (vd->vdev_child[c]->vdev_open_error != 0) {
			if ((++open_errors) > nparity) {
				vd->vdev_stat.vs_aux = VDEV_AUX_NO_REPLICAS;
				return (SET_ERROR(ENXIO));
			}
		}
	}

	/*
	 * Allocatable capacity is the sum of the space on all children less
	 * the number of distributed spares, rounded down to the last full
	 * row and then to the last full group. An additional 32MB of scratch
	 * space is reserved at the end of each child for use by the dRAID
	 * expansion feature.
	 */
	uint64_t child_asize, child_max_asize;
	vdev_draid_calculate_asize(vd, &child_asize, &child_max_asize,
	    logical_ashift, physical_ashift);

	/*
	 * Should be unreachable since the minimum child size is 64MB, but
	 * we want to make sure an underflow absolutely cannot occur here.
	 */
	if (child_asize < VDEV_DRAID_REFLOW_RESERVE ||
	    child_max_asize < VDEV_DRAID_REFLOW_RESERVE) {
		return (SET_ERROR(ENXIO));
	}

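	/*
	 * For example (hypothetical numbers): with a 1000MB child,
	 * subtracting the 32MB reflow reserve leaves 968MB, which rounds
	 * down to 960MB at the 16MB (VDEV_DRAID_ROWHEIGHT) row granularity.
	 * The per-child capacity is then multiplied by the number of
	 * non-spare children (vdc_ndisks) and rounded down to a whole
	 * number of groups below.
	 */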
	child_asize = ((child_asize - VDEV_DRAID_REFLOW_RESERVE) /
	    VDEV_DRAID_ROWHEIGHT) * VDEV_DRAID_ROWHEIGHT;
	child_max_asize = ((child_max_asize - VDEV_DRAID_REFLOW_RESERVE) /
	    VDEV_DRAID_ROWHEIGHT) * VDEV_DRAID_ROWHEIGHT;

	*asize = (((child_asize * vdc->vdc_ndisks) / vdc->vdc_groupsz) *
	    vdc->vdc_groupsz);
	*max_asize = (((child_max_asize * vdc->vdc_ndisks) / vdc->vdc_groupsz) *
	    vdc->vdc_groupsz);

	return (0);
}

/*
 * Close a top-level dRAID vdev.
 */
static void
vdev_draid_close(vdev_t *vd)
{
	for (int c = 0; c < vd->vdev_children; c++) {
		if (vd->vdev_child[c] != NULL)
			vdev_close(vd->vdev_child[c]);
	}
}

/*
 * Return the maximum asize for a rebuild zio in the provided range
 * given the following constraints. A dRAID chunk may not:
 *
 *  - Exceed the maximum allowed block size (SPA_MAXBLOCKSIZE), or
 *  - Span dRAID redundancy groups.
 */
static uint64_t
vdev_draid_rebuild_asize(vdev_t *vd, uint64_t start, uint64_t asize,
    uint64_t max_segment)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;

	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);

	uint64_t ashift = vd->vdev_ashift;
	uint64_t ndata = vdc->vdc_ndata;
	uint64_t psize = MIN(P2ROUNDUP(max_segment * ndata, 1 << ashift),
	    SPA_MAXBLOCKSIZE);
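	/*
	 * Worked example (hypothetical values): with ndata = 8 and a 1MB
	 * max_segment, max_segment * ndata is 8MB, which is already a
	 * multiple of a 4KB sector (ashift = 12) and below
	 * SPA_MAXBLOCKSIZE (16MB), so psize = 8MB.
	 */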

	ASSERT3U(vdev_draid_get_astart(vd, start), ==, start);
	ASSERT3U(asize % (vdc->vdc_groupwidth << ashift), ==, 0);

	/* Chunks must evenly span all data columns in the group. */
	psize = (((psize >> ashift) / ndata) * ndata) << ashift;
	uint64_t chunk_size = MIN(asize, vdev_psize_to_asize(vd, psize));

	/* Reduce the chunk size to the group space remaining. */
	uint64_t group = vdev_draid_offset_to_group(vd, start);
	uint64_t left = vdev_draid_group_to_offset(vd, group + 1) - start;
	chunk_size = MIN(chunk_size, left);

	ASSERT3U(chunk_size % (vdc->vdc_groupwidth << ashift), ==, 0);
	ASSERT3U(vdev_draid_offset_to_group(vd, start), ==,
	    vdev_draid_offset_to_group(vd, start + chunk_size - 1));

	return (chunk_size);
}

/*
 * Align the start of the metaslab to the group width and slightly reduce
 * its size to a multiple of the group width. Since full stripe writes are
 * required by dRAID this space is unallocable. Furthermore, aligning the
 * metaslab start is important for vdev initialize and TRIM which both operate
 * on metaslab boundaries which vdev_xlate() expects to be aligned.
 */
static void
vdev_draid_metaslab_init(vdev_t *vd, uint64_t *ms_start, uint64_t *ms_size)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;

	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);

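	/*
	 * sz is the width of one full stripe in bytes. For example
	 * (hypothetical values), a groupwidth of 10 (8 data + 2 parity)
	 * with ashift = 12 gives sz = 10 * 4096 = 40KB, and the metaslab
	 * start and size become multiples of 40KB below.
	 */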
	uint64_t sz = vdc->vdc_groupwidth << vd->vdev_ashift;
	uint64_t astart = vdev_draid_get_astart(vd, *ms_start);
	uint64_t asize = ((*ms_size - (astart - *ms_start)) / sz) * sz;

	*ms_start = astart;
	*ms_size = asize;

	ASSERT0(*ms_start % sz);
	ASSERT0(*ms_size % sz);
}

/*
 * Add virtual dRAID spares to the list of valid spares. In order to accomplish
 * this the existing array must be freed and reallocated with the additional
 * entries.
 */
int
vdev_draid_spare_create(nvlist_t *nvroot, vdev_t *vd, uint64_t *ndraidp,
    uint64_t next_vdev_id)
{
	uint64_t draid_nspares = 0;
	uint64_t ndraid = 0;
	int error;

	for (uint64_t i = 0; i < vd->vdev_children; i++) {
		vdev_t *cvd = vd->vdev_child[i];

		if (cvd->vdev_ops == &vdev_draid_ops) {
			vdev_draid_config_t *vdc = cvd->vdev_tsd;
			draid_nspares += vdc->vdc_nspares;
			ndraid++;
		}
	}

	if (draid_nspares == 0) {
		*ndraidp = ndraid;
		return (0);
	}

	nvlist_t **old_spares, **new_spares;
	uint_t old_nspares;
	error = nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &old_spares, &old_nspares);
	if (error)
		old_nspares = 0;

	/* Allocate memory and copy the existing spares. */
	new_spares = kmem_alloc(sizeof (nvlist_t *) *
	    (draid_nspares + old_nspares), KM_SLEEP);
	for (uint_t i = 0; i < old_nspares; i++)
		new_spares[i] = fnvlist_dup(old_spares[i]);

	/* Add new distributed spares to ZPOOL_CONFIG_SPARES. */
	uint64_t n = old_nspares;
	for (uint64_t vdev_id = 0; vdev_id < vd->vdev_children; vdev_id++) {
		vdev_t *cvd = vd->vdev_child[vdev_id];
		char path[64];

		if (cvd->vdev_ops != &vdev_draid_ops)
			continue;

		vdev_draid_config_t *vdc = cvd->vdev_tsd;
		uint64_t nspares = vdc->vdc_nspares;
		uint64_t nparity = vdc->vdc_nparity;

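		/*
		 * Distributed spares are named "draid<parity>-<vdev>-<spare>".
		 * For example, "draid2-3-1" (hypothetical ids) is the second
		 * spare (spare_id 1) belonging to the draid2 top-level vdev
		 * with id 3.
		 */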
		for (uint64_t spare_id = 0; spare_id < nspares; spare_id++) {
			memset(path, 0, sizeof (path));
			(void) snprintf(path, sizeof (path) - 1,
			    "%s%llu-%llu-%llu", VDEV_TYPE_DRAID,
			    (u_longlong_t)nparity,
			    (u_longlong_t)next_vdev_id + vdev_id,
			    (u_longlong_t)spare_id);

			nvlist_t *spare = fnvlist_alloc();
			fnvlist_add_string(spare, ZPOOL_CONFIG_PATH, path);
			fnvlist_add_string(spare, ZPOOL_CONFIG_TYPE,
			    VDEV_TYPE_DRAID_SPARE);
			fnvlist_add_uint64(spare, ZPOOL_CONFIG_TOP_GUID,
			    cvd->vdev_guid);
			fnvlist_add_uint64(spare, ZPOOL_CONFIG_SPARE_ID,
			    spare_id);
			fnvlist_add_uint64(spare, ZPOOL_CONFIG_IS_LOG, 0);
			fnvlist_add_uint64(spare, ZPOOL_CONFIG_IS_SPARE, 1);
			fnvlist_add_uint64(spare, ZPOOL_CONFIG_WHOLE_DISK, 1);
			fnvlist_add_uint64(spare, ZPOOL_CONFIG_ASHIFT,
			    cvd->vdev_ashift);

			new_spares[n] = spare;
			n++;
		}
	}

	if (n > 0) {
		(void) nvlist_remove_all(nvroot, ZPOOL_CONFIG_SPARES);
		fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    (const nvlist_t **)new_spares, n);
	}

	for (int i = 0; i < n; i++)
		nvlist_free(new_spares[i]);

	kmem_free(new_spares, sizeof (*new_spares) * n);
	*ndraidp = ndraid;

	return (0);
}

/*
 * Determine if any portion of the provided block resides on a child vdev
 * with a dirty DTL and therefore needs to be resilvered.
 */
static boolean_t
vdev_draid_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize,
    uint64_t phys_birth)
{
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t asize = vdev_draid_asize(vd, psize, 0);

	if (phys_birth == TXG_UNKNOWN) {
		/*
		 * Sequential resilver. There is no meaningful phys_birth
		 * for this block, so we can only determine if the block
		 * resides in a degraded group, in which case it must be
		 * resilvered.
		 */
		ASSERT3U(vdev_draid_offset_to_group(vd, offset), ==,
		    vdev_draid_offset_to_group(vd, offset + asize - 1));

		return (vdev_draid_group_degraded(vd, offset));
	} else {
		/*
		 * Healing resilver. TXGs not in DTL_PARTIAL are intact,
		 * as are blocks in non-degraded groups.
		 */
		if (!vdev_dtl_contains(vd, DTL_PARTIAL, phys_birth, 1))
			return (B_FALSE);

		if (vdev_draid_group_missing(vd, offset, phys_birth, 1))
			return (B_TRUE);

		/* The block may span groups in which case check both. */
		if (vdev_draid_offset_to_group(vd, offset) !=
		    vdev_draid_offset_to_group(vd, offset + asize - 1)) {
			if (vdev_draid_group_missing(vd,
			    offset + asize, phys_birth, 1))
				return (B_TRUE);
		}

		return (B_FALSE);
	}
}

static boolean_t
vdev_draid_rebuilding(vdev_t *vd)
{
	if (vd->vdev_ops->vdev_op_leaf && vd->vdev_rebuild_txg)
		return (B_TRUE);

	for (int i = 0; i < vd->vdev_children; i++) {
		if (vdev_draid_rebuilding(vd->vdev_child[i])) {
			return (B_TRUE);
		}
	}

	return (B_FALSE);
}

static void
vdev_draid_io_verify(vdev_t *vd, raidz_row_t *rr, int col)
{
#ifdef ZFS_DEBUG
	zfs_range_seg64_t logical_rs, physical_rs, remain_rs;
	logical_rs.rs_start = rr->rr_offset;
	logical_rs.rs_end = logical_rs.rs_start +
	    vdev_draid_asize(vd, rr->rr_size, 0);

	raidz_col_t *rc = &rr->rr_col[col];
	vdev_t *cvd = vd->vdev_child[rc->rc_devidx];

	vdev_xlate(cvd, &logical_rs, &physical_rs, &remain_rs);
	ASSERT(vdev_xlate_is_empty(&remain_rs));
	ASSERT3U(rc->rc_offset, ==, physical_rs.rs_start);
	ASSERT3U(rc->rc_offset, <, physical_rs.rs_end);
	ASSERT3U(rc->rc_offset + rc->rc_size, ==, physical_rs.rs_end);
#endif
}

/*
 * For write operations:
 * 1. Generate the parity data
 * 2. Create child zio write operations to each column's vdev, for both
 *    data and parity. A gang ABD is allocated by vdev_draid_map_alloc()
 *    if a skip sector needs to be added to a column.
 */
static void
vdev_draid_io_start_write(zio_t *zio, raidz_row_t *rr)
{
	vdev_t *vd = zio->io_vd;
	raidz_map_t *rm = zio->io_vsd;

	vdev_raidz_generate_parity_row(rm, rr);

	for (int c = 0; c < rr->rr_cols; c++) {
		raidz_col_t *rc = &rr->rr_col[c];

		/*
		 * Empty columns are zero filled and included in the parity
		 * calculation and therefore must be written.
		 */
		ASSERT3U(rc->rc_size, !=, 0);

		/* Verify physical to logical translation */
		vdev_draid_io_verify(vd, rr, c);

		zio_nowait(zio_vdev_child_io(zio, NULL,
		    vd->vdev_child[rc->rc_devidx], rc->rc_offset,
		    rc->rc_abd, rc->rc_size, zio->io_type, zio->io_priority,
		    0, vdev_raidz_child_done, rc));
	}
}

/*
 * For read operations:
 * 1. The vdev_draid_map_alloc() function will create a minimal raidz
 *    mapping for the read based on the zio->io_flags. There are two
 *    possible mappings: either 1) a normal read, or 2) a scrub/resilver.
 * 2. Create the zio read operations. This will include all parity
 *    columns and skip sectors for a scrub/resilver.
 */
static void
vdev_draid_io_start_read(zio_t *zio, raidz_row_t *rr)
{
	vdev_t *vd = zio->io_vd;

	/* Sequential rebuild must do IO at redundancy group boundary. */
	IMPLY(zio->io_priority == ZIO_PRIORITY_REBUILD, rr->rr_nempty == 0);

	/*
	 * Iterate over the columns in reverse order so that we hit the parity
	 * last. Any errors along the way will force us to read the parity.
	 * For scrub/resilver IOs which verify skip sectors, a gang ABD will
	 * have been allocated to store them and rc->rc_size is increased.
	 */
	for (int c = rr->rr_cols - 1; c >= 0; c--) {
		raidz_col_t *rc = &rr->rr_col[c];
		vdev_t *cvd = vd->vdev_child[rc->rc_devidx];

		if (!vdev_draid_readable(cvd, rc->rc_offset)) {
			if (c >= rr->rr_firstdatacol)
				rr->rr_missingdata++;
			else
				rr->rr_missingparity++;
			rc->rc_error = SET_ERROR(ENXIO);
			rc->rc_tried = 1;
			rc->rc_skipped = 1;
			continue;
		}

		if (vdev_draid_missing(cvd, rc->rc_offset, zio->io_txg, 1)) {
			if (c >= rr->rr_firstdatacol)
				rr->rr_missingdata++;
			else
				rr->rr_missingparity++;
			rc->rc_error = SET_ERROR(ESTALE);
			rc->rc_skipped = 1;
			continue;
		}

		/*
		 * Empty columns may be read during vdev_draid_io_done().
		 * Only skip them after the readable and missing checks
		 * verify they are available.
		 */
		if (rc->rc_size == 0) {
			rc->rc_skipped = 1;
			continue;
		}

		if (zio->io_flags & ZIO_FLAG_RESILVER) {
			vdev_t *svd;

			/*
			 * Sequential rebuilds need to always consider the data
			 * on the child being rebuilt to be stale. This is
			 * important when all columns are available to aid
			 * known reconstruction in identifying which columns
			 * contain incorrect data.
			 *
			 * Furthermore, all repairs need to be constrained to
			 * the devices being rebuilt because without a checksum
			 * we cannot verify the data is actually correct and
			 * performing an incorrect repair could result in
			 * locking in damage and making the data unrecoverable.
			 */
			if (zio->io_priority == ZIO_PRIORITY_REBUILD) {
				if (vdev_draid_rebuilding(cvd)) {
					if (c >= rr->rr_firstdatacol)
						rr->rr_missingdata++;
					else
						rr->rr_missingparity++;
					rc->rc_error = SET_ERROR(ESTALE);
					rc->rc_skipped = 1;
					rc->rc_allow_repair = 1;
					continue;
				} else {
					rc->rc_allow_repair = 0;
				}
			} else {
				rc->rc_allow_repair = 1;
			}

			/*
			 * If this child is a distributed spare then the
			 * offset might reside on the vdev being replaced,
			 * in which case this data must be written to the
			 * new device. Failure to do so would result in
			 * checksum errors when the old device is detached
			 * and the pool is scrubbed.
			 */
			if ((svd = vdev_draid_find_spare(cvd)) != NULL) {
				svd = vdev_draid_spare_get_child(svd,
				    rc->rc_offset);
				if (svd && (svd->vdev_ops == &vdev_spare_ops ||
				    svd->vdev_ops == &vdev_replacing_ops)) {
					rc->rc_force_repair = 1;

					if (vdev_draid_rebuilding(svd))
						rc->rc_allow_repair = 1;
				}
			}

			/*
			 * Always issue a repair IO to this child when it's
			 * a spare or replacing vdev with an active rebuild.
			 */
			if ((cvd->vdev_ops == &vdev_spare_ops ||
			    cvd->vdev_ops == &vdev_replacing_ops) &&
			    vdev_draid_rebuilding(cvd)) {
				rc->rc_force_repair = 1;
				rc->rc_allow_repair = 1;
			}
		}
	}

	/*
	 * Either a parity or data column is missing; this means a repair
	 * may be attempted by vdev_draid_io_done(). Expand the raid map
	 * to read in empty columns which are needed along with the parity
	 * during reconstruction.
	 */
	if ((rr->rr_missingdata > 0 || rr->rr_missingparity > 0) &&
	    rr->rr_nempty > 0 && rr->rr_abd_empty == NULL) {
		vdev_draid_map_alloc_empty(zio, rr);
	}

	for (int c = rr->rr_cols - 1; c >= 0; c--) {
		raidz_col_t *rc = &rr->rr_col[c];
		vdev_t *cvd = vd->vdev_child[rc->rc_devidx];

		if (rc->rc_error || rc->rc_size == 0)
			continue;

		if (c >= rr->rr_firstdatacol || rr->rr_missingdata > 0 ||
		    (zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER))) {
			zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
			    rc->rc_offset, rc->rc_abd, rc->rc_size,
			    zio->io_type, zio->io_priority, 0,
			    vdev_raidz_child_done, rc));
		}
	}
}

/*
 * Start an IO operation to a dRAID vdev.
 */
static void
vdev_draid_io_start(zio_t *zio)
{
	vdev_t *vd __maybe_unused = zio->io_vd;

	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
	ASSERT3U(zio->io_offset, ==, vdev_draid_get_astart(vd, zio->io_offset));

	raidz_map_t *rm = vdev_draid_map_alloc(zio);
	zio->io_vsd = rm;
	zio->io_vsd_ops = &vdev_raidz_vsd_ops;

	if (zio->io_type == ZIO_TYPE_WRITE) {
		for (int i = 0; i < rm->rm_nrows; i++) {
			vdev_draid_io_start_write(zio, rm->rm_row[i]);
		}
	} else {
		ASSERT(zio->io_type == ZIO_TYPE_READ);

		for (int i = 0; i < rm->rm_nrows; i++) {
			vdev_draid_io_start_read(zio, rm->rm_row[i]);
		}
	}

	zio_execute(zio);
}

/*
 * Complete an IO operation on a dRAID vdev. The raidz logic can be applied
 * to dRAID since the layout is fully described by the raidz_map_t.
 */
static void
vdev_draid_io_done(zio_t *zio)
{
	vdev_raidz_io_done(zio);
}

static void
vdev_draid_state_change(vdev_t *vd, int faulted, int degraded)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;
	ASSERT(vd->vdev_ops == &vdev_draid_ops);

	if (faulted > vdc->vdc_nparity)
		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_NO_REPLICAS);
	else if (degraded + faulted != 0)
		vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE);
	else
		vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY, VDEV_AUX_NONE);
}

static void
vdev_draid_xlate(vdev_t *cvd, const zfs_range_seg64_t *logical_rs,
    zfs_range_seg64_t *physical_rs, zfs_range_seg64_t *remain_rs)
{
	vdev_t *raidvd = cvd->vdev_parent;
	ASSERT(raidvd->vdev_ops == &vdev_draid_ops);

	vdev_draid_config_t *vdc = raidvd->vdev_tsd;
	uint64_t ashift = raidvd->vdev_top->vdev_ashift;

	/* Make sure the offsets are block-aligned */
	ASSERT0(logical_rs->rs_start % (1 << ashift));
	ASSERT0(logical_rs->rs_end % (1 << ashift));

	uint64_t logical_start = logical_rs->rs_start;
	uint64_t logical_end = logical_rs->rs_end;

	/*
	 * Unaligned ranges must be skipped. All metaslabs are correctly
	 * aligned so this should not happen, but this case is handled in
	 * case it's needed by future callers.
	 */
	uint64_t astart = vdev_draid_get_astart(raidvd, logical_start);
	if (astart != logical_start) {
		physical_rs->rs_start = logical_start;
		physical_rs->rs_end = logical_start;
		remain_rs->rs_start = MIN(astart, logical_end);
		remain_rs->rs_end = logical_end;
		return;
	}

	/*
	 * Unlike with mirrors and raidz a dRAID logical range can map
	 * to multiple non-contiguous physical ranges. This is handled by
	 * limiting the size of the logical range to a single group and
	 * setting the remain argument such that it describes the remaining
	 * unmapped logical range. This is stricter than absolutely
	 * necessary but helps simplify the logic below.
	 */
	uint64_t group = vdev_draid_offset_to_group(raidvd, logical_start);
	uint64_t nextstart = vdev_draid_group_to_offset(raidvd, group + 1);
	if (logical_end > nextstart)
		logical_end = nextstart;

	/* Find the starting offset for each vdev in the group */
	uint64_t perm, groupstart;
	uint64_t start = vdev_draid_logical_to_physical(raidvd,
	    logical_start, &perm, &groupstart);
	uint64_t end = start;

	uint8_t *base;
	uint64_t iter, id;
	vdev_draid_get_perm(vdc, perm, &base, &iter);

	/*
	 * Check if the passed child falls within the group. If it does
	 * update the start and end to reflect the physical range.
	 * Otherwise, leave them unmodified which will result in an empty
	 * (zero-length) physical range being returned.
	 */
	for (uint64_t i = 0; i < vdc->vdc_groupwidth; i++) {
		uint64_t c = (groupstart + i) % vdc->vdc_ndisks;

		if (c == 0 && i != 0) {
			/* the group wrapped, increment the start */
			start += VDEV_DRAID_ROWHEIGHT;
			end = start;
		}

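		/*
		 * Each child in the group stores ceil(b_size / groupwidth)
		 * blocks of the logical range, which is what the end
		 * calculation below computes in sector (1 << ashift) units.
		 */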
		id = vdev_draid_permute_id(vdc, base, iter, c);
		if (id == cvd->vdev_id) {
			uint64_t b_size = (logical_end >> ashift) -
			    (logical_start >> ashift);
			ASSERT3U(b_size, >, 0);
			end = start + ((((b_size - 1) /
			    vdc->vdc_groupwidth) + 1) << ashift);
			break;
		}
	}
	physical_rs->rs_start = start;
	physical_rs->rs_end = end;

	/*
	 * Only top-level vdevs are allowed to set remain_rs because
	 * when .vdev_op_xlate() is called for their children the full
	 * logical range is not provided by vdev_xlate().
	 */
	remain_rs->rs_start = logical_end;
	remain_rs->rs_end = logical_rs->rs_end;

	ASSERT3U(physical_rs->rs_start, <=, logical_start);
	ASSERT3U(physical_rs->rs_end - physical_rs->rs_start, <=,
	    logical_end - logical_start);
}

/*
 * Add dRAID specific fields to the config nvlist.
 */
static void
vdev_draid_config_generate(vdev_t *vd, nvlist_t *nv)
{
	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
	vdev_draid_config_t *vdc = vd->vdev_tsd;

	fnvlist_add_uint64(nv, ZPOOL_CONFIG_NPARITY, vdc->vdc_nparity);
	fnvlist_add_uint64(nv, ZPOOL_CONFIG_DRAID_NDATA, vdc->vdc_ndata);
	fnvlist_add_uint64(nv, ZPOOL_CONFIG_DRAID_NSPARES, vdc->vdc_nspares);
	fnvlist_add_uint64(nv, ZPOOL_CONFIG_DRAID_NGROUPS, vdc->vdc_ngroups);
}

/*
 * Initialize private dRAID specific fields from the nvlist.
 */
static int
vdev_draid_init(spa_t *spa, nvlist_t *nv, void **tsd)
{
	(void) spa;
	uint64_t ndata, nparity, nspares, ngroups;
	int error;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DRAID_NDATA, &ndata))
		return (SET_ERROR(EINVAL));

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY, &nparity) ||
	    nparity == 0 || nparity > VDEV_DRAID_MAXPARITY) {
		return (SET_ERROR(EINVAL));
	}

	uint_t children;
	nvlist_t **child;
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0 || children == 0 ||
	    children > VDEV_DRAID_MAX_CHILDREN) {
		return (SET_ERROR(EINVAL));
	}

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DRAID_NSPARES, &nspares) ||
	    nspares > 100 || nspares > (children - (ndata + nparity))) {
		return (SET_ERROR(EINVAL));
	}

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DRAID_NGROUPS, &ngroups) ||
	    ngroups == 0 || ngroups > VDEV_DRAID_MAX_CHILDREN) {
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Validate the minimum number of children exist per group for the
	 * specified parity level (draid1 >= 2, draid2 >= 3, draid3 >= 4).
	 */
	if (children < (ndata + nparity + nspares))
		return (SET_ERROR(EINVAL));

	/*
	 * Create the dRAID configuration using the pool nvlist configuration
	 * and the fixed mapping for the correct number of children.
	 */
	vdev_draid_config_t *vdc;
	const draid_map_t *map;

	error = vdev_draid_lookup_map(children, &map);
	if (error)
		return (SET_ERROR(EINVAL));

	vdc = kmem_zalloc(sizeof (*vdc), KM_SLEEP);
	vdc->vdc_ndata = ndata;
	vdc->vdc_nparity = nparity;
	vdc->vdc_nspares = nspares;
	vdc->vdc_children = children;
	vdc->vdc_ngroups = ngroups;
	vdc->vdc_nperms = map->dm_nperms;

	error = vdev_draid_generate_perms(map, &vdc->vdc_perms);
	if (error) {
		kmem_free(vdc, sizeof (*vdc));
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Derived constants.
	 */
	vdc->vdc_groupwidth = vdc->vdc_ndata + vdc->vdc_nparity;
	vdc->vdc_ndisks = vdc->vdc_children - vdc->vdc_nspares;
	vdc->vdc_groupsz = vdc->vdc_groupwidth * VDEV_DRAID_ROWHEIGHT;
	vdc->vdc_devslicesz = (vdc->vdc_groupsz * vdc->vdc_ngroups) /
	    vdc->vdc_ndisks;

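	/*
	 * Worked example (hypothetical layout): a draid2:8d:2s:23c vdev
	 * has groupwidth = 8 + 2 = 10, ndisks = 23 - 2 = 21, and
	 * groupsz = 10 * 16MB = 160MB. Assuming ngroups = 21 for
	 * illustration (any value where groupwidth * ngroups is a
	 * multiple of ndisks satisfies the assertions below), then
	 * devslicesz = (160MB * 21) / 21 = 160MB, i.e. ten full 16MB
	 * rows per disk.
	 */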
	ASSERT3U(vdc->vdc_groupwidth, >=, 2);
	ASSERT3U(vdc->vdc_groupwidth, <=, vdc->vdc_ndisks);
	ASSERT3U(vdc->vdc_groupsz, >=, 2 * VDEV_DRAID_ROWHEIGHT);
	ASSERT3U(vdc->vdc_devslicesz, >=, VDEV_DRAID_ROWHEIGHT);
	ASSERT3U(vdc->vdc_devslicesz % VDEV_DRAID_ROWHEIGHT, ==, 0);
	ASSERT3U((vdc->vdc_groupwidth * vdc->vdc_ngroups) %
	    vdc->vdc_ndisks, ==, 0);

	*tsd = vdc;

	return (0);
}

static void
vdev_draid_fini(vdev_t *vd)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;

	vmem_free(vdc->vdc_perms, sizeof (uint8_t) *
	    vdc->vdc_children * vdc->vdc_nperms);
	kmem_free(vdc, sizeof (*vdc));
}

static uint64_t
vdev_draid_nparity(vdev_t *vd)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;

	return (vdc->vdc_nparity);
}

static uint64_t
vdev_draid_ndisks(vdev_t *vd)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;

	return (vdc->vdc_ndisks);
}

vdev_ops_t vdev_draid_ops = {
	.vdev_op_init = vdev_draid_init,
	.vdev_op_fini = vdev_draid_fini,
	.vdev_op_open = vdev_draid_open,
	.vdev_op_close = vdev_draid_close,
	.vdev_op_asize = vdev_draid_asize,
	.vdev_op_min_asize = vdev_draid_min_asize,
	.vdev_op_min_alloc = vdev_draid_min_alloc,
	.vdev_op_io_start = vdev_draid_io_start,
	.vdev_op_io_done = vdev_draid_io_done,
	.vdev_op_state_change = vdev_draid_state_change,
	.vdev_op_need_resilver = vdev_draid_need_resilver,
	.vdev_op_hold = NULL,
	.vdev_op_rele = NULL,
	.vdev_op_remap = NULL,
	.vdev_op_xlate = vdev_draid_xlate,
	.vdev_op_rebuild_asize = vdev_draid_rebuild_asize,
	.vdev_op_metaslab_init = vdev_draid_metaslab_init,
	.vdev_op_config_generate = vdev_draid_config_generate,
	.vdev_op_nparity = vdev_draid_nparity,
	.vdev_op_ndisks = vdev_draid_ndisks,
	.vdev_op_type = VDEV_TYPE_DRAID,
	.vdev_op_leaf = B_FALSE,
};

/*
 * A dRAID distributed spare is a virtual leaf vdev which is included in the
 * parent dRAID configuration. The last N columns of the dRAID permutation
 * table are used to determine on which dRAID children a specific offset
 * should be written. These spare leaf vdevs can only be used to replace
 * faulted children in the same dRAID configuration.
 */

/*
 * Distributed spare state. All fields are set when the distributed spare is
 * first opened and are immutable.
 */
typedef struct {
	vdev_t *vds_draid_vdev;		/* top-level parent dRAID vdev */
	uint64_t vds_top_guid;		/* top-level parent dRAID guid */
	uint64_t vds_spare_id;		/* spare id (0 - vdc->vdc_nspares-1) */
} vdev_draid_spare_t;

/*
 * Returns the parent dRAID vdev to which the distributed spare belongs.
 * This may be safely called even when the vdev is not open.
 */
vdev_t *
vdev_draid_spare_get_parent(vdev_t *vd)
{
	vdev_draid_spare_t *vds = vd->vdev_tsd;

	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_spare_ops);

	if (vds->vds_draid_vdev != NULL)
		return (vds->vds_draid_vdev);

	return (vdev_lookup_by_guid(vd->vdev_spa->spa_root_vdev,
	    vds->vds_top_guid));
}

/*
 * A dRAID spare is active when it's the child of a vdev using the
 * vdev_spare_ops, vdev_replacing_ops or vdev_draid_ops.
 */
static boolean_t
vdev_draid_spare_is_active(vdev_t *vd)
{
	vdev_t *pvd = vd->vdev_parent;

	if (pvd != NULL && (pvd->vdev_ops == &vdev_spare_ops ||
	    pvd->vdev_ops == &vdev_replacing_ops ||
	    pvd->vdev_ops == &vdev_draid_ops)) {
		return (B_TRUE);
	} else {
		return (B_FALSE);
	}
}

/*
 * Given a dRAID distributed spare vdev, returns the physical child vdev
 * on which the provided offset resides. This may involve recursing through
 * multiple layers of distributed spares. Note that offset is relative to
 * this vdev.
 */
vdev_t *
vdev_draid_spare_get_child(vdev_t *vd, uint64_t physical_offset)
{
	vdev_draid_spare_t *vds = vd->vdev_tsd;

	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_spare_ops);

	/* The vdev is closed */
	if (vds->vds_draid_vdev == NULL)
		return (NULL);

	vdev_t *tvd = vds->vds_draid_vdev;
	vdev_draid_config_t *vdc = tvd->vdev_tsd;

	ASSERT3P(tvd->vdev_ops, ==, &vdev_draid_ops);
	ASSERT3U(vds->vds_spare_id, <, vdc->vdc_nspares);

	uint8_t *base;
	uint64_t iter;
	uint64_t perm = physical_offset / vdc->vdc_devslicesz;

	vdev_draid_get_perm(vdc, perm, &base, &iter);

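	/*
	 * Distributed spares occupy the final vdc_nspares columns of each
	 * permutation, so spare_id 0 maps to the last column, spare_id 1
	 * to the second to last, and so on.
	 */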
	uint64_t cid = vdev_draid_permute_id(vdc, base, iter,
	    (tvd->vdev_children - 1) - vds->vds_spare_id);
	vdev_t *cvd = tvd->vdev_child[cid];

	if (cvd->vdev_ops == &vdev_draid_spare_ops)
		return (vdev_draid_spare_get_child(cvd, physical_offset));

	return (cvd);
}

static void
vdev_draid_spare_close(vdev_t *vd)
{
	vdev_draid_spare_t *vds = vd->vdev_tsd;
	vds->vds_draid_vdev = NULL;
}

/*
 * Opening a dRAID spare device is done by looking up the associated dRAID
 * top-level vdev guid from the spare configuration.
 */
static int
vdev_draid_spare_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
    uint64_t *logical_ashift, uint64_t *physical_ashift)
{
	vdev_draid_spare_t *vds = vd->vdev_tsd;
	vdev_t *rvd = vd->vdev_spa->spa_root_vdev;
	uint64_t asize, max_asize;

	vdev_t *tvd = vdev_lookup_by_guid(rvd, vds->vds_top_guid);
	if (tvd == NULL) {
		/*
		 * When spa_vdev_add() is labeling new spares the
		 * associated dRAID is not attached to the root vdev
		 * nor does this spare have a parent. Simulate a valid
		 * device in order to allow the label to be initialized
		 * and the distributed spare added to the configuration.
		 */
		if (vd->vdev_parent == NULL) {
			*psize = *max_psize = SPA_MINDEVSIZE;
			*logical_ashift = *physical_ashift = ASHIFT_MIN;
			return (0);
		}

		return (SET_ERROR(EINVAL));
	}

	vdev_draid_config_t *vdc = tvd->vdev_tsd;
	if (tvd->vdev_ops != &vdev_draid_ops || vdc == NULL)
		return (SET_ERROR(EINVAL));

	if (vds->vds_spare_id >= vdc->vdc_nspares)
		return (SET_ERROR(EINVAL));

	/*
	 * Neither tvd->vdev_asize nor tvd->vdev_max_asize can be used here
	 * because the caller may be vdev_draid_open() in which case the
	 * values are stale as they haven't yet been updated by vdev_open().
	 * To avoid this always recalculate the dRAID asize and max_asize.
	 */
	vdev_draid_calculate_asize(tvd, &asize, &max_asize,
	    logical_ashift, physical_ashift);

	*psize = asize + VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
	*max_psize = max_asize + VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;

	vds->vds_draid_vdev = tvd;

	return (0);
}

/*
 * Completed distributed spare IO. Store the result in the parent zio
 * as if it had performed the operation itself. Only the first error is
 * preserved if there are multiple errors.
 */
static void
vdev_draid_spare_child_done(zio_t *zio)
{
	zio_t *pio = zio->io_private;

	/*
	 * IOs are issued to non-writable vdevs in order to keep their
	 * DTLs accurate. However, we don't want to propagate the
	 * error into the distributed spare's DTL. When resilvering
	 * vdev_draid_need_resilver() will consult the relevant DTL
	 * to determine if the data is missing and must be repaired.
	 */
	if (!vdev_writeable(zio->io_vd))
		return;

	if (pio->io_error == 0)
		pio->io_error = zio->io_error;
}

/*
 * Returns a valid label nvlist for the distributed spare vdev. This is
 * used to bypass the IO pipeline to avoid the complexity of constructing
 * a complete label with valid checksum to return when read.
 */
nvlist_t *
vdev_draid_read_config_spare(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	spa_aux_vdev_t *sav = &spa->spa_spares;
	uint64_t guid = vd->vdev_guid;

	nvlist_t *nv = fnvlist_alloc();
	fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 1);
	fnvlist_add_uint64(nv, ZPOOL_CONFIG_CREATE_TXG, vd->vdev_crtxg);
	fnvlist_add_uint64(nv, ZPOOL_CONFIG_VERSION, spa_version(spa));
	fnvlist_add_string(nv, ZPOOL_CONFIG_POOL_NAME, spa_name(spa));
	fnvlist_add_uint64(nv, ZPOOL_CONFIG_POOL_GUID, spa_guid(spa));
	fnvlist_add_uint64(nv, ZPOOL_CONFIG_POOL_TXG, spa->spa_config_txg);
	fnvlist_add_uint64(nv, ZPOOL_CONFIG_TOP_GUID, vd->vdev_top->vdev_guid);
	fnvlist_add_uint64(nv, ZPOOL_CONFIG_POOL_STATE,
	    vdev_draid_spare_is_active(vd) ?
	    POOL_STATE_ACTIVE : POOL_STATE_SPARE);

	/* Set the vdev guid from the matching entry in the spare list. */
	for (int i = 0; i < sav->sav_count; i++) {
		if (sav->sav_vdevs[i]->vdev_ops == &vdev_draid_spare_ops &&
		    strcmp(sav->sav_vdevs[i]->vdev_path, vd->vdev_path) == 0) {
			guid = sav->sav_vdevs[i]->vdev_guid;
			break;
		}
	}

	fnvlist_add_uint64(nv, ZPOOL_CONFIG_GUID, guid);

	return (nv);
}

/*
 * Handle any flush requested of the distributed spare. All children must be
 * flushed.
 */
static int
vdev_draid_spare_flush(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	int error = 0;

	for (int c = 0; c < vd->vdev_children; c++) {
		zio_nowait(zio_vdev_child_io(zio, NULL,
		    vd->vdev_child[c], zio->io_offset, zio->io_abd,
		    zio->io_size, zio->io_type, zio->io_priority, 0,
		    vdev_draid_spare_child_done, zio));
	}

	return (error);
}

/*
 * Initiate an IO to the distributed spare. For normal IOs this entails using
 * the zio->io_offset and permutation table to calculate which child dRAID vdev
 * is responsible for the data, then passing the zio along to that child to
 * perform the actual IO. The label ranges are not stored on disk and require
 * some special handling which is described below.
 */
static void
vdev_draid_spare_io_start(zio_t *zio)
{
	vdev_t *cvd = NULL, *vd = zio->io_vd;
	vdev_draid_spare_t *vds = vd->vdev_tsd;
	uint64_t offset = zio->io_offset - VDEV_LABEL_START_SIZE;
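	/*
	 * The offset computed above subtracts the front label size so
	 * that non-label IOs are relative to the start of the data
	 * region, which is what vdev_draid_spare_get_child() expects.
	 */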

	/*
	 * If the vdev is closed, it's likely in the REMOVED or FAULTED state.
	 * Nothing to be done here but return failure.
	 */
	if (vds == NULL) {
		zio->io_error = ENXIO;
		zio_interrupt(zio);
		return;
	}

	switch (zio->io_type) {
	case ZIO_TYPE_FLUSH:
		zio->io_error = vdev_draid_spare_flush(zio);
		break;

	case ZIO_TYPE_WRITE:
		if (VDEV_OFFSET_IS_LABEL(vd, zio->io_offset)) {
			/*
			 * Accept probe IOs and config writers to simulate the
			 * existence of an on disk label. vdev_label_sync(),
			 * vdev_uberblock_sync() and vdev_copy_uberblocks()
			 * skip the distributed spares. This only leaves
			 * vdev_label_init() which is allowed to succeed to
			 * avoid adding special cases to the function.
			 */
			if (zio->io_flags & ZIO_FLAG_PROBE ||
			    zio->io_flags & ZIO_FLAG_CONFIG_WRITER) {
				zio->io_error = 0;
			} else {
				zio->io_error = SET_ERROR(EIO);
			}
		} else {
			cvd = vdev_draid_spare_get_child(vd, offset);

			if (cvd == NULL) {
				zio->io_error = SET_ERROR(ENXIO);
			} else {
				zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
				    offset, zio->io_abd, zio->io_size,
				    zio->io_type, zio->io_priority, 0,
				    vdev_draid_spare_child_done, zio));
			}
		}
		break;

	case ZIO_TYPE_READ:
		if (VDEV_OFFSET_IS_LABEL(vd, zio->io_offset)) {
			/*
			 * Accept probe IOs to simulate the existence of a
			 * label. vdev_label_read_config() bypasses the
			 * pipeline to read the label configuration and
			 * vdev_uberblock_load() skips distributed spares
			 * when attempting to locate the best uberblock.
			 */
			if (zio->io_flags & ZIO_FLAG_PROBE) {
				zio->io_error = 0;
			} else {
				zio->io_error = SET_ERROR(EIO);
			}
		} else {
			cvd = vdev_draid_spare_get_child(vd, offset);

			if (cvd == NULL || !vdev_readable(cvd)) {
				zio->io_error = SET_ERROR(ENXIO);
			} else {
				zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
				    offset, zio->io_abd, zio->io_size,
				    zio->io_type, zio->io_priority, 0,
				    vdev_draid_spare_child_done, zio));
			}
		}
		break;

	case ZIO_TYPE_TRIM:
		/* The vdev label ranges are never trimmed */
		ASSERT0(VDEV_OFFSET_IS_LABEL(vd, zio->io_offset));

		cvd = vdev_draid_spare_get_child(vd, offset);

		if (cvd == NULL || !cvd->vdev_has_trim) {
			zio->io_error = SET_ERROR(ENXIO);
		} else {
			zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
			    offset, zio->io_abd, zio->io_size,
			    zio->io_type, zio->io_priority, 0,
			    vdev_draid_spare_child_done, zio));
		}
		break;

	default:
		zio->io_error = SET_ERROR(ENOTSUP);
		break;
	}

	zio_execute(zio);
}

static void
vdev_draid_spare_io_done(zio_t *zio)
{
	(void) zio;
}

/*
 * Lookup the full spare config in spa->spa_spares.sav_config and
 * return the top_guid and spare_id for the named spare.
 */
static int
vdev_draid_spare_lookup(spa_t *spa, nvlist_t *nv, uint64_t *top_guidp,
    uint64_t *spare_idp)
{
	nvlist_t **spares;
	uint_t nspares;
	int error;

	if ((spa->spa_spares.sav_config == NULL) ||
	    (nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) != 0)) {
		return (SET_ERROR(ENOENT));
	}

	const char *spare_name;
	error = nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &spare_name);
	if (error != 0)
		return (SET_ERROR(EINVAL));

	for (int i = 0; i < nspares; i++) {
		nvlist_t *spare = spares[i];
		uint64_t top_guid, spare_id;
		const char *type, *path;

		/* Skip non-distributed spares */
		error = nvlist_lookup_string(spare, ZPOOL_CONFIG_TYPE, &type);
		if (error != 0 || strcmp(type, VDEV_TYPE_DRAID_SPARE) != 0)
			continue;

		/* Skip spares with the wrong name */
		error = nvlist_lookup_string(spare, ZPOOL_CONFIG_PATH, &path);
		if (error != 0 || strcmp(path, spare_name) != 0)
			continue;

		/* Found the matching spare */
		error = nvlist_lookup_uint64(spare,
		    ZPOOL_CONFIG_TOP_GUID, &top_guid);
		if (error == 0) {
			error = nvlist_lookup_uint64(spare,
			    ZPOOL_CONFIG_SPARE_ID, &spare_id);
		}

		if (error != 0) {
			return (SET_ERROR(EINVAL));
		} else {
			*top_guidp = top_guid;
			*spare_idp = spare_id;
			return (0);
		}
	}

	return (SET_ERROR(ENOENT));
}

/*
 * Initialize private dRAID spare specific fields from the nvlist.
 */
static int
vdev_draid_spare_init(spa_t *spa, nvlist_t *nv, void **tsd)
{
	vdev_draid_spare_t *vds;
	uint64_t top_guid = 0;
	uint64_t spare_id;

	/*
	 * In the normal case check the list of spares stored in the spa
	 * to lookup the top_guid and spare_id for the provided spare config.
	 * When creating a new pool or adding vdevs the spare list is not
	 * yet populated and the values are provided in the passed config.
	 */
	if (vdev_draid_spare_lookup(spa, nv, &top_guid, &spare_id) != 0) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_TOP_GUID,
		    &top_guid) != 0)
			return (SET_ERROR(EINVAL));

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_SPARE_ID,
		    &spare_id) != 0)
			return (SET_ERROR(EINVAL));
	}

	vds = kmem_alloc(sizeof (vdev_draid_spare_t), KM_SLEEP);
	vds->vds_draid_vdev = NULL;
	vds->vds_top_guid = top_guid;
	vds->vds_spare_id = spare_id;

	*tsd = vds;

	return (0);
}

static void
vdev_draid_spare_fini(vdev_t *vd)
{
	kmem_free(vd->vdev_tsd, sizeof (vdev_draid_spare_t));
}

static void
vdev_draid_spare_config_generate(vdev_t *vd, nvlist_t *nv)
{
	vdev_draid_spare_t *vds = vd->vdev_tsd;

	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_spare_ops);

	fnvlist_add_uint64(nv, ZPOOL_CONFIG_TOP_GUID, vds->vds_top_guid);
	fnvlist_add_uint64(nv, ZPOOL_CONFIG_SPARE_ID, vds->vds_spare_id);
}

vdev_ops_t vdev_draid_spare_ops = {
	.vdev_op_init = vdev_draid_spare_init,
	.vdev_op_fini = vdev_draid_spare_fini,
	.vdev_op_open = vdev_draid_spare_open,
	.vdev_op_close = vdev_draid_spare_close,
	.vdev_op_asize = vdev_default_asize,
	.vdev_op_min_asize = vdev_default_min_asize,
	.vdev_op_min_alloc = NULL,
	.vdev_op_io_start = vdev_draid_spare_io_start,
	.vdev_op_io_done = vdev_draid_spare_io_done,
	.vdev_op_state_change = NULL,
	.vdev_op_need_resilver = NULL,
	.vdev_op_hold = NULL,
	.vdev_op_rele = NULL,
	.vdev_op_remap = NULL,
	.vdev_op_xlate = vdev_default_xlate,
	.vdev_op_rebuild_asize = NULL,
	.vdev_op_metaslab_init = NULL,
	.vdev_op_config_generate = vdev_draid_spare_config_generate,
	.vdev_op_nparity = NULL,
	.vdev_op_ndisks = NULL,
	.vdev_op_type = VDEV_TYPE_DRAID_SPARE,
	.vdev_op_leaf = B_TRUE,
};