/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2018 Intel Corporation.
 * Copyright (c) 2020 by Lawrence Livermore National Security, LLC.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_draid.h>
#include <sys/vdev_raidz.h>
#include <sys/vdev_rebuild.h>
#include <sys/abd.h>
#include <sys/zio.h>
#include <sys/nvpair.h>
#include <sys/zio_checksum.h>
#include <sys/fs/zfs.h>
#include <sys/fm/fs/zfs.h>
#include <zfs_fletcher.h>

#ifdef ZFS_DEBUG
#include <sys/vdev.h>	/* For vdev_xlate() in vdev_draid_io_verify() */
#endif

/*
 * dRAID is a distributed spare implementation for ZFS. A dRAID vdev is
 * comprised of multiple raidz redundancy groups which are spread over the
 * dRAID children. To ensure an even distribution, and avoid hot spots, a
 * permutation mapping is applied to the order of the dRAID children.
 * This mixing effectively distributes the parity columns evenly over all
 * of the disks in the dRAID.
 *
 * This is beneficial because it means when resilvering all of the disks
 * can participate thereby increasing the available IOPs and bandwidth.
 * Furthermore, by reserving a small fraction of each child's total capacity
 * virtual distributed spare disks can be created. These spares similarly
 * benefit from the performance gains of spanning all of the children. The
 * consequence of which is that resilvering to a distributed spare can
 * substantially reduce the time required to restore full parity to a pool
 * with a failed disk.
 *
 * === dRAID group layout ===
 *
 * First, let's define a "row" in the configuration to be a 16M chunk from
 * each physical drive at the same offset. This is the minimum allowable
 * size since it must be possible to store a full 16M block when there is
 * only a single data column. Next, we define a "group" to be a set of
 * sequential disks containing both the parity and data columns. We allow
 * groups to span multiple rows in order to align any group size to any
 * number of physical drives. Finally, a "slice" is comprised of the rows
 * which contain the target number of groups. The permutation mappings
 * are applied in a round robin fashion to each slice.
 *
 * Given D+P drives in a group (including parity drives) and C-S physical
 * drives (not including the spare drives), we can distribute the groups
 * across R rows without remainder by selecting the least common multiple
 * of D+P and C-S as the number of 16M disk chunks per slice; the number
 * of groups per slice is then ngroups = LCM(D+P, C-S) / (D+P).
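 *
 * For example, with D+P = 9 and C-S = 12 (the configuration diagrammed
 * below): LCM(9, 12) = 36 disk chunks per slice, giving
 * ngroups = 36 / 9 = 4 groups per slice spread over 36 / 12 = 3 rows
 * per slice.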
 *
 * In the example below, there are C=14 physical drives in the configuration
 * with S=2 drives worth of spare capacity. Each group has a width of 9
 * which includes D=8 data and P=1 parity drive. There are 4 groups and
 * 3 rows per slice. Each group has a size of 144M (16M * 9) and a slice
 * size is 576M (144M * 4). When allocating from a dRAID each group is
 * filled before moving on to the next as shown in slice0 below.
 *
 *            data disks (8 data + 1 parity)          spares (2)
 *     +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
 *  ^  | 2 | 6 | 1 | 11| 4 | 0 | 7 | 10| 8 | 9 | 13| 5 | 12| 3 | device map 0
 *  |  +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
 *  |  |             group 0               | group 1..|        |
 *  |  +-----------------------------------+-----------+-------|
 *  |  | 0   1   2   3   4   5   6   7   8 | 36  37  38|       |  r
 *  |  | 9   10  11  12  13  14  15  16  17| 45  46  47|       |  o
 *  |  | 18  19  20  21  22  23  24  25  26| 54  55  56|       |  w
 *  |  | 27  28  29  30  31  32  33  34  35| 63  64  65|       |  0
 *  s  +-----------------------+-----------------------+-------+
 *  l  |       ..group 1       |        group 2..      |       |
 *  i  +-----------------------+-----------------------+-------+
 *  c  | 39  40  41  42  43  44| 72  73  74  75  76  77|       |  r
 *  e  | 48  49  50  51  52  53| 81  82  83  84  85  86|       |  o
 *  0  | 57  58  59  60  61  62| 90  91  92  93  94  95|       |  w
 *  |  | 66  67  68  69  70  71| 99 100 101 102 103 104|       |  1
 *  |  +-----------+-----------+-----------------------+-------+
 *  |  |..group 2  |           group 3                 |       |
 *  |  +-----------+-----------+-----------------------+-------+
 *  |  | 78  79  80|108 109 110 111 112 113 114 115 116|       |  r
 *  |  | 87  88  89|117 118 119 120 121 122 123 124 125|       |  o
 *  |  | 96  97  98|126 127 128 129 130 131 132 133 134|       |  w
 *  v  |105 106 107|135 136 137 138 139 140 141 142 143|       |  2
 *     +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
 *     | 9 | 11| 12| 2 | 4 | 1 | 3 | 0 | 10| 13| 8 | 5 | 6 | 7 | device map 1
 *  s  +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
 *  l  |           group 4         | group 5..         |       | row 3
 *  i  +-----------------------+-----------+-----------+-------|
 *  c  |       ..group 5       |      group 6..        |       | row 4
 *  e  +-----------+-----------+-----------------------+-------+
 *  1  |..group 6  |           group 7                 |       | row 5
 *     +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
 *     | 3 | 5 | 10| 8 | 6 | 11| 12| 0 | 2 | 4 | 7 | 1 | 9 | 13| device map 2
 *  s  +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
 *  l  |           group 8         | group 9..         |       | row 6
 *  i  +-----------------------------------------------+-------|
 *  c  |       ..group 9       |      group 10..       |       | row 7
 *  e  +-----------------------+-----------------------+-------+
 *  2  |..group 10 |           group 11                |       | row 8
 *     +-----------+-----------------------------------+-------+
 *
 * This layout has several advantages over requiring that each row contain
 * a whole number of groups.
 *
 * 1. The group count is not a relevant parameter when defining a dRAID
 *    layout. Only the group width is needed, and *all* groups will have
 *    the desired size.
 *
 * 2. All possible group widths (<= physical disk count) can be supported.
 *
 * 3. The logic within vdev_draid.c is simplified when the group width is
 *    the same for all groups (although some of the logic around computing
 *    permutation numbers and drive offsets is more complicated).
 *
 * N.B. The following array describes all valid dRAID permutation maps.
 * Each row is used to generate a permutation map for a different number
 * of children from a unique seed. The seeds were generated and carefully
 * evaluated by the 'draid' utility in order to provide balanced mappings.
 * In addition to the seed a checksum of the in-memory mapping is stored
 * for verification.
 *
 * The imbalance ratio of a given failure (e.g. 5 disks wide, child 3 failed,
 * with a given permutation map) is the ratio of the amounts of I/O that will
 * be sent to the least and most busy disks when resilvering. The average
 * imbalance ratio (of a given number of disks and permutation map) is the
 * average of the ratios of all possible single and double disk failures.
 *
 * In order to achieve a low imbalance ratio the number of permutations in
 * the mapping must be significantly larger than the number of children.
 * For dRAID the number of permutations has been limited to 512 to minimize
 * the map size. This does result in a gradually increasing imbalance ratio
 * as seen in the table below. Increasing the number of permutations for
 * larger child counts would reduce the imbalance ratio. However, in practice
 * when there are a large number of children each child is responsible for
 * fewer total IOs so it's less of a concern.
 *
 * Note these values are hard coded and must never be changed. Existing
 * pools depend on the same mapping always being generated in order to
 * read and write from the correct locations. Any change would make
 * existing pools completely inaccessible.
 */
static const draid_map_t draid_maps[VDEV_DRAID_MAX_MAPS] = {
	{   2, 256, 0x89ef3dabbcc7de37, 0x00000000433d433d },	/* 1.000 */
	{   3, 256, 0x89a57f3de98121b4, 0x00000000bcd8b7b5 },	/* 1.000 */
	{   4, 256, 0xc9ea9ec82340c885, 0x00000001819d7c69 },	/* 1.000 */
	{   5, 256, 0xf46733b7f4d47dfd, 0x00000002a1648d74 },	/* 1.010 */
	{   6, 256, 0x88c3c62d8585b362, 0x00000003d3b0c2c4 },	/* 1.031 */
	{   7, 256, 0x3a65d809b4d1b9d5, 0x000000055c4183ee },	/* 1.043 */
	{   8, 256, 0xe98930e3c5d2e90a, 0x00000006edfb0329 },	/* 1.059 */
	{   9, 256, 0x5a5430036b982ccb, 0x00000008ceaf6934 },	/* 1.056 */
	{  10, 256, 0x92bf389e9eadac74, 0x0000000b26668c09 },	/* 1.072 */
	{  11, 256, 0x74ccebf1dcf3ae80, 0x0000000dd691358c },	/* 1.083 */
	{  12, 256, 0x8847e41a1a9f5671, 0x00000010a0c63c8e },	/* 1.097 */
	{  13, 256, 0x7481b56debf0e637, 0x0000001424121fe4 },	/* 1.100 */
	{  14, 256, 0x559b8c44065f8967, 0x00000016ab2ff079 },	/* 1.121 */
	{  15, 256, 0x34c49545a2ee7f01, 0x0000001a6028efd6 },	/* 1.103 */
	{  16, 256, 0xb85f4fa81a7698f7, 0x0000001e95ff5e66 },	/* 1.111 */
	{  17, 256, 0x6353e47b7e47aba0, 0x00000021a81fa0fe },	/* 1.133 */
	{  18, 256, 0xaa549746b1cbb81c, 0x00000026f02494c9 },	/* 1.131 */
	{  19, 256, 0x892e343f2f31d690, 0x00000029eb392835 },	/* 1.130 */
	{  20, 256, 0x76914824db98cc3f, 0x0000003004f31a7c },	/* 1.141 */
	{  21, 256, 0x4b3cbabf9cfb1d0f, 0x00000036363a2408 },	/* 1.139 */
	{  22, 256, 0xf45c77abb4f035d4, 0x00000038dd0f3e84 },	/* 1.150 */
	{  23, 256, 0x5e18bd7f3fd4baf4, 0x0000003f0660391f },	/* 1.174 */
	{  24, 256, 0xa7b3a4d285d6503b, 0x000000443dfc9ff6 },	/* 1.168 */
	{  25, 256, 0x56ac7dd967521f5a, 0x0000004b03a87eb7 },	/* 1.180 */
	{  26, 256, 0x3a42dfda4eb880f7, 0x000000522c719bba },	/* 1.226 */
	{  27, 256, 0xd200d2fc6b54bf60, 0x0000005760b4fdf5 },	/* 1.228 */
	{  28, 256, 0xc52605bbd486c546, 0x0000005e00d8f74c },	/* 1.217 */
	{  29, 256, 0xc761779e63cd762f, 0x00000067be3cd85c },	/* 1.239 */
	{  30, 256, 0xca577b1e07f85ca5, 0x0000006f5517f3e4 },	/* 1.238 */
	{  31, 256, 0xfd50a593c518b3d4, 0x0000007370e7778f },	/* 1.273 */
	{  32, 512, 0xc6c87ba5b042650b, 0x000000f7eb08a156 },	/* 1.191 */
	{  33, 512, 0xc3880d0c9d458304, 0x0000010734b5d160 },	/* 1.199 */
	{  34, 512, 0xe920927e4d8b2c97, 0x00000118c1edbce0 },	/* 1.195 */
	{  35, 512, 0x8da7fcda87bde316, 0x0000012a3e9f9110 },	/* 1.201 */
	{  36, 512, 0xcf09937491514a29, 0x0000013bd6a24bef },	/* 1.194 */
	{  37, 512, 0x9b5abbf345cbd7cc, 0x0000014b9d90fac3 },	/* 1.237 */
	{  38, 512, 0x506312a44668d6a9, 0x0000015e1b5f6148 },	/* 1.242 */
	{  39, 512, 0x71659ede62b4755f, 0x00000173ef029bcd },	/* 1.231 */
	{  40, 512, 0xa7fde73fb74cf2d7, 0x000001866fb72748 },	/* 1.233 */
	{  41, 512, 0x19e8b461a1dea1d3, 0x000001a046f76b23 },	/* 1.271 */
	{  42, 512, 0x031c9b868cc3e976, 0x000001afa64c49d3 },	/* 1.263 */
	{  43, 512, 0xbaa5125faa781854, 0x000001c76789e278 },	/* 1.270 */
	{  44, 512, 0x4ed55052550d721b, 0x000001d800ccd8eb },	/* 1.281 */
	{  45, 512, 0x0fd63ddbdff90677, 0x000001f08ad59ed2 },	/* 1.282 */
	{  46, 512, 0x36d66546de7fdd6f, 0x000002016f09574b },	/* 1.286 */
	{  47, 512, 0x99f997e7eafb69d7, 0x0000021e42e47cb6 },	/* 1.329 */
	{  48, 512, 0xbecd9c2571312c5d, 0x000002320fe2872b },	/* 1.286 */
	{  49, 512, 0xd97371329e488a32, 0x0000024cd73f2ca7 },	/* 1.322 */
	{  50, 512, 0x30e9b136670749ee, 0x000002681c83b0e0 },	/* 1.335 */
	{  51, 512, 0x11ad6bc8f47aaeb4, 0x0000027e9261b5d5 },	/* 1.305 */
	{  52, 512, 0x68e445300af432c1, 0x0000029aa0eb7dbf },	/* 1.330 */
	{  53, 512, 0x910fb561657ea98c, 0x000002b3dca04853 },	/* 1.365 */
	{  54, 512, 0xd619693d8ce5e7a5, 0x000002cc280e9c97 },	/* 1.334 */
	{  55, 512, 0x24e281f564dbb60a, 0x000002e9fa842713 },	/* 1.364 */
	{  56, 512, 0x947a7d3bdaab44c5, 0x000003046680f72e },	/* 1.374 */
	{  57, 512, 0x2d44fec9c093e0de, 0x00000324198ba810 },	/* 1.363 */
	{  58, 512, 0x87743c272d29bb4c, 0x0000033ec48c9ac9 },	/* 1.401 */
	{  59, 512, 0x96aa3b6f67f5d923, 0x0000034faead902c },	/* 1.392 */
	{  60, 512, 0x94a4f1faf520b0d3, 0x0000037d713ab005 },	/* 1.360 */
	{  61, 512, 0xb13ed3a272f711a2, 0x00000397368f3cbd },	/* 1.396 */
	{  62, 512, 0x3b1b11805fa4a64a, 0x000003b8a5e2840c },	/* 1.453 */
	{  63, 512, 0x4c74caad9172ba71, 0x000003d4be280290 },	/* 1.437 */
	{  64, 512, 0x035ff643923dd29e, 0x000003fad6c355e1 },	/* 1.402 */
	{  65, 512, 0x768e9171b11abd3c, 0x0000040eb07fed20 },	/* 1.459 */
	{  66, 512, 0x75880e6f78a13ddd, 0x000004433d6acf14 },	/* 1.423 */
	{  67, 512, 0x910b9714f698a877, 0x00000451ea65d5db },	/* 1.447 */
	{  68, 512, 0x87f5db6f9fdcf5c7, 0x000004732169e3f7 },	/* 1.450 */
	{  69, 512, 0x836d4968fbaa3706, 0x000004954068a380 },	/* 1.455 */
	{  70, 512, 0xc567d73a036421ab, 0x000004bd7cb7bd3d },	/* 1.463 */
	{  71, 512, 0x619df40f240b8fed, 0x000004e376c2e972 },	/* 1.463 */
	{  72, 512, 0x42763a680d5bed8e, 0x000005084275c680 },	/* 1.452 */
	{  73, 512, 0x5866f064b3230431, 0x0000052906f2c9ab },	/* 1.498 */
	{  74, 512, 0x9fa08548b1621a44, 0x0000054708019247 },	/* 1.526 */
	{  75, 512, 0xb6053078ce0fc303, 0x00000572cc5c72b0 },	/* 1.491 */
	{  76, 512, 0x4a7aad7bf3890923, 0x0000058e987bc8e9 },	/* 1.470 */
	{  77, 512, 0xe165613fd75b5a53, 0x000005c20473a211 },	/* 1.527 */
	{  78, 512, 0x3ff154ac878163a6, 0x000005d659194bf3 },	/* 1.509 */
	{  79, 512, 0x24b93ade0aa8a532, 0x0000060a201c4f8e },	/* 1.569 */
	{  80, 512, 0xc18e2d14cd9bb554, 0x0000062c55cfe48c },	/* 1.555 */
	{  81, 512, 0x98cc78302feb58b6, 0x0000066656a07194 },	/* 1.509 */
	{  82, 512, 0xc6c5fd5a2abc0543, 0x0000067cff94fbf8 },	/* 1.596 */
	{  83, 512, 0xa7962f514acbba21, 0x000006ab7b5afa2e },	/* 1.568 */
	{  84, 512, 0xba02545069ddc6dc, 0x000006d19861364f },	/* 1.541 */
	{  85, 512, 0x447c73192c35073e, 0x000006fce315ce35 },	/* 1.623 */
	{  86, 512, 0x48beef9e2d42b0c2, 0x00000720a8e38b6b },	/* 1.620 */
	{  87, 512, 0x4874cf98541a35e0, 0x00000758382a2273 },	/* 1.597 */
	{  88, 512, 0xad4cf8333a31127a, 0x00000781e1651b1b },	/* 1.575 */
	{  89, 512, 0x47ae4859d57888c1, 0x000007b27edbe5bc },	/* 1.627 */
	{  90, 512, 0x06f7723cfe5d1891, 0x000007dc2a96d8eb },	/* 1.596 */
	{  91, 512, 0xd4e44218d660576d, 0x0000080ac46f02d5 },	/* 1.622 */
	{  92, 512, 0x7066702b0d5be1f2, 0x00000832c96d154e },	/* 1.695 */
	{  93, 512, 0x011209b4f9e11fb9, 0x0000085eefda104c },	/* 1.605 */
	{  94, 512, 0x47ffba30a0b35708, 0x00000899badc32dc },	/* 1.625 */
	{  95, 512, 0x1a95a6ac4538aaa8, 0x000008b6b69a42b2 },	/* 1.687 */
	{  96, 512, 0xbda2b239bb2008eb, 0x000008f22d2de38a },	/* 1.621 */
	{  97, 512, 0x7ffa0bea90355c6c, 0x0000092e5b23b816 },	/* 1.699 */
	{  98, 512, 0x1d56ba34be426795, 0x0000094f482e5d1b },	/* 1.688 */
	{  99, 512, 0x0aa89d45c502e93d, 0x00000977d94a98ce },	/* 1.642 */
	{ 100, 512, 0x54369449f6857774, 0x000009c06c9b34cc },	/* 1.683 */
	{ 101, 512, 0xf7d4dd8445b46765, 0x000009e5dc542259 },	/* 1.755 */
	{ 102, 512, 0xfa8866312f169469, 0x00000a16b54eae93 },	/* 1.692 */
	{ 103, 512, 0xd8a5aea08aef3ff9, 0x00000a381d2cbfe7 },	/* 1.747 */
	{ 104, 512, 0x66bcd2c3d5f9ef0e, 0x00000a8191817be7 },	/* 1.751 */
	{ 105, 512, 0x3fb13a47a012ec81, 0x00000ab562b9a254 },	/* 1.751 */
	{ 106, 512, 0x43100f01c9e5e3ca, 0x00000aeee84c185f },	/* 1.726 */
	{ 107, 512, 0xca09c50ccee2d054, 0x00000b1c359c047d },	/* 1.788 */
	{ 108, 512, 0xd7176732ac503f9b, 0x00000b578bc52a73 },	/* 1.740 */
	{ 109, 512, 0xed206e51f8d9422d, 0x00000b8083e0d960 },	/* 1.780 */
	{ 110, 512, 0x17ead5dc6ba0dcd6, 0x00000bcfb1a32ca8 },	/* 1.836 */
	{ 111, 512, 0x5f1dc21e38a969eb, 0x00000c0171becdd6 },	/* 1.778 */
	{ 112, 512, 0xddaa973de33ec528, 0x00000c3edaba4b95 },	/* 1.831 */
	{ 113, 512, 0x2a5eccd7735a3630, 0x00000c630664e7df },	/* 1.825 */
	{ 114, 512, 0xafcccee5c0b71446, 0x00000cb65392f6e4 },	/* 1.826 */
	{ 115, 512, 0x8fa30c5e7b147e27, 0x00000cd4db391e55 },	/* 1.843 */
	{ 116, 512, 0x5afe0711fdfafd82, 0x00000d08cb4ec35d },	/* 1.826 */
	{ 117, 512, 0x533a6090238afd4c, 0x00000d336f115d1b },	/* 1.803 */
	{ 118, 512, 0x90cf11b595e39a84, 0x00000d8e041c2048 },	/* 1.857 */
	{ 119, 512, 0x0d61a3b809444009, 0x00000dcb798afe35 },	/* 1.877 */
	{ 120, 512, 0x7f34da0f54b0d114, 0x00000df3922664e1 },	/* 1.849 */
	{ 121, 512, 0xa52258d5b72f6551, 0x00000e4d37a9872d },	/* 1.867 */
	{ 122, 512, 0xc1de54d7672878db, 0x00000e6583a94cf6 },	/* 1.978 */
	{ 123, 512, 0x1d03354316a414ab, 0x00000ebffc50308d },	/* 1.947 */
	{ 124, 512, 0xcebdcc377665412c, 0x00000edee1997cea },	/* 1.865 */
	{ 125, 512, 0x4ddd4c04b1a12344, 0x00000f21d64b373f },	/* 1.881 */
	{ 126, 512, 0x64fc8f94e3973658, 0x00000f8f87a8896b },	/* 1.882 */
	{ 127, 512, 0x68765f78034a334e, 0x00000fb8fe62197e },	/* 1.867 */
	{ 128, 512, 0xaf36b871a303e816, 0x00000fec6f3afb1e },	/* 1.972 */
	{ 129, 512, 0x2a4cbf73866c3a28, 0x00001027febfe4e5 },	/* 1.896 */
	{ 130, 512, 0x9cb128aacdcd3b2f, 0x0000106aa8ac569d },	/* 1.965 */
	{ 131, 512, 0x5511d41c55869124, 0x000010bbd755ddf1 },	/* 1.963 */
	{ 132, 512, 0x42f92461937f284a, 0x000010fb8bceb3b5 },	/* 1.925 */
	{ 133, 512, 0xe2d89a1cf6f1f287, 0x0000114cf5331e34 },	/* 1.862 */
	{ 134, 512, 0xdc631a038956200e, 0x0000116428d2adc5 },	/* 2.042 */
	{ 135, 512, 0xb2e5ac222cd236be, 0x000011ca88e4d4d2 },	/* 1.935 */
	{ 136, 512, 0xbc7d8236655d88e7, 0x000011e39cb94e66 },	/* 2.005 */
	{ 137, 512, 0x073e02d88d2d8e75, 0x0000123136c7933c },	/* 2.041 */
	{ 138, 512, 0x3ddb9c3873166be0, 0x00001280e4ec6d52 },	/* 1.997 */
	{ 139, 512, 0x7d3b1a845420e1b5, 0x000012c2e7cd6a44 },	/* 1.996 */
	{ 140, 512, 0x60102308aa7b2a6c, 0x000012fc490e6c7d },	/* 2.053 */
	{ 141, 512, 0xdb22bb2f9eb894aa, 0x00001343f5a85a1a },	/* 1.971 */
	{ 142, 512, 0xd853f879a13b1606, 0x000013bb7d5f9048 },	/* 2.018 */
	{ 143, 512, 0x001620a03f804b1d, 0x000013e74cc794fd },	/* 1.961 */
	{ 144, 512, 0xfdb52dda76fbf667, 0x00001442d2f22480 },	/* 2.046 */
	{ 145, 512, 0xa9160110f66e24ff, 0x0000144b899f9dbb },	/* 1.968 */
	{ 146, 512, 0x77306a30379ae03b, 0x000014cb98eb1f81 },	/* 2.143 */
	{ 147, 512, 0x14f5985d2752319d, 0x000014feab821fc9 },	/* 2.064 */
	{ 148, 512, 0xa4b8ff11de7863f8, 0x0000154a0e60b9c9 },	/* 2.023 */
	{ 149, 512, 0x44b345426455c1b3, 0x000015999c3c569c },	/* 2.136 */
	{ 150, 512, 0x272677826049b46c, 0x000015c9697f4b92 },	/* 2.063 */
	{ 151, 512, 0x2f9216e2cd74fe40, 0x0000162b1f7bbd39 },	/* 1.974 */
	{ 152, 512, 0x706ae3e763ad8771, 0x00001661371c55e1 },	/* 2.210 */
	{ 153, 512, 0xf7fd345307c2480e, 0x000016e251f28b6a },	/* 2.006 */
	{ 154, 512, 0x6e94e3d26b3139eb, 0x000016f2429bb8c6 },	/* 2.193 */
	{ 155, 512, 0x5458bbfbb781fcba, 0x0000173efdeca1b9 },	/* 2.163 */
	{ 156, 512, 0xa80e2afeccd93b33, 0x000017bfdcb78adc },	/* 2.046 */
	{ 157, 512, 0x1e4ccbb22796cf9d, 0x00001826fdcc39c9 },	/* 2.084 */
	{ 158, 512, 0x8fba4b676aaa3663, 0x00001841a1379480 },	/* 2.264 */
	{ 159, 512, 0xf82b843814b315fa, 0x000018886e19b8a3 },	/* 2.074 */
	{ 160, 512, 0x7f21e920ecf753a3, 0x0000191812ca0ea7 },	/* 2.282 */
	{ 161, 512, 0x48bb8ea2c4caa620, 0x0000192f310faccf },	/* 2.148 */
	{ 162, 512, 0x5cdb652b4952c91b, 0x0000199e1d7437c7 },	/* 2.355 */
	{ 163, 512, 0x6ac1ba6f78c06cd4, 0x000019cd11f82c70 },	/* 2.164 */
	{ 164, 512, 0x9faf5f9ca2669a56, 0x00001a18d5431f6a },	/* 2.393 */
	{ 165, 512, 0xaa57e9383eb01194, 0x00001a9e7d253d85 },	/* 2.178 */
	{ 166, 512, 0x896967bf495c34d2, 0x00001afb8319b9fc },	/* 2.334 */
	{ 167, 512, 0xdfad5f05de225f1b, 0x00001b3a59c3093b },	/* 2.266 */
	{ 168, 512, 0xfd299a99f9f2abdd, 0x00001bb6f1a10799 },	/* 2.304 */
	{ 169, 512, 0xdda239e798fe9fd4, 0x00001bfae0c9692d },	/* 2.218 */
	{ 170, 512, 0x5fca670414a32c3e, 0x00001c22129dbcff },	/* 2.377 */
	{ 171, 512, 0x1bb8934314b087de, 0x00001c955db36cd0 },	/* 2.155 */
	{ 172, 512, 0xd96394b4b082200d, 0x00001cfc8619b7e6 },	/* 2.404 */
	{ 173, 512, 0xb612a7735b1c8cbc, 0x00001d303acdd585 },	/* 2.205 */
	{ 174, 512, 0x28e7430fe5875fe1, 0x00001d7ed5b3697d },	/* 2.359 */
	{ 175, 512, 0x5038e89efdd981b9, 0x00001dc40ec35c59 },	/* 2.158 */
	{ 176, 512, 0x075fd78f1d14db7c, 0x00001e31c83b4a2b },	/* 2.614 */
	{ 177, 512, 0xc50fafdb5021be15, 0x00001e7cdac82fbc },	/* 2.239 */
	{ 178, 512, 0xe6dc7572ce7b91c7, 0x00001edd8bb454fc },	/* 2.493 */
	{ 179, 512, 0x21f7843e7beda537, 0x00001f3a8e019d6c },	/* 2.327 */
	{ 180, 512, 0xc83385e20b43ec82, 0x00001f70735ec137 },	/* 2.231 */
	{ 181, 512, 0xca818217dddb21fd, 0x0000201ca44c5a3c },	/* 2.237 */
	{ 182, 512, 0xe6035defea48f933, 0x00002038e3346658 },	/* 2.691 */
	{ 183, 512, 0x47262a4f953dac5a, 0x000020c2e554314e },	/* 2.170 */
	{ 184, 512, 0xe24c7246260873ea, 0x000021197e618d64 },	/* 2.600 */
	{ 185, 512, 0xeef6b57c9b58e9e1, 0x0000217ea48ecddc },	/* 2.391 */
	{ 186, 512, 0x2becd3346e386142, 0x000021c496d4a5f9 },	/* 2.677 */
	{ 187, 512, 0x63c6207bdf3b40a3, 0x0000220e0f2eec0c },	/* 2.410 */
	{ 188, 512, 0x3056ce8989767d4b, 0x0000228eb76cd137 },	/* 2.776 */
	{ 189, 512, 0x91af61c307cee780, 0x000022e17e2ea501 },	/* 2.266 */
	{ 190, 512, 0xda359da225f6d54f, 0x00002358a2debc19 },	/* 2.717 */
	{ 191, 512, 0x0a5f7a2a55607ba0, 0x0000238a79dac18c },	/* 2.474 */
	{ 192, 512, 0x27bb75bf5224638a, 0x00002403a58e2351 },	/* 2.673 */
	{ 193, 512, 0x1ebfdb94630f5d0f, 0x00002492a10cb339 },	/* 2.420 */
	{ 194, 512, 0x6eae5e51d9c5f6fb, 0x000024ce4bf98715 },	/* 2.898 */
	{ 195, 512, 0x08d903b4daedc2e0, 0x0000250d1e15886c },	/* 2.363 */
	{ 196, 512, 0xc722a2f7fa7cd686, 0x0000258a99ed0c9e },	/* 2.747 */
	{ 197, 512, 0x8f71faf0e54e361d, 0x000025dee11976f5 },	/* 2.531 */
	{ 198, 512, 0x87f64695c91a54e7, 0x0000264e00a43da0 },	/* 2.707 */
	{ 199, 512, 0xc719cbac2c336b92, 0x000026d327277ac1 },	/* 2.315 */
	{ 200, 512, 0xe7e647afaf771ade, 0x000027523a5c44bf },	/* 3.012 */
	{ 201, 512, 0x12d4b5c38ce8c946, 0x0000273898432545 },	/* 2.378 */
	{ 202, 512, 0xf2e0cd4067bdc94a, 0x000027e47bb2c935 },	/* 2.969 */
	{ 203, 512, 0x21b79f14d6d947d3, 0x0000281e64977f0d },	/* 2.594 */
	{ 204, 512, 0x515093f952f18cd6, 0x0000289691a473fd },	/* 2.763 */
	{ 205, 512, 0xd47b160a1b1022c8, 0x00002903e8b52411 },	/* 2.457 */
	{ 206, 512, 0xc02fc96684715a16, 0x0000297515608601 },	/* 3.057 */
	{ 207, 512, 0xef51e68efba72ed0, 0x000029ef73604804 },	/* 2.590 */
	{ 208, 512, 0x9e3be6e5448b4f33, 0x00002a2846ed074b },	/* 3.047 */
	{ 209, 512, 0x81d446c6d5fec063, 0x00002a92ca693455 },	/* 2.676 */
	{ 210, 512, 0xff215de8224e57d5, 0x00002b2271fe3729 },	/* 2.993 */
	{ 211, 512, 0xe2524d9ba8f69796, 0x00002b64b99c3ba2 },	/* 2.457 */
	{ 212, 512, 0xf6b28e26097b7e4b, 0x00002bd768b6e068 },	/* 3.182 */
	{ 213, 512, 0x893a487f30ce1644, 0x00002c67f722b4b2 },	/* 2.563 */
	{ 214, 512, 0x386566c3fc9871df, 0x00002cc1cf8b4037 },	/* 3.025 */
	{ 215, 512, 0x1e0ed78edf1f558a, 0x00002d3948d36c7f },	/* 2.730 */
	{ 216, 512, 0xe3bc20c31e61f113, 0x00002d6d6b12e025 },	/* 3.036 */
	{ 217, 512, 0xd6c3ad2e23021882, 0x00002deff7572241 },	/* 2.722 */
	{ 218, 512, 0xb4a9f95cf0f69c5a, 0x00002e67d537aa36 },	/* 3.356 */
	{ 219, 512, 0x6e98ed6f6c38e82f, 0x00002e9720626789 },	/* 2.697 */
	{ 220, 512, 0x2e01edba33fddac7, 0x00002f407c6b0198 },	/* 2.979 */
	{ 221, 512, 0x559d02e1f5f57ccc, 0x00002fb6a5ab4f24 },	/* 2.858 */
	{ 222, 512, 0xac18f5a916adcd8e, 0x0000304ae1c5c57e },	/* 3.258 */
	{ 223, 512, 0x15789fbaddb86f4b, 0x0000306f6e019c78 },	/* 2.693 */
	{ 224, 512, 0xf4a9c36d5bc4c408, 0x000030da40434213 },	/* 3.259 */
	{ 225, 512, 0xf640f90fd2727f44, 0x00003189ed37b90c },	/* 2.733 */
	{ 226, 512, 0xb5313d390d61884a, 0x000031e152616b37 },	/* 3.235 */
	{ 227, 512, 0x4bae6b3ce9160939, 0x0000321f40aeac42 },	/* 2.983 */
	{ 228, 512, 0x838c34480f1a66a1, 0x000032f389c0f78e },	/* 3.308 */
	{ 229, 512, 0xb1c4a52c8e3d6060, 0x0000330062a40284 },	/* 2.715 */
	{ 230, 512, 0xe0f1110c6d0ed822, 0x0000338be435644f },	/* 3.540 */
	{ 231, 512, 0x9f1a8ccdcea68d4b, 0x000034045a4e97e1 },	/* 2.779 */
	{ 232, 512, 0x3261ed62223f3099, 0x000034702cfc401c },	/* 3.084 */
	{ 233, 512, 0xf2191e2311022d65, 0x00003509dd19c9fc },	/* 2.987 */
	{ 234, 512, 0xf102a395c2033abc, 0x000035654dc96fae },	/* 3.341 */
	{ 235, 512, 0x11fe378f027906b6, 0x000035b5193b0264 },	/* 2.793 */
	{ 236, 512, 0xf777f2c026b337aa, 0x000036704f5d9297 },	/* 3.518 */
	{ 237, 512, 0x1b04e9c2ee143f32, 0x000036dfbb7af218 },	/* 2.962 */
	{ 238, 512, 0x2fcec95266f9352c, 0x00003785c8df24a9 },	/* 3.196 */
	{ 239, 512, 0xfe2b0e47e427dd85, 0x000037cbdf5da729 },	/* 2.914 */
	{ 240, 512, 0x72b49bf2225f6c6d, 0x0000382227c15855 },	/* 3.408 */
	{ 241, 512, 0x50486b43df7df9c7, 0x0000389b88be6453 },	/* 2.903 */
	{ 242, 512, 0x5192a3e53181c8ab, 0x000038ddf3d67263 },	/* 3.778 */
	{ 243, 512, 0xe9f5d8365296fd5e, 0x0000399f1c6c9e9c },	/* 3.026 */
	{ 244, 512, 0xc740263f0301efa8, 0x00003a147146512d },	/* 3.347 */
	{ 245, 512, 0x23cd0f2b5671e67d, 0x00003ab10bcc0d9d },	/* 3.212 */
	{ 246, 512, 0x002ccc7e5cd41390, 0x00003ad6cd14a6c0 },	/* 3.482 */
	{ 247, 512, 0x9aafb3c02544b31b, 0x00003b8cb8779fb0 },	/* 3.146 */
	{ 248, 512, 0x72ba07a78b121999, 0x00003c24142a5a3f },	/* 3.626 */
	{ 249, 512, 0x3d784aa58edfc7b4, 0x00003cd084817d99 },	/* 2.952 */
	{ 250, 512, 0xaab750424d8004af, 0x00003d506a8e098e },	/* 3.463 */
	{ 251, 512, 0x84403fcf8e6b5ca2, 0x00003d4c54c2aec4 },	/* 3.131 */
	{ 252, 512, 0x71eb7455ec98e207, 0x00003e655715cf2c },	/* 3.538 */
	{ 253, 512, 0xd752b4f19301595b, 0x00003ecd7b2ca5ac },	/* 2.974 */
	{ 254, 512, 0xc4674129750499de, 0x00003e99e86d3e95 },	/* 3.843 */
	{ 255, 512, 0x9772baff5cd12ef5, 0x00003f895c019841 },	/* 3.088 */
};

/*
 * Verify the map is valid. Each device index must appear exactly
 * once in every row, and the permutation array checksum must match.
 */
static int
verify_perms(uint8_t *perms, uint64_t children, uint64_t nperms,
    uint64_t checksum)
{
	int countssz = sizeof (uint16_t) * children;
	uint16_t *counts = kmem_zalloc(countssz, KM_SLEEP);

	for (int i = 0; i < nperms; i++) {
		for (int j = 0; j < children; j++) {
			uint8_t val = perms[(i * children) + j];

			if (val >= children || counts[val] != i) {
				kmem_free(counts, countssz);
				return (EINVAL);
			}

			counts[val]++;
		}
	}

	if (checksum != 0) {
		int permssz = sizeof (uint8_t) * children * nperms;
		zio_cksum_t cksum;

		fletcher_4_native_varsize(perms, permssz, &cksum);

		if (checksum != cksum.zc_word[0]) {
			kmem_free(counts, countssz);
			return (ECKSUM);
		}
	}

	kmem_free(counts, countssz);

	return (0);
}
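
/*
 * For example, assuming a hypothetical map with children = 3 and
 * nperms = 2, the permutation array
 *
 *	{ 0, 1, 2,	(row 0)
 *	  2, 0, 1 }	(row 1)
 *
 * passes verify_perms() when called with a zero checksum since each
 * child index appears exactly once in each row. Changing the final
 * entry to 0 would make child 0 appear twice in row 1 (and child 1
 * not at all), and EINVAL would be returned.
 */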

/*
 * Generate the permutation array for the draid_map_t. These maps control
 * the placement of all data in a dRAID. Therefore it's critical that the
 * seed always generates the same mapping. We provide our own pseudo-random
 * number generator for this purpose.
 */
int
vdev_draid_generate_perms(const draid_map_t *map, uint8_t **permsp)
{
	VERIFY3U(map->dm_children, >=, VDEV_DRAID_MIN_CHILDREN);
	VERIFY3U(map->dm_children, <=, VDEV_DRAID_MAX_CHILDREN);
	VERIFY3U(map->dm_seed, !=, 0);
	VERIFY3U(map->dm_nperms, !=, 0);
	VERIFY3P(map->dm_perms, ==, NULL);

#ifdef _KERNEL
	/*
	 * The kernel code always provides both a map_seed and checksum.
	 * Only the tests/zfs-tests/cmd/draid/draid.c utility will provide
	 * a zero checksum when generating new candidate maps.
	 */
	VERIFY3U(map->dm_checksum, !=, 0);
#endif
	uint64_t children = map->dm_children;
	uint64_t nperms = map->dm_nperms;
	int rowsz = sizeof (uint8_t) * children;
	int permssz = rowsz * nperms;
	uint8_t *perms;

	/* Allocate the permutation array */
	perms = vmem_alloc(permssz, KM_SLEEP);

	/* Setup an initial row with a known pattern */
	uint8_t *initial_row = kmem_alloc(rowsz, KM_SLEEP);
	for (int i = 0; i < children; i++)
		initial_row[i] = i;

	uint64_t draid_seed[2] = { VDEV_DRAID_SEED, map->dm_seed };
	uint8_t *current_row, *previous_row = initial_row;

	/*
	 * Perform a Fisher-Yates shuffle of each row using the previous
	 * row as the starting point. An initial_row with known pattern
	 * is used as the input for the first row.
	 */
	for (int i = 0; i < nperms; i++) {
		current_row = &perms[i * children];
		memcpy(current_row, previous_row, rowsz);

		for (int j = children - 1; j > 0; j--) {
			uint64_t k = vdev_draid_rand(draid_seed) % (j + 1);
			uint8_t val = current_row[j];
			current_row[j] = current_row[k];
			current_row[k] = val;
		}

		previous_row = current_row;
	}

	kmem_free(initial_row, rowsz);

	int error = verify_perms(perms, children, nperms, map->dm_checksum);
	if (error) {
		vmem_free(perms, permssz);
		return (error);
	}

	*permsp = perms;

	return (0);
}

/*
 * Lookup the fixed draid_map_t for the requested number of children.
 */
int
vdev_draid_lookup_map(uint64_t children, const draid_map_t **mapp)
{
	for (int i = 0; i < VDEV_DRAID_MAX_MAPS; i++) {
		if (draid_maps[i].dm_children == children) {
			*mapp = &draid_maps[i];
			return (0);
		}
	}

	return (ENOENT);
}

/*
 * Lookup the permutation array and iteration id for the provided offset.
 */
static void
vdev_draid_get_perm(vdev_draid_config_t *vdc, uint64_t pindex,
    uint8_t **base, uint64_t *iter)
{
	uint64_t ncols = vdc->vdc_children;
	uint64_t poff = pindex % (vdc->vdc_nperms * ncols);

	*base = vdc->vdc_perms + (poff / ncols) * ncols;
	*iter = poff % ncols;
}

static inline uint64_t
vdev_draid_permute_id(vdev_draid_config_t *vdc,
    uint8_t *base, uint64_t iter, uint64_t index)
{
	return ((base[index] + iter) % vdc->vdc_children);
}

/*
 * Return the asize which is the psize rounded up to a full group width;
 * i.e. vdev_draid_psize_to_asize().
 */
static uint64_t
vdev_draid_asize(vdev_t *vd, uint64_t psize)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;
	uint64_t ashift = vd->vdev_ashift;

	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);

	uint64_t rows = ((psize - 1) / (vdc->vdc_ndata << ashift)) + 1;
	uint64_t asize = (rows * vdc->vdc_groupwidth) << ashift;

	ASSERT3U(asize, !=, 0);
	ASSERT3U(asize % (vdc->vdc_groupwidth), ==, 0);

	return (asize);
}

/*
 * Deflate the asize to the psize; this includes stripping parity.
 */
uint64_t
vdev_draid_asize_to_psize(vdev_t *vd, uint64_t asize)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;

	ASSERT0(asize % vdc->vdc_groupwidth);

	return ((asize / vdc->vdc_groupwidth) * vdc->vdc_ndata);
}
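
/*
 * For example, assuming an 8 data + 1 parity group (vdc_ndata = 8,
 * vdc_groupwidth = 9) and ashift = 12 (4K sectors):
 *
 *	vdev_draid_asize(vd, 32K) = 1 row  * (9 << 12) = 36K
 *	vdev_draid_asize(vd, 48K) = 2 rows * (9 << 12) = 72K
 *
 * A 48K block fills one full stripe and half of a second, but a full
 * stripe width is always allocated. The reverse conversion,
 * vdev_draid_asize_to_psize(vd, 36K) = (36K / 9) * 8 = 32K, strips the
 * parity columns again.
 */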

/*
 * Convert a logical offset to the corresponding group number.
 */
static uint64_t
vdev_draid_offset_to_group(vdev_t *vd, uint64_t offset)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;

	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);

	return (offset / vdc->vdc_groupsz);
}

/*
 * Convert a group number to the logical starting offset for that group.
 */
static uint64_t
vdev_draid_group_to_offset(vdev_t *vd, uint64_t group)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;

	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);

	return (group * vdc->vdc_groupsz);
}
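
/*
 * For example, using the 144M group size from the layout example above
 * (16M row height * 9 drives wide): a logical offset of 300M falls in
 * group 300M / 144M = 2, and vdev_draid_group_to_offset(vd, 2) returns
 * the 288M logical offset where that group begins.
 */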

static void
vdev_draid_map_free_vsd(zio_t *zio)
{
	raidz_map_t *rm = zio->io_vsd;

	ASSERT0(rm->rm_freed);
	rm->rm_freed = B_TRUE;

	if (rm->rm_reports == 0) {
		vdev_raidz_map_free(rm);
	}
}

/*ARGSUSED*/
static void
vdev_draid_cksum_free(void *arg, size_t ignored)
{
	raidz_map_t *rm = arg;

	ASSERT3U(rm->rm_reports, >, 0);

	if (--rm->rm_reports == 0 && rm->rm_freed)
		vdev_raidz_map_free(rm);
}

static void
vdev_draid_cksum_finish(zio_cksum_report_t *zcr, const abd_t *good_data)
{
	raidz_map_t *rm = zcr->zcr_cbdata;
	const size_t c = zcr->zcr_cbinfo;
	uint64_t skip_size = zcr->zcr_sector;
	uint64_t parity_size;
	size_t x, offset, size;

	if (good_data == NULL) {
		zfs_ereport_finish_checksum(zcr, NULL, NULL, B_FALSE);
		return;
	}

	/*
	 * Detailed cksum reporting is currently only supported for single
	 * row draid mappings; this covers the vast majority of zios. Only
	 * a dRAID zio which spans groups will have multiple rows.
	 */
	if (rm->rm_nrows != 1) {
		zfs_ereport_finish_checksum(zcr, NULL, NULL, B_FALSE);
		return;
	}

	raidz_row_t *rr = rm->rm_row[0];
	const abd_t *good = NULL;
	const abd_t *bad = rr->rr_col[c].rc_abd;

	if (c < rr->rr_firstdatacol) {
		/*
		 * The first time through, calculate the parity blocks for
		 * the good data (this relies on the fact that the good
		 * data never changes for a given logical zio).
		 */
		if (rr->rr_col[0].rc_gdata == NULL) {
			abd_t *bad_parity[VDEV_DRAID_MAXPARITY];

			/*
			 * Set up the rr_col[]s to generate the parity for
			 * good_data, first saving the parity bufs and
			 * replacing them with buffers to hold the result.
			 */
			for (x = 0; x < rr->rr_firstdatacol; x++) {
				bad_parity[x] = rr->rr_col[x].rc_abd;
				rr->rr_col[x].rc_abd = rr->rr_col[x].rc_gdata =
				    abd_alloc_sametype(rr->rr_col[x].rc_abd,
				    rr->rr_col[x].rc_size);
			}

			/*
			 * Fill in the data columns from good_data being
			 * careful to pad short columns and empty columns
			 * with a skip sector.
			 */
			uint64_t good_size = abd_get_size((abd_t *)good_data);

			offset = 0;
			for (; x < rr->rr_cols; x++) {
				abd_put(rr->rr_col[x].rc_abd);

				if (offset == good_size) {
					/* empty data column (small write) */
					rr->rr_col[x].rc_abd =
					    abd_get_zeros(skip_size);
				} else if (x < rr->rr_bigcols) {
					/* this is a "big column" */
					size = rr->rr_col[x].rc_size;
					rr->rr_col[x].rc_abd =
					    abd_get_offset_size(
					    (abd_t *)good_data, offset, size);
					offset += size;
				} else {
					/* short data column, add skip sector */
					size = rr->rr_col[x].rc_size -
					    skip_size;
					rr->rr_col[x].rc_abd = abd_alloc(
					    rr->rr_col[x].rc_size, B_TRUE);
					abd_copy_off(rr->rr_col[x].rc_abd,
					    (abd_t *)good_data, 0, offset,
					    size);
					abd_zero_off(rr->rr_col[x].rc_abd,
					    size, skip_size);
					offset += size;
				}
			}

			/*
			 * Construct the parity from the good data.
			 */
			vdev_raidz_generate_parity_row(rm, rr);

			/* restore everything back to its original state */
			for (x = 0; x < rr->rr_firstdatacol; x++)
				rr->rr_col[x].rc_abd = bad_parity[x];

			offset = 0;
			for (x = rr->rr_firstdatacol; x < rr->rr_cols; x++) {
				if (offset == good_size || x < rr->rr_bigcols)
					abd_put(rr->rr_col[x].rc_abd);
				else
					abd_free(rr->rr_col[x].rc_abd);

				rr->rr_col[x].rc_abd = abd_get_offset_size(
				    rr->rr_abd_copy, offset,
				    rr->rr_col[x].rc_size);
				offset += rr->rr_col[x].rc_size;
			}
		}

		ASSERT3P(rr->rr_col[c].rc_gdata, !=, NULL);
		good = abd_get_offset_size(rr->rr_col[c].rc_gdata, 0,
		    rr->rr_col[c].rc_size);
	} else {
		/* adjust good_data to point at the start of our column */
		parity_size = size = rr->rr_col[0].rc_size;
		if (c >= rr->rr_bigcols) {
			size -= skip_size;
			zcr->zcr_length = size;
		}

		/* empty column */
		if (size == 0) {
			zfs_ereport_finish_checksum(zcr, NULL, NULL, B_TRUE);
			return;
		}

		offset = 0;
		for (x = rr->rr_firstdatacol; x < c; x++) {
			if (x < rr->rr_bigcols) {
				offset += parity_size;
			} else {
				offset += parity_size - skip_size;
			}
		}

		good = abd_get_offset_size((abd_t *)good_data, offset, size);
	}

	/* we drop the ereport if it ends up that the data was good */
	zfs_ereport_finish_checksum(zcr, good, bad, B_TRUE);
	abd_put((abd_t *)good);
}

/*
 * Invoked indirectly by zfs_ereport_start_checksum(), called
 * below when our read operation fails completely. The main point
 * is to keep a copy of everything we read from disk, so that at
 * vdev_draid_cksum_finish() time we can compare it with the good data.
 */
static void
vdev_draid_cksum_report(zio_t *zio, zio_cksum_report_t *zcr, void *arg)
{
	size_t c = (size_t)(uintptr_t)arg;
	raidz_map_t *rm = zio->io_vsd;

	/* set up the report and bump the refcount */
	zcr->zcr_cbdata = rm;
	zcr->zcr_cbinfo = c;
	zcr->zcr_finish = vdev_draid_cksum_finish;
	zcr->zcr_free = vdev_draid_cksum_free;

	rm->rm_reports++;
	ASSERT3U(rm->rm_reports, >, 0);

	if (rm->rm_row[0]->rr_abd_copy != NULL)
		return;

	/*
	 * It's the first time we're called for this raidz_map_t, so we need
	 * to copy the data aside; there's no guarantee that our zio's buffer
	 * won't be re-used for something else.
	 *
	 * Our parity data is already in separate buffers, so there's no need
	 * to copy them. Furthermore, all columns should have been expanded
	 * by vdev_draid_map_alloc_empty() when attempting reconstruction.
	 */
	for (int i = 0; i < rm->rm_nrows; i++) {
		raidz_row_t *rr = rm->rm_row[i];
		size_t offset = 0;
		size_t size = 0;

		for (c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
			ASSERT3U(rr->rr_col[c].rc_size, ==,
			    rr->rr_col[0].rc_size);
			size += rr->rr_col[c].rc_size;
		}

		rr->rr_abd_copy = abd_alloc_for_io(size, B_FALSE);

		for (c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
			raidz_col_t *col = &rr->rr_col[c];
			abd_t *tmp = abd_get_offset_size(rr->rr_abd_copy,
			    offset, col->rc_size);

			abd_copy(tmp, col->rc_abd, col->rc_size);

			if (abd_is_gang(col->rc_abd))
				abd_free(col->rc_abd);
			else
				abd_put(col->rc_abd);

			col->rc_abd = tmp;
			offset += col->rc_size;
		}
		ASSERT3U(offset, ==, size);
	}
}

const zio_vsd_ops_t vdev_draid_vsd_ops = {
	.vsd_free = vdev_draid_map_free_vsd,
	.vsd_cksum_report = vdev_draid_cksum_report
};

/*
 * Full stripe writes. When writing, all columns (D+P) are required. Parity
 * is calculated over all the columns, including empty zero filled sectors,
 * and each is written to disk. While only the data columns are needed for
 * a normal read, all of the columns are required for reconstruction when
 * performing a sequential resilver.
 *
 * For "big columns" it's sufficient to map the correct range of the zio ABD.
 * Partial columns require allocating a gang ABD in order to zero fill the
 * empty sectors. When the column is empty a zero filled sector must be
 * mapped. In all cases the data ABDs must be the same size as the parity
 * ABDs (e.g. rc->rc_size == parity_size).
 */
static void
vdev_draid_map_alloc_write(zio_t *zio, uint64_t abd_offset, raidz_row_t *rr)
{
	uint64_t skip_size = 1ULL << zio->io_vd->vdev_top->vdev_ashift;
	uint64_t parity_size = rr->rr_col[0].rc_size;
	uint64_t abd_off = abd_offset;

	ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
	ASSERT3U(parity_size, ==, abd_get_size(rr->rr_col[0].rc_abd));

	for (uint64_t c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
		raidz_col_t *rc = &rr->rr_col[c];

		if (rc->rc_size == 0) {
			/* empty data column (small write), add a skip sector */
			ASSERT3U(skip_size, ==, parity_size);
			rc->rc_abd = abd_get_zeros(skip_size);
		} else if (rc->rc_size == parity_size) {
			/* this is a "big column" */
			rc->rc_abd = abd_get_offset_size(zio->io_abd,
			    abd_off, rc->rc_size);
		} else {
			/* short data column, add a skip sector */
			ASSERT3U(rc->rc_size + skip_size, ==, parity_size);
			rc->rc_abd = abd_alloc_gang_abd();
			abd_gang_add(rc->rc_abd, abd_get_offset_size(
			    zio->io_abd, abd_off, rc->rc_size), B_TRUE);
			abd_gang_add(rc->rc_abd, abd_get_zeros(skip_size),
			    B_TRUE);
		}

		ASSERT3U(abd_get_size(rc->rc_abd), ==, parity_size);

		abd_off += rc->rc_size;
		rc->rc_size = parity_size;
	}

	IMPLY(abd_offset != 0, abd_off == zio->io_size);
}
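
/*
 * For example, assuming an 8 data + 1 parity group with 4K sectors and a
 * 48K write (12 sectors): parity_size is 8K, data columns 1-4 are "big
 * columns" mapped directly from the zio ABD (8K each), and data columns
 * 5-8 are short columns backed by a gang ABD of 4K of zio data plus a 4K
 * zero filled skip sector. Every column ends up parity_size (8K) long.
 */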

/*
 * Scrub/resilver reads. In order to store the contents of the skip sectors
 * an additional ABD is allocated. The columns are handled in the same way
 * as a full stripe write except instead of using the zero ABD the newly
 * allocated skip ABD is used to back the skip sectors. In all cases the
 * data ABD must be the same size as the parity ABDs.
 */
static void
vdev_draid_map_alloc_scrub(zio_t *zio, uint64_t abd_offset, raidz_row_t *rr)
{
	uint64_t skip_size = 1ULL << zio->io_vd->vdev_top->vdev_ashift;
	uint64_t parity_size = rr->rr_col[0].rc_size;
	uint64_t abd_off = abd_offset;
	uint64_t skip_off = 0;

	ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
	ASSERT3P(rr->rr_abd_empty, ==, NULL);

	if (rr->rr_nempty > 0) {
		rr->rr_abd_empty = abd_alloc_linear(rr->rr_nempty * skip_size,
		    B_FALSE);
	}

	for (uint64_t c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
		raidz_col_t *rc = &rr->rr_col[c];

		if (rc->rc_size == 0) {
			/* empty data column (small read), add a skip sector */
			ASSERT3U(skip_size, ==, parity_size);
			ASSERT3U(rr->rr_nempty, !=, 0);
			rc->rc_abd = abd_get_offset_size(rr->rr_abd_empty,
			    skip_off, skip_size);
			skip_off += skip_size;
		} else if (rc->rc_size == parity_size) {
			/* this is a "big column" */
			rc->rc_abd = abd_get_offset_size(zio->io_abd,
			    abd_off, rc->rc_size);
		} else {
			/* short data column, add a skip sector */
			ASSERT3U(rc->rc_size + skip_size, ==, parity_size);
			ASSERT3U(rr->rr_nempty, !=, 0);
			rc->rc_abd = abd_alloc_gang_abd();
			abd_gang_add(rc->rc_abd, abd_get_offset_size(
			    zio->io_abd, abd_off, rc->rc_size), B_TRUE);
			abd_gang_add(rc->rc_abd, abd_get_offset_size(
			    rr->rr_abd_empty, skip_off, skip_size), B_TRUE);
			skip_off += skip_size;
		}

		uint64_t abd_size = abd_get_size(rc->rc_abd);
		ASSERT3U(abd_size, ==, abd_get_size(rr->rr_col[0].rc_abd));

		/*
		 * Increase rc_size so the skip ABD is included in subsequent
		 * parity calculations.
		 */
		abd_off += rc->rc_size;
		rc->rc_size = abd_size;
	}

	IMPLY(abd_offset != 0, abd_off == zio->io_size);
	ASSERT3U(skip_off, ==, rr->rr_nempty * skip_size);
}

/*
 * Normal reads. In this common case only the columns containing data
 * are read in to the zio ABDs. Neither the parity columns nor the empty
 * skip sectors are read unless the checksum fails verification, in which
 * case vdev_raidz_read_all() will call vdev_draid_map_alloc_empty() to
 * expand the raid map in order to allow reconstruction using the parity
 * data and skip sectors.
 */
static void
vdev_draid_map_alloc_read(zio_t *zio, uint64_t abd_offset, raidz_row_t *rr)
{
	uint64_t abd_off = abd_offset;

	ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);

	for (uint64_t c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
		raidz_col_t *rc = &rr->rr_col[c];

		if (rc->rc_size > 0) {
			rc->rc_abd = abd_get_offset_size(zio->io_abd,
			    abd_off, rc->rc_size);
			abd_off += rc->rc_size;
		}
	}

	IMPLY(abd_offset != 0, abd_off == zio->io_size);
}
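
/*
 * Continuing the 48K example above: a normal read only maps the data
 * columns into the zio ABD (8K for the big columns, 4K for the short
 * columns). The parity column and the skip sectors are not touched
 * unless checksum verification fails, at which point the row is expanded
 * by vdev_draid_map_alloc_empty() below.
 */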

/*
 * Converts a normal "read" raidz_row_t to a "scrub" raidz_row_t. The key
 * difference is that an ABD is allocated to back skip sectors so they may
 * be read in to memory, verified, and repaired if needed.
 */
void
vdev_draid_map_alloc_empty(zio_t *zio, raidz_row_t *rr)
{
	uint64_t skip_size = 1ULL << zio->io_vd->vdev_top->vdev_ashift;
	uint64_t parity_size = rr->rr_col[0].rc_size;
	uint64_t skip_off = 0;

	ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
	ASSERT3P(rr->rr_abd_empty, ==, NULL);

	if (rr->rr_nempty > 0) {
		rr->rr_abd_empty = abd_alloc_linear(rr->rr_nempty * skip_size,
		    B_FALSE);
	}

	for (uint64_t c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
		raidz_col_t *rc = &rr->rr_col[c];

		if (rc->rc_size == 0) {
			/* empty data column (small read), add a skip sector */
			ASSERT3U(skip_size, ==, parity_size);
			ASSERT3U(rr->rr_nempty, !=, 0);
			ASSERT3P(rc->rc_abd, ==, NULL);
			rc->rc_abd = abd_get_offset_size(rr->rr_abd_empty,
			    skip_off, skip_size);
			skip_off += skip_size;
		} else if (rc->rc_size == parity_size) {
			/* this is a "big column", nothing to add */
			ASSERT3P(rc->rc_abd, !=, NULL);
		} else {
			/* short data column, add a skip sector */
			ASSERT3U(rc->rc_size + skip_size, ==, parity_size);
			ASSERT3U(rr->rr_nempty, !=, 0);
			ASSERT3P(rc->rc_abd, !=, NULL);
			ASSERT(!abd_is_gang(rc->rc_abd));
			abd_t *read_abd = rc->rc_abd;
			rc->rc_abd = abd_alloc_gang_abd();
			abd_gang_add(rc->rc_abd, read_abd, B_TRUE);
			abd_gang_add(rc->rc_abd, abd_get_offset_size(
			    rr->rr_abd_empty, skip_off, skip_size), B_TRUE);
			skip_off += skip_size;
		}

		/*
		 * Increase rc_size so the empty ABD is included in subsequent
		 * parity calculations.
		 */
		rc->rc_size = parity_size;
	}

	ASSERT3U(skip_off, ==, rr->rr_nempty * skip_size);
}

/*
 * Given a logical address within a dRAID configuration, return the physical
 * address on the first drive in the group that this address maps to
 * (at position 'start' in permutation number 'perm').
 */
static uint64_t
vdev_draid_logical_to_physical(vdev_t *vd, uint64_t logical_offset,
    uint64_t *perm, uint64_t *start)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;

	/* b is the dRAID (parent) sector offset. */
	uint64_t ashift = vd->vdev_top->vdev_ashift;
	uint64_t b_offset = logical_offset >> ashift;

	/*
	 * The height of a row in units of the vdev's minimum sector size.
	 * This is the amount of data written to each disk of each group
	 * in a given permutation.
	 */
	uint64_t rowheight_sectors = VDEV_DRAID_ROWHEIGHT >> ashift;

	/*
	 * We cycle through a disk permutation every groupsz * ngroups chunk
	 * of address space. Note that ngroups * groupsz must be a multiple
	 * of the number of data drives (ndisks) in order to guarantee
	 * alignment. So, for example, if our row height is 16MB, our group
	 * size is 10, and there are 13 data drives in the draid, then ngroups
	 * will be 13, we will change permutation every 2.08GB and each
	 * disk will have 160MB of data per chunk.
	 */
	uint64_t groupwidth = vdc->vdc_groupwidth;
	uint64_t ngroups = vdc->vdc_ngroups;
	uint64_t ndisks = vdc->vdc_ndisks;

	/*
	 * groupstart is where the group this IO will land in "starts" in
	 * the permutation array.
	 */
	uint64_t group = logical_offset / vdc->vdc_groupsz;
	uint64_t groupstart = (group * groupwidth) % ndisks;
	ASSERT3U(groupstart + groupwidth, <=, ndisks + groupstart);
	*start = groupstart;

	/* b_offset is the sector offset within a group chunk */
	b_offset = b_offset % (rowheight_sectors * groupwidth);
	ASSERT0(b_offset % groupwidth);

	/*
	 * Find the starting byte offset on each child vdev:
	 * - within a permutation there are ngroups groups spread over the
	 *   rows, where each row covers a slice portion of the disk
	 * - each permutation has (groupwidth * ngroups) / ndisks rows
	 * - so each permutation covers rows * slice portion of the disk
	 * - so we need to find the row where this IO group target begins
	 */
	*perm = group / ngroups;
	uint64_t row = (*perm * ((groupwidth * ngroups) / ndisks)) +
	    (((group % ngroups) * groupwidth) / ndisks);

	return (((rowheight_sectors * row) +
	    (b_offset / groupwidth)) << ashift);
}
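
/*
 * Using the example above (groupwidth = 10, ndisks = 13, ngroups = 13,
 * 16MB row height, 160MB group size): a logical offset of 500MB falls in
 * group 3, so groupstart = (3 * 10) % 13 = 4, perm = 3 / 13 = 0 and
 * row = (3 * 10) / 13 = 2. The group therefore begins at physical offset
 * 32MB (row 2) on the disk at position 4 in permutation 0, and since
 * 4 + 10 exceeds the 13 available disks its last column wraps onto row 3.
 */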

static uint64_t
vdev_draid_map_alloc_row(zio_t *zio, raidz_row_t **rrp, uint64_t io_offset,
    uint64_t abd_offset, uint64_t abd_size)
{
	vdev_t *vd = zio->io_vd;
	vdev_draid_config_t *vdc = vd->vdev_tsd;
	uint64_t ashift = vd->vdev_top->vdev_ashift;
	uint64_t io_size = abd_size;
	uint64_t io_asize = vdev_draid_asize(vd, io_size);
	uint64_t group = vdev_draid_offset_to_group(vd, io_offset);
	uint64_t start_offset = vdev_draid_group_to_offset(vd, group + 1);

	/*
	 * Limit the io_size to the space remaining in the group. A second
	 * row in the raidz_map_t is created for the remainder.
	 */
	if (io_offset + io_asize > start_offset) {
		io_size = vdev_draid_asize_to_psize(vd,
		    start_offset - io_offset);
	}

	/*
	 * At most a block may span the logical end of one group and the start
	 * of the next group. Therefore, at the end of a group the io_size must
	 * span the group width evenly and the remainder must be aligned to the
	 * start of the next group.
	 */
	IMPLY(abd_offset == 0 && io_size < zio->io_size,
	    (io_asize >> ashift) % vdc->vdc_groupwidth == 0);
	IMPLY(abd_offset != 0,
	    vdev_draid_group_to_offset(vd, group) == io_offset);

	/* Lookup starting byte offset on each child vdev */
	uint64_t groupstart, perm;
	uint64_t physical_offset = vdev_draid_logical_to_physical(vd,
	    io_offset, &perm, &groupstart);

	/*
	 * If there are fewer than groupwidth drives available after the group
	 * start, the group is going to wrap onto the next row. 'wrap' is the
	 * group disk number that starts on the next row.
	 */
	uint64_t ndisks = vdc->vdc_ndisks;
	uint64_t groupwidth = vdc->vdc_groupwidth;
	uint64_t wrap = groupwidth;

	if (groupstart + groupwidth > ndisks)
		wrap = ndisks - groupstart;

	/* The io size in units of the vdev's minimum sector size. */
	const uint64_t psize = io_size >> ashift;

	/*
	 * "Quotient": The number of data sectors for this stripe on all but
	 * the "big column" child vdevs that also contain "remainder" data.
	 */
	uint64_t q = psize / vdc->vdc_ndata;

	/*
	 * "Remainder": The number of partial stripe data sectors in this I/O.
	 * This will add a sector to some, but not all, child vdevs.
	 */
	uint64_t r = psize - q * vdc->vdc_ndata;

	/* The number of "big columns" - those which contain remainder data. */
	uint64_t bc = (r == 0 ? 0 : r + vdc->vdc_nparity);
	ASSERT3U(bc, <, groupwidth);

	/* The total number of data and parity sectors for this I/O. */
	uint64_t tot = psize + (vdc->vdc_nparity * (q + (r == 0 ? 0 : 1)));

	raidz_row_t *rr;
	rr = kmem_alloc(offsetof(raidz_row_t, rr_col[groupwidth]), KM_SLEEP);
	rr->rr_cols = groupwidth;
	rr->rr_scols = groupwidth;
	rr->rr_bigcols = bc;
	rr->rr_missingdata = 0;
	rr->rr_missingparity = 0;
	rr->rr_firstdatacol = vdc->vdc_nparity;
	rr->rr_abd_copy = NULL;
	rr->rr_abd_empty = NULL;
#ifdef ZFS_DEBUG
	rr->rr_offset = io_offset;
	rr->rr_size = io_size;
#endif
	*rrp = rr;

	uint8_t *base;
	uint64_t iter, asize = 0;
	vdev_draid_get_perm(vdc, perm, &base, &iter);
	for (uint64_t i = 0; i < groupwidth; i++) {
		raidz_col_t *rc = &rr->rr_col[i];
		uint64_t c = (groupstart + i) % ndisks;

		/* increment the offset if we wrap to the next row */
		if (i == wrap)
			physical_offset += VDEV_DRAID_ROWHEIGHT;

		rc->rc_devidx = vdev_draid_permute_id(vdc, base, iter, c);
		rc->rc_offset = physical_offset;
		rc->rc_abd = NULL;
		rc->rc_gdata = NULL;
		rc->rc_orig_data = NULL;
		rc->rc_error = 0;
		rc->rc_tried = 0;
		rc->rc_skipped = 0;
		rc->rc_repair = 0;
		rc->rc_need_orig_restore = B_FALSE;

		if (q == 0 && i >= bc)
			rc->rc_size = 0;
		else if (i < bc)
			rc->rc_size = (q + 1) << ashift;
		else
			rc->rc_size = q << ashift;

		asize += rc->rc_size;
	}

	ASSERT3U(asize, ==, tot << ashift);
	rr->rr_nempty = roundup(tot, groupwidth) - tot;
	IMPLY(bc > 0, rr->rr_nempty == groupwidth - bc);

	/* Allocate buffers for the parity columns */
	for (uint64_t c = 0; c < rr->rr_firstdatacol; c++) {
		raidz_col_t *rc = &rr->rr_col[c];
		rc->rc_abd = abd_alloc_linear(rc->rc_size, B_FALSE);
	}

	/*
	 * Map buffers for data columns and allocate/map buffers for skip
	 * sectors. There are three distinct cases for dRAID which are
	 * required to support sequential rebuild.
	 */
	if (zio->io_type == ZIO_TYPE_WRITE) {
		vdev_draid_map_alloc_write(zio, abd_offset, rr);
	} else if ((rr->rr_nempty > 0) &&
	    (zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER))) {
		vdev_draid_map_alloc_scrub(zio, abd_offset, rr);
	} else {
		ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
		vdev_draid_map_alloc_read(zio, abd_offset, rr);
	}

	return (io_size);
}
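
/*
 * For example, assuming an 8 data + 1 parity group (groupwidth = 9)
 * and ashift = 12:
 *
 * - A 48K I/O is psize = 12 sectors, so q = 12 / 8 = 1, r = 4 and
 *   bc = 4 + 1 = 5. Columns 0-4 (parity plus four big data columns)
 *   get two sectors each and columns 5-8 get one, for tot = 14 sectors
 *   plus rr_nempty = 18 - 14 = 4 skip sectors to fill out two stripes.
 *
 * - A 20K I/O is psize = 5 sectors, so q = 0, r = 5 and bc = 6.
 *   Columns 0-5 get a single sector, columns 6-8 are empty, and
 *   rr_nempty = 9 - 6 = 3. Note that empty columns only occur when
 *   q == 0 and short columns only when q > 0.
 */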

/*
 * Allocate the raidz mapping to be applied to the dRAID I/O. The parity
 * calculations for dRAID are identical to raidz however there are a few
 * differences in the layout.
 *
 * - dRAID always allocates a full stripe width. Any extra sectors due
 *   to this padding are zero filled and written to disk. They will be
 *   read back during a scrub or repair operation since they are included
 *   in the parity calculation. This property enables sequential
 *   resilvering.
 *
 * - When the block at the logical offset spans redundancy groups then two
 *   rows are allocated in the raidz_map_t. One row resides at the end of
 *   the first group and the other at the start of the following group.
 */
static raidz_map_t *
vdev_draid_map_alloc(zio_t *zio)
{
	raidz_row_t *rr[2];
	uint64_t abd_offset = 0;
	uint64_t abd_size = zio->io_size;
	uint64_t io_offset = zio->io_offset;
	uint64_t size;
	int nrows = 1;

	size = vdev_draid_map_alloc_row(zio, &rr[0], io_offset,
	    abd_offset, abd_size);
	if (size < abd_size) {
		vdev_t *vd = zio->io_vd;

		io_offset += vdev_draid_asize(vd, size);
		abd_offset += size;
		abd_size -= size;
		nrows++;

		ASSERT3U(io_offset, ==, vdev_draid_group_to_offset(
		    vd, vdev_draid_offset_to_group(vd, io_offset)));
		ASSERT3U(abd_offset, <, zio->io_size);
		ASSERT3U(abd_size, !=, 0);

		size = vdev_draid_map_alloc_row(zio, &rr[1],
		    io_offset, abd_offset, abd_size);
		VERIFY3U(size, ==, abd_size);
	}

	raidz_map_t *rm;
	rm = kmem_zalloc(offsetof(raidz_map_t, rm_row[nrows]), KM_SLEEP);
	rm->rm_ops = vdev_raidz_math_get_ops();
	rm->rm_nrows = nrows;
	rm->rm_row[0] = rr[0];
	if (nrows == 2)
		rm->rm_row[1] = rr[1];

	zio->io_vsd = rm;
	zio->io_vsd_ops = &vdev_draid_vsd_ops;

	return (rm);
}

/*
 * Given an offset into a dRAID return the next group width aligned offset
 * which can be used to start an allocation.
 */
static uint64_t
vdev_draid_get_astart(vdev_t *vd, const uint64_t start)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;

	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);

	return (roundup(start, vdc->vdc_groupwidth << vd->vdev_ashift));
}

/*
 * Allocatable space for dRAID is (children - nspares) * sizeof(smallest child)
 * rounded down to the last full slice. So each child must provide at least
 * 1 / (children - nspares) of its asize.
 */
static uint64_t
vdev_draid_min_asize(vdev_t *vd)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;

	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);

	return ((vd->vdev_min_asize + vdc->vdc_ndisks - 1) / (vdc->vdc_ndisks));
}

/*
 * When using dRAID the minimum allocation size is determined by the number
 * of data disks in the redundancy group. Full stripes are always used.
 */
static uint64_t
vdev_draid_min_alloc(vdev_t *vd)
{
	vdev_draid_config_t *vdc = vd->vdev_tsd;

	ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);

	return (vdc->vdc_ndata << vd->vdev_ashift);
}
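
/*
 * For example, with 8 data disks per group and ashift = 12 the minimum
 * allocation is 8 << 12 = 32K, and vdev_draid_get_astart() rounds
 * allocation offsets up to the 36K (9 << 12) group width boundary.
 */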
                 */
                for (int c = 0; c < vd->vdev_children; c++) {
                        vdev_t *cvd = vd->vdev_child[c];

                        if (!vdev_readable(cvd))
                                continue;

                        if (!vdev_draid_missing(cvd, physical_offset,
                            txg, size))
                                return (B_FALSE);
                }

                return (B_TRUE);
        }

        if (vd->vdev_ops == &vdev_draid_spare_ops) {
                /*
                 * When sequentially resilvering we don't have a proper
                 * txg range so instead we must presume all txgs are
                 * missing on this vdev until the resilver completes.
                 */
                if (vd->vdev_rebuild_txg != 0)
                        return (B_TRUE);

                /*
                 * DTL_MISSING is set for all prior txgs when a resilver
                 * is started in spa_vdev_attach().
                 */
                if (vdev_dtl_contains(vd, DTL_MISSING, txg, size))
                        return (B_TRUE);

                /*
                 * Consult the DTL on the relevant vdev. Either a vdev
                 * leaf or spare/replace mirror child may be returned so
                 * we must recursively call vdev_draid_missing().
                 */
                vd = vdev_draid_spare_get_child(vd, physical_offset);
                if (vd == NULL)
                        return (B_TRUE);

                return (vdev_draid_missing(vd, physical_offset,
                    txg, size));
        }

        return (vdev_dtl_contains(vd, DTL_MISSING, txg, size));
}

/*
 * Returns true if the txg is only partially replicated on the leaf vdevs.
 */
static boolean_t
vdev_draid_partial(vdev_t *vd, uint64_t physical_offset, uint64_t txg,
    uint64_t size)
{
        if (vd->vdev_ops == &vdev_spare_ops ||
            vd->vdev_ops == &vdev_replacing_ops) {
                /*
                 * Check all of the readable children: if any child is
                 * missing the txg range then it is partially replicated.
                 */
                for (int c = 0; c < vd->vdev_children; c++) {
                        vdev_t *cvd = vd->vdev_child[c];

                        if (!vdev_readable(cvd))
                                continue;

                        if (vdev_draid_partial(cvd, physical_offset, txg, size))
                                return (B_TRUE);
                }

                return (B_FALSE);
        }

        if (vd->vdev_ops == &vdev_draid_spare_ops) {
                /*
                 * When sequentially resilvering we don't have a proper
                 * txg range so instead we must presume all txgs are
                 * missing on this vdev until the resilver completes.
                 */
                if (vd->vdev_rebuild_txg != 0)
                        return (B_TRUE);

                /*
                 * DTL_MISSING is set for all prior txgs when a resilver
                 * is started in spa_vdev_attach().
                 */
                if (vdev_dtl_contains(vd, DTL_MISSING, txg, size))
                        return (B_TRUE);

                /*
                 * Consult the DTL on the relevant vdev. Either a vdev
                 * leaf or spare/replace mirror child may be returned so
                 * we must recursively call vdev_draid_partial().
                 */
                vd = vdev_draid_spare_get_child(vd, physical_offset);
                if (vd == NULL)
                        return (B_TRUE);

                return (vdev_draid_partial(vd, physical_offset, txg, size));
        }

        return (vdev_dtl_contains(vd, DTL_MISSING, txg, size));
}

/*
 * Determine if the vdev is readable at the given offset.
 */
boolean_t
vdev_draid_readable(vdev_t *vd, uint64_t physical_offset)
{
        if (vd->vdev_ops == &vdev_draid_spare_ops) {
                vd = vdev_draid_spare_get_child(vd, physical_offset);
                if (vd == NULL)
                        return (B_FALSE);
        }

        if (vd->vdev_ops == &vdev_spare_ops ||
            vd->vdev_ops == &vdev_replacing_ops) {

                for (int c = 0; c < vd->vdev_children; c++) {
                        vdev_t *cvd = vd->vdev_child[c];

                        if (!vdev_readable(cvd))
                                continue;

                        if (vdev_draid_readable(cvd, physical_offset))
                                return (B_TRUE);
                }

                return (B_FALSE);
        }

        return (vdev_readable(vd));
}

/*
 * Returns the first distributed spare found under the provided vdev tree.
 */
static vdev_t *
vdev_draid_find_spare(vdev_t *vd)
{
        if (vd->vdev_ops == &vdev_draid_spare_ops)
                return (vd);

        for (int c = 0; c < vd->vdev_children; c++) {
                vdev_t *svd = vdev_draid_find_spare(vd->vdev_child[c]);
                if (svd != NULL)
                        return (svd);
        }

        return (NULL);
}

/*
 * Returns B_TRUE if the passed in vdev is currently "faulted".
 * Faulted, in this context, means that the vdev represents a
 * replacing or sparing vdev tree.
 */
static boolean_t
vdev_draid_faulted(vdev_t *vd, uint64_t physical_offset)
{
        if (vd->vdev_ops == &vdev_draid_spare_ops) {
                vd = vdev_draid_spare_get_child(vd, physical_offset);
                if (vd == NULL)
                        return (B_FALSE);

                /*
                 * After resolving the distributed spare to a leaf vdev
                 * check the parent to determine if it's "faulted".
                 */
                vd = vd->vdev_parent;
        }

        return (vd->vdev_ops == &vdev_replacing_ops ||
            vd->vdev_ops == &vdev_spare_ops);
}

/*
 * Determine if the dRAID block at the logical offset is degraded.
 * Used by sequential resilver.
 */
static boolean_t
vdev_draid_group_degraded(vdev_t *vd, uint64_t offset)
{
        vdev_draid_config_t *vdc = vd->vdev_tsd;

        ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
        ASSERT3U(vdev_draid_get_astart(vd, offset), ==, offset);

        uint64_t groupstart, perm;
        uint64_t physical_offset = vdev_draid_logical_to_physical(vd,
            offset, &perm, &groupstart);

        uint8_t *base;
        uint64_t iter;
        vdev_draid_get_perm(vdc, perm, &base, &iter);

        for (uint64_t i = 0; i < vdc->vdc_groupwidth; i++) {
                uint64_t c = (groupstart + i) % vdc->vdc_ndisks;
                uint64_t cid = vdev_draid_permute_id(vdc, base, iter, c);
                vdev_t *cvd = vd->vdev_child[cid];

                /* Group contains a faulted vdev. */
                if (vdev_draid_faulted(cvd, physical_offset))
                        return (B_TRUE);

                /*
                 * Always check groups with active distributed spares
                 * because any vdev failure in the pool will affect them.
                 */
                if (vdev_draid_find_spare(cvd) != NULL)
                        return (B_TRUE);
        }

        return (B_FALSE);
}

/*
 * Determine if the txg is missing. Used by healing resilver.
 */
static boolean_t
vdev_draid_group_missing(vdev_t *vd, uint64_t offset, uint64_t txg,
    uint64_t size)
{
        vdev_draid_config_t *vdc = vd->vdev_tsd;

        ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
        ASSERT3U(vdev_draid_get_astart(vd, offset), ==, offset);

        uint64_t groupstart, perm;
        uint64_t physical_offset = vdev_draid_logical_to_physical(vd,
            offset, &perm, &groupstart);

        uint8_t *base;
        uint64_t iter;
        vdev_draid_get_perm(vdc, perm, &base, &iter);

        for (uint64_t i = 0; i < vdc->vdc_groupwidth; i++) {
                uint64_t c = (groupstart + i) % vdc->vdc_ndisks;
                uint64_t cid = vdev_draid_permute_id(vdc, base, iter, c);
                vdev_t *cvd = vd->vdev_child[cid];

                /* Transaction group is known to be partially replicated. */
                if (vdev_draid_partial(cvd, physical_offset, txg, size))
                        return (B_TRUE);

                /*
                 * Always check groups with active distributed spares
                 * because any vdev failure in the pool will affect them.
                 */
                if (vdev_draid_find_spare(cvd) != NULL)
                        return (B_TRUE);
        }

        return (B_FALSE);
}

/*
 * Find the smallest child asize and largest sector size to calculate the
 * available capacity. Distributed spares are ignored since their capacity
 * is also based on the minimum child size in the top-level dRAID.
 */
static void
vdev_draid_calculate_asize(vdev_t *vd, uint64_t *asizep, uint64_t *max_asizep,
    uint64_t *logical_ashiftp, uint64_t *physical_ashiftp)
{
        uint64_t logical_ashift = 0, physical_ashift = 0;
        uint64_t asize = 0, max_asize = 0;

        ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);

        for (int c = 0; c < vd->vdev_children; c++) {
                vdev_t *cvd = vd->vdev_child[c];

                if (cvd->vdev_ops == &vdev_draid_spare_ops)
                        continue;

                /*
                 * The "- 1 ... + 1" keeps the initial asize of 0 from
                 * winning the MIN() by letting it underflow to UINT64_MAX.
                 */
                asize = MIN(asize - 1, cvd->vdev_asize - 1) + 1;
                max_asize = MIN(max_asize - 1, cvd->vdev_max_asize - 1) + 1;
                logical_ashift = MAX(logical_ashift, cvd->vdev_ashift);
                physical_ashift = MAX(physical_ashift,
                    cvd->vdev_physical_ashift);
        }

        *asizep = asize;
        *max_asizep = max_asize;
        *logical_ashiftp = logical_ashift;
        *physical_ashiftp = physical_ashift;
}

/*
 * Open spare vdevs.
 */
static boolean_t
vdev_draid_open_spares(vdev_t *vd)
{
        return (vd->vdev_ops == &vdev_draid_spare_ops ||
            vd->vdev_ops == &vdev_replacing_ops ||
            vd->vdev_ops == &vdev_spare_ops);
}

/*
 * Open all children, excluding spares.
 */
static boolean_t
vdev_draid_open_children(vdev_t *vd)
{
        return (!vdev_draid_open_spares(vd));
}

/*
 * Open a top-level dRAID vdev.
 */
static int
vdev_draid_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
    uint64_t *logical_ashift, uint64_t *physical_ashift)
{
        vdev_draid_config_t *vdc = vd->vdev_tsd;
        uint64_t nparity = vdc->vdc_nparity;
        int open_errors = 0;

        if (nparity > VDEV_DRAID_MAXPARITY ||
            vd->vdev_children < nparity + 1) {
                vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
                return (SET_ERROR(EINVAL));
        }

        /*
         * First open the normal children, then the distributed spares.
         * This ordering is important to ensure the distributed spares
         * calculate the correct psize in the event that the dRAID vdevs
         * were expanded.
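         * A distributed spare derives its psize from the dRAID's
         * minimum child asize (see vdev_draid_spare_open()), so the
         * regular children must be opened first for that value to be
         * current.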
         */
        vdev_open_children_subset(vd, vdev_draid_open_children);
        vdev_open_children_subset(vd, vdev_draid_open_spares);

        /* Verify enough of the children are available to continue. */
        for (int c = 0; c < vd->vdev_children; c++) {
                if (vd->vdev_child[c]->vdev_open_error != 0) {
                        if ((++open_errors) > nparity) {
                                vd->vdev_stat.vs_aux = VDEV_AUX_NO_REPLICAS;
                                return (SET_ERROR(ENXIO));
                        }
                }
        }

        /*
         * Allocatable capacity is the sum of the space on all children,
         * less the space reserved for the distributed spares, rounded
         * down to the last full row and then to the last full group. An
         * additional 32MB of scratch space is reserved at the end of each
         * child for use by the dRAID expansion feature.
         */
        uint64_t child_asize, child_max_asize;
        vdev_draid_calculate_asize(vd, &child_asize, &child_max_asize,
            logical_ashift, physical_ashift);

        /*
         * Should be unreachable since the minimum child size is 64MB, but
         * we want to make sure an underflow absolutely cannot occur here.
         */
        if (child_asize < VDEV_DRAID_REFLOW_RESERVE ||
            child_max_asize < VDEV_DRAID_REFLOW_RESERVE) {
                return (SET_ERROR(ENXIO));
        }

        child_asize = ((child_asize - VDEV_DRAID_REFLOW_RESERVE) /
            VDEV_DRAID_ROWHEIGHT) * VDEV_DRAID_ROWHEIGHT;
        child_max_asize = ((child_max_asize - VDEV_DRAID_REFLOW_RESERVE) /
            VDEV_DRAID_ROWHEIGHT) * VDEV_DRAID_ROWHEIGHT;

        *asize = (((child_asize * vdc->vdc_ndisks) / vdc->vdc_groupsz) *
            vdc->vdc_groupsz);
        *max_asize = (((child_max_asize * vdc->vdc_ndisks) / vdc->vdc_groupsz) *
            vdc->vdc_groupsz);

        return (0);
}

/*
 * Close a top-level dRAID vdev.
 */
static void
vdev_draid_close(vdev_t *vd)
{
        for (int c = 0; c < vd->vdev_children; c++) {
                if (vd->vdev_child[c] != NULL)
                        vdev_close(vd->vdev_child[c]);
        }
}

/*
 * Return the maximum asize for a rebuild zio in the provided range
 * given the following constraints. A dRAID chunk may not:
 *
 * - Exceed the maximum allowed block size (SPA_MAXBLOCKSIZE), or
 * - Span dRAID redundancy groups.
 */
static uint64_t
vdev_draid_rebuild_asize(vdev_t *vd, uint64_t start, uint64_t asize,
    uint64_t max_segment)
{
        vdev_draid_config_t *vdc = vd->vdev_tsd;

        ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);

        uint64_t ashift = vd->vdev_ashift;
        uint64_t ndata = vdc->vdc_ndata;
        uint64_t psize = MIN(P2ROUNDUP(max_segment * ndata, 1 << ashift),
            SPA_MAXBLOCKSIZE);

        ASSERT3U(vdev_draid_get_astart(vd, start), ==, start);
        ASSERT3U(asize % (vdc->vdc_groupwidth << ashift), ==, 0);

        /* Chunks must evenly span all data columns in the group. */
        psize = (((psize >> ashift) / ndata) * ndata) << ashift;
        uint64_t chunk_size = MIN(asize, vdev_psize_to_asize(vd, psize));

        /*
         * Reduce the chunk size to the group space remaining.
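         * (Hypothetical example: with 16M rows and a 9-wide group, a
         * group spans 144M of logical space, so a rebuild chunk
         * starting 100M into its group is capped at the remaining 44M
         * and never crosses into the next group.)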
         */
        uint64_t group = vdev_draid_offset_to_group(vd, start);
        uint64_t left = vdev_draid_group_to_offset(vd, group + 1) - start;
        chunk_size = MIN(chunk_size, left);

        ASSERT3U(chunk_size % (vdc->vdc_groupwidth << ashift), ==, 0);
        ASSERT3U(vdev_draid_offset_to_group(vd, start), ==,
            vdev_draid_offset_to_group(vd, start + chunk_size - 1));

        return (chunk_size);
}

/*
 * Align the start of the metaslab to the group width and slightly reduce
 * its size to a multiple of the group width. Since full stripe writes are
 * required by dRAID this space is unallocatable. Furthermore, aligning the
 * metaslab start is important for vdev initialize and TRIM, both of which
 * operate on metaslab boundaries which vdev_xlate() expects to be aligned.
 */
static void
vdev_draid_metaslab_init(vdev_t *vd, uint64_t *ms_start, uint64_t *ms_size)
{
        vdev_draid_config_t *vdc = vd->vdev_tsd;

        ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);

        uint64_t sz = vdc->vdc_groupwidth << vd->vdev_ashift;
        uint64_t astart = vdev_draid_get_astart(vd, *ms_start);
        uint64_t asize = ((*ms_size - (astart - *ms_start)) / sz) * sz;

        *ms_start = astart;
        *ms_size = asize;

        ASSERT0(*ms_start % sz);
        ASSERT0(*ms_size % sz);
}

/*
 * Add virtual dRAID spares to the list of valid spares. In order to
 * accomplish this the existing array must be freed and reallocated with
 * the additional entries.
 */
int
vdev_draid_spare_create(nvlist_t *nvroot, vdev_t *vd, uint64_t *ndraidp,
    uint64_t next_vdev_id)
{
        uint64_t draid_nspares = 0;
        uint64_t ndraid = 0;
        int error;

        for (uint64_t i = 0; i < vd->vdev_children; i++) {
                vdev_t *cvd = vd->vdev_child[i];

                if (cvd->vdev_ops == &vdev_draid_ops) {
                        vdev_draid_config_t *vdc = cvd->vdev_tsd;
                        draid_nspares += vdc->vdc_nspares;
                        ndraid++;
                }
        }

        if (draid_nspares == 0) {
                *ndraidp = ndraid;
                return (0);
        }

        nvlist_t **old_spares, **new_spares;
        uint_t old_nspares;
        error = nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
            &old_spares, &old_nspares);
        if (error)
                old_nspares = 0;

        /* Allocate memory and copy the existing spares. */
        new_spares = kmem_alloc(sizeof (nvlist_t *) *
            (draid_nspares + old_nspares), KM_SLEEP);
        for (uint_t i = 0; i < old_nspares; i++)
                new_spares[i] = fnvlist_dup(old_spares[i]);

        /*
         * Add new distributed spares to ZPOOL_CONFIG_SPARES.
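         * The snprintf() below names each spare
         * "draid<parity>-<vdev>-<spare>"; for example, the first
         * distributed spare of a draid2 at top-level vdev id 3 would
         * be "draid2-3-0".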
         */
        uint64_t n = old_nspares;
        for (uint64_t vdev_id = 0; vdev_id < vd->vdev_children; vdev_id++) {
                vdev_t *cvd = vd->vdev_child[vdev_id];
                char path[64];

                if (cvd->vdev_ops != &vdev_draid_ops)
                        continue;

                vdev_draid_config_t *vdc = cvd->vdev_tsd;
                uint64_t nspares = vdc->vdc_nspares;
                uint64_t nparity = vdc->vdc_nparity;

                for (uint64_t spare_id = 0; spare_id < nspares; spare_id++) {
                        bzero(path, sizeof (path));
                        (void) snprintf(path, sizeof (path) - 1,
                            "%s%llu-%llu-%llu", VDEV_TYPE_DRAID,
                            (u_longlong_t)nparity,
                            (u_longlong_t)next_vdev_id + vdev_id,
                            (u_longlong_t)spare_id);

                        nvlist_t *spare = fnvlist_alloc();
                        fnvlist_add_string(spare, ZPOOL_CONFIG_PATH, path);
                        fnvlist_add_string(spare, ZPOOL_CONFIG_TYPE,
                            VDEV_TYPE_DRAID_SPARE);
                        fnvlist_add_uint64(spare, ZPOOL_CONFIG_TOP_GUID,
                            cvd->vdev_guid);
                        fnvlist_add_uint64(spare, ZPOOL_CONFIG_SPARE_ID,
                            spare_id);
                        fnvlist_add_uint64(spare, ZPOOL_CONFIG_IS_LOG, 0);
                        fnvlist_add_uint64(spare, ZPOOL_CONFIG_IS_SPARE, 1);
                        fnvlist_add_uint64(spare, ZPOOL_CONFIG_WHOLE_DISK, 1);
                        fnvlist_add_uint64(spare, ZPOOL_CONFIG_ASHIFT,
                            cvd->vdev_ashift);

                        new_spares[n] = spare;
                        n++;
                }
        }

        if (n > 0) {
                (void) nvlist_remove_all(nvroot, ZPOOL_CONFIG_SPARES);
                fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
                    new_spares, n);
        }

        for (int i = 0; i < n; i++)
                nvlist_free(new_spares[i]);

        kmem_free(new_spares, sizeof (*new_spares) * n);
        *ndraidp = ndraid;

        return (0);
}

/*
 * Determine if any portion of the provided block resides on a child vdev
 * with a dirty DTL and therefore needs to be resilvered.
 */
static boolean_t
vdev_draid_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize,
    uint64_t phys_birth)
{
        uint64_t offset = DVA_GET_OFFSET(dva);
        uint64_t asize = vdev_draid_asize(vd, psize);

        if (phys_birth == TXG_UNKNOWN) {
                /*
                 * Sequential resilver. There is no meaningful phys_birth
                 * for this block; we can only determine whether the block
                 * resides in a degraded group, in which case it must be
                 * resilvered.
                 */
                ASSERT3U(vdev_draid_offset_to_group(vd, offset), ==,
                    vdev_draid_offset_to_group(vd, offset + asize - 1));

                return (vdev_draid_group_degraded(vd, offset));
        } else {
                /*
                 * Healing resilver. TXGs not in DTL_PARTIAL are intact,
                 * as are blocks in non-degraded groups.
                 */
                if (!vdev_dtl_contains(vd, DTL_PARTIAL, phys_birth, 1))
                        return (B_FALSE);

                if (vdev_draid_group_missing(vd, offset, phys_birth, 1))
                        return (B_TRUE);

                /*
                 * The block may span groups, in which case check both.
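                 * For example, a block whose allocated size runs past
                 * the end of group 3 into group 4 must be checked
                 * against the children of both groups.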
                 */
                if (vdev_draid_offset_to_group(vd, offset) !=
                    vdev_draid_offset_to_group(vd, offset + asize - 1)) {
                        if (vdev_draid_group_missing(vd,
                            offset + asize, phys_birth, 1))
                                return (B_TRUE);
                }

                return (B_FALSE);
        }
}

static boolean_t
vdev_draid_rebuilding(vdev_t *vd)
{
        if (vd->vdev_ops->vdev_op_leaf && vd->vdev_rebuild_txg)
                return (B_TRUE);

        for (int i = 0; i < vd->vdev_children; i++) {
                if (vdev_draid_rebuilding(vd->vdev_child[i])) {
                        return (B_TRUE);
                }
        }

        return (B_FALSE);
}

static void
vdev_draid_io_verify(vdev_t *vd, raidz_row_t *rr, int col)
{
#ifdef ZFS_DEBUG
        range_seg64_t logical_rs, physical_rs, remain_rs;
        logical_rs.rs_start = rr->rr_offset;
        logical_rs.rs_end = logical_rs.rs_start +
            vdev_draid_asize(vd, rr->rr_size);

        raidz_col_t *rc = &rr->rr_col[col];
        vdev_t *cvd = vd->vdev_child[rc->rc_devidx];

        vdev_xlate(cvd, &logical_rs, &physical_rs, &remain_rs);
        ASSERT(vdev_xlate_is_empty(&remain_rs));
        ASSERT3U(rc->rc_offset, ==, physical_rs.rs_start);
        ASSERT3U(rc->rc_offset, <, physical_rs.rs_end);
        ASSERT3U(rc->rc_offset + rc->rc_size, ==, physical_rs.rs_end);
#endif
}

/*
 * For write operations:
 * 1. Generate the parity data
 * 2. Create child zio write operations to each column's vdev, for both
 *    data and parity. A gang ABD is allocated by vdev_draid_map_alloc()
 *    if a skip sector needs to be added to a column.
 */
static void
vdev_draid_io_start_write(zio_t *zio, raidz_row_t *rr)
{
        vdev_t *vd = zio->io_vd;
        raidz_map_t *rm = zio->io_vsd;

        vdev_raidz_generate_parity_row(rm, rr);

        for (int c = 0; c < rr->rr_cols; c++) {
                raidz_col_t *rc = &rr->rr_col[c];

                /*
                 * Empty columns are zero filled and included in the parity
                 * calculation and therefore must be written.
                 */
                ASSERT3U(rc->rc_size, !=, 0);

                /* Verify physical to logical translation. */
                vdev_draid_io_verify(vd, rr, c);

                zio_nowait(zio_vdev_child_io(zio, NULL,
                    vd->vdev_child[rc->rc_devidx], rc->rc_offset,
                    rc->rc_abd, rc->rc_size, zio->io_type, zio->io_priority,
                    0, vdev_raidz_child_done, rc));
        }
}

/*
 * For read operations:
 * 1. The vdev_draid_map_alloc() function will create a minimal raidz
 *    mapping for the read based on the zio->io_flags. There are two
 *    possible mappings: either 1) a normal read, or 2) a scrub/resilver.
 * 2. Create the zio read operations. This will include all parity
 *    columns and skip sectors for a scrub/resilver.
 */
static void
vdev_draid_io_start_read(zio_t *zio, raidz_row_t *rr)
{
        vdev_t *vd = zio->io_vd;

        /* Sequential rebuild must do IO at redundancy group boundaries. */
        IMPLY(zio->io_priority == ZIO_PRIORITY_REBUILD, rr->rr_nempty == 0);

        /*
         * Iterate over the columns in reverse order so that we hit the parity
         * last. Any errors along the way will force us to read the parity.
         * For scrub/resilver IOs which verify skip sectors, a gang ABD will
         * have been allocated to store them and rc->rc_size is increased.
         */
        for (int c = rr->rr_cols - 1; c >= 0; c--) {
                raidz_col_t *rc = &rr->rr_col[c];
                vdev_t *cvd = vd->vdev_child[rc->rc_devidx];

                if (!vdev_draid_readable(cvd, rc->rc_offset)) {
                        if (c >= rr->rr_firstdatacol)
                                rr->rr_missingdata++;
                        else
                                rr->rr_missingparity++;
                        rc->rc_error = SET_ERROR(ENXIO);
                        rc->rc_tried = 1;
                        rc->rc_skipped = 1;
                        continue;
                }

                if (vdev_draid_missing(cvd, rc->rc_offset, zio->io_txg, 1)) {
                        if (c >= rr->rr_firstdatacol)
                                rr->rr_missingdata++;
                        else
                                rr->rr_missingparity++;
                        rc->rc_error = SET_ERROR(ESTALE);
                        rc->rc_skipped = 1;
                        continue;
                }

                /*
                 * Empty columns may be read during vdev_draid_io_done().
                 * Only skip them after the readable and missing checks
                 * verify they are available.
                 */
                if (rc->rc_size == 0) {
                        rc->rc_skipped = 1;
                        continue;
                }

                if (zio->io_flags & ZIO_FLAG_RESILVER) {
                        vdev_t *svd;

                        /*
                         * If this child is a distributed spare then the
                         * offset might reside on the vdev being replaced,
                         * in which case this data must be written to the
                         * new device. Failure to do so would result in
                         * checksum errors when the old device is detached
                         * and the pool is scrubbed.
                         */
                        if ((svd = vdev_draid_find_spare(cvd)) != NULL) {
                                svd = vdev_draid_spare_get_child(svd,
                                    rc->rc_offset);
                                if (svd && (svd->vdev_ops == &vdev_spare_ops ||
                                    svd->vdev_ops == &vdev_replacing_ops)) {
                                        rc->rc_repair = 1;
                                }
                        }

                        /*
                         * Always issue a repair IO to this child when it's
                         * a spare or replacing vdev with an active rebuild.
                         */
                        if ((cvd->vdev_ops == &vdev_spare_ops ||
                            cvd->vdev_ops == &vdev_replacing_ops) &&
                            vdev_draid_rebuilding(cvd)) {
                                rc->rc_repair = 1;
                        }
                }
        }

        /*
         * If either a parity or data column is missing a repair may be
         * attempted by vdev_draid_io_done(). Expand the raid map to read
         * in empty columns which are needed along with the parity during
         * reconstruction.
         */
        if ((rr->rr_missingdata > 0 || rr->rr_missingparity > 0) &&
            rr->rr_nempty > 0 && rr->rr_abd_empty == NULL) {
                vdev_draid_map_alloc_empty(zio, rr);
        }

        for (int c = rr->rr_cols - 1; c >= 0; c--) {
                raidz_col_t *rc = &rr->rr_col[c];
                vdev_t *cvd = vd->vdev_child[rc->rc_devidx];

                if (rc->rc_error || rc->rc_size == 0)
                        continue;

                if (c >= rr->rr_firstdatacol || rr->rr_missingdata > 0 ||
                    (zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER))) {
                        zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
                            rc->rc_offset, rc->rc_abd, rc->rc_size,
                            zio->io_type, zio->io_priority, 0,
                            vdev_raidz_child_done, rc));
                }
        }
}

/*
 * Start an IO operation to a dRAID vdev.
 */
static void
vdev_draid_io_start(zio_t *zio)
{
        vdev_t *vd __maybe_unused = zio->io_vd;
        raidz_map_t *rm;

        ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
        ASSERT3U(zio->io_offset, ==, vdev_draid_get_astart(vd, zio->io_offset));

        rm = vdev_draid_map_alloc(zio);

        if (zio->io_type == ZIO_TYPE_WRITE) {
                for (int i = 0; i < rm->rm_nrows; i++) {
                        vdev_draid_io_start_write(zio, rm->rm_row[i]);
                }
        } else {
                ASSERT(zio->io_type == ZIO_TYPE_READ);

                for (int i = 0; i < rm->rm_nrows; i++) {
                        vdev_draid_io_start_read(zio, rm->rm_row[i]);
                }
        }

        zio_execute(zio);
}

/*
 * Complete an IO operation on a dRAID vdev. The raidz logic can be applied
 * to dRAID since the layout is fully described by the raidz_map_t.
 */
static void
vdev_draid_io_done(zio_t *zio)
{
        vdev_raidz_io_done(zio);
}

static void
vdev_draid_state_change(vdev_t *vd, int faulted, int degraded)
{
        vdev_draid_config_t *vdc = vd->vdev_tsd;
        ASSERT(vd->vdev_ops == &vdev_draid_ops);

        if (faulted > vdc->vdc_nparity)
                vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
                    VDEV_AUX_NO_REPLICAS);
        else if (degraded + faulted != 0)
                vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE);
        else
                vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY, VDEV_AUX_NONE);
}

static void
vdev_draid_xlate(vdev_t *cvd, const range_seg64_t *logical_rs,
    range_seg64_t *physical_rs, range_seg64_t *remain_rs)
{
        vdev_t *raidvd = cvd->vdev_parent;
        ASSERT(raidvd->vdev_ops == &vdev_draid_ops);

        vdev_draid_config_t *vdc = raidvd->vdev_tsd;
        uint64_t ashift = raidvd->vdev_top->vdev_ashift;

        /* Make sure the offsets are block-aligned. */
        ASSERT0(logical_rs->rs_start % (1 << ashift));
        ASSERT0(logical_rs->rs_end % (1 << ashift));

        uint64_t logical_start = logical_rs->rs_start;
        uint64_t logical_end = logical_rs->rs_end;

        /*
         * Unaligned ranges must be skipped. All metaslabs are correctly
         * aligned so this should not happen, but this case is handled in
         * case it's needed by future callers.
         */
        uint64_t astart = vdev_draid_get_astart(raidvd, logical_start);
        if (astart != logical_start) {
                physical_rs->rs_start = logical_start;
                physical_rs->rs_end = logical_start;
                remain_rs->rs_start = MIN(astart, logical_end);
                remain_rs->rs_end = logical_end;
                return;
        }

        /*
         * Unlike with mirrors and raidz a dRAID logical range can map
         * to multiple non-contiguous physical ranges. This is handled by
         * limiting the size of the logical range to a single group and
         * setting the remain argument such that it describes the remaining
         * unmapped logical range. This is stricter than absolutely
         * necessary but helps simplify the logic below.
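         * For example, a logical range which begins in group N and
         * runs into group N+1 is truncated at the start of group N+1;
         * the tail is returned in remain_rs so the caller can
         * translate it with a follow-up call.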
         */
        uint64_t group = vdev_draid_offset_to_group(raidvd, logical_start);
        uint64_t nextstart = vdev_draid_group_to_offset(raidvd, group + 1);
        if (logical_end > nextstart)
                logical_end = nextstart;

        /* Find the starting offset for each vdev in the group. */
        uint64_t perm, groupstart;
        uint64_t start = vdev_draid_logical_to_physical(raidvd,
            logical_start, &perm, &groupstart);
        uint64_t end = start;

        uint8_t *base;
        uint64_t iter, id;
        vdev_draid_get_perm(vdc, perm, &base, &iter);

        /*
         * Check if the passed child falls within the group. If it does,
         * update the start and end to reflect the physical range.
         * Otherwise, leave them unmodified which will result in an empty
         * (zero-length) physical range being returned.
         */
        for (uint64_t i = 0; i < vdc->vdc_groupwidth; i++) {
                uint64_t c = (groupstart + i) % vdc->vdc_ndisks;

                if (c == 0 && i != 0) {
                        /* The group wrapped, increment the start. */
                        start += VDEV_DRAID_ROWHEIGHT;
                        end = start;
                }

                id = vdev_draid_permute_id(vdc, base, iter, c);
                if (id == cvd->vdev_id) {
                        uint64_t b_size = (logical_end >> ashift) -
                            (logical_start >> ashift);
                        ASSERT3U(b_size, >, 0);
                        end = start + ((((b_size - 1) /
                            vdc->vdc_groupwidth) + 1) << ashift);
                        break;
                }
        }
        physical_rs->rs_start = start;
        physical_rs->rs_end = end;

        /*
         * Only top-level vdevs are allowed to set remain_rs because
         * when .vdev_op_xlate() is called for their children the full
         * logical range is not provided by vdev_xlate().
         */
        remain_rs->rs_start = logical_end;
        remain_rs->rs_end = logical_rs->rs_end;

        ASSERT3U(physical_rs->rs_start, <=, logical_start);
        ASSERT3U(physical_rs->rs_end - physical_rs->rs_start, <=,
            logical_end - logical_start);
}

/*
 * Add dRAID specific fields to the config nvlist.
 */
static void
vdev_draid_config_generate(vdev_t *vd, nvlist_t *nv)
{
        ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
        vdev_draid_config_t *vdc = vd->vdev_tsd;

        fnvlist_add_uint64(nv, ZPOOL_CONFIG_NPARITY, vdc->vdc_nparity);
        fnvlist_add_uint64(nv, ZPOOL_CONFIG_DRAID_NDATA, vdc->vdc_ndata);
        fnvlist_add_uint64(nv, ZPOOL_CONFIG_DRAID_NSPARES, vdc->vdc_nspares);
        fnvlist_add_uint64(nv, ZPOOL_CONFIG_DRAID_NGROUPS, vdc->vdc_ngroups);
}

/*
 * Initialize private dRAID specific fields from the nvlist.
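 *
 * As an illustrative (hypothetical) example, a draid2 with 4 data
 * disks, 1 distributed spare, and 8 children would arrive here as
 * ndata=4, nparity=2, nspares=1, and children=8, along with the
 * ngroups value chosen at pool creation time.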
 */
static int
vdev_draid_init(spa_t *spa, nvlist_t *nv, void **tsd)
{
        uint64_t ndata, nparity, nspares, ngroups;
        int error;

        if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DRAID_NDATA, &ndata))
                return (SET_ERROR(EINVAL));

        if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY, &nparity) ||
            nparity == 0 || nparity > VDEV_DRAID_MAXPARITY) {
                return (SET_ERROR(EINVAL));
        }

        uint_t children;
        nvlist_t **child;
        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
            &child, &children) != 0 || children == 0 ||
            children > VDEV_DRAID_MAX_CHILDREN) {
                return (SET_ERROR(EINVAL));
        }

        if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DRAID_NSPARES, &nspares) ||
            nspares > 100 || nspares > (children - (ndata + nparity))) {
                return (SET_ERROR(EINVAL));
        }

        if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DRAID_NGROUPS, &ngroups) ||
            ngroups == 0 || ngroups > VDEV_DRAID_MAX_CHILDREN) {
                return (SET_ERROR(EINVAL));
        }

        /*
         * Validate that the minimum number of children exist per group for
         * the specified parity level (draid1 >= 2, draid2 >= 3, draid3 >= 4).
         */
        if (children < (ndata + nparity + nspares))
                return (SET_ERROR(EINVAL));

        /*
         * Create the dRAID configuration using the pool nvlist configuration
         * and the fixed mapping for the correct number of children.
         */
        vdev_draid_config_t *vdc;
        const draid_map_t *map;

        error = vdev_draid_lookup_map(children, &map);
        if (error)
                return (SET_ERROR(EINVAL));

        vdc = kmem_zalloc(sizeof (*vdc), KM_SLEEP);
        vdc->vdc_ndata = ndata;
        vdc->vdc_nparity = nparity;
        vdc->vdc_nspares = nspares;
        vdc->vdc_children = children;
        vdc->vdc_ngroups = ngroups;
        vdc->vdc_nperms = map->dm_nperms;

        error = vdev_draid_generate_perms(map, &vdc->vdc_perms);
        if (error) {
                kmem_free(vdc, sizeof (*vdc));
                return (SET_ERROR(EINVAL));
        }

        /*
         * Derived constants.
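         *
         * Continuing the hypothetical draid2 example (ndata=4,
         * nparity=2, nspares=1, children=8, ngroups=7):
         * groupwidth = 4 + 2 = 6, ndisks = 8 - 1 = 7,
         * groupsz = 6 * 16M = 96M, and devslicesz = (96M * 7) / 7 =
         * 96M, i.e. six 16M rows from each child per slice.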
         */
        vdc->vdc_groupwidth = vdc->vdc_ndata + vdc->vdc_nparity;
        vdc->vdc_ndisks = vdc->vdc_children - vdc->vdc_nspares;
        vdc->vdc_groupsz = vdc->vdc_groupwidth * VDEV_DRAID_ROWHEIGHT;
        vdc->vdc_devslicesz = (vdc->vdc_groupsz * vdc->vdc_ngroups) /
            vdc->vdc_ndisks;

        ASSERT3U(vdc->vdc_groupwidth, >=, 2);
        ASSERT3U(vdc->vdc_groupwidth, <=, vdc->vdc_ndisks);
        ASSERT3U(vdc->vdc_groupsz, >=, 2 * VDEV_DRAID_ROWHEIGHT);
        ASSERT3U(vdc->vdc_devslicesz, >=, VDEV_DRAID_ROWHEIGHT);
        ASSERT3U(vdc->vdc_devslicesz % VDEV_DRAID_ROWHEIGHT, ==, 0);
        ASSERT3U((vdc->vdc_groupwidth * vdc->vdc_ngroups) %
            vdc->vdc_ndisks, ==, 0);

        *tsd = vdc;

        return (0);
}

static void
vdev_draid_fini(vdev_t *vd)
{
        vdev_draid_config_t *vdc = vd->vdev_tsd;

        vmem_free(vdc->vdc_perms, sizeof (uint8_t) *
            vdc->vdc_children * vdc->vdc_nperms);
        kmem_free(vdc, sizeof (*vdc));
}

static uint64_t
vdev_draid_nparity(vdev_t *vd)
{
        vdev_draid_config_t *vdc = vd->vdev_tsd;

        return (vdc->vdc_nparity);
}

static uint64_t
vdev_draid_ndisks(vdev_t *vd)
{
        vdev_draid_config_t *vdc = vd->vdev_tsd;

        return (vdc->vdc_ndisks);
}

vdev_ops_t vdev_draid_ops = {
        .vdev_op_init = vdev_draid_init,
        .vdev_op_fini = vdev_draid_fini,
        .vdev_op_open = vdev_draid_open,
        .vdev_op_close = vdev_draid_close,
        .vdev_op_asize = vdev_draid_asize,
        .vdev_op_min_asize = vdev_draid_min_asize,
        .vdev_op_min_alloc = vdev_draid_min_alloc,
        .vdev_op_io_start = vdev_draid_io_start,
        .vdev_op_io_done = vdev_draid_io_done,
        .vdev_op_state_change = vdev_draid_state_change,
        .vdev_op_need_resilver = vdev_draid_need_resilver,
        .vdev_op_hold = NULL,
        .vdev_op_rele = NULL,
        .vdev_op_remap = NULL,
        .vdev_op_xlate = vdev_draid_xlate,
        .vdev_op_rebuild_asize = vdev_draid_rebuild_asize,
        .vdev_op_metaslab_init = vdev_draid_metaslab_init,
        .vdev_op_config_generate = vdev_draid_config_generate,
        .vdev_op_nparity = vdev_draid_nparity,
        .vdev_op_ndisks = vdev_draid_ndisks,
        .vdev_op_type = VDEV_TYPE_DRAID,
        .vdev_op_leaf = B_FALSE,
};

/*
 * A dRAID distributed spare is a virtual leaf vdev which is included in the
 * parent dRAID configuration. The last N columns of the dRAID permutation
 * table are used to determine on which dRAID children a specific offset
 * should be written. These spare leaf vdevs can only be used to replace
 * faulted children in the same dRAID configuration.
 */

/*
 * Distributed spare state. All fields are set when the distributed spare is
 * first opened and are immutable.
 */
typedef struct {
        vdev_t *vds_draid_vdev;         /* top-level parent dRAID vdev */
        uint64_t vds_top_guid;          /* top-level parent dRAID guid */
        uint64_t vds_spare_id;          /* spare id (0 - vdc->vdc_nspares-1) */
} vdev_draid_spare_t;

/*
 * Returns the parent dRAID vdev to which the distributed spare belongs.
 * This may be safely called even when the vdev is not open.
 */
vdev_t *
vdev_draid_spare_get_parent(vdev_t *vd)
{
        vdev_draid_spare_t *vds = vd->vdev_tsd;

        ASSERT3P(vd->vdev_ops, ==, &vdev_draid_spare_ops);

        if (vds->vds_draid_vdev != NULL)
                return (vds->vds_draid_vdev);

        return (vdev_lookup_by_guid(vd->vdev_spa->spa_root_vdev,
            vds->vds_top_guid));
}

/*
 * A dRAID spare is active when it's the child of a vdev using the
 * vdev_spare_ops, vdev_replacing_ops or vdev_draid_ops.
 */
static boolean_t
vdev_draid_spare_is_active(vdev_t *vd)
{
        vdev_t *pvd = vd->vdev_parent;

        if (pvd != NULL && (pvd->vdev_ops == &vdev_spare_ops ||
            pvd->vdev_ops == &vdev_replacing_ops ||
            pvd->vdev_ops == &vdev_draid_ops)) {
                return (B_TRUE);
        } else {
                return (B_FALSE);
        }
}

/*
 * Given a dRAID distributed spare vdev, returns the physical child vdev
 * on which the provided offset resides. This may involve recursing through
 * multiple layers of distributed spares. Note that offset is relative to
 * this vdev.
 */
vdev_t *
vdev_draid_spare_get_child(vdev_t *vd, uint64_t physical_offset)
{
        vdev_draid_spare_t *vds = vd->vdev_tsd;

        ASSERT3P(vd->vdev_ops, ==, &vdev_draid_spare_ops);

        /* The vdev is closed. */
        if (vds->vds_draid_vdev == NULL)
                return (NULL);

        vdev_t *tvd = vds->vds_draid_vdev;
        vdev_draid_config_t *vdc = tvd->vdev_tsd;

        ASSERT3P(tvd->vdev_ops, ==, &vdev_draid_ops);
        ASSERT3U(vds->vds_spare_id, <, vdc->vdc_nspares);

        uint8_t *base;
        uint64_t iter;
        uint64_t perm = physical_offset / vdc->vdc_devslicesz;

        vdev_draid_get_perm(vdc, perm, &base, &iter);

        uint64_t cid = vdev_draid_permute_id(vdc, base, iter,
            (tvd->vdev_children - 1) - vds->vds_spare_id);
        vdev_t *cvd = tvd->vdev_child[cid];

        if (cvd->vdev_ops == &vdev_draid_spare_ops)
                return (vdev_draid_spare_get_child(cvd, physical_offset));

        return (cvd);
}

/* ARGSUSED */
static void
vdev_draid_spare_close(vdev_t *vd)
{
        vdev_draid_spare_t *vds = vd->vdev_tsd;
        vds->vds_draid_vdev = NULL;
}

/*
 * Opening a dRAID spare device is done by looking up the associated dRAID
 * top-level vdev guid from the spare configuration.
 */
static int
vdev_draid_spare_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
    uint64_t *logical_ashift, uint64_t *physical_ashift)
{
        vdev_draid_spare_t *vds = vd->vdev_tsd;
        vdev_t *rvd = vd->vdev_spa->spa_root_vdev;
        uint64_t asize, max_asize;

        vdev_t *tvd = vdev_lookup_by_guid(rvd, vds->vds_top_guid);
        if (tvd == NULL) {
                /*
                 * When spa_vdev_add() is labeling new spares the
                 * associated dRAID is not attached to the root vdev
                 * nor does this spare have a parent. Simulate a valid
                 * device in order to allow the label to be initialized
                 * and the distributed spare added to the configuration.
                 */
                if (vd->vdev_parent == NULL) {
                        *psize = *max_psize = SPA_MINDEVSIZE;
                        *logical_ashift = *physical_ashift = ASHIFT_MIN;
                        return (0);
                }

                return (SET_ERROR(EINVAL));
        }

        vdev_draid_config_t *vdc = tvd->vdev_tsd;
        if (tvd->vdev_ops != &vdev_draid_ops || vdc == NULL)
                return (SET_ERROR(EINVAL));

        if (vds->vds_spare_id >= vdc->vdc_nspares)
                return (SET_ERROR(EINVAL));

        /*
         * Neither tvd->vdev_asize nor tvd->vdev_max_asize can be used here
         * because the caller may be vdev_draid_open() in which case the
         * values are stale as they haven't yet been updated by vdev_open().
         * To avoid this always recalculate the dRAID asize and max_asize.
         */
        vdev_draid_calculate_asize(tvd, &asize, &max_asize,
            logical_ashift, physical_ashift);

        *psize = asize + VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
        *max_psize = max_asize + VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;

        vds->vds_draid_vdev = tvd;

        return (0);
}

/*
 * Completed distributed spare IO. Store the result in the parent zio
 * as if it had performed the operation itself. Only the first error is
 * preserved if there are multiple errors.
 */
static void
vdev_draid_spare_child_done(zio_t *zio)
{
        zio_t *pio = zio->io_private;

        /*
         * IOs are issued to non-writable vdevs in order to keep their
         * DTLs accurate. However, we don't want to propagate the
         * error into the distributed spare's DTL. When resilvering
         * vdev_draid_need_resilver() will consult the relevant DTL
         * to determine if the data is missing and must be repaired.
         */
        if (!vdev_writeable(zio->io_vd))
                return;

        if (pio->io_error == 0)
                pio->io_error = zio->io_error;
}

/*
 * Returns a valid label nvlist for the distributed spare vdev. This is
 * used to bypass the IO pipeline to avoid the complexity of constructing
 * a complete label with a valid checksum to return when read.
 */
nvlist_t *
vdev_draid_read_config_spare(vdev_t *vd)
{
        spa_t *spa = vd->vdev_spa;
        spa_aux_vdev_t *sav = &spa->spa_spares;
        uint64_t guid = vd->vdev_guid;

        nvlist_t *nv = fnvlist_alloc();
        fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 1);
        fnvlist_add_uint64(nv, ZPOOL_CONFIG_CREATE_TXG, vd->vdev_crtxg);
        fnvlist_add_uint64(nv, ZPOOL_CONFIG_VERSION, spa_version(spa));
        fnvlist_add_string(nv, ZPOOL_CONFIG_POOL_NAME, spa_name(spa));
        fnvlist_add_uint64(nv, ZPOOL_CONFIG_POOL_GUID, spa_guid(spa));
        fnvlist_add_uint64(nv, ZPOOL_CONFIG_POOL_TXG, spa->spa_config_txg);
        fnvlist_add_uint64(nv, ZPOOL_CONFIG_TOP_GUID, vd->vdev_top->vdev_guid);
        fnvlist_add_uint64(nv, ZPOOL_CONFIG_POOL_STATE,
            vdev_draid_spare_is_active(vd) ?
            POOL_STATE_ACTIVE : POOL_STATE_SPARE);

        /* Set the vdev guid based on the vdev list in sav_count. */
        for (int i = 0; i < sav->sav_count; i++) {
                if (sav->sav_vdevs[i]->vdev_ops == &vdev_draid_spare_ops &&
                    strcmp(sav->sav_vdevs[i]->vdev_path, vd->vdev_path) == 0) {
                        guid = sav->sav_vdevs[i]->vdev_guid;
                        break;
                }
        }

        fnvlist_add_uint64(nv, ZPOOL_CONFIG_GUID, guid);

        return (nv);
}

/*
 * Handle any ioctl requested of the distributed spare. Only flushes
 * are supported, in which case all children must be flushed.
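 * A distributed spare stripes its contents across every child of the
 * dRAID, so a cache flush is only complete once it has been forwarded
 * to all of the children.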
 */
static int
vdev_draid_spare_ioctl(zio_t *zio)
{
        vdev_t *vd = zio->io_vd;
        int error = 0;

        if (zio->io_cmd == DKIOCFLUSHWRITECACHE) {
                for (int c = 0; c < vd->vdev_children; c++) {
                        zio_nowait(zio_vdev_child_io(zio, NULL,
                            vd->vdev_child[c], zio->io_offset, zio->io_abd,
                            zio->io_size, zio->io_type, zio->io_priority, 0,
                            vdev_draid_spare_child_done, zio));
                }
        } else {
                error = SET_ERROR(ENOTSUP);
        }

        return (error);
}

/*
 * Initiate an IO to the distributed spare. For normal IOs this entails using
 * the zio->io_offset and permutation table to calculate which child dRAID
 * vdev is responsible for the data, then passing the zio along to that child
 * to perform the actual IO. The label ranges are not stored on disk and
 * require some special handling which is described below.
 */
static void
vdev_draid_spare_io_start(zio_t *zio)
{
        vdev_t *cvd = NULL, *vd = zio->io_vd;
        vdev_draid_spare_t *vds = vd->vdev_tsd;
        uint64_t offset = zio->io_offset - VDEV_LABEL_START_SIZE;

        /*
         * If the vdev is closed, it's likely in the REMOVED or FAULTED state.
         * Nothing to be done here but return failure.
         */
        if (vds == NULL) {
                zio->io_error = ENXIO;
                zio_interrupt(zio);
                return;
        }

        switch (zio->io_type) {
        case ZIO_TYPE_IOCTL:
                zio->io_error = vdev_draid_spare_ioctl(zio);
                break;

        case ZIO_TYPE_WRITE:
                if (VDEV_OFFSET_IS_LABEL(vd, zio->io_offset)) {
                        /*
                         * Accept probe IOs and config writers to simulate the
                         * existence of an on disk label. vdev_label_sync(),
                         * vdev_uberblock_sync() and vdev_copy_uberblocks()
                         * skip the distributed spares. This only leaves
                         * vdev_label_init() which is allowed to succeed to
                         * avoid adding special cases to the function.
                         */
                        if (zio->io_flags & ZIO_FLAG_PROBE ||
                            zio->io_flags & ZIO_FLAG_CONFIG_WRITER) {
                                zio->io_error = 0;
                        } else {
                                zio->io_error = SET_ERROR(EIO);
                        }
                } else {
                        cvd = vdev_draid_spare_get_child(vd, offset);

                        if (cvd == NULL) {
                                zio->io_error = SET_ERROR(ENXIO);
                        } else {
                                zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
                                    offset, zio->io_abd, zio->io_size,
                                    zio->io_type, zio->io_priority, 0,
                                    vdev_draid_spare_child_done, zio));
                        }
                }
                break;

        case ZIO_TYPE_READ:
                if (VDEV_OFFSET_IS_LABEL(vd, zio->io_offset)) {
                        /*
                         * Accept probe IOs to simulate the existence of a
                         * label. vdev_label_read_config() bypasses the
                         * pipeline to read the label configuration and
                         * vdev_uberblock_load() skips distributed spares
                         * when attempting to locate the best uberblock.
                         */
                        if (zio->io_flags & ZIO_FLAG_PROBE) {
                                zio->io_error = 0;
                        } else {
                                zio->io_error = SET_ERROR(EIO);
                        }
                } else {
                        cvd = vdev_draid_spare_get_child(vd, offset);

                        if (cvd == NULL || !vdev_readable(cvd)) {
                                zio->io_error = SET_ERROR(ENXIO);
                        } else {
                                zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
                                    offset, zio->io_abd, zio->io_size,
                                    zio->io_type, zio->io_priority, 0,
                                    vdev_draid_spare_child_done, zio));
                        }
                }
                break;

        case ZIO_TYPE_TRIM:
                /* The vdev label ranges are never trimmed. */
                ASSERT0(VDEV_OFFSET_IS_LABEL(vd, zio->io_offset));

                cvd = vdev_draid_spare_get_child(vd, offset);

                if (cvd == NULL || !cvd->vdev_has_trim) {
                        zio->io_error = SET_ERROR(ENXIO);
                } else {
                        zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
                            offset, zio->io_abd, zio->io_size,
                            zio->io_type, zio->io_priority, 0,
                            vdev_draid_spare_child_done, zio));
                }
                break;

        default:
                zio->io_error = SET_ERROR(ENOTSUP);
                break;
        }

        zio_execute(zio);
}

/* ARGSUSED */
static void
vdev_draid_spare_io_done(zio_t *zio)
{
}

/*
 * Look up the full spare config in spa->spa_spares.sav_config and
 * return the top_guid and spare_id for the named spare.
 */
static int
vdev_draid_spare_lookup(spa_t *spa, nvlist_t *nv, uint64_t *top_guidp,
    uint64_t *spare_idp)
{
        nvlist_t **spares;
        uint_t nspares;
        int error;

        if ((spa->spa_spares.sav_config == NULL) ||
            (nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
            ZPOOL_CONFIG_SPARES, &spares, &nspares) != 0)) {
                return (SET_ERROR(ENOENT));
        }

        char *spare_name;
        error = nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &spare_name);
        if (error != 0)
                return (SET_ERROR(EINVAL));

        for (int i = 0; i < nspares; i++) {
                nvlist_t *spare = spares[i];
                uint64_t top_guid, spare_id;
                char *type, *path;

                /* Skip non-distributed spares. */
                error = nvlist_lookup_string(spare, ZPOOL_CONFIG_TYPE, &type);
                if (error != 0 || strcmp(type, VDEV_TYPE_DRAID_SPARE) != 0)
                        continue;

                /* Skip spares with the wrong name. */
                error = nvlist_lookup_string(spare, ZPOOL_CONFIG_PATH, &path);
                if (error != 0 || strcmp(path, spare_name) != 0)
                        continue;

                /* Found the matching spare. */
                error = nvlist_lookup_uint64(spare,
                    ZPOOL_CONFIG_TOP_GUID, &top_guid);
                if (error == 0) {
                        error = nvlist_lookup_uint64(spare,
                            ZPOOL_CONFIG_SPARE_ID, &spare_id);
                }

                if (error != 0) {
                        return (SET_ERROR(EINVAL));
                } else {
                        *top_guidp = top_guid;
                        *spare_idp = spare_id;
                        return (0);
                }
        }

        return (SET_ERROR(ENOENT));
}

/*
 * Initialize private dRAID spare specific fields from the nvlist.
 */
static int
vdev_draid_spare_init(spa_t *spa, nvlist_t *nv, void **tsd)
{
        vdev_draid_spare_t *vds;
        uint64_t top_guid = 0;
        uint64_t spare_id;

        /*
         * In the normal case, check the list of spares stored in the spa
         * to look up the top_guid and spare_id for the provided spare
         * config. When creating a new pool or adding vdevs the spare list
         * is not yet populated and the values are provided in the passed
         * config.
         */
        if (vdev_draid_spare_lookup(spa, nv, &top_guid, &spare_id) != 0) {
                if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_TOP_GUID,
                    &top_guid) != 0)
                        return (SET_ERROR(EINVAL));

                if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_SPARE_ID,
                    &spare_id) != 0)
                        return (SET_ERROR(EINVAL));
        }

        vds = kmem_alloc(sizeof (vdev_draid_spare_t), KM_SLEEP);
        vds->vds_draid_vdev = NULL;
        vds->vds_top_guid = top_guid;
        vds->vds_spare_id = spare_id;

        *tsd = vds;

        return (0);
}

static void
vdev_draid_spare_fini(vdev_t *vd)
{
        kmem_free(vd->vdev_tsd, sizeof (vdev_draid_spare_t));
}

static void
vdev_draid_spare_config_generate(vdev_t *vd, nvlist_t *nv)
{
        vdev_draid_spare_t *vds = vd->vdev_tsd;

        ASSERT3P(vd->vdev_ops, ==, &vdev_draid_spare_ops);

        fnvlist_add_uint64(nv, ZPOOL_CONFIG_TOP_GUID, vds->vds_top_guid);
        fnvlist_add_uint64(nv, ZPOOL_CONFIG_SPARE_ID, vds->vds_spare_id);
}

vdev_ops_t vdev_draid_spare_ops = {
        .vdev_op_init = vdev_draid_spare_init,
        .vdev_op_fini = vdev_draid_spare_fini,
        .vdev_op_open = vdev_draid_spare_open,
        .vdev_op_close = vdev_draid_spare_close,
        .vdev_op_asize = vdev_default_asize,
        .vdev_op_min_asize = vdev_default_min_asize,
        .vdev_op_min_alloc = NULL,
        .vdev_op_io_start = vdev_draid_spare_io_start,
        .vdev_op_io_done = vdev_draid_spare_io_done,
        .vdev_op_state_change = NULL,
        .vdev_op_need_resilver = NULL,
        .vdev_op_hold = NULL,
        .vdev_op_rele = NULL,
        .vdev_op_remap = NULL,
        .vdev_op_xlate = vdev_default_xlate,
        .vdev_op_rebuild_asize = NULL,
        .vdev_op_metaslab_init = NULL,
        .vdev_op_config_generate = vdev_draid_spare_config_generate,
        .vdev_op_nparity = NULL,
        .vdev_op_ndisks = NULL,
        .vdev_op_type = VDEV_TYPE_DRAID_SPARE,
        .vdev_op_leaf = B_TRUE,
};