/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

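/*
 * Read-only RAID4/RAID5 transformation module for the GEOM_RAID class.
 * It maps read requests onto the data strips of RAID4/5/5E/5EE/5R/6/MDF
 * volumes; writes, rebuild and kernel dumps are not implemented, so
 * volumes handled by this module are started read-only.
 */
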
#include <sys/param.h>
#include <sys/bio.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <geom/geom.h>
#include "geom/raid/g_raid.h"
#include "g_raid_tr_if.h"

static MALLOC_DEFINE(M_TR_RAID5, "tr_raid5_data", "GEOM_RAID RAID5 data");

#define TR_RAID5_NONE           0
#define TR_RAID5_REBUILD        1
#define TR_RAID5_RESYNC         2

#define TR_RAID5_F_DOING_SOME   0x1
#define TR_RAID5_F_LOCKED       0x2
#define TR_RAID5_F_ABORT        0x4

struct g_raid_tr_raid5_object {
        struct g_raid_tr_object  trso_base;
        int                      trso_starting;
        int                      trso_stopping;
        int                      trso_type;
        int                      trso_recover_slabs;    /* slabs before rest */
        int                      trso_fair_io;
        int                      trso_meta_update;
        int                      trso_flags;
        struct g_raid_subdisk   *trso_failed_sd;        /* like per volume */
        void                    *trso_buffer;           /* Buffer space */
        struct bio               trso_bio;
};

static g_raid_tr_taste_t g_raid_tr_taste_raid5;
static g_raid_tr_event_t g_raid_tr_event_raid5;
static g_raid_tr_start_t g_raid_tr_start_raid5;
static g_raid_tr_stop_t g_raid_tr_stop_raid5;
static g_raid_tr_iostart_t g_raid_tr_iostart_raid5;
static g_raid_tr_iodone_t g_raid_tr_iodone_raid5;
static g_raid_tr_kerneldump_t g_raid_tr_kerneldump_raid5;
static g_raid_tr_locked_t g_raid_tr_locked_raid5;
static g_raid_tr_free_t g_raid_tr_free_raid5;

static kobj_method_t g_raid_tr_raid5_methods[] = {
        KOBJMETHOD(g_raid_tr_taste, g_raid_tr_taste_raid5),
        KOBJMETHOD(g_raid_tr_event, g_raid_tr_event_raid5),
        KOBJMETHOD(g_raid_tr_start, g_raid_tr_start_raid5),
        KOBJMETHOD(g_raid_tr_stop, g_raid_tr_stop_raid5),
        KOBJMETHOD(g_raid_tr_iostart, g_raid_tr_iostart_raid5),
        KOBJMETHOD(g_raid_tr_iodone, g_raid_tr_iodone_raid5),
        KOBJMETHOD(g_raid_tr_kerneldump, g_raid_tr_kerneldump_raid5),
        KOBJMETHOD(g_raid_tr_locked, g_raid_tr_locked_raid5),
        KOBJMETHOD(g_raid_tr_free, g_raid_tr_free_raid5),
        { 0, 0 }
};

static struct g_raid_tr_class g_raid_tr_raid5_class = {
        "RAID5",
        g_raid_tr_raid5_methods,
        sizeof(struct g_raid_tr_raid5_object),
        .trc_enable = 1,
        .trc_priority = 100
};

static int
g_raid_tr_taste_raid5(struct g_raid_tr_object *tr, struct g_raid_volume *vol)
{
        struct g_raid_tr_raid5_object *trs;
        u_int qual;

        trs = (struct g_raid_tr_raid5_object *)tr;
        qual = tr->tro_volume->v_raid_level_qualifier;
        if (tr->tro_volume->v_raid_level == G_RAID_VOLUME_RL_RAID4 &&
            (qual == G_RAID_VOLUME_RLQ_R4P0 ||
             qual == G_RAID_VOLUME_RLQ_R4PN)) {
                /* RAID4 */
        } else if ((tr->tro_volume->v_raid_level == G_RAID_VOLUME_RL_RAID5 ||
            tr->tro_volume->v_raid_level == G_RAID_VOLUME_RL_RAID5E ||
            tr->tro_volume->v_raid_level == G_RAID_VOLUME_RL_RAID5EE ||
            tr->tro_volume->v_raid_level == G_RAID_VOLUME_RL_RAID5R ||
            tr->tro_volume->v_raid_level == G_RAID_VOLUME_RL_RAID6 ||
            tr->tro_volume->v_raid_level == G_RAID_VOLUME_RL_RAIDMDF) &&
            (qual == G_RAID_VOLUME_RLQ_R5RA ||
             qual == G_RAID_VOLUME_RLQ_R5RS ||
             qual == G_RAID_VOLUME_RLQ_R5LA ||
             qual == G_RAID_VOLUME_RLQ_R5LS)) {
                /* RAID5/5E/5EE/5R/6/MDF */
        } else
                return (G_RAID_TR_TASTE_FAIL);
        trs->trso_starting = 1;
        return (G_RAID_TR_TASTE_SUCCEED);
}

static int
g_raid_tr_update_state_raid5(struct g_raid_volume *vol,
    struct g_raid_subdisk *sd)
{
        struct g_raid_tr_raid5_object *trs;
        struct g_raid_softc *sc;
        u_int s;
        int na, ns, nu;

        sc = vol->v_softc;
        trs = (struct g_raid_tr_raid5_object *)vol->v_tr;
        if (trs->trso_stopping &&
            (trs->trso_flags & TR_RAID5_F_DOING_SOME) == 0)
                s = G_RAID_VOLUME_S_STOPPED;
        else if (trs->trso_starting)
                s = G_RAID_VOLUME_S_STARTING;
        else {
                na = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
                ns = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_STALE) +
                    g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_RESYNC);
                nu = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_UNINITIALIZED);
                if (na == vol->v_disks_count)
                        s = G_RAID_VOLUME_S_OPTIMAL;
                else if (na + ns == vol->v_disks_count ||
                    na + ns + nu == vol->v_disks_count /* XXX: Temporary. */)
                        s = G_RAID_VOLUME_S_SUBOPTIMAL;
                else if (na == vol->v_disks_count - 1 ||
                    na + ns + nu == vol->v_disks_count)
                        s = G_RAID_VOLUME_S_DEGRADED;
                else
                        s = G_RAID_VOLUME_S_BROKEN;
        }
        if (s != vol->v_state) {
                g_raid_event_send(vol, G_RAID_VOLUME_S_ALIVE(s) ?
                    G_RAID_VOLUME_E_UP : G_RAID_VOLUME_E_DOWN,
                    G_RAID_EVENT_VOLUME);
                g_raid_change_volume_state(vol, s);
                if (!trs->trso_starting && !trs->trso_stopping)
                        g_raid_write_metadata(sc, vol, NULL, NULL);
        }
        return (0);
}

static int
g_raid_tr_event_raid5(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd, u_int event)
{

        g_raid_tr_update_state_raid5(tr->tro_volume, sd);
        return (0);
}

static int
g_raid_tr_start_raid5(struct g_raid_tr_object *tr)
{
        struct g_raid_tr_raid5_object *trs;
        struct g_raid_volume *vol;

        trs = (struct g_raid_tr_raid5_object *)tr;
        trs->trso_starting = 0;
        vol = tr->tro_volume;
        vol->v_read_only = 1;
        g_raid_tr_update_state_raid5(vol, NULL);
        return (0);
}

static int
g_raid_tr_stop_raid5(struct g_raid_tr_object *tr)
{
        struct g_raid_tr_raid5_object *trs;
        struct g_raid_volume *vol;

        trs = (struct g_raid_tr_raid5_object *)tr;
        vol = tr->tro_volume;
        trs->trso_starting = 0;
        trs->trso_stopping = 1;
        g_raid_tr_update_state_raid5(vol, NULL);
        return (0);
}

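/*
 * Map a logical read onto the data strips of the array.
 *
 * The request is walked strip by strip: nstripe is the logical strip index,
 * offset is the byte offset of the current parity row inside each subdisk,
 * no is the data subdisk of the current strip and pno is the parity subdisk
 * of the current row.  With v_rotate_parity > 1 the parity column stays on
 * the same disk for several rows; pleft counts the rows left before it moves.
 * For RAID4 the parity column is fixed (first disk for P0, last disk for PN).
 * For RAID5-style layouts the qualifier selects the rotation direction
 * (Right vs. Left) and whether data numbering restarts at disk 0 after the
 * parity disk (Restart/Asymmetric) or continues from the disk following
 * parity (Continuation/Symmetric).
 *
 * Illustration (hypothetical 4-disk left-symmetric volume, rotate_parity 1):
 * row 0 holds D0 D1 D2 P, row 1 holds D4 D5 P D3, and so on, with the
 * parity column moving one disk to the left each row.
 */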
static void
g_raid_tr_iostart_raid5_read(struct g_raid_tr_object *tr, struct bio *bp)
{
        struct g_raid_volume *vol;
        struct g_raid_subdisk *sd;
        struct bio_queue_head queue;
        struct bio *cbp;
        char *addr;
        off_t offset, start, length, nstripe, remain;
        int no, pno, ddisks, pdisks, protate, pleft;
        u_int strip_size, lvl, qual;

        vol = tr->tro_volume;
        addr = bp->bio_data;
        strip_size = vol->v_strip_size;
        lvl = tr->tro_volume->v_raid_level;
        qual = tr->tro_volume->v_raid_level_qualifier;
        protate = tr->tro_volume->v_rotate_parity;

        /* Stripe number. */
        nstripe = bp->bio_offset / strip_size;
        /* Start position in stripe. */
        start = bp->bio_offset % strip_size;
        /* Number of data and parity disks. */
        if (lvl == G_RAID_VOLUME_RL_RAIDMDF)
                pdisks = tr->tro_volume->v_mdf_pdisks;
        else if (lvl == G_RAID_VOLUME_RL_RAID5EE ||
            lvl == G_RAID_VOLUME_RL_RAID6)
                pdisks = 2;
        else
                pdisks = 1;
        ddisks = vol->v_disks_count - pdisks;
        /* Parity disk number. */
        if (lvl == G_RAID_VOLUME_RL_RAID4) {
                if (qual == 0)          /* P0 */
                        pno = 0;
                else                    /* PN */
                        pno = ddisks;
                pleft = -1;
        } else {
                pno = (nstripe / (ddisks * protate)) % vol->v_disks_count;
                pleft = protate - (nstripe / ddisks) % protate;
                if (qual >= 2) {        /* PN/Left */
                        pno = ddisks - pno;
                        if (pno < 0)
                                pno += vol->v_disks_count;
                }
        }
        /* Data disk number. */
        no = nstripe % ddisks;
        if (lvl == G_RAID_VOLUME_RL_RAID4) {
                if (qual == 0)
                        no += pdisks;
        } else if (qual & 1) {          /* Continuation/Symmetric */
                no = (pno + pdisks + no) % vol->v_disks_count;
        } else if (no >= pno)           /* Restart/Asymmetric */
                no += pdisks;
        else
                no += imax(0, pno + pdisks - vol->v_disks_count);
        /* Stripe start position in disk. */
        offset = (nstripe / ddisks) * strip_size;
        /* Length of data to operate. */
        remain = bp->bio_length;

        bioq_init(&queue);
        do {
                length = MIN(strip_size - start, remain);
                cbp = g_clone_bio(bp);
                if (cbp == NULL)
                        goto failure;
                cbp->bio_offset = offset + start;
                cbp->bio_data = addr;
                cbp->bio_length = length;
                cbp->bio_caller1 = &vol->v_subdisks[no];
                bioq_insert_tail(&queue, cbp);
                no++;
                if (lvl == G_RAID_VOLUME_RL_RAID4) {
                        no %= vol->v_disks_count;
                        if (no == pno)
                                no = (no + pdisks) % vol->v_disks_count;
                } else if (qual & 1) {  /* Continuation/Symmetric */
                        no %= vol->v_disks_count;
                        if (no == pno) {
                                if ((--pleft) <= 0) {
                                        pleft += protate;
                                        if (qual < 2)   /* P0/Right */
                                                pno++;
                                        else            /* PN/Left */
                                                pno += vol->v_disks_count - 1;
                                        pno %= vol->v_disks_count;
                                }
                                no = (pno + pdisks) % vol->v_disks_count;
                                offset += strip_size;
                        }
                } else {                /* Restart/Asymmetric */
                        if (no == pno)
                                no += pdisks;
                        if (no >= vol->v_disks_count) {
                                no -= vol->v_disks_count;
                                if ((--pleft) <= 0) {
                                        pleft += protate;
                                        if (qual < 2)   /* P0/Right */
                                                pno++;
                                        else            /* PN/Left */
                                                pno += vol->v_disks_count - 1;
                                        pno %= vol->v_disks_count;
                                }
                                if (no == pno)
                                        no += pdisks;
                                else
                                        no += imax(0, pno + pdisks -
                                            vol->v_disks_count);
                                offset += strip_size;
                        }
                }
                remain -= length;
                addr += length;
                start = 0;
        } while (remain > 0);
        while ((cbp = bioq_takefirst(&queue)) != NULL) {
                sd = cbp->bio_caller1;
                cbp->bio_caller1 = NULL;
                g_raid_subdisk_iostart(sd, cbp);
        }
        return;
failure:
        while ((cbp = bioq_takefirst(&queue)) != NULL)
                g_destroy_bio(cbp);
        if (bp->bio_error == 0)
                bp->bio_error = ENOMEM;
        g_raid_iodone(bp, bp->bio_error);
}

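/*
 * Dispatch an incoming request.  Only reads are served, and only while the
 * volume is at least SUBOPTIMAL: parity reconstruction is not implemented,
 * so I/O to a degraded volume fails with EIO, and write-type requests are
 * failed with ENODEV.
 */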
static void
g_raid_tr_iostart_raid5(struct g_raid_tr_object *tr, struct bio *bp)
{
        struct g_raid_volume *vol;

        vol = tr->tro_volume;
        if (vol->v_state < G_RAID_VOLUME_S_SUBOPTIMAL) {
                g_raid_iodone(bp, EIO);
                return;
        }
        switch (bp->bio_cmd) {
        case BIO_READ:
                g_raid_tr_iostart_raid5_read(tr, bp);
                break;
        case BIO_WRITE:
        case BIO_DELETE:
        case BIO_FLUSH:
        case BIO_SPEEDUP:
                g_raid_iodone(bp, ENODEV);
                break;
        default:
                KASSERT(1 == 0, ("Invalid command here: %u (volume=%s)",
                    bp->bio_cmd, vol->v_name));
                break;
        }
}

static void
g_raid_tr_iodone_raid5(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd, struct bio *bp)
{
        struct bio *pbp;

        pbp = bp->bio_parent;
        if (pbp->bio_error == 0)
                pbp->bio_error = bp->bio_error;
        pbp->bio_inbed++;
        g_destroy_bio(bp);
        if (pbp->bio_children == pbp->bio_inbed) {
                pbp->bio_completed = pbp->bio_length;
                g_raid_iodone(pbp, pbp->bio_error);
        }
}

static int
g_raid_tr_kerneldump_raid5(struct g_raid_tr_object *tr, void *virtual,
    off_t offset, size_t length)
{

        return (ENODEV);
}

static int
g_raid_tr_locked_raid5(struct g_raid_tr_object *tr, void *argp)
{
        struct bio *bp;
        struct g_raid_subdisk *sd;

        bp = (struct bio *)argp;
        sd = (struct g_raid_subdisk *)bp->bio_caller1;
        g_raid_subdisk_iostart(sd, bp);

        return (0);
}

static int
g_raid_tr_free_raid5(struct g_raid_tr_object *tr)
{
        struct g_raid_tr_raid5_object *trs;

        trs = (struct g_raid_tr_raid5_object *)tr;

        if (trs->trso_buffer != NULL) {
                free(trs->trso_buffer, M_TR_RAID5);
                trs->trso_buffer = NULL;
        }
        return (0);
}

G_RAID_TR_DECLARE(raid5, "RAID5");