/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2010 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <geom/geom.h>
#include <geom/geom_dbg.h>
#include "geom/raid/g_raid.h"
#include "g_raid_tr_if.h"

SYSCTL_DECL(_kern_geom_raid_raid1);

#define RAID1_REBUILD_SLAB	(1 << 20) /* One transaction in a rebuild */
static int g_raid1_rebuild_slab = RAID1_REBUILD_SLAB;
SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_slab_size, CTLFLAG_RWTUN,
    &g_raid1_rebuild_slab, 0,
    "Amount of the disk to rebuild each read/write cycle of the rebuild.");

#define RAID1_REBUILD_FAIR_IO 20 /* use 1/x of the available I/O */
static int g_raid1_rebuild_fair_io = RAID1_REBUILD_FAIR_IO;
SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_fair_io, CTLFLAG_RWTUN,
    &g_raid1_rebuild_fair_io, 0,
    "Fraction of the I/O bandwidth to use when disk busy for rebuild.");

#define RAID1_REBUILD_CLUSTER_IDLE 100
static int g_raid1_rebuild_cluster_idle = RAID1_REBUILD_CLUSTER_IDLE;
SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_cluster_idle, CTLFLAG_RWTUN,
    &g_raid1_rebuild_cluster_idle, 0,
    "Number of slabs to do each time we trigger a rebuild cycle");

#define RAID1_REBUILD_META_UPDATE 1024 /* update meta data every 1GB or so */
static int g_raid1_rebuild_meta_update = RAID1_REBUILD_META_UPDATE;
SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_meta_update, CTLFLAG_RWTUN,
    &g_raid1_rebuild_meta_update, 0,
    "When to update the meta data.");

static MALLOC_DEFINE(M_TR_RAID1, "tr_raid1_data", "GEOM_RAID RAID1 data");

#define TR_RAID1_NONE 0
#define TR_RAID1_REBUILD 1
#define TR_RAID1_RESYNC 2

#define TR_RAID1_F_DOING_SOME	0x1
#define TR_RAID1_F_LOCKED	0x2
#define TR_RAID1_F_ABORT	0x4

struct g_raid_tr_raid1_object {
	struct g_raid_tr_object	 trso_base;
	int			 trso_starting;		/* Volume is starting. */
	int			 trso_stopping;		/* Volume is stopping. */
	int			 trso_type;		/* TR_RAID1_NONE/REBUILD/RESYNC. */
	int			 trso_recover_slabs;	/* slabs before rest */
	int			 trso_fair_io;		/* Regular I/Os before rebuild I/O. */
	int			 trso_meta_update;	/* Slabs before metadata update. */
	int			 trso_flags;		/* TR_RAID1_F_* flags. */
	struct g_raid_subdisk	*trso_failed_sd;	/* like per volume */
	void			*trso_buffer;		/* Buffer space */
	struct bio		 trso_bio;
};

static g_raid_tr_taste_t g_raid_tr_taste_raid1;
static g_raid_tr_event_t g_raid_tr_event_raid1;
static g_raid_tr_start_t g_raid_tr_start_raid1;
static g_raid_tr_stop_t g_raid_tr_stop_raid1;
static g_raid_tr_iostart_t g_raid_tr_iostart_raid1;
static g_raid_tr_iodone_t g_raid_tr_iodone_raid1;
static g_raid_tr_kerneldump_t g_raid_tr_kerneldump_raid1;
static g_raid_tr_locked_t g_raid_tr_locked_raid1;
static g_raid_tr_idle_t g_raid_tr_idle_raid1;
static g_raid_tr_free_t g_raid_tr_free_raid1;

static kobj_method_t g_raid_tr_raid1_methods[] = {
	KOBJMETHOD(g_raid_tr_taste,	g_raid_tr_taste_raid1),
	KOBJMETHOD(g_raid_tr_event,	g_raid_tr_event_raid1),
	KOBJMETHOD(g_raid_tr_start,	g_raid_tr_start_raid1),
	KOBJMETHOD(g_raid_tr_stop,	g_raid_tr_stop_raid1),
	KOBJMETHOD(g_raid_tr_iostart,	g_raid_tr_iostart_raid1),
	KOBJMETHOD(g_raid_tr_iodone,	g_raid_tr_iodone_raid1),
	KOBJMETHOD(g_raid_tr_kerneldump, g_raid_tr_kerneldump_raid1),
	KOBJMETHOD(g_raid_tr_locked,	g_raid_tr_locked_raid1),
	KOBJMETHOD(g_raid_tr_idle,	g_raid_tr_idle_raid1),
	KOBJMETHOD(g_raid_tr_free,	g_raid_tr_free_raid1),
	{ 0, 0 }
};

static struct g_raid_tr_class g_raid_tr_raid1_class = {
	"RAID1",
	g_raid_tr_raid1_methods,
	sizeof(struct g_raid_tr_raid1_object),
	.trc_enable = 1,
	.trc_priority = 100,
	.trc_accept_unmapped = 1
};

static void g_raid_tr_raid1_rebuild_abort(struct g_raid_tr_object *tr);
static void g_raid_tr_raid1_maybe_rebuild(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd);

static int
g_raid_tr_taste_raid1(struct g_raid_tr_object *tr, struct g_raid_volume *vol)
{
	struct g_raid_tr_raid1_object *trs;

	trs = (struct g_raid_tr_raid1_object *)tr;
	if (tr->tro_volume->v_raid_level != G_RAID_VOLUME_RL_RAID1 ||
	    (tr->tro_volume->v_raid_level_qualifier != G_RAID_VOLUME_RLQ_R1SM &&
	     tr->tro_volume->v_raid_level_qualifier != G_RAID_VOLUME_RLQ_R1MM))
		return (G_RAID_TR_TASTE_FAIL);
	trs->trso_starting = 1;
	return (G_RAID_TR_TASTE_SUCCEED);
}

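/*
 * Re-evaluate the volume state from the subdisk states.  If no subdisk is
 * ACTIVE, promote the most complete one so the volume can keep running,
 * then derive OPTIMAL/SUBOPTIMAL/DEGRADED/BROKEN from the ACTIVE and
 * STALE/RESYNC counts, possibly starting a rebuild along the way.
 */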
static int
g_raid_tr_update_state_raid1(struct g_raid_volume *vol,
    struct g_raid_subdisk *sd)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_softc *sc;
	struct g_raid_subdisk *tsd, *bestsd;
	u_int s;
	int i, na, ns;

	sc = vol->v_softc;
	trs = (struct g_raid_tr_raid1_object *)vol->v_tr;
	if (trs->trso_stopping &&
	    (trs->trso_flags & TR_RAID1_F_DOING_SOME) == 0)
		s = G_RAID_VOLUME_S_STOPPED;
	else if (trs->trso_starting)
		s = G_RAID_VOLUME_S_STARTING;
	else {
		/* Make sure we have at least one ACTIVE disk. */
		na = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
		if (na == 0) {
			/*
			 * Critical situation! We have no active disks at
			 * all!  Choose the best disk we have to make it
			 * active.
			 */
			bestsd = &vol->v_subdisks[0];
			for (i = 1; i < vol->v_disks_count; i++) {
				tsd = &vol->v_subdisks[i];
				if (tsd->sd_state > bestsd->sd_state)
					bestsd = tsd;
				else if (tsd->sd_state == bestsd->sd_state &&
				    (tsd->sd_state == G_RAID_SUBDISK_S_REBUILD ||
				     tsd->sd_state == G_RAID_SUBDISK_S_RESYNC) &&
				    tsd->sd_rebuild_pos > bestsd->sd_rebuild_pos)
					bestsd = tsd;
			}
			if (bestsd->sd_state >= G_RAID_SUBDISK_S_UNINITIALIZED) {
				/* We found a reasonable candidate. */
				G_RAID_DEBUG1(1, sc,
				    "Promote subdisk %s:%d from %s to ACTIVE.",
				    vol->v_name, bestsd->sd_pos,
				    g_raid_subdisk_state2str(bestsd->sd_state));
				g_raid_change_subdisk_state(bestsd,
				    G_RAID_SUBDISK_S_ACTIVE);
				g_raid_write_metadata(sc,
				    vol, bestsd, bestsd->sd_disk);
			}
		}
		na = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
		ns = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_STALE) +
		    g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_RESYNC);
		if (na == vol->v_disks_count)
			s = G_RAID_VOLUME_S_OPTIMAL;
		else if (na + ns == vol->v_disks_count)
			s = G_RAID_VOLUME_S_SUBOPTIMAL;
		else if (na > 0)
			s = G_RAID_VOLUME_S_DEGRADED;
		else
			s = G_RAID_VOLUME_S_BROKEN;
		g_raid_tr_raid1_maybe_rebuild(vol->v_tr, sd);
	}
	if (s != vol->v_state) {
		g_raid_event_send(vol, G_RAID_VOLUME_S_ALIVE(s) ?
		    G_RAID_VOLUME_E_UP : G_RAID_VOLUME_E_DOWN,
		    G_RAID_EVENT_VOLUME);
		g_raid_change_volume_state(vol, s);
		if (!trs->trso_starting && !trs->trso_stopping)
			g_raid_write_metadata(sc, vol, NULL, NULL);
	}
	return (0);
}

static void
g_raid_tr_raid1_fail_disk(struct g_raid_softc *sc, struct g_raid_subdisk *sd,
    struct g_raid_disk *disk)
{
	/*
	 * We don't fail the last disk in the pack, since it still has decent
	 * data on it and that's better than failing the disk if it is the
	 * root file system.
	 *
	 * XXX should this be controlled via a tunable?  It makes sense for
	 * the volume that has / on it.  I can't think of a case where we'd
	 * want the volume to go away on this kind of event.
	 */
	if (g_raid_nsubdisks(sd->sd_volume, G_RAID_SUBDISK_S_ACTIVE) == 1 &&
	    g_raid_get_subdisk(sd->sd_volume, G_RAID_SUBDISK_S_ACTIVE) == sd)
		return;
	g_raid_fail_disk(sc, sd, disk);
}

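/*
 * Perform one rebuild transaction: read a single slab (up to
 * g_raid1_rebuild_slab bytes) from an ACTIVE subdisk into the preallocated
 * buffer.  The range is locked first and the lock callback starts the read;
 * the matching write to the failed subdisk is issued from the iodone path.
 */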
static void
g_raid_tr_raid1_rebuild_some(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_subdisk *sd, *good_sd;
	struct bio *bp;

	trs = (struct g_raid_tr_raid1_object *)tr;
	if (trs->trso_flags & TR_RAID1_F_DOING_SOME)
		return;
	sd = trs->trso_failed_sd;
	good_sd = g_raid_get_subdisk(sd->sd_volume, G_RAID_SUBDISK_S_ACTIVE);
	if (good_sd == NULL) {
		g_raid_tr_raid1_rebuild_abort(tr);
		return;
	}
	bp = &trs->trso_bio;
	memset(bp, 0, sizeof(*bp));
	bp->bio_offset = sd->sd_rebuild_pos;
	bp->bio_length = MIN(g_raid1_rebuild_slab,
	    sd->sd_size - sd->sd_rebuild_pos);
	bp->bio_data = trs->trso_buffer;
	bp->bio_cmd = BIO_READ;
	bp->bio_cflags = G_RAID_BIO_FLAG_SYNC;
	bp->bio_caller1 = good_sd;
	trs->trso_flags |= TR_RAID1_F_DOING_SOME;
	trs->trso_flags |= TR_RAID1_F_LOCKED;
	g_raid_lock_range(sd->sd_volume,	/* Lock callback starts I/O */
	    bp->bio_offset, bp->bio_length, NULL, bp);
}

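/*
 * Common cleanup after a rebuild ends, successfully or not: record the
 * final position in the metadata, release the slab buffer and re-evaluate
 * the volume state.
 */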
static void
g_raid_tr_raid1_rebuild_done(struct g_raid_tr_raid1_object *trs)
{
	struct g_raid_volume *vol;
	struct g_raid_subdisk *sd;

	vol = trs->trso_base.tro_volume;
	sd = trs->trso_failed_sd;
	g_raid_write_metadata(vol->v_softc, vol, sd, sd->sd_disk);
	free(trs->trso_buffer, M_TR_RAID1);
	trs->trso_buffer = NULL;
	trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
	trs->trso_type = TR_RAID1_NONE;
	trs->trso_recover_slabs = 0;
	trs->trso_failed_sd = NULL;
	g_raid_tr_update_state_raid1(vol, NULL);
}

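/*
 * The whole subdisk has been copied: mark it ACTIVE and finish up.
 */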
static void
g_raid_tr_raid1_rebuild_finish(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_subdisk *sd;

	trs = (struct g_raid_tr_raid1_object *)tr;
	sd = trs->trso_failed_sd;
	G_RAID_DEBUG1(0, tr->tro_volume->v_softc,
	    "Subdisk %s:%d-%s rebuild completed.",
	    sd->sd_volume->v_name, sd->sd_pos,
	    sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
	g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_ACTIVE);
	sd->sd_rebuild_pos = 0;
	g_raid_tr_raid1_rebuild_done(trs);
}

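/*
 * Abort an in-progress rebuild.  If a slab I/O is still in flight, just set
 * the abort flag and let the iodone path call us again; otherwise drop any
 * range lock we hold and clean up immediately.
 */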
static void
g_raid_tr_raid1_rebuild_abort(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_subdisk *sd;
	struct g_raid_volume *vol;
	off_t len;

	vol = tr->tro_volume;
	trs = (struct g_raid_tr_raid1_object *)tr;
	sd = trs->trso_failed_sd;
	if (trs->trso_flags & TR_RAID1_F_DOING_SOME) {
		G_RAID_DEBUG1(1, vol->v_softc,
		    "Subdisk %s:%d-%s rebuild is aborting.",
		    sd->sd_volume->v_name, sd->sd_pos,
		    sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
		trs->trso_flags |= TR_RAID1_F_ABORT;
	} else {
		G_RAID_DEBUG1(0, vol->v_softc,
		    "Subdisk %s:%d-%s rebuild aborted.",
		    sd->sd_volume->v_name, sd->sd_pos,
		    sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
		trs->trso_flags &= ~TR_RAID1_F_ABORT;
		if (trs->trso_flags & TR_RAID1_F_LOCKED) {
			trs->trso_flags &= ~TR_RAID1_F_LOCKED;
			len = MIN(g_raid1_rebuild_slab,
			    sd->sd_size - sd->sd_rebuild_pos);
			g_raid_unlock_range(tr->tro_volume,
			    sd->sd_rebuild_pos, len);
		}
		g_raid_tr_raid1_rebuild_done(trs);
	}
}

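/*
 * Pick the subdisk to rebuild and start copying.  Preference order is
 * RESYNC, REBUILD, STALE (restarted as RESYNC), then UNINITIALIZED/NEW
 * (started as REBUILD from position zero).  Requires at least one ACTIVE
 * subdisk to copy from.
 */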
static void
g_raid_tr_raid1_rebuild_start(struct g_raid_tr_object *tr)
{
	struct g_raid_volume *vol;
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_subdisk *sd, *fsd;

	vol = tr->tro_volume;
	trs = (struct g_raid_tr_raid1_object *)tr;
	if (trs->trso_failed_sd) {
		G_RAID_DEBUG1(1, vol->v_softc,
		    "Already rebuild in start rebuild. pos %jd\n",
		    (intmax_t)trs->trso_failed_sd->sd_rebuild_pos);
		return;
	}
	sd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_ACTIVE);
	if (sd == NULL) {
		G_RAID_DEBUG1(1, vol->v_softc,
		    "No active disk to rebuild.  night night.");
		return;
	}
	fsd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_RESYNC);
	if (fsd == NULL)
		fsd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_REBUILD);
	if (fsd == NULL) {
		fsd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_STALE);
		if (fsd != NULL) {
			fsd->sd_rebuild_pos = 0;
			g_raid_change_subdisk_state(fsd,
			    G_RAID_SUBDISK_S_RESYNC);
			g_raid_write_metadata(vol->v_softc, vol, fsd, NULL);
		} else {
			fsd = g_raid_get_subdisk(vol,
			    G_RAID_SUBDISK_S_UNINITIALIZED);
			if (fsd == NULL)
				fsd = g_raid_get_subdisk(vol,
				    G_RAID_SUBDISK_S_NEW);
			if (fsd != NULL) {
				fsd->sd_rebuild_pos = 0;
				g_raid_change_subdisk_state(fsd,
				    G_RAID_SUBDISK_S_REBUILD);
				g_raid_write_metadata(vol->v_softc,
				    vol, fsd, NULL);
			}
		}
	}
	if (fsd == NULL) {
		G_RAID_DEBUG1(1, vol->v_softc,
		    "No failed disk to rebuild.  night night.");
		return;
	}
	trs->trso_failed_sd = fsd;
	G_RAID_DEBUG1(0, vol->v_softc,
	    "Subdisk %s:%d-%s rebuild start at %jd.",
	    fsd->sd_volume->v_name, fsd->sd_pos,
	    fsd->sd_disk ? g_raid_get_diskname(fsd->sd_disk) : "[none]",
	    trs->trso_failed_sd->sd_rebuild_pos);
	trs->trso_type = TR_RAID1_REBUILD;
	trs->trso_buffer = malloc(g_raid1_rebuild_slab, M_TR_RAID1, M_WAITOK);
	trs->trso_meta_update = g_raid1_rebuild_meta_update;
	g_raid_tr_raid1_rebuild_some(tr);
}

static void
g_raid_tr_raid1_maybe_rebuild(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd)
{
	struct g_raid_volume *vol;
	struct g_raid_tr_raid1_object *trs;
	int na, nr;

	/*
	 * If we're stopping, don't do anything.  If we don't have at least
	 * one good disk and one bad disk, we don't do anything.  And if
	 * there's a 'good disk' stored in the trs, then we're in progress
	 * and we punt.  If we make it past all these checks, we need to
	 * rebuild.
	 */
	vol = tr->tro_volume;
	trs = (struct g_raid_tr_raid1_object *)tr;
	if (trs->trso_stopping)
		return;
	na = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
	nr = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_REBUILD) +
	    g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_RESYNC);
	switch(trs->trso_type) {
	case TR_RAID1_NONE:
		if (na == 0)
			return;
		if (nr == 0) {
			nr = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_NEW) +
			    g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_STALE) +
			    g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_UNINITIALIZED);
			if (nr == 0)
				return;
		}
		g_raid_tr_raid1_rebuild_start(tr);
		break;
	case TR_RAID1_REBUILD:
		if (na == 0 || nr == 0 || trs->trso_failed_sd == sd)
			g_raid_tr_raid1_rebuild_abort(tr);
		break;
	case TR_RAID1_RESYNC:
		break;
	}
}

static int
g_raid_tr_event_raid1(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd, u_int event)
{

	g_raid_tr_update_state_raid1(tr->tro_volume, sd);
	return (0);
}

static int
g_raid_tr_start_raid1(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_volume *vol;

	trs = (struct g_raid_tr_raid1_object *)tr;
	vol = tr->tro_volume;
	trs->trso_starting = 0;
	g_raid_tr_update_state_raid1(vol, NULL);
	return (0);
}

static int
g_raid_tr_stop_raid1(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_volume *vol;

	trs = (struct g_raid_tr_raid1_object *)tr;
	vol = tr->tro_volume;
	trs->trso_starting = 0;
	trs->trso_stopping = 1;
	g_raid_tr_update_state_raid1(vol, NULL);
	return (0);
}

/*
 * Select the disk to read from.  Take into account: subdisk state, running
 * error recovery, average disk load, head position and possible cache hits.
 */
#define ABS(x)		(((x) >= 0) ? (x) : (-(x)))
static struct g_raid_subdisk *
g_raid_tr_raid1_select_read_disk(struct g_raid_volume *vol, struct bio *bp,
    u_int mask)
{
	struct g_raid_subdisk *sd, *best;
	int i, prio, bestprio;

	best = NULL;
	bestprio = INT_MAX;
	for (i = 0; i < vol->v_disks_count; i++) {
		sd = &vol->v_subdisks[i];
		if (sd->sd_state != G_RAID_SUBDISK_S_ACTIVE &&
		    ((sd->sd_state != G_RAID_SUBDISK_S_REBUILD &&
		      sd->sd_state != G_RAID_SUBDISK_S_RESYNC) ||
		     bp->bio_offset + bp->bio_length > sd->sd_rebuild_pos))
			continue;
		if ((mask & (1 << i)) != 0)
			continue;
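		/*
		 * Priority is lower-is-better: start from the subdisk's
		 * current load, heavily penalize subdisks with error
		 * recovery in progress or in a non-ACTIVE state, and give
		 * a bonus when the disk head is at (or near) the requested
		 * offset so sequential reads stay on one member.
		 */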
		prio = G_RAID_SUBDISK_LOAD(sd);
		prio += min(sd->sd_recovery, 255) << 22;
		prio += (G_RAID_SUBDISK_S_ACTIVE - sd->sd_state) << 16;
		/* If disk head is precisely in position - highly prefer it. */
		if (G_RAID_SUBDISK_POS(sd) == bp->bio_offset)
			prio -= 2 * G_RAID_SUBDISK_LOAD_SCALE;
		else
		/* If disk head is close to position - prefer it. */
		if (ABS(G_RAID_SUBDISK_POS(sd) - bp->bio_offset) <
		    G_RAID_SUBDISK_TRACK_SIZE)
			prio -= 1 * G_RAID_SUBDISK_LOAD_SCALE;
		if (prio < bestprio) {
			best = sd;
			bestprio = prio;
		}
	}
	return (best);
}

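/*
 * Start a read: pick the best subdisk and send a single clone of the bio
 * to it.
 */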
static void
g_raid_tr_iostart_raid1_read(struct g_raid_tr_object *tr, struct bio *bp)
{
	struct g_raid_subdisk *sd;
	struct bio *cbp;

	sd = g_raid_tr_raid1_select_read_disk(tr->tro_volume, bp, 0);
	KASSERT(sd != NULL, ("No active disks in volume %s.",
	    tr->tro_volume->v_name));

	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_raid_iodone(bp, ENOMEM);
		return;
	}

	g_raid_subdisk_iostart(sd, cbp);
}

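/*
 * Start a write or delete: clone the bio to every subdisk that must see
 * the data (ACTIVE, the already-rebuilt part of REBUILD, and STALE/RESYNC
 * members), allocating all clones before launching any of them.
 */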
static void
g_raid_tr_iostart_raid1_write(struct g_raid_tr_object *tr, struct bio *bp)
{
	struct g_raid_volume *vol;
	struct g_raid_subdisk *sd;
	struct bio_queue_head queue;
	struct bio *cbp;
	int i;

	vol = tr->tro_volume;

	/*
	 * Allocate all bios before sending any request, so we can return
	 * ENOMEM in a nice and clean way.
	 */
	bioq_init(&queue);
	for (i = 0; i < vol->v_disks_count; i++) {
		sd = &vol->v_subdisks[i];
		switch (sd->sd_state) {
		case G_RAID_SUBDISK_S_ACTIVE:
			break;
		case G_RAID_SUBDISK_S_REBUILD:
			/*
			 * When rebuilding, only part of this subdisk is
			 * writable, the rest will be written as part of
			 * that process.
			 */
			if (bp->bio_offset >= sd->sd_rebuild_pos)
				continue;
			break;
		case G_RAID_SUBDISK_S_STALE:
		case G_RAID_SUBDISK_S_RESYNC:
			/*
			 * Resyncing still writes on the theory that the
			 * resync'd disk is very close and writing it will
			 * keep it that way better if we keep up while
			 * resyncing.
			 */
			break;
		default:
			continue;
		}
		cbp = g_clone_bio(bp);
		if (cbp == NULL)
			goto failure;
		cbp->bio_caller1 = sd;
		bioq_insert_tail(&queue, cbp);
	}
	while ((cbp = bioq_takefirst(&queue)) != NULL) {
		sd = cbp->bio_caller1;
		cbp->bio_caller1 = NULL;
		g_raid_subdisk_iostart(sd, cbp);
	}
	return;
failure:
	while ((cbp = bioq_takefirst(&queue)) != NULL)
		g_destroy_bio(cbp);
	if (bp->bio_error == 0)
		bp->bio_error = ENOMEM;
	g_raid_iodone(bp, bp->bio_error);
}

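/*
 * Transformation entry point for regular I/O: fail requests unless the
 * volume is usable, opportunistically let the rebuild make progress on a
 * busy volume, and dispatch by command type.
 */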
static void
g_raid_tr_iostart_raid1(struct g_raid_tr_object *tr, struct bio *bp)
{
	struct g_raid_volume *vol;
	struct g_raid_tr_raid1_object *trs;

	vol = tr->tro_volume;
	trs = (struct g_raid_tr_raid1_object *)tr;
	if (vol->v_state != G_RAID_VOLUME_S_OPTIMAL &&
	    vol->v_state != G_RAID_VOLUME_S_SUBOPTIMAL &&
	    vol->v_state != G_RAID_VOLUME_S_DEGRADED) {
		g_raid_iodone(bp, EIO);
		return;
	}
	/*
	 * If we're rebuilding, squeeze in rebuild activity every so often,
	 * even when the disk is busy.  Be sure to only count real I/O
	 * to the disk.  All 'SPECIAL' I/O is traffic generated to the disk
	 * by this module.
	 */
	if (trs->trso_failed_sd != NULL &&
	    !(bp->bio_cflags & G_RAID_BIO_FLAG_SPECIAL)) {
		/* Make this new or running now round short. */
		trs->trso_recover_slabs = 0;
		if (--trs->trso_fair_io <= 0) {
			trs->trso_fair_io = g_raid1_rebuild_fair_io;
			g_raid_tr_raid1_rebuild_some(tr);
		}
	}
	switch (bp->bio_cmd) {
	case BIO_READ:
		g_raid_tr_iostart_raid1_read(tr, bp);
		break;
	case BIO_WRITE:
	case BIO_DELETE:
		g_raid_tr_iostart_raid1_write(tr, bp);
		break;
	case BIO_SPEEDUP:
	case BIO_FLUSH:
		g_raid_tr_flush_common(tr, bp);
		break;
	default:
		KASSERT(1 == 0, ("Invalid command here: %u (volume=%s)",
		    bp->bio_cmd, vol->v_name));
		break;
	}
}

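/*
 * I/O completion.  Rebuild/resync traffic (G_RAID_BIO_FLAG_SYNC) drives the
 * copy state machine: a finished read is turned into a write to the failed
 * subdisk, a finished write advances the rebuild position and schedules the
 * next slab.  Regular reads that failed are retried on another subdisk and,
 * if recovered, written back to the failing one to remap the bad sector;
 * write errors fail the subdisk.
 */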
static void
g_raid_tr_iodone_raid1(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd, struct bio *bp)
{
	struct bio *cbp;
	struct g_raid_subdisk *nsd;
	struct g_raid_volume *vol;
	struct bio *pbp;
	struct g_raid_tr_raid1_object *trs;
	uintptr_t *mask;
	int error, do_write;

	trs = (struct g_raid_tr_raid1_object *)tr;
	vol = tr->tro_volume;
	if (bp->bio_cflags & G_RAID_BIO_FLAG_SYNC) {
		/*
		 * This operation is part of a rebuild or resync operation.
		 * See what work just got done, then schedule the next bit of
		 * work, if any.  Rebuild/resync is done a little bit at a
		 * time.  Either when a timeout happens, or after we get a
		 * bunch of I/Os to the disk (to make sure an active system
		 * will complete in a sane amount of time).
		 *
		 * We are set up to do differing amounts of work for each of
		 * these cases.  So long as the slab count is smallish (less
		 * than 50 or so, I'd guess, but that's just a WAG), we
		 * shouldn't have any bio starvation issues.  For active
		 * disks, we do 5MB of data, for inactive ones, we do 50MB.
		 */
		if (trs->trso_type == TR_RAID1_REBUILD) {
			if (bp->bio_cmd == BIO_READ) {
				/* Immediately abort rebuild, if requested. */
				if (trs->trso_flags & TR_RAID1_F_ABORT) {
					trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
					g_raid_tr_raid1_rebuild_abort(tr);
					return;
				}

				/* On read error, skip and cross fingers. */
				if (bp->bio_error != 0) {
					G_RAID_LOGREQ(0, bp,
					    "Read error during rebuild (%d), "
					    "possible data loss!",
					    bp->bio_error);
					goto rebuild_round_done;
				}

				/*
				 * The read operation finished, queue the
				 * write and get out.
				 */
				G_RAID_LOGREQ(4, bp, "rebuild read done. %d",
				    bp->bio_error);
				bp->bio_cmd = BIO_WRITE;
				bp->bio_cflags = G_RAID_BIO_FLAG_SYNC;
				G_RAID_LOGREQ(4, bp, "Queueing rebuild write.");
				g_raid_subdisk_iostart(trs->trso_failed_sd, bp);
			} else {
				/*
				 * The write operation just finished.  Do
				 * another.  We keep cloning the master bio
				 * since it has the right buffers allocated to
				 * it.
				 */
				G_RAID_LOGREQ(4, bp,
				    "rebuild write done. Error %d",
				    bp->bio_error);
				nsd = trs->trso_failed_sd;
				if (bp->bio_error != 0 ||
				    trs->trso_flags & TR_RAID1_F_ABORT) {
					if ((trs->trso_flags &
					    TR_RAID1_F_ABORT) == 0) {
						g_raid_tr_raid1_fail_disk(sd->sd_softc,
						    nsd, nsd->sd_disk);
					}
					trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
					g_raid_tr_raid1_rebuild_abort(tr);
					return;
				}
rebuild_round_done:
				nsd = trs->trso_failed_sd;
				trs->trso_flags &= ~TR_RAID1_F_LOCKED;
				g_raid_unlock_range(sd->sd_volume,
				    bp->bio_offset, bp->bio_length);
				nsd->sd_rebuild_pos += bp->bio_length;
				if (nsd->sd_rebuild_pos >= nsd->sd_size) {
					g_raid_tr_raid1_rebuild_finish(tr);
					return;
				}

				/* Abort rebuild if we are stopping */
				if (trs->trso_stopping) {
					trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
					g_raid_tr_raid1_rebuild_abort(tr);
					return;
				}

				if (--trs->trso_meta_update <= 0) {
					g_raid_write_metadata(vol->v_softc,
					    vol, nsd, nsd->sd_disk);
					trs->trso_meta_update =
					    g_raid1_rebuild_meta_update;
				}
				trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
				if (--trs->trso_recover_slabs <= 0)
					return;
				g_raid_tr_raid1_rebuild_some(tr);
			}
		} else if (trs->trso_type == TR_RAID1_RESYNC) {
			/*
			 * read good sd, read bad sd in parallel.  when both
			 * done, compare the buffers.  write good to the bad
			 * if different.  do the next bit of work.
			 */
			panic("Somehow, we think we're doing a resync");
		}
		return;
	}
	pbp = bp->bio_parent;
	pbp->bio_inbed++;
	if (bp->bio_cmd == BIO_READ && bp->bio_error != 0) {
		/*
		 * Read failed on first drive.  Retry the read error on
		 * another disk drive, if available, before erroring out the
		 * read.
		 */
		sd->sd_disk->d_read_errs++;
		G_RAID_LOGREQ(0, bp,
		    "Read error (%d), %d read errors total",
		    bp->bio_error, sd->sd_disk->d_read_errs);

		/*
		 * If there are too many read errors, we move to degraded.
		 * XXX Do we want to FAIL the drive (eg, make the user redo
		 * everything to get it back in sync), or just degrade the
		 * drive, which kicks off a resync?
		 */
		do_write = 1;
		if (sd->sd_disk->d_read_errs > g_raid_read_err_thresh) {
			g_raid_tr_raid1_fail_disk(sd->sd_softc, sd, sd->sd_disk);
			if (pbp->bio_children == 1)
				do_write = 0;
		}

		/*
		 * Find the other disk, and try to do the I/O to it.
		 */
		mask = (uintptr_t *)(&pbp->bio_driver2);
		if (pbp->bio_children == 1) {
			/* Save original subdisk. */
			pbp->bio_driver1 = do_write ? sd : NULL;
			*mask = 0;
		}
		*mask |= 1 << sd->sd_pos;
		nsd = g_raid_tr_raid1_select_read_disk(vol, pbp, *mask);
		if (nsd != NULL && (cbp = g_clone_bio(pbp)) != NULL) {
			g_destroy_bio(bp);
			G_RAID_LOGREQ(2, cbp, "Retrying read from %d",
			    nsd->sd_pos);
			if (pbp->bio_children == 2 && do_write) {
				sd->sd_recovery++;
				cbp->bio_caller1 = nsd;
				pbp->bio_pflags = G_RAID_BIO_FLAG_LOCKED;
				/* Lock callback starts I/O */
				g_raid_lock_range(sd->sd_volume,
				    cbp->bio_offset, cbp->bio_length, pbp, cbp);
			} else {
				g_raid_subdisk_iostart(nsd, cbp);
			}
			return;
		}
		/*
		 * We can't retry.  Return the original error by falling
		 * through.  This will happen when there's only one good disk.
		 * We don't need to fail the raid, since its actual state is
		 * based on the state of the subdisks.
		 */
		G_RAID_LOGREQ(2, bp, "Couldn't retry read, failing it");
	}
	if (bp->bio_cmd == BIO_READ &&
	    bp->bio_error == 0 &&
	    pbp->bio_children > 1 &&
	    pbp->bio_driver1 != NULL) {
		/*
		 * If it was a read, and bio_children is >1, then we just
		 * recovered the data from the second drive.  We should try to
		 * write that data to the first drive if sector remapping is
		 * enabled.  A write should put the data in a new place on the
		 * disk, remapping the bad sector.  Do we need to do that by
		 * queueing a request to the main worker thread?  It doesn't
		 * affect the return code of this current read, and can be
		 * done at our leisure.  However, to make the code simpler, it
		 * is done synchronously.
		 */
		G_RAID_LOGREQ(3, bp, "Recovered data from other drive");
		cbp = g_clone_bio(pbp);
		if (cbp != NULL) {
			g_destroy_bio(bp);
			cbp->bio_cmd = BIO_WRITE;
			cbp->bio_cflags = G_RAID_BIO_FLAG_REMAP;
			G_RAID_LOGREQ(2, cbp,
			    "Attempting bad sector remap on failing drive.");
			g_raid_subdisk_iostart(pbp->bio_driver1, cbp);
			return;
		}
	}
	if (pbp->bio_pflags & G_RAID_BIO_FLAG_LOCKED) {
		/*
		 * We're done with a recovery, mark the range as unlocked.
		 * For any write errors, we aggressively fail the disk since
		 * there was both a READ and a WRITE error at this location.
		 * Both types of errors generally indicate the drive is on
		 * the verge of total failure anyway.  Better to stop trusting
		 * it now.  However, we need to reset error to 0 in that case
		 * because we're not failing the original I/O which succeeded.
		 */
		if (bp->bio_cmd == BIO_WRITE && bp->bio_error) {
			G_RAID_LOGREQ(0, bp, "Remap write failed: "
			    "failing subdisk.");
			g_raid_tr_raid1_fail_disk(sd->sd_softc, sd, sd->sd_disk);
			bp->bio_error = 0;
		}
		if (pbp->bio_driver1 != NULL) {
			((struct g_raid_subdisk *)pbp->bio_driver1)
			    ->sd_recovery--;
		}
		G_RAID_LOGREQ(2, bp, "REMAP done %d.", bp->bio_error);
		g_raid_unlock_range(sd->sd_volume, bp->bio_offset,
		    bp->bio_length);
	}
	if (pbp->bio_cmd != BIO_READ) {
		if (pbp->bio_inbed == 1 || pbp->bio_error != 0)
			pbp->bio_error = bp->bio_error;
		if (pbp->bio_cmd == BIO_WRITE && bp->bio_error != 0) {
			G_RAID_LOGREQ(0, bp, "Write failed: failing subdisk.");
			g_raid_tr_raid1_fail_disk(sd->sd_softc, sd, sd->sd_disk);
		}
		error = pbp->bio_error;
	} else
		error = bp->bio_error;
	g_destroy_bio(bp);
	if (pbp->bio_children == pbp->bio_inbed) {
		pbp->bio_completed = pbp->bio_length;
		g_raid_iodone(pbp, error);
	}
}

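/*
 * Kernel dump: write the block to every subdisk that would also receive a
 * regular write; succeed if at least one copy made it to disk.
 */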
static int
g_raid_tr_kerneldump_raid1(struct g_raid_tr_object *tr, void *virtual,
    off_t offset, size_t length)
{
	struct g_raid_volume *vol;
	struct g_raid_subdisk *sd;
	int error, i, ok;

	vol = tr->tro_volume;
	error = 0;
	ok = 0;
	for (i = 0; i < vol->v_disks_count; i++) {
		sd = &vol->v_subdisks[i];
		switch (sd->sd_state) {
		case G_RAID_SUBDISK_S_ACTIVE:
			break;
		case G_RAID_SUBDISK_S_REBUILD:
			/*
			 * When rebuilding, only part of this subdisk is
			 * writable, the rest will be written as part of
			 * that process.
			 */
			if (offset >= sd->sd_rebuild_pos)
				continue;
			break;
		case G_RAID_SUBDISK_S_STALE:
		case G_RAID_SUBDISK_S_RESYNC:
			/*
			 * Resyncing still writes on the theory that the
			 * resync'd disk is very close and writing it will
			 * keep it that way better if we keep up while
			 * resyncing.
			 */
			break;
		default:
			continue;
		}
		error = g_raid_subdisk_kerneldump(sd, virtual, offset, length);
		if (error == 0)
			ok++;
	}
	return (ok > 0 ? 0 : error);
}

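/*
 * Range-lock callback: the deferred bio stashed in bio_caller1 can now be
 * started on its subdisk.
 */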
static int
g_raid_tr_locked_raid1(struct g_raid_tr_object *tr, void *argp)
{
	struct bio *bp;
	struct g_raid_subdisk *sd;

	bp = (struct bio *)argp;
	sd = (struct g_raid_subdisk *)bp->bio_caller1;
	g_raid_subdisk_iostart(sd, bp);

	return (0);
}

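/*
 * Idle callback: the volume has gone quiet, so refill the fairness and
 * cluster counters and let the rebuild run at full speed.
 */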
static int
g_raid_tr_idle_raid1(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;

	trs = (struct g_raid_tr_raid1_object *)tr;
	trs->trso_fair_io = g_raid1_rebuild_fair_io;
	trs->trso_recover_slabs = g_raid1_rebuild_cluster_idle;
	if (trs->trso_type == TR_RAID1_REBUILD)
		g_raid_tr_raid1_rebuild_some(tr);
	return (0);
}

static int
g_raid_tr_free_raid1(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;

	trs = (struct g_raid_tr_raid1_object *)tr;

	if (trs->trso_buffer != NULL) {
		free(trs->trso_buffer, M_TR_RAID1);
		trs->trso_buffer = NULL;
	}
	return (0);
}

G_RAID_TR_DECLARE(raid1, "RAID1");