/*-
 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/eventhandler.h>
#include <vm/uma.h>
#include <geom/geom.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/sched.h>
#include <geom/raid3/g_raid3.h>

FEATURE(geom_raid3, "GEOM RAID-3 functionality");

static MALLOC_DEFINE(M_RAID3, "raid3_data", "GEOM_RAID3 Data");

SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, raid3, CTLFLAG_RW, 0,
    "GEOM_RAID3 stuff");
u_int g_raid3_debug = 0;
SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, debug, CTLFLAG_RWTUN, &g_raid3_debug, 0,
    "Debug level");
static u_int g_raid3_timeout = 4;
SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, timeout, CTLFLAG_RWTUN, &g_raid3_timeout,
    0, "Time to wait on all raid3 components");
static u_int g_raid3_idletime = 5;
SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, idletime, CTLFLAG_RWTUN,
    &g_raid3_idletime, 0, "Mark components as clean when idling");
static u_int g_raid3_disconnect_on_failure = 1;
SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, disconnect_on_failure, CTLFLAG_RWTUN,
    &g_raid3_disconnect_on_failure, 0, "Disconnect component on I/O failure.");
static u_int g_raid3_syncreqs = 2;
SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, sync_requests, CTLFLAG_RDTUN,
    &g_raid3_syncreqs, 0, "Parallel synchronization I/O requests.");
static u_int g_raid3_use_malloc = 0;
SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, use_malloc, CTLFLAG_RDTUN,
    &g_raid3_use_malloc, 0, "Use malloc(9) instead of uma(9).");

static u_int g_raid3_n64k = 50;
SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, n64k, CTLFLAG_RDTUN, &g_raid3_n64k, 0,
    "Maximum number of 64kB allocations");
static u_int g_raid3_n16k = 200;
SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, n16k, CTLFLAG_RDTUN, &g_raid3_n16k, 0,
    "Maximum number of 16kB allocations");
static u_int g_raid3_n4k = 1200;
SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, n4k, CTLFLAG_RDTUN, &g_raid3_n4k, 0,
    "Maximum number of 4kB allocations");

static SYSCTL_NODE(_kern_geom_raid3, OID_AUTO, stat, CTLFLAG_RW, 0,
    "GEOM_RAID3 statistics");
static u_int g_raid3_parity_mismatch = 0;
SYSCTL_UINT(_kern_geom_raid3_stat, OID_AUTO, parity_mismatch, CTLFLAG_RD,
    &g_raid3_parity_mismatch, 0, "Number of failures in VERIFY mode");
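
/*
 * The knobs above live under the kern.geom.raid3 sysctl tree; those marked
 * CTLFLAG_RWTUN/CTLFLAG_RDTUN may also be set as loader tunables.
 * Illustrative usage:
 *
 *	# sysctl kern.geom.raid3.debug=2
 *	# echo kern.geom.raid3.sync_requests=4 >> /boot/loader.conf
 */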

#define	MSLEEP(ident, mtx, priority, wmesg, timeout)	do {		\
	G_RAID3_DEBUG(4, "%s: Sleeping %p.", __func__, (ident));	\
	msleep((ident), (mtx), (priority), (wmesg), (timeout));		\
	G_RAID3_DEBUG(4, "%s: Woken up %p.", __func__, (ident));	\
} while (0)

static eventhandler_tag g_raid3_post_sync = NULL;
static int g_raid3_shutdown = 0;

static int g_raid3_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp);
static g_taste_t g_raid3_taste;
static void g_raid3_init(struct g_class *mp);
static void g_raid3_fini(struct g_class *mp);

struct g_class g_raid3_class = {
	.name = G_RAID3_CLASS_NAME,
	.version = G_VERSION,
	.ctlreq = g_raid3_config,
	.taste = g_raid3_taste,
	.destroy_geom = g_raid3_destroy_geom,
	.init = g_raid3_init,
	.fini = g_raid3_fini
};

static void g_raid3_destroy_provider(struct g_raid3_softc *sc);
static int g_raid3_update_disk(struct g_raid3_disk *disk, u_int state);
static void g_raid3_update_device(struct g_raid3_softc *sc, boolean_t force);
static void g_raid3_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp);
static void g_raid3_sync_stop(struct g_raid3_softc *sc, int type);
static int g_raid3_register_request(struct bio *pbp);
static void g_raid3_sync_release(struct g_raid3_softc *sc);

static const char *
g_raid3_disk_state2str(int state)
{

	switch (state) {
	case G_RAID3_DISK_STATE_NODISK:
		return ("NODISK");
	case G_RAID3_DISK_STATE_NONE:
		return ("NONE");
	case G_RAID3_DISK_STATE_NEW:
		return ("NEW");
	case G_RAID3_DISK_STATE_ACTIVE:
		return ("ACTIVE");
	case G_RAID3_DISK_STATE_STALE:
		return ("STALE");
	case G_RAID3_DISK_STATE_SYNCHRONIZING:
		return ("SYNCHRONIZING");
	case G_RAID3_DISK_STATE_DISCONNECTED:
		return ("DISCONNECTED");
	default:
		return ("INVALID");
	}
}

static const char *
g_raid3_device_state2str(int state)
{

	switch (state) {
	case G_RAID3_DEVICE_STATE_STARTING:
		return ("STARTING");
	case G_RAID3_DEVICE_STATE_DEGRADED:
		return ("DEGRADED");
	case G_RAID3_DEVICE_STATE_COMPLETE:
		return ("COMPLETE");
	default:
		return ("INVALID");
	}
}

const char *
g_raid3_get_diskname(struct g_raid3_disk *disk)
{

	if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL)
		return ("[unknown]");
	return (disk->d_name);
}

static void *
g_raid3_alloc(struct g_raid3_softc *sc, size_t size, int flags)
{
	void *ptr;
	enum g_raid3_zones zone;

	if (g_raid3_use_malloc ||
	    (zone = g_raid3_zone(size)) == G_RAID3_NUM_ZONES)
		ptr = malloc(size, M_RAID3, flags);
	else {
		ptr = uma_zalloc_arg(sc->sc_zones[zone].sz_zone,
		   &sc->sc_zones[zone], flags);
		sc->sc_zones[zone].sz_requested++;
		if (ptr == NULL)
			sc->sc_zones[zone].sz_failed++;
	}
	return (ptr);
}

static void
g_raid3_free(struct g_raid3_softc *sc, void *ptr, size_t size)
{
	enum g_raid3_zones zone;

	if (g_raid3_use_malloc ||
	    (zone = g_raid3_zone(size)) == G_RAID3_NUM_ZONES)
		free(ptr, M_RAID3);
	else {
		uma_zfree_arg(sc->sc_zones[zone].sz_zone,
		    ptr, &sc->sc_zones[zone]);
	}
}

static int
g_raid3_uma_ctor(void *mem, int size, void *arg, int flags)
{
	struct g_raid3_zone *sz = arg;

	if (sz->sz_max > 0 && sz->sz_inuse == sz->sz_max)
		return (ENOMEM);
	sz->sz_inuse++;
	return (0);
}

static void
g_raid3_uma_dtor(void *mem, int size, void *arg)
{
	struct g_raid3_zone *sz = arg;

	sz->sz_inuse--;
}

#define	g_raid3_xor(src, dst, size)					\
	_g_raid3_xor((uint64_t *)(src),					\
	    (uint64_t *)(dst), (size_t)size)
static void
_g_raid3_xor(uint64_t *src, uint64_t *dst, size_t size)
{

	KASSERT((size % 128) == 0, ("Invalid size: %zu.", size));
	for (; size > 0; size -= 128) {
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
	}
}
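
/*
 * Illustration of the parity relation maintained above: for the (n - 1) data
 * components D_0..D_{n-2} of a stripe, the parity component stores
 *
 *	P = D_0 ^ D_1 ^ ... ^ D_{n-2}
 *
 * so any single missing component can be recovered by XOR-ing the remaining
 * ones.  The loop is unrolled to process 16 64-bit words (128 bytes) per
 * iteration, hence the requirement that the size be a multiple of 128.
 */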

static int
g_raid3_is_zero(struct bio *bp)
{
	static const uint64_t zeros[] = {
	    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	u_char *addr;
	ssize_t size;

	size = bp->bio_length;
	addr = (u_char *)bp->bio_data;
	for (; size > 0; size -= sizeof(zeros), addr += sizeof(zeros)) {
		if (bcmp(addr, zeros, sizeof(zeros)) != 0)
			return (0);
	}
	return (1);
}

/*
 * --- Event handling functions ---
 * Events in geom_raid3 are used to update disk and device state from a
 * single (worker) thread, which simplifies locking.
 */
static void
g_raid3_event_free(struct g_raid3_event *ep)
{

	free(ep, M_RAID3);
}

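/*
 * Queue a state-change event for the worker thread.  With
 * G_RAID3_EVENT_DONTWAIT the call returns immediately; otherwise the caller
 * drops sc_lock and sleeps until the worker marks the event
 * G_RAID3_EVENT_DONE, then returns the worker's e_error result.
 */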
int
g_raid3_event_send(void *arg, int state, int flags)
{
	struct g_raid3_softc *sc;
	struct g_raid3_disk *disk;
	struct g_raid3_event *ep;
	int error;

	ep = malloc(sizeof(*ep), M_RAID3, M_WAITOK);
	G_RAID3_DEBUG(4, "%s: Sending event %p.", __func__, ep);
	if ((flags & G_RAID3_EVENT_DEVICE) != 0) {
		disk = NULL;
		sc = arg;
	} else {
		disk = arg;
		sc = disk->d_softc;
	}
	ep->e_disk = disk;
	ep->e_state = state;
	ep->e_flags = flags;
	ep->e_error = 0;
	mtx_lock(&sc->sc_events_mtx);
	TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next);
	mtx_unlock(&sc->sc_events_mtx);
	G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	mtx_lock(&sc->sc_queue_mtx);
	wakeup(sc);
	wakeup(&sc->sc_queue);
	mtx_unlock(&sc->sc_queue_mtx);
	if ((flags & G_RAID3_EVENT_DONTWAIT) != 0)
		return (0);
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	G_RAID3_DEBUG(4, "%s: Sleeping %p.", __func__, ep);
	sx_xunlock(&sc->sc_lock);
	while ((ep->e_flags & G_RAID3_EVENT_DONE) == 0) {
		mtx_lock(&sc->sc_events_mtx);
		MSLEEP(ep, &sc->sc_events_mtx, PRIBIO | PDROP, "r3:event",
		    hz * 5);
	}
	error = ep->e_error;
	g_raid3_event_free(ep);
	sx_xlock(&sc->sc_lock);
	return (error);
}

static struct g_raid3_event *
g_raid3_event_get(struct g_raid3_softc *sc)
{
	struct g_raid3_event *ep;

	mtx_lock(&sc->sc_events_mtx);
	ep = TAILQ_FIRST(&sc->sc_events);
	mtx_unlock(&sc->sc_events_mtx);
	return (ep);
}

static void
g_raid3_event_remove(struct g_raid3_softc *sc, struct g_raid3_event *ep)
{

	mtx_lock(&sc->sc_events_mtx);
	TAILQ_REMOVE(&sc->sc_events, ep, e_next);
	mtx_unlock(&sc->sc_events_mtx);
}

static void
g_raid3_event_cancel(struct g_raid3_disk *disk)
{
	struct g_raid3_softc *sc;
	struct g_raid3_event *ep, *tmpep;

	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	mtx_lock(&sc->sc_events_mtx);
	TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) {
		if ((ep->e_flags & G_RAID3_EVENT_DEVICE) != 0)
			continue;
		if (ep->e_disk != disk)
			continue;
		TAILQ_REMOVE(&sc->sc_events, ep, e_next);
		if ((ep->e_flags & G_RAID3_EVENT_DONTWAIT) != 0)
			g_raid3_event_free(ep);
		else {
			ep->e_error = ECANCELED;
			wakeup(ep);
		}
	}
	mtx_unlock(&sc->sc_events_mtx);
}

/*
 * Return the number of disks in the given state.
 * If state is equal to -1, count all connected disks.
 */
u_int
g_raid3_ndisks(struct g_raid3_softc *sc, int state)
{
	struct g_raid3_disk *disk;
	u_int n, ndisks;

	sx_assert(&sc->sc_lock, SX_LOCKED);

	for (n = ndisks = 0; n < sc->sc_ndisks; n++) {
		disk = &sc->sc_disks[n];
		if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
			continue;
		if (state == -1 || disk->d_state == state)
			ndisks++;
	}
	return (ndisks);
}

static u_int
g_raid3_nrequests(struct g_raid3_softc *sc, struct g_consumer *cp)
{
	struct bio *bp;
	u_int nreqs = 0;

	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
		if (bp->bio_from == cp)
			nreqs++;
	}
	mtx_unlock(&sc->sc_queue_mtx);
	return (nreqs);
}

static int
g_raid3_is_busy(struct g_raid3_softc *sc, struct g_consumer *cp)
{

	if (cp->index > 0) {
		G_RAID3_DEBUG(2,
		    "I/O requests for %s exist, can't destroy it now.",
		    cp->provider->name);
		return (1);
	}
	if (g_raid3_nrequests(sc, cp) > 0) {
		G_RAID3_DEBUG(2,
		    "I/O requests for %s in queue, can't destroy it now.",
		    cp->provider->name);
		return (1);
	}
	return (0);
}

static void
g_raid3_destroy_consumer(void *arg, int flags __unused)
{
	struct g_consumer *cp;

	g_topology_assert();

	cp = arg;
	G_RAID3_DEBUG(1, "Consumer %s destroyed.", cp->provider->name);
	g_detach(cp);
	g_destroy_consumer(cp);
}

static void
g_raid3_kill_consumer(struct g_raid3_softc *sc, struct g_consumer *cp)
{
	struct g_provider *pp;
	int retaste_wait;

	g_topology_assert();

	cp->private = NULL;
	if (g_raid3_is_busy(sc, cp))
		return;
	G_RAID3_DEBUG(2, "Consumer %s destroyed.", cp->provider->name);
	pp = cp->provider;
	retaste_wait = 0;
	if (cp->acw == 1) {
		if ((pp->geom->flags & G_GEOM_WITHER) == 0)
			retaste_wait = 1;
	}
	G_RAID3_DEBUG(2, "Access %s r%dw%de%d = %d", pp->name, -cp->acr,
	    -cp->acw, -cp->ace, 0);
	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);
	if (retaste_wait) {
		/*
		 * After the retaste event has been sent (inside g_access()),
		 * we can send the event to detach and destroy the consumer.
		 * A class which has a consumer attached to the given provider
		 * will not receive a retaste event for that provider.
		 * This is how we ignore retaste events when closing consumers
		 * opened for writing: the consumer is detached and destroyed
		 * only after the retaste event has been sent.
		 */
		g_post_event(g_raid3_destroy_consumer, cp, M_WAITOK, NULL);
		return;
	}
	G_RAID3_DEBUG(1, "Consumer %s destroyed.", pp->name);
	g_detach(cp);
	g_destroy_consumer(cp);
}

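/*
 * Attach a consumer for the given disk to the provider and open it r1w1e1.
 */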
static int
g_raid3_connect_disk(struct g_raid3_disk *disk, struct g_provider *pp)
{
	struct g_consumer *cp;
	int error;

	g_topology_assert_not();
	KASSERT(disk->d_consumer == NULL,
	    ("Disk already connected (device %s).", disk->d_softc->sc_name));

	g_topology_lock();
	cp = g_new_consumer(disk->d_softc->sc_geom);
	error = g_attach(cp, pp);
	if (error != 0) {
		g_destroy_consumer(cp);
		g_topology_unlock();
		return (error);
	}
	error = g_access(cp, 1, 1, 1);
	g_topology_unlock();
	if (error != 0) {
		g_detach(cp);
		g_destroy_consumer(cp);
		G_RAID3_DEBUG(0, "Cannot open consumer %s (error=%d).",
		    pp->name, error);
		return (error);
	}
	disk->d_consumer = cp;
	disk->d_consumer->private = disk;
	disk->d_consumer->index = 0;
	G_RAID3_DEBUG(2, "Disk %s connected.", g_raid3_get_diskname(disk));
	return (0);
}

static void
g_raid3_disconnect_consumer(struct g_raid3_softc *sc, struct g_consumer *cp)
{

	g_topology_assert();

	if (cp == NULL)
		return;
	if (cp->provider != NULL)
		g_raid3_kill_consumer(sc, cp);
	else
		g_destroy_consumer(cp);
}

/*
 * Initialize a disk. This means allocate memory, create a consumer, attach
 * it to the provider and open access (r1w1e1) to it.
 */
static struct g_raid3_disk *
g_raid3_init_disk(struct g_raid3_softc *sc, struct g_provider *pp,
    struct g_raid3_metadata *md, int *errorp)
{
	struct g_raid3_disk *disk;
	int error;

	disk = &sc->sc_disks[md->md_no];
	error = g_raid3_connect_disk(disk, pp);
	if (error != 0) {
		if (errorp != NULL)
			*errorp = error;
		return (NULL);
	}
	disk->d_state = G_RAID3_DISK_STATE_NONE;
	disk->d_flags = md->md_dflags;
	if (md->md_provider[0] != '\0')
		disk->d_flags |= G_RAID3_DISK_FLAG_HARDCODED;
	disk->d_sync.ds_consumer = NULL;
	disk->d_sync.ds_offset = md->md_sync_offset;
	disk->d_sync.ds_offset_done = md->md_sync_offset;
	disk->d_genid = md->md_genid;
	disk->d_sync.ds_syncid = md->md_syncid;
	if (errorp != NULL)
		*errorp = 0;
	return (disk);
}

static void
g_raid3_destroy_disk(struct g_raid3_disk *disk)
{
	struct g_raid3_softc *sc;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
		return;
	g_raid3_event_cancel(disk);
	switch (disk->d_state) {
	case G_RAID3_DISK_STATE_SYNCHRONIZING:
		if (sc->sc_syncdisk != NULL)
			g_raid3_sync_stop(sc, 1);
		/* FALLTHROUGH */
	case G_RAID3_DISK_STATE_NEW:
	case G_RAID3_DISK_STATE_STALE:
	case G_RAID3_DISK_STATE_ACTIVE:
		g_topology_lock();
		g_raid3_disconnect_consumer(sc, disk->d_consumer);
		g_topology_unlock();
		disk->d_consumer = NULL;
		break;
	default:
		KASSERT(0 == 1, ("Wrong disk state (%s, %s).",
		    g_raid3_get_diskname(disk),
		    g_raid3_disk_state2str(disk->d_state)));
	}
	disk->d_state = G_RAID3_DISK_STATE_NODISK;
}

static void
g_raid3_destroy_device(struct g_raid3_softc *sc)
{
	struct g_raid3_event *ep;
	struct g_raid3_disk *disk;
	struct g_geom *gp;
	struct g_consumer *cp;
	u_int n;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	gp = sc->sc_geom;
	if (sc->sc_provider != NULL)
		g_raid3_destroy_provider(sc);
	for (n = 0; n < sc->sc_ndisks; n++) {
		disk = &sc->sc_disks[n];
		if (disk->d_state != G_RAID3_DISK_STATE_NODISK) {
			disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
			g_raid3_update_metadata(disk);
			g_raid3_destroy_disk(disk);
		}
	}
	while ((ep = g_raid3_event_get(sc)) != NULL) {
		g_raid3_event_remove(sc, ep);
		if ((ep->e_flags & G_RAID3_EVENT_DONTWAIT) != 0)
			g_raid3_event_free(ep);
		else {
			ep->e_error = ECANCELED;
			ep->e_flags |= G_RAID3_EVENT_DONE;
			G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, ep);
			mtx_lock(&sc->sc_events_mtx);
			wakeup(ep);
			mtx_unlock(&sc->sc_events_mtx);
		}
	}
	callout_drain(&sc->sc_callout);
	cp = LIST_FIRST(&sc->sc_sync.ds_geom->consumer);
	g_topology_lock();
	if (cp != NULL)
		g_raid3_disconnect_consumer(sc, cp);
	g_wither_geom(sc->sc_sync.ds_geom, ENXIO);
	G_RAID3_DEBUG(0, "Device %s destroyed.", gp->name);
	g_wither_geom(gp, ENXIO);
	g_topology_unlock();
	if (!g_raid3_use_malloc) {
		uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_64K].sz_zone);
		uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_16K].sz_zone);
		uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_4K].sz_zone);
	}
	mtx_destroy(&sc->sc_queue_mtx);
	mtx_destroy(&sc->sc_events_mtx);
	sx_xunlock(&sc->sc_lock);
	sx_destroy(&sc->sc_lock);
}

static void
g_raid3_orphan(struct g_consumer *cp)
{
	struct g_raid3_disk *disk;

	g_topology_assert();

	disk = cp->private;
	if (disk == NULL)
		return;
	disk->d_softc->sc_bump_id = G_RAID3_BUMP_SYNCID;
	g_raid3_event_send(disk, G_RAID3_DISK_STATE_DISCONNECTED,
	    G_RAID3_EVENT_DONTWAIT);
}

static int
g_raid3_write_metadata(struct g_raid3_disk *disk, struct g_raid3_metadata *md)
{
	struct g_raid3_softc *sc;
	struct g_consumer *cp;
	off_t offset, length;
	u_char *sector;
	int error = 0;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	cp = disk->d_consumer;
	KASSERT(cp != NULL, ("NULL consumer (%s).", sc->sc_name));
	KASSERT(cp->provider != NULL, ("NULL provider (%s).", sc->sc_name));
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s closed? (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	length = cp->provider->sectorsize;
	offset = cp->provider->mediasize - length;
	sector = malloc((size_t)length, M_RAID3, M_WAITOK | M_ZERO);
	if (md != NULL)
		raid3_metadata_encode(md, sector);
	error = g_write_data(cp, offset, sector, length);
	free(sector, M_RAID3);
	if (error != 0) {
		if ((disk->d_flags & G_RAID3_DISK_FLAG_BROKEN) == 0) {
			G_RAID3_DEBUG(0, "Cannot write metadata on %s "
			    "(device=%s, error=%d).",
			    g_raid3_get_diskname(disk), sc->sc_name, error);
			disk->d_flags |= G_RAID3_DISK_FLAG_BROKEN;
		} else {
			G_RAID3_DEBUG(1, "Cannot write metadata on %s "
			    "(device=%s, error=%d).",
			    g_raid3_get_diskname(disk), sc->sc_name, error);
		}
		if (g_raid3_disconnect_on_failure &&
		    sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
			sc->sc_bump_id |= G_RAID3_BUMP_GENID;
			g_raid3_event_send(disk,
			    G_RAID3_DISK_STATE_DISCONNECTED,
			    G_RAID3_EVENT_DONTWAIT);
		}
	}
	return (error);
}

int
g_raid3_clear_metadata(struct g_raid3_disk *disk)
{
	int error;

	g_topology_assert_not();
	sx_assert(&disk->d_softc->sc_lock, SX_LOCKED);

	error = g_raid3_write_metadata(disk, NULL);
	if (error == 0) {
		G_RAID3_DEBUG(2, "Metadata on %s cleared.",
		    g_raid3_get_diskname(disk));
	} else {
		G_RAID3_DEBUG(0,
		    "Cannot clear metadata on disk %s (error=%d).",
		    g_raid3_get_diskname(disk), error);
	}
	return (error);
}

void
g_raid3_fill_metadata(struct g_raid3_disk *disk, struct g_raid3_metadata *md)
{
	struct g_raid3_softc *sc;
	struct g_provider *pp;

	sc = disk->d_softc;
	strlcpy(md->md_magic, G_RAID3_MAGIC, sizeof(md->md_magic));
	md->md_version = G_RAID3_VERSION;
	strlcpy(md->md_name, sc->sc_name, sizeof(md->md_name));
	md->md_id = sc->sc_id;
	md->md_all = sc->sc_ndisks;
	md->md_genid = sc->sc_genid;
	md->md_mediasize = sc->sc_mediasize;
	md->md_sectorsize = sc->sc_sectorsize;
	md->md_mflags = (sc->sc_flags & G_RAID3_DEVICE_FLAG_MASK);
	md->md_no = disk->d_no;
	md->md_syncid = disk->d_sync.ds_syncid;
	md->md_dflags = (disk->d_flags & G_RAID3_DISK_FLAG_MASK);
	if (disk->d_state != G_RAID3_DISK_STATE_SYNCHRONIZING)
		md->md_sync_offset = 0;
	else {
		md->md_sync_offset =
		    disk->d_sync.ds_offset_done / (sc->sc_ndisks - 1);
	}
	if (disk->d_consumer != NULL && disk->d_consumer->provider != NULL)
		pp = disk->d_consumer->provider;
	else
		pp = NULL;
	if ((disk->d_flags & G_RAID3_DISK_FLAG_HARDCODED) != 0 && pp != NULL)
		strlcpy(md->md_provider, pp->name, sizeof(md->md_provider));
	else
		bzero(md->md_provider, sizeof(md->md_provider));
	if (pp != NULL)
		md->md_provsize = pp->mediasize;
	else
		md->md_provsize = 0;
}

void
g_raid3_update_metadata(struct g_raid3_disk *disk)
{
	struct g_raid3_softc *sc;
	struct g_raid3_metadata md;
	int error;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	g_raid3_fill_metadata(disk, &md);
	error = g_raid3_write_metadata(disk, &md);
	if (error == 0) {
		G_RAID3_DEBUG(2, "Metadata on %s updated.",
		    g_raid3_get_diskname(disk));
	} else {
		G_RAID3_DEBUG(0,
		    "Cannot update metadata on disk %s (error=%d).",
		    g_raid3_get_diskname(disk), error);
	}
}

static void
g_raid3_bump_syncid(struct g_raid3_softc *sc)
{
	struct g_raid3_disk *disk;
	u_int n;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	KASSERT(g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) > 0,
	    ("%s called with no active disks (device=%s).", __func__,
	    sc->sc_name));

	sc->sc_syncid++;
	G_RAID3_DEBUG(1, "Device %s: syncid bumped to %u.", sc->sc_name,
	    sc->sc_syncid);
	for (n = 0; n < sc->sc_ndisks; n++) {
		disk = &sc->sc_disks[n];
		if (disk->d_state == G_RAID3_DISK_STATE_ACTIVE ||
		    disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
			disk->d_sync.ds_syncid = sc->sc_syncid;
			g_raid3_update_metadata(disk);
		}
	}
}

static void
g_raid3_bump_genid(struct g_raid3_softc *sc)
{
	struct g_raid3_disk *disk;
	u_int n;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	KASSERT(g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) > 0,
	    ("%s called with no active disks (device=%s).", __func__,
	    sc->sc_name));

	sc->sc_genid++;
	G_RAID3_DEBUG(1, "Device %s: genid bumped to %u.", sc->sc_name,
	    sc->sc_genid);
	for (n = 0; n < sc->sc_ndisks; n++) {
		disk = &sc->sc_disks[n];
		if (disk->d_state == G_RAID3_DISK_STATE_ACTIVE ||
		    disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
			disk->d_genid = sc->sc_genid;
			g_raid3_update_metadata(disk);
		}
	}
}

static int
g_raid3_idle(struct g_raid3_softc *sc, int acw)
{
	struct g_raid3_disk *disk;
	u_int i;
	int timeout;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if (sc->sc_provider == NULL)
		return (0);
	if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) != 0)
		return (0);
	if (sc->sc_idle)
		return (0);
	if (sc->sc_writes > 0)
		return (0);
	if (acw > 0 || (acw == -1 && sc->sc_provider->acw > 0)) {
		timeout = g_raid3_idletime - (time_uptime - sc->sc_last_write);
		if (!g_raid3_shutdown && timeout > 0)
			return (timeout);
	}
	sc->sc_idle = 1;
	for (i = 0; i < sc->sc_ndisks; i++) {
		disk = &sc->sc_disks[i];
		if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE)
			continue;
		G_RAID3_DEBUG(1, "Disk %s (device %s) marked as clean.",
		    g_raid3_get_diskname(disk), sc->sc_name);
		disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
		g_raid3_update_metadata(disk);
	}
	return (0);
}

static void
g_raid3_unidle(struct g_raid3_softc *sc)
{
	struct g_raid3_disk *disk;
	u_int i;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) != 0)
		return;
	sc->sc_idle = 0;
	sc->sc_last_write = time_uptime;
	for (i = 0; i < sc->sc_ndisks; i++) {
		disk = &sc->sc_disks[i];
		if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE)
			continue;
		G_RAID3_DEBUG(1, "Disk %s (device %s) marked as dirty.",
		    g_raid3_get_diskname(disk), sc->sc_name);
		disk->d_flags |= G_RAID3_DISK_FLAG_DIRTY;
		g_raid3_update_metadata(disk);
	}
}

/*
 * Treat the bio_driver1 field in the parent bio as the list head and the
 * bio_caller1 field in each child bio as the pointer to the next element.
 */
#define	G_RAID3_HEAD_BIO(pbp)	(pbp)->bio_driver1

#define	G_RAID3_NEXT_BIO(cbp)	(cbp)->bio_caller1

#define	G_RAID3_FOREACH_BIO(pbp, bp)					\
	for ((bp) = G_RAID3_HEAD_BIO(pbp); (bp) != NULL;		\
	    (bp) = G_RAID3_NEXT_BIO(bp))

#define	G_RAID3_FOREACH_SAFE_BIO(pbp, bp, tmpbp)			\
	for ((bp) = G_RAID3_HEAD_BIO(pbp);				\
	    (bp) != NULL && ((tmpbp) = G_RAID3_NEXT_BIO(bp), 1);	\
	    (bp) = (tmpbp))
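
/*
 * Illustrative layout: a parent bio with three cloned children is chained as
 *
 *	pbp->bio_driver1  -> cbp0
 *	cbp0->bio_caller1 -> cbp1
 *	cbp1->bio_caller1 -> cbp2
 *	cbp2->bio_caller1 -> NULL
 */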

static void
g_raid3_init_bio(struct bio *pbp)
{

	G_RAID3_HEAD_BIO(pbp) = NULL;
}

static void
g_raid3_remove_bio(struct bio *cbp)
{
	struct bio *pbp, *bp;

	pbp = cbp->bio_parent;
	if (G_RAID3_HEAD_BIO(pbp) == cbp)
		G_RAID3_HEAD_BIO(pbp) = G_RAID3_NEXT_BIO(cbp);
	else {
		G_RAID3_FOREACH_BIO(pbp, bp) {
			if (G_RAID3_NEXT_BIO(bp) == cbp) {
				G_RAID3_NEXT_BIO(bp) = G_RAID3_NEXT_BIO(cbp);
				break;
			}
		}
	}
	G_RAID3_NEXT_BIO(cbp) = NULL;
}

static void
g_raid3_replace_bio(struct bio *sbp, struct bio *dbp)
{
	struct bio *pbp, *bp;

	g_raid3_remove_bio(sbp);
	pbp = dbp->bio_parent;
	G_RAID3_NEXT_BIO(sbp) = G_RAID3_NEXT_BIO(dbp);
	if (G_RAID3_HEAD_BIO(pbp) == dbp)
		G_RAID3_HEAD_BIO(pbp) = sbp;
	else {
		G_RAID3_FOREACH_BIO(pbp, bp) {
			if (G_RAID3_NEXT_BIO(bp) == dbp) {
				G_RAID3_NEXT_BIO(bp) = sbp;
				break;
			}
		}
	}
	G_RAID3_NEXT_BIO(dbp) = NULL;
}

static void
g_raid3_destroy_bio(struct g_raid3_softc *sc, struct bio *cbp)
{
	struct bio *bp, *pbp;
	size_t size;

	pbp = cbp->bio_parent;
	pbp->bio_children--;
	KASSERT(cbp->bio_data != NULL, ("NULL bio_data"));
	size = pbp->bio_length / (sc->sc_ndisks - 1);
	g_raid3_free(sc, cbp->bio_data, size);
	if (G_RAID3_HEAD_BIO(pbp) == cbp) {
		G_RAID3_HEAD_BIO(pbp) = G_RAID3_NEXT_BIO(cbp);
		G_RAID3_NEXT_BIO(cbp) = NULL;
		g_destroy_bio(cbp);
	} else {
		G_RAID3_FOREACH_BIO(pbp, bp) {
			if (G_RAID3_NEXT_BIO(bp) == cbp)
				break;
		}
		if (bp != NULL) {
			KASSERT(G_RAID3_NEXT_BIO(bp) != NULL,
			    ("NULL bp->bio_caller1"));
			G_RAID3_NEXT_BIO(bp) = G_RAID3_NEXT_BIO(cbp);
			G_RAID3_NEXT_BIO(cbp) = NULL;
		}
		g_destroy_bio(cbp);
	}
}

static struct bio *
g_raid3_clone_bio(struct g_raid3_softc *sc, struct bio *pbp)
{
	struct bio *bp, *cbp;
	size_t size;
	int memflag;

	cbp = g_clone_bio(pbp);
	if (cbp == NULL)
		return (NULL);
	size = pbp->bio_length / (sc->sc_ndisks - 1);
	if ((pbp->bio_cflags & G_RAID3_BIO_CFLAG_REGULAR) != 0)
		memflag = M_WAITOK;
	else
		memflag = M_NOWAIT;
	cbp->bio_data = g_raid3_alloc(sc, size, memflag);
	if (cbp->bio_data == NULL) {
		pbp->bio_children--;
		g_destroy_bio(cbp);
		return (NULL);
	}
	G_RAID3_NEXT_BIO(cbp) = NULL;
	if (G_RAID3_HEAD_BIO(pbp) == NULL)
		G_RAID3_HEAD_BIO(pbp) = cbp;
	else {
		G_RAID3_FOREACH_BIO(pbp, bp) {
			if (G_RAID3_NEXT_BIO(bp) == NULL) {
				G_RAID3_NEXT_BIO(bp) = cbp;
				break;
			}
		}
	}
	return (cbp);
}

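/*
 * Scatter a regular WRITE/DELETE request: copy the parent's data into the
 * per-component children one "atom" (sectorsize / (ndisks - 1) bytes) at a
 * time, XOR the data components together into the parity child (unless
 * NOPARITY is set) and send the children to their consumers.
 */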
static void
g_raid3_scatter(struct bio *pbp)
{
	struct g_raid3_softc *sc;
	struct g_raid3_disk *disk;
	struct bio *bp, *cbp, *tmpbp;
	off_t atom, cadd, padd, left;
	int first;

	sc = pbp->bio_to->geom->softc;
	bp = NULL;
	if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_NOPARITY) == 0) {
		/*
		 * Find the bio for which we should calculate data
		 * (the parity bio).
		 */
		G_RAID3_FOREACH_BIO(pbp, cbp) {
			if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_PARITY) != 0) {
				bp = cbp;
				break;
			}
		}
		KASSERT(bp != NULL, ("NULL parity bio."));
	}
	atom = sc->sc_sectorsize / (sc->sc_ndisks - 1);
	cadd = padd = 0;
	for (left = pbp->bio_length; left > 0; left -= sc->sc_sectorsize) {
		G_RAID3_FOREACH_BIO(pbp, cbp) {
			if (cbp == bp)
				continue;
			bcopy(pbp->bio_data + padd, cbp->bio_data + cadd, atom);
			padd += atom;
		}
		cadd += atom;
	}
	if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_NOPARITY) == 0) {
		/*
		 * Calculate parity.
		 */
		first = 1;
		G_RAID3_FOREACH_SAFE_BIO(pbp, cbp, tmpbp) {
			if (cbp == bp)
				continue;
			if (first) {
				bcopy(cbp->bio_data, bp->bio_data,
				    bp->bio_length);
				first = 0;
			} else {
				g_raid3_xor(cbp->bio_data, bp->bio_data,
				    bp->bio_length);
			}
			if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_NODISK) != 0)
				g_raid3_destroy_bio(sc, cbp);
		}
	}
	G_RAID3_FOREACH_SAFE_BIO(pbp, cbp, tmpbp) {
		struct g_consumer *cp;

		disk = cbp->bio_caller2;
		cp = disk->d_consumer;
		cbp->bio_to = cp->provider;
		G_RAID3_LOGREQ(3, cbp, "Sending request.");
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		cp->index++;
		sc->sc_writes++;
		g_io_request(cbp, cp);
	}
}

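/*
 * Gather a completed regular READ request: check the children, rebuild a
 * missing or failed component from the remaining ones plus parity if needed
 * (or, in VERIFY mode, check that the parity XORs to zero) and interleave
 * the children's data back into the parent buffer.
 */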
static void
g_raid3_gather(struct bio *pbp)
{
	struct g_raid3_softc *sc;
	struct g_raid3_disk *disk;
	struct bio *xbp, *fbp, *cbp;
	off_t atom, cadd, padd, left;

	sc = pbp->bio_to->geom->softc;
	/*
	 * Find the bio for which we have to calculate data, and while
	 * walking the list check whether all requests succeeded; if not,
	 * fail the whole request.
	 * In COMPLETE mode we allow a single request to fail: if we find
	 * one, we resend it to the parity consumer.  If more than one
	 * request failed, we fail the whole request.
	 */
	xbp = fbp = NULL;
	G_RAID3_FOREACH_BIO(pbp, cbp) {
		if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_PARITY) != 0) {
			KASSERT(xbp == NULL, ("More than one parity bio."));
			xbp = cbp;
		}
		if (cbp->bio_error == 0)
			continue;
		/*
		 * Found failed request.
		 */
		if (fbp == NULL) {
			if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_DEGRADED) != 0) {
				/*
				 * We are already in degraded mode, so we can't
				 * accept any failures.
				 */
				if (pbp->bio_error == 0)
					pbp->bio_error = cbp->bio_error;
			} else {
				fbp = cbp;
			}
		} else {
			/*
			 * Next failed request, that's too many.
			 */
			if (pbp->bio_error == 0)
				pbp->bio_error = fbp->bio_error;
		}
		disk = cbp->bio_caller2;
		if (disk == NULL)
			continue;
		if ((disk->d_flags & G_RAID3_DISK_FLAG_BROKEN) == 0) {
			disk->d_flags |= G_RAID3_DISK_FLAG_BROKEN;
			G_RAID3_LOGREQ(0, cbp, "Request failed (error=%d).",
			    cbp->bio_error);
		} else {
			G_RAID3_LOGREQ(1, cbp, "Request failed (error=%d).",
			    cbp->bio_error);
		}
		if (g_raid3_disconnect_on_failure &&
		    sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
			sc->sc_bump_id |= G_RAID3_BUMP_GENID;
			g_raid3_event_send(disk,
			    G_RAID3_DISK_STATE_DISCONNECTED,
			    G_RAID3_EVENT_DONTWAIT);
		}
	}
	if (pbp->bio_error != 0)
		goto finish;
	if (fbp != NULL && (pbp->bio_pflags & G_RAID3_BIO_PFLAG_VERIFY) != 0) {
		pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_VERIFY;
		if (xbp != fbp)
			g_raid3_replace_bio(xbp, fbp);
		g_raid3_destroy_bio(sc, fbp);
	} else if (fbp != NULL) {
		struct g_consumer *cp;

		/*
		 * One request failed, so send the same request to
		 * the parity consumer.
		 */
		disk = pbp->bio_driver2;
		if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE) {
			pbp->bio_error = fbp->bio_error;
			goto finish;
		}
		pbp->bio_pflags |= G_RAID3_BIO_PFLAG_DEGRADED;
		pbp->bio_inbed--;
		fbp->bio_flags &= ~(BIO_DONE | BIO_ERROR);
		if (disk->d_no == sc->sc_ndisks - 1)
			fbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY;
		fbp->bio_error = 0;
		fbp->bio_completed = 0;
		fbp->bio_children = 0;
		fbp->bio_inbed = 0;
		cp = disk->d_consumer;
		fbp->bio_caller2 = disk;
		fbp->bio_to = cp->provider;
		G_RAID3_LOGREQ(3, fbp, "Sending request (recover).");
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		cp->index++;
		g_io_request(fbp, cp);
		return;
	}
	if (xbp != NULL) {
		/*
		 * Calculate parity.
		 */
		G_RAID3_FOREACH_BIO(pbp, cbp) {
			if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_PARITY) != 0)
				continue;
			g_raid3_xor(cbp->bio_data, xbp->bio_data,
			    xbp->bio_length);
		}
		xbp->bio_cflags &= ~G_RAID3_BIO_CFLAG_PARITY;
		if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_VERIFY) != 0) {
			if (!g_raid3_is_zero(xbp)) {
				g_raid3_parity_mismatch++;
				pbp->bio_error = EIO;
				goto finish;
			}
			g_raid3_destroy_bio(sc, xbp);
		}
	}
	atom = sc->sc_sectorsize / (sc->sc_ndisks - 1);
	cadd = padd = 0;
	for (left = pbp->bio_length; left > 0; left -= sc->sc_sectorsize) {
		G_RAID3_FOREACH_BIO(pbp, cbp) {
			bcopy(cbp->bio_data + cadd, pbp->bio_data + padd, atom);
			pbp->bio_completed += atom;
			padd += atom;
		}
		cadd += atom;
	}
finish:
	if (pbp->bio_error == 0)
		G_RAID3_LOGREQ(3, pbp, "Request finished.");
	else {
		if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_VERIFY) != 0)
			G_RAID3_LOGREQ(1, pbp, "Verification error.");
		else
			G_RAID3_LOGREQ(0, pbp, "Request failed.");
	}
	pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_MASK;
	while ((cbp = G_RAID3_HEAD_BIO(pbp)) != NULL)
		g_raid3_destroy_bio(sc, cbp);
	g_io_deliver(pbp, pbp->bio_error);
}

static void
g_raid3_done(struct bio *bp)
{
	struct g_raid3_softc *sc;

	sc = bp->bio_from->geom->softc;
	bp->bio_cflags |= G_RAID3_BIO_CFLAG_REGULAR;
	G_RAID3_LOGREQ(3, bp, "Regular request done (error=%d).", bp->bio_error);
	mtx_lock(&sc->sc_queue_mtx);
	bioq_insert_head(&sc->sc_queue, bp);
	mtx_unlock(&sc->sc_queue_mtx);
	wakeup(sc);
	wakeup(&sc->sc_queue);
}

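/*
 * Per-child completion for regular requests.  Only once all children have
 * come back (bio_inbed == bio_children) is the parent finished: READs go
 * through g_raid3_gather(), while WRITE/DELETE errors are counted and may
 * disconnect the failing component.
 */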
static void
g_raid3_regular_request(struct bio *cbp)
{
	struct g_raid3_softc *sc;
	struct g_raid3_disk *disk;
	struct bio *pbp;

	g_topology_assert_not();

	pbp = cbp->bio_parent;
	sc = pbp->bio_to->geom->softc;
	cbp->bio_from->index--;
	if (cbp->bio_cmd == BIO_WRITE)
		sc->sc_writes--;
	disk = cbp->bio_from->private;
	if (disk == NULL) {
		g_topology_lock();
		g_raid3_kill_consumer(sc, cbp->bio_from);
		g_topology_unlock();
	}

	G_RAID3_LOGREQ(3, cbp, "Request finished.");
	pbp->bio_inbed++;
	KASSERT(pbp->bio_inbed <= pbp->bio_children,
	    ("bio_inbed (%u) is bigger than bio_children (%u).", pbp->bio_inbed,
	    pbp->bio_children));
	if (pbp->bio_inbed != pbp->bio_children)
		return;
	switch (pbp->bio_cmd) {
	case BIO_READ:
		g_raid3_gather(pbp);
		break;
	case BIO_WRITE:
	case BIO_DELETE:
	    {
		int error = 0;

		pbp->bio_completed = pbp->bio_length;
		while ((cbp = G_RAID3_HEAD_BIO(pbp)) != NULL) {
			if (cbp->bio_error == 0) {
				g_raid3_destroy_bio(sc, cbp);
				continue;
			}

			if (error == 0)
				error = cbp->bio_error;
			else if (pbp->bio_error == 0) {
				/*
				 * Next failed request, that's too many.
				 */
				pbp->bio_error = error;
			}

			disk = cbp->bio_caller2;
			if (disk == NULL) {
				g_raid3_destroy_bio(sc, cbp);
				continue;
			}

			if ((disk->d_flags & G_RAID3_DISK_FLAG_BROKEN) == 0) {
				disk->d_flags |= G_RAID3_DISK_FLAG_BROKEN;
				G_RAID3_LOGREQ(0, cbp,
				    "Request failed (error=%d).",
				    cbp->bio_error);
			} else {
				G_RAID3_LOGREQ(1, cbp,
				    "Request failed (error=%d).",
				    cbp->bio_error);
			}
			if (g_raid3_disconnect_on_failure &&
			    sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
				sc->sc_bump_id |= G_RAID3_BUMP_GENID;
				g_raid3_event_send(disk,
				    G_RAID3_DISK_STATE_DISCONNECTED,
				    G_RAID3_EVENT_DONTWAIT);
			}
			g_raid3_destroy_bio(sc, cbp);
		}
		if (pbp->bio_error == 0)
			G_RAID3_LOGREQ(3, pbp, "Request finished.");
		else
			G_RAID3_LOGREQ(0, pbp, "Request failed.");
		pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_DEGRADED;
		pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_NOPARITY;
		bioq_remove(&sc->sc_inflight, pbp);
		/* Release delayed sync requests if possible. */
		g_raid3_sync_release(sc);
		g_io_deliver(pbp, pbp->bio_error);
		break;
	    }
	}
}

static void
g_raid3_sync_done(struct bio *bp)
{
	struct g_raid3_softc *sc;

	G_RAID3_LOGREQ(3, bp, "Synchronization request delivered.");
	sc = bp->bio_from->geom->softc;
	bp->bio_cflags |= G_RAID3_BIO_CFLAG_SYNC;
	mtx_lock(&sc->sc_queue_mtx);
	bioq_insert_head(&sc->sc_queue, bp);
	mtx_unlock(&sc->sc_queue_mtx);
	wakeup(sc);
	wakeup(&sc->sc_queue);
}

static void
g_raid3_flush(struct g_raid3_softc *sc, struct bio *bp)
{
	struct bio_queue_head queue;
	struct g_raid3_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;
	u_int i;

	bioq_init(&queue);
	for (i = 0; i < sc->sc_ndisks; i++) {
		disk = &sc->sc_disks[i];
		if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE)
			continue;
		cbp = g_clone_bio(bp);
		if (cbp == NULL) {
			for (cbp = bioq_first(&queue); cbp != NULL;
			    cbp = bioq_first(&queue)) {
				bioq_remove(&queue, cbp);
				g_destroy_bio(cbp);
			}
			if (bp->bio_error == 0)
				bp->bio_error = ENOMEM;
			g_io_deliver(bp, bp->bio_error);
			return;
		}
		bioq_insert_tail(&queue, cbp);
		cbp->bio_done = g_std_done;
		cbp->bio_caller1 = disk;
		cbp->bio_to = disk->d_consumer->provider;
	}
	for (cbp = bioq_first(&queue); cbp != NULL; cbp = bioq_first(&queue)) {
		bioq_remove(&queue, cbp);
		G_RAID3_LOGREQ(3, cbp, "Sending request.");
		disk = cbp->bio_caller1;
		cbp->bio_caller1 = NULL;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		g_io_request(cbp, disk->d_consumer);
	}
}

static void
g_raid3_start(struct bio *bp)
{
	struct g_raid3_softc *sc;

	sc = bp->bio_to->geom->softc;
	/*
	 * If sc == NULL or there are no valid disks, provider's error
	 * should be set and g_raid3_start() should not be called at all.
	 */
	KASSERT(sc != NULL && (sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
	    sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE),
	    ("Provider's error should be set (error=%d)(device=%s).",
	    bp->bio_to->error, bp->bio_to->name));
	G_RAID3_LOGREQ(3, bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	case BIO_FLUSH:
		g_raid3_flush(sc, bp);
		return;
	case BIO_GETATTR:
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
	mtx_lock(&sc->sc_queue_mtx);
	bioq_insert_tail(&sc->sc_queue, bp);
	mtx_unlock(&sc->sc_queue_mtx);
	G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	wakeup(sc);
}

/*
 * Return TRUE if the given request collides with an in-progress
 * synchronization request.
 */
static int
g_raid3_sync_collision(struct g_raid3_softc *sc, struct bio *bp)
{
	struct g_raid3_disk *disk;
	struct bio *sbp;
	off_t rstart, rend, sstart, send;
	int i;

	disk = sc->sc_syncdisk;
	if (disk == NULL)
		return (0);
	rstart = bp->bio_offset;
	rend = bp->bio_offset + bp->bio_length;
	for (i = 0; i < g_raid3_syncreqs; i++) {
		sbp = disk->d_sync.ds_bios[i];
		if (sbp == NULL)
			continue;
		sstart = sbp->bio_offset;
		send = sbp->bio_length;
		if (sbp->bio_cmd == BIO_WRITE) {
			sstart *= sc->sc_ndisks - 1;
			send *= sc->sc_ndisks - 1;
		}
		send += sstart;
		if (rend > sstart && rstart < send)
			return (1);
	}
	return (0);
}
/*
 * Return TRUE if the given sync request collides with an in-progress regular
 * request.
 */
static int
g_raid3_regular_collision(struct g_raid3_softc *sc, struct bio *sbp)
{
	off_t rstart, rend, sstart, send;
	struct bio *bp;

	if (sc->sc_syncdisk == NULL)
		return (0);
	sstart = sbp->bio_offset;
	send = sstart + sbp->bio_length;
	TAILQ_FOREACH(bp, &sc->sc_inflight.queue, bio_queue) {
		rstart = bp->bio_offset;
		rend = bp->bio_offset + bp->bio_length;
		if (rend > sstart && rstart < send)
			return (1);
	}
	return (0);
}

/*
 * Puts request onto delayed queue.
 */
static void
g_raid3_regular_delay(struct g_raid3_softc *sc, struct bio *bp)
{

	G_RAID3_LOGREQ(2, bp, "Delaying request.");
	bioq_insert_head(&sc->sc_regular_delayed, bp);
}

/*
 * Puts synchronization request onto delayed queue.
 */
static void
g_raid3_sync_delay(struct g_raid3_softc *sc, struct bio *bp)
{

	G_RAID3_LOGREQ(2, bp, "Delaying synchronization request.");
	bioq_insert_tail(&sc->sc_sync_delayed, bp);
}

/*
 * Releases delayed regular requests which don't collide anymore with sync
 * requests.
 */
static void
g_raid3_regular_release(struct g_raid3_softc *sc)
{
	struct bio *bp, *bp2;

	TAILQ_FOREACH_SAFE(bp, &sc->sc_regular_delayed.queue, bio_queue, bp2) {
		if (g_raid3_sync_collision(sc, bp))
			continue;
		bioq_remove(&sc->sc_regular_delayed, bp);
		G_RAID3_LOGREQ(2, bp, "Releasing delayed request (%p).", bp);
		mtx_lock(&sc->sc_queue_mtx);
		bioq_insert_head(&sc->sc_queue, bp);
#if 0
		/*
		 * wakeup() is not needed, because this function is called from
		 * the worker thread.
		 */
		wakeup(&sc->sc_queue);
#endif
		mtx_unlock(&sc->sc_queue_mtx);
	}
}

/*
 * Releases delayed sync requests which don't collide anymore with regular
 * requests.
 */
static void
g_raid3_sync_release(struct g_raid3_softc *sc)
{
	struct bio *bp, *bp2;

	TAILQ_FOREACH_SAFE(bp, &sc->sc_sync_delayed.queue, bio_queue, bp2) {
		if (g_raid3_regular_collision(sc, bp))
			continue;
		bioq_remove(&sc->sc_sync_delayed, bp);
		G_RAID3_LOGREQ(2, bp,
		    "Releasing delayed synchronization request.");
		g_io_request(bp, bp->bio_from);
	}
}

/*
 * Handle synchronization requests.
 * Every synchronization request is a two-step process: first a READ request
 * is sent to the active provider, then a WRITE request (with the data just
 * read) to the provider being synchronized.  When the WRITE finishes, a new
 * synchronization request is sent.
 */
static void
g_raid3_sync_request(struct bio *bp)
{
	struct g_raid3_softc *sc;
	struct g_raid3_disk *disk;

	bp->bio_from->index--;
	sc = bp->bio_from->geom->softc;
	disk = bp->bio_from->private;
	if (disk == NULL) {
		sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
		g_topology_lock();
		g_raid3_kill_consumer(sc, bp->bio_from);
		g_topology_unlock();
		free(bp->bio_data, M_RAID3);
		g_destroy_bio(bp);
		sx_xlock(&sc->sc_lock);
		return;
	}

	/*
	 * Synchronization request.
	 */
	switch (bp->bio_cmd) {
	case BIO_READ:
	    {
		struct g_consumer *cp;
		u_char *dst, *src;
		off_t left;
		u_int atom;

		if (bp->bio_error != 0) {
			G_RAID3_LOGREQ(0, bp,
			    "Synchronization request failed (error=%d).",
			    bp->bio_error);
			g_destroy_bio(bp);
			return;
		}
		G_RAID3_LOGREQ(3, bp, "Synchronization request finished.");
		atom = sc->sc_sectorsize / (sc->sc_ndisks - 1);
		dst = src = bp->bio_data;
		if (disk->d_no == sc->sc_ndisks - 1) {
			u_int n;

			/* Parity component. */
			for (left = bp->bio_length; left > 0;
			    left -= sc->sc_sectorsize) {
				bcopy(src, dst, atom);
				src += atom;
				for (n = 1; n < sc->sc_ndisks - 1; n++) {
					g_raid3_xor(src, dst, atom);
					src += atom;
				}
				dst += atom;
			}
		} else {
			/* Regular component. */
			src += atom * disk->d_no;
			for (left = bp->bio_length; left > 0;
			    left -= sc->sc_sectorsize) {
				bcopy(src, dst, atom);
				src += sc->sc_sectorsize;
				dst += atom;
			}
		}
		bp->bio_driver1 = bp->bio_driver2 = NULL;
		bp->bio_pflags = 0;
		bp->bio_offset /= sc->sc_ndisks - 1;
		bp->bio_length /= sc->sc_ndisks - 1;
		bp->bio_cmd = BIO_WRITE;
		bp->bio_cflags = 0;
		bp->bio_children = bp->bio_inbed = 0;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		cp->index++;
		g_io_request(bp, cp);
		return;
	    }
	case BIO_WRITE:
	    {
		struct g_raid3_disk_sync *sync;
		off_t boffset, moffset;
		void *data;
		int i;

		if (bp->bio_error != 0) {
			G_RAID3_LOGREQ(0, bp,
			    "Synchronization request failed (error=%d).",
			    bp->bio_error);
			g_destroy_bio(bp);
			sc->sc_bump_id |= G_RAID3_BUMP_GENID;
			g_raid3_event_send(disk,
			    G_RAID3_DISK_STATE_DISCONNECTED,
			    G_RAID3_EVENT_DONTWAIT);
			return;
		}
		G_RAID3_LOGREQ(3, bp, "Synchronization request finished.");
		sync = &disk->d_sync;
		if (sync->ds_offset == sc->sc_mediasize / (sc->sc_ndisks - 1) ||
		    sync->ds_consumer == NULL ||
		    (sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) != 0) {
			/* Don't send more synchronization requests. */
			sync->ds_inflight--;
			if (sync->ds_bios != NULL) {
				i = (int)(uintptr_t)bp->bio_caller1;
				sync->ds_bios[i] = NULL;
			}
			free(bp->bio_data, M_RAID3);
			g_destroy_bio(bp);
			if (sync->ds_inflight > 0)
				return;
			if (sync->ds_consumer == NULL ||
			    (sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) != 0) {
				return;
			}
			/*
			 * Disk up-to-date, activate it.
			 */
			g_raid3_event_send(disk, G_RAID3_DISK_STATE_ACTIVE,
			    G_RAID3_EVENT_DONTWAIT);
			return;
		}

		/* Send next synchronization request. */
		data = bp->bio_data;
		g_reset_bio(bp);
		bp->bio_cmd = BIO_READ;
		bp->bio_offset = sync->ds_offset * (sc->sc_ndisks - 1);
		bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset);
		sync->ds_offset += bp->bio_length / (sc->sc_ndisks - 1);
		bp->bio_done = g_raid3_sync_done;
		bp->bio_data = data;
		bp->bio_from = sync->ds_consumer;
		bp->bio_to = sc->sc_provider;
		G_RAID3_LOGREQ(3, bp, "Sending synchronization request.");
		sync->ds_consumer->index++;
		/*
		 * Delay the request if it is colliding with a regular request.
		 */
		if (g_raid3_regular_collision(sc, bp))
			g_raid3_sync_delay(sc, bp);
		else
			g_io_request(bp, sync->ds_consumer);

		/* Release delayed requests if possible. */
		g_raid3_regular_release(sc);

		/* Find the smallest offset. */
		moffset = sc->sc_mediasize;
		for (i = 0; i < g_raid3_syncreqs; i++) {
			bp = sync->ds_bios[i];
			boffset = bp->bio_offset;
			if (bp->bio_cmd == BIO_WRITE)
				boffset *= sc->sc_ndisks - 1;
			if (boffset < moffset)
				moffset = boffset;
		}
		if (sync->ds_offset_done + (MAXPHYS * 100) < moffset) {
			/* Update offset_done on every 100 blocks. */
			sync->ds_offset_done = moffset;
			g_raid3_update_metadata(disk);
		}
		return;
	    }
	default:
		KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
		    bp->bio_cmd, sc->sc_name));
		break;
	}
}

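/*
 * Split a regular request into per-component children.  Each child gets the
 * parent's offset and length divided by the number of data components
 * (ndisks - 1); READs normally use only the data components (with parity
 * standing in for a missing or skipped disk, or added in VERIFY mode), while
 * WRITE/DELETE goes to every component.
 */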
static int
g_raid3_register_request(struct bio *pbp)
{
	struct g_raid3_softc *sc;
	struct g_raid3_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp, *tmpbp;
	off_t offset, length;
	u_int n, ndisks;
	int round_robin, verify;

	ndisks = 0;
	sc = pbp->bio_to->geom->softc;
	if ((pbp->bio_cflags & G_RAID3_BIO_CFLAG_REGSYNC) != 0 &&
	    sc->sc_syncdisk == NULL) {
		g_io_deliver(pbp, EIO);
		return (0);
	}
	g_raid3_init_bio(pbp);
	length = pbp->bio_length / (sc->sc_ndisks - 1);
	offset = pbp->bio_offset / (sc->sc_ndisks - 1);
	round_robin = verify = 0;
	switch (pbp->bio_cmd) {
	case BIO_READ:
		if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_VERIFY) != 0 &&
		    sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
			pbp->bio_pflags |= G_RAID3_BIO_PFLAG_VERIFY;
			verify = 1;
			ndisks = sc->sc_ndisks;
		} else {
			verify = 0;
			ndisks = sc->sc_ndisks - 1;
		}
		if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_ROUND_ROBIN) != 0 &&
		    sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
			round_robin = 1;
		} else {
			round_robin = 0;
		}
		KASSERT(!round_robin || !verify,
		    ("ROUND-ROBIN and VERIFY are mutually exclusive."));
		pbp->bio_driver2 = &sc->sc_disks[sc->sc_ndisks - 1];
		break;
	case BIO_WRITE:
	case BIO_DELETE:
		/*
		 * Delay the request if it is colliding with a synchronization
		 * request.
		 */
		if (g_raid3_sync_collision(sc, pbp)) {
			g_raid3_regular_delay(sc, pbp);
			return (0);
		}

		if (sc->sc_idle)
			g_raid3_unidle(sc);
		else
			sc->sc_last_write = time_uptime;

		ndisks = sc->sc_ndisks;
		break;
	}
	for (n = 0; n < ndisks; n++) {
		disk = &sc->sc_disks[n];
		cbp = g_raid3_clone_bio(sc, pbp);
		if (cbp == NULL) {
			while ((cbp = G_RAID3_HEAD_BIO(pbp)) != NULL)
				g_raid3_destroy_bio(sc, cbp);
			/*
			 * To prevent deadlock, we must run back up
			 * with the ENOMEM for failed requests of any
			 * of our consumers.  Our own sync requests
			 * can stick around, as they are finite.
			 */
			if ((pbp->bio_cflags &
			    G_RAID3_BIO_CFLAG_REGULAR) != 0) {
				g_io_deliver(pbp, ENOMEM);
				return (0);
			}
			return (ENOMEM);
		}
		cbp->bio_offset = offset;
		cbp->bio_length = length;
		cbp->bio_done = g_raid3_done;
		switch (pbp->bio_cmd) {
		case BIO_READ:
			if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE) {
				/*
				 * Replace invalid component with the parity
				 * component.
				 */
				disk = &sc->sc_disks[sc->sc_ndisks - 1];
				cbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY;
				pbp->bio_pflags |= G_RAID3_BIO_PFLAG_DEGRADED;
			} else if (round_robin &&
			    disk->d_no == sc->sc_round_robin) {
				/*
				 * In round-robin mode skip one data component
				 * and use parity component when reading.
				 */
				pbp->bio_driver2 = disk;
				disk = &sc->sc_disks[sc->sc_ndisks - 1];
				cbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY;
				sc->sc_round_robin++;
				round_robin = 0;
			} else if (verify && disk->d_no == sc->sc_ndisks - 1) {
				cbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY;
			}
			break;
		case BIO_WRITE:
		case BIO_DELETE:
			if (disk->d_state == G_RAID3_DISK_STATE_ACTIVE ||
			    disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
				if (n == ndisks - 1) {
					/*
					 * Active parity component, mark it as such.
					 */
					cbp->bio_cflags |=
					    G_RAID3_BIO_CFLAG_PARITY;
				}
			} else {
				pbp->bio_pflags |= G_RAID3_BIO_PFLAG_DEGRADED;
				if (n == ndisks - 1) {
					/*
					 * Parity component is not connected,
					 * so destroy its request.
					 */
					pbp->bio_pflags |=
					    G_RAID3_BIO_PFLAG_NOPARITY;
					g_raid3_destroy_bio(sc, cbp);
					cbp = NULL;
				} else {
					cbp->bio_cflags |=
					    G_RAID3_BIO_CFLAG_NODISK;
					disk = NULL;
				}
			}
			break;
		}
		if (cbp != NULL)
			cbp->bio_caller2 = disk;
	}
	switch (pbp->bio_cmd) {
	case BIO_READ:
		if (round_robin) {
1911 			/*
1912 			 * If we are in round-robin mode and 'round_robin' is
1913 			 * still 1, we skipped the parity component for this
1914 			 * read and must reset the sc_round_robin field.
1915 			 */
1916 			sc->sc_round_robin = 0;
1917 		}
1918 		G_RAID3_FOREACH_SAFE_BIO(pbp, cbp, tmpbp) {
1919 			disk = cbp->bio_caller2;
1920 			cp = disk->d_consumer;
1921 			cbp->bio_to = cp->provider;
1922 			G_RAID3_LOGREQ(3, cbp, "Sending request.");
1923 			KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
1924 			    ("Consumer %s not opened (r%dw%de%d).",
1925 			    cp->provider->name, cp->acr, cp->acw, cp->ace));
1926 			cp->index++;
1927 			g_io_request(cbp, cp);
1928 		}
1929 		break;
1930 	case BIO_WRITE:
1931 	case BIO_DELETE:
1932 		/*
1933 		 * Put the request onto the inflight queue, so we can check
1934 		 * whether new synchronization requests collide with it.
1935 		 */
1936 		bioq_insert_tail(&sc->sc_inflight, pbp);
1937 
1938 		/*
1939 		 * Bump syncid on first write.
1940 		 */
1941 		if ((sc->sc_bump_id & G_RAID3_BUMP_SYNCID) != 0) {
1942 			sc->sc_bump_id &= ~G_RAID3_BUMP_SYNCID;
1943 			g_raid3_bump_syncid(sc);
1944 		}
1945 		g_raid3_scatter(pbp);
1946 		break;
1947 	}
1948 	return (0);
1949 }
1950 
1951 static int
1952 g_raid3_can_destroy(struct g_raid3_softc *sc)
1953 {
1954 	struct g_geom *gp;
1955 	struct g_consumer *cp;
1956 
1957 	g_topology_assert();
1958 	gp = sc->sc_geom;
1959 	if (gp->softc == NULL)
1960 		return (1);
1961 	LIST_FOREACH(cp, &gp->consumer, consumer) {
1962 		if (g_raid3_is_busy(sc, cp))
1963 			return (0);
1964 	}
1965 	gp = sc->sc_sync.ds_geom;
1966 	LIST_FOREACH(cp, &gp->consumer, consumer) {
1967 		if (g_raid3_is_busy(sc, cp))
1968 			return (0);
1969 	}
1970 	G_RAID3_DEBUG(2, "No I/O requests for %s, it can be destroyed.",
1971 	    sc->sc_name);
1972 	return (1);
1973 }
1974 
1975 static int
1976 g_raid3_try_destroy(struct g_raid3_softc *sc)
1977 {
1978 
1979 	g_topology_assert_not();
1980 	sx_assert(&sc->sc_lock, SX_XLOCKED);
1981 
1982 	if (sc->sc_rootmount != NULL) {
1983 		G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
1984 		    sc->sc_rootmount);
1985 		root_mount_rel(sc->sc_rootmount);
1986 		sc->sc_rootmount = NULL;
1987 	}
1988 
1989 	g_topology_lock();
1990 	if (!g_raid3_can_destroy(sc)) {
1991 		g_topology_unlock();
1992 		return (0);
1993 	}
1994 	sc->sc_geom->softc = NULL;
1995 	sc->sc_sync.ds_geom->softc = NULL;
1996 	if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_WAIT) != 0) {
1997 		g_topology_unlock();
1998 		G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__,
1999 		    &sc->sc_worker);
2000 		/* Unlock sc_lock here, as it can be destroyed after wakeup. */
2001 		sx_xunlock(&sc->sc_lock);
2002 		wakeup(&sc->sc_worker);
2003 		sc->sc_worker = NULL;
2004 	} else {
2005 		g_topology_unlock();
2006 		g_raid3_destroy_device(sc);
2007 		free(sc->sc_disks, M_RAID3);
2008 		free(sc, M_RAID3);
2009 	}
2010 	return (1);
2011 }
2012 
2013 /*
2014  * Worker thread.
2015  */
2016 static void
2017 g_raid3_worker(void *arg)
2018 {
2019 	struct g_raid3_softc *sc;
2020 	struct g_raid3_event *ep;
2021 	struct bio *bp;
2022 	int timeout;
2023 
2024 	sc = arg;
2025 	thread_lock(curthread);
2026 	sched_prio(curthread, PRIBIO);
2027 	thread_unlock(curthread);
2028 
2029 	sx_xlock(&sc->sc_lock);
2030 	for (;;) {
2031 		G_RAID3_DEBUG(5, "%s: Let's see...", __func__);
2032 		/*
2033 		 * First take a look at events.
2034 		 * It is important to handle events before any I/O requests.
2035 		 */
2036 		ep = g_raid3_event_get(sc);
2037 		if (ep != NULL) {
2038 			g_raid3_event_remove(sc, ep);
2039 			if ((ep->e_flags & G_RAID3_EVENT_DEVICE) != 0) {
2040 				/* Update only device status. */
2041 				G_RAID3_DEBUG(3,
2042 				    "Running event for device %s.",
2043 				    sc->sc_name);
2044 				ep->e_error = 0;
2045 				g_raid3_update_device(sc, 1);
2046 			} else {
2047 				/* Update disk status. */
2048 				G_RAID3_DEBUG(3, "Running event for disk %s.",
2049 				     g_raid3_get_diskname(ep->e_disk));
2050 				ep->e_error = g_raid3_update_disk(ep->e_disk,
2051 				    ep->e_state);
2052 				if (ep->e_error == 0)
2053 					g_raid3_update_device(sc, 0);
2054 			}
2055 			if ((ep->e_flags & G_RAID3_EVENT_DONTWAIT) != 0) {
2056 				KASSERT(ep->e_error == 0,
2057 				    ("Error cannot be handled."));
2058 				g_raid3_event_free(ep);
2059 			} else {
2060 				ep->e_flags |= G_RAID3_EVENT_DONE;
2061 				G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__,
2062 				    ep);
2063 				mtx_lock(&sc->sc_events_mtx);
2064 				wakeup(ep);
2065 				mtx_unlock(&sc->sc_events_mtx);
2066 			}
2067 			if ((sc->sc_flags &
2068 			    G_RAID3_DEVICE_FLAG_DESTROY) != 0) {
2069 				if (g_raid3_try_destroy(sc)) {
2070 					curthread->td_pflags &= ~TDP_GEOM;
2071 					G_RAID3_DEBUG(1, "Thread exiting.");
2072 					kproc_exit(0);
2073 				}
2074 			}
2075 			G_RAID3_DEBUG(5, "%s: I'm here 1.", __func__);
2076 			continue;
2077 		}
2078 		/*
2079 		 * Check if we can mark the array as CLEAN and, if not,
2080 		 * how many seconds we should wait before trying again.
2081 		 */
2082 		timeout = g_raid3_idle(sc, -1);
2083 		/*
2084 		 * Now I/O requests.
2085 		 */
2086 		/* Get first request from the queue. */
2087 		mtx_lock(&sc->sc_queue_mtx);
2088 		bp = bioq_first(&sc->sc_queue);
2089 		if (bp == NULL) {
2090 			if ((sc->sc_flags &
2091 			    G_RAID3_DEVICE_FLAG_DESTROY) != 0) {
2092 				mtx_unlock(&sc->sc_queue_mtx);
2093 				if (g_raid3_try_destroy(sc)) {
2094 					curthread->td_pflags &= ~TDP_GEOM;
2095 					G_RAID3_DEBUG(1, "Thread exiting.");
2096 					kproc_exit(0);
2097 				}
2098 				mtx_lock(&sc->sc_queue_mtx);
2099 			}
2100 			sx_xunlock(&sc->sc_lock);
2101 			/*
2102 			 * XXX: We can miss an event here, because an event
2103 			 *      can be added without the sx-device-lock and
2104 			 *      without the mtx-queue-lock. Maybe I should just
2105 			 *      stop using the dedicated mutex for event
2106 			 *      synchronization and stick with the queue lock?
2107 			 *      The event will hang here until the next I/O
2108 			 *      request or the next event arrives.
2109 			 */
2110 			MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP, "r3:w1",
2111 			    timeout * hz);
2112 			sx_xlock(&sc->sc_lock);
2113 			G_RAID3_DEBUG(5, "%s: I'm here 4.", __func__);
2114 			continue;
2115 		}
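		/*
		 * Process the selected request.  This label is also
		 * re-entered from the low-memory path below once a finished
		 * regular or synchronization request is found on the queue.
		 */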
2116 process:
2117 		bioq_remove(&sc->sc_queue, bp);
2118 		mtx_unlock(&sc->sc_queue_mtx);
2119 
2120 		if (bp->bio_from->geom == sc->sc_sync.ds_geom &&
2121 		    (bp->bio_cflags & G_RAID3_BIO_CFLAG_SYNC) != 0) {
2122 			g_raid3_sync_request(bp);	/* READ */
2123 		} else if (bp->bio_to != sc->sc_provider) {
2124 			if ((bp->bio_cflags & G_RAID3_BIO_CFLAG_REGULAR) != 0)
2125 				g_raid3_regular_request(bp);
2126 			else if ((bp->bio_cflags & G_RAID3_BIO_CFLAG_SYNC) != 0)
2127 				g_raid3_sync_request(bp);	/* WRITE */
2128 			else {
2129 				KASSERT(0,
2130 				    ("Invalid request cflags=0x%hhx to=%s.",
2131 				    bp->bio_cflags, bp->bio_to->name));
2132 			}
2133 		} else if (g_raid3_register_request(bp) != 0) {
2134 			mtx_lock(&sc->sc_queue_mtx);
2135 			bioq_insert_head(&sc->sc_queue, bp);
2136 			/*
2137 			 * We are short on memory, so let's see if there are
2138 			 * finished requests we can free.
2139 			 */
2140 			TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
2141 				if (bp->bio_cflags & G_RAID3_BIO_CFLAG_REGULAR)
2142 					goto process;
2143 			}
2144 			/*
2145 			 * No finished regular requests, so at least keep
2146 			 * synchronization running.
2147 			 */
2148 			TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
2149 				if (bp->bio_cflags & G_RAID3_BIO_CFLAG_SYNC)
2150 					goto process;
2151 			}
2152 			sx_xunlock(&sc->sc_lock);
2153 			MSLEEP(&sc->sc_queue, &sc->sc_queue_mtx, PRIBIO | PDROP,
2154 			    "r3:lowmem", hz / 10);
2155 			sx_xlock(&sc->sc_lock);
2156 		}
2157 		G_RAID3_DEBUG(5, "%s: I'm here 9.", __func__);
2158 	}
2159 }
2160 
2161 static void
2162 g_raid3_update_idle(struct g_raid3_softc *sc, struct g_raid3_disk *disk)
2163 {
2164 
2165 	sx_assert(&sc->sc_lock, SX_LOCKED);
2166 	if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) != 0)
2167 		return;
2168 	if (!sc->sc_idle && (disk->d_flags & G_RAID3_DISK_FLAG_DIRTY) == 0) {
2169 		G_RAID3_DEBUG(1, "Disk %s (device %s) marked as dirty.",
2170 		    g_raid3_get_diskname(disk), sc->sc_name);
2171 		disk->d_flags |= G_RAID3_DISK_FLAG_DIRTY;
2172 	} else if (sc->sc_idle &&
2173 	    (disk->d_flags & G_RAID3_DISK_FLAG_DIRTY) != 0) {
2174 		G_RAID3_DEBUG(1, "Disk %s (device %s) marked as clean.",
2175 		    g_raid3_get_diskname(disk), sc->sc_name);
2176 		disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
2177 	}
2178 }
2179 
2180 static void
2181 g_raid3_sync_start(struct g_raid3_softc *sc)
2182 {
2183 	struct g_raid3_disk *disk;
2184 	struct g_consumer *cp;
2185 	struct bio *bp;
2186 	int error;
2187 	u_int n;
2188 
2189 	g_topology_assert_not();
2190 	sx_assert(&sc->sc_lock, SX_XLOCKED);
2191 
2192 	KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED,
2193 	    ("Device not in DEGRADED state (%s, %u).", sc->sc_name,
2194 	    sc->sc_state));
2195 	KASSERT(sc->sc_syncdisk == NULL, ("Syncdisk is not NULL (%s, %u).",
2196 	    sc->sc_name, sc->sc_state));
2197 	disk = NULL;
2198 	for (n = 0; n < sc->sc_ndisks; n++) {
2199 		if (sc->sc_disks[n].d_state != G_RAID3_DISK_STATE_SYNCHRONIZING)
2200 			continue;
2201 		disk = &sc->sc_disks[n];
2202 		break;
2203 	}
2204 	if (disk == NULL)
2205 		return;
2206 
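	/*
	 * The synchronization source is our own provider: reads through it
	 * return reconstructed data even while the array is degraded.
	 * sc_lock is dropped around the topology operations below.
	 */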
2207 	sx_xunlock(&sc->sc_lock);
2208 	g_topology_lock();
2209 	cp = g_new_consumer(sc->sc_sync.ds_geom);
2210 	error = g_attach(cp, sc->sc_provider);
2211 	KASSERT(error == 0,
2212 	    ("Cannot attach to %s (error=%d).", sc->sc_name, error));
2213 	error = g_access(cp, 1, 0, 0);
2214 	KASSERT(error == 0, ("Cannot open %s (error=%d).", sc->sc_name, error));
2215 	g_topology_unlock();
2216 	sx_xlock(&sc->sc_lock);
2217 
2218 	G_RAID3_DEBUG(0, "Device %s: rebuilding provider %s.", sc->sc_name,
2219 	    g_raid3_get_diskname(disk));
2220 	if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) == 0)
2221 		disk->d_flags |= G_RAID3_DISK_FLAG_DIRTY;
2222 	KASSERT(disk->d_sync.ds_consumer == NULL,
2223 	    ("Sync consumer already exists (device=%s, disk=%s).",
2224 	    sc->sc_name, g_raid3_get_diskname(disk)));
2225 
2226 	disk->d_sync.ds_consumer = cp;
2227 	disk->d_sync.ds_consumer->private = disk;
2228 	disk->d_sync.ds_consumer->index = 0;
2229 	sc->sc_syncdisk = disk;
2230 
2231 	/*
2232 	 * Allocate memory for synchronization bios and initialize them.
2233 	 */
2234 	disk->d_sync.ds_bios = malloc(sizeof(struct bio *) * g_raid3_syncreqs,
2235 	    M_RAID3, M_WAITOK);
2236 	for (n = 0; n < g_raid3_syncreqs; n++) {
2237 		bp = g_alloc_bio();
2238 		disk->d_sync.ds_bios[n] = bp;
2239 		bp->bio_parent = NULL;
2240 		bp->bio_cmd = BIO_READ;
2241 		bp->bio_data = malloc(MAXPHYS, M_RAID3, M_WAITOK);
2242 		bp->bio_cflags = 0;
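		/*
		 * ds_offset counts per-component bytes; on the provider the
		 * same data spans (sc_ndisks - 1) times as many bytes, hence
		 * the scaling in both directions below.
		 */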
2243 		bp->bio_offset = disk->d_sync.ds_offset * (sc->sc_ndisks - 1);
2244 		bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset);
2245 		disk->d_sync.ds_offset += bp->bio_length / (sc->sc_ndisks - 1);
2246 		bp->bio_done = g_raid3_sync_done;
2247 		bp->bio_from = disk->d_sync.ds_consumer;
2248 		bp->bio_to = sc->sc_provider;
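		/* Remember which ds_bios[] slot this request occupies. */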
2249 		bp->bio_caller1 = (void *)(uintptr_t)n;
2250 	}
2251 
2252 	/* Set the number of in-flight synchronization requests. */
2253 	disk->d_sync.ds_inflight = g_raid3_syncreqs;
2254 
2255 	/*
2256 	 * Fire off first synchronization requests.
2257 	 */
2258 	for (n = 0; n < g_raid3_syncreqs; n++) {
2259 		bp = disk->d_sync.ds_bios[n];
2260 		G_RAID3_LOGREQ(3, bp, "Sending synchronization request.");
2261 		disk->d_sync.ds_consumer->index++;
2262 		/*
2263 		 * Delay the request if it is colliding with a regular request.
2264 		 */
2265 		if (g_raid3_regular_collision(sc, bp))
2266 			g_raid3_sync_delay(sc, bp);
2267 		else
2268 			g_io_request(bp, disk->d_sync.ds_consumer);
2269 	}
2270 }
2271 
2272 /*
2273  * Stop synchronization process.
2274  * type: 0 - synchronization finished
2275  *       1 - synchronization stopped
2276  */
2277 static void
2278 g_raid3_sync_stop(struct g_raid3_softc *sc, int type)
2279 {
2280 	struct g_raid3_disk *disk;
2281 	struct g_consumer *cp;
2282 
2283 	g_topology_assert_not();
2284 	sx_assert(&sc->sc_lock, SX_LOCKED);
2285 
2286 	KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED,
2287 	    ("Device not in DEGRADED state (%s, %u).", sc->sc_name,
2288 	    sc->sc_state));
2289 	disk = sc->sc_syncdisk;
2290 	sc->sc_syncdisk = NULL;
2291 	KASSERT(disk != NULL, ("No disk was synchronized (%s).", sc->sc_name));
2292 	KASSERT(disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING,
2293 	    ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk),
2294 	    g_raid3_disk_state2str(disk->d_state)));
2295 	if (disk->d_sync.ds_consumer == NULL)
2296 		return;
2297 
2298 	if (type == 0) {
2299 		G_RAID3_DEBUG(0, "Device %s: rebuilding provider %s finished.",
2300 		    sc->sc_name, g_raid3_get_diskname(disk));
2301 	} else /* if (type == 1) */ {
2302 		G_RAID3_DEBUG(0, "Device %s: rebuilding provider %s stopped.",
2303 		    sc->sc_name, g_raid3_get_diskname(disk));
2304 	}
2305 	free(disk->d_sync.ds_bios, M_RAID3);
2306 	disk->d_sync.ds_bios = NULL;
2307 	cp = disk->d_sync.ds_consumer;
2308 	disk->d_sync.ds_consumer = NULL;
2309 	disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
2310 	sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
2311 	g_topology_lock();
2312 	g_raid3_kill_consumer(sc, cp);
2313 	g_topology_unlock();
2314 	sx_xlock(&sc->sc_lock);
2315 }
2316 
2317 static void
2318 g_raid3_launch_provider(struct g_raid3_softc *sc)
2319 {
2320 	struct g_provider *pp;
2321 	struct g_raid3_disk *disk;
2322 	int n;
2323 
2324 	sx_assert(&sc->sc_lock, SX_LOCKED);
2325 
2326 	g_topology_lock();
2327 	pp = g_new_providerf(sc->sc_geom, "raid3/%s", sc->sc_name);
2328 	pp->mediasize = sc->sc_mediasize;
2329 	pp->sectorsize = sc->sc_sectorsize;
2330 	pp->stripesize = 0;
2331 	pp->stripeoffset = 0;
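	/*
	 * Inherit the largest stripe geometry found among the components;
	 * it is scaled by the number of data disks after the loop.
	 */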
2332 	for (n = 0; n < sc->sc_ndisks; n++) {
2333 		disk = &sc->sc_disks[n];
2334 		if (disk->d_consumer && disk->d_consumer->provider &&
2335 		    disk->d_consumer->provider->stripesize > pp->stripesize) {
2336 			pp->stripesize = disk->d_consumer->provider->stripesize;
2337 			pp->stripeoffset = disk->d_consumer->provider->stripeoffset;
2338 		}
2339 	}
2340 	pp->stripesize *= sc->sc_ndisks - 1;
2341 	pp->stripeoffset *= sc->sc_ndisks - 1;
2342 	sc->sc_provider = pp;
2343 	g_error_provider(pp, 0);
2344 	g_topology_unlock();
2345 	G_RAID3_DEBUG(0, "Device %s launched (%u/%u).", pp->name,
2346 	    g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE), sc->sc_ndisks);
2347 
2348 	if (sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED)
2349 		g_raid3_sync_start(sc);
2350 }
2351 
2352 static void
2353 g_raid3_destroy_provider(struct g_raid3_softc *sc)
2354 {
2355 	struct bio *bp;
2356 
2357 	g_topology_assert_not();
2358 	KASSERT(sc->sc_provider != NULL, ("NULL provider (device=%s).",
2359 	    sc->sc_name));
2360 
2361 	g_topology_lock();
2362 	g_error_provider(sc->sc_provider, ENXIO);
2363 	mtx_lock(&sc->sc_queue_mtx);
2364 	while ((bp = bioq_first(&sc->sc_queue)) != NULL) {
2365 		bioq_remove(&sc->sc_queue, bp);
2366 		g_io_deliver(bp, ENXIO);
2367 	}
2368 	mtx_unlock(&sc->sc_queue_mtx);
2369 	G_RAID3_DEBUG(0, "Device %s: provider %s destroyed.", sc->sc_name,
2370 	    sc->sc_provider->name);
2371 	sc->sc_provider->flags |= G_PF_WITHER;
2372 	g_orphan_provider(sc->sc_provider, ENXIO);
2373 	g_topology_unlock();
2374 	sc->sc_provider = NULL;
2375 	if (sc->sc_syncdisk != NULL)
2376 		g_raid3_sync_stop(sc, 1);
2377 }
2378 
2379 static void
2380 g_raid3_go(void *arg)
2381 {
2382 	struct g_raid3_softc *sc;
2383 
2384 	sc = arg;
2385 	G_RAID3_DEBUG(0, "Force device %s start due to timeout.", sc->sc_name);
2386 	g_raid3_event_send(sc, 0,
2387 	    G_RAID3_EVENT_DONTWAIT | G_RAID3_EVENT_DEVICE);
2388 }
2389 
2390 static u_int
2391 g_raid3_determine_state(struct g_raid3_disk *disk)
2392 {
2393 	struct g_raid3_softc *sc;
2394 	u_int state;
2395 
2396 	sc = disk->d_softc;
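	/*
	 * Compare the component's syncid against the device's: equal means
	 * the disk is usable as-is (or can resume synchronization), smaller
	 * means it is out of date, larger means the device was started on
	 * stale disks.
	 */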
2397 	if (sc->sc_syncid == disk->d_sync.ds_syncid) {
2398 		if ((disk->d_flags &
2399 		    G_RAID3_DISK_FLAG_SYNCHRONIZING) == 0) {
2400 			/* Disk does not need synchronization. */
2401 			state = G_RAID3_DISK_STATE_ACTIVE;
2402 		} else {
2403 			if ((sc->sc_flags &
2404 			     G_RAID3_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
2405 			    (disk->d_flags &
2406 			     G_RAID3_DISK_FLAG_FORCE_SYNC) != 0) {
2407 				/*
2408 				 * We can start synchronization from
2409 				 * the stored offset.
2410 				 */
2411 				state = G_RAID3_DISK_STATE_SYNCHRONIZING;
2412 			} else {
2413 				state = G_RAID3_DISK_STATE_STALE;
2414 			}
2415 		}
2416 	} else if (disk->d_sync.ds_syncid < sc->sc_syncid) {
2417 		/*
2418 		 * Reset all synchronization data for this disk,
2419 		 * because even if it was synchronized, it was
2420 		 * synchronized to disks with a different syncid.
2421 		 */
2422 		disk->d_flags |= G_RAID3_DISK_FLAG_SYNCHRONIZING;
2423 		disk->d_sync.ds_offset = 0;
2424 		disk->d_sync.ds_offset_done = 0;
2425 		disk->d_sync.ds_syncid = sc->sc_syncid;
2426 		if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
2427 		    (disk->d_flags & G_RAID3_DISK_FLAG_FORCE_SYNC) != 0) {
2428 			state = G_RAID3_DISK_STATE_SYNCHRONIZING;
2429 		} else {
2430 			state = G_RAID3_DISK_STATE_STALE;
2431 		}
2432 	} else /* if (sc->sc_syncid < disk->d_sync.ds_syncid) */ {
2433 		/*
2434 		 * Not good, NOT GOOD!
2435 		 * It means that the device was started on stale disks
2436 		 * and a fresher disk has just arrived.
2437 		 * If there were writes, the device is broken, sorry.
2438 		 * I think the best choice here is to leave this disk
2439 		 * alone and to inform the user loudly.
2440 		 */
2441 		G_RAID3_DEBUG(0, "Device %s was started before the freshest "
2442 		    "disk (%s) arrived! It will not be connected to the "
2443 		    "running device.", sc->sc_name,
2444 		    g_raid3_get_diskname(disk));
2445 		g_raid3_destroy_disk(disk);
2446 		state = G_RAID3_DISK_STATE_NONE;
2447 		/* Return immediately, because disk was destroyed. */
2448 		return (state);
2449 	}
2450 	G_RAID3_DEBUG(3, "State for %s disk: %s.",
2451 	    g_raid3_get_diskname(disk), g_raid3_disk_state2str(state));
2452 	return (state);
2453 }
2454 
2455 /*
2456  * Update device state.
2457  */
2458 static void
2459 g_raid3_update_device(struct g_raid3_softc *sc, boolean_t force)
2460 {
2461 	struct g_raid3_disk *disk;
2462 	u_int state;
2463 
2464 	sx_assert(&sc->sc_lock, SX_XLOCKED);
2465 
2466 	switch (sc->sc_state) {
2467 	case G_RAID3_DEVICE_STATE_STARTING:
2468 	    {
2469 		u_int n, ndirty, ndisks, genid, syncid;
2470 
2471 		KASSERT(sc->sc_provider == NULL,
2472 		    ("Non-NULL provider in STARTING state (%s).", sc->sc_name));
2473 		/*
2474 		 * Are we ready? We are if all disks are connected, or if
2475 		 * one disk is missing and 'force' is true.
2476 		 */
2477 		if (g_raid3_ndisks(sc, -1) + force == sc->sc_ndisks) {
2478 			if (!force)
2479 				callout_drain(&sc->sc_callout);
2480 		} else {
2481 			if (force) {
2482 				/*
2483 				 * Timeout expired, so destroy the device.
2484 				 */
2485 				sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY;
2486 				G_RAID3_DEBUG(1, "root_mount_rel[%u] %p",
2487 				    __LINE__, sc->sc_rootmount);
2488 				root_mount_rel(sc->sc_rootmount);
2489 				sc->sc_rootmount = NULL;
2490 			}
2491 			return;
2492 		}
2493 
2494 		/*
2495 		 * Find the biggest genid.
2496 		 */
2497 		genid = 0;
2498 		for (n = 0; n < sc->sc_ndisks; n++) {
2499 			disk = &sc->sc_disks[n];
2500 			if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
2501 				continue;
2502 			if (disk->d_genid > genid)
2503 				genid = disk->d_genid;
2504 		}
2505 		sc->sc_genid = genid;
2506 		/*
2507 		 * Remove all disks without the biggest genid.
2508 		 */
2509 		for (n = 0; n < sc->sc_ndisks; n++) {
2510 			disk = &sc->sc_disks[n];
2511 			if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
2512 				continue;
2513 			if (disk->d_genid < genid) {
2514 				G_RAID3_DEBUG(0,
2515 				    "Component %s (device %s) broken, skipping.",
2516 				    g_raid3_get_diskname(disk), sc->sc_name);
2517 				g_raid3_destroy_disk(disk);
2518 			}
2519 		}
2520 
2521 		/*
2522 		 * There must be at least 'sc->sc_ndisks - 1' components
2523 		 * with the same syncid and without the SYNCHRONIZING flag.
2524 		 */
2525 
2526 		/*
2527 		 * Find the biggest syncid, number of valid components and
2528 		 * number of dirty components.
2529 		 */
2530 		ndirty = ndisks = syncid = 0;
2531 		for (n = 0; n < sc->sc_ndisks; n++) {
2532 			disk = &sc->sc_disks[n];
2533 			if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
2534 				continue;
2535 			if ((disk->d_flags & G_RAID3_DISK_FLAG_DIRTY) != 0)
2536 				ndirty++;
2537 			if (disk->d_sync.ds_syncid > syncid) {
2538 				syncid = disk->d_sync.ds_syncid;
2539 				ndisks = 0;
2540 			} else if (disk->d_sync.ds_syncid < syncid) {
2541 				continue;
2542 			}
2543 			if ((disk->d_flags &
2544 			    G_RAID3_DISK_FLAG_SYNCHRONIZING) != 0) {
2545 				continue;
2546 			}
2547 			ndisks++;
2548 		}
2549 		/*
2550 		 * Do we have enough valid components?
2551 		 */
2552 		if (ndisks + 1 < sc->sc_ndisks) {
2553 			G_RAID3_DEBUG(0,
2554 			    "Device %s is broken, too few valid components.",
2555 			    sc->sc_name);
2556 			sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY;
2557 			return;
2558 		}
2559 		/*
2560 		 * If there is one DIRTY component and all disks are present,
2561 		 * mark it for synchronization. If there is more than one DIRTY
2562 		 * component, mark the parity component for synchronization.
2563 		 */
2564 		if (ndisks == sc->sc_ndisks && ndirty == 1) {
2565 			for (n = 0; n < sc->sc_ndisks; n++) {
2566 				disk = &sc->sc_disks[n];
2567 				if ((disk->d_flags &
2568 				    G_RAID3_DISK_FLAG_DIRTY) == 0) {
2569 					continue;
2570 				}
2571 				disk->d_flags |=
2572 				    G_RAID3_DISK_FLAG_SYNCHRONIZING;
2573 			}
2574 		} else if (ndisks == sc->sc_ndisks && ndirty > 1) {
2575 			disk = &sc->sc_disks[sc->sc_ndisks - 1];
2576 			disk->d_flags |= G_RAID3_DISK_FLAG_SYNCHRONIZING;
2577 		}
2578 
2579 		sc->sc_syncid = syncid;
2580 		if (force) {
2581 			/* Remember to bump syncid on first write. */
2582 			sc->sc_bump_id |= G_RAID3_BUMP_SYNCID;
2583 		}
2584 		if (ndisks == sc->sc_ndisks)
2585 			state = G_RAID3_DEVICE_STATE_COMPLETE;
2586 		else /* if (ndisks == sc->sc_ndisks - 1) */
2587 			state = G_RAID3_DEVICE_STATE_DEGRADED;
2588 		G_RAID3_DEBUG(1, "Device %s state changed from %s to %s.",
2589 		    sc->sc_name, g_raid3_device_state2str(sc->sc_state),
2590 		    g_raid3_device_state2str(state));
2591 		sc->sc_state = state;
2592 		for (n = 0; n < sc->sc_ndisks; n++) {
2593 			disk = &sc->sc_disks[n];
2594 			if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
2595 				continue;
2596 			state = g_raid3_determine_state(disk);
2597 			g_raid3_event_send(disk, state, G_RAID3_EVENT_DONTWAIT);
2598 			if (state == G_RAID3_DISK_STATE_STALE)
2599 				sc->sc_bump_id |= G_RAID3_BUMP_SYNCID;
2600 		}
2601 		break;
2602 	    }
2603 	case G_RAID3_DEVICE_STATE_DEGRADED:
2604 		/*
2605 		 * Genid needs to be bumped immediately, so do it here.
2606 		 */
2607 		if ((sc->sc_bump_id & G_RAID3_BUMP_GENID) != 0) {
2608 			sc->sc_bump_id &= ~G_RAID3_BUMP_GENID;
2609 			g_raid3_bump_genid(sc);
2610 		}
2611 
2612 		if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_NEW) > 0)
2613 			return;
2614 		if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) <
2615 		    sc->sc_ndisks - 1) {
2616 			if (sc->sc_provider != NULL)
2617 				g_raid3_destroy_provider(sc);
2618 			sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY;
2619 			return;
2620 		}
2621 		if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) ==
2622 		    sc->sc_ndisks) {
2623 			state = G_RAID3_DEVICE_STATE_COMPLETE;
2624 			G_RAID3_DEBUG(1,
2625 			    "Device %s state changed from %s to %s.",
2626 			    sc->sc_name, g_raid3_device_state2str(sc->sc_state),
2627 			    g_raid3_device_state2str(state));
2628 			sc->sc_state = state;
2629 		}
2630 		if (sc->sc_provider == NULL)
2631 			g_raid3_launch_provider(sc);
2632 		if (sc->sc_rootmount != NULL) {
2633 			G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
2634 			    sc->sc_rootmount);
2635 			root_mount_rel(sc->sc_rootmount);
2636 			sc->sc_rootmount = NULL;
2637 		}
2638 		break;
2639 	case G_RAID3_DEVICE_STATE_COMPLETE:
2640 		/*
2641 		 * Genid needs to be bumped immediately, so do it here.
2642 		 */
2643 		if ((sc->sc_bump_id & G_RAID3_BUMP_GENID) != 0) {
2644 			sc->sc_bump_id &= ~G_RAID3_BUMP_GENID;
2645 			g_raid3_bump_genid(sc);
2646 		}
2647 
2648 		if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_NEW) > 0)
2649 			return;
2650 		KASSERT(g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) >=
2651 		    sc->sc_ndisks - 1,
2652 		    ("Too few ACTIVE components in COMPLETE state (device %s).",
2653 		    sc->sc_name));
2654 		if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) ==
2655 		    sc->sc_ndisks - 1) {
2656 			state = G_RAID3_DEVICE_STATE_DEGRADED;
2657 			G_RAID3_DEBUG(1,
2658 			    "Device %s state changed from %s to %s.",
2659 			    sc->sc_name, g_raid3_device_state2str(sc->sc_state),
2660 			    g_raid3_device_state2str(state));
2661 			sc->sc_state = state;
2662 		}
2663 		if (sc->sc_provider == NULL)
2664 			g_raid3_launch_provider(sc);
2665 		if (sc->sc_rootmount != NULL) {
2666 			G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
2667 			    sc->sc_rootmount);
2668 			root_mount_rel(sc->sc_rootmount);
2669 			sc->sc_rootmount = NULL;
2670 		}
2671 		break;
2672 	default:
2673 		KASSERT(1 == 0, ("Wrong device state (%s, %s).", sc->sc_name,
2674 		    g_raid3_device_state2str(sc->sc_state)));
2675 		break;
2676 	}
2677 }
2678 
2679 /*
2680  * Update disk state and device state if needed.
2681  */
2682 #define	DISK_STATE_CHANGED()	G_RAID3_DEBUG(1,			\
2683 	"Disk %s state changed from %s to %s (device %s).",		\
2684 	g_raid3_get_diskname(disk),					\
2685 	g_raid3_disk_state2str(disk->d_state),				\
2686 	g_raid3_disk_state2str(state), sc->sc_name)
2687 static int
2688 g_raid3_update_disk(struct g_raid3_disk *disk, u_int state)
2689 {
2690 	struct g_raid3_softc *sc;
2691 
2692 	sc = disk->d_softc;
2693 	sx_assert(&sc->sc_lock, SX_XLOCKED);
2694 
2695 again:
2696 	G_RAID3_DEBUG(3, "Changing disk %s state from %s to %s.",
2697 	    g_raid3_get_diskname(disk), g_raid3_disk_state2str(disk->d_state),
2698 	    g_raid3_disk_state2str(state));
2699 	switch (state) {
2700 	case G_RAID3_DISK_STATE_NEW:
2701 		/*
2702 		 * Possible scenarios:
2703 		 * 1. A new disk arrives.
2704 		 */
2705 		/* Previous state should be NONE. */
2706 		KASSERT(disk->d_state == G_RAID3_DISK_STATE_NONE,
2707 		    ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk),
2708 		    g_raid3_disk_state2str(disk->d_state)));
2709 		DISK_STATE_CHANGED();
2710 
2711 		disk->d_state = state;
2712 		G_RAID3_DEBUG(1, "Device %s: provider %s detected.",
2713 		    sc->sc_name, g_raid3_get_diskname(disk));
2714 		if (sc->sc_state == G_RAID3_DEVICE_STATE_STARTING)
2715 			break;
2716 		KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
2717 		    sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE,
2718 		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2719 		    g_raid3_device_state2str(sc->sc_state),
2720 		    g_raid3_get_diskname(disk),
2721 		    g_raid3_disk_state2str(disk->d_state)));
2722 		state = g_raid3_determine_state(disk);
2723 		if (state != G_RAID3_DISK_STATE_NONE)
2724 			goto again;
2725 		break;
2726 	case G_RAID3_DISK_STATE_ACTIVE:
2727 		/*
2728 		 * Possible scenarios:
2729 		 * 1. New disk does not need synchronization.
2730 		 * 2. Synchronization process finished successfully.
2731 		 */
2732 		KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
2733 		    sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE,
2734 		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2735 		    g_raid3_device_state2str(sc->sc_state),
2736 		    g_raid3_get_diskname(disk),
2737 		    g_raid3_disk_state2str(disk->d_state)));
2738 		/* Previous state should be NEW or SYNCHRONIZING. */
2739 		KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW ||
2740 		    disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING,
2741 		    ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk),
2742 		    g_raid3_disk_state2str(disk->d_state)));
2743 		DISK_STATE_CHANGED();
2744 
2745 		if (disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
2746 			disk->d_flags &= ~G_RAID3_DISK_FLAG_SYNCHRONIZING;
2747 			disk->d_flags &= ~G_RAID3_DISK_FLAG_FORCE_SYNC;
2748 			g_raid3_sync_stop(sc, 0);
2749 		}
2750 		disk->d_state = state;
2751 		disk->d_sync.ds_offset = 0;
2752 		disk->d_sync.ds_offset_done = 0;
2753 		g_raid3_update_idle(sc, disk);
2754 		g_raid3_update_metadata(disk);
2755 		G_RAID3_DEBUG(1, "Device %s: provider %s activated.",
2756 		    sc->sc_name, g_raid3_get_diskname(disk));
2757 		break;
2758 	case G_RAID3_DISK_STATE_STALE:
2759 		/*
2760 		 * Possible scenarios:
2761 		 * 1. A stale disk was connected.
2762 		 */
2763 		/* Previous state should be NEW. */
2764 		KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW,
2765 		    ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk),
2766 		    g_raid3_disk_state2str(disk->d_state)));
2767 		KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
2768 		    sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE,
2769 		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2770 		    g_raid3_device_state2str(sc->sc_state),
2771 		    g_raid3_get_diskname(disk),
2772 		    g_raid3_disk_state2str(disk->d_state)));
2773 		/*
2774 		 * STALE state is only possible if the device is marked
2775 		 * NOAUTOSYNC.
2776 		 */
2777 		KASSERT((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOAUTOSYNC) != 0,
2778 		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2779 		    g_raid3_device_state2str(sc->sc_state),
2780 		    g_raid3_get_diskname(disk),
2781 		    g_raid3_disk_state2str(disk->d_state)));
2782 		DISK_STATE_CHANGED();
2783 
2784 		disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
2785 		disk->d_state = state;
2786 		g_raid3_update_metadata(disk);
2787 		G_RAID3_DEBUG(0, "Device %s: provider %s is stale.",
2788 		    sc->sc_name, g_raid3_get_diskname(disk));
2789 		break;
2790 	case G_RAID3_DISK_STATE_SYNCHRONIZING:
2791 		/*
2792 		 * Possible scenarios:
2793 		 * 1. A disk which needs synchronization was connected.
2794 		 */
2795 		/* Previous state should be NEW. */
2796 		KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW,
2797 		    ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk),
2798 		    g_raid3_disk_state2str(disk->d_state)));
2799 		KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
2800 		    sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE,
2801 		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2802 		    g_raid3_device_state2str(sc->sc_state),
2803 		    g_raid3_get_diskname(disk),
2804 		    g_raid3_disk_state2str(disk->d_state)));
2805 		DISK_STATE_CHANGED();
2806 
2807 		if (disk->d_state == G_RAID3_DISK_STATE_NEW)
2808 			disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
2809 		disk->d_state = state;
2810 		if (sc->sc_provider != NULL) {
2811 			g_raid3_sync_start(sc);
2812 			g_raid3_update_metadata(disk);
2813 		}
2814 		break;
2815 	case G_RAID3_DISK_STATE_DISCONNECTED:
2816 		/*
2817 		 * Possible scenarios:
2818 		 * 1. Device wasn't running yet, but a disk disappeared.
2819 		 * 2. Disk was active and disappeared.
2820 		 * 3. Disk disappeared during the synchronization process.
2821 		 */
2822 		if (sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
2823 		    sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
2824 			/*
2825 			 * Previous state should be ACTIVE, STALE or
2826 			 * SYNCHRONIZING.
2827 			 */
2828 			KASSERT(disk->d_state == G_RAID3_DISK_STATE_ACTIVE ||
2829 			    disk->d_state == G_RAID3_DISK_STATE_STALE ||
2830 			    disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING,
2831 			    ("Wrong disk state (%s, %s).",
2832 			    g_raid3_get_diskname(disk),
2833 			    g_raid3_disk_state2str(disk->d_state)));
2834 		} else if (sc->sc_state == G_RAID3_DEVICE_STATE_STARTING) {
2835 			/* Previous state should be NEW. */
2836 			KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW,
2837 			    ("Wrong disk state (%s, %s).",
2838 			    g_raid3_get_diskname(disk),
2839 			    g_raid3_disk_state2str(disk->d_state)));
2840 			/*
2841 			 * Cancel the pending syncid bump if a disk
2842 			 * disappeared in the STARTING state.
2843 			 */
2844 			if ((sc->sc_bump_id & G_RAID3_BUMP_SYNCID) != 0)
2845 				sc->sc_bump_id &= ~G_RAID3_BUMP_SYNCID;
2846 #ifdef	INVARIANTS
2847 		} else {
2848 			KASSERT(1 == 0, ("Wrong device state (%s, %s, %s, %s).",
2849 			    sc->sc_name,
2850 			    g_raid3_device_state2str(sc->sc_state),
2851 			    g_raid3_get_diskname(disk),
2852 			    g_raid3_disk_state2str(disk->d_state)));
2853 #endif
2854 		}
2855 		DISK_STATE_CHANGED();
2856 		G_RAID3_DEBUG(0, "Device %s: provider %s disconnected.",
2857 		    sc->sc_name, g_raid3_get_diskname(disk));
2858 
2859 		g_raid3_destroy_disk(disk);
2860 		break;
2861 	default:
2862 		KASSERT(1 == 0, ("Unknown state (%u).", state));
2863 		break;
2864 	}
2865 	return (0);
2866 }
2867 #undef	DISK_STATE_CHANGED
2868 
2869 int
2870 g_raid3_read_metadata(struct g_consumer *cp, struct g_raid3_metadata *md)
2871 {
2872 	struct g_provider *pp;
2873 	u_char *buf;
2874 	int error;
2875 
2876 	g_topology_assert();
2877 
2878 	error = g_access(cp, 1, 0, 0);
2879 	if (error != 0)
2880 		return (error);
2881 	pp = cp->provider;
2882 	g_topology_unlock();
2883 	/* Metadata is stored in the last sector. */
2884 	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
2885 	    &error);
2886 	g_topology_lock();
2887 	g_access(cp, -1, 0, 0);
2888 	if (buf == NULL) {
2889 		G_RAID3_DEBUG(1, "Cannot read metadata from %s (error=%d).",
2890 		    cp->provider->name, error);
2891 		return (error);
2892 	}
2893 
2894 	/* Decode metadata. */
2895 	error = raid3_metadata_decode(buf, md);
2896 	g_free(buf);
2897 	if (strcmp(md->md_magic, G_RAID3_MAGIC) != 0)
2898 		return (EINVAL);
2899 	if (md->md_version > G_RAID3_VERSION) {
2900 		G_RAID3_DEBUG(0,
2901 		    "Kernel module is too old to handle metadata from %s.",
2902 		    cp->provider->name);
2903 		return (EINVAL);
2904 	}
2905 	if (error != 0) {
2906 		G_RAID3_DEBUG(1, "MD5 metadata hash mismatch for provider %s.",
2907 		    cp->provider->name);
2908 		return (error);
2909 	}
2910 	if (md->md_sectorsize > MAXPHYS) {
2911 		G_RAID3_DEBUG(0, "The blocksize is too big.");
2912 		return (EINVAL);
2913 	}
2914 
2915 	return (0);
2916 }
2917 
2918 static int
2919 g_raid3_check_metadata(struct g_raid3_softc *sc, struct g_provider *pp,
2920     struct g_raid3_metadata *md)
2921 {
2922 
2923 	if (md->md_no >= sc->sc_ndisks) {
2924 		G_RAID3_DEBUG(1, "Invalid disk %s number (no=%u), skipping.",
2925 		    pp->name, md->md_no);
2926 		return (EINVAL);
2927 	}
2928 	if (sc->sc_disks[md->md_no].d_state != G_RAID3_DISK_STATE_NODISK) {
2929 		G_RAID3_DEBUG(1, "Disk %s (no=%u) already exists, skipping.",
2930 		    pp->name, md->md_no);
2931 		return (EEXIST);
2932 	}
2933 	if (md->md_all != sc->sc_ndisks) {
2934 		G_RAID3_DEBUG(1,
2935 		    "Invalid '%s' field on disk %s (device %s), skipping.",
2936 		    "md_all", pp->name, sc->sc_name);
2937 		return (EINVAL);
2938 	}
2939 	if ((md->md_mediasize % md->md_sectorsize) != 0) {
2940 		G_RAID3_DEBUG(1, "Invalid metadata (mediasize %% sectorsize != "
2941 		    "0) on disk %s (device %s), skipping.", pp->name,
2942 		    sc->sc_name);
2943 		return (EINVAL);
2944 	}
2945 	if (md->md_mediasize != sc->sc_mediasize) {
2946 		G_RAID3_DEBUG(1,
2947 		    "Invalid '%s' field on disk %s (device %s), skipping.",
2948 		    "md_mediasize", pp->name, sc->sc_name);
2949 		return (EINVAL);
2950 	}
2951 	if ((md->md_mediasize % (sc->sc_ndisks - 1)) != 0) {
2952 		G_RAID3_DEBUG(1,
2953 		    "Invalid '%s' field on disk %s (device %s), skipping.",
2954 		    "md_mediasize", pp->name, sc->sc_name);
2955 		return (EINVAL);
2956 	}
2957 	if ((sc->sc_mediasize / (sc->sc_ndisks - 1)) > pp->mediasize) {
2958 		G_RAID3_DEBUG(1,
2959 		    "Invalid size of disk %s (device %s), skipping.", pp->name,
2960 		    sc->sc_name);
2961 		return (EINVAL);
2962 	}
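	/*
	 * Every array sector is split evenly across the data components,
	 * so it must be at least (sc_ndisks - 1) component sectors long.
	 */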
2963 	if ((md->md_sectorsize / pp->sectorsize) < sc->sc_ndisks - 1) {
2964 		G_RAID3_DEBUG(1,
2965 		    "Invalid '%s' field on disk %s (device %s), skipping.",
2966 		    "md_sectorsize", pp->name, sc->sc_name);
2967 		return (EINVAL);
2968 	}
2969 	if (md->md_sectorsize != sc->sc_sectorsize) {
2970 		G_RAID3_DEBUG(1,
2971 		    "Invalid '%s' field on disk %s (device %s), skipping.",
2972 		    "md_sectorsize", pp->name, sc->sc_name);
2973 		return (EINVAL);
2974 	}
2975 	if ((sc->sc_sectorsize % pp->sectorsize) != 0) {
2976 		G_RAID3_DEBUG(1,
2977 		    "Invalid sector size of disk %s (device %s), skipping.",
2978 		    pp->name, sc->sc_name);
2979 		return (EINVAL);
2980 	}
2981 	if ((md->md_mflags & ~G_RAID3_DEVICE_FLAG_MASK) != 0) {
2982 		G_RAID3_DEBUG(1,
2983 		    "Invalid device flags on disk %s (device %s), skipping.",
2984 		    pp->name, sc->sc_name);
2985 		return (EINVAL);
2986 	}
2987 	if ((md->md_mflags & G_RAID3_DEVICE_FLAG_VERIFY) != 0 &&
2988 	    (md->md_mflags & G_RAID3_DEVICE_FLAG_ROUND_ROBIN) != 0) {
2989 		/*
2990 		 * VERIFY and ROUND-ROBIN options are mutually exclusive.
2991 		 */
2992 		G_RAID3_DEBUG(1, "Both VERIFY and ROUND-ROBIN flags exist on "
2993 		    "disk %s (device %s), skipping.", pp->name, sc->sc_name);
2994 		return (EINVAL);
2995 	}
2996 	if ((md->md_dflags & ~G_RAID3_DISK_FLAG_MASK) != 0) {
2997 		G_RAID3_DEBUG(1,
2998 		    "Invalid disk flags on disk %s (device %s), skipping.",
2999 		    pp->name, sc->sc_name);
3000 		return (EINVAL);
3001 	}
3002 	return (0);
3003 }
3004 
3005 int
3006 g_raid3_add_disk(struct g_raid3_softc *sc, struct g_provider *pp,
3007     struct g_raid3_metadata *md)
3008 {
3009 	struct g_raid3_disk *disk;
3010 	int error;
3011 
3012 	g_topology_assert_not();
3013 	G_RAID3_DEBUG(2, "Adding disk %s.", pp->name);
3014 
3015 	error = g_raid3_check_metadata(sc, pp, md);
3016 	if (error != 0)
3017 		return (error);
3018 	if (sc->sc_state != G_RAID3_DEVICE_STATE_STARTING &&
3019 	    md->md_genid < sc->sc_genid) {
3020 		G_RAID3_DEBUG(0, "Component %s (device %s) broken, skipping.",
3021 		    pp->name, sc->sc_name);
3022 		return (EINVAL);
3023 	}
3024 	disk = g_raid3_init_disk(sc, pp, md, &error);
3025 	if (disk == NULL)
3026 		return (error);
3027 	error = g_raid3_event_send(disk, G_RAID3_DISK_STATE_NEW,
3028 	    G_RAID3_EVENT_WAIT);
3029 	if (error != 0)
3030 		return (error);
3031 	if (md->md_version < G_RAID3_VERSION) {
3032 		G_RAID3_DEBUG(0, "Upgrading metadata on %s (v%d->v%d).",
3033 		    pp->name, md->md_version, G_RAID3_VERSION);
3034 		g_raid3_update_metadata(disk);
3035 	}
3036 	return (0);
3037 }
3038 
3039 static void
3040 g_raid3_destroy_delayed(void *arg, int flag)
3041 {
3042 	struct g_raid3_softc *sc;
3043 	int error;
3044 
3045 	if (flag == EV_CANCEL) {
3046 		G_RAID3_DEBUG(1, "Destroying canceled.");
3047 		return;
3048 	}
3049 	sc = arg;
3050 	g_topology_unlock();
3051 	sx_xlock(&sc->sc_lock);
3052 	KASSERT((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) == 0,
3053 	    ("DESTROY flag set on %s.", sc->sc_name));
3054 	KASSERT((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROYING) != 0,
3055 	    ("DESTROYING flag not set on %s.", sc->sc_name));
3056 	G_RAID3_DEBUG(0, "Destroying %s (delayed).", sc->sc_name);
3057 	error = g_raid3_destroy(sc, G_RAID3_DESTROY_SOFT);
3058 	if (error != 0) {
3059 		G_RAID3_DEBUG(0, "Cannot destroy %s.", sc->sc_name);
3060 		sx_xunlock(&sc->sc_lock);
3061 	}
3062 	g_topology_lock();
3063 }
3064 
3065 static int
3066 g_raid3_access(struct g_provider *pp, int acr, int acw, int ace)
3067 {
3068 	struct g_raid3_softc *sc;
3069 	int dcr, dcw, dce, error = 0;
3070 
3071 	g_topology_assert();
3072 	G_RAID3_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr,
3073 	    acw, ace);
3074 
3075 	sc = pp->geom->softc;
3076 	if (sc == NULL && acr <= 0 && acw <= 0 && ace <= 0)
3077 		return (0);
3078 	KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name));
3079 
3080 	dcr = pp->acr + acr;
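	/* Access counts the provider will have after applying this request. */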
3081 	dcw = pp->acw + acw;
3082 	dce = pp->ace + ace;
3083 
3084 	g_topology_unlock();
3085 	sx_xlock(&sc->sc_lock);
3086 	if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) != 0 ||
3087 	    g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) < sc->sc_ndisks - 1) {
3088 		if (acr > 0 || acw > 0 || ace > 0)
3089 			error = ENXIO;
3090 		goto end;
3091 	}
3092 	if (dcw == 0)
3093 		g_raid3_idle(sc, dcw);
3094 	if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROYING) != 0) {
3095 		if (acr > 0 || acw > 0 || ace > 0) {
3096 			error = ENXIO;
3097 			goto end;
3098 		}
3099 		if (dcr == 0 && dcw == 0 && dce == 0) {
3100 			g_post_event(g_raid3_destroy_delayed, sc, M_WAITOK,
3101 			    sc, NULL);
3102 		}
3103 	}
3104 end:
3105 	sx_xunlock(&sc->sc_lock);
3106 	g_topology_lock();
3107 	return (error);
3108 }
3109 
3110 static struct g_geom *
3111 g_raid3_create(struct g_class *mp, const struct g_raid3_metadata *md)
3112 {
3113 	struct g_raid3_softc *sc;
3114 	struct g_geom *gp;
3115 	int error, timeout;
3116 	u_int n;
3117 
3118 	g_topology_assert();
3119 	G_RAID3_DEBUG(1, "Creating device %s (id=%u).", md->md_name, md->md_id);
3120 
3121 	/* One disk is the minimum. */
3122 	if (md->md_all < 1)
3123 		return (NULL);
3124 	/*
3125 	 * Action geom.
3126 	 */
3127 	gp = g_new_geomf(mp, "%s", md->md_name);
3128 	sc = malloc(sizeof(*sc), M_RAID3, M_WAITOK | M_ZERO);
3129 	sc->sc_disks = malloc(sizeof(struct g_raid3_disk) * md->md_all, M_RAID3,
3130 	    M_WAITOK | M_ZERO);
3131 	gp->start = g_raid3_start;
3132 	gp->orphan = g_raid3_orphan;
3133 	gp->access = g_raid3_access;
3134 	gp->dumpconf = g_raid3_dumpconf;
3135 
3136 	sc->sc_id = md->md_id;
3137 	sc->sc_mediasize = md->md_mediasize;
3138 	sc->sc_sectorsize = md->md_sectorsize;
3139 	sc->sc_ndisks = md->md_all;
3140 	sc->sc_round_robin = 0;
3141 	sc->sc_flags = md->md_mflags;
3142 	sc->sc_bump_id = 0;
3143 	sc->sc_idle = 1;
3144 	sc->sc_last_write = time_uptime;
3145 	sc->sc_writes = 0;
3146 	for (n = 0; n < sc->sc_ndisks; n++) {
3147 		sc->sc_disks[n].d_softc = sc;
3148 		sc->sc_disks[n].d_no = n;
3149 		sc->sc_disks[n].d_state = G_RAID3_DISK_STATE_NODISK;
3150 	}
3151 	sx_init(&sc->sc_lock, "graid3:lock");
3152 	bioq_init(&sc->sc_queue);
3153 	mtx_init(&sc->sc_queue_mtx, "graid3:queue", NULL, MTX_DEF);
3154 	bioq_init(&sc->sc_regular_delayed);
3155 	bioq_init(&sc->sc_inflight);
3156 	bioq_init(&sc->sc_sync_delayed);
3157 	TAILQ_INIT(&sc->sc_events);
3158 	mtx_init(&sc->sc_events_mtx, "graid3:events", NULL, MTX_DEF);
3159 	callout_init(&sc->sc_callout, 1);
3160 	sc->sc_state = G_RAID3_DEVICE_STATE_STARTING;
3161 	gp->softc = sc;
3162 	sc->sc_geom = gp;
3163 	sc->sc_provider = NULL;
3164 	/*
3165 	 * Synchronization geom.
3166 	 */
3167 	gp = g_new_geomf(mp, "%s.sync", md->md_name);
3168 	gp->softc = sc;
3169 	gp->orphan = g_raid3_orphan;
3170 	sc->sc_sync.ds_geom = gp;
3171 
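	/*
	 * Unless use_malloc is set, create per-device UMA zones for the
	 * 64kB, 16kB and 4kB data buffers used by regular requests.
	 */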
3172 	if (!g_raid3_use_malloc) {
3173 		sc->sc_zones[G_RAID3_ZONE_64K].sz_zone = uma_zcreate("gr3:64k",
3174 		    65536, g_raid3_uma_ctor, g_raid3_uma_dtor, NULL, NULL,
3175 		    UMA_ALIGN_PTR, 0);
3176 		sc->sc_zones[G_RAID3_ZONE_64K].sz_inuse = 0;
3177 		sc->sc_zones[G_RAID3_ZONE_64K].sz_max = g_raid3_n64k;
3178 		sc->sc_zones[G_RAID3_ZONE_64K].sz_requested =
3179 		    sc->sc_zones[G_RAID3_ZONE_64K].sz_failed = 0;
3180 		sc->sc_zones[G_RAID3_ZONE_16K].sz_zone = uma_zcreate("gr3:16k",
3181 		    16384, g_raid3_uma_ctor, g_raid3_uma_dtor, NULL, NULL,
3182 		    UMA_ALIGN_PTR, 0);
3183 		sc->sc_zones[G_RAID3_ZONE_16K].sz_inuse = 0;
3184 		sc->sc_zones[G_RAID3_ZONE_16K].sz_max = g_raid3_n16k;
3185 		sc->sc_zones[G_RAID3_ZONE_16K].sz_requested =
3186 		    sc->sc_zones[G_RAID3_ZONE_16K].sz_failed = 0;
3187 		sc->sc_zones[G_RAID3_ZONE_4K].sz_zone = uma_zcreate("gr3:4k",
3188 		    4096, g_raid3_uma_ctor, g_raid3_uma_dtor, NULL, NULL,
3189 		    UMA_ALIGN_PTR, 0);
3190 		sc->sc_zones[G_RAID3_ZONE_4K].sz_inuse = 0;
3191 		sc->sc_zones[G_RAID3_ZONE_4K].sz_max = g_raid3_n4k;
3192 		sc->sc_zones[G_RAID3_ZONE_4K].sz_requested =
3193 		    sc->sc_zones[G_RAID3_ZONE_4K].sz_failed = 0;
3194 	}
3195 
3196 	error = kproc_create(g_raid3_worker, sc, &sc->sc_worker, 0, 0,
3197 	    "g_raid3 %s", md->md_name);
3198 	if (error != 0) {
3199 		G_RAID3_DEBUG(1, "Cannot create kernel thread for %s.",
3200 		    sc->sc_name);
3201 		if (!g_raid3_use_malloc) {
3202 			uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_64K].sz_zone);
3203 			uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_16K].sz_zone);
3204 			uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_4K].sz_zone);
3205 		}
3206 		g_destroy_geom(sc->sc_sync.ds_geom);
3207 		mtx_destroy(&sc->sc_events_mtx);
3208 		mtx_destroy(&sc->sc_queue_mtx);
3209 		sx_destroy(&sc->sc_lock);
3210 		g_destroy_geom(sc->sc_geom);
3211 		free(sc->sc_disks, M_RAID3);
3212 		free(sc, M_RAID3);
3213 		return (NULL);
3214 	}
3215 
3216 	G_RAID3_DEBUG(1, "Device %s created (%u components, id=%u).",
3217 	    sc->sc_name, sc->sc_ndisks, sc->sc_id);
3218 
3219 	sc->sc_rootmount = root_mount_hold("GRAID3");
3220 	G_RAID3_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount);
3221 
3222 	/*
3223 	 * Run timeout.
3224 	 */
3225 	timeout = atomic_load_acq_int(&g_raid3_timeout);
3226 	callout_reset(&sc->sc_callout, timeout * hz, g_raid3_go, sc);
3227 	return (sc->sc_geom);
3228 }
3229 
3230 int
3231 g_raid3_destroy(struct g_raid3_softc *sc, int how)
3232 {
3233 	struct g_provider *pp;
3234 
3235 	g_topology_assert_not();
3236 	if (sc == NULL)
3237 		return (ENXIO);
3238 	sx_assert(&sc->sc_lock, SX_XLOCKED);
3239 
3240 	pp = sc->sc_provider;
3241 	if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
3242 		switch (how) {
3243 		case G_RAID3_DESTROY_SOFT:
3244 			G_RAID3_DEBUG(1,
3245 			    "Device %s is still open (r%dw%de%d).", pp->name,
3246 			    pp->acr, pp->acw, pp->ace);
3247 			return (EBUSY);
3248 		case G_RAID3_DESTROY_DELAYED:
3249 			G_RAID3_DEBUG(1,
3250 			    "Device %s will be destroyed on last close.",
3251 			    pp->name);
3252 			if (sc->sc_syncdisk != NULL)
3253 				g_raid3_sync_stop(sc, 1);
3254 			sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROYING;
3255 			return (EBUSY);
3256 		case G_RAID3_DESTROY_HARD:
3257 			G_RAID3_DEBUG(1, "Device %s is still open, so it "
3258 			    "can't be definitely removed.", pp->name);
3259 			break;
3260 		}
3261 	}
3262 
3263 	g_topology_lock();
3264 	if (sc->sc_geom->softc == NULL) {
3265 		g_topology_unlock();
3266 		return (0);
3267 	}
3268 	sc->sc_geom->softc = NULL;
3269 	sc->sc_sync.ds_geom->softc = NULL;
3270 	g_topology_unlock();
3271 
3272 	sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY;
3273 	sc->sc_flags |= G_RAID3_DEVICE_FLAG_WAIT;
3274 	G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, sc);
3275 	sx_xunlock(&sc->sc_lock);
3276 	mtx_lock(&sc->sc_queue_mtx);
3277 	wakeup(sc);
3278 	wakeup(&sc->sc_queue);
3279 	mtx_unlock(&sc->sc_queue_mtx);
3280 	G_RAID3_DEBUG(4, "%s: Sleeping %p.", __func__, &sc->sc_worker);
3281 	while (sc->sc_worker != NULL)
3282 		tsleep(&sc->sc_worker, PRIBIO, "r3:destroy", hz / 5);
3283 	G_RAID3_DEBUG(4, "%s: Woken up %p.", __func__, &sc->sc_worker);
3284 	sx_xlock(&sc->sc_lock);
3285 	g_raid3_destroy_device(sc);
3286 	free(sc->sc_disks, M_RAID3);
3287 	free(sc, M_RAID3);
3288 	return (0);
3289 }
3290 
3291 static void
3292 g_raid3_taste_orphan(struct g_consumer *cp)
3293 {
3294 
3295 	KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
3296 	    cp->provider->name));
3297 }
3298 
3299 static struct g_geom *
3300 g_raid3_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
3301 {
3302 	struct g_raid3_metadata md;
3303 	struct g_raid3_softc *sc;
3304 	struct g_consumer *cp;
3305 	struct g_geom *gp;
3306 	int error;
3307 
3308 	g_topology_assert();
3309 	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
3310 	G_RAID3_DEBUG(2, "Tasting %s.", pp->name);
3311 
3312 	gp = g_new_geomf(mp, "raid3:taste");
3313 	/* This orphan function should never be called. */
3314 	gp->orphan = g_raid3_taste_orphan;
3315 	cp = g_new_consumer(gp);
3316 	g_attach(cp, pp);
3317 	error = g_raid3_read_metadata(cp, &md);
3318 	g_detach(cp);
3319 	g_destroy_consumer(cp);
3320 	g_destroy_geom(gp);
3321 	if (error != 0)
3322 		return (NULL);
3323 	gp = NULL;
3324 
3325 	if (md.md_provider[0] != '\0' &&
3326 	    !g_compare_names(md.md_provider, pp->name))
3327 		return (NULL);
3328 	if (md.md_provsize != 0 && md.md_provsize != pp->mediasize)
3329 		return (NULL);
3330 	if (g_raid3_debug >= 2)
3331 		raid3_metadata_dump(&md);
3332 
3333 	/*
3334 	 * Let's check if the device already exists.
3335 	 */
3336 	sc = NULL;
3337 	LIST_FOREACH(gp, &mp->geom, geom) {
3338 		sc = gp->softc;
3339 		if (sc == NULL)
3340 			continue;
3341 		if (sc->sc_sync.ds_geom == gp)
3342 			continue;
3343 		if (strcmp(md.md_name, sc->sc_name) != 0)
3344 			continue;
3345 		if (md.md_id != sc->sc_id) {
3346 			G_RAID3_DEBUG(0, "Device %s already configured.",
3347 			    sc->sc_name);
3348 			return (NULL);
3349 		}
3350 		break;
3351 	}
3352 	if (gp == NULL) {
3353 		gp = g_raid3_create(mp, &md);
3354 		if (gp == NULL) {
3355 			G_RAID3_DEBUG(0, "Cannot create device %s.",
3356 			    md.md_name);
3357 			return (NULL);
3358 		}
3359 		sc = gp->softc;
3360 	}
3361 	G_RAID3_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name);
3362 	g_topology_unlock();
3363 	sx_xlock(&sc->sc_lock);
3364 	error = g_raid3_add_disk(sc, pp, &md);
3365 	if (error != 0) {
3366 		G_RAID3_DEBUG(0, "Cannot add disk %s to %s (error=%d).",
3367 		    pp->name, gp->name, error);
3368 		if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_NODISK) ==
3369 		    sc->sc_ndisks) {
3370 			g_cancel_event(sc);
3371 			g_raid3_destroy(sc, G_RAID3_DESTROY_HARD);
3372 			g_topology_lock();
3373 			return (NULL);
3374 		}
3375 		gp = NULL;
3376 	}
3377 	sx_xunlock(&sc->sc_lock);
3378 	g_topology_lock();
3379 	return (gp);
3380 }
3381 
3382 static int
3383 g_raid3_destroy_geom(struct gctl_req *req __unused, struct g_class *mp __unused,
3384     struct g_geom *gp)
3385 {
3386 	struct g_raid3_softc *sc;
3387 	int error;
3388 
3389 	g_topology_unlock();
3390 	sc = gp->softc;
3391 	sx_xlock(&sc->sc_lock);
3392 	g_cancel_event(sc);
3393 	error = g_raid3_destroy(gp->softc, G_RAID3_DESTROY_SOFT);
3394 	if (error != 0)
3395 		sx_xunlock(&sc->sc_lock);
3396 	g_topology_lock();
3397 	return (error);
3398 }
3399 
3400 static void
3401 g_raid3_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
3402     struct g_consumer *cp, struct g_provider *pp)
3403 {
3404 	struct g_raid3_softc *sc;
3405 
3406 	g_topology_assert();
3407 
3408 	sc = gp->softc;
3409 	if (sc == NULL)
3410 		return;
3411 	/* Skip synchronization geom. */
3412 	if (gp == sc->sc_sync.ds_geom)
3413 		return;
3414 	if (pp != NULL) {
3415 		/* Nothing here. */
3416 	} else if (cp != NULL) {
3417 		struct g_raid3_disk *disk;
3418 
3419 		disk = cp->private;
3420 		if (disk == NULL)
3421 			return;
3422 		g_topology_unlock();
3423 		sx_xlock(&sc->sc_lock);
3424 		sbuf_printf(sb, "%s<Type>", indent);
3425 		if (disk->d_no == sc->sc_ndisks - 1)
3426 			sbuf_printf(sb, "PARITY");
3427 		else
3428 			sbuf_printf(sb, "DATA");
3429 		sbuf_printf(sb, "</Type>\n");
3430 		sbuf_printf(sb, "%s<Number>%u</Number>\n", indent,
3431 		    (u_int)disk->d_no);
3432 		if (disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
3433 			sbuf_printf(sb, "%s<Synchronized>", indent);
3434 			if (disk->d_sync.ds_offset == 0)
3435 				sbuf_printf(sb, "0%%");
3436 			else {
3437 				sbuf_printf(sb, "%u%%",
3438 				    (u_int)((disk->d_sync.ds_offset * 100) /
3439 				    (sc->sc_mediasize / (sc->sc_ndisks - 1))));
3440 			}
3441 			sbuf_printf(sb, "</Synchronized>\n");
3442 			if (disk->d_sync.ds_offset > 0) {
3443 				sbuf_printf(sb, "%s<BytesSynced>%jd"
3444 				    "</BytesSynced>\n", indent,
3445 				    (intmax_t)disk->d_sync.ds_offset);
3446 			}
3447 		}
3448 		sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent,
3449 		    disk->d_sync.ds_syncid);
3450 		sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, disk->d_genid);
3451 		sbuf_printf(sb, "%s<Flags>", indent);
3452 		if (disk->d_flags == 0)
3453 			sbuf_printf(sb, "NONE");
3454 		else {
3455 			int first = 1;
3456 
3457 #define	ADD_FLAG(flag, name)	do {					\
3458 	if ((disk->d_flags & (flag)) != 0) {				\
3459 		if (!first)						\
3460 			sbuf_printf(sb, ", ");				\
3461 		else							\
3462 			first = 0;					\
3463 		sbuf_printf(sb, name);					\
3464 	}								\
3465 } while (0)
3466 			ADD_FLAG(G_RAID3_DISK_FLAG_DIRTY, "DIRTY");
3467 			ADD_FLAG(G_RAID3_DISK_FLAG_HARDCODED, "HARDCODED");
3468 			ADD_FLAG(G_RAID3_DISK_FLAG_SYNCHRONIZING,
3469 			    "SYNCHRONIZING");
3470 			ADD_FLAG(G_RAID3_DISK_FLAG_FORCE_SYNC, "FORCE_SYNC");
3471 			ADD_FLAG(G_RAID3_DISK_FLAG_BROKEN, "BROKEN");
3472 #undef	ADD_FLAG
3473 		}
3474 		sbuf_printf(sb, "</Flags>\n");
3475 		sbuf_printf(sb, "%s<State>%s</State>\n", indent,
3476 		    g_raid3_disk_state2str(disk->d_state));
3477 		sx_xunlock(&sc->sc_lock);
3478 		g_topology_lock();
3479 	} else {
3480 		g_topology_unlock();
3481 		sx_xlock(&sc->sc_lock);
3482 		if (!g_raid3_use_malloc) {
3483 			sbuf_printf(sb,
3484 			    "%s<Zone4kRequested>%u</Zone4kRequested>\n", indent,
3485 			    sc->sc_zones[G_RAID3_ZONE_4K].sz_requested);
3486 			sbuf_printf(sb,
3487 			    "%s<Zone4kFailed>%u</Zone4kFailed>\n", indent,
3488 			    sc->sc_zones[G_RAID3_ZONE_4K].sz_failed);
3489 			sbuf_printf(sb,
3490 			    "%s<Zone16kRequested>%u</Zone16kRequested>\n", indent,
3491 			    sc->sc_zones[G_RAID3_ZONE_16K].sz_requested);
3492 			sbuf_printf(sb,
3493 			    "%s<Zone16kFailed>%u</Zone16kFailed>\n", indent,
3494 			    sc->sc_zones[G_RAID3_ZONE_16K].sz_failed);
3495 			sbuf_printf(sb,
3496 			    "%s<Zone64kRequested>%u</Zone64kRequested>\n", indent,
3497 			    sc->sc_zones[G_RAID3_ZONE_64K].sz_requested);
3498 			sbuf_printf(sb,
3499 			    "%s<Zone64kFailed>%u</Zone64kFailed>\n", indent,
3500 			    sc->sc_zones[G_RAID3_ZONE_64K].sz_failed);
3501 		}
3502 		sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id);
3503 		sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, sc->sc_syncid);
3504 		sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, sc->sc_genid);
3505 		sbuf_printf(sb, "%s<Flags>", indent);
3506 		if (sc->sc_flags == 0)
3507 			sbuf_printf(sb, "NONE");
3508 		else {
3509 			int first = 1;
3510 
3511 #define	ADD_FLAG(flag, name)	do {					\
3512 	if ((sc->sc_flags & (flag)) != 0) {				\
3513 		if (!first)						\
3514 			sbuf_printf(sb, ", ");				\
3515 		else							\
3516 			first = 0;					\
3517 		sbuf_printf(sb, name);					\
3518 	}								\
3519 } while (0)
3520 			ADD_FLAG(G_RAID3_DEVICE_FLAG_NOFAILSYNC, "NOFAILSYNC");
3521 			ADD_FLAG(G_RAID3_DEVICE_FLAG_NOAUTOSYNC, "NOAUTOSYNC");
3522 			ADD_FLAG(G_RAID3_DEVICE_FLAG_ROUND_ROBIN,
3523 			    "ROUND-ROBIN");
3524 			ADD_FLAG(G_RAID3_DEVICE_FLAG_VERIFY, "VERIFY");
3525 #undef	ADD_FLAG
3526 		}
3527 		sbuf_printf(sb, "</Flags>\n");
3528 		sbuf_printf(sb, "%s<Components>%u</Components>\n", indent,
3529 		    sc->sc_ndisks);
3530 		sbuf_printf(sb, "%s<State>%s</State>\n", indent,
3531 		    g_raid3_device_state2str(sc->sc_state));
3532 		sx_xunlock(&sc->sc_lock);
3533 		g_topology_lock();
3534 	}
3535 }
3536 
3537 static void
3538 g_raid3_shutdown_post_sync(void *arg, int howto)
3539 {
3540 	struct g_class *mp;
3541 	struct g_geom *gp, *gp2;
3542 	struct g_raid3_softc *sc;
3543 	int error;
3544 
3545 	mp = arg;
3546 	DROP_GIANT();
3547 	g_topology_lock();
3548 	g_raid3_shutdown = 1;
3549 	LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
3550 		if ((sc = gp->softc) == NULL)
3551 			continue;
3552 		/* Skip synchronization geom. */
3553 		if (gp == sc->sc_sync.ds_geom)
3554 			continue;
3555 		g_topology_unlock();
3556 		sx_xlock(&sc->sc_lock);
3557 		g_raid3_idle(sc, -1);
3558 		g_cancel_event(sc);
3559 		error = g_raid3_destroy(sc, G_RAID3_DESTROY_DELAYED);
3560 		if (error != 0)
3561 			sx_xunlock(&sc->sc_lock);
3562 		g_topology_lock();
3563 	}
3564 	g_topology_unlock();
3565 	PICKUP_GIANT();
3566 }
3567 
3568 static void
3569 g_raid3_init(struct g_class *mp)
3570 {
3571 
3572 	g_raid3_post_sync = EVENTHANDLER_REGISTER(shutdown_post_sync,
3573 	    g_raid3_shutdown_post_sync, mp, SHUTDOWN_PRI_FIRST);
3574 	if (g_raid3_post_sync == NULL)
3575 		G_RAID3_DEBUG(0, "Warning! Cannot register shutdown event.");
3576 }
3577 
3578 static void
3579 g_raid3_fini(struct g_class *mp)
3580 {
3581 
3582 	if (g_raid3_post_sync != NULL)
3583 		EVENTHANDLER_DEREGISTER(shutdown_post_sync, g_raid3_post_sync);
3584 }
3585 
3586 DECLARE_GEOM_CLASS(g_raid3_class, g_raid3);
3587