/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2007 Lukas Ertl
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/systm.h>

#include <geom/geom.h>
#include <geom/vinum/geom_vinum_var.h>
#include <geom/vinum/geom_vinum.h>

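/*
 * Drain the volume's delayed request queue, handing each BIO back to
 * gv_volume_start().  Requests end up on this queue while a plex is
 * synchronizing (see gv_volume_start()).
 */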
void
gv_volume_flush(struct gv_volume *v)
{
	struct gv_softc *sc;
	struct bio *bp;

	KASSERT(v != NULL, ("NULL v"));
	sc = v->vinumconf;
	KASSERT(sc != NULL, ("NULL sc"));

	bp = bioq_takefirst(v->wqueue);
	while (bp != NULL) {
		gv_volume_start(sc, bp);
		bp = bioq_takefirst(v->wqueue);
	}
}

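/*
 * Dispatch an incoming BIO to the plexes of a volume.  Reads go to a single
 * plex, chosen round-robin among those that are up (or degraded RAID5);
 * writes and deletes go to every plex that is at least degraded.  While a
 * plex is synchronizing, requests are parked on v->wqueue instead.
 */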
void
gv_volume_start(struct gv_softc *sc, struct bio *bp)
{
	struct g_geom *gp;
	struct gv_volume *v;
	struct gv_plex *p, *lp;
	int numwrites;

	gp = sc->geom;
	v = bp->bio_to->private;
	if (v == NULL || v->state != GV_VOL_UP) {
		g_io_deliver(bp, ENXIO);
		return;
	}

	switch (bp->bio_cmd) {
	case BIO_READ:
		/*
		 * Try to find a good plex to send the request to, round-robin
		 * style.  The plex either has to be up, or it has to be a
		 * degraded RAID5 plex.  If there are delayed requests queued,
		 * put this request on the delayed queue as well, so that we
		 * don't read stale data.
		 */
		if (bioq_first(v->wqueue) != NULL) {
			bioq_insert_tail(v->wqueue, bp);
			break;
		}
		lp = v->last_read_plex;
		if (lp == NULL)
			lp = LIST_FIRST(&v->plexes);
		p = LIST_NEXT(lp, in_volume);
		if (p == NULL)
			p = LIST_FIRST(&v->plexes);
		do {
			if (p == NULL) {
				p = lp;
				break;
			}
			if ((p->state > GV_PLEX_DEGRADED) ||
			    (p->state >= GV_PLEX_DEGRADED &&
			    p->org == GV_PLEX_RAID5))
				break;
			p = LIST_NEXT(p, in_volume);
			if (p == NULL)
				p = LIST_FIRST(&v->plexes);
		} while (p != lp);

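		/* No plex is in a state to serve the read; fail the request. */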
		if ((p == NULL) ||
		    (p->org == GV_PLEX_RAID5 && p->state < GV_PLEX_DEGRADED) ||
		    (p->org != GV_PLEX_RAID5 && p->state <= GV_PLEX_DEGRADED)) {
			g_io_deliver(bp, ENXIO);
			return;
		}
		v->last_read_plex = p;

		/* Hand it down to the plex logic. */
		gv_plex_start(p, bp);
		break;

	case BIO_WRITE:
	case BIO_DELETE:
		/* Delay write requests while any plex is synchronizing. */
		LIST_FOREACH(p, &v->plexes, in_volume) {
			if (p->flags & GV_PLEX_SYNCING) {
				bioq_insert_tail(v->wqueue, bp);
				return;
			}
		}

		numwrites = 0;
		/* Give the BIO to each plex of this volume. */
		LIST_FOREACH(p, &v->plexes, in_volume) {
			if (p->state < GV_PLEX_DEGRADED)
				continue;
			gv_plex_start(p, bp);
			numwrites++;
		}
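		/* No plex accepted the request; report failure. */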
		if (numwrites == 0)
			g_io_deliver(bp, ENXIO);
		break;
	}
}

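/*
 * Completion handler for a BIO that was sent down to a subdisk.  Hand the
 * completion to the handler matching the plex organization, then notify
 * the subdisk's drive via gv_drive_done().
 */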
void
gv_bio_done(struct gv_softc *sc, struct bio *bp)
{
	struct gv_volume *v;
	struct gv_plex *p;
	struct gv_sd *s;

	s = bp->bio_caller1;
	KASSERT(s != NULL, ("gv_bio_done: NULL s"));
	p = s->plex_sc;
	KASSERT(p != NULL, ("gv_bio_done: NULL p"));
	v = p->vol_sc;
	KASSERT(v != NULL, ("gv_bio_done: NULL v"));

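	/* Hand the completed BIO to the handler for this plex organization. */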
	switch (p->org) {
	case GV_PLEX_CONCAT:
	case GV_PLEX_STRIPED:
		gv_plex_normal_done(p, bp);
		break;
	case GV_PLEX_RAID5:
		gv_plex_raid5_done(p, bp);
		break;
	}

	gv_drive_done(s->drive_sc);
}