/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2004, 2007 Lukas Ertl
 * Copyright (c) 2007, 2009 Ulf Lilleengen
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/systm.h>

#include <geom/geom.h>
#include <geom/geom_dbg.h>
#include <geom/vinum/geom_vinum_var.h>
#include <geom/vinum/geom_vinum_raid5.h>
#include <geom/vinum/geom_vinum.h>

static int	gv_check_parity(struct gv_plex *, struct bio *,
		    struct gv_raid5_packet *);
static int	gv_normal_parity(struct gv_plex *, struct bio *,
		    struct gv_raid5_packet *);
static void	gv_plex_flush(struct gv_plex *);
static int	gv_plex_offset(struct gv_plex *, off_t, off_t, off_t *, off_t *,
		    int *, int);
static int	gv_plex_normal_request(struct gv_plex *, struct bio *, off_t,
		    off_t, caddr_t);
static void	gv_post_bio(struct gv_softc *, struct bio *);

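/*
 * Entry point for I/O to a plex: split the incoming request into
 * sub-requests for the individual subdisks, queue them, and then hand
 * each queued sub-request to its subdisk's drive consumer.
 */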
void
gv_plex_start(struct gv_plex *p, struct bio *bp)
{
	struct bio *cbp;
	struct gv_sd *s;
	struct gv_raid5_packet *wp;
	caddr_t addr;
	off_t bcount, boff, len;

	bcount = bp->bio_length;
	addr = bp->bio_data;
	boff = bp->bio_offset;

	/* Walk over the whole length of the request, we might split it up. */
	while (bcount > 0) {
		wp = NULL;

		/*
		 * RAID5 plexes need special treatment, as a single request
		 * might involve several read/write sub-requests.
		 */
		if (p->org == GV_PLEX_RAID5) {
			wp = gv_raid5_start(p, bp, addr, boff, bcount);
			if (wp == NULL)
				return;

			len = wp->length;

			if (TAILQ_EMPTY(&wp->bits))
				g_free(wp);
			else if (wp->lockbase != -1)
				TAILQ_INSERT_TAIL(&p->packets, wp, list);

		/*
		 * Requests to concatenated and striped plexes go straight
		 * through.
		 */
		} else {
			len = gv_plex_normal_request(p, bp, boff, bcount, addr);
		}
		if (len < 0)
			return;

		bcount -= len;
		addr += len;
		boff += len;
	}

	/*
	 * Fire off all sub-requests. We get the correct consumer (== drive)
	 * to send each request to via the subdisk that was stored in
	 * cbp->bio_caller1.
	 */
	cbp = bioq_takefirst(p->bqueue);
	while (cbp != NULL) {
		/*
		 * RAID5 sub-requests need to come in correct order, otherwise
		 * we trip over the parity, as it might be overwritten by
		 * another sub-request. We abuse cbp->bio_caller2 to mark
		 * potential overlap situations.
		 */
		if (cbp->bio_caller2 != NULL && gv_stripe_active(p, cbp)) {
			/* Park the bio on the waiting queue. */
			cbp->bio_pflags |= GV_BIO_ONHOLD;
			bioq_disksort(p->wqueue, cbp);
		} else {
			s = cbp->bio_caller1;
			g_io_request(cbp, s->drive_sc->consumer);
		}
		cbp = bioq_takefirst(p->bqueue);
	}
}

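/*
 * Translate a plex-relative offset and length into the index of the
 * subdisk that holds the data and the corresponding offset and length
 * on that subdisk.
 */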
static int
gv_plex_offset(struct gv_plex *p, off_t boff, off_t bcount, off_t *real_off,
    off_t *real_len, int *sdno, int growing)
{
	struct gv_sd *s;
	int i, sdcount;
	off_t len_left, stripeend, stripeno, stripestart;

	switch (p->org) {
	case GV_PLEX_CONCAT:
		/*
		 * Find the subdisk where this request starts. The subdisks in
		 * this list must be ordered by plex_offset.
		 */
		i = 0;
		LIST_FOREACH(s, &p->subdisks, in_plex) {
			if (s->plex_offset <= boff &&
			    s->plex_offset + s->size > boff) {
				*sdno = i;
				break;
			}
			i++;
		}
		if (s == NULL || s->drive_sc == NULL)
			return (GV_ERR_NOTFOUND);

		/* Calculate corresponding offsets on disk. */
		*real_off = boff - s->plex_offset;
		len_left = s->size - (*real_off);
		KASSERT(len_left >= 0, ("gv_plex_offset: len_left < 0"));
		*real_len = (bcount > len_left) ? len_left : bcount;
		break;

	case GV_PLEX_STRIPED:
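		/*
		 * Striped plexes rotate across their subdisks in units of
		 * p->stripesize bytes: the target subdisk is the stripe
		 * number modulo the subdisk count, and the request is
		 * clipped at the end of the current stripe.
		 */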
		/* The number of the stripe where the request starts. */
		stripeno = boff / p->stripesize;
		KASSERT(stripeno >= 0, ("gv_plex_offset: stripeno < 0"));

		/* Take growing subdisks into account when calculating. */
		sdcount = gv_sdcount(p, (boff >= p->synced));

		if (!(boff + bcount <= p->synced) &&
		    (p->flags & GV_PLEX_GROWING) &&
		    !growing)
			return (GV_ERR_ISBUSY);
		*sdno = stripeno % sdcount;

		KASSERT(*sdno >= 0, ("gv_plex_offset: sdno < 0"));
		stripestart = (stripeno / sdcount) *
		    p->stripesize;
		KASSERT(stripestart >= 0, ("gv_plex_offset: stripestart < 0"));
		stripeend = stripestart + p->stripesize;
		*real_off = boff - (stripeno * p->stripesize) +
		    stripestart;
		len_left = stripeend - *real_off;
		KASSERT(len_left >= 0, ("gv_plex_offset: len_left < 0"));

		*real_len = (bcount <= len_left) ? bcount : len_left;
		break;

	default:
		return (GV_ERR_PLEXORG);
	}
	return (0);
}

/*
 * Prepare a normal plex request.
 */
static int
gv_plex_normal_request(struct gv_plex *p, struct bio *bp, off_t boff,
    off_t bcount, caddr_t addr)
{
	struct gv_sd *s;
	struct bio *cbp;
	off_t real_len, real_off;
	int i, err, sdno;

	s = NULL;
	sdno = -1;
	real_len = real_off = 0;

	err = ENXIO;

	if (p == NULL || LIST_EMPTY(&p->subdisks))
		goto bad;

	err = gv_plex_offset(p, boff, bcount, &real_off,
	    &real_len, &sdno, (bp->bio_pflags & GV_BIO_GROW));
	/* If the request was blocked, put it into wait. */
	if (err == GV_ERR_ISBUSY) {
		bioq_disksort(p->rqueue, bp);
		return (-1); /* "Fail", and delay request. */
	}
	if (err) {
		err = ENXIO;
		goto bad;
	}
	err = ENXIO;

	/* Find the right subdisk. */
	i = 0;
	LIST_FOREACH(s, &p->subdisks, in_plex) {
		if (i == sdno)
			break;
		i++;
	}

	/* Subdisk not found. */
	if (s == NULL || s->drive_sc == NULL)
		goto bad;

	/* Now check if we can handle the request on this subdisk. */
	switch (s->state) {
	case GV_SD_UP:
		/* If the subdisk is up, just continue. */
		break;
	case GV_SD_DOWN:
		if (bp->bio_pflags & GV_BIO_INTERNAL)
			G_VINUM_DEBUG(0, "subdisk must be in the stale state in"
			    " order to perform administrative requests");
		goto bad;
	case GV_SD_STALE:
		if (!(bp->bio_pflags & GV_BIO_SYNCREQ)) {
			G_VINUM_DEBUG(0, "subdisk stale, unable to perform "
			    "regular requests");
			goto bad;
		}

		G_VINUM_DEBUG(1, "sd %s is initializing", s->name);
		gv_set_sd_state(s, GV_SD_INITIALIZING, GV_SETSTATE_FORCE);
		break;
	case GV_SD_INITIALIZING:
		if (bp->bio_cmd == BIO_READ)
			goto bad;
		break;
	default:
		/* All other subdisk states mean it's not accessible. */
		goto bad;
	}

	/* Clone the bio and adjust the offsets and sizes. */
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		err = ENOMEM;
		goto bad;
	}
	cbp->bio_offset = real_off + s->drive_offset;
	cbp->bio_length = real_len;
	cbp->bio_data = addr;
	cbp->bio_done = gv_done;
	cbp->bio_caller1 = s;
	s->drive_sc->active++;

	/* Store the sub-requests now and let others issue them. */
	bioq_insert_tail(p->bqueue, cbp);
	return (real_len);
bad:
	G_VINUM_LOGREQ(0, bp, "plex request failed.");
	/* Building the sub-request failed. If internal BIO, do not deliver. */
	if (bp->bio_pflags & GV_BIO_INTERNAL) {
		if (bp->bio_pflags & GV_BIO_MALLOC)
			g_free(bp->bio_data);
		g_destroy_bio(bp);
		p->flags &= ~(GV_PLEX_SYNCING | GV_PLEX_REBUILDING |
		    GV_PLEX_GROWING);
		return (-1);
	}
	g_io_deliver(bp, err);
	return (-1);
}

/*
 * Handle a completed request to a striped or concatenated plex.
 */
void
gv_plex_normal_done(struct gv_plex *p, struct bio *bp)
{
	struct bio *pbp;

	pbp = bp->bio_parent;
	if (pbp->bio_error == 0)
		pbp->bio_error = bp->bio_error;
	g_destroy_bio(bp);
	pbp->bio_inbed++;
	if (pbp->bio_children == pbp->bio_inbed) {
		/* Just set it to length since multiple plexes will
		 * screw things up. */
		pbp->bio_completed = pbp->bio_length;
		if (pbp->bio_pflags & GV_BIO_SYNCREQ)
			gv_sync_complete(p, pbp);
		else if (pbp->bio_pflags & GV_BIO_GROW)
			gv_grow_complete(p, pbp);
		else
			g_io_deliver(pbp, pbp->bio_error);
	}
}

/*
 * Handle a completed request to a RAID-5 plex.
 */
void
gv_plex_raid5_done(struct gv_plex *p, struct bio *bp)
{
	struct gv_softc *sc;
	struct bio *cbp, *pbp;
	struct gv_bioq *bq, *bq2;
	struct gv_raid5_packet *wp;
	off_t completed;
	int i;

	completed = 0;
	sc = p->vinumconf;
	wp = bp->bio_caller2;

	switch (bp->bio_parent->bio_cmd) {
	case BIO_READ:
		if (wp == NULL) {
			completed = bp->bio_completed;
			break;
		}

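		/*
		 * XOR each finished sub-read into the packet buffer; once the
		 * last outstanding bit has been collected, the buffer holds
		 * the block reconstructed from the other subdisks and the
		 * parity.
		 */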
		TAILQ_FOREACH_SAFE(bq, &wp->bits, queue, bq2) {
			if (bq->bp != bp)
				continue;
			TAILQ_REMOVE(&wp->bits, bq, queue);
			g_free(bq);
			for (i = 0; i < wp->length; i++)
				wp->data[i] ^= bp->bio_data[i];
			break;
		}
		if (TAILQ_EMPTY(&wp->bits)) {
			completed = wp->length;
			if (wp->lockbase != -1) {
				TAILQ_REMOVE(&p->packets, wp, list);
				/* Bring the waiting bios back into the game. */
				pbp = bioq_takefirst(p->wqueue);
				while (pbp != NULL) {
					gv_post_bio(sc, pbp);
					pbp = bioq_takefirst(p->wqueue);
				}
			}
			g_free(wp);
		}

		break;

	case BIO_WRITE:
		/* XXX can this ever happen? */
		if (wp == NULL) {
			completed = bp->bio_completed;
			break;
		}

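		/*
		 * Completed data writes are XORed into the pending parity
		 * buffer here, so that the parity block can be written once
		 * all data sub-writes have returned.
		 */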
		/* Check if we need to handle parity data. */
		TAILQ_FOREACH_SAFE(bq, &wp->bits, queue, bq2) {
			if (bq->bp != bp)
				continue;
			TAILQ_REMOVE(&wp->bits, bq, queue);
			g_free(bq);
			cbp = wp->parity;
			if (cbp != NULL) {
				for (i = 0; i < wp->length; i++)
					cbp->bio_data[i] ^= bp->bio_data[i];
			}
			break;
		}

		/* Handle parity data. */
		if (TAILQ_EMPTY(&wp->bits)) {
			if (bp->bio_parent->bio_pflags & GV_BIO_CHECK)
				i = gv_check_parity(p, bp, wp);
			else
				i = gv_normal_parity(p, bp, wp);

			/* All of our sub-requests have finished. */
			if (i) {
				completed = wp->length;
				TAILQ_REMOVE(&p->packets, wp, list);
				/* Bring the waiting bios back into the game. */
				pbp = bioq_takefirst(p->wqueue);
				while (pbp != NULL) {
					gv_post_bio(sc, pbp);
					pbp = bioq_takefirst(p->wqueue);
				}
				g_free(wp);
			}
		}

		break;
	}

	pbp = bp->bio_parent;
	if (pbp->bio_error == 0)
		pbp->bio_error = bp->bio_error;
	pbp->bio_completed += completed;

	/* When the original request is finished, we deliver it. */
	pbp->bio_inbed++;
	if (pbp->bio_inbed == pbp->bio_children) {
		/* Hand it over for checking or delivery. */
		if (pbp->bio_cmd == BIO_WRITE &&
		    (pbp->bio_pflags & GV_BIO_CHECK)) {
			gv_parity_complete(p, pbp);
		} else if (pbp->bio_cmd == BIO_WRITE &&
		    (pbp->bio_pflags & GV_BIO_REBUILD)) {
			gv_rebuild_complete(p, pbp);
		} else if (pbp->bio_pflags & GV_BIO_INIT) {
			gv_init_complete(p, pbp);
		} else if (pbp->bio_pflags & GV_BIO_SYNCREQ) {
			gv_sync_complete(p, pbp);
		} else if (pbp->bio_pflags & GV_BIO_GROW) {
			gv_grow_complete(p, pbp);
		} else {
			g_io_deliver(pbp, pbp->bio_error);
		}
	}

	/* Clean up what we allocated. */
	if (bp->bio_cflags & GV_BIO_MALLOC)
		g_free(bp->bio_data);
	g_destroy_bio(bp);
}

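/*
 * Completion step of a RAID-5 parity check: send down the bio that was held
 * back in the packet, or compare the computed parity against the block that
 * was just read back and, if a rebuild was requested, rewrite it.  Returns 1
 * once the packet is finished.
 */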
static int
gv_check_parity(struct gv_plex *p, struct bio *bp, struct gv_raid5_packet *wp)
{
	struct bio *pbp;
	struct gv_sd *s;
	int err, finished, i;

	err = 0;
	finished = 1;

	if (wp->waiting != NULL) {
		pbp = wp->waiting;
		wp->waiting = NULL;
		s = pbp->bio_caller1;
		g_io_request(pbp, s->drive_sc->consumer);
		finished = 0;

	} else if (wp->parity != NULL) {
		pbp = wp->parity;
		wp->parity = NULL;

		/* Check if the parity is correct. */
		for (i = 0; i < wp->length; i++) {
			if (bp->bio_data[i] != pbp->bio_data[i]) {
				err = 1;
				break;
			}
		}

		/* The parity is not correct... */
		if (err) {
			bp->bio_parent->bio_error = EAGAIN;

			/* ... but we rebuild it. */
			if (bp->bio_parent->bio_pflags & GV_BIO_PARITY) {
				s = pbp->bio_caller1;
				g_io_request(pbp, s->drive_sc->consumer);
				finished = 0;
			}
		}

		/*
		 * Clean up the BIO we would have used for rebuilding the
		 * parity.
		 */
		if (finished) {
			bp->bio_parent->bio_inbed++;
			g_destroy_bio(pbp);
		}
	}

	return (finished);
}

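/*
 * Completion step of a regular RAID-5 write: fold the delayed data write
 * into the parity buffer and send it down, or, once the data is on disk,
 * issue the parity write itself.  Returns 1 once nothing is left to send.
 */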
static int
gv_normal_parity(struct gv_plex *p, struct bio *bp, struct gv_raid5_packet *wp)
{
	struct bio *cbp, *pbp;
	struct gv_sd *s;
	int finished, i;

	finished = 1;

	if (wp->waiting != NULL) {
		pbp = wp->waiting;
		wp->waiting = NULL;
		cbp = wp->parity;
		for (i = 0; i < wp->length; i++)
			cbp->bio_data[i] ^= pbp->bio_data[i];
		s = pbp->bio_caller1;
		g_io_request(pbp, s->drive_sc->consumer);
		finished = 0;

	} else if (wp->parity != NULL) {
		cbp = wp->parity;
		wp->parity = NULL;
		s = cbp->bio_caller1;
		g_io_request(cbp, s->drive_sc->consumer);
		finished = 0;
	}

	return (finished);
}

/* Flush the queue with delayed requests. */
static void
gv_plex_flush(struct gv_plex *p)
{
	struct bio *bp;

	bp = bioq_takefirst(p->rqueue);
	while (bp != NULL) {
		gv_plex_start(p, bp);
		bp = bioq_takefirst(p->rqueue);
	}
}

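/*
 * Hand a BIO to the vinum worker thread by queuing it on the softc's down
 * queue and waking the worker up.
 */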
static void
gv_post_bio(struct gv_softc *sc, struct bio *bp)
{

	KASSERT(sc != NULL, ("NULL sc"));
	KASSERT(bp != NULL, ("NULL bp"));
	mtx_lock(&sc->bqueue_mtx);
	bioq_disksort(sc->bqueue_down, bp);
	wakeup(sc);
	mtx_unlock(&sc->bqueue_mtx);
}

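/*
 * Create one step of a plex synchronization: a BIO of the given type that
 * covers 'length' bytes at 'offset' and carries the source and target plex
 * in its caller pointers, then queue it for the worker thread.
 */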
int
gv_sync_request(struct gv_plex *from, struct gv_plex *to, off_t offset,
    off_t length, int type, caddr_t data)
{
	struct gv_softc *sc;
	struct bio *bp;

	KASSERT(from != NULL, ("NULL from"));
	KASSERT(to != NULL, ("NULL to"));
	sc = from->vinumconf;
	KASSERT(sc != NULL, ("NULL sc"));

	bp = g_new_bio();
	if (bp == NULL) {
		G_VINUM_DEBUG(0, "sync from '%s' failed at offset "
		    " %jd; out of memory", from->name, offset);
		return (ENOMEM);
	}
	bp->bio_length = length;
	bp->bio_done = NULL;
	bp->bio_pflags |= GV_BIO_SYNCREQ;
	bp->bio_offset = offset;
	bp->bio_caller1 = from;
	bp->bio_caller2 = to;
	bp->bio_cmd = type;
	if (data == NULL)
		data = g_malloc(length, M_WAITOK);
	bp->bio_pflags |= GV_BIO_MALLOC; /* Free on the next run. */
	bp->bio_data = data;

	/* Send down next. */
	gv_post_bio(sc, bp);
	//gv_plex_start(from, bp);
	return (0);
}

/*
 * Handle a finished plex sync bio.
 */
int
gv_sync_complete(struct gv_plex *to, struct bio *bp)
{
	struct gv_plex *from, *p;
	struct gv_sd *s;
	struct gv_volume *v;
	struct gv_softc *sc;
	off_t offset;
	int err;

	g_topology_assert_not();

	err = 0;
	KASSERT(to != NULL, ("NULL to"));
	KASSERT(bp != NULL, ("NULL bp"));
	from = bp->bio_caller2;
	KASSERT(from != NULL, ("NULL from"));
	v = to->vol_sc;
	KASSERT(v != NULL, ("NULL v"));
	sc = v->vinumconf;
	KASSERT(sc != NULL, ("NULL sc"));

	/* If it was a read, write it. */
	if (bp->bio_cmd == BIO_READ) {
		err = gv_sync_request(from, to, bp->bio_offset, bp->bio_length,
		    BIO_WRITE, bp->bio_data);
	/* If it was a write, read the next one. */
	} else if (bp->bio_cmd == BIO_WRITE) {
		if (bp->bio_pflags & GV_BIO_MALLOC)
			g_free(bp->bio_data);
		to->synced += bp->bio_length;
		/* If we're finished, clean up. */
		if (bp->bio_offset + bp->bio_length >= from->size) {
			G_VINUM_DEBUG(1, "syncing of %s from %s completed",
			    to->name, from->name);
			/* Update our state. */
			LIST_FOREACH(s, &to->subdisks, in_plex)
				gv_set_sd_state(s, GV_SD_UP, 0);
			gv_update_plex_state(to);
			to->flags &= ~GV_PLEX_SYNCING;
			to->synced = 0;
			gv_post_event(sc, GV_EVENT_SAVE_CONFIG, sc, NULL, 0, 0);
		} else {
			offset = bp->bio_offset + bp->bio_length;
			err = gv_sync_request(from, to, offset,
			    MIN(bp->bio_length, from->size - offset),
			    BIO_READ, NULL);
		}
	}
	g_destroy_bio(bp);
	/* Clean up if there was an error. */
	if (err) {
		to->flags &= ~GV_PLEX_SYNCING;
		G_VINUM_DEBUG(0, "error syncing plexes: error code %d", err);
	}

	/* Check if all plexes are synced, and lower refcounts. */
	g_topology_lock();
	LIST_FOREACH(p, &v->plexes, in_volume) {
		if (p->flags & GV_PLEX_SYNCING) {
			g_topology_unlock();
			return (-1);
		}
	}
	/* If we came here, all plexes are synced, and we're free. */
	gv_access(v->provider, -1, -1, 0);
	g_topology_unlock();
	G_VINUM_DEBUG(1, "plex sync completed");
	gv_volume_flush(v);
	return (0);
}

/*
 * Create a new bio struct for the next grow request.
 */
int
gv_grow_request(struct gv_plex *p, off_t offset, off_t length, int type,
    caddr_t data)
{
	struct gv_softc *sc;
	struct bio *bp;

	KASSERT(p != NULL, ("gv_grow_request: NULL p"));
	sc = p->vinumconf;
	KASSERT(sc != NULL, ("gv_grow_request: NULL sc"));

	bp = g_new_bio();
	if (bp == NULL) {
		G_VINUM_DEBUG(0, "grow of %s failed creating bio: "
		    "out of memory", p->name);
		return (ENOMEM);
	}

	bp->bio_cmd = type;
	bp->bio_done = NULL;
	bp->bio_error = 0;
	bp->bio_caller1 = p;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_pflags |= GV_BIO_GROW;
	if (data == NULL)
		data = g_malloc(length, M_WAITOK);
	bp->bio_pflags |= GV_BIO_MALLOC;
	bp->bio_data = data;

	gv_post_bio(sc, bp);
	//gv_plex_start(p, bp);
	return (0);
}

/*
 * Finish handling of a bio to a growing plex.
 */
void
gv_grow_complete(struct gv_plex *p, struct bio *bp)
{
	struct gv_softc *sc;
	struct gv_sd *s;
	struct gv_volume *v;
	off_t origsize, offset;
	int sdcount, err;

	v = p->vol_sc;
	KASSERT(v != NULL, ("gv_grow_complete: NULL v"));
	sc = v->vinumconf;
	KASSERT(sc != NULL, ("gv_grow_complete: NULL sc"));
	err = 0;

	/* If it was a read, write it. */
	if (bp->bio_cmd == BIO_READ) {
		p->synced += bp->bio_length;
		err = gv_grow_request(p, bp->bio_offset, bp->bio_length,
		    BIO_WRITE, bp->bio_data);
	/* If it was a write, read next. */
	} else if (bp->bio_cmd == BIO_WRITE) {
		if (bp->bio_pflags & GV_BIO_MALLOC)
			g_free(bp->bio_data);

		/* Find the real size of the plex. */
		sdcount = gv_sdcount(p, 1);
		s = LIST_FIRST(&p->subdisks);
		KASSERT(s != NULL, ("NULL s"));
		origsize = (s->size * (sdcount - 1));
		if (bp->bio_offset + bp->bio_length >= origsize) {
			G_VINUM_DEBUG(1, "growing of %s completed", p->name);
			p->flags &= ~GV_PLEX_GROWING;
			LIST_FOREACH(s, &p->subdisks, in_plex) {
				s->flags &= ~GV_SD_GROW;
				gv_set_sd_state(s, GV_SD_UP, 0);
			}
			p->size = gv_plex_size(p);
			gv_update_vol_size(v, gv_vol_size(v));
			gv_set_plex_state(p, GV_PLEX_UP, 0);
			g_topology_lock();
			gv_access(v->provider, -1, -1, 0);
			g_topology_unlock();
			p->synced = 0;
			gv_post_event(sc, GV_EVENT_SAVE_CONFIG, sc, NULL, 0, 0);
			/* Issue delayed requests. */
			gv_plex_flush(p);
		} else {
			offset = bp->bio_offset + bp->bio_length;
			err = gv_grow_request(p, offset,
			    MIN(bp->bio_length, origsize - offset),
			    BIO_READ, NULL);
		}
	}
	g_destroy_bio(bp);

	if (err) {
		p->flags &= ~GV_PLEX_GROWING;
		G_VINUM_DEBUG(0, "error growing plex: error code %d", err);
	}
}

/*
 * Create an initialization BIO and send it off to the consumer. Assume that
 * we're given initialization data as a parameter.
 */
void
gv_init_request(struct gv_sd *s, off_t start, caddr_t data, off_t length)
{
	struct gv_drive *d;
	struct g_consumer *cp;
	struct bio *bp, *cbp;

	KASSERT(s != NULL, ("gv_init_request: NULL s"));
	d = s->drive_sc;
	KASSERT(d != NULL, ("gv_init_request: NULL d"));
	cp = d->consumer;
	KASSERT(cp != NULL, ("gv_init_request: NULL cp"));

	bp = g_new_bio();
	if (bp == NULL) {
		G_VINUM_DEBUG(0, "subdisk '%s' init: write failed at offset %jd"
		    " (drive offset %jd); out of memory", s->name,
		    (intmax_t)s->initialized, (intmax_t)start);
		return; /* XXX: Error codes. */
	}
	bp->bio_cmd = BIO_WRITE;
	bp->bio_data = data;
	bp->bio_done = NULL;
	bp->bio_error = 0;
	bp->bio_length = length;
	bp->bio_pflags |= GV_BIO_INIT;
	bp->bio_offset = start;
	bp->bio_caller1 = s;

	/* Then, of course, we have to clone it. */
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		G_VINUM_DEBUG(0, "subdisk '%s' init: write failed at offset %jd"
		    " (drive offset %jd); out of memory", s->name,
		    (intmax_t)s->initialized, (intmax_t)start);
		return; /* XXX: Error codes. */
	}
	cbp->bio_done = gv_done;
	cbp->bio_caller1 = s;
	d->active++;
	/* Send it off to the consumer. */
	g_io_request(cbp, cp);
}

/*
 * Handle a finished initialization BIO.
 */
void
gv_init_complete(struct gv_plex *p, struct bio *bp)
{
	struct gv_softc *sc;
	struct gv_drive *d;
	struct g_consumer *cp;
	struct gv_sd *s;
	off_t start, length;
	caddr_t data;
	int error;

	s = bp->bio_caller1;
	start = bp->bio_offset;
	length = bp->bio_length;
	error = bp->bio_error;
	data = bp->bio_data;

	KASSERT(s != NULL, ("gv_init_complete: NULL s"));
	d = s->drive_sc;
	KASSERT(d != NULL, ("gv_init_complete: NULL d"));
	cp = d->consumer;
	KASSERT(cp != NULL, ("gv_init_complete: NULL cp"));
	sc = p->vinumconf;
	KASSERT(sc != NULL, ("gv_init_complete: NULL sc"));

	g_destroy_bio(bp);

	/*
	 * First we need to find out if it was okay, and abort if it's not.
	 * Then we need to free previous buffers, find the correct subdisk and
	 * get the correct starting point and length of the BIO.
	 */
	if (start >= s->drive_offset + s->size) {
		/* Free the data we initialized. */
		g_free(data);
		g_topology_assert_not();
		g_topology_lock();
		g_access(cp, 0, -1, 0);
		g_topology_unlock();
		if (error) {
			gv_set_sd_state(s, GV_SD_STALE, GV_SETSTATE_FORCE |
			    GV_SETSTATE_CONFIG);
		} else {
			gv_set_sd_state(s, GV_SD_UP, GV_SETSTATE_CONFIG);
			s->initialized = 0;
			gv_post_event(sc, GV_EVENT_SAVE_CONFIG, sc, NULL, 0, 0);
			G_VINUM_DEBUG(1, "subdisk '%s' init: finished "
			    "successfully", s->name);
		}
		return;
	}
	s->initialized += length;
	start += length;
	gv_init_request(s, start, data, length);
}

/*
 * Create a new bio struct for the next parity rebuild. Used both by the
 * internal rebuild of degraded plexes and by user-initiated rebuilds/checks.
 */
void
gv_parity_request(struct gv_plex *p, int flags, off_t offset)
{
	struct gv_softc *sc;
	struct bio *bp;

	KASSERT(p != NULL, ("gv_parity_request: NULL p"));
	sc = p->vinumconf;
	KASSERT(sc != NULL, ("gv_parity_request: NULL sc"));

	bp = g_new_bio();
	if (bp == NULL) {
		G_VINUM_DEBUG(0, "rebuild of %s failed creating bio: "
		    "out of memory", p->name);
		return;
	}

	bp->bio_cmd = BIO_WRITE;
	bp->bio_done = NULL;
	bp->bio_error = 0;
	bp->bio_length = p->stripesize;
	bp->bio_caller1 = p;

	/*
	 * Check whether this is a rebuild of a degraded plex or a
	 * user-requested parity rebuild.
	 */
	if (flags & GV_BIO_REBUILD)
		bp->bio_data = g_malloc(GV_DFLT_SYNCSIZE, M_WAITOK);
	else if (flags & GV_BIO_CHECK)
		bp->bio_data = g_malloc(p->stripesize, M_WAITOK | M_ZERO);
	else {
		G_VINUM_DEBUG(0, "invalid flags given in rebuild");
		return;
	}

	bp->bio_pflags = flags;
	bp->bio_pflags |= GV_BIO_MALLOC;

	/* We still have more parity to build. */
	bp->bio_offset = offset;
	gv_post_bio(sc, bp);
	//gv_plex_start(p, bp); /* Send it down to the plex. */
}

/*
 * Handle a finished parity write.
 */
void
gv_parity_complete(struct gv_plex *p, struct bio *bp)
{
	struct gv_softc *sc;
	int error, flags;

	error = bp->bio_error;
	flags = bp->bio_pflags;
	flags &= ~GV_BIO_MALLOC;

	sc = p->vinumconf;
	KASSERT(sc != NULL, ("gv_parity_complete: NULL sc"));

	/* Clean up what we allocated. */
	if (bp->bio_pflags & GV_BIO_MALLOC)
		g_free(bp->bio_data);
	g_destroy_bio(bp);

	if (error == EAGAIN) {
		G_VINUM_DEBUG(0, "parity incorrect at offset 0x%jx",
		    (intmax_t)p->synced);
	}

	/* Any error is fatal, except EAGAIN when we're rebuilding. */
	if (error && !(error == EAGAIN && (flags & GV_BIO_PARITY))) {
		/* Make sure we don't have the lock. */
		g_topology_assert_not();
		g_topology_lock();
		gv_access(p->vol_sc->provider, -1, -1, 0);
		g_topology_unlock();
		G_VINUM_DEBUG(0, "parity check on %s failed at 0x%jx "
		    "errno %d", p->name, (intmax_t)p->synced, error);
		return;
	} else {
		p->synced += p->stripesize;
	}

	if (p->synced >= p->size) {
		/* Make sure we don't have the lock. */
		g_topology_assert_not();
		g_topology_lock();
		gv_access(p->vol_sc->provider, -1, -1, 0);
		g_topology_unlock();
		/* We're finished. */
		G_VINUM_DEBUG(1, "parity operation on %s finished", p->name);
		p->synced = 0;
		gv_post_event(sc, GV_EVENT_SAVE_CONFIG, sc, NULL, 0, 0);
		return;
	}

	/* Send down the next request; its completion decides whether more are needed. */
	gv_parity_request(p, flags, p->synced);
}

/*
 * Handle a finished plex rebuild bio.
 */
void
gv_rebuild_complete(struct gv_plex *p, struct bio *bp)
{
	struct gv_softc *sc;
	struct gv_sd *s;
	int error, flags;
	off_t offset;

	error = bp->bio_error;
	flags = bp->bio_pflags;
	offset = bp->bio_offset;
	flags &= ~GV_BIO_MALLOC;
	sc = p->vinumconf;
	KASSERT(sc != NULL, ("gv_rebuild_complete: NULL sc"));

	/* Clean up what we allocated. */
	if (bp->bio_pflags & GV_BIO_MALLOC)
		g_free(bp->bio_data);
	g_destroy_bio(bp);

	if (error) {
		g_topology_assert_not();
		g_topology_lock();
		gv_access(p->vol_sc->provider, -1, -1, 0);
		g_topology_unlock();

		G_VINUM_DEBUG(0, "rebuild of %s failed at offset %jd errno: %d",
		    p->name, (intmax_t)offset, error);
		p->flags &= ~GV_PLEX_REBUILDING;
		p->synced = 0;
		gv_plex_flush(p); /* Flush out remaining rebuild BIOs. */
		return;
	}

	offset += (p->stripesize * (gv_sdcount(p, 1) - 1));
	if (offset >= p->size) {
		/* We're finished. */
		g_topology_assert_not();
		g_topology_lock();
		gv_access(p->vol_sc->provider, -1, -1, 0);
		g_topology_unlock();

		G_VINUM_DEBUG(1, "rebuild of %s finished", p->name);
		gv_save_config(p->vinumconf);
		p->flags &= ~GV_PLEX_REBUILDING;
		p->synced = 0;
		/* Try to up all subdisks. */
		LIST_FOREACH(s, &p->subdisks, in_plex)
			gv_update_sd_state(s);
		gv_post_event(sc, GV_EVENT_SAVE_CONFIG, sc, NULL, 0, 0);
		gv_plex_flush(p); /* Flush out remaining rebuild BIOs. */
		return;
	}

	/* Send down the next request; its completion decides whether more are needed. */
	gv_parity_request(p, flags, offset);
}