/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2004, 2007 Lukas Ertl
 * Copyright (c) 2007, 2009 Ulf Lilleengen
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/libkern.h>
#include <sys/malloc.h>

#include <geom/geom.h>
#include <geom/vinum/geom_vinum_var.h>
#include <geom/vinum/geom_vinum.h>

static int	gv_sync(struct gv_volume *);
static int	gv_rebuild_plex(struct gv_plex *);
static int	gv_init_plex(struct gv_plex *);
static int	gv_grow_plex(struct gv_plex *);
static int	gv_sync_plex(struct gv_plex *, struct gv_plex *);
static struct gv_plex	*gv_find_good_plex(struct gv_volume *);

/* Parse a start request and post the matching events for the given objects. */
void
gv_start_obj(struct g_geom *gp, struct gctl_req *req)
{
	struct gv_softc *sc;
	struct gv_volume *v;
	struct gv_plex *p;
	int *argc, *initsize;
	char *argv, buf[20];
	int i, type;

	argc = gctl_get_paraml(req, "argc", sizeof(*argc));
	initsize = gctl_get_paraml(req, "initsize", sizeof(*initsize));

	if (argc == NULL || *argc == 0) {
		gctl_error(req, "no arguments given");
		return;
	}

	sc = gp->softc;

	for (i = 0; i < *argc; i++) {
		snprintf(buf, sizeof(buf), "argv%d", i);
		argv = gctl_get_param(req, buf, NULL);
		if (argv == NULL)
			continue;
		type = gv_object_type(sc, argv);
		switch (type) {
		case GV_TYPE_VOL:
			v = gv_find_vol(sc, argv);
			if (v != NULL)
				gv_post_event(sc, GV_EVENT_START_VOLUME, v,
				    NULL, *initsize, 0);
			break;

		case GV_TYPE_PLEX:
			p = gv_find_plex(sc, argv);
			if (p != NULL)
				gv_post_event(sc, GV_EVENT_START_PLEX, p, NULL,
				    *initsize, 0);
			break;

		case GV_TYPE_SD:
		case GV_TYPE_DRIVE:
			/* XXX Not implemented, but what is the use? */
			gctl_error(req, "unable to start '%s' - not yet supported",
			    argv);
			return;
		default:
			gctl_error(req, "unknown object '%s'", argv);
			return;
		}
	}
}

/* Start a plex by syncing, rebuilding, growing or initializing it. */
int
gv_start_plex(struct gv_plex *p)
{
	struct gv_volume *v;
	struct gv_plex *up;
	struct gv_sd *s;
	int error;

	KASSERT(p != NULL, ("gv_start_plex: NULL p"));

	error = 0;
	v = p->vol_sc;

	/* RAID5 plexes can either be initialized, rebuilt or grown. */
	if (p->org == GV_PLEX_RAID5) {
		if (p->state > GV_PLEX_DEGRADED) {
			LIST_FOREACH(s, &p->subdisks, in_plex) {
				if (s->flags & GV_SD_GROW) {
					error = gv_grow_plex(p);
					return (error);
				}
			}
		} else if (p->state == GV_PLEX_DEGRADED) {
			error = gv_rebuild_plex(p);
		} else
			error = gv_init_plex(p);
	} else {
		/* We want to sync from the other plex if we're down. */
		if (p->state == GV_PLEX_DOWN && v->plexcount > 1) {
			up = gv_find_good_plex(v);
			if (up == NULL) {
				G_VINUM_DEBUG(1, "unable to find a good plex");
				return (ENXIO);
			}
			g_topology_lock();
			error = gv_access(v->provider, 1, 1, 0);
			if (error) {
				g_topology_unlock();
				G_VINUM_DEBUG(0, "sync from '%s' failed to "
				    "access volume: %d", up->name, error);
				return (error);
			}
			g_topology_unlock();
			error = gv_sync_plex(p, up);
			if (error)
				return (error);
		/*
		 * In case we have a stripe that is up, check whether it can be
		 * grown.
		 */
		} else if (p->org == GV_PLEX_STRIPED &&
		    p->state != GV_PLEX_DOWN) {
			LIST_FOREACH(s, &p->subdisks, in_plex) {
				if (s->flags & GV_SD_GROW) {
					error = gv_grow_plex(p);
					break;
				}
			}
		}
	}
	return (error);
}

/* Start a volume by starting its only plex, or by syncing its plexes. */
int
gv_start_vol(struct gv_volume *v)
{
	struct gv_plex *p;
	int error;

	KASSERT(v != NULL, ("gv_start_vol: NULL v"));

	error = 0;

	if (v->plexcount == 0)
		return (ENXIO);
	else if (v->plexcount == 1) {
		p = LIST_FIRST(&v->plexes);
		KASSERT(p != NULL, ("gv_start_vol: NULL p on %s", v->name));
		error = gv_start_plex(p);
	} else
		error = gv_sync(v);

	return (error);
}

/* Sync a plex p from the plex up. */
static int
gv_sync_plex(struct gv_plex *p, struct gv_plex *up)
{
	int error;

	KASSERT(p != NULL, ("%s: NULL p", __func__));
	KASSERT(up != NULL, ("%s: NULL up", __func__));
	if ((p == up) || (p->state == GV_PLEX_UP))
		return (0);
	if (p->flags & GV_PLEX_SYNCING ||
	    p->flags & GV_PLEX_REBUILDING ||
	    p->flags & GV_PLEX_GROWING) {
		return (EINPROGRESS);
	}
	p->synced = 0;
	p->flags |= GV_PLEX_SYNCING;
	G_VINUM_DEBUG(1, "starting sync of plex %s", p->name);
	error = gv_sync_request(up, p, p->synced,
	    MIN(GV_DFLT_SYNCSIZE, up->size - p->synced),
	    BIO_READ, NULL);
	if (error) {
		G_VINUM_DEBUG(0, "error syncing plex %s", p->name);
		return (error);
	}
	return (0);
}

/* Return a good plex from volume v. */
static struct gv_plex *
gv_find_good_plex(struct gv_volume *v)
{
	struct gv_plex *up;

	/* Find the plex that's up. */
	up = NULL;
	LIST_FOREACH(up, &v->plexes, in_volume) {
		if (up->state == GV_PLEX_UP)
			break;
	}
	/* Didn't find a good plex. */
	return (up);
}

/* Sync the plexes of volume v from a good plex. */
static int
gv_sync(struct gv_volume *v)
{
	struct gv_softc *sc;
	struct gv_plex *p, *up;
	int error;

	KASSERT(v != NULL, ("gv_sync: NULL v"));
	sc = v->vinumconf;
	KASSERT(sc != NULL, ("gv_sync: NULL sc on %s", v->name));

	up = gv_find_good_plex(v);
	if (up == NULL)
		return (ENXIO);
	g_topology_lock();
	error = gv_access(v->provider, 1, 1, 0);
	if (error) {
		g_topology_unlock();
		G_VINUM_DEBUG(0, "sync from '%s' failed to access volume: %d",
		    up->name, error);
		return (error);
	}
	g_topology_unlock();

	/* Go through the good plex, and issue BIO's to all other plexes. */
	LIST_FOREACH(p, &v->plexes, in_volume) {
		error = gv_sync_plex(p, up);
		if (error)
			break;
	}
	return (0);
}

/* Rebuild a degraded RAID5 plex by issuing a parity rebuild request. */
static int
gv_rebuild_plex(struct gv_plex *p)
{
	struct gv_drive *d;
	struct gv_sd *s;
	int error;

	if (p->flags & GV_PLEX_SYNCING ||
	    p->flags & GV_PLEX_REBUILDING ||
	    p->flags & GV_PLEX_GROWING)
		return (EINPROGRESS);
	/*
	 * Make sure that all subdisks have consumers. We won't allow a rebuild
	 * unless every subdisk has one.
	 */
	LIST_FOREACH(s, &p->subdisks, in_plex) {
		d = s->drive_sc;
		if (d == NULL || (d->flags & GV_DRIVE_REFERENCED)) {
			G_VINUM_DEBUG(0, "unable to rebuild %s, subdisk(s) have"
			    " no drives", p->name);
			return (ENXIO);
		}
	}
	p->flags |= GV_PLEX_REBUILDING;
	p->synced = 0;

	g_topology_assert_not();
	g_topology_lock();
	error = gv_access(p->vol_sc->provider, 1, 1, 0);
	g_topology_unlock();
	if (error) {
		G_VINUM_DEBUG(0, "unable to access provider");
		return (error);
	}

	gv_parity_request(p, GV_BIO_REBUILD, 0);
	return (0);
}

/* Grow a plex that has new subdisks (flagged GV_SD_GROW) attached. */
static int
gv_grow_plex(struct gv_plex *p)
{
	struct gv_volume *v;
	struct gv_sd *s;
	off_t origsize, origlength;
	int error, sdcount;

	KASSERT(p != NULL, ("gv_grow_plex: NULL p"));
	v = p->vol_sc;
	KASSERT(v != NULL, ("gv_grow_plex: NULL v"));

	if (p->flags & GV_PLEX_GROWING ||
	    p->flags & GV_PLEX_SYNCING ||
	    p->flags & GV_PLEX_REBUILDING)
		return (EINPROGRESS);
	g_topology_lock();
	error = gv_access(v->provider, 1, 1, 0);
	g_topology_unlock();
	if (error) {
		G_VINUM_DEBUG(0, "unable to access provider");
		return (error);
	}

	/*
	 * XXX: This routine for finding origsize is used in two other places
	 * as well, so we should create a function for it.
	 */
	sdcount = p->sdcount;
	LIST_FOREACH(s, &p->subdisks, in_plex) {
		if (s->flags & GV_SD_GROW)
			sdcount--;
	}
	s = LIST_FIRST(&p->subdisks);
	if (s == NULL) {
		G_VINUM_DEBUG(0, "error growing plex without subdisks");
		return (GV_ERR_NOTFOUND);
	}
	p->flags |= GV_PLEX_GROWING;
	origsize = (sdcount - 1) * s->size;
	origlength = (sdcount - 1) * p->stripesize;
	p->synced = 0;
	G_VINUM_DEBUG(1, "starting growing of plex %s", p->name);
	gv_grow_request(p, 0, MIN(origlength, origsize), BIO_READ, NULL);

	return (0);
}

/* Initialize a plex by writing zeroes to all of its subdisks. */
static int
gv_init_plex(struct gv_plex *p)
{
	struct gv_drive *d;
	struct gv_sd *s;
	int error;
	off_t start;
	caddr_t data;

	KASSERT(p != NULL, ("gv_init_plex: NULL p"));

	LIST_FOREACH(s, &p->subdisks, in_plex) {
		if (s->state == GV_SD_INITIALIZING)
			return (EINPROGRESS);
		gv_set_sd_state(s, GV_SD_INITIALIZING, GV_SETSTATE_FORCE);
		s->init_size = GV_DFLT_SYNCSIZE;
		start = s->drive_offset + s->initialized;
		d = s->drive_sc;
		if (d == NULL) {
			G_VINUM_DEBUG(0, "subdisk %s has no drive yet", s->name);
			break;
		}
		/*
		 * Take the lock here since we need to avoid a race in
		 * gv_init_request if the BIO is completed before the lock is
		 * released.
		 */
		g_topology_lock();
		error = g_access(d->consumer, 0, 1, 0);
		g_topology_unlock();
		if (error) {
			G_VINUM_DEBUG(0, "error accessing consumer when "
			    "initializing %s", s->name);
			break;
		}
		data = g_malloc(s->init_size, M_WAITOK | M_ZERO);
		gv_init_request(s, start, data, s->init_size);
	}
	return (0);
}