/*-
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
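
/*
 * Common support code for GEOM "slicer" classes: geoms which divide a
 * single parent provider into a number of sub-providers ("slices").
 * The library keeps track of per-slice offset, length and sectorsize,
 * translates and clips I/O requests, and lets a slicer register
 * "hotspots" on the parent for the meta-data areas it wants to intercept.
 */
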
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/errno.h>
#include <sys/sbuf.h>
#include <geom/geom.h>
#include <geom/geom_slice.h>
#include <machine/stdarg.h>

static g_access_t g_slice_access;
static g_start_t g_slice_start;

static struct g_slicer *
g_slice_alloc(unsigned nslice, unsigned scsize)
{
	struct g_slicer *gsp;

	gsp = g_malloc(sizeof *gsp, M_WAITOK | M_ZERO);
	if (scsize > 0)
		gsp->softc = g_malloc(scsize, M_WAITOK | M_ZERO);
	else
		gsp->softc = NULL;
	gsp->slices = g_malloc(nslice * sizeof(struct g_slice),
	    M_WAITOK | M_ZERO);
	gsp->nslice = nslice;
	return (gsp);
}

static void
g_slice_free(struct g_slicer *gsp)
{

	if (gsp == NULL)	/* XXX: phk thinks about this */
		return;
	g_free(gsp->slices);
	if (gsp->hotspot != NULL)
		g_free(gsp->hotspot);
	if (gsp->softc != NULL)
		g_free(gsp->softc);
	g_free(gsp);
}
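
/*
 * Access method.  Enforce mutual exclusion between overlapping slices:
 * a slice may not gain write access while an overlapping slice is held
 * exclusively, and vice versa.  An extra exclusive count is also taken
 * on the parent consumer on first open and released again on last close.
 */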
static int
g_slice_access(struct g_provider *pp, int dr, int dw, int de)
{
	int error;
	u_int u;
	struct g_geom *gp;
	struct g_consumer *cp;
	struct g_provider *pp2;
	struct g_slicer *gsp;
	struct g_slice *gsl, *gsl2;

	gp = pp->geom;
	cp = LIST_FIRST(&gp->consumer);
	KASSERT (cp != NULL, ("g_slice_access but no consumer"));
	gsp = gp->softc;
	if (dr > 0 || dw > 0 || de > 0) {
		gsl = &gsp->slices[pp->index];
		for (u = 0; u < gsp->nslice; u++) {
			gsl2 = &gsp->slices[u];
			if (gsl2->length == 0)
				continue;
			if (u == pp->index)
				continue;
			if (gsl->offset + gsl->length <= gsl2->offset)
				continue;
			if (gsl2->offset + gsl2->length <= gsl->offset)
				continue;
			/* overlap */
			pp2 = gsl2->provider;
			if ((pp->acw + dw) > 0 && pp2->ace > 0)
				return (EPERM);
			if ((pp->ace + de) > 0 && pp2->acw > 0)
				return (EPERM);
		}
	}
	/* On first open, grab an extra "exclusive" bit */
	if (cp->acr == 0 && cp->acw == 0 && cp->ace == 0)
		de++;
	/* ... and let go of it on last close */
	if ((cp->acr + dr) == 0 && (cp->acw + dw) == 0 && (cp->ace + de) == 1)
		de--;
	error = g_access(cp, dr, dw, de);
	return (error);
}

/*
 * XXX: It should be possible to specify here if we should finish all of the
 * XXX: bio, or only the non-hot bits.  This would get messy if there were
 * XXX: two hot spots in the same bio, so for now we simply finish off the
 * XXX: entire bio.  Modifying hot data on the way to disk is frowned upon
 * XXX: so making that considerably harder is not a bad idea anyway.
 */
void
g_slice_finish_hot(struct bio *bp)
{
	struct bio *bp2;
	struct g_geom *gp;
	struct g_consumer *cp;
	struct g_slicer *gsp;
	struct g_slice *gsl;
	int idx;

	KASSERT(bp->bio_to != NULL,
	    ("NULL bio_to in g_slice_finish_hot(%p)", bp));
	KASSERT(bp->bio_from != NULL,
	    ("NULL bio_from in g_slice_finish_hot(%p)", bp));
	gp = bp->bio_to->geom;
	gsp = gp->softc;
	cp = LIST_FIRST(&gp->consumer);
	KASSERT(cp != NULL, ("NULL consumer in g_slice_finish_hot(%p)", bp));
	idx = bp->bio_to->index;
	gsl = &gsp->slices[idx];

	bp2 = g_clone_bio(bp);
	if (bp2 == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	if (bp2->bio_offset + bp2->bio_length > gsl->length)
		bp2->bio_length = gsl->length - bp2->bio_offset;
	bp2->bio_done = g_std_done;
	bp2->bio_offset += gsl->offset;
	g_io_request(bp2, cp);
	return;
}

static void
g_slice_done(struct bio *bp)
{

	KASSERT(bp->bio_cmd == BIO_GETATTR &&
	    strcmp(bp->bio_attribute, "GEOM::ident") == 0,
	    ("bio_cmd=0x%x bio_attribute=%s", bp->bio_cmd, bp->bio_attribute));

	if (bp->bio_error == 0 && bp->bio_data[0] != '\0') {
		char idx[8];

		/* Add index to the ident received. */
		snprintf(idx, sizeof(idx), "s%d",
		    bp->bio_parent->bio_to->index);
		if (strlcat(bp->bio_data, idx, bp->bio_length) >=
		    bp->bio_length) {
			bp->bio_error = EFAULT;
		}
	}
	g_std_done(bp);
}
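
/*
 * I/O request method.  For read/write/delete requests the slice-relative
 * offset is translated to a parent offset and the request is clipped to
 * the slice boundary, unless it touches a configured hotspot, in which
 * case the registered action (allow, deny, start or call) is taken
 * instead.  BIO_GETATTR and BIO_FLUSH are passed down, with GEOM::ident
 * and GEOM::kerneldump adjusted for the slice.
 */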
static void
g_slice_start(struct bio *bp)
{
	struct bio *bp2;
	struct g_provider *pp;
	struct g_geom *gp;
	struct g_consumer *cp;
	struct g_slicer *gsp;
	struct g_slice *gsl;
	struct g_slice_hot *ghp;
	int idx, error;
	u_int m_index;
	off_t t;

	pp = bp->bio_to;
	gp = pp->geom;
	gsp = gp->softc;
	cp = LIST_FIRST(&gp->consumer);
	idx = pp->index;
	gsl = &gsp->slices[idx];
	switch(bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		if (bp->bio_offset > gsl->length) {
			g_io_deliver(bp, EINVAL); /* XXX: EWHAT ? */
			return;
		}
		/*
		 * Check if we collide with any hot spaces, and call the
		 * method once if so.
		 */
		t = bp->bio_offset + gsl->offset;
		for (m_index = 0; m_index < gsp->nhotspot; m_index++) {
			ghp = &gsp->hotspot[m_index];
			if (t >= ghp->offset + ghp->length)
				continue;
			if (t + bp->bio_length <= ghp->offset)
				continue;
			switch(bp->bio_cmd) {
			case BIO_READ:		idx = ghp->ract; break;
			case BIO_WRITE:		idx = ghp->wact; break;
			case BIO_DELETE:	idx = ghp->dact; break;
			}
			switch(idx) {
			case G_SLICE_HOT_ALLOW:
				/* Fall out and continue normal processing */
				continue;
			case G_SLICE_HOT_DENY:
				g_io_deliver(bp, EROFS);
				return;
			case G_SLICE_HOT_START:
				error = gsp->start(bp);
				if (error && error != EJUSTRETURN)
					g_io_deliver(bp, error);
				return;
			case G_SLICE_HOT_CALL:
				error = g_post_event(gsp->hot, bp, M_NOWAIT,
				    gp, NULL);
				if (error)
					g_io_deliver(bp, error);
				return;
			}
			break;
		}
		bp2 = g_clone_bio(bp);
		if (bp2 == NULL) {
			g_io_deliver(bp, ENOMEM);
			return;
		}
		if (bp2->bio_offset + bp2->bio_length > gsl->length)
			bp2->bio_length = gsl->length - bp2->bio_offset;
		bp2->bio_done = g_std_done;
		bp2->bio_offset += gsl->offset;
		g_io_request(bp2, cp);
		return;
	case BIO_GETATTR:
		/* Give the real method a chance to override */
		if (gsp->start != NULL && gsp->start(bp))
			return;
		if (!strcmp("GEOM::ident", bp->bio_attribute)) {
			bp2 = g_clone_bio(bp);
			if (bp2 == NULL) {
				g_io_deliver(bp, ENOMEM);
				return;
			}
			bp2->bio_done = g_slice_done;
			g_io_request(bp2, cp);
			return;
		}
		if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) {
			struct g_kerneldump *gkd;

			gkd = (struct g_kerneldump *)bp->bio_data;
			gkd->offset += gsp->slices[idx].offset;
			if (gkd->length > gsp->slices[idx].length)
				gkd->length = gsp->slices[idx].length;
			/* now, pass it on downwards... */
		}
		/* FALLTHROUGH */
	case BIO_FLUSH:
		bp2 = g_clone_bio(bp);
		if (bp2 == NULL) {
			g_io_deliver(bp, ENOMEM);
			return;
		}
		bp2->bio_done = g_std_done;
		g_io_request(bp2, cp);
		break;
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
}

void
g_slice_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp)
{
	struct g_slicer *gsp;

	gsp = gp->softc;
	if (indent == NULL) {
		sbuf_printf(sb, " i %u", pp->index);
		sbuf_printf(sb, " o %ju",
		    (uintmax_t)gsp->slices[pp->index].offset);
		return;
	}
	if (pp != NULL) {
		sbuf_printf(sb, "%s<index>%u</index>\n", indent, pp->index);
		sbuf_printf(sb, "%s<length>%ju</length>\n",
		    indent, (uintmax_t)gsp->slices[pp->index].length);
		sbuf_printf(sb, "%s<seclength>%ju</seclength>\n", indent,
		    (uintmax_t)gsp->slices[pp->index].length / 512);
		sbuf_printf(sb, "%s<offset>%ju</offset>\n", indent,
		    (uintmax_t)gsp->slices[pp->index].offset);
		sbuf_printf(sb, "%s<secoffset>%ju</secoffset>\n", indent,
		    (uintmax_t)gsp->slices[pp->index].offset / 512);
	}
}
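
/*
 * (Re)configure slice number "idx" on geom "gp": record its offset, length
 * and sectorsize and create, resize or (when length is 0) destroy its
 * provider.  "how" is one of the G_SLICE_CONFIG_* values from geom_slice.h;
 * the format string and arguments name a newly created provider.
 *
 * Illustrative call only; the geometry variables and the provider name
 * format below are hypothetical, not mandated by this library:
 *
 *	error = g_slice_config(gp, i, G_SLICE_CONFIG_SET,
 *	    (off_t)start * secsize, (off_t)count * secsize, secsize,
 *	    "%ss%d", gp->name, i + 1);
 */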
int
g_slice_config(struct g_geom *gp, u_int idx, int how, off_t offset, off_t length, u_int sectorsize, const char *fmt, ...)
{
	struct g_provider *pp, *pp2;
	struct g_slicer *gsp;
	struct g_slice *gsl;
	va_list ap;
	struct sbuf *sb;
	int acc;

	g_trace(G_T_TOPOLOGY, "g_slice_config(%s, %d, %d)",
	    gp->name, idx, how);
	g_topology_assert();
	gsp = gp->softc;
	if (idx >= gsp->nslice)
		return(EINVAL);
	gsl = &gsp->slices[idx];
	pp = gsl->provider;
	if (pp != NULL)
		acc = pp->acr + pp->acw + pp->ace;
	else
		acc = 0;
	if (acc != 0 && how != G_SLICE_CONFIG_FORCE) {
		if (length < gsl->length)
			return(EBUSY);
		if (offset != gsl->offset)
			return(EBUSY);
	}
	/* XXX: check offset + length <= MEDIASIZE */
	if (how == G_SLICE_CONFIG_CHECK)
		return (0);
	gsl->length = length;
	gsl->offset = offset;
	gsl->sectorsize = sectorsize;
	if (length == 0) {
		if (pp == NULL)
			return (0);
		if (bootverbose)
			printf("GEOM: Deconfigure %s\n", pp->name);
		g_wither_provider(pp, ENXIO);
		gsl->provider = NULL;
		gsp->nprovider--;
		return (0);
	}
	if (pp != NULL) {
		if (bootverbose)
			printf("GEOM: Reconfigure %s, start %jd length %jd end %jd\n",
			    pp->name, (intmax_t)offset, (intmax_t)length,
			    (intmax_t)(offset + length - 1));
		g_resize_provider(pp, gsl->length);
		return (0);
	}
	sb = sbuf_new_auto();
	va_start(ap, fmt);
	sbuf_vprintf(sb, fmt, ap);
	va_end(ap);
	sbuf_finish(sb);
	pp = g_new_providerf(gp, "%s", sbuf_data(sb));
	pp2 = LIST_FIRST(&gp->consumer)->provider;
	pp->stripesize = pp2->stripesize;
	pp->stripeoffset = pp2->stripeoffset + offset;
	if (pp->stripesize > 0)
		pp->stripeoffset %= pp->stripesize;
	if (gsp->nhotspot == 0) {
		pp->flags |= pp2->flags & G_PF_ACCEPT_UNMAPPED;
		pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
	}
	if (0 && bootverbose)
		printf("GEOM: Configure %s, start %jd length %jd end %jd\n",
		    pp->name, (intmax_t)offset, (intmax_t)length,
		    (intmax_t)(offset + length - 1));
	pp->index = idx;
	pp->mediasize = gsl->length;
	pp->sectorsize = gsl->sectorsize;
	gsl->provider = pp;
	gsp->nprovider++;
	g_error_provider(pp, 0);
	sbuf_delete(sb);
	return(0);
}

/*
 * Configure "hotspots".  A hotspot is a piece of the parent device which
 * this particular slicer cares about for some reason.  Typically because
 * it contains meta-data used to configure the slicer.
 * A hotspot is identified by its index number.  The offset and length are
 * relative to the parent device, and the three "?act" fields specify
 * what action to take on BIO_READ, BIO_DELETE and BIO_WRITE.
 *
 * XXX: There may be a race relative to g_slice_start() here, if an existing
 * XXX: hotspot is changed while I/O is happening.  Should this become a
 * XXX: problem we can protect the hotspot stuff with a mutex.
 */
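
/*
 * Illustrative call only; the layout and actions below are hypothetical,
 * not mandated by this library.  A slicer keeping its on-disk meta-data
 * in the first sector of the parent could let reads through while having
 * writes and deletes rejected with EROFS:
 *
 *	g_slice_conf_hot(gp, 0, 0, pp->sectorsize,
 *	    G_SLICE_HOT_ALLOW, G_SLICE_HOT_DENY, G_SLICE_HOT_DENY);
 *
 * where "pp" is the parent provider.  G_SLICE_HOT_START and
 * G_SLICE_HOT_CALL instead hand the bio to the slicer's start method,
 * directly or via the GEOM event queue (see g_slice_start() above).
 */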
int
g_slice_conf_hot(struct g_geom *gp, u_int idx, off_t offset, off_t length, int ract, int dact, int wact)
{
	struct g_slicer *gsp;
	struct g_slice_hot *gsl, *gsl2;
	struct g_consumer *cp;
	struct g_provider *pp;

	g_trace(G_T_TOPOLOGY, "g_slice_conf_hot(%s, idx: %d, off: %jd, len: %jd)",
	    gp->name, idx, (intmax_t)offset, (intmax_t)length);
	g_topology_assert();
	gsp = gp->softc;
	/* Deny unmapped I/O and direct dispatch if hotspots are used. */
	if (gsp->nhotspot == 0) {
		LIST_FOREACH(pp, &gp->provider, provider)
			pp->flags &= ~(G_PF_ACCEPT_UNMAPPED |
			    G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE);
		LIST_FOREACH(cp, &gp->consumer, consumer)
			cp->flags &= ~(G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE);
	}
	gsl = gsp->hotspot;
	if (idx >= gsp->nhotspot) {
		gsl2 = g_malloc((idx + 1) * sizeof *gsl2, M_WAITOK | M_ZERO);
		if (gsp->hotspot != NULL)
			bcopy(gsp->hotspot, gsl2, gsp->nhotspot * sizeof *gsl2);
		gsp->hotspot = gsl2;
		if (gsl != NULL)
			g_free(gsl);
		gsl = gsl2;
		gsp->nhotspot = idx + 1;
	}
	gsl[idx].offset = offset;
	gsl[idx].length = length;
	KASSERT(!((ract | dact | wact) & G_SLICE_HOT_START)
	    || gsp->start != NULL, ("G_SLICE_HOT_START but no slice->start"));
	/* XXX: check that we _have_ a start function if HOT_START specified */
	gsl[idx].ract = ract;
	gsl[idx].dact = dact;
	gsl[idx].wact = wact;
	return (0);
}

void
g_slice_spoiled(struct g_consumer *cp)
{
	struct g_geom *gp;
	struct g_slicer *gsp;

	g_topology_assert();
	gp = cp->geom;
	g_trace(G_T_TOPOLOGY, "g_slice_spoiled(%p/%s)", cp, gp->name);
	cp->flags |= G_CF_ORPHAN;
	gsp = gp->softc;
	gp->softc = NULL;
	g_slice_free(gsp);
	g_wither_geom(gp, ENXIO);
}

int
g_slice_destroy_geom(struct gctl_req *req, struct g_class *mp, struct g_geom *gp)
{

	g_slice_spoiled(LIST_FIRST(&gp->consumer));
	return (0);
}

/*
 * Create a new slicer geom on provider "pp", with room for "slices" slices
 * and "extra" bytes of slicer-private softc, and hook up the caller's start
 * method.  The new geom is returned with an opened (r1w0e0) consumer in
 * *cpp, or NULL on failure.
 */
struct g_geom *
g_slice_new(struct g_class *mp, u_int slices, struct g_provider *pp, struct g_consumer **cpp, void *extrap, int extra, g_slice_start_t *start)
{
	struct g_geom *gp;
	struct g_slicer *gsp;
	struct g_consumer *cp;
	void **vp;
	int error;

	g_topology_assert();
	vp = (void **)extrap;
	gp = g_new_geomf(mp, "%s", pp->name);
	gsp = g_slice_alloc(slices, extra);
	gsp->start = start;
	gp->access = g_slice_access;
	gp->orphan = g_slice_orphan;
	gp->softc = gsp;
	gp->start = g_slice_start;
	gp->spoiled = g_slice_spoiled;
	if (gp->dumpconf == NULL)
		gp->dumpconf = g_slice_dumpconf;
	if (gp->class->destroy_geom == NULL)
		gp->class->destroy_geom = g_slice_destroy_geom;
	cp = g_new_consumer(gp);
	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
	error = g_attach(cp, pp);
	if (error == 0)
		error = g_access(cp, 1, 0, 0);
	if (error) {
		g_wither_geom(gp, ENXIO);
		return (NULL);
	}
	if (extrap != NULL)
		*vp = gsp->softc;
	*cpp = cp;
	return (gp);
}

void
g_slice_orphan(struct g_consumer *cp)
{

	g_trace(G_T_TOPOLOGY, "g_slice_orphan(%p/%s)", cp, cp->provider->name);
	g_topology_assert();

	/* XXX: Not good enough we leak the softc and its suballocations */
	g_slice_free(cp->geom->softc);
	g_wither_geom(cp->geom, ENXIO);
}