/*-
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_geom.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/ctype.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/sbuf.h>
#include <sys/devicestat.h>
#include <machine/md_var.h>

#include <sys/lock.h>
#include <sys/mutex.h>
#include <geom/geom.h>
#include <geom/geom_disk.h>
#include <geom/geom_int.h>

#include <dev/led/led.h>

#include <machine/bus.h>

struct g_disk_softc {
	struct mtx		done_mtx;
	struct disk		*dp;
	struct sysctl_ctx_list	sysctl_ctx;
	struct sysctl_oid	*sysctl_tree;
	char			led[64];
	uint32_t		state;
	struct mtx		start_mtx;
};

static g_access_t g_disk_access;
static g_start_t g_disk_start;
static g_ioctl_t g_disk_ioctl;
static g_dumpconf_t g_disk_dumpconf;
static g_provgone_t g_disk_providergone;

static struct g_class g_disk_class = {
	.name = G_DISK_CLASS_NAME,
	.version = G_VERSION,
	.start = g_disk_start,
	.access = g_disk_access,
	.ioctl = g_disk_ioctl,
	.providergone = g_disk_providergone,
	.dumpconf = g_disk_dumpconf,
};

SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, disk, CTLFLAG_RW, 0,
    "GEOM_DISK stuff");

DECLARE_GEOM_CLASS(g_disk_class, g_disk);

static int
g_disk_access(struct g_provider *pp, int r, int w, int e)
{
	struct disk *dp;
	struct g_disk_softc *sc;
	int error;

	g_trace(G_T_ACCESS, "g_disk_access(%s, %d, %d, %d)",
	    pp->name, r, w, e);
	g_topology_assert();
	sc = pp->private;
	if (sc == NULL || (dp = sc->dp) == NULL || dp->d_destroyed) {
		/*
		 * Allow decreasing access count even if disk is not
		 * available anymore.
		 */
		if (r <= 0 && w <= 0 && e <= 0)
			return (0);
		return (ENXIO);
	}
	r += pp->acr;
	w += pp->acw;
	e += pp->ace;
	error = 0;
	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
		if (dp->d_open != NULL) {
			error = dp->d_open(dp);
			if (bootverbose && error != 0)
				printf("Opened disk %s -> %d\n",
				    pp->name, error);
			if (error != 0)
				return (error);
		}
		pp->mediasize = dp->d_mediasize;
		pp->sectorsize = dp->d_sectorsize;
		if (dp->d_maxsize == 0) {
			printf("WARNING: Disk drive %s%d has no d_maxsize\n",
			    dp->d_name, dp->d_unit);
			dp->d_maxsize = DFLTPHYS;
		}
		if (dp->d_delmaxsize == 0) {
			if (bootverbose && dp->d_flags & DISKFLAG_CANDELETE) {
				printf("WARNING: Disk drive %s%d has no "
				    "d_delmaxsize\n", dp->d_name, dp->d_unit);
			}
			dp->d_delmaxsize = dp->d_maxsize;
		}
		pp->stripeoffset = dp->d_stripeoffset;
		pp->stripesize = dp->d_stripesize;
		dp->d_flags |= DISKFLAG_OPEN;
	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
		if (dp->d_close != NULL) {
			error = dp->d_close(dp);
			if (error != 0)
				printf("Closed disk %s -> %d\n",
				    pp->name, error);
		}
		sc->state = G_STATE_ACTIVE;
		if (sc->led[0] != 0)
			led_set(sc->led, "0");
		dp->d_flags &= ~DISKFLAG_OPEN;
	}
	return (error);
}

static void
g_disk_kerneldump(struct bio *bp, struct disk *dp)
{
	struct g_kerneldump *gkd;
	struct g_geom *gp;

	gkd = (struct g_kerneldump*)bp->bio_data;
	gp = bp->bio_to->geom;
	g_trace(G_T_TOPOLOGY, "g_disk_kerneldump(%s, %jd, %jd)",
	    gp->name, (intmax_t)gkd->offset, (intmax_t)gkd->length);
	if (dp->d_dump == NULL) {
		g_io_deliver(bp, ENODEV);
		return;
	}
	gkd->di.dumper = dp->d_dump;
	gkd->di.priv = dp;
	gkd->di.blocksize = dp->d_sectorsize;
	gkd->di.maxiosize = dp->d_maxsize;
	gkd->di.mediaoffset = gkd->offset;
	if ((gkd->offset + gkd->length) > dp->d_mediasize)
		gkd->length = dp->d_mediasize - gkd->offset;
	gkd->di.mediasize = gkd->length;
	g_io_deliver(bp, 0);
}

static void
g_disk_setstate(struct bio *bp, struct g_disk_softc *sc)
{
	const char *cmd;

	memcpy(&sc->state, bp->bio_data, sizeof(sc->state));
	if (sc->led[0] != 0) {
		switch (sc->state) {
		case G_STATE_FAILED:
			cmd = "1";
			break;
		case G_STATE_REBUILD:
			cmd = "f5";
			break;
		case G_STATE_RESYNC:
			cmd = "f1";
			break;
		default:
			cmd = "0";
			break;
		}
		led_set(sc->led, cmd);
	}
	g_io_deliver(bp, 0);
}

static void
g_disk_done(struct bio *bp)
{
	struct bintime now;
	struct bio *bp2;
	struct g_disk_softc *sc;

	/* See "notes" for why we need a mutex here */
	/* XXX: will witness accept a mix of Giant/unGiant drivers here ? */
	bp2 = bp->bio_parent;
	sc = bp2->bio_to->private;
	bp->bio_completed = bp->bio_length - bp->bio_resid;
	binuptime(&now);
	mtx_lock(&sc->done_mtx);
	if (bp2->bio_error == 0)
		bp2->bio_error = bp->bio_error;
	bp2->bio_completed += bp->bio_completed;

	switch (bp->bio_cmd) {
	case BIO_ZONE:
		bcopy(&bp->bio_zone, &bp2->bio_zone, sizeof(bp->bio_zone));
		/*FALLTHROUGH*/
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
	case BIO_FLUSH:
		devstat_end_transaction_bio_bt(sc->dp->d_devstat, bp, &now);
		break;
	default:
		break;
	}
	bp2->bio_inbed++;
	if (bp2->bio_children == bp2->bio_inbed) {
		mtx_unlock(&sc->done_mtx);
		bp2->bio_resid = bp2->bio_bcount - bp2->bio_completed;
		g_io_deliver(bp2, bp2->bio_error);
	} else
		mtx_unlock(&sc->done_mtx);
	g_destroy_bio(bp);
}

static int
g_disk_ioctl(struct g_provider *pp, u_long cmd, void *data, int fflag, struct thread *td)
{
	struct disk *dp;
	struct g_disk_softc *sc;
	int error;

	sc = pp->private;
	dp = sc->dp;

	if (dp->d_ioctl == NULL)
		return (ENOIOCTL);
	error = dp->d_ioctl(dp, cmd, data, fflag, td);
	return (error);
}

static off_t
g_disk_maxsize(struct disk *dp, struct bio *bp)
{
	if (bp->bio_cmd == BIO_DELETE)
		return (dp->d_delmaxsize);
	return (dp->d_maxsize);
}

/*
 * Worst-case number of page-sized segments needed to map a transfer of
 * the maximum size this disk accepts for the given request type.
 */
static int
g_disk_maxsegs(struct disk *dp, struct bio *bp)
{
	return ((g_disk_maxsize(dp, bp) / PAGE_SIZE) + 1);
}

/*
 * Advance the start of a bio by 'off' bytes; handles mapped, unmapped
 * and vlist-style data alike.
 */
static void
g_disk_advance(struct disk *dp, struct bio *bp, off_t off)
{

	bp->bio_offset += off;
	bp->bio_length -= off;

	if ((bp->bio_flags & BIO_VLIST) != 0) {
		bus_dma_segment_t *seg, *end;

		seg = (bus_dma_segment_t *)bp->bio_data;
		end = (bus_dma_segment_t *)bp->bio_data + bp->bio_ma_n;
		off += bp->bio_ma_offset;
		while (off >= seg->ds_len) {
			KASSERT((seg != end),
			    ("vlist request runs off the end"));
			off -= seg->ds_len;
			seg++;
		}
		bp->bio_ma_offset = off;
		bp->bio_ma_n = end - seg;
		bp->bio_data = (void *)seg;
	} else if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
		bp->bio_ma += off / PAGE_SIZE;
		bp->bio_ma_offset += off;
		bp->bio_ma_offset %= PAGE_SIZE;
		bp->bio_ma_n -= off / PAGE_SIZE;
	} else {
		bp->bio_data += off;
	}
}

static void
g_disk_seg_limit(bus_dma_segment_t *seg, off_t *poffset,
    off_t *plength, int *ppages)
{
	uintptr_t seg_page_base;
	uintptr_t seg_page_end;
	off_t offset;
	off_t length;
	int seg_pages;

	offset = *poffset;
	length = *plength;

	if (length > seg->ds_len - offset)
		length = seg->ds_len - offset;

	seg_page_base = trunc_page(seg->ds_addr + offset);
	seg_page_end = round_page(seg->ds_addr + offset + length);
	seg_pages = (seg_page_end - seg_page_base) >> PAGE_SHIFT;

	if (seg_pages > *ppages) {
		seg_pages = *ppages;
		length = (seg_page_base + (seg_pages << PAGE_SHIFT)) -
		    (seg->ds_addr + offset);
	}

	*poffset = 0;
	*plength -= length;
	*ppages -= seg_pages;
}

static off_t
g_disk_vlist_limit(struct disk *dp, struct bio *bp, bus_dma_segment_t **pendseg)
{
	bus_dma_segment_t *seg, *end;
	off_t residual;
	off_t offset;
	int pages;

	seg = (bus_dma_segment_t *)bp->bio_data;
	end = (bus_dma_segment_t *)bp->bio_data + bp->bio_ma_n;
	residual = bp->bio_length;
	offset = bp->bio_ma_offset;
	pages = g_disk_maxsegs(dp, bp);
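	/*
	 * Walk the segment list until either the remaining length or the
	 * page budget for one transfer is used up.
	 */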
	while (residual != 0 && pages != 0) {
		KASSERT((seg != end),
		    ("vlist limit runs off the end"));
		g_disk_seg_limit(seg, &offset, &residual, &pages);
		seg++;
	}
	if (pendseg != NULL)
		*pendseg = seg;
	return (residual);
}

static bool
g_disk_limit(struct disk *dp, struct bio *bp)
{
	bool limited = false;
	off_t maxsz;

	maxsz = g_disk_maxsize(dp, bp);

	/*
	 * XXX: If we have a stripesize we should really use it here.
	 *      Care should be taken in the delete case if this is done
	 *      as deletes can be very sensitive to size given how they
	 *      are processed.
	 */
	if (bp->bio_length > maxsz) {
		bp->bio_length = maxsz;
		limited = true;
	}

	if ((bp->bio_flags & BIO_VLIST) != 0) {
		bus_dma_segment_t *firstseg, *endseg;
		off_t residual;

		firstseg = (bus_dma_segment_t*)bp->bio_data;
		residual = g_disk_vlist_limit(dp, bp, &endseg);
		if (residual != 0) {
			bp->bio_ma_n = endseg - firstseg;
			bp->bio_length -= residual;
			limited = true;
		}
	} else if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
		bp->bio_ma_n =
		    howmany(bp->bio_ma_offset + bp->bio_length, PAGE_SIZE);
	}

	return (limited);
}

static void
g_disk_start(struct bio *bp)
{
	struct bio *bp2, *bp3;
	struct disk *dp;
	struct g_disk_softc *sc;
	int error;
	off_t off;

	sc = bp->bio_to->private;
	if (sc == NULL || (dp = sc->dp) == NULL || dp->d_destroyed) {
		g_io_deliver(bp, ENXIO);
		return;
	}
	error = EJUSTRETURN;
	switch(bp->bio_cmd) {
	case BIO_DELETE:
		if (!(dp->d_flags & DISKFLAG_CANDELETE)) {
			error = EOPNOTSUPP;
			break;
		}
		/* fall-through */
	case BIO_READ:
	case BIO_WRITE:
		KASSERT((dp->d_flags & DISKFLAG_UNMAPPED_BIO) != 0 ||
		    (bp->bio_flags & BIO_UNMAPPED) == 0,
		    ("unmapped bio not supported by disk %s", dp->d_name));
		off = 0;
		bp3 = NULL;
		bp2 = g_clone_bio(bp);
		if (bp2 == NULL) {
			error = ENOMEM;
			break;
		}
		for (;;) {
			if (g_disk_limit(dp, bp2)) {
				off += bp2->bio_length;

				/*
				 * To avoid a race, we need to grab the next bio
				 * before we schedule this one.  See "notes".
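				 *
				 * Once bp2 has been handed to d_strategy() it
				 * may complete at any time; if the next clone
				 * did not exist yet, g_disk_done() could see
				 * bio_children == bio_inbed and deliver the
				 * parent bio before all of it was issued.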
				 */
				bp3 = g_clone_bio(bp);
				if (bp3 == NULL)
					bp->bio_error = ENOMEM;
			}
			bp2->bio_done = g_disk_done;
			bp2->bio_pblkno = bp2->bio_offset / dp->d_sectorsize;
			bp2->bio_bcount = bp2->bio_length;
			bp2->bio_disk = dp;
			mtx_lock(&sc->start_mtx);
			devstat_start_transaction_bio(dp->d_devstat, bp2);
			mtx_unlock(&sc->start_mtx);
			dp->d_strategy(bp2);

			if (bp3 == NULL)
				break;

			bp2 = bp3;
			bp3 = NULL;
			g_disk_advance(dp, bp2, off);
		}
		break;
	case BIO_GETATTR:
		/* Give the driver a chance to override */
		if (dp->d_getattr != NULL) {
			if (bp->bio_disk == NULL)
				bp->bio_disk = dp;
			error = dp->d_getattr(bp);
			if (error != -1)
				break;
			error = EJUSTRETURN;
		}
		if (g_handleattr_int(bp, "GEOM::candelete",
		    (dp->d_flags & DISKFLAG_CANDELETE) != 0))
			break;
		else if (g_handleattr_int(bp, "GEOM::fwsectors",
		    dp->d_fwsectors))
			break;
		else if (g_handleattr_int(bp, "GEOM::fwheads", dp->d_fwheads))
			break;
		else if (g_handleattr_off_t(bp, "GEOM::frontstuff", 0))
			break;
		else if (g_handleattr_str(bp, "GEOM::ident", dp->d_ident))
			break;
		else if (g_handleattr_uint16_t(bp, "GEOM::hba_vendor",
		    dp->d_hba_vendor))
			break;
		else if (g_handleattr_uint16_t(bp, "GEOM::hba_device",
		    dp->d_hba_device))
			break;
		else if (g_handleattr_uint16_t(bp, "GEOM::hba_subvendor",
		    dp->d_hba_subvendor))
			break;
		else if (g_handleattr_uint16_t(bp, "GEOM::hba_subdevice",
		    dp->d_hba_subdevice))
			break;
		else if (!strcmp(bp->bio_attribute, "GEOM::kerneldump"))
			g_disk_kerneldump(bp, dp);
		else if (!strcmp(bp->bio_attribute, "GEOM::setstate"))
			g_disk_setstate(bp, sc);
		else if (g_handleattr_uint16_t(bp, "GEOM::rotation_rate",
		    dp->d_rotation_rate))
			break;
		else
			error = ENOIOCTL;
		break;
	case BIO_FLUSH:
		g_trace(G_T_BIO, "g_disk_flushcache(%s)",
		    bp->bio_to->name);
		if (!(dp->d_flags & DISKFLAG_CANFLUSHCACHE)) {
			error = EOPNOTSUPP;
			break;
		}
		/*FALLTHROUGH*/
	case BIO_ZONE:
		if (bp->bio_cmd == BIO_ZONE) {
			if (!(dp->d_flags & DISKFLAG_CANZONE)) {
				error = EOPNOTSUPP;
				break;
			}
			g_trace(G_T_BIO, "g_disk_zone(%s)",
			    bp->bio_to->name);
		}
		bp2 = g_clone_bio(bp);
		if (bp2 == NULL) {
			g_io_deliver(bp, ENOMEM);
			return;
		}
		bp2->bio_done = g_disk_done;
		bp2->bio_disk = dp;
		mtx_lock(&sc->start_mtx);
		devstat_start_transaction_bio(dp->d_devstat, bp2);
		mtx_unlock(&sc->start_mtx);
		dp->d_strategy(bp2);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	if (error != EJUSTRETURN)
		g_io_deliver(bp, error);
	return;
}

static void
g_disk_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp)
{
	struct bio *bp;
	struct disk *dp;
	struct g_disk_softc *sc;
	char *buf;
	int res = 0;

	sc = gp->softc;
	if (sc == NULL || (dp = sc->dp) == NULL)
		return;
	if (indent == NULL) {
		sbuf_printf(sb, " hd %u", dp->d_fwheads);
		sbuf_printf(sb, " sc %u", dp->d_fwsectors);
		return;
	}
	if (pp != NULL) {
		sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n",
		    indent, dp->d_fwheads);
		sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n",
		    indent, dp->d_fwsectors);

		/*
		 * "rotationrate" is a little complicated, because the value
		 * returned by the drive might not be the RPM; 0 and 1 are
		 * special cases, and there's also a valid range.
		 */
		sbuf_printf(sb, "%s<rotationrate>", indent);
		if (dp->d_rotation_rate == 0)		/* Old drives don't */
			sbuf_printf(sb, "unknown");	/* report RPM. */
		else if (dp->d_rotation_rate == 1)	/* Since 0 is used */
			sbuf_printf(sb, "0");		/* above, SSDs use 1. */
		else if ((dp->d_rotation_rate >= 0x041) &&
		    (dp->d_rotation_rate <= 0xfffe))
			sbuf_printf(sb, "%u", dp->d_rotation_rate);
		else
			sbuf_printf(sb, "invalid");
		sbuf_printf(sb, "</rotationrate>\n");
		if (dp->d_getattr != NULL) {
			buf = g_malloc(DISK_IDENT_SIZE, M_WAITOK);
			bp = g_alloc_bio();
			bp->bio_disk = dp;
			bp->bio_attribute = "GEOM::ident";
			bp->bio_length = DISK_IDENT_SIZE;
			bp->bio_data = buf;
			res = dp->d_getattr(bp);
			sbuf_printf(sb, "%s<ident>", indent);
			g_conf_printf_escaped(sb, "%s",
			    res == 0 ? buf: dp->d_ident);
			sbuf_printf(sb, "</ident>\n");
			bp->bio_attribute = "GEOM::lunid";
			bp->bio_length = DISK_IDENT_SIZE;
			bp->bio_data = buf;
			if (dp->d_getattr(bp) == 0) {
				sbuf_printf(sb, "%s<lunid>", indent);
				g_conf_printf_escaped(sb, "%s", buf);
				sbuf_printf(sb, "</lunid>\n");
			}
			bp->bio_attribute = "GEOM::lunname";
			bp->bio_length = DISK_IDENT_SIZE;
			bp->bio_data = buf;
			if (dp->d_getattr(bp) == 0) {
				sbuf_printf(sb, "%s<lunname>", indent);
				g_conf_printf_escaped(sb, "%s", buf);
				sbuf_printf(sb, "</lunname>\n");
			}
			g_destroy_bio(bp);
			g_free(buf);
		} else {
			sbuf_printf(sb, "%s<ident>", indent);
			g_conf_printf_escaped(sb, "%s", dp->d_ident);
			sbuf_printf(sb, "</ident>\n");
		}
		sbuf_printf(sb, "%s<descr>", indent);
		g_conf_printf_escaped(sb, "%s", dp->d_descr);
		sbuf_printf(sb, "</descr>\n");
	}
}

static void
g_disk_resize(void *ptr, int flag)
{
	struct disk *dp;
	struct g_geom *gp;
	struct g_provider *pp;

	if (flag == EV_CANCEL)
		return;
	g_topology_assert();

	dp = ptr;
	gp = dp->d_geom;

	if (dp->d_destroyed || gp == NULL)
		return;

	LIST_FOREACH(pp, &gp->provider, provider) {
		if (pp->sectorsize != 0 &&
		    pp->sectorsize != dp->d_sectorsize)
			g_wither_provider(pp, ENXIO);
		else
			g_resize_provider(pp, dp->d_mediasize);
	}
}

static void
g_disk_create(void *arg, int flag)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct disk *dp;
	struct g_disk_softc *sc;
	char tmpstr[80];

	if (flag == EV_CANCEL)
		return;
	g_topology_assert();
	dp = arg;

	mtx_pool_lock(mtxpool_sleep, dp);
	dp->d_init_level = DISK_INIT_START;

	/*
	 * If the disk has already gone away, we can just stop here and
	 * call the user's callback to tell him we've cleaned things up.
	 */
	if (dp->d_goneflag != 0) {
		mtx_pool_unlock(mtxpool_sleep, dp);
		if (dp->d_gone != NULL)
			dp->d_gone(dp);
		return;
	}
	mtx_pool_unlock(mtxpool_sleep, dp);

	sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO);
	mtx_init(&sc->start_mtx, "g_disk_start", NULL, MTX_DEF);
	mtx_init(&sc->done_mtx, "g_disk_done", NULL, MTX_DEF);
	sc->dp = dp;
	gp = g_new_geomf(&g_disk_class, "%s%d", dp->d_name, dp->d_unit);
	gp->softc = sc;
	pp = g_new_providerf(gp, "%s", gp->name);
	devstat_remove_entry(pp->stat);
	pp->stat = NULL;
	dp->d_devstat->id = pp;
	pp->mediasize = dp->d_mediasize;
	pp->sectorsize = dp->d_sectorsize;
	pp->stripeoffset = dp->d_stripeoffset;
	pp->stripesize = dp->d_stripesize;
	if ((dp->d_flags & DISKFLAG_UNMAPPED_BIO) != 0)
		pp->flags |= G_PF_ACCEPT_UNMAPPED;
	if ((dp->d_flags & DISKFLAG_DIRECT_COMPLETION) != 0)
		pp->flags |= G_PF_DIRECT_SEND;
	pp->flags |= G_PF_DIRECT_RECEIVE;
	if (bootverbose)
		printf("GEOM: new disk %s\n", gp->name);
	sysctl_ctx_init(&sc->sysctl_ctx);
	snprintf(tmpstr, sizeof(tmpstr), "GEOM disk %s", gp->name);
	sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_kern_geom_disk), OID_AUTO, gp->name,
	    CTLFLAG_RD, 0, tmpstr);
	if (sc->sysctl_tree != NULL) {
		SYSCTL_ADD_STRING(&sc->sysctl_ctx,
		    SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "led",
		    CTLFLAG_RWTUN, sc->led, sizeof(sc->led),
		    "LED name");
	}
	pp->private = sc;
	dp->d_geom = gp;
	g_error_provider(pp, 0);

	mtx_pool_lock(mtxpool_sleep, dp);
	dp->d_init_level = DISK_INIT_DONE;

	/*
	 * If the disk has gone away at this stage, start the withering
	 * process for it.
	 */
	if (dp->d_goneflag != 0) {
		mtx_pool_unlock(mtxpool_sleep, dp);
		g_wither_provider(pp, ENXIO);
		return;
	}
	mtx_pool_unlock(mtxpool_sleep, dp);

}

/*
 * We get this callback after all of the consumers have gone away, and just
 * before the provider is freed.  If the disk driver provided a d_gone
 * callback, let them know that it is okay to free resources -- they won't
 * be getting any more accesses from GEOM.
 */
static void
g_disk_providergone(struct g_provider *pp)
{
	struct disk *dp;
	struct g_disk_softc *sc;

	sc = (struct g_disk_softc *)pp->private;
	dp = sc->dp;
	if (dp != NULL && dp->d_gone != NULL)
		dp->d_gone(dp);
	if (sc->sysctl_tree != NULL) {
		sysctl_ctx_free(&sc->sysctl_ctx);
		sc->sysctl_tree = NULL;
	}
	if (sc->led[0] != 0) {
		led_set(sc->led, "0");
		sc->led[0] = 0;
	}
	pp->private = NULL;
	pp->geom->softc = NULL;
	mtx_destroy(&sc->done_mtx);
	mtx_destroy(&sc->start_mtx);
	g_free(sc);
}

static void
g_disk_destroy(void *ptr, int flag)
{
	struct disk *dp;
	struct g_geom *gp;
	struct g_disk_softc *sc;

	g_topology_assert();
	dp = ptr;
	gp = dp->d_geom;
	if (gp != NULL) {
		sc = gp->softc;
		if (sc != NULL)
			sc->dp = NULL;
		dp->d_geom = NULL;
		g_wither_geom(gp, ENXIO);
	}

	g_free(dp);
}

/*
 * We only allow printable characters in disk ident,
 * the rest is converted to 'x<HH>'.
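 *
 * For example (illustrative value only), an ident containing the bytes
 * 'W', 'D', 0x01 would be stored as "WDx01".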
 */
static void
g_disk_ident_adjust(char *ident, size_t size)
{
	char *p, tmp[4], newid[DISK_IDENT_SIZE];

	newid[0] = '\0';
	for (p = ident; *p != '\0'; p++) {
		if (isprint(*p)) {
			tmp[0] = *p;
			tmp[1] = '\0';
		} else {
			snprintf(tmp, sizeof(tmp), "x%02hhx",
			    *(unsigned char *)p);
		}
		if (strlcat(newid, tmp, sizeof(newid)) >= sizeof(newid))
			break;
	}
	bzero(ident, size);
	strlcpy(ident, newid, size);
}

struct disk *
disk_alloc(void)
{

	return (g_malloc(sizeof(struct disk), M_WAITOK | M_ZERO));
}

void
disk_create(struct disk *dp, int version)
{

	if (version != DISK_VERSION) {
		printf("WARNING: Attempt to add disk %s%d %s",
		    dp->d_name, dp->d_unit,
		    " using incompatible ABI version of disk(9)\n");
		printf("WARNING: Ignoring disk %s%d\n",
		    dp->d_name, dp->d_unit);
		return;
	}
	if (dp->d_flags & DISKFLAG_RESERVED) {
		printf("WARNING: Attempt to add non-MPSAFE disk %s%d\n",
		    dp->d_name, dp->d_unit);
		printf("WARNING: Ignoring disk %s%d\n",
		    dp->d_name, dp->d_unit);
		return;
	}
	KASSERT(dp->d_strategy != NULL, ("disk_create need d_strategy"));
	KASSERT(dp->d_name != NULL, ("disk_create need d_name"));
	KASSERT(*dp->d_name != 0, ("disk_create need d_name"));
	KASSERT(strlen(dp->d_name) < SPECNAMELEN - 4, ("disk name too long"));
	if (dp->d_devstat == NULL)
		dp->d_devstat = devstat_new_entry(dp->d_name, dp->d_unit,
		    dp->d_sectorsize, DEVSTAT_ALL_SUPPORTED,
		    DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
	dp->d_geom = NULL;

	dp->d_init_level = DISK_INIT_NONE;

	g_disk_ident_adjust(dp->d_ident, sizeof(dp->d_ident));
	g_post_event(g_disk_create, dp, M_WAITOK, dp, NULL);
}

void
disk_destroy(struct disk *dp)
{

	g_cancel_event(dp);
	dp->d_destroyed = 1;
	if (dp->d_devstat != NULL)
		devstat_remove_entry(dp->d_devstat);
	g_post_event(g_disk_destroy, dp, M_WAITOK, NULL);
}

void
disk_gone(struct disk *dp)
{
	struct g_geom *gp;
	struct g_provider *pp;

	mtx_pool_lock(mtxpool_sleep, dp);
	dp->d_goneflag = 1;

	/*
	 * If we're still in the process of creating this disk (the
	 * g_disk_create() function is still queued, or is in
	 * progress), the init level will not yet be DISK_INIT_DONE.
	 *
	 * If that is the case, g_disk_create() will see d_goneflag
	 * and take care of cleaning things up.
	 *
	 * If the disk has already been created, we default to
	 * withering the provider as usual below.
	 *
	 * If the caller has not set a d_gone() callback, he will
	 * not be any worse off by returning here, because the geom
	 * has not been fully setup in any case.
	 */
	if (dp->d_init_level < DISK_INIT_DONE) {
		mtx_pool_unlock(mtxpool_sleep, dp);
		return;
	}
	mtx_pool_unlock(mtxpool_sleep, dp);

	gp = dp->d_geom;
	if (gp != NULL) {
		pp = LIST_FIRST(&gp->provider);
		if (pp != NULL) {
			KASSERT(LIST_NEXT(pp, provider) == NULL,
			    ("geom %p has more than one provider", gp));
			g_wither_provider(pp, ENXIO);
		}
	}
}

void
disk_attr_changed(struct disk *dp, const char *attr, int flag)
{
	struct g_geom *gp;
	struct g_provider *pp;
	char devnamebuf[128];

	gp = dp->d_geom;
	if (gp != NULL)
		LIST_FOREACH(pp, &gp->provider, provider)
			(void)g_attr_changed(pp, attr, flag);
	snprintf(devnamebuf, sizeof(devnamebuf), "devname=%s%d", dp->d_name,
	    dp->d_unit);
	devctl_notify("GEOM", "disk", attr, devnamebuf);
}

void
disk_media_changed(struct disk *dp, int flag)
{
	struct g_geom *gp;
	struct g_provider *pp;

	gp = dp->d_geom;
	if (gp != NULL) {
		pp = LIST_FIRST(&gp->provider);
		if (pp != NULL) {
			KASSERT(LIST_NEXT(pp, provider) == NULL,
			    ("geom %p has more than one provider", gp));
			g_media_changed(pp, flag);
		}
	}
}

void
disk_media_gone(struct disk *dp, int flag)
{
	struct g_geom *gp;
	struct g_provider *pp;

	gp = dp->d_geom;
	if (gp != NULL) {
		pp = LIST_FIRST(&gp->provider);
		if (pp != NULL) {
			KASSERT(LIST_NEXT(pp, provider) == NULL,
			    ("geom %p has more than one provider", gp));
			g_media_gone(pp, flag);
		}
	}
}

int
disk_resize(struct disk *dp, int flag)
{

	if (dp->d_destroyed || dp->d_geom == NULL)
		return (0);

	return (g_post_event(g_disk_resize, dp, flag, NULL));
}

static void
g_kern_disks(void *p, int flag __unused)
{
	struct sbuf *sb;
	struct g_geom *gp;
	char *sp;

	sb = p;
	sp = "";
	g_topology_assert();
	LIST_FOREACH(gp, &g_disk_class.geom, geom) {
		sbuf_printf(sb, "%s%s", sp, gp->name);
		sp = " ";
	}
	sbuf_finish(sb);
}

static int
sysctl_disks(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct sbuf *sb;

	sb = sbuf_new_auto();
	g_waitfor_event(g_kern_disks, sb, M_WAITOK, NULL);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);
	return error;
}

SYSCTL_PROC(_kern, OID_AUTO, disks,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_disks, "A", "names of available disks");
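
/*
 * Illustrative sketch (not part of the original file): roughly how a disk
 * driver consumes the disk(9) API implemented above.  The "foo" names, unit
 * number and geometry values are hypothetical.
 *
 *	static void foo_strategy(struct bio *bp);
 *
 *	static void
 *	foo_attach(int unit, off_t mediasize)
 *	{
 *		struct disk *dp;
 *
 *		dp = disk_alloc();
 *		dp->d_name = "foo";
 *		dp->d_unit = unit;
 *		dp->d_strategy = foo_strategy;
 *		dp->d_sectorsize = 512;
 *		dp->d_mediasize = mediasize;
 *		dp->d_maxsize = DFLTPHYS;
 *		disk_create(dp, DISK_VERSION);
 *	}
 *
 * On detach the driver calls disk_gone() and/or disk_destroy(); once GEOM is
 * finished with the provider, g_disk_providergone() above invokes the
 * driver's d_gone() callback, after which it may free its own resources.
 */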