1 /*- 2 * Copyright (c) 2002 Poul-Henning Kamp 3 * Copyright (c) 2002 Networks Associates Technology, Inc. 4 * All rights reserved. 5 * 6 * This software was developed for the FreeBSD Project by Poul-Henning Kamp 7 * and NAI Labs, the Security Research Division of Network Associates, Inc. 8 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the 9 * DARPA CHATS research program. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. The names of the authors may not be used to endorse or promote 20 * products derived from this software without specific prior written 21 * permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 34 */ 35 36 #include <sys/cdefs.h> 37 __FBSDID("$FreeBSD$"); 38 39 #include "opt_geom.h" 40 41 #include <sys/param.h> 42 #include <sys/systm.h> 43 #include <sys/kernel.h> 44 #include <sys/sysctl.h> 45 #include <sys/bio.h> 46 #include <sys/bus.h> 47 #include <sys/ctype.h> 48 #include <sys/fcntl.h> 49 #include <sys/malloc.h> 50 #include <sys/sbuf.h> 51 #include <sys/devicestat.h> 52 #include <machine/md_var.h> 53 54 #include <sys/lock.h> 55 #include <sys/mutex.h> 56 #include <geom/geom.h> 57 #include <geom/geom_disk.h> 58 #include <geom/geom_int.h> 59 60 #include <dev/led/led.h> 61 62 #include <machine/bus.h> 63 64 struct g_disk_softc { 65 struct mtx done_mtx; 66 struct disk *dp; 67 struct sysctl_ctx_list sysctl_ctx; 68 struct sysctl_oid *sysctl_tree; 69 char led[64]; 70 uint32_t state; 71 struct mtx start_mtx; 72 }; 73 74 static g_access_t g_disk_access; 75 static g_start_t g_disk_start; 76 static g_ioctl_t g_disk_ioctl; 77 static g_dumpconf_t g_disk_dumpconf; 78 static g_provgone_t g_disk_providergone; 79 80 static int g_disk_sysctl_flags(SYSCTL_HANDLER_ARGS); 81 82 static struct g_class g_disk_class = { 83 .name = G_DISK_CLASS_NAME, 84 .version = G_VERSION, 85 .start = g_disk_start, 86 .access = g_disk_access, 87 .ioctl = g_disk_ioctl, 88 .providergone = g_disk_providergone, 89 .dumpconf = g_disk_dumpconf, 90 }; 91 92 SYSCTL_DECL(_kern_geom); 93 static SYSCTL_NODE(_kern_geom, OID_AUTO, disk, CTLFLAG_RW, 0, 94 "GEOM_DISK stuff"); 95 96 DECLARE_GEOM_CLASS(g_disk_class, g_disk); 97 98 static int 99 g_disk_access(struct g_provider *pp, int r, int w, int e) 100 { 101 struct disk *dp; 102 struct g_disk_softc *sc; 103 int error; 104 105 g_trace(G_T_ACCESS, "g_disk_access(%s, %d, %d, %d)", 106 pp->name, r, w, e); 107 g_topology_assert(); 108 sc = pp->private; 109 if (sc == NULL || (dp = sc->dp) == NULL || dp->d_destroyed) { 110 /* 111 * Allow decreasing access count even if disk is not 112 * available anymore. 113 */ 114 if (r <= 0 && w <= 0 && e <= 0) 115 return (0); 116 return (ENXIO); 117 } 118 r += pp->acr; 119 w += pp->acw; 120 e += pp->ace; 121 error = 0; 122 if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) { 123 if (dp->d_open != NULL) { 124 error = dp->d_open(dp); 125 if (bootverbose && error != 0) 126 printf("Opened disk %s -> %d\n", 127 pp->name, error); 128 if (error != 0) 129 return (error); 130 } 131 pp->sectorsize = dp->d_sectorsize; 132 if (dp->d_maxsize == 0) { 133 printf("WARNING: Disk drive %s%d has no d_maxsize\n", 134 dp->d_name, dp->d_unit); 135 dp->d_maxsize = DFLTPHYS; 136 } 137 if (dp->d_delmaxsize == 0) { 138 if (bootverbose && dp->d_flags & DISKFLAG_CANDELETE) { 139 printf("WARNING: Disk drive %s%d has no " 140 "d_delmaxsize\n", dp->d_name, dp->d_unit); 141 } 142 dp->d_delmaxsize = dp->d_maxsize; 143 } 144 pp->stripeoffset = dp->d_stripeoffset; 145 pp->stripesize = dp->d_stripesize; 146 dp->d_flags |= DISKFLAG_OPEN; 147 /* 148 * Do not invoke resize event when initial size was zero. 149 * Some disks report its size only after first opening. 150 */ 151 if (pp->mediasize == 0) 152 pp->mediasize = dp->d_mediasize; 153 else 154 g_resize_provider(pp, dp->d_mediasize); 155 } else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) { 156 if (dp->d_close != NULL) { 157 error = dp->d_close(dp); 158 if (error != 0) 159 printf("Closed disk %s -> %d\n", 160 pp->name, error); 161 } 162 sc->state = G_STATE_ACTIVE; 163 if (sc->led[0] != 0) 164 led_set(sc->led, "0"); 165 dp->d_flags &= ~DISKFLAG_OPEN; 166 } 167 return (error); 168 } 169 170 static void 171 g_disk_kerneldump(struct bio *bp, struct disk *dp) 172 { 173 struct g_kerneldump *gkd; 174 struct g_geom *gp; 175 176 gkd = (struct g_kerneldump*)bp->bio_data; 177 gp = bp->bio_to->geom; 178 g_trace(G_T_TOPOLOGY, "g_disk_kerneldump(%s, %jd, %jd)", 179 gp->name, (intmax_t)gkd->offset, (intmax_t)gkd->length); 180 if (dp->d_dump == NULL) { 181 g_io_deliver(bp, ENODEV); 182 return; 183 } 184 gkd->di.dumper = dp->d_dump; 185 gkd->di.priv = dp; 186 gkd->di.blocksize = dp->d_sectorsize; 187 gkd->di.maxiosize = dp->d_maxsize; 188 gkd->di.mediaoffset = gkd->offset; 189 if ((gkd->offset + gkd->length) > dp->d_mediasize) 190 gkd->length = dp->d_mediasize - gkd->offset; 191 gkd->di.mediasize = gkd->length; 192 g_io_deliver(bp, 0); 193 } 194 195 static void 196 g_disk_setstate(struct bio *bp, struct g_disk_softc *sc) 197 { 198 const char *cmd; 199 200 memcpy(&sc->state, bp->bio_data, sizeof(sc->state)); 201 if (sc->led[0] != 0) { 202 switch (sc->state) { 203 case G_STATE_FAILED: 204 cmd = "1"; 205 break; 206 case G_STATE_REBUILD: 207 cmd = "f5"; 208 break; 209 case G_STATE_RESYNC: 210 cmd = "f1"; 211 break; 212 default: 213 cmd = "0"; 214 break; 215 } 216 led_set(sc->led, cmd); 217 } 218 g_io_deliver(bp, 0); 219 } 220 221 static void 222 g_disk_done(struct bio *bp) 223 { 224 struct bintime now; 225 struct bio *bp2; 226 struct g_disk_softc *sc; 227 228 /* See "notes" for why we need a mutex here */ 229 /* XXX: will witness accept a mix of Giant/unGiant drivers here ? */ 230 bp2 = bp->bio_parent; 231 sc = bp2->bio_to->private; 232 bp->bio_completed = bp->bio_length - bp->bio_resid; 233 binuptime(&now); 234 mtx_lock(&sc->done_mtx); 235 if (bp2->bio_error == 0) 236 bp2->bio_error = bp->bio_error; 237 bp2->bio_completed += bp->bio_completed; 238 239 switch (bp->bio_cmd) { 240 case BIO_ZONE: 241 bcopy(&bp->bio_zone, &bp2->bio_zone, sizeof(bp->bio_zone)); 242 /*FALLTHROUGH*/ 243 case BIO_READ: 244 case BIO_WRITE: 245 case BIO_DELETE: 246 case BIO_FLUSH: 247 devstat_end_transaction_bio_bt(sc->dp->d_devstat, bp, &now); 248 break; 249 default: 250 break; 251 } 252 bp2->bio_inbed++; 253 if (bp2->bio_children == bp2->bio_inbed) { 254 mtx_unlock(&sc->done_mtx); 255 bp2->bio_resid = bp2->bio_bcount - bp2->bio_completed; 256 g_io_deliver(bp2, bp2->bio_error); 257 } else 258 mtx_unlock(&sc->done_mtx); 259 g_destroy_bio(bp); 260 } 261 262 static int 263 g_disk_ioctl(struct g_provider *pp, u_long cmd, void * data, int fflag, struct thread *td) 264 { 265 struct disk *dp; 266 struct g_disk_softc *sc; 267 int error; 268 269 sc = pp->private; 270 dp = sc->dp; 271 272 if (dp->d_ioctl == NULL) 273 return (ENOIOCTL); 274 error = dp->d_ioctl(dp, cmd, data, fflag, td); 275 return (error); 276 } 277 278 static off_t 279 g_disk_maxsize(struct disk *dp, struct bio *bp) 280 { 281 if (bp->bio_cmd == BIO_DELETE) 282 return (dp->d_delmaxsize); 283 return (dp->d_maxsize); 284 } 285 286 static int 287 g_disk_maxsegs(struct disk *dp, struct bio *bp) 288 { 289 return ((g_disk_maxsize(dp, bp) / PAGE_SIZE) + 1); 290 } 291 292 static void 293 g_disk_advance(struct disk *dp, struct bio *bp, off_t off) 294 { 295 296 bp->bio_offset += off; 297 bp->bio_length -= off; 298 299 if ((bp->bio_flags & BIO_VLIST) != 0) { 300 bus_dma_segment_t *seg, *end; 301 302 seg = (bus_dma_segment_t *)bp->bio_data; 303 end = (bus_dma_segment_t *)bp->bio_data + bp->bio_ma_n; 304 off += bp->bio_ma_offset; 305 while (off >= seg->ds_len) { 306 KASSERT((seg != end), 307 ("vlist request runs off the end")); 308 off -= seg->ds_len; 309 seg++; 310 } 311 bp->bio_ma_offset = off; 312 bp->bio_ma_n = end - seg; 313 bp->bio_data = (void *)seg; 314 } else if ((bp->bio_flags & BIO_UNMAPPED) != 0) { 315 bp->bio_ma += off / PAGE_SIZE; 316 bp->bio_ma_offset += off; 317 bp->bio_ma_offset %= PAGE_SIZE; 318 bp->bio_ma_n -= off / PAGE_SIZE; 319 } else { 320 bp->bio_data += off; 321 } 322 } 323 324 static void 325 g_disk_seg_limit(bus_dma_segment_t *seg, off_t *poffset, 326 off_t *plength, int *ppages) 327 { 328 uintptr_t seg_page_base; 329 uintptr_t seg_page_end; 330 off_t offset; 331 off_t length; 332 int seg_pages; 333 334 offset = *poffset; 335 length = *plength; 336 337 if (length > seg->ds_len - offset) 338 length = seg->ds_len - offset; 339 340 seg_page_base = trunc_page(seg->ds_addr + offset); 341 seg_page_end = round_page(seg->ds_addr + offset + length); 342 seg_pages = (seg_page_end - seg_page_base) >> PAGE_SHIFT; 343 344 if (seg_pages > *ppages) { 345 seg_pages = *ppages; 346 length = (seg_page_base + (seg_pages << PAGE_SHIFT)) - 347 (seg->ds_addr + offset); 348 } 349 350 *poffset = 0; 351 *plength -= length; 352 *ppages -= seg_pages; 353 } 354 355 static off_t 356 g_disk_vlist_limit(struct disk *dp, struct bio *bp, bus_dma_segment_t **pendseg) 357 { 358 bus_dma_segment_t *seg, *end; 359 off_t residual; 360 off_t offset; 361 int pages; 362 363 seg = (bus_dma_segment_t *)bp->bio_data; 364 end = (bus_dma_segment_t *)bp->bio_data + bp->bio_ma_n; 365 residual = bp->bio_length; 366 offset = bp->bio_ma_offset; 367 pages = g_disk_maxsegs(dp, bp); 368 while (residual != 0 && pages != 0) { 369 KASSERT((seg != end), 370 ("vlist limit runs off the end")); 371 g_disk_seg_limit(seg, &offset, &residual, &pages); 372 seg++; 373 } 374 if (pendseg != NULL) 375 *pendseg = seg; 376 return (residual); 377 } 378 379 static bool 380 g_disk_limit(struct disk *dp, struct bio *bp) 381 { 382 bool limited = false; 383 off_t maxsz; 384 385 maxsz = g_disk_maxsize(dp, bp); 386 387 /* 388 * XXX: If we have a stripesize we should really use it here. 389 * Care should be taken in the delete case if this is done 390 * as deletes can be very sensitive to size given how they 391 * are processed. 392 */ 393 if (bp->bio_length > maxsz) { 394 bp->bio_length = maxsz; 395 limited = true; 396 } 397 398 if ((bp->bio_flags & BIO_VLIST) != 0) { 399 bus_dma_segment_t *firstseg, *endseg; 400 off_t residual; 401 402 firstseg = (bus_dma_segment_t*)bp->bio_data; 403 residual = g_disk_vlist_limit(dp, bp, &endseg); 404 if (residual != 0) { 405 bp->bio_ma_n = endseg - firstseg; 406 bp->bio_length -= residual; 407 limited = true; 408 } 409 } else if ((bp->bio_flags & BIO_UNMAPPED) != 0) { 410 bp->bio_ma_n = 411 howmany(bp->bio_ma_offset + bp->bio_length, PAGE_SIZE); 412 } 413 414 return (limited); 415 } 416 417 static void 418 g_disk_start(struct bio *bp) 419 { 420 struct bio *bp2, *bp3; 421 struct disk *dp; 422 struct g_disk_softc *sc; 423 int error; 424 off_t off; 425 426 biotrack(bp, __func__); 427 428 sc = bp->bio_to->private; 429 if (sc == NULL || (dp = sc->dp) == NULL || dp->d_destroyed) { 430 g_io_deliver(bp, ENXIO); 431 return; 432 } 433 error = EJUSTRETURN; 434 switch(bp->bio_cmd) { 435 case BIO_DELETE: 436 if (!(dp->d_flags & DISKFLAG_CANDELETE)) { 437 error = EOPNOTSUPP; 438 break; 439 } 440 /* fall-through */ 441 case BIO_READ: 442 case BIO_WRITE: 443 KASSERT((dp->d_flags & DISKFLAG_UNMAPPED_BIO) != 0 || 444 (bp->bio_flags & BIO_UNMAPPED) == 0, 445 ("unmapped bio not supported by disk %s", dp->d_name)); 446 off = 0; 447 bp3 = NULL; 448 bp2 = g_clone_bio(bp); 449 if (bp2 == NULL) { 450 error = ENOMEM; 451 break; 452 } 453 for (;;) { 454 if (g_disk_limit(dp, bp2)) { 455 off += bp2->bio_length; 456 457 /* 458 * To avoid a race, we need to grab the next bio 459 * before we schedule this one. See "notes". 460 */ 461 bp3 = g_clone_bio(bp); 462 if (bp3 == NULL) 463 bp->bio_error = ENOMEM; 464 } 465 bp2->bio_done = g_disk_done; 466 bp2->bio_pblkno = bp2->bio_offset / dp->d_sectorsize; 467 bp2->bio_bcount = bp2->bio_length; 468 bp2->bio_disk = dp; 469 mtx_lock(&sc->start_mtx); 470 devstat_start_transaction_bio(dp->d_devstat, bp2); 471 mtx_unlock(&sc->start_mtx); 472 dp->d_strategy(bp2); 473 474 if (bp3 == NULL) 475 break; 476 477 bp2 = bp3; 478 bp3 = NULL; 479 g_disk_advance(dp, bp2, off); 480 } 481 break; 482 case BIO_GETATTR: 483 /* Give the driver a chance to override */ 484 if (dp->d_getattr != NULL) { 485 if (bp->bio_disk == NULL) 486 bp->bio_disk = dp; 487 error = dp->d_getattr(bp); 488 if (error != -1) 489 break; 490 error = EJUSTRETURN; 491 } 492 if (g_handleattr_int(bp, "GEOM::candelete", 493 (dp->d_flags & DISKFLAG_CANDELETE) != 0)) 494 break; 495 else if (g_handleattr_int(bp, "GEOM::fwsectors", 496 dp->d_fwsectors)) 497 break; 498 else if (g_handleattr_int(bp, "GEOM::fwheads", dp->d_fwheads)) 499 break; 500 else if (g_handleattr_off_t(bp, "GEOM::frontstuff", 0)) 501 break; 502 else if (g_handleattr_str(bp, "GEOM::ident", dp->d_ident)) 503 break; 504 else if (g_handleattr_uint16_t(bp, "GEOM::hba_vendor", 505 dp->d_hba_vendor)) 506 break; 507 else if (g_handleattr_uint16_t(bp, "GEOM::hba_device", 508 dp->d_hba_device)) 509 break; 510 else if (g_handleattr_uint16_t(bp, "GEOM::hba_subvendor", 511 dp->d_hba_subvendor)) 512 break; 513 else if (g_handleattr_uint16_t(bp, "GEOM::hba_subdevice", 514 dp->d_hba_subdevice)) 515 break; 516 else if (!strcmp(bp->bio_attribute, "GEOM::kerneldump")) 517 g_disk_kerneldump(bp, dp); 518 else if (!strcmp(bp->bio_attribute, "GEOM::setstate")) 519 g_disk_setstate(bp, sc); 520 else if (g_handleattr_uint16_t(bp, "GEOM::rotation_rate", 521 dp->d_rotation_rate)) 522 break; 523 else 524 error = ENOIOCTL; 525 break; 526 case BIO_FLUSH: 527 g_trace(G_T_BIO, "g_disk_flushcache(%s)", 528 bp->bio_to->name); 529 if (!(dp->d_flags & DISKFLAG_CANFLUSHCACHE)) { 530 error = EOPNOTSUPP; 531 break; 532 } 533 /*FALLTHROUGH*/ 534 case BIO_ZONE: 535 if (bp->bio_cmd == BIO_ZONE) { 536 if (!(dp->d_flags & DISKFLAG_CANZONE)) { 537 error = EOPNOTSUPP; 538 break; 539 } 540 g_trace(G_T_BIO, "g_disk_zone(%s)", 541 bp->bio_to->name); 542 } 543 bp2 = g_clone_bio(bp); 544 if (bp2 == NULL) { 545 g_io_deliver(bp, ENOMEM); 546 return; 547 } 548 bp2->bio_done = g_disk_done; 549 bp2->bio_disk = dp; 550 mtx_lock(&sc->start_mtx); 551 devstat_start_transaction_bio(dp->d_devstat, bp2); 552 mtx_unlock(&sc->start_mtx); 553 dp->d_strategy(bp2); 554 break; 555 default: 556 error = EOPNOTSUPP; 557 break; 558 } 559 if (error != EJUSTRETURN) 560 g_io_deliver(bp, error); 561 return; 562 } 563 564 static void 565 g_disk_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp) 566 { 567 struct bio *bp; 568 struct disk *dp; 569 struct g_disk_softc *sc; 570 char *buf; 571 int res = 0; 572 573 sc = gp->softc; 574 if (sc == NULL || (dp = sc->dp) == NULL) 575 return; 576 if (indent == NULL) { 577 sbuf_printf(sb, " hd %u", dp->d_fwheads); 578 sbuf_printf(sb, " sc %u", dp->d_fwsectors); 579 return; 580 } 581 if (pp != NULL) { 582 sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n", 583 indent, dp->d_fwheads); 584 sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n", 585 indent, dp->d_fwsectors); 586 587 /* 588 * "rotationrate" is a little complicated, because the value 589 * returned by the drive might not be the RPM; 0 and 1 are 590 * special cases, and there's also a valid range. 591 */ 592 sbuf_printf(sb, "%s<rotationrate>", indent); 593 if (dp->d_rotation_rate == DISK_RR_UNKNOWN) /* Old drives */ 594 sbuf_printf(sb, "unknown"); /* don't report RPM. */ 595 else if (dp->d_rotation_rate == DISK_RR_NON_ROTATING) 596 sbuf_printf(sb, "0"); 597 else if ((dp->d_rotation_rate >= DISK_RR_MIN) && 598 (dp->d_rotation_rate <= DISK_RR_MAX)) 599 sbuf_printf(sb, "%u", dp->d_rotation_rate); 600 else 601 sbuf_printf(sb, "invalid"); 602 sbuf_printf(sb, "</rotationrate>\n"); 603 if (dp->d_getattr != NULL) { 604 buf = g_malloc(DISK_IDENT_SIZE, M_WAITOK); 605 bp = g_alloc_bio(); 606 bp->bio_disk = dp; 607 bp->bio_attribute = "GEOM::ident"; 608 bp->bio_length = DISK_IDENT_SIZE; 609 bp->bio_data = buf; 610 res = dp->d_getattr(bp); 611 sbuf_printf(sb, "%s<ident>", indent); 612 g_conf_printf_escaped(sb, "%s", 613 res == 0 ? buf: dp->d_ident); 614 sbuf_printf(sb, "</ident>\n"); 615 bp->bio_attribute = "GEOM::lunid"; 616 bp->bio_length = DISK_IDENT_SIZE; 617 bp->bio_data = buf; 618 if (dp->d_getattr(bp) == 0) { 619 sbuf_printf(sb, "%s<lunid>", indent); 620 g_conf_printf_escaped(sb, "%s", buf); 621 sbuf_printf(sb, "</lunid>\n"); 622 } 623 bp->bio_attribute = "GEOM::lunname"; 624 bp->bio_length = DISK_IDENT_SIZE; 625 bp->bio_data = buf; 626 if (dp->d_getattr(bp) == 0) { 627 sbuf_printf(sb, "%s<lunname>", indent); 628 g_conf_printf_escaped(sb, "%s", buf); 629 sbuf_printf(sb, "</lunname>\n"); 630 } 631 g_destroy_bio(bp); 632 g_free(buf); 633 } else { 634 sbuf_printf(sb, "%s<ident>", indent); 635 g_conf_printf_escaped(sb, "%s", dp->d_ident); 636 sbuf_printf(sb, "</ident>\n"); 637 } 638 sbuf_printf(sb, "%s<descr>", indent); 639 g_conf_printf_escaped(sb, "%s", dp->d_descr); 640 sbuf_printf(sb, "</descr>\n"); 641 } 642 } 643 644 static void 645 g_disk_resize(void *ptr, int flag) 646 { 647 struct disk *dp; 648 struct g_geom *gp; 649 struct g_provider *pp; 650 651 if (flag == EV_CANCEL) 652 return; 653 g_topology_assert(); 654 655 dp = ptr; 656 gp = dp->d_geom; 657 658 if (dp->d_destroyed || gp == NULL) 659 return; 660 661 LIST_FOREACH(pp, &gp->provider, provider) { 662 if (pp->sectorsize != 0 && 663 pp->sectorsize != dp->d_sectorsize) 664 g_wither_provider(pp, ENXIO); 665 else 666 g_resize_provider(pp, dp->d_mediasize); 667 } 668 } 669 670 static void 671 g_disk_create(void *arg, int flag) 672 { 673 struct g_geom *gp; 674 struct g_provider *pp; 675 struct disk *dp; 676 struct g_disk_softc *sc; 677 char tmpstr[80]; 678 679 if (flag == EV_CANCEL) 680 return; 681 g_topology_assert(); 682 dp = arg; 683 684 mtx_pool_lock(mtxpool_sleep, dp); 685 dp->d_init_level = DISK_INIT_START; 686 687 /* 688 * If the disk has already gone away, we can just stop here and 689 * call the user's callback to tell him we've cleaned things up. 690 */ 691 if (dp->d_goneflag != 0) { 692 mtx_pool_unlock(mtxpool_sleep, dp); 693 if (dp->d_gone != NULL) 694 dp->d_gone(dp); 695 return; 696 } 697 mtx_pool_unlock(mtxpool_sleep, dp); 698 699 sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO); 700 mtx_init(&sc->start_mtx, "g_disk_start", NULL, MTX_DEF); 701 mtx_init(&sc->done_mtx, "g_disk_done", NULL, MTX_DEF); 702 sc->dp = dp; 703 gp = g_new_geomf(&g_disk_class, "%s%d", dp->d_name, dp->d_unit); 704 gp->softc = sc; 705 pp = g_new_providerf(gp, "%s", gp->name); 706 devstat_remove_entry(pp->stat); 707 pp->stat = NULL; 708 dp->d_devstat->id = pp; 709 pp->mediasize = dp->d_mediasize; 710 pp->sectorsize = dp->d_sectorsize; 711 pp->stripeoffset = dp->d_stripeoffset; 712 pp->stripesize = dp->d_stripesize; 713 if ((dp->d_flags & DISKFLAG_UNMAPPED_BIO) != 0) 714 pp->flags |= G_PF_ACCEPT_UNMAPPED; 715 if ((dp->d_flags & DISKFLAG_DIRECT_COMPLETION) != 0) 716 pp->flags |= G_PF_DIRECT_SEND; 717 pp->flags |= G_PF_DIRECT_RECEIVE; 718 if (bootverbose) 719 printf("GEOM: new disk %s\n", gp->name); 720 sysctl_ctx_init(&sc->sysctl_ctx); 721 snprintf(tmpstr, sizeof(tmpstr), "GEOM disk %s", gp->name); 722 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx, 723 SYSCTL_STATIC_CHILDREN(_kern_geom_disk), OID_AUTO, gp->name, 724 CTLFLAG_RD, 0, tmpstr); 725 if (sc->sysctl_tree != NULL) { 726 SYSCTL_ADD_STRING(&sc->sysctl_ctx, 727 SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "led", 728 CTLFLAG_RWTUN, sc->led, sizeof(sc->led), 729 "LED name"); 730 SYSCTL_ADD_PROC(&sc->sysctl_ctx, 731 SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "flags", 732 CTLTYPE_STRING | CTLFLAG_RD, dp, 0, g_disk_sysctl_flags, 733 "A", "Report disk flags"); 734 } 735 pp->private = sc; 736 dp->d_geom = gp; 737 g_error_provider(pp, 0); 738 739 mtx_pool_lock(mtxpool_sleep, dp); 740 dp->d_init_level = DISK_INIT_DONE; 741 742 /* 743 * If the disk has gone away at this stage, start the withering 744 * process for it. 745 */ 746 if (dp->d_goneflag != 0) { 747 mtx_pool_unlock(mtxpool_sleep, dp); 748 g_wither_provider(pp, ENXIO); 749 return; 750 } 751 mtx_pool_unlock(mtxpool_sleep, dp); 752 753 } 754 755 /* 756 * We get this callback after all of the consumers have gone away, and just 757 * before the provider is freed. If the disk driver provided a d_gone 758 * callback, let them know that it is okay to free resources -- they won't 759 * be getting any more accesses from GEOM. 760 */ 761 static void 762 g_disk_providergone(struct g_provider *pp) 763 { 764 struct disk *dp; 765 struct g_disk_softc *sc; 766 767 sc = (struct g_disk_softc *)pp->private; 768 dp = sc->dp; 769 if (dp != NULL && dp->d_gone != NULL) 770 dp->d_gone(dp); 771 if (sc->sysctl_tree != NULL) { 772 sysctl_ctx_free(&sc->sysctl_ctx); 773 sc->sysctl_tree = NULL; 774 } 775 if (sc->led[0] != 0) { 776 led_set(sc->led, "0"); 777 sc->led[0] = 0; 778 } 779 pp->private = NULL; 780 pp->geom->softc = NULL; 781 mtx_destroy(&sc->done_mtx); 782 mtx_destroy(&sc->start_mtx); 783 g_free(sc); 784 } 785 786 static void 787 g_disk_destroy(void *ptr, int flag) 788 { 789 struct disk *dp; 790 struct g_geom *gp; 791 struct g_disk_softc *sc; 792 793 g_topology_assert(); 794 dp = ptr; 795 gp = dp->d_geom; 796 if (gp != NULL) { 797 sc = gp->softc; 798 if (sc != NULL) 799 sc->dp = NULL; 800 dp->d_geom = NULL; 801 g_wither_geom(gp, ENXIO); 802 } 803 804 g_free(dp); 805 } 806 807 /* 808 * We only allow printable characters in disk ident, 809 * the rest is converted to 'x<HH>'. 810 */ 811 static void 812 g_disk_ident_adjust(char *ident, size_t size) 813 { 814 char *p, tmp[4], newid[DISK_IDENT_SIZE]; 815 816 newid[0] = '\0'; 817 for (p = ident; *p != '\0'; p++) { 818 if (isprint(*p)) { 819 tmp[0] = *p; 820 tmp[1] = '\0'; 821 } else { 822 snprintf(tmp, sizeof(tmp), "x%02hhx", 823 *(unsigned char *)p); 824 } 825 if (strlcat(newid, tmp, sizeof(newid)) >= sizeof(newid)) 826 break; 827 } 828 bzero(ident, size); 829 strlcpy(ident, newid, size); 830 } 831 832 struct disk * 833 disk_alloc(void) 834 { 835 836 return (g_malloc(sizeof(struct disk), M_WAITOK | M_ZERO)); 837 } 838 839 void 840 disk_create(struct disk *dp, int version) 841 { 842 843 if (version != DISK_VERSION) { 844 printf("WARNING: Attempt to add disk %s%d %s", 845 dp->d_name, dp->d_unit, 846 " using incompatible ABI version of disk(9)\n"); 847 printf("WARNING: Ignoring disk %s%d\n", 848 dp->d_name, dp->d_unit); 849 return; 850 } 851 if (dp->d_flags & DISKFLAG_RESERVED) { 852 printf("WARNING: Attempt to add non-MPSAFE disk %s%d\n", 853 dp->d_name, dp->d_unit); 854 printf("WARNING: Ignoring disk %s%d\n", 855 dp->d_name, dp->d_unit); 856 return; 857 } 858 KASSERT(dp->d_strategy != NULL, ("disk_create need d_strategy")); 859 KASSERT(dp->d_name != NULL, ("disk_create need d_name")); 860 KASSERT(*dp->d_name != 0, ("disk_create need d_name")); 861 KASSERT(strlen(dp->d_name) < SPECNAMELEN - 4, ("disk name too long")); 862 if (dp->d_devstat == NULL) 863 dp->d_devstat = devstat_new_entry(dp->d_name, dp->d_unit, 864 dp->d_sectorsize, DEVSTAT_ALL_SUPPORTED, 865 DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX); 866 dp->d_geom = NULL; 867 868 dp->d_init_level = DISK_INIT_NONE; 869 870 g_disk_ident_adjust(dp->d_ident, sizeof(dp->d_ident)); 871 g_post_event(g_disk_create, dp, M_WAITOK, dp, NULL); 872 } 873 874 void 875 disk_destroy(struct disk *dp) 876 { 877 878 g_cancel_event(dp); 879 dp->d_destroyed = 1; 880 if (dp->d_devstat != NULL) 881 devstat_remove_entry(dp->d_devstat); 882 g_post_event(g_disk_destroy, dp, M_WAITOK, NULL); 883 } 884 885 void 886 disk_gone(struct disk *dp) 887 { 888 struct g_geom *gp; 889 struct g_provider *pp; 890 891 mtx_pool_lock(mtxpool_sleep, dp); 892 dp->d_goneflag = 1; 893 894 /* 895 * If we're still in the process of creating this disk (the 896 * g_disk_create() function is still queued, or is in 897 * progress), the init level will not yet be DISK_INIT_DONE. 898 * 899 * If that is the case, g_disk_create() will see d_goneflag 900 * and take care of cleaning things up. 901 * 902 * If the disk has already been created, we default to 903 * withering the provider as usual below. 904 * 905 * If the caller has not set a d_gone() callback, he will 906 * not be any worse off by returning here, because the geom 907 * has not been fully setup in any case. 908 */ 909 if (dp->d_init_level < DISK_INIT_DONE) { 910 mtx_pool_unlock(mtxpool_sleep, dp); 911 return; 912 } 913 mtx_pool_unlock(mtxpool_sleep, dp); 914 915 gp = dp->d_geom; 916 if (gp != NULL) { 917 pp = LIST_FIRST(&gp->provider); 918 if (pp != NULL) { 919 KASSERT(LIST_NEXT(pp, provider) == NULL, 920 ("geom %p has more than one provider", gp)); 921 g_wither_provider(pp, ENXIO); 922 } 923 } 924 } 925 926 void 927 disk_attr_changed(struct disk *dp, const char *attr, int flag) 928 { 929 struct g_geom *gp; 930 struct g_provider *pp; 931 char devnamebuf[128]; 932 933 gp = dp->d_geom; 934 if (gp != NULL) 935 LIST_FOREACH(pp, &gp->provider, provider) 936 (void)g_attr_changed(pp, attr, flag); 937 snprintf(devnamebuf, sizeof(devnamebuf), "devname=%s%d", dp->d_name, 938 dp->d_unit); 939 devctl_notify("GEOM", "disk", attr, devnamebuf); 940 } 941 942 void 943 disk_media_changed(struct disk *dp, int flag) 944 { 945 struct g_geom *gp; 946 struct g_provider *pp; 947 948 gp = dp->d_geom; 949 if (gp != NULL) { 950 pp = LIST_FIRST(&gp->provider); 951 if (pp != NULL) { 952 KASSERT(LIST_NEXT(pp, provider) == NULL, 953 ("geom %p has more than one provider", gp)); 954 g_media_changed(pp, flag); 955 } 956 } 957 } 958 959 void 960 disk_media_gone(struct disk *dp, int flag) 961 { 962 struct g_geom *gp; 963 struct g_provider *pp; 964 965 gp = dp->d_geom; 966 if (gp != NULL) { 967 pp = LIST_FIRST(&gp->provider); 968 if (pp != NULL) { 969 KASSERT(LIST_NEXT(pp, provider) == NULL, 970 ("geom %p has more than one provider", gp)); 971 g_media_gone(pp, flag); 972 } 973 } 974 } 975 976 int 977 disk_resize(struct disk *dp, int flag) 978 { 979 980 if (dp->d_destroyed || dp->d_geom == NULL) 981 return (0); 982 983 return (g_post_event(g_disk_resize, dp, flag, NULL)); 984 } 985 986 static void 987 g_kern_disks(void *p, int flag __unused) 988 { 989 struct sbuf *sb; 990 struct g_geom *gp; 991 char *sp; 992 993 sb = p; 994 sp = ""; 995 g_topology_assert(); 996 LIST_FOREACH(gp, &g_disk_class.geom, geom) { 997 sbuf_printf(sb, "%s%s", sp, gp->name); 998 sp = " "; 999 } 1000 sbuf_finish(sb); 1001 } 1002 1003 static int 1004 g_disk_sysctl_flags(SYSCTL_HANDLER_ARGS) 1005 { 1006 struct disk *dp; 1007 struct sbuf *sb; 1008 int error; 1009 1010 sb = sbuf_new_auto(); 1011 dp = (struct disk *)arg1; 1012 sbuf_printf(sb, "%b", dp->d_flags, 1013 "\20" 1014 "\2OPEN" 1015 "\3CANDELETE" 1016 "\4CANFLUSHCACHE" 1017 "\5UNMAPPEDBIO" 1018 "\6DIRECTCOMPLETION" 1019 "\10CANZONE"); 1020 1021 sbuf_finish(sb); 1022 error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1); 1023 sbuf_delete(sb); 1024 return (error); 1025 } 1026 1027 static int 1028 sysctl_disks(SYSCTL_HANDLER_ARGS) 1029 { 1030 int error; 1031 struct sbuf *sb; 1032 1033 sb = sbuf_new_auto(); 1034 g_waitfor_event(g_kern_disks, sb, M_WAITOK, NULL); 1035 error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1); 1036 sbuf_delete(sb); 1037 return error; 1038 } 1039 1040 SYSCTL_PROC(_kern, OID_AUTO, disks, 1041 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0, 1042 sysctl_disks, "A", "names of available disks"); 1043