/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_geom.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/bio.h>
#include <sys/ctype.h>
#include <sys/devctl.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/msan.h>
#include <sys/sbuf.h>
#include <sys/devicestat.h>

#include <sys/lock.h>
#include <sys/mutex.h>
#include <geom/geom.h>
#include <geom/geom_disk.h>
#include <geom/geom_int.h>

#include <dev/led/led.h>

#include <machine/bus.h>

struct g_disk_softc {
	struct disk		*dp;
	struct devstat		*d_devstat;
	struct sysctl_ctx_list	sysctl_ctx;
	struct sysctl_oid	*sysctl_tree;
	char			led[64];
	uint32_t		state;
	struct mtx		done_mtx;
	bool			flush_notsup_succeed;
};

static g_access_t g_disk_access;
static g_start_t g_disk_start;
static g_ioctl_t g_disk_ioctl;
static g_dumpconf_t g_disk_dumpconf;
static g_provgone_t g_disk_providergone;

static int g_disk_sysctl_flags(SYSCTL_HANDLER_ARGS);

static struct g_class g_disk_class = {
	.name = G_DISK_CLASS_NAME,
	.version = G_VERSION,
	.start = g_disk_start,
	.access = g_disk_access,
	.ioctl = g_disk_ioctl,
	.providergone = g_disk_providergone,
	.dumpconf = g_disk_dumpconf,
};

SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, disk, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "GEOM_DISK stuff");

DECLARE_GEOM_CLASS(g_disk_class, g_disk);

static int
g_disk_access(struct g_provider *pp, int r, int w, int e)
{
	struct disk *dp;
	struct g_disk_softc *sc;
	int error;

	g_trace(G_T_ACCESS, "g_disk_access(%s, %d, %d, %d)",
	    pp->name, r, w, e);
	g_topology_assert();
	sc = pp->private;
	if ((dp = sc->dp) == NULL || dp->d_destroyed) {
		/*
		 * Allow decreasing access count even if disk is not
		 * available anymore.
		 */
		if (r <= 0 && w <= 0 && e <= 0)
			return (0);
		return (ENXIO);
	}
	r += pp->acr;
	w += pp->acw;
	e += pp->ace;
	error = 0;
	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
		/*
		 * It would be better to defer this decision to d_open if
		 * it was able to take flags.
		 */
		if (w > 0 && (dp->d_flags & DISKFLAG_WRITE_PROTECT) != 0)
			error = EROFS;
		if (error == 0 && dp->d_open != NULL)
			error = dp->d_open(dp);
		if (bootverbose && error != 0)
			printf("Opened disk %s -> %d\n", pp->name, error);
		if (error != 0)
			return (error);
		pp->sectorsize = dp->d_sectorsize;
		if (dp->d_maxsize == 0) {
			printf("WARNING: Disk drive %s%d has no d_maxsize\n",
			    dp->d_name, dp->d_unit);
			dp->d_maxsize = DFLTPHYS;
		}
		if (dp->d_delmaxsize == 0) {
			if (bootverbose && dp->d_flags & DISKFLAG_CANDELETE) {
				printf("WARNING: Disk drive %s%d has no "
				    "d_delmaxsize\n", dp->d_name, dp->d_unit);
			}
			dp->d_delmaxsize = dp->d_maxsize;
		}
		pp->stripeoffset = dp->d_stripeoffset;
		pp->stripesize = dp->d_stripesize;
		dp->d_flags |= DISKFLAG_OPEN;
		/*
		 * Do not invoke resize event when initial size was zero.
		 * Some disks report their size only after the first open.
		 */
		if (pp->mediasize == 0)
			pp->mediasize = dp->d_mediasize;
		else
			g_resize_provider(pp, dp->d_mediasize);
	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
		if (dp->d_close != NULL) {
			error = dp->d_close(dp);
			if (error != 0)
				printf("Closed disk %s -> %d\n",
				    pp->name, error);
		}
		sc->state = G_STATE_ACTIVE;
		if (sc->led[0] != 0)
			led_set(sc->led, "0");
		dp->d_flags &= ~DISKFLAG_OPEN;
	}
	return (error);
}

static void
g_disk_kerneldump(struct bio *bp, struct disk *dp)
{
	struct g_kerneldump *gkd;
	struct g_geom *gp;

	gkd = (struct g_kerneldump*)bp->bio_data;
	gp = bp->bio_to->geom;
	g_trace(G_T_TOPOLOGY, "g_disk_kerneldump(%s, %jd, %jd)",
	    gp->name, (intmax_t)gkd->offset, (intmax_t)gkd->length);
	if (dp->d_dump == NULL) {
		g_io_deliver(bp, ENODEV);
		return;
	}
	gkd->di.dumper = dp->d_dump;
	gkd->di.priv = dp;
	gkd->di.blocksize = dp->d_sectorsize;
	gkd->di.maxiosize = dp->d_maxsize;
	gkd->di.mediaoffset = gkd->offset;
	if ((gkd->offset + gkd->length) > dp->d_mediasize)
		gkd->length = dp->d_mediasize - gkd->offset;
	gkd->di.mediasize = gkd->length;
	g_io_deliver(bp, 0);
}

static void
g_disk_setstate(struct bio *bp, struct g_disk_softc *sc)
{
	const char *cmd;

	memcpy(&sc->state, bp->bio_data, sizeof(sc->state));
	if (sc->led[0] != 0) {
		switch (sc->state) {
		case G_STATE_FAILED:
			cmd = "1";
			break;
		case G_STATE_REBUILD:
			cmd = "f5";
			break;
		case G_STATE_RESYNC:
			cmd = "f1";
			break;
		default:
			cmd = "0";
			break;
		}
		led_set(sc->led, cmd);
	}
	g_io_deliver(bp, 0);
}

static void
g_disk_done(struct bio *bp)
{
	struct bintime now;
	struct bio *bp2;
	struct g_disk_softc *sc;

	/* See "notes" for why we need a mutex here */
	sc = bp->bio_caller1;
	bp2 = bp->bio_parent;
	binuptime(&now);
	mtx_lock(&sc->done_mtx);
	if (bp2->bio_error == 0)
		bp2->bio_error = bp->bio_error;
	bp2->bio_completed += bp->bio_length - bp->bio_resid;

	if (bp->bio_cmd == BIO_READ)
		kmsan_check(bp2->bio_data, bp2->bio_completed, "g_disk_done");

	switch (bp->bio_cmd) {
	case BIO_ZONE:
		bcopy(&bp->bio_zone, &bp2->bio_zone, sizeof(bp->bio_zone));
		/*FALLTHROUGH*/
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
	case BIO_FLUSH:
		devstat_end_transaction_bio_bt(sc->d_devstat, bp, &now);
		break;
	default:
		break;
	}
	bp2->bio_inbed++;
	if (bp2->bio_children == bp2->bio_inbed) {
		mtx_unlock(&sc->done_mtx);
		bp2->bio_resid = bp2->bio_bcount - bp2->bio_completed;
		g_io_deliver(bp2, bp2->bio_error);
	} else
		mtx_unlock(&sc->done_mtx);
	g_destroy_bio(bp);
}

static int
g_disk_ioctl(struct g_provider *pp, u_long cmd, void *data, int fflag,
    struct thread *td)
{
	struct disk *dp;
	struct g_disk_softc *sc;

	sc = pp->private;
	dp = sc->dp;
	KASSERT(dp != NULL && !dp->d_destroyed,
	    ("g_disk_ioctl(%lx) on destroyed disk %s", cmd, pp->name));

	if (dp->d_ioctl == NULL)
		return (ENOIOCTL);
	return (dp->d_ioctl(dp, cmd, data, fflag, td));
}

static off_t
g_disk_maxsize(struct disk *dp, struct bio *bp)
{
	if (bp->bio_cmd == BIO_DELETE)
		return (dp->d_delmaxsize);
	return (dp->d_maxsize);
}

static int
g_disk_maxsegs(struct disk *dp, struct bio *bp)
{
	return ((g_disk_maxsize(dp, bp) / PAGE_SIZE) + 1);
}

static void
g_disk_advance(struct disk *dp, struct bio *bp, off_t off)
{

	bp->bio_offset += off;
	bp->bio_length -= off;

	if ((bp->bio_flags & BIO_VLIST) != 0) {
		bus_dma_segment_t *seg, *end;

		seg = (bus_dma_segment_t *)bp->bio_data;
		end = (bus_dma_segment_t *)bp->bio_data + bp->bio_ma_n;
		off += bp->bio_ma_offset;
		while (off >= seg->ds_len) {
			KASSERT((seg != end),
			    ("vlist request runs off the end"));
			off -= seg->ds_len;
			seg++;
		}
		bp->bio_ma_offset = off;
		bp->bio_ma_n = end - seg;
		bp->bio_data = (void *)seg;
	} else if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
		bp->bio_ma += off / PAGE_SIZE;
		bp->bio_ma_offset += off;
		bp->bio_ma_offset %= PAGE_SIZE;
		bp->bio_ma_n -= off / PAGE_SIZE;
	} else {
		bp->bio_data += off;
	}
}

static void
g_disk_seg_limit(bus_dma_segment_t *seg, off_t *poffset,
    off_t *plength, int *ppages)
{
	uintptr_t seg_page_base;
	uintptr_t seg_page_end;
	off_t offset;
	off_t length;
	int seg_pages;

	offset = *poffset;
	length = *plength;

	if (length > seg->ds_len - offset)
		length = seg->ds_len - offset;

	seg_page_base = trunc_page(seg->ds_addr + offset);
	seg_page_end = round_page(seg->ds_addr + offset + length);
	seg_pages = (seg_page_end - seg_page_base) >> PAGE_SHIFT;

	if (seg_pages > *ppages) {
		seg_pages = *ppages;
		length = (seg_page_base + (seg_pages << PAGE_SHIFT)) -
		    (seg->ds_addr + offset);
	}

	*poffset = 0;
	*plength -= length;
	*ppages -= seg_pages;
}

static off_t
g_disk_vlist_limit(struct disk *dp, struct bio *bp, bus_dma_segment_t **pendseg)
{
	bus_dma_segment_t *seg, *end __diagused;
	off_t residual;
	off_t offset;
	int pages;

	seg = (bus_dma_segment_t *)bp->bio_data;
	end = (bus_dma_segment_t *)bp->bio_data + bp->bio_ma_n;
	residual = bp->bio_length;
	offset = bp->bio_ma_offset;
	pages = g_disk_maxsegs(dp, bp);
	while (residual != 0 && pages != 0) {
		KASSERT((seg != end),
		    ("vlist limit runs off the end"));
		g_disk_seg_limit(seg, &offset, &residual, &pages);
		seg++;
	}
	if (pendseg != NULL)
		*pendseg = seg;
	return (residual);
}

static bool
g_disk_limit(struct disk *dp, struct bio *bp)
{
	bool limited = false;
	off_t maxsz;

	maxsz = g_disk_maxsize(dp, bp);

	/*
	 * XXX: If we have a stripesize we should really use it here.
	 *      Care should be taken in the delete case if this is done
	 *      as deletes can be very sensitive to size given how they
	 *      are processed.
	 */
	if (bp->bio_length > maxsz) {
		bp->bio_length = maxsz;
		limited = true;
	}

	if ((bp->bio_flags & BIO_VLIST) != 0) {
		bus_dma_segment_t *firstseg, *endseg;
		off_t residual;

		firstseg = (bus_dma_segment_t*)bp->bio_data;
		residual = g_disk_vlist_limit(dp, bp, &endseg);
		if (residual != 0) {
			bp->bio_ma_n = endseg - firstseg;
			bp->bio_length -= residual;
			limited = true;
		}
	} else if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
		bp->bio_ma_n =
		    howmany(bp->bio_ma_offset + bp->bio_length, PAGE_SIZE);
	}

	return (limited);
}

static void
g_disk_start(struct bio *bp)
{
	struct bio *bp2, *bp3;
	struct disk *dp;
	struct g_disk_softc *sc;
	int error;
	off_t off;

	biotrack(bp, __func__);

	sc = bp->bio_to->private;
	dp = sc->dp;
	KASSERT(dp != NULL && !dp->d_destroyed,
	    ("g_disk_start(%p) on destroyed disk %s", bp, bp->bio_to->name));
	error = EJUSTRETURN;
	switch(bp->bio_cmd) {
	case BIO_DELETE:
		if (!(dp->d_flags & DISKFLAG_CANDELETE)) {
			error = EOPNOTSUPP;
			break;
		}
		/* fall-through */
	case BIO_READ:
	case BIO_WRITE:
		KASSERT((dp->d_flags & DISKFLAG_UNMAPPED_BIO) != 0 ||
		    (bp->bio_flags & BIO_UNMAPPED) == 0,
		    ("unmapped bio not supported by disk %s", dp->d_name));

		if (bp->bio_cmd == BIO_WRITE)
			kmsan_check_bio(bp, "g_disk_start");

		off = 0;
		bp3 = NULL;
		bp2 = g_clone_bio(bp);
		if (bp2 == NULL) {
			error = ENOMEM;
			break;
		}
		for (;;) {
			if (g_disk_limit(dp, bp2)) {
				off += bp2->bio_length;

				/*
				 * To avoid a race, we need to grab the next bio
				 * before we schedule this one. See "notes".
				 */
				bp3 = g_clone_bio(bp);
				if (bp3 == NULL)
					bp->bio_error = ENOMEM;
			}
			bp2->bio_done = g_disk_done;
			bp2->bio_caller1 = sc;
			bp2->bio_pblkno = bp2->bio_offset / dp->d_sectorsize;
			bp2->bio_bcount = bp2->bio_length;
			bp2->bio_disk = dp;
			devstat_start_transaction_bio(dp->d_devstat, bp2);
			dp->d_strategy(bp2);

			if (bp3 == NULL)
				break;

			bp2 = bp3;
			bp3 = NULL;
			g_disk_advance(dp, bp2, off);
		}
		break;
	case BIO_GETATTR:
		/* Give the driver a chance to override */
		if (dp->d_getattr != NULL) {
			if (bp->bio_disk == NULL)
				bp->bio_disk = dp;
			error = dp->d_getattr(bp);
			if (error != -1)
				break;
			error = EJUSTRETURN;
		}
		if (g_handleattr_int(bp, "GEOM::candelete",
		    (dp->d_flags & DISKFLAG_CANDELETE) != 0))
			break;
		else if (g_handleattr_int(bp, "GEOM::fwsectors",
		    dp->d_fwsectors))
			break;
		else if (g_handleattr_int(bp, "GEOM::fwheads", dp->d_fwheads))
			break;
		else if (g_handleattr_str(bp, "GEOM::ident", dp->d_ident))
			break;
		else if (g_handleattr_str(bp, "GEOM::descr", dp->d_descr))
			break;
		else if (g_handleattr_uint16_t(bp, "GEOM::hba_vendor",
		    dp->d_hba_vendor))
			break;
		else if (g_handleattr_uint16_t(bp, "GEOM::hba_device",
		    dp->d_hba_device))
			break;
		else if (g_handleattr_uint16_t(bp, "GEOM::hba_subvendor",
		    dp->d_hba_subvendor))
			break;
		else if (g_handleattr_uint16_t(bp, "GEOM::hba_subdevice",
		    dp->d_hba_subdevice))
			break;
		else if (!strcmp(bp->bio_attribute, "GEOM::kerneldump"))
			g_disk_kerneldump(bp, dp);
		else if (!strcmp(bp->bio_attribute, "GEOM::setstate"))
			g_disk_setstate(bp, sc);
		else if (g_handleattr_uint16_t(bp, "GEOM::rotation_rate",
		    dp->d_rotation_rate))
			break;
		else if (g_handleattr_str(bp, "GEOM::attachment",
		    dp->d_attachment))
			break;
		else
			error = ENOIOCTL;
		break;
	case BIO_FLUSH:
		g_trace(G_T_BIO, "g_disk_flushcache(%s)",
		    bp->bio_to->name);
		if (!(dp->d_flags & DISKFLAG_CANFLUSHCACHE)) {
			error = (sc->flush_notsup_succeed) ? 0 : EOPNOTSUPP;
			break;
		}
		/*FALLTHROUGH*/
	case BIO_ZONE:
		if (bp->bio_cmd == BIO_ZONE) {
			if (!(dp->d_flags & DISKFLAG_CANZONE)) {
				error = EOPNOTSUPP;
				break;
			}
			g_trace(G_T_BIO, "g_disk_zone(%s)",
			    bp->bio_to->name);
		}
		bp2 = g_clone_bio(bp);
		if (bp2 == NULL) {
			g_io_deliver(bp, ENOMEM);
			return;
		}
		bp2->bio_done = g_disk_done;
		bp2->bio_caller1 = sc;
		bp2->bio_disk = dp;
		devstat_start_transaction_bio(dp->d_devstat, bp2);
		dp->d_strategy(bp2);
		break;
	case BIO_SPEEDUP:
		bp2 = g_clone_bio(bp);
		if (bp2 == NULL) {
			g_io_deliver(bp, ENOMEM);
			return;
		}
		bp2->bio_done = g_disk_done;
		bp2->bio_caller1 = sc;
		bp2->bio_disk = dp;
		dp->d_strategy(bp2);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	if (error != EJUSTRETURN)
		g_io_deliver(bp, error);
	return;
}

static void
g_disk_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct bio *bp;
	struct disk *dp;
	struct g_disk_softc *sc;
	char *buf;
	int res = 0;

	sc = gp->softc;
	if (sc == NULL || (dp = sc->dp) == NULL)
		return;
	if (indent == NULL) {
		sbuf_printf(sb, " hd %u", dp->d_fwheads);
		sbuf_printf(sb, " sc %u", dp->d_fwsectors);
		return;
	}
	if (pp != NULL) {
		sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n",
		    indent, dp->d_fwheads);
		sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n",
		    indent, dp->d_fwsectors);

		/*
		 * "rotationrate" is a little complicated, because the value
		 * returned by the drive might not be the RPM; 0 and 1 are
		 * special cases, and there's also a valid range.
		 */
		sbuf_printf(sb, "%s<rotationrate>", indent);
		if (dp->d_rotation_rate == DISK_RR_UNKNOWN) /* Old drives */
			sbuf_cat(sb, "unknown");	/* don't report RPM. */
		else if (dp->d_rotation_rate == DISK_RR_NON_ROTATING)
			sbuf_cat(sb, "0");
		else if ((dp->d_rotation_rate >= DISK_RR_MIN) &&
		    (dp->d_rotation_rate <= DISK_RR_MAX))
			sbuf_printf(sb, "%u", dp->d_rotation_rate);
		else
			sbuf_cat(sb, "invalid");
		sbuf_cat(sb, "</rotationrate>\n");
		if (dp->d_getattr != NULL) {
			buf = g_malloc(DISK_IDENT_SIZE, M_WAITOK);
			bp = g_alloc_bio();
			bp->bio_disk = dp;
			bp->bio_attribute = "GEOM::ident";
			bp->bio_length = DISK_IDENT_SIZE;
			bp->bio_data = buf;
			res = dp->d_getattr(bp);
			sbuf_printf(sb, "%s<ident>", indent);
			g_conf_cat_escaped(sb, res == 0 ?
			    buf : dp->d_ident);
			sbuf_cat(sb, "</ident>\n");
			bp->bio_attribute = "GEOM::lunid";
			bp->bio_length = DISK_IDENT_SIZE;
			bp->bio_data = buf;
			if (dp->d_getattr(bp) == 0) {
				sbuf_printf(sb, "%s<lunid>", indent);
				g_conf_cat_escaped(sb, buf);
				sbuf_cat(sb, "</lunid>\n");
			}
			bp->bio_attribute = "GEOM::lunname";
			bp->bio_length = DISK_IDENT_SIZE;
			bp->bio_data = buf;
			if (dp->d_getattr(bp) == 0) {
				sbuf_printf(sb, "%s<lunname>", indent);
				g_conf_cat_escaped(sb, buf);
				sbuf_cat(sb, "</lunname>\n");
			}
			g_destroy_bio(bp);
			g_free(buf);
		} else {
			sbuf_printf(sb, "%s<ident>", indent);
			g_conf_cat_escaped(sb, dp->d_ident);
			sbuf_cat(sb, "</ident>\n");
		}
		sbuf_printf(sb, "%s<descr>", indent);
		g_conf_cat_escaped(sb, dp->d_descr);
		sbuf_cat(sb, "</descr>\n");
	}
}

static void
g_disk_resize(void *ptr, int flag)
{
	struct disk *dp;
	struct g_geom *gp;
	struct g_provider *pp;

	if (flag == EV_CANCEL)
		return;
	g_topology_assert();

	dp = ptr;
	gp = dp->d_geom;

	if (dp->d_destroyed || gp == NULL)
		return;

	LIST_FOREACH(pp, &gp->provider, provider) {
		if (pp->sectorsize != 0 &&
		    pp->sectorsize != dp->d_sectorsize)
			g_wither_provider(pp, ENXIO);
		else
			g_resize_provider(pp, dp->d_mediasize);
	}
}

static void
g_disk_create(void *arg, int flag)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct disk *dp;
	struct g_disk_softc *sc;
	struct disk_alias *dap;
	char tmpstr[80];

	if (flag == EV_CANCEL)
		return;
	g_topology_assert();
	dp = arg;

	mtx_pool_lock(mtxpool_sleep, dp);
	dp->d_init_level = DISK_INIT_START;

	/*
	 * If the disk has already gone away, we can just stop here and
	 * call the user's callback to tell him we've cleaned things up.
	 */
	if (dp->d_goneflag != 0) {
		mtx_pool_unlock(mtxpool_sleep, dp);
		if (dp->d_gone != NULL)
			dp->d_gone(dp);
		return;
	}
	mtx_pool_unlock(mtxpool_sleep, dp);

	sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO);
	mtx_init(&sc->done_mtx, "g_disk_done", NULL, MTX_DEF);
	sc->dp = dp;
	if (dp->d_devstat == NULL) {
		dp->d_devstat = devstat_new_entry(dp->d_name, dp->d_unit,
		    dp->d_sectorsize, DEVSTAT_ALL_SUPPORTED,
		    DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
	}
	sc->d_devstat = dp->d_devstat;
	gp = g_new_geomf(&g_disk_class, "%s%d", dp->d_name, dp->d_unit);
	gp->softc = sc;
	pp = g_new_providerf(gp, "%s", gp->name);
	LIST_FOREACH(dap, &dp->d_aliases, da_next)
		g_provider_add_alias(pp, "%s%d", dap->da_alias, dp->d_unit);
	devstat_remove_entry(pp->stat);
	pp->stat = NULL;
	dp->d_devstat->id = pp;
	pp->mediasize = dp->d_mediasize;
	pp->sectorsize = dp->d_sectorsize;
	pp->stripeoffset = dp->d_stripeoffset;
	pp->stripesize = dp->d_stripesize;
	if ((dp->d_flags & DISKFLAG_UNMAPPED_BIO) != 0)
		pp->flags |= G_PF_ACCEPT_UNMAPPED;
	if ((dp->d_flags & DISKFLAG_DIRECT_COMPLETION) != 0)
		pp->flags |= G_PF_DIRECT_SEND;
	pp->flags |= G_PF_DIRECT_RECEIVE;
	if (bootverbose)
		printf("GEOM: new disk %s\n", gp->name);
	sysctl_ctx_init(&sc->sysctl_ctx);
	snprintf(tmpstr, sizeof(tmpstr), "GEOM disk %s", gp->name);
	sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_kern_geom_disk), OID_AUTO, gp->name,
	    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, tmpstr);
	if (sc->sysctl_tree != NULL) {
		SYSCTL_ADD_STRING(&sc->sysctl_ctx,
		    SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "led",
		    CTLFLAG_RWTUN, sc->led, sizeof(sc->led),
		    "LED name");
		SYSCTL_ADD_PROC(&sc->sysctl_ctx,
		    SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "flags",
		    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, dp, 0,
		    g_disk_sysctl_flags, "A", "Report disk flags");
		SYSCTL_ADD_BOOL(&sc->sysctl_ctx,
		    SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO,
		    "flush_notsup_succeed", CTLFLAG_RWTUN,
		    &sc->flush_notsup_succeed, sizeof(sc->flush_notsup_succeed),
		    "Do not return EOPNOTSUPP if there is no cache to flush");
	}
	pp->private = sc;
	dp->d_geom = gp;
	g_error_provider(pp, 0);

	mtx_pool_lock(mtxpool_sleep, dp);
	dp->d_init_level = DISK_INIT_DONE;

	/*
	 * If the disk has gone away at this stage, start the withering
	 * process for it.
	 */
	if (dp->d_goneflag != 0) {
		mtx_pool_unlock(mtxpool_sleep, dp);
		g_wither_provider(pp, ENXIO);
		return;
	}
	mtx_pool_unlock(mtxpool_sleep, dp);

}

/*
 * We get this callback after all of the consumers have gone away, and just
 * before the provider is freed.  If the disk driver provided a d_gone
 * callback, let them know that it is okay to free resources -- they won't
 * be getting any more accesses from GEOM.
 */
static void
g_disk_providergone(struct g_provider *pp)
{
	struct disk *dp;
	struct g_disk_softc *sc;

	sc = (struct g_disk_softc *)pp->private;
	dp = sc->dp;
	if (dp != NULL && dp->d_gone != NULL)
		dp->d_gone(dp);
	if (sc->sysctl_tree != NULL) {
		sysctl_ctx_free(&sc->sysctl_ctx);
		sc->sysctl_tree = NULL;
	}
	if (sc->led[0] != 0) {
		led_set(sc->led, "0");
		sc->led[0] = 0;
	}
	pp->private = NULL;
	pp->geom->softc = NULL;
	mtx_destroy(&sc->done_mtx);
	g_free(sc);
}

static void
g_disk_destroy(void *ptr, int flag)
{
	struct disk *dp;
	struct g_geom *gp;
	struct g_disk_softc *sc;
	struct disk_alias *dap, *daptmp;

	g_topology_assert();
	dp = ptr;
	gp = dp->d_geom;
	if (gp != NULL) {
		sc = gp->softc;
		if (sc != NULL)
			sc->dp = NULL;
		dp->d_geom = NULL;
		g_wither_geom(gp, ENXIO);
	}
	LIST_FOREACH_SAFE(dap, &dp->d_aliases, da_next, daptmp)
		g_free(dap);

	g_free(dp);
}

/*
 * We only allow printable characters in disk ident,
 * the rest is converted to 'x<HH>'.
 */
static void
g_disk_ident_adjust(char *ident, size_t size)
{
	char *p, tmp[4], newid[DISK_IDENT_SIZE];

	newid[0] = '\0';
	for (p = ident; *p != '\0'; p++) {
		if (isprint(*p)) {
			tmp[0] = *p;
			tmp[1] = '\0';
		} else {
			snprintf(tmp, sizeof(tmp), "x%02hhx",
			    *(unsigned char *)p);
		}
		if (strlcat(newid, tmp, sizeof(newid)) >= sizeof(newid))
			break;
	}
	bzero(ident, size);
	strlcpy(ident, newid, size);
}

struct disk *
disk_alloc(void)
{
	struct disk *dp;

	dp = g_malloc(sizeof(struct disk), M_WAITOK | M_ZERO);
	LIST_INIT(&dp->d_aliases);
	dp->d_init_level = DISK_INIT_NONE;
	dp->d_cevent = g_alloc_event(M_WAITOK);
	dp->d_devent = g_alloc_event(M_WAITOK);
	return (dp);
}

void
disk_create(struct disk *dp, int version)
{

	if (version != DISK_VERSION) {
		printf("WARNING: Attempt to add disk %s%d %s",
		    dp->d_name, dp->d_unit,
		    " using incompatible ABI version of disk(9)\n");
		printf("WARNING: Ignoring disk %s%d\n",
		    dp->d_name, dp->d_unit);
		return;
	}
	if (dp->d_flags & DISKFLAG_RESERVED) {
		printf("WARNING: Attempt to add non-MPSAFE disk %s%d\n",
		    dp->d_name, dp->d_unit);
		printf("WARNING: Ignoring disk %s%d\n",
		    dp->d_name, dp->d_unit);
		return;
	}
	KASSERT(dp->d_strategy != NULL, ("disk_create need d_strategy"));
	KASSERT(dp->d_name != NULL, ("disk_create need d_name"));
	KASSERT(*dp->d_name != 0, ("disk_create need d_name"));
	KASSERT(strlen(dp->d_name) < SPECNAMELEN - 4, ("disk name too long"));
	g_disk_ident_adjust(dp->d_ident, sizeof(dp->d_ident));

	dp->d_init_level = DISK_INIT_CREATE;

	KASSERT(dp->d_cevent != NULL,
	    ("Disk create for %p with event NULL", dp));
	g_post_event_ep(g_disk_create, dp, dp->d_cevent, dp, NULL);
}

void
disk_destroy(struct disk *dp)
{
	struct disk_alias *dap, *daptmp;

	/* If disk_create() was never called, just free the resources. */
	if (dp->d_init_level < DISK_INIT_CREATE) {
		if (dp->d_devstat != NULL)
			devstat_remove_entry(dp->d_devstat);
		LIST_FOREACH_SAFE(dap, &dp->d_aliases, da_next, daptmp)
			g_free(dap);
		g_free(dp->d_cevent);
		g_free(dp->d_devent);
		g_free(dp);
		return;
	}

	KASSERT(dp->d_devent != NULL,
	    ("Disk destroy for %p with event NULL", dp));
	disk_gone(dp);
	dp->d_destroyed = 1;
	g_cancel_event(dp);
	if (dp->d_devstat != NULL)
		devstat_remove_entry(dp->d_devstat);
	g_post_event_ep(g_disk_destroy, dp, dp->d_devent, NULL);
}

void
disk_add_alias(struct disk *dp, const char *name)
{
	struct disk_alias *dap;

	dap = (struct disk_alias *)g_malloc(
	    sizeof(struct disk_alias) + strlen(name) + 1, M_WAITOK);
	strcpy((char *)(dap + 1), name);
	dap->da_alias = (const char *)(dap + 1);
	LIST_INSERT_HEAD(&dp->d_aliases, dap, da_next);
}

void
disk_gone(struct disk *dp)
{
	struct g_geom *gp;
	struct g_provider *pp;

	mtx_pool_lock(mtxpool_sleep, dp);

	/*
	 * A second wither call makes no sense, plus we cannot access the
	 * list of providers without the topology lock after calling wither
	 * once.
	 */
	if (dp->d_goneflag != 0) {
		mtx_pool_unlock(mtxpool_sleep, dp);
		return;
	}

	dp->d_goneflag = 1;

	/*
	 * If we're still in the process of creating this disk (the
	 * g_disk_create() function is still queued, or is in
	 * progress), the init level will not yet be DISK_INIT_DONE.
	 *
	 * If that is the case, g_disk_create() will see d_goneflag
	 * and take care of cleaning things up.
	 *
	 * If the disk has already been created, we default to
	 * withering the provider as usual below.
	 *
	 * If the caller has not set a d_gone() callback, he will
	 * not be any worse off by returning here, because the geom
	 * has not been fully set up in any case.
	 */
	if (dp->d_init_level < DISK_INIT_DONE) {
		mtx_pool_unlock(mtxpool_sleep, dp);
		return;
	}
	mtx_pool_unlock(mtxpool_sleep, dp);

	gp = dp->d_geom;
	pp = LIST_FIRST(&gp->provider);
	if (pp != NULL) {
		KASSERT(LIST_NEXT(pp, provider) == NULL,
		    ("geom %p has more than one provider", gp));
		g_wither_provider(pp, ENXIO);
	}
}

void
disk_attr_changed(struct disk *dp, const char *attr, int flag)
{
	struct g_geom *gp = dp->d_geom;
	struct g_provider *pp;
	char devnamebuf[128];

	if (gp == NULL)
		return;
	LIST_FOREACH(pp, &gp->provider, provider)
		(void)g_attr_changed(pp, attr, flag);
	snprintf(devnamebuf, sizeof(devnamebuf), "devname=%s%d", dp->d_name,
	    dp->d_unit);
	devctl_notify("GEOM", "disk", attr, devnamebuf);
}

void
disk_media_changed(struct disk *dp, int flag)
{
	struct g_geom *gp = dp->d_geom;
	struct g_provider *pp;

	if (gp == NULL)
		return;
	pp = LIST_FIRST(&gp->provider);
	if (pp != NULL) {
		KASSERT(LIST_NEXT(pp, provider) == NULL,
		    ("geom %p has more than one provider", gp));
		g_media_changed(pp, flag);
	}
}

void
disk_media_gone(struct disk *dp, int flag)
{
	struct g_geom *gp = dp->d_geom;
	struct g_provider *pp;

	if (gp == NULL)
		return;
	pp = LIST_FIRST(&gp->provider);
	if (pp != NULL) {
		KASSERT(LIST_NEXT(pp, provider) == NULL,
		    ("geom %p has more than one provider", gp));
		g_media_gone(pp, flag);
	}
}

int
disk_resize(struct disk *dp, int flag)
{

	if (dp->d_destroyed || dp->d_geom == NULL)
		return (0);

	return (g_post_event(g_disk_resize, dp, flag, NULL));
}

static void
g_kern_disks(void *p, int flag __unused)
{
	struct sbuf *sb;
	struct g_geom *gp;
	char *sp;

	sb = p;
	sp = "";
	g_topology_assert();
	LIST_FOREACH(gp, &g_disk_class.geom, geom) {
		sbuf_printf(sb, "%s%s", sp, gp->name);
		sp = " ";
	}
	sbuf_finish(sb);
}

static int
g_disk_sysctl_flags(SYSCTL_HANDLER_ARGS)
{
	struct disk *dp;
	struct sbuf *sb;
	int error;

	sb = sbuf_new_auto();
	dp = (struct disk *)arg1;
	sbuf_printf(sb, "%b", dp->d_flags,
	    "\20"
	    "\2OPEN"
	    "\3CANDELETE"
	    "\4CANFLUSHCACHE"
	    "\5UNMAPPEDBIO"
	    "\6DIRECTCOMPLETION"
	    "\10CANZONE"
	    "\11WRITEPROTECT");

	sbuf_finish(sb);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);
	return (error);
}

static int
sysctl_disks(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct sbuf *sb;

	sb = sbuf_new_auto();
	g_waitfor_event(g_kern_disks, sb, M_WAITOK, NULL);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, disks,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_disks, "A", "names of available disks");
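
/*
 * Example: a minimal sketch of how a storage driver typically drives the
 * disk(9) interface implemented above.  The driver-side names used here
 * (mydisk_strategy, mydisk_open, mydisk_close, sc, unit, mediasize) are
 * hypothetical placeholders; the disk(9) entry points and struct disk
 * fields are the ones handled throughout this file.
 *
 *	struct disk *dp;
 *
 *	dp = disk_alloc();
 *	dp->d_name = "mydisk";
 *	dp->d_unit = unit;
 *	dp->d_strategy = mydisk_strategy;	// required, see disk_create()
 *	dp->d_open = mydisk_open;		// optional
 *	dp->d_close = mydisk_close;		// optional
 *	dp->d_sectorsize = 512;
 *	dp->d_mediasize = mediasize;
 *	dp->d_maxsize = maxphys;		// else g_disk_access() warns
 *						// and falls back to DFLTPHYS
 *	dp->d_drv1 = sc;			// driver private softc
 *	disk_create(dp, DISK_VERSION);
 *
 *	// On detach, disk_destroy() calls disk_gone() itself and then
 *	// schedules g_disk_destroy(); d_gone(), if set, fires once GEOM
 *	// is done with the provider (see g_disk_providergone()).
 *	disk_destroy(dp);
 */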