/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/disk.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/devicestat.h>
#include <sys/sdt.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/conf.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/atomic.h>

SDT_PROVIDER_DEFINE(io);

SDT_PROBE_DEFINE2(io, , , start, "struct bio *", "struct devstat *");
SDT_PROBE_DEFINE2(io, , , done, "struct bio *", "struct devstat *");
SDT_PROBE_DEFINE2(io, , , wait__start, "struct bio *", "struct devstat *");
SDT_PROBE_DEFINE2(io, , , wait__done, "struct bio *", "struct devstat *");

#define DTRACE_DEVSTAT_START()		SDT_PROBE2(io, , , start, NULL, ds)
#define DTRACE_DEVSTAT_BIO_START()	SDT_PROBE2(io, , , start, bp, ds)
#define DTRACE_DEVSTAT_DONE()		SDT_PROBE2(io, , , done, NULL, ds)
#define DTRACE_DEVSTAT_BIO_DONE()	SDT_PROBE2(io, , , done, bp, ds)
#define DTRACE_DEVSTAT_WAIT_START()	SDT_PROBE2(io, , , wait__start, NULL, ds)
#define DTRACE_DEVSTAT_WAIT_DONE()	SDT_PROBE2(io, , , wait__done, NULL, ds)

static int devstat_num_devs;
static long devstat_generation = 1;
static int devstat_version = DEVSTAT_VERSION;
static int devstat_current_devnumber;
static struct mtx devstat_mutex;
MTX_SYSINIT(devstat_mutex, &devstat_mutex, "devstat", MTX_DEF);

static struct devstatlist device_statq = STAILQ_HEAD_INITIALIZER(device_statq);
static struct devstat *devstat_alloc(void);
static void devstat_free(struct devstat *);
static void devstat_add_entry(struct devstat *ds, const void *dev_name,
    int unit_number, uint32_t block_size,
    devstat_support_flags flags,
    devstat_type_flags device_type,
    devstat_priority priority);

/*
 * Allocate a devstat and initialize it.
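 *
 * A hedged usage sketch (the softc field and device name are
 * hypothetical): a disk driver would typically call this once at
 * attach time, e.g.
 *
 *	sc->sc_devstat = devstat_new_entry("mydisk", unit, DEV_BSIZE,
 *	    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT,
 *	    DEVSTAT_PRIORITY_DISK);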
 */
struct devstat *
devstat_new_entry(const void *dev_name,
    int unit_number, uint32_t block_size,
    devstat_support_flags flags,
    devstat_type_flags device_type,
    devstat_priority priority)
{
	struct devstat *ds;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);

	ds = devstat_alloc();
	mtx_lock(&devstat_mutex);
	if (unit_number == -1) {
		ds->unit_number = unit_number;
		ds->id = dev_name;
		binuptime(&ds->creation_time);
		devstat_generation++;
	} else {
		devstat_add_entry(ds, dev_name, unit_number, block_size,
		    flags, device_type, priority);
	}
	mtx_unlock(&devstat_mutex);
	return (ds);
}

/*
 * Take a malloced and zeroed devstat structure given to us, fill it in
 * and add it to the queue of devices.
 */
static void
devstat_add_entry(struct devstat *ds, const void *dev_name,
    int unit_number, uint32_t block_size,
    devstat_support_flags flags,
    devstat_type_flags device_type,
    devstat_priority priority)
{
	struct devstatlist *devstat_head;
	struct devstat *ds_tmp;

	mtx_assert(&devstat_mutex, MA_OWNED);
	devstat_num_devs++;

	devstat_head = &device_statq;

	/*
	 * Priority sort.  Each driver passes in its priority when it adds
	 * its devstat entry.  Drivers are sorted first by priority, and
	 * then by probe order.
	 *
	 * For the first device, we just insert it, since the priority
	 * doesn't really matter yet.  Subsequent devices are inserted into
	 * the list using the order outlined above.
	 */
	if (devstat_num_devs == 1)
		STAILQ_INSERT_TAIL(devstat_head, ds, dev_links);
	else {
		STAILQ_FOREACH(ds_tmp, devstat_head, dev_links) {
			struct devstat *ds_next;

			ds_next = STAILQ_NEXT(ds_tmp, dev_links);

			/*
			 * If we find a break between higher and lower
			 * priority items, and if this item fits in the
			 * break, insert it.  This also applies if the
			 * "lower priority item" is the end of the list.
			 */
			if ((priority <= ds_tmp->priority)
			    && ((ds_next == NULL)
			    || (priority > ds_next->priority))) {
				STAILQ_INSERT_AFTER(devstat_head, ds_tmp, ds,
				    dev_links);
				break;
			} else if (priority > ds_tmp->priority) {
				/*
				 * If this is the case, we should be able
				 * to insert ourselves at the head of the
				 * list.  If we can't, something is wrong.
				 */
				if (ds_tmp == STAILQ_FIRST(devstat_head)) {
					STAILQ_INSERT_HEAD(devstat_head,
					    ds, dev_links);
					break;
				} else {
					STAILQ_INSERT_TAIL(devstat_head,
					    ds, dev_links);
					printf("devstat_add_entry: HELP! "
					    "sorting problem detected "
					    "for name %p unit %d\n",
					    dev_name, unit_number);
					break;
				}
			}
		}
	}

	ds->device_number = devstat_current_devnumber++;
	ds->unit_number = unit_number;
	strlcpy(ds->device_name, dev_name, DEVSTAT_NAME_LEN);
	ds->block_size = block_size;
	ds->flags = flags;
	ds->device_type = device_type;
	ds->priority = priority;
	binuptime(&ds->creation_time);
	devstat_generation++;
}

/*
 * Remove a devstat structure from the list of devices.
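 *
 * A hedged sketch mirroring the attach-time example above (softc field
 * hypothetical): a driver calls this at detach time, e.g.
 *
 *	devstat_remove_entry(sc->sc_devstat);
 *	sc->sc_devstat = NULL;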
 */
void
devstat_remove_entry(struct devstat *ds)
{
	struct devstatlist *devstat_head;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);
	if (ds == NULL)
		return;

	mtx_lock(&devstat_mutex);

	devstat_head = &device_statq;

	/* Remove this entry from the devstat queue */
	atomic_add_acq_int(&ds->sequence1, 1);
	if (ds->unit_number != -1) {
		devstat_num_devs--;
		STAILQ_REMOVE(devstat_head, ds, devstat, dev_links);
	}
	devstat_free(ds);
	devstat_generation++;
	mtx_unlock(&devstat_mutex);
}

/*
 * Record a transaction start.
 *
 * See comments for devstat_end_transaction().  Ordering is very important
 * here.
 */
void
devstat_start_transaction(struct devstat *ds, const struct bintime *now)
{

	/* sanity check */
	if (ds == NULL)
		return;

	atomic_add_acq_int(&ds->sequence1, 1);
	/*
	 * We only want to set the start time when we are going from idle
	 * to busy.  The start time is really the start of the latest busy
	 * period.
	 */
	if (atomic_fetchadd_int(&ds->start_count, 1) == ds->end_count) {
		if (now != NULL)
			ds->busy_from = *now;
		else
			binuptime(&ds->busy_from);
	}
	atomic_add_rel_int(&ds->sequence0, 1);
	DTRACE_DEVSTAT_START();
}

void
devstat_start_transaction_bio(struct devstat *ds, struct bio *bp)
{

	/* sanity check */
	if (ds == NULL)
		return;

	binuptime(&bp->bio_t0);
	devstat_start_transaction_bio_t0(ds, bp);
}

void
devstat_start_transaction_bio_t0(struct devstat *ds, struct bio *bp)
{

	/* sanity check */
	if (ds == NULL)
		return;

	devstat_start_transaction(ds, &bp->bio_t0);
	DTRACE_DEVSTAT_BIO_START();
}

/*
 * Record the ending of a transaction, and increment the various counters.
 *
 * Ordering in this function, and in devstat_start_transaction(), is VERY
 * important.  The idea here is to run without locks, so we are very
 * careful to only modify some fields on the way "down" (i.e. at
 * transaction start) and some fields on the way "up" (i.e. at transaction
 * completion).  One exception is busy_from, which we only modify in
 * devstat_start_transaction() when there are no outstanding transactions,
 * and thus it can't be modified in devstat_end_transaction()
 * simultaneously.
 *
 * The sequence0 and sequence1 fields are provided to enable an application
 * spying on the structures with mmap(2) to tell when a structure is in a
 * consistent state or not.
 *
 * For this to work 100% reliably, it is important that the two fields
 * are at opposite ends of the structure and that they are incremented
 * in the opposite order of how a memcpy(3) in userland would copy them.
 * We assume that the copying happens front to back, but there is actually
 * no way short of writing your own memcpy(3) replacement to guarantee
 * this will be the case.
 *
 * In addition to this, being a kind of lock, they must be updated with
 * atomic instructions using appropriate memory barriers.
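 *
 * A hedged sketch of the userland check this enables (illustrative,
 * not code from this file; "mapped_ds" is a hypothetical pointer into
 * the mmap(2)'ed region): copy the structure, then retry if an update
 * was in flight while the copy was taken:
 *
 *	struct devstat snap;
 *
 *	do {
 *		memcpy(&snap, mapped_ds, sizeof(snap));
 *	} while (snap.sequence0 != snap.sequence1);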
 */
void
devstat_end_transaction(struct devstat *ds, uint32_t bytes,
    devstat_tag_type tag_type, devstat_trans_flags flags,
    const struct bintime *now, const struct bintime *then)
{
	struct bintime dt, lnow;

	/* sanity check */
	if (ds == NULL)
		return;

	if (now == NULL) {
		binuptime(&lnow);
		now = &lnow;
	}

	atomic_add_acq_int(&ds->sequence1, 1);
	/* Update byte and operations counts */
	ds->bytes[flags] += bytes;
	ds->operations[flags]++;

	/*
	 * Keep a count of the various tag types sent.
	 */
	if ((ds->flags & DEVSTAT_NO_ORDERED_TAGS) == 0 &&
	    tag_type != DEVSTAT_TAG_NONE)
		ds->tag_types[tag_type]++;

	if (then != NULL) {
		/* Update duration of operations */
		dt = *now;
		bintime_sub(&dt, then);
		bintime_add(&ds->duration[flags], &dt);
	}

	/* Accumulate busy time */
	dt = *now;
	bintime_sub(&dt, &ds->busy_from);
	bintime_add(&ds->busy_time, &dt);
	ds->busy_from = *now;

	ds->end_count++;
	atomic_add_rel_int(&ds->sequence0, 1);
	DTRACE_DEVSTAT_DONE();
}

void
devstat_end_transaction_bio(struct devstat *ds, const struct bio *bp)
{

	devstat_end_transaction_bio_bt(ds, bp, NULL);
}

void
devstat_end_transaction_bio_bt(struct devstat *ds, const struct bio *bp,
    const struct bintime *now)
{
	devstat_trans_flags flg;
	devstat_tag_type tag;

	/* sanity check */
	if (ds == NULL)
		return;

	if (bp->bio_flags & BIO_ORDERED)
		tag = DEVSTAT_TAG_ORDERED;
	else
		tag = DEVSTAT_TAG_SIMPLE;
	if (bp->bio_cmd == BIO_DELETE)
		flg = DEVSTAT_FREE;
	else if ((bp->bio_cmd == BIO_READ)
	    || ((bp->bio_cmd == BIO_ZONE)
	    && (bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES)))
		flg = DEVSTAT_READ;
	else if (bp->bio_cmd == BIO_WRITE)
		flg = DEVSTAT_WRITE;
	else
		flg = DEVSTAT_NO_DATA;

	devstat_end_transaction(ds, bp->bio_bcount - bp->bio_resid,
	    tag, flg, now, &bp->bio_t0);
	DTRACE_DEVSTAT_BIO_DONE();
}
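
/*
 * A hedged sketch of how a driver brackets a single bio with the calls
 * above (softc field hypothetical; the hardware step is elided):
 *
 *	devstat_start_transaction_bio(sc->sc_devstat, bp);
 *	... the transfer runs; bp->bio_resid is updated on completion ...
 *	devstat_end_transaction_bio(sc->sc_devstat, bp);
 */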

/*
 * This is the sysctl handler for the devstat package.  The data pushed out
 * on the kern.devstat.all sysctl variable consists of the current devstat
 * generation number, and then an array of devstat structures, one for each
 * device in the system.
 *
 * This is more cryptic than obvious, but basically we neither can nor
 * want to hold the devstat_mutex for any amount of time, so we grab it
 * only when we need to and keep an eye on devstat_generation all the time.
 */
static int
sysctl_devstat(SYSCTL_HANDLER_ARGS)
{
	int error;
	long mygen;
	struct devstat *nds;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);

	/*
	 * XXX devstat_generation should really be "volatile" but that
	 * XXX freaks out the sysctl macro below.  The places where we
	 * XXX change it and inspect it are bracketed in the mutex which
	 * XXX guarantees us proper write barriers.  I don't believe the
	 * XXX compiler is allowed to optimize mygen away across calls
	 * XXX to other functions, so the following is believed to be safe.
	 */
	mygen = devstat_generation;

	error = SYSCTL_OUT(req, &mygen, sizeof(mygen));

	if (devstat_num_devs == 0)
		return (0);

	if (error != 0)
		return (error);

	mtx_lock(&devstat_mutex);
	nds = STAILQ_FIRST(&device_statq);
	if (mygen != devstat_generation)
		error = EBUSY;
	mtx_unlock(&devstat_mutex);

	if (error != 0)
		return (error);

	while (nds != NULL) {
		error = SYSCTL_OUT(req, nds, sizeof(struct devstat));
		if (error != 0)
			return (error);
		mtx_lock(&devstat_mutex);
		if (mygen != devstat_generation)
			error = EBUSY;
		else
			nds = STAILQ_NEXT(nds, dev_links);
		mtx_unlock(&devstat_mutex);
		if (error != 0)
			return (error);
	}
	return (error);
}

/*
 * Sysctl entries for devstat.  The first one is a node that all the rest
 * hang off of.
 */
static SYSCTL_NODE(_kern, OID_AUTO, devstat, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "Device Statistics");

SYSCTL_PROC(_kern_devstat, OID_AUTO, all,
    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_devstat, "S,devstat",
    "All devices in the devstat list");

/*
 * Export the number of devices in the system so that userland utilities
 * can determine how much memory to allocate to hold all the devices.
 */
SYSCTL_INT(_kern_devstat, OID_AUTO, numdevs, CTLFLAG_RD,
    &devstat_num_devs, 0, "Number of devices in the devstat list");
SYSCTL_LONG(_kern_devstat, OID_AUTO, generation, CTLFLAG_RD,
    &devstat_generation, 0, "Devstat list generation");
SYSCTL_INT(_kern_devstat, OID_AUTO, version, CTLFLAG_RD,
    &devstat_version, 0, "Devstat list version number");
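
/*
 * A hedged sketch of the userland side of the handler above
 * (illustrative; real consumers live in libdevstat): when the
 * generation changes mid-copy the handler returns EBUSY, so readers
 * are expected to retry, e.g.
 *
 *	while (sysctlbyname("kern.devstat.all", buf, &len, NULL, 0) == -1
 *	    && errno == EBUSY)
 *		;
 */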

/*
 * Allocator for struct devstat structures.  We sub-allocate these from pages
 * which we get from malloc.  These pages are exported for mmap(2)'ing through
 * a miniature device driver.
 */

#define statsperpage (PAGE_SIZE / sizeof(struct devstat))

static d_ioctl_t devstat_ioctl;
static d_mmap_t devstat_mmap;

static struct cdevsw devstat_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	devstat_ioctl,
	.d_mmap =	devstat_mmap,
	.d_name =	"devstat",
};

struct statspage {
	TAILQ_ENTRY(statspage)	list;
	struct devstat		*stat;
	u_int			nfree;
};

static size_t pagelist_pages = 0;
static TAILQ_HEAD(, statspage)	pagelist = TAILQ_HEAD_INITIALIZER(pagelist);
static MALLOC_DEFINE(M_DEVSTAT, "devstat", "Device statistics");

static int
devstat_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	int error = ENOTTY;

	switch (cmd) {
	case DIOCGMEDIASIZE:
		error = 0;
		*(off_t *)data = pagelist_pages * PAGE_SIZE;
		break;
	}

	return (error);
}

static int
devstat_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int nprot, vm_memattr_t *memattr)
{
	struct statspage *spp;

	if (nprot != VM_PROT_READ)
		return (-1);
	mtx_lock(&devstat_mutex);
	TAILQ_FOREACH(spp, &pagelist, list) {
		if (offset == 0) {
			*paddr = vtophys(spp->stat);
			mtx_unlock(&devstat_mutex);
			return (0);
		}
		offset -= PAGE_SIZE;
	}
	mtx_unlock(&devstat_mutex);
	return (-1);
}

static struct devstat *
devstat_alloc(void)
{
	struct devstat *dsp;
	struct statspage *spp, *spp2;
	u_int u;
	static int once;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);
	if (!once) {
		make_dev_credf(MAKEDEV_ETERNAL | MAKEDEV_CHECKNAME,
		    &devstat_cdevsw, 0, NULL, UID_ROOT, GID_WHEEL, 0444,
		    DEVSTAT_DEVICE_NAME);
		once = 1;
	}
	spp2 = NULL;
	mtx_lock(&devstat_mutex);
	for (;;) {
		TAILQ_FOREACH(spp, &pagelist, list) {
			if (spp->nfree > 0)
				break;
		}
		if (spp != NULL)
			break;
		mtx_unlock(&devstat_mutex);
		spp2 = malloc(sizeof *spp, M_DEVSTAT, M_ZERO | M_WAITOK);
		spp2->stat = malloc(PAGE_SIZE, M_DEVSTAT, M_ZERO | M_WAITOK);
		spp2->nfree = statsperpage;

		/*
		 * If free statspages were added while the lock was released
		 * just reuse them.
		 */
		mtx_lock(&devstat_mutex);
		TAILQ_FOREACH(spp, &pagelist, list)
			if (spp->nfree > 0)
				break;
		if (spp == NULL) {
			spp = spp2;

			/*
			 * It would make more sense to add the new page at
			 * the head, but the order on the list determines
			 * the sequence of the mapping, so we can't do that.
			 */
			pagelist_pages++;
			TAILQ_INSERT_TAIL(&pagelist, spp, list);
		} else
			break;
	}
	dsp = spp->stat;
	for (u = 0; u < statsperpage; u++) {
		if (dsp->allocated == 0)
			break;
		dsp++;
	}
	spp->nfree--;
	dsp->allocated = 1;
	mtx_unlock(&devstat_mutex);
	if (spp2 != NULL && spp2 != spp) {
		free(spp2->stat, M_DEVSTAT);
		free(spp2, M_DEVSTAT);
	}
	return (dsp);
}

static void
devstat_free(struct devstat *dsp)
{
	struct statspage *spp;

	mtx_assert(&devstat_mutex, MA_OWNED);
	bzero(dsp, sizeof *dsp);
	TAILQ_FOREACH(spp, &pagelist, list) {
		if (dsp >= spp->stat && dsp < (spp->stat + statsperpage)) {
			spp->nfree++;
			return;
		}
	}
}

SYSCTL_INT(_debug_sizeof, OID_AUTO, devstat, CTLFLAG_RD,
    SYSCTL_NULL_INT_PTR, sizeof(struct devstat), "sizeof(struct devstat)");
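
/*
 * A hedged sketch of the mmap(2) consumer side (illustrative; the real
 * consumer is libdevstat): the pages handed out by devstat_mmap() above
 * are mapped read-only from the devstat device, sized via the
 * DIOCGMEDIASIZE ioctl implemented in devstat_ioctl():
 *
 *	fd = open("/dev/devstat", O_RDONLY);
 *	ioctl(fd, DIOCGMEDIASIZE, &mediasize);
 *	base = mmap(NULL, mediasize, PROT_READ, MAP_SHARED, fd, 0);
 */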