/*-
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/devicestat.h>
#include <sys/sdt.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/conf.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/atomic.h>

SDT_PROVIDER_DEFINE(io);

SDT_PROBE_DEFINE2(io, , , start, "struct bio *", "struct devstat *");
SDT_PROBE_DEFINE2(io, , , done, "struct bio *", "struct devstat *");
SDT_PROBE_DEFINE2(io, , , wait__start, "struct bio *",
    "struct devstat *");
SDT_PROBE_DEFINE2(io, , , wait__done, "struct bio *",
    "struct devstat *");

#define DTRACE_DEVSTAT_START()          SDT_PROBE2(io, , , start, NULL, ds)
#define DTRACE_DEVSTAT_BIO_START()      SDT_PROBE2(io, , , start, bp, ds)
#define DTRACE_DEVSTAT_DONE()           SDT_PROBE2(io, , , done, NULL, ds)
#define DTRACE_DEVSTAT_BIO_DONE()       SDT_PROBE2(io, , , done, bp, ds)
#define DTRACE_DEVSTAT_WAIT_START()     SDT_PROBE2(io, , , wait__start, NULL, ds)
#define DTRACE_DEVSTAT_WAIT_DONE()      SDT_PROBE2(io, , , wait__done, NULL, ds)
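/*
 * For illustration only: the probes above appear in userland as the
 * DTrace "io" provider, so a one-liner such as the following (not part
 * of this file) counts transaction starts per process:
 *
 *      dtrace -n 'io:::start { @starts[execname] = count(); }'
 */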
static int devstat_num_devs;
static long devstat_generation = 1;
static int devstat_version = DEVSTAT_VERSION;
static int devstat_current_devnumber;
static struct mtx devstat_mutex;
MTX_SYSINIT(devstat_mutex, &devstat_mutex, "devstat", MTX_DEF);

static struct devstatlist device_statq = STAILQ_HEAD_INITIALIZER(device_statq);
static struct devstat *devstat_alloc(void);
static void devstat_free(struct devstat *);
static void devstat_add_entry(struct devstat *ds, const void *dev_name,
            int unit_number, uint32_t block_size,
            devstat_support_flags flags,
            devstat_type_flags device_type,
            devstat_priority priority);

/*
 * Allocate a devstat and initialize it
 */
struct devstat *
devstat_new_entry(const void *dev_name,
                  int unit_number, uint32_t block_size,
                  devstat_support_flags flags,
                  devstat_type_flags device_type,
                  devstat_priority priority)
{
        struct devstat *ds;

        mtx_assert(&devstat_mutex, MA_NOTOWNED);

        ds = devstat_alloc();
        mtx_lock(&devstat_mutex);
        if (unit_number == -1) {
                ds->unit_number = unit_number;
                ds->id = dev_name;
                binuptime(&ds->creation_time);
                devstat_generation++;
        } else {
                devstat_add_entry(ds, dev_name, unit_number, block_size,
                                  flags, device_type, priority);
        }
        mtx_unlock(&devstat_mutex);
        return (ds);
}

/*
 * Take a malloced and zeroed devstat structure given to us, fill it in
 * and add it to the queue of devices.
 */
static void
devstat_add_entry(struct devstat *ds, const void *dev_name,
                  int unit_number, uint32_t block_size,
                  devstat_support_flags flags,
                  devstat_type_flags device_type,
                  devstat_priority priority)
{
        struct devstatlist *devstat_head;
        struct devstat *ds_tmp;

        mtx_assert(&devstat_mutex, MA_OWNED);
        devstat_num_devs++;

        devstat_head = &device_statq;

        /*
         * Priority sort.  Each driver passes in its priority when it adds
         * its devstat entry.  Drivers are sorted first by priority, and
         * then by probe order.
         *
         * For the first device, we just insert it, since the priority
         * doesn't really matter yet.  Subsequent devices are inserted into
         * the list using the order outlined above.
         */
        if (devstat_num_devs == 1)
                STAILQ_INSERT_TAIL(devstat_head, ds, dev_links);
        else {
                STAILQ_FOREACH(ds_tmp, devstat_head, dev_links) {
                        struct devstat *ds_next;

                        ds_next = STAILQ_NEXT(ds_tmp, dev_links);

                        /*
                         * If we find a break between higher and lower
                         * priority items, and if this item fits in the
                         * break, insert it.  This also applies if the
                         * "lower priority item" is the end of the list.
                         */
                        if ((priority <= ds_tmp->priority)
                         && ((ds_next == NULL)
                           || (priority > ds_next->priority))) {
                                STAILQ_INSERT_AFTER(devstat_head, ds_tmp, ds,
                                                    dev_links);
                                break;
                        } else if (priority > ds_tmp->priority) {
                                /*
                                 * If this is the case, we should be able
                                 * to insert ourselves at the head of the
                                 * list.  If we can't, something is wrong.
                                 */
                                if (ds_tmp == STAILQ_FIRST(devstat_head)) {
                                        STAILQ_INSERT_HEAD(devstat_head,
                                                           ds, dev_links);
                                        break;
                                } else {
                                        STAILQ_INSERT_TAIL(devstat_head,
                                                           ds, dev_links);
                                        printf("devstat_add_entry: HELP! "
                                               "sorting problem detected "
                                               "for name %p unit %d\n",
                                               dev_name, unit_number);
                                        break;
                                }
                        }
                }
        }

        ds->device_number = devstat_current_devnumber++;
        ds->unit_number = unit_number;
        strlcpy(ds->device_name, dev_name, DEVSTAT_NAME_LEN);
        ds->block_size = block_size;
        ds->flags = flags;
        ds->device_type = device_type;
        ds->priority = priority;
        binuptime(&ds->creation_time);
        devstat_generation++;
}
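/*
 * An example of the resulting order, for illustration: since
 * DEVSTAT_PRIORITY_DISK is numerically greater than DEVSTAT_PRIORITY_CD
 * (see the devstat_priority enum in <sys/devicestat.h>), disk entries
 * sort ahead of CD-ROM entries, and two disks added in probe order
 * da0, da1 keep that relative order within the disk group.
 */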
/*
 * Remove a devstat structure from the list of devices.
 */
void
devstat_remove_entry(struct devstat *ds)
{
        struct devstatlist *devstat_head;

        mtx_assert(&devstat_mutex, MA_NOTOWNED);
        if (ds == NULL)
                return;

        mtx_lock(&devstat_mutex);

        devstat_head = &device_statq;

        /* Remove this entry from the devstat queue */
        atomic_add_acq_int(&ds->sequence1, 1);
        if (ds->unit_number != -1) {
                devstat_num_devs--;
                STAILQ_REMOVE(devstat_head, ds, devstat, dev_links);
        }
        devstat_free(ds);
        devstat_generation++;
        mtx_unlock(&devstat_mutex);
}

/*
 * Record a transaction start.
 *
 * See comments for devstat_end_transaction().  Ordering is very important
 * here.
 */
void
devstat_start_transaction(struct devstat *ds, struct bintime *now)
{

        mtx_assert(&devstat_mutex, MA_NOTOWNED);

        /* sanity check */
        if (ds == NULL)
                return;

        atomic_add_acq_int(&ds->sequence1, 1);
        /*
         * We only want to set the start time when we are going from idle
         * to busy.  The start time is really the start of the latest busy
         * period.
         */
        if (ds->start_count == ds->end_count) {
                if (now != NULL)
                        ds->busy_from = *now;
                else
                        binuptime(&ds->busy_from);
        }
        ds->start_count++;
        atomic_add_rel_int(&ds->sequence0, 1);
        DTRACE_DEVSTAT_START();
}

void
devstat_start_transaction_bio(struct devstat *ds, struct bio *bp)
{

        mtx_assert(&devstat_mutex, MA_NOTOWNED);

        /* sanity check */
        if (ds == NULL)
                return;

        binuptime(&bp->bio_t0);
        devstat_start_transaction(ds, &bp->bio_t0);
        DTRACE_DEVSTAT_BIO_START();
}

/*
 * Record the ending of a transaction, and increment the various counters.
 *
 * Ordering in this function, and in devstat_start_transaction() is VERY
 * important.  The idea here is to run without locks, so we are very
 * careful to only modify some fields on the way "down" (i.e. at
 * transaction start) and some fields on the way "up" (i.e. at transaction
 * completion).  One exception is busy_from, which we only modify in
 * devstat_start_transaction() when there are no outstanding transactions,
 * and thus it can't be modified in devstat_end_transaction()
 * simultaneously.
 *
 * The sequence0 and sequence1 fields are provided to enable an application
 * spying on the structures with mmap(2) to tell when a structure is in a
 * consistent state or not.
 *
 * For this to work 100% reliably, it is important that the two fields
 * are at opposite ends of the structure and that they are incremented
 * in the opposite order of how a memcpy(3) in userland would copy them.
 * We assume that the copying happens front to back, but there is actually
 * no way short of writing your own memcpy(3) replacement to guarantee
 * this will be the case.
 *
 * In addition to this, since they act as a kind of lock, they must be
 * updated with atomic instructions using appropriate memory barriers.
 */
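/*
 * For illustration, a userland consumer following the discipline laid
 * out above might take a consistent snapshot of a mapped entry like
 * this (a sketch under the stated front-to-back memcpy(3) assumption,
 * not part of the kernel; "sp" is assumed to point into a page mapped
 * read-only from the devstat device):
 *
 *      struct devstat snap;
 *
 *      do {
 *              memcpy(&snap, sp, sizeof(snap));
 *      } while (snap.sequence0 != snap.sequence1);
 */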
void
devstat_end_transaction(struct devstat *ds, uint32_t bytes,
                        devstat_tag_type tag_type, devstat_trans_flags flags,
                        struct bintime *now, struct bintime *then)
{
        struct bintime dt, lnow;

        /* sanity check */
        if (ds == NULL)
                return;

        if (now == NULL) {
                now = &lnow;
                binuptime(now);
        }

        atomic_add_acq_int(&ds->sequence1, 1);
        /* Update byte and operations counts */
        ds->bytes[flags] += bytes;
        ds->operations[flags]++;

        /*
         * Keep a count of the various tag types sent.
         */
        if ((ds->flags & DEVSTAT_NO_ORDERED_TAGS) == 0 &&
            tag_type != DEVSTAT_TAG_NONE)
                ds->tag_types[tag_type]++;

        if (then != NULL) {
                /* Update duration of operations */
                dt = *now;
                bintime_sub(&dt, then);
                bintime_add(&ds->duration[flags], &dt);
        }

        /* Accumulate busy time */
        dt = *now;
        bintime_sub(&dt, &ds->busy_from);
        bintime_add(&ds->busy_time, &dt);
        ds->busy_from = *now;

        ds->end_count++;
        atomic_add_rel_int(&ds->sequence0, 1);
        DTRACE_DEVSTAT_DONE();
}

void
devstat_end_transaction_bio(struct devstat *ds, struct bio *bp)
{

        devstat_end_transaction_bio_bt(ds, bp, NULL);
}

void
devstat_end_transaction_bio_bt(struct devstat *ds, struct bio *bp,
    struct bintime *now)
{
        devstat_trans_flags flg;

        /* sanity check */
        if (ds == NULL)
                return;

        if (bp->bio_cmd == BIO_DELETE)
                flg = DEVSTAT_FREE;
        else if (bp->bio_cmd == BIO_READ)
                flg = DEVSTAT_READ;
        else if (bp->bio_cmd == BIO_WRITE)
                flg = DEVSTAT_WRITE;
        else
                flg = DEVSTAT_NO_DATA;

        devstat_end_transaction(ds, bp->bio_bcount - bp->bio_resid,
            DEVSTAT_TAG_SIMPLE, flg, now, &bp->bio_t0);
        DTRACE_DEVSTAT_BIO_DONE();
}
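/*
 * For illustration only: userland consumers such as devstat(3) and
 * iostat(8) reduce these counters by differencing two snapshots taken
 * an interval of "dt" seconds apart.  With hypothetical snapshots
 * "prev" and "cur", "f" a devstat_trans_flags index, and bt2sec()
 * a helper converting a bintime difference to seconds (none of these
 * names are part of this file):
 *
 *      busy_pct = 100.0 * bt2sec(cur.busy_time, prev.busy_time) / dt;
 *      avg_sec  = bt2sec(cur.duration[f], prev.duration[f]) /
 *          (cur.operations[f] - prev.operations[f]);
 */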
/*
 * This is the sysctl handler for the devstat package.  The data pushed out
 * on the kern.devstat.all sysctl variable consists of the current devstat
 * generation number, and then an array of devstat structures, one for each
 * device in the system.
 *
 * This is more cryptic than obvious, but basically we neither can nor
 * want to hold the devstat_mutex for any amount of time, so we grab it
 * only when we need to and keep an eye on devstat_generation all the time.
 */
static int
sysctl_devstat(SYSCTL_HANDLER_ARGS)
{
        int error;
        long mygen;
        struct devstat *nds;

        mtx_assert(&devstat_mutex, MA_NOTOWNED);

        /*
         * XXX devstat_generation should really be "volatile" but that
         * XXX freaks out the sysctl macro below.  The places where we
         * XXX change it and inspect it are bracketed in the mutex which
         * XXX guarantees us proper write barriers.  I don't believe the
         * XXX compiler is allowed to optimize mygen away across calls
         * XXX to other functions, so the following is believed to be safe.
         */
        mygen = devstat_generation;

        error = SYSCTL_OUT(req, &mygen, sizeof(mygen));

        if (devstat_num_devs == 0)
                return (0);

        if (error != 0)
                return (error);

        mtx_lock(&devstat_mutex);
        nds = STAILQ_FIRST(&device_statq);
        if (mygen != devstat_generation)
                error = EBUSY;
        mtx_unlock(&devstat_mutex);

        if (error != 0)
                return (error);

        for (; nds != NULL;) {
                error = SYSCTL_OUT(req, nds, sizeof(struct devstat));
                if (error != 0)
                        return (error);
                mtx_lock(&devstat_mutex);
                if (mygen != devstat_generation)
                        error = EBUSY;
                else
                        nds = STAILQ_NEXT(nds, dev_links);
                mtx_unlock(&devstat_mutex);
                if (error != 0)
                        return (error);
        }
        return (error);
}

/*
 * Sysctl entries for devstat.  The first one is a node that all the rest
 * hang off of.
 */
static SYSCTL_NODE(_kern, OID_AUTO, devstat, CTLFLAG_RD, NULL,
    "Device Statistics");

SYSCTL_PROC(_kern_devstat, OID_AUTO, all, CTLFLAG_RD|CTLTYPE_OPAQUE,
    NULL, 0, sysctl_devstat, "S,devstat", "All devices in the devstat list");
/*
 * Export the number of devices in the system so that userland utilities
 * can determine how much memory to allocate to hold all the devices.
 */
SYSCTL_INT(_kern_devstat, OID_AUTO, numdevs, CTLFLAG_RD,
    &devstat_num_devs, 0, "Number of devices in the devstat list");
SYSCTL_LONG(_kern_devstat, OID_AUTO, generation, CTLFLAG_RD,
    &devstat_generation, 0, "Devstat list generation");
SYSCTL_INT(_kern_devstat, OID_AUTO, version, CTLFLAG_RD,
    &devstat_version, 0, "Devstat list version number");

/*
 * Allocator for struct devstat structures.  We sub-allocate these from pages
 * which we get from malloc.  These pages are exported for mmap(2)'ing through
 * a miniature device driver.
 */

#define statsperpage (PAGE_SIZE / sizeof(struct devstat))

static d_mmap_t devstat_mmap;

static struct cdevsw devstat_cdevsw = {
        .d_version =    D_VERSION,
        .d_mmap =       devstat_mmap,
        .d_name =       "devstat",
};

struct statspage {
        TAILQ_ENTRY(statspage)  list;
        struct devstat          *stat;
        u_int                   nfree;
};

static TAILQ_HEAD(, statspage)  pagelist = TAILQ_HEAD_INITIALIZER(pagelist);
static MALLOC_DEFINE(M_DEVSTAT, "devstat", "Device statistics");

static int
devstat_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int nprot, vm_memattr_t *memattr)
{
        struct statspage *spp;

        if (nprot != VM_PROT_READ)
                return (-1);
        mtx_lock(&devstat_mutex);
        TAILQ_FOREACH(spp, &pagelist, list) {
                if (offset == 0) {
                        *paddr = vtophys(spp->stat);
                        mtx_unlock(&devstat_mutex);
                        return (0);
                }
                offset -= PAGE_SIZE;
        }
        mtx_unlock(&devstat_mutex);
        return (-1);
}
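/*
 * For illustration, the pages exported above can be mapped from
 * userland roughly as follows (a sketch, not part of this file; error
 * handling omitted).  DEVSTAT_DEVICE_NAME names the device node, and
 * kern.devstat.numdevs bounds how many pages are worth mapping:
 *
 *      int fd = open("/dev/devstat", O_RDONLY);
 *      struct devstat *sp = mmap(NULL, PAGE_SIZE, PROT_READ,
 *          MAP_SHARED, fd, 0);
 *
 * Successive pages are reached with offsets PAGE_SIZE, 2 * PAGE_SIZE,
 * and so on, matching the order of the pagelist above.
 */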
static struct devstat *
devstat_alloc(void)
{
        struct devstat *dsp;
        struct statspage *spp, *spp2;
        u_int u;
        static int once;

        mtx_assert(&devstat_mutex, MA_NOTOWNED);
        if (!once) {
                make_dev_credf(MAKEDEV_ETERNAL | MAKEDEV_CHECKNAME,
                    &devstat_cdevsw, 0, NULL, UID_ROOT, GID_WHEEL, 0444,
                    DEVSTAT_DEVICE_NAME);
                once = 1;
        }
        spp2 = NULL;
        mtx_lock(&devstat_mutex);
        for (;;) {
                TAILQ_FOREACH(spp, &pagelist, list) {
                        if (spp->nfree > 0)
                                break;
                }
                if (spp != NULL)
                        break;
                mtx_unlock(&devstat_mutex);
                spp2 = malloc(sizeof *spp, M_DEVSTAT, M_ZERO | M_WAITOK);
                spp2->stat = malloc(PAGE_SIZE, M_DEVSTAT, M_ZERO | M_WAITOK);
                spp2->nfree = statsperpage;

                /*
                 * If free statspages were added while the lock was released
                 * just reuse them.
                 */
                mtx_lock(&devstat_mutex);
                TAILQ_FOREACH(spp, &pagelist, list)
                        if (spp->nfree > 0)
                                break;
                if (spp == NULL) {
                        spp = spp2;

                        /*
                         * It would make more sense to add the new page at
                         * the head, but the order on the list determines
                         * the sequence of the mapping, so we can't do that.
                         */
                        TAILQ_INSERT_TAIL(&pagelist, spp, list);
                } else
                        break;
        }
        dsp = spp->stat;
        for (u = 0; u < statsperpage; u++) {
                if (dsp->allocated == 0)
                        break;
                dsp++;
        }
        spp->nfree--;
        dsp->allocated = 1;
        mtx_unlock(&devstat_mutex);
        if (spp2 != NULL && spp2 != spp) {
                free(spp2->stat, M_DEVSTAT);
                free(spp2, M_DEVSTAT);
        }
        return (dsp);
}

static void
devstat_free(struct devstat *dsp)
{
        struct statspage *spp;

        mtx_assert(&devstat_mutex, MA_OWNED);
        bzero(dsp, sizeof *dsp);
        TAILQ_FOREACH(spp, &pagelist, list) {
                if (dsp >= spp->stat && dsp < (spp->stat + statsperpage)) {
                        spp->nfree++;
                        return;
                }
        }
}

SYSCTL_INT(_debug_sizeof, OID_AUTO, devstat, CTLFLAG_RD,
    SYSCTL_NULL_INT_PTR, sizeof(struct devstat), "sizeof(struct devstat)");