/*-
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/devicestat.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/conf.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/atomic.h>

static int devstat_num_devs;
static long devstat_generation = 1;
static int devstat_version = DEVSTAT_VERSION;
static int devstat_current_devnumber;
static struct mtx devstat_mutex;

static struct devstatlist device_statq;
static struct devstat *devstat_alloc(void);
static void devstat_free(struct devstat *);
static void devstat_add_entry(struct devstat *ds, const void *dev_name,
	    int unit_number, u_int32_t block_size,
	    devstat_support_flags flags,
	    devstat_type_flags device_type,
	    devstat_priority priority);

/*
 * Allocate a devstat and initialize it
 */
struct devstat *
devstat_new_entry(const void *dev_name,
		  int unit_number, u_int32_t block_size,
		  devstat_support_flags flags,
		  devstat_type_flags device_type,
		  devstat_priority priority)
{
	struct devstat *ds;
	static int once;

	if (!once) {
		STAILQ_INIT(&device_statq);
		mtx_init(&devstat_mutex, "devstat", NULL, MTX_DEF);
		once = 1;
	}
	mtx_assert(&devstat_mutex, MA_NOTOWNED);

	ds = devstat_alloc();
	mtx_lock(&devstat_mutex);
	if (unit_number == -1) {
		ds->id = dev_name;
		binuptime(&ds->creation_time);
		devstat_generation++;
	} else {
		devstat_add_entry(ds, dev_name, unit_number, block_size,
				  flags, device_type, priority);
	}
	mtx_unlock(&devstat_mutex);
	return (ds);
}
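
/*
 * Illustrative sketch (not part of this file): the typical lifecycle of a
 * devstat entry in a hypothetical "xx" disk driver.  The xx_* names and
 * softc layout are assumptions for illustration only; the devstat_* calls
 * are the API defined in this file, and the DEVSTAT_* constants come from
 * <sys/devstat.h>.
 *
 *	static void
 *	xx_attach(struct xx_softc *sc)
 *	{
 *		sc->stats = devstat_new_entry("xx", sc->unit, DEV_BSIZE,
 *		    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT,
 *		    DEVSTAT_PRIORITY_DISK);
 *	}
 *
 *	static void
 *	xx_strategy(struct xx_softc *sc, struct bio *bp)
 *	{
 *		devstat_start_transaction_bio(sc->stats, bp);
 *		xx_hw_submit(sc, bp);
 *	}
 *
 * When the hardware completes the I/O, the driver calls
 * devstat_end_transaction_bio(sc->stats, bp), and on detach it calls
 * devstat_remove_entry(sc->stats); both are defined later in this file.
 */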

/*
 * Take a malloced and zeroed devstat structure given to us, fill it in
 * and add it to the queue of devices.
 */
static void
devstat_add_entry(struct devstat *ds, const void *dev_name,
		  int unit_number, u_int32_t block_size,
		  devstat_support_flags flags,
		  devstat_type_flags device_type,
		  devstat_priority priority)
{
	struct devstatlist *devstat_head;
	struct devstat *ds_tmp;

	mtx_assert(&devstat_mutex, MA_OWNED);
	devstat_num_devs++;

	devstat_head = &device_statq;

	/*
	 * Priority sort.  Each driver passes in its priority when it adds
	 * its devstat entry.  Drivers are sorted first by priority, and
	 * then by probe order.
	 *
	 * For the first device, we just insert it, since the priority
	 * doesn't really matter yet.  Subsequent devices are inserted into
	 * the list using the order outlined above.
	 */
	if (devstat_num_devs == 1)
		STAILQ_INSERT_TAIL(devstat_head, ds, dev_links);
	else {
		STAILQ_FOREACH(ds_tmp, devstat_head, dev_links) {
			struct devstat *ds_next;

			ds_next = STAILQ_NEXT(ds_tmp, dev_links);

			/*
			 * If we find a break between higher and lower
			 * priority items, and if this item fits in the
			 * break, insert it.  This also applies if the
			 * "lower priority item" is the end of the list.
			 */
			if ((priority <= ds_tmp->priority)
			 && ((ds_next == NULL)
			   || (priority > ds_next->priority))) {
				STAILQ_INSERT_AFTER(devstat_head, ds_tmp, ds,
						    dev_links);
				break;
			} else if (priority > ds_tmp->priority) {
				/*
				 * If this is the case, we should be able
				 * to insert ourselves at the head of the
				 * list.  If we can't, something is wrong.
				 */
				if (ds_tmp == STAILQ_FIRST(devstat_head)) {
					STAILQ_INSERT_HEAD(devstat_head,
							   ds, dev_links);
					break;
				} else {
					STAILQ_INSERT_TAIL(devstat_head,
							   ds, dev_links);
					printf("devstat_add_entry: HELP! "
					       "sorting problem detected "
					       "for name %p unit %d\n",
					       dev_name, unit_number);
					break;
				}
			}
		}
	}

	ds->device_number = devstat_current_devnumber++;
	ds->unit_number = unit_number;
	strlcpy(ds->device_name, dev_name, DEVSTAT_NAME_LEN);
	ds->block_size = block_size;
	ds->flags = flags;
	ds->device_type = device_type;
	ds->priority = priority;
	binuptime(&ds->creation_time);
	devstat_generation++;
}
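
/*
 * Worked example of the priority sort above, assuming the priority values
 * from <sys/devstat.h>, where a numerically higher priority sorts earlier
 * (e.g. DEVSTAT_PRIORITY_DISK > DEVSTAT_PRIORITY_CD > DEVSTAT_PRIORITY_FD >
 * DEVSTAT_PRIORITY_PASS).  If the list currently holds
 *
 *	da0 (DISK) -> cd0 (CD) -> pass0 (PASS)
 *
 * then adding fd0 with DEVSTAT_PRIORITY_FD walks past da0 and cd0 (both
 * higher priority) and lands in the break between cd0 and pass0.  Adding a
 * second disk da1 (DISK) satisfies the break test at da0 (DISK <= DISK and
 * DISK > CD), so it is inserted after da0, preserving probe order among
 * entries of equal priority.
 */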

/*
 * Remove a devstat structure from the list of devices.
 */
void
devstat_remove_entry(struct devstat *ds)
{
	struct devstatlist *devstat_head;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);
	if (ds == NULL)
		return;

	mtx_lock(&devstat_mutex);

	devstat_head = &device_statq;

	/* Remove this entry from the devstat queue */
	atomic_add_acq_int(&ds->sequence1, 1);
	if (ds->id == NULL) {
		devstat_num_devs--;
		STAILQ_REMOVE(devstat_head, ds, devstat, dev_links);
	}
	devstat_free(ds);
	devstat_generation++;
	mtx_unlock(&devstat_mutex);
}

/*
 * Record a transaction start.
 *
 * See comments for devstat_end_transaction().  Ordering is very important
 * here.
 */
void
devstat_start_transaction(struct devstat *ds, struct bintime *now)
{

	mtx_assert(&devstat_mutex, MA_NOTOWNED);

	/* sanity check */
	if (ds == NULL)
		return;

	atomic_add_acq_int(&ds->sequence1, 1);
	/*
	 * We only want to set the start time when we are going from idle
	 * to busy.  The start time is really the start of the latest busy
	 * period.
	 */
	if (ds->start_count == ds->end_count) {
		if (now != NULL)
			ds->busy_from = *now;
		else
			binuptime(&ds->busy_from);
	}
	ds->start_count++;
	atomic_add_rel_int(&ds->sequence0, 1);
}

void
devstat_start_transaction_bio(struct devstat *ds, struct bio *bp)
{

	mtx_assert(&devstat_mutex, MA_NOTOWNED);

	/* sanity check */
	if (ds == NULL)
		return;

	binuptime(&bp->bio_t0);
	devstat_start_transaction(ds, &bp->bio_t0);
}

/*
 * Record the ending of a transaction, and increment the various counters.
 *
 * Ordering in this function, and in devstat_start_transaction() is VERY
 * important.  The idea here is to run without locks, so we are very
 * careful to only modify some fields on the way "down" (i.e. at
 * transaction start) and some fields on the way "up" (i.e. at transaction
 * completion).  One exception is busy_from, which we only modify in
 * devstat_start_transaction() when there are no outstanding transactions,
 * and thus it can't be modified in devstat_end_transaction()
 * simultaneously.
 *
 * The sequence0 and sequence1 fields are provided to enable an application
 * spying on the structures with mmap(2) to tell when a structure is in a
 * consistent state or not.
 *
 * For this to work 100% reliably, it is important that the two fields
 * are at opposite ends of the structure and that they are incremented
 * in the opposite order of how a memcpy(3) in userland would copy them.
 * We assume that the copying happens front to back, but there is actually
 * no way short of writing your own memcpy(3) replacement to guarantee
 * this will be the case.
 *
 * In addition to this, since they act as a kind of lock, they must be
 * updated with atomic instructions using appropriate memory barriers.
 */
void
devstat_end_transaction(struct devstat *ds, u_int32_t bytes,
			devstat_tag_type tag_type, devstat_trans_flags flags,
			struct bintime *now, struct bintime *then)
{
	struct bintime dt, lnow;

	/* sanity check */
	if (ds == NULL)
		return;

	if (now == NULL) {
		now = &lnow;
		binuptime(now);
	}

	atomic_add_acq_int(&ds->sequence1, 1);
	/* Update byte and operations counts */
	ds->bytes[flags] += bytes;
	ds->operations[flags]++;

	/*
	 * Keep a count of the various tag types sent.
	 */
	if ((ds->flags & DEVSTAT_NO_ORDERED_TAGS) == 0 &&
	    tag_type != DEVSTAT_TAG_NONE)
		ds->tag_types[tag_type]++;

	if (then != NULL) {
		/* Update duration of operations */
		dt = *now;
		bintime_sub(&dt, then);
		bintime_add(&ds->duration[flags], &dt);
	}

	/* Accumulate busy time */
	dt = *now;
	bintime_sub(&dt, &ds->busy_from);
	bintime_add(&ds->busy_time, &dt);
	ds->busy_from = *now;

	ds->end_count++;
	atomic_add_rel_int(&ds->sequence0, 1);
}

void
devstat_end_transaction_bio(struct devstat *ds, struct bio *bp)
{
	devstat_trans_flags flg;

	/* sanity check */
	if (ds == NULL)
		return;

	if (bp->bio_cmd == BIO_DELETE)
		flg = DEVSTAT_FREE;
	else if (bp->bio_cmd == BIO_READ)
		flg = DEVSTAT_READ;
	else if (bp->bio_cmd == BIO_WRITE)
		flg = DEVSTAT_WRITE;
	else
		flg = DEVSTAT_NO_DATA;

	devstat_end_transaction(ds, bp->bio_bcount - bp->bio_resid,
				DEVSTAT_TAG_SIMPLE, flg, NULL, &bp->bio_t0);
}
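
/*
 * Illustrative userland sketch (an assumption, not code shipped with this
 * file): how a program watching a devstat structure through mmap(2) can use
 * the sequence0/sequence1 protocol described above devstat_end_transaction()
 * to take a consistent snapshot.  "mapped" is a hypothetical pointer into
 * the mapped statistics page:
 *
 *	struct devstat snap;
 *
 *	do {
 *		memcpy(&snap, mapped, sizeof(snap));
 *	} while (snap.sequence0 != snap.sequence1);
 *
 * A copy taken while the kernel was between its sequence1 and sequence0
 * updates will have mismatched fields, and the copy is simply retried.
 */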

/*
 * This is the sysctl handler for the devstat package.  The data pushed out
 * on the kern.devstat.all sysctl variable consists of the current devstat
 * generation number, and then an array of devstat structures, one for each
 * device in the system.
 *
 * This is more cryptic than obvious, but basically we neither can nor
 * want to hold the devstat_mutex for any amount of time, so we grab it
 * only when we need to and keep an eye on devstat_generation all the time.
 */
static int
sysctl_devstat(SYSCTL_HANDLER_ARGS)
{
	int error;
	long mygen;
	struct devstat *nds;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);

	/*
	 * XXX devstat_generation should really be "volatile" but that
	 * XXX freaks out the sysctl macro below.  The places where we
	 * XXX change it and inspect it are bracketed in the mutex which
	 * XXX guarantees us proper write barriers.  I don't believe the
	 * XXX compiler is allowed to optimize mygen away across calls
	 * XXX to other functions, so the following is believed to be safe.
	 */
	mygen = devstat_generation;

	error = SYSCTL_OUT(req, &mygen, sizeof(mygen));

	if (devstat_num_devs == 0)
		return(0);

	if (error != 0)
		return (error);

	mtx_lock(&devstat_mutex);
	nds = STAILQ_FIRST(&device_statq);
	if (mygen != devstat_generation)
		error = EBUSY;
	mtx_unlock(&devstat_mutex);

	if (error != 0)
		return (error);

	for (;nds != NULL;) {
		error = SYSCTL_OUT(req, nds, sizeof(struct devstat));
		if (error != 0)
			return (error);
		mtx_lock(&devstat_mutex);
		if (mygen != devstat_generation)
			error = EBUSY;
		else
			nds = STAILQ_NEXT(nds, dev_links);
		mtx_unlock(&devstat_mutex);
		if (error != 0)
			return (error);
	}
	return(error);
}

/*
 * Sysctl entries for devstat.  The first one is a node that all the rest
 * hang off of.
 */
SYSCTL_NODE(_kern, OID_AUTO, devstat, CTLFLAG_RD, 0, "Device Statistics");

SYSCTL_PROC(_kern_devstat, OID_AUTO, all, CTLFLAG_RD|CTLTYPE_OPAQUE,
    0, 0, sysctl_devstat, "S,devstat", "All devices in the devstat list");
/*
 * Export the number of devices in the system so that userland utilities
 * can determine how much memory to allocate to hold all the devices.
 */
SYSCTL_INT(_kern_devstat, OID_AUTO, numdevs, CTLFLAG_RD,
    &devstat_num_devs, 0, "Number of devices in the devstat list");
SYSCTL_LONG(_kern_devstat, OID_AUTO, generation, CTLFLAG_RD,
    &devstat_generation, 0, "Devstat list generation");
SYSCTL_INT(_kern_devstat, OID_AUTO, version, CTLFLAG_RD,
    &devstat_version, 0, "Devstat list version number");
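
/*
 * Illustrative userland sketch (an assumption; the devstat(3) library is
 * the supported consumer of these sysctls): reading the snapshot exported
 * by sysctl_devstat() above.  The buffer starts with the generation number
 * (a long), followed by one struct devstat per device; if the generation
 * changes while the handler is copying out, it returns EBUSY and the read
 * should simply be retried:
 *
 *	size_t len = 0;
 *	char *buf;
 *	long gen;
 *
 *	if (sysctlbyname("kern.devstat.all", NULL, &len, NULL, 0) == -1)
 *		err(1, "sysctlbyname");
 *	buf = malloc(len);
 *	while (sysctlbyname("kern.devstat.all", buf, &len, NULL, 0) == -1 &&
 *	    errno == EBUSY)
 *		continue;
 *	memcpy(&gen, buf, sizeof(gen));
 */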

/*
 * Allocator for struct devstat structures.  We sub-allocate these from pages
 * which we get from malloc.  These pages are exported for mmap(2)'ing through
 * a miniature device driver.
 */

#define statsperpage (PAGE_SIZE / sizeof(struct devstat))

static d_mmap_t devstat_mmap;

static struct cdevsw devstat_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_NEEDGIANT,
	.d_mmap =	devstat_mmap,
	.d_name =	"devstat",
};

struct statspage {
	TAILQ_ENTRY(statspage)	list;
	struct devstat		*stat;
	u_int			nfree;
};

static TAILQ_HEAD(, statspage)	pagelist = TAILQ_HEAD_INITIALIZER(pagelist);
static MALLOC_DEFINE(M_DEVSTAT, "devstat", "Device statistics");

static int
devstat_mmap(struct cdev *dev, vm_offset_t offset, vm_paddr_t *paddr, int nprot)
{
	struct statspage *spp;

	if (nprot != VM_PROT_READ)
		return (-1);
	TAILQ_FOREACH(spp, &pagelist, list) {
		if (offset == 0) {
			*paddr = vtophys(spp->stat);
			return (0);
		}
		offset -= PAGE_SIZE;
	}
	return (-1);
}

static struct devstat *
devstat_alloc(void)
{
	struct devstat *dsp;
	struct statspage *spp;
	u_int u;
	static int once;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);
	if (!once) {
		make_dev(&devstat_cdevsw, 0,
		    UID_ROOT, GID_WHEEL, 0400, DEVSTAT_DEVICE_NAME);
		once = 1;
	}
	mtx_lock(&devstat_mutex);
	for (;;) {
		TAILQ_FOREACH(spp, &pagelist, list) {
			if (spp->nfree > 0)
				break;
		}
		if (spp != NULL)
			break;
		/*
		 * We had no free slot in any of our pages, drop the mutex
		 * and get another page.  In theory we could have more than
		 * one process doing this at the same time and consequently
		 * we may allocate more pages than we will need.  That is
		 * Just Too Bad[tm], we can live with that.
		 */
		mtx_unlock(&devstat_mutex);
		spp = malloc(sizeof *spp, M_DEVSTAT, M_ZERO | M_WAITOK);
		spp->stat = malloc(PAGE_SIZE, M_DEVSTAT, M_ZERO | M_WAITOK);
		spp->nfree = statsperpage;
		mtx_lock(&devstat_mutex);
		/*
		 * It would make more sense to add the new page at the head
		 * but the order on the list determines the sequence of the
		 * mapping, so we can't do that.
		 */
		TAILQ_INSERT_TAIL(&pagelist, spp, list);
	}
	dsp = spp->stat;
	for (u = 0; u < statsperpage; u++) {
		if (dsp->allocated == 0)
			break;
		dsp++;
	}
	spp->nfree--;
	dsp->allocated = 1;
	mtx_unlock(&devstat_mutex);
	return (dsp);
}

static void
devstat_free(struct devstat *dsp)
{
	struct statspage *spp;

	mtx_assert(&devstat_mutex, MA_OWNED);
	bzero(dsp, sizeof *dsp);
	TAILQ_FOREACH(spp, &pagelist, list) {
		if (dsp >= spp->stat && dsp < (spp->stat + statsperpage)) {
			spp->nfree++;
			return;
		}
	}
}

SYSCTL_INT(_debug_sizeof, OID_AUTO, devstat, CTLFLAG_RD,
    0, sizeof(struct devstat), "sizeof(struct devstat)");
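
/*
 * Illustrative userland sketch (an assumption, not part of this file):
 * mapping the statistics pages exported by devstat_mmap() above through the
 * DEVSTAT_DEVICE_NAME node.  Pages are handed out in list order, one
 * PAGE_SIZE page per page-aligned offset, each holding "statsperpage"
 * struct devstat slots:
 *
 *	int fd = open("/dev/devstat", O_RDONLY);
 *	struct devstat *page;
 *
 *	page = mmap(NULL, getpagesize(), PROT_READ, MAP_SHARED, fd, 0);
 *
 * Further pages, if any, are reached by repeating the mmap() call at
 * offsets of getpagesize(), 2 * getpagesize(), and so on, matching the
 * offset walk in devstat_mmap().
 */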