/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/disk.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/devicestat.h>
#include <sys/sdt.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/conf.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/atomic.h>

SDT_PROVIDER_DEFINE(io);

SDT_PROBE_DEFINE2(io, , , start, "struct bio *", "struct devstat *");
SDT_PROBE_DEFINE2(io, , , done, "struct bio *", "struct devstat *");

#define	DTRACE_DEVSTAT_BIO_START()	SDT_PROBE2(io, , , start, bp, ds)
#define	DTRACE_DEVSTAT_BIO_DONE()	SDT_PROBE2(io, , , done, bp, ds)
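
/*
 * The probes above can be consumed from userland with dtrace(1).  A
 * minimal sketch, assuming the stock io provider, that counts I/O
 * starts per process:
 *
 *	dtrace -n 'io:::start { @starts[execname] = count(); }'
 */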

static int devstat_num_devs;
static long devstat_generation = 1;
static int devstat_version = DEVSTAT_VERSION;
static int devstat_current_devnumber;
static struct mtx devstat_mutex;
MTX_SYSINIT(devstat_mutex, &devstat_mutex, "devstat", MTX_DEF);

static struct devstatlist device_statq = STAILQ_HEAD_INITIALIZER(device_statq);
static struct devstat *devstat_alloc(void);
static void devstat_free(struct devstat *);
static void devstat_add_entry(struct devstat *ds, const void *dev_name,
		   int unit_number, uint32_t block_size,
		   devstat_support_flags flags,
		   devstat_type_flags device_type,
		   devstat_priority priority);

/*
 * Allocate a devstat and initialize it
 */
struct devstat *
devstat_new_entry(const void *dev_name,
		  int unit_number, uint32_t block_size,
		  devstat_support_flags flags,
		  devstat_type_flags device_type,
		  devstat_priority priority)
{
	struct devstat *ds;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);

	ds = devstat_alloc();
	mtx_lock(&devstat_mutex);
	if (unit_number == -1) {
		ds->unit_number = unit_number;
		ds->id = dev_name;
		binuptime(&ds->creation_time);
		devstat_generation++;
	} else {
		devstat_add_entry(ds, dev_name, unit_number, block_size,
				  flags, device_type, priority);
	}
	mtx_unlock(&devstat_mutex);
	return (ds);
}
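
/*
 * A typical caller registers once at attach time, e.g. (a sketch with a
 * hypothetical softc field; the flag, type, and priority values are
 * from sys/devstat.h):
 *
 *	sc->ds = devstat_new_entry("foo", device_get_unit(dev), 512,
 *	    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT,
 *	    DEVSTAT_PRIORITY_DISK);
 */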

/*
 * Take a malloced and zeroed devstat structure given to us, fill it in
 * and add it to the queue of devices.
 */
static void
devstat_add_entry(struct devstat *ds, const void *dev_name,
		  int unit_number, uint32_t block_size,
		  devstat_support_flags flags,
		  devstat_type_flags device_type,
		  devstat_priority priority)
{
	struct devstatlist *devstat_head;
	struct devstat *ds_tmp;

	mtx_assert(&devstat_mutex, MA_OWNED);
	devstat_num_devs++;

	devstat_head = &device_statq;

	/*
	 * Priority sort.  Each driver passes in its priority when it adds
	 * its devstat entry.  Drivers are sorted first by priority, and
	 * then by probe order.
	 *
	 * For the first device, we just insert it, since the priority
	 * doesn't really matter yet.  Subsequent devices are inserted into
	 * the list using the order outlined above.
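	 * (For example, DEVSTAT_PRIORITY_DISK sorts ahead of
	 * DEVSTAT_PRIORITY_OTHER; see sys/devstat.h for the full ordering.)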
	 */
	if (devstat_num_devs == 1)
		STAILQ_INSERT_TAIL(devstat_head, ds, dev_links);
	else {
		STAILQ_FOREACH(ds_tmp, devstat_head, dev_links) {
			struct devstat *ds_next;

			ds_next = STAILQ_NEXT(ds_tmp, dev_links);

			/*
			 * If we find a break between higher and lower
			 * priority items, and if this item fits in the
			 * break, insert it.  This also applies if the
			 * "lower priority item" is the end of the list.
			 */
			if ((priority <= ds_tmp->priority)
			 && ((ds_next == NULL)
			   || (priority > ds_next->priority))) {
				STAILQ_INSERT_AFTER(devstat_head, ds_tmp, ds,
						    dev_links);
				break;
			} else if (priority > ds_tmp->priority) {
				/*
				 * If this is the case, we should be able
				 * to insert ourselves at the head of the
				 * list.  If we can't, something is wrong.
				 */
				if (ds_tmp == STAILQ_FIRST(devstat_head)) {
					STAILQ_INSERT_HEAD(devstat_head,
							   ds, dev_links);
					break;
				} else {
					STAILQ_INSERT_TAIL(devstat_head,
							   ds, dev_links);
					printf("devstat_add_entry: HELP! "
					       "sorting problem detected "
					       "for name %p unit %d\n",
					       dev_name, unit_number);
					break;
				}
			}
		}
	}

	ds->device_number = devstat_current_devnumber++;
	ds->unit_number = unit_number;
	strlcpy(ds->device_name, dev_name, DEVSTAT_NAME_LEN);
	ds->block_size = block_size;
	ds->flags = flags;
	ds->device_type = device_type;
	ds->priority = priority;
	binuptime(&ds->creation_time);
	devstat_generation++;
}

/*
 * Remove a devstat structure from the list of devices.
 */
void
devstat_remove_entry(struct devstat *ds)
{
	struct devstatlist *devstat_head;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);
	if (ds == NULL)
		return;

	mtx_lock(&devstat_mutex);

	devstat_head = &device_statq;

	/* Remove this entry from the devstat queue */
	atomic_add_acq_int(&ds->sequence1, 1);
	if (ds->unit_number != -1) {
		devstat_num_devs--;
		STAILQ_REMOVE(devstat_head, ds, devstat, dev_links);
	}
	devstat_free(ds);
	devstat_generation++;
	mtx_unlock(&devstat_mutex);
}

/*
 * Record a transaction start.
 *
 * See comments for devstat_end_transaction().  Ordering is very important
 * here.
 */
void
devstat_start_transaction(struct devstat *ds, const struct bintime *now)
{

	/* sanity check */
	if (ds == NULL)
		return;

	atomic_add_acq_int(&ds->sequence1, 1);
	/*
	 * We only want to set the start time when we are going from idle
	 * to busy.  The start time is really the start of the latest busy
	 * period.
	 */
	if (atomic_fetchadd_int(&ds->start_count, 1) == ds->end_count) {
		if (now != NULL)
			ds->busy_from = *now;
		else
			binuptime(&ds->busy_from);
	}
	atomic_add_rel_int(&ds->sequence0, 1);
}

void
devstat_start_transaction_bio(struct devstat *ds, struct bio *bp)
{

	/* sanity check */
	if (ds == NULL)
		return;

	binuptime(&bp->bio_t0);
	devstat_start_transaction_bio_t0(ds, bp);
}

void
devstat_start_transaction_bio_t0(struct devstat *ds, struct bio *bp)
{

	/* sanity check */
	if (ds == NULL)
		return;

	devstat_start_transaction(ds, &bp->bio_t0);
	DTRACE_DEVSTAT_BIO_START();
}

/*
 * Record the ending of a transaction, and increment the various counters.
 *
 * Ordering in this function, and in devstat_start_transaction() is VERY
 * important.  The idea here is to run without locks, so we are very
 * careful to only modify some fields on the way "down" (i.e. at
 * transaction start) and some fields on the way "up" (i.e. at transaction
 * completion).  One exception is busy_from, which we only modify in
 * devstat_start_transaction() when there are no outstanding transactions,
 * and thus it can't be modified in devstat_end_transaction()
 * simultaneously.
 *
 * The sequence0 and sequence1 fields are provided to enable an application
 * spying on the structures with mmap(2) to tell when a structure is in a
 * consistent state or not.
 *
 * For this to work 100% reliably, it is important that the two fields
 * are at opposite ends of the structure and that they are incremented
 * in the opposite order of how a memcpy(3) in userland would copy them.
 * We assume that the copying happens front to back, but there is actually
 * no way short of writing your own memcpy(3) replacement to guarantee
 * this will be the case.
 *
 * In addition to this, since they act as a kind of lock, they must be
 * updated with atomic instructions using appropriate memory barriers.
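 *
 * A userland reader can thus validate a snapshot roughly like this (a
 * sketch of what a libdevstat-style consumer does; "snap" and
 * "mapped_ds" are hypothetical names):
 *
 *	do {
 *		memcpy(&snap, mapped_ds, sizeof(snap));
 *	} while (snap.sequence0 != snap.sequence1);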
 */
void
devstat_end_transaction(struct devstat *ds, uint32_t bytes,
			devstat_tag_type tag_type, devstat_trans_flags flags,
			const struct bintime *now, const struct bintime *then)
{
	struct bintime dt, lnow;

	/* sanity check */
	if (ds == NULL)
		return;

	if (now == NULL) {
		binuptime(&lnow);
		now = &lnow;
	}

	atomic_add_acq_int(&ds->sequence1, 1);
	/* Update byte and operations counts */
	ds->bytes[flags] += bytes;
	ds->operations[flags]++;

	/*
	 * Keep a count of the various tag types sent.
	 */
	if ((ds->flags & DEVSTAT_NO_ORDERED_TAGS) == 0 &&
	    tag_type != DEVSTAT_TAG_NONE)
		ds->tag_types[tag_type]++;

	if (then != NULL) {
		/* Update duration of operations */
		dt = *now;
		bintime_sub(&dt, then);
		bintime_add(&ds->duration[flags], &dt);
	}

	/* Accumulate busy time */
	dt = *now;
	bintime_sub(&dt, &ds->busy_from);
	bintime_add(&ds->busy_time, &dt);
	ds->busy_from = *now;

	ds->end_count++;
	atomic_add_rel_int(&ds->sequence0, 1);
}

void
devstat_end_transaction_bio(struct devstat *ds, const struct bio *bp)
{

	devstat_end_transaction_bio_bt(ds, bp, NULL);
}

void
devstat_end_transaction_bio_bt(struct devstat *ds, const struct bio *bp,
    const struct bintime *now)
{
	devstat_trans_flags flg;
	devstat_tag_type tag;

	/* sanity check */
	if (ds == NULL)
		return;

	if (bp->bio_flags & BIO_ORDERED)
		tag = DEVSTAT_TAG_ORDERED;
	else
		tag = DEVSTAT_TAG_SIMPLE;
	if (bp->bio_cmd == BIO_DELETE)
		flg = DEVSTAT_FREE;
	else if ((bp->bio_cmd == BIO_READ)
	      || ((bp->bio_cmd == BIO_ZONE)
	       && (bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES)))
		flg = DEVSTAT_READ;
	else if (bp->bio_cmd == BIO_WRITE)
		flg = DEVSTAT_WRITE;
	else
		flg = DEVSTAT_NO_DATA;

	devstat_end_transaction(ds, bp->bio_bcount - bp->bio_resid,
				tag, flg, now, &bp->bio_t0);
	DTRACE_DEVSTAT_BIO_DONE();
}
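
/*
 * Drivers typically bracket each bio with the start/end pair above,
 * e.g. (a sketch using a hypothetical softc field):
 *
 *	devstat_start_transaction_bio(sc->ds, bp);
 *	...hand the bio to the hardware...
 * and in the completion path:
 *	devstat_end_transaction_bio(sc->ds, bp);
 */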

/*
 * This is the sysctl handler for the devstat package.  The data pushed out
 * on the kern.devstat.all sysctl variable consists of the current devstat
 * generation number, and then an array of devstat structures, one for each
 * device in the system.
 *
 * This is more cryptic than obvious, but basically we neither can nor
 * want to hold the devstat_mutex for any amount of time, so we grab it
 * only when we need to and keep an eye on devstat_generation all the time.
 */
static int
sysctl_devstat(SYSCTL_HANDLER_ARGS)
{
	int error;
	long mygen;
	struct devstat *nds;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);

	/*
	 * XXX devstat_generation should really be "volatile" but that
	 * XXX freaks out the sysctl macro below.  The places where we
	 * XXX change it and inspect it are bracketed in the mutex which
	 * XXX guarantees us proper write barriers.  I don't believe the
	 * XXX compiler is allowed to optimize mygen away across calls
	 * XXX to other functions, so the following is believed to be safe.
	 */
	mygen = devstat_generation;

	error = SYSCTL_OUT(req, &mygen, sizeof(mygen));

	if (devstat_num_devs == 0)
		return (0);

	if (error != 0)
		return (error);

	mtx_lock(&devstat_mutex);
	nds = STAILQ_FIRST(&device_statq);
	if (mygen != devstat_generation)
		error = EBUSY;
	mtx_unlock(&devstat_mutex);

	if (error != 0)
		return (error);

	while (nds != NULL) {
		error = SYSCTL_OUT(req, nds, sizeof(struct devstat));
		if (error != 0)
			return (error);
		mtx_lock(&devstat_mutex);
		if (mygen != devstat_generation)
			error = EBUSY;
		else
			nds = STAILQ_NEXT(nds, dev_links);
		mtx_unlock(&devstat_mutex);
		if (error != 0)
			return (error);
	}
	return (error);
}
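
/*
 * A userland consumer might read kern.devstat.all like this (a sketch;
 * devstat(3) wraps this, including the retry when the generation
 * changes under the reader):
 *
 *	size_t len = 0;
 *	sysctlbyname("kern.devstat.all", NULL, &len, NULL, 0);
 *	void *buf = malloc(len);
 *	if (sysctlbyname("kern.devstat.all", buf, &len, NULL, 0) == -1)
 *		...retry on EBUSY...
 *
 * The buffer begins with the generation number (a long), followed by
 * one struct devstat per device.
 */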

/*
 * Sysctl entries for devstat.  The first one is a node that all the rest
 * hang off of.
 */
static SYSCTL_NODE(_kern, OID_AUTO, devstat, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "Device Statistics");

SYSCTL_PROC(_kern_devstat, OID_AUTO, all,
    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_devstat, "S,devstat",
    "All devices in the devstat list");
/*
 * Export the number of devices in the system so that userland utilities
 * can determine how much memory to allocate to hold all the devices.
 */
SYSCTL_INT(_kern_devstat, OID_AUTO, numdevs, CTLFLAG_RD,
    &devstat_num_devs, 0, "Number of devices in the devstat list");
SYSCTL_LONG(_kern_devstat, OID_AUTO, generation, CTLFLAG_RD,
    &devstat_generation, 0, "Devstat list generation");
SYSCTL_INT(_kern_devstat, OID_AUTO, version, CTLFLAG_RD,
    &devstat_version, 0, "Devstat list version number");
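
/*
 * With numdevs in hand, the expected size of kern.devstat.all follows
 * from the handler above (a sketch of the sizing arithmetic):
 *
 *	len = sizeof(long) + numdevs * sizeof(struct devstat);
 */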

/*
 * Allocator for struct devstat structures.  We sub-allocate these from pages
 * which we get from malloc.  These pages are exported for mmap(2)'ing through
 * a miniature device driver.
 */

#define	statsperpage (PAGE_SIZE / sizeof(struct devstat))

static d_ioctl_t devstat_ioctl;
static d_mmap_t devstat_mmap;

static struct cdevsw devstat_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	devstat_ioctl,
	.d_mmap =	devstat_mmap,
	.d_name =	"devstat",
};

struct statspage {
	TAILQ_ENTRY(statspage)	list;
	struct devstat		*stat;
	u_int			nfree;
};

static size_t pagelist_pages = 0;
static TAILQ_HEAD(, statspage)	pagelist = TAILQ_HEAD_INITIALIZER(pagelist);
static MALLOC_DEFINE(M_DEVSTAT, "devstat", "Device statistics");

static int
devstat_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	int error = ENOTTY;

	switch (cmd) {
	case DIOCGMEDIASIZE:
		error = 0;
		*(off_t *)data = pagelist_pages * PAGE_SIZE;
		break;
	}

	return (error);
}

static int
devstat_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int nprot, vm_memattr_t *memattr)
{
	struct statspage *spp;

	if (nprot != VM_PROT_READ)
		return (-1);
	mtx_lock(&devstat_mutex);
	TAILQ_FOREACH(spp, &pagelist, list) {
		if (offset == 0) {
			*paddr = vtophys(spp->stat);
			mtx_unlock(&devstat_mutex);
			return (0);
		}
		offset -= PAGE_SIZE;
	}
	mtx_unlock(&devstat_mutex);
	return (-1);
}
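
/*
 * Userland can map the statistics pages read-only through the devstat
 * device, roughly like this (a sketch; error handling omitted):
 *
 *	fd = open("/dev/devstat", O_RDONLY);
 *	ioctl(fd, DIOCGMEDIASIZE, &mediasize);
 *	p = mmap(NULL, mediasize, PROT_READ, MAP_SHARED, fd, 0);
 */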

static struct devstat *
devstat_alloc(void)
{
	struct devstat *dsp;
	struct statspage *spp, *spp2;
	u_int u;
	static int once;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);
	if (!once) {
		make_dev_credf(MAKEDEV_ETERNAL | MAKEDEV_CHECKNAME,
		    &devstat_cdevsw, 0, NULL, UID_ROOT, GID_WHEEL, 0444,
		    DEVSTAT_DEVICE_NAME);
		once = 1;
	}
	spp2 = NULL;
	mtx_lock(&devstat_mutex);
	for (;;) {
		TAILQ_FOREACH(spp, &pagelist, list) {
			if (spp->nfree > 0)
				break;
		}
		if (spp != NULL)
			break;
		mtx_unlock(&devstat_mutex);
		spp2 = malloc(sizeof *spp, M_DEVSTAT, M_ZERO | M_WAITOK);
		spp2->stat = malloc(PAGE_SIZE, M_DEVSTAT, M_ZERO | M_WAITOK);
		spp2->nfree = statsperpage;

		/*
		 * If free statspages were added while the lock was
		 * released, just reuse them.
		 */
		mtx_lock(&devstat_mutex);
		TAILQ_FOREACH(spp, &pagelist, list)
			if (spp->nfree > 0)
				break;
		if (spp == NULL) {
			spp = spp2;

			/*
			 * It would make more sense to add the new page at
			 * the head, but the order on the list determines
			 * the sequence of the mapping, so we can't do that.
			 */
			pagelist_pages++;
			TAILQ_INSERT_TAIL(&pagelist, spp, list);
		} else
			break;
	}
	dsp = spp->stat;
	for (u = 0; u < statsperpage; u++) {
		if (dsp->allocated == 0)
			break;
		dsp++;
	}
	spp->nfree--;
	dsp->allocated = 1;
	mtx_unlock(&devstat_mutex);
	if (spp2 != NULL && spp2 != spp) {
		free(spp2->stat, M_DEVSTAT);
		free(spp2, M_DEVSTAT);
	}
	return (dsp);
}

static void
devstat_free(struct devstat *dsp)
{
	struct statspage *spp;

	mtx_assert(&devstat_mutex, MA_OWNED);
	bzero(dsp, sizeof *dsp);
	TAILQ_FOREACH(spp, &pagelist, list) {
		if (dsp >= spp->stat && dsp < (spp->stat + statsperpage)) {
			spp->nfree++;
			return;
		}
	}
}

SYSCTL_INT(_debug_sizeof, OID_AUTO, devstat, CTLFLAG_RD,
    SYSCTL_NULL_INT_PTR, sizeof(struct devstat), "sizeof(struct devstat)");