xref: /freebsd/sys/kern/subr_devstat.c (revision 2b06b2013c82ee7f744ac5b6a413edede3eeb8cd)
1 /*-
2  * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include "opt_kdtrace.h"
33 
34 #include <sys/param.h>
35 #include <sys/kernel.h>
36 #include <sys/systm.h>
37 #include <sys/bio.h>
38 #include <sys/devicestat.h>
39 #include <sys/sysctl.h>
40 #include <sys/malloc.h>
41 #include <sys/lock.h>
42 #include <sys/mutex.h>
43 #include <sys/conf.h>
44 #include <vm/vm.h>
45 #include <vm/pmap.h>
46 
47 #include <machine/atomic.h>
48 
49 #ifdef KDTRACE_HOOKS
50 #include <sys/dtrace_bsd.h>
51 
52 dtrace_io_start_probe_func_t dtrace_io_start_probe;
53 dtrace_io_done_probe_func_t dtrace_io_done_probe;
54 dtrace_io_wait_start_probe_func_t dtrace_io_wait_start_probe;
55 dtrace_io_wait_done_probe_func_t dtrace_io_wait_done_probe;
56 
57 uint32_t	dtio_start_id;
58 uint32_t	dtio_done_id;
59 uint32_t	dtio_wait_start_id;
60 uint32_t	dtio_wait_done_id;
61 
62 #define DTRACE_DEVSTAT_START() \
63 	if (dtrace_io_start_probe != NULL) \
64 		(*dtrace_io_start_probe)(dtio_start_id, NULL, ds);
65 
66 #define DTRACE_DEVSTAT_BIO_START() \
67 	if (dtrace_io_start_probe != NULL) \
68 		(*dtrace_io_start_probe)(dtio_start_id, bp, ds);
69 
70 #define DTRACE_DEVSTAT_DONE() \
71 	if (dtrace_io_done_probe != NULL) \
72 		(*dtrace_io_done_probe)(dtio_done_id, NULL, ds);
73 
74 #define DTRACE_DEVSTAT_BIO_DONE() \
75 	if (dtrace_io_done_probe != NULL) \
76 		(*dtrace_io_done_probe)(dtio_done_id, bp, ds);
77 
78 #define DTRACE_DEVSTAT_WAIT_START() \
79 	if (dtrace_io_wait_start_probe != NULL) \
80 		(*dtrace_io_wait_start_probe)(dtio_wait_start_id, NULL, ds);
81 
82 #define DTRACE_DEVSTAT_WAIT_DONE() \
83 	if (dtrace_io_wait_done_probe != NULL) \
84 		(*dtrace_io_wait_done_probe)(dtio_wait_done_id, NULL, ds);
85 
86 #else /* ! KDTRACE_HOOKS */
87 
88 #define DTRACE_DEVSTAT_START()
89 
90 #define DTRACE_DEVSTAT_BIO_START()
91 
92 #define DTRACE_DEVSTAT_DONE()
93 
94 #define DTRACE_DEVSTAT_BIO_DONE()
95 
96 #define DTRACE_DEVSTAT_WAIT_START()
97 
98 #define DTRACE_DEVSTAT_WAIT_DONE()
99 #endif /* KDTRACE_HOOKS */
100 
101 static int devstat_num_devs;
102 static long devstat_generation = 1;
103 static int devstat_version = DEVSTAT_VERSION;
104 static int devstat_current_devnumber;
105 static struct mtx devstat_mutex;
106 MTX_SYSINIT(devstat_mutex, &devstat_mutex, "devstat", MTX_DEF);
107 
108 static struct devstatlist device_statq = STAILQ_HEAD_INITIALIZER(device_statq);
109 static struct devstat *devstat_alloc(void);
110 static void devstat_free(struct devstat *);
111 static void devstat_add_entry(struct devstat *ds, const void *dev_name,
112 		       int unit_number, uint32_t block_size,
113 		       devstat_support_flags flags,
114 		       devstat_type_flags device_type,
115 		       devstat_priority priority);
116 
117 /*
118  * Allocate a devstat and initialize it
119  */
120 struct devstat *
121 devstat_new_entry(const void *dev_name,
122 		  int unit_number, uint32_t block_size,
123 		  devstat_support_flags flags,
124 		  devstat_type_flags device_type,
125 		  devstat_priority priority)
126 {
127 	struct devstat *ds;
128 
129 	mtx_assert(&devstat_mutex, MA_NOTOWNED);
130 
131 	ds = devstat_alloc();
132 	mtx_lock(&devstat_mutex);
133 	if (unit_number == -1) {
134 		ds->id = dev_name;
135 		binuptime(&ds->creation_time);
136 		devstat_generation++;
137 	} else {
138 		devstat_add_entry(ds, dev_name, unit_number, block_size,
139 				  flags, device_type, priority);
140 	}
141 	mtx_unlock(&devstat_mutex);
142 	return (ds);
143 }
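
/*
 * Illustrative sketch only (not part of this file): a hypothetical driver
 * would normally create its entry once at attach time and tear it down at
 * detach, keeping the pointer in its softc.  The flag and priority values
 * shown are existing devicestat.h constants; "foo", unit and softc are
 * placeholders.
 *
 *	softc->stats = devstat_new_entry("foo", unit, DEV_BSIZE,
 *	    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT,
 *	    DEVSTAT_PRIORITY_DISK);
 *	...
 *	devstat_remove_entry(softc->stats);
 *
 * Passing a unit_number of -1 instead creates an "id only" entry that is
 * never linked into the sorted queue, as the code above shows.
 */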
144 
145 /*
146  * Take an allocated and zeroed devstat structure given to us, fill it in
147  * and add it to the queue of devices.
148  */
149 static void
150 devstat_add_entry(struct devstat *ds, const void *dev_name,
151 		  int unit_number, uint32_t block_size,
152 		  devstat_support_flags flags,
153 		  devstat_type_flags device_type,
154 		  devstat_priority priority)
155 {
156 	struct devstatlist *devstat_head;
157 	struct devstat *ds_tmp;
158 
159 	mtx_assert(&devstat_mutex, MA_OWNED);
160 	devstat_num_devs++;
161 
162 	devstat_head = &device_statq;
163 
164 	/*
165 	 * Priority sort.  Each driver passes in its priority when it adds
166 	 * its devstat entry.  Drivers are sorted first by priority, and
167 	 * then by probe order.
168 	 *
169 	 * For the first device, we just insert it, since the priority
170 	 * doesn't really matter yet.  Subsequent devices are inserted into
171 	 * the list using the order outlined above.
172 	 */
173 	if (devstat_num_devs == 1)
174 		STAILQ_INSERT_TAIL(devstat_head, ds, dev_links);
175 	else {
176 		STAILQ_FOREACH(ds_tmp, devstat_head, dev_links) {
177 			struct devstat *ds_next;
178 
179 			ds_next = STAILQ_NEXT(ds_tmp, dev_links);
180 
181 			/*
182 			 * If we find a break between higher and lower
183 			 * priority items, and if this item fits in the
184 			 * break, insert it.  This also applies if the
185 			 * "lower priority item" is the end of the list.
186 			 */
187 			if ((priority <= ds_tmp->priority)
188 			 && ((ds_next == NULL)
189 			   || (priority > ds_next->priority))) {
190 				STAILQ_INSERT_AFTER(devstat_head, ds_tmp, ds,
191 						    dev_links);
192 				break;
193 			} else if (priority > ds_tmp->priority) {
194 				/*
195 				 * If this is the case, we should be able
196 				 * to insert ourselves at the head of the
197 				 * list.  If we can't, something is wrong.
198 				 */
199 				if (ds_tmp == STAILQ_FIRST(devstat_head)) {
200 					STAILQ_INSERT_HEAD(devstat_head,
201 							   ds, dev_links);
202 					break;
203 				} else {
204 					STAILQ_INSERT_TAIL(devstat_head,
205 							   ds, dev_links);
206 					printf("devstat_add_entry: HELP! "
207 					       "sorting problem detected "
208 					       "for name %p unit %d\n",
209 					       dev_name, unit_number);
210 					break;
211 				}
212 			}
213 		}
214 	}
215 
216 	ds->device_number = devstat_current_devnumber++;
217 	ds->unit_number = unit_number;
218 	strlcpy(ds->device_name, dev_name, DEVSTAT_NAME_LEN);
219 	ds->block_size = block_size;
220 	ds->flags = flags;
221 	ds->device_type = device_type;
222 	ds->priority = priority;
223 	binuptime(&ds->creation_time);
224 	devstat_generation++;
225 }
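
/*
 * A small worked example of the insertion rule above: if the queue holds
 * entries with priorities 4, 2, 2, 1 (head to tail), a new entry of
 * priority 2 lands after the second "2" (equal priorities keep probe
 * order), a new entry of priority 3 lands between the 4 and the first 2,
 * and a new entry of priority 5 is inserted at the head.
 */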
226 
227 /*
228  * Remove a devstat structure from the list of devices.
229  */
230 void
231 devstat_remove_entry(struct devstat *ds)
232 {
233 	struct devstatlist *devstat_head;
234 
235 	mtx_assert(&devstat_mutex, MA_NOTOWNED);
236 	if (ds == NULL)
237 		return;
238 
239 	mtx_lock(&devstat_mutex);
240 
241 	devstat_head = &device_statq;
242 
243 	/* Remove this entry from the devstat queue */
244 	atomic_add_acq_int(&ds->sequence1, 1);
245 	if (ds->id == NULL) {
246 		devstat_num_devs--;
247 		STAILQ_REMOVE(devstat_head, ds, devstat, dev_links);
248 	}
249 	devstat_free(ds);
250 	devstat_generation++;
251 	mtx_unlock(&devstat_mutex);
252 }
253 
254 /*
255  * Record a transaction start.
256  *
257  * See comments for devstat_end_transaction().  Ordering is very important
258  * here.
259  */
260 void
261 devstat_start_transaction(struct devstat *ds, struct bintime *now)
262 {
263 
264 	mtx_assert(&devstat_mutex, MA_NOTOWNED);
265 
266 	/* sanity check */
267 	if (ds == NULL)
268 		return;
269 
270 	atomic_add_acq_int(&ds->sequence1, 1);
271 	/*
272 	 * We only want to set the start time when we are going from idle
273 	 * to busy.  The start time is really the start of the latest busy
274 	 * period.
275 	 */
276 	if (ds->start_count == ds->end_count) {
277 		if (now != NULL)
278 			ds->busy_from = *now;
279 		else
280 			binuptime(&ds->busy_from);
281 	}
282 	ds->start_count++;
283 	atomic_add_rel_int(&ds->sequence0, 1);
284 	DTRACE_DEVSTAT_START();
285 }
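
/*
 * Worked example of the busy accounting (values are illustrative): if I/O
 * "A" starts at t=0 (idle -> busy, so busy_from = 0), I/O "B" starts at
 * t=1, "A" completes at t=3 and "B" completes at t=5, then
 * devstat_end_transaction() accumulates (3 - 0) and then (5 - 3) into
 * busy_time, i.e. the whole 0..5 interval is counted exactly once no
 * matter how the transactions overlap.
 */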
286 
287 void
288 devstat_start_transaction_bio(struct devstat *ds, struct bio *bp)
289 {
290 
291 	mtx_assert(&devstat_mutex, MA_NOTOWNED);
292 
293 	/* sanity check */
294 	if (ds == NULL)
295 		return;
296 
297 	binuptime(&bp->bio_t0);
298 	devstat_start_transaction(ds, &bp->bio_t0);
299 	DTRACE_DEVSTAT_BIO_START();
300 }
301 
302 /*
303  * Record the ending of a transaction, and increment the various counters.
304  *
305  * Ordering in this function, and in devstat_start_transaction(), is VERY
306  * important.  The idea here is to run without locks, so we are very
307  * careful to only modify some fields on the way "down" (i.e. at
308  * transaction start) and some fields on the way "up" (i.e. at transaction
309  * completion).  One exception is busy_from, which we only modify in
310  * devstat_start_transaction() when there are no outstanding transactions,
311  * and thus it can't be modified in devstat_end_transaction()
312  * simultaneously.
313  *
314  * The sequence0 and sequence1 fields are provided to enable an application
315  * spying on the structures with mmap(2) to tell when a structure is in a
316  * consistent state or not.
317  *
318  * For this to work 100% reliably, it is important that the two fields
319  * are at opposite ends of the structure and that they are incremented
320  * in the opposite order of how a memcpy(3) in userland would copy them.
321  * We assume that the copying happens front to back, but there is actually
322  * no way short of writing your own memcpy(3) replacement to guarantee
323  * this will be the case.
324  *
325  * In addition to this, since they act as a kind of lock, they must be
326  * updated with atomic instructions using appropriate memory barriers.
327  */
328 void
329 devstat_end_transaction(struct devstat *ds, uint32_t bytes,
330 			devstat_tag_type tag_type, devstat_trans_flags flags,
331 			struct bintime *now, struct bintime *then)
332 {
333 	struct bintime dt, lnow;
334 
335 	/* sanity check */
336 	if (ds == NULL)
337 		return;
338 
339 	if (now == NULL) {
340 		now = &lnow;
341 		binuptime(now);
342 	}
343 
344 	atomic_add_acq_int(&ds->sequence1, 1);
345 	/* Update byte and operations counts */
346 	ds->bytes[flags] += bytes;
347 	ds->operations[flags]++;
348 
349 	/*
350 	 * Keep a count of the various tag types sent.
351 	 */
352 	if ((ds->flags & DEVSTAT_NO_ORDERED_TAGS) == 0 &&
353 	    tag_type != DEVSTAT_TAG_NONE)
354 		ds->tag_types[tag_type]++;
355 
356 	if (then != NULL) {
357 		/* Update duration of operations */
358 		dt = *now;
359 		bintime_sub(&dt, then);
360 		bintime_add(&ds->duration[flags], &dt);
361 	}
362 
363 	/* Accumulate busy time */
364 	dt = *now;
365 	bintime_sub(&dt, &ds->busy_from);
366 	bintime_add(&ds->busy_time, &dt);
367 	ds->busy_from = *now;
368 
369 	ds->end_count++;
370 	atomic_add_rel_int(&ds->sequence0, 1);
371 	DTRACE_DEVSTAT_DONE();
372 }
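
/*
 * Illustrative sketch (hypothetical; not part of this file) of how a
 * userland consumer of the mmap(2)ed pages could take a consistent
 * snapshot, "mapped_ds" being a placeholder for a pointer into the
 * mapped page:
 *
 *	struct devstat snap;
 *
 *	do {
 *		memcpy(&snap, mapped_ds, sizeof(snap));
 *	} while (snap.sequence0 != snap.sequence1);
 *
 * Since sequence1 is bumped before the counters are modified and
 * sequence0 after, and the copy is assumed to run front to back, a
 * snapshot whose two sequence numbers match cannot contain a
 * half-updated entry.
 */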
373 
374 void
375 devstat_end_transaction_bio(struct devstat *ds, struct bio *bp)
376 {
377 
378 	devstat_end_transaction_bio_bt(ds, bp, NULL);
379 }
380 
381 void
382 devstat_end_transaction_bio_bt(struct devstat *ds, struct bio *bp,
383     struct bintime *now)
384 {
385 	devstat_trans_flags flg;
386 
387 	/* sanity check */
388 	if (ds == NULL)
389 		return;
390 
391 	if (bp->bio_cmd == BIO_DELETE)
392 		flg = DEVSTAT_FREE;
393 	else if (bp->bio_cmd == BIO_READ)
394 		flg = DEVSTAT_READ;
395 	else if (bp->bio_cmd == BIO_WRITE)
396 		flg = DEVSTAT_WRITE;
397 	else
398 		flg = DEVSTAT_NO_DATA;
399 
400 	devstat_end_transaction(ds, bp->bio_bcount - bp->bio_resid,
401 				DEVSTAT_TAG_SIMPLE, flg, now, &bp->bio_t0);
402 	DTRACE_DEVSTAT_BIO_DONE();
403 }
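
/*
 * Illustrative use only: a hypothetical block driver pairs the two bio
 * helpers around each request it handles ("sc->stats" is a placeholder
 * for the entry returned by devstat_new_entry()):
 *
 *	devstat_start_transaction_bio(sc->stats, bp);
 *	...			(hardware does the I/O)
 *	devstat_end_transaction_bio(sc->stats, bp);
 *	biodone(bp);
 *
 * The start call stamps bp->bio_t0, and the completion call uses that
 * stamp together with bio_bcount/bio_resid to charge the right duration
 * and byte count, as the functions above show.
 */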
404 
405 /*
406  * This is the sysctl handler for the devstat package.  The data pushed out
407  * on the kern.devstat.all sysctl variable consists of the current devstat
408  * generation number, and then an array of devstat structures, one for each
409  * device in the system.
410  *
411  * This is more cryptic than obvious, but basically we neither can nor
412  * want to hold the devstat_mutex for any amount of time, so we grab it
413  * only when we need to and keep an eye on devstat_generation all the time.
414  */
415 static int
416 sysctl_devstat(SYSCTL_HANDLER_ARGS)
417 {
418 	int error;
419 	long mygen;
420 	struct devstat *nds;
421 
422 	mtx_assert(&devstat_mutex, MA_NOTOWNED);
423 
424 	/*
425 	 * XXX devstat_generation should really be "volatile" but that
426 	 * XXX freaks out the sysctl macro below.  The places where we
427 	 * XXX change it and inspect it are bracketed in the mutex which
428  * XXX guarantees us proper write barriers.  I don't believe the
429  * XXX compiler is allowed to optimize mygen away across calls
430  * XXX to other functions, so the following is believed to be safe.
431 	 */
432 	mygen = devstat_generation;
433 
434 	error = SYSCTL_OUT(req, &mygen, sizeof(mygen));
435 
436 	if (devstat_num_devs == 0)
437 		return(0);
438 
439 	if (error != 0)
440 		return (error);
441 
442 	mtx_lock(&devstat_mutex);
443 	nds = STAILQ_FIRST(&device_statq);
444 	if (mygen != devstat_generation)
445 		error = EBUSY;
446 	mtx_unlock(&devstat_mutex);
447 
448 	if (error != 0)
449 		return (error);
450 
451 	for (;nds != NULL;) {
452 		error = SYSCTL_OUT(req, nds, sizeof(struct devstat));
453 		if (error != 0)
454 			return (error);
455 		mtx_lock(&devstat_mutex);
456 		if (mygen != devstat_generation)
457 			error = EBUSY;
458 		else
459 			nds = STAILQ_NEXT(nds, dev_links);
460 		mtx_unlock(&devstat_mutex);
461 		if (error != 0)
462 			return (error);
463 	}
464 	return(error);
465 }
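
/*
 * Illustrative sketch of a raw userland reader of kern.devstat.all
 * (hypothetical; the devstat(3) library is the supported interface).
 * The exported buffer is the generation count followed by one struct
 * devstat per device:
 *
 *	size_t len = 0;
 *
 *	sysctlbyname("kern.devstat.all", NULL, &len, NULL, 0);
 *	buf = malloc(len);
 *	sysctlbyname("kern.devstat.all", buf, &len, NULL, 0);
 *	gen = *(long *)buf;
 *	devs = (struct devstat *)((char *)buf + sizeof(long));
 *
 * A reader retries from the top whenever the call fails with EBUSY or
 * kern.devstat.generation no longer matches gen.
 */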
466 
467 /*
468  * Sysctl entries for devstat.  The first one is a node that all the rest
469  * hang off of.
470  */
471 static SYSCTL_NODE(_kern, OID_AUTO, devstat, CTLFLAG_RD, NULL,
472     "Device Statistics");
473 
474 SYSCTL_PROC(_kern_devstat, OID_AUTO, all, CTLFLAG_RD|CTLTYPE_OPAQUE,
475     NULL, 0, sysctl_devstat, "S,devstat", "All devices in the devstat list");
476 /*
477  * Export the number of devices in the system so that userland utilities
478  * can determine how much memory to allocate to hold all the devices.
479  */
480 SYSCTL_INT(_kern_devstat, OID_AUTO, numdevs, CTLFLAG_RD,
481     &devstat_num_devs, 0, "Number of devices in the devstat list");
482 SYSCTL_LONG(_kern_devstat, OID_AUTO, generation, CTLFLAG_RD,
483     &devstat_generation, 0, "Devstat list generation");
484 SYSCTL_INT(_kern_devstat, OID_AUTO, version, CTLFLAG_RD,
485     &devstat_version, 0, "Devstat list version number");
486 
487 /*
488  * Allocator for struct devstat structures.  We sub-allocate these from pages
489  * which we get from malloc.  These pages are exported for mmap(2)'ing through
490  * a miniature device driver.
491  */
492 
493 #define statsperpage (PAGE_SIZE / sizeof(struct devstat))
494 
495 static d_mmap_t devstat_mmap;
496 
497 static struct cdevsw devstat_cdevsw = {
498 	.d_version =	D_VERSION,
499 	.d_flags =	D_NEEDGIANT,
500 	.d_mmap =	devstat_mmap,
501 	.d_name =	"devstat",
502 };
503 
504 struct statspage {
505 	TAILQ_ENTRY(statspage)	list;
506 	struct devstat		*stat;
507 	u_int			nfree;
508 };
509 
510 static TAILQ_HEAD(, statspage)	pagelist = TAILQ_HEAD_INITIALIZER(pagelist);
511 static MALLOC_DEFINE(M_DEVSTAT, "devstat", "Device statistics");
512 
513 static int
514 devstat_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
515     int nprot, vm_memattr_t *memattr)
516 {
517 	struct statspage *spp;
518 
519 	if (nprot != VM_PROT_READ)
520 		return (-1);
521 	TAILQ_FOREACH(spp, &pagelist, list) {
522 		if (offset == 0) {
523 			*paddr = vtophys(spp->stat);
524 			return (0);
525 		}
526 		offset -= PAGE_SIZE;
527 	}
528 	return (-1);
529 }
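
/*
 * Illustrative sketch of how these pages are reached from userland
 * (hypothetical; not part of this file): each statspage is exposed at the
 * corresponding page offset of the devstat device, read-only, so mapping
 * page "i" looks like
 *
 *	fd = open("/dev/" DEVSTAT_DEVICE_NAME, O_RDONLY);
 *	p = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_SHARED, fd,
 *	    i * PAGE_SIZE);
 *
 * with statsperpage devstat entries laid out back to back in each page.
 */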
530 
531 static struct devstat *
532 devstat_alloc(void)
533 {
534 	struct devstat *dsp;
535 	struct statspage *spp, *spp2;
536 	u_int u;
537 	static int once;
538 
539 	mtx_assert(&devstat_mutex, MA_NOTOWNED);
540 	if (!once) {
541 		make_dev_credf(MAKEDEV_ETERNAL | MAKEDEV_CHECKNAME,
542 		    &devstat_cdevsw, 0, NULL, UID_ROOT, GID_WHEEL, 0400,
543 		    DEVSTAT_DEVICE_NAME);
544 		once = 1;
545 	}
546 	spp2 = NULL;
547 	mtx_lock(&devstat_mutex);
548 	for (;;) {
549 		TAILQ_FOREACH(spp, &pagelist, list) {
550 			if (spp->nfree > 0)
551 				break;
552 		}
553 		if (spp != NULL)
554 			break;
555 		mtx_unlock(&devstat_mutex);
556 		spp2 = malloc(sizeof *spp, M_DEVSTAT, M_ZERO | M_WAITOK);
557 		spp2->stat = malloc(PAGE_SIZE, M_DEVSTAT, M_ZERO | M_WAITOK);
558 		spp2->nfree = statsperpage;
559 
560 		/*
561 		 * If free statspages were added while the lock was released,
562 		 * just reuse them.
563 		 */
564 		mtx_lock(&devstat_mutex);
565 		TAILQ_FOREACH(spp, &pagelist, list)
566 			if (spp->nfree > 0)
567 				break;
568 		if (spp == NULL) {
569 			spp = spp2;
570 
571 			/*
572 			 * It would make more sense to add the new page at the
573 			 * head, but the order of the list determines the
574 			 * sequence of the mapping, so we can't do that.
575 			 */
576 			TAILQ_INSERT_TAIL(&pagelist, spp, list);
577 		} else
578 			break;
579 	}
580 	dsp = spp->stat;
581 	for (u = 0; u < statsperpage; u++) {
582 		if (dsp->allocated == 0)
583 			break;
584 		dsp++;
585 	}
586 	spp->nfree--;
587 	dsp->allocated = 1;
588 	mtx_unlock(&devstat_mutex);
589 	if (spp2 != NULL && spp2 != spp) {
590 		free(spp2->stat, M_DEVSTAT);
591 		free(spp2, M_DEVSTAT);
592 	}
593 	return (dsp);
594 }
595 
596 static void
597 devstat_free(struct devstat *dsp)
598 {
599 	struct statspage *spp;
600 
601 	mtx_assert(&devstat_mutex, MA_OWNED);
602 	bzero(dsp, sizeof *dsp);
603 	TAILQ_FOREACH(spp, &pagelist, list) {
604 		if (dsp >= spp->stat && dsp < (spp->stat + statsperpage)) {
605 			spp->nfree++;
606 			return;
607 		}
608 	}
609 }
610 
611 SYSCTL_INT(_debug_sizeof, OID_AUTO, devstat, CTLFLAG_RD,
612     NULL, sizeof(struct devstat), "sizeof(struct devstat)");
613