xref: /freebsd/sys/kern/subr_devstat.c (revision 51e235148a4becba94e824a44bd69687644a7f56)
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/disk.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/devicestat.h>
#include <sys/sdt.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/conf.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/atomic.h>

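/*
 * These SDT probes back DTrace's "io" provider; io:::start and io:::done
 * fire through the DTRACE_DEVSTAT_BIO_* macros below as each bio is
 * accounted.  For example (a hedged illustration, not part of this file),
 * a one-liner such as
 *	dtrace -n 'io:::start { @[execname] = count(); }'
 * counts I/O starts per process.
 */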
SDT_PROVIDER_DEFINE(io);

SDT_PROBE_DEFINE2(io, , , start, "struct bio *", "struct devstat *");
SDT_PROBE_DEFINE2(io, , , done, "struct bio *", "struct devstat *");

#define	DTRACE_DEVSTAT_BIO_START()	SDT_PROBE2(io, , , start, bp, ds)
#define	DTRACE_DEVSTAT_BIO_DONE()	SDT_PROBE2(io, , , done, bp, ds)

static int devstat_num_devs;
static long devstat_generation = 1;
static int devstat_version = DEVSTAT_VERSION;
static int devstat_current_devnumber;
static struct mtx devstat_mutex;
MTX_SYSINIT(devstat_mutex, &devstat_mutex, "devstat", MTX_DEF);

static struct devstatlist device_statq = STAILQ_HEAD_INITIALIZER(device_statq);
static struct devstat *devstat_alloc(void);
static void devstat_free(struct devstat *);
static void devstat_add_entry(struct devstat *ds, const void *dev_name,
		       int unit_number, uint32_t block_size,
		       devstat_support_flags flags,
		       devstat_type_flags device_type,
		       devstat_priority priority);

/*
 * Allocate a devstat and initialize it
 */
struct devstat *
devstat_new_entry(const void *dev_name,
		  int unit_number, uint32_t block_size,
		  devstat_support_flags flags,
		  devstat_type_flags device_type,
		  devstat_priority priority)
{
	struct devstat *ds;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);

	ds = devstat_alloc();
	mtx_lock(&devstat_mutex);
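	/*
	 * A unit number of -1 marks an anonymous entry: it is stamped and
	 * counted in the generation below, but never linked onto
	 * device_statq, so it is not exported by the sysctl handler.
	 */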
	if (unit_number == -1) {
		ds->unit_number = unit_number;
		ds->id = dev_name;
		binuptime(&ds->creation_time);
		devstat_generation++;
	} else {
		devstat_add_entry(ds, dev_name, unit_number, block_size,
				  flags, device_type, priority);
	}
	mtx_unlock(&devstat_mutex);
	return (ds);
}

/*
 * Take a malloced and zeroed devstat structure given to us, fill it in
 * and add it to the queue of devices.
 */
static void
devstat_add_entry(struct devstat *ds, const void *dev_name,
		  int unit_number, uint32_t block_size,
		  devstat_support_flags flags,
		  devstat_type_flags device_type,
		  devstat_priority priority)
{
	struct devstatlist *devstat_head;
	struct devstat *ds_tmp;

	mtx_assert(&devstat_mutex, MA_OWNED);
	devstat_num_devs++;

	devstat_head = &device_statq;

	/*
	 * Priority sort.  Each driver passes in its priority when it adds
	 * its devstat entry.  Drivers are sorted first by priority, and
	 * then by probe order.
	 *
	 * For the first device, we just insert it, since the priority
	 * doesn't really matter yet.  Subsequent devices are inserted into
	 * the list using the order outlined above.
	 */
	if (devstat_num_devs == 1)
		STAILQ_INSERT_TAIL(devstat_head, ds, dev_links);
	else {
		STAILQ_FOREACH(ds_tmp, devstat_head, dev_links) {
			struct devstat *ds_next;

			ds_next = STAILQ_NEXT(ds_tmp, dev_links);

			/*
			 * If we find a break between higher and lower
			 * priority items, and if this item fits in the
			 * break, insert it.  This also applies if the
			 * "lower priority item" is the end of the list.
			 */
			if ((priority <= ds_tmp->priority)
			 && ((ds_next == NULL)
			   || (priority > ds_next->priority))) {
				STAILQ_INSERT_AFTER(devstat_head, ds_tmp, ds,
						    dev_links);
				break;
			} else if (priority > ds_tmp->priority) {
				/*
				 * If this is the case, we should be able
				 * to insert ourselves at the head of the
				 * list.  If we can't, something is wrong.
				 */
				if (ds_tmp == STAILQ_FIRST(devstat_head)) {
					STAILQ_INSERT_HEAD(devstat_head,
							   ds, dev_links);
					break;
				} else {
					STAILQ_INSERT_TAIL(devstat_head,
							   ds, dev_links);
					printf("devstat_add_entry: HELP! "
					       "sorting problem detected "
					       "for name %p unit %d\n",
					       dev_name, unit_number);
					break;
				}
			}
		}
	}

	ds->device_number = devstat_current_devnumber++;
	ds->unit_number = unit_number;
	strlcpy(ds->device_name, dev_name, DEVSTAT_NAME_LEN);
	ds->block_size = block_size;
	ds->flags = flags;
	ds->device_type = device_type;
	ds->priority = priority;
	binuptime(&ds->creation_time);
	devstat_generation++;
}

/*
 * Remove a devstat structure from the list of devices.
 */
void
devstat_remove_entry(struct devstat *ds)
{
	struct devstatlist *devstat_head;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);
	if (ds == NULL)
		return;

	mtx_lock(&devstat_mutex);

	devstat_head = &device_statq;

	/* Remove this entry from the devstat queue */
	atomic_add_acq_int(&ds->sequence1, 1);
	if (ds->unit_number != -1) {
		devstat_num_devs--;
		STAILQ_REMOVE(devstat_head, ds, devstat, dev_links);
	}
	devstat_free(ds);
	devstat_generation++;
	mtx_unlock(&devstat_mutex);
}

/*
 * Record a transaction start.
 *
 * See comments for devstat_end_transaction().  Ordering is very important
 * here.
 */
void
devstat_start_transaction(struct devstat *ds, const struct bintime *now)
{

	/* sanity check */
	if (ds == NULL)
		return;

	atomic_add_acq_int(&ds->sequence1, 1);
	/*
	 * We only want to set the start time when we are going from idle
	 * to busy.  The start time is really the start of the latest busy
	 * period.
	 */
	if (atomic_fetchadd_int(&ds->start_count, 1) == ds->end_count) {
		if (now != NULL)
			ds->busy_from = *now;
		else
			binuptime(&ds->busy_from);
	}
	atomic_add_rel_int(&ds->sequence0, 1);
}

void
devstat_start_transaction_bio(struct devstat *ds, struct bio *bp)
{

	/* sanity check */
	if (ds == NULL)
		return;

	binuptime(&bp->bio_t0);
	devstat_start_transaction_bio_t0(ds, bp);
}

void
devstat_start_transaction_bio_t0(struct devstat *ds, struct bio *bp)
{

	/* sanity check */
	if (ds == NULL)
		return;

	devstat_start_transaction(ds, &bp->bio_t0);
	DTRACE_DEVSTAT_BIO_START();
}

/*
 * Record the ending of a transaction, and increment the various counters.
 *
 * Ordering in this function, and in devstat_start_transaction() is VERY
 * important.  The idea here is to run without locks, so we are very
 * careful to only modify some fields on the way "down" (i.e. at
 * transaction start) and some fields on the way "up" (i.e. at transaction
 * completion).  One exception is busy_from, which we only modify in
 * devstat_start_transaction() when there are no outstanding transactions,
 * and thus it can't be modified in devstat_end_transaction()
 * simultaneously.
 *
 * The sequence0 and sequence1 fields are provided to enable an application
 * spying on the structures with mmap(2) to tell when a structure is in a
 * consistent state or not.
 *
 * For this to work 100% reliably, it is important that the two fields
 * are at opposite ends of the structure and that they are incremented
 * in the opposite order of how a memcpy(3) in userland would copy them.
 * We assume that the copying happens front to back, but there is actually
 * no way short of writing your own memcpy(3) replacement to guarantee
 * this will be the case.
 *
 * In addition to this, since they act as a kind of lock, they must be
 * updated with atomic instructions using appropriate memory barriers.
 */
void
devstat_end_transaction(struct devstat *ds, uint32_t bytes,
			devstat_tag_type tag_type, devstat_trans_flags flags,
			const struct bintime *now, const struct bintime *then)
{
	struct bintime dt, lnow;

	/* sanity check */
	if (ds == NULL)
		return;

	if (now == NULL) {
		binuptime(&lnow);
		now = &lnow;
	}

	atomic_add_acq_int(&ds->sequence1, 1);
	/* Update byte and operations counts */
	ds->bytes[flags] += bytes;
	ds->operations[flags]++;

	/*
	 * Keep a count of the various tag types sent.
	 */
	if ((ds->flags & DEVSTAT_NO_ORDERED_TAGS) == 0 &&
	    tag_type != DEVSTAT_TAG_NONE)
		ds->tag_types[tag_type]++;

	if (then != NULL) {
		/* Update duration of operations */
		dt = *now;
		bintime_sub(&dt, then);
		bintime_add(&ds->duration[flags], &dt);
	}

	/* Accumulate busy time */
	dt = *now;
	bintime_sub(&dt, &ds->busy_from);
	bintime_add(&ds->busy_time, &dt);
	ds->busy_from = *now;

	ds->end_count++;
	atomic_add_rel_int(&ds->sequence0, 1);
}
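
/*
 * A minimal sketch (not the libdevstat(3) implementation) of how a
 * userland consumer of the mmap(2)'ed statistics pages can use the
 * sequence fields described above: copy the structure front to back and
 * retry until the two sequence numbers in the copy agree.  "mapped_entry"
 * is a hypothetical pointer into the read-only mapping.
 *
 *	struct devstat snap;
 *
 *	do {
 *		memcpy(&snap, mapped_entry, sizeof(snap));
 *	} while (snap.sequence0 != snap.sequence1);
 */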

void
devstat_end_transaction_bio(struct devstat *ds, const struct bio *bp)
{

	devstat_end_transaction_bio_bt(ds, bp, NULL);
}

void
devstat_end_transaction_bio_bt(struct devstat *ds, const struct bio *bp,
    const struct bintime *now)
{
	devstat_trans_flags flg;
	devstat_tag_type tag;

	/* sanity check */
	if (ds == NULL)
		return;

	if (bp->bio_flags & BIO_ORDERED)
		tag = DEVSTAT_TAG_ORDERED;
	else
		tag = DEVSTAT_TAG_SIMPLE;
	if (bp->bio_cmd == BIO_DELETE)
		flg = DEVSTAT_FREE;
	else if ((bp->bio_cmd == BIO_READ)
	      || ((bp->bio_cmd == BIO_ZONE)
	       && (bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES)))
		flg = DEVSTAT_READ;
	else if (bp->bio_cmd == BIO_WRITE)
		flg = DEVSTAT_WRITE;
	else
		flg = DEVSTAT_NO_DATA;

	devstat_end_transaction(ds, bp->bio_bcount - bp->bio_resid,
				tag, flg, now, &bp->bio_t0);
	DTRACE_DEVSTAT_BIO_DONE();
}

/*
 * This is the sysctl handler for the devstat package.  The data pushed out
 * on the kern.devstat.all sysctl variable consists of the current devstat
 * generation number, and then an array of devstat structures, one for each
 * device in the system.
 *
 * This is more cryptic than obvious, but basically we neither can nor
 * want to hold the devstat_mutex for any amount of time, so we grab it
 * only when we need to and keep an eye on devstat_generation all the time.
 */
static int
sysctl_devstat(SYSCTL_HANDLER_ARGS)
{
	int error;
	long mygen;
	struct devstat *nds;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);

	/*
	 * XXX devstat_generation should really be "volatile" but that
	 * XXX freaks out the sysctl macro below.  The places where we
	 * XXX change it and inspect it are bracketed in the mutex which
	 * XXX guarantees us proper write barriers.  I don't believe the
	 * XXX compiler is allowed to optimize mygen away across calls
	 * XXX to other functions, so the following is believed to be safe.
	 */
	mygen = devstat_generation;

	error = SYSCTL_OUT(req, &mygen, sizeof(mygen));

	if (devstat_num_devs == 0)
		return(0);

	if (error != 0)
		return (error);

	mtx_lock(&devstat_mutex);
	nds = STAILQ_FIRST(&device_statq);
	if (mygen != devstat_generation)
		error = EBUSY;
	mtx_unlock(&devstat_mutex);

	if (error != 0)
		return (error);

	for (;nds != NULL;) {
		error = SYSCTL_OUT(req, nds, sizeof(struct devstat));
		if (error != 0)
			return (error);
		mtx_lock(&devstat_mutex);
		if (mygen != devstat_generation)
			error = EBUSY;
		else
			nds = STAILQ_NEXT(nds, dev_links);
		mtx_unlock(&devstat_mutex);
		if (error != 0)
			return (error);
	}
	return(error);
}
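
/*
 * A hedged sketch of how userland might consume kern.devstat.all (the
 * real consumer is libdevstat(3)): the buffer starts with the generation
 * number (a long) followed by the devstat structures, and the read is
 * retried when the kernel reports EBUSY because the list changed:
 *
 *	size_t len = 0;
 *	char *buf;
 *
 *	for (;;) {
 *		if (sysctlbyname("kern.devstat.all", NULL, &len, NULL, 0) != 0)
 *			err(1, "sysctl");
 *		buf = malloc(len);
 *		if (sysctlbyname("kern.devstat.all", buf, &len, NULL, 0) == 0)
 *			break;		// generation stable, data complete
 *		if (errno != EBUSY)
 *			err(1, "sysctl");
 *		free(buf);		// list changed underneath us, retry
 *	}
 *
 * A real consumer also has to cope with the size changing between the
 * two calls.
 */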

/*
 * Sysctl entries for devstat.  The first one is a node that all the rest
 * hang off of.
 */
static SYSCTL_NODE(_kern, OID_AUTO, devstat, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "Device Statistics");

SYSCTL_PROC(_kern_devstat, OID_AUTO, all,
    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_devstat, "S,devstat",
    "All devices in the devstat list");
/*
 * Export the number of devices in the system so that userland utilities
 * can determine how much memory to allocate to hold all the devices.
 */
SYSCTL_INT(_kern_devstat, OID_AUTO, numdevs, CTLFLAG_RD,
    &devstat_num_devs, 0, "Number of devices in the devstat list");
SYSCTL_LONG(_kern_devstat, OID_AUTO, generation, CTLFLAG_RD,
    &devstat_generation, 0, "Devstat list generation");
SYSCTL_INT(_kern_devstat, OID_AUTO, version, CTLFLAG_RD,
    &devstat_version, 0, "Devstat list version number");

/*
 * Allocator for struct devstat structures.  We sub-allocate these from pages
 * which we get from malloc.  These pages are exported for mmap(2)'ing through
 * a miniature device driver.
 */

#define statsperpage (PAGE_SIZE / sizeof(struct devstat))

static d_ioctl_t devstat_ioctl;
static d_mmap_t devstat_mmap;

static struct cdevsw devstat_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	devstat_ioctl,
	.d_mmap =	devstat_mmap,
	.d_name =	"devstat",
};

struct statspage {
	TAILQ_ENTRY(statspage)	list;
	struct devstat		*stat;
	u_int			nfree;
};

static size_t pagelist_pages = 0;
static TAILQ_HEAD(, statspage)	pagelist = TAILQ_HEAD_INITIALIZER(pagelist);
static MALLOC_DEFINE(M_DEVSTAT, "devstat", "Device statistics");

static int
devstat_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	int error = ENOTTY;

	switch (cmd) {
	case DIOCGMEDIASIZE:
		error = 0;
		*(off_t *)data = pagelist_pages * PAGE_SIZE;
		break;
	}

	return (error);
}

static int
devstat_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int nprot, vm_memattr_t *memattr)
{
	struct statspage *spp;

	if (nprot != VM_PROT_READ)
		return (-1);
	mtx_lock(&devstat_mutex);
	TAILQ_FOREACH(spp, &pagelist, list) {
		if (offset == 0) {
			*paddr = vtophys(spp->stat);
			mtx_unlock(&devstat_mutex);
			return (0);
		}
		offset -= PAGE_SIZE;
	}
	mtx_unlock(&devstat_mutex);
	return (-1);
}
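
/*
 * A hedged userland sketch of mapping these pages read-only.  The device
 * created in devstat_alloc() below is /dev/devstat (DEVSTAT_DEVICE_NAME);
 * DIOCGMEDIASIZE reports how many bytes are currently exported, and
 * devstat_mmap() only allows PROT_READ mappings:
 *
 *	int fd = open("/dev/devstat", O_RDONLY);
 *	off_t size;
 *
 *	if (fd == -1 || ioctl(fd, DIOCGMEDIASIZE, &size) == -1)
 *		err(1, "devstat");
 *	void *p = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, 0);
 */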

static struct devstat *
devstat_alloc(void)
{
	struct devstat *dsp;
	struct statspage *spp, *spp2;
	u_int u;
	static int once;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);
	if (!once) {
		make_dev_credf(MAKEDEV_ETERNAL | MAKEDEV_CHECKNAME,
		    &devstat_cdevsw, 0, NULL, UID_ROOT, GID_WHEEL, 0444,
		    DEVSTAT_DEVICE_NAME);
		once = 1;
	}
	spp2 = NULL;
	mtx_lock(&devstat_mutex);
	for (;;) {
		TAILQ_FOREACH(spp, &pagelist, list) {
			if (spp->nfree > 0)
				break;
		}
		if (spp != NULL)
			break;
		mtx_unlock(&devstat_mutex);
		spp2 = malloc(sizeof *spp, M_DEVSTAT, M_ZERO | M_WAITOK);
		spp2->stat = malloc(PAGE_SIZE, M_DEVSTAT, M_ZERO | M_WAITOK);
		spp2->nfree = statsperpage;

		/*
		 * If free statspages were added while the lock was released,
		 * just reuse them.
		 */
		mtx_lock(&devstat_mutex);
		TAILQ_FOREACH(spp, &pagelist, list)
			if (spp->nfree > 0)
				break;
		if (spp == NULL) {
			spp = spp2;

			/*
			 * It would make more sense to add the new page at the
			 * head, but the order on the list determines the
			 * sequence of the mapping, so we can't do that.
			 */
			pagelist_pages++;
			TAILQ_INSERT_TAIL(&pagelist, spp, list);
		} else
			break;
	}
	dsp = spp->stat;
	for (u = 0; u < statsperpage; u++) {
		if (dsp->allocated == 0)
			break;
		dsp++;
	}
	spp->nfree--;
	dsp->allocated = 1;
	mtx_unlock(&devstat_mutex);
	if (spp2 != NULL && spp2 != spp) {
		free(spp2->stat, M_DEVSTAT);
		free(spp2, M_DEVSTAT);
	}
	return (dsp);
}

static void
devstat_free(struct devstat *dsp)
{
	struct statspage *spp;

	mtx_assert(&devstat_mutex, MA_OWNED);
	bzero(dsp, sizeof *dsp);
	TAILQ_FOREACH(spp, &pagelist, list) {
		if (dsp >= spp->stat && dsp < (spp->stat + statsperpage)) {
			spp->nfree++;
			return;
		}
	}
}

SYSCTL_INT(_debug_sizeof, OID_AUTO, devstat, CTLFLAG_RD,
    SYSCTL_NULL_INT_PTR, sizeof(struct devstat), "sizeof(struct devstat)");
605