xref: /freebsd/sys/kern/kern_conf.c (revision b3aaa0cc21c63d388230c7ef2a80abd631ff20d5)
1 /*-
2  * Copyright (c) 1999-2002 Poul-Henning Kamp
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include <sys/param.h>
31 #include <sys/kernel.h>
32 #include <sys/systm.h>
33 #include <sys/bus.h>
34 #include <sys/bio.h>
35 #include <sys/lock.h>
36 #include <sys/mutex.h>
37 #include <sys/module.h>
38 #include <sys/malloc.h>
39 #include <sys/conf.h>
40 #include <sys/vnode.h>
41 #include <sys/queue.h>
42 #include <sys/poll.h>
43 #include <sys/sx.h>
44 #include <sys/ctype.h>
45 #include <sys/ucred.h>
46 #include <sys/taskqueue.h>
47 #include <machine/stdarg.h>
48 
49 #include <fs/devfs/devfs_int.h>
50 
51 static MALLOC_DEFINE(M_DEVT, "cdev", "cdev storage");
52 
53 struct mtx devmtx;
54 static void destroy_devl(struct cdev *dev);
55 static int destroy_dev_sched_cbl(struct cdev *dev,
56     void (*cb)(void *), void *arg);
57 static struct cdev *make_dev_credv(int flags,
58     struct cdevsw *devsw, int unit,
59     struct ucred *cr, uid_t uid, gid_t gid, int mode, const char *fmt,
60     va_list ap);
61 
62 static struct cdev_priv_list cdevp_free_list =
63     TAILQ_HEAD_INITIALIZER(cdevp_free_list);
64 static SLIST_HEAD(free_cdevsw, cdevsw) cdevsw_gt_post_list =
65     SLIST_HEAD_INITIALIZER();
66 
67 void
68 dev_lock(void)
69 {
70 
71 	mtx_lock(&devmtx);
72 }
73 
74 /*
75  * Free all the memory collected while the cdev mutex was
76  * held.  Since devmtx comes after the system map mutex in the lock
77  * order, free() cannot be called while devmtx is held; freeing is
78  * postponed until the cdev mutex can be dropped.
79  */
80 static void
81 dev_unlock_and_free(void)
82 {
83 	struct cdev_priv_list cdp_free;
84 	struct free_cdevsw csw_free;
85 	struct cdev_priv *cdp;
86 	struct cdevsw *csw;
87 
88 	mtx_assert(&devmtx, MA_OWNED);
89 
90 	/*
91 	 * Make local copies of the list heads while devmtx is
92 	 * held.  Free them later.
93 	 */
94 	TAILQ_INIT(&cdp_free);
95 	TAILQ_CONCAT(&cdp_free, &cdevp_free_list, cdp_list);
96 	csw_free = cdevsw_gt_post_list;
97 	SLIST_INIT(&cdevsw_gt_post_list);
98 
99 	mtx_unlock(&devmtx);
100 
101 	while ((cdp = TAILQ_FIRST(&cdp_free)) != NULL) {
102 		TAILQ_REMOVE(&cdp_free, cdp, cdp_list);
103 		devfs_free(&cdp->cdp_c);
104 	}
105 	while ((csw = SLIST_FIRST(&csw_free)) != NULL) {
106 		SLIST_REMOVE_HEAD(&csw_free, d_postfree_list);
107 		free(csw, M_DEVT);
108 	}
109 }
110 
111 static void
112 dev_free_devlocked(struct cdev *cdev)
113 {
114 	struct cdev_priv *cdp;
115 
116 	mtx_assert(&devmtx, MA_OWNED);
117 	cdp = cdev2priv(cdev);
118 	TAILQ_INSERT_HEAD(&cdevp_free_list, cdp, cdp_list);
119 }
120 
121 static void
122 cdevsw_free_devlocked(struct cdevsw *csw)
123 {
124 
125 	mtx_assert(&devmtx, MA_OWNED);
126 	SLIST_INSERT_HEAD(&cdevsw_gt_post_list, csw, d_postfree_list);
127 }
128 
129 void
130 dev_unlock(void)
131 {
132 
133 	mtx_unlock(&devmtx);
134 }
135 
136 void
137 dev_ref(struct cdev *dev)
138 {
139 
140 	mtx_assert(&devmtx, MA_NOTOWNED);
141 	mtx_lock(&devmtx);
142 	dev->si_refcount++;
143 	mtx_unlock(&devmtx);
144 }
145 
146 void
147 dev_refl(struct cdev *dev)
148 {
149 
150 	mtx_assert(&devmtx, MA_OWNED);
151 	dev->si_refcount++;
152 }
153 
154 void
155 dev_rel(struct cdev *dev)
156 {
157 	int flag = 0;
158 
159 	mtx_assert(&devmtx, MA_NOTOWNED);
160 	dev_lock();
161 	dev->si_refcount--;
162 	KASSERT(dev->si_refcount >= 0,
163 	    ("dev_rel(%s) gave negative count", devtoname(dev)));
164 #if 0
165 	if (dev->si_usecount == 0 &&
166 	    (dev->si_flags & SI_CHEAPCLONE) && (dev->si_flags & SI_NAMED))
167 		;
168 	else
169 #endif
170 	if (dev->si_devsw == NULL && dev->si_refcount == 0) {
171 		LIST_REMOVE(dev, si_list);
172 		flag = 1;
173 	}
174 	dev_unlock();
175 	if (flag)
176 		devfs_free(dev);
177 }
178 
179 struct cdevsw *
180 dev_refthread(struct cdev *dev)
181 {
182 	struct cdevsw *csw;
183 	struct cdev_priv *cdp;
184 
185 	mtx_assert(&devmtx, MA_NOTOWNED);
186 	dev_lock();
187 	csw = dev->si_devsw;
188 	if (csw != NULL) {
189 		cdp = cdev2priv(dev);
190 		if ((cdp->cdp_flags & CDP_SCHED_DTR) == 0)
191 			dev->si_threadcount++;
192 		else
193 			csw = NULL;
194 	}
195 	dev_unlock();
196 	return (csw);
197 }
198 
199 struct cdevsw *
200 devvn_refthread(struct vnode *vp, struct cdev **devp)
201 {
202 	struct cdevsw *csw;
203 	struct cdev_priv *cdp;
204 
205 	mtx_assert(&devmtx, MA_NOTOWNED);
206 	csw = NULL;
207 	dev_lock();
208 	*devp = vp->v_rdev;
209 	if (*devp != NULL) {
210 		cdp = cdev2priv(*devp);
211 		if ((cdp->cdp_flags & CDP_SCHED_DTR) == 0) {
212 			csw = (*devp)->si_devsw;
213 			if (csw != NULL)
214 				(*devp)->si_threadcount++;
215 		}
216 	}
217 	dev_unlock();
218 	return (csw);
219 }
220 
221 void
222 dev_relthread(struct cdev *dev)
223 {
224 
225 	mtx_assert(&devmtx, MA_NOTOWNED);
226 	dev_lock();
227 	KASSERT(dev->si_threadcount > 0,
228 	    ("%s threadcount is wrong", dev->si_name));
229 	dev->si_threadcount--;
230 	dev_unlock();
231 }
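
/*
 * Illustrative sketch, not part of this file: dev_refthread()/dev_relthread()
 * above are how a caller outside the cdev layer can safely invoke a cdevsw
 * method, exactly as the giant_*() wrappers below do.  The function name and
 * its arguments here are hypothetical.
 */
#if 0
static int
call_dev_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	struct cdevsw *dsw;
	int error;

	dsw = dev_refthread(dev);	/* pins si_devsw, bumps si_threadcount */
	if (dsw == NULL)
		return (ENXIO);		/* device gone or scheduled for destruction */
	error = dsw->d_ioctl(dev, cmd, data, fflag, td);
	dev_relthread(dev);		/* drop si_threadcount so destroy_devl() can drain */
	return (error);
}
#endif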
232 
233 int
234 nullop(void)
235 {
236 
237 	return (0);
238 }
239 
240 int
241 eopnotsupp(void)
242 {
243 
244 	return (EOPNOTSUPP);
245 }
246 
247 static int
248 enxio(void)
249 {
250 	return (ENXIO);
251 }
252 
253 static int
254 enodev(void)
255 {
256 	return (ENODEV);
257 }
258 
259 /* Define a dead_cdevsw for use when devices leave unexpectedly. */
260 
261 #define dead_open	(d_open_t *)enxio
262 #define dead_close	(d_close_t *)enxio
263 #define dead_read	(d_read_t *)enxio
264 #define dead_write	(d_write_t *)enxio
265 #define dead_ioctl	(d_ioctl_t *)enxio
266 #define dead_poll	(d_poll_t *)enodev
267 #define dead_mmap	(d_mmap_t *)enodev
268 
269 static void
270 dead_strategy(struct bio *bp)
271 {
272 
273 	biofinish(bp, NULL, ENXIO);
274 }
275 
276 #define dead_dump	(dumper_t *)enxio
277 #define dead_kqfilter	(d_kqfilter_t *)enxio
278 
279 static struct cdevsw dead_cdevsw = {
280 	.d_version =	D_VERSION,
281 	.d_flags =	D_NEEDGIANT, /* XXX: does dead_strategy need this ? */
282 	.d_open =	dead_open,
283 	.d_close =	dead_close,
284 	.d_read =	dead_read,
285 	.d_write =	dead_write,
286 	.d_ioctl =	dead_ioctl,
287 	.d_poll =	dead_poll,
288 	.d_mmap =	dead_mmap,
289 	.d_strategy =	dead_strategy,
290 	.d_name =	"dead",
291 	.d_dump =	dead_dump,
292 	.d_kqfilter =	dead_kqfilter
293 };
294 
295 /* Default methods if driver does not specify method */
296 
297 #define null_open	(d_open_t *)nullop
298 #define null_close	(d_close_t *)nullop
299 #define no_read		(d_read_t *)enodev
300 #define no_write	(d_write_t *)enodev
301 #define no_ioctl	(d_ioctl_t *)enodev
302 #define no_mmap		(d_mmap_t *)enodev
303 #define no_kqfilter	(d_kqfilter_t *)enodev
304 
305 static void
306 no_strategy(struct bio *bp)
307 {
308 
309 	biofinish(bp, NULL, ENODEV);
310 }
311 
312 static int
313 no_poll(struct cdev *dev __unused, int events, struct thread *td __unused)
314 {
315 	/*
316 	 * Return true for read/write.  If the user asked for something
317 	 * special, return POLLNVAL, so that clients have a way of
318 	 * determining reliably whether or not the extended
319 	 * functionality is present without hard-coding knowledge
320 	 * of specific filesystem implementations.
321 	 * Stay in sync with vop_nopoll().
322 	 */
323 	if (events & ~POLLSTANDARD)
324 		return (POLLNVAL);
325 
326 	return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
327 }
328 
329 #define no_dump		(dumper_t *)enodev
330 
331 static int
332 giant_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
333 {
334 	struct cdevsw *dsw;
335 	int retval;
336 
337 	dsw = dev_refthread(dev);
338 	if (dsw == NULL)
339 		return (ENXIO);
340 	mtx_lock(&Giant);
341 	retval = dsw->d_gianttrick->d_open(dev, oflags, devtype, td);
342 	mtx_unlock(&Giant);
343 	dev_relthread(dev);
344 	return (retval);
345 }
346 
347 static int
348 giant_fdopen(struct cdev *dev, int oflags, struct thread *td, struct file *fp)
349 {
350 	struct cdevsw *dsw;
351 	int retval;
352 
353 	dsw = dev_refthread(dev);
354 	if (dsw == NULL)
355 		return (ENXIO);
356 	mtx_lock(&Giant);
357 	retval = dsw->d_gianttrick->d_fdopen(dev, oflags, td, fp);
358 	mtx_unlock(&Giant);
359 	dev_relthread(dev);
360 	return (retval);
361 }
362 
363 static int
364 giant_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
365 {
366 	struct cdevsw *dsw;
367 	int retval;
368 
369 	dsw = dev_refthread(dev);
370 	if (dsw == NULL)
371 		return (ENXIO);
372 	mtx_lock(&Giant);
373 	retval = dsw->d_gianttrick->d_close(dev, fflag, devtype, td);
374 	mtx_unlock(&Giant);
375 	dev_relthread(dev);
376 	return (retval);
377 }
378 
379 static void
380 giant_strategy(struct bio *bp)
381 {
382 	struct cdevsw *dsw;
383 	struct cdev *dev;
384 
385 	dev = bp->bio_dev;
386 	dsw = dev_refthread(dev);
387 	if (dsw == NULL) {
388 		biofinish(bp, NULL, ENXIO);
389 		return;
390 	}
391 	mtx_lock(&Giant);
392 	dsw->d_gianttrick->d_strategy(bp);
393 	mtx_unlock(&Giant);
394 	dev_relthread(dev);
395 }
396 
397 static int
398 giant_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
399 {
400 	struct cdevsw *dsw;
401 	int retval;
402 
403 	dsw = dev_refthread(dev);
404 	if (dsw == NULL)
405 		return (ENXIO);
406 	mtx_lock(&Giant);
407 	retval = dsw->d_gianttrick->d_ioctl(dev, cmd, data, fflag, td);
408 	mtx_unlock(&Giant);
409 	dev_relthread(dev);
410 	return (retval);
411 }
412 
413 static int
414 giant_read(struct cdev *dev, struct uio *uio, int ioflag)
415 {
416 	struct cdevsw *dsw;
417 	int retval;
418 
419 	dsw = dev_refthread(dev);
420 	if (dsw == NULL)
421 		return (ENXIO);
422 	mtx_lock(&Giant);
423 	retval = dsw->d_gianttrick->d_read(dev, uio, ioflag);
424 	mtx_unlock(&Giant);
425 	dev_relthread(dev);
426 	return (retval);
427 }
428 
429 static int
430 giant_write(struct cdev *dev, struct uio *uio, int ioflag)
431 {
432 	struct cdevsw *dsw;
433 	int retval;
434 
435 	dsw = dev_refthread(dev);
436 	if (dsw == NULL)
437 		return (ENXIO);
438 	mtx_lock(&Giant);
439 	retval = dsw->d_gianttrick->d_write(dev, uio, ioflag);
440 	mtx_unlock(&Giant);
441 	dev_relthread(dev);
442 	return (retval);
443 }
444 
445 static int
446 giant_poll(struct cdev *dev, int events, struct thread *td)
447 {
448 	struct cdevsw *dsw;
449 	int retval;
450 
451 	dsw = dev_refthread(dev);
452 	if (dsw == NULL)
453 		return (ENXIO);
454 	mtx_lock(&Giant);
455 	retval = dsw->d_gianttrick->d_poll(dev, events, td);
456 	mtx_unlock(&Giant);
457 	dev_relthread(dev);
458 	return (retval);
459 }
460 
461 static int
462 giant_kqfilter(struct cdev *dev, struct knote *kn)
463 {
464 	struct cdevsw *dsw;
465 	int retval;
466 
467 	dsw = dev_refthread(dev);
468 	if (dsw == NULL)
469 		return (ENXIO);
470 	mtx_lock(&Giant);
471 	retval = dsw->d_gianttrick->d_kqfilter(dev, kn);
472 	mtx_unlock(&Giant);
473 	dev_relthread(dev);
474 	return (retval);
475 }
476 
477 static int
478 giant_mmap(struct cdev *dev, vm_offset_t offset, vm_paddr_t *paddr, int nprot)
479 {
480 	struct cdevsw *dsw;
481 	int retval;
482 
483 	dsw = dev_refthread(dev);
484 	if (dsw == NULL)
485 		return (ENXIO);
486 	mtx_lock(&Giant);
487 	retval = dsw->d_gianttrick->d_mmap(dev, offset, paddr, nprot);
488 	mtx_unlock(&Giant);
489 	dev_relthread(dev);
490 	return (retval);
491 }
492 
493 
494 static void
495 notify(struct cdev *dev, const char *ev)
496 {
497 	static const char prefix[] = "cdev=";
498 	char *data;
499 	int namelen;
500 
501 	if (cold)
502 		return;
503 	namelen = strlen(dev->si_name);
504 	data = malloc(namelen + sizeof(prefix), M_TEMP, M_WAITOK);
505 	memcpy(data, prefix, sizeof(prefix) - 1);
506 	memcpy(data + sizeof(prefix) - 1, dev->si_name, namelen + 1);
507 	devctl_notify("DEVFS", "CDEV", ev, data);
508 	free(data, M_TEMP);
509 }
510 
511 static void
512 notify_create(struct cdev *dev)
513 {
514 
515 	notify(dev, "CREATE");
516 }
517 
518 static void
519 notify_destroy(struct cdev *dev)
520 {
521 
522 	notify(dev, "DESTROY");
523 }
524 
525 static struct cdev *
526 newdev(struct cdevsw *csw, int y, struct cdev *si)
527 {
528 	struct cdev *si2;
529 	dev_t	udev;
530 
531 	mtx_assert(&devmtx, MA_OWNED);
532 	udev = y;
533 	if (csw->d_flags & D_NEEDMINOR) {
534 		/* We may want to return an existing device */
535 		LIST_FOREACH(si2, &csw->d_devs, si_list) {
536 			if (si2->si_drv0 == udev) {
537 				dev_free_devlocked(si);
538 				return (si2);
539 			}
540 		}
541 	}
542 	si->si_drv0 = udev;
543 	si->si_devsw = csw;
544 	LIST_INSERT_HEAD(&csw->d_devs, si, si_list);
545 	return (si);
546 }
547 
548 static void
549 fini_cdevsw(struct cdevsw *devsw)
550 {
551 	struct cdevsw *gt;
552 
553 	if (devsw->d_gianttrick != NULL) {
554 		gt = devsw->d_gianttrick;
555 		memcpy(devsw, gt, sizeof *devsw);
556 		cdevsw_free_devlocked(gt);
557 		devsw->d_gianttrick = NULL;
558 	}
559 	devsw->d_flags &= ~D_INIT;
560 }
561 
562 static void
563 prep_cdevsw(struct cdevsw *devsw)
564 {
565 	struct cdevsw *dsw2;
566 
567 	mtx_assert(&devmtx, MA_OWNED);
568 	if (devsw->d_flags & D_INIT)
569 		return;
570 	if (devsw->d_flags & D_NEEDGIANT) {
571 		dev_unlock();
572 		dsw2 = malloc(sizeof *dsw2, M_DEVT, M_WAITOK);
573 		dev_lock();
574 	} else
575 		dsw2 = NULL;
576 	if (devsw->d_flags & D_INIT) {
577 		if (dsw2 != NULL)
578 			cdevsw_free_devlocked(dsw2);
579 		return;
580 	}
581 
582 	if (devsw->d_version != D_VERSION_01) {
583 		printf(
584 		    "WARNING: Device driver \"%s\" has wrong version %s\n",
585 		    devsw->d_name == NULL ? "???" : devsw->d_name,
586 		    "and is disabled.  Recompile KLD module.");
587 		devsw->d_open = dead_open;
588 		devsw->d_close = dead_close;
589 		devsw->d_read = dead_read;
590 		devsw->d_write = dead_write;
591 		devsw->d_ioctl = dead_ioctl;
592 		devsw->d_poll = dead_poll;
593 		devsw->d_mmap = dead_mmap;
594 		devsw->d_strategy = dead_strategy;
595 		devsw->d_dump = dead_dump;
596 		devsw->d_kqfilter = dead_kqfilter;
597 	}
598 
599 	if (devsw->d_flags & D_NEEDGIANT) {
600 		if (devsw->d_gianttrick == NULL) {
601 			memcpy(dsw2, devsw, sizeof *dsw2);
602 			devsw->d_gianttrick = dsw2;
603 			dsw2 = NULL;
604 		}
605 	}
606 
607 #define FIXUP(member, noop, giant) 				\
608 	do {							\
609 		if (devsw->member == NULL) {			\
610 			devsw->member = noop;			\
611 		} else if (devsw->d_flags & D_NEEDGIANT) {	\
612 			devsw->member = giant;			\
613 		}						\
614 	} while (0)
615 
616 	FIXUP(d_open,		null_open,	giant_open);
617 	FIXUP(d_fdopen,		NULL,		giant_fdopen);
618 	FIXUP(d_close,		null_close,	giant_close);
619 	FIXUP(d_read,		no_read,	giant_read);
620 	FIXUP(d_write,		no_write,	giant_write);
621 	FIXUP(d_ioctl,		no_ioctl,	giant_ioctl);
622 	FIXUP(d_poll,		no_poll,	giant_poll);
623 	FIXUP(d_mmap,		no_mmap,	giant_mmap);
624 	FIXUP(d_strategy,	no_strategy,	giant_strategy);
625 	FIXUP(d_kqfilter,	no_kqfilter,	giant_kqfilter);
626 
627 	if (devsw->d_dump == NULL)	devsw->d_dump = no_dump;
628 
629 	LIST_INIT(&devsw->d_devs);
630 
631 	devsw->d_flags |= D_INIT;
632 
633 	if (dsw2 != NULL)
634 		cdevsw_free_devlocked(dsw2);
635 }
636 
637 struct cdev *
638 make_dev_credv(int flags, struct cdevsw *devsw, int unit,
639     struct ucred *cr, uid_t uid,
640     gid_t gid, int mode, const char *fmt, va_list ap)
641 {
642 	struct cdev *dev;
643 	int i;
644 
645 	dev = devfs_alloc();
646 	dev_lock();
647 	prep_cdevsw(devsw);
648 	dev = newdev(devsw, unit, dev);
649 	if (flags & MAKEDEV_REF)
650 		dev_refl(dev);
651 	if (dev->si_flags & SI_CHEAPCLONE &&
652 	    dev->si_flags & SI_NAMED) {
653 		/*
654 		 * This is allowed as it removes races and generally
655 		 * simplifies cloning devices.
656 		 * XXX: still ??
657 		 */
658 		dev_unlock_and_free();
659 		return (dev);
660 	}
661 	KASSERT(!(dev->si_flags & SI_NAMED),
662 	    ("make_dev() by driver %s on pre-existing device (min=%x, name=%s)",
663 	    devsw->d_name, dev2unit(dev), devtoname(dev)));
664 
665 	i = vsnrprintf(dev->__si_namebuf, sizeof dev->__si_namebuf, 32, fmt, ap);
666 	if (i > (sizeof dev->__si_namebuf - 1)) {
667 		printf("WARNING: Device name truncated! (%s)\n",
668 		    dev->__si_namebuf);
669 	}
670 
671 	dev->si_flags |= SI_NAMED;
672 	if (cr != NULL)
673 		dev->si_cred = crhold(cr);
674 	else
675 		dev->si_cred = NULL;
676 	dev->si_uid = uid;
677 	dev->si_gid = gid;
678 	dev->si_mode = mode;
679 
680 	devfs_create(dev);
681 	clean_unrhdrl(devfs_inos);
682 	dev_unlock_and_free();
683 
684 	notify_create(dev);
685 
686 	return (dev);
687 }
688 
689 struct cdev *
690 make_dev(struct cdevsw *devsw, int unit, uid_t uid, gid_t gid, int mode,
691     const char *fmt, ...)
692 {
693 	struct cdev *dev;
694 	va_list ap;
695 
696 	va_start(ap, fmt);
697 	dev = make_dev_credv(0, devsw, unit, NULL, uid, gid, mode, fmt, ap);
698 	va_end(ap);
699 	return (dev);
700 }
701 
702 struct cdev *
703 make_dev_cred(struct cdevsw *devsw, int unit, struct ucred *cr, uid_t uid,
704     gid_t gid, int mode, const char *fmt, ...)
705 {
706 	struct cdev *dev;
707 	va_list ap;
708 
709 	va_start(ap, fmt);
710 	dev = make_dev_credv(0, devsw, unit, cr, uid, gid, mode, fmt, ap);
711 	va_end(ap);
712 
713 	return (dev);
714 }
715 
716 struct cdev *
717 make_dev_credf(int flags, struct cdevsw *devsw, int unit,
718     struct ucred *cr, uid_t uid,
719     gid_t gid, int mode, const char *fmt, ...)
720 {
721 	struct cdev *dev;
722 	va_list ap;
723 
724 	va_start(ap, fmt);
725 	dev = make_dev_credv(flags, devsw, unit, cr, uid, gid, mode,
726 	    fmt, ap);
727 	va_end(ap);
728 
729 	return (dev);
730 }
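
/*
 * Illustrative sketch, not part of this file: the typical driver-side use of
 * the make_dev()/destroy_dev() pair implemented in this file.  The "echo"
 * driver, its methods and its load/unload hooks are hypothetical; only the
 * cdevsw layout and the make_dev() call follow this file and make_dev(9).
 */
#if 0
static d_open_t		echo_open;
static d_read_t		echo_read;

static struct cdevsw echo_cdevsw = {
	.d_version =	D_VERSION,	/* mandatory; checked in prep_cdevsw() */
	.d_open =	echo_open,
	.d_read =	echo_read,
	.d_name =	"echo",
	/* methods left NULL get the no-op/error defaults from prep_cdevsw() */
};

static struct cdev *echo_dev;

static int
echo_load(void)
{

	echo_dev = make_dev(&echo_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "echo");
	return (0);
}

static void
echo_unload(void)
{

	destroy_dev(echo_dev);	/* may sleep; see destroy_dev_sched() below */
}
#endif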
731 
732 static void
733 dev_dependsl(struct cdev *pdev, struct cdev *cdev)
734 {
735 
736 	cdev->si_parent = pdev;
737 	cdev->si_flags |= SI_CHILD;
738 	LIST_INSERT_HEAD(&pdev->si_children, cdev, si_siblings);
739 }
740 
741 
742 void
743 dev_depends(struct cdev *pdev, struct cdev *cdev)
744 {
745 
746 	dev_lock();
747 	dev_dependsl(pdev, cdev);
748 	dev_unlock();
749 }
750 
751 struct cdev *
752 make_dev_alias(struct cdev *pdev, const char *fmt, ...)
753 {
754 	struct cdev *dev;
755 	va_list ap;
756 	int i;
757 
758 	KASSERT(pdev != NULL, ("NULL pdev"));
759 	dev = devfs_alloc();
760 	dev_lock();
761 	dev->si_flags |= SI_ALIAS;
762 	dev->si_flags |= SI_NAMED;
763 	va_start(ap, fmt);
764 	i = vsnrprintf(dev->__si_namebuf, sizeof dev->__si_namebuf, 32, fmt, ap);
765 	if (i > (sizeof dev->__si_namebuf - 1)) {
766 		printf("WARNING: Device name truncated! (%s)\n",
767 		    dev->__si_namebuf);
768 	}
769 	va_end(ap);
770 
771 	devfs_create(dev);
772 	dev_dependsl(pdev, dev);
773 	clean_unrhdrl(devfs_inos);
774 	dev_unlock();
775 
776 	notify_create(dev);
777 
778 	return (dev);
779 }
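
/*
 * Illustrative sketch, not part of this file: giving an existing device a
 * second name with make_dev_alias().  "echo_dev" refers to the hypothetical
 * device from the sketch above.  Because make_dev_alias() calls
 * dev_dependsl(), the alias is destroyed together with its parent.
 */
#if 0
	struct cdev *alias;

	alias = make_dev_alias(echo_dev, "echo.%d", 0);
#endif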
780 
781 static void
782 destroy_devl(struct cdev *dev)
783 {
784 	struct cdevsw *csw;
785 	struct cdev_privdata *p, *p1;
786 
787 	mtx_assert(&devmtx, MA_OWNED);
788 	KASSERT(dev->si_flags & SI_NAMED,
789 	    ("WARNING: Driver mistake: destroy_dev on %d\n", dev2unit(dev)));
790 
791 	devfs_destroy(dev);
792 
793 	/* Remove name marking */
794 	dev->si_flags &= ~SI_NAMED;
795 
796 	/* If we are a child, remove us from the parents list */
797 	if (dev->si_flags & SI_CHILD) {
798 		LIST_REMOVE(dev, si_siblings);
799 		dev->si_flags &= ~SI_CHILD;
800 	}
801 
802 	/* Kill our children */
803 	while (!LIST_EMPTY(&dev->si_children))
804 		destroy_devl(LIST_FIRST(&dev->si_children));
805 
806 	/* Remove from clone list */
807 	if (dev->si_flags & SI_CLONELIST) {
808 		LIST_REMOVE(dev, si_clone);
809 		dev->si_flags &= ~SI_CLONELIST;
810 	}
811 
812 	dev->si_refcount++;	/* Avoid race with dev_rel() */
813 	csw = dev->si_devsw;
814 	dev->si_devsw = NULL;	/* already NULL for SI_ALIAS */
815 	while (csw != NULL && csw->d_purge != NULL && dev->si_threadcount) {
816 		csw->d_purge(dev);
817 		msleep(csw, &devmtx, PRIBIO, "devprg", hz/10);
818 		if (dev->si_threadcount)
819 			printf("Still %lu threads in %s\n",
820 			    dev->si_threadcount, devtoname(dev));
821 	}
822 	while (dev->si_threadcount != 0) {
823 		/* Use unique dummy wait ident */
824 		msleep(&csw, &devmtx, PRIBIO, "devdrn", hz / 10);
825 	}
826 
827 	dev_unlock();
828 	notify_destroy(dev);
829 	mtx_lock(&cdevpriv_mtx);
830 	LIST_FOREACH_SAFE(p, &cdev2priv(dev)->cdp_fdpriv, cdpd_list, p1) {
831 		devfs_destroy_cdevpriv(p);
832 		mtx_lock(&cdevpriv_mtx);
833 	}
834 	mtx_unlock(&cdevpriv_mtx);
835 	dev_lock();
836 
837 	dev->si_drv1 = 0;
838 	dev->si_drv2 = 0;
839 	bzero(&dev->__si_u, sizeof(dev->__si_u));
840 
841 	if (!(dev->si_flags & SI_ALIAS)) {
842 		/* Remove from cdevsw list */
843 		LIST_REMOVE(dev, si_list);
844 
845 		/* If cdevsw has no more struct cdev *'s, clean it */
846 		if (LIST_EMPTY(&csw->d_devs)) {
847 			fini_cdevsw(csw);
848 			wakeup(&csw->d_devs);
849 		}
850 	}
851 	dev->si_flags &= ~SI_ALIAS;
852 	dev->si_refcount--;	/* Avoid race with dev_rel() */
853 
854 	if (dev->si_refcount > 0) {
855 		LIST_INSERT_HEAD(&dead_cdevsw.d_devs, dev, si_list);
856 	} else {
857 		dev_free_devlocked(dev);
858 	}
859 }
860 
861 void
862 destroy_dev(struct cdev *dev)
863 {
864 
865 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "destroy_dev");
866 	dev_lock();
867 	destroy_devl(dev);
868 	dev_unlock_and_free();
869 }
870 
871 const char *
872 devtoname(struct cdev *dev)
873 {
874 	char *p;
875 	struct cdevsw *csw;
876 	int mynor;
877 
878 	if (dev->si_name[0] == '#' || dev->si_name[0] == '\0') {
879 		p = dev->si_name;
880 		csw = dev_refthread(dev);
881 		if (csw != NULL) {
882 			sprintf(p, "(%s)", csw->d_name);
883 			dev_relthread(dev);
884 		}
885 		p += strlen(p);
886 		mynor = dev2unit(dev);
887 		if (mynor < 0 || mynor > 255)
888 			sprintf(p, "/%#x", (u_int)mynor);
889 		else
890 			sprintf(p, "/%d", mynor);
891 	}
892 	return (dev->si_name);
893 }
894 
895 int
896 dev_stdclone(char *name, char **namep, const char *stem, int *unit)
897 {
898 	int u, i;
899 
900 	i = strlen(stem);
901 	if (bcmp(stem, name, i) != 0)
902 		return (0);
903 	if (!isdigit(name[i]))
904 		return (0);
905 	u = 0;
906 	if (name[i] == '0' && isdigit(name[i+1]))
907 		return (0);
908 	while (isdigit(name[i])) {
909 		u *= 10;
910 		u += name[i++] - '0';
911 	}
912 	if (u > 0xffffff)
913 		return (0);
914 	*unit = u;
915 	if (namep)
916 		*namep = &name[i];
917 	if (name[i])
918 		return (2);
919 	return (1);
920 }
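
/*
 * Illustrative sketch, not part of this file: how a cloning driver might use
 * dev_stdclone() above to parse a devfs lookup name such as "tap11".  The
 * "tap" stem is only an example.
 */
#if 0
	int unit;

	if (dev_stdclone(name, NULL, "tap", &unit) != 1)
		return;		/* not "tap<unit>", or trailing characters present */
	/* For "tap11", unit is now 11; create or look up that unit. */
#endif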
921 
922 /*
923  * Helper functions for cloning device drivers.
924  *
925  * The objective here is to make it unnecessary for the device drivers to
926  * use rman or similar to manage their unit number space.  Due to the way
927  * we do "on-demand" devices, using rman or other "private" methods
928  * will be very tricky to lock down properly once we lock down this file.
929  *
930  * Instead we give the drivers these routines which puts the struct cdev *'s
931  * that are to be managed on their own list, and gives the driver the ability
932  * to ask for the first free unit number or a given specified unit number.
933  *
934  * In addition these routines support paired devices (pty, nmdm and similar)
935  * by respecting a number of "flag" bits in the minor number.
936  *
937  */
938 
939 struct clonedevs {
940 	LIST_HEAD(,cdev)	head;
941 };
942 
943 void
944 clone_setup(struct clonedevs **cdp)
945 {
946 
947 	*cdp = malloc(sizeof **cdp, M_DEVBUF, M_WAITOK | M_ZERO);
948 	LIST_INIT(&(*cdp)->head);
949 }
950 
951 int
952 clone_create(struct clonedevs **cdp, struct cdevsw *csw, int *up, struct cdev **dp, int extra)
953 {
954 	struct clonedevs *cd;
955 	struct cdev *dev, *ndev, *dl, *de;
956 	int unit, low, u;
957 
958 	KASSERT(*cdp != NULL,
959 	    ("clone_setup() not called in driver \"%s\"", csw->d_name));
960 	KASSERT(!(extra & CLONE_UNITMASK),
961 	    ("Illegal extra bits (0x%x) in clone_create", extra));
962 	KASSERT(*up <= CLONE_UNITMASK,
963 	    ("Too high unit (0x%x) in clone_create", *up));
964 	KASSERT(csw->d_flags & D_NEEDMINOR,
965 	    ("clone_create() on cdevsw without minor numbers"));
966 
967 
968 	/*
969 	 * Search the list for a lot of things in one go:
970 	 *   A preexisting match is returned immediately.
971 	 *   The lowest free unit number if we are passed -1, and the place
972 	 *	 in the list where we should insert that new element.
973 	 *   The place to insert a specified unit number, which may be
974 	 *       the end of the list.
975 	 */
976 	unit = *up;
977 	ndev = devfs_alloc();
978 	dev_lock();
979 	prep_cdevsw(csw);
980 	low = extra;
981 	de = dl = NULL;
982 	cd = *cdp;
983 	LIST_FOREACH(dev, &cd->head, si_clone) {
984 		KASSERT(dev->si_flags & SI_CLONELIST,
985 		    ("Dev %p(%s) should be on clonelist", dev, dev->si_name));
986 		u = dev2unit(dev);
987 		if (u == (unit | extra)) {
988 			*dp = dev;
989 			dev_unlock();
990 			devfs_free(ndev);
991 			return (0);
992 		}
993 		if (unit == -1 && u == low) {
994 			low++;
995 			de = dev;
996 			continue;
997 		} else if (u < (unit | extra)) {
998 			de = dev;
999 			continue;
1000 		} else if (u > (unit | extra)) {
1001 			dl = dev;
1002 			break;
1003 		}
1004 	}
1005 	if (unit == -1)
1006 		unit = low & CLONE_UNITMASK;
1007 	dev = newdev(csw, unit | extra, ndev);
1008 	if (dev->si_flags & SI_CLONELIST) {
1009 		printf("dev %p (%s) is on clonelist\n", dev, dev->si_name);
1010 		printf("unit=%d, low=%d, extra=0x%x\n", unit, low, extra);
1011 		LIST_FOREACH(dev, &cd->head, si_clone) {
1012 			printf("\t%p %s\n", dev, dev->si_name);
1013 		}
1014 		panic("foo");
1015 	}
1016 	KASSERT(!(dev->si_flags & SI_CLONELIST),
1017 	    ("Dev %p(%s) should not be on clonelist", dev, dev->si_name));
1018 	if (dl != NULL)
1019 		LIST_INSERT_BEFORE(dl, dev, si_clone);
1020 	else if (de != NULL)
1021 		LIST_INSERT_AFTER(de, dev, si_clone);
1022 	else
1023 		LIST_INSERT_HEAD(&cd->head, dev, si_clone);
1024 	dev->si_flags |= SI_CLONELIST;
1025 	*up = unit;
1026 	dev_unlock_and_free();
1027 	return (1);
1028 }
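
/*
 * Illustrative sketch, not part of this file: the usual pairing of
 * clone_create() with make_dev_credf(MAKEDEV_REF, ...), roughly what a
 * driver's dev_clone handler would do.  The "foo" names are hypothetical;
 * foo_cdevsw must have D_NEEDMINOR set in d_flags (asserted above) and
 * foo_clones must have been initialized with clone_setup().
 */
#if 0
static void
foo_clone_unit(struct ucred *cr, int u, struct cdev **dev)
{

	if (clone_create(&foo_clones, &foo_cdevsw, &u, dev, 0) != 0) {
		/* No such unit yet: create it and give devfs a reference. */
		*dev = make_dev_credf(MAKEDEV_REF, &foo_cdevsw, u, cr,
		    UID_ROOT, GID_WHEEL, 0600, "foo%d", u);
	}
}
#endif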
1029 
1030 /*
1031  * Kill everything still on the list.  The driver should already have
1032  * disposed of any softc hung off the struct cdev *'s at this time.
1033  */
1034 void
1035 clone_cleanup(struct clonedevs **cdp)
1036 {
1037 	struct cdev *dev;
1038 	struct cdev_priv *cp;
1039 	struct clonedevs *cd;
1040 
1041 	cd = *cdp;
1042 	if (cd == NULL)
1043 		return;
1044 	dev_lock();
1045 	while (!LIST_EMPTY(&cd->head)) {
1046 		dev = LIST_FIRST(&cd->head);
1047 		LIST_REMOVE(dev, si_clone);
1048 		KASSERT(dev->si_flags & SI_CLONELIST,
1049 		    ("Dev %p(%s) should be on clonelist", dev, dev->si_name));
1050 		dev->si_flags &= ~SI_CLONELIST;
1051 		cp = cdev2priv(dev);
1052 		if (!(cp->cdp_flags & CDP_SCHED_DTR)) {
1053 			cp->cdp_flags |= CDP_SCHED_DTR;
1054 			KASSERT(dev->si_flags & SI_NAMED,
1055 				("Driver has goofed while cloning was underway, udev %x", dev->si_drv0));
1056 			destroy_devl(dev);
1057 		}
1058 	}
1059 	dev_unlock_and_free();
1060 	free(cd, M_DEVBUF);
1061 	*cdp = NULL;
1062 }
1063 
1064 static TAILQ_HEAD(, cdev_priv) dev_ddtr =
1065 	TAILQ_HEAD_INITIALIZER(dev_ddtr);
1066 static struct task dev_dtr_task;
1067 
1068 static void
1069 destroy_dev_tq(void *ctx, int pending)
1070 {
1071 	struct cdev_priv *cp;
1072 	struct cdev *dev;
1073 	void (*cb)(void *);
1074 	void *cb_arg;
1075 
1076 	dev_lock();
1077 	while (!TAILQ_EMPTY(&dev_ddtr)) {
1078 		cp = TAILQ_FIRST(&dev_ddtr);
1079 		dev = &cp->cdp_c;
1080 		KASSERT(cp->cdp_flags & CDP_SCHED_DTR,
1081 		    ("cdev %p in destroy_dev_tq without CDP_SCHED_DTR", cp));
1082 		TAILQ_REMOVE(&dev_ddtr, cp, cdp_dtr_list);
1083 		cb = cp->cdp_dtr_cb;
1084 		cb_arg = cp->cdp_dtr_cb_arg;
1085 		destroy_devl(dev);
1086 		dev_unlock_and_free();
1087 		dev_rel(dev);
1088 		if (cb != NULL)
1089 			cb(cb_arg);
1090 		dev_lock();
1091 	}
1092 	dev_unlock();
1093 }
1094 
1095 /*
1096  * devmtx shall be locked on entry; it will have been unlocked by the
1097  * time this function returns.
1098  */
1099 static int
1100 destroy_dev_sched_cbl(struct cdev *dev, void (*cb)(void *), void *arg)
1101 {
1102 	struct cdev_priv *cp;
1103 
1104 	mtx_assert(&devmtx, MA_OWNED);
1105 	cp = cdev2priv(dev);
1106 	if (cp->cdp_flags & CDP_SCHED_DTR) {
1107 		dev_unlock();
1108 		return (0);
1109 	}
1110 	dev_refl(dev);
1111 	cp->cdp_flags |= CDP_SCHED_DTR;
1112 	cp->cdp_dtr_cb = cb;
1113 	cp->cdp_dtr_cb_arg = arg;
1114 	TAILQ_INSERT_TAIL(&dev_ddtr, cp, cdp_dtr_list);
1115 	dev_unlock();
1116 	taskqueue_enqueue(taskqueue_swi_giant, &dev_dtr_task);
1117 	return (1);
1118 }
1119 
1120 int
1121 destroy_dev_sched_cb(struct cdev *dev, void (*cb)(void *), void *arg)
1122 {
1123 	dev_lock();
1124 	return (destroy_dev_sched_cbl(dev, cb, arg));
1125 }
1126 
1127 int
1128 destroy_dev_sched(struct cdev *dev)
1129 {
1130 	return (destroy_dev_sched_cb(dev, NULL, NULL));
1131 }
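
/*
 * Illustrative sketch, not part of this file: scheduling destruction from a
 * context that may not sleep waiting for the device to drain (for instance
 * a d_close method), using the callback to release the hypothetical softc
 * after the taskqueue has run destroy_devl() for the device.
 */
#if 0
static void
foo_dtr_cb(void *arg)
{
	struct foo_softc *sc = arg;

	free(sc, M_DEVBUF);	/* the cdev has been destroyed at this point */
}

static void
foo_release(struct foo_softc *sc)
{

	destroy_dev_sched_cb(sc->foo_dev, foo_dtr_cb, sc);
}
#endif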
1132 
1133 void
1134 destroy_dev_drain(struct cdevsw *csw)
1135 {
1136 
1137 	dev_lock();
1138 	while (!LIST_EMPTY(&csw->d_devs)) {
1139 		msleep(&csw->d_devs, &devmtx, PRIBIO, "devscd", hz/10);
1140 	}
1141 	dev_unlock();
1142 }
1143 
1144 void
1145 drain_dev_clone_events(void)
1146 {
1147 
1148 	sx_xlock(&clone_drain_lock);
1149 	sx_xunlock(&clone_drain_lock);
1150 }
1151 
1152 static void
1153 devdtr_init(void *dummy __unused)
1154 {
1155 
1156 	TASK_INIT(&dev_dtr_task, 0, destroy_dev_tq, NULL);
1157 }
1158 
1159 SYSINIT(devdtr, SI_SUB_DEVFS, SI_ORDER_SECOND, devdtr_init, NULL);
1160