xref: /freebsd/sys/kern/kern_conf.c (revision eb6d21b4ca6d668cf89afd99eef7baeafa712197)
1 /*-
2  * Copyright (c) 1999-2002 Poul-Henning Kamp
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include <sys/param.h>
31 #include <sys/kernel.h>
32 #include <sys/systm.h>
33 #include <sys/bus.h>
34 #include <sys/bio.h>
35 #include <sys/lock.h>
36 #include <sys/mutex.h>
37 #include <sys/module.h>
38 #include <sys/malloc.h>
39 #include <sys/conf.h>
40 #include <sys/vnode.h>
41 #include <sys/queue.h>
42 #include <sys/poll.h>
43 #include <sys/sx.h>
44 #include <sys/ctype.h>
45 #include <sys/ucred.h>
46 #include <sys/taskqueue.h>
47 #include <machine/stdarg.h>
48 
49 #include <fs/devfs/devfs_int.h>
50 #include <vm/vm.h>
51 
52 static MALLOC_DEFINE(M_DEVT, "cdev", "cdev storage");
53 
54 struct mtx devmtx;
55 static void destroy_devl(struct cdev *dev);
56 static int destroy_dev_sched_cbl(struct cdev *dev,
57     void (*cb)(void *), void *arg);
58 static struct cdev *make_dev_credv(int flags,
59     struct cdevsw *devsw, int unit,
60     struct ucred *cr, uid_t uid, gid_t gid, int mode, const char *fmt,
61     va_list ap);
62 
63 static struct cdev_priv_list cdevp_free_list =
64     TAILQ_HEAD_INITIALIZER(cdevp_free_list);
65 static SLIST_HEAD(free_cdevsw, cdevsw) cdevsw_gt_post_list =
66     SLIST_HEAD_INITIALIZER();
67 
68 void
69 dev_lock(void)
70 {
71 
72 	mtx_lock(&devmtx);
73 }
74 
75 /*
76  * Free all the memory collected while the cdev mutex was
77  * locked. Since devmtx is after the system map mutex, free() cannot
78  * be called immediately and is postponed until cdev mutex can be
79  * dropped.
80  */
81 static void
82 dev_unlock_and_free(void)
83 {
84 	struct cdev_priv_list cdp_free;
85 	struct free_cdevsw csw_free;
86 	struct cdev_priv *cdp;
87 	struct cdevsw *csw;
88 
89 	mtx_assert(&devmtx, MA_OWNED);
90 
91 	/*
92 	 * Make local copies of the list heads while devmtx is
93 	 * held.  Free them later.
94 	 */
95 	TAILQ_INIT(&cdp_free);
96 	TAILQ_CONCAT(&cdp_free, &cdevp_free_list, cdp_list);
97 	csw_free = cdevsw_gt_post_list;
98 	SLIST_INIT(&cdevsw_gt_post_list);
99 
100 	mtx_unlock(&devmtx);
101 
102 	while ((cdp = TAILQ_FIRST(&cdp_free)) != NULL) {
103 		TAILQ_REMOVE(&cdp_free, cdp, cdp_list);
104 		devfs_free(&cdp->cdp_c);
105 	}
106 	while ((csw = SLIST_FIRST(&csw_free)) != NULL) {
107 		SLIST_REMOVE_HEAD(&csw_free, d_postfree_list);
108 		free(csw, M_DEVT);
109 	}
110 }
111 
112 static void
113 dev_free_devlocked(struct cdev *cdev)
114 {
115 	struct cdev_priv *cdp;
116 
117 	mtx_assert(&devmtx, MA_OWNED);
118 	cdp = cdev2priv(cdev);
119 	TAILQ_INSERT_HEAD(&cdevp_free_list, cdp, cdp_list);
120 }
121 
122 static void
123 cdevsw_free_devlocked(struct cdevsw *csw)
124 {
125 
126 	mtx_assert(&devmtx, MA_OWNED);
127 	SLIST_INSERT_HEAD(&cdevsw_gt_post_list, csw, d_postfree_list);
128 }
129 
130 void
131 dev_unlock(void)
132 {
133 
134 	mtx_unlock(&devmtx);
135 }
136 
137 void
138 dev_ref(struct cdev *dev)
139 {
140 
141 	mtx_assert(&devmtx, MA_NOTOWNED);
142 	mtx_lock(&devmtx);
143 	dev->si_refcount++;
144 	mtx_unlock(&devmtx);
145 }
146 
147 void
148 dev_refl(struct cdev *dev)
149 {
150 
151 	mtx_assert(&devmtx, MA_OWNED);
152 	dev->si_refcount++;
153 }
154 
155 void
156 dev_rel(struct cdev *dev)
157 {
158 	int flag = 0;
159 
160 	mtx_assert(&devmtx, MA_NOTOWNED);
161 	dev_lock();
162 	dev->si_refcount--;
163 	KASSERT(dev->si_refcount >= 0,
164 	    ("dev_rel(%s) gave negative count", devtoname(dev)));
165 #if 0
166 	if (dev->si_usecount == 0 &&
167 	    (dev->si_flags & SI_CHEAPCLONE) && (dev->si_flags & SI_NAMED))
168 		;
169 	else
170 #endif
171 	if (dev->si_devsw == NULL && dev->si_refcount == 0) {
172 		LIST_REMOVE(dev, si_list);
173 		flag = 1;
174 	}
175 	dev_unlock();
176 	if (flag)
177 		devfs_free(dev);
178 }
179 
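/*
 * Acquire a per-call reference on a device's cdevsw.  NULL is returned
 * if the device has no cdevsw or its destruction has already been
 * scheduled; otherwise si_threadcount is bumped and the caller must
 * release the reference with dev_relthread() when the driver call
 * completes.
 */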
180 struct cdevsw *
181 dev_refthread(struct cdev *dev)
182 {
183 	struct cdevsw *csw;
184 	struct cdev_priv *cdp;
185 
186 	mtx_assert(&devmtx, MA_NOTOWNED);
187 	dev_lock();
188 	csw = dev->si_devsw;
189 	if (csw != NULL) {
190 		cdp = cdev2priv(dev);
191 		if ((cdp->cdp_flags & CDP_SCHED_DTR) == 0)
192 			dev->si_threadcount++;
193 		else
194 			csw = NULL;
195 	}
196 	dev_unlock();
197 	return (csw);
198 }
199 
200 struct cdevsw *
201 devvn_refthread(struct vnode *vp, struct cdev **devp)
202 {
203 	struct cdevsw *csw;
204 	struct cdev_priv *cdp;
205 
206 	mtx_assert(&devmtx, MA_NOTOWNED);
207 	csw = NULL;
208 	dev_lock();
209 	*devp = vp->v_rdev;
210 	if (*devp != NULL) {
211 		cdp = cdev2priv(*devp);
212 		if ((cdp->cdp_flags & CDP_SCHED_DTR) == 0) {
213 			csw = (*devp)->si_devsw;
214 			if (csw != NULL)
215 				(*devp)->si_threadcount++;
216 		}
217 	}
218 	dev_unlock();
219 	return (csw);
220 }
221 
222 void
223 dev_relthread(struct cdev *dev)
224 {
225 
226 	mtx_assert(&devmtx, MA_NOTOWNED);
227 	dev_lock();
228 	KASSERT(dev->si_threadcount > 0,
229 	    ("%s threadcount is wrong", dev->si_name));
230 	dev->si_threadcount--;
231 	dev_unlock();
232 }
233 
234 int
235 nullop(void)
236 {
237 
238 	return (0);
239 }
240 
241 int
242 eopnotsupp(void)
243 {
244 
245 	return (EOPNOTSUPP);
246 }
247 
248 static int
249 enxio(void)
250 {
251 	return (ENXIO);
252 }
253 
254 static int
255 enodev(void)
256 {
257 	return (ENODEV);
258 }
259 
260 /* Define a dead_cdevsw for use when devices leave unexpectedly. */
261 
262 #define dead_open	(d_open_t *)enxio
263 #define dead_close	(d_close_t *)enxio
264 #define dead_read	(d_read_t *)enxio
265 #define dead_write	(d_write_t *)enxio
266 #define dead_ioctl	(d_ioctl_t *)enxio
267 #define dead_poll	(d_poll_t *)enodev
268 #define dead_mmap	(d_mmap_t *)enodev
269 
270 static void
271 dead_strategy(struct bio *bp)
272 {
273 
274 	biofinish(bp, NULL, ENXIO);
275 }
276 
277 #define dead_dump	(dumper_t *)enxio
278 #define dead_kqfilter	(d_kqfilter_t *)enxio
279 #define dead_mmap_single (d_mmap_single_t *)enodev
280 
281 static struct cdevsw dead_cdevsw = {
282 	.d_version =	D_VERSION,
283 	.d_flags =	D_NEEDGIANT, /* XXX: does dead_strategy need this ? */
284 	.d_open =	dead_open,
285 	.d_close =	dead_close,
286 	.d_read =	dead_read,
287 	.d_write =	dead_write,
288 	.d_ioctl =	dead_ioctl,
289 	.d_poll =	dead_poll,
290 	.d_mmap =	dead_mmap,
291 	.d_strategy =	dead_strategy,
292 	.d_name =	"dead",
293 	.d_dump =	dead_dump,
294 	.d_kqfilter =	dead_kqfilter,
295 	.d_mmap_single = dead_mmap_single
296 };
297 
298 /* Default methods if driver does not specify method */
299 
300 #define null_open	(d_open_t *)nullop
301 #define null_close	(d_close_t *)nullop
302 #define no_read		(d_read_t *)enodev
303 #define no_write	(d_write_t *)enodev
304 #define no_ioctl	(d_ioctl_t *)enodev
305 #define no_mmap		(d_mmap2_t *)enodev
306 #define no_kqfilter	(d_kqfilter_t *)enodev
307 #define no_mmap_single	(d_mmap_single_t *)enodev
308 
309 static void
310 no_strategy(struct bio *bp)
311 {
312 
313 	biofinish(bp, NULL, ENODEV);
314 }
315 
316 static int
317 no_poll(struct cdev *dev __unused, int events, struct thread *td __unused)
318 {
319 
320 	return (poll_no_poll(events));
321 }
322 
323 #define no_dump		(dumper_t *)enodev
324 
325 static int
326 giant_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
327 {
328 	struct cdevsw *dsw;
329 	int retval;
330 
331 	dsw = dev_refthread(dev);
332 	if (dsw == NULL)
333 		return (ENXIO);
334 	mtx_lock(&Giant);
335 	retval = dsw->d_gianttrick->d_open(dev, oflags, devtype, td);
336 	mtx_unlock(&Giant);
337 	dev_relthread(dev);
338 	return (retval);
339 }
340 
341 static int
342 giant_fdopen(struct cdev *dev, int oflags, struct thread *td, struct file *fp)
343 {
344 	struct cdevsw *dsw;
345 	int retval;
346 
347 	dsw = dev_refthread(dev);
348 	if (dsw == NULL)
349 		return (ENXIO);
350 	mtx_lock(&Giant);
351 	retval = dsw->d_gianttrick->d_fdopen(dev, oflags, td, fp);
352 	mtx_unlock(&Giant);
353 	dev_relthread(dev);
354 	return (retval);
355 }
356 
357 static int
358 giant_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
359 {
360 	struct cdevsw *dsw;
361 	int retval;
362 
363 	dsw = dev_refthread(dev);
364 	if (dsw == NULL)
365 		return (ENXIO);
366 	mtx_lock(&Giant);
367 	retval = dsw->d_gianttrick->d_close(dev, fflag, devtype, td);
368 	mtx_unlock(&Giant);
369 	dev_relthread(dev);
370 	return (retval);
371 }
372 
373 static void
374 giant_strategy(struct bio *bp)
375 {
376 	struct cdevsw *dsw;
377 	struct cdev *dev;
378 
379 	dev = bp->bio_dev;
380 	dsw = dev_refthread(dev);
381 	if (dsw == NULL) {
382 		biofinish(bp, NULL, ENXIO);
383 		return;
384 	}
385 	mtx_lock(&Giant);
386 	dsw->d_gianttrick->d_strategy(bp);
387 	mtx_unlock(&Giant);
388 	dev_relthread(dev);
389 }
390 
391 static int
392 giant_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
393 {
394 	struct cdevsw *dsw;
395 	int retval;
396 
397 	dsw = dev_refthread(dev);
398 	if (dsw == NULL)
399 		return (ENXIO);
400 	mtx_lock(&Giant);
401 	retval = dsw->d_gianttrick->d_ioctl(dev, cmd, data, fflag, td);
402 	mtx_unlock(&Giant);
403 	dev_relthread(dev);
404 	return (retval);
405 }
406 
407 static int
408 giant_read(struct cdev *dev, struct uio *uio, int ioflag)
409 {
410 	struct cdevsw *dsw;
411 	int retval;
412 
413 	dsw = dev_refthread(dev);
414 	if (dsw == NULL)
415 		return (ENXIO);
416 	mtx_lock(&Giant);
417 	retval = dsw->d_gianttrick->d_read(dev, uio, ioflag);
418 	mtx_unlock(&Giant);
419 	dev_relthread(dev);
420 	return (retval);
421 }
422 
423 static int
424 giant_write(struct cdev *dev, struct uio *uio, int ioflag)
425 {
426 	struct cdevsw *dsw;
427 	int retval;
428 
429 	dsw = dev_refthread(dev);
430 	if (dsw == NULL)
431 		return (ENXIO);
432 	mtx_lock(&Giant);
433 	retval = dsw->d_gianttrick->d_write(dev, uio, ioflag);
434 	mtx_unlock(&Giant);
435 	dev_relthread(dev);
436 	return (retval);
437 }
438 
439 static int
440 giant_poll(struct cdev *dev, int events, struct thread *td)
441 {
442 	struct cdevsw *dsw;
443 	int retval;
444 
445 	dsw = dev_refthread(dev);
446 	if (dsw == NULL)
447 		return (ENXIO);
448 	mtx_lock(&Giant);
449 	retval = dsw->d_gianttrick->d_poll(dev, events, td);
450 	mtx_unlock(&Giant);
451 	dev_relthread(dev);
452 	return (retval);
453 }
454 
455 static int
456 giant_kqfilter(struct cdev *dev, struct knote *kn)
457 {
458 	struct cdevsw *dsw;
459 	int retval;
460 
461 	dsw = dev_refthread(dev);
462 	if (dsw == NULL)
463 		return (ENXIO);
464 	mtx_lock(&Giant);
465 	retval = dsw->d_gianttrick->d_kqfilter(dev, kn);
466 	mtx_unlock(&Giant);
467 	dev_relthread(dev);
468 	return (retval);
469 }
470 
471 static int
472 giant_mmap(struct cdev *dev, vm_offset_t offset, vm_paddr_t *paddr, int nprot,
473     vm_memattr_t *memattr)
474 {
475 	struct cdevsw *dsw;
476 	int retval;
477 
478 	dsw = dev_refthread(dev);
479 	if (dsw == NULL)
480 		return (ENXIO);
481 	mtx_lock(&Giant);
482 	if (dsw->d_gianttrick->d_flags & D_MMAP2)
483 		retval = dsw->d_gianttrick->d_mmap2(dev, offset, paddr, nprot,
484 		    memattr);
485 	else
486 		retval = dsw->d_gianttrick->d_mmap(dev, offset, paddr, nprot);
487 	mtx_unlock(&Giant);
488 	dev_relthread(dev);
489 	return (retval);
490 }
491 
492 static int
493 giant_mmap_single(struct cdev *dev, vm_ooffset_t *offset, vm_size_t size,
494     vm_object_t *object, int nprot)
495 {
496 	struct cdevsw *dsw;
497 	int retval;
498 
499 	dsw = dev_refthread(dev);
500 	if (dsw == NULL)
501 		return (ENXIO);
502 	mtx_lock(&Giant);
503 	retval = dsw->d_gianttrick->d_mmap_single(dev, offset, size, object,
504 	    nprot);
505 	mtx_unlock(&Giant);
506 	dev_relthread(dev);
507 	return (retval);
508 }
509 
510 static void
511 notify(struct cdev *dev, const char *ev)
512 {
513 	static const char prefix[] = "cdev=";
514 	char *data;
515 	int namelen;
516 
517 	if (cold)
518 		return;
519 	namelen = strlen(dev->si_name);
520 	data = malloc(namelen + sizeof(prefix), M_TEMP, M_NOWAIT);
521 	if (data == NULL)
522 		return;
523 	memcpy(data, prefix, sizeof(prefix) - 1);
524 	memcpy(data + sizeof(prefix) - 1, dev->si_name, namelen + 1);
525 	devctl_notify("DEVFS", "CDEV", ev, data);
526 	free(data, M_TEMP);
527 }
528 
529 static void
530 notify_create(struct cdev *dev)
531 {
532 
533 	notify(dev, "CREATE");
534 }
535 
536 static void
537 notify_destroy(struct cdev *dev)
538 {
539 
540 	notify(dev, "DESTROY");
541 }
542 
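/*
 * Bind a preallocated cdev to a cdevsw and unit.  For D_NEEDMINOR
 * cdevsws an already existing device with the same unit number is
 * returned instead and the preallocated cdev is put on the free list.
 */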
543 static struct cdev *
544 newdev(struct cdevsw *csw, int unit, struct cdev *si)
545 {
546 	struct cdev *si2;
547 
548 	mtx_assert(&devmtx, MA_OWNED);
549 	if (csw->d_flags & D_NEEDMINOR) {
550 		/* We may want to return an existing device */
551 		LIST_FOREACH(si2, &csw->d_devs, si_list) {
552 			if (dev2unit(si2) == unit) {
553 				dev_free_devlocked(si);
554 				return (si2);
555 			}
556 		}
557 	}
558 	si->si_drv0 = unit;
559 	si->si_devsw = csw;
560 	LIST_INSERT_HEAD(&csw->d_devs, si, si_list);
561 	return (si);
562 }
563 
564 static void
565 fini_cdevsw(struct cdevsw *devsw)
566 {
567 	struct cdevsw *gt;
568 
569 	if (devsw->d_gianttrick != NULL) {
570 		gt = devsw->d_gianttrick;
571 		memcpy(devsw, gt, sizeof *devsw);
572 		cdevsw_free_devlocked(gt);
573 		devsw->d_gianttrick = NULL;
574 	}
575 	devsw->d_flags &= ~D_INIT;
576 }
577 
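/*
 * One-time initialization of a cdevsw: drivers built against an
 * incompatible D_VERSION are disabled by substituting the dead_*()
 * methods, missing methods get defaults, and D_NEEDGIANT drivers have
 * their methods wrapped by the giant_*() trampolines (the originals
 * are preserved in d_gianttrick).
 */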
578 static void
579 prep_cdevsw(struct cdevsw *devsw)
580 {
581 	struct cdevsw *dsw2;
582 
583 	mtx_assert(&devmtx, MA_OWNED);
584 	if (devsw->d_flags & D_INIT)
585 		return;
586 	if (devsw->d_flags & D_NEEDGIANT) {
587 		dev_unlock();
588 		dsw2 = malloc(sizeof *dsw2, M_DEVT, M_WAITOK);
589 		dev_lock();
590 	} else
591 		dsw2 = NULL;
592 	if (devsw->d_flags & D_INIT) {
593 		if (dsw2 != NULL)
594 			cdevsw_free_devlocked(dsw2);
595 		return;
596 	}
597 
598 	if (devsw->d_version != D_VERSION_01 &&
599 	    devsw->d_version != D_VERSION_02) {
600 		printf(
601 		    "WARNING: Device driver \"%s\" has wrong version %s\n",
602 		    devsw->d_name == NULL ? "???" : devsw->d_name,
603 		    "and is disabled.  Recompile KLD module.");
604 		devsw->d_open = dead_open;
605 		devsw->d_close = dead_close;
606 		devsw->d_read = dead_read;
607 		devsw->d_write = dead_write;
608 		devsw->d_ioctl = dead_ioctl;
609 		devsw->d_poll = dead_poll;
610 		devsw->d_mmap = dead_mmap;
611 		devsw->d_strategy = dead_strategy;
612 		devsw->d_dump = dead_dump;
613 		devsw->d_kqfilter = dead_kqfilter;
614 	}
615 	if (devsw->d_version == D_VERSION_01)
616 		devsw->d_mmap_single = NULL;
617 
618 	if (devsw->d_flags & D_NEEDGIANT) {
619 		if (devsw->d_gianttrick == NULL) {
620 			memcpy(dsw2, devsw, sizeof *dsw2);
621 			devsw->d_gianttrick = dsw2;
622 			devsw->d_flags |= D_MMAP2;
623 			dsw2 = NULL;
624 		}
625 	}
626 
627 #define FIXUP(member, noop, giant) 				\
628 	do {							\
629 		if (devsw->member == NULL) {			\
630 			devsw->member = noop;			\
631 		} else if (devsw->d_flags & D_NEEDGIANT)	\
632 			devsw->member = giant;			\
633 	}						\
634 	while (0)
635 
636 	FIXUP(d_open,		null_open,	giant_open);
637 	FIXUP(d_fdopen,		NULL,		giant_fdopen);
638 	FIXUP(d_close,		null_close,	giant_close);
639 	FIXUP(d_read,		no_read,	giant_read);
640 	FIXUP(d_write,		no_write,	giant_write);
641 	FIXUP(d_ioctl,		no_ioctl,	giant_ioctl);
642 	FIXUP(d_poll,		no_poll,	giant_poll);
643 	FIXUP(d_mmap2,		no_mmap,	giant_mmap);
644 	FIXUP(d_strategy,	no_strategy,	giant_strategy);
645 	FIXUP(d_kqfilter,	no_kqfilter,	giant_kqfilter);
646 	FIXUP(d_mmap_single,	no_mmap_single,	giant_mmap_single);
647 
648 	if (devsw->d_dump == NULL)	devsw->d_dump = no_dump;
649 
650 	LIST_INIT(&devsw->d_devs);
651 
652 	devsw->d_flags |= D_INIT;
653 
654 	if (dsw2 != NULL)
655 		cdevsw_free_devlocked(dsw2);
656 }
657 
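/*
 * Common backend for the make_dev*() family: allocate or look up the
 * cdev, name it from the format string, apply credentials and
 * ownership, and register it with devfs.
 */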
658 struct cdev *
659 make_dev_credv(int flags, struct cdevsw *devsw, int unit,
660     struct ucred *cr, uid_t uid,
661     gid_t gid, int mode, const char *fmt, va_list ap)
662 {
663 	struct cdev *dev;
664 	int i;
665 
666 	dev = devfs_alloc();
667 	dev_lock();
668 	prep_cdevsw(devsw);
669 	dev = newdev(devsw, unit, dev);
670 	if (flags & MAKEDEV_REF)
671 		dev_refl(dev);
672 	if (dev->si_flags & SI_CHEAPCLONE &&
673 	    dev->si_flags & SI_NAMED) {
674 		/*
675 		 * This is allowed as it removes races and generally
676 		 * simplifies cloning devices.
677 		 * XXX: still ??
678 		 */
679 		dev_unlock_and_free();
680 		return (dev);
681 	}
682 	KASSERT(!(dev->si_flags & SI_NAMED),
683 	    ("make_dev() by driver %s on pre-existing device (min=%x, name=%s)",
684 	    devsw->d_name, dev2unit(dev), devtoname(dev)));
685 
686 	i = vsnrprintf(dev->__si_namebuf, sizeof dev->__si_namebuf, 32, fmt, ap);
687 	if (i > (sizeof dev->__si_namebuf - 1)) {
688 		printf("WARNING: Device name truncated! (%s)\n",
689 		    dev->__si_namebuf);
690 	}
691 
692 	dev->si_flags |= SI_NAMED;
693 	if (cr != NULL)
694 		dev->si_cred = crhold(cr);
695 	else
696 		dev->si_cred = NULL;
697 	dev->si_uid = uid;
698 	dev->si_gid = gid;
699 	dev->si_mode = mode;
700 
701 	devfs_create(dev);
702 	clean_unrhdrl(devfs_inos);
703 	dev_unlock_and_free();
704 
705 	notify_create(dev);
706 
707 	return (dev);
708 }
709 
710 struct cdev *
711 make_dev(struct cdevsw *devsw, int unit, uid_t uid, gid_t gid, int mode,
712     const char *fmt, ...)
713 {
714 	struct cdev *dev;
715 	va_list ap;
716 
717 	va_start(ap, fmt);
718 	dev = make_dev_credv(0, devsw, unit, NULL, uid, gid, mode, fmt, ap);
719 	va_end(ap);
720 	return (dev);
721 }
722 
723 struct cdev *
724 make_dev_cred(struct cdevsw *devsw, int unit, struct ucred *cr, uid_t uid,
725     gid_t gid, int mode, const char *fmt, ...)
726 {
727 	struct cdev *dev;
728 	va_list ap;
729 
730 	va_start(ap, fmt);
731 	dev = make_dev_credv(0, devsw, unit, cr, uid, gid, mode, fmt, ap);
732 	va_end(ap);
733 
734 	return (dev);
735 }
736 
737 struct cdev *
738 make_dev_credf(int flags, struct cdevsw *devsw, int unit,
739     struct ucred *cr, uid_t uid,
740     gid_t gid, int mode, const char *fmt, ...)
741 {
742 	struct cdev *dev;
743 	va_list ap;
744 
745 	va_start(ap, fmt);
746 	dev = make_dev_credv(flags, devsw, unit, cr, uid, gid, mode,
747 	    fmt, ap);
748 	va_end(ap);
749 
750 	return (dev);
751 }
752 
753 static void
754 dev_dependsl(struct cdev *pdev, struct cdev *cdev)
755 {
756 
757 	cdev->si_parent = pdev;
758 	cdev->si_flags |= SI_CHILD;
759 	LIST_INSERT_HEAD(&pdev->si_children, cdev, si_siblings);
760 }
761 
762 
763 void
764 dev_depends(struct cdev *pdev, struct cdev *cdev)
765 {
766 
767 	dev_lock();
768 	dev_dependsl(pdev, cdev);
769 	dev_unlock();
770 }
771 
772 struct cdev *
773 make_dev_alias(struct cdev *pdev, const char *fmt, ...)
774 {
775 	struct cdev *dev;
776 	va_list ap;
777 	int i;
778 
779 	KASSERT(pdev != NULL, ("NULL pdev"));
780 	dev = devfs_alloc();
781 	dev_lock();
782 	dev->si_flags |= SI_ALIAS;
783 	dev->si_flags |= SI_NAMED;
784 	va_start(ap, fmt);
785 	i = vsnrprintf(dev->__si_namebuf, sizeof dev->__si_namebuf, 32, fmt, ap);
786 	if (i > (sizeof dev->__si_namebuf - 1)) {
787 		printf("WARNING: Device name truncated! (%s)\n",
788 		    dev->__si_namebuf);
789 	}
790 	va_end(ap);
791 
792 	devfs_create(dev);
793 	dev_dependsl(pdev, dev);
794 	clean_unrhdrl(devfs_inos);
795 	dev_unlock();
796 
797 	notify_create(dev);
798 
799 	return (dev);
800 }
801 
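/*
 * Core of device destruction, called with devmtx held: unname the
 * device, recursively destroy its children, wait for threads still
 * inside driver methods to drain, and finally free the cdev or park it
 * on dead_cdevsw's list if references remain.
 */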
802 static void
803 destroy_devl(struct cdev *dev)
804 {
805 	struct cdevsw *csw;
806 	struct cdev_privdata *p, *p1;
807 
808 	mtx_assert(&devmtx, MA_OWNED);
809 	KASSERT(dev->si_flags & SI_NAMED,
810 	    ("WARNING: Driver mistake: destroy_dev on %d\n", dev2unit(dev)));
811 
812 	devfs_destroy(dev);
813 
814 	/* Remove name marking */
815 	dev->si_flags &= ~SI_NAMED;
816 
817 	/* If we are a child, remove us from the parents list */
818 	if (dev->si_flags & SI_CHILD) {
819 		LIST_REMOVE(dev, si_siblings);
820 		dev->si_flags &= ~SI_CHILD;
821 	}
822 
823 	/* Kill our children */
824 	while (!LIST_EMPTY(&dev->si_children))
825 		destroy_devl(LIST_FIRST(&dev->si_children));
826 
827 	/* Remove from clone list */
828 	if (dev->si_flags & SI_CLONELIST) {
829 		LIST_REMOVE(dev, si_clone);
830 		dev->si_flags &= ~SI_CLONELIST;
831 	}
832 
833 	dev->si_refcount++;	/* Avoid race with dev_rel() */
834 	csw = dev->si_devsw;
835 	dev->si_devsw = NULL;	/* already NULL for SI_ALIAS */
836 	while (csw != NULL && csw->d_purge != NULL && dev->si_threadcount) {
837 		csw->d_purge(dev);
838 		msleep(csw, &devmtx, PRIBIO, "devprg", hz/10);
839 		if (dev->si_threadcount)
840 			printf("Still %lu threads in %s\n",
841 			    dev->si_threadcount, devtoname(dev));
842 	}
843 	while (dev->si_threadcount != 0) {
844 		/* Use unique dummy wait ident */
845 		msleep(&csw, &devmtx, PRIBIO, "devdrn", hz / 10);
846 	}
847 
848 	dev_unlock();
849 	notify_destroy(dev);
850 	mtx_lock(&cdevpriv_mtx);
851 	LIST_FOREACH_SAFE(p, &cdev2priv(dev)->cdp_fdpriv, cdpd_list, p1) {
852 		devfs_destroy_cdevpriv(p);
853 		mtx_lock(&cdevpriv_mtx);
854 	}
855 	mtx_unlock(&cdevpriv_mtx);
856 	dev_lock();
857 
858 	dev->si_drv1 = 0;
859 	dev->si_drv2 = 0;
860 	bzero(&dev->__si_u, sizeof(dev->__si_u));
861 
862 	if (!(dev->si_flags & SI_ALIAS)) {
863 		/* Remove from cdevsw list */
864 		LIST_REMOVE(dev, si_list);
865 
866 		/* If cdevsw has no more struct cdev *'s, clean it */
867 		if (LIST_EMPTY(&csw->d_devs)) {
868 			fini_cdevsw(csw);
869 			wakeup(&csw->d_devs);
870 		}
871 	}
872 	dev->si_flags &= ~SI_ALIAS;
873 	dev->si_refcount--;	/* Avoid race with dev_rel() */
874 
875 	if (dev->si_refcount > 0) {
876 		LIST_INSERT_HEAD(&dead_cdevsw.d_devs, dev, si_list);
877 	} else {
878 		dev_free_devlocked(dev);
879 	}
880 }
881 
882 void
883 destroy_dev(struct cdev *dev)
884 {
885 
886 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "destroy_dev");
887 	dev_lock();
888 	destroy_devl(dev);
889 	dev_unlock_and_free();
890 }
891 
892 const char *
893 devtoname(struct cdev *dev)
894 {
895 
896 	return (dev->si_name);
897 }
898 
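/*
 * Parse a "<stem><unit>" device name.  Returns 0 if the name does not
 * start with the stem or carries no valid unit number, 1 on an exact
 * "<stem><unit>" match and 2 if characters follow the unit number.
 * On success *unit is set and, if namep is not NULL, *namep points at
 * the first character after the digits.
 */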
899 int
900 dev_stdclone(char *name, char **namep, const char *stem, int *unit)
901 {
902 	int u, i;
903 
904 	i = strlen(stem);
905 	if (bcmp(stem, name, i) != 0)
906 		return (0);
907 	if (!isdigit(name[i]))
908 		return (0);
909 	u = 0;
910 	if (name[i] == '0' && isdigit(name[i+1]))
911 		return (0);
912 	while (isdigit(name[i])) {
913 		u *= 10;
914 		u += name[i++] - '0';
915 	}
916 	if (u > 0xffffff)
917 		return (0);
918 	*unit = u;
919 	if (namep)
920 		*namep = &name[i];
921 	if (name[i])
922 		return (2);
923 	return (1);
924 }
925 
926 /*
927  * Helper functions for cloning device drivers.
928  *
929  * The objective here is to make it unnecessary for the device drivers to
930  * use rman or similar to manage their unit number space.  Due to the way
931  * we do "on-demand" devices, using rman or other "private" methods
932  * will be very tricky to lock down properly once we lock down this file.
933  *
934  * Instead we give the drivers these routines, which put the struct cdev *'s
935  * that are to be managed on their own list and give the driver the ability
936  * to ask for the first free unit number or for a specific unit number.
937  *
938  * In addition these routines support paired devices (pty, nmdm and similar)
939  * by respecting a number of "flag" bits in the minor number.
940  *
941  */
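/*
 * Illustrative sketch (not part of this file): a cloning driver
 * typically calls clone_setup() when it loads, resolves names in a
 * dev_clone eventhandler using dev_stdclone() and clone_create(), and
 * calls clone_cleanup() when it unloads.  The names foo_clones,
 * foo_cdevsw and foo_clone() below are hypothetical, and the handler
 * is assumed to be registered with
 * EVENTHANDLER_REGISTER(dev_clone, foo_clone, NULL, 1000).
 */
static struct clonedevs *foo_clones;	/* clone_setup(&foo_clones) at load */

static struct cdevsw foo_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_NEEDMINOR,	/* required by clone_create() */
	.d_name =	"foo",
	/* remaining methods left NULL; prep_cdevsw() supplies defaults */
};

static void
foo_clone(void *arg, struct ucred *cred, char *name, int namelen,
    struct cdev **dev)
{
	int u;

	if (*dev != NULL)
		return;				/* already resolved */
	if (strcmp(name, "foo") == 0)
		u = -1;				/* ask for the lowest free unit */
	else if (dev_stdclone(name, NULL, "foo", &u) != 1)
		return;				/* not a "foo<unit>" name */
	/* Reuse an existing clone, or reserve the unit and create it. */
	if (clone_create(&foo_clones, &foo_cdevsw, &u, dev, 0))
		*dev = make_dev_credf(MAKEDEV_REF, &foo_cdevsw, u, cred,
		    UID_ROOT, GID_WHEEL, 0600, "foo%d", u);
}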
942 
943 struct clonedevs {
944 	LIST_HEAD(,cdev)	head;
945 };
946 
947 void
948 clone_setup(struct clonedevs **cdp)
949 {
950 
951 	*cdp = malloc(sizeof **cdp, M_DEVBUF, M_WAITOK | M_ZERO);
952 	LIST_INIT(&(*cdp)->head);
953 }
954 
955 int
956 clone_create(struct clonedevs **cdp, struct cdevsw *csw, int *up, struct cdev **dp, int extra)
957 {
958 	struct clonedevs *cd;
959 	struct cdev *dev, *ndev, *dl, *de;
960 	int unit, low, u;
961 
962 	KASSERT(*cdp != NULL,
963 	    ("clone_setup() not called in driver \"%s\"", csw->d_name));
964 	KASSERT(!(extra & CLONE_UNITMASK),
965 	    ("Illegal extra bits (0x%x) in clone_create", extra));
966 	KASSERT(*up <= CLONE_UNITMASK,
967 	    ("Too high unit (0x%x) in clone_create", *up));
968 	KASSERT(csw->d_flags & D_NEEDMINOR,
969 	    ("clone_create() on cdevsw without minor numbers"));
970 
971 
972 	/*
973 	 * Search the list for a lot of things in one go:
974 	 *   A preexisting match is returned immediately.
975 	 *   The lowest free unit number if we are passed -1, and the place
976 	 *	 in the list where we should insert that new element.
977 	 *   The place to insert a specified unit number, which may be
978 	 *       the end of the list.
979 	 */
980 	unit = *up;
981 	ndev = devfs_alloc();
982 	dev_lock();
983 	prep_cdevsw(csw);
984 	low = extra;
985 	de = dl = NULL;
986 	cd = *cdp;
987 	LIST_FOREACH(dev, &cd->head, si_clone) {
988 		KASSERT(dev->si_flags & SI_CLONELIST,
989 		    ("Dev %p(%s) should be on clonelist", dev, dev->si_name));
990 		u = dev2unit(dev);
991 		if (u == (unit | extra)) {
992 			*dp = dev;
993 			dev_unlock();
994 			devfs_free(ndev);
995 			return (0);
996 		}
997 		if (unit == -1 && u == low) {
998 			low++;
999 			de = dev;
1000 			continue;
1001 		} else if (u < (unit | extra)) {
1002 			de = dev;
1003 			continue;
1004 		} else if (u > (unit | extra)) {
1005 			dl = dev;
1006 			break;
1007 		}
1008 	}
1009 	if (unit == -1)
1010 		unit = low & CLONE_UNITMASK;
1011 	dev = newdev(csw, unit | extra, ndev);
1012 	if (dev->si_flags & SI_CLONELIST) {
1013 		printf("dev %p (%s) is on clonelist\n", dev, dev->si_name);
1014 		printf("unit=%d, low=%d, extra=0x%x\n", unit, low, extra);
1015 		LIST_FOREACH(dev, &cd->head, si_clone) {
1016 			printf("\t%p %s\n", dev, dev->si_name);
1017 		}
1018 		panic("foo");
1019 	}
1020 	KASSERT(!(dev->si_flags & SI_CLONELIST),
1021 	    ("Dev %p(%s) should not be on clonelist", dev, dev->si_name));
1022 	if (dl != NULL)
1023 		LIST_INSERT_BEFORE(dl, dev, si_clone);
1024 	else if (de != NULL)
1025 		LIST_INSERT_AFTER(de, dev, si_clone);
1026 	else
1027 		LIST_INSERT_HEAD(&cd->head, dev, si_clone);
1028 	dev->si_flags |= SI_CLONELIST;
1029 	*up = unit;
1030 	dev_unlock_and_free();
1031 	return (1);
1032 }
1033 
1034 /*
1035  * Kill everything still on the list.  The driver should already have
1036  * disposed of any softc hung off the struct cdev *'s at this time.
1037  */
1038 void
1039 clone_cleanup(struct clonedevs **cdp)
1040 {
1041 	struct cdev *dev;
1042 	struct cdev_priv *cp;
1043 	struct clonedevs *cd;
1044 
1045 	cd = *cdp;
1046 	if (cd == NULL)
1047 		return;
1048 	dev_lock();
1049 	while (!LIST_EMPTY(&cd->head)) {
1050 		dev = LIST_FIRST(&cd->head);
1051 		LIST_REMOVE(dev, si_clone);
1052 		KASSERT(dev->si_flags & SI_CLONELIST,
1053 		    ("Dev %p(%s) should be on clonelist", dev, dev->si_name));
1054 		dev->si_flags &= ~SI_CLONELIST;
1055 		cp = cdev2priv(dev);
1056 		if (!(cp->cdp_flags & CDP_SCHED_DTR)) {
1057 			cp->cdp_flags |= CDP_SCHED_DTR;
1058 			KASSERT(dev->si_flags & SI_NAMED,
1059 				("Driver has goofed in cloning underways udev %x unit %x", dev2udev(dev), dev2unit(dev)));
1060 			destroy_devl(dev);
1061 		}
1062 	}
1063 	dev_unlock_and_free();
1064 	free(cd, M_DEVBUF);
1065 	*cdp = NULL;
1066 }
1067 
1068 static TAILQ_HEAD(, cdev_priv) dev_ddtr =
1069 	TAILQ_HEAD_INITIALIZER(dev_ddtr);
1070 static struct task dev_dtr_task;
1071 
1072 static void
1073 destroy_dev_tq(void *ctx, int pending)
1074 {
1075 	struct cdev_priv *cp;
1076 	struct cdev *dev;
1077 	void (*cb)(void *);
1078 	void *cb_arg;
1079 
1080 	dev_lock();
1081 	while (!TAILQ_EMPTY(&dev_ddtr)) {
1082 		cp = TAILQ_FIRST(&dev_ddtr);
1083 		dev = &cp->cdp_c;
1084 		KASSERT(cp->cdp_flags & CDP_SCHED_DTR,
1085 		    ("cdev %p in dev_destroy_tq without CDP_SCHED_DTR", cp));
1086 		TAILQ_REMOVE(&dev_ddtr, cp, cdp_dtr_list);
1087 		cb = cp->cdp_dtr_cb;
1088 		cb_arg = cp->cdp_dtr_cb_arg;
1089 		destroy_devl(dev);
1090 		dev_unlock_and_free();
1091 		dev_rel(dev);
1092 		if (cb != NULL)
1093 			cb(cb_arg);
1094 		dev_lock();
1095 	}
1096 	dev_unlock();
1097 }
1098 
1099 /*
1100  * devmtx shall be locked on entry. devmtx will be unlocked after
1101  * function return.
1102  */
1103 static int
1104 destroy_dev_sched_cbl(struct cdev *dev, void (*cb)(void *), void *arg)
1105 {
1106 	struct cdev_priv *cp;
1107 
1108 	mtx_assert(&devmtx, MA_OWNED);
1109 	cp = cdev2priv(dev);
1110 	if (cp->cdp_flags & CDP_SCHED_DTR) {
1111 		dev_unlock();
1112 		return (0);
1113 	}
1114 	dev_refl(dev);
1115 	cp->cdp_flags |= CDP_SCHED_DTR;
1116 	cp->cdp_dtr_cb = cb;
1117 	cp->cdp_dtr_cb_arg = arg;
1118 	TAILQ_INSERT_TAIL(&dev_ddtr, cp, cdp_dtr_list);
1119 	dev_unlock();
1120 	taskqueue_enqueue(taskqueue_swi_giant, &dev_dtr_task);
1121 	return (1);
1122 }
1123 
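/*
 * Schedule asynchronous destruction of a device on taskqueue_swi_giant.
 * Returns 1 if destruction was scheduled by this call and 0 if it was
 * already pending; the optional callback runs from the taskqueue after
 * the device has been destroyed.
 */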
1124 int
1125 destroy_dev_sched_cb(struct cdev *dev, void (*cb)(void *), void *arg)
1126 {
1127 	dev_lock();
1128 	return (destroy_dev_sched_cbl(dev, cb, arg));
1129 }
1130 
1131 int
1132 destroy_dev_sched(struct cdev *dev)
1133 {
1134 	return (destroy_dev_sched_cb(dev, NULL, NULL));
1135 }
1136 
1137 void
1138 destroy_dev_drain(struct cdevsw *csw)
1139 {
1140 
1141 	dev_lock();
1142 	while (!LIST_EMPTY(&csw->d_devs)) {
1143 		msleep(&csw->d_devs, &devmtx, PRIBIO, "devscd", hz/10);
1144 	}
1145 	dev_unlock();
1146 }
1147 
1148 void
1149 drain_dev_clone_events(void)
1150 {
1151 
1152 	sx_xlock(&clone_drain_lock);
1153 	sx_xunlock(&clone_drain_lock);
1154 }
1155 
1156 static void
1157 devdtr_init(void *dummy __unused)
1158 {
1159 
1160 	TASK_INIT(&dev_dtr_task, 0, destroy_dev_tq, NULL);
1161 }
1162 
1163 SYSINIT(devdtr, SI_SUB_DEVFS, SI_ORDER_SECOND, devdtr_init, NULL);
1164