xref: /freebsd/sys/fs/devfs/devfs_rule.c (revision 7ef62cebc2f965b0f640263e179276928885e33d)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2002 Dima Dorfman.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * DEVFS ruleset implementation.
 *
 * A note on terminology: To "run" a rule on a dirent is to take the
 * prescribed action; to "apply" a rule is to check whether it matches
 * a dirent and run it if it does.
 *
 * A note on locking: Only foreign entry points (non-static functions)
 * should deal with locking.  Everything else assumes we already hold
 * the required kind of lock.
 *
 * A note on namespace: devfs_rules_* are the non-static functions for
 * the entire "ruleset" subsystem, devfs_rule_* are the static
 * functions that operate on rules, and devfs_ruleset_* are the static
 * functions that operate on rulesets.  The line between the last two
 * isn't always clear, but the guideline is still useful.
 *
 * A note on "special" identifiers: Ruleset 0 is the NULL, or empty,
 * ruleset; it cannot be deleted or changed in any way.  This may be
 * assumed inside the code; e.g., a ruleset of 0 may be interpreted to
 * mean "no ruleset".  The interpretation of rule 0 is
 * command-dependent, but in no case is there a real rule with number
 * 0.
 *
 * A note on errno codes: To make it easier for the userland to tell
 * what went wrong, we sometimes use errno codes that are not entirely
 * appropriate for the error but that would be less ambiguous than the
 * appropriate "generic" code.  For example, when we can't find a
 * ruleset, we return ESRCH instead of ENOENT (except in
 * DEVFSIO_{R,S}GETNEXT, where a nonexistent ruleset means "end of
 * list", and the userland expects ENOENT to be this indicator); this
 * way, when an operation fails, it's clear that what couldn't be
 * found is a ruleset and not a rule (well, it's clear to those who
 * know the convention).
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/priv.h>
#include <sys/dirent.h>
#include <sys/ioccom.h>
#include <sys/lock.h>
#include <sys/sx.h>

#include <fs/devfs/devfs.h>
#include <fs/devfs/devfs_int.h>

/*
 * Kernel version of devfs_rule.
 */
struct devfs_krule {
	TAILQ_ENTRY(devfs_krule)	dk_list;
	struct devfs_ruleset		*dk_ruleset;
	struct devfs_rule		dk_rule;
};

TAILQ_HEAD(rulehead, devfs_krule);
static MALLOC_DEFINE(M_DEVFSRULE, "DEVFS_RULE", "DEVFS rule storage");

/*
 * Structure to describe a ruleset.
 */
struct devfs_ruleset {
	TAILQ_ENTRY(devfs_ruleset)	ds_list;
	struct rulehead			ds_rules;
	devfs_rsnum			ds_number;
	int				ds_refcount;
};

static devfs_rid devfs_rid_input(devfs_rid rid, struct devfs_mount *dm);

static void devfs_rule_applyde_recursive(struct devfs_krule *dk,
		struct devfs_mount *dm, struct devfs_dirent *de);
static void devfs_rule_applydm(struct devfs_krule *dk, struct devfs_mount *dm);
static int  devfs_rule_autonumber(struct devfs_ruleset *ds, devfs_rnum *rnp);
static struct devfs_krule *devfs_rule_byid(devfs_rid rid);
static int  devfs_rule_delete(struct devfs_krule *dkp);
static struct cdev *devfs_rule_getdev(struct devfs_dirent *de);
static int  devfs_rule_input(struct devfs_rule *dr, struct devfs_mount *dm);
static int  devfs_rule_insert(struct devfs_rule *dr);
static int  devfs_rule_match(struct devfs_krule *dk, struct devfs_mount *dm,
		struct devfs_dirent *de);
static int  devfs_rule_matchpath(struct devfs_krule *dk, struct devfs_mount *dm,
		struct devfs_dirent *de);
static void devfs_rule_run(struct devfs_krule *dk, struct devfs_mount *dm,
		struct devfs_dirent *de, unsigned depth);

static void devfs_ruleset_applyde(struct devfs_ruleset *ds,
		struct devfs_mount *dm, struct devfs_dirent *de,
		unsigned depth);
static void devfs_ruleset_applydm(struct devfs_ruleset *ds,
		struct devfs_mount *dm);
static struct devfs_ruleset *devfs_ruleset_bynum(devfs_rsnum rsnum);
static struct devfs_ruleset *devfs_ruleset_create(devfs_rsnum rsnum);
static void devfs_ruleset_reap(struct devfs_ruleset *dsp);
static int  devfs_ruleset_use(devfs_rsnum rsnum, struct devfs_mount *dm);

static struct sx sx_rules;
SX_SYSINIT(sx_rules, &sx_rules, "DEVFS ruleset lock");

static TAILQ_HEAD(, devfs_ruleset) devfs_rulesets =
    TAILQ_HEAD_INITIALIZER(devfs_rulesets);

/*
 * Called to apply the proper rules for 'de' before it can be
 * exposed to the userland.  This should be called with an exclusive
 * lock on dm in case we need to run anything.
 */
void
devfs_rules_apply(struct devfs_mount *dm, struct devfs_dirent *de)
{
	struct devfs_ruleset *ds;

	sx_assert(&dm->dm_lock, SX_XLOCKED);

	if (dm->dm_ruleset == 0)
		return;
	sx_slock(&sx_rules);
	ds = devfs_ruleset_bynum(dm->dm_ruleset);
	KASSERT(ds != NULL, ("mount-point has NULL ruleset"));
	devfs_ruleset_applyde(ds, dm, de, devfs_rule_depth);
	sx_sunlock(&sx_rules);
}

/*
 * Rule subsystem ioctl hook.
 */
int
devfs_rules_ioctl(struct devfs_mount *dm, u_long cmd, caddr_t data, struct thread *td)
{
	struct devfs_ruleset *ds;
	struct devfs_krule *dk;
	struct devfs_rule *dr;
	devfs_rsnum rsnum;
	devfs_rnum rnum;
	devfs_rid rid;
	int error;

	sx_assert(&dm->dm_lock, SX_XLOCKED);

	/*
	 * XXX: This returns an error regardless of whether we actually
	 * support the cmd or not.
	 *
	 * We could make these privileges finer grained if desired.
	 */
	error = priv_check(td, PRIV_DEVFS_RULE);
	if (error)
		return (error);

	sx_xlock(&sx_rules);

	switch (cmd) {
	case DEVFSIO_RADD:
		dr = (struct devfs_rule *)data;
		error = devfs_rule_input(dr, dm);
		if (error != 0)
			break;
		dk = devfs_rule_byid(dr->dr_id);
		if (dk != NULL) {
			error = EEXIST;
			break;
		}
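		/* Ruleset 0 may not contain rules. */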
		if (rid2rsn(dr->dr_id) == 0) {
			error = EIO;
			break;
		}
		error = devfs_rule_insert(dr);
		break;
	case DEVFSIO_RAPPLY:
		dr = (struct devfs_rule *)data;
		error = devfs_rule_input(dr, dm);
		if (error != 0)
			break;

		/*
		 * This is one of many possible hackish
		 * implementations.  The primary contender is an
		 * implementation where the rule we read in is
		 * temporarily inserted into some ruleset, perhaps
		 * with a hypothetical DRO_NOAUTO flag so that it
		 * doesn't get used where it isn't intended, and
		 * applied in the normal way.  This can be done in the
		 * userland (DEVFSIO_ADD, DEVFSIO_APPLYID,
		 * DEVFSIO_DEL) or in the kernel; either way it breaks
		 * some corner case assumptions in other parts of the
		 * code (not that this implementation doesn't do
		 * that).
		 */
		if (dr->dr_iacts & DRA_INCSET &&
		    devfs_ruleset_bynum(dr->dr_incset) == NULL) {
			error = ESRCH;
			break;
		}
		dk = malloc(sizeof(*dk), M_TEMP, M_WAITOK | M_ZERO);
		memcpy(&dk->dk_rule, dr, sizeof(*dr));
		devfs_rule_applydm(dk, dm);
		free(dk, M_TEMP);
		break;
	case DEVFSIO_RAPPLYID:
		rid = *(devfs_rid *)data;
		rid = devfs_rid_input(rid, dm);
		dk = devfs_rule_byid(rid);
		if (dk == NULL) {
			error = ENOENT;
			break;
		}
		devfs_rule_applydm(dk, dm);
		break;
	case DEVFSIO_RDEL:
		rid = *(devfs_rid *)data;
		rid = devfs_rid_input(rid, dm);
		dk = devfs_rule_byid(rid);
		if (dk == NULL) {
			error = ENOENT;
			break;
		}
		ds = dk->dk_ruleset;
		error = devfs_rule_delete(dk);
		break;
	case DEVFSIO_RGETNEXT:
		dr = (struct devfs_rule *)data;
		error = devfs_rule_input(dr, dm);
		if (error != 0)
			break;
		/*
		 * We can't use devfs_rule_byid() here since that
		 * requires the rule specified to exist, but we want
		 * getnext(N) to work whether there is a rule N or not
		 * (specifically, getnext(0) must work, but we should
		 * never have a rule 0 since the add command
		 * interprets 0 to mean "auto-number").
		 */
		ds = devfs_ruleset_bynum(rid2rsn(dr->dr_id));
		if (ds == NULL) {
			error = ENOENT;
			break;
		}
		rnum = rid2rn(dr->dr_id);
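		/*
		 * Rules in a set are kept sorted by rule number, so the
		 * first rule numbered above rnum is the "next" one; dk
		 * is left NULL if we walk off the end of the list.
		 */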
		TAILQ_FOREACH(dk, &ds->ds_rules, dk_list) {
			if (rid2rn(dk->dk_rule.dr_id) > rnum)
				break;
		}
		if (dk == NULL) {
			error = ENOENT;
			break;
		}
		memcpy(dr, &dk->dk_rule, sizeof(*dr));
		break;
	case DEVFSIO_SUSE:
		rsnum = *(devfs_rsnum *)data;
		error = devfs_ruleset_use(rsnum, dm);
		break;
	case DEVFSIO_SAPPLY:
		rsnum = *(devfs_rsnum *)data;
		rsnum = rid2rsn(devfs_rid_input(mkrid(rsnum, 0), dm));
		ds = devfs_ruleset_bynum(rsnum);
		if (ds == NULL) {
			error = ESRCH;
			break;
		}
		devfs_ruleset_applydm(ds, dm);
		break;
	case DEVFSIO_SGETNEXT:
		rsnum = *(devfs_rsnum *)data;
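		/*
		 * The ruleset list is kept sorted by ruleset number,
		 * so the first set numbered above rsnum is the "next"
		 * one; ds is left NULL if there is none.
		 */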
		TAILQ_FOREACH(ds, &devfs_rulesets, ds_list) {
			if (ds->ds_number > rsnum)
				break;
		}
		if (ds == NULL) {
			error = ENOENT;
			break;
		}
		*(devfs_rsnum *)data = ds->ds_number;
		break;
	default:
		error = ENOIOCTL;
		break;
	}

	sx_xunlock(&sx_rules);
	return (error);
}

/*
 * Adjust the rule identifier to use the ruleset of dm if one isn't
 * explicitly specified.
 *
 * Note that after this operation, rid2rsn(rid) might still be 0, and
 * that's okay; ruleset 0 is a valid ruleset, but when it's read in
 * from the userland, it means "current ruleset for this mount-point".
 */
static devfs_rid
devfs_rid_input(devfs_rid rid, struct devfs_mount *dm)
{

	if (rid2rsn(rid) == 0)
		return (mkrid(dm->dm_ruleset, rid2rn(rid)));
	else
		return (rid);
}

/*
 * Apply dk to de and everything under de.
 *
 * XXX: This method needs a function call for every nested
 * subdirectory in a devfs mount.  If we plan to have many of these,
 * we might eventually run out of kernel stack space.
 * XXX: a linear search could be done through the cdev list instead.
 */
static void
devfs_rule_applyde_recursive(struct devfs_krule *dk, struct devfs_mount *dm,
    struct devfs_dirent *de)
{
	struct devfs_dirent *de2;

	TAILQ_FOREACH(de2, &de->de_dlist, de_list)
		devfs_rule_applyde_recursive(dk, dm, de2);
	devfs_rule_run(dk, dm, de, devfs_rule_depth);
}

/*
 * Apply dk to all entries in dm.
 */
static void
devfs_rule_applydm(struct devfs_krule *dk, struct devfs_mount *dm)
{

	devfs_rule_applyde_recursive(dk, dm, dm->dm_rootdir);
}

/*
 * Automatically select a number for a new rule in ds, and write the
 * result into rnump.
 */
static int
devfs_rule_autonumber(struct devfs_ruleset *ds, devfs_rnum *rnump)
{
	struct devfs_krule *dk;

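	/*
	 * New rules are numbered in increments of 100 past the
	 * highest-numbered rule already in the set; an empty set
	 * starts at 100.
	 */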
	/* Find the last rule. */
	dk = TAILQ_LAST(&ds->ds_rules, rulehead);
	if (dk == NULL)
		*rnump = 100;
	else {
		*rnump = rid2rn(dk->dk_rule.dr_id) + 100;
		/* Detect overflow. */
		if (*rnump < rid2rn(dk->dk_rule.dr_id))
			return (ERANGE);
	}
	KASSERT(devfs_rule_byid(mkrid(ds->ds_number, *rnump)) == NULL,
	    ("autonumbering resulted in an already existing rule"));
	return (0);
}

/*
 * Find a krule by id.
 */
static struct devfs_krule *
devfs_rule_byid(devfs_rid rid)
{
	struct devfs_ruleset *ds;
	struct devfs_krule *dk;
	devfs_rnum rn;

	rn = rid2rn(rid);
	ds = devfs_ruleset_bynum(rid2rsn(rid));
	if (ds == NULL)
		return (NULL);
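	/* Rules are sorted by number, so stop at the first overshoot. */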
	TAILQ_FOREACH(dk, &ds->ds_rules, dk_list) {
		if (rid2rn(dk->dk_rule.dr_id) == rn)
			return (dk);
		else if (rid2rn(dk->dk_rule.dr_id) > rn)
			break;
	}
	return (NULL);
}

/*
 * Remove dk from any lists it may be on and free the memory
 * associated with it.
 */
static int
devfs_rule_delete(struct devfs_krule *dk)
{
	struct devfs_ruleset *ds;

	if (dk->dk_rule.dr_iacts & DRA_INCSET) {
		ds = devfs_ruleset_bynum(dk->dk_rule.dr_incset);
		KASSERT(ds != NULL, ("DRA_INCSET but bad dr_incset"));
		--ds->ds_refcount;
		devfs_ruleset_reap(ds);
	}
	ds = dk->dk_ruleset;
	TAILQ_REMOVE(&ds->ds_rules, dk, dk_list);
	devfs_ruleset_reap(ds);
	free(dk, M_DEVFSRULE);
	return (0);
}

/*
 * Get the struct cdev corresponding to de so we can try to match rules
 * based on it.  If this routine returns NULL, there is no struct cdev
 * associated with the dirent (symlinks and directories don't have one),
 * and the caller should assume that any criteria dependent on a struct
 * cdev don't match.
 */
static struct cdev *
devfs_rule_getdev(struct devfs_dirent *de)
{

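	/* Treat a cdev that is no longer active as if the dirent had no device. */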
	if (de->de_cdp == NULL)
		return (NULL);
	if (de->de_cdp->cdp_flags & CDP_ACTIVE)
		return (&de->de_cdp->cdp_c);
	else
		return (NULL);
}

/*
 * Do what we need to do to a rule that we just loaded from the
 * userland.  In particular, we need to check the magic and fill in
 * the mount's current ruleset if none was specified.
 */
static int
devfs_rule_input(struct devfs_rule *dr, struct devfs_mount *dm)
{

	if (dr->dr_magic != DEVFS_MAGIC)
		return (ERPCMISMATCH);
	dr->dr_id = devfs_rid_input(dr->dr_id, dm);
	return (0);
}

/*
 * Import dr into the appropriate place in the kernel (i.e., make a
 * krule).  The value of dr is copied, so the pointer may be destroyed
 * after this call completes.
 */
static int
devfs_rule_insert(struct devfs_rule *dr)
{
	struct devfs_ruleset *ds, *dsi;
	struct devfs_krule *k1;
	struct devfs_krule *dk;
	devfs_rsnum rsnum;
	devfs_rnum dkrn;
	int error;

	/*
	 * This stuff seems out of place here, but we want to do it as
	 * soon as possible so that if it fails, we don't have to roll
	 * back any changes we already made (e.g., ruleset creation).
	 */
	if (dr->dr_iacts & DRA_INCSET) {
		dsi = devfs_ruleset_bynum(dr->dr_incset);
		if (dsi == NULL)
			return (ESRCH);
	} else
		dsi = NULL;

	rsnum = rid2rsn(dr->dr_id);
	KASSERT(rsnum != 0, ("Inserting into ruleset zero"));

	ds = devfs_ruleset_bynum(rsnum);
	if (ds == NULL)
		ds = devfs_ruleset_create(rsnum);
	dkrn = rid2rn(dr->dr_id);
	if (dkrn == 0) {
		error = devfs_rule_autonumber(ds, &dkrn);
		if (error != 0) {
			devfs_ruleset_reap(ds);
			return (error);
		}
	}

	dk = malloc(sizeof(*dk), M_DEVFSRULE, M_WAITOK | M_ZERO);
	dk->dk_ruleset = ds;
	if (dsi != NULL)
		++dsi->ds_refcount;
	/* XXX: Inspect dr? */
	memcpy(&dk->dk_rule, dr, sizeof(*dr));
	dk->dk_rule.dr_id = mkrid(rid2rsn(dk->dk_rule.dr_id), dkrn);

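	/* Keep the ruleset's rule list sorted by rule number. */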
	TAILQ_FOREACH(k1, &ds->ds_rules, dk_list) {
		if (rid2rn(k1->dk_rule.dr_id) > dkrn) {
			TAILQ_INSERT_BEFORE(k1, dk, dk_list);
			break;
		}
	}
	if (k1 == NULL)
		TAILQ_INSERT_TAIL(&ds->ds_rules, dk, dk_list);
	return (0);
}

/*
 * Determine whether dk matches de.  Returns 1 if dk should be run on
 * de; 0, otherwise.
 */
static int
devfs_rule_match(struct devfs_krule *dk, struct devfs_mount *dm,
    struct devfs_dirent *de)
{
	struct devfs_rule *dr = &dk->dk_rule;
	struct cdev *dev;
	struct cdevsw *dsw;
	int ref;

	dev = devfs_rule_getdev(de);
	/*
	 * At this point, if dev is NULL, we should assume that any
	 * criteria that depend on it don't match.  We should *not*
	 * just ignore them (i.e., act like they weren't specified),
	 * since that makes a rule that only has criteria dependent on
	 * the struct cdev match all symlinks and directories.
	 *
	 * Note also that the following tests are somewhat reversed:
	 * They're actually testing to see whether the condition does
	 * *not* match, since the default is to assume the rule should
	 * be run (such as if there are no conditions).
	 */
	if (dr->dr_icond & DRC_DSWFLAGS) {
		if (dev == NULL)
			return (0);
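		/* d_flags lives in the cdevsw; hold a thread reference while we look. */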
		dsw = dev_refthread(dev, &ref);
		if (dsw == NULL)
			return (0);
		if ((dsw->d_flags & dr->dr_dswflags) == 0) {
			dev_relthread(dev, ref);
			return (0);
		}
		dev_relthread(dev, ref);
	}
	if (dr->dr_icond & DRC_PATHPTRN)
		if (!devfs_rule_matchpath(dk, dm, de))
			return (0);

	return (1);
}

/*
 * Determine whether dk matches de on account of dr_pathptrn.
 */
static int
devfs_rule_matchpath(struct devfs_krule *dk, struct devfs_mount *dm,
    struct devfs_dirent *de)
{
	struct devfs_rule *dr = &dk->dk_rule;
	struct cdev *dev;
	int match;
	char *pname, *specname;

	specname = NULL;
	dev = devfs_rule_getdev(de);
	if (dev != NULL)
		pname = dev->si_name;
	else if (de->de_dirent->d_type == DT_LNK ||
	    (de->de_dirent->d_type == DT_DIR && de != dm->dm_rootdir &&
	    (de->de_flags & (DE_DOT | DE_DOTDOT)) == 0)) {
		specname = malloc(SPECNAMELEN + 1, M_TEMP, M_WAITOK);
		pname = devfs_fqpn(specname, dm, de, NULL);
	} else
		return (0);

	KASSERT(pname != NULL, ("devfs_rule_matchpath: NULL pname"));
	match = fnmatch(dr->dr_pathptrn, pname, FNM_PATHNAME) == 0;
	free(specname, M_TEMP);
	return (match);
}

/*
 * Run dk on de.
 */
static void
devfs_rule_run(struct devfs_krule *dk, struct devfs_mount *dm,
    struct devfs_dirent *de, unsigned depth)
{
	struct devfs_rule *dr = &dk->dk_rule;
	struct devfs_ruleset *ds;

	if (!devfs_rule_match(dk, dm, de))
		return;
	if (dr->dr_iacts & DRA_BACTS) {
		if (dr->dr_bacts & DRB_HIDE)
			de->de_flags |= DE_WHITEOUT;
		if (dr->dr_bacts & DRB_UNHIDE)
			de->de_flags &= ~DE_WHITEOUT;
	}
	if (dr->dr_iacts & DRA_UID)
		de->de_uid = dr->dr_uid;
	if (dr->dr_iacts & DRA_GID)
		de->de_gid = dr->dr_gid;
	if (dr->dr_iacts & DRA_MODE)
		de->de_mode = dr->dr_mode;
	if (dr->dr_iacts & DRA_INCSET) {
		/*
		 * XXX: We should tell the user if the depth is exceeded here,
		 * XXX: but it is not obvious how to.  A return value will
		 * XXX: not work as this is called when devices are created a
		 * XXX: long time after the rules were instantiated.
		 * XXX: A printf() would probably give too much noise, or
		 * XXX: DoS the machine.  I guess a rate-limited message
		 * XXX: might work.
		 */
		if (depth > 0) {
			ds = devfs_ruleset_bynum(dk->dk_rule.dr_incset);
			KASSERT(ds != NULL, ("DRA_INCSET but bad dr_incset"));
			devfs_ruleset_applyde(ds, dm, de, depth - 1);
		}
	}
}

/*
 * Apply all the rules in ds to de.
 */
static void
devfs_ruleset_applyde(struct devfs_ruleset *ds, struct devfs_mount *dm,
    struct devfs_dirent *de, unsigned depth)
{
	struct devfs_krule *dk;

	TAILQ_FOREACH(dk, &ds->ds_rules, dk_list)
		devfs_rule_run(dk, dm, de, depth);
}

/*
 * Apply all the rules in ds to all the entries in dm.
 */
static void
devfs_ruleset_applydm(struct devfs_ruleset *ds, struct devfs_mount *dm)
{
	struct devfs_krule *dk;

	/*
	 * XXX: Does it matter whether we do
	 *
	 *	foreach(dk in ds)
	 *		foreach(de in dm)
	 *			apply(dk to de)
	 *
	 * as opposed to
	 *
	 *	foreach(de in dm)
	 *		foreach(dk in ds)
	 *			apply(dk to de)
	 *
	 * The end result is obviously the same, but does the order
	 * matter?
	 */
	TAILQ_FOREACH(dk, &ds->ds_rules, dk_list)
		devfs_rule_applydm(dk, dm);
}

/*
 * Find a ruleset by number.
 */
static struct devfs_ruleset *
devfs_ruleset_bynum(devfs_rsnum rsnum)
{
	struct devfs_ruleset *ds;

	TAILQ_FOREACH(ds, &devfs_rulesets, ds_list) {
		if (ds->ds_number == rsnum)
			return (ds);
	}
	return (NULL);
}

/*
 * Create a new ruleset.
 */
static struct devfs_ruleset *
devfs_ruleset_create(devfs_rsnum rsnum)
{
	struct devfs_ruleset *s1;
	struct devfs_ruleset *ds;

	KASSERT(rsnum != 0, ("creating ruleset zero"));

	KASSERT(devfs_ruleset_bynum(rsnum) == NULL,
	    ("creating already existent ruleset %d", rsnum));

	ds = malloc(sizeof(*ds), M_DEVFSRULE, M_WAITOK | M_ZERO);
	ds->ds_number = rsnum;
	TAILQ_INIT(&ds->ds_rules);

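	/* Keep the global ruleset list sorted by ruleset number. */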
	TAILQ_FOREACH(s1, &devfs_rulesets, ds_list) {
		if (s1->ds_number > rsnum) {
			TAILQ_INSERT_BEFORE(s1, ds, ds_list);
			break;
		}
	}
	if (s1 == NULL)
		TAILQ_INSERT_TAIL(&devfs_rulesets, ds, ds_list);
	return (ds);
}

/*
 * Remove a ruleset from the system if it's empty and not used
 * anywhere.  This should be called after every time a rule is deleted
 * from this ruleset or the reference count is decremented.
 */
static void
devfs_ruleset_reap(struct devfs_ruleset *ds)
{

	KASSERT(ds->ds_number != 0, ("reaping ruleset zero"));

	if (!TAILQ_EMPTY(&ds->ds_rules) || ds->ds_refcount != 0)
		return;

	TAILQ_REMOVE(&devfs_rulesets, ds, ds_list);
	free(ds, M_DEVFSRULE);
}

/*
 * Make rsnum the active ruleset for dm.
 */
static int
devfs_ruleset_use(devfs_rsnum rsnum, struct devfs_mount *dm)
{
	struct devfs_ruleset *cds, *ds;

	if (dm->dm_ruleset != 0) {
		cds = devfs_ruleset_bynum(dm->dm_ruleset);
		--cds->ds_refcount;
		devfs_ruleset_reap(cds);
	}

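	/* Ruleset 0 means "no ruleset"; it is never created or refcounted. */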
	if (rsnum == 0) {
		dm->dm_ruleset = 0;
		return (0);
	}

	ds = devfs_ruleset_bynum(rsnum);
	if (ds == NULL)
		ds = devfs_ruleset_create(rsnum);
	/* These should probably be made atomic somehow. */
	++ds->ds_refcount;
	dm->dm_ruleset = rsnum;

	return (0);
}

void
devfs_rules_cleanup(struct devfs_mount *dm)
{
	struct devfs_ruleset *ds;

	sx_assert(&dm->dm_lock, SX_XLOCKED);
	if (dm->dm_ruleset != 0) {
		ds = devfs_ruleset_bynum(dm->dm_ruleset);
		--ds->ds_refcount;
		devfs_ruleset_reap(ds);
	}
}

/*
 * Make rsnum the active ruleset for dm (locked).
 */
void
devfs_ruleset_set(devfs_rsnum rsnum, struct devfs_mount *dm)
{

	sx_assert(&dm->dm_lock, SX_XLOCKED);

	sx_xlock(&sx_rules);
	devfs_ruleset_use(rsnum, dm);
	sx_xunlock(&sx_rules);
}

/*
 * Apply the currently active ruleset on a mount.
 */
void
devfs_ruleset_apply(struct devfs_mount *dm)
{
	struct devfs_ruleset *ds;

	sx_assert(&dm->dm_lock, SX_XLOCKED);

	sx_xlock(&sx_rules);
	if (dm->dm_ruleset == 0) {
		sx_xunlock(&sx_rules);
		return;
	}
	ds = devfs_ruleset_bynum(dm->dm_ruleset);
	if (ds != NULL)
		devfs_ruleset_applydm(ds, dm);
	sx_xunlock(&sx_rules);
}
825