/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2002 Dima Dorfman.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * DEVFS ruleset implementation.
 *
 * A note on terminology: To "run" a rule on a dirent is to take the
 * prescribed action; to "apply" a rule is to check whether it matches
 * a dirent and run it if it does.
 *
 * A note on locking: Only foreign entry points (non-static functions)
 * should deal with locking. Everything else assumes we already hold
 * the required kind of lock.
 *
 * A note on namespace: devfs_rules_* are the non-static functions for
 * the entire "ruleset" subsystem, devfs_rule_* are the static
 * functions that operate on rules, and devfs_ruleset_* are the static
 * functions that operate on rulesets. The line between the last two
 * isn't always clear, but the guideline is still useful.
 *
 * A note on "special" identifiers: Ruleset 0 is the NULL, or empty,
 * ruleset; it cannot be deleted or changed in any way. This may be
 * assumed inside the code; e.g., a ruleset of 0 may be interpreted to
 * mean "no ruleset". The interpretation of rule 0 is
 * command-dependent, but in no case is there a real rule with number
 * 0.
 *
 * A note on errno codes: To make it easier for the userland to tell
 * what went wrong, we sometimes use errno codes that are not entirely
 * appropriate for the error but that would be less ambiguous than the
 * appropriate "generic" code. For example, when we can't find a
 * ruleset, we return ESRCH instead of ENOENT (except in
 * DEVFSIO_{R,S}GETNEXT, where a nonexistent ruleset means "end of
 * list", and the userland expects ENOENT to be this indicator); this
 * way, when an operation fails, it's clear that what couldn't be
 * found is a ruleset and not a rule (well, it's clear to those who
 * know the convention).
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/priv.h>
#include <sys/dirent.h>
#include <sys/ioccom.h>
#include <sys/lock.h>
#include <sys/sx.h>

#include <fs/devfs/devfs.h>
#include <fs/devfs/devfs_int.h>

/*
 * Kernel version of devfs_rule.
 */
struct devfs_krule {
        TAILQ_ENTRY(devfs_krule) dk_list;
        struct devfs_ruleset *dk_ruleset;
        struct devfs_rule dk_rule;
};

TAILQ_HEAD(rulehead, devfs_krule);
static MALLOC_DEFINE(M_DEVFSRULE, "DEVFS_RULE", "DEVFS rule storage");

/*
 * Structure to describe a ruleset.
 */
struct devfs_ruleset {
        TAILQ_ENTRY(devfs_ruleset) ds_list;
        struct rulehead ds_rules;
        devfs_rsnum ds_number;
        int ds_refcount;
};

static devfs_rid devfs_rid_input(devfs_rid rid, struct devfs_mount *dm);

static void devfs_rule_applyde_recursive(struct devfs_krule *dk,
    struct devfs_mount *dm, struct devfs_dirent *de);
static void devfs_rule_applydm(struct devfs_krule *dk, struct devfs_mount *dm);
static int devfs_rule_autonumber(struct devfs_ruleset *ds, devfs_rnum *rnp);
static struct devfs_krule *devfs_rule_byid(devfs_rid rid);
static int devfs_rule_delete(struct devfs_krule *dkp);
static struct cdev *devfs_rule_getdev(struct devfs_dirent *de);
static int devfs_rule_input(struct devfs_rule *dr, struct devfs_mount *dm);
static int devfs_rule_insert(struct devfs_rule *dr);
static int devfs_rule_match(struct devfs_krule *dk, struct devfs_mount *dm,
    struct devfs_dirent *de);
static int devfs_rule_matchpath(struct devfs_krule *dk, struct devfs_mount *dm,
    struct devfs_dirent *de);
static void devfs_rule_run(struct devfs_krule *dk, struct devfs_mount *dm,
    struct devfs_dirent *de, unsigned depth);

static void devfs_ruleset_applyde(struct devfs_ruleset *ds,
    struct devfs_mount *dm, struct devfs_dirent *de, unsigned depth);
static void devfs_ruleset_applydm(struct devfs_ruleset *ds,
    struct devfs_mount *dm);
static struct devfs_ruleset *devfs_ruleset_bynum(devfs_rsnum rsnum);
static struct devfs_ruleset *devfs_ruleset_create(devfs_rsnum rsnum);
static void devfs_ruleset_reap(struct devfs_ruleset *dsp);
static int devfs_ruleset_use(devfs_rsnum rsnum, struct devfs_mount *dm);

static struct sx sx_rules;
SX_SYSINIT(sx_rules, &sx_rules, "DEVFS ruleset lock");

static TAILQ_HEAD(, devfs_ruleset) devfs_rulesets =
    TAILQ_HEAD_INITIALIZER(devfs_rulesets);

/*
 * Called to apply the proper rules for 'de' before it can be
 * exposed to the userland. This should be called with an exclusive
 * lock on dm in case we need to run anything.
 */
void
devfs_rules_apply(struct devfs_mount *dm, struct devfs_dirent *de)
{
        struct devfs_ruleset *ds;

        sx_assert(&dm->dm_lock, SX_XLOCKED);

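        /* Ruleset 0 is the empty ruleset, so there is nothing to apply. */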
        if (dm->dm_ruleset == 0)
                return;
        sx_slock(&sx_rules);
        ds = devfs_ruleset_bynum(dm->dm_ruleset);
        KASSERT(ds != NULL, ("mount-point has NULL ruleset"));
        devfs_ruleset_applyde(ds, dm, de, devfs_rule_depth);
        sx_sunlock(&sx_rules);
}

/*
 * Rule subsystem ioctl hook.
 */
int
devfs_rules_ioctl(struct devfs_mount *dm, u_long cmd, caddr_t data,
    struct thread *td)
{
        struct devfs_ruleset *ds;
        struct devfs_krule *dk;
        struct devfs_rule *dr;
        devfs_rsnum rsnum;
        devfs_rnum rnum;
        devfs_rid rid;
        int error;

        sx_assert(&dm->dm_lock, SX_XLOCKED);

        /*
         * XXX: This returns an error regardless of whether we actually
         * support the cmd or not.
         *
         * We could make these privileges finer-grained if desired.
         */
        error = priv_check(td, PRIV_DEVFS_RULE);
        if (error)
                return (error);

        sx_xlock(&sx_rules);

        switch (cmd) {
        case DEVFSIO_RADD:
                dr = (struct devfs_rule *)data;
                error = devfs_rule_input(dr, dm);
                if (error != 0)
                        break;
                dk = devfs_rule_byid(dr->dr_id);
                if (dk != NULL) {
                        error = EEXIST;
                        break;
                }
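                /*
                 * Ruleset 0 is the immutable empty ruleset (see the
                 * note on "special" identifiers above), so refuse to
                 * add rules to it.
                 */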
                if (rid2rsn(dr->dr_id) == 0) {
                        error = EIO;
                        break;
                }
                error = devfs_rule_insert(dr);
                break;
        case DEVFSIO_RAPPLY:
                dr = (struct devfs_rule *)data;
                error = devfs_rule_input(dr, dm);
                if (error != 0)
                        break;

                /*
                 * This is one of many possible hackish
                 * implementations. The primary contender is an
                 * implementation where the rule we read in is
                 * temporarily inserted into some ruleset, perhaps
                 * with a hypothetical DRO_NOAUTO flag so that it
                 * doesn't get used where it isn't intended, and
                 * applied in the normal way. This can be done in the
                 * userland (DEVFSIO_ADD, DEVFSIO_APPLYID,
                 * DEVFSIO_DEL) or in the kernel; either way it breaks
                 * some corner case assumptions in other parts of the
                 * code (not that this implementation doesn't do
                 * that).
                 */
                if (dr->dr_iacts & DRA_INCSET &&
                    devfs_ruleset_bynum(dr->dr_incset) == NULL) {
                        error = ESRCH;
                        break;
                }
                dk = malloc(sizeof(*dk), M_TEMP, M_WAITOK | M_ZERO);
                memcpy(&dk->dk_rule, dr, sizeof(*dr));
                devfs_rule_applydm(dk, dm);
                free(dk, M_TEMP);
                break;
        case DEVFSIO_RAPPLYID:
                rid = *(devfs_rid *)data;
                rid = devfs_rid_input(rid, dm);
                dk = devfs_rule_byid(rid);
                if (dk == NULL) {
                        error = ENOENT;
                        break;
                }
                devfs_rule_applydm(dk, dm);
                break;
        case DEVFSIO_RDEL:
                rid = *(devfs_rid *)data;
                rid = devfs_rid_input(rid, dm);
                dk = devfs_rule_byid(rid);
                if (dk == NULL) {
                        error = ENOENT;
                        break;
                }
                ds = dk->dk_ruleset;
                error = devfs_rule_delete(dk);
                break;
        case DEVFSIO_RGETNEXT:
                dr = (struct devfs_rule *)data;
                error = devfs_rule_input(dr, dm);
                if (error != 0)
                        break;
                /*
                 * We can't use devfs_rule_byid() here since that
                 * requires the rule specified to exist, but we want
                 * getnext(N) to work whether there is a rule N or not
                 * (specifically, getnext(0) must work, but we should
                 * never have a rule 0 since the add command
                 * interprets 0 to mean "auto-number").
                 */
                ds = devfs_ruleset_bynum(rid2rsn(dr->dr_id));
                if (ds == NULL) {
                        error = ENOENT;
                        break;
                }
                rnum = rid2rn(dr->dr_id);
                TAILQ_FOREACH(dk, &ds->ds_rules, dk_list) {
                        if (rid2rn(dk->dk_rule.dr_id) > rnum)
                                break;
                }
                if (dk == NULL) {
                        error = ENOENT;
                        break;
                }
                memcpy(dr, &dk->dk_rule, sizeof(*dr));
                break;
        case DEVFSIO_SUSE:
                rsnum = *(devfs_rsnum *)data;
                error = devfs_ruleset_use(rsnum, dm);
                break;
        case DEVFSIO_SAPPLY:
                rsnum = *(devfs_rsnum *)data;
                rsnum = rid2rsn(devfs_rid_input(mkrid(rsnum, 0), dm));
                ds = devfs_ruleset_bynum(rsnum);
                if (ds == NULL) {
                        error = ESRCH;
                        break;
                }
                devfs_ruleset_applydm(ds, dm);
                break;
        case DEVFSIO_SGETNEXT:
                rsnum = *(devfs_rsnum *)data;
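                /*
                 * Return the first ruleset numbered higher than rsnum;
                 * ENOENT marks the end of the list.
                 */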
                TAILQ_FOREACH(ds, &devfs_rulesets, ds_list) {
                        if (ds->ds_number > rsnum)
                                break;
                }
                if (ds == NULL) {
                        error = ENOENT;
                        break;
                }
                *(devfs_rsnum *)data = ds->ds_number;
                break;
        default:
                error = ENOIOCTL;
                break;
        }

        sx_xunlock(&sx_rules);
        return (error);
}

/*
 * Adjust the rule identifier to use the ruleset of dm if one isn't
 * explicitly specified.
 *
 * Note that after this operation, rid2rsn(rid) might still be 0, and
 * that's okay; ruleset 0 is a valid ruleset, but when it's read in
 * from the userland, it means "current ruleset for this mount-point".
 */
static devfs_rid
devfs_rid_input(devfs_rid rid, struct devfs_mount *dm)
{

        if (rid2rsn(rid) == 0)
                return (mkrid(dm->dm_ruleset, rid2rn(rid)));
        else
                return (rid);
}

/*
 * Apply dk to de and everything under de.
 *
 * XXX: This method needs a function call for every nested
 * subdirectory in a devfs mount. If we plan to have many of these,
 * we might eventually run out of kernel stack space.
 * XXX: a linear search could be done through the cdev list instead.
 */
static void
devfs_rule_applyde_recursive(struct devfs_krule *dk, struct devfs_mount *dm,
    struct devfs_dirent *de)
{
        struct devfs_dirent *de2;

        TAILQ_FOREACH(de2, &de->de_dlist, de_list)
                devfs_rule_applyde_recursive(dk, dm, de2);
        devfs_rule_run(dk, dm, de, devfs_rule_depth);
}

/*
 * Apply dk to all entries in dm.
 */
static void
devfs_rule_applydm(struct devfs_krule *dk, struct devfs_mount *dm)
{

        devfs_rule_applyde_recursive(dk, dm, dm->dm_rootdir);
}

/*
 * Automatically select a number for a new rule in ds, and write the
 * result into rnump.
 */
static int
devfs_rule_autonumber(struct devfs_ruleset *ds, devfs_rnum *rnump)
{
        struct devfs_krule *dk;

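        /*
         * Rule numbers are handed out in increments of 100 past the
         * highest number in the ruleset (starting at 100), presumably
         * so that rules can later be inserted between existing ones.
         */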
        /* Find the last rule. */
        dk = TAILQ_LAST(&ds->ds_rules, rulehead);
        if (dk == NULL)
                *rnump = 100;
        else {
                *rnump = rid2rn(dk->dk_rule.dr_id) + 100;
                /* Detect overflow. */
                if (*rnump < rid2rn(dk->dk_rule.dr_id))
                        return (ERANGE);
        }
        KASSERT(devfs_rule_byid(mkrid(ds->ds_number, *rnump)) == NULL,
            ("autonumbering resulted in an already existing rule"));
        return (0);
}

/*
 * Find a krule by id.
 */
static struct devfs_krule *
devfs_rule_byid(devfs_rid rid)
{
        struct devfs_ruleset *ds;
        struct devfs_krule *dk;
        devfs_rnum rn;

        rn = rid2rn(rid);
        ds = devfs_ruleset_bynum(rid2rsn(rid));
        if (ds == NULL)
                return (NULL);
        TAILQ_FOREACH(dk, &ds->ds_rules, dk_list) {
                if (rid2rn(dk->dk_rule.dr_id) == rn)
                        return (dk);
                else if (rid2rn(dk->dk_rule.dr_id) > rn)
                        break;
        }
        return (NULL);
}

/*
 * Remove dk from any lists it may be on and free the memory
 * associated with it.
 */
static int
devfs_rule_delete(struct devfs_krule *dk)
{
        struct devfs_ruleset *ds;

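        /*
         * If this rule references another ruleset via DRA_INCSET, drop
         * the reference it holds on that ruleset.
         */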
        if (dk->dk_rule.dr_iacts & DRA_INCSET) {
                ds = devfs_ruleset_bynum(dk->dk_rule.dr_incset);
                KASSERT(ds != NULL, ("DRA_INCSET but bad dr_incset"));
                --ds->ds_refcount;
                devfs_ruleset_reap(ds);
        }
        ds = dk->dk_ruleset;
        TAILQ_REMOVE(&ds->ds_rules, dk, dk_list);
        devfs_ruleset_reap(ds);
        free(dk, M_DEVFSRULE);
        return (0);
}

/*
 * Get the struct cdev * corresponding to de so we can try to match
 * rules based on it. If this routine returns NULL, there is no
 * struct cdev * associated with the dirent (symlinks and directories
 * don't have dev_ts), and the caller should assume that any criteria
 * dependent on a dev_t don't match.
 */
static struct cdev *
devfs_rule_getdev(struct devfs_dirent *de)
{

        if (de->de_cdp == NULL)
                return (NULL);
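        /* Only an active (not yet destroyed) device has a usable cdev. */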
        if (de->de_cdp->cdp_flags & CDP_ACTIVE)
                return (&de->de_cdp->cdp_c);
        else
                return (NULL);
}

/*
 * Do what we need to do to a rule that we just loaded from the
 * userland. In particular, we need to check the magic number and
 * adjust the ruleset as appropriate.
 */
static int
devfs_rule_input(struct devfs_rule *dr, struct devfs_mount *dm)
{

        if (dr->dr_magic != DEVFS_MAGIC)
                return (ERPCMISMATCH);
        dr->dr_id = devfs_rid_input(dr->dr_id, dm);
        return (0);
}

/*
 * Import dr into the appropriate place in the kernel (i.e., make a
 * krule). The value of dr is copied, so the pointer may be destroyed
 * after this call completes.
 */
static int
devfs_rule_insert(struct devfs_rule *dr)
{
        struct devfs_ruleset *ds, *dsi;
        struct devfs_krule *k1;
        struct devfs_krule *dk;
        devfs_rsnum rsnum;
        devfs_rnum dkrn;
        int error;

        /*
         * This stuff seems out of place here, but we want to do it as
         * soon as possible so that if it fails, we don't have to roll
         * back any changes we already made (e.g., ruleset creation).
         */
        if (dr->dr_iacts & DRA_INCSET) {
                dsi = devfs_ruleset_bynum(dr->dr_incset);
                if (dsi == NULL)
                        return (ESRCH);
        } else
                dsi = NULL;

        rsnum = rid2rsn(dr->dr_id);
        KASSERT(rsnum != 0, ("Inserting into ruleset zero"));

        ds = devfs_ruleset_bynum(rsnum);
        if (ds == NULL)
                ds = devfs_ruleset_create(rsnum);
        dkrn = rid2rn(dr->dr_id);
        if (dkrn == 0) {
                error = devfs_rule_autonumber(ds, &dkrn);
                if (error != 0) {
                        devfs_ruleset_reap(ds);
                        return (error);
                }
        }

        dk = malloc(sizeof(*dk), M_DEVFSRULE, M_WAITOK | M_ZERO);
        dk->dk_ruleset = ds;
        if (dsi != NULL)
                ++dsi->ds_refcount;
        /* XXX: Inspect dr? */
        memcpy(&dk->dk_rule, dr, sizeof(*dr));
        dk->dk_rule.dr_id = mkrid(rid2rsn(dk->dk_rule.dr_id), dkrn);

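        /*
         * Keep the ruleset's rule list sorted by rule number; append
         * if the new rule has the highest number.
         */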
        TAILQ_FOREACH(k1, &ds->ds_rules, dk_list) {
                if (rid2rn(k1->dk_rule.dr_id) > dkrn) {
                        TAILQ_INSERT_BEFORE(k1, dk, dk_list);
                        break;
                }
        }
        if (k1 == NULL)
                TAILQ_INSERT_TAIL(&ds->ds_rules, dk, dk_list);
        return (0);
}

/*
 * Determine whether dk matches de. Returns 1 if dk should be run on
 * de; 0, otherwise.
 */
static int
devfs_rule_match(struct devfs_krule *dk, struct devfs_mount *dm,
    struct devfs_dirent *de)
{
        struct devfs_rule *dr = &dk->dk_rule;
        struct cdev *dev;
        struct cdevsw *dsw;
        int ref;

        dev = devfs_rule_getdev(de);
        /*
         * At this point, if dev is NULL, we should assume that any
         * criteria that depend on it don't match. We should *not*
         * just ignore them (i.e., act like they weren't specified),
         * since that makes a rule that only has criteria dependent on
         * the struct cdev * match all symlinks and directories.
         *
         * Note also that the following tests are somewhat reversed:
         * They're actually testing to see whether the condition does
         * *not* match, since the default is to assume the rule should
         * be run (such as if there are no conditions).
         */
        if (dr->dr_icond & DRC_DSWFLAGS) {
                if (dev == NULL)
                        return (0);
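                /*
                 * dev_refthread() takes a temporary reference so the
                 * cdevsw's d_flags can be examined safely; it is
                 * dropped again with dev_relthread().
                 */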
                dsw = dev_refthread(dev, &ref);
                if (dsw == NULL)
                        return (0);
                if ((dsw->d_flags & dr->dr_dswflags) == 0) {
                        dev_relthread(dev, ref);
                        return (0);
                }
                dev_relthread(dev, ref);
        }
        if (dr->dr_icond & DRC_PATHPTRN)
                if (!devfs_rule_matchpath(dk, dm, de))
                        return (0);

        return (1);
}

/*
 * Determine whether dk matches de on account of dr_pathptrn.
 */
static int
devfs_rule_matchpath(struct devfs_krule *dk, struct devfs_mount *dm,
    struct devfs_dirent *de)
{
        struct devfs_rule *dr = &dk->dk_rule;
        struct cdev *dev;
        int match;
        char *pname, *specname;

        specname = NULL;
        dev = devfs_rule_getdev(de);
        if (dev != NULL)
                pname = dev->si_name;
        else if (de->de_dirent->d_type == DT_LNK ||
            (de->de_dirent->d_type == DT_DIR && de != dm->dm_rootdir &&
            (de->de_flags & (DE_DOT | DE_DOTDOT)) == 0)) {
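                /*
                 * Symlinks and subdirectories have no cdev, so match
                 * against their fully qualified (mount-relative) path.
                 */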
                specname = malloc(SPECNAMELEN + 1, M_TEMP, M_WAITOK);
                pname = devfs_fqpn(specname, dm, de, NULL);
        } else
                return (0);

        KASSERT(pname != NULL, ("devfs_rule_matchpath: NULL pname"));
        match = fnmatch(dr->dr_pathptrn, pname, FNM_PATHNAME) == 0;
        free(specname, M_TEMP);
        return (match);
}

/*
 * Run dk on de.
 */
static void
devfs_rule_run(struct devfs_krule *dk, struct devfs_mount *dm,
    struct devfs_dirent *de, unsigned depth)
{
        struct devfs_rule *dr = &dk->dk_rule;
        struct devfs_ruleset *ds;

        if (!devfs_rule_match(dk, dm, de))
                return;
        if (dr->dr_iacts & DRA_BACTS) {
                if (dr->dr_bacts & DRB_HIDE)
                        de->de_flags |= DE_WHITEOUT;
                if (dr->dr_bacts & DRB_UNHIDE)
                        de->de_flags &= ~DE_WHITEOUT;
        }
        if (dr->dr_iacts & DRA_UID)
                de->de_uid = dr->dr_uid;
        if (dr->dr_iacts & DRA_GID)
                de->de_gid = dr->dr_gid;
        if (dr->dr_iacts & DRA_MODE)
                de->de_mode = dr->dr_mode;
        if (dr->dr_iacts & DRA_INCSET) {
                /*
                 * XXX: We should tell the user if the depth is
                 * XXX: exceeded here, but it is not obvious how to.
                 * XXX: A return value will not work, as this is called
                 * XXX: when devices are created a long time after the
                 * XXX: rules were instantiated. A printf() would
                 * XXX: probably give too much noise, or DoS the
                 * XXX: machine. A rate-limited message might work.
                 */
                if (depth > 0) {
                        ds = devfs_ruleset_bynum(dk->dk_rule.dr_incset);
                        KASSERT(ds != NULL, ("DRA_INCSET but bad dr_incset"));
                        devfs_ruleset_applyde(ds, dm, de, depth - 1);
                }
        }
}

/*
 * Apply all the rules in ds to de.
 */
static void
devfs_ruleset_applyde(struct devfs_ruleset *ds, struct devfs_mount *dm,
    struct devfs_dirent *de, unsigned depth)
{
        struct devfs_krule *dk;

        TAILQ_FOREACH(dk, &ds->ds_rules, dk_list)
                devfs_rule_run(dk, dm, de, depth);
}

/*
 * Apply all the rules in ds to all the entries in dm.
 */
static void
devfs_ruleset_applydm(struct devfs_ruleset *ds, struct devfs_mount *dm)
{
        struct devfs_krule *dk;

        /*
         * XXX: Does it matter whether we do
         *
         *      foreach(dk in ds)
         *              foreach(de in dm)
         *                      apply(dk to de)
         *
         * as opposed to
         *
         *      foreach(de in dm)
         *              foreach(dk in ds)
         *                      apply(dk to de)
         *
         * The end result is obviously the same, but does the order
         * matter?
         */
        TAILQ_FOREACH(dk, &ds->ds_rules, dk_list)
                devfs_rule_applydm(dk, dm);
}

/*
 * Find a ruleset by number.
 */
static struct devfs_ruleset *
devfs_ruleset_bynum(devfs_rsnum rsnum)
{
        struct devfs_ruleset *ds;

        TAILQ_FOREACH(ds, &devfs_rulesets, ds_list) {
                if (ds->ds_number == rsnum)
                        return (ds);
        }
        return (NULL);
}

/*
 * Create a new ruleset.
 */
static struct devfs_ruleset *
devfs_ruleset_create(devfs_rsnum rsnum)
{
        struct devfs_ruleset *s1;
        struct devfs_ruleset *ds;

        KASSERT(rsnum != 0, ("creating ruleset zero"));

        KASSERT(devfs_ruleset_bynum(rsnum) == NULL,
            ("creating already existent ruleset %d", rsnum));

        ds = malloc(sizeof(*ds), M_DEVFSRULE, M_WAITOK | M_ZERO);
        ds->ds_number = rsnum;
        TAILQ_INIT(&ds->ds_rules);

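        /* Keep the global ruleset list sorted by ruleset number. */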
        TAILQ_FOREACH(s1, &devfs_rulesets, ds_list) {
                if (s1->ds_number > rsnum) {
                        TAILQ_INSERT_BEFORE(s1, ds, ds_list);
                        break;
                }
        }
        if (s1 == NULL)
                TAILQ_INSERT_TAIL(&devfs_rulesets, ds, ds_list);
        return (ds);
}

/*
 * Remove a ruleset from the system if it's empty and not used
 * anywhere. This should be called after every time a rule is deleted
 * from this ruleset or the reference count is decremented.
 */
static void
devfs_ruleset_reap(struct devfs_ruleset *ds)
{

        KASSERT(ds->ds_number != 0, ("reaping ruleset zero "));

        if (!TAILQ_EMPTY(&ds->ds_rules) || ds->ds_refcount != 0)
                return;

        TAILQ_REMOVE(&devfs_rulesets, ds, ds_list);
        free(ds, M_DEVFSRULE);
}

/*
 * Make rsnum the active ruleset for dm.
 */
static int
devfs_ruleset_use(devfs_rsnum rsnum, struct devfs_mount *dm)
{
        struct devfs_ruleset *cds, *ds;

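        /*
         * Drop the reference on the ruleset currently in use, reaping
         * it if it is now empty and unreferenced.
         */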
        if (dm->dm_ruleset != 0) {
                cds = devfs_ruleset_bynum(dm->dm_ruleset);
                --cds->ds_refcount;
                devfs_ruleset_reap(cds);
        }

        if (rsnum == 0) {
                dm->dm_ruleset = 0;
                return (0);
        }

        ds = devfs_ruleset_bynum(rsnum);
        if (ds == NULL)
                ds = devfs_ruleset_create(rsnum);
        /* These should probably be made atomic somehow. */
        ++ds->ds_refcount;
        dm->dm_ruleset = rsnum;

        return (0);
}

void
devfs_rules_cleanup(struct devfs_mount *dm)
{
        struct devfs_ruleset *ds;

        sx_assert(&dm->dm_lock, SX_XLOCKED);
        if (dm->dm_ruleset != 0) {
                ds = devfs_ruleset_bynum(dm->dm_ruleset);
                --ds->ds_refcount;
                devfs_ruleset_reap(ds);
        }
}

/*
 * Make rsnum the active ruleset for dm (locked).
 */
void
devfs_ruleset_set(devfs_rsnum rsnum, struct devfs_mount *dm)
{

        sx_assert(&dm->dm_lock, SX_XLOCKED);

        sx_xlock(&sx_rules);
        devfs_ruleset_use(rsnum, dm);
        sx_xunlock(&sx_rules);
}

/*
 * Apply the current active ruleset on a mount.
 */
void
devfs_ruleset_apply(struct devfs_mount *dm)
{
        struct devfs_ruleset *ds;

        sx_assert(&dm->dm_lock, SX_XLOCKED);

        sx_xlock(&sx_rules);
        if (dm->dm_ruleset == 0) {
                sx_xunlock(&sx_rules);
                return;
        }
        ds = devfs_ruleset_bynum(dm->dm_ruleset);
        if (ds != NULL)
                devfs_ruleset_applydm(ds, dm);
        sx_xunlock(&sx_rules);
}