/*-
 * Copyright (c) 2002 Dima Dorfman.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * DEVFS ruleset implementation.
 *
 * A note on terminology: To "run" a rule on a dirent is to take the
 * prescribed action; to "apply" a rule is to check whether it matches
 * a dirent and run it if it does.
 *
 * A note on locking: Only foreign entry points (non-static functions)
 * should deal with locking. Everything else assumes we already hold
 * the required kind of lock.
 *
 * A note on namespace: devfs_rules_* are the non-static functions for
 * the entire "ruleset" subsystem, devfs_rule_* are the static
 * functions that operate on rules, and devfs_ruleset_* are the static
 * functions that operate on rulesets. The line between the last two
 * isn't always clear, but the guideline is still useful.
 *
 * A note on "special" identifiers: Ruleset 0 is the NULL, or empty,
 * ruleset; it cannot be deleted or changed in any way. This may be
 * assumed inside the code; e.g., a ruleset of 0 may be interpreted to
 * mean "no ruleset". The interpretation of rule 0 is
 * command-dependent, but in no case is there a real rule with number
 * 0.
 *
 * A note on errno codes: To make it easier for the userland to tell
 * what went wrong, we sometimes use errno codes that are not entirely
 * appropriate for the error but that would be less ambiguous than the
 * appropriate "generic" code. For example, when we can't find a
 * ruleset, we return ESRCH instead of ENOENT (except in
 * DEVFSIO_{R,S}GETNEXT, where a nonexistent ruleset means "end of
 * list", and the userland expects ENOENT to be this indicator); this
 * way, when an operation fails, it's clear that what couldn't be
 * found is a ruleset and not a rule (well, it's clear to those who
 * know the convention).
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/priv.h>
#include <sys/dirent.h>
#include <sys/ioccom.h>
#include <sys/lock.h>
#include <sys/sx.h>

#include <fs/devfs/devfs.h>
#include <fs/devfs/devfs_int.h>
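
/*
 * A note on rule identifiers: a devfs_rid packs a ruleset number
 * together with a rule number; the mkrid(), rid2rsn(), and rid2rn()
 * macros used throughout this file compose and decompose it (the
 * exact bit layout lives in the devfs headers, not here). For
 * example, rule 300 in ruleset 10 is mkrid(10, 300), from which
 * rid2rsn() recovers 10 and rid2rn() recovers 300.
 */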

/*
 * Kernel version of devfs_rule.
 */
struct devfs_krule {
	TAILQ_ENTRY(devfs_krule) dk_list;
	struct devfs_ruleset *dk_ruleset;
	struct devfs_rule dk_rule;
};

TAILQ_HEAD(rulehead, devfs_krule);
static MALLOC_DEFINE(M_DEVFSRULE, "DEVFS_RULE", "DEVFS rule storage");

/*
 * Structure to describe a ruleset.
 */
struct devfs_ruleset {
	TAILQ_ENTRY(devfs_ruleset) ds_list;
	struct rulehead ds_rules;
	devfs_rsnum ds_number;
	int ds_refcount;
};

static devfs_rid devfs_rid_input(devfs_rid rid, struct devfs_mount *dm);

static void devfs_rule_applyde_recursive(struct devfs_krule *dk,
		struct devfs_dirent *de);
static void devfs_rule_applydm(struct devfs_krule *dk, struct devfs_mount *dm);
static int devfs_rule_autonumber(struct devfs_ruleset *ds, devfs_rnum *rnp);
static struct devfs_krule *devfs_rule_byid(devfs_rid rid);
static int devfs_rule_delete(struct devfs_krule *dkp);
static struct cdev *devfs_rule_getdev(struct devfs_dirent *de);
static int devfs_rule_input(struct devfs_rule *dr, struct devfs_mount *dm);
static int devfs_rule_insert(struct devfs_rule *dr);
static int devfs_rule_match(struct devfs_krule *dk, struct devfs_dirent *de);
static int devfs_rule_matchpath(struct devfs_krule *dk,
		struct devfs_dirent *de);
static void devfs_rule_run(struct devfs_krule *dk, struct devfs_dirent *de,
		unsigned depth);

static void devfs_ruleset_applyde(struct devfs_ruleset *ds,
		struct devfs_dirent *de, unsigned depth);
static void devfs_ruleset_applydm(struct devfs_ruleset *ds,
		struct devfs_mount *dm);
static struct devfs_ruleset *devfs_ruleset_bynum(devfs_rsnum rsnum);
static struct devfs_ruleset *devfs_ruleset_create(devfs_rsnum rsnum);
static void devfs_ruleset_reap(struct devfs_ruleset *dsp);
static int devfs_ruleset_use(devfs_rsnum rsnum, struct devfs_mount *dm);

static struct sx sx_rules;
SX_SYSINIT(sx_rules, &sx_rules, "DEVFS ruleset lock");

static TAILQ_HEAD(, devfs_ruleset) devfs_rulesets =
    TAILQ_HEAD_INITIALIZER(devfs_rulesets);

/*
 * Called to apply the proper rules for 'de' before it can be
 * exposed to the userland. This should be called with an exclusive
 * lock on dm in case we need to run anything.
 */
void
devfs_rules_apply(struct devfs_mount *dm, struct devfs_dirent *de)
{
	struct devfs_ruleset *ds;

	sx_assert(&dm->dm_lock, SX_XLOCKED);

	if (dm->dm_ruleset == 0)
		return;
	sx_slock(&sx_rules);
	ds = devfs_ruleset_bynum(dm->dm_ruleset);
	KASSERT(ds != NULL, ("mount-point has NULL ruleset"));
	devfs_ruleset_applyde(ds, de, devfs_rule_depth);
	sx_sunlock(&sx_rules);
}
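
/*
 * A note on lock ordering: the foreign entry points in this file run
 * with the per-mount dm->dm_lock already held exclusively (hence the
 * sx_assert() calls); when the global sx_rules lock is needed, it is
 * taken after dm_lock -- shared in devfs_rules_apply(), exclusive in
 * devfs_rules_ioctl().
 */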

/*
 * Rule subsystem ioctl hook.
 */
int
devfs_rules_ioctl(struct devfs_mount *dm, u_long cmd, caddr_t data,
    struct thread *td)
{
	struct devfs_ruleset *ds;
	struct devfs_krule *dk;
	struct devfs_rule *dr;
	devfs_rsnum rsnum;
	devfs_rnum rnum;
	devfs_rid rid;
	int error;

	sx_assert(&dm->dm_lock, SX_XLOCKED);

	/*
	 * XXX: This returns an error regardless of whether we actually
	 * support the cmd or not.
	 *
	 * We could make these privileges finer grained if desired.
	 */
	error = priv_check(td, PRIV_DEVFS_RULE);
	if (error)
		return (error);

	sx_xlock(&sx_rules);

	switch (cmd) {
	case DEVFSIO_RADD:
		dr = (struct devfs_rule *)data;
		error = devfs_rule_input(dr, dm);
		if (error != 0)
			break;
		dk = devfs_rule_byid(dr->dr_id);
		if (dk != NULL) {
			error = EEXIST;
			break;
		}
		if (rid2rsn(dr->dr_id) == 0) {
			error = EIO;
			break;
		}
		error = devfs_rule_insert(dr);
		break;
	case DEVFSIO_RAPPLY:
		dr = (struct devfs_rule *)data;
		error = devfs_rule_input(dr, dm);
		if (error != 0)
			break;

		/*
		 * This is one of many possible hackish
		 * implementations. The primary contender is an
		 * implementation where the rule we read in is
		 * temporarily inserted into some ruleset, perhaps
		 * with a hypothetical DRO_NOAUTO flag so that it
		 * doesn't get used where it isn't intended, and
		 * applied in the normal way. This can be done in the
		 * userland (DEVFSIO_ADD, DEVFSIO_APPLYID,
		 * DEVFSIO_DEL) or in the kernel; either way it breaks
		 * some corner case assumptions in other parts of the
		 * code (not that this implementation doesn't do
		 * that).
		 */
		if (dr->dr_iacts & DRA_INCSET &&
		    devfs_ruleset_bynum(dr->dr_incset) == NULL) {
			error = ESRCH;
			break;
		}
		dk = malloc(sizeof(*dk), M_TEMP, M_WAITOK | M_ZERO);
		memcpy(&dk->dk_rule, dr, sizeof(*dr));
		devfs_rule_applydm(dk, dm);
		free(dk, M_TEMP);
		break;
	case DEVFSIO_RAPPLYID:
		rid = *(devfs_rid *)data;
		rid = devfs_rid_input(rid, dm);
		dk = devfs_rule_byid(rid);
		if (dk == NULL) {
			error = ENOENT;
			break;
		}
		devfs_rule_applydm(dk, dm);
		break;
	case DEVFSIO_RDEL:
		rid = *(devfs_rid *)data;
		rid = devfs_rid_input(rid, dm);
		dk = devfs_rule_byid(rid);
		if (dk == NULL) {
			error = ENOENT;
			break;
		}
		ds = dk->dk_ruleset;
		error = devfs_rule_delete(dk);
		break;
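	/*
	 * The *GETNEXT commands implement enumeration: each call
	 * returns the first rule (or ruleset) numbered strictly
	 * greater than the one passed in, so userland walks the list
	 * by feeding each result back in, starting from 0; ENOENT
	 * means end of list (see the errno note at the top of the
	 * file).
	 */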
	case DEVFSIO_RGETNEXT:
		dr = (struct devfs_rule *)data;
		error = devfs_rule_input(dr, dm);
		if (error != 0)
			break;
		/*
		 * We can't use devfs_rule_byid() here since that
		 * requires the rule specified to exist, but we want
		 * getnext(N) to work whether there is a rule N or not
		 * (specifically, getnext(0) must work, but we should
		 * never have a rule 0 since the add command
		 * interprets 0 to mean "auto-number").
		 */
		ds = devfs_ruleset_bynum(rid2rsn(dr->dr_id));
		if (ds == NULL) {
			error = ENOENT;
			break;
		}
		rnum = rid2rn(dr->dr_id);
		TAILQ_FOREACH(dk, &ds->ds_rules, dk_list) {
			if (rid2rn(dk->dk_rule.dr_id) > rnum)
				break;
		}
		if (dk == NULL) {
			error = ENOENT;
			break;
		}
		memcpy(dr, &dk->dk_rule, sizeof(*dr));
		break;
	case DEVFSIO_SUSE:
		rsnum = *(devfs_rsnum *)data;
		error = devfs_ruleset_use(rsnum, dm);
		break;
	case DEVFSIO_SAPPLY:
		rsnum = *(devfs_rsnum *)data;
		rsnum = rid2rsn(devfs_rid_input(mkrid(rsnum, 0), dm));
		ds = devfs_ruleset_bynum(rsnum);
		if (ds == NULL) {
			error = ESRCH;
			break;
		}
		devfs_ruleset_applydm(ds, dm);
		break;
	case DEVFSIO_SGETNEXT:
		rsnum = *(devfs_rsnum *)data;
		TAILQ_FOREACH(ds, &devfs_rulesets, ds_list) {
			if (ds->ds_number > rsnum)
				break;
		}
		if (ds == NULL) {
			error = ENOENT;
			break;
		}
		*(devfs_rsnum *)data = ds->ds_number;
		break;
	default:
		error = ENOIOCTL;
		break;
	}

	sx_xunlock(&sx_rules);
	return (error);
}

/*
 * Adjust the rule identifier to use the ruleset of dm if one isn't
 * explicitly specified.
 *
 * Note that after this operation, rid2rsn(rid) might still be 0, and
 * that's okay; ruleset 0 is a valid ruleset, but when it's read in
 * from the userland, it means "current ruleset for this mount-point".
 */
static devfs_rid
devfs_rid_input(devfs_rid rid, struct devfs_mount *dm)
{

	if (rid2rsn(rid) == 0)
		return (mkrid(dm->dm_ruleset, rid2rn(rid)));
	else
		return (rid);
}

/*
 * Apply dk to de and everything under de.
 *
 * XXX: This method needs a function call for every nested
 * subdirectory in a devfs mount. If we plan to have many of these,
 * we might eventually run out of kernel stack space.
 * XXX: a linear search could be done through the cdev list instead.
 */
static void
devfs_rule_applyde_recursive(struct devfs_krule *dk, struct devfs_dirent *de)
{
	struct devfs_dirent *de2;

	TAILQ_FOREACH(de2, &de->de_dlist, de_list)
		devfs_rule_applyde_recursive(dk, de2);
	devfs_rule_run(dk, de, devfs_rule_depth);
}

/*
 * Apply dk to all entries in dm.
 */
static void
devfs_rule_applydm(struct devfs_krule *dk, struct devfs_mount *dm)
{

	devfs_rule_applyde_recursive(dk, dm->dm_rootdir);
}

/*
 * Automatically select a number for a new rule in ds, and write the
 * result into rnump.
 */
static int
devfs_rule_autonumber(struct devfs_ruleset *ds, devfs_rnum *rnump)
{
	struct devfs_krule *dk;

	/* Find the last rule. */
	dk = TAILQ_LAST(&ds->ds_rules, rulehead);
	if (dk == NULL)
		*rnump = 100;
	else {
		*rnump = rid2rn(dk->dk_rule.dr_id) + 100;
		/* Detect overflow. */
		if (*rnump < rid2rn(dk->dk_rule.dr_id))
			return (ERANGE);
	}
	KASSERT(devfs_rule_byid(mkrid(ds->ds_number, *rnump)) == NULL,
	    ("autonumbering resulted in an already existing rule"));
	return (0);
}

/*
 * Find a krule by id.
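 *
 * Rules within a ruleset are kept sorted by rule number (see
 * devfs_rule_insert()), which is what lets this lookup stop early
 * and lets the *GETNEXT handler walk rules in order. Auto-numbered
 * rules are spaced 100 apart, which leaves room to slot explicitly
 * numbered rules in between.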
385 */ 386 static struct devfs_krule * 387 devfs_rule_byid(devfs_rid rid) 388 { 389 struct devfs_ruleset *ds; 390 struct devfs_krule *dk; 391 devfs_rnum rn; 392 393 rn = rid2rn(rid); 394 ds = devfs_ruleset_bynum(rid2rsn(rid)); 395 if (ds == NULL) 396 return (NULL); 397 TAILQ_FOREACH(dk, &ds->ds_rules, dk_list) { 398 if (rid2rn(dk->dk_rule.dr_id) == rn) 399 return (dk); 400 else if (rid2rn(dk->dk_rule.dr_id) > rn) 401 break; 402 } 403 return (NULL); 404 } 405 406 /* 407 * Remove dkp from any lists it may be on and remove memory associated 408 * with it. 409 */ 410 static int 411 devfs_rule_delete(struct devfs_krule *dk) 412 { 413 struct devfs_ruleset *ds; 414 415 if (dk->dk_rule.dr_iacts & DRA_INCSET) { 416 ds = devfs_ruleset_bynum(dk->dk_rule.dr_incset); 417 KASSERT(ds != NULL, ("DRA_INCSET but bad dr_incset")); 418 --ds->ds_refcount; 419 devfs_ruleset_reap(ds); 420 } 421 ds = dk->dk_ruleset; 422 TAILQ_REMOVE(&ds->ds_rules, dk, dk_list); 423 devfs_ruleset_reap(ds); 424 free(dk, M_DEVFSRULE); 425 return (0); 426 } 427 428 /* 429 * Get a struct cdev *corresponding to de so we can try to match rules based 430 * on it. If this routine returns NULL, there is no struct cdev *associated 431 * with the dirent (symlinks and directories don't have dev_ts), and 432 * the caller should assume that any critera dependent on a dev_t 433 * don't match. 434 */ 435 static struct cdev * 436 devfs_rule_getdev(struct devfs_dirent *de) 437 { 438 439 if (de->de_cdp == NULL) 440 return (NULL); 441 if (de->de_cdp->cdp_flags & CDP_ACTIVE) 442 return (&de->de_cdp->cdp_c); 443 else 444 return (NULL); 445 } 446 447 /* 448 * Do what we need to do to a rule that we just loaded from the 449 * userland. In particular, we need to check the magic, and adjust 450 * the ruleset appropriate if desired. 451 */ 452 static int 453 devfs_rule_input(struct devfs_rule *dr, struct devfs_mount *dm) 454 { 455 456 if (dr->dr_magic != DEVFS_MAGIC) 457 return (ERPCMISMATCH); 458 dr->dr_id = devfs_rid_input(dr->dr_id, dm); 459 return (0); 460 } 461 462 /* 463 * Import dr into the appropriate place in the kernel (i.e., make a 464 * krule). The value of dr is copied, so the pointer may be destroyed 465 * after this call completes. 466 */ 467 static int 468 devfs_rule_insert(struct devfs_rule *dr) 469 { 470 struct devfs_ruleset *ds, *dsi; 471 struct devfs_krule *k1; 472 struct devfs_krule *dk; 473 devfs_rsnum rsnum; 474 devfs_rnum dkrn; 475 int error; 476 477 /* 478 * This stuff seems out of place here, but we want to do it as 479 * soon as possible so that if it fails, we don't have to roll 480 * back any changes we already made (e.g., ruleset creation). 481 */ 482 if (dr->dr_iacts & DRA_INCSET) { 483 dsi = devfs_ruleset_bynum(dr->dr_incset); 484 if (dsi == NULL) 485 return (ESRCH); 486 } else 487 dsi = NULL; 488 489 rsnum = rid2rsn(dr->dr_id); 490 KASSERT(rsnum != 0, ("Inserting into ruleset zero")); 491 492 ds = devfs_ruleset_bynum(rsnum); 493 if (ds == NULL) 494 ds = devfs_ruleset_create(rsnum); 495 dkrn = rid2rn(dr->dr_id); 496 if (dkrn == 0) { 497 error = devfs_rule_autonumber(ds, &dkrn); 498 if (error != 0) { 499 devfs_ruleset_reap(ds); 500 return (error); 501 } 502 } 503 504 dk = malloc(sizeof(*dk), M_DEVFSRULE, M_WAITOK | M_ZERO); 505 dk->dk_ruleset = ds; 506 if (dsi != NULL) 507 ++dsi->ds_refcount; 508 /* XXX: Inspect dr? 
	 */
	memcpy(&dk->dk_rule, dr, sizeof(*dr));
	dk->dk_rule.dr_id = mkrid(rid2rsn(dk->dk_rule.dr_id), dkrn);

	TAILQ_FOREACH(k1, &ds->ds_rules, dk_list) {
		if (rid2rn(k1->dk_rule.dr_id) > dkrn) {
			TAILQ_INSERT_BEFORE(k1, dk, dk_list);
			break;
		}
	}
	if (k1 == NULL)
		TAILQ_INSERT_TAIL(&ds->ds_rules, dk, dk_list);
	return (0);
}

/*
 * Determine whether dk matches de. Returns 1 if dk should be run on
 * de; 0, otherwise.
 */
static int
devfs_rule_match(struct devfs_krule *dk, struct devfs_dirent *de)
{
	struct devfs_rule *dr = &dk->dk_rule;
	struct cdev *dev;
	struct cdevsw *dsw;
	int ref;

	dev = devfs_rule_getdev(de);
	/*
	 * At this point, if dev is NULL, we should assume that any
	 * criteria that depend on it don't match. We should *not*
	 * just ignore them (i.e., act like they weren't specified),
	 * since that makes a rule that only has criteria dependent on
	 * the struct cdev * match all symlinks and directories.
	 *
	 * Note also that the following tests are somewhat reversed:
	 * They're actually testing to see whether the condition does
	 * *not* match, since the default is to assume the rule should
	 * be run (such as if there are no conditions).
	 */
	if (dr->dr_icond & DRC_DSWFLAGS) {
		if (dev == NULL)
			return (0);
		dsw = dev_refthread(dev, &ref);
		if (dsw == NULL)
			return (0);
		if ((dsw->d_flags & dr->dr_dswflags) == 0) {
			dev_relthread(dev, ref);
			return (0);
		}
		dev_relthread(dev, ref);
	}
	if (dr->dr_icond & DRC_PATHPTRN)
		if (!devfs_rule_matchpath(dk, de))
			return (0);

	return (1);
}

/*
 * Determine whether dk matches de on account of dr_pathptrn.
 */
static int
devfs_rule_matchpath(struct devfs_krule *dk, struct devfs_dirent *de)
{
	struct devfs_rule *dr = &dk->dk_rule;
	char *pname;
	struct cdev *dev;

	dev = devfs_rule_getdev(de);
	if (dev != NULL)
		pname = dev->si_name;
	else if (de->de_dirent->d_type == DT_LNK ||
	    de->de_dirent->d_type == DT_DIR)
		pname = de->de_dirent->d_name;
	else
		return (0);
	KASSERT(pname != NULL, ("devfs_rule_matchpath: NULL pname"));

	return (fnmatch(dr->dr_pathptrn, pname, 0) == 0);
}

/*
 * Run dk on de.
 */
static void
devfs_rule_run(struct devfs_krule *dk, struct devfs_dirent *de, unsigned depth)
{
	struct devfs_rule *dr = &dk->dk_rule;
	struct devfs_ruleset *ds;

	if (!devfs_rule_match(dk, de))
		return;
	if (dr->dr_iacts & DRA_BACTS) {
		if (dr->dr_bacts & DRB_HIDE)
			de->de_flags |= DE_WHITEOUT;
		if (dr->dr_bacts & DRB_UNHIDE)
			de->de_flags &= ~DE_WHITEOUT;
	}
	if (dr->dr_iacts & DRA_UID)
		de->de_uid = dr->dr_uid;
	if (dr->dr_iacts & DRA_GID)
		de->de_gid = dr->dr_gid;
	if (dr->dr_iacts & DRA_MODE)
		de->de_mode = dr->dr_mode;
	if (dr->dr_iacts & DRA_INCSET) {
		/*
		 * XXX: we should tell the user if the depth is exceeded here
		 * XXX: but it is not obvious how to. A return value will
		 * XXX: not work as this is called when devices are created
		 * XXX: a long time after the rules were instantiated.
		 * XXX: a printf() would probably give too much noise, or
		 * XXX: DoS the machine. I guess a rate-limited message
		 * XXX: might work.
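		 *
		 * (For now, when the depth budget is exhausted the
		 * included ruleset is simply skipped; devfs_rule_depth
		 * is the starting budget handed in by
		 * devfs_rules_apply() and devfs_rule_applyde_recursive().)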
622 */ 623 if (depth > 0) { 624 ds = devfs_ruleset_bynum(dk->dk_rule.dr_incset); 625 KASSERT(ds != NULL, ("DRA_INCSET but bad dr_incset")); 626 devfs_ruleset_applyde(ds, de, depth - 1); 627 } 628 } 629 } 630 631 /* 632 * Apply all the rules in ds to de. 633 */ 634 static void 635 devfs_ruleset_applyde(struct devfs_ruleset *ds, struct devfs_dirent *de, unsigned depth) 636 { 637 struct devfs_krule *dk; 638 639 TAILQ_FOREACH(dk, &ds->ds_rules, dk_list) 640 devfs_rule_run(dk, de, depth); 641 } 642 643 /* 644 * Apply all the rules in ds to all the entires in dm. 645 */ 646 static void 647 devfs_ruleset_applydm(struct devfs_ruleset *ds, struct devfs_mount *dm) 648 { 649 struct devfs_krule *dk; 650 651 /* 652 * XXX: Does it matter whether we do 653 * 654 * foreach(dk in ds) 655 * foreach(de in dm) 656 * apply(dk to de) 657 * 658 * as opposed to 659 * 660 * foreach(de in dm) 661 * foreach(dk in ds) 662 * apply(dk to de) 663 * 664 * The end result is obviously the same, but does the order 665 * matter? 666 */ 667 TAILQ_FOREACH(dk, &ds->ds_rules, dk_list) 668 devfs_rule_applydm(dk, dm); 669 } 670 671 /* 672 * Find a ruleset by number. 673 */ 674 static struct devfs_ruleset * 675 devfs_ruleset_bynum(devfs_rsnum rsnum) 676 { 677 struct devfs_ruleset *ds; 678 679 TAILQ_FOREACH(ds, &devfs_rulesets, ds_list) { 680 if (ds->ds_number == rsnum) 681 return (ds); 682 } 683 return (NULL); 684 } 685 686 /* 687 * Create a new ruleset. 688 */ 689 static struct devfs_ruleset * 690 devfs_ruleset_create(devfs_rsnum rsnum) 691 { 692 struct devfs_ruleset *s1; 693 struct devfs_ruleset *ds; 694 695 KASSERT(rsnum != 0, ("creating ruleset zero")); 696 697 KASSERT(devfs_ruleset_bynum(rsnum) == NULL, 698 ("creating already existent ruleset %d", rsnum)); 699 700 ds = malloc(sizeof(*ds), M_DEVFSRULE, M_WAITOK | M_ZERO); 701 ds->ds_number = rsnum; 702 TAILQ_INIT(&ds->ds_rules); 703 704 TAILQ_FOREACH(s1, &devfs_rulesets, ds_list) { 705 if (s1->ds_number > rsnum) { 706 TAILQ_INSERT_BEFORE(s1, ds, ds_list); 707 break; 708 } 709 } 710 if (s1 == NULL) 711 TAILQ_INSERT_TAIL(&devfs_rulesets, ds, ds_list); 712 return (ds); 713 } 714 715 /* 716 * Remove a ruleset from the system if it's empty and not used 717 * anywhere. This should be called after every time a rule is deleted 718 * from this ruleset or the reference count is decremented. 719 */ 720 static void 721 devfs_ruleset_reap(struct devfs_ruleset *ds) 722 { 723 724 KASSERT(ds->ds_number != 0, ("reaping ruleset zero ")); 725 726 if (!TAILQ_EMPTY(&ds->ds_rules) || ds->ds_refcount != 0) 727 return; 728 729 TAILQ_REMOVE(&devfs_rulesets, ds, ds_list); 730 free(ds, M_DEVFSRULE); 731 } 732 733 /* 734 * Make rsnum the active ruleset for dm. 735 */ 736 static int 737 devfs_ruleset_use(devfs_rsnum rsnum, struct devfs_mount *dm) 738 { 739 struct devfs_ruleset *cds, *ds; 740 741 if (dm->dm_ruleset != 0) { 742 cds = devfs_ruleset_bynum(dm->dm_ruleset); 743 --cds->ds_refcount; 744 devfs_ruleset_reap(cds); 745 } 746 747 if (rsnum == 0) { 748 dm->dm_ruleset = 0; 749 return (0); 750 } 751 752 ds = devfs_ruleset_bynum(rsnum); 753 if (ds == NULL) 754 ds = devfs_ruleset_create(rsnum); 755 /* These should probably be made atomic somehow. */ 756 ++ds->ds_refcount; 757 dm->dm_ruleset = rsnum; 758 759 return (0); 760 } 761 762 void 763 devfs_rules_cleanup(struct devfs_mount *dm) 764 { 765 struct devfs_ruleset *ds; 766 767 sx_assert(&dm->dm_lock, SX_XLOCKED); 768 if (dm->dm_ruleset != 0) { 769 ds = devfs_ruleset_bynum(dm->dm_ruleset); 770 --ds->ds_refcount; 771 devfs_ruleset_reap(ds); 772 } 773 } 774