/*-
 * Copyright (c) 1999-2002, 2006, 2009 Robert N. M. Watson
 * Copyright (c) 2001 Ilmar S. Habibulin
 * Copyright (c) 2001-2005 Networks Associates Technology, Inc.
 * Copyright (c) 2005-2006 SPARTA, Inc.
 * Copyright (c) 2008-2009 Apple Inc.
 * All rights reserved.
 *
 * This software was developed by Robert Watson and Ilmar Habibulin for the
 * TrustedBSD Project.
 *
 * This software was developed for the FreeBSD Project in part by Network
 * Associates Laboratories, the Security Research Division of Network
 * Associates, Inc. under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"),
 * as part of the DARPA CHATS research program.
 *
 * This software was enhanced by SPARTA ISSO under SPAWAR contract
 * N66001-04-C-6019 ("SEFOS").
 *
 * This software was developed at the University of Cambridge Computer
 * Laboratory with support from a grant from Google, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*-
 * Framework for extensible kernel access control.  This file contains core
 * kernel infrastructure for the TrustedBSD MAC Framework, including policy
 * registration, versioning, locking, error composition operator, and system
 * calls.
 *
 * The MAC Framework implements three programming interfaces:
 *
 * - The kernel MAC interface, defined in mac_framework.h, and invoked
 *   throughout the kernel to request security decisions, notify of security
 *   related events, etc.
 *
 * - The MAC policy module interface, defined in mac_policy.h, which is
 *   implemented by MAC policy modules and invoked by the MAC Framework to
 *   forward kernel security requests and notifications to policy modules.
 *
 * - The user MAC API, defined in mac.h, which allows user programs to query
 *   and set label state on objects.
 *
 * The majority of the MAC Framework implementation may be found in
 * src/sys/security/mac.  Sample policy modules may be found in
 * src/sys/security/mac_*.
 */

#include "opt_mac.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mac.h>
#include <sys/module.h>
#include <sys/rmlock.h>
#include <sys/sdt.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <security/mac/mac_framework.h>
#include <security/mac/mac_internal.h>
#include <security/mac/mac_policy.h>

/*
 * DTrace SDT providers for MAC.
 */
SDT_PROVIDER_DEFINE(mac);
SDT_PROVIDER_DEFINE(mac_framework);

SDT_PROBE_DEFINE2(mac, , policy, modevent, "int",
    "struct mac_policy_conf *");
SDT_PROBE_DEFINE1(mac, , policy, register,
    "struct mac_policy_conf *");
SDT_PROBE_DEFINE1(mac, , policy, unregister,
    "struct mac_policy_conf *");

/*
 * Root sysctl node for all MAC and MAC policy controls.
 */
SYSCTL_NODE(_security, OID_AUTO, mac, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TrustedBSD MAC policy controls");

/*
 * Declare that the kernel provides MAC support, version 3 (FreeBSD 7.x).
 * This permits modules to refuse to be loaded if the necessary support isn't
 * present, even if it's pre-boot.
 */
MODULE_VERSION(kernel_mac_support, MAC_VERSION);

static unsigned int	mac_version = MAC_VERSION;
SYSCTL_UINT(_security_mac, OID_AUTO, version, CTLFLAG_RD, &mac_version, 0,
    "");

/*
 * Flags for inlined checks.  Note this would be best hotpatched at runtime.
 * The following is a band-aid.
 *
 * Use FPFLAG for hooks running in commonly executed paths and FPFLAG_RARE
 * for the rest.
 */
#define FPFLAG(f)	\
bool __read_frequently mac_##f##_fp_flag

#define FPFLAG_RARE(f)	\
bool __read_mostly mac_##f##_fp_flag

FPFLAG(priv_check);
FPFLAG(priv_grant);
FPFLAG(vnode_check_lookup);
FPFLAG(vnode_check_open);
FPFLAG(vnode_check_stat);
FPFLAG(vnode_check_read);
FPFLAG(vnode_check_write);
FPFLAG(vnode_check_mmap);
FPFLAG_RARE(vnode_check_poll);
FPFLAG_RARE(vnode_check_rename_from);
FPFLAG_RARE(vnode_check_access);
FPFLAG_RARE(pipe_check_stat);
FPFLAG_RARE(pipe_check_poll);

#undef FPFLAG
#undef FPFLAG_RARE

/*
 * Labels consist of an indexed set of "slots", which are allocated to
 * policies as required.  The MAC Framework maintains a bitmask of slots
 * allocated so far to prevent reuse.  Slots cannot be reused, as the MAC
 * Framework guarantees that newly allocated slots in labels will be NULL
 * unless otherwise initialized, and because we do not have a mechanism to
 * garbage collect slots on policy unload.  As labeled policies tend to be
 * statically loaded during boot, and not frequently unloaded and reloaded,
 * this is not generally an issue.
 */
#if MAC_MAX_SLOTS > 32
#error "MAC_MAX_SLOTS too large"
#endif

static unsigned int	mac_max_slots = MAC_MAX_SLOTS;
static unsigned int	mac_slot_offsets_free = (1 << MAC_MAX_SLOTS) - 1;
SYSCTL_UINT(_security_mac, OID_AUTO, max_slots, CTLFLAG_RD, &mac_max_slots,
    0, "");

/*
 * Has the kernel started generating labeled objects yet?  All read/write
 * access to this variable is serialized during the boot process.  Following
 * the end of serialization, we don't update this flag; no locking.
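 *
 * Policies compiled into the kernel or preloaded by the loader register
 * before this flag is set; see mac_late_init() below.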
 */
static int	mac_late = 0;

/*
 * Each policy declares a mask of object types requiring labels to be
 * allocated for them.  For convenience, we combine and cache the bitwise or
 * of the per-policy object flags to track whether we will allocate a label
 * for an object type at run-time.
 */
uint64_t	mac_labeled;
SYSCTL_UQUAD(_security_mac, OID_AUTO, labeled, CTLFLAG_RD, &mac_labeled, 0,
    "Mask of object types being labeled");

MALLOC_DEFINE(M_MACTEMP, "mactemp", "MAC temporary label storage");

/*
 * MAC policy modules are placed in one of two lists: mac_static_policy_list,
 * for policies that are loaded early and cannot be unloaded, and
 * mac_policy_list, which holds policies either loaded later in the boot
 * cycle or that may be unloaded.  The static policy list does not require
 * locks to iterate over, but the dynamic list requires synchronization.
 * Support for dynamic policy loading can be compiled out using the
 * MAC_STATIC kernel option.
 *
 * The dynamic policy list is protected by two locks: modifying the list
 * requires both locks to be held exclusively.  One of the locks,
 * mac_policy_rm, is acquired over policy entry points that will never sleep;
 * the other, mac_policy_rms, is acquired over policy entry points that may
 * sleep.  The former category will be used when kernel locks may be held
 * over calls to the MAC Framework, during network processing in ithreads,
 * etc.  The latter will tend to involve potentially blocking memory
 * allocations, extended attribute I/O, etc.
 */
#ifndef MAC_STATIC
static struct rmlock mac_policy_rm;	/* Non-sleeping entry points. */
static struct rmslock mac_policy_rms;	/* Sleeping entry points. */
#endif

struct mac_policy_list_head mac_policy_list;
struct mac_policy_list_head mac_static_policy_list;
u_int mac_policy_count;			/* Registered policy count. */

static void	mac_policy_xlock(void);
static void	mac_policy_xlock_assert(void);
static void	mac_policy_xunlock(void);

void
mac_policy_slock_nosleep(struct rm_priotracker *tracker)
{

#ifndef MAC_STATIC
	if (!mac_late)
		return;

	rm_rlock(&mac_policy_rm, tracker);
#endif
}

void
mac_policy_slock_sleep(void)
{

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "mac_policy_slock_sleep");

#ifndef MAC_STATIC
	if (!mac_late)
		return;

	rms_rlock(&mac_policy_rms);
#endif
}

void
mac_policy_sunlock_nosleep(struct rm_priotracker *tracker)
{

#ifndef MAC_STATIC
	if (!mac_late)
		return;

	rm_runlock(&mac_policy_rm, tracker);
#endif
}

void
mac_policy_sunlock_sleep(void)
{

#ifndef MAC_STATIC
	if (!mac_late)
		return;

	rms_runlock(&mac_policy_rms);
#endif
}

static void
mac_policy_xlock(void)
{

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "mac_policy_xlock()");

#ifndef MAC_STATIC
	if (!mac_late)
		return;

	rms_wlock(&mac_policy_rms);
	rm_wlock(&mac_policy_rm);
#endif
}

static void
mac_policy_xunlock(void)
{

#ifndef MAC_STATIC
	if (!mac_late)
		return;

	rm_wunlock(&mac_policy_rm);
	rms_wunlock(&mac_policy_rms);
#endif
}

static void
mac_policy_xlock_assert(void)
{

#ifndef MAC_STATIC
	if (!mac_late)
		return;

	rm_assert(&mac_policy_rm, RA_WLOCKED);
#endif
}

/*
 * Initialize the MAC subsystem, including appropriate SMP locks.
 */
static void
mac_init(void)
{

	LIST_INIT(&mac_static_policy_list);
	LIST_INIT(&mac_policy_list);
	mac_labelzone_init();

#ifndef MAC_STATIC
	rm_init_flags(&mac_policy_rm, "mac_policy_rm", RM_NOWITNESS |
	    RM_RECURSE);
	rms_init(&mac_policy_rms, "mac_policy_rms");
#endif
}

/*
 * For the purposes of modules that want to know if they were loaded "early",
 * set the mac_late flag once we've processed modules either linked into the
 * kernel, or loaded before the kernel startup.
 */
static void
mac_late_init(void)
{

	mac_late = 1;
}

/*
 * Given a policy, derive from its set of non-NULL label init methods what
 * object types the policy is interested in.
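 *
 * For example, a policy that implements only mpo_cred_init_label and
 * mpo_vnode_init_label would yield MPC_OBJECT_CRED | MPC_OBJECT_VNODE.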
345 */ 346 static uint64_t 347 mac_policy_getlabeled(struct mac_policy_conf *mpc) 348 { 349 uint64_t labeled; 350 351 #define MPC_FLAG(method, flag) \ 352 if (mpc->mpc_ops->mpo_ ## method != NULL) \ 353 labeled |= (flag); \ 354 355 labeled = 0; 356 MPC_FLAG(cred_init_label, MPC_OBJECT_CRED); 357 MPC_FLAG(proc_init_label, MPC_OBJECT_PROC); 358 MPC_FLAG(vnode_init_label, MPC_OBJECT_VNODE); 359 MPC_FLAG(inpcb_init_label, MPC_OBJECT_INPCB); 360 MPC_FLAG(socket_init_label, MPC_OBJECT_SOCKET); 361 MPC_FLAG(devfs_init_label, MPC_OBJECT_DEVFS); 362 MPC_FLAG(mbuf_init_label, MPC_OBJECT_MBUF); 363 MPC_FLAG(ipq_init_label, MPC_OBJECT_IPQ); 364 MPC_FLAG(ifnet_init_label, MPC_OBJECT_IFNET); 365 MPC_FLAG(bpfdesc_init_label, MPC_OBJECT_BPFDESC); 366 MPC_FLAG(pipe_init_label, MPC_OBJECT_PIPE); 367 MPC_FLAG(mount_init_label, MPC_OBJECT_MOUNT); 368 MPC_FLAG(posixsem_init_label, MPC_OBJECT_POSIXSEM); 369 MPC_FLAG(posixshm_init_label, MPC_OBJECT_POSIXSHM); 370 MPC_FLAG(sysvmsg_init_label, MPC_OBJECT_SYSVMSG); 371 MPC_FLAG(sysvmsq_init_label, MPC_OBJECT_SYSVMSQ); 372 MPC_FLAG(sysvsem_init_label, MPC_OBJECT_SYSVSEM); 373 MPC_FLAG(sysvshm_init_label, MPC_OBJECT_SYSVSHM); 374 MPC_FLAG(syncache_init_label, MPC_OBJECT_SYNCACHE); 375 MPC_FLAG(ip6q_init_label, MPC_OBJECT_IP6Q); 376 377 #undef MPC_FLAG 378 return (labeled); 379 } 380 381 /* 382 * When policies are loaded or unloaded, walk the list of registered policies 383 * and built mac_labeled, a bitmask representing the union of all objects 384 * requiring labels across all policies. 385 */ 386 static void 387 mac_policy_update(void) 388 { 389 struct mac_policy_conf *mpc; 390 391 mac_policy_xlock_assert(); 392 393 mac_labeled = 0; 394 mac_policy_count = 0; 395 LIST_FOREACH(mpc, &mac_static_policy_list, mpc_list) { 396 mac_labeled |= mac_policy_getlabeled(mpc); 397 mac_policy_count++; 398 } 399 LIST_FOREACH(mpc, &mac_policy_list, mpc_list) { 400 mac_labeled |= mac_policy_getlabeled(mpc); 401 mac_policy_count++; 402 } 403 404 cache_fast_lookup_enabled_recalc(); 405 } 406 407 /* 408 * There are frequently used code paths which check for rarely installed 409 * policies. Gross hack below enables doing it in a cheap manner. 
410 */ 411 412 #define FPO(f) (offsetof(struct mac_policy_ops, mpo_##f) / sizeof(uintptr_t)) 413 414 struct mac_policy_fastpath_elem { 415 int count; 416 bool *flag; 417 size_t offset; 418 }; 419 420 struct mac_policy_fastpath_elem mac_policy_fastpath_array[] = { 421 { .offset = FPO(priv_check), .flag = &mac_priv_check_fp_flag }, 422 { .offset = FPO(priv_grant), .flag = &mac_priv_grant_fp_flag }, 423 { .offset = FPO(vnode_check_lookup), 424 .flag = &mac_vnode_check_lookup_fp_flag }, 425 { .offset = FPO(vnode_check_open), 426 .flag = &mac_vnode_check_open_fp_flag }, 427 { .offset = FPO(vnode_check_stat), 428 .flag = &mac_vnode_check_stat_fp_flag }, 429 { .offset = FPO(vnode_check_read), 430 .flag = &mac_vnode_check_read_fp_flag }, 431 { .offset = FPO(vnode_check_write), 432 .flag = &mac_vnode_check_write_fp_flag }, 433 { .offset = FPO(vnode_check_mmap), 434 .flag = &mac_vnode_check_mmap_fp_flag }, 435 { .offset = FPO(vnode_check_poll), 436 .flag = &mac_vnode_check_poll_fp_flag }, 437 { .offset = FPO(vnode_check_rename_from), 438 .flag = &mac_vnode_check_rename_from_fp_flag }, 439 { .offset = FPO(vnode_check_access), 440 .flag = &mac_vnode_check_access_fp_flag }, 441 { .offset = FPO(pipe_check_stat), 442 .flag = &mac_pipe_check_stat_fp_flag }, 443 { .offset = FPO(pipe_check_poll), 444 .flag = &mac_pipe_check_poll_fp_flag }, 445 }; 446 447 static void 448 mac_policy_fastpath_enable(struct mac_policy_fastpath_elem *mpfe) 449 { 450 451 MPASS(mpfe->count >= 0); 452 mpfe->count++; 453 if (mpfe->count == 1) { 454 MPASS(*mpfe->flag == false); 455 *mpfe->flag = true; 456 } 457 } 458 459 static void 460 mac_policy_fastpath_disable(struct mac_policy_fastpath_elem *mpfe) 461 { 462 463 MPASS(mpfe->count >= 1); 464 mpfe->count--; 465 if (mpfe->count == 0) { 466 MPASS(*mpfe->flag == true); 467 *mpfe->flag = false; 468 } 469 } 470 471 static void 472 mac_policy_fastpath_register(struct mac_policy_conf *mpc) 473 { 474 struct mac_policy_fastpath_elem *mpfe; 475 uintptr_t **ops; 476 int i; 477 478 mac_policy_xlock_assert(); 479 480 ops = (uintptr_t **)mpc->mpc_ops; 481 for (i = 0; i < nitems(mac_policy_fastpath_array); i++) { 482 mpfe = &mac_policy_fastpath_array[i]; 483 if (ops[mpfe->offset] != NULL) 484 mac_policy_fastpath_enable(mpfe); 485 } 486 } 487 488 static void 489 mac_policy_fastpath_unregister(struct mac_policy_conf *mpc) 490 { 491 struct mac_policy_fastpath_elem *mpfe; 492 uintptr_t **ops; 493 int i; 494 495 mac_policy_xlock_assert(); 496 497 ops = (uintptr_t **)mpc->mpc_ops; 498 for (i = 0; i < nitems(mac_policy_fastpath_array); i++) { 499 mpfe = &mac_policy_fastpath_array[i]; 500 if (ops[mpfe->offset] != NULL) 501 mac_policy_fastpath_disable(mpfe); 502 } 503 } 504 505 #undef FPO 506 507 static int 508 mac_policy_register(struct mac_policy_conf *mpc) 509 { 510 struct mac_policy_conf *tmpc; 511 int error, slot, static_entry; 512 513 error = 0; 514 515 /* 516 * We don't technically need exclusive access while !mac_late, but 517 * hold it for assertion consistency. 518 */ 519 mac_policy_xlock(); 520 521 /* 522 * If the module can potentially be unloaded, or we're loading late, 523 * we have to stick it in the non-static list and pay an extra 524 * performance overhead. Otherwise, we can pay a light locking cost 525 * and stick it in the static list. 
526 */ 527 static_entry = (!mac_late && 528 !(mpc->mpc_loadtime_flags & MPC_LOADTIME_FLAG_UNLOADOK)); 529 530 if (static_entry) { 531 LIST_FOREACH(tmpc, &mac_static_policy_list, mpc_list) { 532 if (strcmp(tmpc->mpc_name, mpc->mpc_name) == 0) { 533 error = EEXIST; 534 goto out; 535 } 536 } 537 } else { 538 LIST_FOREACH(tmpc, &mac_policy_list, mpc_list) { 539 if (strcmp(tmpc->mpc_name, mpc->mpc_name) == 0) { 540 error = EEXIST; 541 goto out; 542 } 543 } 544 } 545 if (mpc->mpc_field_off != NULL) { 546 slot = ffs(mac_slot_offsets_free); 547 if (slot == 0) { 548 error = ENOMEM; 549 goto out; 550 } 551 slot--; 552 mac_slot_offsets_free &= ~(1 << slot); 553 *mpc->mpc_field_off = slot; 554 } 555 mpc->mpc_runtime_flags |= MPC_RUNTIME_FLAG_REGISTERED; 556 557 /* 558 * If we're loading a MAC module after the framework has initialized, 559 * it has to go into the dynamic list. If we're loading it before 560 * we've finished initializing, it can go into the static list with 561 * weaker locker requirements. 562 */ 563 if (static_entry) 564 LIST_INSERT_HEAD(&mac_static_policy_list, mpc, mpc_list); 565 else 566 LIST_INSERT_HEAD(&mac_policy_list, mpc, mpc_list); 567 568 /* 569 * Per-policy initialization. Currently, this takes place under the 570 * exclusive lock, so policies must not sleep in their init method. 571 * In the future, we may want to separate "init" from "start", with 572 * "init" occurring without the lock held. Likewise, on tear-down, 573 * breaking out "stop" from "destroy". 574 */ 575 if (mpc->mpc_ops->mpo_init != NULL) 576 (*(mpc->mpc_ops->mpo_init))(mpc); 577 578 mac_policy_fastpath_register(mpc); 579 580 mac_policy_update(); 581 582 SDT_PROBE1(mac, , policy, register, mpc); 583 printf("Security policy loaded: %s (%s)\n", mpc->mpc_fullname, 584 mpc->mpc_name); 585 586 out: 587 mac_policy_xunlock(); 588 return (error); 589 } 590 591 static int 592 mac_policy_unregister(struct mac_policy_conf *mpc) 593 { 594 595 /* 596 * If we fail the load, we may get a request to unload. Check to see 597 * if we did the run-time registration, and if not, silently succeed. 598 */ 599 mac_policy_xlock(); 600 if ((mpc->mpc_runtime_flags & MPC_RUNTIME_FLAG_REGISTERED) == 0) { 601 mac_policy_xunlock(); 602 return (0); 603 } 604 #if 0 605 /* 606 * Don't allow unloading modules with private data. 607 */ 608 if (mpc->mpc_field_off != NULL) { 609 mac_policy_xunlock(); 610 return (EBUSY); 611 } 612 #endif 613 /* 614 * Only allow the unload to proceed if the module is unloadable by 615 * its own definition. 616 */ 617 if ((mpc->mpc_loadtime_flags & MPC_LOADTIME_FLAG_UNLOADOK) == 0) { 618 mac_policy_xunlock(); 619 return (EBUSY); 620 } 621 622 mac_policy_fastpath_unregister(mpc); 623 624 if (mpc->mpc_ops->mpo_destroy != NULL) 625 (*(mpc->mpc_ops->mpo_destroy))(mpc); 626 627 LIST_REMOVE(mpc, mpc_list); 628 mpc->mpc_runtime_flags &= ~MPC_RUNTIME_FLAG_REGISTERED; 629 mac_policy_update(); 630 mac_policy_xunlock(); 631 632 SDT_PROBE1(mac, , policy, unregister, mpc); 633 printf("Security policy unload: %s (%s)\n", mpc->mpc_fullname, 634 mpc->mpc_name); 635 636 return (0); 637 } 638 639 /* 640 * Allow MAC policy modules to register during boot, etc. 
641 */ 642 int 643 mac_policy_modevent(module_t mod, int type, void *data) 644 { 645 struct mac_policy_conf *mpc; 646 int error; 647 648 error = 0; 649 mpc = (struct mac_policy_conf *) data; 650 651 #ifdef MAC_STATIC 652 if (mac_late) { 653 printf("mac_policy_modevent: MAC_STATIC and late\n"); 654 return (EBUSY); 655 } 656 #endif 657 658 SDT_PROBE2(mac, , policy, modevent, type, mpc); 659 switch (type) { 660 case MOD_LOAD: 661 if (mpc->mpc_loadtime_flags & MPC_LOADTIME_FLAG_NOTLATE && 662 mac_late) { 663 printf("mac_policy_modevent: can't load %s policy " 664 "after booting\n", mpc->mpc_name); 665 error = EBUSY; 666 break; 667 } 668 error = mac_policy_register(mpc); 669 break; 670 case MOD_UNLOAD: 671 /* Don't unregister the module if it was never registered. */ 672 if ((mpc->mpc_runtime_flags & MPC_RUNTIME_FLAG_REGISTERED) 673 != 0) 674 error = mac_policy_unregister(mpc); 675 else 676 error = 0; 677 break; 678 default: 679 error = EOPNOTSUPP; 680 break; 681 } 682 683 return (error); 684 } 685 686 /* 687 * Define an error value precedence, and given two arguments, selects the 688 * value with the higher precedence. 689 */ 690 int 691 mac_error_select(int error1, int error2) 692 { 693 694 /* Certain decision-making errors take top priority. */ 695 if (error1 == EDEADLK || error2 == EDEADLK) 696 return (EDEADLK); 697 698 /* Invalid arguments should be reported where possible. */ 699 if (error1 == EINVAL || error2 == EINVAL) 700 return (EINVAL); 701 702 /* Precedence goes to "visibility", with both process and file. */ 703 if (error1 == ESRCH || error2 == ESRCH) 704 return (ESRCH); 705 706 if (error1 == ENOENT || error2 == ENOENT) 707 return (ENOENT); 708 709 /* Precedence goes to DAC/MAC protections. */ 710 if (error1 == EACCES || error2 == EACCES) 711 return (EACCES); 712 713 /* Precedence goes to privilege. */ 714 if (error1 == EPERM || error2 == EPERM) 715 return (EPERM); 716 717 /* Precedence goes to error over success; otherwise, arbitrary. */ 718 if (error1 != 0) 719 return (error1); 720 return (error2); 721 } 722 723 int 724 mac_check_structmac_consistent(struct mac *mac) 725 { 726 727 /* Require that labels have a non-zero length. */ 728 if (mac->m_buflen > MAC_MAX_LABEL_BUF_LEN || 729 mac->m_buflen <= sizeof("")) 730 return (EINVAL); 731 732 return (0); 733 } 734 735 SYSINIT(mac, SI_SUB_MAC, SI_ORDER_FIRST, mac_init, NULL); 736 SYSINIT(mac_late, SI_SUB_MAC_LATE, SI_ORDER_FIRST, mac_late_init, NULL); 737