/*-
 * Copyright (c) 1999-2002 Robert N. M. Watson
 * Copyright (c) 2001 Ilmar S. Habibulin
 * Copyright (c) 2001-2005 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed by Robert Watson and Ilmar Habibulin for the
 * TrustedBSD Project.
 *
 * This software was developed for the FreeBSD Project in part by Network
 * Associates Laboratories, the Security Research Division of Network
 * Associates, Inc. under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"),
 * as part of the DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*-
 * Framework for extensible kernel access control.  This file contains the
 * kernel and userland interfaces to the framework, policy registration,
 * and composition.  Per-object interfaces, controls, and labeling may be
 * found in src/sys/mac/.  Sample policies may be found in src/sys/mac*.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_mac.h"
#include "opt_devfs.h"

#include <sys/param.h>
#include <sys/condvar.h>
#include <sys/extattr.h>
#include <sys/imgact.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/mac.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/sysent.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/namei.h>
#include <sys/socket.h>
#include <sys/pipe.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>

#include <sys/mac_policy.h>

#include <fs/devfs/devfs.h>

#include <net/bpfdesc.h>
#include <net/if.h>
#include <net/if_var.h>

#include <netinet/in.h>
#include <netinet/ip_var.h>

#include <security/mac/mac_internal.h>

#ifdef MAC

/*
 * Declare that the kernel provides MAC support, version 2.  This permits
 * modules to refuse to be loaded if the necessary support isn't present,
 * even if it's pre-boot.
 */
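/*
 * Illustrative note (not part of the framework itself): a policy module
 * can require this interface version with MODULE_DEPEND(), which the
 * MAC_POLICY_SET() macro in mac_policy.h is expected to declare on the
 * module's behalf, roughly as:
 *
 *	MODULE_DEPEND(mac_foo, kernel_mac_support, 2, 2, 2);
 *
 * where mac_foo is a hypothetical policy module.
 */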
MODULE_VERSION(kernel_mac_support, 2);

SYSCTL_NODE(_security, OID_AUTO, mac, CTLFLAG_RW, 0,
    "TrustedBSD MAC policy controls");

#if MAC_MAX_SLOTS > 32
#error "MAC_MAX_SLOTS too large"
#endif

static unsigned int mac_max_slots = MAC_MAX_SLOTS;
static unsigned int mac_slot_offsets_free = (1 << MAC_MAX_SLOTS) - 1;
SYSCTL_UINT(_security_mac, OID_AUTO, max_slots, CTLFLAG_RD,
    &mac_max_slots, 0, "");

/*
 * Has the kernel started generating labeled objects yet?  All read/write
 * access to this variable is serialized during the boot process.  Following
 * the end of serialization, we don't update this flag; no locking.
 */
int	mac_late = 0;

/*
 * Flag to indicate whether or not we should allocate label storage for
 * new mbufs.  Since most dynamic policies we currently work with don't
 * rely on mbuf labeling, try to avoid paying the cost of mtag allocation
 * unless specifically notified of interest.  One result of this is
 * that if a dynamically loaded policy requests mbuf labels, it must
 * be able to deal with a NULL label being returned on any mbufs that
 * were already in flight when the policy was loaded.  Since the policy
 * already has to deal with uninitialized labels, this probably won't
 * be a problem.  Note: currently no locking.  Will this be a problem?
 */
#ifndef MAC_ALWAYS_LABEL_MBUF
int	mac_labelmbufs = 0;
#endif

#ifdef MAC_DEBUG
SYSCTL_NODE(_security_mac, OID_AUTO, debug, CTLFLAG_RW, 0,
    "TrustedBSD MAC debug info");
SYSCTL_NODE(_security_mac_debug, OID_AUTO, counters, CTLFLAG_RW, 0,
    "TrustedBSD MAC object counters");

static unsigned int nmactemp;
SYSCTL_UINT(_security_mac_debug_counters, OID_AUTO, temp, CTLFLAG_RD,
    &nmactemp, 0, "number of temporary labels in use");
#endif

static int	mac_policy_register(struct mac_policy_conf *mpc);
static int	mac_policy_unregister(struct mac_policy_conf *mpc);

MALLOC_DEFINE(M_MACTEMP, "mactemp", "MAC temporary label storage");

/*
 * mac_static_policy_list holds a list of policy modules that are not
 * loaded while the system is "live", and cannot be unloaded.  These
 * policies can be invoked without holding the busy count.
 *
 * mac_policy_list stores the list of dynamic policies.  A busy count is
 * maintained for the list, stored in mac_policy_count.  The busy count
 * is protected by mac_policy_mtx; the list may be modified only
 * while the busy count is 0, requiring that the lock be held to
 * prevent new references to the list from being acquired.  For almost
 * all operations, incrementing the busy count is sufficient to
 * guarantee consistency, as the list cannot be modified while the
 * busy count is elevated.  For a few special operations involving a
 * change to the list of active policies, the mtx itself must be held.
 * A condition variable, mac_policy_cv, is used to signal potential
 * exclusive consumers that they should try to acquire the lock if a
 * first attempt at exclusive access fails.
 */
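/*
 * A sketch of the read-side composition pattern used by framework entry
 * points (see mac_syscall() below, and the MAC_CHECK()-style macros in
 * mac_internal.h): the static list may be walked without synchronization,
 * while the dynamic list is walked only with the busy count held:
 *
 *	LIST_FOREACH(mpc, &mac_static_policy_list, mpc_list)
 *		(invoke the policy's entry point, if implemented);
 *	if (mac_policy_list_conditional_busy() != 0) {
 *		LIST_FOREACH(mpc, &mac_policy_list, mpc_list)
 *			(invoke the policy's entry point, if implemented);
 *		mac_policy_list_unbusy();
 *	}
 */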
#ifndef MAC_STATIC
static struct mtx mac_policy_mtx;
static struct cv mac_policy_cv;
static int mac_policy_count;
#endif
struct mac_policy_list_head mac_policy_list;
struct mac_policy_list_head mac_static_policy_list;

/*
 * We manually invoke WITNESS_WARN() to allow Witness to generate
 * warnings even if we don't end up ever triggering the wait at
 * run-time.  The consumer of the exclusive interface must not hold
 * any locks (other than potentially Giant) since we may sleep for
 * long (potentially indefinite) periods of time waiting for the
 * framework to become quiescent so that a policy list change may
 * be made.
 */
void
mac_policy_grab_exclusive(void)
{

#ifndef MAC_STATIC
	if (!mac_late)
		return;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "mac_policy_grab_exclusive() at %s:%d", __FILE__, __LINE__);
	mtx_lock(&mac_policy_mtx);
	while (mac_policy_count != 0)
		cv_wait(&mac_policy_cv, &mac_policy_mtx);
#endif
}

void
mac_policy_assert_exclusive(void)
{

#ifndef MAC_STATIC
	if (!mac_late)
		return;

	mtx_assert(&mac_policy_mtx, MA_OWNED);
	KASSERT(mac_policy_count == 0,
	    ("mac_policy_assert_exclusive(): not exclusive"));
#endif
}

void
mac_policy_release_exclusive(void)
{

#ifndef MAC_STATIC
	if (!mac_late)
		return;

	KASSERT(mac_policy_count == 0,
	    ("mac_policy_release_exclusive(): not exclusive"));
	mtx_unlock(&mac_policy_mtx);
	cv_signal(&mac_policy_cv);
#endif
}

void
mac_policy_list_busy(void)
{

#ifndef MAC_STATIC
	if (!mac_late)
		return;

	mtx_lock(&mac_policy_mtx);
	mac_policy_count++;
	mtx_unlock(&mac_policy_mtx);
#endif
}

int
mac_policy_list_conditional_busy(void)
{
#ifndef MAC_STATIC
	int ret;

	if (!mac_late)
		return (1);

	mtx_lock(&mac_policy_mtx);
	if (!LIST_EMPTY(&mac_policy_list)) {
		mac_policy_count++;
		ret = 1;
	} else
		ret = 0;
	mtx_unlock(&mac_policy_mtx);
	return (ret);
#else
	if (!mac_late)
		return (1);

	return (1);
#endif
}

void
mac_policy_list_unbusy(void)
{

#ifndef MAC_STATIC
	if (!mac_late)
		return;

	mtx_lock(&mac_policy_mtx);
	mac_policy_count--;
	KASSERT(mac_policy_count >= 0, ("MAC_POLICY_LIST_LOCK"));
	if (mac_policy_count == 0)
		cv_signal(&mac_policy_cv);
	mtx_unlock(&mac_policy_mtx);
#endif
}

/*
 * Initialize the MAC subsystem, including appropriate SMP locks.
 */
static void
mac_init(void)
{

	LIST_INIT(&mac_static_policy_list);
	LIST_INIT(&mac_policy_list);
	mac_labelzone_init();

#ifndef MAC_STATIC
	mtx_init(&mac_policy_mtx, "mac_policy_mtx", NULL, MTX_DEF);
	cv_init(&mac_policy_cv, "mac_policy_cv");
#endif
}

/*
 * For the purposes of modules that want to know if they were loaded
 * "early", set the mac_late flag once we've processed modules either
 * linked into the kernel, or loaded before the kernel startup.
 */
static void
mac_late_init(void)
{

	mac_late = 1;
}
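/*
 * Note: mac_init() and mac_late_init() are attached to the boot sequence
 * by the SYSINIT() declarations at the end of this file, at SI_SUB_MAC
 * and SI_SUB_MAC_LATE respectively.
 */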
/*
 * After the policy list has changed, walk the list to update any global
 * flags.  Currently, we support only one flag, and it's conditionally
 * defined; as a result, the entire function is conditional.  Eventually,
 * the #else case might also iterate across the policies.
 */
static void
mac_policy_updateflags(void)
{
#ifndef MAC_ALWAYS_LABEL_MBUF
	struct mac_policy_conf *tmpc;
	int labelmbufs;

	mac_policy_assert_exclusive();

	labelmbufs = 0;
	LIST_FOREACH(tmpc, &mac_static_policy_list, mpc_list) {
		if (tmpc->mpc_loadtime_flags & MPC_LOADTIME_FLAG_LABELMBUFS)
			labelmbufs++;
	}
	LIST_FOREACH(tmpc, &mac_policy_list, mpc_list) {
		if (tmpc->mpc_loadtime_flags & MPC_LOADTIME_FLAG_LABELMBUFS)
			labelmbufs++;
	}
	mac_labelmbufs = (labelmbufs != 0);
#endif
}

/*
 * Allow MAC policy modules to register during boot, etc.
 */
int
mac_policy_modevent(module_t mod, int type, void *data)
{
	struct mac_policy_conf *mpc;
	int error;

	error = 0;
	mpc = (struct mac_policy_conf *) data;

#ifdef MAC_STATIC
	if (mac_late) {
		printf("mac_policy_modevent: MAC_STATIC and late\n");
		return (EBUSY);
	}
#endif

	switch (type) {
	case MOD_LOAD:
		if (mpc->mpc_loadtime_flags & MPC_LOADTIME_FLAG_NOTLATE &&
		    mac_late) {
			printf("mac_policy_modevent: can't load %s policy "
			    "after booting\n", mpc->mpc_name);
			error = EBUSY;
			break;
		}
		error = mac_policy_register(mpc);
		break;
	case MOD_UNLOAD:
		/* Don't unregister the module if it was never registered. */
		if ((mpc->mpc_runtime_flags & MPC_RUNTIME_FLAG_REGISTERED)
		    != 0)
			error = mac_policy_unregister(mpc);
		else
			error = 0;
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}
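/*
 * Illustrative only: policy modules do not normally call
 * mac_policy_modevent() themselves.  The MAC_POLICY_SET() macro in
 * mac_policy.h is expected to declare the mac_policy_conf, point a
 * moduledata_t at mac_policy_modevent(), and register the module, so a
 * hypothetical policy would look roughly like:
 *
 *	static struct mac_policy_ops mac_foo_ops = {
 *		.mpo_init = mac_foo_init,
 *		.mpo_destroy = mac_foo_destroy,
 *	};
 *
 *	MAC_POLICY_SET(&mac_foo_ops, mac_foo, "Example policy",
 *	    MPC_LOADTIME_FLAG_UNLOADOK, NULL);
 */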
static int
mac_policy_register(struct mac_policy_conf *mpc)
{
	struct mac_policy_conf *tmpc;
	int error, slot, static_entry;

	error = 0;

	/*
	 * We don't technically need exclusive access while !mac_late,
	 * but hold it for assertion consistency.
	 */
	mac_policy_grab_exclusive();

	/*
	 * If the module can potentially be unloaded, or we're loading
	 * late, we have to stick it in the non-static list and pay
	 * an extra performance overhead.  Otherwise, we can pay a
	 * light locking cost and stick it in the static list.
	 */
	static_entry = (!mac_late &&
	    !(mpc->mpc_loadtime_flags & MPC_LOADTIME_FLAG_UNLOADOK));

	if (static_entry) {
		LIST_FOREACH(tmpc, &mac_static_policy_list, mpc_list) {
			if (strcmp(tmpc->mpc_name, mpc->mpc_name) == 0) {
				error = EEXIST;
				goto out;
			}
		}
	} else {
		LIST_FOREACH(tmpc, &mac_policy_list, mpc_list) {
			if (strcmp(tmpc->mpc_name, mpc->mpc_name) == 0) {
				error = EEXIST;
				goto out;
			}
		}
	}
	if (mpc->mpc_field_off != NULL) {
		slot = ffs(mac_slot_offsets_free);
		if (slot == 0) {
			error = ENOMEM;
			goto out;
		}
		slot--;
		mac_slot_offsets_free &= ~(1 << slot);
		*mpc->mpc_field_off = slot;
	}
	mpc->mpc_runtime_flags |= MPC_RUNTIME_FLAG_REGISTERED;

	/*
	 * If we're loading a MAC module after the framework has
	 * initialized, it has to go into the dynamic list.  If
	 * we're loading it before we've finished initializing,
	 * it can go into the static list with weaker locking
	 * requirements.
	 */
	if (static_entry)
		LIST_INSERT_HEAD(&mac_static_policy_list, mpc, mpc_list);
	else
		LIST_INSERT_HEAD(&mac_policy_list, mpc, mpc_list);

	/* Per-policy initialization. */
	if (mpc->mpc_ops->mpo_init != NULL)
		(*(mpc->mpc_ops->mpo_init))(mpc);
	mac_policy_updateflags();

	printf("Security policy loaded: %s (%s)\n", mpc->mpc_fullname,
	    mpc->mpc_name);

out:
	mac_policy_release_exclusive();
	return (error);
}

static int
mac_policy_unregister(struct mac_policy_conf *mpc)
{

	/*
	 * If we fail the load, we may get a request to unload.  Check
	 * to see if we did the run-time registration, and if not,
	 * silently succeed.
	 */
	mac_policy_grab_exclusive();
	if ((mpc->mpc_runtime_flags & MPC_RUNTIME_FLAG_REGISTERED) == 0) {
		mac_policy_release_exclusive();
		return (0);
	}
#if 0
	/*
	 * Don't allow unloading modules with private data.
	 */
	if (mpc->mpc_field_off != NULL) {
		MAC_POLICY_LIST_UNLOCK();
		return (EBUSY);
	}
#endif
	/*
	 * Only allow the unload to proceed if the module is unloadable
	 * by its own definition.
	 */
	if ((mpc->mpc_loadtime_flags & MPC_LOADTIME_FLAG_UNLOADOK) == 0) {
		mac_policy_release_exclusive();
		return (EBUSY);
	}
	if (mpc->mpc_ops->mpo_destroy != NULL)
		(*(mpc->mpc_ops->mpo_destroy))(mpc);

	LIST_REMOVE(mpc, mpc_list);
	mpc->mpc_runtime_flags &= ~MPC_RUNTIME_FLAG_REGISTERED;
	mac_policy_updateflags();

	mac_policy_release_exclusive();

	printf("Security policy unload: %s (%s)\n", mpc->mpc_fullname,
	    mpc->mpc_name);

	return (0);
}

/*
 * Define an error value precedence and, given two arguments, select the
 * value with the higher precedence.
 */
int
mac_error_select(int error1, int error2)
{

	/* Certain decision-making errors take top priority. */
	if (error1 == EDEADLK || error2 == EDEADLK)
		return (EDEADLK);

	/* Invalid arguments should be reported where possible. */
	if (error1 == EINVAL || error2 == EINVAL)
		return (EINVAL);

	/* Precedence goes to "visibility", with both process and file. */
	if (error1 == ESRCH || error2 == ESRCH)
		return (ESRCH);

	if (error1 == ENOENT || error2 == ENOENT)
		return (ENOENT);

	/* Precedence goes to DAC/MAC protections. */
	if (error1 == EACCES || error2 == EACCES)
		return (EACCES);

	/* Precedence goes to privilege. */
	if (error1 == EPERM || error2 == EPERM)
		return (EPERM);

	/* Precedence goes to error over success; otherwise, arbitrary. */
	if (error1 != 0)
		return (error1);
	return (error2);
}
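/*
 * For example, mac_error_select(ESRCH, EACCES) returns ESRCH, since
 * visibility failures outrank access-control failures, and
 * mac_error_select(0, EIO) returns EIO, since any error outranks success.
 */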
void
mac_init_label(struct label *label)
{

	bzero(label, sizeof(*label));
	label->l_flags = MAC_FLAG_INITIALIZED;
}

void
mac_destroy_label(struct label *label)
{

	KASSERT(label->l_flags & MAC_FLAG_INITIALIZED,
	    ("destroying uninitialized label"));

	bzero(label, sizeof(*label));
	/* implicit: label->l_flags &= ~MAC_FLAG_INITIALIZED; */
}

int
mac_check_structmac_consistent(struct mac *mac)
{

	if (mac->m_buflen < 0 ||
	    mac->m_buflen > MAC_MAX_LABEL_BUF_LEN)
		return (EINVAL);

	return (0);
}

/*
 * MPSAFE
 */
int
__mac_get_pid(struct thread *td, struct __mac_get_pid_args *uap)
{
	char *elements, *buffer;
	struct mac mac;
	struct proc *tproc;
	struct ucred *tcred;
	int error;

	error = copyin(uap->mac_p, &mac, sizeof(mac));
	if (error)
		return (error);

	error = mac_check_structmac_consistent(&mac);
	if (error)
		return (error);

	tproc = pfind(uap->pid);
	if (tproc == NULL)
		return (ESRCH);

	tcred = NULL;				/* Satisfy gcc. */
	error = p_cansee(td, tproc);
	if (error == 0)
		tcred = crhold(tproc->p_ucred);
	PROC_UNLOCK(tproc);
	if (error)
		return (error);

	elements = malloc(mac.m_buflen, M_MACTEMP, M_WAITOK);
	error = copyinstr(mac.m_string, elements, mac.m_buflen, NULL);
	if (error) {
		free(elements, M_MACTEMP);
		crfree(tcred);
		return (error);
	}

	buffer = malloc(mac.m_buflen, M_MACTEMP, M_WAITOK | M_ZERO);
	error = mac_externalize_cred_label(tcred->cr_label, elements,
	    buffer, mac.m_buflen);
	if (error == 0)
		error = copyout(buffer, mac.m_string, strlen(buffer)+1);

	free(buffer, M_MACTEMP);
	free(elements, M_MACTEMP);
	crfree(tcred);
	return (error);
}

/*
 * MPSAFE
 */
int
__mac_get_proc(struct thread *td, struct __mac_get_proc_args *uap)
{
	char *elements, *buffer;
	struct mac mac;
	int error;

	error = copyin(uap->mac_p, &mac, sizeof(mac));
	if (error)
		return (error);

	error = mac_check_structmac_consistent(&mac);
	if (error)
		return (error);

	elements = malloc(mac.m_buflen, M_MACTEMP, M_WAITOK);
	error = copyinstr(mac.m_string, elements, mac.m_buflen, NULL);
	if (error) {
		free(elements, M_MACTEMP);
		return (error);
	}

	buffer = malloc(mac.m_buflen, M_MACTEMP, M_WAITOK | M_ZERO);
	error = mac_externalize_cred_label(td->td_ucred->cr_label,
	    elements, buffer, mac.m_buflen);
	if (error == 0)
		error = copyout(buffer, mac.m_string, strlen(buffer)+1);

	free(buffer, M_MACTEMP);
	free(elements, M_MACTEMP);
	return (error);
}
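/*
 * Userland normally reaches the __mac_get_*()/__mac_set_*() entry points
 * through the mac_get_proc(3) family of libc wrappers.  A rough sketch of
 * the raw interface, assuming the struct mac layout from <sys/mac.h>: the
 * caller passes in a buffer holding the requested element (policy) names
 * and receives the externalized label text back in the same buffer:
 *
 *	char buf[MAC_MAX_LABEL_BUF_LEN] = "biba,mls";
 *	struct mac m;
 *
 *	m.m_buflen = sizeof(buf);
 *	m.m_string = buf;
 *	error = __mac_get_proc(&m);
 *
 * (The element names above assume the Biba and MLS policies are loaded.)
 */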
/*
 * MPSAFE
 */
int
__mac_set_proc(struct thread *td, struct __mac_set_proc_args *uap)
{
	struct ucred *newcred, *oldcred;
	struct label *intlabel;
	struct proc *p;
	struct mac mac;
	char *buffer;
	int error;

	error = copyin(uap->mac_p, &mac, sizeof(mac));
	if (error)
		return (error);

	error = mac_check_structmac_consistent(&mac);
	if (error)
		return (error);

	buffer = malloc(mac.m_buflen, M_MACTEMP, M_WAITOK);
	error = copyinstr(mac.m_string, buffer, mac.m_buflen, NULL);
	if (error) {
		free(buffer, M_MACTEMP);
		return (error);
	}

	intlabel = mac_cred_label_alloc();
	error = mac_internalize_cred_label(intlabel, buffer);
	free(buffer, M_MACTEMP);
	if (error)
		goto out;

	newcred = crget();

	p = td->td_proc;
	PROC_LOCK(p);
	oldcred = p->p_ucred;

	error = mac_check_cred_relabel(oldcred, intlabel);
	if (error) {
		PROC_UNLOCK(p);
		crfree(newcred);
		goto out;
	}

	setsugid(p);
	crcopy(newcred, oldcred);
	mac_relabel_cred(newcred, intlabel);
	p->p_ucred = newcred;

	/*
	 * Grab additional reference for use while revoking mmaps, prior
	 * to releasing the proc lock and sharing the cred.
	 */
	crhold(newcred);
	PROC_UNLOCK(p);

	if (mac_enforce_vm) {
		mtx_lock(&Giant);
		mac_cred_mmapped_drop_perms(td, newcred);
		mtx_unlock(&Giant);
	}

	crfree(newcred);	/* Free revocation reference. */
	crfree(oldcred);

out:
	mac_cred_label_free(intlabel);
	return (error);
}

/*
 * MPSAFE
 */
int
__mac_get_fd(struct thread *td, struct __mac_get_fd_args *uap)
{
	char *elements, *buffer;
	struct label *intlabel;
	struct file *fp;
	struct mac mac;
	struct vnode *vp;
	struct pipe *pipe;
	struct socket *so;
	short label_type;
	int error;

	error = copyin(uap->mac_p, &mac, sizeof(mac));
	if (error)
		return (error);

	error = mac_check_structmac_consistent(&mac);
	if (error)
		return (error);

	elements = malloc(mac.m_buflen, M_MACTEMP, M_WAITOK);
	error = copyinstr(mac.m_string, elements, mac.m_buflen, NULL);
	if (error) {
		free(elements, M_MACTEMP);
		return (error);
	}

	buffer = malloc(mac.m_buflen, M_MACTEMP, M_WAITOK | M_ZERO);
	error = fget(td, uap->fd, &fp);
	if (error)
		goto out;

	label_type = fp->f_type;
	switch (fp->f_type) {
	case DTYPE_FIFO:
	case DTYPE_VNODE:
		vp = fp->f_vnode;
		intlabel = mac_vnode_label_alloc();
		mtx_lock(&Giant);				/* VFS */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
		mac_copy_vnode_label(vp->v_label, intlabel);
		VOP_UNLOCK(vp, 0, td);
		mtx_unlock(&Giant);				/* VFS */
		error = mac_externalize_vnode_label(intlabel, elements,
		    buffer, mac.m_buflen);
		mac_vnode_label_free(intlabel);
		break;

	case DTYPE_PIPE:
		pipe = fp->f_data;
		intlabel = mac_pipe_label_alloc();
		PIPE_LOCK(pipe);
		mac_copy_pipe_label(pipe->pipe_pair->pp_label, intlabel);
		PIPE_UNLOCK(pipe);
		error = mac_externalize_pipe_label(intlabel, elements,
		    buffer, mac.m_buflen);
		mac_pipe_label_free(intlabel);
		break;

	case DTYPE_SOCKET:
		so = fp->f_data;
		intlabel = mac_socket_label_alloc(M_WAITOK);
		NET_LOCK_GIANT();
		SOCK_LOCK(so);
		mac_copy_socket_label(so->so_label, intlabel);
		SOCK_UNLOCK(so);
		NET_UNLOCK_GIANT();
		error = mac_externalize_socket_label(intlabel, elements,
		    buffer, mac.m_buflen);
		mac_socket_label_free(intlabel);
		break;

	default:
		error = EINVAL;
	}
	fdrop(fp, td);
	if (error == 0)
		error = copyout(buffer, mac.m_string, strlen(buffer)+1);

out:
	free(buffer, M_MACTEMP);
	free(elements, M_MACTEMP);
	return (error);
}
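/*
 * Note on the pattern above: the object's label is copied into a
 * temporary label while the appropriate object lock (vnode lock, pipe
 * mutex, or socket lock) is held, and only the copy is externalized
 * once the lock has been released, so that policy externalization code,
 * which may sleep, need not run with the object locked.
 */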
/*
 * MPSAFE
 */
int
__mac_get_file(struct thread *td, struct __mac_get_file_args *uap)
{
	char *elements, *buffer;
	struct nameidata nd;
	struct label *intlabel;
	struct mac mac;
	int error;

	error = copyin(uap->mac_p, &mac, sizeof(mac));
	if (error)
		return (error);

	error = mac_check_structmac_consistent(&mac);
	if (error)
		return (error);

	elements = malloc(mac.m_buflen, M_MACTEMP, M_WAITOK);
	error = copyinstr(mac.m_string, elements, mac.m_buflen, NULL);
	if (error) {
		free(elements, M_MACTEMP);
		return (error);
	}

	buffer = malloc(mac.m_buflen, M_MACTEMP, M_WAITOK | M_ZERO);
	mtx_lock(&Giant);				/* VFS */
	NDINIT(&nd, LOOKUP, LOCKLEAF | FOLLOW, UIO_USERSPACE, uap->path_p,
	    td);
	error = namei(&nd);
	if (error)
		goto out;

	intlabel = mac_vnode_label_alloc();
	mac_copy_vnode_label(nd.ni_vp->v_label, intlabel);
	error = mac_externalize_vnode_label(intlabel, elements, buffer,
	    mac.m_buflen);

	NDFREE(&nd, 0);
	mac_vnode_label_free(intlabel);

	if (error == 0)
		error = copyout(buffer, mac.m_string, strlen(buffer)+1);

out:
	mtx_unlock(&Giant);				/* VFS */

	free(buffer, M_MACTEMP);
	free(elements, M_MACTEMP);

	return (error);
}

/*
 * MPSAFE
 */
int
__mac_get_link(struct thread *td, struct __mac_get_link_args *uap)
{
	char *elements, *buffer;
	struct nameidata nd;
	struct label *intlabel;
	struct mac mac;
	int error;

	error = copyin(uap->mac_p, &mac, sizeof(mac));
	if (error)
		return (error);

	error = mac_check_structmac_consistent(&mac);
	if (error)
		return (error);

	elements = malloc(mac.m_buflen, M_MACTEMP, M_WAITOK);
	error = copyinstr(mac.m_string, elements, mac.m_buflen, NULL);
	if (error) {
		free(elements, M_MACTEMP);
		return (error);
	}

	buffer = malloc(mac.m_buflen, M_MACTEMP, M_WAITOK | M_ZERO);
	mtx_lock(&Giant);				/* VFS */
	NDINIT(&nd, LOOKUP, LOCKLEAF | NOFOLLOW, UIO_USERSPACE, uap->path_p,
	    td);
	error = namei(&nd);
	if (error)
		goto out;

	intlabel = mac_vnode_label_alloc();
	mac_copy_vnode_label(nd.ni_vp->v_label, intlabel);
	error = mac_externalize_vnode_label(intlabel, elements, buffer,
	    mac.m_buflen);
	NDFREE(&nd, 0);
	mac_vnode_label_free(intlabel);

	if (error == 0)
		error = copyout(buffer, mac.m_string, strlen(buffer)+1);

out:
	mtx_unlock(&Giant);				/* VFS */

	free(buffer, M_MACTEMP);
	free(elements, M_MACTEMP);

	return (error);
}
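/*
 * The two entry points above differ only in symlink handling:
 * __mac_get_file() performs its lookup with FOLLOW, while
 * __mac_get_link() uses NOFOLLOW and so returns the label of the link
 * itself.  The same split applies to __mac_set_file() and
 * __mac_set_link() below.
 */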
/*
 * MPSAFE
 */
int
__mac_set_fd(struct thread *td, struct __mac_set_fd_args *uap)
{
	struct label *intlabel;
	struct pipe *pipe;
	struct socket *so;
	struct file *fp;
	struct mount *mp;
	struct vnode *vp;
	struct mac mac;
	char *buffer;
	int error;

	error = copyin(uap->mac_p, &mac, sizeof(mac));
	if (error)
		return (error);

	error = mac_check_structmac_consistent(&mac);
	if (error)
		return (error);

	buffer = malloc(mac.m_buflen, M_MACTEMP, M_WAITOK);
	error = copyinstr(mac.m_string, buffer, mac.m_buflen, NULL);
	if (error) {
		free(buffer, M_MACTEMP);
		return (error);
	}

	error = fget(td, uap->fd, &fp);
	if (error)
		goto out;

	switch (fp->f_type) {
	case DTYPE_FIFO:
	case DTYPE_VNODE:
		intlabel = mac_vnode_label_alloc();
		error = mac_internalize_vnode_label(intlabel, buffer);
		if (error) {
			mac_vnode_label_free(intlabel);
			break;
		}
		vp = fp->f_vnode;
		mtx_lock(&Giant);				/* VFS */
		error = vn_start_write(vp, &mp, V_WAIT | PCATCH);
		if (error != 0) {
			mtx_unlock(&Giant);			/* VFS */
			mac_vnode_label_free(intlabel);
			break;
		}
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
		error = vn_setlabel(vp, intlabel, td->td_ucred);
		VOP_UNLOCK(vp, 0, td);
		vn_finished_write(mp);
		mtx_unlock(&Giant);				/* VFS */
		mac_vnode_label_free(intlabel);
		break;

	case DTYPE_PIPE:
		intlabel = mac_pipe_label_alloc();
		error = mac_internalize_pipe_label(intlabel, buffer);
		if (error == 0) {
			pipe = fp->f_data;
			PIPE_LOCK(pipe);
			error = mac_pipe_label_set(td->td_ucred,
			    pipe->pipe_pair, intlabel);
			PIPE_UNLOCK(pipe);
		}
		mac_pipe_label_free(intlabel);
		break;

	case DTYPE_SOCKET:
		intlabel = mac_socket_label_alloc(M_WAITOK);
		error = mac_internalize_socket_label(intlabel, buffer);
		if (error == 0) {
			so = fp->f_data;
			NET_LOCK_GIANT();
			error = mac_socket_label_set(td->td_ucred, so,
			    intlabel);
			NET_UNLOCK_GIANT();
		}
		mac_socket_label_free(intlabel);
		break;

	default:
		error = EINVAL;
	}
	fdrop(fp, td);
out:
	free(buffer, M_MACTEMP);
	return (error);
}

/*
 * MPSAFE
 */
int
__mac_set_file(struct thread *td, struct __mac_set_file_args *uap)
{
	struct label *intlabel;
	struct nameidata nd;
	struct mount *mp;
	struct mac mac;
	char *buffer;
	int error;

	error = copyin(uap->mac_p, &mac, sizeof(mac));
	if (error)
		return (error);

	error = mac_check_structmac_consistent(&mac);
	if (error)
		return (error);

	buffer = malloc(mac.m_buflen, M_MACTEMP, M_WAITOK);
	error = copyinstr(mac.m_string, buffer, mac.m_buflen, NULL);
	if (error) {
		free(buffer, M_MACTEMP);
		return (error);
	}

	intlabel = mac_vnode_label_alloc();
	error = mac_internalize_vnode_label(intlabel, buffer);
	free(buffer, M_MACTEMP);
	if (error)
		goto out;

	mtx_lock(&Giant);				/* VFS */

	NDINIT(&nd, LOOKUP, LOCKLEAF | FOLLOW, UIO_USERSPACE, uap->path_p,
	    td);
	error = namei(&nd);
	if (error == 0) {
		error = vn_start_write(nd.ni_vp, &mp, V_WAIT | PCATCH);
		if (error == 0)
			error = vn_setlabel(nd.ni_vp, intlabel,
			    td->td_ucred);
		vn_finished_write(mp);
	}

	NDFREE(&nd, 0);
	mtx_unlock(&Giant);				/* VFS */
out:
	mac_vnode_label_free(intlabel);
	return (error);
}

/*
 * MPSAFE
 */
int
__mac_set_link(struct thread *td, struct __mac_set_link_args *uap)
{
	struct label *intlabel;
	struct nameidata nd;
	struct mount *mp;
	struct mac mac;
	char *buffer;
	int error;

	error = copyin(uap->mac_p, &mac, sizeof(mac));
	if (error)
		return (error);

	error = mac_check_structmac_consistent(&mac);
	if (error)
		return (error);

	buffer = malloc(mac.m_buflen, M_MACTEMP, M_WAITOK);
	error = copyinstr(mac.m_string, buffer, mac.m_buflen, NULL);
	if (error) {
		free(buffer, M_MACTEMP);
		return (error);
	}

	intlabel = mac_vnode_label_alloc();
	error = mac_internalize_vnode_label(intlabel, buffer);
	free(buffer, M_MACTEMP);
	if (error)
		goto out;

	mtx_lock(&Giant);				/* VFS */

	NDINIT(&nd, LOOKUP, LOCKLEAF | NOFOLLOW, UIO_USERSPACE, uap->path_p,
	    td);
	error = namei(&nd);
	if (error == 0) {
		error = vn_start_write(nd.ni_vp, &mp, V_WAIT | PCATCH);
		if (error == 0)
			error = vn_setlabel(nd.ni_vp, intlabel,
			    td->td_ucred);
		vn_finished_write(mp);
	}

	NDFREE(&nd, 0);
	mtx_unlock(&Giant);				/* VFS */
out:
	mac_vnode_label_free(intlabel);
	return (error);
}
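/*
 * mac_syscall() below acts as a per-policy system-call multiplexor: the
 * policy named by the caller is selected by name, and its mpo_syscall
 * entry point, if any, is handed the (call, arg) pair uninterpreted.
 * Userland is expected to reach it roughly as
 *
 *	error = mac_syscall("mac_foo", MAC_FOO_SOME_CALL, &req);
 *
 * where the policy name, call number, and argument layout are private to
 * the hypothetical mac_foo policy.
 */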
/*
 * MPSAFE
 */
int
mac_syscall(struct thread *td, struct mac_syscall_args *uap)
{
	struct mac_policy_conf *mpc;
	char target[MAC_MAX_POLICY_NAME];
	int entrycount, error;

	error = copyinstr(uap->policy, target, sizeof(target), NULL);
	if (error)
		return (error);

	error = ENOSYS;
	LIST_FOREACH(mpc, &mac_static_policy_list, mpc_list) {
		if (strcmp(mpc->mpc_name, target) == 0 &&
		    mpc->mpc_ops->mpo_syscall != NULL) {
			error = mpc->mpc_ops->mpo_syscall(td,
			    uap->call, uap->arg);
			goto out;
		}
	}

	if ((entrycount = mac_policy_list_conditional_busy()) != 0) {
		LIST_FOREACH(mpc, &mac_policy_list, mpc_list) {
			if (strcmp(mpc->mpc_name, target) == 0 &&
			    mpc->mpc_ops->mpo_syscall != NULL) {
				error = mpc->mpc_ops->mpo_syscall(td,
				    uap->call, uap->arg);
				break;
			}
		}
		mac_policy_list_unbusy();
	}
out:
	return (error);
}

SYSINIT(mac, SI_SUB_MAC, SI_ORDER_FIRST, mac_init, NULL);
SYSINIT(mac_late, SI_SUB_MAC_LATE, SI_ORDER_FIRST, mac_late_init, NULL);

#else /* !MAC */

int
__mac_get_pid(struct thread *td, struct __mac_get_pid_args *uap)
{

	return (ENOSYS);
}

int
__mac_get_proc(struct thread *td, struct __mac_get_proc_args *uap)
{

	return (ENOSYS);
}

int
__mac_set_proc(struct thread *td, struct __mac_set_proc_args *uap)
{

	return (ENOSYS);
}

int
__mac_get_fd(struct thread *td, struct __mac_get_fd_args *uap)
{

	return (ENOSYS);
}

int
__mac_get_file(struct thread *td, struct __mac_get_file_args *uap)
{

	return (ENOSYS);
}

int
__mac_get_link(struct thread *td, struct __mac_get_link_args *uap)
{

	return (ENOSYS);
}

int
__mac_set_fd(struct thread *td, struct __mac_set_fd_args *uap)
{

	return (ENOSYS);
}

int
__mac_set_file(struct thread *td, struct __mac_set_file_args *uap)
{

	return (ENOSYS);
}

int
__mac_set_link(struct thread *td, struct __mac_set_link_args *uap)
{

	return (ENOSYS);
}

int
mac_syscall(struct thread *td, struct mac_syscall_args *uap)
{

	return (ENOSYS);
}

#endif /* !MAC */