/*-
 * Copyright (c) 1999-2002, 2006, 2009 Robert N. M. Watson
 * Copyright (c) 2001 Ilmar S. Habibulin
 * Copyright (c) 2001-2005 Networks Associates Technology, Inc.
 * Copyright (c) 2005-2006 SPARTA, Inc.
 * Copyright (c) 2008-2009 Apple Inc.
 * All rights reserved.
 *
 * This software was developed by Robert Watson and Ilmar Habibulin for the
 * TrustedBSD Project.
 *
 * This software was developed for the FreeBSD Project in part by Network
 * Associates Laboratories, the Security Research Division of Network
 * Associates, Inc. under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"),
 * as part of the DARPA CHATS research program.
 *
 * This software was enhanced by SPARTA ISSO under SPAWAR contract
 * N66001-04-C-6019 ("SEFOS").
 *
 * This software was developed at the University of Cambridge Computer
 * Laboratory with support from a grant from Google, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*-
 * Framework for extensible kernel access control.  This file contains core
 * kernel infrastructure for the TrustedBSD MAC Framework, including policy
 * registration, versioning, locking, the error composition operator, and
 * system calls.
 *
 * The MAC Framework implements three programming interfaces:
 *
 * - The kernel MAC interface, defined in mac_framework.h, and invoked
 *   throughout the kernel to request security decisions, notify of security
 *   related events, etc.
 *
 * - The MAC policy module interface, defined in mac_policy.h, which is
 *   implemented by MAC policy modules and invoked by the MAC Framework to
 *   forward kernel security requests and notifications to policy modules.
 *
 * - The user MAC API, defined in mac.h, which allows user programs to query
 *   and set label state on objects.
 *
 * The majority of the MAC Framework implementation may be found in
 * src/sys/security/mac.  Sample policy modules may be found in
 * src/sys/security/mac_*.
 */
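/*
 * For orientation, a typical consumer of the kernel MAC interface looks
 * roughly like the sketch below (a hedged illustration, not a quote from any
 * particular subsystem): the caller asks the framework for a decision and
 * fails the operation if any registered policy objects.  The entry point
 * declarations live in mac_framework.h.
 *
 *	error = mac_vnode_check_open(cred, vp, accmode);
 *	if (error != 0)
 *		return (error);
 */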

#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mac.h>
#include <sys/module.h>
#include <sys/rmlock.h>
#include <sys/sdt.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <security/mac/mac_framework.h>
#include <security/mac/mac_internal.h>
#include <security/mac/mac_policy.h>

/*
 * DTrace SDT providers for MAC.
 */
SDT_PROVIDER_DEFINE(mac);
SDT_PROVIDER_DEFINE(mac_framework);

SDT_PROBE_DEFINE2(mac, , policy, modevent, "int",
    "struct mac_policy_conf *");
SDT_PROBE_DEFINE1(mac, , policy, register,
    "struct mac_policy_conf *");
SDT_PROBE_DEFINE1(mac, , policy, unregister,
    "struct mac_policy_conf *");

/*
 * Root sysctl node for all MAC and MAC policy controls.
 */
SYSCTL_NODE(_security, OID_AUTO, mac, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TrustedBSD MAC policy controls");

/*
 * Root sysctl node for MAC modules' jail parameters.
 */
SYSCTL_JAIL_PARAM_NODE(mac, "Jail parameters for MAC policy controls");

/*
 * Declare that the kernel provides a specific version of MAC support.
 * This permits modules to refuse to be loaded if the necessary support isn't
 * present, even if it's pre-boot.
 */
MODULE_VERSION(kernel_mac_support, MAC_VERSION);
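
/*
 * Policy modules, for their part, typically declare a matching dependency on
 * kernel_mac_support (e.g. via the MAC_POLICY_SET() glue in mac_policy.h) so
 * the module loader refuses to load them against a kernel lacking this
 * version.  The exact macro a module uses is up to the module; the version
 * handshake is what matters here.
 */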

static unsigned int mac_version = MAC_VERSION;
SYSCTL_UINT(_security_mac, OID_AUTO, version, CTLFLAG_RD, &mac_version, 0,
    "");

/*
 * Flags for inlined checks.  Note this would be best hotpatched at runtime.
 * The following is a band-aid.
 *
 * Use FPFLAG for hooks running in commonly executed paths and FPFLAG_RARE
 * for the rest.
 */
#define FPFLAG(f)	\
bool __read_frequently mac_##f##_fp_flag

#define FPFLAG_RARE(f)	\
bool __read_mostly mac_##f##_fp_flag

FPFLAG(priv_check);
FPFLAG(priv_grant);
FPFLAG(vnode_check_lookup);
FPFLAG(vnode_check_open);
FPFLAG(vnode_check_stat);
FPFLAG(vnode_check_read);
FPFLAG(vnode_check_write);
FPFLAG(vnode_check_mmap);
FPFLAG_RARE(vnode_check_poll);
FPFLAG_RARE(vnode_check_rename_from);
FPFLAG_RARE(vnode_check_access);
FPFLAG_RARE(vnode_check_readlink);
FPFLAG_RARE(pipe_check_stat);
FPFLAG_RARE(pipe_check_poll);
FPFLAG_RARE(pipe_check_read);
FPFLAG_RARE(ifnet_create_mbuf);
FPFLAG_RARE(ifnet_check_transmit);

#undef FPFLAG
#undef FPFLAG_RARE
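
/*
 * The intended effect, shown as a rough, hypothetical sketch (names invented
 * for illustration; the real inline wrappers live in mac_framework.h), is
 * that a hot-path caller can test a single boolean before taking the full
 * framework path:
 *
 *	if (__predict_false(mac_vnode_check_open_fp_flag))
 *		return (mac_vnode_check_open_slowpath(cred, vp, accmode));
 *	return (0);
 *
 * so kernels with no interested policy loaded pay only a predicted branch.
 */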

/*
 * Labels consist of an indexed set of "slots", which are allocated to
 * policies as required.  The MAC Framework maintains a bitmask of slots
 * allocated so far to prevent reuse.  Slots cannot be reused, as the MAC
 * Framework guarantees that newly allocated slots in labels will be NULL
 * unless otherwise initialized, and because we do not have a mechanism to
 * garbage collect slots on policy unload.  As labeled policies tend to be
 * statically loaded during boot, and not frequently unloaded and reloaded,
 * this is not generally an issue.
 */
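/*
 * Example (hypothetical numbers): with MAC_MAX_SLOTS == 4, the free mask
 * starts at 0xf.  The first labeled policy to register claims slot
 * ffs(0xf) - 1 == 0 and the mask becomes 0xe; the next claims slot 1, and so
 * on.  Once a bit is cleared it is never set again, even if the owning
 * policy later unloads.
 */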
#if MAC_MAX_SLOTS > 32
#error "MAC_MAX_SLOTS too large"
#endif

static unsigned int mac_max_slots = MAC_MAX_SLOTS;
static unsigned int mac_slot_offsets_free = (1 << MAC_MAX_SLOTS) - 1;
SYSCTL_UINT(_security_mac, OID_AUTO, max_slots, CTLFLAG_RD, &mac_max_slots,
    0, "");

/*
 * Has the kernel started generating labeled objects yet?  All read/write
 * access to this variable is serialized during the boot process.  Following
 * the end of serialization, we don't update this flag; no locking.
 */
static int mac_late = 0;

/*
 * Each policy declares a mask of object types requiring labels to be
 * allocated for them.  For convenience, we combine and cache the bitwise or
 * of the per-policy object flags to track whether we will allocate a label
 * for an object type at run-time.
 */
uint64_t mac_labeled;
SYSCTL_UQUAD(_security_mac, OID_AUTO, labeled, CTLFLAG_RD, &mac_labeled, 0,
    "Mask of object types being labeled");

MALLOC_DEFINE(M_MACTEMP, "mactemp", "MAC temporary label storage");

/*
 * MAC policy modules are placed in one of two lists: mac_static_policy_list,
 * for policies that are loaded early and cannot be unloaded, and
 * mac_policy_list, which holds policies either loaded later in the boot
 * cycle or that may be unloaded.  The static policy list does not require
 * locks to iterate over, but the dynamic list requires synchronization.
 * Support for dynamic policy loading can be compiled out using the
 * MAC_STATIC kernel option.
 *
 * The dynamic policy list is protected by two locks: modifying the list
 * requires both locks to be held exclusively.  One of the locks,
 * mac_policy_rm, is acquired over policy entry points that will never sleep;
 * the other, mac_policy_rms, is acquired over policy entry points that may
 * sleep.  The former category will be used when kernel locks may be held
 * over calls to the MAC Framework, during network processing in ithreads,
 * etc.  The latter will tend to involve potentially blocking memory
 * allocations, extended attribute I/O, etc.
 */
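/*
 * In rough, hedged pseudocode, a non-sleeping framework entry point brackets
 * its walk of the dynamic list like this (the static list needs no lock):
 *
 *	struct rm_priotracker tracker;
 *
 *	mac_policy_slock_nosleep(&tracker);
 *	... iterate mac_policy_list, invoking each policy's hook ...
 *	mac_policy_sunlock_nosleep(&tracker);
 *
 * Sleepable entry points use the _sleep variants, which take mac_policy_rms
 * instead.
 */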
#ifndef MAC_STATIC
static struct rmlock mac_policy_rm;	/* Non-sleeping entry points. */
static struct rmslock mac_policy_rms;	/* Sleeping entry points. */
#endif

struct mac_policy_list_head mac_policy_list;
struct mac_policy_list_head mac_static_policy_list;
u_int mac_policy_count;			/* Registered policy count. */

static void mac_policy_xlock(void);
static void mac_policy_xlock_assert(void);
static void mac_policy_xunlock(void);

void
mac_policy_slock_nosleep(struct rm_priotracker *tracker)
{

#ifndef MAC_STATIC
	if (!mac_late)
		return;

	rm_rlock(&mac_policy_rm, tracker);
#endif
}

void
mac_policy_slock_sleep(void)
{

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "mac_policy_slock_sleep");

#ifndef MAC_STATIC
	if (!mac_late)
		return;

	rms_rlock(&mac_policy_rms);
#endif
}

void
mac_policy_sunlock_nosleep(struct rm_priotracker *tracker)
{

#ifndef MAC_STATIC
	if (!mac_late)
		return;

	rm_runlock(&mac_policy_rm, tracker);
#endif
}

void
mac_policy_sunlock_sleep(void)
{

#ifndef MAC_STATIC
	if (!mac_late)
		return;

	rms_runlock(&mac_policy_rms);
#endif
}

static void
mac_policy_xlock(void)
{

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "mac_policy_xlock()");

#ifndef MAC_STATIC
	if (!mac_late)
		return;

	rms_wlock(&mac_policy_rms);
	rm_wlock(&mac_policy_rm);
#endif
}

static void
mac_policy_xunlock(void)
{

#ifndef MAC_STATIC
	if (!mac_late)
		return;

	rm_wunlock(&mac_policy_rm);
	rms_wunlock(&mac_policy_rms);
#endif
}

static void
mac_policy_xlock_assert(void)
{

#ifndef MAC_STATIC
	if (!mac_late)
		return;

	rm_assert(&mac_policy_rm, RA_WLOCKED);
#endif
}

/*
 * Initialize the MAC subsystem, including appropriate SMP locks.
 */
static void
mac_init(void *dummy __unused)
{

	LIST_INIT(&mac_static_policy_list);
	LIST_INIT(&mac_policy_list);
	mac_labelzone_init();

#ifndef MAC_STATIC
	rm_init_flags(&mac_policy_rm, "mac_policy_rm", RM_NOWITNESS |
	    RM_RECURSE);
	rms_init(&mac_policy_rms, "mac_policy_rms");
#endif
}

/*
 * For the purposes of modules that want to know if they were loaded "early",
 * set the mac_late flag once we've processed modules either linked into the
 * kernel, or loaded before the kernel startup.
 */
static void
mac_late_init(void *dummy __unused)
{

	mac_late = 1;
}

/*
 * Given a policy, derive from its set of non-NULL label init methods what
 * object types the policy is interested in.
 */
static uint64_t
mac_policy_getlabeled(struct mac_policy_conf *mpc)
{
	uint64_t labeled;

#define	MPC_FLAG(method, flag)						\
	if (mpc->mpc_ops->mpo_ ## method != NULL)			\
		labeled |= (flag);					\

	labeled = 0;
	MPC_FLAG(cred_init_label, MPC_OBJECT_CRED);
	MPC_FLAG(proc_init_label, MPC_OBJECT_PROC);
	MPC_FLAG(vnode_init_label, MPC_OBJECT_VNODE);
	MPC_FLAG(inpcb_init_label, MPC_OBJECT_INPCB);
	MPC_FLAG(socket_init_label, MPC_OBJECT_SOCKET);
	MPC_FLAG(devfs_init_label, MPC_OBJECT_DEVFS);
	MPC_FLAG(mbuf_init_label, MPC_OBJECT_MBUF);
	MPC_FLAG(ipq_init_label, MPC_OBJECT_IPQ);
	MPC_FLAG(ifnet_init_label, MPC_OBJECT_IFNET);
	MPC_FLAG(bpfdesc_init_label, MPC_OBJECT_BPFDESC);
	MPC_FLAG(pipe_init_label, MPC_OBJECT_PIPE);
	MPC_FLAG(mount_init_label, MPC_OBJECT_MOUNT);
	MPC_FLAG(posixsem_init_label, MPC_OBJECT_POSIXSEM);
	MPC_FLAG(posixshm_init_label, MPC_OBJECT_POSIXSHM);
	MPC_FLAG(prison_init_label, MPC_OBJECT_PRISON);
	MPC_FLAG(sysvmsg_init_label, MPC_OBJECT_SYSVMSG);
	MPC_FLAG(sysvmsq_init_label, MPC_OBJECT_SYSVMSQ);
	MPC_FLAG(sysvsem_init_label, MPC_OBJECT_SYSVSEM);
	MPC_FLAG(sysvshm_init_label, MPC_OBJECT_SYSVSHM);
	MPC_FLAG(syncache_init_label, MPC_OBJECT_SYNCACHE);
	MPC_FLAG(ip6q_init_label, MPC_OBJECT_IP6Q);

#undef MPC_FLAG
	return (labeled);
}

/*
 * When policies are loaded or unloaded, walk the list of registered policies
 * and rebuild mac_labeled, a bitmask representing the union of all objects
 * requiring labels across all policies.
 */
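/*
 * For example, if one loaded policy supplies only vnode_init_label and
 * another supplies only socket_init_label, the recomputed mask is
 * MPC_OBJECT_VNODE | MPC_OBJECT_SOCKET, so labels are allocated only for
 * those two object types.
 */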
static void
mac_policy_update(void)
{
	struct mac_policy_conf *mpc;

	mac_policy_xlock_assert();

	mac_labeled = 0;
	mac_policy_count = 0;
	LIST_FOREACH(mpc, &mac_static_policy_list, mpc_list) {
		mac_labeled |= mac_policy_getlabeled(mpc);
		mac_policy_count++;
	}
	LIST_FOREACH(mpc, &mac_policy_list, mpc_list) {
		mac_labeled |= mac_policy_getlabeled(mpc);
		mac_policy_count++;
	}

	cache_fast_lookup_enabled_recalc();
}

/*
 * There are frequently used code paths which check for rarely installed
 * policies.  Gross hack below enables doing it in a cheap manner.
 */

#define FPO(f)	(offsetof(struct mac_policy_ops, mpo_##f) / sizeof(uintptr_t))

struct mac_policy_fastpath_elem {
	int	count;
	bool	*flag;
	size_t	offset;
};

struct mac_policy_fastpath_elem mac_policy_fastpath_array[] = {
	{ .offset = FPO(priv_check), .flag = &mac_priv_check_fp_flag },
	{ .offset = FPO(priv_grant), .flag = &mac_priv_grant_fp_flag },
	{ .offset = FPO(vnode_check_lookup),
	    .flag = &mac_vnode_check_lookup_fp_flag },
	{ .offset = FPO(vnode_check_readlink),
	    .flag = &mac_vnode_check_readlink_fp_flag },
	{ .offset = FPO(vnode_check_open),
	    .flag = &mac_vnode_check_open_fp_flag },
	{ .offset = FPO(vnode_check_stat),
	    .flag = &mac_vnode_check_stat_fp_flag },
	{ .offset = FPO(vnode_check_read),
	    .flag = &mac_vnode_check_read_fp_flag },
	{ .offset = FPO(vnode_check_write),
	    .flag = &mac_vnode_check_write_fp_flag },
	{ .offset = FPO(vnode_check_mmap),
	    .flag = &mac_vnode_check_mmap_fp_flag },
	{ .offset = FPO(vnode_check_poll),
	    .flag = &mac_vnode_check_poll_fp_flag },
	{ .offset = FPO(vnode_check_rename_from),
	    .flag = &mac_vnode_check_rename_from_fp_flag },
	{ .offset = FPO(vnode_check_access),
	    .flag = &mac_vnode_check_access_fp_flag },
	{ .offset = FPO(pipe_check_stat),
	    .flag = &mac_pipe_check_stat_fp_flag },
	{ .offset = FPO(pipe_check_poll),
	    .flag = &mac_pipe_check_poll_fp_flag },
	{ .offset = FPO(pipe_check_read),
	    .flag = &mac_pipe_check_read_fp_flag },
	{ .offset = FPO(ifnet_create_mbuf),
	    .flag = &mac_ifnet_create_mbuf_fp_flag },
	{ .offset = FPO(ifnet_check_transmit),
	    .flag = &mac_ifnet_check_transmit_fp_flag },
};

static void
mac_policy_fastpath_enable(struct mac_policy_fastpath_elem *mpfe)
{

	MPASS(mpfe->count >= 0);
	mpfe->count++;
	if (mpfe->count == 1) {
		MPASS(*mpfe->flag == false);
		*mpfe->flag = true;
	}
}

static void
mac_policy_fastpath_disable(struct mac_policy_fastpath_elem *mpfe)
{

	MPASS(mpfe->count >= 1);
	mpfe->count--;
	if (mpfe->count == 0) {
		MPASS(*mpfe->flag == true);
		*mpfe->flag = false;
	}
}

static void
mac_policy_fastpath_register(struct mac_policy_conf *mpc)
{
	struct mac_policy_fastpath_elem *mpfe;
	uintptr_t **ops;
	int i;

	mac_policy_xlock_assert();

	ops = (uintptr_t **)mpc->mpc_ops;
	for (i = 0; i < nitems(mac_policy_fastpath_array); i++) {
		mpfe = &mac_policy_fastpath_array[i];
		if (ops[mpfe->offset] != NULL)
			mac_policy_fastpath_enable(mpfe);
	}
}

static void
mac_policy_fastpath_unregister(struct mac_policy_conf *mpc)
{
	struct mac_policy_fastpath_elem *mpfe;
	uintptr_t **ops;
	int i;

	mac_policy_xlock_assert();

	ops = (uintptr_t **)mpc->mpc_ops;
	for (i = 0; i < nitems(mac_policy_fastpath_array); i++) {
		mpfe = &mac_policy_fastpath_array[i];
		if (ops[mpfe->offset] != NULL)
			mac_policy_fastpath_disable(mpfe);
	}
}

#undef FPO

static int
mac_policy_register(struct mac_policy_conf *mpc)
{
	struct mac_policy_list_head *mpc_list;
	struct mac_policy_conf *last_mpc, *tmpc;
	int error, slot, static_entry;

	error = 0;

	/*
	 * We don't technically need exclusive access while !mac_late, but
	 * hold it for assertion consistency.
	 */
	mac_policy_xlock();

	/*
	 * If the module can potentially be unloaded, or we're loading late,
	 * we have to stick it in the non-static list and pay an extra
	 * performance overhead.  Otherwise, we can pay a light locking cost
	 * and stick it in the static list.
	 */
	static_entry = (!mac_late &&
	    !(mpc->mpc_loadtime_flags & MPC_LOADTIME_FLAG_UNLOADOK));

	mpc_list = (static_entry) ? &mac_static_policy_list :
	    &mac_policy_list;
	last_mpc = NULL;
	LIST_FOREACH(tmpc, mpc_list, mpc_list) {
		last_mpc = tmpc;
		if (strcmp(tmpc->mpc_name, mpc->mpc_name) == 0) {
			error = EEXIST;
			goto out;
		}
	}
	if (mpc->mpc_field_off != NULL) {
		slot = ffs(mac_slot_offsets_free);
		if (slot == 0) {
			error = ENOMEM;
			goto out;
		}
		slot--;
		mac_slot_offsets_free &= ~(1 << slot);
		*mpc->mpc_field_off = slot;
	}
	mpc->mpc_runtime_flags |= MPC_RUNTIME_FLAG_REGISTERED;

	/*
	 * Some modules may depend on the operations of their dependencies.
	 * Inserting modules in order of registration ensures operations
	 * that work on the module list retain dependency order.
	 */
	if (last_mpc == NULL)
		LIST_INSERT_HEAD(mpc_list, mpc, mpc_list);
	else
		LIST_INSERT_AFTER(last_mpc, mpc, mpc_list);
	/*
	 * Per-policy initialization.  Currently, this takes place under the
	 * exclusive lock, so policies must not sleep in their init method.
	 * In the future, we may want to separate "init" from "start", with
	 * "init" occurring without the lock held.  Likewise, on tear-down,
	 * breaking out "stop" from "destroy".
	 */
	if (mpc->mpc_ops->mpo_init != NULL)
		(*(mpc->mpc_ops->mpo_init))(mpc);

	mac_policy_fastpath_register(mpc);

	mac_policy_update();

	SDT_PROBE1(mac, , policy, register, mpc);
	printf("Security policy loaded: %s (%s)\n", mpc->mpc_fullname,
	    mpc->mpc_name);

out:
	mac_policy_xunlock();
	return (error);
}

static int
mac_policy_unregister(struct mac_policy_conf *mpc)
{

	/*
	 * If we fail the load, we may get a request to unload.  Check to see
	 * if we did the run-time registration, and if not, silently succeed.
	 */
	mac_policy_xlock();
	if ((mpc->mpc_runtime_flags & MPC_RUNTIME_FLAG_REGISTERED) == 0) {
		mac_policy_xunlock();
		return (0);
	}
#if 0
	/*
	 * Don't allow unloading modules with private data.
	 */
	if (mpc->mpc_field_off != NULL) {
		mac_policy_xunlock();
		return (EBUSY);
	}
#endif
	/*
	 * Only allow the unload to proceed if the module is unloadable by
	 * its own definition.
	 */
	if ((mpc->mpc_loadtime_flags & MPC_LOADTIME_FLAG_UNLOADOK) == 0) {
		mac_policy_xunlock();
		return (EBUSY);
	}

	mac_policy_fastpath_unregister(mpc);

	if (mpc->mpc_ops->mpo_destroy != NULL)
		(*(mpc->mpc_ops->mpo_destroy))(mpc);

	LIST_REMOVE(mpc, mpc_list);
	mpc->mpc_runtime_flags &= ~MPC_RUNTIME_FLAG_REGISTERED;
	mac_policy_update();
	mac_policy_xunlock();

	SDT_PROBE1(mac, , policy, unregister, mpc);
	printf("Security policy unload: %s (%s)\n", mpc->mpc_fullname,
	    mpc->mpc_name);

	return (0);
}

/*
 * Allow MAC policy modules to register during boot, etc.
 */
int
mac_policy_modevent(module_t mod, int type, void *data)
{
	struct mac_policy_conf *mpc;
	int error;

	error = 0;
	mpc = (struct mac_policy_conf *) data;

#ifdef MAC_STATIC
	if (mac_late) {
		printf("mac_policy_modevent: MAC_STATIC and late\n");
		return (EBUSY);
	}
#endif

	SDT_PROBE2(mac, , policy, modevent, type, mpc);
	switch (type) {
	case MOD_LOAD:
		if (mpc->mpc_loadtime_flags & MPC_LOADTIME_FLAG_NOTLATE &&
		    mac_late) {
			printf("mac_policy_modevent: can't load %s policy "
			    "after booting\n", mpc->mpc_name);
			error = EBUSY;
			break;
		}
		error = mac_policy_register(mpc);
		break;
	case MOD_UNLOAD:
		/* Don't unregister the module if it was never registered. */
		if ((mpc->mpc_runtime_flags & MPC_RUNTIME_FLAG_REGISTERED)
		    != 0)
			error = mac_policy_unregister(mpc);
		else
			error = 0;
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}

/*
 * Define an error value precedence, and, given two arguments, select the
 * value with the higher precedence.
 */
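/*
 * For example, composing results from two policies where one returned EPERM
 * and the other ENOENT yields ENOENT: hiding the object's existence takes
 * precedence over reporting a privilege failure.
 */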
int
mac_error_select(int error1, int error2)
{

	/* Certain decision-making errors take top priority. */
	if (error1 == EDEADLK || error2 == EDEADLK)
		return (EDEADLK);

	/* Invalid arguments should be reported where possible. */
	if (error1 == EINVAL || error2 == EINVAL)
		return (EINVAL);

	/* Precedence goes to "visibility", with both process and file. */
	if (error1 == ESRCH || error2 == ESRCH)
		return (ESRCH);

	if (error1 == ENOENT || error2 == ENOENT)
		return (ENOENT);

	/* Precedence goes to DAC/MAC protections. */
	if (error1 == EACCES || error2 == EACCES)
		return (EACCES);

	/* Precedence goes to privilege. */
	if (error1 == EPERM || error2 == EPERM)
		return (EPERM);

	/* Precedence goes to error over success; otherwise, arbitrary. */
	if (error1 != 0)
		return (error1);
	return (error2);
}

int
mac_check_structmac_consistent(const struct mac *mac)
{
	/* Require that labels have a non-zero length. */
	if (mac->m_buflen > MAC_MAX_LABEL_BUF_LEN ||
	    mac->m_buflen <= sizeof(""))
		return (EINVAL);

	return (0);
}

SYSINIT(mac, SI_SUB_MAC, SI_ORDER_FIRST, mac_init, NULL);
SYSINIT(mac_late, SI_SUB_MAC_LATE, SI_ORDER_FIRST, mac_late_init, NULL);