xref: /freebsd/sys/security/mac/mac_framework.c (revision af23369a6deaaeb612ab266eb88b8bb8d560c322)
1 /*-
2  * Copyright (c) 1999-2002, 2006, 2009 Robert N. M. Watson
3  * Copyright (c) 2001 Ilmar S. Habibulin
4  * Copyright (c) 2001-2005 Networks Associates Technology, Inc.
5  * Copyright (c) 2005-2006 SPARTA, Inc.
6  * Copyright (c) 2008-2009 Apple Inc.
7  * All rights reserved.
8  *
9  * This software was developed by Robert Watson and Ilmar Habibulin for the
10  * TrustedBSD Project.
11  *
12  * This software was developed for the FreeBSD Project in part by Network
13  * Associates Laboratories, the Security Research Division of Network
14  * Associates, Inc. under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"),
15  * as part of the DARPA CHATS research program.
16  *
17  * This software was enhanced by SPARTA ISSO under SPAWAR contract
18  * N66001-04-C-6019 ("SEFOS").
19  *
20  * This software was developed at the University of Cambridge Computer
21  * Laboratory with support from a grant from Google, Inc.
22  *
23  * Redistribution and use in source and binary forms, with or without
24  * modification, are permitted provided that the following conditions
25  * are met:
26  * 1. Redistributions of source code must retain the above copyright
27  *    notice, this list of conditions and the following disclaimer.
28  * 2. Redistributions in binary form must reproduce the above copyright
29  *    notice, this list of conditions and the following disclaimer in the
30  *    documentation and/or other materials provided with the distribution.
31  *
32  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
33  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
34  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
35  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
36  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
37  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
38  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
39  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
40  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
41  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
42  * SUCH DAMAGE.
43  */
44 
45 /*-
46  * Framework for extensible kernel access control.  This file contains core
47  * kernel infrastructure for the TrustedBSD MAC Framework, including policy
48  * registration, versioning, locking, error composition operator, and system
49  * calls.
50  *
51  * The MAC Framework implements three programming interfaces:
52  *
53  * - The kernel MAC interface, defined in mac_framework.h, and invoked
54  *   throughout the kernel to request security decisions, notify of security
55  *   related events, etc.
56  *
57  * - The MAC policy module interface, defined in mac_policy.h, which is
58  *   implemented by MAC policy modules and invoked by the MAC Framework to
59  *   forward kernel security requests and notifications to policy modules.
60  *
61  * - The user MAC API, defined in mac.h, which allows user programs to query
62  *   and set label state on objects.
63  *
64  * The majority of the MAC Framework implementation may be found in
65  * src/sys/security/mac.  Sample policy modules may be found in
66  * src/sys/security/mac_*.
67  */
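
/*
 * Illustrative example (not part of the original file): a typical consumer
 * of the kernel MAC interface.  A kernel code path asks the framework for a
 * decision and honors the composed error.  The wrapper below is a
 * hypothetical sketch; mac_vnode_check_open() itself is a real entry point
 * declared in mac_framework.h.
 */
#if 0
static int
example_vnode_open_check(struct ucred *cred, struct vnode *vp,
    accmode_t accmode)
{
	int error;

	/* Ask the registered policies whether this open may proceed. */
	error = mac_vnode_check_open(cred, vp, accmode);
	if (error != 0)
		return (error);

	/* ... proceed with the open ... */
	return (0);
}
#endif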
68 
69 #include "opt_mac.h"
70 
71 #include <sys/cdefs.h>
72 __FBSDID("$FreeBSD$");
73 
74 #include <sys/param.h>
75 #include <sys/systm.h>
76 #include <sys/condvar.h>
77 #include <sys/kernel.h>
78 #include <sys/lock.h>
79 #include <sys/mac.h>
80 #include <sys/module.h>
81 #include <sys/rmlock.h>
82 #include <sys/sdt.h>
83 #include <sys/sx.h>
84 #include <sys/sysctl.h>
85 #include <sys/vnode.h>
86 
87 #include <security/mac/mac_framework.h>
88 #include <security/mac/mac_internal.h>
89 #include <security/mac/mac_policy.h>
90 
91 /*
92  * DTrace SDT providers for MAC.
93  */
94 SDT_PROVIDER_DEFINE(mac);
95 SDT_PROVIDER_DEFINE(mac_framework);
96 
97 SDT_PROBE_DEFINE2(mac, , policy, modevent, "int",
98     "struct mac_policy_conf *");
99 SDT_PROBE_DEFINE1(mac, , policy, register,
100     "struct mac_policy_conf *");
101 SDT_PROBE_DEFINE1(mac, , policy, unregister,
102     "struct mac_policy_conf *");
103 
104 /*
105  * Root sysctl node for all MAC and MAC policy controls.
106  */
107 SYSCTL_NODE(_security, OID_AUTO, mac, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
108     "TrustedBSD MAC policy controls");
109 
110 /*
111  * Declare that the kernel provides MAC support, version 3 (FreeBSD 7.x).
112  * This permits modules to refuse to be loaded if the necessary support isn't
113  * present, even if it's pre-boot.
114  */
115 MODULE_VERSION(kernel_mac_support, MAC_VERSION);
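
/*
 * Illustrative example: a policy module can express its requirement on this
 * interface version with a module dependency.  The MAC_POLICY_SET() macro in
 * mac_policy.h arranges a dependency along these lines on the module's
 * behalf; the "mac_example" module name is hypothetical.
 */
#if 0
MODULE_DEPEND(mac_example, kernel_mac_support, MAC_VERSION, MAC_VERSION,
    MAC_VERSION);
#endif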
116 
117 static unsigned int	mac_version = MAC_VERSION;
118 SYSCTL_UINT(_security_mac, OID_AUTO, version, CTLFLAG_RD, &mac_version, 0,
119     "");
120 
121 /*
122  * Flags for inlined checks.  Ideally these would be hot-patched at runtime;
123  * the flags below are a stopgap.
124  *
125  * Use FPFLAG for hooks running in commonly executed paths and FPFLAG_RARE
126  * for the rest.
127  */
128 #define FPFLAG(f)	\
129 bool __read_frequently mac_##f##_fp_flag
130 
131 #define FPFLAG_RARE(f)	\
132 bool __read_mostly mac_##f##_fp_flag
133 
134 FPFLAG(priv_check);
135 FPFLAG(priv_grant);
136 FPFLAG(vnode_check_lookup);
137 FPFLAG(vnode_check_open);
138 FPFLAG(vnode_check_stat);
139 FPFLAG(vnode_check_read);
140 FPFLAG(vnode_check_write);
141 FPFLAG(vnode_check_mmap);
142 FPFLAG_RARE(vnode_check_poll);
143 FPFLAG_RARE(vnode_check_rename_from);
144 FPFLAG_RARE(vnode_check_access);
145 FPFLAG_RARE(vnode_check_readlink);
146 FPFLAG_RARE(pipe_check_stat);
147 FPFLAG_RARE(pipe_check_poll);
148 FPFLAG_RARE(pipe_check_read);
149 FPFLAG_RARE(ifnet_create_mbuf);
150 FPFLAG_RARE(ifnet_check_transmit);
151 
152 #undef FPFLAG
153 #undef FPFLAG_RARE
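
/*
 * Illustrative sketch of how one of the flags above is consumed (the real
 * inline wrappers live in mac_framework.h; the "_sketch" and "_impl" names
 * below are hypothetical).  The point is to skip entering the framework on
 * hot paths unless at least one loaded policy implements the hook.
 */
#if 0
static __inline int
mac_vnode_check_read_sketch(struct ucred *active_cred,
    struct ucred *file_cred, struct vnode *vp)
{

	if (__predict_false(mac_vnode_check_read_fp_flag))
		return (mac_vnode_check_read_impl(active_cred, file_cred, vp));
	return (0);
}
#endif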
154 
155 /*
156  * Labels consist of an indexed set of "slots", allocated to policies
157  * as required.  The MAC Framework maintains a bitmask of slots allocated so
158  * far to prevent reuse.  Slots cannot be reused, as the MAC Framework
159  * guarantees that newly allocated slots in labels will be NULL unless
160  * otherwise initialized, and because we do not have a mechanism to garbage
161  * collect slots on policy unload.  As labeled policies tend to be statically
162  * loaded during boot, and not frequently unloaded and reloaded, this is not
163  * generally an issue.
164  */
165 #if MAC_MAX_SLOTS > 32
166 #error "MAC_MAX_SLOTS too large"
167 #endif
168 
169 static unsigned int mac_max_slots = MAC_MAX_SLOTS;
170 static unsigned int mac_slot_offsets_free = (1 << MAC_MAX_SLOTS) - 1;
171 SYSCTL_UINT(_security_mac, OID_AUTO, max_slots, CTLFLAG_RD, &mac_max_slots,
172     0, "");
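
/*
 * Illustrative sketch of how a label slot is carved out of
 * mac_slot_offsets_free, mirroring the logic in mac_policy_register() below.
 * For example, with MAC_MAX_SLOTS of 4 the mask starts at 0xf; the first
 * policy requesting label storage receives slot 0 and the mask becomes 0xe,
 * and that bit is never returned to the pool.  The function name is
 * hypothetical.
 */
#if 0
static int
mac_slot_alloc_sketch(void)
{
	int slot;

	slot = ffs(mac_slot_offsets_free);	/* 1-based index of lowest set bit. */
	if (slot == 0)
		return (-1);			/* No free slots remain. */
	slot--;
	mac_slot_offsets_free &= ~(1 << slot);	/* Slots are never reused. */
	return (slot);
}
#endif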
173 
174 /*
175  * Has the kernel started generating labeled objects yet?  All read/write
176  * access to this variable is serialized during the boot process.  Following
177  * the end of serialization, we don't update this flag; no locking.
178  */
179 static int	mac_late = 0;
180 
181 /*
182  * Each policy declares a mask of object types requiring labels to be
183  * allocated for them.  For convenience, we combine and cache the bitwise or
184  * of the per-policy object flags to track whether we will allocate a label
185  * for an object type at run-time.
186  */
187 uint64_t	mac_labeled;
188 SYSCTL_UQUAD(_security_mac, OID_AUTO, labeled, CTLFLAG_RD, &mac_labeled, 0,
189     "Mask of object types being labeled");
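
/*
 * Illustrative sketch of how mac_labeled is consulted when an object is
 * created, so label storage is only allocated if some loaded policy labels
 * that object type.  This approximates the pattern used by the per-object
 * init routines elsewhere in the framework; the wrapper name is
 * hypothetical, while mac_labelzone_alloc() is the framework's label
 * allocator.
 */
#if 0
static struct label *
mac_example_new_vnode_label(void)
{

	if ((mac_labeled & MPC_OBJECT_VNODE) == 0)
		return (NULL);	/* No policy labels vnodes; skip allocation. */
	return (mac_labelzone_alloc(M_WAITOK));
}
#endif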
190 
191 MALLOC_DEFINE(M_MACTEMP, "mactemp", "MAC temporary label storage");
192 
193 /*
194  * MAC policy modules are placed in one of two lists: mac_static_policy_list,
195  * for policies that are loaded early and cannot be unloaded, and
196  * mac_policy_list, which holds policies either loaded later in the boot
197  * cycle or that may be unloaded.  The static policy list does not require
198  * locks to iterate over, but the dynamic list requires synchronization.
199  * Support for dynamic policy loading can be compiled out using the
200  * MAC_STATIC kernel option.
201  *
202  * The dynamic policy list is protected by two locks: modifying the list
203  * requires both locks to be held exclusively.  One of the locks,
204  * mac_policy_rm, is acquired over policy entry points that will never sleep;
205  * the other, mac_policy_rms, is acquired over policy entry points that may
206  * sleep.  The former category will be used when kernel locks may be held
207  * over calls to the MAC Framework, during network processing in ithreads,
208  * etc.  The latter will tend to involve potentially blocking memory
209  * allocations, extended attribute I/O, etc.
210  */
211 #ifndef MAC_STATIC
212 static struct rmlock mac_policy_rm;	/* Non-sleeping entry points. */
213 static struct rmslock mac_policy_rms;	/* Sleeping entry points. */
214 #endif
215 
216 struct mac_policy_list_head mac_policy_list;
217 struct mac_policy_list_head mac_static_policy_list;
218 u_int mac_policy_count;			/* Registered policy count. */
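
/*
 * Illustrative sketch of how a single entry point is dispatched across both
 * lists: the static list is walked without locking, the dynamic list is
 * walked under the appropriate read lock, and per-policy results are folded
 * together with mac_error_select().  The real dispatch macros live in
 * mac_internal.h; the "mpo_example_check" hook and the function name are
 * hypothetical.
 */
#if 0
static int
mac_example_check_sketch(void *arg)
{
	struct mac_policy_conf *mpc;
	int error;

	error = 0;
	LIST_FOREACH(mpc, &mac_static_policy_list, mpc_list) {
		if (mpc->mpc_ops->mpo_example_check != NULL)
			error = mac_error_select(
			    mpc->mpc_ops->mpo_example_check(arg), error);
	}
	if (!LIST_EMPTY(&mac_policy_list)) {
		mac_policy_slock_sleep();
		LIST_FOREACH(mpc, &mac_policy_list, mpc_list) {
			if (mpc->mpc_ops->mpo_example_check != NULL)
				error = mac_error_select(
				    mpc->mpc_ops->mpo_example_check(arg),
				    error);
		}
		mac_policy_sunlock_sleep();
	}
	return (error);
}
#endif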
219 
220 static void	mac_policy_xlock(void);
221 static void	mac_policy_xlock_assert(void);
222 static void	mac_policy_xunlock(void);
223 
224 void
225 mac_policy_slock_nosleep(struct rm_priotracker *tracker)
226 {
227 
228 #ifndef MAC_STATIC
229 	if (!mac_late)
230 		return;
231 
232 	rm_rlock(&mac_policy_rm, tracker);
233 #endif
234 }
235 
236 void
237 mac_policy_slock_sleep(void)
238 {
239 
240 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
241  	    "mac_policy_slock_sleep");
242 
243 #ifndef MAC_STATIC
244 	if (!mac_late)
245 		return;
246 
247 	rms_rlock(&mac_policy_rms);
248 #endif
249 }
250 
251 void
252 mac_policy_sunlock_nosleep(struct rm_priotracker *tracker)
253 {
254 
255 #ifndef MAC_STATIC
256 	if (!mac_late)
257 		return;
258 
259 	rm_runlock(&mac_policy_rm, tracker);
260 #endif
261 }
262 
263 void
264 mac_policy_sunlock_sleep(void)
265 {
266 
267 #ifndef MAC_STATIC
268 	if (!mac_late)
269 		return;
270 
271 	rms_runlock(&mac_policy_rms);
272 #endif
273 }
274 
275 static void
276 mac_policy_xlock(void)
277 {
278 
279 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
280  	    "mac_policy_xlock()");
281 
282 #ifndef MAC_STATIC
283 	if (!mac_late)
284 		return;
285 
286 	rms_wlock(&mac_policy_rms);
287 	rm_wlock(&mac_policy_rm);
288 #endif
289 }
290 
291 static void
292 mac_policy_xunlock(void)
293 {
294 
295 #ifndef MAC_STATIC
296 	if (!mac_late)
297 		return;
298 
299 	rm_wunlock(&mac_policy_rm);
300 	rms_wunlock(&mac_policy_rms);
301 #endif
302 }
303 
304 static void
305 mac_policy_xlock_assert(void)
306 {
307 
308 #ifndef MAC_STATIC
309 	if (!mac_late)
310 		return;
311 
312 	rm_assert(&mac_policy_rm, RA_WLOCKED);
313 #endif
314 }
315 
316 /*
317  * Initialize the MAC subsystem, including appropriate SMP locks.
318  */
319 static void
320 mac_init(void)
321 {
322 
323 	LIST_INIT(&mac_static_policy_list);
324 	LIST_INIT(&mac_policy_list);
325 	mac_labelzone_init();
326 
327 #ifndef MAC_STATIC
328 	rm_init_flags(&mac_policy_rm, "mac_policy_rm", RM_NOWITNESS |
329 	    RM_RECURSE);
330 	rms_init(&mac_policy_rms, "mac_policy_rms");
331 #endif
332 }
333 
334 /*
335  * For the purposes of modules that want to know if they were loaded "early",
336  * set the mac_late flag once we've processed modules either linked into the
337  * kernel or loaded before kernel startup.
338  */
339 static void
340 mac_late_init(void)
341 {
342 
343 	mac_late = 1;
344 }
345 
346 /*
347  * Given a policy, derive from its set of non-NULL label init methods what
348  * object types the policy is interested in.
349  */
350 static uint64_t
351 mac_policy_getlabeled(struct mac_policy_conf *mpc)
352 {
353 	uint64_t labeled;
354 
355 #define	MPC_FLAG(method, flag)					\
356 	if (mpc->mpc_ops->mpo_ ## method != NULL)			\
357 		labeled |= (flag);					\
358 
359 	labeled = 0;
360 	MPC_FLAG(cred_init_label, MPC_OBJECT_CRED);
361 	MPC_FLAG(proc_init_label, MPC_OBJECT_PROC);
362 	MPC_FLAG(vnode_init_label, MPC_OBJECT_VNODE);
363 	MPC_FLAG(inpcb_init_label, MPC_OBJECT_INPCB);
364 	MPC_FLAG(socket_init_label, MPC_OBJECT_SOCKET);
365 	MPC_FLAG(devfs_init_label, MPC_OBJECT_DEVFS);
366 	MPC_FLAG(mbuf_init_label, MPC_OBJECT_MBUF);
367 	MPC_FLAG(ipq_init_label, MPC_OBJECT_IPQ);
368 	MPC_FLAG(ifnet_init_label, MPC_OBJECT_IFNET);
369 	MPC_FLAG(bpfdesc_init_label, MPC_OBJECT_BPFDESC);
370 	MPC_FLAG(pipe_init_label, MPC_OBJECT_PIPE);
371 	MPC_FLAG(mount_init_label, MPC_OBJECT_MOUNT);
372 	MPC_FLAG(posixsem_init_label, MPC_OBJECT_POSIXSEM);
373 	MPC_FLAG(posixshm_init_label, MPC_OBJECT_POSIXSHM);
374 	MPC_FLAG(sysvmsg_init_label, MPC_OBJECT_SYSVMSG);
375 	MPC_FLAG(sysvmsq_init_label, MPC_OBJECT_SYSVMSQ);
376 	MPC_FLAG(sysvsem_init_label, MPC_OBJECT_SYSVSEM);
377 	MPC_FLAG(sysvshm_init_label, MPC_OBJECT_SYSVSHM);
378 	MPC_FLAG(syncache_init_label, MPC_OBJECT_SYNCACHE);
379 	MPC_FLAG(ip6q_init_label, MPC_OBJECT_IP6Q);
380 
381 #undef MPC_FLAG
382 	return (labeled);
383 }
384 
385 /*
386  * When policies are loaded or unloaded, walk the list of registered policies
387  * and build mac_labeled, a bitmask representing the union of all objects
388  * requiring labels across all policies.
389  */
390 static void
391 mac_policy_update(void)
392 {
393 	struct mac_policy_conf *mpc;
394 
395 	mac_policy_xlock_assert();
396 
397 	mac_labeled = 0;
398 	mac_policy_count = 0;
399 	LIST_FOREACH(mpc, &mac_static_policy_list, mpc_list) {
400 		mac_labeled |= mac_policy_getlabeled(mpc);
401 		mac_policy_count++;
402 	}
403 	LIST_FOREACH(mpc, &mac_policy_list, mpc_list) {
404 		mac_labeled |= mac_policy_getlabeled(mpc);
405 		mac_policy_count++;
406 	}
407 
408 	cache_fast_lookup_enabled_recalc();
409 }
410 
411 /*
412  * There are frequently used code paths which check for rarely installed
413  * policies.  The gross hack below enables doing these checks cheaply.
414  */
415 
416 #define FPO(f)	(offsetof(struct mac_policy_ops, mpo_##f) / sizeof(uintptr_t))
417 
418 struct mac_policy_fastpath_elem {
419 	int	count;
420 	bool	*flag;
421 	size_t	offset;
422 };
423 
424 struct mac_policy_fastpath_elem mac_policy_fastpath_array[] = {
425 	{ .offset = FPO(priv_check), .flag = &mac_priv_check_fp_flag },
426 	{ .offset = FPO(priv_grant), .flag = &mac_priv_grant_fp_flag },
427 	{ .offset = FPO(vnode_check_lookup),
428 		.flag = &mac_vnode_check_lookup_fp_flag },
429 	{ .offset = FPO(vnode_check_readlink),
430 		.flag = &mac_vnode_check_readlink_fp_flag },
431 	{ .offset = FPO(vnode_check_open),
432 		.flag = &mac_vnode_check_open_fp_flag },
433 	{ .offset = FPO(vnode_check_stat),
434 		.flag = &mac_vnode_check_stat_fp_flag },
435 	{ .offset = FPO(vnode_check_read),
436 		.flag = &mac_vnode_check_read_fp_flag },
437 	{ .offset = FPO(vnode_check_write),
438 		.flag = &mac_vnode_check_write_fp_flag },
439 	{ .offset = FPO(vnode_check_mmap),
440 		.flag = &mac_vnode_check_mmap_fp_flag },
441 	{ .offset = FPO(vnode_check_poll),
442 		.flag = &mac_vnode_check_poll_fp_flag },
443 	{ .offset = FPO(vnode_check_rename_from),
444 		.flag = &mac_vnode_check_rename_from_fp_flag },
445 	{ .offset = FPO(vnode_check_access),
446 		.flag = &mac_vnode_check_access_fp_flag },
447 	{ .offset = FPO(pipe_check_stat),
448 		.flag = &mac_pipe_check_stat_fp_flag },
449 	{ .offset = FPO(pipe_check_poll),
450 		.flag = &mac_pipe_check_poll_fp_flag },
451 	{ .offset = FPO(pipe_check_read),
452 		.flag = &mac_pipe_check_read_fp_flag },
453 	{ .offset = FPO(ifnet_create_mbuf),
454 		.flag = &mac_ifnet_create_mbuf_fp_flag },
455 	{ .offset = FPO(ifnet_check_transmit),
456 		.flag = &mac_ifnet_check_transmit_fp_flag },
457 };
458 
459 static void
460 mac_policy_fastpath_enable(struct mac_policy_fastpath_elem *mpfe)
461 {
462 
463 	MPASS(mpfe->count >= 0);
464 	mpfe->count++;
465 	if (mpfe->count == 1) {
466 		MPASS(*mpfe->flag == false);
467 		*mpfe->flag = true;
468 	}
469 }
470 
471 static void
472 mac_policy_fastpath_disable(struct mac_policy_fastpath_elem *mpfe)
473 {
474 
475 	MPASS(mpfe->count >= 1);
476 	mpfe->count--;
477 	if (mpfe->count == 0) {
478 		MPASS(*mpfe->flag == true);
479 		*mpfe->flag = false;
480 	}
481 }
482 
483 static void
484 mac_policy_fastpath_register(struct mac_policy_conf *mpc)
485 {
486 	struct mac_policy_fastpath_elem *mpfe;
487 	uintptr_t **ops;
488 	int i;
489 
490 	mac_policy_xlock_assert();
491 
492 	ops = (uintptr_t **)mpc->mpc_ops;
493 	for (i = 0; i < nitems(mac_policy_fastpath_array); i++) {
494 		mpfe = &mac_policy_fastpath_array[i];
495 		if (ops[mpfe->offset] != NULL)
496 			mac_policy_fastpath_enable(mpfe);
497 	}
498 }
499 
500 static void
501 mac_policy_fastpath_unregister(struct mac_policy_conf *mpc)
502 {
503 	struct mac_policy_fastpath_elem *mpfe;
504 	uintptr_t **ops;
505 	int i;
506 
507 	mac_policy_xlock_assert();
508 
509 	ops = (uintptr_t **)mpc->mpc_ops;
510 	for (i = 0; i < nitems(mac_policy_fastpath_array); i++) {
511 		mpfe = &mac_policy_fastpath_array[i];
512 		if (ops[mpfe->offset] != NULL)
513 			mac_policy_fastpath_disable(mpfe);
514 	}
515 }
516 
517 #undef FPO
518 
519 static int
520 mac_policy_register(struct mac_policy_conf *mpc)
521 {
522 	struct mac_policy_conf *tmpc;
523 	int error, slot, static_entry;
524 
525 	error = 0;
526 
527 	/*
528 	 * We don't technically need exclusive access while !mac_late, but
529 	 * hold it for assertion consistency.
530 	 */
531 	mac_policy_xlock();
532 
533 	/*
534 	 * If the module can potentially be unloaded, or we're loading late,
535 	 * we have to stick it in the non-static list and pay an extra
536 	 * performance overhead.  Otherwise, we can pay a light locking cost
537 	 * and stick it in the static list.
538 	 */
539 	static_entry = (!mac_late &&
540 	    !(mpc->mpc_loadtime_flags & MPC_LOADTIME_FLAG_UNLOADOK));
541 
542 	if (static_entry) {
543 		LIST_FOREACH(tmpc, &mac_static_policy_list, mpc_list) {
544 			if (strcmp(tmpc->mpc_name, mpc->mpc_name) == 0) {
545 				error = EEXIST;
546 				goto out;
547 			}
548 		}
549 	} else {
550 		LIST_FOREACH(tmpc, &mac_policy_list, mpc_list) {
551 			if (strcmp(tmpc->mpc_name, mpc->mpc_name) == 0) {
552 				error = EEXIST;
553 				goto out;
554 			}
555 		}
556 	}
557 	if (mpc->mpc_field_off != NULL) {
558 		slot = ffs(mac_slot_offsets_free);
559 		if (slot == 0) {
560 			error = ENOMEM;
561 			goto out;
562 		}
563 		slot--;
564 		mac_slot_offsets_free &= ~(1 << slot);
565 		*mpc->mpc_field_off = slot;
566 	}
567 	mpc->mpc_runtime_flags |= MPC_RUNTIME_FLAG_REGISTERED;
568 
569 	/*
570 	 * If we're loading a MAC module after the framework has initialized,
571 	 * it has to go into the dynamic list.  If we're loading it before
572 	 * we've finished initializing, it can go into the static list with
573 	 * weaker locker requirements.
574 	 */
575 	if (static_entry)
576 		LIST_INSERT_HEAD(&mac_static_policy_list, mpc, mpc_list);
577 	else
578 		LIST_INSERT_HEAD(&mac_policy_list, mpc, mpc_list);
579 
580 	/*
581 	 * Per-policy initialization.  Currently, this takes place under the
582 	 * exclusive lock, so policies must not sleep in their init method.
583 	 * In the future, we may want to separate "init" from "start", with
584 	 * "init" occurring without the lock held.  Likewise, on tear-down,
585 	 * breaking out "stop" from "destroy".
586 	 */
587 	if (mpc->mpc_ops->mpo_init != NULL)
588 		(*(mpc->mpc_ops->mpo_init))(mpc);
589 
590 	mac_policy_fastpath_register(mpc);
591 
592 	mac_policy_update();
593 
594 	SDT_PROBE1(mac, , policy, register, mpc);
595 	printf("Security policy loaded: %s (%s)\n", mpc->mpc_fullname,
596 	    mpc->mpc_name);
597 
598 out:
599 	mac_policy_xunlock();
600 	return (error);
601 }
602 
603 static int
604 mac_policy_unregister(struct mac_policy_conf *mpc)
605 {
606 
607 	/*
608 	 * If we fail the load, we may get a request to unload.  Check to see
609 	 * if we did the run-time registration, and if not, silently succeed.
610 	 */
611 	mac_policy_xlock();
612 	if ((mpc->mpc_runtime_flags & MPC_RUNTIME_FLAG_REGISTERED) == 0) {
613 		mac_policy_xunlock();
614 		return (0);
615 	}
616 #if 0
617 	/*
618 	 * Don't allow unloading modules with private data.
619 	 */
620 	if (mpc->mpc_field_off != NULL) {
621 		mac_policy_xunlock();
622 		return (EBUSY);
623 	}
624 #endif
625 	/*
626 	 * Only allow the unload to proceed if the module is unloadable by
627 	 * its own definition.
628 	 */
629 	if ((mpc->mpc_loadtime_flags & MPC_LOADTIME_FLAG_UNLOADOK) == 0) {
630 		mac_policy_xunlock();
631 		return (EBUSY);
632 	}
633 
634 	mac_policy_fastpath_unregister(mpc);
635 
636 	if (mpc->mpc_ops->mpo_destroy != NULL)
637 		(*(mpc->mpc_ops->mpo_destroy))(mpc);
638 
639 	LIST_REMOVE(mpc, mpc_list);
640 	mpc->mpc_runtime_flags &= ~MPC_RUNTIME_FLAG_REGISTERED;
641 	mac_policy_update();
642 	mac_policy_xunlock();
643 
644 	SDT_PROBE1(mac, , policy, unregister, mpc);
645 	printf("Security policy unload: %s (%s)\n", mpc->mpc_fullname,
646 	    mpc->mpc_name);
647 
648 	return (0);
649 }
650 
651 /*
652  * Allow MAC policy modules to register during boot, etc.
653  */
654 int
655 mac_policy_modevent(module_t mod, int type, void *data)
656 {
657 	struct mac_policy_conf *mpc;
658 	int error;
659 
660 	error = 0;
661 	mpc = (struct mac_policy_conf *) data;
662 
663 #ifdef MAC_STATIC
664 	if (mac_late) {
665 		printf("mac_policy_modevent: MAC_STATIC and late\n");
666 		return (EBUSY);
667 	}
668 #endif
669 
670 	SDT_PROBE2(mac, , policy, modevent, type, mpc);
671 	switch (type) {
672 	case MOD_LOAD:
673 		if (mpc->mpc_loadtime_flags & MPC_LOADTIME_FLAG_NOTLATE &&
674 		    mac_late) {
675 			printf("mac_policy_modevent: can't load %s policy "
676 			    "after booting\n", mpc->mpc_name);
677 			error = EBUSY;
678 			break;
679 		}
680 		error = mac_policy_register(mpc);
681 		break;
682 	case MOD_UNLOAD:
683 		/* Don't unregister the module if it was never registered. */
684 		if ((mpc->mpc_runtime_flags & MPC_RUNTIME_FLAG_REGISTERED)
685 		    != 0)
686 			error = mac_policy_unregister(mpc);
687 		else
688 			error = 0;
689 		break;
690 	default:
691 		error = EOPNOTSUPP;
692 		break;
693 	}
694 
695 	return (error);
696 }
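
/*
 * Illustrative sketch of the module side of this interface: a minimal MAC
 * policy wires a mac_policy_ops structure into the framework with
 * MAC_POLICY_SET() (see mac_policy.h), which declares a kernel module whose
 * events are delivered to mac_policy_modevent() above.  Modeled loosely on
 * mac_none(4)/mac_stub(4); the "mac_example" name and its single hook are
 * hypothetical.
 */
#if 0
static int
mac_example_priv_check(struct ucred *cred, int priv)
{

	/* Neither grant nor deny anything; defer to other policies. */
	return (0);
}

static struct mac_policy_ops mac_example_ops = {
	.mpo_priv_check = mac_example_priv_check,
};

MAC_POLICY_SET(&mac_example_ops, mac_example, "TrustedBSD MAC/Example",
    MPC_LOADTIME_FLAG_UNLOADOK, NULL);
#endif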
697 
698 /*
699  * Define an error value precedence and, given two arguments, select the
700  * value with the higher precedence.
701  */
702 int
703 mac_error_select(int error1, int error2)
704 {
705 
706 	/* Certain decision-making errors take top priority. */
707 	if (error1 == EDEADLK || error2 == EDEADLK)
708 		return (EDEADLK);
709 
710 	/* Invalid arguments should be reported where possible. */
711 	if (error1 == EINVAL || error2 == EINVAL)
712 		return (EINVAL);
713 
714 	/* Precedence goes to "visibility", with both process and file. */
715 	if (error1 == ESRCH || error2 == ESRCH)
716 		return (ESRCH);
717 
718 	if (error1 == ENOENT || error2 == ENOENT)
719 		return (ENOENT);
720 
721 	/* Precedence goes to DAC/MAC protections. */
722 	if (error1 == EACCES || error2 == EACCES)
723 		return (EACCES);
724 
725 	/* Precedence goes to privilege. */
726 	if (error1 == EPERM || error2 == EPERM)
727 		return (EPERM);
728 
729 	/* Precedence goes to error over success; otherwise, arbitrary. */
730 	if (error1 != 0)
731 		return (error1);
732 	return (error2);
733 }
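
/*
 * Worked example (illustrative): folding three per-policy results together,
 * as the dispatch code does.  EPERM takes precedence over success, and
 * EACCES in turn takes precedence over EPERM, so the caller ultimately sees
 * EACCES.
 */
#if 0
static int
mac_error_select_example(void)
{
	int error;

	error = 0;
	error = mac_error_select(EPERM, error);		/* -> EPERM */
	error = mac_error_select(EACCES, error);	/* -> EACCES */
	error = mac_error_select(0, error);		/* -> EACCES */
	return (error);
}
#endif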
734 
735 int
736 mac_check_structmac_consistent(struct mac *mac)
737 {
738 
739 	/* Require that labels have a non-zero length. */
740 	if (mac->m_buflen > MAC_MAX_LABEL_BUF_LEN ||
741 	    mac->m_buflen <= sizeof(""))
742 		return (EINVAL);
743 
744 	return (0);
745 }
746 
747 SYSINIT(mac, SI_SUB_MAC, SI_ORDER_FIRST, mac_init, NULL);
748 SYSINIT(mac_late, SI_SUB_MAC_LATE, SI_ORDER_FIRST, mac_late_init, NULL);
749