/*-
 * Copyright (c) 1999-2002, 2006, 2009 Robert N. M. Watson
 * Copyright (c) 2001 Ilmar S. Habibulin
 * Copyright (c) 2001-2005 Networks Associates Technology, Inc.
 * Copyright (c) 2005-2006 SPARTA, Inc.
 * Copyright (c) 2008-2009 Apple Inc.
 * All rights reserved.
 *
 * This software was developed by Robert Watson and Ilmar Habibulin for the
 * TrustedBSD Project.
 *
 * This software was developed for the FreeBSD Project in part by Network
 * Associates Laboratories, the Security Research Division of Network
 * Associates, Inc. under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"),
 * as part of the DARPA CHATS research program.
 *
 * This software was enhanced by SPARTA ISSO under SPAWAR contract
 * N66001-04-C-6019 ("SEFOS").
 *
 * This software was developed at the University of Cambridge Computer
 * Laboratory with support from a grant from Google, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*-
 * Framework for extensible kernel access control.  This file contains core
 * kernel infrastructure for the TrustedBSD MAC Framework, including policy
 * registration, versioning, locking, the error composition operator, and
 * system calls.
 *
 * The MAC Framework implements three programming interfaces:
 *
 * - The kernel MAC interface, defined in mac_framework.h, and invoked
 *   throughout the kernel to request security decisions, notify the
 *   framework of security-related events, etc.
 *
 * - The MAC policy module interface, defined in mac_policy.h, which is
 *   implemented by MAC policy modules and invoked by the MAC Framework to
 *   forward kernel security requests and notifications to policy modules.
 *
 * - The user MAC API, defined in mac.h, which allows user programs to query
 *   and set label state on objects.
 *
 * The majority of the MAC Framework implementation may be found in
 * src/sys/security/mac.  Sample policy modules may be found in
 * src/sys/security/mac_*.
 */
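
/*
 * Illustrative sketch (not part of the framework itself): a kernel consumer
 * asks the framework for an access-control decision through the kernel MAC
 * interface and treats any non-zero return as a denial, roughly:
 *
 *      error = mac_vnode_check_open(cred, vp, accmode);
 *      if (error != 0)
 *              return (error);
 *
 * The authoritative set of entry points and their exact signatures is
 * defined in mac_framework.h; the call above is shown only as an example of
 * the calling convention.
 */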

#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mac.h>
#include <sys/module.h>
#include <sys/rmlock.h>
#include <sys/sdt.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <security/mac/mac_framework.h>
#include <security/mac/mac_internal.h>
#include <security/mac/mac_policy.h>

/*
 * DTrace SDT providers for MAC.
 */
SDT_PROVIDER_DEFINE(mac);
SDT_PROVIDER_DEFINE(mac_framework);

SDT_PROBE_DEFINE2(mac, , policy, modevent, "int",
    "struct mac_policy_conf *");
SDT_PROBE_DEFINE1(mac, , policy, register,
    "struct mac_policy_conf *");
SDT_PROBE_DEFINE1(mac, , policy, unregister,
    "struct mac_policy_conf *");

/*
 * Root sysctl node for all MAC and MAC policy controls.
 */
SYSCTL_NODE(_security, OID_AUTO, mac, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TrustedBSD MAC policy controls");

/*
 * Declare that the kernel provides MAC support, version 3 (FreeBSD 7.x).
 * This permits modules to refuse to be loaded if the necessary support isn't
 * present, even if it's pre-boot.
 */
MODULE_VERSION(kernel_mac_support, MAC_VERSION);

static unsigned int mac_version = MAC_VERSION;
SYSCTL_UINT(_security_mac, OID_AUTO, version, CTLFLAG_RD, &mac_version, 0,
    "");

/*
 * Flags for inlined checks.  Note these would ideally be hotpatched at
 * runtime; the following is a band-aid.
 *
 * Use FPFLAG for hooks running in commonly executed paths and FPFLAG_RARE
 * for the rest.  A usage sketch follows the definitions below.
 */
#define FPFLAG(f) \
        bool __read_frequently mac_##f##_fp_flag

#define FPFLAG_RARE(f) \
        bool __read_mostly mac_##f##_fp_flag

FPFLAG(priv_check);
FPFLAG(priv_grant);
FPFLAG(vnode_check_lookup);
FPFLAG(vnode_check_open);
FPFLAG(vnode_check_stat);
FPFLAG(vnode_check_read);
FPFLAG(vnode_check_write);
FPFLAG(vnode_check_mmap);
FPFLAG_RARE(vnode_check_poll);
FPFLAG_RARE(vnode_check_rename_from);
FPFLAG_RARE(vnode_check_access);
FPFLAG_RARE(vnode_check_readlink);
FPFLAG_RARE(pipe_check_stat);
FPFLAG_RARE(pipe_check_poll);
FPFLAG_RARE(pipe_check_read);
FPFLAG_RARE(ifnet_create_mbuf);
FPFLAG_RARE(ifnet_check_transmit);

#undef FPFLAG
#undef FPFLAG_RARE
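
/*
 * Illustrative sketch (assumed consumer pattern, not defined in this file):
 * an inline wrapper in mac_framework.h may test the corresponding flag
 * before taking the full framework path, roughly:
 *
 *      if (__predict_false(mac_vnode_check_open_fp_flag))
 *              return (mac_vnode_check_open_impl(cred, vp, accmode));
 *      return (0);
 *
 * The "_impl" name above is hypothetical; consult mac_framework.h for the
 * real wrappers that consume these flags.
 */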

/*
 * Labels consist of an indexed set of "slots", which are allocated to
 * policies as required.  The MAC Framework maintains a bitmask of slots
 * allocated so far to prevent reuse.  Slots cannot be reused, as the MAC
 * Framework guarantees that newly allocated slots in labels will be NULL
 * unless otherwise initialized, and because we do not have a mechanism to
 * garbage collect slots on policy unload.  As labeled policies tend to be
 * statically loaded during boot, and not frequently unloaded and reloaded,
 * this is not generally an issue.
 */
#if MAC_MAX_SLOTS > 32
#error "MAC_MAX_SLOTS too large"
#endif

static unsigned int mac_max_slots = MAC_MAX_SLOTS;
static unsigned int mac_slot_offsets_free = (1 << MAC_MAX_SLOTS) - 1;
SYSCTL_UINT(_security_mac, OID_AUTO, max_slots, CTLFLAG_RD, &mac_max_slots,
    0, "");
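
/*
 * Illustrative sketch (policy-side view, assuming the policy requested
 * per-object data by passing a slot pointer in its mac_policy_conf): a
 * labeled policy typically wraps its allocated slot number in a macro and
 * stores per-object state through the label accessors from mac_policy.h:
 *
 *      static int mypolicy_slot;
 *      #define SLOT(l)         mac_label_get((l), mypolicy_slot)
 *      #define SLOT_SET(l, v)  mac_label_set((l), mypolicy_slot, (v))
 *
 * "mypolicy_slot" is a placeholder name; mac_label_get()/mac_label_set()
 * are the accessors policies are expected to use.
 */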

/*
 * Has the kernel started generating labeled objects yet?  All read/write
 * access to this variable is serialized during the boot process.  Following
 * the end of serialization, we don't update this flag; no locking.
 */
static int mac_late = 0;

/*
 * Each policy declares a mask of object types requiring labels to be
 * allocated for them.  For convenience, we combine and cache the bitwise OR
 * of the per-policy object flags to track whether we will allocate a label
 * for an object type at run-time.
 */
uint64_t mac_labeled;
SYSCTL_UQUAD(_security_mac, OID_AUTO, labeled, CTLFLAG_RD, &mac_labeled, 0,
    "Mask of object types being labeled");

MALLOC_DEFINE(M_MACTEMP, "mactemp", "MAC temporary label storage");

/*
 * MAC policy modules are placed in one of two lists: mac_static_policy_list,
 * for policies that are loaded early and cannot be unloaded, and
 * mac_policy_list, which holds policies either loaded later in the boot
 * cycle or that may be unloaded.  The static policy list does not require
 * locks to iterate over, but the dynamic list requires synchronization.
 * Support for dynamic policy loading can be compiled out using the
 * MAC_STATIC kernel option.
 *
 * The dynamic policy list is protected by two locks: modifying the list
 * requires both locks to be held exclusively.  One of the locks,
 * mac_policy_rm, is acquired over policy entry points that will never sleep;
 * the other, mac_policy_rms, is acquired over policy entry points that may
 * sleep.  The former category will be used when kernel locks may be held
 * over calls to the MAC Framework, during network processing in ithreads,
 * etc.  The latter will tend to involve potentially blocking memory
 * allocations, extended attribute I/O, etc.
 */
#ifndef MAC_STATIC
static struct rmlock mac_policy_rm;     /* Non-sleeping entry points. */
static struct rmslock mac_policy_rms;   /* Sleeping entry points. */
#endif

struct mac_policy_list_head mac_policy_list;
struct mac_policy_list_head mac_static_policy_list;
u_int mac_policy_count;                 /* Registered policy count. */

static void mac_policy_xlock(void);
static void mac_policy_xlock_assert(void);
static void mac_policy_xunlock(void);

void
mac_policy_slock_nosleep(struct rm_priotracker *tracker)
{

#ifndef MAC_STATIC
        if (!mac_late)
                return;

        rm_rlock(&mac_policy_rm, tracker);
#endif
}

void
mac_policy_slock_sleep(void)
{

        WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
            "mac_policy_slock_sleep");

#ifndef MAC_STATIC
        if (!mac_late)
                return;

        rms_rlock(&mac_policy_rms);
#endif
}

void
mac_policy_sunlock_nosleep(struct rm_priotracker *tracker)
{

#ifndef MAC_STATIC
        if (!mac_late)
                return;

        rm_runlock(&mac_policy_rm, tracker);
#endif
}

void
mac_policy_sunlock_sleep(void)
{

#ifndef MAC_STATIC
        if (!mac_late)
                return;

        rms_runlock(&mac_policy_rms);
#endif
}
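
/*
 * Illustrative sketch (assumed caller pattern): framework entry points that
 * cannot sleep bracket their walk of the dynamic policy list with the
 * non-sleeping read lock, roughly:
 *
 *      struct rm_priotracker tracker;
 *
 *      mac_policy_slock_nosleep(&tracker);
 *      LIST_FOREACH(mpc, &mac_policy_list, mpc_list) {
 *              ... invoke the policy's entry point ...
 *      }
 *      mac_policy_sunlock_nosleep(&tracker);
 *
 * Entry points that may sleep use mac_policy_slock_sleep() and
 * mac_policy_sunlock_sleep() instead; the static list needs no locking.
 */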

static void
mac_policy_xlock(void)
{

        WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
            "mac_policy_xlock()");

#ifndef MAC_STATIC
        if (!mac_late)
                return;

        rms_wlock(&mac_policy_rms);
        rm_wlock(&mac_policy_rm);
#endif
}

static void
mac_policy_xunlock(void)
{

#ifndef MAC_STATIC
        if (!mac_late)
                return;

        rm_wunlock(&mac_policy_rm);
        rms_wunlock(&mac_policy_rms);
#endif
}

static void
mac_policy_xlock_assert(void)
{

#ifndef MAC_STATIC
        if (!mac_late)
                return;

        rm_assert(&mac_policy_rm, RA_WLOCKED);
#endif
}

/*
 * Initialize the MAC subsystem, including appropriate SMP locks.
 */
static void
mac_init(void)
{

        LIST_INIT(&mac_static_policy_list);
        LIST_INIT(&mac_policy_list);
        mac_labelzone_init();

#ifndef MAC_STATIC
        rm_init_flags(&mac_policy_rm, "mac_policy_rm", RM_NOWITNESS |
            RM_RECURSE);
        rms_init(&mac_policy_rms, "mac_policy_rms");
#endif
}

/*
 * For the purposes of modules that want to know if they were loaded
 * "early", set the mac_late flag once we've processed modules either linked
 * into the kernel or loaded before kernel startup.
 */
static void
mac_late_init(void)
{

        mac_late = 1;
}

/*
 * Given a policy, derive from its set of non-NULL label init methods what
 * object types the policy is interested in.
 */
static uint64_t
mac_policy_getlabeled(struct mac_policy_conf *mpc)
{
        uint64_t labeled;

#define MPC_FLAG(method, flag) \
        if (mpc->mpc_ops->mpo_ ## method != NULL) \
                labeled |= (flag); \

        labeled = 0;
        MPC_FLAG(cred_init_label, MPC_OBJECT_CRED);
        MPC_FLAG(proc_init_label, MPC_OBJECT_PROC);
        MPC_FLAG(vnode_init_label, MPC_OBJECT_VNODE);
        MPC_FLAG(inpcb_init_label, MPC_OBJECT_INPCB);
        MPC_FLAG(socket_init_label, MPC_OBJECT_SOCKET);
        MPC_FLAG(devfs_init_label, MPC_OBJECT_DEVFS);
        MPC_FLAG(mbuf_init_label, MPC_OBJECT_MBUF);
        MPC_FLAG(ipq_init_label, MPC_OBJECT_IPQ);
        MPC_FLAG(ifnet_init_label, MPC_OBJECT_IFNET);
        MPC_FLAG(bpfdesc_init_label, MPC_OBJECT_BPFDESC);
        MPC_FLAG(pipe_init_label, MPC_OBJECT_PIPE);
        MPC_FLAG(mount_init_label, MPC_OBJECT_MOUNT);
        MPC_FLAG(posixsem_init_label, MPC_OBJECT_POSIXSEM);
        MPC_FLAG(posixshm_init_label, MPC_OBJECT_POSIXSHM);
        MPC_FLAG(sysvmsg_init_label, MPC_OBJECT_SYSVMSG);
        MPC_FLAG(sysvmsq_init_label, MPC_OBJECT_SYSVMSQ);
        MPC_FLAG(sysvsem_init_label, MPC_OBJECT_SYSVSEM);
        MPC_FLAG(sysvshm_init_label, MPC_OBJECT_SYSVSHM);
        MPC_FLAG(syncache_init_label, MPC_OBJECT_SYNCACHE);
        MPC_FLAG(ip6q_init_label, MPC_OBJECT_IP6Q);

#undef MPC_FLAG
        return (labeled);
}

/*
 * When policies are loaded or unloaded, walk the list of registered
 * policies and build mac_labeled, a bitmask representing the union of all
 * objects requiring labels across all policies.
 */
static void
mac_policy_update(void)
{
        struct mac_policy_conf *mpc;

        mac_policy_xlock_assert();

        mac_labeled = 0;
        mac_policy_count = 0;
        LIST_FOREACH(mpc, &mac_static_policy_list, mpc_list) {
                mac_labeled |= mac_policy_getlabeled(mpc);
                mac_policy_count++;
        }
        LIST_FOREACH(mpc, &mac_policy_list, mpc_list) {
                mac_labeled |= mac_policy_getlabeled(mpc);
                mac_policy_count++;
        }

        cache_fast_lookup_enabled_recalc();
}
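
/*
 * Illustrative sketch (assumed consumer pattern): the per-object files under
 * src/sys/security/mac consult mac_labeled so that label storage is only
 * allocated when at least one registered policy wants it, roughly:
 *
 *      if (mac_labeled & MPC_OBJECT_VNODE)
 *              ... allocate and attach label storage to the vnode ...
 *
 * The exact placement of these tests lives in the object-specific code, not
 * here; the fragment above is shown only to indicate how the mask is used.
 */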

/*
 * Frequently used code paths check for rarely installed policies.  The
 * gross hack below lets those checks be made cheaply.
 */

#define FPO(f) (offsetof(struct mac_policy_ops, mpo_##f) / sizeof(uintptr_t))

struct mac_policy_fastpath_elem {
        int count;
        bool *flag;
        size_t offset;
};

struct mac_policy_fastpath_elem mac_policy_fastpath_array[] = {
        { .offset = FPO(priv_check), .flag = &mac_priv_check_fp_flag },
        { .offset = FPO(priv_grant), .flag = &mac_priv_grant_fp_flag },
        { .offset = FPO(vnode_check_lookup),
            .flag = &mac_vnode_check_lookup_fp_flag },
        { .offset = FPO(vnode_check_readlink),
            .flag = &mac_vnode_check_readlink_fp_flag },
        { .offset = FPO(vnode_check_open),
            .flag = &mac_vnode_check_open_fp_flag },
        { .offset = FPO(vnode_check_stat),
            .flag = &mac_vnode_check_stat_fp_flag },
        { .offset = FPO(vnode_check_read),
            .flag = &mac_vnode_check_read_fp_flag },
        { .offset = FPO(vnode_check_write),
            .flag = &mac_vnode_check_write_fp_flag },
        { .offset = FPO(vnode_check_mmap),
            .flag = &mac_vnode_check_mmap_fp_flag },
        { .offset = FPO(vnode_check_poll),
            .flag = &mac_vnode_check_poll_fp_flag },
        { .offset = FPO(vnode_check_rename_from),
            .flag = &mac_vnode_check_rename_from_fp_flag },
        { .offset = FPO(vnode_check_access),
            .flag = &mac_vnode_check_access_fp_flag },
        { .offset = FPO(pipe_check_stat),
            .flag = &mac_pipe_check_stat_fp_flag },
        { .offset = FPO(pipe_check_poll),
            .flag = &mac_pipe_check_poll_fp_flag },
        { .offset = FPO(pipe_check_read),
            .flag = &mac_pipe_check_read_fp_flag },
        { .offset = FPO(ifnet_create_mbuf),
            .flag = &mac_ifnet_create_mbuf_fp_flag },
        { .offset = FPO(ifnet_check_transmit),
            .flag = &mac_ifnet_check_transmit_fp_flag },
};

static void
mac_policy_fastpath_enable(struct mac_policy_fastpath_elem *mpfe)
{

        MPASS(mpfe->count >= 0);
        mpfe->count++;
        if (mpfe->count == 1) {
                MPASS(*mpfe->flag == false);
                *mpfe->flag = true;
        }
}

static void
mac_policy_fastpath_disable(struct mac_policy_fastpath_elem *mpfe)
{

        MPASS(mpfe->count >= 1);
        mpfe->count--;
        if (mpfe->count == 0) {
                MPASS(*mpfe->flag == true);
                *mpfe->flag = false;
        }
}

static void
mac_policy_fastpath_register(struct mac_policy_conf *mpc)
{
        struct mac_policy_fastpath_elem *mpfe;
        uintptr_t **ops;
        int i;

        mac_policy_xlock_assert();

        ops = (uintptr_t **)mpc->mpc_ops;
        for (i = 0; i < nitems(mac_policy_fastpath_array); i++) {
                mpfe = &mac_policy_fastpath_array[i];
                if (ops[mpfe->offset] != NULL)
                        mac_policy_fastpath_enable(mpfe);
        }
}

static void
mac_policy_fastpath_unregister(struct mac_policy_conf *mpc)
{
        struct mac_policy_fastpath_elem *mpfe;
        uintptr_t **ops;
        int i;

        mac_policy_xlock_assert();

        ops = (uintptr_t **)mpc->mpc_ops;
        for (i = 0; i < nitems(mac_policy_fastpath_array); i++) {
                mpfe = &mac_policy_fastpath_array[i];
                if (ops[mpfe->offset] != NULL)
                        mac_policy_fastpath_disable(mpfe);
        }
}

#undef FPO

static int
mac_policy_register(struct mac_policy_conf *mpc)
{
        struct mac_policy_list_head *mpc_list;
        struct mac_policy_conf *last_mpc, *tmpc;
        int error, slot, static_entry;

        error = 0;

        /*
         * We don't technically need exclusive access while !mac_late, but
         * hold it for assertion consistency.
         */
        mac_policy_xlock();

        /*
         * If the module can potentially be unloaded, or we're loading late,
         * we have to stick it in the non-static list and pay an extra
         * performance overhead.  Otherwise, we can pay a light locking cost
         * and stick it in the static list.
         */
        static_entry = (!mac_late &&
            !(mpc->mpc_loadtime_flags & MPC_LOADTIME_FLAG_UNLOADOK));

        mpc_list = (static_entry) ? &mac_static_policy_list :
            &mac_policy_list;
        last_mpc = NULL;
        LIST_FOREACH(tmpc, mpc_list, mpc_list) {
                last_mpc = tmpc;
                if (strcmp(tmpc->mpc_name, mpc->mpc_name) == 0) {
                        error = EEXIST;
                        goto out;
                }
        }
        if (mpc->mpc_field_off != NULL) {
                slot = ffs(mac_slot_offsets_free);
                if (slot == 0) {
                        error = ENOMEM;
                        goto out;
                }
                slot--;
                mac_slot_offsets_free &= ~(1 << slot);
                *mpc->mpc_field_off = slot;
        }
        mpc->mpc_runtime_flags |= MPC_RUNTIME_FLAG_REGISTERED;

        /*
         * Some modules may depend on the operations of their dependencies.
         * Inserting modules in order of registration ensures that operations
         * which work on the module list retain dependency order.
         */
        if (last_mpc == NULL)
                LIST_INSERT_HEAD(mpc_list, mpc, mpc_list);
        else
                LIST_INSERT_AFTER(last_mpc, mpc, mpc_list);
        /*
         * Per-policy initialization.  Currently, this takes place under the
         * exclusive lock, so policies must not sleep in their init method.
         * In the future, we may want to separate "init" from "start", with
         * "init" occurring without the lock held.  Likewise, on tear-down,
         * breaking out "stop" from "destroy".
         */
        if (mpc->mpc_ops->mpo_init != NULL)
                (*(mpc->mpc_ops->mpo_init))(mpc);

        mac_policy_fastpath_register(mpc);

        mac_policy_update();

        SDT_PROBE1(mac, , policy, register, mpc);
        printf("Security policy loaded: %s (%s)\n", mpc->mpc_fullname,
            mpc->mpc_name);

out:
        mac_policy_xunlock();
        return (error);
}

static int
mac_policy_unregister(struct mac_policy_conf *mpc)
{

        /*
         * If we fail the load, we may get a request to unload.  Check to see
         * if we did the run-time registration, and if not, silently succeed.
         */
        mac_policy_xlock();
        if ((mpc->mpc_runtime_flags & MPC_RUNTIME_FLAG_REGISTERED) == 0) {
                mac_policy_xunlock();
                return (0);
        }
#if 0
        /*
         * Don't allow unloading modules with private data.
         */
        if (mpc->mpc_field_off != NULL) {
                mac_policy_xunlock();
                return (EBUSY);
        }
#endif
        /*
         * Only allow the unload to proceed if the module is unloadable by
         * its own definition.
         */
        if ((mpc->mpc_loadtime_flags & MPC_LOADTIME_FLAG_UNLOADOK) == 0) {
                mac_policy_xunlock();
                return (EBUSY);
        }

        mac_policy_fastpath_unregister(mpc);

        if (mpc->mpc_ops->mpo_destroy != NULL)
                (*(mpc->mpc_ops->mpo_destroy))(mpc);

        LIST_REMOVE(mpc, mpc_list);
        mpc->mpc_runtime_flags &= ~MPC_RUNTIME_FLAG_REGISTERED;
        mac_policy_update();
        mac_policy_xunlock();

        SDT_PROBE1(mac, , policy, unregister, mpc);
        printf("Security policy unload: %s (%s)\n", mpc->mpc_fullname,
            mpc->mpc_name);

        return (0);
}

/*
 * Allow MAC policy modules to register during boot, etc.
 */
int
mac_policy_modevent(module_t mod, int type, void *data)
{
        struct mac_policy_conf *mpc;
        int error;

        error = 0;
        mpc = (struct mac_policy_conf *) data;

#ifdef MAC_STATIC
        if (mac_late) {
                printf("mac_policy_modevent: MAC_STATIC and late\n");
                return (EBUSY);
        }
#endif

        SDT_PROBE2(mac, , policy, modevent, type, mpc);
        switch (type) {
        case MOD_LOAD:
                if (mpc->mpc_loadtime_flags & MPC_LOADTIME_FLAG_NOTLATE &&
                    mac_late) {
                        printf("mac_policy_modevent: can't load %s policy "
                            "after booting\n", mpc->mpc_name);
                        error = EBUSY;
                        break;
                }
                error = mac_policy_register(mpc);
                break;
        case MOD_UNLOAD:
                /* Don't unregister the module if it was never registered. */
                if ((mpc->mpc_runtime_flags & MPC_RUNTIME_FLAG_REGISTERED)
                    != 0)
                        error = mac_policy_unregister(mpc);
                else
                        error = 0;
                break;
        default:
                error = EOPNOTSUPP;
                break;
        }

        return (error);
}
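
/*
 * Illustrative sketch (assumed module boilerplate): a policy module does not
 * call mac_policy_modevent() directly; it declares itself with the
 * MAC_POLICY_SET() macro from mac_policy.h, which arranges for the module
 * system to deliver MOD_LOAD/MOD_UNLOAD events here, roughly:
 *
 *      static struct mac_policy_ops mypolicy_ops = {
 *              .mpo_init = mypolicy_init,
 *              ...
 *      };
 *      MAC_POLICY_SET(&mypolicy_ops, mypolicy, "My Policy",
 *          MPC_LOADTIME_FLAG_UNLOADOK, NULL);
 *
 * The "mypolicy" names are placeholders; consult mac_policy.h and the sample
 * policies under src/sys/security/mac_* for the exact macro arguments.
 */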

/*
 * Define an error value precedence and, given two arguments, select the
 * value with the higher precedence.
 */
int
mac_error_select(int error1, int error2)
{

        /* Certain decision-making errors take top priority. */
        if (error1 == EDEADLK || error2 == EDEADLK)
                return (EDEADLK);

        /* Invalid arguments should be reported where possible. */
        if (error1 == EINVAL || error2 == EINVAL)
                return (EINVAL);

        /* Precedence goes to "visibility", with both process and file. */
        if (error1 == ESRCH || error2 == ESRCH)
                return (ESRCH);

        if (error1 == ENOENT || error2 == ENOENT)
                return (ENOENT);

        /* Precedence goes to DAC/MAC protections. */
        if (error1 == EACCES || error2 == EACCES)
                return (EACCES);

        /* Precedence goes to privilege. */
        if (error1 == EPERM || error2 == EPERM)
                return (EPERM);

        /* Precedence goes to error over success; otherwise, arbitrary. */
        if (error1 != 0)
                return (error1);
        return (error2);
}
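
/*
 * For example, composing a policy that returned EACCES with one that
 * returned EPERM yields EACCES, and composing any error with success yields
 * the error:
 *
 *      mac_error_select(EACCES, EPERM) == EACCES
 *      mac_error_select(0, EPERM) == EPERM
 */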

int
mac_check_structmac_consistent(const struct mac *mac)
{
        /* Require that labels have a non-zero length. */
        if (mac->m_buflen > MAC_MAX_LABEL_BUF_LEN ||
            mac->m_buflen <= sizeof(""))
                return (EINVAL);

        return (0);
}

SYSINIT(mac, SI_SUB_MAC, SI_ORDER_FIRST, mac_init, NULL);
SYSINIT(mac_late, SI_SUB_MAC_LATE, SI_ORDER_FIRST, mac_late_init, NULL);