/*-
 * Copyright (c) 1999-2002, 2008 Robert N. M. Watson
 * Copyright (c) 2001 Ilmar S. Habibulin
 * Copyright (c) 2001-2003 Networks Associates Technology, Inc.
 * Copyright (c) 2005 Samy Al Bahra
 * Copyright (c) 2006 SPARTA, Inc.
 * Copyright (c) 2008 Apple Inc.
 * All rights reserved.
 *
 * This software was developed by Robert Watson and Ilmar Habibulin for the
 * TrustedBSD Project.
 *
 * This software was developed for the FreeBSD Project in part by Network
 * Associates Laboratories, the Security Research Division of Network
 * Associates, Inc. under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"),
 * as part of the DARPA CHATS research program.
 *
 * This software was enhanced by SPARTA ISSO under SPAWAR contract
 * N66001-04-C-6019 ("SEFOS").
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
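/*
 * MAC Framework entry points relating to processes: allocation and
 * destruction of process labels, handling of labels supplied to execve(2),
 * revocation of memory mappings when a subject is relabeled, and the
 * access control checks invoked from the process management system calls.
 */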
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_mac.h"

#include <sys/param.h>
#include <sys/condvar.h>
#include <sys/imgact.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/mac.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/namei.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>

#include <security/mac/mac_framework.h>
#include <security/mac/mac_internal.h>
#include <security/mac/mac_policy.h>

static int	mac_mmap_revocation = 1;
SYSCTL_INT(_security_mac, OID_AUTO, mmap_revocation, CTLFLAG_RW,
    &mac_mmap_revocation, 0, "Revoke mmap access to files on subject "
    "relabel");

static int	mac_mmap_revocation_via_cow = 0;
SYSCTL_INT(_security_mac, OID_AUTO, mmap_revocation_via_cow, CTLFLAG_RW,
    &mac_mmap_revocation_via_cow, 0, "Revoke mmap access to files via "
    "copy-on-write semantics, or by removing all write access");

static void	mac_proc_vm_revoke_recurse(struct thread *td,
		    struct ucred *cred, struct vm_map *map);

static struct label *
mac_proc_label_alloc(void)
{
	struct label *label;

	label = mac_labelzone_alloc(M_WAITOK);
	MAC_PERFORM(proc_init_label, label);
	return (label);
}

void
mac_proc_init(struct proc *p)
{

	if (mac_labeled & MPC_OBJECT_PROC)
		p->p_label = mac_proc_label_alloc();
	else
		p->p_label = NULL;
}

static void
mac_proc_label_free(struct label *label)
{

	MAC_PERFORM(proc_destroy_label, label);
	mac_labelzone_free(label);
}

void
mac_proc_destroy(struct proc *p)
{

	if (p->p_label != NULL) {
		mac_proc_label_free(p->p_label);
		p->p_label = NULL;
	}
}

void
mac_thread_userret(struct thread *td)
{

	MAC_PERFORM(thread_userret, td);
}

int
mac_execve_enter(struct image_params *imgp, struct mac *mac_p)
{
	struct label *label;
	struct mac mac;
	char *buffer;
	int error;

	if (mac_p == NULL)
		return (0);

	if (!(mac_labeled & MPC_OBJECT_CRED))
		return (EINVAL);

	error = copyin(mac_p, &mac, sizeof(mac));
	if (error)
		return (error);

	error = mac_check_structmac_consistent(&mac);
	if (error)
		return (error);

	buffer = malloc(mac.m_buflen, M_MACTEMP, M_WAITOK);
	error = copyinstr(mac.m_string, buffer, mac.m_buflen, NULL);
	if (error) {
		free(buffer, M_MACTEMP);
		return (error);
	}

	label = mac_cred_label_alloc();
	error = mac_cred_internalize_label(label, buffer);
	free(buffer, M_MACTEMP);
	if (error) {
		mac_cred_label_free(label);
		return (error);
	}
	imgp->execlabel = label;
	return (0);
}

void
mac_execve_exit(struct image_params *imgp)
{
	if (imgp->execlabel != NULL) {
		mac_cred_label_free(imgp->execlabel);
		imgp->execlabel = NULL;
	}
}

void
mac_execve_interpreter_enter(struct vnode *interpvp,
    struct label **interpvplabel)
{

	if (mac_labeled & MPC_OBJECT_VNODE) {
		*interpvplabel = mac_vnode_label_alloc();
		mac_vnode_copy_label(interpvp->v_label, *interpvplabel);
	} else
		*interpvplabel = NULL;
}

void
mac_execve_interpreter_exit(struct label *interpvplabel)
{

	if (interpvplabel != NULL)
		mac_vnode_label_free(interpvplabel);
}
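/*
 * Illustrative sketch (not part of this file): mac_execve_enter() above
 * consumes the label argument of the mac_execve(2) system call.  Assuming a
 * label text that a loaded policy understands (the "biba/low" string below
 * is only an example), a userland caller might look roughly like:
 *
 *	#include <sys/mac.h>
 *
 *	mac_t label;
 *
 *	if (mac_from_text(&label, "biba/low") == 0) {
 *		(void)mac_execve(path, argv, envv, label);
 *		mac_free(label);
 *	}
 *
 * where path, argv, and envv stand in for the caller's usual execve(2)
 * arguments.
 */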
/*
 * When relabeling a process, call out to the policies for the maximum
 * permission allowed for each object type we know about in its memory space,
 * and revoke access (in the least surprising ways we know) when necessary.
 * The process lock is not held here.
 */
void
mac_proc_vm_revoke(struct thread *td)
{
	struct ucred *cred;

	PROC_LOCK(td->td_proc);
	cred = crhold(td->td_proc->p_ucred);
	PROC_UNLOCK(td->td_proc);

	/* XXX freeze all other threads */
	mac_proc_vm_revoke_recurse(td, cred,
	    &td->td_proc->p_vmspace->vm_map);
	/* XXX allow other threads to continue */

	crfree(cred);
}

static __inline const char *
prot2str(vm_prot_t prot)
{

	switch (prot & VM_PROT_ALL) {
	case VM_PROT_READ:
		return ("r--");
	case VM_PROT_READ | VM_PROT_WRITE:
		return ("rw-");
	case VM_PROT_READ | VM_PROT_EXECUTE:
		return ("r-x");
	case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
		return ("rwx");
	case VM_PROT_WRITE:
		return ("-w-");
	case VM_PROT_EXECUTE:
		return ("--x");
	case VM_PROT_WRITE | VM_PROT_EXECUTE:
		return ("-wx");
	default:
		return ("---");
	}
}
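/*
 * Walk a process's VM map (recursing into submaps) and, for each shared,
 * vnode-backed mapping, ask the loaded policies via
 * mac_vnode_check_mmap_downgrade() which protections remain permissible.
 * Protections that are no longer allowed are then revoked: bits that are
 * not currently in use are simply cleared, while writable mappings are
 * flushed to their backing vnode and then lose write access either through
 * copy-on-write semantics or by having the write bits removed, as selected
 * by the security.mac.mmap_revocation_via_cow tunable.
 */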
static void
mac_proc_vm_revoke_recurse(struct thread *td, struct ucred *cred,
    struct vm_map *map)
{
	struct vm_map_entry *vme;
	int vfslocked, result;
	vm_prot_t revokeperms;
	vm_object_t backing_object, object;
	vm_ooffset_t offset;
	struct vnode *vp;
	struct mount *mp;

	if (!mac_mmap_revocation)
		return;

	vm_map_lock(map);
	for (vme = map->header.next; vme != &map->header; vme = vme->next) {
		if (vme->eflags & MAP_ENTRY_IS_SUB_MAP) {
			mac_proc_vm_revoke_recurse(td, cred,
			    vme->object.sub_map);
			continue;
		}
		/*
		 * Skip over entries that obviously are not shared.
		 */
		if (vme->eflags & (MAP_ENTRY_COW | MAP_ENTRY_NOSYNC) ||
		    !vme->max_protection)
			continue;
		/*
		 * Drill down to the deepest backing object.
		 */
		offset = vme->offset;
		object = vme->object.vm_object;
		if (object == NULL)
			continue;
		VM_OBJECT_LOCK(object);
		while ((backing_object = object->backing_object) != NULL) {
			VM_OBJECT_LOCK(backing_object);
			offset += object->backing_object_offset;
			VM_OBJECT_UNLOCK(object);
			object = backing_object;
		}
		VM_OBJECT_UNLOCK(object);
		/*
		 * At the moment, vm_maps and objects aren't considered by
		 * the MAC system, so only things with backing by a normal
		 * object (read: vnodes) are checked.
		 */
		if (object->type != OBJT_VNODE)
			continue;
		vp = (struct vnode *)object->handle;
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		result = vme->max_protection;
		mac_vnode_check_mmap_downgrade(cred, vp, &result);
		VOP_UNLOCK(vp, 0);
		/*
		 * Find out which of the maximum protections we currently
		 * allow a policy now wants removed.
		 */
		revokeperms = vme->max_protection & ~result;
		if (!revokeperms) {
			VFS_UNLOCK_GIANT(vfslocked);
			continue;
		}
		printf("pid %ld: revoking %s perms from %#lx:%ld "
		    "(max %s/cur %s)\n", (long)td->td_proc->p_pid,
		    prot2str(revokeperms), (u_long)vme->start,
		    (long)(vme->end - vme->start),
		    prot2str(vme->max_protection), prot2str(vme->protection));
		/*
		 * This is the really simple case: if a map has more
		 * max_protection than is allowed, but it's not being
		 * actually used (that is, the current protection is still
		 * allowed), we can just wipe it out and do nothing more.
		 */
		if ((vme->protection & revokeperms) == 0) {
			vme->max_protection -= revokeperms;
		} else {
			if (revokeperms & VM_PROT_WRITE) {
				/*
				 * In the more complicated case, flush out all
				 * pending changes to the object then turn it
				 * copy-on-write.
				 */
				vm_object_reference(object);
				(void) vn_start_write(vp, &mp, V_WAIT);
				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
				VM_OBJECT_LOCK(object);
				vm_object_page_clean(object,
				    OFF_TO_IDX(offset),
				    OFF_TO_IDX(offset + vme->end - vme->start +
				    PAGE_MASK),
				    OBJPC_SYNC);
				VM_OBJECT_UNLOCK(object);
				VOP_UNLOCK(vp, 0);
				vn_finished_write(mp);
				vm_object_deallocate(object);
				/*
				 * Why bother if there are no read permissions
				 * anymore?  For the rest, we need to leave
				 * the write permissions on for COW, or
				 * remove them entirely if configured to.
				 */
				if (!mac_mmap_revocation_via_cow) {
					vme->max_protection &= ~VM_PROT_WRITE;
					vme->protection &= ~VM_PROT_WRITE;
				}
				if ((revokeperms & VM_PROT_READ) == 0)
					vme->eflags |= MAP_ENTRY_COW |
					    MAP_ENTRY_NEEDS_COPY;
			}
			if (revokeperms & VM_PROT_EXECUTE) {
				vme->max_protection &= ~VM_PROT_EXECUTE;
				vme->protection &= ~VM_PROT_EXECUTE;
			}
			if (revokeperms & VM_PROT_READ) {
				vme->max_protection = 0;
				vme->protection = 0;
			}
			pmap_protect(map->pmap, vme->start, vme->end,
			    vme->protection & ~revokeperms);
			vm_map_simplify_entry(map, vme);
		}
		VFS_UNLOCK_GIANT(vfslocked);
	}
	vm_map_unlock(map);
}
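/*
 * The checks below all follow the same pattern: MAC_CHECK() expands to a
 * loop over the registered policies, invoking each policy's
 * mpo_proc_check_* entry point (when defined) and composing the results
 * into the local 'error' variable, which the macro initializes itself.
 * That is why 'error' is declared but never explicitly assigned in these
 * functions.
 */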
int
mac_proc_check_debug(struct ucred *cred, struct proc *p)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	MAC_CHECK(proc_check_debug, cred, p);

	return (error);
}

int
mac_proc_check_sched(struct ucred *cred, struct proc *p)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	MAC_CHECK(proc_check_sched, cred, p);

	return (error);
}

int
mac_proc_check_signal(struct ucred *cred, struct proc *p, int signum)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	MAC_CHECK(proc_check_signal, cred, p, signum);

	return (error);
}

int
mac_proc_check_setuid(struct proc *p, struct ucred *cred, uid_t uid)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	MAC_CHECK(proc_check_setuid, cred, uid);

	return (error);
}

int
mac_proc_check_seteuid(struct proc *p, struct ucred *cred, uid_t euid)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	MAC_CHECK(proc_check_seteuid, cred, euid);

	return (error);
}

int
mac_proc_check_setgid(struct proc *p, struct ucred *cred, gid_t gid)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	MAC_CHECK(proc_check_setgid, cred, gid);

	return (error);
}

int
mac_proc_check_setegid(struct proc *p, struct ucred *cred, gid_t egid)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	MAC_CHECK(proc_check_setegid, cred, egid);

	return (error);
}

int
mac_proc_check_setgroups(struct proc *p, struct ucred *cred, int ngroups,
    gid_t *gidset)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	MAC_CHECK(proc_check_setgroups, cred, ngroups, gidset);

	return (error);
}

int
mac_proc_check_setreuid(struct proc *p, struct ucred *cred, uid_t ruid,
    uid_t euid)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	MAC_CHECK(proc_check_setreuid, cred, ruid, euid);

	return (error);
}

int
mac_proc_check_setregid(struct proc *proc, struct ucred *cred, gid_t rgid,
    gid_t egid)
{
	int error;

	PROC_LOCK_ASSERT(proc, MA_OWNED);

	MAC_CHECK(proc_check_setregid, cred, rgid, egid);

	return (error);
}

int
mac_proc_check_setresuid(struct proc *p, struct ucred *cred, uid_t ruid,
    uid_t euid, uid_t suid)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	MAC_CHECK(proc_check_setresuid, cred, ruid, euid, suid);

	return (error);
}

int
mac_proc_check_setresgid(struct proc *p, struct ucred *cred, gid_t rgid,
    gid_t egid, gid_t sgid)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	MAC_CHECK(proc_check_setresgid, cred, rgid, egid, sgid);

	return (error);
}

int
mac_proc_check_wait(struct ucred *cred, struct proc *p)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	MAC_CHECK(proc_check_wait, cred, p);

	return (error);
}
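/*
 * Illustrative sketch (not part of this file): a policy module supplies the
 * mpo_proc_check_* implementations consulted by MAC_CHECK() above by
 * filling in a mac_policy_ops structure and registering it with
 * MAC_POLICY_SET().  The module, function, and description names below are
 * hypothetical; a minimal policy that refuses ptrace(2)-style attachment
 * might look roughly like:
 *
 *	static int
 *	deny_debug_proc_check_debug(struct ucred *cred, struct proc *p)
 *	{
 *
 *		return (EPERM);
 *	}
 *
 *	static struct mac_policy_ops deny_debug_ops = {
 *		.mpo_proc_check_debug = deny_debug_proc_check_debug,
 *	};
 *
 *	MAC_POLICY_SET(&deny_debug_ops, mac_deny_debug,
 *	    "Deny process debugging", MPC_LOADTIME_FLAG_UNLOADOK, NULL);
 */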