1 /*- 2 * Copyright (c) 1999-2002, 2008 Robert N. M. Watson 3 * Copyright (c) 2001 Ilmar S. Habibulin 4 * Copyright (c) 2001-2003 Networks Associates Technology, Inc. 5 * Copyright (c) 2005 Samy Al Bahra 6 * Copyright (c) 2006 SPARTA, Inc. 7 * Copyright (c) 2008 Apple Inc. 8 * All rights reserved. 9 * 10 * This software was developed by Robert Watson and Ilmar Habibulin for the 11 * TrustedBSD Project. 12 * 13 * This software was developed for the FreeBSD Project in part by Network 14 * Associates Laboratories, the Security Research Division of Network 15 * Associates, Inc. under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), 16 * as part of the DARPA CHATS research program. 17 * 18 * This software was enhanced by SPARTA ISSO under SPAWAR contract 19 * N66001-04-C-6019 ("SEFOS"). 20 * 21 * Redistribution and use in source and binary forms, with or without 22 * modification, are permitted provided that the following conditions 23 * are met: 24 * 1. Redistributions of source code must retain the above copyright 25 * notice, this list of conditions and the following disclaimer. 26 * 2. Redistributions in binary form must reproduce the above copyright 27 * notice, this list of conditions and the following disclaimer in the 28 * documentation and/or other materials provided with the distribution. 29 * 30 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 31 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 32 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 33 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * MAC framework entry points relating to processes: label management for
 * struct proc, execve(2) label handling, and revocation of memory mappings
 * when a subject (process credential) is relabeled.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_mac.h"

#include <sys/param.h>
#include <sys/condvar.h>
#include <sys/imgact.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/mac.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/namei.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>

#include <security/mac/mac_framework.h>
#include <security/mac/mac_internal.h>
#include <security/mac/mac_policy.h>

/* Non-zero: revoke mmap access to files when a subject is relabeled. */
static int mac_mmap_revocation = 1;
SYSCTL_INT(_security_mac, OID_AUTO, mmap_revocation, CTLFLAG_RW,
    &mac_mmap_revocation, 0, "Revoke mmap access to files on subject "
    "relabel");

/*
 * Non-zero: revoke write access by marking mappings copy-on-write instead
 * of stripping write permission entirely (see revocation loop below).
 */
static int mac_mmap_revocation_via_cow = 0;
SYSCTL_INT(_security_mac, OID_AUTO, mmap_revocation_via_cow, CTLFLAG_RW,
    &mac_mmap_revocation_via_cow, 0, "Revoke mmap access to files via "
    "copy-on-write semantics, or by removing all write access");

static void	mac_proc_vm_revoke_recurse(struct thread *td,
		    struct ucred *cred, struct vm_map *map);

/*
 * Allocate a process label and give each registered policy a chance to
 * initialize its slot.  May sleep (M_WAITOK), so never returns NULL.
 */
static struct label *
mac_proc_label_alloc(void)
{
	struct label *label;

	label = mac_labelzone_alloc(M_WAITOK);
	MAC_PERFORM(proc_init_label, label);
	return (label);
}

/*
 * Initialize the MAC label on a new process.  The allocation is skipped
 * entirely unless at least one loaded policy labels processes
 * (MPC_OBJECT_PROC), in which case p_label is left NULL.
 */
void
mac_proc_init(struct proc *p)
{

	if (mac_labeled & MPC_OBJECT_PROC)
		p->p_label = mac_proc_label_alloc();
	else
		p->p_label = NULL;
}

/*
 * Tear down a process label: let each policy release its slot, then return
 * the label storage to the label zone.
 */
static void
mac_proc_label_free(struct label *label)
{

	MAC_PERFORM(proc_destroy_label, label);
	mac_labelzone_free(label);
}

/*
 * Destroy the MAC label on a process, if one was allocated at init time.
 */
void
mac_proc_destroy(struct proc *p)
{

	if (p->p_label != NULL) {
		mac_proc_label_free(p->p_label);
		p->p_label = NULL;
	}
}

/*
 * Policy hook invoked as a thread returns to user space.
 */
void
mac_thread_userret(struct thread *td)
{

	MAC_PERFORM(thread_userret, td);
}

/*
 * Copy in and internalize a user-supplied label for execve(2).  On success
 * the internalized credential label is stored in imgp->execlabel (freed
 * later by mac_execve_exit()).  Returns 0, or an errno value; EINVAL if no
 * loaded policy labels credentials.
 */
int
mac_execve_enter(struct image_params *imgp, struct mac *mac_p)
{
	struct label *label;
	struct mac mac;
	char *buffer;
	int error;

	if (mac_p == NULL)
		return (0);

	if (!(mac_labeled & MPC_OBJECT_CRED))
		return (EINVAL);

	error = copyin(mac_p, &mac, sizeof(mac));
	if (error)
		return (error);

	error = mac_check_structmac_consistent(&mac);
	if (error)
		return (error);

	/* m_buflen was validated by mac_check_structmac_consistent(). */
	buffer = malloc(mac.m_buflen, M_MACTEMP, M_WAITOK);
	error = copyinstr(mac.m_string, buffer, mac.m_buflen, NULL);
	if (error) {
		free(buffer, M_MACTEMP);
		return (error);
	}

	label = mac_cred_label_alloc();
	error = mac_cred_internalize_label(label, buffer);
	free(buffer, M_MACTEMP);
	if (error) {
		mac_cred_label_free(label);
		return (error);
	}
	imgp->execlabel = label;
	return (0);
}

/*
 * Release any label attached to the image parameters by
 * mac_execve_enter().
 */
void
mac_execve_exit(struct image_params *imgp)
{
	if (imgp->execlabel != NULL) {
		mac_cred_label_free(imgp->execlabel);
		imgp->execlabel = NULL;
	}
}

/*
 * Snapshot the label of an execve(2) interpreter vnode so checks can be
 * performed after the vnode lock is dropped.  *interpvplabel is NULL if no
 * policy labels vnodes; release with mac_execve_interpreter_exit().
 */
void
mac_execve_interpreter_enter(struct vnode *interpvp,
    struct label **interpvplabel)
{

	if (mac_labeled & MPC_OBJECT_VNODE) {
		*interpvplabel = mac_vnode_label_alloc();
		mac_vnode_copy_label(interpvp->v_label, *interpvplabel);
	} else
		*interpvplabel = NULL;
}

/*
 * Free an interpreter vnode label snapshot, if one was taken.
 */
void
mac_execve_interpreter_exit(struct label *interpvplabel)
{

	if (interpvplabel != NULL)
		mac_vnode_label_free(interpvplabel);
}

/*
 * When relabeling a process, call out to the policies for the maximum
 * permission allowed for each object type we know about in its memory space,
 * and revoke access (in the least surprising ways we know) when necessary.
 * The process lock is not held here.
 */
void
mac_proc_vm_revoke(struct thread *td)
{
	struct ucred *cred;

	/* Hold a credential reference so it outlives the map walk. */
	PROC_LOCK(td->td_proc);
	cred = crhold(td->td_proc->p_ucred);
	PROC_UNLOCK(td->td_proc);

	/* XXX freeze all other threads */
	mac_proc_vm_revoke_recurse(td, cred,
	    &td->td_proc->p_vmspace->vm_map);
	/* XXX allow other threads to continue */

	crfree(cred);
}

/*
 * Render a vm_prot_t as a three-character "rwx" string for diagnostics.
 */
static __inline const char *
prot2str(vm_prot_t prot)
{

	switch (prot & VM_PROT_ALL) {
	case VM_PROT_READ:
		return ("r--");
	case VM_PROT_READ | VM_PROT_WRITE:
		return ("rw-");
	case VM_PROT_READ | VM_PROT_EXECUTE:
		return ("r-x");
	case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
		return ("rwx");
	case VM_PROT_WRITE:
		return ("-w-");
	case VM_PROT_EXECUTE:
		return ("--x");
	case VM_PROT_WRITE | VM_PROT_EXECUTE:
		return ("-wx");
	default:
		return ("---");
	}
}

/*
 * Walk a vm_map (recursing into submaps) and, for each shared
 * vnode-backed entry, ask the policies what the maximum allowed protection
 * is; strip any permissions the policies no longer grant.  Controlled by
 * the security.mac.mmap_revocation sysctls above.
 */
static void
mac_proc_vm_revoke_recurse(struct thread *td, struct ucred *cred,
    struct vm_map *map)
{
	struct vm_map_entry *vme;
	int vfslocked, result;
	vm_prot_t revokeperms;
	vm_object_t backing_object, object;
	vm_ooffset_t offset;
	struct vnode *vp;
	struct mount *mp;

	if (!mac_mmap_revocation)
		return;

	vm_map_lock_read(map);
	for (vme = map->header.next; vme != &map->header; vme = vme->next) {
		if (vme->eflags & MAP_ENTRY_IS_SUB_MAP) {
			mac_proc_vm_revoke_recurse(td, cred,
			    vme->object.sub_map);
			continue;
		}
		/*
		 * Skip over entries that obviously are not shared.
		 */
		if (vme->eflags & (MAP_ENTRY_COW | MAP_ENTRY_NOSYNC) ||
		    !vme->max_protection)
			continue;
		/*
		 * Drill down to the deepest backing object.
		 */
		offset = vme->offset;
		object = vme->object.vm_object;
		if (object == NULL)
			continue;
		VM_OBJECT_LOCK(object);
		while ((backing_object = object->backing_object) != NULL) {
			VM_OBJECT_LOCK(backing_object);
			offset += object->backing_object_offset;
			VM_OBJECT_UNLOCK(object);
			object = backing_object;
		}
		VM_OBJECT_UNLOCK(object);
		/*
		 * At the moment, vm_maps and objects aren't considered by
		 * the MAC system, so only things with backing by a normal
		 * object (read: vnodes) are checked.
		 */
		if (object->type != OBJT_VNODE)
			continue;
		vp = (struct vnode *)object->handle;
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		/* Policies may clear bits in 'result' to downgrade access. */
		result = vme->max_protection;
		mac_vnode_check_mmap_downgrade(cred, vp, &result);
		VOP_UNLOCK(vp, 0);
		/*
		 * Find out what maximum protection we may be allowing now
		 * but a policy needs to get removed.
		 */
		revokeperms = vme->max_protection & ~result;
		if (!revokeperms) {
			VFS_UNLOCK_GIANT(vfslocked);
			continue;
		}
		printf("pid %ld: revoking %s perms from %#lx:%ld "
		    "(max %s/cur %s)\n", (long)td->td_proc->p_pid,
		    prot2str(revokeperms), (u_long)vme->start,
		    (long)(vme->end - vme->start),
		    prot2str(vme->max_protection), prot2str(vme->protection));
		/* Upgrade to a write lock before mutating the entry. */
		vm_map_lock_upgrade(map);
		/*
		 * This is the really simple case: if a map has more
		 * max_protection than is allowed, but it's not being
		 * actually used (that is, the current protection is still
		 * allowed), we can just wipe it out and do nothing more.
		 */
		if ((vme->protection & revokeperms) == 0) {
			vme->max_protection -= revokeperms;
		} else {
			if (revokeperms & VM_PROT_WRITE) {
				/*
				 * In the more complicated case, flush out all
				 * pending changes to the object then turn it
				 * copy-on-write.
				 */
				vm_object_reference(object);
				(void) vn_start_write(vp, &mp, V_WAIT);
				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
				VM_OBJECT_LOCK(object);
				vm_object_page_clean(object,
				    OFF_TO_IDX(offset),
				    OFF_TO_IDX(offset + vme->end - vme->start +
				    PAGE_MASK),
				    OBJPC_SYNC);
				VM_OBJECT_UNLOCK(object);
				VOP_UNLOCK(vp, 0);
				vn_finished_write(mp);
				vm_object_deallocate(object);
				/*
				 * Why bother if there's no read permissions
				 * anymore?  For the rest, we need to leave
				 * the write permissions on for COW, or
				 * remove them entirely if configured to.
				 */
				if (!mac_mmap_revocation_via_cow) {
					vme->max_protection &= ~VM_PROT_WRITE;
					vme->protection &= ~VM_PROT_WRITE;
				}
				if ((revokeperms & VM_PROT_READ) == 0)
					vme->eflags |= MAP_ENTRY_COW |
					    MAP_ENTRY_NEEDS_COPY;
			}
			if (revokeperms & VM_PROT_EXECUTE) {
				vme->max_protection &= ~VM_PROT_EXECUTE;
				vme->protection &= ~VM_PROT_EXECUTE;
			}
			if (revokeperms & VM_PROT_READ) {
				/* Read revoked: drop all access. */
				vme->max_protection = 0;
				vme->protection = 0;
			}
			/* Push the reduced protection into the pmap. */
			pmap_protect(map->pmap, vme->start, vme->end,
			    vme->protection & ~revokeperms);
			vm_map_simplify_entry(map, vme);
		}
		vm_map_lock_downgrade(map);
		VFS_UNLOCK_GIANT(vfslocked);
	}
	vm_map_unlock_read(map);
}

/*
 * Policy check: may cred debug (e.g. ptrace/ktrace) process p?
 * MAC_CHECK runs each policy's hook and composes the result into the
 * local 'error'.
 */
int
mac_proc_check_debug(struct ucred *cred, struct proc *p)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	MAC_CHECK(proc_check_debug, cred, p);

	return (error);
}

/*
 * Policy check: may cred change the scheduling parameters of process p?
 */
int
mac_proc_check_sched(struct ucred *cred, struct proc *p)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	MAC_CHECK(proc_check_sched, cred, p);

	return (error);
}
/*
 * Policy check: may cred deliver signal signum to process p?
 * MAC_CHECK runs each policy's hook and composes the result into the
 * local 'error'.
 */
int
mac_proc_check_signal(struct ucred *cred, struct proc *p, int signum)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	MAC_CHECK(proc_check_signal, cred, p, signum);

	return (error);
}

/*
 * Policy check: may cred set the real uid of process p to uid?
 * Note that only the credential and new id are passed to the policies;
 * p is used solely for the lock assertion.
 */
int
mac_proc_check_setuid(struct proc *p, struct ucred *cred, uid_t uid)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	MAC_CHECK(proc_check_setuid, cred, uid);
	return (error);
}

/*
 * Policy check: may cred set the effective uid of process p to euid?
 */
int
mac_proc_check_seteuid(struct proc *p, struct ucred *cred, uid_t euid)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	MAC_CHECK(proc_check_seteuid, cred, euid);
	return (error);
}

/*
 * Policy check: may cred set the real gid of process p to gid?
 */
int
mac_proc_check_setgid(struct proc *p, struct ucred *cred, gid_t gid)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	MAC_CHECK(proc_check_setgid, cred, gid);

	return (error);
}

/*
 * Policy check: may cred set the effective gid of process p to egid?
 */
int
mac_proc_check_setegid(struct proc *p, struct ucred *cred, gid_t egid)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	MAC_CHECK(proc_check_setegid, cred, egid);

	return (error);
}

/*
 * Policy check: may cred replace the supplementary group set of process p
 * with the ngroups entries in gidset?
 */
int
mac_proc_check_setgroups(struct proc *p, struct ucred *cred, int ngroups,
    gid_t *gidset)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	MAC_CHECK(proc_check_setgroups, cred, ngroups, gidset);
	return (error);
}

/*
 * Policy check: may cred set the real and effective uids of process p?
 */
int
mac_proc_check_setreuid(struct proc *p, struct ucred *cred, uid_t ruid,
    uid_t euid)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	MAC_CHECK(proc_check_setreuid, cred, ruid, euid);

	return (error);
}

/*
 * Policy check: may cred set the real and effective gids of process proc?
 */
int
mac_proc_check_setregid(struct proc *proc, struct ucred *cred, gid_t rgid,
    gid_t egid)
{
	int error;

	PROC_LOCK_ASSERT(proc, MA_OWNED);

	MAC_CHECK(proc_check_setregid, cred, rgid, egid);

	return (error);
}

/*
 * Policy check: may cred set the real, effective, and saved uids of
 * process p?
 */
int
mac_proc_check_setresuid(struct proc *p, struct ucred *cred, uid_t ruid,
    uid_t euid, uid_t suid)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	MAC_CHECK(proc_check_setresuid, cred, ruid, euid, suid);
	return (error);
}

/*
 * Policy check: may cred set the real, effective, and saved gids of
 * process p?
 */
int
mac_proc_check_setresgid(struct proc *p, struct ucred *cred, gid_t rgid,
    gid_t egid, gid_t sgid)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	MAC_CHECK(proc_check_setresgid, cred, rgid, egid, sgid);

	return (error);
}

/*
 * Policy check: may cred wait(2) for process p?
 */
int
mac_proc_check_wait(struct ucred *cred, struct proc *p)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	MAC_CHECK(proc_check_wait, cred, p);

	return (error);
}