/*-
 * Copyright (c) 1999-2001 Robert N. M. Watson
 * All rights reserved.
 *
 * This software was developed by Robert Watson for the TrustedBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
/*
 * Developed by the TrustedBSD Project.
 * Support for POSIX.1e access control lists.
 */

#include "opt_cap.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/sysent.h>
#include <sys/errno.h>
#include <sys/stat.h>
#include <sys/acl.h>

MALLOC_DEFINE(M_ACL, "acl", "access control list");

static int vacl_set_acl(struct thread *td, struct vnode *vp, acl_type_t type,
    struct acl *aclp);
static int vacl_get_acl(struct thread *td, struct vnode *vp, acl_type_t type,
    struct acl *aclp);
static int vacl_aclcheck(struct thread *td, struct vnode *vp,
    acl_type_t type, struct acl *aclp);

/*
 * Implement a version of vaccess() that understands POSIX.1e ACL semantics.
 * Return 0 on success, else an errno value.  Should be merged into
 * vaccess() eventually.
 */
int
vaccess_acl_posix1e(enum vtype type, uid_t file_uid, gid_t file_gid,
    struct acl *acl, mode_t acc_mode, struct ucred *cred, int *privused)
{
	struct acl_entry *acl_other, *acl_mask;
	mode_t dac_granted;
	mode_t cap_granted;
	mode_t acl_mask_granted;
	int group_matched, i;

	/*
	 * Look for a normal, non-privileged way to access the file/directory
	 * as requested.  If it exists, go with that.  Otherwise, attempt to
	 * use privileges granted via cap_granted.  In some cases, which
	 * privileges to use may be ambiguous due to "best match", in which
	 * case fall back on first match for the time being.
	 */
	if (privused != NULL)
		*privused = 0;

	/*
	 * Determine privileges now, but don't apply until we've found a DAC
	 * entry that matches but has failed to allow access.
	 */
#ifndef CAPABILITIES
	if (suser_xxx(cred, NULL, PRISON_ROOT) == 0)
		cap_granted = (VEXEC | VREAD | VWRITE | VADMIN);
	else
		cap_granted = 0;
#else
	cap_granted = 0;

	if (type == VDIR) {
		if ((acc_mode & VEXEC) && !cap_check(cred, NULL,
		    CAP_DAC_READ_SEARCH, PRISON_ROOT))
			cap_granted |= VEXEC;
	} else {
		if ((acc_mode & VEXEC) && !cap_check(cred, NULL,
		    CAP_DAC_EXECUTE, PRISON_ROOT))
			cap_granted |= VEXEC;
	}

	if ((acc_mode & VREAD) && !cap_check(cred, NULL, CAP_DAC_READ_SEARCH,
	    PRISON_ROOT))
		cap_granted |= VREAD;

	if ((acc_mode & VWRITE) && !cap_check(cred, NULL, CAP_DAC_WRITE,
	    PRISON_ROOT))
		cap_granted |= VWRITE;

	if ((acc_mode & VADMIN) && !cap_check(cred, NULL, CAP_FOWNER,
	    PRISON_ROOT))
		cap_granted |= VADMIN;
#endif /* CAPABILITIES */

	/*
	 * The owner matches if the effective uid associated with the
	 * credential matches that of the ACL_USER_OBJ entry.  While we're
	 * doing the first scan, also cache the locations of the ACL_MASK and
	 * ACL_OTHER entries, saving some future iterations.
	 */
	acl_mask = acl_other = NULL;
	for (i = 0; i < acl->acl_cnt; i++) {
		switch (acl->acl_entry[i].ae_tag) {
		case ACL_USER_OBJ:
			if (file_uid != cred->cr_uid)
				break;
			dac_granted = 0;
			dac_granted |= VADMIN;
			if (acl->acl_entry[i].ae_perm & ACL_EXECUTE)
				dac_granted |= VEXEC;
			if (acl->acl_entry[i].ae_perm & ACL_READ)
				dac_granted |= VREAD;
			if (acl->acl_entry[i].ae_perm & ACL_WRITE)
				dac_granted |= VWRITE;
			if ((acc_mode & dac_granted) == acc_mode)
				return (0);
			if ((acc_mode & (dac_granted | cap_granted)) ==
			    acc_mode) {
				if (privused != NULL)
					*privused = 1;
				return (0);
			}
			goto error;

		case ACL_MASK:
			acl_mask = &acl->acl_entry[i];
			break;

		case ACL_OTHER:
			acl_other = &acl->acl_entry[i];
			break;

		default:
			break;
		}
	}

	/*
	 * An ACL_OTHER entry should always exist in a valid access ACL.  If
	 * it doesn't, then generate a serious failure.  For now, this means
	 * a debugging message and EPERM, but in the future it should
	 * probably be a panic.
	 */
	if (acl_other == NULL) {
		/*
		 * XXX This should never happen
		 */
		printf("vaccess_acl_posix1e: ACL_OTHER missing\n");
		return (EPERM);
	}

	/*
	 * Checks against ACL_USER, ACL_GROUP_OBJ, and ACL_GROUP fields are
	 * masked by an ACL_MASK entry, if any.  As such, first identify the
	 * ACL_MASK field, then iterate through identifying potential user
	 * matches, then group matches.  If there is no ACL_MASK, assume that
	 * the mask allows all requests to succeed.
	 */
	if (acl_mask != NULL) {
		acl_mask_granted = 0;
		if (acl_mask->ae_perm & ACL_EXECUTE)
			acl_mask_granted |= VEXEC;
		if (acl_mask->ae_perm & ACL_READ)
			acl_mask_granted |= VREAD;
		if (acl_mask->ae_perm & ACL_WRITE)
			acl_mask_granted |= VWRITE;
	} else
		acl_mask_granted = VEXEC | VREAD | VWRITE;

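	/*
	 * For example, a mask of "r-x" leaves acl_mask_granted with only
	 * VEXEC | VREAD, so no ACL_USER or ACL_GROUP entry below can grant
	 * VWRITE regardless of its own permissions.  ACL_USER_OBJ and
	 * ACL_OTHER are never subject to the mask.
	 */
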
	/*
	 * Iterate through user ACL entries.  Do checks twice: first without
	 * privilege, and then, if a match is found but fails, a second time
	 * with privilege.
	 */

	/*
	 * Check ACL_USER ACL entries.
	 */
	for (i = 0; i < acl->acl_cnt; i++) {
		switch (acl->acl_entry[i].ae_tag) {
		case ACL_USER:
			if (acl->acl_entry[i].ae_id != cred->cr_uid)
				break;
			dac_granted = 0;
			if (acl->acl_entry[i].ae_perm & ACL_EXECUTE)
				dac_granted |= VEXEC;
			if (acl->acl_entry[i].ae_perm & ACL_READ)
				dac_granted |= VREAD;
			if (acl->acl_entry[i].ae_perm & ACL_WRITE)
				dac_granted |= VWRITE;
			dac_granted &= acl_mask_granted;
			if ((acc_mode & dac_granted) == acc_mode)
				return (0);
			if ((acc_mode & (dac_granted | cap_granted)) !=
			    acc_mode)
				goto error;

			if (privused != NULL)
				*privused = 1;
			return (0);
		}
	}

	/*
	 * Group match is best-match, not first-match, so find a "best"
	 * match.  Iterate across, testing each potential group match.  Keep
	 * track of whether a match was found, so that we know whether to try
	 * again with any available privilege, or to move on to ACL_OTHER.
	 */
	group_matched = 0;
	for (i = 0; i < acl->acl_cnt; i++) {
		switch (acl->acl_entry[i].ae_tag) {
		case ACL_GROUP_OBJ:
			if (!groupmember(file_gid, cred))
				break;
			dac_granted = 0;
			if (acl->acl_entry[i].ae_perm & ACL_EXECUTE)
				dac_granted |= VEXEC;
			if (acl->acl_entry[i].ae_perm & ACL_READ)
				dac_granted |= VREAD;
			if (acl->acl_entry[i].ae_perm & ACL_WRITE)
				dac_granted |= VWRITE;
			dac_granted &= acl_mask_granted;

			if ((acc_mode & dac_granted) == acc_mode)
				return (0);

			group_matched = 1;
			break;

		case ACL_GROUP:
			if (!groupmember(acl->acl_entry[i].ae_id, cred))
				break;
			dac_granted = 0;
			if (acl->acl_entry[i].ae_perm & ACL_EXECUTE)
				dac_granted |= VEXEC;
			if (acl->acl_entry[i].ae_perm & ACL_READ)
				dac_granted |= VREAD;
			if (acl->acl_entry[i].ae_perm & ACL_WRITE)
				dac_granted |= VWRITE;
			dac_granted &= acl_mask_granted;

			if ((acc_mode & dac_granted) == acc_mode)
				return (0);

			group_matched = 1;
			break;

		default:
			break;
		}
	}

	if (group_matched == 1) {
		/*
		 * There was a match, but it did not grant rights via pure
		 * DAC.  Try again, this time with privilege.
		 */
		for (i = 0; i < acl->acl_cnt; i++) {
			switch (acl->acl_entry[i].ae_tag) {
			case ACL_GROUP_OBJ:
				if (!groupmember(file_gid, cred))
					break;
				dac_granted = 0;
				if (acl->acl_entry[i].ae_perm & ACL_EXECUTE)
					dac_granted |= VEXEC;
				if (acl->acl_entry[i].ae_perm & ACL_READ)
					dac_granted |= VREAD;
				if (acl->acl_entry[i].ae_perm & ACL_WRITE)
					dac_granted |= VWRITE;
				dac_granted &= acl_mask_granted;

				if ((acc_mode & (dac_granted | cap_granted)) !=
				    acc_mode)
					break;

				if (privused != NULL)
					*privused = 1;
				return (0);

			case ACL_GROUP:
				if (!groupmember(acl->acl_entry[i].ae_id,
				    cred))
					break;
				dac_granted = 0;
				if (acl->acl_entry[i].ae_perm & ACL_EXECUTE)
					dac_granted |= VEXEC;
				if (acl->acl_entry[i].ae_perm & ACL_READ)
					dac_granted |= VREAD;
				if (acl->acl_entry[i].ae_perm & ACL_WRITE)
					dac_granted |= VWRITE;
				dac_granted &= acl_mask_granted;

				if ((acc_mode & (dac_granted | cap_granted)) !=
				    acc_mode)
					break;

				if (privused != NULL)
					*privused = 1;
				return (0);

			default:
				break;
			}
		}
		/*
		 * Even with privilege, group membership was not sufficient.
		 * Return failure.
		 */
		goto error;
	}

	/*
	 * Fall back on ACL_OTHER.  ACL_MASK is not applied to ACL_OTHER.
	 */
	dac_granted = 0;
	if (acl_other->ae_perm & ACL_EXECUTE)
		dac_granted |= VEXEC;
	if (acl_other->ae_perm & ACL_READ)
		dac_granted |= VREAD;
	if (acl_other->ae_perm & ACL_WRITE)
		dac_granted |= VWRITE;

	if ((acc_mode & dac_granted) == acc_mode)
		return (0);
	if ((acc_mode & (dac_granted | cap_granted)) == acc_mode) {
		if (privused != NULL)
			*privused = 1;
		return (0);
	}

error:
	return ((acc_mode & VADMIN) ? EPERM : EACCES);
}

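/*
 * Worked example (illustrative, not normative): for an access ACL of
 *
 *	u::rw-  u:1001:rwx  g::r-x  m::r-x  o::---
 *
 * on a file owned by uid 1000 and gid 1000, the checks above give the
 * following results for an unprivileged process:
 *
 *	uid 1000 requesting VWRITE        granted via ACL_USER_OBJ
 *	uid 1001 requesting VWRITE        denied; ACL_MASK strips write
 *	gid 1000 member requesting VREAD  granted via ACL_GROUP_OBJ
 *	any other credential              denied via ACL_OTHER
 */
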
/*
 * For the purposes of file systems maintaining the _OBJ entries in an inode
 * with a mode_t field, this routine converts a mode_t entry to an
 * acl_perm_t.
 */
acl_perm_t
acl_posix1e_mode_to_perm(acl_tag_t tag, mode_t mode)
{
	acl_perm_t perm = 0;

	switch (tag) {
	case ACL_USER_OBJ:
		if (mode & S_IXUSR)
			perm |= ACL_EXECUTE;
		if (mode & S_IRUSR)
			perm |= ACL_READ;
		if (mode & S_IWUSR)
			perm |= ACL_WRITE;
		return (perm);

	case ACL_GROUP_OBJ:
		if (mode & S_IXGRP)
			perm |= ACL_EXECUTE;
		if (mode & S_IRGRP)
			perm |= ACL_READ;
		if (mode & S_IWGRP)
			perm |= ACL_WRITE;
		return (perm);

	case ACL_OTHER:
		if (mode & S_IXOTH)
			perm |= ACL_EXECUTE;
		if (mode & S_IROTH)
			perm |= ACL_READ;
		if (mode & S_IWOTH)
			perm |= ACL_WRITE;
		return (perm);

	default:
		printf("acl_posix1e_mode_to_perm: invalid tag (%d)\n", tag);
		return (0);
	}
}

/*
 * Given inode information (uid, gid, mode), return an acl entry of the
 * appropriate type.
 */
struct acl_entry
acl_posix1e_mode_to_entry(acl_tag_t tag, uid_t uid, gid_t gid, mode_t mode)
{
	struct acl_entry acl_entry;

	acl_entry.ae_tag = tag;
	acl_entry.ae_perm = acl_posix1e_mode_to_perm(tag, mode);
	switch (tag) {
	case ACL_USER_OBJ:
		acl_entry.ae_id = uid;
		break;

	case ACL_GROUP_OBJ:
		acl_entry.ae_id = gid;
		break;

	case ACL_OTHER:
		acl_entry.ae_id = ACL_UNDEFINED_ID;
		break;

	default:
		acl_entry.ae_id = ACL_UNDEFINED_ID;
		printf("acl_posix1e_mode_to_entry: invalid tag (%d)\n", tag);
	}

	return (acl_entry);
}

/*
 * Utility function to generate a file mode given appropriate ACL entries.
 */
mode_t
acl_posix1e_perms_to_mode(struct acl_entry *acl_user_obj_entry,
    struct acl_entry *acl_group_obj_entry, struct acl_entry *acl_other_entry)
{
	mode_t mode;

	mode = 0;
	if (acl_user_obj_entry->ae_perm & ACL_EXECUTE)
		mode |= S_IXUSR;
	if (acl_user_obj_entry->ae_perm & ACL_READ)
		mode |= S_IRUSR;
	if (acl_user_obj_entry->ae_perm & ACL_WRITE)
		mode |= S_IWUSR;
	if (acl_group_obj_entry->ae_perm & ACL_EXECUTE)
		mode |= S_IXGRP;
	if (acl_group_obj_entry->ae_perm & ACL_READ)
		mode |= S_IRGRP;
	if (acl_group_obj_entry->ae_perm & ACL_WRITE)
		mode |= S_IWGRP;
	if (acl_other_entry->ae_perm & ACL_EXECUTE)
		mode |= S_IXOTH;
	if (acl_other_entry->ae_perm & ACL_READ)
		mode |= S_IROTH;
	if (acl_other_entry->ae_perm & ACL_WRITE)
		mode |= S_IWOTH;

	return (mode);
}

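/*
 * For example (illustrative only): a mode of 0640 maps to
 * ACL_READ | ACL_WRITE for ACL_USER_OBJ, ACL_READ for ACL_GROUP_OBJ, and no
 * permissions for ACL_OTHER; feeding those three entries back through
 * acl_posix1e_perms_to_mode() reconstructs 0640.
 */
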
/*
 * Perform a syntactic check of the ACL, sufficient to allow an implementing
 * file system to determine if it should accept this and rely on the
 * POSIX.1e ACL properties.
 */
int
acl_posix1e_check(struct acl *acl)
{
	int num_acl_user_obj, num_acl_user, num_acl_group_obj, num_acl_group;
	int num_acl_mask, num_acl_other, i;

	/*
	 * Verify that the number of entries does not exceed the maximum
	 * defined for acl_t.
	 * Verify that the correct number of various sorts of ae_tags are
	 * present:
	 *   Exactly one ACL_USER_OBJ
	 *   Exactly one ACL_GROUP_OBJ
	 *   Exactly one ACL_OTHER
	 *   If any ACL_USER or ACL_GROUP entries appear, then exactly one
	 *   ACL_MASK entry must also appear.
	 * Verify that all ae_perm entries are in ACL_PERM_BITS.
	 * Verify all ae_tag entries are understood by this implementation.
	 * Note: Does not check for uniqueness of qualifier (ae_id) field.
	 */
	num_acl_user_obj = num_acl_user = num_acl_group_obj = num_acl_group =
	    num_acl_mask = num_acl_other = 0;
	if (acl->acl_cnt > ACL_MAX_ENTRIES || acl->acl_cnt < 0)
		return (EINVAL);
	for (i = 0; i < acl->acl_cnt; i++) {
		/*
		 * Check for a valid tag.
		 */
		switch (acl->acl_entry[i].ae_tag) {
		case ACL_USER_OBJ:
			acl->acl_entry[i].ae_id = ACL_UNDEFINED_ID; /* XXX */
			if (acl->acl_entry[i].ae_id != ACL_UNDEFINED_ID)
				return (EINVAL);
			num_acl_user_obj++;
			break;
		case ACL_GROUP_OBJ:
			acl->acl_entry[i].ae_id = ACL_UNDEFINED_ID; /* XXX */
			if (acl->acl_entry[i].ae_id != ACL_UNDEFINED_ID)
				return (EINVAL);
			num_acl_group_obj++;
			break;
		case ACL_USER:
			if (acl->acl_entry[i].ae_id == ACL_UNDEFINED_ID)
				return (EINVAL);
			num_acl_user++;
			break;
		case ACL_GROUP:
			if (acl->acl_entry[i].ae_id == ACL_UNDEFINED_ID)
				return (EINVAL);
			num_acl_group++;
			break;
		case ACL_OTHER:
			acl->acl_entry[i].ae_id = ACL_UNDEFINED_ID; /* XXX */
			if (acl->acl_entry[i].ae_id != ACL_UNDEFINED_ID)
				return (EINVAL);
			num_acl_other++;
			break;
		case ACL_MASK:
			acl->acl_entry[i].ae_id = ACL_UNDEFINED_ID; /* XXX */
			if (acl->acl_entry[i].ae_id != ACL_UNDEFINED_ID)
				return (EINVAL);
			num_acl_mask++;
			break;
		default:
			return (EINVAL);
		}
		/*
		 * Check for valid perm entries.
		 */
		if ((acl->acl_entry[i].ae_perm | ACL_PERM_BITS) !=
		    ACL_PERM_BITS)
			return (EINVAL);
	}
	if ((num_acl_user_obj != 1) || (num_acl_group_obj != 1) ||
	    (num_acl_other != 1) || (num_acl_mask != 0 && num_acl_mask != 1))
		return (EINVAL);
	if (((num_acl_group != 0) || (num_acl_user != 0)) &&
	    (num_acl_mask != 1))
		return (EINVAL);
	return (0);
}

/*
 * These calls wrap the real vnode operations, and are called by the syscall
 * code once the syscall has converted the path or file descriptor to a
 * vnode (unlocked).  The aclp pointer is assumed still to point to
 * userland, so this should not be consumed within the kernel except by
 * syscall code.  Other code should directly invoke VOP_{SET,GET}ACL.
 */

/*
 * Given a vnode, set its ACL.
 */
static int
vacl_set_acl(struct thread *td, struct vnode *vp, acl_type_t type,
    struct acl *aclp)
{
	struct acl inkernacl;
	int error;

	error = copyin(aclp, &inkernacl, sizeof(struct acl));
	if (error)
		return (error);
	VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	error = VOP_SETACL(vp, type, &inkernacl, td->td_proc->p_ucred, td);
	VOP_UNLOCK(vp, 0, td);
	return (error);
}

/*
 * Given a vnode, get its ACL.
 */
static int
vacl_get_acl(struct thread *td, struct vnode *vp, acl_type_t type,
    struct acl *aclp)
{
	struct acl inkernelacl;
	int error;

	VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	error = VOP_GETACL(vp, type, &inkernelacl, td->td_proc->p_ucred, td);
	VOP_UNLOCK(vp, 0, td);
	if (error == 0)
		error = copyout(&inkernelacl, aclp, sizeof(struct acl));
	return (error);
}

/*
 * Given a vnode, delete its ACL.
 */
static int
vacl_delete(struct thread *td, struct vnode *vp, acl_type_t type)
{
	int error;

	VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	error = VOP_SETACL(vp, ACL_TYPE_DEFAULT, 0, td->td_proc->p_ucred,
	    td);
	VOP_UNLOCK(vp, 0, td);
	return (error);
}

/*
 * Given a vnode, check whether an ACL is appropriate for it.
 */
static int
vacl_aclcheck(struct thread *td, struct vnode *vp, acl_type_t type,
    struct acl *aclp)
{
	struct acl inkernelacl;
	int error;

	error = copyin(aclp, &inkernelacl, sizeof(struct acl));
	if (error)
		return (error);
	error = VOP_ACLCHECK(vp, type, &inkernelacl, td->td_proc->p_ucred,
	    td);
	return (error);
}

/*
 * syscalls -- convert the path/fd to a vnode, and call vacl_whatever.
 * Don't need to lock, as the vacl_ code will get/release any locks
 * required.
 */

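/*
 * Illustrative userland usage (not part of this file): these system calls
 * are normally reached through the POSIX.1e library routines in libc rather
 * than invoked directly, e.g.:
 *
 *	acl_t acl;
 *
 *	acl = acl_get_file("/some/path", ACL_TYPE_ACCESS);
 *	if (acl != NULL) {
 *		...
 *		acl_free(acl);
 *	}
 */
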
/*
 * Given a file path, get an ACL for it
 *
 * MPSAFE
 */
int
__acl_get_file(struct thread *td, struct __acl_get_file_args *uap)
{
	struct nameidata nd;
	int error;

	mtx_lock(&Giant);
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
	error = namei(&nd);
	if (error == 0) {
		error = vacl_get_acl(td, nd.ni_vp, SCARG(uap, type),
		    SCARG(uap, aclp));
		NDFREE(&nd, 0);
	}
	mtx_unlock(&Giant);
	return (error);
}

/*
 * Given a file path, set an ACL for it
 *
 * MPSAFE
 */
int
__acl_set_file(struct thread *td, struct __acl_set_file_args *uap)
{
	struct nameidata nd;
	int error;

	mtx_lock(&Giant);
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
	error = namei(&nd);
	if (error == 0) {
		error = vacl_set_acl(td, nd.ni_vp, SCARG(uap, type),
		    SCARG(uap, aclp));
		NDFREE(&nd, 0);
	}
	mtx_unlock(&Giant);
	return (error);
}

/*
 * Given a file descriptor, get an ACL for it
 *
 * MPSAFE
 */
int
__acl_get_fd(struct thread *td, struct __acl_get_fd_args *uap)
{
	struct file *fp;
	int error;

	mtx_lock(&Giant);
	error = getvnode(td->td_proc->p_fd, SCARG(uap, filedes), &fp);
	if (error == 0) {
		error = vacl_get_acl(td, (struct vnode *)fp->f_data,
		    SCARG(uap, type), SCARG(uap, aclp));
	}
	mtx_unlock(&Giant);
	return (error);
}

/*
 * Given a file descriptor, set an ACL for it
 *
 * MPSAFE
 */
int
__acl_set_fd(struct thread *td, struct __acl_set_fd_args *uap)
{
	struct file *fp;
	int error;

	mtx_lock(&Giant);
	error = getvnode(td->td_proc->p_fd, SCARG(uap, filedes), &fp);
	if (error == 0) {
		error = vacl_set_acl(td, (struct vnode *)fp->f_data,
		    SCARG(uap, type), SCARG(uap, aclp));
	}
	mtx_unlock(&Giant);
	return (error);
}

/*
 * Given a file path, delete an ACL from it.
 *
 * MPSAFE
 */
int
__acl_delete_file(struct thread *td, struct __acl_delete_file_args *uap)
{
	struct nameidata nd;
	int error;

	mtx_lock(&Giant);
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
	error = namei(&nd);
	if (error == 0) {
		error = vacl_delete(td, nd.ni_vp, SCARG(uap, type));
		NDFREE(&nd, 0);
	}
	mtx_unlock(&Giant);
	return (error);
}

/*
 * Given a file descriptor, delete an ACL from it.
 *
 * MPSAFE
 */
int
__acl_delete_fd(struct thread *td, struct __acl_delete_fd_args *uap)
{
	struct file *fp;
	int error;

	mtx_lock(&Giant);
	error = getvnode(td->td_proc->p_fd, SCARG(uap, filedes), &fp);
	if (error == 0) {
		error = vacl_delete(td, (struct vnode *)fp->f_data,
		    SCARG(uap, type));
	}
	mtx_unlock(&Giant);
	return (error);
}

/*
 * Given a file path, check an ACL for it
 *
 * MPSAFE
 */
int
__acl_aclcheck_file(struct thread *td, struct __acl_aclcheck_file_args *uap)
{
	struct nameidata nd;
	int error;

	mtx_lock(&Giant);
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
	error = namei(&nd);
	if (error == 0) {
		error = vacl_aclcheck(td, nd.ni_vp, SCARG(uap, type),
		    SCARG(uap, aclp));
		NDFREE(&nd, 0);
	}
	mtx_unlock(&Giant);
	return (error);
}

/*
 * Given a file descriptor, check an ACL for it
 *
 * MPSAFE
 */
int
__acl_aclcheck_fd(struct thread *td, struct __acl_aclcheck_fd_args *uap)
{
	struct file *fp;
	int error;

	mtx_lock(&Giant);
	error = getvnode(td->td_proc->p_fd, SCARG(uap, filedes), &fp);
	if (error == 0) {
		error = vacl_aclcheck(td, (struct vnode *)fp->f_data,
		    SCARG(uap, type), SCARG(uap, aclp));
	}
	mtx_unlock(&Giant);
	return (error);
}