/*-
 * Copyright (c) 1999-2001 Robert N. M. Watson
 * All rights reserved.
 *
 * This software was developed by Robert Watson for the TrustedBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
/*
 * Developed by the TrustedBSD Project.
 * Support for POSIX.1e access control lists.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/sysent.h>
#include <sys/errno.h>
#include <sys/stat.h>
#include <sys/acl.h>

MALLOC_DEFINE(M_ACL, "acl", "access control list");

static int vacl_set_acl(struct thread *td, struct vnode *vp, acl_type_t type,
    struct acl *aclp);
static int vacl_get_acl(struct thread *td, struct vnode *vp, acl_type_t type,
    struct acl *aclp);
static int vacl_aclcheck(struct thread *td, struct vnode *vp,
    acl_type_t type, struct acl *aclp);

/*
 * Implement a version of vaccess() that understands POSIX.1e ACL semantics.
 * Return 0 on success, else an errno value.  Should be merged into
 * vaccess() eventually.
 */
int
vaccess_acl_posix1e(enum vtype type, uid_t file_uid, gid_t file_gid,
    struct acl *acl, mode_t acc_mode, struct ucred *cred, int *privused)
{
        struct acl_entry *acl_other, *acl_mask;
        mode_t dac_granted;
        mode_t cap_granted;
        mode_t acl_mask_granted;
        int group_matched, i;

        /*
         * Look for a normal, non-privileged way to access the file/directory
         * as requested.  If it exists, go with that.  Otherwise, attempt
         * to use privileges granted via cap_granted.  In some cases,
         * which privileges to use may be ambiguous due to "best match",
         * in which case fall back on first match for the time being.
         */
        if (privused != NULL)
                *privused = 0;

        /*
         * Determine privileges now, but don't apply until we've found
         * a DAC entry that matches but has failed to allow access.
         */
#ifndef CAPABILITIES
        if (suser_cred(cred, PRISON_ROOT) == 0)
                cap_granted = (VEXEC | VREAD | VWRITE | VADMIN);
        else
                cap_granted = 0;
#else
        cap_granted = 0;

        if (type == VDIR) {
                if ((acc_mode & VEXEC) && !cap_check(cred, NULL,
                    CAP_DAC_READ_SEARCH, PRISON_ROOT))
                        cap_granted |= VEXEC;
        } else {
                if ((acc_mode & VEXEC) && !cap_check(cred, NULL,
                    CAP_DAC_EXECUTE, PRISON_ROOT))
                        cap_granted |= VEXEC;
        }

        if ((acc_mode & VREAD) && !cap_check(cred, NULL, CAP_DAC_READ_SEARCH,
            PRISON_ROOT))
                cap_granted |= VREAD;

        if ((acc_mode & VWRITE) && !cap_check(cred, NULL, CAP_DAC_WRITE,
            PRISON_ROOT))
                cap_granted |= VWRITE;

        if ((acc_mode & VADMIN) && !cap_check(cred, NULL, CAP_FOWNER,
            PRISON_ROOT))
                cap_granted |= VADMIN;
#endif /* CAPABILITIES */

        /*
         * The owner matches if the effective uid associated with the
         * credential matches that of the ACL_USER_OBJ entry.  While we're
         * doing the first scan, also cache the location of the ACL_MASK
         * and ACL_OTHER entries, preventing some future iterations.
         */
        acl_mask = acl_other = NULL;
        for (i = 0; i < acl->acl_cnt; i++) {
                switch (acl->acl_entry[i].ae_tag) {
                case ACL_USER_OBJ:
                        if (file_uid != cred->cr_uid)
                                break;
                        dac_granted = 0;
                        dac_granted |= VADMIN;
                        if (acl->acl_entry[i].ae_perm & ACL_EXECUTE)
                                dac_granted |= VEXEC;
                        if (acl->acl_entry[i].ae_perm & ACL_READ)
                                dac_granted |= VREAD;
                        if (acl->acl_entry[i].ae_perm & ACL_WRITE)
                                dac_granted |= VWRITE;
                        if ((acc_mode & dac_granted) == acc_mode)
                                return (0);
                        if ((acc_mode & (dac_granted | cap_granted)) ==
                            acc_mode) {
                                if (privused != NULL)
                                        *privused = 1;
                                return (0);
                        }
                        goto error;

                case ACL_MASK:
                        acl_mask = &acl->acl_entry[i];
                        break;

                case ACL_OTHER:
                        acl_other = &acl->acl_entry[i];
                        break;

                default:
                        break;
                }
        }

        /*
         * An ACL_OTHER entry should always exist in a valid access
         * ACL.  If it doesn't, then generate a serious failure.  For now,
         * this means a debugging message and EPERM, but in the future
         * should probably be a panic.
         */
        if (acl_other == NULL) {
                /*
                 * XXX This should never happen
                 */
                printf("vaccess_acl_posix1e: ACL_OTHER missing\n");
                return (EPERM);
        }

        /*
         * Checks against ACL_USER, ACL_GROUP_OBJ, and ACL_GROUP fields
         * are masked by an ACL_MASK entry, if any.  As such, first identify
         * the ACL_MASK field, then iterate through identifying potential
         * user matches, then group matches.  If there is no ACL_MASK,
         * assume that the mask allows all requests to succeed.
         */
        if (acl_mask != NULL) {
                acl_mask_granted = 0;
                if (acl_mask->ae_perm & ACL_EXECUTE)
                        acl_mask_granted |= VEXEC;
                if (acl_mask->ae_perm & ACL_READ)
                        acl_mask_granted |= VREAD;
                if (acl_mask->ae_perm & ACL_WRITE)
                        acl_mask_granted |= VWRITE;
        } else
                acl_mask_granted = VEXEC | VREAD | VWRITE;

        /*
         * Iterate through user ACL entries.  Do checks twice, first
         * without privilege, and then if a match is found but failed,
         * a second time with privilege.
         */

        /*
         * Check ACL_USER ACL entries.
         */
        for (i = 0; i < acl->acl_cnt; i++) {
                switch (acl->acl_entry[i].ae_tag) {
                case ACL_USER:
                        if (acl->acl_entry[i].ae_id != cred->cr_uid)
                                break;
                        dac_granted = 0;
                        if (acl->acl_entry[i].ae_perm & ACL_EXECUTE)
                                dac_granted |= VEXEC;
                        if (acl->acl_entry[i].ae_perm & ACL_READ)
                                dac_granted |= VREAD;
                        if (acl->acl_entry[i].ae_perm & ACL_WRITE)
                                dac_granted |= VWRITE;
                        dac_granted &= acl_mask_granted;
                        if ((acc_mode & dac_granted) == acc_mode)
                                return (0);
                        if ((acc_mode & (dac_granted | cap_granted)) !=
                            acc_mode)
                                goto error;

                        if (privused != NULL)
                                *privused = 1;
                        return (0);
                }
        }

        /*
         * Group match is best-match, not first-match, so find a
         * "best" match.  Iterate across, testing each potential group
         * match.  Make sure we keep track of whether we found a match
         * or not, so that we know if we should try again with any
         * available privilege, or if we should move on to ACL_OTHER.
         */
        group_matched = 0;
        for (i = 0; i < acl->acl_cnt; i++) {
                switch (acl->acl_entry[i].ae_tag) {
                case ACL_GROUP_OBJ:
                        if (!groupmember(file_gid, cred))
                                break;
                        dac_granted = 0;
                        if (acl->acl_entry[i].ae_perm & ACL_EXECUTE)
                                dac_granted |= VEXEC;
                        if (acl->acl_entry[i].ae_perm & ACL_READ)
                                dac_granted |= VREAD;
                        if (acl->acl_entry[i].ae_perm & ACL_WRITE)
                                dac_granted |= VWRITE;
                        dac_granted &= acl_mask_granted;

                        if ((acc_mode & dac_granted) == acc_mode)
                                return (0);

                        group_matched = 1;
                        break;

                case ACL_GROUP:
                        if (!groupmember(acl->acl_entry[i].ae_id, cred))
                                break;
                        dac_granted = 0;
                        if (acl->acl_entry[i].ae_perm & ACL_EXECUTE)
                                dac_granted |= VEXEC;
                        if (acl->acl_entry[i].ae_perm & ACL_READ)
                                dac_granted |= VREAD;
                        if (acl->acl_entry[i].ae_perm & ACL_WRITE)
                                dac_granted |= VWRITE;
                        dac_granted &= acl_mask_granted;

                        if ((acc_mode & dac_granted) == acc_mode)
                                return (0);

                        group_matched = 1;
                        break;

                default:
                        break;
                }
        }

        if (group_matched == 1) {
                /*
                 * There was a match, but it did not grant rights via
                 * pure DAC.  Try again, this time with privilege.
                 */
                for (i = 0; i < acl->acl_cnt; i++) {
                        switch (acl->acl_entry[i].ae_tag) {
                        case ACL_GROUP_OBJ:
                                if (!groupmember(file_gid, cred))
                                        break;
                                dac_granted = 0;
                                if (acl->acl_entry[i].ae_perm & ACL_EXECUTE)
                                        dac_granted |= VEXEC;
                                if (acl->acl_entry[i].ae_perm & ACL_READ)
                                        dac_granted |= VREAD;
                                if (acl->acl_entry[i].ae_perm & ACL_WRITE)
                                        dac_granted |= VWRITE;
                                dac_granted &= acl_mask_granted;

                                if ((acc_mode & (dac_granted | cap_granted)) !=
                                    acc_mode)
                                        break;

                                if (privused != NULL)
                                        *privused = 1;
                                return (0);

                        case ACL_GROUP:
                                if (!groupmember(acl->acl_entry[i].ae_id,
                                    cred))
                                        break;
                                dac_granted = 0;
                                if (acl->acl_entry[i].ae_perm & ACL_EXECUTE)
                                        dac_granted |= VEXEC;
                                if (acl->acl_entry[i].ae_perm & ACL_READ)
                                        dac_granted |= VREAD;
                                if (acl->acl_entry[i].ae_perm & ACL_WRITE)
                                        dac_granted |= VWRITE;
                                dac_granted &= acl_mask_granted;

                                if ((acc_mode & (dac_granted | cap_granted)) !=
                                    acc_mode)
                                        break;

                                if (privused != NULL)
                                        *privused = 1;
                                return (0);

                        default:
                                break;
                        }
                }
                /*
                 * Even with privilege, group membership was not sufficient.
                 * Return failure.
                 */
                goto error;
        }

        /*
         * Fall back on ACL_OTHER.  ACL_MASK is not applied to ACL_OTHER.
         */
        dac_granted = 0;
        if (acl_other->ae_perm & ACL_EXECUTE)
                dac_granted |= VEXEC;
        if (acl_other->ae_perm & ACL_READ)
                dac_granted |= VREAD;
        if (acl_other->ae_perm & ACL_WRITE)
                dac_granted |= VWRITE;

        if ((acc_mode & dac_granted) == acc_mode)
                return (0);
        if ((acc_mode & (dac_granted | cap_granted)) == acc_mode) {
                if (privused != NULL)
                        *privused = 1;
                return (0);
        }

error:
        return ((acc_mode & VADMIN) ? EPERM : EACCES);
}

/*
 * For the purposes of filesystems maintaining the _OBJ entries in an
 * inode with a mode_t field, this routine converts a mode_t entry
 * to an acl_perm_t.
 */
acl_perm_t
acl_posix1e_mode_to_perm(acl_tag_t tag, mode_t mode)
{
        acl_perm_t perm = 0;

        switch(tag) {
        case ACL_USER_OBJ:
                if (mode & S_IXUSR)
                        perm |= ACL_EXECUTE;
                if (mode & S_IRUSR)
                        perm |= ACL_READ;
                if (mode & S_IWUSR)
                        perm |= ACL_WRITE;
                return (perm);

        case ACL_GROUP_OBJ:
                if (mode & S_IXGRP)
                        perm |= ACL_EXECUTE;
                if (mode & S_IRGRP)
                        perm |= ACL_READ;
                if (mode & S_IWGRP)
                        perm |= ACL_WRITE;
                return (perm);

        case ACL_OTHER:
                if (mode & S_IXOTH)
                        perm |= ACL_EXECUTE;
                if (mode & S_IROTH)
                        perm |= ACL_READ;
                if (mode & S_IWOTH)
                        perm |= ACL_WRITE;
                return (perm);

        default:
                printf("acl_posix1e_mode_to_perm: invalid tag (%d)\n", tag);
                return (0);
        }
}

/*
 * Given inode information (uid, gid, mode), return an acl entry of the
 * appropriate type.
 */
struct acl_entry
acl_posix1e_mode_to_entry(acl_tag_t tag, uid_t uid, gid_t gid, mode_t mode)
{
        struct acl_entry acl_entry;

        acl_entry.ae_tag = tag;
        acl_entry.ae_perm = acl_posix1e_mode_to_perm(tag, mode);
        switch(tag) {
        case ACL_USER_OBJ:
                acl_entry.ae_id = uid;
                break;

        case ACL_GROUP_OBJ:
                acl_entry.ae_id = gid;
                break;

        case ACL_OTHER:
                acl_entry.ae_id = ACL_UNDEFINED_ID;
                break;

        default:
                acl_entry.ae_id = ACL_UNDEFINED_ID;
                printf("acl_posix1e_mode_to_entry: invalid tag (%d)\n", tag);
        }

        return (acl_entry);
}

/*
 * Utility function to generate a file mode given appropriate ACL entries.
 */
mode_t
acl_posix1e_perms_to_mode(struct acl_entry *acl_user_obj_entry,
    struct acl_entry *acl_group_obj_entry, struct acl_entry *acl_other_entry)
{
        mode_t mode;

        mode = 0;
        if (acl_user_obj_entry->ae_perm & ACL_EXECUTE)
                mode |= S_IXUSR;
        if (acl_user_obj_entry->ae_perm & ACL_READ)
                mode |= S_IRUSR;
        if (acl_user_obj_entry->ae_perm & ACL_WRITE)
                mode |= S_IWUSR;
        if (acl_group_obj_entry->ae_perm & ACL_EXECUTE)
                mode |= S_IXGRP;
        if (acl_group_obj_entry->ae_perm & ACL_READ)
                mode |= S_IRGRP;
        if (acl_group_obj_entry->ae_perm & ACL_WRITE)
                mode |= S_IWGRP;
        if (acl_other_entry->ae_perm & ACL_EXECUTE)
                mode |= S_IXOTH;
        if (acl_other_entry->ae_perm & ACL_READ)
                mode |= S_IROTH;
        if (acl_other_entry->ae_perm & ACL_WRITE)
                mode |= S_IWOTH;

        return (mode);
}

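/*
 * Illustrative sketch (not part of the original code): how a filesystem
 * might use the helpers above to round-trip between an inode's mode_t and
 * the three _OBJ entries.  The inode pointer "ip" and its i_uid, i_gid and
 * i_mode fields are hypothetical names, not defined in this file.
 *
 *	struct acl_entry user_obj, group_obj, other;
 *	mode_t mode;
 *
 *	user_obj = acl_posix1e_mode_to_entry(ACL_USER_OBJ, ip->i_uid,
 *	    ip->i_gid, ip->i_mode);
 *	group_obj = acl_posix1e_mode_to_entry(ACL_GROUP_OBJ, ip->i_uid,
 *	    ip->i_gid, ip->i_mode);
 *	other = acl_posix1e_mode_to_entry(ACL_OTHER, ip->i_uid,
 *	    ip->i_gid, ip->i_mode);
 *	mode = acl_posix1e_perms_to_mode(&user_obj, &group_obj, &other);
 *
 * For an i_mode of 0640 this yields ACL_READ | ACL_WRITE on ACL_USER_OBJ,
 * ACL_READ on ACL_GROUP_OBJ, no permissions on ACL_OTHER, and a
 * reconstructed mode of 0640.
 */
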
/*
 * Perform a syntactic check of the ACL, sufficient to allow an
 * implementing filesystem to determine if it should accept this and
 * rely on the POSIX.1e ACL properties.
 */
int
acl_posix1e_check(struct acl *acl)
{
        int num_acl_user_obj, num_acl_user, num_acl_group_obj, num_acl_group;
        int num_acl_mask, num_acl_other, i;

        /*
         * Verify that the number of entries does not exceed the maximum
         * defined for acl_t.
         * Verify that the correct number of various sorts of ae_tags are
         * present:
         *   Exactly one ACL_USER_OBJ
         *   Exactly one ACL_GROUP_OBJ
         *   Exactly one ACL_OTHER
         *   If any ACL_USER or ACL_GROUP entries appear, then exactly one
         *   ACL_MASK entry must also appear.
         * Verify that all ae_perm entries are in ACL_PERM_BITS.
         * Verify all ae_tag entries are understood by this implementation.
         * Note: Does not check for uniqueness of qualifier (ae_id) field.
         */
        num_acl_user_obj = num_acl_user = num_acl_group_obj = num_acl_group =
            num_acl_mask = num_acl_other = 0;
        if (acl->acl_cnt > ACL_MAX_ENTRIES || acl->acl_cnt < 0)
                return (EINVAL);
        for (i = 0; i < acl->acl_cnt; i++) {
                /*
                 * Check for a valid tag.
                 */
                switch(acl->acl_entry[i].ae_tag) {
                case ACL_USER_OBJ:
                        acl->acl_entry[i].ae_id = ACL_UNDEFINED_ID; /* XXX */
                        if (acl->acl_entry[i].ae_id != ACL_UNDEFINED_ID)
                                return (EINVAL);
                        num_acl_user_obj++;
                        break;
                case ACL_GROUP_OBJ:
                        acl->acl_entry[i].ae_id = ACL_UNDEFINED_ID; /* XXX */
                        if (acl->acl_entry[i].ae_id != ACL_UNDEFINED_ID)
                                return (EINVAL);
                        num_acl_group_obj++;
                        break;
                case ACL_USER:
                        if (acl->acl_entry[i].ae_id == ACL_UNDEFINED_ID)
                                return (EINVAL);
                        num_acl_user++;
                        break;
                case ACL_GROUP:
                        if (acl->acl_entry[i].ae_id == ACL_UNDEFINED_ID)
                                return (EINVAL);
                        num_acl_group++;
                        break;
                case ACL_OTHER:
                        acl->acl_entry[i].ae_id = ACL_UNDEFINED_ID; /* XXX */
                        if (acl->acl_entry[i].ae_id != ACL_UNDEFINED_ID)
                                return (EINVAL);
                        num_acl_other++;
                        break;
                case ACL_MASK:
                        acl->acl_entry[i].ae_id = ACL_UNDEFINED_ID; /* XXX */
                        if (acl->acl_entry[i].ae_id != ACL_UNDEFINED_ID)
                                return (EINVAL);
                        num_acl_mask++;
                        break;
                default:
                        return (EINVAL);
                }
                /*
                 * Check for valid perm entries.
                 */
                if ((acl->acl_entry[i].ae_perm | ACL_PERM_BITS) !=
                    ACL_PERM_BITS)
                        return (EINVAL);
        }
        if ((num_acl_user_obj != 1) || (num_acl_group_obj != 1) ||
            (num_acl_other != 1) || (num_acl_mask != 0 && num_acl_mask != 1))
                return (EINVAL);
        if (((num_acl_group != 0) || (num_acl_user != 0)) &&
            (num_acl_mask != 1))
                return (EINVAL);
        return (0);
}

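/*
 * Illustrative sketch (not part of the original code): the smallest access
 * ACL that acl_posix1e_check() accepts carries exactly one ACL_USER_OBJ,
 * one ACL_GROUP_OBJ and one ACL_OTHER entry, and no ACL_MASK.  The
 * initializer below is a hypothetical example using the acl/acl_entry
 * fields referenced in this file.
 *
 *	struct acl trivial_acl = {
 *		.acl_cnt = 3,
 *		.acl_entry = {
 *			{ .ae_tag = ACL_USER_OBJ,
 *			  .ae_id = ACL_UNDEFINED_ID,
 *			  .ae_perm = ACL_READ | ACL_WRITE },
 *			{ .ae_tag = ACL_GROUP_OBJ,
 *			  .ae_id = ACL_UNDEFINED_ID,
 *			  .ae_perm = ACL_READ },
 *			{ .ae_tag = ACL_OTHER,
 *			  .ae_id = ACL_UNDEFINED_ID,
 *			  .ae_perm = 0 },
 *		},
 *	};
 *
 * acl_posix1e_check(&trivial_acl) returns 0; adding any ACL_USER or
 * ACL_GROUP entry without also adding exactly one ACL_MASK entry makes it
 * return EINVAL instead.
 */
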
/*
 * These calls wrap the real vnode operations, and are called by the
 * syscall code once the syscall has converted the path or file
 * descriptor to a vnode (unlocked).  The aclp pointer is assumed
 * still to point to userland, so this should not be consumed within
 * the kernel except by syscall code.  Other code should directly
 * invoke VOP_{SET,GET}ACL.
 */

/*
 * Given a vnode, set its ACL.
 */
static int
vacl_set_acl(struct thread *td, struct vnode *vp, acl_type_t type,
    struct acl *aclp)
{
        struct acl inkernacl;
        struct mount *mp;
        int error;

        error = copyin(aclp, &inkernacl, sizeof(struct acl));
        if (error)
                return (error);
        error = vn_start_write(vp, &mp, V_WAIT | PCATCH);
        if (error != 0)
                return (error);
        VOP_LEASE(vp, td, td->td_ucred, LEASE_WRITE);
        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
        error = VOP_SETACL(vp, type, &inkernacl, td->td_ucred, td);
        VOP_UNLOCK(vp, 0, td);
        vn_finished_write(mp);
        return (error);
}

/*
 * Given a vnode, get its ACL.
 */
static int
vacl_get_acl(struct thread *td, struct vnode *vp, acl_type_t type,
    struct acl *aclp)
{
        struct acl inkernelacl;
        int error;

        VOP_LEASE(vp, td, td->td_ucred, LEASE_WRITE);
        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
        error = VOP_GETACL(vp, type, &inkernelacl, td->td_ucred, td);
        VOP_UNLOCK(vp, 0, td);
        if (error == 0)
                error = copyout(&inkernelacl, aclp, sizeof(struct acl));
        return (error);
}

/*
 * Given a vnode, delete its ACL.
 */
static int
vacl_delete(struct thread *td, struct vnode *vp, acl_type_t type)
{
        struct mount *mp;
        int error;

        error = vn_start_write(vp, &mp, V_WAIT | PCATCH);
        if (error)
                return (error);
        VOP_LEASE(vp, td, td->td_ucred, LEASE_WRITE);
        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
        error = VOP_SETACL(vp, ACL_TYPE_DEFAULT, 0, td->td_ucred, td);
        VOP_UNLOCK(vp, 0, td);
        vn_finished_write(mp);
        return (error);
}

/*
 * Given a vnode, check whether an ACL is appropriate for it.
 */
static int
vacl_aclcheck(struct thread *td, struct vnode *vp, acl_type_t type,
    struct acl *aclp)
{
        struct acl inkernelacl;
        int error;

        error = copyin(aclp, &inkernelacl, sizeof(struct acl));
        if (error)
                return (error);
        error = VOP_ACLCHECK(vp, type, &inkernelacl, td->td_ucred, td);
        return (error);
}

/*
 * syscalls -- convert the path/fd to a vnode, and call vacl_whatever.
 * Don't need to lock, as the vacl_ code will get/release any locks
 * required.
 */

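/*
 * Illustrative sketch (not part of this file): from userland, these system
 * calls are normally reached through the POSIX.1e library wrappers in libc
 * rather than invoked directly; "/tmp/somefile" below is just a placeholder
 * path.
 *
 *	#include <sys/types.h>
 *	#include <sys/acl.h>
 *
 *	acl_t acl = acl_get_file("/tmp/somefile", ACL_TYPE_ACCESS);
 *	if (acl != NULL) {
 *		if (acl_valid(acl) == 0)
 *			(void)acl_set_file("/tmp/somefile",
 *			    ACL_TYPE_ACCESS, acl);
 *		acl_free(acl);
 *	}
 *
 * acl_get_file(3) and acl_set_file(3) end up in __acl_get_file() and
 * __acl_set_file() below.
 */
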
/*
 * Given a file path, get an ACL for it.
 *
 * MPSAFE
 */
int
__acl_get_file(struct thread *td, struct __acl_get_file_args *uap)
{
        struct nameidata nd;
        int error;

        mtx_lock(&Giant);
        NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
        error = namei(&nd);
        if (error == 0) {
                error = vacl_get_acl(td, nd.ni_vp, SCARG(uap, type),
                    SCARG(uap, aclp));
                NDFREE(&nd, 0);
        }
        mtx_unlock(&Giant);
        return (error);
}

/*
 * Given a file path, set an ACL for it.
 *
 * MPSAFE
 */
int
__acl_set_file(struct thread *td, struct __acl_set_file_args *uap)
{
        struct nameidata nd;
        int error;

        mtx_lock(&Giant);
        NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
        error = namei(&nd);
        if (error == 0) {
                error = vacl_set_acl(td, nd.ni_vp, SCARG(uap, type),
                    SCARG(uap, aclp));
                NDFREE(&nd, 0);
        }
        mtx_unlock(&Giant);
        return (error);
}

/*
 * Given a file descriptor, get an ACL for it.
 *
 * MPSAFE
 */
int
__acl_get_fd(struct thread *td, struct __acl_get_fd_args *uap)
{
        struct file *fp;
        int error;

        mtx_lock(&Giant);
        error = getvnode(td->td_proc->p_fd, SCARG(uap, filedes), &fp);
        if (error == 0) {
                error = vacl_get_acl(td, (struct vnode *)fp->f_data,
                    SCARG(uap, type), SCARG(uap, aclp));
                fdrop(fp, td);
        }
        mtx_unlock(&Giant);
        return (error);
}

/*
 * Given a file descriptor, set an ACL for it.
 *
 * MPSAFE
 */
int
__acl_set_fd(struct thread *td, struct __acl_set_fd_args *uap)
{
        struct file *fp;
        int error;

        mtx_lock(&Giant);
        error = getvnode(td->td_proc->p_fd, SCARG(uap, filedes), &fp);
        if (error == 0) {
                error = vacl_set_acl(td, (struct vnode *)fp->f_data,
                    SCARG(uap, type), SCARG(uap, aclp));
                fdrop(fp, td);
        }
        mtx_unlock(&Giant);
        return (error);
}

/*
 * Given a file path, delete an ACL from it.
 *
 * MPSAFE
 */
int
__acl_delete_file(struct thread *td, struct __acl_delete_file_args *uap)
{
        struct nameidata nd;
        int error;

        mtx_lock(&Giant);
        NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
        error = namei(&nd);
        if (error == 0) {
                error = vacl_delete(td, nd.ni_vp, SCARG(uap, type));
                NDFREE(&nd, 0);
        }
        mtx_unlock(&Giant);
        return (error);
}

/*
 * Given a file descriptor, delete an ACL from it.
 *
 * MPSAFE
 */
int
__acl_delete_fd(struct thread *td, struct __acl_delete_fd_args *uap)
{
        struct file *fp;
        int error;

        mtx_lock(&Giant);
        error = getvnode(td->td_proc->p_fd, SCARG(uap, filedes), &fp);
        if (error == 0) {
                error = vacl_delete(td, (struct vnode *)fp->f_data,
                    SCARG(uap, type));
                fdrop(fp, td);
        }
        mtx_unlock(&Giant);
        return (error);
}

/*
 * Given a file path, check an ACL for it.
 *
 * MPSAFE
 */
int
__acl_aclcheck_file(struct thread *td, struct __acl_aclcheck_file_args *uap)
{
        struct nameidata nd;
        int error;

        mtx_lock(&Giant);
        NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
        error = namei(&nd);
        if (error == 0) {
                error = vacl_aclcheck(td, nd.ni_vp, SCARG(uap, type),
                    SCARG(uap, aclp));
                NDFREE(&nd, 0);
        }
        mtx_unlock(&Giant);
        return (error);
}

/*
 * Given a file descriptor, check an ACL for it.
 *
 * MPSAFE
 */
int
__acl_aclcheck_fd(struct thread *td, struct __acl_aclcheck_fd_args *uap)
{
        struct file *fp;
        int error;

        mtx_lock(&Giant);
        error = getvnode(td->td_proc->p_fd, SCARG(uap, filedes), &fp);
        if (error == 0) {
                error = vacl_aclcheck(td, (struct vnode *)fp->f_data,
                    SCARG(uap, type), SCARG(uap, aclp));
                fdrop(fp, td);
        }
        mtx_unlock(&Giant);
        return (error);
}