/*-
 * Copyright (c) 1999-2001 Robert N. M. Watson
 * All rights reserved.
 *
 * This software was developed by Robert Watson for the TrustedBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
/*
 * Developed by the TrustedBSD Project.
 * Support for POSIX.1e access control lists.
 */

#include "opt_cap.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/sysent.h>
#include <sys/errno.h>
#include <sys/stat.h>
#include <sys/acl.h>

MALLOC_DEFINE(M_ACL, "acl", "access control list");

static int vacl_set_acl(struct thread *td, struct vnode *vp, acl_type_t type,
    struct acl *aclp);
static int vacl_get_acl(struct thread *td, struct vnode *vp, acl_type_t type,
    struct acl *aclp);
static int vacl_aclcheck(struct thread *td, struct vnode *vp,
    acl_type_t type, struct acl *aclp);

/*
 * Implement a version of vaccess() that understands POSIX.1e ACL semantics.
 * Return 0 on success, else an errno value.  Should be merged into
 * vaccess() eventually.
 */
int
vaccess_acl_posix1e(enum vtype type, uid_t file_uid, gid_t file_gid,
    struct acl *acl, mode_t acc_mode, struct ucred *cred, int *privused)
{
	struct acl_entry *acl_other, *acl_mask;
	mode_t dac_granted;
	mode_t cap_granted;
	mode_t acl_mask_granted;
	int group_matched, i;

	/*
	 * Look for a normal, non-privileged way to access the file/directory
	 * as requested.  If it exists, go with that.  Otherwise, attempt
	 * to use privileges granted via cap_granted.  In some cases,
	 * which privileges to use may be ambiguous due to "best match",
	 * in which case fall back on first match for the time being.
	 */
	if (privused != NULL)
		*privused = 0;

	/*
	 * Determine privileges now, but don't apply until we've found
	 * a DAC entry that matches but has failed to allow access.
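	 *
	 * As an illustrative example: if a matching DAC entry below fails
	 * to grant VWRITE but cap_granted includes VWRITE, access is
	 * granted on the second check and *privused is set so the caller
	 * knows privilege was exercised.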
	 */
#ifndef CAPABILITIES
	if (suser_xxx(cred, NULL, PRISON_ROOT) == 0)
		cap_granted = (VEXEC | VREAD | VWRITE | VADMIN);
	else
		cap_granted = 0;
#else
	cap_granted = 0;

	if (type == VDIR) {
		if ((acc_mode & VEXEC) && !cap_check(cred, NULL,
		    CAP_DAC_READ_SEARCH, PRISON_ROOT))
			cap_granted |= VEXEC;
	} else {
		if ((acc_mode & VEXEC) && !cap_check(cred, NULL,
		    CAP_DAC_EXECUTE, PRISON_ROOT))
			cap_granted |= VEXEC;
	}

	if ((acc_mode & VREAD) && !cap_check(cred, NULL, CAP_DAC_READ_SEARCH,
	    PRISON_ROOT))
		cap_granted |= VREAD;

	if ((acc_mode & VWRITE) && !cap_check(cred, NULL, CAP_DAC_WRITE,
	    PRISON_ROOT))
		cap_granted |= VWRITE;

	if ((acc_mode & VADMIN) && !cap_check(cred, NULL, CAP_FOWNER,
	    PRISON_ROOT))
		cap_granted |= VADMIN;
#endif /* CAPABILITIES */

	/*
	 * The owner matches if the effective uid associated with the
	 * credential matches that of the ACL_USER_OBJ entry.  While we're
	 * doing the first scan, also cache the location of the ACL_MASK
	 * and ACL_OTHER entries, preventing some future iterations.
	 */
	acl_mask = acl_other = NULL;
	for (i = 0; i < acl->acl_cnt; i++) {
		switch (acl->acl_entry[i].ae_tag) {
		case ACL_USER_OBJ:
			if (file_uid != cred->cr_uid)
				break;
			dac_granted = 0;
			dac_granted |= VADMIN;
			if (acl->acl_entry[i].ae_perm & ACL_EXECUTE)
				dac_granted |= VEXEC;
			if (acl->acl_entry[i].ae_perm & ACL_READ)
				dac_granted |= VREAD;
			if (acl->acl_entry[i].ae_perm & ACL_WRITE)
				dac_granted |= VWRITE;
			if ((acc_mode & dac_granted) == acc_mode)
				return (0);
			if ((acc_mode & (dac_granted | cap_granted)) ==
			    acc_mode) {
				if (privused != NULL)
					*privused = 1;
				return (0);
			}
			goto error;

		case ACL_MASK:
			acl_mask = &acl->acl_entry[i];
			break;

		case ACL_OTHER:
			acl_other = &acl->acl_entry[i];
			break;

		default:
			break;
		}
	}

	/*
	 * An ACL_OTHER entry should always exist in a valid access
	 * ACL.  If it doesn't, then generate a serious failure.  For now,
	 * this means a debugging message and EPERM, but in the future
	 * should probably be a panic.
	 */
	if (acl_other == NULL) {
		/*
		 * XXX This should never happen
		 */
		printf("vaccess_acl_posix1e: ACL_OTHER missing\n");
		return (EPERM);
	}

	/*
	 * Checks against ACL_USER, ACL_GROUP_OBJ, and ACL_GROUP fields
	 * are masked by an ACL_MASK entry, if any.  As such, first identify
	 * the ACL_MASK field, then iterate through identifying potential
	 * user matches, then group matches.  If there is no ACL_MASK,
	 * assume that the mask allows all requests to succeed.
	 */
	if (acl_mask != NULL) {
		acl_mask_granted = 0;
		if (acl_mask->ae_perm & ACL_EXECUTE)
			acl_mask_granted |= VEXEC;
		if (acl_mask->ae_perm & ACL_READ)
			acl_mask_granted |= VREAD;
		if (acl_mask->ae_perm & ACL_WRITE)
			acl_mask_granted |= VWRITE;
	} else
		acl_mask_granted = VEXEC | VREAD | VWRITE;

	/*
	 * Iterate through user ACL entries.  Do checks twice, first
	 * without privilege, and then if a match is found but failed,
	 * a second time with privilege.
	 */

	/*
	 * Check ACL_USER ACL entries.
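	 *
	 * For example (illustrative): an ACL_USER entry granting read and
	 * write that is masked by an ACL_MASK entry granting only read
	 * yields an effective grant of read only for that user.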
	 */
	for (i = 0; i < acl->acl_cnt; i++) {
		switch (acl->acl_entry[i].ae_tag) {
		case ACL_USER:
			if (acl->acl_entry[i].ae_id != cred->cr_uid)
				break;
			dac_granted = 0;
			if (acl->acl_entry[i].ae_perm & ACL_EXECUTE)
				dac_granted |= VEXEC;
			if (acl->acl_entry[i].ae_perm & ACL_READ)
				dac_granted |= VREAD;
			if (acl->acl_entry[i].ae_perm & ACL_WRITE)
				dac_granted |= VWRITE;
			dac_granted &= acl_mask_granted;
			if ((acc_mode & dac_granted) == acc_mode)
				return (0);
			if ((acc_mode & (dac_granted | cap_granted)) !=
			    acc_mode)
				goto error;

			if (privused != NULL)
				*privused = 1;
			return (0);
		}
	}

	/*
	 * Group match is best-match, not first-match, so find a
	 * "best" match.  Iterate across, testing each potential group
	 * match.  Make sure we keep track of whether we found a match
	 * or not, so that we know if we should try again with any
	 * available privilege, or if we should move on to ACL_OTHER.
	 */
	group_matched = 0;
	for (i = 0; i < acl->acl_cnt; i++) {
		switch (acl->acl_entry[i].ae_tag) {
		case ACL_GROUP_OBJ:
			if (!groupmember(file_gid, cred))
				break;
			dac_granted = 0;
			if (acl->acl_entry[i].ae_perm & ACL_EXECUTE)
				dac_granted |= VEXEC;
			if (acl->acl_entry[i].ae_perm & ACL_READ)
				dac_granted |= VREAD;
			if (acl->acl_entry[i].ae_perm & ACL_WRITE)
				dac_granted |= VWRITE;
			dac_granted &= acl_mask_granted;

			if ((acc_mode & dac_granted) == acc_mode)
				return (0);

			group_matched = 1;
			break;

		case ACL_GROUP:
			if (!groupmember(acl->acl_entry[i].ae_id, cred))
				break;
			dac_granted = 0;
			if (acl->acl_entry[i].ae_perm & ACL_EXECUTE)
				dac_granted |= VEXEC;
			if (acl->acl_entry[i].ae_perm & ACL_READ)
				dac_granted |= VREAD;
			if (acl->acl_entry[i].ae_perm & ACL_WRITE)
				dac_granted |= VWRITE;
			dac_granted &= acl_mask_granted;

			if ((acc_mode & dac_granted) == acc_mode)
				return (0);

			group_matched = 1;
			break;

		default:
			break;
		}
	}

	if (group_matched == 1) {
		/*
		 * There was a match, but it did not grant rights via
		 * pure DAC.  Try again, this time with privilege.
		 */
		for (i = 0; i < acl->acl_cnt; i++) {
			switch (acl->acl_entry[i].ae_tag) {
			case ACL_GROUP_OBJ:
				if (!groupmember(file_gid, cred))
					break;
				dac_granted = 0;
				if (acl->acl_entry[i].ae_perm & ACL_EXECUTE)
					dac_granted |= VEXEC;
				if (acl->acl_entry[i].ae_perm & ACL_READ)
					dac_granted |= VREAD;
				if (acl->acl_entry[i].ae_perm & ACL_WRITE)
					dac_granted |= VWRITE;
				dac_granted &= acl_mask_granted;

				if ((acc_mode & (dac_granted | cap_granted)) !=
				    acc_mode)
					break;

				if (privused != NULL)
					*privused = 1;
				return (0);

			case ACL_GROUP:
				if (!groupmember(acl->acl_entry[i].ae_id,
				    cred))
					break;
				dac_granted = 0;
				if (acl->acl_entry[i].ae_perm & ACL_EXECUTE)
					dac_granted |= VEXEC;
				if (acl->acl_entry[i].ae_perm & ACL_READ)
					dac_granted |= VREAD;
				if (acl->acl_entry[i].ae_perm & ACL_WRITE)
					dac_granted |= VWRITE;
				dac_granted &= acl_mask_granted;

				if ((acc_mode & (dac_granted | cap_granted)) !=
				    acc_mode)
					break;

				if (privused != NULL)
					*privused = 1;
				return (0);

			default:
				break;
			}
		}
		/*
		 * Even with privilege, group membership was not sufficient.
		 * Return failure.
		 */
		goto error;
	}
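	/*
	 * Reaching this point means that no ACL_USER_OBJ, ACL_USER,
	 * ACL_GROUP_OBJ, or ACL_GROUP entry applied to the credential: a
	 * group entry that matches but does not grant access jumps to
	 * error above.  For example, a credential in the file's group
	 * whose ACL_GROUP_OBJ entry lacks write permission is denied
	 * write rather than picking it up from ACL_OTHER.
	 */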
	/*
	 * Fall back on ACL_OTHER.  ACL_MASK is not applied to ACL_OTHER.
	 */
	dac_granted = 0;
	if (acl_other->ae_perm & ACL_EXECUTE)
		dac_granted |= VEXEC;
	if (acl_other->ae_perm & ACL_READ)
		dac_granted |= VREAD;
	if (acl_other->ae_perm & ACL_WRITE)
		dac_granted |= VWRITE;

	if ((acc_mode & dac_granted) == acc_mode)
		return (0);
	if ((acc_mode & (dac_granted | cap_granted)) == acc_mode) {
		if (privused != NULL)
			*privused = 1;
		return (0);
	}

error:
	return ((acc_mode & VADMIN) ? EPERM : EACCES);
}

/*
 * For the purposes of file systems maintaining the _OBJ entries in an
 * inode with a mode_t field, this routine converts a mode_t entry
 * to an acl_perm_t.
 */
acl_perm_t
acl_posix1e_mode_to_perm(acl_tag_t tag, mode_t mode)
{
	acl_perm_t perm = 0;

	switch (tag) {
	case ACL_USER_OBJ:
		if (mode & S_IXUSR)
			perm |= ACL_EXECUTE;
		if (mode & S_IRUSR)
			perm |= ACL_READ;
		if (mode & S_IWUSR)
			perm |= ACL_WRITE;
		return (perm);

	case ACL_GROUP_OBJ:
		if (mode & S_IXGRP)
			perm |= ACL_EXECUTE;
		if (mode & S_IRGRP)
			perm |= ACL_READ;
		if (mode & S_IWGRP)
			perm |= ACL_WRITE;
		return (perm);

	case ACL_OTHER:
		if (mode & S_IXOTH)
			perm |= ACL_EXECUTE;
		if (mode & S_IROTH)
			perm |= ACL_READ;
		if (mode & S_IWOTH)
			perm |= ACL_WRITE;
		return (perm);

	default:
		printf("acl_posix1e_mode_to_perm: invalid tag (%d)\n", tag);
		return (0);
	}
}

/*
 * Given inode information (uid, gid, mode), return an acl entry of the
 * appropriate type.
 */
struct acl_entry
acl_posix1e_mode_to_entry(acl_tag_t tag, uid_t uid, gid_t gid, mode_t mode)
{
	struct acl_entry acl_entry;

	acl_entry.ae_tag = tag;
	acl_entry.ae_perm = acl_posix1e_mode_to_perm(tag, mode);
	switch (tag) {
	case ACL_USER_OBJ:
		acl_entry.ae_id = uid;
		break;

	case ACL_GROUP_OBJ:
		acl_entry.ae_id = gid;
		break;

	case ACL_OTHER:
		acl_entry.ae_id = ACL_UNDEFINED_ID;
		break;

	default:
		acl_entry.ae_id = ACL_UNDEFINED_ID;
		printf("acl_posix1e_mode_to_entry: invalid tag (%d)\n", tag);
	}

	return (acl_entry);
}

/*
 * Utility function to generate a file mode given appropriate ACL entries.
 */
mode_t
acl_posix1e_perms_to_mode(struct acl_entry *acl_user_obj_entry,
    struct acl_entry *acl_group_obj_entry, struct acl_entry *acl_other_entry)
{
	mode_t mode;

	mode = 0;
	if (acl_user_obj_entry->ae_perm & ACL_EXECUTE)
		mode |= S_IXUSR;
	if (acl_user_obj_entry->ae_perm & ACL_READ)
		mode |= S_IRUSR;
	if (acl_user_obj_entry->ae_perm & ACL_WRITE)
		mode |= S_IWUSR;
	if (acl_group_obj_entry->ae_perm & ACL_EXECUTE)
		mode |= S_IXGRP;
	if (acl_group_obj_entry->ae_perm & ACL_READ)
		mode |= S_IRGRP;
	if (acl_group_obj_entry->ae_perm & ACL_WRITE)
		mode |= S_IWGRP;
	if (acl_other_entry->ae_perm & ACL_EXECUTE)
		mode |= S_IXOTH;
	if (acl_other_entry->ae_perm & ACL_READ)
		mode |= S_IROTH;
	if (acl_other_entry->ae_perm & ACL_WRITE)
		mode |= S_IWOTH;

	return (mode);
}

/*
 * Perform a syntactic check of the ACL, sufficient to allow an
 * implementing file system to determine if it should accept this and
 * rely on the POSIX.1e ACL properties.
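 *
 * For example (illustrative): an ACL containing only ACL_USER_OBJ and
 * ACL_OTHER entries is rejected for lack of an ACL_GROUP_OBJ entry, as
 * is an ACL that carries an ACL_USER entry without an ACL_MASK entry.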
 */
int
acl_posix1e_check(struct acl *acl)
{
	int num_acl_user_obj, num_acl_user, num_acl_group_obj, num_acl_group;
	int num_acl_mask, num_acl_other, i;

	/*
	 * Verify that the number of entries does not exceed the maximum
	 * defined for acl_t.
	 * Verify that the correct number of various sorts of ae_tags are
	 * present:
	 * Exactly one ACL_USER_OBJ
	 * Exactly one ACL_GROUP_OBJ
	 * Exactly one ACL_OTHER
	 * If any ACL_USER or ACL_GROUP entries appear, then exactly one
	 * ACL_MASK entry must also appear.
	 * Verify that all ae_perm entries are in ACL_PERM_BITS.
	 * Verify all ae_tag entries are understood by this implementation.
	 * Note: Does not check for uniqueness of qualifier (ae_id) field.
	 */
	num_acl_user_obj = num_acl_user = num_acl_group_obj = num_acl_group =
	    num_acl_mask = num_acl_other = 0;
	if (acl->acl_cnt > ACL_MAX_ENTRIES || acl->acl_cnt < 0)
		return (EINVAL);
	for (i = 0; i < acl->acl_cnt; i++) {
		/*
		 * Check for a valid tag.
		 */
		switch (acl->acl_entry[i].ae_tag) {
		case ACL_USER_OBJ:
			acl->acl_entry[i].ae_id = ACL_UNDEFINED_ID; /* XXX */
			if (acl->acl_entry[i].ae_id != ACL_UNDEFINED_ID)
				return (EINVAL);
			num_acl_user_obj++;
			break;
		case ACL_GROUP_OBJ:
			acl->acl_entry[i].ae_id = ACL_UNDEFINED_ID; /* XXX */
			if (acl->acl_entry[i].ae_id != ACL_UNDEFINED_ID)
				return (EINVAL);
			num_acl_group_obj++;
			break;
		case ACL_USER:
			if (acl->acl_entry[i].ae_id == ACL_UNDEFINED_ID)
				return (EINVAL);
			num_acl_user++;
			break;
		case ACL_GROUP:
			if (acl->acl_entry[i].ae_id == ACL_UNDEFINED_ID)
				return (EINVAL);
			num_acl_group++;
			break;
		case ACL_OTHER:
			acl->acl_entry[i].ae_id = ACL_UNDEFINED_ID; /* XXX */
			if (acl->acl_entry[i].ae_id != ACL_UNDEFINED_ID)
				return (EINVAL);
			num_acl_other++;
			break;
		case ACL_MASK:
			acl->acl_entry[i].ae_id = ACL_UNDEFINED_ID; /* XXX */
			if (acl->acl_entry[i].ae_id != ACL_UNDEFINED_ID)
				return (EINVAL);
			num_acl_mask++;
			break;
		default:
			return (EINVAL);
		}
		/*
		 * Check for valid perm entries.
		 */
		if ((acl->acl_entry[i].ae_perm | ACL_PERM_BITS) !=
		    ACL_PERM_BITS)
			return (EINVAL);
	}
	if ((num_acl_user_obj != 1) || (num_acl_group_obj != 1) ||
	    (num_acl_other != 1) || (num_acl_mask != 0 && num_acl_mask != 1))
		return (EINVAL);
	if (((num_acl_group != 0) || (num_acl_user != 0)) &&
	    (num_acl_mask != 1))
		return (EINVAL);
	return (0);
}

/*
 * These calls wrap the real vnode operations, and are called by the
 * syscall code once the syscall has converted the path or file
 * descriptor to a vnode (unlocked).  The aclp pointer is assumed
 * still to point to userland, so this should not be consumed within
 * the kernel except by syscall code.  Other code should directly
 * invoke VOP_{SET,GET}ACL.
 */

/*
 * Given a vnode, set its ACL.
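 * The ACL is copied in from the userland aclp, then VOP_SETACL() is
 * invoked with the vnode locked and the mount marked for writing via
 * vn_start_write().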
 */
static int
vacl_set_acl(struct thread *td, struct vnode *vp, acl_type_t type,
    struct acl *aclp)
{
	struct acl inkernacl;
	struct mount *mp;
	int error;

	error = copyin(aclp, &inkernacl, sizeof(struct acl));
	if (error)
		return (error);
	error = vn_start_write(vp, &mp, V_WAIT | PCATCH);
	if (error != 0)
		return (error);
	VOP_LEASE(vp, td, td->td_ucred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	error = VOP_SETACL(vp, type, &inkernacl, td->td_ucred, td);
	VOP_UNLOCK(vp, 0, td);
	vn_finished_write(mp);
	return (error);
}

/*
 * Given a vnode, get its ACL.
 */
static int
vacl_get_acl(struct thread *td, struct vnode *vp, acl_type_t type,
    struct acl *aclp)
{
	struct acl inkernelacl;
	int error;

	VOP_LEASE(vp, td, td->td_ucred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	error = VOP_GETACL(vp, type, &inkernelacl, td->td_ucred, td);
	VOP_UNLOCK(vp, 0, td);
	if (error == 0)
		error = copyout(&inkernelacl, aclp, sizeof(struct acl));
	return (error);
}

/*
 * Given a vnode, delete its ACL.
 */
static int
vacl_delete(struct thread *td, struct vnode *vp, acl_type_t type)
{
	struct mount *mp;
	int error;

	error = vn_start_write(vp, &mp, V_WAIT | PCATCH);
	if (error)
		return (error);
	VOP_LEASE(vp, td, td->td_ucred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	/*
	 * XXX: the type argument is currently ignored; the default ACL
	 * is always the one deleted.
	 */
	error = VOP_SETACL(vp, ACL_TYPE_DEFAULT, 0, td->td_ucred, td);
	VOP_UNLOCK(vp, 0, td);
	vn_finished_write(mp);
	return (error);
}

/*
 * Given a vnode, check whether an ACL is appropriate for it.
 */
static int
vacl_aclcheck(struct thread *td, struct vnode *vp, acl_type_t type,
    struct acl *aclp)
{
	struct acl inkernelacl;
	int error;

	error = copyin(aclp, &inkernelacl, sizeof(struct acl));
	if (error)
		return (error);
	error = VOP_ACLCHECK(vp, type, &inkernelacl, td->td_ucred, td);
	return (error);
}

/*
 * syscalls -- convert the path/fd to a vnode, and call vacl_whatever.
 * Don't need to lock, as the vacl_ code will get/release any locks
 * required.
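 *
 * For example, __acl_get_file() below acquires Giant, performs the
 * namei() lookup itself, and passes the resulting unlocked vnode to
 * vacl_get_acl(), which takes and drops the vnode lock internally.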
 */

/*
 * Given a file path, get an ACL for it.
 *
 * MPSAFE
 */
int
__acl_get_file(struct thread *td, struct __acl_get_file_args *uap)
{
	struct nameidata nd;
	int error;

	mtx_lock(&Giant);
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
	error = namei(&nd);
	if (error == 0) {
		error = vacl_get_acl(td, nd.ni_vp, SCARG(uap, type),
		    SCARG(uap, aclp));
		NDFREE(&nd, 0);
	}
	mtx_unlock(&Giant);
	return (error);
}

/*
 * Given a file path, set an ACL for it.
 *
 * MPSAFE
 */
int
__acl_set_file(struct thread *td, struct __acl_set_file_args *uap)
{
	struct nameidata nd;
	int error;

	mtx_lock(&Giant);
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
	error = namei(&nd);
	if (error == 0) {
		error = vacl_set_acl(td, nd.ni_vp, SCARG(uap, type),
		    SCARG(uap, aclp));
		NDFREE(&nd, 0);
	}
	mtx_unlock(&Giant);
	return (error);
}

/*
 * Given a file descriptor, get an ACL for it.
 *
 * MPSAFE
 */
int
__acl_get_fd(struct thread *td, struct __acl_get_fd_args *uap)
{
	struct file *fp;
	int error;

	mtx_lock(&Giant);
	error = getvnode(td->td_proc->p_fd, SCARG(uap, filedes), &fp);
	if (error == 0) {
		error = vacl_get_acl(td, (struct vnode *)fp->f_data,
		    SCARG(uap, type), SCARG(uap, aclp));
		fdrop(fp, td);
	}
	mtx_unlock(&Giant);
	return (error);
}

/*
 * Given a file descriptor, set an ACL for it.
 *
 * MPSAFE
 */
int
__acl_set_fd(struct thread *td, struct __acl_set_fd_args *uap)
{
	struct file *fp;
	int error;

	mtx_lock(&Giant);
	error = getvnode(td->td_proc->p_fd, SCARG(uap, filedes), &fp);
	if (error == 0) {
		error = vacl_set_acl(td, (struct vnode *)fp->f_data,
		    SCARG(uap, type), SCARG(uap, aclp));
		fdrop(fp, td);
	}
	mtx_unlock(&Giant);
	return (error);
}

/*
 * Given a file path, delete an ACL from it.
 *
 * MPSAFE
 */
int
__acl_delete_file(struct thread *td, struct __acl_delete_file_args *uap)
{
	struct nameidata nd;
	int error;

	mtx_lock(&Giant);
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
	error = namei(&nd);
	if (error == 0) {
		error = vacl_delete(td, nd.ni_vp, SCARG(uap, type));
		NDFREE(&nd, 0);
	}
	mtx_unlock(&Giant);
	return (error);
}

/*
 * Given a file descriptor, delete an ACL from it.
 *
 * MPSAFE
 */
int
__acl_delete_fd(struct thread *td, struct __acl_delete_fd_args *uap)
{
	struct file *fp;
	int error;

	mtx_lock(&Giant);
	error = getvnode(td->td_proc->p_fd, SCARG(uap, filedes), &fp);
	if (error == 0) {
		error = vacl_delete(td, (struct vnode *)fp->f_data,
		    SCARG(uap, type));
		fdrop(fp, td);
	}
	mtx_unlock(&Giant);
	return (error);
}

/*
 * Given a file path, check an ACL for it.
 *
 * MPSAFE
 */
int
__acl_aclcheck_file(struct thread *td, struct __acl_aclcheck_file_args *uap)
{
	struct nameidata nd;
	int error;

	mtx_lock(&Giant);
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
	error = namei(&nd);
	if (error == 0) {
		error = vacl_aclcheck(td, nd.ni_vp, SCARG(uap, type),
		    SCARG(uap, aclp));
		NDFREE(&nd, 0);
	}
	mtx_unlock(&Giant);
	return (error);
}

/*
 * Given a file descriptor, check an ACL for it.
 *
 * MPSAFE
 */
int
__acl_aclcheck_fd(struct thread *td, struct __acl_aclcheck_fd_args *uap)
{
	struct file *fp;
	int error;

	mtx_lock(&Giant);
	error = getvnode(td->td_proc->p_fd, SCARG(uap, filedes), &fp);
	if (error == 0) {
		error = vacl_aclcheck(td, (struct vnode *)fp->f_data,
		    SCARG(uap, type), SCARG(uap, aclp));
		fdrop(fp, td);
	}
	mtx_unlock(&Giant);
	return (error);
}