xref: /freebsd/sys/kern/vfs_acl.c (revision 6990ffd8a95caaba6858ad44ff1b3157d1efba8f)
1 /*-
2  * Copyright (c) 1999, 2000, 2001 Robert N. M. Watson
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD$
27  */
28 /*
29  * Developed by the TrustedBSD Project.
30  * Support for POSIX.1e access control lists.
31  */
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/sysproto.h>
36 #include <sys/kernel.h>
37 #include <sys/malloc.h>
38 #include <sys/vnode.h>
39 #include <sys/lock.h>
40 #include <sys/mutex.h>
41 #include <sys/namei.h>
42 #include <sys/file.h>
43 #include <sys/proc.h>
44 #include <sys/sysent.h>
45 #include <sys/errno.h>
46 #include <sys/stat.h>
47 #include <sys/acl.h>
48 
49 MALLOC_DEFINE(M_ACL, "acl", "access control list");
50 
51 static int	vacl_set_acl(struct thread *td, struct vnode *vp, acl_type_t type,
52 	    struct acl *aclp);
53 static int	vacl_get_acl(struct thread *td, struct vnode *vp, acl_type_t type,
54 	    struct acl *aclp);
55 static int	vacl_aclcheck(struct thread *td, struct vnode *vp,
56 	    acl_type_t type, struct acl *aclp);
57 
58 /*
59  * Implement a version of vaccess() that understands POSIX.1e ACL semantics.
60  * Return 0 on success, else an errno value.  Should be merged into
61  * vaccess() eventually.
62  */
63 int
64 vaccess_acl_posix1e(enum vtype type, uid_t file_uid, gid_t file_gid,
65     struct acl *acl, mode_t acc_mode, struct ucred *cred, int *privused)
66 {
67 	struct acl_entry *acl_other, *acl_mask;
68 	mode_t dac_granted;
69 	mode_t cap_granted;
70 	mode_t acl_mask_granted;
71 	int group_matched, i;
72 
73 	/*
74 	 * Look for a normal, non-privileged way to access the file/directory
75 	 * as requested.  If it exists, go with that.  Otherwise, attempt
76 	 * to use privileges granted via cap_granted.  In some cases,
77 	 * which privileges to use may be ambiguous due to "best match",
78 	 * in which case fall back on first match for the time being.
79 	 */
80 	if (privused != NULL)
81 		*privused = 0;
82 
83 	/*
84 	 * Determine privileges now, but don't apply until we've found
85 	 * a DAC entry that matches but has failed to allow access.
86 	 */
87 #ifndef CAPABILITIES
88 	if (suser_xxx(cred, NULL, PRISON_ROOT) == 0)
89 		cap_granted = (VEXEC | VREAD | VWRITE | VADMIN);
90 	else
91 		cap_granted = 0;
92 #else
93 	cap_granted = 0;
94 
95 	if (type == VDIR) {
96 		if ((acc_mode & VEXEC) && !cap_check(cred, NULL,
97 		     CAP_DAC_READ_SEARCH, PRISON_ROOT))
98 			cap_granted |= VEXEC;
99 	} else {
100 		if ((acc_mode & VEXEC) && !cap_check(cred, NULL,
101 		    CAP_DAC_EXECUTE, PRISON_ROOT))
102 			cap_granted |= VEXEC;
103 	}
104 
105 	if ((acc_mode & VREAD) && !cap_check(cred, NULL, CAP_DAC_READ_SEARCH,
106 	    PRISON_ROOT))
107 		cap_granted |= VREAD;
108 
109 	if ((acc_mode & VWRITE) && !cap_check(cred, NULL, CAP_DAC_WRITE,
110 	    PRISON_ROOT))
111 		cap_granted |= VWRITE;
112 
113 	if ((acc_mode & VADMIN) && !cap_check(cred, NULL, CAP_FOWNER,
114 	    PRISON_ROOT))
115 		cap_granted |= VADMIN;
116 #endif /* CAPABILITIES */
117 
118 	/*
119 	 * The owner matches if the effective uid associated with the
120 	 * credential matches that of the ACL_USER_OBJ entry.  While we're
121 	 * doing the first scan, also cache the location of the ACL_MASK
122 	 * and ACL_OTHER entries, avoiding the need for later scans.
123 	 */
124 	acl_mask = acl_other = NULL;
125 	for (i = 0; i < acl->acl_cnt; i++) {
126 		switch (acl->acl_entry[i].ae_tag) {
127 		case ACL_USER_OBJ:
128 			if (file_uid != cred->cr_uid)
129 				break;
130 			dac_granted = 0;
131 			dac_granted |= VADMIN;
132 			if (acl->acl_entry[i].ae_perm & ACL_EXECUTE)
133 				dac_granted |= VEXEC;
134 			if (acl->acl_entry[i].ae_perm & ACL_READ)
135 				dac_granted |= VREAD;
136 			if (acl->acl_entry[i].ae_perm & ACL_WRITE)
137 				dac_granted |= VWRITE;
138 			if ((acc_mode & dac_granted) == acc_mode)
139 				return (0);
140 			if ((acc_mode & (dac_granted | cap_granted)) ==
141 			    acc_mode) {
142 				if (privused != NULL)
143 					*privused = 1;
144 				return (0);
145 			}
146 			goto error;
147 
148 		case ACL_MASK:
149 			acl_mask = &acl->acl_entry[i];
150 			break;
151 
152 		case ACL_OTHER:
153 			acl_other = &acl->acl_entry[i];
154 			break;
155 
156 		default:
    			break;
157 		}
158 	}
159 
160 	/*
161 	 * An ACL_OTHER entry should always exist in a valid access
162 	 * ACL.  If it doesn't, then generate a serious failure.  For now,
163 	 * this means a debugging message and EPERM, but in the future
164 	 * should probably be a panic.
165 	 */
166 	if (acl_other == NULL) {
167 		/*
168 		 * XXX This should never happen
169 		 */
170 		printf("vaccess_acl_posix1e: ACL_OTHER missing\n");
171 		return (EPERM);
172 	}
173 
174 	/*
175 	 * Checks against ACL_USER, ACL_GROUP_OBJ, and ACL_GROUP fields
176 	 * are masked by an ACL_MASK entry, if any.  As such, first identify
177 	 * the ACL_MASK field, then iterate through identifying potential
178 	 * user matches, then group matches.  If there is no ACL_MASK,
179 	 * assume that the mask allows all requests to succeed.
180 	 */
181 	if (acl_mask != NULL) {
182 		acl_mask_granted = 0;
183 		if (acl_mask->ae_perm & ACL_EXECUTE)
184 			acl_mask_granted |= VEXEC;
185 		if (acl_mask->ae_perm & ACL_READ)
186 			acl_mask_granted |= VREAD;
187 		if (acl_mask->ae_perm & ACL_WRITE)
188 			acl_mask_granted |= VWRITE;
189 	} else
190 		acl_mask_granted = VEXEC | VREAD | VWRITE;
191 
192 	/*
193 	 * Iterate through user ACL entries.  Check without privilege
194 	 * first; if a matching entry is found but does not grant the
195 	 * requested access, check again with privilege.
196 	 */
197 
198 	/*
199 	 * Check ACL_USER ACL entries.
200 	 */
201 	for (i = 0; i < acl->acl_cnt; i++) {
202 		switch (acl->acl_entry[i].ae_tag) {
203 		case ACL_USER:
204 			if (acl->acl_entry[i].ae_id != cred->cr_uid)
205 				break;
206 			dac_granted = 0;
207 			if (acl->acl_entry[i].ae_perm & ACL_EXECUTE)
208 				dac_granted |= VEXEC;
209 			if (acl->acl_entry[i].ae_perm & ACL_READ)
210 				dac_granted |= VREAD;
211 			if (acl->acl_entry[i].ae_perm & ACL_WRITE)
212 				dac_granted |= VWRITE;
213 			dac_granted &= acl_mask_granted;
214 			if ((acc_mode & dac_granted) == acc_mode)
215 				return (0);
216 			if ((acc_mode & (dac_granted | cap_granted)) !=
217 			    acc_mode)
218 				goto error;
219 
220 			if (privused != NULL)
221 				*privused = 1;
222 			return (0);
223 		}
224 	}
225 
226 	/*
227 	 * Group match is best-match, not first-match, so find a
228 	 * "best" match.  Iterate across, testing each potential group
229 	 * match.  Make sure we keep track of whether we found a match
230 	 * or not, so that we know if we should try again with any
231 	 * available privilege, or if we should move on to ACL_OTHER.
232 	 */
233 	group_matched = 0;
234 	for (i = 0; i < acl->acl_cnt; i++) {
235 		switch (acl->acl_entry[i].ae_tag) {
236 		case ACL_GROUP_OBJ:
237 			if (!groupmember(file_gid, cred))
238 				break;
239 			dac_granted = 0;
240 			if (acl->acl_entry[i].ae_perm & ACL_EXECUTE)
241 				dac_granted |= VEXEC;
242 			if (acl->acl_entry[i].ae_perm & ACL_READ)
243 				dac_granted |= VREAD;
244 			if (acl->acl_entry[i].ae_perm & ACL_WRITE)
245 				dac_granted |= VWRITE;
246 			dac_granted &= acl_mask_granted;
247 
248 			if ((acc_mode & dac_granted) == acc_mode)
249 				return (0);
250 
251 			group_matched = 1;
252 			break;
253 
254 		case ACL_GROUP:
255 			if (!groupmember(acl->acl_entry[i].ae_id, cred))
256 				break;
257 			dac_granted = 0;
258 			if (acl->acl_entry[i].ae_perm & ACL_EXECUTE)
259 				dac_granted |= VEXEC;
260 			if (acl->acl_entry[i].ae_perm & ACL_READ)
261 				dac_granted |= VREAD;
262 			if (acl->acl_entry[i].ae_perm & ACL_WRITE)
263 				dac_granted |= VWRITE;
264 			dac_granted &= acl_mask_granted;
265 
266 			if ((acc_mode & dac_granted) == acc_mode)
267 				return (0);
268 
269 			group_matched = 1;
270 			break;
271 
272 		default:
    			break;
273 		}
274 	}
275 
276 	if (group_matched == 1) {
277 		/*
278 		 * There was a match, but it did not grant rights via
279 		 * pure DAC.  Try again, this time with privilege.
280 		 */
281 		for (i = 0; i < acl->acl_cnt; i++) {
282 			switch (acl->acl_entry[i].ae_tag) {
283 			case ACL_GROUP_OBJ:
284 				if (!groupmember(file_gid, cred))
285 					break;
286 				dac_granted = 0;
287 				if (acl->acl_entry[i].ae_perm & ACL_EXECUTE)
288 					dac_granted |= VEXEC;
289 				if (acl->acl_entry[i].ae_perm & ACL_READ)
290 					dac_granted |= VREAD;
291 				if (acl->acl_entry[i].ae_perm & ACL_WRITE)
292 					dac_granted |= VWRITE;
293 				dac_granted &= acl_mask_granted;
294 
295 				if ((acc_mode & (dac_granted | cap_granted)) !=
296 				    acc_mode)
297 					break;
298 
299 				if (privused != NULL)
300 					*privused = 1;
301 				return (0);
302 
303 			case ACL_GROUP:
304 				if (!groupmember(acl->acl_entry[i].ae_id,
305 				    cred))
306 					break;
307 				dac_granted = 0;
308 				if (acl->acl_entry[i].ae_perm & ACL_EXECUTE)
309 					dac_granted |= VEXEC;
310 				if (acl->acl_entry[i].ae_perm & ACL_READ)
311 					dac_granted |= VREAD;
312 				if (acl->acl_entry[i].ae_perm & ACL_WRITE)
313 					dac_granted |= VWRITE;
314 				dac_granted &= acl_mask_granted;
315 
316 				if ((acc_mode & (dac_granted | cap_granted)) !=
317 				    acc_mode)
318 					break;
319 
320 				if (privused != NULL)
321 					*privused = 1;
322 				return (0);
323 
324 			default:
    				break;
325 			}
326 		}
327 		/*
328 		 * Even with privilege, group membership was not sufficient.
329 		 * Return failure.
330 		 */
331 		goto error;
332 	}
333 
334 	/*
335 	 * Fall back on ACL_OTHER.  ACL_MASK is not applied to ACL_OTHER.
336 	 */
337 	dac_granted = 0;
338 	if (acl_other->ae_perm & ACL_EXECUTE)
339 		dac_granted |= VEXEC;
340 	if (acl_other->ae_perm & ACL_READ)
341 		dac_granted |= VREAD;
342 	if (acl_other->ae_perm & ACL_WRITE)
343 		dac_granted |= VWRITE;
344 
345 	if ((acc_mode & dac_granted) == acc_mode)
346 		return (0);
347 	if ((acc_mode & (dac_granted | cap_granted)) == acc_mode) {
348 		if (privused != NULL)
349 			*privused = 1;
350 		return (0);
351 	}
352 
353 error:
354 	return ((acc_mode & VADMIN) ? EPERM : EACCES);
355 }
356 
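/*
 * Illustrative sketch only (not compiled): a file system that stores
 * POSIX.1e access ACLs would typically call vaccess_acl_posix1e() from
 * its VOP_ACCESS implementation after fetching the ACL for the vnode.
 * The inode fields and surrounding variables below are hypothetical
 * placeholders, not part of any particular file system:
 *
 *	struct acl acl;
 *	int error;
 *
 *	error = VOP_GETACL(vp, ACL_TYPE_ACCESS, &acl, cred, td);
 *	if (error)
 *		return (error);
 *	return (vaccess_acl_posix1e(vp->v_type, ip->i_uid, ip->i_gid,
 *	    &acl, acc_mode, cred, privused));
 */
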
357 /*
358  * For the purposes of file systems maintaining the _OBJ entries in an
359  * inode with a mode_t field, this routine converts a mode_t entry
360  * to an acl_perm_t.
361  */
362 acl_perm_t
363 acl_posix1e_mode_to_perm(acl_tag_t tag, mode_t mode)
364 {
365 	acl_perm_t	perm = 0;
366 
367 	switch(tag) {
368 	case ACL_USER_OBJ:
369 		if (mode & S_IXUSR)
370 			perm |= ACL_EXECUTE;
371 		if (mode & S_IRUSR)
372 			perm |= ACL_READ;
373 		if (mode & S_IWUSR)
374 			perm |= ACL_WRITE;
375 		return (perm);
376 
377 	case ACL_GROUP_OBJ:
378 		if (mode & S_IXGRP)
379 			perm |= ACL_EXECUTE;
380 		if (mode & S_IRGRP)
381 			perm |= ACL_READ;
382 		if (mode & S_IWGRP)
383 			perm |= ACL_WRITE;
384 		return (perm);
385 
386 	case ACL_OTHER:
387 		if (mode & S_IXOTH)
388 			perm |= ACL_EXECUTE;
389 		if (mode & S_IROTH)
390 			perm |= ACL_READ;
391 		if (mode & S_IWOTH)
392 			perm |= ACL_WRITE;
393 		return (perm);
394 
395 	default:
396 		printf("acl_posix1e_mode_to_perm: invalid tag (%d)\n", tag);
397 		return (0);
398 	}
399 }
400 
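/*
 * Worked example (comment only, not compiled): for a plain 0640 mode,
 * the routine above yields ACL_READ | ACL_WRITE for ACL_USER_OBJ,
 * ACL_READ for ACL_GROUP_OBJ, and no permissions for ACL_OTHER:
 *
 *	acl_perm_t up, gp, op;
 *
 *	up = acl_posix1e_mode_to_perm(ACL_USER_OBJ, 0640);
 *	gp = acl_posix1e_mode_to_perm(ACL_GROUP_OBJ, 0640);
 *	op = acl_posix1e_mode_to_perm(ACL_OTHER, 0640);
 *	KASSERT(up == (ACL_READ | ACL_WRITE), ("user_obj perm"));
 *	KASSERT(gp == ACL_READ, ("group_obj perm"));
 *	KASSERT(op == 0, ("other perm"));
 */
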
401 /*
402  * Given inode information (uid, gid, mode), return an acl entry of the
403  * appropriate type.
404  */
405 struct acl_entry
406 acl_posix1e_mode_to_entry(acl_tag_t tag, uid_t uid, gid_t gid, mode_t mode)
407 {
408 	struct acl_entry	acl_entry;
409 
410 	acl_entry.ae_tag = tag;
411 	acl_entry.ae_perm = acl_posix1e_mode_to_perm(tag, mode);
412 	switch(tag) {
413 	case ACL_USER_OBJ:
414 		acl_entry.ae_id = uid;
415 		break;
416 
417 	case ACL_GROUP_OBJ:
418 		acl_entry.ae_id = gid;
419 		break;
420 
421 	case ACL_OTHER:
422 		acl_entry.ae_id = ACL_UNDEFINED_ID;
423 		break;
424 
425 	default:
426 		acl_entry.ae_id = ACL_UNDEFINED_ID;
427 		printf("acl_posix1e_mode_to_entry: invalid tag (%d)\n", tag);
428 	}
429 
430 	return (acl_entry);
431 }
432 
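/*
 * Illustrative sketch only (not compiled): a file system synthesizing
 * the three required _OBJ entries from its inode fields might fill in
 * an ACL roughly as follows (the "ip" field names are hypothetical):
 *
 *	acl->acl_cnt = 3;
 *	acl->acl_entry[0] = acl_posix1e_mode_to_entry(ACL_USER_OBJ,
 *	    ip->i_uid, ip->i_gid, ip->i_mode);
 *	acl->acl_entry[1] = acl_posix1e_mode_to_entry(ACL_GROUP_OBJ,
 *	    ip->i_uid, ip->i_gid, ip->i_mode);
 *	acl->acl_entry[2] = acl_posix1e_mode_to_entry(ACL_OTHER,
 *	    ip->i_uid, ip->i_gid, ip->i_mode);
 */
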
433 /*
434  * Utility function to generate a file mode given appropriate ACL entries.
435  */
436 mode_t
437 acl_posix1e_perms_to_mode(struct acl_entry *acl_user_obj_entry,
438     struct acl_entry *acl_group_obj_entry, struct acl_entry *acl_other_entry)
439 {
440 	mode_t	mode;
441 
442 	mode = 0;
443 	if (acl_user_obj_entry->ae_perm & ACL_EXECUTE)
444 		mode |= S_IXUSR;
445 	if (acl_user_obj_entry->ae_perm & ACL_READ)
446 		mode |= S_IRUSR;
447 	if (acl_user_obj_entry->ae_perm & ACL_WRITE)
448 		mode |= S_IWUSR;
449 	if (acl_group_obj_entry->ae_perm & ACL_EXECUTE)
450 		mode |= S_IXGRP;
451 	if (acl_group_obj_entry->ae_perm & ACL_READ)
452 		mode |= S_IRGRP;
453 	if (acl_group_obj_entry->ae_perm & ACL_WRITE)
454 		mode |= S_IWGRP;
455 	if (acl_other_entry->ae_perm & ACL_EXECUTE)
456 		mode |= S_IXOTH;
457 	if (acl_other_entry->ae_perm & ACL_READ)
458 		mode |= S_IROTH;
459 	if (acl_other_entry->ae_perm & ACL_WRITE)
460 		mode |= S_IWOTH;
461 
462 	return (mode);
463 }
464 
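/*
 * Worked example (comment only, not compiled): for the _OBJ entries
 * this routine is the inverse of acl_posix1e_mode_to_entry(), so a
 * 0640 mode survives a round trip; bits outside the rwx sets, such as
 * the setuid bit, are not represented in the ACL and are lost:
 *
 *	struct acl_entry u, g, o;
 *	mode_t mode;
 *
 *	u = acl_posix1e_mode_to_entry(ACL_USER_OBJ, uid, gid, 0640);
 *	g = acl_posix1e_mode_to_entry(ACL_GROUP_OBJ, uid, gid, 0640);
 *	o = acl_posix1e_mode_to_entry(ACL_OTHER, uid, gid, 0640);
 *	mode = acl_posix1e_perms_to_mode(&u, &g, &o);
 *	KASSERT(mode == 0640, ("mode round trip"));
 */
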
465 /*
466  * Perform a syntactic check of the ACL, sufficient to allow an
467  * implementing file system to determine if it should accept this and
468  * rely on the POSIX.1e ACL properties.
469  */
470 int
471 acl_posix1e_check(struct acl *acl)
472 {
473 	int num_acl_user_obj, num_acl_user, num_acl_group_obj, num_acl_group;
474 	int num_acl_mask, num_acl_other, i;
475 
476 	/*
477 	 * Verify that the number of entries does not exceed the maximum
478 	 * defined for acl_t.
479 	 * Verify that the correct number of various sorts of ae_tags are
480 	 * present:
481 	 *   Exactly one ACL_USER_OBJ
482 	 *   Exactly one ACL_GROUP_OBJ
483 	 *   Exactly one ACL_OTHER
484 	 *   If any ACL_USER or ACL_GROUP entries appear, then exactly one
485 	 *   ACL_MASK entry must also appear.
486 	 * Verify that all ae_perm entries are in ACL_PERM_BITS.
487 	 * Verify all ae_tag entries are understood by this implementation.
488 	 * Note: Does not check for uniqueness of qualifier (ae_id) field.
489 	 */
490 	num_acl_user_obj = num_acl_user = num_acl_group_obj = num_acl_group =
491 	    num_acl_mask = num_acl_other = 0;
492 	if (acl->acl_cnt > ACL_MAX_ENTRIES || acl->acl_cnt < 0)
493 		return (EINVAL);
494 	for (i = 0; i < acl->acl_cnt; i++) {
495 		/*
496 		 * Check for a valid tag.
497 		 */
498 		switch(acl->acl_entry[i].ae_tag) {
499 		case ACL_USER_OBJ:
500 			acl->acl_entry[i].ae_id = ACL_UNDEFINED_ID; /* XXX */
501 			if (acl->acl_entry[i].ae_id != ACL_UNDEFINED_ID)
502 				return (EINVAL);
503 			num_acl_user_obj++;
504 			break;
505 		case ACL_GROUP_OBJ:
506 			acl->acl_entry[i].ae_id = ACL_UNDEFINED_ID; /* XXX */
507 			if (acl->acl_entry[i].ae_id != ACL_UNDEFINED_ID)
508 				return (EINVAL);
509 			num_acl_group_obj++;
510 			break;
511 		case ACL_USER:
512 			if (acl->acl_entry[i].ae_id == ACL_UNDEFINED_ID)
513 				return (EINVAL);
514 			num_acl_user++;
515 			break;
516 		case ACL_GROUP:
517 			if (acl->acl_entry[i].ae_id == ACL_UNDEFINED_ID)
518 				return (EINVAL);
519 			num_acl_group++;
520 			break;
521 		case ACL_OTHER:
522 			acl->acl_entry[i].ae_id = ACL_UNDEFINED_ID; /* XXX */
523 			if (acl->acl_entry[i].ae_id != ACL_UNDEFINED_ID)
524 				return (EINVAL);
525 			num_acl_other++;
526 			break;
527 		case ACL_MASK:
528 			acl->acl_entry[i].ae_id = ACL_UNDEFINED_ID; /* XXX */
529 			if (acl->acl_entry[i].ae_id != ACL_UNDEFINED_ID)
530 				return (EINVAL);
531 			num_acl_mask++;
532 			break;
533 		default:
534 			return (EINVAL);
535 		}
536 		/*
537 		 * Check for valid perm entries.
538 		 */
539 		if ((acl->acl_entry[i].ae_perm | ACL_PERM_BITS) !=
540 		    ACL_PERM_BITS)
541 			return (EINVAL);
542 	}
543 	if ((num_acl_user_obj != 1) || (num_acl_group_obj != 1) ||
544 	    (num_acl_other != 1) || (num_acl_mask != 0 && num_acl_mask != 1))
545 		return (EINVAL);
546 	if (((num_acl_group != 0) || (num_acl_user != 0)) &&
547 	    (num_acl_mask != 1))
548 		return (EINVAL);
549 	return (0);
550 }
551 
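/*
 * Illustrative example (comment only, not compiled): the smallest ACL
 * accepted by the check above carries exactly the three required
 * entries and no ACL_MASK:
 *
 *	struct acl acl;
 *
 *	acl.acl_cnt = 3;
 *	acl.acl_entry[0].ae_tag = ACL_USER_OBJ;
 *	acl.acl_entry[0].ae_id = ACL_UNDEFINED_ID;
 *	acl.acl_entry[0].ae_perm = ACL_READ | ACL_WRITE;
 *	acl.acl_entry[1].ae_tag = ACL_GROUP_OBJ;
 *	acl.acl_entry[1].ae_id = ACL_UNDEFINED_ID;
 *	acl.acl_entry[1].ae_perm = ACL_READ;
 *	acl.acl_entry[2].ae_tag = ACL_OTHER;
 *	acl.acl_entry[2].ae_id = ACL_UNDEFINED_ID;
 *	acl.acl_entry[2].ae_perm = 0;
 *	KASSERT(acl_posix1e_check(&acl) == 0, ("minimal ACL valid"));
 *
 * Adding an ACL_USER or ACL_GROUP entry without also adding exactly
 * one ACL_MASK entry would make the same check return EINVAL.
 */
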
552 /*
553  * These calls wrap the real vnode operations, and are called by the
554  * syscall code once the syscall has converted the path or file
555  * descriptor to a vnode (unlocked).  The aclp pointer is assumed
556  * still to point to userland, so this should not be consumed within
557  * the kernel except by syscall code.  Other code should directly
558  * invoke VOP_{SET,GET}ACL.
559  */
560 
561 /*
562  * Given a vnode, set its ACL.
563  */
564 static int
565 vacl_set_acl(struct thread *td, struct vnode *vp, acl_type_t type,
566     struct acl *aclp)
567 {
568 	struct acl inkernacl;
569 	int error;
570 
571 	error = copyin(aclp, &inkernacl, sizeof(struct acl));
572 	if (error)
573 		return(error);
574 	VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
575 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
576 	error = VOP_SETACL(vp, type, &inkernacl, td->td_proc->p_ucred, td);
577 	VOP_UNLOCK(vp, 0, td);
578 	return(error);
579 }
580 
581 /*
582  * Given a vnode, get its ACL.
583  */
584 static int
585 vacl_get_acl(struct thread *td, struct vnode *vp, acl_type_t type,
586     struct acl *aclp)
587 {
588 	struct acl inkernelacl;
589 	int error;
590 
591 	VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
592 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
593 	error = VOP_GETACL(vp, type, &inkernelacl, td->td_proc->p_ucred, td);
594 	VOP_UNLOCK(vp, 0, td);
595 	if (error == 0)
596 		error = copyout(&inkernelacl, aclp, sizeof(struct acl));
597 	return (error);
598 }
599 
600 /*
601  * Given a vnode, delete its ACL.
602  */
603 static int
604 vacl_delete(struct thread *td, struct vnode *vp, acl_type_t type)
605 {
606 	int error;
607 
608 	VOP_LEASE(vp, td, td->td_proc->p_ucred, LEASE_WRITE);
609 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
610 	error = VOP_SETACL(vp, type, 0, td->td_proc->p_ucred, td);
611 	VOP_UNLOCK(vp, 0, td);
612 	return (error);
613 }
614 
615 /*
616  * Given a vnode, check whether an ACL is appropriate for it.
617  */
618 static int
619 vacl_aclcheck(struct thread *td, struct vnode *vp, acl_type_t type,
620     struct acl *aclp)
621 {
622 	struct acl inkernelacl;
623 	int error;
624 
625 	error = copyin(aclp, &inkernelacl, sizeof(struct acl));
626 	if (error)
627 		return(error);
628 	error = VOP_ACLCHECK(vp, type, &inkernelacl, td->td_proc->p_ucred, td);
629 	return (error);
630 }
631 
632 /*
633  * syscalls -- convert the path/fd to a vnode, and call vacl_whatever.
634  * Don't need to lock, as the vacl_ code will get/release any locks
635  * required.
636  */
637 
638 /*
639  * Given a file path, get an ACL for it
640  *
641  * MPSAFE
642  */
643 int
644 __acl_get_file(struct thread *td, struct __acl_get_file_args *uap)
645 {
646 	struct nameidata nd;
647 	int error;
648 
649 	mtx_lock(&Giant);
650 	/* what flags are required here -- possibly not LOCKLEAF? */
651 	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
652 	error = namei(&nd);
653 	if (error == 0) {
654 		error = vacl_get_acl(td, nd.ni_vp, SCARG(uap, type),
655 			    SCARG(uap, aclp));
656 		NDFREE(&nd, 0);
657 	}
658 	mtx_unlock(&Giant);
659 	return (error);
660 }
661 
662 /*
663  * Given a file path, set an ACL for it
664  *
665  * MPSAFE
666  */
667 int
668 __acl_set_file(struct thread *td, struct __acl_set_file_args *uap)
669 {
670 	struct nameidata nd;
671 	int error;
672 
673 	mtx_lock(&Giant);
674 	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
675 	error = namei(&nd);
676 	if (error == 0) {
677 		error = vacl_set_acl(td, nd.ni_vp, SCARG(uap, type),
678 			    SCARG(uap, aclp));
679 		NDFREE(&nd, 0);
680 	}
681 	mtx_unlock(&Giant);
682 	return (error);
683 }
684 
685 /*
686  * Given a file descriptor, get an ACL for it
687  *
688  * MPSAFE
689  */
690 int
691 __acl_get_fd(struct thread *td, struct __acl_get_fd_args *uap)
692 {
693 	struct file *fp;
694 	int error;
695 
696 	mtx_lock(&Giant);
697 	error = getvnode(td->td_proc->p_fd, SCARG(uap, filedes), &fp);
698 	if (error == 0) {
699 		error = vacl_get_acl(td, (struct vnode *)fp->f_data,
700 			    SCARG(uap, type), SCARG(uap, aclp));
701 	}
702 	mtx_unlock(&Giant);
703 	return (error);
704 }
705 
706 /*
707  * Given a file descriptor, set an ACL for it
708  *
709  * MPSAFE
710  */
711 int
712 __acl_set_fd(struct thread *td, struct __acl_set_fd_args *uap)
713 {
714 	struct file *fp;
715 	int error;
716 
717 	mtx_lock(&Giant);
718 	error = getvnode(td->td_proc->p_fd, SCARG(uap, filedes), &fp);
719 	if (error == 0) {
720 		error = vacl_set_acl(td, (struct vnode *)fp->f_data,
721 			    SCARG(uap, type), SCARG(uap, aclp));
722 	}
723 	mtx_unlock(&Giant);
724 	return (error);
725 }
726 
727 /*
728  * Given a file path, delete an ACL from it.
729  *
730  * MPSAFE
731  */
732 int
733 __acl_delete_file(struct thread *td, struct __acl_delete_file_args *uap)
734 {
735 	struct nameidata nd;
736 	int error;
737 
738 	mtx_lock(&Giant);
739 	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
740 	error = namei(&nd);
741 	if (error == 0) {
742 		error = vacl_delete(td, nd.ni_vp, SCARG(uap, type));
743 		NDFREE(&nd, 0);
744 	}
745 	mtx_unlock(&Giant);
746 	return (error);
747 }
748 
749 /*
750  * Given a file descriptor, delete an ACL from it.
751  *
752  * MPSAFE
753  */
754 int
755 __acl_delete_fd(struct thread *td, struct __acl_delete_fd_args *uap)
756 {
757 	struct file *fp;
758 	int error;
759 
760 	mtx_lock(&Giant);
761 	error = getvnode(td->td_proc->p_fd, SCARG(uap, filedes), &fp);
762 	if (error == 0) {
763 		error = vacl_delete(td, (struct vnode *)fp->f_data,
764 			    SCARG(uap, type));
765 	}
766 	mtx_unlock(&Giant);
767 	return (error);
768 }
769 
770 /*
771  * Given a file path, check an ACL for it
772  *
773  * MPSAFE
774  */
775 int
776 __acl_aclcheck_file(struct thread *td, struct __acl_aclcheck_file_args *uap)
777 {
778 	struct nameidata	nd;
779 	int	error;
780 
781 	mtx_lock(&Giant);
782 	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
783 	error = namei(&nd);
784 	if (error == 0) {
785 		error = vacl_aclcheck(td, nd.ni_vp, SCARG(uap, type),
786 			    SCARG(uap, aclp));
787 		NDFREE(&nd, 0);
788 	}
789 	mtx_unlock(&Giant);
790 	return (error);
791 }
792 
793 /*
794  * Given a file descriptor, check an ACL for it
795  *
796  * MPSAFE
797  */
798 int
799 __acl_aclcheck_fd(struct thread *td, struct __acl_aclcheck_fd_args *uap)
800 {
801 	struct file *fp;
802 	int error;
803 
804 	mtx_lock(&Giant);
805 	error = getvnode(td->td_proc->p_fd, SCARG(uap, filedes), &fp);
806 	if (error == 0) {
807 		error = vacl_aclcheck(td, (struct vnode *)fp->f_data,
808 			    SCARG(uap, type), SCARG(uap, aclp));
809 	}
810 	mtx_unlock(&Giant);
811 	return (error);
812 }
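/*
 * Illustrative sketch only: from userland these system calls are
 * normally reached through the POSIX.1e wrappers in libc rather than
 * invoked directly, e.g. (error handling abbreviated, the path is just
 * an example):
 *
 *	acl_t acl;
 *	char *text;
 *
 *	acl = acl_get_file("/some/file", ACL_TYPE_ACCESS);
 *	if (acl == NULL)
 *		err(1, "acl_get_file");
 *	text = acl_to_text(acl, NULL);
 *	printf("%s", text);
 *	acl_free(text);
 *	acl_free(acl);
 */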
813