xref: /titanic_52/usr/src/uts/common/fs/ufs/ufs_acl.c (revision 7aec1d6e253b21f9e9b7ef68b4d81ab9859b51fe)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 
29 #include <sys/types.h>
30 #include <sys/stat.h>
31 #include <sys/errno.h>
32 #include <sys/kmem.h>
33 #include <sys/t_lock.h>
34 #include <sys/ksynch.h>
35 #include <sys/buf.h>
36 #include <sys/vfs.h>
37 #include <sys/vnode.h>
38 #include <sys/mode.h>
39 #include <sys/systm.h>
40 #include <vm/seg.h>
41 #include <sys/file.h>
42 #include <sys/acl.h>
43 #include <sys/fs/ufs_inode.h>
44 #include <sys/fs/ufs_acl.h>
45 #include <sys/fs/ufs_quota.h>
46 #include <sys/sysmacros.h>
47 #include <sys/debug.h>
48 #include <sys/policy.h>
49 
50 /* Cache routines */
51 static int si_signature(si_t *);
52 static int si_cachei_get(struct inode *, si_t **);
53 static int si_cachea_get(struct inode *, si_t *, si_t **);
54 static int si_cmp(si_t *, si_t *);
55 static void si_cache_put(si_t *);
56 void si_cache_del(si_t *, int);
57 void si_cache_init(void);
58 
59 static void ufs_si_free_mem(si_t *);
60 static int ufs_si_store(struct inode *, si_t *, int, cred_t *);
61 static si_t *ufs_acl_cp(si_t *);
62 static int ufs_sectobuf(si_t *, caddr_t *, size_t *);
63 static int acl_count(ufs_ic_acl_t *);
64 static int acl_validate(aclent_t *, int, int);
65 static int vsecattr2aclentry(vsecattr_t *, si_t **);
66 static int aclentry2vsecattr(si_t *, vsecattr_t *);
67 
68 krwlock_t si_cache_lock;		/* Protects si_cache */
69 int	si_cachecnt = 64;		/* # buckets in si_cache[a|i] */
70 si_t	**si_cachea;			/* The 'by acl' cache chains */
71 si_t	**si_cachei;			/* The 'by inode' cache chains */
72 long	si_cachehit = 0;
73 long	si_cachemiss = 0;
74 
75 #define	SI_HASH(S)	((int)(S) & (si_cachecnt - 1))
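
/*
 * NOTE (editorial): SI_HASH only distributes correctly because
 * si_cachecnt is a power of two; masking with (si_cachecnt - 1) is
 * equivalent to a modulo only for power-of-two table sizes.  If
 * si_cachecnt (64 above) is ever tuned, it must stay a power of two.
 */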
76 
77 /*
78  * Store the new acls in ip->i_ufs_acl.  Attempts to make things atomic.
79  * Search the acl cache for an identical sp and, if found, attach
80  * the cache'd acl to ip. If the acl is new (not in the cache),
81  * add it to the cache, then attach it to ip.  Last, remove and
82  * decrement the reference count of any prior acl list attached
83  * to the ip.
84  *
85  * Parameters:
86  * ip - Ptr to inode to receive the acl list
87  * sp - Ptr to in-core acl structure to attach to the inode.
88  * puship - 0 do not push the object inode(ip) 1 push the ip
89  * cr - Ptr to credentials
90  *
91  * Returns:	0 - Success
92  * 		N - From errno.h
93  */
94 static int
95 ufs_si_store(struct inode *ip, si_t *sp, int puship, cred_t *cr)
96 {
97 	struct vfs	*vfsp;
98 	struct inode	*sip;
99 	si_t		*oldsp;
100 	si_t		*csp;
101 	caddr_t		acldata;
102 	ino_t		oldshadow;
103 	size_t		acldatalen;
104 	off_t		offset;
105 	int		shadow;
106 	int		err;
107 	int		refcnt;
108 	int		usecnt;
109 	int		signature;
110 	int		resid;
111 	struct ufsvfs	*ufsvfsp	= ip->i_ufsvfs;
112 	struct fs	*fs		= ufsvfsp->vfs_fs;
113 
114 	ASSERT(RW_WRITE_HELD(&ip->i_contents));
115 	ASSERT(ip->i_ufs_acl != sp);
116 
117 	if (!CHECK_ACL_ALLOWED(ip->i_mode & IFMT))
118 		return (ENOSYS);
119 
120 	/*
121 	 * If there are only the three base owner/group/other entries,
122 	 * do not create a shadow inode.  If a shadow is already
123 	 * associated with the file, remove it.
125 	 */
126 	if (!sp->ausers &&
127 	    !sp->agroups &&
128 	    !sp->downer &&
129 	    !sp->dgroup &&
130 	    !sp->dother &&
131 	    sp->dclass.acl_ismask == 0 &&
132 	    !sp->dusers &&
133 	    !sp->dgroups) {
134 		if (ip->i_ufs_acl)
135 			err = ufs_si_free(ip->i_ufs_acl, ITOV(ip)->v_vfsp, cr);
136 		ip->i_ufs_acl = NULL;
137 		ip->i_shadow = 0;
138 		ip->i_flag |= IMOD | IACC;
139 		ip->i_mode = (ip->i_smode & ~0777) |
140 		    ((sp->aowner->acl_ic_perm & 07) << 6) |
141 		    (((sp->aclass.acl_ismask ? sp->aclass.acl_maskbits :
142 			sp->agroup->acl_ic_perm) & 07) << 3) |
143 		    (sp->aother->acl_ic_perm & 07);
144 		TRANS_INODE(ip->i_ufsvfs, ip);
145 		ufs_iupdat(ip, 1);
146 		ufs_si_free_mem(sp);
147 		return (0);
148 	}
149 
150 loop:
151 
152 	/*
153 	 * Check cache. If in cache, use existing shadow inode.
154 	 * Increment the shadow link count, then attach to the
155 	 * cached ufs_acl_entry struct, and increment its reference
156 	 * count.  Then discard the passed-in ufs_acl_entry and
157 	 * return.
158 	 */
159 	if (si_cachea_get(ip, sp, &csp) == 0) {
160 		ASSERT(RW_WRITE_HELD(&csp->s_lock));
161 		if (ip->i_ufs_acl == csp) {
162 			rw_exit(&csp->s_lock);
163 			(void) ufs_si_free_mem(sp);
164 			return (0);
165 		}
166 		vfsp = ITOV(ip)->v_vfsp;
167 		ASSERT(csp->s_shadow <= INT_MAX);
168 		shadow = (int)csp->s_shadow;
169 		/*
170 		 * We can't call ufs_iget while holding the csp locked,
171 		 * because we might deadlock.  So we drop the
172 		 * lock on csp, then go search the si_cache again
173 		 * to see if the csp is still there.
174 		 */
175 		rw_exit(&csp->s_lock);
176 		if ((err = ufs_iget(vfsp, shadow, &sip, cr)) != 0) {
177 			(void) ufs_si_free_mem(sp);
178 			return (EIO);
179 		}
180 		rw_enter(&sip->i_contents, RW_WRITER);
181 		if ((sip->i_mode & IFMT) != IFSHAD || sip->i_nlink <= 0) {
182 			rw_exit(&sip->i_contents);
183 			VN_RELE(ITOV(sip));
184 			goto loop;
185 		}
186 		/* Get the csp again */
187 		if (si_cachea_get(ip, sp, &csp) != 0) {
188 			rw_exit(&sip->i_contents);
189 			VN_RELE(ITOV(sip));
190 			goto loop;
191 		}
192 		ASSERT(RW_WRITE_HELD(&csp->s_lock));
193 		/* See if we got the right shadow */
194 		if (csp->s_shadow != shadow) {
195 			rw_exit(&csp->s_lock);
196 			rw_exit(&sip->i_contents);
197 			VN_RELE(ITOV(sip));
198 			goto loop;
199 		}
200 		ASSERT(RW_WRITE_HELD(&sip->i_contents));
201 		ASSERT(sip->i_dquot == 0);
202 		/* Increment link count */
203 		ASSERT(sip->i_nlink > 0);
204 		sip->i_nlink++;
205 		TRANS_INODE(ufsvfsp, sip);
206 		csp->s_use = sip->i_nlink;
207 		csp->s_ref++;
208 		ASSERT(csp->s_ref >= 0 && csp->s_ref <= csp->s_use);
209 		sip->i_flag |= ICHG | IMOD;
210 		sip->i_seq++;
211 		ITIMES_NOLOCK(sip);
212 		/*
213 		 * Always release s_lock before both releasing i_contents
214 		 * and calling VN_RELE.
215 		 */
216 		rw_exit(&csp->s_lock);
217 		rw_exit(&sip->i_contents);
218 		VN_RELE(ITOV(sip));
219 		(void) ufs_si_free_mem(sp);
220 		sp = csp;
221 		si_cachehit++;
222 		goto switchshadows;
223 	}
224 
225 	/* Alloc a shadow inode and fill it in */
226 	err = ufs_ialloc(ip, ip->i_number, (mode_t)IFSHAD, &sip, cr);
227 	if (err) {
228 		(void) ufs_si_free_mem(sp);
229 		return (err);
230 	}
231 	rw_enter(&sip->i_contents, RW_WRITER);
232 	sip->i_flag |= IACC | IUPD | ICHG;
233 	sip->i_seq++;
234 	sip->i_mode = (o_mode_t)IFSHAD;
235 	ITOV(sip)->v_type = VREG;
236 	sip->i_nlink = 1;
237 	sip->i_uid = crgetuid(cr);
238 	sip->i_suid = (ulong_t)sip->i_uid > (ulong_t)USHRT_MAX ?
239 		UID_LONG : sip->i_uid;
240 	sip->i_gid = crgetgid(cr);
241 	sip->i_sgid = (ulong_t)sip->i_gid > (ulong_t)USHRT_MAX ?
242 		GID_LONG : sip->i_gid;
243 	sip->i_shadow = 0;
244 	TRANS_INODE(ufsvfsp, sip);
245 	sip->i_ufs_acl = NULL;
246 	ASSERT(sip->i_size == 0);
247 
248 	sp->s_shadow = sip->i_number;
249 
250 	if ((err = ufs_sectobuf(sp, &acldata, &acldatalen)) != 0)
251 		goto errout;
252 	offset = 0;
253 
254 	/*
255 	 * We don't actually care about the residual count upon failure,
256 	 * but giving ufs_rdwri() the pointer means it won't translate
257 	 * all failures to EIO.  Our caller needs to know when ENOSPC
258 	 * gets hit.
259 	 */
260 	resid = 0;
261 	if (((err = ufs_rdwri(UIO_WRITE, FWRITE|FSYNC, sip, acldata,
262 	    acldatalen, (offset_t)0, UIO_SYSSPACE, &resid, cr)) != 0) ||
263 	    (resid != 0)) {
264 		kmem_free(acldata, acldatalen);
265 		if ((resid != 0) && (err == 0))
266 			err = ENOSPC;
267 		goto errout;
268 	}
269 
270 	offset += acldatalen;
271 	if ((acldatalen + fs->fs_bsize) > ufsvfsp->vfs_maxacl)
272 		ufsvfsp->vfs_maxacl = acldatalen + fs->fs_bsize;
273 
274 	kmem_free(acldata, acldatalen);
275 	/* Sync & free the shadow inode */
276 	ufs_iupdat(sip, 1);
277 	rw_exit(&sip->i_contents);
278 	VN_RELE(ITOV(sip));
279 
280 	/* We're committed to using this sp */
281 	sp->s_use = 1;
282 	sp->s_ref = 1;
283 
284 	/* Now put the new acl stuff in the cache */
285 	/* XXX Might make a duplicate */
286 	si_cache_put(sp);
287 	si_cachemiss++;
288 
289 switchshadows:
290 	/* Now switch the parent inode to use the new shadow inode */
291 	ASSERT(RW_WRITE_HELD(&ip->i_contents));
292 	rw_enter(&sp->s_lock, RW_READER);
293 	oldsp = ip->i_ufs_acl;
294 	oldshadow = ip->i_shadow;
295 	ip->i_ufs_acl = sp;
296 	ASSERT(sp->s_shadow <= INT_MAX);
297 	ip->i_shadow = (int32_t)sp->s_shadow;
298 	ASSERT(oldsp != sp);
299 	ASSERT(oldshadow != ip->i_number);
300 	ASSERT(ip->i_number != ip->i_shadow);
301 	/*
302 	 * Change the mode bits to follow the acl list
303 	 *
304 	 * NOTE:	a directory is not required to have a "regular" acl
305 	 *		bug id's 1238908,  1257173, 1263171 and 1263188
306 	 *
307 	 *		but if a "regular" acl is present, it must contain
308 	 *		an "owner", "group", and "other" acl
309 	 *
310 	 *		If an ACL mask exists, the effective group rights are
311 	 *		set to the mask.  Otherwise, the effective group rights
312 	 * 		are set to the object group bits.
313 	 */
314 	if (sp->aowner) {				/* Owner */
315 		ip->i_mode &= ~0700;			/* clear Owner */
316 		ip->i_mode |= (sp->aowner->acl_ic_perm & 07) << 6;
317 		ip->i_uid = sp->aowner->acl_ic_who;
318 	}
319 
320 	if (sp->agroup) {				/* Group */
321 		ip->i_mode &= ~0070;			/* clear Group */
322 		ip->i_mode |= (sp->agroup->acl_ic_perm & 07) << 3;
323 		ip->i_gid = sp->agroup->acl_ic_who;
324 	}
325 
326 	if (sp->aother) {				/* Other */
327 		ip->i_mode &= ~0007;			/* clear Other */
328 		ip->i_mode |= (sp->aother->acl_ic_perm & 07);
329 	}
330 
331 	if (sp->aclass.acl_ismask)
332 		ip->i_mode = (ip->i_mode & ~070) |
333 		    (((sp->aclass.acl_maskbits & 07) << 3) &
334 		    ip->i_mode);
335 
336 	TRANS_INODE(ufsvfsp, ip);
337 	rw_exit(&sp->s_lock);
338 	ip->i_flag |= ICHG;
339 	ip->i_seq++;
340 	/*
341 	 * when creating a file there is no need to push the inode, it
342 	 * is pushed later
343 	 */
344 	if (puship == 1)
345 		ufs_iupdat(ip, 1);
346 
347 	/*
348 	 * Decrement link count on the old shadow inode,
349 	 * and decrement reference count on the old aclp,
350 	 */
351 	if (oldshadow) {
352 		/* Get the shadow inode */
353 		ASSERT(RW_WRITE_HELD(&ip->i_contents));
354 		vfsp = ITOV(ip)->v_vfsp;
355 		if ((err = ufs_iget_alloced(vfsp, oldshadow, &sip, cr)) != 0) {
356 			return (EIO);
357 		}
358 		/* Decrement link count */
359 		rw_enter(&sip->i_contents, RW_WRITER);
360 		if (oldsp)
361 			rw_enter(&oldsp->s_lock, RW_WRITER);
362 		ASSERT(sip->i_dquot == 0);
363 		ASSERT(sip->i_nlink > 0);
364 		usecnt = --sip->i_nlink;
365 		ufs_setreclaim(sip);
366 		TRANS_INODE(ufsvfsp, sip);
367 		sip->i_flag |= ICHG | IMOD;
368 		sip->i_seq++;
369 		ITIMES_NOLOCK(sip);
370 		if (oldsp) {
371 			oldsp->s_use = usecnt;
372 			refcnt = --oldsp->s_ref;
373 			signature = oldsp->s_signature;
374 			/*
375 			 * Always release s_lock before both releasing
376 			 * i_contents and calling VN_RELE.
377 			 */
378 			rw_exit(&oldsp->s_lock);
379 		}
380 		rw_exit(&sip->i_contents);
381 		VN_RELE(ITOV(sip));
382 		if (oldsp && (refcnt == 0))
383 			si_cache_del(oldsp, signature);
384 	}
385 	return (0);
386 
387 errout:
388 	/* Throw the newly alloc'd inode away */
389 	sip->i_nlink = 0;
390 	ufs_setreclaim(sip);
391 	TRANS_INODE(ufsvfsp, sip);
392 	ITIMES_NOLOCK(sip);
393 	rw_exit(&sip->i_contents);
394 	VN_RELE(ITOV(sip));
395 	ASSERT(!sp->s_use && !sp->s_ref && !(sp->s_flags & SI_CACHED));
396 	(void) ufs_si_free_mem(sp);
397 	return (err);
398 }
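
/*
 * Illustrative sketch (editorial, not compiled): how ufs_si_store()
 * folds an acl triple into the 9 permission bits, as it does inline
 * above.  When a mask (CLASS_OBJ) entry is present it supplies the
 * effective group bits; otherwise the GROUP_OBJ permissions are used.
 * The helper name is hypothetical; the field names are those of si_t.
 */
#if 0
static o_mode_t
acl_to_mode_bits(si_t *sp)
{
	o_mode_t mode;

	mode = (sp->aowner->acl_ic_perm & 07) << 6;	/* owner bits */
	mode |= ((sp->aclass.acl_ismask ? sp->aclass.acl_maskbits :
	    sp->agroup->acl_ic_perm) & 07) << 3;	/* group/mask */
	mode |= sp->aother->acl_ic_perm & 07;		/* other bits */
	return (mode);
}
#endif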
399 
400 /*
401  * Load the acls for inode ip either from disk (adding to the cache),
402  * or search the cache and attach the cache'd acl list to the ip.
403  * In either case, maintain the proper reference count on the cached entry.
404  *
405  * Parameters:
406  * ip - Ptr to the inode which needs the acl list loaded
407  * cr - Ptr to credentials
408  *
409  * Returns:	0 - Success
410  * 		N - From errno.h
411  */
412 int
413 ufs_si_load(struct inode *ip, cred_t *cr)
414 /*
415  *	ip	parent inode in
416  *	cr	credentials in
417  */
418 {
419 	struct vfs	*vfsp;
420 	struct inode	*sip;
421 	ufs_fsd_t	*fsdp;
422 	si_t		*sp;
423 	vsecattr_t	vsecattr = {
424 				(uint_t)0,
425 				(int)0,
426 				(void *)NULL,
427 				(int)0,
428 				(void *)NULL};
429 	aclent_t	*aclp;
430 	ufs_acl_t	*ufsaclp;
431 	caddr_t		acldata = NULL;
432 	ino_t		maxino;
433 	int		err;
434 	size_t		acldatalen;
435 	int		numacls;
436 	int		shadow;
437 	int		usecnt;
438 	struct ufsvfs	*ufsvfsp	= ip->i_ufsvfs;
439 	struct fs	*fs		= ufsvfsp->vfs_fs;
440 
441 	ASSERT(ip != NULL);
442 	ASSERT(RW_WRITE_HELD(&ip->i_contents));
443 	ASSERT(ip->i_shadow && ip->i_ufs_acl == NULL);
444 	ASSERT((ip->i_mode & IFMT) != IFSHAD);
445 
446 	if (!CHECK_ACL_ALLOWED(ip->i_mode & IFMT))
447 		return (ENOSYS);
448 
449 	if (ip->i_shadow == ip->i_number)
450 		return (EIO);
451 
452 	maxino = (ino_t)(ITOF(ip)->fs_ncg * ITOF(ip)->fs_ipg);
453 	if (ip->i_shadow < UFSROOTINO || ip->i_shadow > maxino)
454 		return (EIO);
455 
456 	/*
457 	 * XXX Check cache.  If in cache, link to it and increment
458 	 * the reference count, then return.
459 	 */
460 	if (si_cachei_get(ip, &sp) == 0) {
461 		ASSERT(RW_WRITE_HELD(&sp->s_lock));
462 		ip->i_ufs_acl = sp;
463 		sp->s_ref++;
464 		ASSERT(sp->s_ref >= 0 && sp->s_ref <= sp->s_use);
465 		rw_exit(&sp->s_lock);
466 		si_cachehit++;
467 		return (0);
468 	}
469 
470 	/* Get the shadow inode */
471 	vfsp = ITOV(ip)->v_vfsp;
472 	shadow = ip->i_shadow;
473 	if ((err = ufs_iget_alloced(vfsp, shadow, &sip, cr)) != 0) {
474 		return (err);
475 	}
476 	rw_enter(&sip->i_contents, RW_WRITER);
477 
478 	if ((sip->i_mode & IFMT) != IFSHAD) {
479 		rw_exit(&sip->i_contents);
480 		err = EINVAL;
481 		goto alldone;
482 	}
483 
484 	ASSERT(sip->i_dquot == 0);
485 	usecnt = sip->i_nlink;
486 	if ((!ULOCKFS_IS_NOIACC(&ufsvfsp->vfs_ulockfs)) &&
487 	    (!(sip)->i_ufsvfs->vfs_noatime)) {
488 		sip->i_flag |= IACC;
489 	}
490 	rw_downgrade(&sip->i_contents);
491 
492 	ASSERT(sip->i_size <= MAXOFF_T);
493 	/* Read the acl's and other stuff from disk */
494 	acldata	 = kmem_zalloc((size_t)sip->i_size, KM_SLEEP);
495 	acldatalen = sip->i_size;
496 
497 	err = ufs_rdwri(UIO_READ, FREAD, sip, acldata, acldatalen, (offset_t)0,
498 	    UIO_SYSSPACE, (int *)0, cr);
499 
500 	rw_exit(&sip->i_contents);
501 
502 	if (err)
503 		goto alldone;
504 
505 	/*
506 	 * Convert from disk format
507 	 * Result is a vsecattr struct which we then convert to the
508 	 * si struct.
509 	 */
510 	bzero((caddr_t)&vsecattr, sizeof (vsecattr_t));
511 	for (fsdp = (ufs_fsd_t *)acldata;
512 			fsdp < (ufs_fsd_t *)(acldata + acldatalen);
513 			fsdp = (ufs_fsd_t *)((caddr_t)fsdp +
514 				FSD_RECSZ(fsdp, fsdp->fsd_size))) {
515 		if (fsdp->fsd_size <= 0)
516 			break;
517 		switch (fsdp->fsd_type) {
518 		case FSD_ACL:
519 			numacls = vsecattr.vsa_aclcnt =
520 				(int)((fsdp->fsd_size - 2 * sizeof (int)) /
521 							sizeof (ufs_acl_t));
522 			aclp = vsecattr.vsa_aclentp =
523 			kmem_zalloc(numacls * sizeof (aclent_t), KM_SLEEP);
524 			for (ufsaclp = (ufs_acl_t *)fsdp->fsd_data;
525 							numacls; ufsaclp++) {
526 				aclp->a_type = ufsaclp->acl_tag;
527 				aclp->a_id = ufsaclp->acl_who;
528 				aclp->a_perm = ufsaclp->acl_perm;
529 				aclp++;
530 				numacls--;
531 			}
532 			break;
533 		case FSD_DFACL:
534 			numacls = vsecattr.vsa_dfaclcnt =
535 				(int)((fsdp->fsd_size - 2 * sizeof (int)) /
536 							sizeof (ufs_acl_t));
537 			aclp = vsecattr.vsa_dfaclentp =
538 			kmem_zalloc(numacls * sizeof (aclent_t), KM_SLEEP);
539 			for (ufsaclp = (ufs_acl_t *)fsdp->fsd_data;
540 							numacls; ufsaclp++) {
541 				aclp->a_type = ufsaclp->acl_tag;
542 				aclp->a_id = ufsaclp->acl_who;
543 				aclp->a_perm = ufsaclp->acl_perm;
544 				aclp++;
545 				numacls--;
546 			}
547 			break;
548 		}
549 	}
550 	/* Sort the lists */
551 	if (vsecattr.vsa_aclentp) {
552 		ksort((caddr_t)vsecattr.vsa_aclentp, vsecattr.vsa_aclcnt,
553 				sizeof (aclent_t), cmp2acls);
554 		if ((err = acl_validate(vsecattr.vsa_aclentp,
555 				vsecattr.vsa_aclcnt, ACL_CHECK)) != 0) {
556 			goto alldone;
557 		}
558 	}
559 	if (vsecattr.vsa_dfaclentp) {
560 		ksort((caddr_t)vsecattr.vsa_dfaclentp, vsecattr.vsa_dfaclcnt,
561 				sizeof (aclent_t), cmp2acls);
562 		if ((err = acl_validate(vsecattr.vsa_dfaclentp,
563 				vsecattr.vsa_dfaclcnt, DEF_ACL_CHECK)) != 0) {
564 			goto alldone;
565 		}
566 	}
567 
568 	/* ignore shadow inodes without ACLs */
569 	if (!vsecattr.vsa_aclentp && !vsecattr.vsa_dfaclentp) {
570 		err = 0;
571 		goto alldone;
572 	}
573 
574 	/* Convert from vsecattr struct to ufs_acl_entry struct */
575 	if ((err = vsecattr2aclentry(&vsecattr, &sp)) != 0) {
576 		goto alldone;
577 	}
578 
579 	/* These fields aren't filled in by vsecattr2aclentry */
580 	sp->s_shadow = ip->i_shadow;
581 	sp->s_dev = ip->i_dev;
582 	sp->s_use = usecnt;
583 	sp->s_ref = 1;
584 	ASSERT(sp->s_ref >= 0 && sp->s_ref <= sp->s_use);
585 
586 	/* XXX Might make a duplicate */
587 	si_cache_put(sp);
588 
589 	/* Signal anyone waiting on this shadow to be loaded */
590 	ip->i_ufs_acl = sp;
591 	err = 0;
592 	si_cachemiss++;
593 	if ((acldatalen + fs->fs_bsize) > ufsvfsp->vfs_maxacl)
594 		ufsvfsp->vfs_maxacl = acldatalen + fs->fs_bsize;
595 alldone:
596 	/*
597 	 * Common exit point. Mark shadow inode as ISTALE
598 	 * if we detect an internal inconsistency, to
599 	 * prevent stray inodes appearing in the cache.
600 	 */
601 	if (err) {
602 		rw_enter(&sip->i_contents, RW_READER);
603 		mutex_enter(&sip->i_tlock);
604 		sip->i_flag |= ISTALE;
605 		mutex_exit(&sip->i_tlock);
606 		rw_exit(&sip->i_contents);
607 	}
608 	VN_RELE(ITOV(sip));
609 
610 	/*
611 	 * Cleanup of data structures allocated
612 	 * on the fly.
613 	 */
614 	if (acldata)
615 		kmem_free(acldata, acldatalen);
616 
617 	if (vsecattr.vsa_aclentp)
618 		kmem_free(vsecattr.vsa_aclentp,
619 			vsecattr.vsa_aclcnt * sizeof (aclent_t));
620 	if (vsecattr.vsa_dfaclentp)
621 		kmem_free(vsecattr.vsa_dfaclentp,
622 			vsecattr.vsa_dfaclcnt * sizeof (aclent_t));
623 	return (err);
624 }
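
/*
 * Illustrative sketch (editorial, not compiled): the shadow inode
 * read above is a sequence of variable-length ufs_fsd_t records,
 * advanced over with FSD_RECSZ() exactly as in the ufs_si_load()
 * loop.  A minimal walker looks like this; walk_fsd_records() and
 * its callback are hypothetical names.
 */
#if 0
static void
walk_fsd_records(caddr_t buf, size_t len, void (*visit)(ufs_fsd_t *))
{
	ufs_fsd_t *fsdp;

	for (fsdp = (ufs_fsd_t *)buf;
	    fsdp < (ufs_fsd_t *)(buf + len);
	    fsdp = (ufs_fsd_t *)((caddr_t)fsdp +
	    FSD_RECSZ(fsdp, fsdp->fsd_size))) {
		if (fsdp->fsd_size <= 0)
			break;		/* malformed record; stop */
		if (fsdp->fsd_type == FSD_ACL ||
		    fsdp->fsd_type == FSD_DFACL)
			visit(fsdp);
	}
}
#endif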
625 
626 /*
627  * Check the inode's ACL's to see if this mode of access is
628  * allowed; return 0 if allowed, EACCES if not.
629  *
630  * We follow the procedure defined in Sec. 3.3.5, ACL Access
631  * Check Algorithm, of the POSIX 1003.6 Draft Standard.
632  */
633 int
634 ufs_acl_access(struct inode *ip, int mode, cred_t *cr)
635 /*
636  *	ip 	parent inode
637  *	mode 	mode of access read, write, execute/examine
638  *	cr	credentials
639  */
640 {
641 	ufs_ic_acl_t *acl;
642 	int ismask, mask = 0;
643 	int gperm = 0;
644 	int ngroup = 0;
645 	si_t	*sp = NULL;
646 	uid_t uid = crgetuid(cr);
647 	uid_t owner;
648 
649 	ASSERT(ip->i_ufs_acl != NULL);
650 
651 	sp = ip->i_ufs_acl;
652 
653 	ismask = sp->aclass.acl_ismask;
655 
656 	if (ismask)
657 		mask = sp->aclass.acl_maskbits;
658 	else
659 		mask = -1;
660 
661 	/*
662 	 * (1) If user owns the file, obey user mode bits
663 	 */
664 	owner = sp->aowner->acl_ic_who;
665 	if (uid == owner) {
666 		return (MODE_CHECK(owner, mode, (sp->aowner->acl_ic_perm << 6),
667 							    cr, ip));
668 	}
669 
670 	/*
671 	 * (2) Obey any matching ACL_USER entry
672 	 */
673 	if (sp->ausers)
674 		for (acl = sp->ausers; acl != NULL; acl = acl->acl_ic_next) {
675 			if (acl->acl_ic_who == uid) {
676 				return (MODE_CHECK(owner, mode,
677 				    (mask & acl->acl_ic_perm) << 6, cr, ip));
678 			}
679 		}
680 
681 	/*
682 	 * (3) If user belongs to file's group, obey group mode bits
683 	 * if no ACL mask is defined; if there is an ACL mask, we look
684 	 * at both the group mode bits and any ACL_GROUP entries.
685 	 */
686 	if (groupmember((uid_t)sp->agroup->acl_ic_who, cr)) {
687 		ngroup++;
688 		gperm = (sp->agroup->acl_ic_perm);
689 		if (!ismask)
690 			return (MODE_CHECK(owner, mode, (gperm << 6), cr, ip));
691 	}
692 
693 	/*
694 	 * (4) Accumulate the permissions in matching ACL_GROUP entries
695 	 */
696 	if (sp->agroups)
697 		for (acl = sp->agroups; acl != NULL; acl = acl->acl_ic_next)
698 		{
699 			if (groupmember(acl->acl_ic_who, cr)) {
700 				ngroup++;
701 				gperm |= acl->acl_ic_perm;
702 			}
703 		}
704 
705 	if (ngroup != 0)
706 		return (MODE_CHECK(owner, mode, ((gperm & mask) << 6), cr, ip));
707 
708 	/*
709 	 * (5) Finally, use the "other" mode bits
710 	 */
711 	return (MODE_CHECK(owner, mode, sp->aother->acl_ic_perm << 6, cr, ip));
712 }
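
/*
 * NOTE (editorial): a worked example of the steps above.  For an
 * acl of
 *	user::rw-, user:ann:rw-, group::r--, mask:r--, other:---
 * a write request by ann is decided at step (2), but the effective
 * rights are (mask & acl entry) = r--, so the write is denied even
 * though ann's ACL_USER entry grants it.  This is the standard POSIX
 * mask semantic.
 */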
713 
714 /*ARGSUSED2*/
715 int
716 ufs_acl_get(struct inode *ip, vsecattr_t *vsap, int flag, cred_t *cr)
717 {
718 	aclent_t	*aclentp;
719 
720 	ASSERT(RW_LOCK_HELD(&ip->i_contents));
721 
722 	/* XXX Range check, sanity check, shadow check */
723 	/* If an ACL is present, get the data from the shadow inode info */
724 	if (ip->i_ufs_acl)
725 		return (aclentry2vsecattr(ip->i_ufs_acl, vsap));
726 
727 	/*
728 	 * If no ACLs are present, fabricate one from the mode bits.
729 	 * This code is almost identical to fs_fab_acl(), but we
730 	 * already have the mode bits handy, so we'll avoid going
731 	 * through VOP_GETATTR() again.
732 	 */
733 
734 	vsap->vsa_aclcnt    = 0;
735 	vsap->vsa_aclentp   = NULL;
736 	vsap->vsa_dfaclcnt  = 0;	/* Default ACLs are not fabricated */
737 	vsap->vsa_dfaclentp = NULL;
738 
739 	if (vsap->vsa_mask & (VSA_ACLCNT | VSA_ACL))
740 		vsap->vsa_aclcnt    = 4;  /* USER, GROUP, OTHER, and CLASS */
741 
742 	if (vsap->vsa_mask & VSA_ACL) {
743 		vsap->vsa_aclentp = kmem_zalloc(4 * sizeof (aclent_t),
744 		    KM_SLEEP);
745 		if (vsap->vsa_aclentp == NULL)
746 			return (ENOMEM);
747 		aclentp = vsap->vsa_aclentp;
748 
749 		/* Owner */
750 		aclentp->a_type = USER_OBJ;
751 		aclentp->a_perm = ((ushort_t)(ip->i_mode & 0700)) >> 6;
752 		aclentp->a_id = ip->i_uid;	/* Really undefined */
753 		aclentp++;
754 
755 		/* Group */
756 		aclentp->a_type = GROUP_OBJ;
757 		aclentp->a_perm = ((ushort_t)(ip->i_mode & 0070)) >> 3;
758 		aclentp->a_id = ip->i_gid; 	/* Really undefined */
759 		aclentp++;
760 
761 		/* Other */
762 		aclentp->a_type = OTHER_OBJ;
763 		aclentp->a_perm = ip->i_mode & 0007;
764 		aclentp->a_id = 0;		/* Really undefined */
765 		aclentp++;
766 
767 		/* Class */
768 		aclentp->a_type = CLASS_OBJ;
769 		aclentp->a_perm = ((ushort_t)(ip->i_mode & 0070)) >> 3;
770 		aclentp->a_id = 0;		/* Really undefined */
771 		ksort((caddr_t)vsap->vsa_aclentp, vsap->vsa_aclcnt,
772 				sizeof (aclent_t), cmp2acls);
773 	}
774 
775 	return (0);
776 }
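
/*
 * NOTE (editorial): for a file with no shadow acl and i_mode 0640,
 * the list fabricated above (after sorting) is
 *	USER_OBJ:6, GROUP_OBJ:4, CLASS_OBJ:4, OTHER_OBJ:0
 * i.e. the CLASS_OBJ (mask) entry simply mirrors the group bits.
 */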
777 
778 /*ARGSUSED2*/
779 int
780 ufs_acl_set(struct inode *ip, vsecattr_t *vsap, int flag, cred_t *cr)
781 {
782 	si_t	*sp;
783 	int	err;
784 
785 	ASSERT(RW_WRITE_HELD(&ip->i_contents));
786 
787 	if (!CHECK_ACL_ALLOWED(ip->i_mode & IFMT))
788 		return (ENOSYS);
789 
790 	/*
791 	 * only the owner of the file or privileged users can change the ACLs
792 	 */
793 	if (secpolicy_vnode_setdac(cr, ip->i_uid) != 0)
794 		return (EPERM);
795 
796 	/* Convert from vsecattr struct to ufs_acl_entry struct */
797 	if ((err = vsecattr2aclentry(vsap, &sp)) != 0)
798 		return (err);
799 	sp->s_dev = ip->i_dev;
800 
801 	/*
802 	 * Make the user & group objs in the acl list follow what's
803 	 * in the inode.
804 	 */
805 #ifdef DEBUG
806 	if (vsap->vsa_mask == VSA_ACL) {
807 		ASSERT(sp->aowner);
808 		ASSERT(sp->agroup);
809 		ASSERT(sp->aother);
810 	}
811 #endif	/* DEBUG */
812 
813 	if (sp->aowner)
814 		sp->aowner->acl_ic_who = ip->i_uid;
815 	if (sp->agroup)
816 		sp->agroup->acl_ic_who = ip->i_gid;
817 
818 	/*
819 	 * Write and cache the new acl list
820 	 */
821 	err = ufs_si_store(ip, sp, 1, cr);
822 
823 	return (err);
824 }
825 
826 /*
827  * XXX Scan sorted array of acl's, checking for:
828  * 1) Any duplicate/conflicting entries (same type and id)
829  * 2) More than 1 of USER_OBJ, GROUP_OBJ, OTHER_OBJ, CLASS_OBJ
830  * 3) More than 1 of DEF_USER_OBJ, DEF_GROUP_OBJ, DEF_OTHER_OBJ, DEF_CLASS_OBJ
831  *
832  * Parameters:
833  * aclentp - ptr to sorted list of acl entries.
834  * nentries - # acl entries on the list
835  * flag - ACL_CHECK or DEF_ACL_CHECK, indicating whether the
836  * list contains regular acls or default acls.
837  *
838  * Returns:	0 - Success
839  * EINVAL - Invalid list (dups or multiple entries of type USER_OBJ, etc)
840  */
841 static int
842 acl_validate(aclent_t *aclentp, int nentries, int flag)
843 {
844 	int	i;
845 	int	nuser_objs = 0;
846 	int	ngroup_objs = 0;
847 	int	nother_objs = 0;
848 	int	nclass_objs = 0;
849 	int	ndef_user_objs = 0;
850 	int	ndef_group_objs = 0;
851 	int	ndef_other_objs = 0;
852 	int	ndef_class_objs = 0;
853 	int	nusers = 0;
854 	int	ngroups = 0;
855 	int	ndef_users = 0;
856 	int	ndef_groups = 0;
857 	int	numdefs = 0;
858 
859 	/* A null list means no acls; an empty list is invalid */
860 	if (aclentp == NULL)
861 		return (0);
862 
863 	if (nentries <= 0)
864 		return (EINVAL);
865 
866 	for (i = 1; i < nentries; i++) {
867 		if (((aclentp[i - 1].a_type == aclentp[i].a_type) &&
868 		    (aclentp[i - 1].a_id   == aclentp[i].a_id)) ||
869 		    (aclentp[i - 1].a_perm > 07)) {
870 			return (EINVAL);
871 		}
872 	}
873 
874 	if (flag == 0 || (flag != ACL_CHECK && flag != DEF_ACL_CHECK))
875 		return (EINVAL);
876 
877 	/* Count types */
878 	for (i = 0; i < nentries; i++) {
879 		switch (aclentp[i].a_type) {
880 		case USER_OBJ:		/* Owner */
881 			nuser_objs++;
882 			break;
883 		case GROUP_OBJ:		/* Group */
884 			ngroup_objs++;
885 			break;
886 		case OTHER_OBJ:		/* Other */
887 			nother_objs++;
888 			break;
889 		case CLASS_OBJ:		/* Mask */
890 			nclass_objs++;
891 			break;
892 		case DEF_USER_OBJ:	/* Default Owner */
893 			ndef_user_objs++;
894 			break;
895 		case DEF_GROUP_OBJ:	/* Default Group */
896 			ndef_group_objs++;
897 			break;
898 		case DEF_OTHER_OBJ:	/* Default Other */
899 			ndef_other_objs++;
900 			break;
901 		case DEF_CLASS_OBJ:	/* Default Mask */
902 			ndef_class_objs++;
903 			break;
904 		case USER:		/* Users */
905 			nusers++;
906 			break;
907 		case GROUP:		/* Groups */
908 			ngroups++;
909 			break;
910 		case DEF_USER:		/* Default Users */
911 			ndef_users++;
912 			break;
913 		case DEF_GROUP:		/* Default Groups */
914 			ndef_groups++;
915 			break;
916 		default:		/* Unknown type */
917 			return (EINVAL);
918 		}
919 	}
920 
921 	/*
922 	 * For normal acl's, we require there be one (and only one)
923 	 * USER_OBJ, GROUP_OBJ and OTHER_OBJ.  There is either zero
924 	 * or one CLASS_OBJ.
925 	 */
926 	if (flag & ACL_CHECK) {
927 		if (nuser_objs != 1 || ngroup_objs != 1 ||
928 		    nother_objs != 1 || nclass_objs > 1) {
929 			return (EINVAL);
930 		}
931 		/*
932 		 * If there are ANY group acls, there MUST be a
933 		 * class_obj(mask) acl (1003.6/D12 p. 29 lines 75-80).
934 		 */
935 		if (ngroups && !nclass_objs) {
936 			return (EINVAL);
937 		}
938 		if (nuser_objs + ngroup_objs + nother_objs + nclass_objs +
939 		    ngroups + nusers > MAX_ACL_ENTRIES)
940 			return (EINVAL);
941 	}
942 
943 	/*
944 	 * For default acl's, we require that there be either one (and only one)
945 	 * DEF_USER_OBJ, DEF_GROUP_OBJ and DEF_OTHER_OBJ
946 	 * or  there be none of them.
947 	 */
948 	if (flag & DEF_ACL_CHECK) {
949 		if (ndef_other_objs > 1 || ndef_user_objs > 1 ||
950 		    ndef_group_objs > 1 || ndef_class_objs > 1) {
951 			return (EINVAL);
952 		}
953 
954 		numdefs = ndef_other_objs + ndef_user_objs + ndef_group_objs;
955 
956 		if (numdefs != 0 && numdefs != 3) {
957 			return (EINVAL);
958 		}
959 		/*
960 		 * If there are ANY def_group acls, there MUST be a
961 		 * def_class_obj(mask) acl (1003.6/D12 P. 29 lines 75-80).
962 		 * XXX(jimh) This is inferred.
963 		 */
964 		if (ndef_groups && !ndef_class_objs) {
965 			return (EINVAL);
966 		}
967 		if ((ndef_users || ndef_groups) &&
968 		    ((numdefs != 3) && !ndef_class_objs)) {
969 			return (EINVAL);
970 		}
971 		if (ndef_user_objs + ndef_group_objs + ndef_other_objs +
972 		    ndef_class_objs + ndef_users + ndef_groups >
973 		    MAX_ACL_ENTRIES)
974 			return (EINVAL);
975 	}
976 	return (0);
977 }
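
/*
 * NOTE (editorial): an example of the rules above.  After sorting,
 * the list
 *	USER_OBJ, USER, GROUP_OBJ, GROUP, CLASS_OBJ, OTHER_OBJ
 * passes ACL_CHECK, while the same list without the CLASS_OBJ entry
 * fails, since any GROUP entry requires a mask entry; likewise a
 * default list must carry either all three or none of DEF_USER_OBJ,
 * DEF_GROUP_OBJ and DEF_OTHER_OBJ.
 */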
978 
979 static int
980 formacl(ufs_ic_acl_t **aclpp, aclent_t *aclentp)
981 {
982 	ufs_ic_acl_t *uaclp;
983 
984 	uaclp = kmem_alloc(sizeof (ufs_ic_acl_t), KM_SLEEP);
985 	uaclp->acl_ic_perm = aclentp->a_perm;
986 	uaclp->acl_ic_who = aclentp->a_id;
987 	uaclp->acl_ic_next = *aclpp;
988 	*aclpp = uaclp;
989 	return (0);
990 }
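
/*
 * NOTE (editorial): formacl() pushes each entry onto the head of the
 * list.  vsecattr2aclentry() below compensates by walking the sorted
 * vsecattr arrays from the last entry backwards, so the in-core
 * lists come out in ascending (sorted) order.
 */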
991 
992 /*
993  * XXX - Make more efficient
994  * Convert from the vsecattr struct, used by the VOP interface, to
995  * the ufs_acl_entry struct used for in-core storage of acl's.
996  *
997  * Parameters:
998  * vsap - Ptr to array of security attributes.
999  * spp - Ptr to ptr to si struct for the results
1000  *
1001  * Returns:	0 - Success
1002  * 		N - From errno.h
1003  */
1004 static int
1005 vsecattr2aclentry(vsecattr_t *vsap, si_t **spp)
1006 {
1007 	aclent_t	*aclentp, *aclp;
1008 	si_t		*sp;
1009 	int		err;
1010 	int		i;
1011 
1012 	/* Sort & validate the lists on the vsap */
1013 	ksort((caddr_t)vsap->vsa_aclentp, vsap->vsa_aclcnt,
1014 			sizeof (aclent_t), cmp2acls);
1015 	ksort((caddr_t)vsap->vsa_dfaclentp, vsap->vsa_dfaclcnt,
1016 			sizeof (aclent_t), cmp2acls);
1017 	if ((err = acl_validate(vsap->vsa_aclentp,
1018 			vsap->vsa_aclcnt, ACL_CHECK)) != 0)
1019 		return (err);
1020 	if ((err = acl_validate(vsap->vsa_dfaclentp,
1021 			vsap->vsa_dfaclcnt, DEF_ACL_CHECK)) != 0)
1022 		return (err);
1023 
1024 	/* Create new si struct and hang acl's off it */
1025 	sp = kmem_zalloc(sizeof (si_t), KM_SLEEP);
1026 	rw_init(&sp->s_lock, NULL, RW_DEFAULT, NULL);
1027 
1028 	/* Process acl list */
1029 	aclp = (aclent_t *)vsap->vsa_aclentp;
1030 	aclentp = aclp + vsap->vsa_aclcnt - 1;
1031 	for (i = 0; i < vsap->vsa_aclcnt; i++) {
1032 		switch (aclentp->a_type) {
1033 		case USER_OBJ:		/* Owner */
1034 			if (err = formacl(&sp->aowner, aclentp))
1035 				goto error;
1036 			break;
1037 		case GROUP_OBJ:		/* Group */
1038 			if (err = formacl(&sp->agroup, aclentp))
1039 				goto error;
1040 			break;
1041 		case OTHER_OBJ:		/* Other */
1042 			if (err = formacl(&sp->aother, aclentp))
1043 				goto error;
1044 			break;
1045 		case USER:
1046 			if (err = formacl(&sp->ausers, aclentp))
1047 				goto error;
1048 			break;
1049 		case CLASS_OBJ:		/* Mask */
1050 			sp->aclass.acl_ismask = 1;
1051 			sp->aclass.acl_maskbits = aclentp->a_perm;
1052 			break;
1053 		case GROUP:
1054 			if (err = formacl(&sp->agroups, aclentp))
1055 				goto error;
1056 			break;
1057 		default:
1058 			break;
1059 		}
1060 		aclentp--;
1061 	}
1062 
1063 	/* Process default acl list */
1064 	aclp = (aclent_t *)vsap->vsa_dfaclentp;
1065 	aclentp = aclp + vsap->vsa_dfaclcnt - 1;
1066 	for (i = 0; i < vsap->vsa_dfaclcnt; i++) {
1067 		switch (aclentp->a_type) {
1068 		case DEF_USER_OBJ:	/* Default Owner */
1069 			if (err = formacl(&sp->downer, aclentp))
1070 				goto error;
1071 			break;
1072 		case DEF_GROUP_OBJ:	/* Default Group */
1073 			if (err = formacl(&sp->dgroup, aclentp))
1074 				goto error;
1075 			break;
1076 		case DEF_OTHER_OBJ:	/* Default Other */
1077 			if (err = formacl(&sp->dother, aclentp))
1078 				goto error;
1079 			break;
1080 		case DEF_USER:
1081 			if (err = formacl(&sp->dusers, aclentp))
1082 				goto error;
1083 			break;
1084 		case DEF_CLASS_OBJ:	/* Default Mask */
1085 			sp->dclass.acl_ismask = 1;
1086 			sp->dclass.acl_maskbits = aclentp->a_perm;
1087 			break;
1088 		case DEF_GROUP:
1089 			if (err = formacl(&sp->dgroups, aclentp))
1090 				goto error;
1091 			break;
1092 		default:
1093 			break;
1094 		}
1095 		aclentp--;
1096 	}
1097 	*spp = sp;
1098 	return (0);
1099 
1100 error:
1101 	ufs_si_free_mem(sp);
1102 	return (err);
1103 }
1104 
1105 void
1106 formvsec(int obj_type, ufs_ic_acl_t *aclp, aclent_t **aclentpp)
1107 {
1108 	for (; aclp; aclp = aclp->acl_ic_next) {
1109 		(*aclentpp)->a_type = obj_type;
1110 		(*aclentpp)->a_perm = aclp->acl_ic_perm;
1111 		(*aclentpp)->a_id = aclp->acl_ic_who;
1112 		(*aclentpp)++;
1113 	}
1114 }
1115 
1116 /*
1117  * XXX - Make more efficient
1118  * Convert from the ufs_acl_entry struct used for in-core storage of acl's
1119  * to the vsecattr struct,  used by the VOP interface.
1120  *
1121  * Parameters:
1122  * sp - Ptr to si struct with the acls
1123  * vsap - Ptr to a vsecattr struct which will take the results.
1124  *
1125  * Returns:	0 - Success
1126  *		N - From errno table
1127  */
1128 static int
1129 aclentry2vsecattr(si_t *sp, vsecattr_t *vsap)
1130 {
1131 	aclent_t	*aclentp;
1132 	int		numacls = 0;
1133 	int		err;
1134 
1135 	vsap->vsa_aclentp = vsap->vsa_dfaclentp = NULL;
1136 
1137 	numacls = acl_count(sp->aowner) +
1138 	    acl_count(sp->agroup) +
1139 	    acl_count(sp->aother) +
1140 	    acl_count(sp->ausers) +
1141 	    acl_count(sp->agroups);
1142 	if (sp->aclass.acl_ismask)
1143 		numacls++;
1144 
1145 	if (numacls == 0)
1146 		goto do_defaults;
1147 
1148 	if (vsap->vsa_mask & (VSA_ACLCNT | VSA_ACL))
1149 		vsap->vsa_aclcnt = numacls;
1150 
1151 	if (vsap->vsa_mask & VSA_ACL) {
1152 		vsap->vsa_aclentp = kmem_zalloc(numacls * sizeof (aclent_t),
1153 		    KM_SLEEP);
1154 		aclentp = vsap->vsa_aclentp;
1155 
1156 		formvsec(USER_OBJ, sp->aowner, &aclentp);
1157 		formvsec(USER, sp->ausers, &aclentp);
1158 		formvsec(GROUP_OBJ, sp->agroup, &aclentp);
1159 		formvsec(GROUP, sp->agroups, &aclentp);
1160 		formvsec(OTHER_OBJ, sp->aother, &aclentp);
1161 
1162 		if (sp->aclass.acl_ismask) {
1163 			aclentp->a_type = CLASS_OBJ;		/* Mask */
1164 			aclentp->a_perm = sp->aclass.acl_maskbits;
1165 			aclentp->a_id = 0;
1166 			aclentp++;
1167 		}
1168 
1169 		/* Sort the acl list */
1170 		ksort((caddr_t)vsap->vsa_aclentp, vsap->vsa_aclcnt,
1171 				sizeof (aclent_t), cmp2acls);
1172 		/* Check the acl list */
1173 		if ((err = acl_validate(vsap->vsa_aclentp,
1174 				vsap->vsa_aclcnt, ACL_CHECK)) != 0) {
1175 			kmem_free(vsap->vsa_aclentp, numacls *
1176 				sizeof (aclent_t));
1177 			vsap->vsa_aclentp = NULL;
1178 			return (err);
1179 		}
1180 
1181 	}
1182 do_defaults:
1183 	/* Process Defaults */
1184 
1185 	numacls = acl_count(sp->downer) +
1186 	    acl_count(sp->dgroup) +
1187 	    acl_count(sp->dother) +
1188 	    acl_count(sp->dusers) +
1189 	    acl_count(sp->dgroups);
1190 	if (sp->dclass.acl_ismask)
1191 		numacls++;
1192 
1193 	if (numacls == 0)
1194 		goto do_others;
1195 
1196 	if (vsap->vsa_mask & (VSA_DFACLCNT | VSA_DFACL))
1197 		vsap->vsa_dfaclcnt = numacls;
1198 
1199 	if (vsap->vsa_mask & VSA_DFACL) {
1200 		vsap->vsa_dfaclentp = kmem_zalloc(numacls * sizeof (aclent_t),
1201 							KM_SLEEP);
1202 		aclentp = vsap->vsa_dfaclentp;
1203 		formvsec(DEF_USER_OBJ, sp->downer, &aclentp);
1204 		formvsec(DEF_USER, sp->dusers, &aclentp);
1205 		formvsec(DEF_GROUP_OBJ, sp->dgroup, &aclentp);
1206 		formvsec(DEF_GROUP, sp->dgroups, &aclentp);
1207 		formvsec(DEF_OTHER_OBJ, sp->dother, &aclentp);
1208 
1209 		if (sp->dclass.acl_ismask) {
1210 			aclentp->a_type = DEF_CLASS_OBJ;	/* Mask */
1211 			aclentp->a_perm = sp->dclass.acl_maskbits;
1212 			aclentp->a_id = 0;
1213 			aclentp++;
1214 		}
1215 
1216 		/* Sort the default acl list */
1217 		ksort((caddr_t)vsap->vsa_dfaclentp, vsap->vsa_dfaclcnt,
1218 				sizeof (aclent_t), cmp2acls);
1219 		if ((err = acl_validate(vsap->vsa_dfaclentp,
1220 		    vsap->vsa_dfaclcnt, DEF_ACL_CHECK)) != 0) {
1221 			if (vsap->vsa_aclentp != NULL)
1222 				kmem_free(vsap->vsa_aclentp,
1223 				    vsap->vsa_aclcnt * sizeof (aclent_t));
1224 			kmem_free(vsap->vsa_dfaclentp,
1225 			    vsap->vsa_dfaclcnt * sizeof (aclent_t));
1226 			vsap->vsa_aclentp = vsap->vsa_dfaclentp = NULL;
1227 			return (err);
1228 		}
1229 	}
1230 
1231 do_others:
1232 	return (0);
1233 }
1234 
1235 static void
1236 acl_free(ufs_ic_acl_t *aclp)
1237 {
1238 	while (aclp != NULL) {
1239 		ufs_ic_acl_t *nextaclp = aclp->acl_ic_next;
1240 		kmem_free(aclp, sizeof (ufs_ic_acl_t));
1241 		aclp = nextaclp;
1242 	}
1243 }
1244 
1245 /*
1246  * ufs_si_free_mem will discard the sp, and the acl hanging off of the
1247  * sp.  It is required that the sp not be locked, and not be in the
1248  * cache.
1249  *
1250  * input: pointer to sp to discard.
1251  *
1252  * return - nothing.
1253  *
1254  */
1255 static void
1256 ufs_si_free_mem(si_t *sp)
1257 {
1258 	ASSERT(!(sp->s_flags & SI_CACHED));
1259 	ASSERT(!RW_LOCK_HELD(&sp->s_lock));
1260 	/*
1261 	 *	remove from the cache
1262 	 *	free the acl entries
1263 	 */
1264 	acl_free(sp->aowner);
1265 	acl_free(sp->agroup);
1266 	acl_free(sp->aother);
1267 	acl_free(sp->ausers);
1268 	acl_free(sp->agroups);
1269 
1270 	acl_free(sp->downer);
1271 	acl_free(sp->dgroup);
1272 	acl_free(sp->dother);
1273 	acl_free(sp->dusers);
1274 	acl_free(sp->dgroups);
1275 
1276 	rw_destroy(&sp->s_lock);
1277 	kmem_free(sp, sizeof (si_t));
1278 }
1279 
1280 void
1281 acl_cpy(ufs_ic_acl_t *saclp, ufs_ic_acl_t *daclp)
1282 {
1283 	ufs_ic_acl_t  *aclp, *prev_aclp = NULL, *aclp1;
1284 
1285 	if (saclp == NULL) {
1286 		daclp = NULL;
1287 		return;
1288 	}
1289 	prev_aclp = daclp;
1290 
1291 	for (aclp = saclp; aclp != NULL; aclp = aclp->acl_ic_next) {
1292 		aclp1 = kmem_alloc(sizeof (ufs_ic_acl_t), KM_SLEEP);
1293 		aclp1->acl_ic_next = NULL;
1294 		aclp1->acl_ic_who = aclp->acl_ic_who;
1295 		aclp1->acl_ic_perm = aclp->acl_ic_perm;
1296 		prev_aclp->acl_ic_next = aclp1;
1297 		prev_aclp = (ufs_ic_acl_t *)&aclp1->acl_ic_next;
1298 	}
1299 }
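
/*
 * NOTE (editorial): acl_cpy() appends by treating the address of each
 * acl_ic_next field (and of the destination head pointer itself) as a
 * pseudo-node, which is only safe because acl_ic_next is the first
 * member of ufs_ic_acl_t.  Note also that the daclp = NULL store on
 * the empty-source path only changes the local parameter; callers
 * rely on the destination having been zeroed beforehand.  Below is a
 * sketch (editorial, not compiled; the name is hypothetical) of the
 * same copy written with an explicit tail pointer-to-pointer, which
 * avoids the cast entirely.
 */
#if 0
static void
acl_cpy_alt(ufs_ic_acl_t *saclp, ufs_ic_acl_t **tailpp)
{
	ufs_ic_acl_t *aclp, *naclp;

	for (aclp = saclp; aclp != NULL; aclp = aclp->acl_ic_next) {
		naclp = kmem_alloc(sizeof (ufs_ic_acl_t), KM_SLEEP);
		naclp->acl_ic_next = NULL;
		naclp->acl_ic_who = aclp->acl_ic_who;
		naclp->acl_ic_perm = aclp->acl_ic_perm;
		*tailpp = naclp;		/* link onto the tail */
		tailpp = &naclp->acl_ic_next;	/* advance the tail */
	}
}
#endif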
1300 
1301 /*
1302  *	ufs_si_inherit takes a parent acl structure (saclp) and the inode
1303  *	of the object that is inheriting an acl and returns the inode
1304  *	with the acl linked to it.  It also writes the acl to disk if
1305  *	it is a unique acl (one not already present in the cache).
1306  *
1307  *	ip - pointer to inode of object inheriting the acl (contents lock)
1308  *	tdp - parent inode (rw_lock and contents lock)
1309  *	mode - creation modes
1310  *	cr - credentials pointer
1311  */
1312 int
1313 ufs_si_inherit(struct inode *ip, struct inode *tdp, o_mode_t mode, cred_t *cr)
1314 {
1315 	si_t *tsp, *sp = tdp->i_ufs_acl;
1316 	int error;
1317 	o_mode_t old_modes, old_uid, old_gid;
1318 	int mask;
1319 
1320 	ASSERT(RW_WRITE_HELD(&ip->i_contents));
1321 	ASSERT(RW_WRITE_HELD(&tdp->i_rwlock));
1322 	ASSERT(RW_WRITE_HELD(&tdp->i_contents));
1323 
1324 	/*
1325 	 * if links/symbolic links, or other invalid acl objects are copied
1326 	 * or moved to a directory with a default acl do not allow inheritance
1327 	 * just return.
1328 	 */
1329 	if (!CHECK_ACL_ALLOWED(ip->i_mode & IFMT))
1330 		return (0);
1331 
1332 	/* lock the parent security information */
1333 	rw_enter(&sp->s_lock, RW_READER);
1334 
1335 	ASSERT(((tdp->i_mode & IFMT) == IFDIR) ||
1336 		((tdp->i_mode & IFMT) == IFATTRDIR));
1337 
1338 	mask = ((sp->downer != NULL) ? 1 : 0) |
1339 	    ((sp->dgroup != NULL) ? 2 : 0) |
1340 	    ((sp->dother != NULL) ? 4 : 0);
1341 
1342 	if (mask == 0) {
1343 		rw_exit(&sp->s_lock);
1344 		return (0);
1345 	}
1346 
1347 	if (mask != 7) {
1348 		rw_exit(&sp->s_lock);
1349 		return (EINVAL);
1350 	}
1351 
1352 	tsp = kmem_zalloc(sizeof (si_t), KM_SLEEP);
1353 	rw_init(&tsp->s_lock, NULL, RW_DEFAULT, NULL);
1354 
1355 	/* copy the default acls */
1356 
1357 	ASSERT(RW_READ_HELD(&sp->s_lock));
1358 	acl_cpy(sp->downer, (ufs_ic_acl_t *)&tsp->aowner);
1359 	acl_cpy(sp->dgroup, (ufs_ic_acl_t *)&tsp->agroup);
1360 	acl_cpy(sp->dother, (ufs_ic_acl_t *)&tsp->aother);
1361 	acl_cpy(sp->dusers, (ufs_ic_acl_t *)&tsp->ausers);
1362 	acl_cpy(sp->dgroups, (ufs_ic_acl_t *)&tsp->agroups);
1363 	tsp->aclass.acl_ismask = sp->dclass.acl_ismask;
1364 	tsp->aclass.acl_maskbits = sp->dclass.acl_maskbits;
1365 
1366 	/*
1367 	 * set the owner, group, and other values from the master
1368 	 * inode.
1369 	 */
1370 
1371 	MODE2ACL(tsp->aowner, (mode >> 6), ip->i_uid);
1372 	MODE2ACL(tsp->agroup, (mode >> 3), ip->i_gid);
1373 	MODE2ACL(tsp->aother, (mode), 0);
1374 
1375 	if (tsp->aclass.acl_ismask) {
1376 		tsp->aclass.acl_maskbits &= mode >> 3;
1377 	}
1378 
1379 
1380 	/* copy default acl if necessary */
1381 
1382 	if (((ip->i_mode & IFMT) == IFDIR) ||
1383 		((ip->i_mode & IFMT) == IFATTRDIR)) {
1384 		acl_cpy(sp->downer, (ufs_ic_acl_t *)&tsp->downer);
1385 		acl_cpy(sp->dgroup, (ufs_ic_acl_t *)&tsp->dgroup);
1386 		acl_cpy(sp->dother, (ufs_ic_acl_t *)&tsp->dother);
1387 		acl_cpy(sp->dusers, (ufs_ic_acl_t *)&tsp->dusers);
1388 		acl_cpy(sp->dgroups, (ufs_ic_acl_t *)&tsp->dgroups);
1389 		tsp->dclass.acl_ismask = sp->dclass.acl_ismask;
1390 		tsp->dclass.acl_maskbits = sp->dclass.acl_maskbits;
1391 	}
1392 	/*
1393 	 * save the new 9 mode bits in the inode (ip->ic_smode) for
1394 	 * ufs_getattr.  Be sure the mode can be recovered if the store
1395 	 * fails.
1396 	 */
1397 	old_modes = ip->i_mode;
1398 	old_uid = ip->i_uid;
1399 	old_gid = ip->i_gid;
1400 	/*
1401 	 * store the acl, and get back a new security anchor if
1402 	 * it is a duplicate.
1403 	 */
1404 	rw_exit(&sp->s_lock);
1405 	rw_enter(&ip->i_rwlock, RW_WRITER);
1406 
1407 	/*
1408 	 * Suppress out of inodes messages if instructed in the
1409 	 * tdp inode.
1410 	 */
1411 	ip->i_flag |= tdp->i_flag & IQUIET;
1412 
1413 	if ((error = ufs_si_store(ip, tsp, 0, cr)) != 0) {
1414 		ip->i_mode = old_modes;
1415 		ip->i_uid = old_uid;
1416 		ip->i_gid = old_gid;
1417 	}
1418 	ip->i_flag &= ~IQUIET;
1419 	rw_exit(&ip->i_rwlock);
1420 	return (error);
1421 }
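
/*
 * NOTE (editorial): if the parent's default acl carries mask:rwx and
 * a file is created with mode 0644, the inherited mask above is
 * clamped to (rwx & r--) = r--; the creation mode can only narrow,
 * never widen, the inherited mask bits.
 */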
1422 
1423 si_t *
1424 ufs_acl_cp(si_t *sp)
1425 {
1426 
1427 	si_t *dsp;
1428 
1429 	ASSERT(RW_READ_HELD(&sp->s_lock));
1430 	ASSERT(sp->s_ref && sp->s_use);
1431 
1432 	dsp = kmem_zalloc(sizeof (si_t), KM_SLEEP);
1433 	rw_init(&dsp->s_lock, NULL, RW_DEFAULT, NULL);
1434 
1435 	acl_cpy(sp->aowner, (ufs_ic_acl_t *)&dsp->aowner);
1436 	acl_cpy(sp->agroup, (ufs_ic_acl_t *)&dsp->agroup);
1437 	acl_cpy(sp->aother, (ufs_ic_acl_t *)&dsp->aother);
1438 	acl_cpy(sp->ausers, (ufs_ic_acl_t *)&dsp->ausers);
1439 	acl_cpy(sp->agroups, (ufs_ic_acl_t *)&dsp->agroups);
1440 
1441 	dsp->aclass.acl_ismask = sp->aclass.acl_ismask;
1442 	dsp->aclass.acl_maskbits = sp->aclass.acl_maskbits;
1443 
1444 	acl_cpy(sp->downer, (ufs_ic_acl_t *)&dsp->downer);
1445 	acl_cpy(sp->dgroup, (ufs_ic_acl_t *)&dsp->dgroup);
1446 	acl_cpy(sp->dother, (ufs_ic_acl_t *)&dsp->dother);
1447 	acl_cpy(sp->dusers, (ufs_ic_acl_t *)&dsp->dusers);
1448 	acl_cpy(sp->dgroups, (ufs_ic_acl_t *)&dsp->dgroups);
1449 
1450 	dsp->dclass.acl_ismask = sp->dclass.acl_ismask;
1451 	dsp->dclass.acl_maskbits = sp->dclass.acl_maskbits;
1452 
1453 	return (dsp);
1454 
1455 }
1456 
1457 int
1458 ufs_acl_setattr(struct inode *ip, struct vattr *vap, cred_t *cr)
1459 {
1460 
1461 	si_t *sp;
1462 	int mask = vap->va_mask;
1463 	int error = 0;
1464 
1465 	ASSERT(RW_WRITE_HELD(&ip->i_contents));
1466 
1467 	if (!(mask & (AT_MODE|AT_UID|AT_GID)))
1468 		return (0);
1469 
1470 	/*
1471 	 * if no regular acl's, nothing to do, so let's get out
1472 	 */
1473 	if (!(ip->i_ufs_acl) || !(ip->i_ufs_acl->aowner))
1474 		return (0);
1475 
1476 	rw_enter(&ip->i_ufs_acl->s_lock, RW_READER);
1477 	sp = ufs_acl_cp(ip->i_ufs_acl);
1478 	ASSERT(sp != ip->i_ufs_acl);
1479 
1480 	/*
1481 	 * set the mask to the group permissions if a mask entry
1482 	 * exists.  Otherwise, set the group obj bits to the group
1483 	 * permissions.  Since non-trivial ACLs always have a mask,
1484 	 * and the mask is the final arbiter of group permissions,
1485 	 * setting the mask has the effect of changing the effective
1486 	 * group permissions, even if the group_obj permissions in
1487 	 * the ACL aren't changed.  Posix P1003.1e states that when
1488 	 * an ACL mask exists, chmod(2) must set the acl mask (NOT the
1489 	 * group_obj permissions) to the requested group permissions.
1490 	 */
1491 	if (mask & AT_MODE) {
1492 		sp->aowner->acl_ic_perm = (o_mode_t)(ip->i_mode & 0700) >> 6;
1493 		if (sp->aclass.acl_ismask)
1494 			sp->aclass.acl_maskbits =
1495 			    (o_mode_t)(ip->i_mode & 070) >> 3;
1496 		else
1497 			sp->agroup->acl_ic_perm =
1498 			    (o_mode_t)(ip->i_mode & 070) >> 3;
1499 		sp->aother->acl_ic_perm = (o_mode_t)(ip->i_mode & 07);
1500 	}
1501 
1502 	if (mask & AT_UID) {
1503 		/* Caller has verified our privileges */
1504 		sp->aowner->acl_ic_who = ip->i_uid;
1505 	}
1506 
1507 	if (mask & AT_GID) {
1508 		sp->agroup->acl_ic_who = ip->i_gid;
1509 	}
1510 
1511 	rw_exit(&ip->i_ufs_acl->s_lock);
1512 	error = ufs_si_store(ip, sp, 0, cr);
1513 	return (error);
1514 }
1515 
1516 static int
1517 acl_count(ufs_ic_acl_t *p)
1518 {
1519 	ufs_ic_acl_t	*acl;
1520 	int		count;
1521 
1522 	for (count = 0, acl = p; acl; acl = acl->acl_ic_next, count++)
1523 		;
1524 	return (count);
1525 }
1526 
1527 /*
1528  *	Takes as input a security structure and generates a buffer
1529  *	with fsd's in a form which can be written to the shadow inode.
1530  */
1531 static int
1532 ufs_sectobuf(si_t *sp, caddr_t *buf, size_t *len)
1533 {
1534 	size_t		acl_size;
1535 	size_t		def_acl_size;
1536 	caddr_t		buffer;
1537 	struct ufs_fsd	*fsdp;
1538 	ufs_acl_t	*bufaclp;
1539 
1540 	/*
1541 	 * Calc size of buffer to hold all the acls
1542 	 */
1543 	acl_size = acl_count(sp->aowner) +		/* owner */
1544 	    acl_count(sp->agroup) +			/* owner group */
1545 	    acl_count(sp->aother) +			/* owner other */
1546 	    acl_count(sp->ausers) +			/* acl list */
1547 	    acl_count(sp->agroups);			/* group acls */
1548 	if (sp->aclass.acl_ismask)
1549 		acl_size++;
1550 
1551 	/* Convert to bytes */
1552 	acl_size *= sizeof (ufs_acl_t);
1553 
1554 	/* Add fsd header */
1555 	if (acl_size)
1556 		acl_size += 2 * sizeof (int);
1557 
1558 	/*
1559 	 * Calc size of buffer to hold all the default acls
1560 	 */
1561 	def_acl_size =
1562 	    acl_count(sp->downer) +	/* def owner */
1563 	    acl_count(sp->dgroup) +	/* def owner group */
1564 	    acl_count(sp->dother) +	/* def owner other */
1565 	    acl_count(sp->dusers) +	/* def users  */
1566 	    acl_count(sp->dgroups);	/* def group acls */
1567 	if (sp->dclass.acl_ismask)
1568 		def_acl_size++;
1569 
1570 	/*
1571 	 * Convert to bytes
1572 	 */
1573 	def_acl_size *= sizeof (ufs_acl_t);
1574 
1575 	/*
1576 	 * Add fsd header
1577 	 */
1578 	if (def_acl_size)
1579 		def_acl_size += 2 * sizeof (int);
1580 
1581 	if (acl_size + def_acl_size == 0)
1582 		return (0);
1583 
1584 	buffer = kmem_zalloc((acl_size + def_acl_size), KM_SLEEP);
1585 	bufaclp = (ufs_acl_t *)buffer;
1586 
1587 	if (acl_size == 0)
1588 		goto wrtdefs;
1589 
1590 	/* create fsd and copy acls */
1591 	fsdp = (struct ufs_fsd *)bufaclp;
1592 	fsdp->fsd_type = FSD_ACL;
1593 	bufaclp = (ufs_acl_t *)&fsdp->fsd_data[0];
1594 
1595 	ACL_MOVE(sp->aowner, USER_OBJ, bufaclp);
1596 	ACL_MOVE(sp->agroup, GROUP_OBJ, bufaclp);
1597 	ACL_MOVE(sp->aother, OTHER_OBJ, bufaclp);
1598 	ACL_MOVE(sp->ausers, USER, bufaclp);
1599 	ACL_MOVE(sp->agroups, GROUP, bufaclp);
1600 
1601 	if (sp->aclass.acl_ismask) {
1602 		bufaclp->acl_tag = CLASS_OBJ;
1603 		bufaclp->acl_who = (uid_t)sp->aclass.acl_ismask;
1604 		bufaclp->acl_perm = (o_mode_t)sp->aclass.acl_maskbits;
1605 		bufaclp++;
1606 	}
1607 	ASSERT(acl_size <= INT_MAX);
1608 	fsdp->fsd_size = (int)acl_size;
1609 
1610 wrtdefs:
1611 	if (def_acl_size == 0)
1612 		goto alldone;
1613 
1614 	/* if defaults exist then create fsd and copy default acls */
1615 	fsdp = (struct ufs_fsd *)bufaclp;
1616 	fsdp->fsd_type = FSD_DFACL;
1617 	bufaclp = (ufs_acl_t *)&fsdp->fsd_data[0];
1618 
1619 	ACL_MOVE(sp->downer, DEF_USER_OBJ, bufaclp);
1620 	ACL_MOVE(sp->dgroup, DEF_GROUP_OBJ, bufaclp);
1621 	ACL_MOVE(sp->dother, DEF_OTHER_OBJ, bufaclp);
1622 	ACL_MOVE(sp->dusers, DEF_USER, bufaclp);
1623 	ACL_MOVE(sp->dgroups, DEF_GROUP, bufaclp);
1624 	if (sp->dclass.acl_ismask) {
1625 		bufaclp->acl_tag = DEF_CLASS_OBJ;
1626 		bufaclp->acl_who = (uid_t)sp->dclass.acl_ismask;
1627 		bufaclp->acl_perm = (o_mode_t)sp->dclass.acl_maskbits;
1628 		bufaclp++;
1629 	}
1630 	ASSERT(def_acl_size <= INT_MAX);
1631 	fsdp->fsd_size = (int)def_acl_size;
1632 
1633 alldone:
1634 	*buf = buffer;
1635 	*len = acl_size + def_acl_size;
1636 
1637 	return (0);
1638 }
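
/*
 * NOTE (editorial): the buffer built above is a sequence of fsd
 * records, e.g. for a file carrying both regular and default acls:
 *
 *	| fsd_type=FSD_ACL   | fsd_size | ufs_acl_t[] ... |
 *	| fsd_type=FSD_DFACL | fsd_size | ufs_acl_t[] ... |
 *
 * fsd_size counts the whole record (the two-int header plus the acl
 * array), which is why both this routine and ufs_si_load() account
 * for "2 * sizeof (int)" around the acl payload.
 */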
1639 
1640 /*
1641  *  free a shadow inode  on disk and in memory
1642  */
1643 int
1644 ufs_si_free(si_t *sp, struct vfs *vfsp, cred_t *cr)
1645 {
1646 	struct inode 	*sip;
1647 	int 		shadow;
1648 	int 		err = 0;
1649 	int		refcnt;
1650 	int		signature;
1651 
1652 	ASSERT(vfsp);
1653 	ASSERT(sp);
1654 
1655 	rw_enter(&sp->s_lock, RW_READER);
1656 	ASSERT(sp->s_shadow <= INT_MAX);
1657 	shadow = (int)sp->s_shadow;
1658 	ASSERT(sp->s_ref);
1659 	rw_exit(&sp->s_lock);
1660 
1661 	/*
1662 	 * Decrement link count on the shadow inode,
1663 	 * and decrement reference count on the sip.
1664 	 */
1665 	if ((err = ufs_iget_alloced(vfsp, shadow, &sip, cr)) == 0) {
1666 		rw_enter(&sip->i_contents, RW_WRITER);
1667 		rw_enter(&sp->s_lock, RW_WRITER);
1668 		ASSERT(sp->s_shadow == shadow);
1669 		ASSERT(sip->i_dquot == 0);
1670 		/* Decrement link count */
1671 		ASSERT(sip->i_nlink > 0);
1672 		/*
1673 		 * bug #1264710 assertion failure below
1674 		 */
1675 		sp->s_use = --sip->i_nlink;
1676 		ufs_setreclaim(sip);
1677 		TRANS_INODE(sip->i_ufsvfs, sip);
1678 		sip->i_flag |= ICHG | IMOD;
1679 		sip->i_seq++;
1680 		ITIMES_NOLOCK(sip);
1681 		/* Dec ref counts on si referenced by this ip */
1682 		refcnt = --sp->s_ref;
1683 		signature = sp->s_signature;
1684 		ASSERT(sp->s_ref >= 0 && sp->s_ref <= sp->s_use);
1685 		/*
1686 		 * Release s_lock before calling VN_RELE
1687 		 * (which may want to acquire i_contents).
1688 		 */
1689 		rw_exit(&sp->s_lock);
1690 		rw_exit(&sip->i_contents);
1691 		VN_RELE(ITOV(sip));
1692 	} else {
1693 		rw_enter(&sp->s_lock, RW_WRITER);
1694 		/* Dec ref counts on si referenced by this ip */
1695 		refcnt = --sp->s_ref;
1696 		signature = sp->s_signature;
1697 		ASSERT(sp->s_ref >= 0 && sp->s_ref <= sp->s_use);
1698 		rw_exit(&sp->s_lock);
1699 	}
1700 
1701 	if (refcnt == 0)
1702 		si_cache_del(sp, signature);
1703 	return (err);
1704 }
1705 
1706 /*
1707  * Search the si cache for an si structure by inode #.
1708  * Returns a locked si structure.
1709  *
1710  * Parameters:
1711  * ip - Ptr to an inode on this fs
1712  * spp - Ptr to ptr to si struct for the results, if found.
1713  *
1714  * Returns:	0 - Success (results in spp)
1715  *		1 - Failure (spp undefined)
1716  */
1717 static int
1718 si_cachei_get(struct inode *ip, si_t **spp)
1719 {
1720 	si_t	*sp;
1721 
1722 	rw_enter(&si_cache_lock, RW_READER);
1723 loop:
1724 	for (sp = si_cachei[SI_HASH(ip->i_shadow)]; sp; sp = sp->s_forw)
1725 		if (sp->s_shadow == ip->i_shadow && sp->s_dev == ip->i_dev)
1726 			break;
1727 
1728 	if (sp == NULL) {
1729 		/* Not in cache */
1730 		rw_exit(&si_cache_lock);
1731 		return (1);
1732 	}
1733 	/* Found it */
1734 	rw_enter(&sp->s_lock, RW_WRITER);
1735 alldone:
1736 	rw_exit(&si_cache_lock);
1737 	*spp = sp;
1738 	return (0);
1739 }
1740 
1741 /*
1742  * Search the si cache by si structure (ie a duplicate of the one
1743  * passed in).  In order for a match, the signatures must be the
1744  * same, the devices must be the same, the acls must compare
1745  * equal, and the link count of the cached shadow must be less
1746  * than MAXLINK - 1, which leaves room for the caller to
1747  * increment the count one more time.
1748  * Returns a locked si structure.
1749  *
1750  * Parameters:
1751  * ip - Ptr to an inode on this fs
1752  * spi - Ptr to si the struct we're searching the cache for.
1753  * spp - Ptr to ptr to si struct for the results, if found.
1754  *
1755  * Returns:	0 - Success (results in spp)
1756  *		1 - Failure (spp undefined)
1757  */
1758 static int
1759 si_cachea_get(struct inode *ip, si_t *spi, si_t **spp)
1760 {
1761 	si_t	*sp;
1762 
1763 	spi->s_dev = ip->i_dev;
1764 	spi->s_signature = si_signature(spi);
1765 	rw_enter(&si_cache_lock, RW_READER);
1766 loop:
1767 	for (sp = si_cachea[SI_HASH(spi->s_signature)]; sp; sp = sp->s_next) {
1768 		if (sp->s_signature == spi->s_signature &&
1769 		    sp->s_dev == spi->s_dev &&
1770 		    sp->s_use > 0 &&			/* deleting */
1771 		    sp->s_use <= (MAXLINK - 1) &&	/* Too many links */
1772 		    !si_cmp(sp, spi))
1773 			break;
1774 	}
1775 
1776 	if (sp == NULL) {
1777 		/* Cache miss */
1778 		rw_exit(&si_cache_lock);
1779 		return (1);
1780 	}
1781 	/* Found it */
1782 	rw_enter(&sp->s_lock, RW_WRITER);
1783 alldone:
1784 	spi->s_shadow = sp->s_shadow; /* XXX For debugging */
1785 	rw_exit(&si_cache_lock);
1786 	*spp = sp;
1787 	return (0);
1788 }
1789 
1790 /*
1791  * Place an si structure in the si cache.  May cause duplicates.
1792  *
1793  * Parameters:
1794  * sp - Ptr to the si struct to add to the cache.
1795  *
1796  * Returns: Nothing (void)
1797  */
1798 static void
1799 si_cache_put(si_t *sp)
1800 {
1801 	si_t	**tspp;
1802 
1803 	ASSERT(sp->s_fore == NULL);
1804 	rw_enter(&si_cache_lock, RW_WRITER);
1805 	if (!sp->s_signature)
1806 		sp->s_signature = si_signature(sp);
1807 	sp->s_flags |= SI_CACHED;
1808 	sp->s_fore = NULL;
1809 
1810 	/* The 'by acl' chains */
1811 	tspp = &si_cachea[SI_HASH(sp->s_signature)];
1812 	sp->s_next = *tspp;
1813 	*tspp = sp;
1814 
1815 	/* The 'by inode' chains */
1816 	tspp = &si_cachei[SI_HASH(sp->s_shadow)];
1817 	sp->s_forw = *tspp;
1818 	*tspp = sp;
1819 
1820 	rw_exit(&si_cache_lock);
1821 }
1822 
1823 /*
1824  * The sp passed in is a candidate for deletion from the cache.  We acquire
1825  * the cache lock first, so no cache searches can be done.  Then we search
1826  * for the acl in the cache, and if we find it we can lock it and check that
1827  * nobody else attached to it while we were acquiring the locks.  If the acl
1828  * is in the cache and still has a zero reference count, then we remove it
1829  * from the cache and deallocate it.  If the reference count is non-zero or
1830  * it is not found in the cache, then someone else attached to it or has
1831  * already freed it, so we just return.
1832  *
1833  * Parameters:
1834  * sp - Ptr to the sp struct which is the candidate for deletion.
1835  * signature - the signature for the acl for lookup in the hash table
1836  *
1837  * Returns: Nothing (void)
1838  */
1839 void
1840 si_cache_del(si_t *sp, int signature)
1841 {
1842 	si_t	**tspp;
1843 	int	hash;
1844 	int	foundacl = 0;
1845 
1846 	/*
1847 	 * Unlink & free the sp from the other queues, then destroy it.
1848 	 * Search the 'by acl' chain first, then the 'by inode' chain
1849 	 * after the acl is locked.
1850 	 */
1851 	rw_enter(&si_cache_lock, RW_WRITER);
1852 	hash = SI_HASH(signature);
1853 	for (tspp = &si_cachea[hash]; *tspp; tspp = &(*tspp)->s_next) {
1854 		if (*tspp == sp) {
1855 			/*
1856 			 * Wait to grab the acl lock until after the acl has
1857 			 * been found in the cache.  Otherwise it might try to
1858 			 * grab a lock that has already been destroyed, or
1859 			 * delete an acl that has already been freed.
1860 			 */
1861 			rw_enter(&sp->s_lock, RW_WRITER);
1862 			/* See if someone else attached to it */
1863 			if (sp->s_ref) {
1864 				rw_exit(&sp->s_lock);
1865 				rw_exit(&si_cache_lock);
1866 				return;
1867 			}
1868 			ASSERT(sp->s_fore == NULL);
1869 			ASSERT(sp->s_flags & SI_CACHED);
1870 			foundacl = 1;
1871 			*tspp = sp->s_next;
1872 			break;
1873 		}
1874 	}
1875 
1876 	/*
1877 	 * If the acl was not in the cache, we assume another thread has
1878 	 * deleted it already. This could happen if another thread attaches to
1879 	 * the acl and then releases it after this thread has already found the
1880 	 * reference count to be zero but has not yet taken the cache lock.
1881 	 * Both threads end up seeing a reference count of zero, and call into
1882 	 * si_cache_del.  See bug 4244827 for details on the race condition.
1883 	 */
1884 	if (foundacl == 0) {
1885 		rw_exit(&si_cache_lock);
1886 		return;
1887 	}
1888 
1889 	/* Now check the 'by inode' chain */
1890 	hash = SI_HASH(sp->s_shadow);
1891 	for (tspp = &si_cachei[hash]; *tspp; tspp = &(*tspp)->s_forw) {
1892 		if (*tspp == sp) {
1893 			*tspp = sp->s_forw;
1894 			break;
1895 		}
1896 	}
1897 
1898 	/*
1899 	 * At this point, we can unlock everything because this si
1900 	 * is no longer in the cache, thus cannot be attached to.
1901 	 */
1902 	rw_exit(&sp->s_lock);
1903 	rw_exit(&si_cache_lock);
1904 	sp->s_flags &= ~SI_CACHED;
1905 	(void) ufs_si_free_mem(sp);
1906 }
1907 
1908 /*
1909  * Alloc the hash buckets for the si cache & initialize
1910  * the unreferenced anchor and the cache lock.
1911  */
1912 void
1913 si_cache_init(void)
1914 {
1915 	rw_init(&si_cache_lock, NULL, RW_DEFAULT, NULL);
1916 
1917 	/* The 'by acl' headers */
1918 	si_cachea = kmem_zalloc(si_cachecnt * sizeof (si_t *), KM_SLEEP);
1919 	/* The 'by inode' headers */
1920 	si_cachei = kmem_zalloc(si_cachecnt * sizeof (si_t *), KM_SLEEP);
1921 }
1922 
1923 /*
1924  *  aclcksum takes an acl and generates a checksum.  It takes as input
1925  *  the acl to start at.
1926  *
1927  *  s_aclp - pointer to starting acl
1928  *
1929  *  returns checksum
1930  */
1931 static int
1932 aclcksum(ufs_ic_acl_t *s_aclp)
1933 {
1934 	ufs_ic_acl_t *aclp;
1935 	int signature = 0;
1936 	for (aclp = s_aclp; aclp; aclp = aclp->acl_ic_next) {
1937 		signature += aclp->acl_ic_perm;
1938 		signature += aclp->acl_ic_who;
1939 	}
1940 	return (signature);
1941 }
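
/*
 * NOTE (editorial): aclcksum() is an order-insensitive sum, so
 * distinct acls can collide, e.g. the two-entry lists
 *	(who 1, perm 6), (who 2, perm 4)  and
 *	(who 1, perm 4), (who 2, perm 6)
 * both sum to 13.  The signature is therefore only a fast filter;
 * si_cachea_get() still confirms a hit with the field-by-field
 * si_cmp() below.
 */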
1942 
1943 /*
1944  * Generate a unique signature for an si structure.  Used by the
1945  * search routine si_cachea_get() to quickly identify candidates
1946  * prior to calling si_cmp().
1947  * Parameters:
1948  * sp - Ptr to the si struct to generate the signature for.
1949  *
1950  * Returns:  A signature for the si struct (really a checksum)
1951  */
1952 static int
1953 si_signature(si_t *sp)
1954 {
1955 	int signature = sp->s_dev;
1956 
1957 	signature += aclcksum(sp->aowner) + aclcksum(sp->agroup) +
1958 	    aclcksum(sp->aother) + aclcksum(sp->ausers) +
1959 	    aclcksum(sp->agroups) + aclcksum(sp->downer) +
1960 	    aclcksum(sp->dgroup) + aclcksum(sp->dother) +
1961 	    aclcksum(sp->dusers) + aclcksum(sp->dgroups);
1962 	if (sp->aclass.acl_ismask)
1963 		signature += sp->aclass.acl_maskbits;
1964 	if (sp->dclass.acl_ismask)
1965 		signature += sp->dclass.acl_maskbits;
1966 
1967 	return (signature);
1968 }
1969 
1970 /*
1971  * aclcmp compares two acls to see if they are identical.
1972  *
1973  * aclin1p is the first acl list
1974  * aclin2p is the second acl list
1975  *
1976  * returns 0 if equal and 1 if not equal
1977  */
1978 static int
1979 aclcmp(ufs_ic_acl_t *aclin1p, ufs_ic_acl_t *aclin2p)
1980 {
1981 	ufs_ic_acl_t *aclp1;
1982 	ufs_ic_acl_t *aclp2;
1983 
1984 	/*
1985 	 * if the starting pointers are equal then the lists are
1986 	 * identical, so just return.
1987 	 */
1988 	if (aclin1p == aclin2p)
1989 		return (0);
1990 	/*
1991 	 * check element by element
1992 	 */
1993 	for (aclp1 = aclin1p, aclp2 = aclin2p; aclp1 && aclp2;
1994 	    aclp1 = aclp1->acl_ic_next, aclp2 = aclp2->acl_ic_next) {
1995 		if (aclp1->acl_ic_perm != aclp2->acl_ic_perm ||
1996 		    aclp1->acl_ic_who != aclp2->acl_ic_who)
1997 			return (1);
1998 	}
1999 	/*
2000 	 * both pointers must now be NULL (both lists ended together)
2001 	 */
2002 	if (aclp1 || aclp2)
2003 		return (1);
2004 
2005 	return (0);
2006 }
2007 
2008 /*
2009  * Do extensive, field-by-field compare of two si structures.  Returns
2010  * 0 if they are exactly identical, 1 otherwise.
2011  *
2012  * Parameters:
2013  * sp1 - Ptr to 1st si struct
2014  * sp2 - Ptr to 2nd si struct
2015  *
2016  * Returns:
2017  *		0 - Identical
2018  * 		1 - Not identical
2019  */
2020 static int
2021 si_cmp(si_t *sp1, si_t *sp2)
2022 {
2023 	if (sp1->s_dev != sp2->s_dev)
2024 		return (1);
2025 	if (aclcmp(sp1->aowner, sp2->aowner) ||
2026 	    aclcmp(sp1->agroup, sp2->agroup) ||
2027 	    aclcmp(sp1->aother, sp2->aother) ||
2028 	    aclcmp(sp1->ausers, sp2->ausers) ||
2029 	    aclcmp(sp1->agroups, sp2->agroups) ||
2030 	    aclcmp(sp1->downer, sp2->downer) ||
2031 	    aclcmp(sp1->dgroup, sp2->dgroup) ||
2032 	    aclcmp(sp1->dother, sp2->dother) ||
2033 	    aclcmp(sp1->dusers, sp2->dusers) ||
2034 	    aclcmp(sp1->dgroups, sp2->dgroups))
2035 		return (1);
2036 	if (sp1->aclass.acl_ismask != sp2->aclass.acl_ismask)
2037 		return (1);
2038 	if (sp1->dclass.acl_ismask != sp2->dclass.acl_ismask)
2039 		return (1);
2040 	if (sp1->aclass.acl_ismask &&
2041 		sp1->aclass.acl_maskbits != sp2->aclass.acl_maskbits)
2042 		return (1);
2043 	if (sp1->dclass.acl_ismask &&
2044 		sp1->dclass.acl_maskbits != sp2->dclass.acl_maskbits)
2045 		return (1);
2046 
2047 	return (0);
2048 }
2049 
2050 /*
2051  * Remove all acls associated with a device.  All acls must have
2052  * a reference count of zero.
2053  *
2054  * inputs:
2055  *	device - device to remove from the cache
2056  *
2057  * outputs:
2058  *	none
2059  */
2060 void
2061 ufs_si_cache_flush(dev_t dev)
2062 {
2063 	si_t *tsp, **tspp;
2064 	int i;
2065 
2066 	rw_enter(&si_cache_lock, RW_WRITER);
2067 	for (i = 0; i < si_cachecnt; i++) {
2068 		tspp = &si_cachea[i];
2069 		while (*tspp) {
2070 			if ((*tspp)->s_dev == dev) {
2071 				*tspp = (*tspp)->s_next;
2072 			} else {
2073 				tspp = &(*tspp)->s_next;
2074 			}
2075 		}
2076 	}
2077 	for (i = 0; i < si_cachecnt; i++) {
2078 		tspp = &si_cachei[i];
2079 		while (*tspp) {
2080 			if ((*tspp)->s_dev == dev) {
2081 				tsp = *tspp;
2082 				*tspp = (*tspp)->s_forw;
2083 				tsp->s_flags &= ~SI_CACHED;
2084 				ufs_si_free_mem(tsp);
2085 			} else {
2086 				tspp = &(*tspp)->s_forw;
2087 			}
2088 		}
2089 	}
2090 	rw_exit(&si_cache_lock);
2091 }
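
/*
 * NOTE (editorial): the first pass above only unlinks matching
 * entries from the 'by acl' chains; the second pass both unlinks
 * them from the 'by inode' chains and frees the memory.  This is
 * safe because every cached si_t sits on exactly one chain of each
 * kind.
 */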
2092 
2093 /*
2094  * ufs_si_del is used to unhook an sp from an inode in memory
2095  *
2096  * ip is the inode to remove the sp from.
2097  */
2098 void
2099 ufs_si_del(struct inode *ip)
2100 {
2101 	si_t    *sp = ip->i_ufs_acl;
2102 	int	refcnt;
2103 	int	signature;
2104 
2105 	if (sp) {
2106 		rw_enter(&sp->s_lock, RW_WRITER);
2107 		refcnt = --sp->s_ref;
2108 		signature = sp->s_signature;
2109 		ASSERT(sp->s_ref >= 0 && sp->s_ref <= sp->s_use);
2110 		rw_exit(&sp->s_lock);
2111 		if (refcnt == 0)
2112 			si_cache_del(sp, signature);
2113 		ip->i_ufs_acl = NULL;
2114 	}
2115 }
2116