/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/t_lock.h>
#include <sys/ksynch.h>
#include <sys/buf.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/mode.h>
#include <sys/systm.h>
#include <vm/seg.h>
#include <sys/file.h>
#include <sys/acl.h>
#include <sys/fs/ufs_inode.h>
#include <sys/fs/ufs_acl.h>
#include <sys/fs/ufs_quota.h>
#include <sys/sysmacros.h>
#include <sys/debug.h>
#include <sys/policy.h>

/* Cache routines */
static int si_signature(si_t *);
static int si_cachei_get(struct inode *, si_t **);
static int si_cachea_get(struct inode *, si_t *, si_t **);
static int si_cmp(si_t *, si_t *);
static void si_cache_put(si_t *);
void si_cache_del(si_t *, int);
void si_cache_init(void);

static void ufs_si_free_mem(si_t *);
static int ufs_si_store(struct inode *, si_t *, int, cred_t *);
static si_t *ufs_acl_cp(si_t *);
static int ufs_sectobuf(si_t *, caddr_t *, size_t *);
static int acl_count(ufs_ic_acl_t *);
static int acl_validate(aclent_t *, int, int);
static int vsecattr2aclentry(vsecattr_t *, si_t **);
static int aclentry2vsecattr(si_t *, vsecattr_t *);

krwlock_t si_cache_lock;	/* Protects si_cache */
int si_cachecnt = 64;		/* # buckets in si_cache[a|i] */
si_t **si_cachea;		/* The 'by acl' cache chains */
si_t **si_cachei;		/* The 'by inode' cache chains */
long si_cachehit = 0;
long si_cachemiss = 0;

#define	SI_HASH(S)	((int)(S) & (si_cachecnt - 1))
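/*
 * Note: SI_HASH() masks with (si_cachecnt - 1), so it spreads entries
 * evenly only when si_cachecnt is a power of two.  With the default of
 * 64 buckets, for example, shadow inode 77 lands in bucket
 * SI_HASH(77) == (77 & 63) == 13.  Tunings of si_cachecnt should
 * therefore stay with powers of two.
 */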
/*
 * Attach the new acl list sp to inode ip.  Attempts to make things
 * atomic.  Search the acl cache for an identical sp and, if found,
 * attach the cached acl to ip.  If the acl is new (not in the cache),
 * add it to the cache, then attach it to ip.  Last, remove and
 * decrement the reference count of any prior acl list attached
 * to the ip.
 *
 * Parameters:
 *	ip	- Ptr to inode to receive the acl list
 *	sp	- Ptr to in-core acl structure to attach to the inode
 *	puship	- 0 do not push the object inode (ip), 1 push the ip
 *	cr	- Ptr to credentials
 *
 * Returns:	0 - Success
 *		N - From errno.h
 */
static int
ufs_si_store(struct inode *ip, si_t *sp, int puship, cred_t *cr)
{
    struct vfs	*vfsp;
    struct inode	*sip;
    si_t		*oldsp;
    si_t		*csp;
    caddr_t		acldata;
    ino_t		oldshadow;
    size_t		acldatalen;
    off_t		offset;
    int		shadow;
    int		err;
    int		refcnt;
    int		usecnt;
    int		signature;
    int		resid;
    struct ufsvfs	*ufsvfsp = ip->i_ufsvfs;
    struct fs	*fs = ufsvfsp->vfs_fs;

    ASSERT(RW_WRITE_HELD(&ip->i_contents));
    ASSERT(ip->i_ufs_acl != sp);

    if (!CHECK_ACL_ALLOWED(ip->i_mode & IFMT))
        return (ENOSYS);

    /*
     * If there are only the three owner/group/other entries, do not
     * create a shadow inode.  If a shadow is already associated with
     * the file, remove it.
     */
    if (!sp->ausers &&
        !sp->agroups &&
        !sp->downer &&
        !sp->dgroup &&
        !sp->dother &&
        sp->dclass.acl_ismask == 0 &&
        !sp->dusers &&
        !sp->dgroups) {
        if (ip->i_ufs_acl)
            err = ufs_si_free(ip->i_ufs_acl, ITOV(ip)->v_vfsp, cr);
        ip->i_ufs_acl = NULL;
        ip->i_shadow = 0;
        ip->i_flag |= IMOD | IACC;
        ip->i_mode = (ip->i_smode & ~0777) |
            ((sp->aowner->acl_ic_perm & 07) << 6) |
            (MASK2MODE(sp)) |
            (sp->aother->acl_ic_perm & 07);
        TRANS_INODE(ip->i_ufsvfs, ip);
        ufs_iupdat(ip, 1);
        ufs_si_free_mem(sp);
        return (0);
    }
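    /*
     * Example of the mode rebuild above: an acl of user::rwx,
     * group::rwx, other::--- that also carries a mask of r-x
     * collapses to (07 << 6) | (05 << 3) | 00 == 0750, since
     * MASK2MODE() supplies the group bits from the mask when one is
     * set (see the "effective group rights" note ahead of the
     * switchshadows code below).
     */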
loop:

    /*
     * Check the cache.  If a match is found, use the existing shadow
     * inode: increment the shadow link count, attach to the cached
     * ufs_acl_entry struct, and increment its reference count.  Then
     * discard the passed-in ufs_acl_entry and return.
     */
    if (si_cachea_get(ip, sp, &csp) == 0) {
        ASSERT(RW_WRITE_HELD(&csp->s_lock));
        if (ip->i_ufs_acl == csp) {
            rw_exit(&csp->s_lock);
            (void) ufs_si_free_mem(sp);
            return (0);
        }
        vfsp = ITOV(ip)->v_vfsp;
        ASSERT(csp->s_shadow <= INT_MAX);
        shadow = (int)csp->s_shadow;
        /*
         * We can't call ufs_iget while holding csp locked,
         * because we might deadlock.  So we drop the
         * lock on csp, then go search the si_cache again
         * to see if the csp is still there.
         */
        rw_exit(&csp->s_lock);
        if ((err = ufs_iget(vfsp, shadow, &sip, cr)) != 0) {
            (void) ufs_si_free_mem(sp);
            return (EIO);
        }
        rw_enter(&sip->i_contents, RW_WRITER);
        if ((sip->i_mode & IFMT) != IFSHAD || sip->i_nlink <= 0) {
            rw_exit(&sip->i_contents);
            VN_RELE(ITOV(sip));
            goto loop;
        }
        /* Get the csp again */
        if (si_cachea_get(ip, sp, &csp) != 0) {
            rw_exit(&sip->i_contents);
            VN_RELE(ITOV(sip));
            goto loop;
        }
        ASSERT(RW_WRITE_HELD(&csp->s_lock));
        /* See if we got the right shadow */
        if (csp->s_shadow != shadow) {
            rw_exit(&csp->s_lock);
            rw_exit(&sip->i_contents);
            VN_RELE(ITOV(sip));
            goto loop;
        }
        ASSERT(RW_WRITE_HELD(&sip->i_contents));
        ASSERT(sip->i_dquot == 0);
        /* Increment link count */
        ASSERT(sip->i_nlink > 0);
        sip->i_nlink++;
        TRANS_INODE(ufsvfsp, sip);
        csp->s_use = sip->i_nlink;
        csp->s_ref++;
        ASSERT(sp->s_ref >= 0 && sp->s_ref <= sp->s_use);
        sip->i_flag |= ICHG | IMOD;
        sip->i_seq++;
        ITIMES_NOLOCK(sip);
        /*
         * Always release s_lock before both releasing i_contents
         * and calling VN_RELE.
         */
        rw_exit(&csp->s_lock);
        rw_exit(&sip->i_contents);
        VN_RELE(ITOV(sip));
        (void) ufs_si_free_mem(sp);
        sp = csp;
        si_cachehit++;
        goto switchshadows;
    }

    /* Alloc a shadow inode and fill it in */
    err = ufs_ialloc(ip, ip->i_number, (mode_t)IFSHAD, &sip, cr);
    if (err) {
        (void) ufs_si_free_mem(sp);
        return (err);
    }
    rw_enter(&sip->i_contents, RW_WRITER);
    sip->i_flag |= IACC | IUPD | ICHG;
    sip->i_seq++;
    sip->i_mode = (o_mode_t)IFSHAD;
    ITOV(sip)->v_type = VREG;
    sip->i_nlink = 1;
    sip->i_uid = crgetuid(cr);
    sip->i_suid = (ulong_t)sip->i_uid > (ulong_t)USHRT_MAX ?
        UID_LONG : sip->i_uid;
    sip->i_gid = crgetgid(cr);
    sip->i_sgid = (ulong_t)sip->i_gid > (ulong_t)USHRT_MAX ?
        GID_LONG : sip->i_gid;
    sip->i_shadow = 0;
    TRANS_INODE(ufsvfsp, sip);
    sip->i_ufs_acl = NULL;
    ASSERT(sip->i_size == 0);

    sp->s_shadow = sip->i_number;

    if ((err = ufs_sectobuf(sp, &acldata, &acldatalen)) != 0)
        goto errout;
    offset = 0;

    /*
     * We don't actually care about the residual count upon failure,
     * but giving ufs_rdwri() the pointer means it won't translate
     * all failures to EIO.  Our caller needs to know when ENOSPC
     * gets hit.
     */
    resid = 0;
    if (((err = ufs_rdwri(UIO_WRITE, FWRITE|FSYNC, sip, acldata,
        acldatalen, (offset_t)0, UIO_SYSSPACE, &resid, cr)) != 0) ||
        (resid != 0)) {
        kmem_free(acldata, acldatalen);
        if ((resid != 0) && (err == 0))
            err = ENOSPC;
        goto errout;
    }

    offset += acldatalen;
    if ((acldatalen + fs->fs_bsize) > ufsvfsp->vfs_maxacl)
        ufsvfsp->vfs_maxacl = acldatalen + fs->fs_bsize;

    kmem_free(acldata, acldatalen);
    /* Sync & free the shadow inode */
    ufs_iupdat(sip, 1);
    rw_exit(&sip->i_contents);
    VN_RELE(ITOV(sip));

    /* We're committed to using this sp */
    sp->s_use = 1;
    sp->s_ref = 1;

    /* Now put the new acl stuff in the cache */
    /* XXX Might make a duplicate */
    si_cache_put(sp);
    si_cachemiss++;
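    /*
     * A note on the two counts maintained here: s_use mirrors the
     * shadow inode's link count (how many files share the shadow),
     * while s_ref counts the in-core inodes currently pointing at
     * this si_t.  Every in-core reference holds a link on the shadow,
     * which is what the ASSERT(sp->s_ref >= 0 && sp->s_ref <= sp->s_use)
     * checks scattered through this file rely on.
     */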
switchshadows:
    /* Now switch the parent inode to use the new shadow inode */
    ASSERT(RW_WRITE_HELD(&ip->i_contents));
    rw_enter(&sp->s_lock, RW_READER);
    oldsp = ip->i_ufs_acl;
    oldshadow = ip->i_shadow;
    ip->i_ufs_acl = sp;
    ASSERT(sp->s_shadow <= INT_MAX);
    ip->i_shadow = (int32_t)sp->s_shadow;
    ASSERT(oldsp != sp);
    ASSERT(oldshadow != ip->i_number);
    ASSERT(ip->i_number != ip->i_shadow);
    /*
     * Change the mode bits to follow the acl list
     *
     * NOTE:	a directory is not required to have a "regular" acl
     *		bug id's 1238908, 1257173, 1263171 and 1263188
     *
     *		but if a "regular" acl is present, it must contain
     *		an "owner", "group", and "other" acl
     *
     * If an ACL mask exists, the effective group rights are
     * set to the mask.  Otherwise, the effective group rights
     * are set to the object group bits.
     */
    if (sp->aowner) {			/* Owner */
        ip->i_mode &= ~0700;		/* clear Owner */
        ip->i_mode |= (sp->aowner->acl_ic_perm & 07) << 6;
        ip->i_uid = sp->aowner->acl_ic_who;
    }

    if (sp->agroup) {			/* Group */
        ip->i_mode &= ~0070;		/* clear Group */
        ip->i_mode |= MASK2MODE(sp);	/* apply mask */
        ip->i_gid = sp->agroup->acl_ic_who;
    }

    if (sp->aother) {			/* Other */
        ip->i_mode &= ~0007;		/* clear Other */
        ip->i_mode |= (sp->aother->acl_ic_perm & 07);
    }

    if (sp->aclass.acl_ismask)
        ip->i_mode = (ip->i_mode & ~070) |
            (((sp->aclass.acl_maskbits & 07) << 3) &
            ip->i_mode);

    TRANS_INODE(ufsvfsp, ip);
    rw_exit(&sp->s_lock);
    ip->i_flag |= ICHG;
    ip->i_seq++;
    /*
     * When creating a file there is no need to push the inode;
     * it is pushed later.
     */
    if (puship == 1)
        ufs_iupdat(ip, 1);

    /*
     * Decrement link count on the old shadow inode,
     * and decrement reference count on the old aclp.
     */
    if (oldshadow) {
        /* Get the shadow inode */
        ASSERT(RW_WRITE_HELD(&ip->i_contents));
        vfsp = ITOV(ip)->v_vfsp;
        if ((err = ufs_iget_alloced(vfsp, oldshadow, &sip, cr)) != 0) {
            return (EIO);
        }
        /* Decrement link count */
        rw_enter(&sip->i_contents, RW_WRITER);
        if (oldsp)
            rw_enter(&oldsp->s_lock, RW_WRITER);
        ASSERT(sip->i_dquot == 0);
        ASSERT(sip->i_nlink > 0);
        usecnt = --sip->i_nlink;
        ufs_setreclaim(sip);
        TRANS_INODE(ufsvfsp, sip);
        sip->i_flag |= ICHG | IMOD;
        sip->i_seq++;
        ITIMES_NOLOCK(sip);
        if (oldsp) {
            oldsp->s_use = usecnt;
            refcnt = --oldsp->s_ref;
            signature = oldsp->s_signature;
            /*
             * Always release s_lock before both releasing
             * i_contents and calling VN_RELE.
             */
            rw_exit(&oldsp->s_lock);
        }
        rw_exit(&sip->i_contents);
        VN_RELE(ITOV(sip));
        if (oldsp && (refcnt == 0))
            si_cache_del(oldsp, signature);
    }
    return (0);

errout:
    /* Throw the newly alloc'd inode away */
    sip->i_nlink = 0;
    ufs_setreclaim(sip);
    TRANS_INODE(ufsvfsp, sip);
    ITIMES_NOLOCK(sip);
    rw_exit(&sip->i_contents);
    VN_RELE(ITOV(sip));
    ASSERT(!sp->s_use && !sp->s_ref && !(sp->s_flags & SI_CACHED));
    (void) ufs_si_free_mem(sp);
    return (err);
}
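/*
 * Within this file, ufs_acl_set() stores with puship == 1 so the
 * object inode is pushed immediately, while ufs_si_inherit() and
 * ufs_acl_setattr() pass puship == 0 and leave the push to their
 * callers.
 */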
/*
 * Load the acls for inode ip either from disk (adding to the cache),
 * or search the cache and attach the cached acl list to the ip.
 * In either case, maintain the proper reference count on the cached entry.
 *
 * Parameters:
 *	ip	- Ptr to the inode which needs the acl list loaded
 *	cr	- Ptr to credentials
 *
 * Returns:	0 - Success
 *		N - From errno.h
 */
int
ufs_si_load(struct inode *ip, cred_t *cr)
/*
 *	ip	parent inode in
 *	cr	credentials in
 */
{
    struct vfs	*vfsp;
    struct inode	*sip;
    ufs_fsd_t	*fsdp;
    si_t		*sp;
    vsecattr_t	vsecattr = {
        (uint_t)0,
        (int)0,
        (void *)NULL,
        (int)0,
        (void *)NULL };
    aclent_t	*aclp;
    ufs_acl_t	*ufsaclp;
    caddr_t		acldata = NULL;
    ino_t		maxino;
    int		err;
    size_t		acldatalen;
    int		numacls;
    int		shadow;
    int		usecnt;
    struct ufsvfs	*ufsvfsp = ip->i_ufsvfs;
    struct fs	*fs = ufsvfsp->vfs_fs;

    ASSERT(ip != NULL);
    ASSERT(RW_WRITE_HELD(&ip->i_contents));
    ASSERT(ip->i_shadow && ip->i_ufs_acl == NULL);
    ASSERT((ip->i_mode & IFMT) != IFSHAD);

    if (!CHECK_ACL_ALLOWED(ip->i_mode & IFMT))
        return (ENOSYS);

    if (ip->i_shadow == ip->i_number)
        return (EIO);

    maxino = (ino_t)(ITOF(ip)->fs_ncg * ITOF(ip)->fs_ipg);
    if (ip->i_shadow < UFSROOTINO || ip->i_shadow > maxino)
        return (EIO);

    /*
     * XXX Check cache.  If in cache, link to it and increment
     * the reference count, then return.
     */
    if (si_cachei_get(ip, &sp) == 0) {
        ASSERT(RW_WRITE_HELD(&sp->s_lock));
        ip->i_ufs_acl = sp;
        sp->s_ref++;
        ASSERT(sp->s_ref >= 0 && sp->s_ref <= sp->s_use);
        rw_exit(&sp->s_lock);
        si_cachehit++;
        return (0);
    }

    /* Get the shadow inode */
    vfsp = ITOV(ip)->v_vfsp;
    shadow = ip->i_shadow;
    if ((err = ufs_iget_alloced(vfsp, shadow, &sip, cr)) != 0) {
        return (err);
    }
    rw_enter(&sip->i_contents, RW_WRITER);

    if ((sip->i_mode & IFMT) != IFSHAD) {
        rw_exit(&sip->i_contents);
        err = EINVAL;
        goto alldone;
    }

    ASSERT(sip->i_dquot == 0);
    usecnt = sip->i_nlink;
    if ((!ULOCKFS_IS_NOIACC(&ufsvfsp->vfs_ulockfs)) &&
        (!(sip)->i_ufsvfs->vfs_noatime)) {
        sip->i_flag |= IACC;
    }
    rw_downgrade(&sip->i_contents);

    ASSERT(sip->i_size <= MAXOFF_T);
    /* Read the acl's and other stuff from disk */
    acldata = kmem_zalloc((size_t)sip->i_size, KM_SLEEP);
    acldatalen = sip->i_size;

    err = ufs_rdwri(UIO_READ, FREAD, sip, acldata, acldatalen, (offset_t)0,
        UIO_SYSSPACE, (int *)0, cr);

    rw_exit(&sip->i_contents);

    if (err)
        goto alldone;
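    /*
     * On-disk layout being decoded below: the shadow inode's data is a
     * sequence of ufs_fsd_t records, each a (fsd_type, fsd_size) header
     * followed by an array of ufs_acl_t entries.  fsd_size covers the
     * whole record including the two int header words, so the entry
     * count works out to
     * (fsd_size - 2 * sizeof (int)) / sizeof (ufs_acl_t), exactly as
     * computed for the FSD_ACL and FSD_DFACL cases.
     */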
    /*
     * Convert from disk format.
     * Result is a vsecattr struct which we then convert to the
     * si struct.
     */
    bzero((caddr_t)&vsecattr, sizeof (vsecattr_t));
    for (fsdp = (ufs_fsd_t *)acldata;
        fsdp < (ufs_fsd_t *)(acldata + acldatalen);
        fsdp = (ufs_fsd_t *)((caddr_t)fsdp +
        FSD_RECSZ(fsdp, fsdp->fsd_size))) {
        if (fsdp->fsd_size <= 0)
            break;
        switch (fsdp->fsd_type) {
        case FSD_ACL:
            numacls = vsecattr.vsa_aclcnt =
                (int)((fsdp->fsd_size - 2 * sizeof (int)) /
                sizeof (ufs_acl_t));
            aclp = vsecattr.vsa_aclentp =
                kmem_zalloc(numacls * sizeof (aclent_t), KM_SLEEP);
            for (ufsaclp = (ufs_acl_t *)fsdp->fsd_data;
                numacls; ufsaclp++) {
                aclp->a_type = ufsaclp->acl_tag;
                aclp->a_id = ufsaclp->acl_who;
                aclp->a_perm = ufsaclp->acl_perm;
                aclp++;
                numacls--;
            }
            break;
        case FSD_DFACL:
            numacls = vsecattr.vsa_dfaclcnt =
                (int)((fsdp->fsd_size - 2 * sizeof (int)) /
                sizeof (ufs_acl_t));
            aclp = vsecattr.vsa_dfaclentp =
                kmem_zalloc(numacls * sizeof (aclent_t), KM_SLEEP);
            for (ufsaclp = (ufs_acl_t *)fsdp->fsd_data;
                numacls; ufsaclp++) {
                aclp->a_type = ufsaclp->acl_tag;
                aclp->a_id = ufsaclp->acl_who;
                aclp->a_perm = ufsaclp->acl_perm;
                aclp++;
                numacls--;
            }
            break;
        }
    }
    /* Sort the lists */
    if (vsecattr.vsa_aclentp) {
        ksort((caddr_t)vsecattr.vsa_aclentp, vsecattr.vsa_aclcnt,
            sizeof (aclent_t), cmp2acls);
        if ((err = acl_validate(vsecattr.vsa_aclentp,
            vsecattr.vsa_aclcnt, ACL_CHECK)) != 0) {
            goto alldone;
        }
    }
    if (vsecattr.vsa_dfaclentp) {
        ksort((caddr_t)vsecattr.vsa_dfaclentp, vsecattr.vsa_dfaclcnt,
            sizeof (aclent_t), cmp2acls);
        if ((err = acl_validate(vsecattr.vsa_dfaclentp,
            vsecattr.vsa_dfaclcnt, DEF_ACL_CHECK)) != 0) {
            goto alldone;
        }
    }

    /* Ignore shadow inodes without ACLs */
    if (!vsecattr.vsa_aclentp && !vsecattr.vsa_dfaclentp) {
        err = 0;
        goto alldone;
    }

    /* Convert from vsecattr struct to ufs_acl_entry struct */
    if ((err = vsecattr2aclentry(&vsecattr, &sp)) != 0) {
        goto alldone;
    }

    /* These aren't filled in by vsecattr2aclentry */
    sp->s_shadow = ip->i_shadow;
    sp->s_dev = ip->i_dev;
    sp->s_use = usecnt;
    sp->s_ref = 1;
    ASSERT(sp->s_ref >= 0 && sp->s_ref <= sp->s_use);

    /* XXX Might make a duplicate */
    si_cache_put(sp);

    /* Signal anyone waiting on this shadow to be loaded */
    ip->i_ufs_acl = sp;
    err = 0;
    si_cachemiss++;
    if ((acldatalen + fs->fs_bsize) > ufsvfsp->vfs_maxacl)
        ufsvfsp->vfs_maxacl = acldatalen + fs->fs_bsize;
alldone:
    /*
     * Common exit point.  Mark the shadow inode ISTALE if we
     * detect an internal inconsistency, to prevent stray inodes
     * appearing in the cache.
     */
    if (err) {
        rw_enter(&sip->i_contents, RW_READER);
        mutex_enter(&sip->i_tlock);
        sip->i_flag |= ISTALE;
        mutex_exit(&sip->i_tlock);
        rw_exit(&sip->i_contents);
    }
    VN_RELE(ITOV(sip));

    /*
     * Clean up data structures allocated on the fly.
     */
    if (acldata)
        kmem_free(acldata, acldatalen);

    if (vsecattr.vsa_aclentp)
        kmem_free(vsecattr.vsa_aclentp,
            vsecattr.vsa_aclcnt * sizeof (aclent_t));
    if (vsecattr.vsa_dfaclentp)
        kmem_free(vsecattr.vsa_dfaclentp,
            vsecattr.vsa_dfaclcnt * sizeof (aclent_t));
    return (err);
}

/*
 * Check the inode's ACLs to see if this mode of access is
 * allowed; return 0 if allowed, EACCES if not.
 *
 * We follow the procedure defined in Sec. 3.3.5, ACL Access
 * Check Algorithm, of the POSIX 1003.6 Draft Standard.
 */
int
ufs_acl_access(struct inode *ip, int mode, cred_t *cr)
/*
 *	ip	parent inode
 *	mode	mode of access read, write, execute/examine
 *	cr	credentials
 */
{
    ufs_ic_acl_t *acl;
    int ismask, mask = 0;
    int gperm = 0;
    int ngroup = 0;
    si_t	*sp = NULL;
    uid_t uid = crgetuid(cr);
    uid_t owner;

    ASSERT(ip->i_ufs_acl != NULL);

    sp = ip->i_ufs_acl;

    ismask = sp->aclass.acl_ismask ?
        sp->aclass.acl_ismask : 0;

    if (ismask)
        mask = sp->aclass.acl_maskbits;
    else
        mask = -1;

    /*
     * (1) If user owns the file, obey user mode bits
     */
    owner = sp->aowner->acl_ic_who;
    if (uid == owner) {
        return (MODE_CHECK(owner, mode, (sp->aowner->acl_ic_perm << 6),
            cr, ip));
    }

    /*
     * (2) Obey any matching ACL_USER entry
     */
    if (sp->ausers)
        for (acl = sp->ausers; acl != NULL; acl = acl->acl_ic_next) {
            if (acl->acl_ic_who == uid) {
                return (MODE_CHECK(owner, mode,
                    (mask & acl->acl_ic_perm) << 6, cr, ip));
            }
        }

    /*
     * (3) If user belongs to file's group, obey group mode bits
     * if no ACL mask is defined; if there is an ACL mask, we look
     * at both the group mode bits and any ACL_GROUP entries.
     */
    if (groupmember((uid_t)sp->agroup->acl_ic_who, cr)) {
        ngroup++;
        gperm = (sp->agroup->acl_ic_perm);
        if (!ismask)
            return (MODE_CHECK(owner, mode, (gperm << 6), cr, ip));
    }

    /*
     * (4) Accumulate the permissions in matching ACL_GROUP entries
     */
    if (sp->agroups)
        for (acl = sp->agroups; acl != NULL; acl = acl->acl_ic_next) {
            if (groupmember(acl->acl_ic_who, cr)) {
                ngroup++;
                gperm |= acl->acl_ic_perm;
            }
        }

    if (ngroup != 0)
        return (MODE_CHECK(owner, mode, ((gperm & mask) << 6), cr, ip));

    /*
     * (5) Finally, use the "other" mode bits
     */
    return (MODE_CHECK(owner, mode, sp->aother->acl_ic_perm << 6, cr, ip));
}
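/*
 * Worked example of the algorithm above: take a file owned by uid 100
 * with user:200:rwx and mask::r-x, accessed by uid 200 asking for
 * write.  Step (2) matches the ACL_USER entry, and the effective
 * permission is (mask & acl_ic_perm) == (05 & 07) == 05 (r-x), so the
 * write is denied even though the entry itself grants rwx; the mask is
 * the final arbiter for everything but USER_OBJ and OTHER_OBJ.
 */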
/*ARGSUSED2*/
int
ufs_acl_get(struct inode *ip, vsecattr_t *vsap, int flag, cred_t *cr)
{
    aclent_t	*aclentp;

    ASSERT(RW_LOCK_HELD(&ip->i_contents));

    /* XXX Range check, sanity check, shadow check */
    /* If an ACL is present, get the data from the shadow inode info */
    if (ip->i_ufs_acl)
        return (aclentry2vsecattr(ip->i_ufs_acl, vsap));

    /*
     * If no ACLs are present, fabricate one from the mode bits.
     * This code is almost identical to fs_fab_acl(), but we
     * already have the mode bits handy, so we'll avoid going
     * through VOP_GETATTR() again.
     */
    vsap->vsa_aclcnt = 0;
    vsap->vsa_aclentp = NULL;
    vsap->vsa_dfaclcnt = 0;		/* Default ACLs are not fabricated */
    vsap->vsa_dfaclentp = NULL;

    if (vsap->vsa_mask & (VSA_ACLCNT | VSA_ACL))
        vsap->vsa_aclcnt = 4;	/* USER, GROUP, OTHER, and CLASS */

    if (vsap->vsa_mask & VSA_ACL) {
        vsap->vsa_aclentp = kmem_zalloc(4 * sizeof (aclent_t),
            KM_SLEEP);
        if (vsap->vsa_aclentp == NULL)
            return (ENOMEM);
        aclentp = vsap->vsa_aclentp;

        /* Owner */
        aclentp->a_type = USER_OBJ;
        aclentp->a_perm = ((ushort_t)(ip->i_mode & 0700)) >> 6;
        aclentp->a_id = ip->i_uid;	/* Really undefined */
        aclentp++;

        /* Group */
        aclentp->a_type = GROUP_OBJ;
        aclentp->a_perm = ((ushort_t)(ip->i_mode & 0070)) >> 3;
        aclentp->a_id = ip->i_gid;	/* Really undefined */
        aclentp++;

        /* Other */
        aclentp->a_type = OTHER_OBJ;
        aclentp->a_perm = ip->i_mode & 0007;
        aclentp->a_id = 0;		/* Really undefined */
        aclentp++;

        /* Class */
        aclentp->a_type = CLASS_OBJ;
        aclentp->a_perm = ((ushort_t)(ip->i_mode & 0070)) >> 3;
        aclentp->a_id = 0;		/* Really undefined */
        ksort((caddr_t)vsap->vsa_aclentp, vsap->vsa_aclcnt,
            sizeof (aclent_t), cmp2acls);
    }

    return (0);
}
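/*
 * Example: a file with mode 0640 and no shadow inode comes back from
 * ufs_acl_get() as the four fabricated entries user::rw-, group::r--,
 * mask::r--, other::---, with the CLASS_OBJ perm simply mirroring the
 * group bits.
 */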
/*ARGSUSED2*/
int
ufs_acl_set(struct inode *ip, vsecattr_t *vsap, int flag, cred_t *cr)
{
    si_t	*sp;
    int	err;

    ASSERT(RW_WRITE_HELD(&ip->i_contents));

    if (!CHECK_ACL_ALLOWED(ip->i_mode & IFMT))
        return (ENOSYS);

    /*
     * Only the owner of the file or privileged users can change
     * the ACLs.
     */
    if (secpolicy_vnode_setdac(cr, ip->i_uid) != 0)
        return (EPERM);

    /* Convert from vsecattr struct to ufs_acl_entry struct */
    if ((err = vsecattr2aclentry(vsap, &sp)) != 0)
        return (err);
    sp->s_dev = ip->i_dev;

    /*
     * Make the user & group objs in the acl list follow what's
     * in the inode.
     */
#ifdef DEBUG
    if (vsap->vsa_mask == VSA_ACL) {
        ASSERT(sp->aowner);
        ASSERT(sp->agroup);
        ASSERT(sp->aother);
    }
#endif	/* DEBUG */

    if (sp->aowner)
        sp->aowner->acl_ic_who = ip->i_uid;
    if (sp->agroup)
        sp->agroup->acl_ic_who = ip->i_gid;

    /*
     * Write and cache the new acl list
     */
    err = ufs_si_store(ip, sp, 1, cr);

    return (err);
}

/*
 * XXX Scan sorted array of acl's, checking for:
 * 1) Any duplicate/conflicting entries (same type and id)
 * 2) More than 1 of USER_OBJ, GROUP_OBJ, OTHER_OBJ, CLASS_OBJ
 * 3) More than 1 of DEF_USER_OBJ, DEF_GROUP_OBJ, DEF_OTHER_OBJ,
 *    DEF_CLASS_OBJ
 *
 * Parameters:
 *	aclentp - ptr to sorted list of acl entries.
 *	nentries - # acl entries on the list
 *	flag - Bitmap (ACL_CHECK and/or DEF_ACL_CHECK) indicating
 *	       whether the list contains regular acls, default acls,
 *	       or both.
 *
 * Returns:	0 - Success
 *		EINVAL - Invalid list (dups or multiple entries of
 *			 type USER_OBJ, etc)
 */
static int
acl_validate(aclent_t *aclentp, int nentries, int flag)
{
    int	i;
    int	nuser_objs = 0;
    int	ngroup_objs = 0;
    int	nother_objs = 0;
    int	nclass_objs = 0;
    int	ndef_user_objs = 0;
    int	ndef_group_objs = 0;
    int	ndef_other_objs = 0;
    int	ndef_class_objs = 0;
    int	nusers = 0;
    int	ngroups = 0;
    int	ndef_users = 0;
    int	ndef_groups = 0;
    int	numdefs = 0;

    /* Null list or list of one */
    if (aclentp == NULL)
        return (0);

    if (nentries <= 0)
        return (EINVAL);

    for (i = 1; i < nentries; i++) {
        if (((aclentp[i - 1].a_type == aclentp[i].a_type) &&
            (aclentp[i - 1].a_id == aclentp[i].a_id)) ||
            (aclentp[i - 1].a_perm > 07)) {
            return (EINVAL);
        }
    }

    if (flag == 0 || (flag != ACL_CHECK && flag != DEF_ACL_CHECK))
        return (EINVAL);

    /* Count types */
    for (i = 0; i < nentries; i++) {
        switch (aclentp[i].a_type) {
        case USER_OBJ:		/* Owner */
            nuser_objs++;
            break;
        case GROUP_OBJ:		/* Group */
            ngroup_objs++;
            break;
        case OTHER_OBJ:		/* Other */
            nother_objs++;
            break;
        case CLASS_OBJ:		/* Mask */
            nclass_objs++;
            break;
        case DEF_USER_OBJ:	/* Default Owner */
            ndef_user_objs++;
            break;
        case DEF_GROUP_OBJ:	/* Default Group */
            ndef_group_objs++;
            break;
        case DEF_OTHER_OBJ:	/* Default Other */
            ndef_other_objs++;
            break;
        case DEF_CLASS_OBJ:	/* Default Mask */
            ndef_class_objs++;
            break;
        case USER:		/* Users */
            nusers++;
            break;
        case GROUP:		/* Groups */
            ngroups++;
            break;
        case DEF_USER:		/* Default Users */
            ndef_users++;
            break;
        case DEF_GROUP:		/* Default Groups */
            ndef_groups++;
            break;
        default:		/* Unknown type */
            return (EINVAL);
        }
    }

    /*
     * For normal acl's, we require there be one (and only one)
     * USER_OBJ, GROUP_OBJ and OTHER_OBJ.  There is either zero
     * or one CLASS_OBJ.
     */
    if (flag & ACL_CHECK) {
        if (nuser_objs != 1 || ngroup_objs != 1 ||
            nother_objs != 1 || nclass_objs > 1) {
            return (EINVAL);
        }
        /*
         * If there are ANY group acls, there MUST be a
         * class_obj(mask) acl (1003.6/D12 p. 29 lines 75-80).
         */
        if (ngroups && !nclass_objs) {
            return (EINVAL);
        }
        if (nuser_objs + ngroup_objs + nother_objs + nclass_objs +
            ngroups + nusers > MAX_ACL_ENTRIES)
            return (EINVAL);
    }

    /*
     * For default acl's, we require that there be either one (and
     * only one) DEF_USER_OBJ, DEF_GROUP_OBJ and DEF_OTHER_OBJ, or
     * that there be none of them.
     */
    if (flag & DEF_ACL_CHECK) {
        if (ndef_other_objs > 1 || ndef_user_objs > 1 ||
            ndef_group_objs > 1 || ndef_class_objs > 1) {
            return (EINVAL);
        }

        numdefs = ndef_other_objs + ndef_user_objs + ndef_group_objs;

        if (numdefs != 0 && numdefs != 3) {
            return (EINVAL);
        }
        /*
         * If there are ANY def_group acls, there MUST be a
         * def_class_obj(mask) acl (1003.6/D12 P. 29 lines 75-80).
         * XXX(jimh) This is inferred.
         */
        if (ndef_groups && !ndef_class_objs) {
            return (EINVAL);
        }
        if ((ndef_users || ndef_groups) &&
            ((numdefs != 3) && !ndef_class_objs)) {
            return (EINVAL);
        }
        if (ndef_user_objs + ndef_group_objs + ndef_other_objs +
            ndef_class_objs + ndef_users + ndef_groups >
            MAX_ACL_ENTRIES)
            return (EINVAL);
    }
    return (0);
}
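/*
 * Example of the rules above: the sorted list (user::rwx, user:100:r--,
 * group::r-x, mask::r-x, other::---) passes ACL_CHECK.  Adding
 * group:100:r-- without a CLASS_OBJ entry fails, since any named GROUP
 * entry requires a mask; duplicate ids of the same type, or a perm
 * value above 07, fail in the pairwise scan before the counting pass.
 */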
static int
formacl(ufs_ic_acl_t **aclpp, aclent_t *aclentp)
{
    ufs_ic_acl_t *uaclp;

    uaclp = kmem_alloc(sizeof (ufs_ic_acl_t), KM_SLEEP);
    uaclp->acl_ic_perm = aclentp->a_perm;
    uaclp->acl_ic_who = aclentp->a_id;
    uaclp->acl_ic_next = *aclpp;
    *aclpp = uaclp;
    return (0);
}

/*
 * XXX - Make more efficient
 * Convert from the vsecattr struct, used by the VOP interface, to
 * the ufs_acl_entry struct used for in-core storage of acl's.
 *
 * Parameters:
 *	vsap - Ptr to array of security attributes.
 *	spp - Ptr to ptr to si struct for the results
 *
 * Returns:	0 - Success
 *		N - From errno.h
 */
static int
vsecattr2aclentry(vsecattr_t *vsap, si_t **spp)
{
    aclent_t	*aclentp, *aclp;
    si_t		*sp;
    int		err;
    int		i;

    /* Sort & validate the lists on the vsap */
    ksort((caddr_t)vsap->vsa_aclentp, vsap->vsa_aclcnt,
        sizeof (aclent_t), cmp2acls);
    ksort((caddr_t)vsap->vsa_dfaclentp, vsap->vsa_dfaclcnt,
        sizeof (aclent_t), cmp2acls);
    if ((err = acl_validate(vsap->vsa_aclentp,
        vsap->vsa_aclcnt, ACL_CHECK)) != 0)
        return (err);
    if ((err = acl_validate(vsap->vsa_dfaclentp,
        vsap->vsa_dfaclcnt, DEF_ACL_CHECK)) != 0)
        return (err);

    /* Create new si struct and hang acl's off it */
    sp = kmem_zalloc(sizeof (si_t), KM_SLEEP);
    rw_init(&sp->s_lock, NULL, RW_DEFAULT, NULL);

    /* Process acl list */
    aclp = (aclent_t *)vsap->vsa_aclentp;
    aclentp = aclp + vsap->vsa_aclcnt - 1;
    for (i = 0; i < vsap->vsa_aclcnt; i++) {
        switch (aclentp->a_type) {
        case USER_OBJ:		/* Owner */
            if (err = formacl(&sp->aowner, aclentp))
                goto error;
            break;
        case GROUP_OBJ:		/* Group */
            if (err = formacl(&sp->agroup, aclentp))
                goto error;
            break;
        case OTHER_OBJ:		/* Other */
            if (err = formacl(&sp->aother, aclentp))
                goto error;
            break;
        case USER:
            if (err = formacl(&sp->ausers, aclentp))
                goto error;
            break;
        case CLASS_OBJ:		/* Mask */
            sp->aclass.acl_ismask = 1;
            sp->aclass.acl_maskbits = aclentp->a_perm;
            break;
        case GROUP:
            if (err = formacl(&sp->agroups, aclentp))
                goto error;
            break;
        default:
            break;
        }
        aclentp--;
    }

    /* Process default acl list */
    aclp = (aclent_t *)vsap->vsa_dfaclentp;
    aclentp = aclp + vsap->vsa_dfaclcnt - 1;
    for (i = 0; i < vsap->vsa_dfaclcnt; i++) {
        switch (aclentp->a_type) {
        case DEF_USER_OBJ:	/* Default Owner */
            if (err = formacl(&sp->downer, aclentp))
                goto error;
            break;
        case DEF_GROUP_OBJ:	/* Default Group */
            if (err = formacl(&sp->dgroup, aclentp))
                goto error;
            break;
        case DEF_OTHER_OBJ:	/* Default Other */
            if (err = formacl(&sp->dother, aclentp))
                goto error;
            break;
        case DEF_USER:
            if (err = formacl(&sp->dusers, aclentp))
                goto error;
            break;
        case DEF_CLASS_OBJ:	/* Default Mask */
            sp->dclass.acl_ismask = 1;
            sp->dclass.acl_maskbits = aclentp->a_perm;
            break;
        case DEF_GROUP:
            if (err = formacl(&sp->dgroups, aclentp))
                goto error;
            break;
        default:
            break;
        }
        aclentp--;
    }
    *spp = sp;
    return (0);

error:
    ufs_si_free_mem(sp);
    return (err);
}

void
formvsec(int obj_type, ufs_ic_acl_t *aclp, aclent_t **aclentpp)
{
    for (; aclp; aclp = aclp->acl_ic_next) {
        (*aclentpp)->a_type = obj_type;
        (*aclentpp)->a_perm = aclp->acl_ic_perm;
        (*aclentpp)->a_id = aclp->acl_ic_who;
        (*aclentpp)++;
    }
}

/*
 * XXX - Make more efficient
 * Convert from the ufs_acl_entry struct used for in-core storage of
 * acl's to the vsecattr struct, used by the VOP interface.
 *
 * Parameters:
 *	sp - Ptr to si struct with the acls
 *	vsap - Ptr to a vsecattr struct which will take the results.
 *
 * Returns:	0 - Success
 *		N - From errno table
 */
static int
aclentry2vsecattr(si_t *sp, vsecattr_t *vsap)
{
    aclent_t	*aclentp;
    int		numacls = 0;
    int		err;

    vsap->vsa_aclentp = vsap->vsa_dfaclentp = NULL;

    numacls = acl_count(sp->aowner) +
        acl_count(sp->agroup) +
        acl_count(sp->aother) +
        acl_count(sp->ausers) +
        acl_count(sp->agroups);
    if (sp->aclass.acl_ismask)
        numacls++;

    if (numacls == 0)
        goto do_defaults;

    if (vsap->vsa_mask & (VSA_ACLCNT | VSA_ACL))
        vsap->vsa_aclcnt = numacls;

    if (vsap->vsa_mask & VSA_ACL) {
        vsap->vsa_aclentp = kmem_zalloc(numacls * sizeof (aclent_t),
            KM_SLEEP);
        aclentp = vsap->vsa_aclentp;

        formvsec(USER_OBJ, sp->aowner, &aclentp);
        formvsec(USER, sp->ausers, &aclentp);
        formvsec(GROUP_OBJ, sp->agroup, &aclentp);
        formvsec(GROUP, sp->agroups, &aclentp);
        formvsec(OTHER_OBJ, sp->aother, &aclentp);

        if (sp->aclass.acl_ismask) {
            aclentp->a_type = CLASS_OBJ;	/* Mask */
            aclentp->a_perm = sp->aclass.acl_maskbits;
            aclentp->a_id = 0;
            aclentp++;
        }

        /* Sort the acl list */
        ksort((caddr_t)vsap->vsa_aclentp, vsap->vsa_aclcnt,
            sizeof (aclent_t), cmp2acls);
        /* Check the acl list */
        if ((err = acl_validate(vsap->vsa_aclentp,
            vsap->vsa_aclcnt, ACL_CHECK)) != 0) {
            kmem_free(vsap->vsa_aclentp,
                numacls * sizeof (aclent_t));
            vsap->vsa_aclentp = NULL;
            return (err);
        }
    }
do_defaults:
    /* Process Defaults */

    numacls = acl_count(sp->downer) +
        acl_count(sp->dgroup) +
        acl_count(sp->dother) +
        acl_count(sp->dusers) +
        acl_count(sp->dgroups);
    if (sp->dclass.acl_ismask)
        numacls++;

    if (numacls == 0)
        goto do_others;

    if (vsap->vsa_mask & (VSA_DFACLCNT | VSA_DFACL))
        vsap->vsa_dfaclcnt = numacls;

    if (vsap->vsa_mask & VSA_DFACL) {
        vsap->vsa_dfaclentp =
            kmem_zalloc(numacls * sizeof (aclent_t), KM_SLEEP);
        aclentp = vsap->vsa_dfaclentp;
        formvsec(DEF_USER_OBJ, sp->downer, &aclentp);
        formvsec(DEF_USER, sp->dusers, &aclentp);
        formvsec(DEF_GROUP_OBJ, sp->dgroup, &aclentp);
        formvsec(DEF_GROUP, sp->dgroups, &aclentp);
        formvsec(DEF_OTHER_OBJ, sp->dother, &aclentp);

        if (sp->dclass.acl_ismask) {
            aclentp->a_type = DEF_CLASS_OBJ;	/* Mask */
            aclentp->a_perm = sp->dclass.acl_maskbits;
            aclentp->a_id = 0;
            aclentp++;
        }

        /* Sort the default acl list */
        ksort((caddr_t)vsap->vsa_dfaclentp, vsap->vsa_dfaclcnt,
            sizeof (aclent_t), cmp2acls);
        if ((err = acl_validate(vsap->vsa_dfaclentp,
            vsap->vsa_dfaclcnt, DEF_ACL_CHECK)) != 0) {
            if (vsap->vsa_aclentp != NULL)
                kmem_free(vsap->vsa_aclentp,
                    vsap->vsa_aclcnt * sizeof (aclent_t));
            kmem_free(vsap->vsa_dfaclentp,
                vsap->vsa_dfaclcnt * sizeof (aclent_t));
            vsap->vsa_aclentp = vsap->vsa_dfaclentp = NULL;
            return (err);
        }
    }

do_others:
    return (0);
}

static void
acl_free(ufs_ic_acl_t *aclp)
{
    while (aclp != NULL) {
        ufs_ic_acl_t *nextaclp = aclp->acl_ic_next;
        kmem_free(aclp, sizeof (ufs_ic_acl_t));
        aclp = nextaclp;
    }
}

/*
 * ufs_si_free_mem will discard the sp, and the acl hanging off of the
 * sp.  It is required that the sp not be locked, and not be in the
 * cache.
 *
 * input: pointer to sp to discard.
 *
 * return - nothing.
 */
static void
ufs_si_free_mem(si_t *sp)
{
    ASSERT(!(sp->s_flags & SI_CACHED));
    ASSERT(!RW_LOCK_HELD(&sp->s_lock));
    /*
     * Free the acl entries.
     */
    acl_free(sp->aowner);
    acl_free(sp->agroup);
    acl_free(sp->aother);
    acl_free(sp->ausers);
    acl_free(sp->agroups);

    acl_free(sp->downer);
    acl_free(sp->dgroup);
    acl_free(sp->dother);
    acl_free(sp->dusers);
    acl_free(sp->dgroups);

    rw_destroy(&sp->s_lock);
    kmem_free(sp, sizeof (si_t));
}

void
acl_cpy(ufs_ic_acl_t *saclp, ufs_ic_acl_t *daclp)
{
    ufs_ic_acl_t *aclp, *prev_aclp = NULL, *aclp1;

    if (saclp == NULL) {
        daclp = NULL;
        return;
    }
    prev_aclp = daclp;

    for (aclp = saclp; aclp != NULL; aclp = aclp->acl_ic_next) {
        aclp1 = kmem_alloc(sizeof (ufs_ic_acl_t), KM_SLEEP);
        aclp1->acl_ic_next = NULL;
        aclp1->acl_ic_who = aclp->acl_ic_who;
        aclp1->acl_ic_perm = aclp->acl_ic_perm;
        prev_aclp->acl_ic_next = aclp1;
        prev_aclp = (ufs_ic_acl_t *)&aclp1->acl_ic_next;
    }
}
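/*
 * Note on acl_cpy(): prev_aclp starts as the destination pointer cast
 * to a node, and each pass re-points it at &aclp1->acl_ic_next, so the
 * "prev_aclp->acl_ic_next = aclp1" store works for both the list head
 * and interior links.  This relies on acl_ic_next being the first
 * member of ufs_ic_acl_t (see ufs_acl.h), which is also why callers
 * below pass pointers like &tsp->aowner cast to (ufs_ic_acl_t *).
 */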
/*
 * ufs_si_inherit takes a parent acl structure (saclp) and the inode
 * of the object that is inheriting an acl and returns the inode
 * with the acl linked to it.  It also writes the acl to disk if
 * it is a unique inode.
 *
 * ip - pointer to inode of object inheriting the acl (contents lock)
 * tdp - parent inode (rw_lock and contents lock)
 * mode - creation modes
 * cr - credentials pointer
 */
int
ufs_si_inherit(struct inode *ip, struct inode *tdp, o_mode_t mode, cred_t *cr)
{
    si_t *tsp, *sp = tdp->i_ufs_acl;
    int error;
    o_mode_t old_modes, old_uid, old_gid;
    int mask;

    ASSERT(RW_WRITE_HELD(&ip->i_contents));
    ASSERT(RW_WRITE_HELD(&tdp->i_rwlock));
    ASSERT(RW_WRITE_HELD(&tdp->i_contents));

    /*
     * If links, symbolic links, or other objects that cannot take an
     * acl are copied or moved to a directory with a default acl, do
     * not allow inheritance; just return.
     */
    if (!CHECK_ACL_ALLOWED(ip->i_mode & IFMT))
        return (0);

    /* lock the parent security information */
    rw_enter(&sp->s_lock, RW_READER);

    ASSERT(((tdp->i_mode & IFMT) == IFDIR) ||
        ((tdp->i_mode & IFMT) == IFATTRDIR));

    mask = ((sp->downer != NULL) ? 1 : 0) |
        ((sp->dgroup != NULL) ? 2 : 0) |
        ((sp->dother != NULL) ? 4 : 0);

    if (mask == 0) {
        rw_exit(&sp->s_lock);
        return (0);
    }

    if (mask != 7) {
        rw_exit(&sp->s_lock);
        return (EINVAL);
    }

    tsp = kmem_zalloc(sizeof (si_t), KM_SLEEP);
    rw_init(&tsp->s_lock, NULL, RW_DEFAULT, NULL);

    /* copy the default acls */

    ASSERT(RW_READ_HELD(&sp->s_lock));
    acl_cpy(sp->downer, (ufs_ic_acl_t *)&tsp->aowner);
    acl_cpy(sp->dgroup, (ufs_ic_acl_t *)&tsp->agroup);
    acl_cpy(sp->dother, (ufs_ic_acl_t *)&tsp->aother);
    acl_cpy(sp->dusers, (ufs_ic_acl_t *)&tsp->ausers);
    acl_cpy(sp->dgroups, (ufs_ic_acl_t *)&tsp->agroups);
    tsp->aclass.acl_ismask = sp->dclass.acl_ismask;
    tsp->aclass.acl_maskbits = sp->dclass.acl_maskbits;

    /*
     * Set the owner, group, and other values from the master
     * inode.
     */
    MODE2ACL(tsp->aowner, (mode >> 6), ip->i_uid);
    MODE2ACL(tsp->agroup, (mode >> 3), ip->i_gid);
    MODE2ACL(tsp->aother, (mode), 0);

    if (tsp->aclass.acl_ismask) {
        tsp->aclass.acl_maskbits &= mode >> 3;
    }

    /* copy default acl if necessary */

    if (((ip->i_mode & IFMT) == IFDIR) ||
        ((ip->i_mode & IFMT) == IFATTRDIR)) {
        acl_cpy(sp->downer, (ufs_ic_acl_t *)&tsp->downer);
        acl_cpy(sp->dgroup, (ufs_ic_acl_t *)&tsp->dgroup);
        acl_cpy(sp->dother, (ufs_ic_acl_t *)&tsp->dother);
        acl_cpy(sp->dusers, (ufs_ic_acl_t *)&tsp->dusers);
        acl_cpy(sp->dgroups, (ufs_ic_acl_t *)&tsp->dgroups);
        tsp->dclass.acl_ismask = sp->dclass.acl_ismask;
        tsp->dclass.acl_maskbits = sp->dclass.acl_maskbits;
    }
    /*
     * Save the new 9 mode bits in the inode (ip->ic_smode) for
     * ufs_getattr.  Be sure the mode can be recovered if the store
     * fails.
     */
    old_modes = ip->i_mode;
    old_uid = ip->i_uid;
    old_gid = ip->i_gid;
    /*
     * Store the acl, and get back a new security anchor if
     * it is a duplicate.
     */
    rw_exit(&sp->s_lock);
    rw_enter(&ip->i_rwlock, RW_WRITER);

    /*
     * Suppress out of inodes messages if instructed in the
     * tdp inode.
     */
    ip->i_flag |= tdp->i_flag & IQUIET;

    if ((error = ufs_si_store(ip, tsp, 0, cr)) != 0) {
        ip->i_mode = old_modes;
        ip->i_uid = old_uid;
        ip->i_gid = old_gid;
    }
    ip->i_flag &= ~IQUIET;
    rw_exit(&ip->i_rwlock);
    return (error);
}
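/*
 * Example of the inheritance above (assuming MODE2ACL() fills the
 * entry's perm from the low three bits of its mode argument): a
 * directory with default:user::rwx, default:group::r-x,
 * default:other::r-x and default:mask::r-x, receiving a file created
 * with mode 0666, yields user::rw-, group::rw-, other::rw-, with the
 * inherited mask trimmed to r-- (05 & 06 == 04), so the effective
 * group access is r--.
 */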
si_t *
ufs_acl_cp(si_t *sp)
{
    si_t *dsp;

    ASSERT(RW_READ_HELD(&sp->s_lock));
    ASSERT(sp->s_ref && sp->s_use);

    dsp = kmem_zalloc(sizeof (si_t), KM_SLEEP);
    rw_init(&dsp->s_lock, NULL, RW_DEFAULT, NULL);

    acl_cpy(sp->aowner, (ufs_ic_acl_t *)&dsp->aowner);
    acl_cpy(sp->agroup, (ufs_ic_acl_t *)&dsp->agroup);
    acl_cpy(sp->aother, (ufs_ic_acl_t *)&dsp->aother);
    acl_cpy(sp->ausers, (ufs_ic_acl_t *)&dsp->ausers);
    acl_cpy(sp->agroups, (ufs_ic_acl_t *)&dsp->agroups);

    dsp->aclass.acl_ismask = sp->aclass.acl_ismask;
    dsp->aclass.acl_maskbits = sp->aclass.acl_maskbits;

    acl_cpy(sp->downer, (ufs_ic_acl_t *)&dsp->downer);
    acl_cpy(sp->dgroup, (ufs_ic_acl_t *)&dsp->dgroup);
    acl_cpy(sp->dother, (ufs_ic_acl_t *)&dsp->dother);
    acl_cpy(sp->dusers, (ufs_ic_acl_t *)&dsp->dusers);
    acl_cpy(sp->dgroups, (ufs_ic_acl_t *)&dsp->dgroups);

    dsp->dclass.acl_ismask = sp->dclass.acl_ismask;
    dsp->dclass.acl_maskbits = sp->dclass.acl_maskbits;

    return (dsp);
}

int
ufs_acl_setattr(struct inode *ip, struct vattr *vap, cred_t *cr)
{
    si_t *sp;
    int mask = vap->va_mask;
    int error = 0;

    ASSERT(RW_WRITE_HELD(&ip->i_contents));

    if (!(mask & (AT_MODE|AT_UID|AT_GID)))
        return (0);

    /*
     * If there are no regular acl's, there is nothing to do, so
     * let's get out.
     */
    if (!(ip->i_ufs_acl) || !(ip->i_ufs_acl->aowner))
        return (0);

    rw_enter(&ip->i_ufs_acl->s_lock, RW_READER);
    sp = ufs_acl_cp(ip->i_ufs_acl);
    ASSERT(sp != ip->i_ufs_acl);

    /*
     * Set the mask to the group permissions if a mask entry
     * exists.  Otherwise, set the group obj bits to the group
     * permissions.  Since non-trivial ACLs always have a mask,
     * and the mask is the final arbiter of group permissions,
     * setting the mask has the effect of changing the effective
     * group permissions, even if the group_obj permissions in
     * the ACL aren't changed.  Posix P1003.1e states that when
     * an ACL mask exists, chmod(2) must set the acl mask (NOT the
     * group_obj permissions) to the requested group permissions.
     */
    if (mask & AT_MODE) {
        sp->aowner->acl_ic_perm = (o_mode_t)(ip->i_mode & 0700) >> 6;
        if (sp->aclass.acl_ismask)
            sp->aclass.acl_maskbits =
                (o_mode_t)(ip->i_mode & 070) >> 3;
        else
            sp->agroup->acl_ic_perm =
                (o_mode_t)(ip->i_mode & 070) >> 3;
        sp->aother->acl_ic_perm = (o_mode_t)(ip->i_mode & 07);
    }

    if (mask & AT_UID) {
        /* Caller has verified our privileges */
        sp->aowner->acl_ic_who = ip->i_uid;
    }

    if (mask & AT_GID) {
        sp->agroup->acl_ic_who = ip->i_gid;
    }

    rw_exit(&ip->i_ufs_acl->s_lock);
    error = ufs_si_store(ip, sp, 0, cr);
    return (error);
}

static int
acl_count(ufs_ic_acl_t *p)
{
    ufs_ic_acl_t	*acl;
    int		count;

    for (count = 0, acl = p; acl; acl = acl->acl_ic_next, count++)
        ;
    return (count);
}

/*
 * Takes as input a security structure and generates a buffer
 * with fsd's in a form which can be written to the shadow inode.
 */
static int
ufs_sectobuf(si_t *sp, caddr_t *buf, size_t *len)
{
    size_t		acl_size;
    size_t		def_acl_size;
    caddr_t		buffer;
    struct ufs_fsd	*fsdp;
    ufs_acl_t	*bufaclp;

    /*
     * Calc size of buffer to hold all the acls
     */
    acl_size = acl_count(sp->aowner) +	/* owner */
        acl_count(sp->agroup) +		/* owner group */
        acl_count(sp->aother) +		/* owner other */
        acl_count(sp->ausers) +		/* acl list */
        acl_count(sp->agroups);		/* group acls */
    if (sp->aclass.acl_ismask)
        acl_size++;

    /* Convert to bytes */
    acl_size *= sizeof (ufs_acl_t);

    /* Add fsd header */
    if (acl_size)
        acl_size += 2 * sizeof (int);

    /*
     * Calc size of buffer to hold all the default acls
     */
    def_acl_size =
        acl_count(sp->downer) +	/* def owner */
        acl_count(sp->dgroup) +	/* def owner group */
        acl_count(sp->dother) +	/* def owner other */
        acl_count(sp->dusers) +	/* def users */
        acl_count(sp->dgroups);	/* def group acls */
    if (sp->dclass.acl_ismask)
        def_acl_size++;

    /*
     * Convert to bytes
     */
    def_acl_size *= sizeof (ufs_acl_t);

    /*
     * Add fsd header
     */
    if (def_acl_size)
        def_acl_size += 2 * sizeof (int);

    if (acl_size + def_acl_size == 0)
        return (0);

    buffer = kmem_zalloc((acl_size + def_acl_size), KM_SLEEP);
    bufaclp = (ufs_acl_t *)buffer;

    if (acl_size == 0)
        goto wrtdefs;

    /* create fsd and copy acls */
    fsdp = (struct ufs_fsd *)bufaclp;
    fsdp->fsd_type = FSD_ACL;
    bufaclp = (ufs_acl_t *)&fsdp->fsd_data[0];

    ACL_MOVE(sp->aowner, USER_OBJ, bufaclp);
    ACL_MOVE(sp->agroup, GROUP_OBJ, bufaclp);
    ACL_MOVE(sp->aother, OTHER_OBJ, bufaclp);
    ACL_MOVE(sp->ausers, USER, bufaclp);
    ACL_MOVE(sp->agroups, GROUP, bufaclp);

    if (sp->aclass.acl_ismask) {
        bufaclp->acl_tag = CLASS_OBJ;
        bufaclp->acl_who = (uid_t)sp->aclass.acl_ismask;
        bufaclp->acl_perm = (o_mode_t)sp->aclass.acl_maskbits;
        bufaclp++;
    }
    ASSERT(acl_size <= INT_MAX);
    fsdp->fsd_size = (int)acl_size;

wrtdefs:
    if (def_acl_size == 0)
        goto alldone;

    /* if defaults exist then create fsd and copy default acls */
    fsdp = (struct ufs_fsd *)bufaclp;
    fsdp->fsd_type = FSD_DFACL;
    bufaclp = (ufs_acl_t *)&fsdp->fsd_data[0];

    ACL_MOVE(sp->downer, DEF_USER_OBJ, bufaclp);
    ACL_MOVE(sp->dgroup, DEF_GROUP_OBJ, bufaclp);
    ACL_MOVE(sp->dother, DEF_OTHER_OBJ, bufaclp);
    ACL_MOVE(sp->dusers, DEF_USER, bufaclp);
    ACL_MOVE(sp->dgroups, DEF_GROUP, bufaclp);
    if (sp->dclass.acl_ismask) {
        bufaclp->acl_tag = DEF_CLASS_OBJ;
        bufaclp->acl_who = (uid_t)sp->dclass.acl_ismask;
        bufaclp->acl_perm = (o_mode_t)sp->dclass.acl_maskbits;
        bufaclp++;
    }
    ASSERT(def_acl_size <= INT_MAX);
    fsdp->fsd_size = (int)def_acl_size;

alldone:
    *buf = buffer;
    *len = acl_size + def_acl_size;

    return (0);
}

/*
 * Free a shadow inode on disk and in memory.
 */
int
ufs_si_free(si_t *sp, struct vfs *vfsp, cred_t *cr)
{
    struct inode	*sip;
    int		shadow;
    int		err = 0;
    int		refcnt;
    int		signature;

    ASSERT(vfsp);
    ASSERT(sp);

    rw_enter(&sp->s_lock, RW_READER);
    ASSERT(sp->s_shadow <= INT_MAX);
    shadow = (int)sp->s_shadow;
    ASSERT(sp->s_ref);
    rw_exit(&sp->s_lock);

    /*
     * Decrement link count on the shadow inode,
     * and decrement reference count on the sip.
     */
    if ((err = ufs_iget_alloced(vfsp, shadow, &sip, cr)) == 0) {
        rw_enter(&sip->i_contents, RW_WRITER);
        rw_enter(&sp->s_lock, RW_WRITER);
        ASSERT(sp->s_shadow == shadow);
        ASSERT(sip->i_dquot == 0);
        /* Decrement link count */
        ASSERT(sip->i_nlink > 0);
        /*
         * bug #1264710 assertion failure below
         */
        sp->s_use = --sip->i_nlink;
        ufs_setreclaim(sip);
        TRANS_INODE(sip->i_ufsvfs, sip);
        sip->i_flag |= ICHG | IMOD;
        sip->i_seq++;
        ITIMES_NOLOCK(sip);
        /* Dec ref counts on si referenced by this ip */
        refcnt = --sp->s_ref;
        signature = sp->s_signature;
        ASSERT(sp->s_ref >= 0 && sp->s_ref <= sp->s_use);
        /*
         * Release s_lock before calling VN_RELE
         * (which may want to acquire i_contents).
         */
        rw_exit(&sp->s_lock);
        rw_exit(&sip->i_contents);
        VN_RELE(ITOV(sip));
    } else {
        rw_enter(&sp->s_lock, RW_WRITER);
        /* Dec ref counts on si referenced by this ip */
        refcnt = --sp->s_ref;
        signature = sp->s_signature;
        ASSERT(sp->s_ref >= 0 && sp->s_ref <= sp->s_use);
        rw_exit(&sp->s_lock);
    }

    if (refcnt == 0)
        si_cache_del(sp, signature);
    return (err);
}

/*
 * Search the si cache for an si structure by inode #.
 * Returns a locked si structure.
 *
 * Parameters:
 *	ip - Ptr to an inode on this fs
 *	spp - Ptr to ptr to si struct for the results, if found.
 *
 * Returns:	0 - Success (results in spp)
 *		1 - Failure (spp undefined)
 */
static int
si_cachei_get(struct inode *ip, si_t **spp)
{
    si_t	*sp;

    rw_enter(&si_cache_lock, RW_READER);
loop:
    for (sp = si_cachei[SI_HASH(ip->i_shadow)]; sp; sp = sp->s_forw)
        if (sp->s_shadow == ip->i_shadow && sp->s_dev == ip->i_dev)
            break;

    if (sp == NULL) {
        /* Not in cache */
        rw_exit(&si_cache_lock);
        return (1);
    }
    /* Found it */
    rw_enter(&sp->s_lock, RW_WRITER);
alldone:
    rw_exit(&si_cache_lock);
    *spp = sp;
    return (0);
}
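/*
 * The by-acl lookup that follows is a two-step filter: si_signature()
 * is a cheap additive checksum, so two different acls can collide
 * (e.g. swapping the perm bits of two USER entries leaves the sum
 * unchanged), and si_cmp() must still do the field-by-field comparison
 * before a hit is declared.
 */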
/*
 * Search the si cache by si structure (ie duplicate of the one
 * passed in).  In order for a match the signatures must be the same,
 * the devices must be the same, the acls must match and the link
 * count of the cached shadow must be less than the size of
 * ic_nlink - 1.  MAXLINK - 1 is used to allow the count to be
 * incremented one more time by the caller.
 * Returns a locked si structure.
 *
 * Parameters:
 *	ip - Ptr to an inode on this fs
 *	spi - Ptr to the si struct we're searching the cache for.
 *	spp - Ptr to ptr to si struct for the results, if found.
 *
 * Returns:	0 - Success (results in spp)
 *		1 - Failure (spp undefined)
 */
static int
si_cachea_get(struct inode *ip, si_t *spi, si_t **spp)
{
    si_t	*sp;

    spi->s_dev = ip->i_dev;
    spi->s_signature = si_signature(spi);
    rw_enter(&si_cache_lock, RW_READER);
loop:
    for (sp = si_cachea[SI_HASH(spi->s_signature)]; sp; sp = sp->s_next) {
        if (sp->s_signature == spi->s_signature &&
            sp->s_dev == spi->s_dev &&
            sp->s_use > 0 &&			/* deleting */
            sp->s_use <= (MAXLINK - 1) &&	/* Too many links */
            !si_cmp(sp, spi))
            break;
    }

    if (sp == NULL) {
        /* Cache miss */
        rw_exit(&si_cache_lock);
        return (1);
    }
    /* Found it */
    rw_enter(&sp->s_lock, RW_WRITER);
alldone:
    spi->s_shadow = sp->s_shadow; /* XXX For debugging */
    rw_exit(&si_cache_lock);
    *spp = sp;
    return (0);
}

/*
 * Place an si structure in the si cache.  May cause duplicates.
 *
 * Parameters:
 *	sp - Ptr to the si struct to add to the cache.
 *
 * Returns: Nothing (void)
 */
static void
si_cache_put(si_t *sp)
{
    si_t	**tspp;

    ASSERT(sp->s_fore == NULL);
    rw_enter(&si_cache_lock, RW_WRITER);
    if (!sp->s_signature)
        sp->s_signature = si_signature(sp);
    sp->s_flags |= SI_CACHED;
    sp->s_fore = NULL;

    /* The 'by acl' chains */
    tspp = &si_cachea[SI_HASH(sp->s_signature)];
    sp->s_next = *tspp;
    *tspp = sp;

    /* The 'by inode' chains */
    tspp = &si_cachei[SI_HASH(sp->s_shadow)];
    sp->s_forw = *tspp;
    *tspp = sp;

    rw_exit(&si_cache_lock);
}
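/*
 * Each cached si_t sits on two singly linked chains at once: s_next
 * threads the 'by acl' bucket (hashed on s_signature) and s_forw
 * threads the 'by inode' bucket (hashed on s_shadow).  Deletion must
 * therefore unlink from both chains, which is what si_cache_del()
 * below does.
 */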
/*
 * The sp passed in is a candidate for deletion from the cache.  We acquire
 * the cache lock first, so no cache searches can be done.  Then we search
 * for the acl in the cache, and if we find it we can lock it and check that
 * nobody else attached to it while we were acquiring the locks.  If the acl
 * is in the cache and still has a zero reference count, then we remove it
 * from the cache and deallocate it.  If the reference count is non-zero or
 * it is not found in the cache, then someone else attached to it or has
 * already freed it, so we just return.
 *
 * Parameters:
 *	sp - Ptr to the sp struct which is the candidate for deletion.
 *	signature - the signature for the acl for lookup in the hash table
 *
 * Returns: Nothing (void)
 */
void
si_cache_del(si_t *sp, int signature)
{
    si_t	**tspp;
    int	hash;
    int	foundacl = 0;

    /*
     * Unlink & free the sp from the other queues, then destroy it.
     * Search the 'by acl' chain first, then the 'by inode' chain
     * after the acl is locked.
     */
    rw_enter(&si_cache_lock, RW_WRITER);
    hash = SI_HASH(signature);
    for (tspp = &si_cachea[hash]; *tspp; tspp = &(*tspp)->s_next) {
        if (*tspp == sp) {
            /*
             * Wait to grab the acl lock until after the acl has
             * been found in the cache.  Otherwise it might try to
             * grab a lock that has already been destroyed, or
             * delete an acl that has already been freed.
             */
            rw_enter(&sp->s_lock, RW_WRITER);
            /* See if someone else attached to it */
            if (sp->s_ref) {
                rw_exit(&sp->s_lock);
                rw_exit(&si_cache_lock);
                return;
            }
            ASSERT(sp->s_fore == NULL);
            ASSERT(sp->s_flags & SI_CACHED);
            foundacl = 1;
            *tspp = sp->s_next;
            break;
        }
    }

    /*
     * If the acl was not in the cache, we assume another thread has
     * deleted it already.  This could happen if another thread attaches
     * to the acl and then releases it after this thread has already
     * found the reference count to be zero but has not yet taken the
     * cache lock.  Both threads end up seeing a reference count of zero,
     * and call into si_cache_del.  See bug 4244827 for details on the
     * race condition.
     */
    if (foundacl == 0) {
        rw_exit(&si_cache_lock);
        return;
    }

    /* Now check the 'by inode' chain */
    hash = SI_HASH(sp->s_shadow);
    for (tspp = &si_cachei[hash]; *tspp; tspp = &(*tspp)->s_forw) {
        if (*tspp == sp) {
            *tspp = sp->s_forw;
            break;
        }
    }

    /*
     * At this point, we can unlock everything because this si
     * is no longer in the cache, thus cannot be attached to.
     */
    rw_exit(&sp->s_lock);
    rw_exit(&si_cache_lock);
    sp->s_flags &= ~SI_CACHED;
    (void) ufs_si_free_mem(sp);
}

/*
 * Alloc the hash buckets for the si cache & initialize
 * the unreferenced anchor and the cache lock.
 */
void
si_cache_init(void)
{
    rw_init(&si_cache_lock, NULL, RW_DEFAULT, NULL);

    /* The 'by acl' headers */
    si_cachea = kmem_zalloc(si_cachecnt * sizeof (si_t *), KM_SLEEP);
    /* The 'by inode' headers */
    si_cachei = kmem_zalloc(si_cachecnt * sizeof (si_t *), KM_SLEEP);
}

/*
 * aclcksum takes an acl and generates a checksum.  It takes as input
 * the acl to start at.
 *
 * s_aclp - pointer to starting acl
 *
 * returns checksum
 */
static int
aclcksum(ufs_ic_acl_t *s_aclp)
{
    ufs_ic_acl_t *aclp;
    int signature = 0;

    for (aclp = s_aclp; aclp; aclp = aclp->acl_ic_next) {
        signature += aclp->acl_ic_perm;
        signature += aclp->acl_ic_who;
    }
    return (signature);
}

/*
 * Generate a signature for an si structure.  Used by the
 * search routine si_cachea_get() to quickly identify candidates
 * prior to calling si_cmp().
 *
 * Parameters:
 *	sp - Ptr to the si struct to generate the signature for.
 *
 * Returns: A signature for the si struct (really a checksum)
 */
static int
si_signature(si_t *sp)
{
    int signature = sp->s_dev;

    signature += aclcksum(sp->aowner) + aclcksum(sp->agroup) +
        aclcksum(sp->aother) + aclcksum(sp->ausers) +
        aclcksum(sp->agroups) + aclcksum(sp->downer) +
        aclcksum(sp->dgroup) + aclcksum(sp->dother) +
        aclcksum(sp->dusers) + aclcksum(sp->dgroups);
    if (sp->aclass.acl_ismask)
        signature += sp->aclass.acl_maskbits;
    if (sp->dclass.acl_ismask)
        signature += sp->dclass.acl_maskbits;

    return (signature);
}
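/*
 * Example: with uid/gid 0 throughout, an acl of user::rw- (06),
 * group::r-- (04), other::r-- (04) on device 5 checksums to
 * 5 + 6 + 4 + 4 == 19; a second acl that merely swaps two entries'
 * perm bits sums the same, which is why si_cmp() always follows a
 * signature match.
 */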
/*
 * aclcmp compares two acls to see if they are identical.
 *
 * sp1 is source
 * sp2 is sourceb
 *
 * returns 0 if equal and 1 if not equal
 */
static int
aclcmp(ufs_ic_acl_t *aclin1p, ufs_ic_acl_t *aclin2p)
{
    ufs_ic_acl_t *aclp1;
    ufs_ic_acl_t *aclp2;

    /*
     * If the starting pointers are equal then the lists are equal,
     * so just return.
     */
    if (aclin1p == aclin2p)
        return (0);
    /*
     * Check element by element.
     */
    for (aclp1 = aclin1p, aclp2 = aclin2p; aclp1 && aclp2;
        aclp1 = aclp1->acl_ic_next, aclp2 = aclp2->acl_ic_next) {
        if (aclp1->acl_ic_perm != aclp2->acl_ic_perm ||
            aclp1->acl_ic_who != aclp2->acl_ic_who)
            return (1);
    }
    /*
     * Both must be NULL (at the end of the acl) for the lists
     * to match.
     */
    if (aclp1 || aclp2)
        return (1);

    return (0);
}

/*
 * Do extensive, field-by-field compare of two si structures.  Returns
 * 0 if they are exactly identical, 1 otherwise.
 *
 * Parameters:
 *	sp1 - Ptr to 1st si struct
 *	sp2 - Ptr to 2nd si struct
 *
 * Returns:
 *	0 - Identical
 *	1 - Not identical
 */
static int
si_cmp(si_t *sp1, si_t *sp2)
{
    if (sp1->s_dev != sp2->s_dev)
        return (1);
    if (aclcmp(sp1->aowner, sp2->aowner) ||
        aclcmp(sp1->agroup, sp2->agroup) ||
        aclcmp(sp1->aother, sp2->aother) ||
        aclcmp(sp1->ausers, sp2->ausers) ||
        aclcmp(sp1->agroups, sp2->agroups) ||
        aclcmp(sp1->downer, sp2->downer) ||
        aclcmp(sp1->dgroup, sp2->dgroup) ||
        aclcmp(sp1->dother, sp2->dother) ||
        aclcmp(sp1->dusers, sp2->dusers) ||
        aclcmp(sp1->dgroups, sp2->dgroups))
        return (1);
    if (sp1->aclass.acl_ismask != sp2->aclass.acl_ismask)
        return (1);
    if (sp1->dclass.acl_ismask != sp2->dclass.acl_ismask)
        return (1);
    if (sp1->aclass.acl_ismask &&
        sp1->aclass.acl_maskbits != sp2->aclass.acl_maskbits)
        return (1);
    if (sp1->dclass.acl_ismask &&
        sp1->dclass.acl_maskbits != sp2->dclass.acl_maskbits)
        return (1);

    return (0);
}

/*
 * Remove all acls associated with a device.  All acls must have
 * a reference count of zero.
 *
 * inputs:
 *	device - device to remove from the cache
 *
 * outputs:
 *	none
 */
void
ufs_si_cache_flush(dev_t dev)
{
    si_t	*tsp, **tspp;
    int	i;

    rw_enter(&si_cache_lock, RW_WRITER);
    for (i = 0; i < si_cachecnt; i++) {
        tspp = &si_cachea[i];
        while (*tspp) {
            if ((*tspp)->s_dev == dev) {
                *tspp = (*tspp)->s_next;
            } else {
                tspp = &(*tspp)->s_next;
            }
        }
    }
    for (i = 0; i < si_cachecnt; i++) {
        tspp = &si_cachei[i];
        while (*tspp) {
            if ((*tspp)->s_dev == dev) {
                tsp = *tspp;
                *tspp = (*tspp)->s_forw;
                tsp->s_flags &= ~SI_CACHED;
                ufs_si_free_mem(tsp);
            } else {
                tspp = &(*tspp)->s_forw;
            }
        }
    }
    rw_exit(&si_cache_lock);
}

/*
 * ufs_si_del is used to unhook an sp from an inode in memory.
 *
 * ip is the inode to remove the sp from.
 */
void
ufs_si_del(struct inode *ip)
{
    si_t	*sp = ip->i_ufs_acl;
    int	refcnt;
    int	signature;

    if (sp) {
        rw_enter(&sp->s_lock, RW_WRITER);
        refcnt = --sp->s_ref;
        signature = sp->s_signature;
        ASSERT(sp->s_ref >= 0 && sp->s_ref <= sp->s_use);
        rw_exit(&sp->s_lock);
        if (refcnt == 0)
            si_cache_del(sp, signature);
        ip->i_ufs_acl = NULL;
    }
}