/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/sid.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/unistd.h>
#include <sys/sdt.h>
#include <sys/fs/zfs.h>
#include <sys/mode.h>
#include <sys/policy.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_vfsops.h>
#include <sys/dmu.h>
#include <sys/dnode.h>
#include <sys/zap.h>
#include "fs/fs_subr.h"
#include <acl/acl_common.h>

#define ALLOW	ACE_ACCESS_ALLOWED_ACE_TYPE
#define DENY	ACE_ACCESS_DENIED_ACE_TYPE
#define MAX_ACE_TYPE	ACE_SYSTEM_ALARM_CALLBACK_OBJECT_ACE_TYPE
#define MIN_ACE_TYPE	ALLOW

#define OWNING_GROUP	(ACE_GROUP|ACE_IDENTIFIER_GROUP)
#define EVERYONE_ALLOW_MASK (ACE_READ_ACL|ACE_READ_ATTRIBUTES | \
    ACE_READ_NAMED_ATTRS|ACE_SYNCHRONIZE)
#define EVERYONE_DENY_MASK (ACE_WRITE_ACL|ACE_WRITE_OWNER | \
    ACE_WRITE_ATTRIBUTES|ACE_WRITE_NAMED_ATTRS)
#define OWNER_ALLOW_MASK (ACE_WRITE_ACL | ACE_WRITE_OWNER | \
    ACE_WRITE_ATTRIBUTES|ACE_WRITE_NAMED_ATTRS)

#define ZFS_CHECKED_MASKS (ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_READ_DATA| \
    ACE_READ_NAMED_ATTRS|ACE_WRITE_DATA|ACE_WRITE_ATTRIBUTES| \
    ACE_WRITE_NAMED_ATTRS|ACE_APPEND_DATA|ACE_EXECUTE|ACE_WRITE_OWNER| \
    ACE_WRITE_ACL|ACE_DELETE|ACE_DELETE_CHILD|ACE_SYNCHRONIZE)

#define WRITE_MASK_DATA (ACE_WRITE_DATA|ACE_APPEND_DATA|ACE_WRITE_NAMED_ATTRS)
#define WRITE_MASK_ATTRS (ACE_WRITE_ACL|ACE_WRITE_OWNER|ACE_WRITE_ATTRIBUTES| \
    ACE_DELETE|ACE_DELETE_CHILD)
#define WRITE_MASK (WRITE_MASK_DATA|WRITE_MASK_ATTRS)

#define OGE_CLEAR (ACE_READ_DATA|ACE_LIST_DIRECTORY|ACE_WRITE_DATA| \
    ACE_ADD_FILE|ACE_APPEND_DATA|ACE_ADD_SUBDIRECTORY|ACE_EXECUTE)

#define OKAY_MASK_BITS (ACE_READ_DATA|ACE_LIST_DIRECTORY|ACE_WRITE_DATA| \
    ACE_ADD_FILE|ACE_APPEND_DATA|ACE_ADD_SUBDIRECTORY|ACE_EXECUTE)

#define ALL_INHERIT (ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE | \
    ACE_NO_PROPAGATE_INHERIT_ACE|ACE_INHERIT_ONLY_ACE|ACE_INHERITED_ACE)

#define RESTRICTED_CLEAR (ACE_WRITE_ACL|ACE_WRITE_OWNER)

#define V4_ACL_WIDE_FLAGS (ZFS_ACL_AUTO_INHERIT|ZFS_ACL_DEFAULTED| \
    ZFS_ACL_PROTECTED)

#define ZFS_ACL_WIDE_FLAGS (V4_ACL_WIDE_FLAGS|ZFS_ACL_TRIVIAL|ZFS_INHERIT_ACE| \
    ZFS_ACL_OBJ_ACE)

#define ALL_MODE_EXECS (S_IXUSR | S_IXGRP | S_IXOTH)
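
/*
 * Accessors for the original (version 0) zfs_oldace_t ACE layout,
 * collected into zfs_acl_v0_ops below.
 */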
static uint16_t
zfs_ace_v0_get_type(void *acep)
{
    return (((zfs_oldace_t *)acep)->z_type);
}

static uint16_t
zfs_ace_v0_get_flags(void *acep)
{
    return (((zfs_oldace_t *)acep)->z_flags);
}

static uint32_t
zfs_ace_v0_get_mask(void *acep)
{
    return (((zfs_oldace_t *)acep)->z_access_mask);
}

static uint64_t
zfs_ace_v0_get_who(void *acep)
{
    return (((zfs_oldace_t *)acep)->z_fuid);
}

static void
zfs_ace_v0_set_type(void *acep, uint16_t type)
{
    ((zfs_oldace_t *)acep)->z_type = type;
}

static void
zfs_ace_v0_set_flags(void *acep, uint16_t flags)
{
    ((zfs_oldace_t *)acep)->z_flags = flags;
}

static void
zfs_ace_v0_set_mask(void *acep, uint32_t mask)
{
    ((zfs_oldace_t *)acep)->z_access_mask = mask;
}

static void
zfs_ace_v0_set_who(void *acep, uint64_t who)
{
    ((zfs_oldace_t *)acep)->z_fuid = who;
}

/*ARGSUSED*/
static size_t
zfs_ace_v0_size(void *acep)
{
    return (sizeof (zfs_oldace_t));
}

static size_t
zfs_ace_v0_abstract_size(void)
{
    return (sizeof (zfs_oldace_t));
}

static int
zfs_ace_v0_mask_off(void)
{
    return (offsetof(zfs_oldace_t, z_access_mask));
}

/*ARGSUSED*/
static int
zfs_ace_v0_data(void *acep, void **datap)
{
    *datap = NULL;
    return (0);
}

static acl_ops_t zfs_acl_v0_ops = {
    zfs_ace_v0_get_mask,
    zfs_ace_v0_set_mask,
    zfs_ace_v0_get_flags,
    zfs_ace_v0_set_flags,
    zfs_ace_v0_get_type,
    zfs_ace_v0_set_type,
    zfs_ace_v0_get_who,
    zfs_ace_v0_set_who,
    zfs_ace_v0_size,
    zfs_ace_v0_abstract_size,
    zfs_ace_v0_mask_off,
    zfs_ace_v0_data
};
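
/*
 * Accessors for the FUID-aware ACE layouts (zfs_ace_hdr_t, zfs_ace_t and
 * zfs_object_ace_t), collected into zfs_acl_fuid_ops below.
 */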
static uint16_t
zfs_ace_fuid_get_type(void *acep)
{
    return (((zfs_ace_hdr_t *)acep)->z_type);
}

static uint16_t
zfs_ace_fuid_get_flags(void *acep)
{
    return (((zfs_ace_hdr_t *)acep)->z_flags);
}

static uint32_t
zfs_ace_fuid_get_mask(void *acep)
{
    return (((zfs_ace_hdr_t *)acep)->z_access_mask);
}

static uint64_t
zfs_ace_fuid_get_who(void *args)
{
    uint16_t entry_type;
    zfs_ace_t *acep = args;

    entry_type = acep->z_hdr.z_flags & ACE_TYPE_FLAGS;

    if (entry_type == ACE_OWNER || entry_type == OWNING_GROUP ||
        entry_type == ACE_EVERYONE)
        return (-1);
    return (((zfs_ace_t *)acep)->z_fuid);
}

static void
zfs_ace_fuid_set_type(void *acep, uint16_t type)
{
    ((zfs_ace_hdr_t *)acep)->z_type = type;
}

static void
zfs_ace_fuid_set_flags(void *acep, uint16_t flags)
{
    ((zfs_ace_hdr_t *)acep)->z_flags = flags;
}

static void
zfs_ace_fuid_set_mask(void *acep, uint32_t mask)
{
    ((zfs_ace_hdr_t *)acep)->z_access_mask = mask;
}

static void
zfs_ace_fuid_set_who(void *arg, uint64_t who)
{
    zfs_ace_t *acep = arg;

    uint16_t entry_type = acep->z_hdr.z_flags & ACE_TYPE_FLAGS;

    if (entry_type == ACE_OWNER || entry_type == OWNING_GROUP ||
        entry_type == ACE_EVERYONE)
        return;
    acep->z_fuid = who;
}

static size_t
zfs_ace_fuid_size(void *acep)
{
    zfs_ace_hdr_t *zacep = acep;
    uint16_t entry_type;

    switch (zacep->z_type) {
    case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
    case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
    case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
    case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
        return (sizeof (zfs_object_ace_t));
    case ALLOW:
    case DENY:
        entry_type =
            (((zfs_ace_hdr_t *)acep)->z_flags & ACE_TYPE_FLAGS);
        if (entry_type == ACE_OWNER ||
            entry_type == OWNING_GROUP ||
            entry_type == ACE_EVERYONE)
            return (sizeof (zfs_ace_hdr_t));
        /*FALLTHROUGH*/
    default:
        return (sizeof (zfs_ace_t));
    }
}

static size_t
zfs_ace_fuid_abstract_size(void)
{
    return (sizeof (zfs_ace_hdr_t));
}

static int
zfs_ace_fuid_mask_off(void)
{
    return (offsetof(zfs_ace_hdr_t, z_access_mask));
}

static int
zfs_ace_fuid_data(void *acep, void **datap)
{
    zfs_ace_t *zacep = acep;
    zfs_object_ace_t *zobjp;

    switch (zacep->z_hdr.z_type) {
    case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
    case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
    case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
    case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
        zobjp = acep;
        *datap = (caddr_t)zobjp + sizeof (zfs_ace_t);
        return (sizeof (zfs_object_ace_t) - sizeof (zfs_ace_t));
    default:
        *datap = NULL;
        return (0);
    }
}

static acl_ops_t zfs_acl_fuid_ops = {
    zfs_ace_fuid_get_mask,
    zfs_ace_fuid_set_mask,
    zfs_ace_fuid_get_flags,
    zfs_ace_fuid_set_flags,
    zfs_ace_fuid_get_type,
    zfs_ace_fuid_set_type,
    zfs_ace_fuid_get_who,
    zfs_ace_fuid_set_who,
    zfs_ace_fuid_size,
    zfs_ace_fuid_abstract_size,
    zfs_ace_fuid_mask_off,
    zfs_ace_fuid_data
};
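
/*
 * Select the on-disk ACL version for a given ZPL version: pre-FUID
 * filesystems use the initial layout, newer ones use the FUID layout.
 */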
static int
zfs_acl_version(int version)
{
    if (version < ZPL_VERSION_FUID)
        return (ZFS_ACL_VERSION_INITIAL);
    else
        return (ZFS_ACL_VERSION_FUID);
}

static int
zfs_acl_version_zp(znode_t *zp)
{
    return (zfs_acl_version(zp->z_zfsvfs->z_version));
}

static zfs_acl_t *
zfs_acl_alloc(int vers)
{
    zfs_acl_t *aclp;

    aclp = kmem_zalloc(sizeof (zfs_acl_t), KM_SLEEP);
    list_create(&aclp->z_acl, sizeof (zfs_acl_node_t),
        offsetof(zfs_acl_node_t, z_next));
    aclp->z_version = vers;
    if (vers == ZFS_ACL_VERSION_FUID)
        aclp->z_ops = zfs_acl_fuid_ops;
    else
        aclp->z_ops = zfs_acl_v0_ops;
    return (aclp);
}

static zfs_acl_node_t *
zfs_acl_node_alloc(size_t bytes)
{
    zfs_acl_node_t *aclnode;

    aclnode = kmem_zalloc(sizeof (zfs_acl_node_t), KM_SLEEP);
    if (bytes) {
        aclnode->z_acldata = kmem_alloc(bytes, KM_SLEEP);
        aclnode->z_allocdata = aclnode->z_acldata;
        aclnode->z_allocsize = bytes;
        aclnode->z_size = bytes;
    }

    return (aclnode);
}

static void
zfs_acl_node_free(zfs_acl_node_t *aclnode)
{
    if (aclnode->z_allocsize)
        kmem_free(aclnode->z_allocdata, aclnode->z_allocsize);
    kmem_free(aclnode, sizeof (zfs_acl_node_t));
}

static void
zfs_acl_release_nodes(zfs_acl_t *aclp)
{
    zfs_acl_node_t *aclnode;

    while (aclnode = list_head(&aclp->z_acl)) {
        list_remove(&aclp->z_acl, aclnode);
        zfs_acl_node_free(aclnode);
    }
    aclp->z_acl_count = 0;
    aclp->z_acl_bytes = 0;
}

void
zfs_acl_free(zfs_acl_t *aclp)
{
    zfs_acl_release_nodes(aclp);
    list_destroy(&aclp->z_acl);
    kmem_free(aclp, sizeof (zfs_acl_t));
}

static boolean_t
zfs_acl_valid_ace_type(uint_t type, uint_t flags)
{
    uint16_t entry_type;

    switch (type) {
    case ALLOW:
    case DENY:
    case ACE_SYSTEM_AUDIT_ACE_TYPE:
    case ACE_SYSTEM_ALARM_ACE_TYPE:
        entry_type = flags & ACE_TYPE_FLAGS;
        return (entry_type == ACE_OWNER ||
            entry_type == OWNING_GROUP ||
            entry_type == ACE_EVERYONE || entry_type == 0 ||
            entry_type == ACE_IDENTIFIER_GROUP);
    default:
        if (type >= MIN_ACE_TYPE && type <= MAX_ACE_TYPE)
            return (B_TRUE);
    }
    return (B_FALSE);
}

static boolean_t
zfs_ace_valid(vtype_t obj_type, zfs_acl_t *aclp, uint16_t type, uint16_t iflags)
{
    /*
     * first check type of entry
     */

    if (!zfs_acl_valid_ace_type(type, iflags))
        return (B_FALSE);

    switch (type) {
    case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
    case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
    case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
    case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
        if (aclp->z_version < ZFS_ACL_VERSION_FUID)
            return (B_FALSE);
        aclp->z_hints |= ZFS_ACL_OBJ_ACE;
    }

    /*
     * next check inheritance level flags
     */

    if (obj_type == VDIR &&
        (iflags & (ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE)))
        aclp->z_hints |= ZFS_INHERIT_ACE;

    if (iflags & (ACE_INHERIT_ONLY_ACE|ACE_NO_PROPAGATE_INHERIT_ACE)) {
        if ((iflags & (ACE_FILE_INHERIT_ACE|
            ACE_DIRECTORY_INHERIT_ACE)) == 0) {
            return (B_FALSE);
        }
    }

    return (B_TRUE);
}
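
/*
 * Step to the next ACE in the ACL, advancing through the list of ACL
 * nodes as needed.  Pass start == NULL to begin with the first ACE.
 * The who/mask/flags/type of the ACE are returned through the supplied
 * pointers; NULL is returned once the ACL is exhausted.
 */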
static void *
zfs_acl_next_ace(zfs_acl_t *aclp, void *start, uint64_t *who,
    uint32_t *access_mask, uint16_t *iflags, uint16_t *type)
{
    zfs_acl_node_t *aclnode;

    if (start == NULL) {
        aclnode = list_head(&aclp->z_acl);
        if (aclnode == NULL)
            return (NULL);

        aclp->z_next_ace = aclnode->z_acldata;
        aclp->z_curr_node = aclnode;
        aclnode->z_ace_idx = 0;
    }

    aclnode = aclp->z_curr_node;

    if (aclnode == NULL)
        return (NULL);

    if (aclnode->z_ace_idx >= aclnode->z_ace_count) {
        aclnode = list_next(&aclp->z_acl, aclnode);
        if (aclnode == NULL)
            return (NULL);
        else {
            aclp->z_curr_node = aclnode;
            aclnode->z_ace_idx = 0;
            aclp->z_next_ace = aclnode->z_acldata;
        }
    }

    if (aclnode->z_ace_idx < aclnode->z_ace_count) {
        void *acep = aclp->z_next_ace;
        size_t ace_size;

        /*
         * Make sure we don't overstep our bounds
         */
        ace_size = aclp->z_ops.ace_size(acep);

        if (((caddr_t)acep + ace_size) >
            ((caddr_t)aclnode->z_acldata + aclnode->z_size)) {
            return (NULL);
        }

        *iflags = aclp->z_ops.ace_flags_get(acep);
        *type = aclp->z_ops.ace_type_get(acep);
        *access_mask = aclp->z_ops.ace_mask_get(acep);
        *who = aclp->z_ops.ace_who_get(acep);
        aclp->z_next_ace = (caddr_t)aclp->z_next_ace + ace_size;
        aclnode->z_ace_idx++;
        return ((void *)acep);
    }
    return (NULL);
}

/*ARGSUSED*/
static uint64_t
zfs_ace_walk(void *datap, uint64_t cookie, int aclcnt,
    uint16_t *flags, uint16_t *type, uint32_t *mask)
{
    zfs_acl_t *aclp = datap;
    zfs_ace_hdr_t *acep = (zfs_ace_hdr_t *)(uintptr_t)cookie;
    uint64_t who;

    acep = zfs_acl_next_ace(aclp, acep, &who, mask,
        flags, type);
    return ((uint64_t)(uintptr_t)acep);
}

static zfs_acl_node_t *
zfs_acl_curr_node(zfs_acl_t *aclp)
{
    ASSERT(aclp->z_curr_node);
    return (aclp->z_curr_node);
}

/*
 * Copy ACE to internal ZFS format.
 * While processing the ACL each ACE will be validated for correctness.
 * ACE FUIDs will be created later.
 */
int
zfs_copy_ace_2_fuid(zfsvfs_t *zfsvfs, vtype_t obj_type, zfs_acl_t *aclp,
    void *datap, zfs_ace_t *z_acl, int aclcnt, size_t *size,
    zfs_fuid_info_t **fuidp, cred_t *cr)
{
    int i;
    uint16_t entry_type;
    zfs_ace_t *aceptr = z_acl;
    ace_t *acep = datap;
    zfs_object_ace_t *zobjacep;
    ace_object_t *aceobjp;

    for (i = 0; i != aclcnt; i++) {
        aceptr->z_hdr.z_access_mask = acep->a_access_mask;
        aceptr->z_hdr.z_flags = acep->a_flags;
        aceptr->z_hdr.z_type = acep->a_type;
        entry_type = aceptr->z_hdr.z_flags & ACE_TYPE_FLAGS;
        if (entry_type != ACE_OWNER && entry_type != OWNING_GROUP &&
            entry_type != ACE_EVERYONE) {
            aceptr->z_fuid = zfs_fuid_create(zfsvfs, acep->a_who,
                cr, (entry_type == 0) ?
                ZFS_ACE_USER : ZFS_ACE_GROUP, fuidp);
        }

        /*
         * Make sure ACE is valid
         */
        if (zfs_ace_valid(obj_type, aclp, aceptr->z_hdr.z_type,
            aceptr->z_hdr.z_flags) != B_TRUE)
            return (EINVAL);

        switch (acep->a_type) {
        case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
        case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
        case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
        case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
            zobjacep = (zfs_object_ace_t *)aceptr;
            aceobjp = (ace_object_t *)acep;

            bcopy(aceobjp->a_obj_type, zobjacep->z_object_type,
                sizeof (aceobjp->a_obj_type));
            bcopy(aceobjp->a_inherit_obj_type,
                zobjacep->z_inherit_type,
                sizeof (aceobjp->a_inherit_obj_type));
            acep = (ace_t *)((caddr_t)acep + sizeof (ace_object_t));
            break;
        default:
            acep = (ace_t *)((caddr_t)acep + sizeof (ace_t));
        }

        aceptr = (zfs_ace_t *)((caddr_t)aceptr +
            aclp->z_ops.ace_size(aceptr));
    }

    *size = (caddr_t)aceptr - (caddr_t)z_acl;

    return (0);
}

/*
 * Copy ZFS ACEs to fixed size ace_t layout
 */
static void
zfs_copy_fuid_2_ace(zfsvfs_t *zfsvfs, zfs_acl_t *aclp, cred_t *cr,
    void *datap, int filter)
{
    uint64_t who;
    uint32_t access_mask;
    uint16_t iflags, type;
    zfs_ace_hdr_t *zacep = NULL;
    ace_t *acep = datap;
    ace_object_t *objacep;
    zfs_object_ace_t *zobjacep;
    size_t ace_size;
    uint16_t entry_type;

    while (zacep = zfs_acl_next_ace(aclp, zacep,
        &who, &access_mask, &iflags, &type)) {

        switch (type) {
        case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
        case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
        case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
        case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
            if (filter) {
                continue;
            }
            zobjacep = (zfs_object_ace_t *)zacep;
            objacep = (ace_object_t *)acep;
            bcopy(zobjacep->z_object_type,
                objacep->a_obj_type,
                sizeof (zobjacep->z_object_type));
            bcopy(zobjacep->z_inherit_type,
                objacep->a_inherit_obj_type,
                sizeof (zobjacep->z_inherit_type));
            ace_size = sizeof (ace_object_t);
            break;
        default:
            ace_size = sizeof (ace_t);
            break;
        }

        entry_type = (iflags & ACE_TYPE_FLAGS);
        if ((entry_type != ACE_OWNER &&
            entry_type != OWNING_GROUP &&
            entry_type != ACE_EVERYONE)) {
            acep->a_who = zfs_fuid_map_id(zfsvfs, who,
                cr, (entry_type & ACE_IDENTIFIER_GROUP) ?
                ZFS_ACE_GROUP : ZFS_ACE_USER);
        } else {
            acep->a_who = (uid_t)(int64_t)who;
        }
        acep->a_access_mask = access_mask;
        acep->a_flags = iflags;
        acep->a_type = type;
        acep = (ace_t *)((caddr_t)acep + ace_size);
    }
}

static int
zfs_copy_ace_2_oldace(vtype_t obj_type, zfs_acl_t *aclp, ace_t *acep,
    zfs_oldace_t *z_acl, int aclcnt, size_t *size)
{
    int i;
    zfs_oldace_t *aceptr = z_acl;

    for (i = 0; i != aclcnt; i++, aceptr++) {
        aceptr->z_access_mask = acep[i].a_access_mask;
        aceptr->z_type = acep[i].a_type;
        aceptr->z_flags = acep[i].a_flags;
        aceptr->z_fuid = acep[i].a_who;
        /*
         * Make sure ACE is valid
         */
        if (zfs_ace_valid(obj_type, aclp, aceptr->z_type,
            aceptr->z_flags) != B_TRUE)
            return (EINVAL);
    }
    *size = (caddr_t)aceptr - (caddr_t)z_acl;
    return (0);
}

/*
 * convert old ACL format to new
 */
void
zfs_acl_xform(znode_t *zp, zfs_acl_t *aclp, cred_t *cr)
{
    zfs_oldace_t *oldaclp;
    int i;
    uint16_t type, iflags;
    uint32_t access_mask;
    uint64_t who;
    void *cookie = NULL;
    zfs_acl_node_t *newaclnode;

    ASSERT(aclp->z_version == ZFS_ACL_VERSION_INITIAL);
    /*
     * First create the ACE in a contiguous piece of memory
     * for zfs_copy_ace_2_fuid().
     *
     * We only convert an ACL once, so this won't happen
     * every time.
     */
    oldaclp = kmem_alloc(sizeof (zfs_oldace_t) * aclp->z_acl_count,
        KM_SLEEP);
    i = 0;
    while (cookie = zfs_acl_next_ace(aclp, cookie, &who,
        &access_mask, &iflags, &type)) {
        oldaclp[i].z_flags = iflags;
        oldaclp[i].z_type = type;
        oldaclp[i].z_fuid = who;
        oldaclp[i++].z_access_mask = access_mask;
    }

    newaclnode = zfs_acl_node_alloc(aclp->z_acl_count *
        sizeof (zfs_object_ace_t));
    aclp->z_ops = zfs_acl_fuid_ops;
    VERIFY(zfs_copy_ace_2_fuid(zp->z_zfsvfs, ZTOV(zp)->v_type, aclp,
        oldaclp, newaclnode->z_acldata, aclp->z_acl_count,
        &newaclnode->z_size, NULL, cr) == 0);
    newaclnode->z_ace_count = aclp->z_acl_count;
    aclp->z_version = ZFS_ACL_VERSION;
    kmem_free(oldaclp, aclp->z_acl_count * sizeof (zfs_oldace_t));

    /*
     * Release all previous ACL nodes
     */

    zfs_acl_release_nodes(aclp);

    list_insert_head(&aclp->z_acl, newaclnode);

    aclp->z_acl_bytes = newaclnode->z_size;
    aclp->z_acl_count = newaclnode->z_ace_count;
}

/*
 * Convert unix access mask to v4 access mask
 */
static uint32_t
zfs_unix_to_v4(uint32_t access_mask)
{
    uint32_t new_mask = 0;

    if (access_mask & S_IXOTH)
        new_mask |= ACE_EXECUTE;
    if (access_mask & S_IWOTH)
        new_mask |= ACE_WRITE_DATA;
    if (access_mask & S_IROTH)
        new_mask |= ACE_READ_DATA;
    return (new_mask);
}

static void
zfs_set_ace(zfs_acl_t *aclp, void *acep, uint32_t access_mask,
    uint16_t access_type, uint64_t fuid, uint16_t entry_type)
{
    uint16_t type = entry_type & ACE_TYPE_FLAGS;

    aclp->z_ops.ace_mask_set(acep, access_mask);
    aclp->z_ops.ace_type_set(acep, access_type);
    aclp->z_ops.ace_flags_set(acep, entry_type);
    if ((type != ACE_OWNER && type != OWNING_GROUP &&
        type != ACE_EVERYONE))
        aclp->z_ops.ace_who_set(acep, fuid);
}

/*
 * Determine mode of file based on ACL.
 * Also, create FUIDs for any User/Group ACEs
 */
static uint64_t
zfs_mode_compute(znode_t *zp, zfs_acl_t *aclp)
{
    int entry_type;
    mode_t mode;
    mode_t seen = 0;
    zfs_ace_hdr_t *acep = NULL;
    uint64_t who;
    uint16_t iflags, type;
    uint32_t access_mask;
    boolean_t an_exec_denied = B_FALSE;

    mode = (zp->z_phys->zp_mode & (S_IFMT | S_ISUID | S_ISGID | S_ISVTX));

    while (acep = zfs_acl_next_ace(aclp, acep, &who,
        &access_mask, &iflags, &type)) {

        if (!zfs_acl_valid_ace_type(type, iflags))
            continue;

        entry_type = (iflags & ACE_TYPE_FLAGS);

        /*
         * Skip over owner@, group@ or everyone@ inherit only ACEs
         */
        if ((iflags & ACE_INHERIT_ONLY_ACE) &&
            (entry_type == ACE_OWNER || entry_type == ACE_EVERYONE ||
            entry_type == OWNING_GROUP))
            continue;

        if (entry_type == ACE_OWNER) {
            if ((access_mask & ACE_READ_DATA) &&
                (!(seen & S_IRUSR))) {
                seen |= S_IRUSR;
                if (type == ALLOW) {
                    mode |= S_IRUSR;
                }
            }
            if ((access_mask & ACE_WRITE_DATA) &&
                (!(seen & S_IWUSR))) {
                seen |= S_IWUSR;
                if (type == ALLOW) {
                    mode |= S_IWUSR;
                }
            }
            if ((access_mask & ACE_EXECUTE) &&
                (!(seen & S_IXUSR))) {
                seen |= S_IXUSR;
                if (type == ALLOW) {
                    mode |= S_IXUSR;
                }
            }
        } else if (entry_type == OWNING_GROUP) {
            if ((access_mask & ACE_READ_DATA) &&
                (!(seen & S_IRGRP))) {
                seen |= S_IRGRP;
                if (type == ALLOW) {
                    mode |= S_IRGRP;
                }
            }
            if ((access_mask & ACE_WRITE_DATA) &&
                (!(seen & S_IWGRP))) {
                seen |= S_IWGRP;
                if (type == ALLOW) {
                    mode |= S_IWGRP;
                }
            }
            if ((access_mask & ACE_EXECUTE) &&
                (!(seen & S_IXGRP))) {
                seen |= S_IXGRP;
                if (type == ALLOW) {
                    mode |= S_IXGRP;
                }
            }
        } else if (entry_type == ACE_EVERYONE) {
            if ((access_mask & ACE_READ_DATA)) {
                if (!(seen & S_IRUSR)) {
                    seen |= S_IRUSR;
                    if (type == ALLOW) {
                        mode |= S_IRUSR;
                    }
                }
                if (!(seen & S_IRGRP)) {
                    seen |= S_IRGRP;
                    if (type == ALLOW) {
                        mode |= S_IRGRP;
                    }
                }
                if (!(seen & S_IROTH)) {
                    seen |= S_IROTH;
                    if (type == ALLOW) {
                        mode |= S_IROTH;
                    }
                }
            }
            if ((access_mask & ACE_WRITE_DATA)) {
                if (!(seen & S_IWUSR)) {
                    seen |= S_IWUSR;
                    if (type == ALLOW) {
                        mode |= S_IWUSR;
                    }
                }
                if (!(seen & S_IWGRP)) {
                    seen |= S_IWGRP;
                    if (type == ALLOW) {
                        mode |= S_IWGRP;
                    }
                }
                if (!(seen & S_IWOTH)) {
                    seen |= S_IWOTH;
                    if (type == ALLOW) {
                        mode |= S_IWOTH;
                    }
                }
            }
            if ((access_mask & ACE_EXECUTE)) {
                if (!(seen & S_IXUSR)) {
                    seen |= S_IXUSR;
                    if (type == ALLOW) {
                        mode |= S_IXUSR;
                    }
                }
                if (!(seen & S_IXGRP)) {
                    seen |= S_IXGRP;
                    if (type == ALLOW) {
                        mode |= S_IXGRP;
                    }
                }
                if (!(seen & S_IXOTH)) {
                    seen |= S_IXOTH;
                    if (type == ALLOW) {
                        mode |= S_IXOTH;
                    }
                }
            }
        } else {
            /*
             * Only care if this IDENTIFIER_GROUP or
             * USER ACE denies execute access to someone,
             * mode is not affected
             */
            if ((access_mask & ACE_EXECUTE) && type == DENY)
                an_exec_denied = B_TRUE;
        }
    }

    /*
     * Failure to allow is effectively a deny, so execute permission
     * is denied if it was never mentioned or if we explicitly
     * weren't allowed it.
     */
    if (!an_exec_denied &&
        ((seen & ALL_MODE_EXECS) != ALL_MODE_EXECS ||
        (mode & ALL_MODE_EXECS) != ALL_MODE_EXECS))
        an_exec_denied = B_TRUE;

    if (an_exec_denied)
        zp->z_phys->zp_flags &= ~ZFS_NO_EXECS_DENIED;
    else
        zp->z_phys->zp_flags |= ZFS_NO_EXECS_DENIED;

    return (mode);
}
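
/*
 * Construct an in-core ACL from the ACL data embedded in the znode
 * itself (as opposed to an external ACL object).
 */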
static zfs_acl_t *
zfs_acl_node_read_internal(znode_t *zp, boolean_t will_modify)
{
    zfs_acl_t *aclp;
    zfs_acl_node_t *aclnode;

    aclp = zfs_acl_alloc(zp->z_phys->zp_acl.z_acl_version);

    /*
     * Version 0 to 1 znode_acl_phys has the size/count fields swapped.
     * Version 0 didn't have a size field, only a count.
     */
    if (zp->z_phys->zp_acl.z_acl_version == ZFS_ACL_VERSION_INITIAL) {
        aclp->z_acl_count = zp->z_phys->zp_acl.z_acl_size;
        aclp->z_acl_bytes = ZFS_ACL_SIZE(aclp->z_acl_count);
    } else {
        aclp->z_acl_count = zp->z_phys->zp_acl.z_acl_count;
        aclp->z_acl_bytes = zp->z_phys->zp_acl.z_acl_size;
    }

    aclnode = zfs_acl_node_alloc(will_modify ? aclp->z_acl_bytes : 0);
    aclnode->z_ace_count = aclp->z_acl_count;
    if (will_modify) {
        bcopy(zp->z_phys->zp_acl.z_ace_data, aclnode->z_acldata,
            aclp->z_acl_bytes);
    } else {
        aclnode->z_size = aclp->z_acl_bytes;
        aclnode->z_acldata = &zp->z_phys->zp_acl.z_ace_data[0];
    }

    list_insert_head(&aclp->z_acl, aclnode);

    return (aclp);
}

/*
 * Read an external acl object.  If the intent is to modify, always
 * create a new acl and leave any cached acl in place.
 */
static int
zfs_acl_node_read(znode_t *zp, zfs_acl_t **aclpp, boolean_t will_modify)
{
    uint64_t extacl = zp->z_phys->zp_acl.z_acl_extern_obj;
    zfs_acl_t *aclp;
    size_t aclsize;
    size_t acl_count;
    zfs_acl_node_t *aclnode;
    int error;

    ASSERT(MUTEX_HELD(&zp->z_acl_lock));

    if (zp->z_acl_cached && !will_modify) {
        *aclpp = zp->z_acl_cached;
        return (0);
    }

    if (zp->z_phys->zp_acl.z_acl_extern_obj == 0) {
        *aclpp = zfs_acl_node_read_internal(zp, will_modify);
        if (!will_modify)
            zp->z_acl_cached = *aclpp;
        return (0);
    }

    aclp = zfs_acl_alloc(zp->z_phys->zp_acl.z_acl_version);
    if (zp->z_phys->zp_acl.z_acl_version == ZFS_ACL_VERSION_INITIAL) {
        zfs_acl_phys_v0_t *zacl0 =
            (zfs_acl_phys_v0_t *)&zp->z_phys->zp_acl;

        aclsize = ZFS_ACL_SIZE(zacl0->z_acl_count);
        acl_count = zacl0->z_acl_count;
    } else {
        aclsize = zp->z_phys->zp_acl.z_acl_size;
        acl_count = zp->z_phys->zp_acl.z_acl_count;
        if (aclsize == 0)
            aclsize = acl_count * sizeof (zfs_ace_t);
    }
    aclnode = zfs_acl_node_alloc(aclsize);
    list_insert_head(&aclp->z_acl, aclnode);
    error = dmu_read(zp->z_zfsvfs->z_os, extacl, 0,
        aclsize, aclnode->z_acldata, DMU_READ_PREFETCH);
    aclnode->z_ace_count = acl_count;
    aclp->z_acl_count = acl_count;
    aclp->z_acl_bytes = aclsize;

    if (error != 0) {
        zfs_acl_free(aclp);
        /* convert checksum errors into IO errors */
        if (error == ECKSUM)
            error = EIO;
        return (error);
    }

    *aclpp = aclp;
    if (!will_modify)
        zp->z_acl_cached = aclp;
    return (0);
}

/*
 * common code for setting ACLs.
 *
 * This function is called from zfs_mode_update, zfs_perm_init, and zfs_setacl.
 */
int
zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx)
{
    int error;
    znode_phys_t *zphys = zp->z_phys;
    zfs_acl_phys_t *zacl = &zphys->zp_acl;
    zfsvfs_t *zfsvfs = zp->z_zfsvfs;
    uint64_t aoid = zphys->zp_acl.z_acl_extern_obj;
    uint64_t off = 0;
    dmu_object_type_t otype;
    zfs_acl_node_t *aclnode;

    dmu_buf_will_dirty(zp->z_dbuf, tx);

    if (zp->z_acl_cached) {
        zfs_acl_free(zp->z_acl_cached);
        zp->z_acl_cached = NULL;
    }

    zphys->zp_mode = zfs_mode_compute(zp, aclp);

    /*
     * Decide which object type to use.  If we are forced to
     * use old ACL format then transform ACL into zfs_oldace_t
     * layout.
     */
    if (!zfsvfs->z_use_fuids) {
        otype = DMU_OT_OLDACL;
    } else {
        if ((aclp->z_version == ZFS_ACL_VERSION_INITIAL) &&
            (zfsvfs->z_version >= ZPL_VERSION_FUID))
            zfs_acl_xform(zp, aclp, cr);
        ASSERT(aclp->z_version >= ZFS_ACL_VERSION_FUID);
        otype = DMU_OT_ACL;
    }

    if (aclp->z_acl_bytes > ZFS_ACE_SPACE) {
        /*
         * If ACL was previously external and we are now
         * converting to new ACL format then release old
         * ACL object and create a new one.
         */
        if (aoid && aclp->z_version != zacl->z_acl_version) {
            error = dmu_object_free(zfsvfs->z_os,
                zp->z_phys->zp_acl.z_acl_extern_obj, tx);
            if (error)
                return (error);
            aoid = 0;
        }
        if (aoid == 0) {
            aoid = dmu_object_alloc(zfsvfs->z_os,
                otype, aclp->z_acl_bytes,
                otype == DMU_OT_ACL ? DMU_OT_SYSACL : DMU_OT_NONE,
                otype == DMU_OT_ACL ? DN_MAX_BONUSLEN : 0, tx);
        } else {
            (void) dmu_object_set_blocksize(zfsvfs->z_os, aoid,
                aclp->z_acl_bytes, 0, tx);
        }
        zphys->zp_acl.z_acl_extern_obj = aoid;
        for (aclnode = list_head(&aclp->z_acl); aclnode;
            aclnode = list_next(&aclp->z_acl, aclnode)) {
            if (aclnode->z_ace_count == 0)
                continue;
            dmu_write(zfsvfs->z_os, aoid, off,
                aclnode->z_size, aclnode->z_acldata, tx);
            off += aclnode->z_size;
        }
    } else {
        void *start = zacl->z_ace_data;
        /*
         * Migrating back embedded?
         */
        if (zphys->zp_acl.z_acl_extern_obj) {
            error = dmu_object_free(zfsvfs->z_os,
                zp->z_phys->zp_acl.z_acl_extern_obj, tx);
            if (error)
                return (error);
            zphys->zp_acl.z_acl_extern_obj = 0;
        }

        for (aclnode = list_head(&aclp->z_acl); aclnode;
            aclnode = list_next(&aclp->z_acl, aclnode)) {
            if (aclnode->z_ace_count == 0)
                continue;
            bcopy(aclnode->z_acldata, start, aclnode->z_size);
            start = (caddr_t)start + aclnode->z_size;
        }
    }

    /*
     * If Old version then swap count/bytes to match old
     * layout of znode_acl_phys_t.
     */
    if (aclp->z_version == ZFS_ACL_VERSION_INITIAL) {
        zphys->zp_acl.z_acl_size = aclp->z_acl_count;
        zphys->zp_acl.z_acl_count = aclp->z_acl_bytes;
    } else {
        zphys->zp_acl.z_acl_size = aclp->z_acl_bytes;
        zphys->zp_acl.z_acl_count = aclp->z_acl_count;
    }

    zphys->zp_acl.z_acl_version = aclp->z_version;

    /*
     * Replace ACL wide bits, but first clear them.
     */
    zp->z_phys->zp_flags &= ~ZFS_ACL_WIDE_FLAGS;

    zp->z_phys->zp_flags |= aclp->z_hints;

    if (ace_trivial_common(aclp, 0, zfs_ace_walk) == 0)
        zp->z_phys->zp_flags |= ZFS_ACL_TRIVIAL;

    return (0);
}

/*
 * Update access mask for prepended ACE
 *
 * This applies the "groupmask" value for aclmode property.
 */
static void
zfs_acl_prepend_fixup(zfs_acl_t *aclp, void *acep, void *origacep,
    mode_t mode, uint64_t owner)
{
    int rmask, wmask, xmask;
    int user_ace;
    uint16_t aceflags;
    uint32_t origmask, acepmask;
    uint64_t fuid;

    aceflags = aclp->z_ops.ace_flags_get(acep);
    fuid = aclp->z_ops.ace_who_get(acep);
    origmask = aclp->z_ops.ace_mask_get(origacep);
    acepmask = aclp->z_ops.ace_mask_get(acep);

    user_ace = (!(aceflags &
        (ACE_OWNER|ACE_GROUP|ACE_IDENTIFIER_GROUP)));

    if (user_ace && (fuid == owner)) {
        rmask = S_IRUSR;
        wmask = S_IWUSR;
        xmask = S_IXUSR;
    } else {
        rmask = S_IRGRP;
        wmask = S_IWGRP;
        xmask = S_IXGRP;
    }

    if (origmask & ACE_READ_DATA) {
        if (mode & rmask) {
            acepmask &= ~ACE_READ_DATA;
        } else {
            acepmask |= ACE_READ_DATA;
        }
    }

    if (origmask & ACE_WRITE_DATA) {
        if (mode & wmask) {
            acepmask &= ~ACE_WRITE_DATA;
        } else {
            acepmask |= ACE_WRITE_DATA;
        }
    }

    if (origmask & ACE_APPEND_DATA) {
        if (mode & wmask) {
            acepmask &= ~ACE_APPEND_DATA;
        } else {
            acepmask |= ACE_APPEND_DATA;
        }
    }

    if (origmask & ACE_EXECUTE) {
        if (mode & xmask) {
            acepmask &= ~ACE_EXECUTE;
        } else {
            acepmask |= ACE_EXECUTE;
        }
    }
    aclp->z_ops.ace_mask_set(acep, acepmask);
}

/*
 * Apply mode to canonical six ACEs.
 */
static void
zfs_acl_fixup_canonical_six(zfs_acl_t *aclp, mode_t mode)
{
    zfs_acl_node_t *aclnode = list_tail(&aclp->z_acl);
    void *acep;
    int maskoff = aclp->z_ops.ace_mask_off();
    size_t abstract_size = aclp->z_ops.ace_abstract_size();

    ASSERT(aclnode != NULL);

    acep = (void *)((caddr_t)aclnode->z_acldata +
        aclnode->z_size - (abstract_size * 6));

    /*
     * Fixup final ACEs to match the mode
     */

    adjust_ace_pair_common(acep, maskoff, abstract_size,
        (mode & 0700) >> 6);	/* owner@ */

    acep = (caddr_t)acep + (abstract_size * 2);

    adjust_ace_pair_common(acep, maskoff, abstract_size,
        (mode & 0070) >> 3);	/* group@ */

    acep = (caddr_t)acep + (abstract_size * 2);
    adjust_ace_pair_common(acep, maskoff,
        abstract_size, mode);	/* everyone@ */
}
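
/*
 * Does the ACE match the given type, entry type and access mask exactly?
 */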
static int
zfs_acl_ace_match(zfs_acl_t *aclp, void *acep, int allow_deny,
    int entry_type, int accessmask)
{
    uint32_t mask = aclp->z_ops.ace_mask_get(acep);
    uint16_t type = aclp->z_ops.ace_type_get(acep);
    uint16_t flags = aclp->z_ops.ace_flags_get(acep);

    return (mask == accessmask && type == allow_deny &&
        ((flags & ACE_TYPE_FLAGS) == entry_type));
}

/*
 * Can prepended ACE be reused?
 */
static int
zfs_reuse_deny(zfs_acl_t *aclp, void *acep, void *prevacep)
{
    int okay_masks;
    uint16_t prevtype;
    uint16_t prevflags;
    uint16_t flags;
    uint32_t mask, prevmask;

    if (prevacep == NULL)
        return (B_FALSE);

    prevtype = aclp->z_ops.ace_type_get(prevacep);
    prevflags = aclp->z_ops.ace_flags_get(prevacep);
    flags = aclp->z_ops.ace_flags_get(acep);
    mask = aclp->z_ops.ace_mask_get(acep);
    prevmask = aclp->z_ops.ace_mask_get(prevacep);

    if (prevtype != DENY)
        return (B_FALSE);

    if (prevflags != (flags & ACE_IDENTIFIER_GROUP))
        return (B_FALSE);

    okay_masks = (mask & OKAY_MASK_BITS);

    if (prevmask & ~okay_masks)
        return (B_FALSE);

    return (B_TRUE);
}

/*
 * Insert new ACL node into chain of zfs_acl_node_t's
 *
 * There are two possible outcomes.
 * 1. If the ACL is currently just a single zfs_acl_node and
 *    we are prepending the entry, then the current acl node will have
 *    a new node inserted above it.
 *
 * 2. If we are inserting in the middle of the current acl node, then
 *    the current node will be split in two and the new node will be
 *    inserted in between the two split nodes.
 */
static zfs_acl_node_t *
zfs_acl_ace_insert(zfs_acl_t *aclp, void *acep)
{
    zfs_acl_node_t *newnode;
    zfs_acl_node_t *trailernode = NULL;
    zfs_acl_node_t *currnode = zfs_acl_curr_node(aclp);
    int curr_idx = aclp->z_curr_node->z_ace_idx;
    int trailer_count;
    size_t oldsize;

    newnode = zfs_acl_node_alloc(aclp->z_ops.ace_size(acep));
    newnode->z_ace_count = 1;

    oldsize = currnode->z_size;

    if (curr_idx != 1) {
        trailernode = zfs_acl_node_alloc(0);
        trailernode->z_acldata = acep;

        trailer_count = currnode->z_ace_count - curr_idx + 1;
        currnode->z_ace_count = curr_idx - 1;
        currnode->z_size = (caddr_t)acep - (caddr_t)currnode->z_acldata;
        trailernode->z_size = oldsize - currnode->z_size;
        trailernode->z_ace_count = trailer_count;
    }

    aclp->z_acl_count += 1;
    aclp->z_acl_bytes += aclp->z_ops.ace_size(acep);

    if (curr_idx == 1)
        list_insert_before(&aclp->z_acl, currnode, newnode);
    else
        list_insert_after(&aclp->z_acl, currnode, newnode);
    if (trailernode) {
        list_insert_after(&aclp->z_acl, newnode, trailernode);
        aclp->z_curr_node = trailernode;
        trailernode->z_ace_idx = 1;
    }

    return (newnode);
}

/*
 * Prepend deny ACE
 */
static void *
zfs_acl_prepend_deny(uint64_t uid, zfs_acl_t *aclp, void *acep,
    mode_t mode)
{
    zfs_acl_node_t *aclnode;
    void *newacep;
    uint64_t fuid;
    uint16_t flags;

    aclnode = zfs_acl_ace_insert(aclp, acep);
    newacep = aclnode->z_acldata;
    fuid = aclp->z_ops.ace_who_get(acep);
    flags = aclp->z_ops.ace_flags_get(acep);
    zfs_set_ace(aclp, newacep, 0, DENY, fuid, (flags & ACE_TYPE_FLAGS));
    zfs_acl_prepend_fixup(aclp, newacep, acep, mode, uid);

    return (newacep);
}

/*
 * Split an inherited ACE into inherit_only ACE
 * and original ACE with inheritance flags stripped off.
 */
static void
zfs_acl_split_ace(zfs_acl_t *aclp, zfs_ace_hdr_t *acep)
{
    zfs_acl_node_t *aclnode;
    zfs_acl_node_t *currnode;
    void *newacep;
    uint16_t type, flags;
    uint32_t mask;
    uint64_t fuid;

    type = aclp->z_ops.ace_type_get(acep);
    flags = aclp->z_ops.ace_flags_get(acep);
    mask = aclp->z_ops.ace_mask_get(acep);
    fuid = aclp->z_ops.ace_who_get(acep);

    aclnode = zfs_acl_ace_insert(aclp, acep);
    newacep = aclnode->z_acldata;

    aclp->z_ops.ace_type_set(newacep, type);
    aclp->z_ops.ace_flags_set(newacep, flags | ACE_INHERIT_ONLY_ACE);
    aclp->z_ops.ace_mask_set(newacep, mask);
    aclp->z_ops.ace_type_set(newacep, type);
    aclp->z_ops.ace_who_set(newacep, fuid);
    aclp->z_next_ace = acep;
    flags &= ~ALL_INHERIT;
    aclp->z_ops.ace_flags_set(acep, flags);
    currnode = zfs_acl_curr_node(aclp);
    ASSERT(currnode->z_ace_idx >= 1);
    currnode->z_ace_idx -= 1;
}

/*
 * Are the last six ACEs in the ACL the canonical six ACEs?
 */
static int
zfs_have_canonical_six(zfs_acl_t *aclp)
{
    void *acep;
    zfs_acl_node_t *aclnode = list_tail(&aclp->z_acl);
    int i = 0;
    size_t abstract_size = aclp->z_ops.ace_abstract_size();

    ASSERT(aclnode != NULL);

    if (aclnode->z_ace_count < 6)
        return (0);

    acep = (void *)((caddr_t)aclnode->z_acldata +
        aclnode->z_size - (aclp->z_ops.ace_abstract_size() * 6));

    if ((zfs_acl_ace_match(aclp, (caddr_t)acep + (abstract_size * i++),
        DENY, ACE_OWNER, 0) &&
        zfs_acl_ace_match(aclp, (caddr_t)acep + (abstract_size * i++),
        ALLOW, ACE_OWNER, OWNER_ALLOW_MASK) &&
        zfs_acl_ace_match(aclp, (caddr_t)acep + (abstract_size * i++), DENY,
        OWNING_GROUP, 0) && zfs_acl_ace_match(aclp, (caddr_t)acep +
        (abstract_size * i++),
        ALLOW, OWNING_GROUP, 0) &&
        zfs_acl_ace_match(aclp, (caddr_t)acep + (abstract_size * i++),
        DENY, ACE_EVERYONE, EVERYONE_DENY_MASK) &&
        zfs_acl_ace_match(aclp, (caddr_t)acep + (abstract_size * i++),
        ALLOW, ACE_EVERYONE, EVERYONE_ALLOW_MASK))) {
        return (1);
    } else {
        return (0);
    }
}

/*
 * Apply step 1g to group entries
 *
 * Need to deal with corner case where group may have
 * greater permissions than owner.  If so then limit
 * group permissions, based on what extra permissions
 * group has.
 */
static void
zfs_fixup_group_entries(zfs_acl_t *aclp, void *acep, void *prevacep,
    mode_t mode)
{
    uint32_t prevmask = aclp->z_ops.ace_mask_get(prevacep);
    uint32_t mask = aclp->z_ops.ace_mask_get(acep);
    uint16_t prevflags = aclp->z_ops.ace_flags_get(prevacep);
    mode_t extramode = (mode >> 3) & 07;
    mode_t ownermode = (mode >> 6);

    if (prevflags & ACE_IDENTIFIER_GROUP) {

        extramode &= ~ownermode;

        if (extramode) {
            if (extramode & S_IROTH) {
                prevmask &= ~ACE_READ_DATA;
                mask &= ~ACE_READ_DATA;
            }
            if (extramode & S_IWOTH) {
                prevmask &= ~(ACE_WRITE_DATA|ACE_APPEND_DATA);
                mask &= ~(ACE_WRITE_DATA|ACE_APPEND_DATA);
            }
            if (extramode & S_IXOTH) {
                prevmask &= ~ACE_EXECUTE;
                mask &= ~ACE_EXECUTE;
            }
        }
    }
    aclp->z_ops.ace_mask_set(acep, mask);
    aclp->z_ops.ace_mask_set(prevacep, prevmask);
}

/*
 * Apply the chmod algorithm as described
 * in PSARC/2002/240
 */
static void
zfs_acl_chmod(zfsvfs_t *zfsvfs, uint64_t uid,
    uint64_t mode, zfs_acl_t *aclp)
{
    void *acep = NULL, *prevacep = NULL;
    uint64_t who;
    int i;
    int entry_type;
    int reuse_deny;
    int need_canonical_six = 1;
    uint16_t iflags, type;
    uint32_t access_mask;

    /*
     * If discard then just discard all ACL nodes which
     * represent the ACEs.
     *
     * New owner@/group@/everyone@ ACEs will be added
     * later.
     */
    if (zfsvfs->z_acl_mode == ZFS_ACL_DISCARD)
        zfs_acl_release_nodes(aclp);

    while (acep = zfs_acl_next_ace(aclp, acep, &who, &access_mask,
        &iflags, &type)) {

        entry_type = (iflags & ACE_TYPE_FLAGS);
        iflags = (iflags & ALL_INHERIT);

        if ((type != ALLOW && type != DENY) ||
            (iflags & ACE_INHERIT_ONLY_ACE)) {
            if (iflags)
                aclp->z_hints |= ZFS_INHERIT_ACE;
            switch (type) {
            case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
            case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
            case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
            case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
                aclp->z_hints |= ZFS_ACL_OBJ_ACE;
                break;
            }
            goto nextace;
        }

        /*
         * Need to split ace into two?
         */
        if ((iflags & (ACE_FILE_INHERIT_ACE|
            ACE_DIRECTORY_INHERIT_ACE)) &&
            (!(iflags & ACE_INHERIT_ONLY_ACE))) {
            zfs_acl_split_ace(aclp, acep);
            aclp->z_hints |= ZFS_INHERIT_ACE;
            goto nextace;
        }

        if (entry_type == ACE_OWNER || entry_type == ACE_EVERYONE ||
            (entry_type == OWNING_GROUP)) {
            access_mask &= ~OGE_CLEAR;
            aclp->z_ops.ace_mask_set(acep, access_mask);
            goto nextace;
        } else {
            reuse_deny = B_TRUE;
            if (type == ALLOW) {

                /*
                 * Check preceding ACE if any, to see
                 * if we need to prepend a DENY ACE.
                 * This is only applicable when the acl_mode
                 * property == groupmask.
                 */
                if (zfsvfs->z_acl_mode == ZFS_ACL_GROUPMASK) {

                    reuse_deny = zfs_reuse_deny(aclp, acep,
                        prevacep);

                    if (!reuse_deny) {
                        prevacep =
                            zfs_acl_prepend_deny(uid,
                            aclp, acep, mode);
                    } else {
                        zfs_acl_prepend_fixup(
                            aclp, prevacep,
                            acep, mode, uid);
                    }
                    zfs_fixup_group_entries(aclp, acep,
                        prevacep, mode);
                }
            }
        }
nextace:
        prevacep = acep;
    }

    /*
     * Check out last six aces, if we have six.
     */

    if (aclp->z_acl_count >= 6) {
        if (zfs_have_canonical_six(aclp)) {
            need_canonical_six = 0;
        }
    }

    if (need_canonical_six) {
        size_t abstract_size = aclp->z_ops.ace_abstract_size();
        void *zacep;
        zfs_acl_node_t *aclnode =
            zfs_acl_node_alloc(abstract_size * 6);

        aclnode->z_size = abstract_size * 6;
        aclnode->z_ace_count = 6;
        aclp->z_acl_bytes += aclnode->z_size;
        list_insert_tail(&aclp->z_acl, aclnode);

        zacep = aclnode->z_acldata;

        i = 0;
        zfs_set_ace(aclp, (caddr_t)zacep + (abstract_size * i++),
            0, DENY, -1, ACE_OWNER);
        zfs_set_ace(aclp, (caddr_t)zacep + (abstract_size * i++),
            OWNER_ALLOW_MASK, ALLOW, -1, ACE_OWNER);
        zfs_set_ace(aclp, (caddr_t)zacep + (abstract_size * i++), 0,
            DENY, -1, OWNING_GROUP);
        zfs_set_ace(aclp, (caddr_t)zacep + (abstract_size * i++), 0,
            ALLOW, -1, OWNING_GROUP);
        zfs_set_ace(aclp, (caddr_t)zacep + (abstract_size * i++),
            EVERYONE_DENY_MASK, DENY, -1, ACE_EVERYONE);
        zfs_set_ace(aclp, (caddr_t)zacep + (abstract_size * i++),
            EVERYONE_ALLOW_MASK, ALLOW, -1, ACE_EVERYONE);
        aclp->z_acl_count += 6;
    }

    zfs_acl_fixup_canonical_six(aclp, mode);
}
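
/*
 * Read the znode's ACL for update, apply zfs_acl_chmod() for the new
 * mode, and return the resulting ACL to the caller in *aclp.
 */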
int
zfs_acl_chmod_setattr(znode_t *zp, zfs_acl_t **aclp, uint64_t mode)
{
    int error;

    mutex_enter(&zp->z_lock);
    mutex_enter(&zp->z_acl_lock);
    *aclp = NULL;
    error = zfs_acl_node_read(zp, aclp, B_TRUE);
    if (error == 0) {
        (*aclp)->z_hints = zp->z_phys->zp_flags & V4_ACL_WIDE_FLAGS;
        zfs_acl_chmod(zp->z_zfsvfs, zp->z_phys->zp_uid, mode, *aclp);
    }
    mutex_exit(&zp->z_acl_lock);
    mutex_exit(&zp->z_lock);
    return (error);
}

/*
 * strip off write_owner and write_acl
 */
static void
zfs_restricted_update(zfsvfs_t *zfsvfs, zfs_acl_t *aclp, void *acep)
{
    uint32_t mask = aclp->z_ops.ace_mask_get(acep);

    if ((zfsvfs->z_acl_inherit == ZFS_ACL_RESTRICTED) &&
        (aclp->z_ops.ace_type_get(acep) == ALLOW)) {
        mask &= ~RESTRICTED_CLEAR;
        aclp->z_ops.ace_mask_set(acep, mask);
    }
}

/*
 * Should ACE be inherited?
 */
static int
zfs_ace_can_use(vtype_t vtype, uint16_t acep_flags)
{
    int iflags = (acep_flags & 0xf);

    if ((vtype == VDIR) && (iflags & ACE_DIRECTORY_INHERIT_ACE))
        return (1);
    else if (iflags & ACE_FILE_INHERIT_ACE)
        return (!((vtype == VDIR) &&
            (iflags & ACE_NO_PROPAGATE_INHERIT_ACE)));
    return (0);
}

/*
 * inherit inheritable ACEs from parent
 */
static zfs_acl_t *
zfs_acl_inherit(zfsvfs_t *zfsvfs, vtype_t vtype, zfs_acl_t *paclp,
    uint64_t mode, boolean_t *need_chmod)
{
    void *pacep;
    void *acep, *acep2;
    zfs_acl_node_t *aclnode, *aclnode2;
    zfs_acl_t *aclp = NULL;
    uint64_t who;
    uint32_t access_mask;
    uint16_t iflags, newflags, type;
    size_t ace_size;
    void *data1, *data2;
    size_t data1sz, data2sz;
    boolean_t vdir = vtype == VDIR;
    boolean_t vreg = vtype == VREG;
    boolean_t passthrough, passthrough_x, noallow;

    passthrough_x =
        zfsvfs->z_acl_inherit == ZFS_ACL_PASSTHROUGH_X;
    passthrough = passthrough_x ||
        zfsvfs->z_acl_inherit == ZFS_ACL_PASSTHROUGH;
    noallow =
        zfsvfs->z_acl_inherit == ZFS_ACL_NOALLOW;

    *need_chmod = B_TRUE;
    pacep = NULL;
    aclp = zfs_acl_alloc(paclp->z_version);
    if (zfsvfs->z_acl_inherit == ZFS_ACL_DISCARD)
        return (aclp);
    while (pacep = zfs_acl_next_ace(paclp, pacep, &who,
        &access_mask, &iflags, &type)) {

        /*
         * don't inherit bogus ACEs
         */
        if (!zfs_acl_valid_ace_type(type, iflags))
            continue;

        if (noallow && type == ALLOW)
            continue;

        ace_size = aclp->z_ops.ace_size(pacep);

        if (!zfs_ace_can_use(vtype, iflags))
            continue;

        /*
         * If owner@, group@, or everyone@ inheritable
         * then zfs_acl_chmod() isn't needed.
         */
        if (passthrough &&
            ((iflags & (ACE_OWNER|ACE_EVERYONE)) ||
            ((iflags & OWNING_GROUP) ==
            OWNING_GROUP)) && (vreg || (vdir && (iflags &
            ACE_DIRECTORY_INHERIT_ACE)))) {
            *need_chmod = B_FALSE;

            if (!vdir && passthrough_x &&
                ((mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0)) {
                access_mask &= ~ACE_EXECUTE;
            }
        }

        aclnode = zfs_acl_node_alloc(ace_size);
        list_insert_tail(&aclp->z_acl, aclnode);
        acep = aclnode->z_acldata;

        zfs_set_ace(aclp, acep, access_mask, type,
            who, iflags|ACE_INHERITED_ACE);

        /*
         * Copy special opaque data if any
         */
        if ((data1sz = paclp->z_ops.ace_data(pacep, &data1)) != 0) {
            VERIFY((data2sz = aclp->z_ops.ace_data(acep,
                &data2)) == data1sz);
            bcopy(data1, data2, data2sz);
        }
        aclp->z_acl_count++;
        aclnode->z_ace_count++;
        aclp->z_acl_bytes += aclnode->z_size;
        newflags = aclp->z_ops.ace_flags_get(acep);

        if (vdir)
            aclp->z_hints |= ZFS_INHERIT_ACE;

        if ((iflags & ACE_NO_PROPAGATE_INHERIT_ACE) || !vdir) {
            newflags &= ~ALL_INHERIT;
            aclp->z_ops.ace_flags_set(acep,
                newflags|ACE_INHERITED_ACE);
            zfs_restricted_update(zfsvfs, aclp, acep);
            continue;
        }

        ASSERT(vdir);

        newflags = aclp->z_ops.ace_flags_get(acep);
        if ((iflags & (ACE_FILE_INHERIT_ACE |
            ACE_DIRECTORY_INHERIT_ACE)) !=
            ACE_FILE_INHERIT_ACE) {
            aclnode2 = zfs_acl_node_alloc(ace_size);
            list_insert_tail(&aclp->z_acl, aclnode2);
            acep2 = aclnode2->z_acldata;
            zfs_set_ace(aclp, acep2,
                access_mask, type, who,
                iflags|ACE_INHERITED_ACE);
            newflags |= ACE_INHERIT_ONLY_ACE;
            aclp->z_ops.ace_flags_set(acep, newflags);
            newflags &= ~ALL_INHERIT;
            aclp->z_ops.ace_flags_set(acep2,
                newflags|ACE_INHERITED_ACE);

            /*
             * Copy special opaque data if any
             */
            if ((data1sz = aclp->z_ops.ace_data(acep,
                &data1)) != 0) {
                VERIFY((data2sz =
                    aclp->z_ops.ace_data(acep2,
                    &data2)) == data1sz);
                bcopy(data1, data2, data1sz);
            }
            aclp->z_acl_count++;
            aclnode2->z_ace_count++;
            aclp->z_acl_bytes += aclnode->z_size;
            zfs_restricted_update(zfsvfs, aclp, acep2);
        } else {
            newflags |= ACE_INHERIT_ONLY_ACE;
            aclp->z_ops.ace_flags_set(acep,
                newflags|ACE_INHERITED_ACE);
        }
    }
    return (aclp);
}

/*
 * Create file system object initial permissions
 * including inheritable ACEs.
 */
int
zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr,
    vsecattr_t *vsecp, zfs_acl_ids_t *acl_ids)
{
    int error;
    zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
    zfs_acl_t *paclp;
    gid_t gid;
    boolean_t need_chmod = B_TRUE;

    bzero(acl_ids, sizeof (zfs_acl_ids_t));
    acl_ids->z_mode = MAKEIMODE(vap->va_type, vap->va_mode);

    if (vsecp)
        if ((error = zfs_vsec_2_aclp(zfsvfs, vap->va_type, vsecp, cr,
            &acl_ids->z_fuidp, &acl_ids->z_aclp)) != 0)
            return (error);

    /*
     * Determine uid and gid.
     */
    if ((flag & IS_ROOT_NODE) || zfsvfs->z_replay ||
        ((flag & IS_XATTR) && (vap->va_type == VDIR))) {
        acl_ids->z_fuid = zfs_fuid_create(zfsvfs,
            (uint64_t)vap->va_uid, cr,
            ZFS_OWNER, &acl_ids->z_fuidp);
        acl_ids->z_fgid = zfs_fuid_create(zfsvfs,
            (uint64_t)vap->va_gid, cr,
            ZFS_GROUP, &acl_ids->z_fuidp);
        gid = vap->va_gid;
    } else {
        acl_ids->z_fuid = zfs_fuid_create_cred(zfsvfs, ZFS_OWNER,
            cr, &acl_ids->z_fuidp);
        acl_ids->z_fgid = 0;
        if (vap->va_mask & AT_GID) {
            acl_ids->z_fgid = zfs_fuid_create(zfsvfs,
                (uint64_t)vap->va_gid,
                cr, ZFS_GROUP, &acl_ids->z_fuidp);
            gid = vap->va_gid;
            if (acl_ids->z_fgid != dzp->z_phys->zp_gid &&
                !groupmember(vap->va_gid, cr) &&
                secpolicy_vnode_create_gid(cr) != 0)
                acl_ids->z_fgid = 0;
        }
        if (acl_ids->z_fgid == 0) {
            if (dzp->z_phys->zp_mode & S_ISGID) {
                acl_ids->z_fgid = dzp->z_phys->zp_gid;
                gid = zfs_fuid_map_id(zfsvfs, acl_ids->z_fgid,
                    cr, ZFS_GROUP);
            } else {
                acl_ids->z_fgid = zfs_fuid_create_cred(zfsvfs,
                    ZFS_GROUP, cr, &acl_ids->z_fuidp);
                gid = crgetgid(cr);
            }
        }
    }

    /*
     * If we're creating a directory, and the parent directory has the
     * set-GID bit set, set it on the new directory.
     * Otherwise, if the user is neither privileged nor a member of the
     * file's new group, clear the file's set-GID bit.
     */

    if (!(flag & IS_ROOT_NODE) && (dzp->z_phys->zp_mode & S_ISGID) &&
        (vap->va_type == VDIR)) {
        acl_ids->z_mode |= S_ISGID;
    } else {
        if ((acl_ids->z_mode & S_ISGID) &&
            secpolicy_vnode_setids_setgids(cr, gid) != 0)
            acl_ids->z_mode &= ~S_ISGID;
    }

    if (acl_ids->z_aclp == NULL) {
        mutex_enter(&dzp->z_lock);
        if (!(flag & IS_ROOT_NODE) && (ZTOV(dzp)->v_type == VDIR &&
            (dzp->z_phys->zp_flags & ZFS_INHERIT_ACE)) &&
            !(dzp->z_phys->zp_flags & ZFS_XATTR)) {
            mutex_enter(&dzp->z_acl_lock);
            VERIFY(0 == zfs_acl_node_read(dzp, &paclp, B_FALSE));
            mutex_exit(&dzp->z_acl_lock);
            acl_ids->z_aclp = zfs_acl_inherit(zfsvfs,
                vap->va_type, paclp, acl_ids->z_mode, &need_chmod);
        } else {
            acl_ids->z_aclp =
                zfs_acl_alloc(zfs_acl_version_zp(dzp));
        }
        mutex_exit(&dzp->z_lock);
        if (need_chmod) {
            acl_ids->z_aclp->z_hints = (vap->va_type == VDIR) ?
                ZFS_ACL_AUTO_INHERIT : 0;
            zfs_acl_chmod(zfsvfs, acl_ids->z_fuid,
                acl_ids->z_mode, acl_ids->z_aclp);
        }
    }

    return (0);
}

/*
 * Free ACL and fuid_infop, but not the acl_ids structure
 */
void
zfs_acl_ids_free(zfs_acl_ids_t *acl_ids)
{
    if (acl_ids->z_aclp)
        zfs_acl_free(acl_ids->z_aclp);
    if (acl_ids->z_fuidp)
        zfs_fuid_info_free(acl_ids->z_fuidp);
    acl_ids->z_aclp = NULL;
    acl_ids->z_fuidp = NULL;
}
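
/*
 * Would the new object's owner or group put the owning user or group
 * over its quota?
 */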
boolean_t
zfs_acl_ids_overquota(zfsvfs_t *zfsvfs, zfs_acl_ids_t *acl_ids)
{
    return (zfs_usergroup_overquota(zfsvfs, B_FALSE, acl_ids->z_fuid) ||
        zfs_usergroup_overquota(zfsvfs, B_TRUE, acl_ids->z_fgid));
}

/*
 * Retrieve a file's ACL
 */
int
zfs_getacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
{
    zfs_acl_t *aclp;
    ulong_t mask;
    int error;
    int count = 0;
    int largeace = 0;

    mask = vsecp->vsa_mask & (VSA_ACE | VSA_ACECNT |
        VSA_ACE_ACLFLAGS | VSA_ACE_ALLTYPES);

    if (error = zfs_zaccess(zp, ACE_READ_ACL, 0, skipaclchk, cr))
        return (error);

    if (mask == 0)
        return (ENOSYS);

    mutex_enter(&zp->z_acl_lock);

    error = zfs_acl_node_read(zp, &aclp, B_FALSE);
    if (error != 0) {
        mutex_exit(&zp->z_acl_lock);
        return (error);
    }

    /*
     * Scan ACL to determine number of ACEs
     */
    if ((zp->z_phys->zp_flags & ZFS_ACL_OBJ_ACE) &&
        !(mask & VSA_ACE_ALLTYPES)) {
        void *zacep = NULL;
        uint64_t who;
        uint32_t access_mask;
        uint16_t type, iflags;

        while (zacep = zfs_acl_next_ace(aclp, zacep,
            &who, &access_mask, &iflags, &type)) {
            switch (type) {
            case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
            case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
            case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
            case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
                largeace++;
                continue;
            default:
                count++;
            }
        }
        vsecp->vsa_aclcnt = count;
    } else
        count = aclp->z_acl_count;

    if (mask & VSA_ACECNT) {
        vsecp->vsa_aclcnt = count;
    }

    if (mask & VSA_ACE) {
        size_t aclsz;

        aclsz = count * sizeof (ace_t) +
            sizeof (ace_object_t) * largeace;

        vsecp->vsa_aclentp = kmem_alloc(aclsz, KM_SLEEP);
        vsecp->vsa_aclentsz = aclsz;

        if (aclp->z_version == ZFS_ACL_VERSION_FUID)
            zfs_copy_fuid_2_ace(zp->z_zfsvfs, aclp, cr,
                vsecp->vsa_aclentp, !(mask & VSA_ACE_ALLTYPES));
        else {
            zfs_acl_node_t *aclnode;
            void *start = vsecp->vsa_aclentp;

            for (aclnode = list_head(&aclp->z_acl); aclnode;
                aclnode = list_next(&aclp->z_acl, aclnode)) {
                bcopy(aclnode->z_acldata, start,
                    aclnode->z_size);
                start = (caddr_t)start + aclnode->z_size;
            }
            ASSERT((caddr_t)start - (caddr_t)vsecp->vsa_aclentp ==
                aclp->z_acl_bytes);
        }
    }
    if (mask & VSA_ACE_ACLFLAGS) {
        vsecp->vsa_aclflags = 0;
        if (zp->z_phys->zp_flags & ZFS_ACL_DEFAULTED)
            vsecp->vsa_aclflags |= ACL_DEFAULTED;
        if (zp->z_phys->zp_flags & ZFS_ACL_PROTECTED)
            vsecp->vsa_aclflags |= ACL_PROTECTED;
        if (zp->z_phys->zp_flags & ZFS_ACL_AUTO_INHERIT)
            vsecp->vsa_aclflags |= ACL_AUTO_INHERIT;
    }

    mutex_exit(&zp->z_acl_lock);

    return (0);
}
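
/*
 * Convert a vsecattr_t ACL into an in-core zfs_acl_t.
 */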
int
zfs_vsec_2_aclp(zfsvfs_t *zfsvfs, vtype_t obj_type,
    vsecattr_t *vsecp, cred_t *cr, zfs_fuid_info_t **fuidp, zfs_acl_t **zaclp)
{
    zfs_acl_t *aclp;
    zfs_acl_node_t *aclnode;
    int aclcnt = vsecp->vsa_aclcnt;
    int error;

    if (vsecp->vsa_aclcnt > MAX_ACL_ENTRIES || vsecp->vsa_aclcnt <= 0)
        return (EINVAL);

    aclp = zfs_acl_alloc(zfs_acl_version(zfsvfs->z_version));

    aclp->z_hints = 0;
    aclnode = zfs_acl_node_alloc(aclcnt * sizeof (zfs_object_ace_t));
    if (aclp->z_version == ZFS_ACL_VERSION_INITIAL) {
        if ((error = zfs_copy_ace_2_oldace(obj_type, aclp,
            (ace_t *)vsecp->vsa_aclentp, aclnode->z_acldata,
            aclcnt, &aclnode->z_size)) != 0) {
            zfs_acl_free(aclp);
            zfs_acl_node_free(aclnode);
            return (error);
        }
    } else {
        if ((error = zfs_copy_ace_2_fuid(zfsvfs, obj_type, aclp,
            vsecp->vsa_aclentp, aclnode->z_acldata, aclcnt,
            &aclnode->z_size, fuidp, cr)) != 0) {
            zfs_acl_free(aclp);
            zfs_acl_node_free(aclnode);
            return (error);
        }
    }
    aclp->z_acl_bytes = aclnode->z_size;
    aclnode->z_ace_count = aclcnt;
    aclp->z_acl_count = aclcnt;
    list_insert_head(&aclp->z_acl, aclnode);

    /*
     * If flags are being set then add them to z_hints
     */
    if (vsecp->vsa_mask & VSA_ACE_ACLFLAGS) {
        if (vsecp->vsa_aclflags & ACL_PROTECTED)
            aclp->z_hints |= ZFS_ACL_PROTECTED;
        if (vsecp->vsa_aclflags & ACL_DEFAULTED)
            aclp->z_hints |= ZFS_ACL_DEFAULTED;
        if (vsecp->vsa_aclflags & ACL_AUTO_INHERIT)
            aclp->z_hints |= ZFS_ACL_AUTO_INHERIT;
    }

    *zaclp = aclp;

    return (0);
}

/*
 * Set a file's ACL
 */
int
zfs_setacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
{
    zfsvfs_t *zfsvfs = zp->z_zfsvfs;
    zilog_t *zilog = zfsvfs->z_log;
    ulong_t mask = vsecp->vsa_mask & (VSA_ACE | VSA_ACECNT);
    dmu_tx_t *tx;
    int error;
    zfs_acl_t *aclp;
    zfs_fuid_info_t *fuidp = NULL;
    boolean_t fuid_dirtied;

    if (mask == 0)
        return (ENOSYS);

    if (zp->z_phys->zp_flags & ZFS_IMMUTABLE)
        return (EPERM);

    if (error = zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr))
        return (error);

    error = zfs_vsec_2_aclp(zfsvfs, ZTOV(zp)->v_type, vsecp, cr, &fuidp,
        &aclp);
    if (error)
        return (error);

    /*
     * If ACL wide flags aren't being set then preserve any
     * existing flags.
     */
    if (!(vsecp->vsa_mask & VSA_ACE_ACLFLAGS)) {
        aclp->z_hints |= (zp->z_phys->zp_flags & V4_ACL_WIDE_FLAGS);
    }
top:
    mutex_enter(&zp->z_lock);
    mutex_enter(&zp->z_acl_lock);

    tx = dmu_tx_create(zfsvfs->z_os);
    dmu_tx_hold_bonus(tx, zp->z_id);

/*
 * Set a file's ACL
 */
int
zfs_setacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	zilog_t *zilog = zfsvfs->z_log;
	ulong_t mask = vsecp->vsa_mask & (VSA_ACE | VSA_ACECNT);
	dmu_tx_t *tx;
	int error;
	zfs_acl_t *aclp;
	zfs_fuid_info_t *fuidp = NULL;
	boolean_t fuid_dirtied;

	if (mask == 0)
		return (ENOSYS);

	if (zp->z_phys->zp_flags & ZFS_IMMUTABLE)
		return (EPERM);

	if (error = zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr))
		return (error);

	error = zfs_vsec_2_aclp(zfsvfs, ZTOV(zp)->v_type, vsecp, cr, &fuidp,
	    &aclp);
	if (error)
		return (error);

	/*
	 * If ACL wide flags aren't being set then preserve any
	 * existing flags.
	 */
	if (!(vsecp->vsa_mask & VSA_ACE_ACLFLAGS)) {
		aclp->z_hints |= (zp->z_phys->zp_flags & V4_ACL_WIDE_FLAGS);
	}
top:
	mutex_enter(&zp->z_lock);
	mutex_enter(&zp->z_acl_lock);

	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_bonus(tx, zp->z_id);

	if (zp->z_phys->zp_acl.z_acl_extern_obj) {
		/* Are we upgrading ACL? */
		if (zfsvfs->z_version <= ZPL_VERSION_FUID &&
		    zp->z_phys->zp_acl.z_acl_version ==
		    ZFS_ACL_VERSION_INITIAL) {
			dmu_tx_hold_free(tx,
			    zp->z_phys->zp_acl.z_acl_extern_obj,
			    0, DMU_OBJECT_END);
			dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
			    0, aclp->z_acl_bytes);
		} else {
			dmu_tx_hold_write(tx,
			    zp->z_phys->zp_acl.z_acl_extern_obj,
			    0, aclp->z_acl_bytes);
		}
	} else if (aclp->z_acl_bytes > ZFS_ACE_SPACE) {
		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, aclp->z_acl_bytes);
	}
	fuid_dirtied = zfsvfs->z_fuid_dirty;
	if (fuid_dirtied)
		zfs_fuid_txhold(zfsvfs, tx);

	error = dmu_tx_assign(tx, TXG_NOWAIT);
	if (error) {
		mutex_exit(&zp->z_acl_lock);
		mutex_exit(&zp->z_lock);

		if (error == ERESTART) {
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		dmu_tx_abort(tx);
		zfs_acl_free(aclp);
		return (error);
	}

	error = zfs_aclset_common(zp, aclp, cr, tx);
	ASSERT(error == 0);
	zp->z_acl_cached = aclp;

	if (fuid_dirtied)
		zfs_fuid_sync(zfsvfs, tx);

	zfs_time_stamper_locked(zp, STATE_CHANGED, tx);
	zfs_log_acl(zilog, tx, zp, vsecp, fuidp);

	if (fuidp)
		zfs_fuid_info_free(fuidp);
	dmu_tx_commit(tx);
done:
	mutex_exit(&zp->z_acl_lock);
	mutex_exit(&zp->z_lock);

	return (error);
}

/*
 * Check accesses of interest (AoI) against attributes of the dataset
 * such as read-only.  Returns zero if no AoI conflict with dataset
 * attributes, otherwise an appropriate errno is returned.
 */
static int
zfs_zaccess_dataset_check(znode_t *zp, uint32_t v4_mode)
{
	if ((v4_mode & WRITE_MASK) &&
	    (zp->z_zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) &&
	    (!IS_DEVVP(ZTOV(zp)) ||
	    (IS_DEVVP(ZTOV(zp)) && (v4_mode & WRITE_MASK_ATTRS)))) {
		return (EROFS);
	}

	/*
	 * Only check for READONLY on non-directories.
	 */
	if ((v4_mode & WRITE_MASK_DATA) &&
	    (((ZTOV(zp)->v_type != VDIR) &&
	    (zp->z_phys->zp_flags & (ZFS_READONLY | ZFS_IMMUTABLE))) ||
	    (ZTOV(zp)->v_type == VDIR &&
	    (zp->z_phys->zp_flags & ZFS_IMMUTABLE)))) {
		return (EPERM);
	}

	if ((v4_mode & (ACE_DELETE | ACE_DELETE_CHILD)) &&
	    (zp->z_phys->zp_flags & ZFS_NOUNLINK)) {
		return (EPERM);
	}

	if (((v4_mode & (ACE_READ_DATA|ACE_EXECUTE)) &&
	    (zp->z_phys->zp_flags & ZFS_AV_QUARANTINED))) {
		return (EACCES);
	}

	return (0);
}
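
/*
 * Editorial note, not part of the original source: these dataset-level
 * checks run before any ACE is consulted.  For example, a request for
 * ACE_WRITE_DATA on a regular file in a filesystem mounted read-only
 * fails here with EROFS, and a request for ACE_READ_DATA or ACE_EXECUTE
 * on a file the antivirus layer has marked ZFS_AV_QUARANTINED fails
 * with EACCES, regardless of what the file's ACL would otherwise allow.
 */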

/*
 * The primary usage of this function is to loop through all of the
 * ACEs in the znode, determining what accesses of interest (AoI) to
 * the caller are allowed or denied.  The AoI are expressed as bits in
 * the working_mode parameter.  As each ACE is processed, bits covered
 * by that ACE are removed from the working_mode.  This removal
 * facilitates two things.  The first is that when the working mode is
 * empty (= 0), we know we've looked at all the AoI.  The second is
 * that the ACE interpretation rules don't allow a later ACE to undo
 * something granted or denied by an earlier ACE.  Removing the
 * discovered access or denial enforces this rule.  At the end of
 * processing the ACEs, all AoI that were found to be denied are
 * placed into the working_mode, giving the caller a mask of denied
 * accesses.  Returns:
 *	0		if all AoI granted
 *	EACCES		if the denied mask is non-zero
 *	other error	if abnormal failure (e.g., IO error)
 *
 * A secondary usage of the function is to determine if any of the
 * AoI are granted.  If an ACE grants any access in
 * the working_mode, we immediately short circuit out of the function.
 * This mode is chosen by setting anyaccess to B_TRUE.  The
 * working_mode is not a denied access mask upon exit if the function
 * is used in this manner.
 */
static int
zfs_zaccess_aces_check(znode_t *zp, uint32_t *working_mode,
    boolean_t anyaccess, cred_t *cr)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	zfs_acl_t *aclp;
	int error;
	uid_t uid = crgetuid(cr);
	uint64_t who;
	uint16_t type, iflags;
	uint16_t entry_type;
	uint32_t access_mask;
	uint32_t deny_mask = 0;
	zfs_ace_hdr_t *acep = NULL;
	boolean_t checkit;
	uid_t fowner;
	uid_t gowner;

	zfs_fuid_map_ids(zp, cr, &fowner, &gowner);

	mutex_enter(&zp->z_acl_lock);

	error = zfs_acl_node_read(zp, &aclp, B_FALSE);
	if (error != 0) {
		mutex_exit(&zp->z_acl_lock);
		return (error);
	}

	while (acep = zfs_acl_next_ace(aclp, acep, &who, &access_mask,
	    &iflags, &type)) {
		uint32_t mask_matched;

		if (!zfs_acl_valid_ace_type(type, iflags))
			continue;

		if (ZTOV(zp)->v_type == VDIR && (iflags & ACE_INHERIT_ONLY_ACE))
			continue;

		/* Skip ACE if it does not affect any AoI */
		mask_matched = (access_mask & *working_mode);
		if (!mask_matched)
			continue;

		entry_type = (iflags & ACE_TYPE_FLAGS);

		checkit = B_FALSE;

		switch (entry_type) {
		case ACE_OWNER:
			if (uid == fowner)
				checkit = B_TRUE;
			break;
		case OWNING_GROUP:
			who = gowner;
			/*FALLTHROUGH*/
		case ACE_IDENTIFIER_GROUP:
			checkit = zfs_groupmember(zfsvfs, who, cr);
			break;
		case ACE_EVERYONE:
			checkit = B_TRUE;
			break;

		/* USER Entry */
		default:
			if (entry_type == 0) {
				uid_t newid;

				newid = zfs_fuid_map_id(zfsvfs, who, cr,
				    ZFS_ACE_USER);
				if (newid != IDMAP_WK_CREATOR_OWNER_UID &&
				    uid == newid)
					checkit = B_TRUE;
				break;
			} else {
				mutex_exit(&zp->z_acl_lock);
				return (EIO);
			}
		}

		if (checkit) {
			if (type == DENY) {
				DTRACE_PROBE3(zfs__ace__denies,
				    znode_t *, zp,
				    zfs_ace_hdr_t *, acep,
				    uint32_t, mask_matched);
				deny_mask |= mask_matched;
			} else {
				DTRACE_PROBE3(zfs__ace__allows,
				    znode_t *, zp,
				    zfs_ace_hdr_t *, acep,
				    uint32_t, mask_matched);
				if (anyaccess) {
					mutex_exit(&zp->z_acl_lock);
					return (0);
				}
			}
			*working_mode &= ~mask_matched;
		}

		/* Are we done? */
		if (*working_mode == 0)
			break;
	}

	mutex_exit(&zp->z_acl_lock);

	/* Put the found 'denies' back on the working mode */
	if (deny_mask) {
		*working_mode |= deny_mask;
		return (EACCES);
	} else if (*working_mode) {
		return (-1);
	}

	return (0);
}
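
/*
 * Worked example (editorial addition, not part of the original source):
 * suppose the caller is user joe asking for ACE_READ_DATA|ACE_WRITE_DATA
 * against an ACL of "user:joe:write_data:deny, everyone@:read_data:allow".
 * The deny ACE matches ACE_WRITE_DATA, so that bit is recorded in
 * deny_mask and cleared from the working mode; the allow ACE then clears
 * ACE_READ_DATA.  The working mode is now empty, but deny_mask is not,
 * so the function returns EACCES with *working_mode == ACE_WRITE_DATA,
 * telling the caller exactly which access was denied.  Had no ACE
 * mentioned ACE_WRITE_DATA at all, the function would instead return -1
 * with that bit still set, leaving the decision to the caller's
 * privilege checks.
 */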

/*
 * Return true if any access whatsoever is granted; we don't actually
 * care what access is granted.
 */
boolean_t
zfs_has_access(znode_t *zp, cred_t *cr)
{
	uint32_t have = ACE_ALL_PERMS;

	if (zfs_zaccess_aces_check(zp, &have, B_TRUE, cr) != 0) {
		uid_t owner;

		owner = zfs_fuid_map_id(zp->z_zfsvfs,
		    zp->z_phys->zp_uid, cr, ZFS_OWNER);

		return (
		    secpolicy_vnode_access(cr, ZTOV(zp), owner, VREAD) == 0 ||
		    secpolicy_vnode_access(cr, ZTOV(zp), owner, VWRITE) == 0 ||
		    secpolicy_vnode_access(cr, ZTOV(zp), owner, VEXEC) == 0 ||
		    secpolicy_vnode_chown(cr, owner) == 0 ||
		    secpolicy_vnode_setdac(cr, owner) == 0 ||
		    secpolicy_vnode_remove(cr) == 0);
	}
	return (B_TRUE);
}

static int
zfs_zaccess_common(znode_t *zp, uint32_t v4_mode, uint32_t *working_mode,
    boolean_t *check_privs, boolean_t skipaclchk, cred_t *cr)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	int err;

	*working_mode = v4_mode;
	*check_privs = B_TRUE;

	/*
	 * Short circuit empty requests
	 */
	if (v4_mode == 0 || zfsvfs->z_replay) {
		*working_mode = 0;
		return (0);
	}

	if ((err = zfs_zaccess_dataset_check(zp, v4_mode)) != 0) {
		*check_privs = B_FALSE;
		return (err);
	}

	/*
	 * The caller requested that the ACL check be skipped.  This
	 * would only happen if the caller checked VOP_ACCESS() with a
	 * 32 bit ACE mask and already had the appropriate permissions.
	 */
	if (skipaclchk) {
		*working_mode = 0;
		return (0);
	}

	return (zfs_zaccess_aces_check(zp, working_mode, B_FALSE, cr));
}

static int
zfs_zaccess_append(znode_t *zp, uint32_t *working_mode, boolean_t *check_privs,
    cred_t *cr)
{
	if (*working_mode != ACE_WRITE_DATA)
		return (EACCES);

	return (zfs_zaccess_common(zp, ACE_APPEND_DATA, working_mode,
	    check_privs, B_FALSE, cr));
}

int
zfs_fastaccesschk_execute(znode_t *zdp, cred_t *cr)
{
	boolean_t owner = B_FALSE;
	boolean_t groupmbr = B_FALSE;
	boolean_t is_attr;
	uid_t fowner;
	uid_t gowner;
	uid_t uid = crgetuid(cr);
	int error;

	if (zdp->z_phys->zp_flags & ZFS_AV_QUARANTINED)
		return (EACCES);

	is_attr = ((zdp->z_phys->zp_flags & ZFS_XATTR) &&
	    (ZTOV(zdp)->v_type == VDIR));
	if (is_attr)
		goto slow;

	mutex_enter(&zdp->z_acl_lock);

	if (zdp->z_phys->zp_flags & ZFS_NO_EXECS_DENIED) {
		mutex_exit(&zdp->z_acl_lock);
		return (0);
	}

	if (FUID_INDEX(zdp->z_phys->zp_uid) != 0 ||
	    FUID_INDEX(zdp->z_phys->zp_gid) != 0) {
		mutex_exit(&zdp->z_acl_lock);
		goto slow;
	}

	fowner = (uid_t)zdp->z_phys->zp_uid;
	gowner = (uid_t)zdp->z_phys->zp_gid;

	if (uid == fowner) {
		owner = B_TRUE;
		if (zdp->z_phys->zp_mode & S_IXUSR) {
			mutex_exit(&zdp->z_acl_lock);
			return (0);
		} else {
			mutex_exit(&zdp->z_acl_lock);
			goto slow;
		}
	}
	if (groupmember(gowner, cr)) {
		groupmbr = B_TRUE;
		if (zdp->z_phys->zp_mode & S_IXGRP) {
			mutex_exit(&zdp->z_acl_lock);
			return (0);
		} else {
			mutex_exit(&zdp->z_acl_lock);
			goto slow;
		}
	}
	if (!owner && !groupmbr) {
		if (zdp->z_phys->zp_mode & S_IXOTH) {
			mutex_exit(&zdp->z_acl_lock);
			return (0);
		}
	}

	mutex_exit(&zdp->z_acl_lock);

slow:
	DTRACE_PROBE(zfs__fastpath__execute__access__miss);
	ZFS_ENTER(zdp->z_zfsvfs);
	error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr);
	ZFS_EXIT(zdp->z_zfsvfs);
	return (error);
}
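
/*
 * Editorial note, not part of the original source: the fast path above
 * grants execute without walking the ACL only when the znode is flagged
 * ZFS_NO_EXECS_DENIED, or when its owner and group are plain POSIX ids
 * (FUID index 0) and the relevant mode bit (S_IXUSR, S_IXGRP or S_IXOTH)
 * is set for the caller's class.  Everything else, including extended
 * attribute directories, drops to the "slow" label and the full
 * zfs_zaccess() check.
 */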

/*
 * Determine whether Access should be granted/denied, invoking least
 * priv subsystem when a deny is determined.
 */
int
zfs_zaccess(znode_t *zp, int mode, int flags, boolean_t skipaclchk, cred_t *cr)
{
	uint32_t working_mode;
	int error;
	int is_attr;
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	boolean_t check_privs;
	znode_t *xzp;
	znode_t *check_zp = zp;

	is_attr = ((zp->z_phys->zp_flags & ZFS_XATTR) &&
	    (ZTOV(zp)->v_type == VDIR));

	/*
	 * If attribute then validate against base file
	 */
	if (is_attr) {
		if ((error = zfs_zget(zp->z_zfsvfs,
		    zp->z_phys->zp_parent, &xzp)) != 0) {
			return (error);
		}

		check_zp = xzp;

		/*
		 * fixup mode to map to xattr perms
		 */

		if (mode & (ACE_WRITE_DATA|ACE_APPEND_DATA)) {
			mode &= ~(ACE_WRITE_DATA|ACE_APPEND_DATA);
			mode |= ACE_WRITE_NAMED_ATTRS;
		}

		if (mode & (ACE_READ_DATA|ACE_EXECUTE)) {
			mode &= ~(ACE_READ_DATA|ACE_EXECUTE);
			mode |= ACE_READ_NAMED_ATTRS;
		}
	}

	if ((error = zfs_zaccess_common(check_zp, mode, &working_mode,
	    &check_privs, skipaclchk, cr)) == 0) {
		if (is_attr)
			VN_RELE(ZTOV(xzp));
		return (0);
	}

	if (error && !check_privs) {
		if (is_attr)
			VN_RELE(ZTOV(xzp));
		return (error);
	}

	if (error && (flags & V_APPEND)) {
		error = zfs_zaccess_append(zp, &working_mode, &check_privs, cr);
	}

	if (error && check_privs) {
		uid_t owner;
		mode_t checkmode = 0;

		owner = zfs_fuid_map_id(zfsvfs, check_zp->z_phys->zp_uid, cr,
		    ZFS_OWNER);

		/*
		 * First check for implicit owner permission on
		 * read_acl/read_attributes
		 */

		error = 0;
		ASSERT(working_mode != 0);

		if ((working_mode & (ACE_READ_ACL|ACE_READ_ATTRIBUTES) &&
		    owner == crgetuid(cr)))
			working_mode &= ~(ACE_READ_ACL|ACE_READ_ATTRIBUTES);

		if (working_mode & (ACE_READ_DATA|ACE_READ_NAMED_ATTRS|
		    ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_SYNCHRONIZE))
			checkmode |= VREAD;
		if (working_mode & (ACE_WRITE_DATA|ACE_WRITE_NAMED_ATTRS|
		    ACE_APPEND_DATA|ACE_WRITE_ATTRIBUTES|ACE_SYNCHRONIZE))
			checkmode |= VWRITE;
		if (working_mode & ACE_EXECUTE)
			checkmode |= VEXEC;

		if (checkmode)
			error = secpolicy_vnode_access(cr, ZTOV(check_zp),
			    owner, checkmode);

		if (error == 0 && (working_mode & ACE_WRITE_OWNER))
			error = secpolicy_vnode_chown(cr, owner);
		if (error == 0 && (working_mode & ACE_WRITE_ACL))
			error = secpolicy_vnode_setdac(cr, owner);

		if (error == 0 && (working_mode &
		    (ACE_DELETE|ACE_DELETE_CHILD)))
			error = secpolicy_vnode_remove(cr);

		if (error == 0 && (working_mode & ACE_SYNCHRONIZE)) {
			error = secpolicy_vnode_chown(cr, owner);
		}
		if (error == 0) {
			/*
			 * See if any bits other than those already checked
			 * for are still present.  If so then return EACCES
			 */
			if (working_mode & ~(ZFS_CHECKED_MASKS)) {
				error = EACCES;
			}
		}
	}

	if (is_attr)
		VN_RELE(ZTOV(xzp));

	return (error);
}
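
/*
 * Worked example (editorial addition, not part of the original source):
 * if the owner of a file asks for ACE_READ_ATTRIBUTES and no ACE grants
 * it, the bit is forgiven above as an implicit owner permission without
 * consulting the privilege subsystem.  A leftover ACE_WRITE_OWNER bit,
 * by contrast, is only granted if secpolicy_vnode_chown() says the
 * caller may take ownership.  For extended attributes the whole check is
 * made against the base file, with reads and writes of the attribute
 * mapped to ACE_READ_NAMED_ATTRS and ACE_WRITE_NAMED_ATTRS.
 */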

/*
 * Translate traditional unix VREAD/VWRITE/VEXEC mode into
 * native ACL format and call zfs_zaccess()
 */
int
zfs_zaccess_rwx(znode_t *zp, mode_t mode, int flags, cred_t *cr)
{
	return (zfs_zaccess(zp, zfs_unix_to_v4(mode >> 6), flags, B_FALSE, cr));
}

/*
 * Access function for secpolicy_vnode_setattr
 */
int
zfs_zaccess_unix(znode_t *zp, mode_t mode, cred_t *cr)
{
	int v4_mode = zfs_unix_to_v4(mode >> 6);

	return (zfs_zaccess(zp, v4_mode, 0, B_FALSE, cr));
}

static int
zfs_delete_final_check(znode_t *zp, znode_t *dzp,
    mode_t missing_perms, cred_t *cr)
{
	int error;
	uid_t downer;
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;

	downer = zfs_fuid_map_id(zfsvfs, dzp->z_phys->zp_uid, cr, ZFS_OWNER);

	error = secpolicy_vnode_access(cr, ZTOV(dzp), downer, missing_perms);

	if (error == 0)
		error = zfs_sticky_remove_access(dzp, zp, cr);

	return (error);
}

/*
 * Determine whether Access should be granted/denied, without
 * consulting least priv subsystem.
 *
 * The following chart is the recommended NFSv4 enforcement for
 * ability to delete an object.
 *
 *      -------------------------------------------------------
 *      |   Parent Dir  |      Target Object Permissions      |
 *      |  permissions  |                                     |
 *      -------------------------------------------------------
 *      |               | ACL Allows | ACL Denies| Delete     |
 *      |               |  Delete    |  Delete   | unspecified|
 *      -------------------------------------------------------
 *      |  ACL Allows   | Permit     | Permit    | Permit     |
 *      |  DELETE_CHILD |            |           |            |
 *      -------------------------------------------------------
 *      |  ACL Denies   | Permit     | Deny      | Deny       |
 *      |  DELETE_CHILD |            |           |            |
 *      -------------------------------------------------------
 *      | ACL specifies |            |           |            |
 *      | only allow    | Permit     | Permit    | Permit     |
 *      | write and     |            |           |            |
 *      | execute       |            |           |            |
 *      -------------------------------------------------------
 *      | ACL denies    |            |           |            |
 *      | write and     | Permit     | Deny      | Deny       |
 *      | execute       |            |           |            |
 *      -------------------------------------------------------
 *         ^
 *         |
 *         No search privilege, can't even look up file?
 */
int
zfs_zaccess_delete(znode_t *dzp, znode_t *zp, cred_t *cr)
{
	uint32_t dzp_working_mode = 0;
	uint32_t zp_working_mode = 0;
	int dzp_error, zp_error;
	mode_t missing_perms;
	boolean_t dzpcheck_privs = B_TRUE;
	boolean_t zpcheck_privs = B_TRUE;

	/*
	 * We want specific DELETE permissions to
	 * take precedence over WRITE/EXECUTE.  We don't
	 * want an ACL such as this to mess us up.
	 * user:joe:write_data:deny,user:joe:delete:allow
	 *
	 * However, deny permissions may ultimately be overridden
	 * by secpolicy_vnode_access().
	 *
	 * We will ask for all of the necessary permissions and then
	 * look at the working modes from the directory and target object
	 * to determine what was found.
	 */

	if (zp->z_phys->zp_flags & (ZFS_IMMUTABLE | ZFS_NOUNLINK))
		return (EPERM);

	/*
	 * First row
	 * If the directory permissions allow the delete, we are done.
	 */
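	/*
	 * Editorial note, not part of the original source: the checks that
	 * follow implement the rows of the chart above in order.  First the
	 * directory is asked for ACE_DELETE_CHILD, then the target object
	 * for ACE_DELETE; an explicit deny of delete_child is routed through
	 * secpolicy_vnode_remove(), a grant of write/execute on the
	 * directory falls back to the sticky-bit check, and anything still
	 * missing is decided by zfs_delete_final_check().
	 */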
	if ((dzp_error = zfs_zaccess_common(dzp, ACE_DELETE_CHILD,
	    &dzp_working_mode, &dzpcheck_privs, B_FALSE, cr)) == 0)
		return (0);

	/*
	 * If target object has delete permission then we are done
	 */
	if ((zp_error = zfs_zaccess_common(zp, ACE_DELETE, &zp_working_mode,
	    &zpcheck_privs, B_FALSE, cr)) == 0)
		return (0);

	ASSERT(dzp_error && zp_error);

	if (!dzpcheck_privs)
		return (dzp_error);
	if (!zpcheck_privs)
		return (zp_error);

	/*
	 * Second row
	 *
	 * If directory returns EACCES then delete_child was denied
	 * due to deny delete_child.  In this case send the request through
	 * secpolicy_vnode_remove().  We don't use zfs_delete_final_check()
	 * since that *could* allow the delete based on write/execute
	 * permission and we want delete permissions to override
	 * write/execute.
	 */

	if (dzp_error == EACCES)
		return (secpolicy_vnode_remove(cr));

	/*
	 * Third Row
	 * only need to see if we have write/execute on directory.
	 */

	if ((dzp_error = zfs_zaccess_common(dzp, ACE_EXECUTE|ACE_WRITE_DATA,
	    &dzp_working_mode, &dzpcheck_privs, B_FALSE, cr)) == 0)
		return (zfs_sticky_remove_access(dzp, zp, cr));

	if (!dzpcheck_privs)
		return (dzp_error);

	/*
	 * Fourth row
	 */

	missing_perms = (dzp_working_mode & ACE_WRITE_DATA) ? VWRITE : 0;
	missing_perms |= (dzp_working_mode & ACE_EXECUTE) ? VEXEC : 0;

	ASSERT(missing_perms);

	return (zfs_delete_final_check(zp, dzp, missing_perms, cr));
}

int
zfs_zaccess_rename(znode_t *sdzp, znode_t *szp, znode_t *tdzp,
    znode_t *tzp, cred_t *cr)
{
	int add_perm;
	int error;

	if (szp->z_phys->zp_flags & ZFS_AV_QUARANTINED)
		return (EACCES);

	add_perm = (ZTOV(szp)->v_type == VDIR) ?
	    ACE_ADD_SUBDIRECTORY : ACE_ADD_FILE;

	/*
	 * Rename permissions are a combination of delete permission +
	 * add file/subdir permission.
	 */

	/*
	 * first make sure we do the delete portion.
	 *
	 * If that succeeds then check for add_file/add_subdir permissions
	 */

	if (error = zfs_zaccess_delete(sdzp, szp, cr))
		return (error);

	/*
	 * If we have a tzp, see if we can delete it.
	 */
	if (tzp) {
		if (error = zfs_zaccess_delete(tdzp, tzp, cr))
			return (error);
	}

	/*
	 * Now check for add permissions
	 */
	error = zfs_zaccess(tdzp, add_perm, 0, B_FALSE, cr);

	return (error);
}
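
/*
 * Worked example (editorial addition, not part of the original source):
 * given the ACL cited in zfs_zaccess_delete() above,
 * "user:joe:write_data:deny,user:joe:delete:allow", joe may still remove
 * the file even though write_data is denied: the check on the target
 * object finds delete explicitly allowed and succeeds before the denied
 * write_data bit is ever consulted.  zfs_zaccess_rename() builds on the
 * same routine: it requires delete permission on the source object (and
 * on an existing target, if any) plus ACE_ADD_FILE or
 * ACE_ADD_SUBDIRECTORY on the target directory.
 */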