// SPDX-License-Identifier: GPL-2.0-only
/*
 * Landlock LSM - Ruleset management
 *
 * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
 * Copyright © 2018-2020 ANSSI
 */

#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/compiler_types.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/overflow.h>
#include <linux/rbtree.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "limits.h"
#include "object.h"
#include "ruleset.h"

static struct landlock_ruleset *create_ruleset(const u32 num_layers)
{
        struct landlock_ruleset *new_ruleset;

        new_ruleset =
                kzalloc(struct_size(new_ruleset, access_masks, num_layers),
                        GFP_KERNEL_ACCOUNT);
        if (!new_ruleset)
                return ERR_PTR(-ENOMEM);
        refcount_set(&new_ruleset->usage, 1);
        mutex_init(&new_ruleset->lock);
        new_ruleset->root_inode = RB_ROOT;

#if IS_ENABLED(CONFIG_INET)
        new_ruleset->root_net_port = RB_ROOT;
#endif /* IS_ENABLED(CONFIG_INET) */

        new_ruleset->num_layers = num_layers;
        /*
         * hierarchy = NULL
         * num_rules = 0
         * access_masks[] = 0
         */
        return new_ruleset;
}

struct landlock_ruleset *
landlock_create_ruleset(const access_mask_t fs_access_mask,
                        const access_mask_t net_access_mask)
{
        struct landlock_ruleset *new_ruleset;

        /* Informs about useless ruleset. */
        if (!fs_access_mask && !net_access_mask)
                return ERR_PTR(-ENOMSG);
        new_ruleset = create_ruleset(1);
        if (IS_ERR(new_ruleset))
                return new_ruleset;
        if (fs_access_mask)
                landlock_add_fs_access_mask(new_ruleset, fs_access_mask, 0);
        if (net_access_mask)
                landlock_add_net_access_mask(new_ruleset, net_access_mask, 0);
        return new_ruleset;
}

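/*
 * Illustrative sketch (not part of the upstream file, assuming the usual
 * Landlock UAPI flags): a freshly created ruleset has a single layer whose
 * handled accesses are packed into access_masks[0].  For instance:
 *
 *	ruleset = landlock_create_ruleset(LANDLOCK_ACCESS_FS_EXECUTE |
 *					  LANDLOCK_ACCESS_FS_READ_FILE, 0);
 *
 * would yield num_layers == 1, num_rules == 0, and a first layer handling
 * only those two filesystem access rights; passing two empty masks instead
 * returns ERR_PTR(-ENOMSG) because such a ruleset could never match anything.
 */
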
static void build_check_rule(void)
{
        const struct landlock_rule rule = {
                .num_layers = ~0,
        };

        BUILD_BUG_ON(rule.num_layers < LANDLOCK_MAX_NUM_LAYERS);
}

static bool is_object_pointer(const enum landlock_key_type key_type)
{
        switch (key_type) {
        case LANDLOCK_KEY_INODE:
                return true;

#if IS_ENABLED(CONFIG_INET)
        case LANDLOCK_KEY_NET_PORT:
                return false;
#endif /* IS_ENABLED(CONFIG_INET) */

        default:
                WARN_ON_ONCE(1);
                return false;
        }
}

static struct landlock_rule *
create_rule(const struct landlock_id id,
            const struct landlock_layer (*const layers)[],
            const u32 num_layers,
            const struct landlock_layer *const new_layer)
{
        struct landlock_rule *new_rule;
        u32 new_num_layers;

        build_check_rule();
        if (new_layer) {
                /* Should already be checked by landlock_merge_ruleset(). */
                if (WARN_ON_ONCE(num_layers >= LANDLOCK_MAX_NUM_LAYERS))
                        return ERR_PTR(-E2BIG);
                new_num_layers = num_layers + 1;
        } else {
                new_num_layers = num_layers;
        }
        new_rule = kzalloc(struct_size(new_rule, layers, new_num_layers),
                           GFP_KERNEL_ACCOUNT);
        if (!new_rule)
                return ERR_PTR(-ENOMEM);
        RB_CLEAR_NODE(&new_rule->node);
        if (is_object_pointer(id.type)) {
                /* This should be caught by insert_rule(). */
                WARN_ON_ONCE(!id.key.object);
                landlock_get_object(id.key.object);
        }

        new_rule->key = id.key;
        new_rule->num_layers = new_num_layers;
        /* Copies the original layer stack. */
        memcpy(new_rule->layers, layers,
               flex_array_size(new_rule, layers, num_layers));
        if (new_layer)
                /* Adds a copy of @new_layer on the layer stack. */
                new_rule->layers[new_rule->num_layers - 1] = *new_layer;
        return new_rule;
}

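/*
 * Worked example (illustrative only): given an existing rule whose layer
 * stack is { {level=1, access=A1}, {level=2, access=A2} }, calling
 * create_rule() with @new_layer == &(struct landlock_layer){ .level = 3,
 * .access = A3 } returns a new rule whose stack holds all three entries:
 * the original stack is copied verbatim and the new layer is appended at
 * index num_layers - 1.  With @new_layer == NULL, the stack is only copied.
 */
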
static struct rb_root *get_root(struct landlock_ruleset *const ruleset,
                                const enum landlock_key_type key_type)
{
        switch (key_type) {
        case LANDLOCK_KEY_INODE:
                return &ruleset->root_inode;

#if IS_ENABLED(CONFIG_INET)
        case LANDLOCK_KEY_NET_PORT:
                return &ruleset->root_net_port;
#endif /* IS_ENABLED(CONFIG_INET) */

        default:
                WARN_ON_ONCE(1);
                return ERR_PTR(-EINVAL);
        }
}

static void free_rule(struct landlock_rule *const rule,
                      const enum landlock_key_type key_type)
{
        might_sleep();
        if (!rule)
                return;
        if (is_object_pointer(key_type))
                landlock_put_object(rule->key.object);
        kfree(rule);
}

static void build_check_ruleset(void)
{
        const struct landlock_ruleset ruleset = {
                .num_rules = ~0,
                .num_layers = ~0,
        };
        typeof(ruleset.access_masks[0]) access_masks = ~0;

        BUILD_BUG_ON(ruleset.num_rules < LANDLOCK_MAX_NUM_RULES);
        BUILD_BUG_ON(ruleset.num_layers < LANDLOCK_MAX_NUM_LAYERS);
        BUILD_BUG_ON(access_masks <
                     ((LANDLOCK_MASK_ACCESS_FS << LANDLOCK_SHIFT_ACCESS_FS) |
                      (LANDLOCK_MASK_ACCESS_NET << LANDLOCK_SHIFT_ACCESS_NET)));
}

/**
 * insert_rule - Create and insert a rule in a ruleset
 *
 * @ruleset: The ruleset to be updated.
 * @id: The ID to build the new rule with. The underlying kernel object, if
 *      any, must be held by the caller.
 * @layers: One or multiple layers to be copied into the new rule.
 * @num_layers: The number of @layers entries.
 *
 * When user space requests to add a new rule to a ruleset, @layers only
 * contains one entry and this entry is not assigned to any level. In this
 * case, the new rule will extend @ruleset, similarly to a boolean OR between
 * access rights.
 *
 * When merging a ruleset in a domain, or copying a domain, @layers will be
 * added to @ruleset as new constraints, similarly to a boolean AND between
 * access rights.
 */
static int insert_rule(struct landlock_ruleset *const ruleset,
                       const struct landlock_id id,
                       const struct landlock_layer (*const layers)[],
                       const size_t num_layers)
{
        struct rb_node **walker_node;
        struct rb_node *parent_node = NULL;
        struct landlock_rule *new_rule;
        struct rb_root *root;

        might_sleep();
        lockdep_assert_held(&ruleset->lock);
        if (WARN_ON_ONCE(!layers))
                return -ENOENT;

        if (is_object_pointer(id.type) && WARN_ON_ONCE(!id.key.object))
                return -ENOENT;

        root = get_root(ruleset, id.type);
        if (IS_ERR(root))
                return PTR_ERR(root);

        walker_node = &root->rb_node;
        while (*walker_node) {
                struct landlock_rule *const this =
                        rb_entry(*walker_node, struct landlock_rule, node);

                if (this->key.data != id.key.data) {
                        parent_node = *walker_node;
                        if (this->key.data < id.key.data)
                                walker_node = &((*walker_node)->rb_right);
                        else
                                walker_node = &((*walker_node)->rb_left);
                        continue;
                }

                /* Only a single-level layer should match an existing rule. */
                if (WARN_ON_ONCE(num_layers != 1))
                        return -EINVAL;

                /* If there is a matching rule, updates it. */
                if ((*layers)[0].level == 0) {
                        /*
                         * Extends access rights when the request comes from
                         * landlock_add_rule(2), i.e. @ruleset is not a domain.
                         */
                        if (WARN_ON_ONCE(this->num_layers != 1))
                                return -EINVAL;
                        if (WARN_ON_ONCE(this->layers[0].level != 0))
                                return -EINVAL;
                        this->layers[0].access |= (*layers)[0].access;
                        return 0;
                }

                if (WARN_ON_ONCE(this->layers[0].level == 0))
                        return -EINVAL;

                /*
                 * Intersects access rights when it is a merge between a
                 * ruleset and a domain.
                 */
                new_rule = create_rule(id, &this->layers, this->num_layers,
                                       &(*layers)[0]);
                if (IS_ERR(new_rule))
                        return PTR_ERR(new_rule);
                rb_replace_node(&this->node, &new_rule->node, root);
                free_rule(this, id.type);
                return 0;
        }

        /* There is no match for @id. */
        build_check_ruleset();
        if (ruleset->num_rules >= LANDLOCK_MAX_NUM_RULES)
                return -E2BIG;
        new_rule = create_rule(id, layers, num_layers, NULL);
        if (IS_ERR(new_rule))
                return PTR_ERR(new_rule);
        rb_link_node(&new_rule->node, parent_node, walker_node);
        rb_insert_color(&new_rule->node, root);
        ruleset->num_rules++;
        return 0;
}

static void build_check_layer(void)
{
        const struct landlock_layer layer = {
                .level = ~0,
                .access = ~0,
        };

        BUILD_BUG_ON(layer.level < LANDLOCK_MAX_NUM_LAYERS);
        BUILD_BUG_ON(layer.access < LANDLOCK_MASK_ACCESS_FS);
}

/* @ruleset must be locked by the caller. */
int landlock_insert_rule(struct landlock_ruleset *const ruleset,
                         const struct landlock_id id,
                         const access_mask_t access)
{
        struct landlock_layer layers[] = { {
                .access = access,
                /* When @level is zero, insert_rule() extends @ruleset. */
                .level = 0,
        } };

        build_check_layer();
        return insert_rule(ruleset, id, &layers, ARRAY_SIZE(layers));
}

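/*
 * Worked example (illustrative only): calling landlock_insert_rule() twice
 * for the same inode, first with LANDLOCK_ACCESS_FS_READ_FILE and then with
 * LANDLOCK_ACCESS_FS_EXECUTE, leaves a single level-0 rule granting both
 * rights (the boolean-OR case above).  When such a ruleset is later merged
 * into a domain, each enforcement stacks one more layer on the matching
 * rule, and an access is only granted if every layer allows it (the
 * boolean-AND case).
 */
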
static void get_hierarchy(struct landlock_hierarchy *const hierarchy)
{
        if (hierarchy)
                refcount_inc(&hierarchy->usage);
}

static void put_hierarchy(struct landlock_hierarchy *hierarchy)
{
        while (hierarchy && refcount_dec_and_test(&hierarchy->usage)) {
                const struct landlock_hierarchy *const freeme = hierarchy;

                hierarchy = hierarchy->parent;
                kfree(freeme);
        }
}

static int merge_tree(struct landlock_ruleset *const dst,
                      struct landlock_ruleset *const src,
                      const enum landlock_key_type key_type)
{
        struct landlock_rule *walker_rule, *next_rule;
        struct rb_root *src_root;
        int err = 0;

        might_sleep();
        lockdep_assert_held(&dst->lock);
        lockdep_assert_held(&src->lock);

        src_root = get_root(src, key_type);
        if (IS_ERR(src_root))
                return PTR_ERR(src_root);

        /* Merges the @src tree. */
        rbtree_postorder_for_each_entry_safe(walker_rule, next_rule, src_root,
                                             node) {
                struct landlock_layer layers[] = { {
                        .level = dst->num_layers,
                } };
                const struct landlock_id id = {
                        .key = walker_rule->key,
                        .type = key_type,
                };

                if (WARN_ON_ONCE(walker_rule->num_layers != 1))
                        return -EINVAL;

                if (WARN_ON_ONCE(walker_rule->layers[0].level != 0))
                        return -EINVAL;

                layers[0].access = walker_rule->layers[0].access;

                err = insert_rule(dst, id, &layers, ARRAY_SIZE(layers));
                if (err)
                        return err;
        }
        return err;
}

static int merge_ruleset(struct landlock_ruleset *const dst,
                         struct landlock_ruleset *const src)
{
        int err = 0;

        might_sleep();
        /* Should already be checked by landlock_merge_ruleset(). */
        if (WARN_ON_ONCE(!src))
                return 0;
        /* Only merge into a domain. */
        if (WARN_ON_ONCE(!dst || !dst->hierarchy))
                return -EINVAL;

        /* Locks @dst first because we are its only owner. */
        mutex_lock(&dst->lock);
        mutex_lock_nested(&src->lock, SINGLE_DEPTH_NESTING);

        /* Stacks the new layer. */
        if (WARN_ON_ONCE(src->num_layers != 1 || dst->num_layers < 1)) {
                err = -EINVAL;
                goto out_unlock;
        }
        dst->access_masks[dst->num_layers - 1] = src->access_masks[0];

        /* Merges the @src inode tree. */
        err = merge_tree(dst, src, LANDLOCK_KEY_INODE);
        if (err)
                goto out_unlock;

#if IS_ENABLED(CONFIG_INET)
        /* Merges the @src network port tree. */
        err = merge_tree(dst, src, LANDLOCK_KEY_NET_PORT);
        if (err)
                goto out_unlock;
#endif /* IS_ENABLED(CONFIG_INET) */

out_unlock:
        mutex_unlock(&src->lock);
        mutex_unlock(&dst->lock);
        return err;
}

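/*
 * Worked example (illustrative only): if the new domain @dst was sized for
 * three layers (a two-layer parent plus the merged ruleset), merge_ruleset()
 * stores src->access_masks[0] in dst->access_masks[2], and merge_tree()
 * inserts every @src rule with .level == 3.  A key already present in @dst
 * thus grows from two stacked layers to three, while a key only present in
 * @src ends up with a single level-3 layer.
 */
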
static int inherit_tree(struct landlock_ruleset *const parent,
                        struct landlock_ruleset *const child,
                        const enum landlock_key_type key_type)
{
        struct landlock_rule *walker_rule, *next_rule;
        struct rb_root *parent_root;
        int err = 0;

        might_sleep();
        lockdep_assert_held(&parent->lock);
        lockdep_assert_held(&child->lock);

        parent_root = get_root(parent, key_type);
        if (IS_ERR(parent_root))
                return PTR_ERR(parent_root);

        /* Copies the @parent inode or network tree. */
        rbtree_postorder_for_each_entry_safe(walker_rule, next_rule,
                                             parent_root, node) {
                const struct landlock_id id = {
                        .key = walker_rule->key,
                        .type = key_type,
                };

                err = insert_rule(child, id, &walker_rule->layers,
                                  walker_rule->num_layers);
                if (err)
                        return err;
        }
        return err;
}

static int inherit_ruleset(struct landlock_ruleset *const parent,
                           struct landlock_ruleset *const child)
{
        int err = 0;

        might_sleep();
        if (!parent)
                return 0;

        /* Locks @child first because we are its only owner. */
        mutex_lock(&child->lock);
        mutex_lock_nested(&parent->lock, SINGLE_DEPTH_NESTING);

        /* Copies the @parent inode tree. */
        err = inherit_tree(parent, child, LANDLOCK_KEY_INODE);
        if (err)
                goto out_unlock;

#if IS_ENABLED(CONFIG_INET)
        /* Copies the @parent network port tree. */
        err = inherit_tree(parent, child, LANDLOCK_KEY_NET_PORT);
        if (err)
                goto out_unlock;
#endif /* IS_ENABLED(CONFIG_INET) */

        if (WARN_ON_ONCE(child->num_layers <= parent->num_layers)) {
                err = -EINVAL;
                goto out_unlock;
        }
        /* Copies the parent layer stack and leaves a space for the new layer. */
        memcpy(child->access_masks, parent->access_masks,
               flex_array_size(parent, access_masks, parent->num_layers));

        if (WARN_ON_ONCE(!parent->hierarchy)) {
                err = -EINVAL;
                goto out_unlock;
        }
        get_hierarchy(parent->hierarchy);
        child->hierarchy->parent = parent->hierarchy;

out_unlock:
        mutex_unlock(&parent->lock);
        mutex_unlock(&child->lock);
        return err;
}

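/*
 * Illustrative note: each inherit_ruleset() call links the child domain's
 * hierarchy node to its parent's, so repeated enforcement builds a chain:
 *
 *	child->hierarchy->parent == parent->hierarchy
 *	parent->hierarchy->parent == grandparent->hierarchy
 *
 * put_hierarchy() walks this chain iteratively and frees each node whose
 * refcount drops to zero, which avoids unbounded recursion for deeply
 * nested domains.
 */
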
static void free_ruleset(struct landlock_ruleset *const ruleset)
{
        struct landlock_rule *freeme, *next;

        might_sleep();
        rbtree_postorder_for_each_entry_safe(freeme, next, &ruleset->root_inode,
                                             node)
                free_rule(freeme, LANDLOCK_KEY_INODE);

#if IS_ENABLED(CONFIG_INET)
        rbtree_postorder_for_each_entry_safe(freeme, next,
                                             &ruleset->root_net_port, node)
                free_rule(freeme, LANDLOCK_KEY_NET_PORT);
#endif /* IS_ENABLED(CONFIG_INET) */

        put_hierarchy(ruleset->hierarchy);
        kfree(ruleset);
}

void landlock_put_ruleset(struct landlock_ruleset *const ruleset)
{
        might_sleep();
        if (ruleset && refcount_dec_and_test(&ruleset->usage))
                free_ruleset(ruleset);
}

static void free_ruleset_work(struct work_struct *const work)
{
        struct landlock_ruleset *ruleset;

        ruleset = container_of(work, struct landlock_ruleset, work_free);
        free_ruleset(ruleset);
}

void landlock_put_ruleset_deferred(struct landlock_ruleset *const ruleset)
{
        if (ruleset && refcount_dec_and_test(&ruleset->usage)) {
                INIT_WORK(&ruleset->work_free, free_ruleset_work);
                schedule_work(&ruleset->work_free);
        }
}

/**
 * landlock_merge_ruleset - Merge a ruleset with a domain
 *
 * @parent: Parent domain.
 * @ruleset: New ruleset to be merged.
 *
 * Returns the intersection of @parent and @ruleset, or returns @parent if
 * @ruleset is empty, or returns a duplicate of @ruleset if @parent is empty.
 */
struct landlock_ruleset *
landlock_merge_ruleset(struct landlock_ruleset *const parent,
                       struct landlock_ruleset *const ruleset)
{
        struct landlock_ruleset *new_dom;
        u32 num_layers;
        int err;

        might_sleep();
        if (WARN_ON_ONCE(!ruleset || parent == ruleset))
                return ERR_PTR(-EINVAL);

        if (parent) {
                if (parent->num_layers >= LANDLOCK_MAX_NUM_LAYERS)
                        return ERR_PTR(-E2BIG);
                num_layers = parent->num_layers + 1;
        } else {
                num_layers = 1;
        }

        /* Creates a new domain... */
        new_dom = create_ruleset(num_layers);
        if (IS_ERR(new_dom))
                return new_dom;
        new_dom->hierarchy =
                kzalloc(sizeof(*new_dom->hierarchy), GFP_KERNEL_ACCOUNT);
        if (!new_dom->hierarchy) {
                err = -ENOMEM;
                goto out_put_dom;
        }
        refcount_set(&new_dom->hierarchy->usage, 1);

        /* ...as a child of @parent... */
        err = inherit_ruleset(parent, new_dom);
        if (err)
                goto out_put_dom;

        /* ...and including @ruleset. */
        err = merge_ruleset(new_dom, ruleset);
        if (err)
                goto out_put_dom;

        return new_dom;

out_put_dom:
        landlock_put_ruleset(new_dom);
        return ERR_PTR(err);
}

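/*
 * Illustrative usage sketch (hypothetical caller, roughly mirroring the
 * landlock_restrict_self(2) path; current_domain is a placeholder name):
 *
 *	new_dom = landlock_merge_ruleset(current_domain, ruleset);
 *	if (IS_ERR(new_dom))
 *		return PTR_ERR(new_dom);
 *	...install new_dom as the task's domain...
 *
 * The caller owns the returned reference and is responsible for dropping it
 * with landlock_put_ruleset() (or the deferred variant) when done.
 */
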
/*
 * The returned rule has the same lifetime as @ruleset.
 */
const struct landlock_rule *
landlock_find_rule(const struct landlock_ruleset *const ruleset,
                   const struct landlock_id id)
{
        const struct rb_root *root;
        const struct rb_node *node;

        root = get_root((struct landlock_ruleset *)ruleset, id.type);
        if (IS_ERR(root))
                return NULL;
        node = root->rb_node;

        while (node) {
                struct landlock_rule *this =
                        rb_entry(node, struct landlock_rule, node);

                if (this->key.data == id.key.data)
                        return this;
                if (this->key.data < id.key.data)
                        node = node->rb_right;
                else
                        node = node->rb_left;
        }
        return NULL;
}

/*
 * @layer_masks is read and may be updated according to the access request and
 * the matching rule.
 * @masks_array_size must be equal to ARRAY_SIZE(*layer_masks).
 *
 * Returns true if the request is allowed (i.e. relevant layer masks for the
 * request are empty).
 */
bool landlock_unmask_layers(const struct landlock_rule *const rule,
                            const access_mask_t access_request,
                            layer_mask_t (*const layer_masks)[],
                            const size_t masks_array_size)
{
        size_t layer_level;

        if (!access_request || !layer_masks)
                return true;
        if (!rule)
                return false;

        /*
         * An access is granted if, for each policy layer, at least one rule
         * encountered on the pathwalk grants the requested access,
         * regardless of its position in the layer stack. We must then check
         * the remaining layers for each inode, from the first added layer to
         * the last one. When there are multiple requested accesses, for each
         * policy layer, the full set of requested accesses may not be granted
         * by only one rule, but by the union (binary OR) of multiple rules.
         * E.g. /a/b <execute> + /a <read> => /a/b <execute + read>
         */
        for (layer_level = 0; layer_level < rule->num_layers; layer_level++) {
                const struct landlock_layer *const layer =
                        &rule->layers[layer_level];
                const layer_mask_t layer_bit = BIT_ULL(layer->level - 1);
                const unsigned long access_req = access_request;
                unsigned long access_bit;
                bool is_empty;

                /*
                 * Records in @layer_masks which layer grants access to each
                 * requested access.
                 */
                is_empty = true;
                for_each_set_bit(access_bit, &access_req, masks_array_size) {
                        if (layer->access & BIT_ULL(access_bit))
                                (*layer_masks)[access_bit] &= ~layer_bit;
                        is_empty = is_empty && !(*layer_masks)[access_bit];
                }
                if (is_empty)
                        return true;
        }
        return false;
}

typedef access_mask_t
get_access_mask_t(const struct landlock_ruleset *const ruleset,
                  const u16 layer_level);

/**
 * landlock_init_layer_masks - Initialize layer masks from an access request
 *
 * Populates @layer_masks such that for each access right in @access_request,
 * the bits for all the layers are set where this access right is handled.
 *
 * @domain: The domain that defines the current restrictions.
 * @access_request: The requested access rights to check.
 * @layer_masks: It must contain %LANDLOCK_NUM_ACCESS_FS or
 * %LANDLOCK_NUM_ACCESS_NET elements according to @key_type.
 * @key_type: The key type to switch between access masks of different types.
 *
 * Returns: An access mask with a bit set for each requested access right that
 * is handled by at least one of the active layers in @domain.
 */
access_mask_t
landlock_init_layer_masks(const struct landlock_ruleset *const domain,
                          const access_mask_t access_request,
                          layer_mask_t (*const layer_masks)[],
                          const enum landlock_key_type key_type)
{
        access_mask_t handled_accesses = 0;
        size_t layer_level, num_access;
        get_access_mask_t *get_access_mask;

        switch (key_type) {
        case LANDLOCK_KEY_INODE:
                get_access_mask = landlock_get_fs_access_mask;
                num_access = LANDLOCK_NUM_ACCESS_FS;
                break;

#if IS_ENABLED(CONFIG_INET)
        case LANDLOCK_KEY_NET_PORT:
                get_access_mask = landlock_get_net_access_mask;
                num_access = LANDLOCK_NUM_ACCESS_NET;
                break;
#endif /* IS_ENABLED(CONFIG_INET) */

        default:
                WARN_ON_ONCE(1);
                return 0;
        }

        memset(layer_masks, 0,
               array_size(sizeof((*layer_masks)[0]), num_access));

        /* An empty access request can happen because of O_WRONLY | O_RDWR. */
        if (!access_request)
                return 0;

        /* Saves all handled accesses per layer. */
        for (layer_level = 0; layer_level < domain->num_layers; layer_level++) {
                const unsigned long access_req = access_request;
                const access_mask_t access_mask =
                        get_access_mask(domain, layer_level);
                unsigned long access_bit;

                for_each_set_bit(access_bit, &access_req, num_access) {
                        if (BIT_ULL(access_bit) & access_mask) {
                                (*layer_masks)[access_bit] |=
                                        BIT_ULL(layer_level);
                                handled_accesses |= BIT_ULL(access_bit);
                        }
                }
        }
        return handled_accesses;
}

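/*
 * Worked example (illustrative only): consider a two-layer domain where
 * layer 1 handles READ_FILE and layer 2 handles READ_FILE and EXECUTE.
 * landlock_init_layer_masks(domain, READ_FILE | EXECUTE, &masks,
 * LANDLOCK_KEY_INODE) then sets masks[READ_FILE bit] == 0b11 and
 * masks[EXECUTE bit] == 0b10, and returns both access bits as handled.
 * During the path walk, landlock_unmask_layers() clears a layer's bit in
 * each mask whose access that layer's matching rule grants; the request is
 * allowed once every mask for a requested access reaches zero.
 */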