// SPDX-License-Identifier: GPL-2.0-only
/*
 * Landlock LSM - Ruleset management
 *
 * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
 * Copyright © 2018-2020 ANSSI
 */

#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/cleanup.h>
#include <linux/compiler_types.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/overflow.h>
#include <linux/rbtree.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "access.h"
#include "domain.h"
#include "limits.h"
#include "object.h"
#include "ruleset.h"

static struct landlock_ruleset *create_ruleset(const u32 num_layers)
{
	struct landlock_ruleset *new_ruleset;

	new_ruleset = kzalloc_flex(*new_ruleset, access_masks, num_layers,
				   GFP_KERNEL_ACCOUNT);
	if (!new_ruleset)
		return ERR_PTR(-ENOMEM);
	refcount_set(&new_ruleset->usage, 1);
	mutex_init(&new_ruleset->lock);
	new_ruleset->root_inode = RB_ROOT;

#if IS_ENABLED(CONFIG_INET)
	new_ruleset->root_net_port = RB_ROOT;
#endif /* IS_ENABLED(CONFIG_INET) */

	new_ruleset->num_layers = num_layers;
	/*
	 * hierarchy = NULL
	 * num_rules = 0
	 * access_masks[] = 0
	 */
	return new_ruleset;
}
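
/*
 * Illustrative note (assumption, not from the original source): access_masks
 * is a flexible array member, so the allocation above is assumed to behave
 * like kzalloc(struct_size(new_ruleset, access_masks, num_layers)), i.e. the
 * zeroed allocation covers the fixed part of struct landlock_ruleset plus
 * @num_layers entries of access_masks[].
 */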

struct landlock_ruleset *
landlock_create_ruleset(const access_mask_t fs_access_mask,
			const access_mask_t net_access_mask,
			const access_mask_t scope_mask)
{
	struct landlock_ruleset *new_ruleset;

	/* Informs about useless ruleset. */
	if (!fs_access_mask && !net_access_mask && !scope_mask)
		return ERR_PTR(-ENOMSG);
	new_ruleset = create_ruleset(1);
	if (IS_ERR(new_ruleset))
		return new_ruleset;
	if (fs_access_mask)
		landlock_add_fs_access_mask(new_ruleset, fs_access_mask, 0);
	if (net_access_mask)
		landlock_add_net_access_mask(new_ruleset, net_access_mask, 0);
	if (scope_mask)
		landlock_add_scope_mask(new_ruleset, scope_mask, 0);
	return new_ruleset;
}
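
/*
 * Hedged usage sketch (cf. landlock_create_ruleset(2)): the syscall path is
 * expected to call this helper with the masks taken from
 * struct landlock_ruleset_attr, e.g.:
 *
 *	ruleset = landlock_create_ruleset(attr.handled_access_fs,
 *					  attr.handled_access_net,
 *					  attr.scoped);
 *	if (IS_ERR(ruleset))
 *		return PTR_ERR(ruleset);
 */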

static void build_check_rule(void)
{
	const struct landlock_rule rule = {
		.num_layers = ~0,
	};

	/*
	 * Checks that .num_layers is large enough for at least
	 * LANDLOCK_MAX_NUM_LAYERS layers.
	 */
	BUILD_BUG_ON(rule.num_layers < LANDLOCK_MAX_NUM_LAYERS);
}

static bool is_object_pointer(const enum landlock_key_type key_type)
{
	switch (key_type) {
	case LANDLOCK_KEY_INODE:
		return true;

#if IS_ENABLED(CONFIG_INET)
	case LANDLOCK_KEY_NET_PORT:
		return false;
#endif /* IS_ENABLED(CONFIG_INET) */

	default:
		WARN_ON_ONCE(1);
		return false;
	}
}

static struct landlock_rule *
create_rule(const struct landlock_id id,
	    const struct landlock_layer (*const layers)[], const u32 num_layers,
	    const struct landlock_layer *const new_layer)
{
	struct landlock_rule *new_rule;
	u32 new_num_layers;

	build_check_rule();
	if (new_layer) {
		/* Should already be checked by landlock_merge_ruleset(). */
		if (WARN_ON_ONCE(num_layers >= LANDLOCK_MAX_NUM_LAYERS))
			return ERR_PTR(-E2BIG);
		new_num_layers = num_layers + 1;
	} else {
		new_num_layers = num_layers;
	}
	new_rule = kzalloc_flex(*new_rule, layers, new_num_layers,
				GFP_KERNEL_ACCOUNT);
	if (!new_rule)
		return ERR_PTR(-ENOMEM);
	RB_CLEAR_NODE(&new_rule->node);
	if (is_object_pointer(id.type)) {
		/* This should have been caught by insert_rule(). */
		WARN_ON_ONCE(!id.key.object);
		landlock_get_object(id.key.object);
	}

	new_rule->key = id.key;
	new_rule->num_layers = new_num_layers;
	/* Copies the original layer stack. */
	memcpy(new_rule->layers, layers,
	       flex_array_size(new_rule, layers, num_layers));
	if (new_layer)
		/* Adds a copy of @new_layer on the layer stack. */
		new_rule->layers[new_rule->num_layers - 1] = *new_layer;
	return new_rule;
}
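
/*
 * Worked example: merging a one-layer rule from a ruleset into a domain rule
 * that already has two layers calls create_rule() with num_layers == 2 and a
 * non-NULL @new_layer, so new_num_layers == 3; layers[0..1] are memcpy'd from
 * the existing rule and layers[2] receives the copy of @new_layer.
 */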

static struct rb_root *get_root(struct landlock_ruleset *const ruleset,
				const enum landlock_key_type key_type)
{
	switch (key_type) {
	case LANDLOCK_KEY_INODE:
		return &ruleset->root_inode;

#if IS_ENABLED(CONFIG_INET)
	case LANDLOCK_KEY_NET_PORT:
		return &ruleset->root_net_port;
#endif /* IS_ENABLED(CONFIG_INET) */

	default:
		WARN_ON_ONCE(1);
		return ERR_PTR(-EINVAL);
	}
}

static void free_rule(struct landlock_rule *const rule,
		      const enum landlock_key_type key_type)
{
	might_sleep();
	if (!rule)
		return;
	if (is_object_pointer(key_type))
		landlock_put_object(rule->key.object);
	kfree(rule);
}

static void build_check_ruleset(void)
{
	const struct landlock_ruleset ruleset = {
		.num_rules = ~0,
		.num_layers = ~0,
	};

	BUILD_BUG_ON(ruleset.num_rules < LANDLOCK_MAX_NUM_RULES);
	BUILD_BUG_ON(ruleset.num_layers < LANDLOCK_MAX_NUM_LAYERS);
}

/**
 * insert_rule - Create and insert a rule in a ruleset
 *
 * @ruleset: The ruleset to be updated.
 * @id: The ID to build the new rule with.  The underlying kernel object, if
 *      any, must be held by the caller.
 * @layers: One or multiple layers to be copied into the new rule.
 * @num_layers: The number of @layers entries.
 *
 * When user space requests to add a new rule to a ruleset, @layers only
 * contains one entry and this entry is not assigned to any level.  In this
 * case, the new rule will extend @ruleset, similarly to a boolean OR between
 * access rights.
 *
 * When merging a ruleset in a domain, or copying a domain, @layers will be
 * added to @ruleset as new constraints, similarly to a boolean AND between
 * access rights.
 */
static int insert_rule(struct landlock_ruleset *const ruleset,
		       const struct landlock_id id,
		       const struct landlock_layer (*const layers)[],
		       const size_t num_layers)
{
	struct rb_node **walker_node;
	struct rb_node *parent_node = NULL;
	struct landlock_rule *new_rule;
	struct rb_root *root;

	might_sleep();
	lockdep_assert_held(&ruleset->lock);
	if (WARN_ON_ONCE(!layers))
		return -ENOENT;

	if (is_object_pointer(id.type) && WARN_ON_ONCE(!id.key.object))
		return -ENOENT;

	root = get_root(ruleset, id.type);
	if (IS_ERR(root))
		return PTR_ERR(root);

	walker_node = &root->rb_node;
	while (*walker_node) {
		struct landlock_rule *const this =
			rb_entry(*walker_node, struct landlock_rule, node);

		if (this->key.data != id.key.data) {
			parent_node = *walker_node;
			if (this->key.data < id.key.data)
				walker_node = &((*walker_node)->rb_right);
			else
				walker_node = &((*walker_node)->rb_left);
			continue;
		}

		/* Only a single-level layer should match an existing rule. */
		if (WARN_ON_ONCE(num_layers != 1))
			return -EINVAL;

		/* If there is a matching rule, updates it. */
		if ((*layers)[0].level == 0) {
			/*
			 * Extends access rights when the request comes from
			 * landlock_add_rule(2), i.e. @ruleset is not a domain.
			 */
			if (WARN_ON_ONCE(this->num_layers != 1))
				return -EINVAL;
			if (WARN_ON_ONCE(this->layers[0].level != 0))
				return -EINVAL;
			this->layers[0].access |= (*layers)[0].access;
			return 0;
		}

		if (WARN_ON_ONCE(this->layers[0].level == 0))
			return -EINVAL;

		/*
		 * Intersects access rights when it is a merge between a
		 * ruleset and a domain.
		 */
		new_rule = create_rule(id, &this->layers, this->num_layers,
				       &(*layers)[0]);
		if (IS_ERR(new_rule))
			return PTR_ERR(new_rule);
		rb_replace_node(&this->node, &new_rule->node, root);
		free_rule(this, id.type);
		return 0;
	}

	/* There is no match for @id. */
	build_check_ruleset();
	if (ruleset->num_rules >= LANDLOCK_MAX_NUM_RULES)
		return -E2BIG;
	new_rule = create_rule(id, layers, num_layers, NULL);
	if (IS_ERR(new_rule))
		return PTR_ERR(new_rule);
	rb_link_node(&new_rule->node, parent_node, walker_node);
	rb_insert_color(&new_rule->node, root);
	ruleset->num_rules++;
	return 0;
}
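
/*
 * Worked example of the two insert_rule() modes above:
 * - landlock_add_rule(2) path (level == 0): two rules for the same inode with
 *   LANDLOCK_ACCESS_FS_READ_FILE and LANDLOCK_ACCESS_FS_WRITE_FILE are
 *   unioned into one rule granting both accesses (boolean OR).
 * - merge/inherit path (level != 0): the same key in a new layer stacks one
 *   more entry on the existing rule's layer stack, so an access is later
 *   granted only if every layer grants it (boolean AND, cf.
 *   landlock_unmask_layers()).
 */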

static void build_check_layer(void)
{
	const struct landlock_layer layer = {
		.level = ~0,
		.access = ~0,
	};

	/*
	 * Checks that .level and .access are large enough to contain their
	 * expected maximum values.
	 */
	BUILD_BUG_ON(layer.level < LANDLOCK_MAX_NUM_LAYERS);
	BUILD_BUG_ON(layer.access < LANDLOCK_MASK_ACCESS_FS);
}

/* @ruleset must be locked by the caller. */
int landlock_insert_rule(struct landlock_ruleset *const ruleset,
			 const struct landlock_id id,
			 const access_mask_t access)
{
	struct landlock_layer layers[] = { {
		.access = access,
		/* When @level is zero, insert_rule() extends @ruleset. */
		.level = 0,
	} };

	build_check_layer();
	return insert_rule(ruleset, id, &layers, ARRAY_SIZE(layers));
}
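
/*
 * Hedged caller sketch: since @ruleset->lock must be held, a typical user of
 * landlock_insert_rule() is expected to look like:
 *
 *	mutex_lock(&ruleset->lock);
 *	err = landlock_insert_rule(ruleset, id, access);
 *	mutex_unlock(&ruleset->lock);
 */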

static int merge_tree(struct landlock_ruleset *const dst,
		      struct landlock_ruleset *const src,
		      const enum landlock_key_type key_type)
{
	struct landlock_rule *walker_rule, *next_rule;
	struct rb_root *src_root;
	int err = 0;

	might_sleep();
	lockdep_assert_held(&dst->lock);
	lockdep_assert_held(&src->lock);

	src_root = get_root(src, key_type);
	if (IS_ERR(src_root))
		return PTR_ERR(src_root);

	/* Merges the @src tree. */
	rbtree_postorder_for_each_entry_safe(walker_rule, next_rule, src_root,
					     node) {
		struct landlock_layer layers[] = { {
			.level = dst->num_layers,
		} };
		const struct landlock_id id = {
			.key = walker_rule->key,
			.type = key_type,
		};

		if (WARN_ON_ONCE(walker_rule->num_layers != 1))
			return -EINVAL;

		if (WARN_ON_ONCE(walker_rule->layers[0].level != 0))
			return -EINVAL;

		layers[0].access = walker_rule->layers[0].access;

		err = insert_rule(dst, id, &layers, ARRAY_SIZE(layers));
		if (err)
			return err;
	}
	return err;
}

static int merge_ruleset(struct landlock_ruleset *const dst,
			 struct landlock_ruleset *const src)
{
	int err = 0;

	might_sleep();
	/* Should already be checked by landlock_merge_ruleset() */
	if (WARN_ON_ONCE(!src))
		return 0;
	/* Only merge into a domain. */
	if (WARN_ON_ONCE(!dst || !dst->hierarchy))
		return -EINVAL;

	/* Locks @dst first because we are its only owner. */
	mutex_lock(&dst->lock);
	mutex_lock_nested(&src->lock, SINGLE_DEPTH_NESTING);

	/* Stacks the new layer. */
	if (WARN_ON_ONCE(src->num_layers != 1 || dst->num_layers < 1)) {
		err = -EINVAL;
		goto out_unlock;
	}
	dst->access_masks[dst->num_layers - 1] =
		landlock_upgrade_handled_access_masks(src->access_masks[0]);

	/* Merges the @src inode tree. */
	err = merge_tree(dst, src, LANDLOCK_KEY_INODE);
	if (err)
		goto out_unlock;

#if IS_ENABLED(CONFIG_INET)
	/* Merges the @src network port tree. */
	err = merge_tree(dst, src, LANDLOCK_KEY_NET_PORT);
	if (err)
		goto out_unlock;
#endif /* IS_ENABLED(CONFIG_INET) */

out_unlock:
	mutex_unlock(&src->lock);
	mutex_unlock(&dst->lock);
	return err;
}

static int inherit_tree(struct landlock_ruleset *const parent,
			struct landlock_ruleset *const child,
			const enum landlock_key_type key_type)
{
	struct landlock_rule *walker_rule, *next_rule;
	struct rb_root *parent_root;
	int err = 0;

	might_sleep();
	lockdep_assert_held(&parent->lock);
	lockdep_assert_held(&child->lock);

	parent_root = get_root(parent, key_type);
	if (IS_ERR(parent_root))
		return PTR_ERR(parent_root);

	/* Copies the @parent inode or network tree. */
	rbtree_postorder_for_each_entry_safe(walker_rule, next_rule,
					     parent_root, node) {
		const struct landlock_id id = {
			.key = walker_rule->key,
			.type = key_type,
		};

		err = insert_rule(child, id, &walker_rule->layers,
				  walker_rule->num_layers);
		if (err)
			return err;
	}
	return err;
}

static int inherit_ruleset(struct landlock_ruleset *const parent,
			   struct landlock_ruleset *const child)
{
	int err = 0;

	might_sleep();
	if (!parent)
		return 0;

	/* Locks @child first because we are its only owner. */
	mutex_lock(&child->lock);
	mutex_lock_nested(&parent->lock, SINGLE_DEPTH_NESTING);

	/* Copies the @parent inode tree. */
	err = inherit_tree(parent, child, LANDLOCK_KEY_INODE);
	if (err)
		goto out_unlock;

#if IS_ENABLED(CONFIG_INET)
	/* Copies the @parent network port tree. */
	err = inherit_tree(parent, child, LANDLOCK_KEY_NET_PORT);
	if (err)
		goto out_unlock;
#endif /* IS_ENABLED(CONFIG_INET) */

	if (WARN_ON_ONCE(child->num_layers <= parent->num_layers)) {
		err = -EINVAL;
		goto out_unlock;
	}
	/* Copies the parent layer stack and leaves a space for the new layer. */
	memcpy(child->access_masks, parent->access_masks,
	       flex_array_size(parent, access_masks, parent->num_layers));

	if (WARN_ON_ONCE(!parent->hierarchy)) {
		err = -EINVAL;
		goto out_unlock;
	}
	landlock_get_hierarchy(parent->hierarchy);
	child->hierarchy->parent = parent->hierarchy;

out_unlock:
	mutex_unlock(&parent->lock);
	mutex_unlock(&child->lock);
	return err;
}
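
/*
 * Illustrative layout after inherit_ruleset(): with a two-layer @parent, the
 * three-layer @child gets access_masks[0..1] copied from the parent while
 * access_masks[2] stays zeroed, to be filled by merge_ruleset() when the new
 * ruleset layer is stacked on top.
 */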

static void free_ruleset(struct landlock_ruleset *const ruleset)
{
	struct landlock_rule *freeme, *next;

	might_sleep();
	rbtree_postorder_for_each_entry_safe(freeme, next, &ruleset->root_inode,
					     node)
		free_rule(freeme, LANDLOCK_KEY_INODE);

#if IS_ENABLED(CONFIG_INET)
	rbtree_postorder_for_each_entry_safe(freeme, next,
					     &ruleset->root_net_port, node)
		free_rule(freeme, LANDLOCK_KEY_NET_PORT);
#endif /* IS_ENABLED(CONFIG_INET) */

	landlock_put_hierarchy(ruleset->hierarchy);
	kfree(ruleset);
}

void landlock_put_ruleset(struct landlock_ruleset *const ruleset)
{
	might_sleep();
	if (ruleset && refcount_dec_and_test(&ruleset->usage))
		free_ruleset(ruleset);
}

static void free_ruleset_work(struct work_struct *const work)
{
	struct landlock_ruleset *ruleset;

	ruleset = container_of(work, struct landlock_ruleset, work_free);
	free_ruleset(ruleset);
}

/* Only called by hook_cred_free(). */
void landlock_put_ruleset_deferred(struct landlock_ruleset *const ruleset)
{
	if (ruleset && refcount_dec_and_test(&ruleset->usage)) {
		INIT_WORK(&ruleset->work_free, free_ruleset_work);
		schedule_work(&ruleset->work_free);
	}
}
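
/*
 * Rationale sketch: landlock_put_ruleset() might sleep (cf. might_sleep() in
 * free_rule() and free_ruleset()), which is not allowed from the
 * credential-freeing context, hence this variant that defers free_ruleset()
 * to the system workqueue instead of freeing in place.
 */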

/**
 * landlock_merge_ruleset - Merge a ruleset with a domain
 *
 * @parent: Parent domain.
 * @ruleset: New ruleset to be merged.
 *
 * The current task is requesting to be restricted.  The subjective credentials
 * must not be in an overridden state.  cf. landlock_init_hierarchy_log().
 *
 * Returns the intersection of @parent and @ruleset, or returns @parent if
 * @ruleset is empty, or returns a duplicate of @ruleset if @parent is empty.
 */
struct landlock_ruleset *
landlock_merge_ruleset(struct landlock_ruleset *const parent,
		       struct landlock_ruleset *const ruleset)
{
	struct landlock_ruleset *new_dom __free(landlock_put_ruleset) = NULL;
	u32 num_layers;
	int err;

	might_sleep();
	if (WARN_ON_ONCE(!ruleset || parent == ruleset))
		return ERR_PTR(-EINVAL);

	if (parent) {
		if (parent->num_layers >= LANDLOCK_MAX_NUM_LAYERS)
			return ERR_PTR(-E2BIG);
		num_layers = parent->num_layers + 1;
	} else {
		num_layers = 1;
	}

	/* Creates a new domain... */
	new_dom = create_ruleset(num_layers);
	if (IS_ERR(new_dom))
		return new_dom;

	new_dom->hierarchy =
		kzalloc_obj(*new_dom->hierarchy, GFP_KERNEL_ACCOUNT);
	if (!new_dom->hierarchy)
		return ERR_PTR(-ENOMEM);

	refcount_set(&new_dom->hierarchy->usage, 1);

	/* ...as a child of @parent... */
	err = inherit_ruleset(parent, new_dom);
	if (err)
		return ERR_PTR(err);

	/* ...and including @ruleset. */
	err = merge_ruleset(new_dom, ruleset);
	if (err)
		return ERR_PTR(err);

	err = landlock_init_hierarchy_log(new_dom->hierarchy);
	if (err)
		return ERR_PTR(err);

	return no_free_ptr(new_dom);
}
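
/*
 * Hedged usage sketch (cf. landlock_restrict_self(2)): the syscall path is
 * expected to build the new domain from the caller's current one and then
 * commit it to the task's credentials, e.g.:
 *
 *	new_dom = landlock_merge_ruleset(current_domain, ruleset);
 *	if (IS_ERR(new_dom))
 *		return PTR_ERR(new_dom);
 */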

/*
 * The returned access has the same lifetime as @ruleset.
 */
const struct landlock_rule *
landlock_find_rule(const struct landlock_ruleset *const ruleset,
		   const struct landlock_id id)
{
	const struct rb_root *root;
	const struct rb_node *node;

	root = get_root((struct landlock_ruleset *)ruleset, id.type);
	if (IS_ERR(root))
		return NULL;
	node = root->rb_node;

	while (node) {
		struct landlock_rule *this =
			rb_entry(node, struct landlock_rule, node);

		if (this->key.data == id.key.data)
			return this;
		if (this->key.data < id.key.data)
			node = node->rb_right;
		else
			node = node->rb_left;
	}
	return NULL;
}

/**
 * landlock_unmask_layers - Remove the access rights in @masks
 * which are granted in @rule
 *
 * @rule: A rule that grants a set of access rights for each layer.
 * @masks: A matrix of unfulfilled access rights for each layer.
 *
 * Updates the set of (per-layer) unfulfilled access rights @masks so that all
 * the access rights granted in @rule are removed from it (because they are
 * now fulfilled).
 *
 * Returns true if the request is allowed, i.e. the granted access rights
 * cover all the remaining unfulfilled access rights and @masks has no bits
 * left set.
 */
bool landlock_unmask_layers(const struct landlock_rule *const rule,
			    struct layer_access_masks *masks)
{
	if (!masks)
		return true;
	if (!rule)
		return false;

	/*
	 * An access is granted if, for each policy layer, at least one rule
	 * encountered on the pathwalk grants the requested access,
	 * regardless of its position in the layer stack.  We must then check
	 * the remaining layers for each inode, from the first added layer to
	 * the last one.  When there are multiple requested accesses, the full
	 * set may not be granted by a single rule for a given policy layer,
	 * but by the union (binary OR) of multiple rules.
	 * E.g. /a/b <execute> + /a <read> => /a/b <execute + read>
	 */
	for (size_t i = 0; i < rule->num_layers; i++) {
		const struct landlock_layer *const layer = &rule->layers[i];

		/* Clears the bits where the layer in the rule grants access. */
		masks->access[layer->level - 1] &= ~layer->access;
	}

	for (size_t i = 0; i < ARRAY_SIZE(masks->access); i++) {
		if (masks->access[i])
			return false;
	}
	return true;
}
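
/*
 * Worked example with two layers and a request for READ_FILE:
 * masks->access[] = { READ_FILE, READ_FILE } on entry.  A rule with
 * layers[] = { {level=1, access=READ_FILE}, {level=2, access=READ_FILE} }
 * clears both entries and the function returns true; if the level-2 layer
 * only granted WRITE_FILE, access[1] would keep READ_FILE set and the
 * function would return false (the walk has to continue).
 */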

typedef access_mask_t
get_access_mask_t(const struct landlock_ruleset *const ruleset,
		  const u16 layer_level);

/**
 * landlock_init_layer_masks - Initialize layer masks from an access request
 *
 * @domain: The domain that defines the current restrictions.
 * @access_request: The requested access rights to check.
 * @masks: Layer access masks to populate.
 * @key_type: The key type to switch between access masks of different types.
 *
 * Populates @masks such that for each access right in @access_request, the
 * bits for all the layers are set where this access right is handled.
 *
 * Returns: An access mask where each access right bit is set which is handled
 * in any of the active layers in @domain.
 */
access_mask_t
landlock_init_layer_masks(const struct landlock_ruleset *const domain,
			  const access_mask_t access_request,
			  struct layer_access_masks *const masks,
			  const enum landlock_key_type key_type)
{
	access_mask_t handled_accesses = 0;
	get_access_mask_t *get_access_mask;

	switch (key_type) {
	case LANDLOCK_KEY_INODE:
		get_access_mask = landlock_get_fs_access_mask;
		break;

#if IS_ENABLED(CONFIG_INET)
	case LANDLOCK_KEY_NET_PORT:
		get_access_mask = landlock_get_net_access_mask;
		break;
#endif /* IS_ENABLED(CONFIG_INET) */

	default:
		WARN_ON_ONCE(1);
		return 0;
	}

	/* An empty access request can happen because of O_WRONLY | O_RDWR. */
	if (!access_request)
		return 0;

	for (size_t i = 0; i < domain->num_layers; i++) {
		const access_mask_t handled = get_access_mask(domain, i);

		masks->access[i] = access_request & handled;
		handled_accesses |= masks->access[i];
	}
	for (size_t i = domain->num_layers; i < ARRAY_SIZE(masks->access); i++)
		masks->access[i] = 0;

	return handled_accesses;
}
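
/*
 * Worked example: given a domain with two layers where only the first layer
 * handles LANDLOCK_ACCESS_FS_READ_FILE, a READ_FILE request sets
 * masks->access[] = { READ_FILE, 0, 0, ... } and returns READ_FILE; a
 * request that no layer handles yields all-zero masks and returns 0.
 */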
718