// SPDX-License-Identifier: GPL-2.0-only
/*
 * Landlock LSM - Ruleset management
 *
 * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
 * Copyright © 2018-2020 ANSSI
 */

#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/compiler_types.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/overflow.h>
#include <linux/rbtree.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "limits.h"
#include "object.h"
#include "ruleset.h"

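/*
 * Allocates a new ruleset with room for @num_layers access masks.  The
 * caller gets the only reference: @usage is set to 1 and both rule trees
 * start empty.
 */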
static struct landlock_ruleset *create_ruleset(const u32 num_layers)
{
	struct landlock_ruleset *new_ruleset;

	new_ruleset =
		kzalloc(struct_size(new_ruleset, access_masks, num_layers),
			GFP_KERNEL_ACCOUNT);
	if (!new_ruleset)
		return ERR_PTR(-ENOMEM);
	refcount_set(&new_ruleset->usage, 1);
	mutex_init(&new_ruleset->lock);
	new_ruleset->root_inode = RB_ROOT;

#if IS_ENABLED(CONFIG_INET)
	new_ruleset->root_net_port = RB_ROOT;
#endif /* IS_ENABLED(CONFIG_INET) */

	new_ruleset->num_layers = num_layers;
	/*
	 * hierarchy = NULL
	 * num_rules = 0
	 * access_masks[] = 0
	 */
	return new_ruleset;
}

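/**
 * landlock_create_ruleset - Create a one-layer ruleset from access masks
 *
 * @fs_access_mask: Handled filesystem accesses.
 * @net_access_mask: Handled network accesses.
 * @scope_mask: Handled scopes.
 *
 * Returns the new ruleset, or an ERR_PTR: -ENOMSG if all masks are empty,
 * -ENOMEM on allocation failure.
 */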
struct landlock_ruleset *
landlock_create_ruleset(const access_mask_t fs_access_mask,
			const access_mask_t net_access_mask,
			const access_mask_t scope_mask)
{
	struct landlock_ruleset *new_ruleset;

	/* Informs about a useless ruleset. */
	if (!fs_access_mask && !net_access_mask && !scope_mask)
		return ERR_PTR(-ENOMSG);
	new_ruleset = create_ruleset(1);
	if (IS_ERR(new_ruleset))
		return new_ruleset;
	if (fs_access_mask)
		landlock_add_fs_access_mask(new_ruleset, fs_access_mask, 0);
	if (net_access_mask)
		landlock_add_net_access_mask(new_ruleset, net_access_mask, 0);
	if (scope_mask)
		landlock_add_scope_mask(new_ruleset, scope_mask, 0);
	return new_ruleset;
}

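/*
 * Checks at build time that a rule's num_layers field, filled with ones, can
 * store at least LANDLOCK_MAX_NUM_LAYERS.  The same pattern is used by the
 * other build_check_*() helpers below.
 */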
static void build_check_rule(void)
{
	const struct landlock_rule rule = {
		.num_layers = ~0,
	};

	BUILD_BUG_ON(rule.num_layers < LANDLOCK_MAX_NUM_LAYERS);
}

static bool is_object_pointer(const enum landlock_key_type key_type)
{
	switch (key_type) {
	case LANDLOCK_KEY_INODE:
		return true;

#if IS_ENABLED(CONFIG_INET)
	case LANDLOCK_KEY_NET_PORT:
		return false;
#endif /* IS_ENABLED(CONFIG_INET) */

	default:
		WARN_ON_ONCE(1);
		return false;
	}
}

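/*
 * Allocates a new rule identified by @id, copying the existing @layers stack
 * and, if @new_layer is not NULL, appending a copy of it on top.  Takes a
 * reference on the underlying object when @id holds one.
 */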
static struct landlock_rule *
create_rule(const struct landlock_id id,
	    const struct landlock_layer (*const layers)[], const u32 num_layers,
	    const struct landlock_layer *const new_layer)
{
	struct landlock_rule *new_rule;
	u32 new_num_layers;

	build_check_rule();
	if (new_layer) {
		/* Should already be checked by landlock_merge_ruleset(). */
		if (WARN_ON_ONCE(num_layers >= LANDLOCK_MAX_NUM_LAYERS))
			return ERR_PTR(-E2BIG);
		new_num_layers = num_layers + 1;
	} else {
		new_num_layers = num_layers;
	}
	new_rule = kzalloc(struct_size(new_rule, layers, new_num_layers),
			   GFP_KERNEL_ACCOUNT);
	if (!new_rule)
		return ERR_PTR(-ENOMEM);
	RB_CLEAR_NODE(&new_rule->node);
	if (is_object_pointer(id.type)) {
		/* This should be caught by insert_rule(). */
		WARN_ON_ONCE(!id.key.object);
		landlock_get_object(id.key.object);
	}

	new_rule->key = id.key;
	new_rule->num_layers = new_num_layers;
	/* Copies the original layer stack. */
	memcpy(new_rule->layers, layers,
	       flex_array_size(new_rule, layers, num_layers));
	if (new_layer)
		/* Adds a copy of @new_layer on the layer stack. */
		new_rule->layers[new_rule->num_layers - 1] = *new_layer;
	return new_rule;
}

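/* Returns the rule tree matching @key_type, or an ERR_PTR for unknown types. */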
static struct rb_root *get_root(struct landlock_ruleset *const ruleset,
				const enum landlock_key_type key_type)
{
	switch (key_type) {
	case LANDLOCK_KEY_INODE:
		return &ruleset->root_inode;

#if IS_ENABLED(CONFIG_INET)
	case LANDLOCK_KEY_NET_PORT:
		return &ruleset->root_net_port;
#endif /* IS_ENABLED(CONFIG_INET) */

	default:
		WARN_ON_ONCE(1);
		return ERR_PTR(-EINVAL);
	}
}

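/* Frees @rule and puts the object reference it may hold. */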
static void free_rule(struct landlock_rule *const rule,
		      const enum landlock_key_type key_type)
{
	might_sleep();
	if (!rule)
		return;
	if (is_object_pointer(key_type))
		landlock_put_object(rule->key.object);
	kfree(rule);
}

static void build_check_ruleset(void)
{
	const struct landlock_ruleset ruleset = {
		.num_rules = ~0,
		.num_layers = ~0,
	};

	BUILD_BUG_ON(ruleset.num_rules < LANDLOCK_MAX_NUM_RULES);
	BUILD_BUG_ON(ruleset.num_layers < LANDLOCK_MAX_NUM_LAYERS);
}

/**
 * insert_rule - Create and insert a rule in a ruleset
 *
 * @ruleset: The ruleset to be updated.
 * @id: The ID to build the new rule with.  The underlying kernel object, if
 *      any, must be held by the caller.
 * @layers: One or multiple layers to be copied into the new rule.
 * @num_layers: The number of @layers entries.
 *
 * When user space requests to add a new rule to a ruleset, @layers only
 * contains one entry and this entry is not assigned to any level.  In this
 * case, the new rule will extend @ruleset, similarly to a boolean OR between
 * access rights.
 *
 * When merging a ruleset in a domain, or copying a domain, @layers will be
 * added to @ruleset as new constraints, similarly to a boolean AND between
 * access rights.
 */
static int insert_rule(struct landlock_ruleset *const ruleset,
		       const struct landlock_id id,
		       const struct landlock_layer (*const layers)[],
		       const size_t num_layers)
{
	struct rb_node **walker_node;
	struct rb_node *parent_node = NULL;
	struct landlock_rule *new_rule;
	struct rb_root *root;

	might_sleep();
	lockdep_assert_held(&ruleset->lock);
	if (WARN_ON_ONCE(!layers))
		return -ENOENT;

	if (is_object_pointer(id.type) && WARN_ON_ONCE(!id.key.object))
		return -ENOENT;

	root = get_root(ruleset, id.type);
	if (IS_ERR(root))
		return PTR_ERR(root);

	walker_node = &root->rb_node;
	while (*walker_node) {
		struct landlock_rule *const this =
			rb_entry(*walker_node, struct landlock_rule, node);

		if (this->key.data != id.key.data) {
			parent_node = *walker_node;
			if (this->key.data < id.key.data)
				walker_node = &((*walker_node)->rb_right);
			else
				walker_node = &((*walker_node)->rb_left);
			continue;
		}

		/* Only a single-level layer should match an existing rule. */
		if (WARN_ON_ONCE(num_layers != 1))
			return -EINVAL;

		/* If there is a matching rule, updates it. */
		if ((*layers)[0].level == 0) {
			/*
			 * Extends access rights when the request comes from
			 * landlock_add_rule(2), i.e. @ruleset is not a domain.
			 */
			if (WARN_ON_ONCE(this->num_layers != 1))
				return -EINVAL;
			if (WARN_ON_ONCE(this->layers[0].level != 0))
				return -EINVAL;
			this->layers[0].access |= (*layers)[0].access;
			return 0;
		}

		if (WARN_ON_ONCE(this->layers[0].level == 0))
			return -EINVAL;

		/*
		 * Intersects access rights when it is a merge between a
		 * ruleset and a domain.
		 */
		new_rule = create_rule(id, &this->layers, this->num_layers,
				       &(*layers)[0]);
		if (IS_ERR(new_rule))
			return PTR_ERR(new_rule);
		rb_replace_node(&this->node, &new_rule->node, root);
		free_rule(this, id.type);
		return 0;
	}

	/* There is no match for @id. */
	build_check_ruleset();
	if (ruleset->num_rules >= LANDLOCK_MAX_NUM_RULES)
		return -E2BIG;
	new_rule = create_rule(id, layers, num_layers, NULL);
	if (IS_ERR(new_rule))
		return PTR_ERR(new_rule);
	rb_link_node(&new_rule->node, parent_node, walker_node);
	rb_insert_color(&new_rule->node, root);
	ruleset->num_rules++;
	return 0;
}

static void build_check_layer(void)
{
	const struct landlock_layer layer = {
		.level = ~0,
		.access = ~0,
	};

	BUILD_BUG_ON(layer.level < LANDLOCK_MAX_NUM_LAYERS);
	BUILD_BUG_ON(layer.access < LANDLOCK_MASK_ACCESS_FS);
}

/* @ruleset must be locked by the caller. */
int landlock_insert_rule(struct landlock_ruleset *const ruleset,
			 const struct landlock_id id,
			 const access_mask_t access)
{
	struct landlock_layer layers[] = { {
		.access = access,
		/* When @level is zero, insert_rule() extends @ruleset. */
		.level = 0,
	} };

	build_check_layer();
	return insert_rule(ruleset, id, &layers, ARRAY_SIZE(layers));
}

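/* Takes a reference on @hierarchy, which may be NULL. */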
static void get_hierarchy(struct landlock_hierarchy *const hierarchy)
{
	if (hierarchy)
		refcount_inc(&hierarchy->usage);
}

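/*
 * Puts @hierarchy and, once a level is no longer referenced, frees its
 * parents as well, iteratively rather than recursively.
 */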
static void put_hierarchy(struct landlock_hierarchy *hierarchy)
{
	while (hierarchy && refcount_dec_and_test(&hierarchy->usage)) {
		const struct landlock_hierarchy *const freeme = hierarchy;

		hierarchy = hierarchy->parent;
		kfree(freeme);
	}
}

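/*
 * Imports all the rules of the @src tree matching @key_type into @dst,
 * stacking each of them as a new layer at level @dst->num_layers.
 */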
static int merge_tree(struct landlock_ruleset *const dst,
		      struct landlock_ruleset *const src,
		      const enum landlock_key_type key_type)
{
	struct landlock_rule *walker_rule, *next_rule;
	struct rb_root *src_root;
	int err = 0;

	might_sleep();
	lockdep_assert_held(&dst->lock);
	lockdep_assert_held(&src->lock);

	src_root = get_root(src, key_type);
	if (IS_ERR(src_root))
		return PTR_ERR(src_root);

	/* Merges the @src tree. */
	rbtree_postorder_for_each_entry_safe(walker_rule, next_rule, src_root,
					     node) {
		struct landlock_layer layers[] = { {
			.level = dst->num_layers,
		} };
		const struct landlock_id id = {
			.key = walker_rule->key,
			.type = key_type,
		};

		if (WARN_ON_ONCE(walker_rule->num_layers != 1))
			return -EINVAL;

		if (WARN_ON_ONCE(walker_rule->layers[0].level != 0))
			return -EINVAL;

		layers[0].access = walker_rule->layers[0].access;

		err = insert_rule(dst, id, &layers, ARRAY_SIZE(layers));
		if (err)
			return err;
	}
	return err;
}

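/*
 * Merges @src (a one-layer ruleset) into @dst (a domain): stacks the access
 * masks of @src as the last layer of @dst and merges both rule trees.
 */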
static int merge_ruleset(struct landlock_ruleset *const dst,
			 struct landlock_ruleset *const src)
{
	int err = 0;

	might_sleep();
	/* Should already be checked by landlock_merge_ruleset() */
	if (WARN_ON_ONCE(!src))
		return 0;
	/* Only merge into a domain. */
	if (WARN_ON_ONCE(!dst || !dst->hierarchy))
		return -EINVAL;

	/* Locks @dst first because we are its only owner. */
	mutex_lock(&dst->lock);
	mutex_lock_nested(&src->lock, SINGLE_DEPTH_NESTING);

	/* Stacks the new layer. */
	if (WARN_ON_ONCE(src->num_layers != 1 || dst->num_layers < 1)) {
		err = -EINVAL;
		goto out_unlock;
	}
	dst->access_masks[dst->num_layers - 1] = src->access_masks[0];

	/* Merges the @src inode tree. */
	err = merge_tree(dst, src, LANDLOCK_KEY_INODE);
	if (err)
		goto out_unlock;

#if IS_ENABLED(CONFIG_INET)
	/* Merges the @src network port tree. */
	err = merge_tree(dst, src, LANDLOCK_KEY_NET_PORT);
	if (err)
		goto out_unlock;
#endif /* IS_ENABLED(CONFIG_INET) */

out_unlock:
	mutex_unlock(&src->lock);
	mutex_unlock(&dst->lock);
	return err;
}

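/*
 * Copies all the rules of the @parent tree matching @key_type into the new
 * @child ruleset, layer stacks included.
 */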
static int inherit_tree(struct landlock_ruleset *const parent,
			struct landlock_ruleset *const child,
			const enum landlock_key_type key_type)
{
	struct landlock_rule *walker_rule, *next_rule;
	struct rb_root *parent_root;
	int err = 0;

	might_sleep();
	lockdep_assert_held(&parent->lock);
	lockdep_assert_held(&child->lock);

	parent_root = get_root(parent, key_type);
	if (IS_ERR(parent_root))
		return PTR_ERR(parent_root);

	/* Copies the @parent inode or network tree. */
	rbtree_postorder_for_each_entry_safe(walker_rule, next_rule,
					     parent_root, node) {
		const struct landlock_id id = {
			.key = walker_rule->key,
			.type = key_type,
		};

		err = insert_rule(child, id, &walker_rule->layers,
				  walker_rule->num_layers);
		if (err)
			return err;
	}
	return err;
}

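/*
 * Duplicates @parent into @child: rule trees, access masks (leaving room for
 * the layer to come) and hierarchy link.
 */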
static int inherit_ruleset(struct landlock_ruleset *const parent,
			   struct landlock_ruleset *const child)
{
	int err = 0;

	might_sleep();
	if (!parent)
		return 0;

	/* Locks @child first because we are its only owner. */
	mutex_lock(&child->lock);
	mutex_lock_nested(&parent->lock, SINGLE_DEPTH_NESTING);

	/* Copies the @parent inode tree. */
	err = inherit_tree(parent, child, LANDLOCK_KEY_INODE);
	if (err)
		goto out_unlock;

#if IS_ENABLED(CONFIG_INET)
	/* Copies the @parent network port tree. */
	err = inherit_tree(parent, child, LANDLOCK_KEY_NET_PORT);
	if (err)
		goto out_unlock;
#endif /* IS_ENABLED(CONFIG_INET) */

	if (WARN_ON_ONCE(child->num_layers <= parent->num_layers)) {
		err = -EINVAL;
		goto out_unlock;
	}
	/* Copies the parent layer stack and leaves a space for the new layer. */
	memcpy(child->access_masks, parent->access_masks,
	       flex_array_size(parent, access_masks, parent->num_layers));

	if (WARN_ON_ONCE(!parent->hierarchy)) {
		err = -EINVAL;
		goto out_unlock;
	}
	get_hierarchy(parent->hierarchy);
	child->hierarchy->parent = parent->hierarchy;

out_unlock:
	mutex_unlock(&parent->lock);
	mutex_unlock(&child->lock);
	return err;
}

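/* Frees all the rules of @ruleset, puts its hierarchy, then frees it. */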
static void free_ruleset(struct landlock_ruleset *const ruleset)
{
	struct landlock_rule *freeme, *next;

	might_sleep();
	rbtree_postorder_for_each_entry_safe(freeme, next, &ruleset->root_inode,
					     node)
		free_rule(freeme, LANDLOCK_KEY_INODE);

#if IS_ENABLED(CONFIG_INET)
	rbtree_postorder_for_each_entry_safe(freeme, next,
					     &ruleset->root_net_port, node)
		free_rule(freeme, LANDLOCK_KEY_NET_PORT);
#endif /* IS_ENABLED(CONFIG_INET) */

	put_hierarchy(ruleset->hierarchy);
	kfree(ruleset);
}

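/*
 * Puts a ruleset reference.  May sleep when freeing rules, so see
 * landlock_put_ruleset_deferred() for contexts that may not sleep.
 */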
void landlock_put_ruleset(struct landlock_ruleset *const ruleset)
{
	might_sleep();
	if (ruleset && refcount_dec_and_test(&ruleset->usage))
		free_ruleset(ruleset);
}

static void free_ruleset_work(struct work_struct *const work)
{
	struct landlock_ruleset *ruleset;

	ruleset = container_of(work, struct landlock_ruleset, work_free);
	free_ruleset(ruleset);
}

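/*
 * Same as landlock_put_ruleset(), but defers the potential freeing to a
 * workqueue, which makes it usable when sleeping is not allowed.
 */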
void landlock_put_ruleset_deferred(struct landlock_ruleset *const ruleset)
{
	if (ruleset && refcount_dec_and_test(&ruleset->usage)) {
		INIT_WORK(&ruleset->work_free, free_ruleset_work);
		schedule_work(&ruleset->work_free);
	}
}

/**
 * landlock_merge_ruleset - Merge a ruleset with a domain
 *
 * @parent: Parent domain.
 * @ruleset: New ruleset to be merged.
 *
 * Returns the intersection of @parent and @ruleset, or returns @parent if
 * @ruleset is empty, or returns a duplicate of @ruleset if @parent is empty.
 */
struct landlock_ruleset *
landlock_merge_ruleset(struct landlock_ruleset *const parent,
		       struct landlock_ruleset *const ruleset)
{
	struct landlock_ruleset *new_dom;
	u32 num_layers;
	int err;

	might_sleep();
	if (WARN_ON_ONCE(!ruleset || parent == ruleset))
		return ERR_PTR(-EINVAL);

	if (parent) {
		if (parent->num_layers >= LANDLOCK_MAX_NUM_LAYERS)
			return ERR_PTR(-E2BIG);
		num_layers = parent->num_layers + 1;
	} else {
		num_layers = 1;
	}

	/* Creates a new domain... */
	new_dom = create_ruleset(num_layers);
	if (IS_ERR(new_dom))
		return new_dom;
	new_dom->hierarchy =
		kzalloc(sizeof(*new_dom->hierarchy), GFP_KERNEL_ACCOUNT);
	if (!new_dom->hierarchy) {
		err = -ENOMEM;
		goto out_put_dom;
	}
	refcount_set(&new_dom->hierarchy->usage, 1);

	/* ...as a child of @parent... */
	err = inherit_ruleset(parent, new_dom);
	if (err)
		goto out_put_dom;

	/* ...and including @ruleset. */
	err = merge_ruleset(new_dom, ruleset);
	if (err)
		goto out_put_dom;

	return new_dom;

out_put_dom:
	landlock_put_ruleset(new_dom);
	return ERR_PTR(err);
}

/*
 * The returned rule has the same lifetime as @ruleset.
 */
const struct landlock_rule *
landlock_find_rule(const struct landlock_ruleset *const ruleset,
		   const struct landlock_id id)
{
	const struct rb_root *root;
	const struct rb_node *node;

	root = get_root((struct landlock_ruleset *)ruleset, id.type);
	if (IS_ERR(root))
		return NULL;
	node = root->rb_node;

	while (node) {
		struct landlock_rule *this =
			rb_entry(node, struct landlock_rule, node);

		if (this->key.data == id.key.data)
			return this;
		if (this->key.data < id.key.data)
			node = node->rb_right;
		else
			node = node->rb_left;
	}
	return NULL;
}

/*
 * @layer_masks is read and may be updated according to the access request and
 * the matching rule.
 * @masks_array_size must be equal to ARRAY_SIZE(*layer_masks).
 *
 * Returns true if the request is allowed (i.e. relevant layer masks for the
 * request are empty).
 */
bool landlock_unmask_layers(const struct landlock_rule *const rule,
			    const access_mask_t access_request,
			    layer_mask_t (*const layer_masks)[],
			    const size_t masks_array_size)
{
	size_t layer_level;

	if (!access_request || !layer_masks)
		return true;
	if (!rule)
		return false;

	/*
	 * An access is granted if, for each policy layer, at least one rule
	 * encountered on the pathwalk grants the requested access, regardless
	 * of its position in the layer stack.  We must then check the
	 * remaining layers for each inode, from the first added layer to the
	 * last one.  When there are multiple requested accesses, the full set
	 * may not be granted by a single rule for a given policy layer, but by
	 * the union (binary OR) of multiple rules.
	 * E.g. /a/b <execute> + /a <read> => /a/b <execute + read>
	 */
	for (layer_level = 0; layer_level < rule->num_layers; layer_level++) {
		const struct landlock_layer *const layer =
			&rule->layers[layer_level];
		const layer_mask_t layer_bit = BIT_ULL(layer->level - 1);
		const unsigned long access_req = access_request;
		unsigned long access_bit;
		bool is_empty;

		/*
		 * Records in @layer_masks which layer grants access to each
		 * requested access.
		 */
		is_empty = true;
		for_each_set_bit(access_bit, &access_req, masks_array_size) {
			if (layer->access & BIT_ULL(access_bit))
				(*layer_masks)[access_bit] &= ~layer_bit;
			is_empty = is_empty && !(*layer_masks)[access_bit];
		}
		if (is_empty)
			return true;
	}
	return false;
}

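/*
 * Getter type to pick the relevant per-layer access mask (filesystem or
 * network) from a ruleset.
 */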
typedef access_mask_t
get_access_mask_t(const struct landlock_ruleset *const ruleset,
		  const u16 layer_level);

/**
 * landlock_init_layer_masks - Initialize layer masks from an access request
 *
 * Populates @layer_masks such that for each access right in @access_request,
 * the bits for all the layers are set where this access right is handled.
 *
 * @domain: The domain that defines the current restrictions.
 * @access_request: The requested access rights to check.
 * @layer_masks: It must contain %LANDLOCK_NUM_ACCESS_FS or
 * %LANDLOCK_NUM_ACCESS_NET elements according to @key_type.
 * @key_type: The key type to switch between access masks of different types.
 *
 * Returns: An access mask where each access right bit is set which is handled
 * in any of the active layers in @domain.
 */
access_mask_t
landlock_init_layer_masks(const struct landlock_ruleset *const domain,
			  const access_mask_t access_request,
			  layer_mask_t (*const layer_masks)[],
			  const enum landlock_key_type key_type)
{
	access_mask_t handled_accesses = 0;
	size_t layer_level, num_access;
	get_access_mask_t *get_access_mask;

	switch (key_type) {
	case LANDLOCK_KEY_INODE:
		get_access_mask = landlock_get_fs_access_mask;
		num_access = LANDLOCK_NUM_ACCESS_FS;
		break;

#if IS_ENABLED(CONFIG_INET)
	case LANDLOCK_KEY_NET_PORT:
		get_access_mask = landlock_get_net_access_mask;
		num_access = LANDLOCK_NUM_ACCESS_NET;
		break;
#endif /* IS_ENABLED(CONFIG_INET) */

	default:
		WARN_ON_ONCE(1);
		return 0;
	}

	memset(layer_masks, 0,
	       array_size(sizeof((*layer_masks)[0]), num_access));

	/* An empty access request can happen because of O_WRONLY | O_RDWR. */
	if (!access_request)
		return 0;

	/* Saves all handled accesses per layer. */
	for (layer_level = 0; layer_level < domain->num_layers; layer_level++) {
		const unsigned long access_req = access_request;
		const access_mask_t access_mask =
			get_access_mask(domain, layer_level);
		unsigned long access_bit;

		for_each_set_bit(access_bit, &access_req, num_access) {
			if (BIT_ULL(access_bit) & access_mask) {
				(*layer_masks)[access_bit] |=
					BIT_ULL(layer_level);
				handled_accesses |= BIT_ULL(access_bit);
			}
		}
	}
	return handled_accesses;
}