// SPDX-License-Identifier: GPL-2.0-only
/*
 * Landlock LSM - Ruleset management
 *
 * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
 * Copyright © 2018-2020 ANSSI
 */

#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/cleanup.h>
#include <linux/compiler_types.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/overflow.h>
#include <linux/rbtree.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "access.h"
#include "domain.h"
#include "limits.h"
#include "object.h"
#include "ruleset.h"

static struct landlock_ruleset *create_ruleset(const u32 num_layers)
{
	struct landlock_ruleset *new_ruleset;

	new_ruleset =
		kzalloc(struct_size(new_ruleset, access_masks, num_layers),
			GFP_KERNEL_ACCOUNT);
	if (!new_ruleset)
		return ERR_PTR(-ENOMEM);
	refcount_set(&new_ruleset->usage, 1);
	mutex_init(&new_ruleset->lock);
	new_ruleset->root_inode = RB_ROOT;

#if IS_ENABLED(CONFIG_INET)
	new_ruleset->root_net_port = RB_ROOT;
#endif /* IS_ENABLED(CONFIG_INET) */

	new_ruleset->num_layers = num_layers;
	/*
	 * hierarchy = NULL
	 * num_rules = 0
	 * access_masks[] = 0
	 */
	return new_ruleset;
}

struct landlock_ruleset *
landlock_create_ruleset(const access_mask_t fs_access_mask,
			const access_mask_t net_access_mask,
			const access_mask_t scope_mask)
{
	struct landlock_ruleset *new_ruleset;

	/* Informs about a useless ruleset. */
	if (!fs_access_mask && !net_access_mask && !scope_mask)
		return ERR_PTR(-ENOMSG);
	new_ruleset = create_ruleset(1);
	if (IS_ERR(new_ruleset))
		return new_ruleset;
	if (fs_access_mask)
		landlock_add_fs_access_mask(new_ruleset, fs_access_mask, 0);
	if (net_access_mask)
		landlock_add_net_access_mask(new_ruleset, net_access_mask, 0);
	if (scope_mask)
		landlock_add_scope_mask(new_ruleset, scope_mask, 0);
	return new_ruleset;
}
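
/*
 * Example (illustrative sketch, not compiled): the landlock_create_ruleset(2)
 * syscall handler builds a one-layer ruleset from the userspace-provided
 * masks, e.g. handling two filesystem access rights:
 *
 *	struct landlock_ruleset *ruleset;
 *
 *	ruleset = landlock_create_ruleset(LANDLOCK_ACCESS_FS_EXECUTE |
 *						  LANDLOCK_ACCESS_FS_READ_FILE,
 *					  0, 0);
 *	if (IS_ERR(ruleset))
 *		return PTR_ERR(ruleset);
 *
 * The returned ruleset has num_layers == 1 and access_masks[0] handling both
 * filesystem rights; the network and scope masks stay empty.
 */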

static void build_check_rule(void)
{
	const struct landlock_rule rule = {
		.num_layers = ~0,
	};

	/*
	 * Checks that the .num_layers field is large enough to store
	 * LANDLOCK_MAX_NUM_LAYERS.
	 */
	BUILD_BUG_ON(rule.num_layers < LANDLOCK_MAX_NUM_LAYERS);
}

static bool is_object_pointer(const enum landlock_key_type key_type)
{
	switch (key_type) {
	case LANDLOCK_KEY_INODE:
		return true;

#if IS_ENABLED(CONFIG_INET)
	case LANDLOCK_KEY_NET_PORT:
		return false;
#endif /* IS_ENABLED(CONFIG_INET) */

	default:
		WARN_ON_ONCE(1);
		return false;
	}
}

static struct landlock_rule *
create_rule(const struct landlock_id id,
	    const struct landlock_layer (*const layers)[], const u32 num_layers,
	    const struct landlock_layer *const new_layer)
{
	struct landlock_rule *new_rule;
	u32 new_num_layers;

	build_check_rule();
	if (new_layer) {
		/* Should already be checked by landlock_merge_ruleset(). */
		if (WARN_ON_ONCE(num_layers >= LANDLOCK_MAX_NUM_LAYERS))
			return ERR_PTR(-E2BIG);
		new_num_layers = num_layers + 1;
	} else {
		new_num_layers = num_layers;
	}
	new_rule = kzalloc(struct_size(new_rule, layers, new_num_layers),
			   GFP_KERNEL_ACCOUNT);
	if (!new_rule)
		return ERR_PTR(-ENOMEM);
	RB_CLEAR_NODE(&new_rule->node);
	if (is_object_pointer(id.type)) {
		/* This should have been caught by insert_rule(). */
		WARN_ON_ONCE(!id.key.object);
		landlock_get_object(id.key.object);
	}

	new_rule->key = id.key;
	new_rule->num_layers = new_num_layers;
	/* Copies the original layer stack. */
	memcpy(new_rule->layers, layers,
	       flex_array_size(new_rule, layers, num_layers));
	if (new_layer)
		/* Adds a copy of @new_layer on the layer stack. */
		new_rule->layers[new_rule->num_layers - 1] = *new_layer;
	return new_rule;
}

static struct rb_root *get_root(struct landlock_ruleset *const ruleset,
				const enum landlock_key_type key_type)
{
	switch (key_type) {
	case LANDLOCK_KEY_INODE:
		return &ruleset->root_inode;

#if IS_ENABLED(CONFIG_INET)
	case LANDLOCK_KEY_NET_PORT:
		return &ruleset->root_net_port;
#endif /* IS_ENABLED(CONFIG_INET) */

	default:
		WARN_ON_ONCE(1);
		return ERR_PTR(-EINVAL);
	}
}

static void free_rule(struct landlock_rule *const rule,
		      const enum landlock_key_type key_type)
{
	might_sleep();
	if (!rule)
		return;
	if (is_object_pointer(key_type))
		landlock_put_object(rule->key.object);
	kfree(rule);
}

static void build_check_ruleset(void)
{
	const struct landlock_ruleset ruleset = {
		.num_rules = ~0,
		.num_layers = ~0,
	};

	BUILD_BUG_ON(ruleset.num_rules < LANDLOCK_MAX_NUM_RULES);
	BUILD_BUG_ON(ruleset.num_layers < LANDLOCK_MAX_NUM_LAYERS);
}

/**
 * insert_rule - Create and insert a rule in a ruleset
 *
 * @ruleset: The ruleset to be updated.
 * @id: The ID to build the new rule with.  The underlying kernel object, if
 *      any, must be held by the caller.
 * @layers: One or multiple layers to be copied into the new rule.
 * @num_layers: The number of @layers entries.
 *
 * When user space requests to add a new rule to a ruleset, @layers only
 * contains one entry and this entry is not assigned to any level.  In this
 * case, the new rule will extend @ruleset, similarly to a boolean OR between
 * access rights.
 *
 * When merging a ruleset into a domain, or copying a domain, @layers will be
 * added to @ruleset as new constraints, similarly to a boolean AND between
 * access rights.
 */
static int insert_rule(struct landlock_ruleset *const ruleset,
		       const struct landlock_id id,
		       const struct landlock_layer (*const layers)[],
		       const size_t num_layers)
{
	struct rb_node **walker_node;
	struct rb_node *parent_node = NULL;
	struct landlock_rule *new_rule;
	struct rb_root *root;

	might_sleep();
	lockdep_assert_held(&ruleset->lock);
	if (WARN_ON_ONCE(!layers))
		return -ENOENT;

	if (is_object_pointer(id.type) && WARN_ON_ONCE(!id.key.object))
		return -ENOENT;

	root = get_root(ruleset, id.type);
	if (IS_ERR(root))
		return PTR_ERR(root);

	walker_node = &root->rb_node;
	while (*walker_node) {
		struct landlock_rule *const this =
			rb_entry(*walker_node, struct landlock_rule, node);

		if (this->key.data != id.key.data) {
			parent_node = *walker_node;
			if (this->key.data < id.key.data)
				walker_node = &((*walker_node)->rb_right);
			else
				walker_node = &((*walker_node)->rb_left);
			continue;
		}

		/* Only a single-level layer should match an existing rule. */
		if (WARN_ON_ONCE(num_layers != 1))
			return -EINVAL;

		/* If there is a matching rule, updates it. */
		if ((*layers)[0].level == 0) {
			/*
			 * Extends access rights when the request comes from
			 * landlock_add_rule(2), i.e. @ruleset is not a domain.
			 */
			if (WARN_ON_ONCE(this->num_layers != 1))
				return -EINVAL;
			if (WARN_ON_ONCE(this->layers[0].level != 0))
				return -EINVAL;
			this->layers[0].access |= (*layers)[0].access;
			return 0;
		}

		if (WARN_ON_ONCE(this->layers[0].level == 0))
			return -EINVAL;

		/*
		 * Intersects access rights when it is a merge between a
		 * ruleset and a domain.
		 */
		new_rule = create_rule(id, &this->layers, this->num_layers,
				       &(*layers)[0]);
		if (IS_ERR(new_rule))
			return PTR_ERR(new_rule);
		rb_replace_node(&this->node, &new_rule->node, root);
		free_rule(this, id.type);
		return 0;
	}

	/* There is no match for @id. */
	build_check_ruleset();
	if (ruleset->num_rules >= LANDLOCK_MAX_NUM_RULES)
		return -E2BIG;
	new_rule = create_rule(id, layers, num_layers, NULL);
	if (IS_ERR(new_rule))
		return PTR_ERR(new_rule);
	rb_link_node(&new_rule->node, parent_node, walker_node);
	rb_insert_color(&new_rule->node, root);
	ruleset->num_rules++;
	return 0;
}
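
/*
 * Worked example (hypothetical masks): inserting a level-0 layer with
 * .access = R for an inode that already has a level-0 rule with .access = X
 * ORs the masks, leaving a single layer with .access = X | R (the
 * landlock_add_rule(2) case).  Inserting a level-2 layer for the same inode
 * during a merge instead allocates a replacement rule whose layer stack is
 * { level 1, level 2 }: each layer must then grant an access for the domain
 * to allow it (boolean AND across layers).
 */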

static void build_check_layer(void)
{
	const struct landlock_layer layer = {
		.level = ~0,
		.access = ~0,
	};

	/*
	 * Checks that .level and .access are large enough to contain their
	 * expected maximum values.
	 */
	BUILD_BUG_ON(layer.level < LANDLOCK_MAX_NUM_LAYERS);
	BUILD_BUG_ON(layer.access < LANDLOCK_MASK_ACCESS_FS);
}

/* @ruleset must be locked by the caller. */
int landlock_insert_rule(struct landlock_ruleset *const ruleset,
			 const struct landlock_id id,
			 const access_mask_t access)
{
	struct landlock_layer layers[] = { {
		.access = access,
		/* When @level is zero, insert_rule() extends @ruleset. */
		.level = 0,
	} };

	build_check_layer();
	return insert_rule(ruleset, id, &layers, ARRAY_SIZE(layers));
}
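
/*
 * Usage sketch (assuming the caller, like landlock_append_fs_rule(), already
 * holds a reference on @object):
 *
 *	const struct landlock_id id = {
 *		.key.object = object,
 *		.type = LANDLOCK_KEY_INODE,
 *	};
 *
 *	mutex_lock(&ruleset->lock);
 *	err = landlock_insert_rule(ruleset, id, LANDLOCK_ACCESS_FS_READ_FILE);
 *	mutex_unlock(&ruleset->lock);
 */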

static int merge_tree(struct landlock_ruleset *const dst,
		      struct landlock_ruleset *const src,
		      const enum landlock_key_type key_type)
{
	struct landlock_rule *walker_rule, *next_rule;
	struct rb_root *src_root;
	int err = 0;

	might_sleep();
	lockdep_assert_held(&dst->lock);
	lockdep_assert_held(&src->lock);

	src_root = get_root(src, key_type);
	if (IS_ERR(src_root))
		return PTR_ERR(src_root);

	/* Merges the @src tree. */
	rbtree_postorder_for_each_entry_safe(walker_rule, next_rule, src_root,
					     node) {
		struct landlock_layer layers[] = { {
			.level = dst->num_layers,
		} };
		const struct landlock_id id = {
			.key = walker_rule->key,
			.type = key_type,
		};

		if (WARN_ON_ONCE(walker_rule->num_layers != 1))
			return -EINVAL;

		if (WARN_ON_ONCE(walker_rule->layers[0].level != 0))
			return -EINVAL;

		layers[0].access = walker_rule->layers[0].access;

		err = insert_rule(dst, id, &layers, ARRAY_SIZE(layers));
		if (err)
			return err;
	}
	return err;
}

static int merge_ruleset(struct landlock_ruleset *const dst,
			 struct landlock_ruleset *const src)
{
	int err = 0;

	might_sleep();
	/* Should already be checked by landlock_merge_ruleset(). */
	if (WARN_ON_ONCE(!src))
		return 0;
	/* Only merge into a domain. */
	if (WARN_ON_ONCE(!dst || !dst->hierarchy))
		return -EINVAL;

	/* Locks @dst first because we are its only owner. */
	mutex_lock(&dst->lock);
	mutex_lock_nested(&src->lock, SINGLE_DEPTH_NESTING);

	/* Stacks the new layer. */
	if (WARN_ON_ONCE(src->num_layers != 1 || dst->num_layers < 1)) {
		err = -EINVAL;
		goto out_unlock;
	}
	dst->access_masks[dst->num_layers - 1] =
		landlock_upgrade_handled_access_masks(src->access_masks[0]);

	/* Merges the @src inode tree. */
	err = merge_tree(dst, src, LANDLOCK_KEY_INODE);
	if (err)
		goto out_unlock;

#if IS_ENABLED(CONFIG_INET)
	/* Merges the @src network port tree. */
	err = merge_tree(dst, src, LANDLOCK_KEY_NET_PORT);
	if (err)
		goto out_unlock;
#endif /* IS_ENABLED(CONFIG_INET) */

out_unlock:
	mutex_unlock(&src->lock);
	mutex_unlock(&dst->lock);
	return err;
}
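
/*
 * Layer stacking sketch (hypothetical three-layer domain): after
 * merge_ruleset(), access_masks[0] and access_masks[1] of @dst come from the
 * parent domain via inherit_ruleset(), while access_masks[2] is the upgraded
 * copy of the single mask set of @src, and every rule of @src now carries a
 * level-3 layer in @dst.
 */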

static int inherit_tree(struct landlock_ruleset *const parent,
			struct landlock_ruleset *const child,
			const enum landlock_key_type key_type)
{
	struct landlock_rule *walker_rule, *next_rule;
	struct rb_root *parent_root;
	int err = 0;

	might_sleep();
	lockdep_assert_held(&parent->lock);
	lockdep_assert_held(&child->lock);

	parent_root = get_root(parent, key_type);
	if (IS_ERR(parent_root))
		return PTR_ERR(parent_root);

	/* Copies the @parent inode or network tree. */
	rbtree_postorder_for_each_entry_safe(walker_rule, next_rule,
					     parent_root, node) {
		const struct landlock_id id = {
			.key = walker_rule->key,
			.type = key_type,
		};

		err = insert_rule(child, id, &walker_rule->layers,
				  walker_rule->num_layers);
		if (err)
			return err;
	}
	return err;
}

static int inherit_ruleset(struct landlock_ruleset *const parent,
			   struct landlock_ruleset *const child)
{
	int err = 0;

	might_sleep();
	if (!parent)
		return 0;

	/* Locks @child first because we are its only owner. */
	mutex_lock(&child->lock);
	mutex_lock_nested(&parent->lock, SINGLE_DEPTH_NESTING);

	/* Copies the @parent inode tree. */
	err = inherit_tree(parent, child, LANDLOCK_KEY_INODE);
	if (err)
		goto out_unlock;

#if IS_ENABLED(CONFIG_INET)
	/* Copies the @parent network port tree. */
	err = inherit_tree(parent, child, LANDLOCK_KEY_NET_PORT);
	if (err)
		goto out_unlock;
#endif /* IS_ENABLED(CONFIG_INET) */

	if (WARN_ON_ONCE(child->num_layers <= parent->num_layers)) {
		err = -EINVAL;
		goto out_unlock;
	}
	/* Copies the parent layer stack and leaves a space for the new layer. */
	memcpy(child->access_masks, parent->access_masks,
	       flex_array_size(parent, access_masks, parent->num_layers));

	if (WARN_ON_ONCE(!parent->hierarchy)) {
		err = -EINVAL;
		goto out_unlock;
	}
	landlock_get_hierarchy(parent->hierarchy);
	child->hierarchy->parent = parent->hierarchy;

out_unlock:
	mutex_unlock(&parent->lock);
	mutex_unlock(&child->lock);
	return err;
}
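
/*
 * Layout sketch (hypothetical two-layer parent): after inherit_ruleset(),
 * child->access_masks[0] and child->access_masks[1] are copies of the parent
 * masks, and child->access_masks[2] stays zeroed until merge_ruleset() fills
 * it with the masks of the new ruleset.
 */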

static void free_ruleset(struct landlock_ruleset *const ruleset)
{
	struct landlock_rule *freeme, *next;

	might_sleep();
	rbtree_postorder_for_each_entry_safe(freeme, next, &ruleset->root_inode,
					     node)
		free_rule(freeme, LANDLOCK_KEY_INODE);

#if IS_ENABLED(CONFIG_INET)
	rbtree_postorder_for_each_entry_safe(freeme, next,
					     &ruleset->root_net_port, node)
		free_rule(freeme, LANDLOCK_KEY_NET_PORT);
#endif /* IS_ENABLED(CONFIG_INET) */

	landlock_put_hierarchy(ruleset->hierarchy);
	kfree(ruleset);
}

void landlock_put_ruleset(struct landlock_ruleset *const ruleset)
{
	might_sleep();
	if (ruleset && refcount_dec_and_test(&ruleset->usage))
		free_ruleset(ruleset);
}

static void free_ruleset_work(struct work_struct *const work)
{
	struct landlock_ruleset *ruleset;

	ruleset = container_of(work, struct landlock_ruleset, work_free);
	free_ruleset(ruleset);
}

/*
 * Only called by hook_cred_free(), which may run in a context where sleeping
 * is not allowed, whereas free_ruleset() may sleep: freeing is therefore
 * deferred to a worker.
 */
void landlock_put_ruleset_deferred(struct landlock_ruleset *const ruleset)
{
	if (ruleset && refcount_dec_and_test(&ruleset->usage)) {
		INIT_WORK(&ruleset->work_free, free_ruleset_work);
		schedule_work(&ruleset->work_free);
	}
}

/**
 * landlock_merge_ruleset - Merge a ruleset with a domain
 *
 * @parent: Parent domain.
 * @ruleset: New ruleset to be merged.
 *
 * The current task is requesting to be restricted.  The subjective credentials
 * must not be in an overridden state.  Cf. landlock_init_hierarchy_log().
 *
 * Returns the intersection of @parent and @ruleset, or returns @parent if
 * @ruleset is empty, or returns a duplicate of @ruleset if @parent is empty.
 */
struct landlock_ruleset *
landlock_merge_ruleset(struct landlock_ruleset *const parent,
		       struct landlock_ruleset *const ruleset)
{
	struct landlock_ruleset *new_dom __free(landlock_put_ruleset) = NULL;
	u32 num_layers;
	int err;

	might_sleep();
	if (WARN_ON_ONCE(!ruleset || parent == ruleset))
		return ERR_PTR(-EINVAL);

	if (parent) {
		if (parent->num_layers >= LANDLOCK_MAX_NUM_LAYERS)
			return ERR_PTR(-E2BIG);
		num_layers = parent->num_layers + 1;
	} else {
		num_layers = 1;
	}

	/* Creates a new domain... */
	new_dom = create_ruleset(num_layers);
	if (IS_ERR(new_dom))
		return new_dom;

	new_dom->hierarchy =
		kzalloc(sizeof(*new_dom->hierarchy), GFP_KERNEL_ACCOUNT);
	if (!new_dom->hierarchy)
		return ERR_PTR(-ENOMEM);

	refcount_set(&new_dom->hierarchy->usage, 1);

	/* ...as a child of @parent... */
	err = inherit_ruleset(parent, new_dom);
	if (err)
		return ERR_PTR(err);

	/* ...and including @ruleset. */
	err = merge_ruleset(new_dom, ruleset);
	if (err)
		return ERR_PTR(err);

	err = landlock_init_hierarchy_log(new_dom->hierarchy);
	if (err)
		return ERR_PTR(err);

	return no_free_ptr(new_dom);
}
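
/*
 * Usage sketch, close to the landlock_restrict_self(2) path in syscall.c
 * (reference handling elided):
 *
 *	new_dom = landlock_merge_ruleset(landlock_get_current_domain(),
 *					 ruleset);
 *	if (IS_ERR(new_dom))
 *		return PTR_ERR(new_dom);
 *
 * The new domain then replaces the current credentials' domain.
 */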

/*
 * The returned rule has the same lifetime as @ruleset.
 */
const struct landlock_rule *
landlock_find_rule(const struct landlock_ruleset *const ruleset,
		   const struct landlock_id id)
{
	const struct rb_root *root;
	const struct rb_node *node;

	root = get_root((struct landlock_ruleset *)ruleset, id.type);
	if (IS_ERR(root))
		return NULL;
	node = root->rb_node;

	while (node) {
		struct landlock_rule *this =
			rb_entry(node, struct landlock_rule, node);

		if (this->key.data == id.key.data)
			return this;
		if (this->key.data < id.key.data)
			node = node->rb_right;
		else
			node = node->rb_left;
	}
	return NULL;
}

/*
 * @layer_masks is read and may be updated according to the access request and
 * the matching rule.
 * @masks_array_size must be equal to ARRAY_SIZE(*layer_masks).
 *
 * Returns true if the request is allowed (i.e. relevant layer masks for the
 * request are empty).
 */
bool landlock_unmask_layers(const struct landlock_rule *const rule,
			    const access_mask_t access_request,
			    layer_mask_t (*const layer_masks)[],
			    const size_t masks_array_size)
{
	size_t layer_level;

	if (!access_request || !layer_masks)
		return true;
	if (!rule)
		return false;

	/*
	 * An access is granted if, for each policy layer, at least one rule
	 * encountered on the pathwalk grants the requested access,
	 * regardless of its position in the layer stack.  We must then check
	 * the remaining layers for each inode, from the first added layer to
	 * the last one.  When there are multiple requested accesses, for each
	 * policy layer, the full set of requested accesses may not be granted
	 * by only one rule, but by the union (binary OR) of multiple rules.
	 * E.g. /a/b <execute> + /a <read> => /a/b <execute + read>
	 */
	for (layer_level = 0; layer_level < rule->num_layers; layer_level++) {
		const struct landlock_layer *const layer =
			&rule->layers[layer_level];
		const layer_mask_t layer_bit = BIT_ULL(layer->level - 1);
		const unsigned long access_req = access_request;
		unsigned long access_bit;
		bool is_empty;

		/*
		 * Records in @layer_masks which layers grant each requested
		 * access: a bit is cleared when the matching layer grants the
		 * access.
		 */
		is_empty = true;
		for_each_set_bit(access_bit, &access_req, masks_array_size) {
			if (layer->access & BIT_ULL(access_bit))
				(*layer_masks)[access_bit] &= ~layer_bit;
			is_empty = is_empty && !(*layer_masks)[access_bit];
		}
		if (is_empty)
			return true;
	}
	return false;
}
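
/*
 * Worked example (hypothetical two-layer domain): the walk finds a rule with
 * layers[] = { { .level = 1, .access = R }, { .level = 2, .access = R } }
 * while (*layer_masks)[R] == 0b11 (both layers still deny R).  The level-1
 * layer clears bit 0, the level-2 layer clears bit 1, the mask becomes empty
 * and the function returns true.  If the level-2 layer only granted X, bit 1
 * would stay set and the caller would have to keep walking towards the root.
 */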

typedef access_mask_t
get_access_mask_t(const struct landlock_ruleset *const ruleset,
		  const u16 layer_level);

/**
 * landlock_init_layer_masks - Initialize layer masks from an access request
 *
 * Populates @layer_masks such that for each access right in @access_request,
 * the bits for all the layers are set where this access right is handled.
 *
 * @domain: The domain that defines the current restrictions.
 * @access_request: The requested access rights to check.
 * @layer_masks: It must contain %LANDLOCK_NUM_ACCESS_FS or
 * %LANDLOCK_NUM_ACCESS_NET elements according to @key_type.
 * @key_type: The key type to switch between access masks of different types.
 *
 * Returns: An access mask where each access right bit is set which is handled
 * in any of the active layers in @domain.
 */
access_mask_t
landlock_init_layer_masks(const struct landlock_ruleset *const domain,
			  const access_mask_t access_request,
			  layer_mask_t (*const layer_masks)[],
			  const enum landlock_key_type key_type)
{
	access_mask_t handled_accesses = 0;
	size_t layer_level, num_access;
	get_access_mask_t *get_access_mask;

	switch (key_type) {
	case LANDLOCK_KEY_INODE:
		get_access_mask = landlock_get_fs_access_mask;
		num_access = LANDLOCK_NUM_ACCESS_FS;
		break;

#if IS_ENABLED(CONFIG_INET)
	case LANDLOCK_KEY_NET_PORT:
		get_access_mask = landlock_get_net_access_mask;
		num_access = LANDLOCK_NUM_ACCESS_NET;
		break;
#endif /* IS_ENABLED(CONFIG_INET) */

	default:
		WARN_ON_ONCE(1);
		return 0;
	}

	memset(layer_masks, 0,
	       array_size(sizeof((*layer_masks)[0]), num_access));

	/* An empty access request can happen because of O_WRONLY | O_RDWR. */
	if (!access_request)
		return 0;

	/* Saves all handled accesses per layer. */
	for (layer_level = 0; layer_level < domain->num_layers; layer_level++) {
		const unsigned long access_req = access_request;
		const access_mask_t access_mask =
			get_access_mask(domain, layer_level);
		unsigned long access_bit;

		for_each_set_bit(access_bit, &access_req, num_access) {
			if (BIT_ULL(access_bit) & access_mask) {
				(*layer_masks)[access_bit] |=
					BIT_ULL(layer_level);
				handled_accesses |= BIT_ULL(access_bit);
			}
		}
	}
	return handled_accesses;
}
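
/*
 * Worked example (hypothetical two-layer domain): if layer 0 handles R and
 * layer 1 handles R | W, a request for R | W sets (*layer_masks)[R bit] to
 * 0b11 and (*layer_masks)[W bit] to 0b10, and the function returns R | W
 * since both rights are handled by at least one layer.
 */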