// SPDX-License-Identifier: GPL-2.0-only
/*
 * Landlock LSM - Ruleset management
 *
 * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
 * Copyright © 2018-2020 ANSSI
 */

#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/cleanup.h>
#include <linux/compiler_types.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/overflow.h>
#include <linux/rbtree.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "access.h"
#include "audit.h"
#include "domain.h"
#include "limits.h"
#include "object.h"
#include "ruleset.h"
static struct landlock_ruleset *create_ruleset(const u32 num_layers)
{
	struct landlock_ruleset *new_ruleset;

	new_ruleset =
		kzalloc(struct_size(new_ruleset, access_masks, num_layers),
			GFP_KERNEL_ACCOUNT);
	if (!new_ruleset)
		return ERR_PTR(-ENOMEM);
	refcount_set(&new_ruleset->usage, 1);
	mutex_init(&new_ruleset->lock);
	new_ruleset->root_inode = RB_ROOT;

#if IS_ENABLED(CONFIG_INET)
	new_ruleset->root_net_port = RB_ROOT;
#endif /* IS_ENABLED(CONFIG_INET) */

	new_ruleset->num_layers = num_layers;
	/*
	 * hierarchy = NULL
	 * num_rules = 0
	 * access_masks[] = 0
	 */
	return new_ruleset;
}

struct landlock_ruleset *
landlock_create_ruleset(const access_mask_t fs_access_mask,
			const access_mask_t net_access_mask,
			const access_mask_t scope_mask)
{
	struct landlock_ruleset *new_ruleset;

	/* Informs about a useless ruleset. */
	if (!fs_access_mask && !net_access_mask && !scope_mask)
		return ERR_PTR(-ENOMSG);
	new_ruleset = create_ruleset(1);
	if (IS_ERR(new_ruleset))
		return new_ruleset;
	if (fs_access_mask)
		landlock_add_fs_access_mask(new_ruleset, fs_access_mask, 0);
	if (net_access_mask)
		landlock_add_net_access_mask(new_ruleset, net_access_mask, 0);
	if (scope_mask)
		landlock_add_scope_mask(new_ruleset, scope_mask, 0);
	return new_ruleset;
}
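
/*
 * Example: how the landlock_create_ruleset(2) syscall path might build a
 * ruleset from a userspace struct landlock_ruleset_attr (illustrative sketch
 * only; the real syscall code also copies and sanitizes the attribute, and
 * the field names below mirror the UAPI attribute):
 *
 *	struct landlock_ruleset *ruleset;
 *
 *	ruleset = landlock_create_ruleset(attr.handled_access_fs,
 *					  attr.handled_access_net,
 *					  attr.scoped);
 *	if (IS_ERR(ruleset))
 *		return PTR_ERR(ruleset);
 */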

static void build_check_rule(void)
{
	const struct landlock_rule rule = {
		.num_layers = ~0,
	};

	/*
	 * Checks that .num_layers is large enough for at least
	 * LANDLOCK_MAX_NUM_LAYERS layers.
	 */
	BUILD_BUG_ON(rule.num_layers < LANDLOCK_MAX_NUM_LAYERS);
}
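
/*
 * The check above relies on saturating the field with ~0: a const object is
 * initialized with all bits set, so BUILD_BUG_ON() fails the build whenever
 * the field cannot hold its expected compile-time maximum.  A minimal,
 * hypothetical illustration of the same trick (not part of Landlock):
 *
 *	struct example {
 *		u32 counter:4;
 *	};
 *	const struct example ex = { .counter = ~0 };
 *
 *	BUILD_BUG_ON(ex.counter < 16);	(fails: a 4-bit field maxes out at 15)
 */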

static bool is_object_pointer(const enum landlock_key_type key_type)
{
	switch (key_type) {
	case LANDLOCK_KEY_INODE:
		return true;

#if IS_ENABLED(CONFIG_INET)
	case LANDLOCK_KEY_NET_PORT:
		return false;
#endif /* IS_ENABLED(CONFIG_INET) */

	default:
		WARN_ON_ONCE(1);
		return false;
	}
}

static struct landlock_rule *
create_rule(const struct landlock_id id,
	    const struct landlock_layer (*const layers)[], const u32 num_layers,
	    const struct landlock_layer *const new_layer)
{
	struct landlock_rule *new_rule;
	u32 new_num_layers;

	build_check_rule();
	if (new_layer) {
		/* Should already be checked by landlock_merge_ruleset(). */
		if (WARN_ON_ONCE(num_layers >= LANDLOCK_MAX_NUM_LAYERS))
			return ERR_PTR(-E2BIG);
		new_num_layers = num_layers + 1;
	} else {
		new_num_layers = num_layers;
	}
	new_rule = kzalloc(struct_size(new_rule, layers, new_num_layers),
			   GFP_KERNEL_ACCOUNT);
	if (!new_rule)
		return ERR_PTR(-ENOMEM);
	RB_CLEAR_NODE(&new_rule->node);
	if (is_object_pointer(id.type)) {
		/* This should have been caught by insert_rule(). */
		WARN_ON_ONCE(!id.key.object);
		landlock_get_object(id.key.object);
	}

	new_rule->key = id.key;
	new_rule->num_layers = new_num_layers;
	/* Copies the original layer stack. */
	memcpy(new_rule->layers, layers,
	       flex_array_size(new_rule, layers, num_layers));
	if (new_layer)
		/* Adds a copy of @new_layer on the layer stack. */
		new_rule->layers[new_rule->num_layers - 1] = *new_layer;
	return new_rule;
}
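
/*
 * Resulting layer stack when create_rule() merges a new layer on top of an
 * existing two-layer rule (access values are illustrative):
 *
 *	@layers (copied):	{ .level = 1, .access = LANDLOCK_ACCESS_FS_READ_FILE }
 *				{ .level = 2, .access = LANDLOCK_ACCESS_FS_READ_FILE }
 *	@new_layer (appended):	{ .level = 3, .access = LANDLOCK_ACCESS_FS_READ_DIR }
 *
 * The returned rule then has num_layers == 3, with the copied entries first
 * and @new_layer last.
 */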

static struct rb_root *get_root(struct landlock_ruleset *const ruleset,
				const enum landlock_key_type key_type)
{
	switch (key_type) {
	case LANDLOCK_KEY_INODE:
		return &ruleset->root_inode;

#if IS_ENABLED(CONFIG_INET)
	case LANDLOCK_KEY_NET_PORT:
		return &ruleset->root_net_port;
#endif /* IS_ENABLED(CONFIG_INET) */

	default:
		WARN_ON_ONCE(1);
		return ERR_PTR(-EINVAL);
	}
}

static void free_rule(struct landlock_rule *const rule,
		      const enum landlock_key_type key_type)
{
	might_sleep();
	if (!rule)
		return;
	if (is_object_pointer(key_type))
		landlock_put_object(rule->key.object);
	kfree(rule);
}

static void build_check_ruleset(void)
{
	const struct landlock_ruleset ruleset = {
		.num_rules = ~0,
		.num_layers = ~0,
	};

	BUILD_BUG_ON(ruleset.num_rules < LANDLOCK_MAX_NUM_RULES);
	BUILD_BUG_ON(ruleset.num_layers < LANDLOCK_MAX_NUM_LAYERS);
}

/**
 * insert_rule - Create and insert a rule in a ruleset
 *
 * @ruleset: The ruleset to be updated.
 * @id: The ID to build the new rule with.  The underlying kernel object, if
 *      any, must be held by the caller.
 * @layers: One or multiple layers to be copied into the new rule.
 * @num_layers: The number of @layers entries.
 *
 * When user space requests to add a new rule to a ruleset, @layers only
 * contains one entry and this entry is not assigned to any level.  In this
 * case, the new rule will extend @ruleset, similarly to a boolean OR between
 * access rights.
 *
 * When merging a ruleset into a domain, or copying a domain, @layers will be
 * added to @ruleset as new constraints, similarly to a boolean AND between
 * access rights.
 */
static int insert_rule(struct landlock_ruleset *const ruleset,
		       const struct landlock_id id,
		       const struct landlock_layer (*const layers)[],
		       const size_t num_layers)
{
	struct rb_node **walker_node;
	struct rb_node *parent_node = NULL;
	struct landlock_rule *new_rule;
	struct rb_root *root;

	might_sleep();
	lockdep_assert_held(&ruleset->lock);
	if (WARN_ON_ONCE(!layers))
		return -ENOENT;

	if (is_object_pointer(id.type) && WARN_ON_ONCE(!id.key.object))
		return -ENOENT;

	root = get_root(ruleset, id.type);
	if (IS_ERR(root))
		return PTR_ERR(root);

	walker_node = &root->rb_node;
	while (*walker_node) {
		struct landlock_rule *const this =
			rb_entry(*walker_node, struct landlock_rule, node);

		if (this->key.data != id.key.data) {
			parent_node = *walker_node;
			if (this->key.data < id.key.data)
				walker_node = &((*walker_node)->rb_right);
			else
				walker_node = &((*walker_node)->rb_left);
			continue;
		}

		/* Only a single-level layer should match an existing rule. */
		if (WARN_ON_ONCE(num_layers != 1))
			return -EINVAL;

		/* If there is a matching rule, updates it. */
		if ((*layers)[0].level == 0) {
			/*
			 * Extends access rights when the request comes from
			 * landlock_add_rule(2), i.e. @ruleset is not a domain.
			 */
			if (WARN_ON_ONCE(this->num_layers != 1))
				return -EINVAL;
			if (WARN_ON_ONCE(this->layers[0].level != 0))
				return -EINVAL;
			this->layers[0].access |= (*layers)[0].access;
			return 0;
		}

		if (WARN_ON_ONCE(this->layers[0].level == 0))
			return -EINVAL;

		/*
		 * Intersects access rights when it is a merge between a
		 * ruleset and a domain.
		 */
		new_rule = create_rule(id, &this->layers, this->num_layers,
				       &(*layers)[0]);
		if (IS_ERR(new_rule))
			return PTR_ERR(new_rule);
		rb_replace_node(&this->node, &new_rule->node, root);
		free_rule(this, id.type);
		return 0;
	}

	/* There is no match for @id. */
	build_check_ruleset();
	if (ruleset->num_rules >= LANDLOCK_MAX_NUM_RULES)
		return -E2BIG;
	new_rule = create_rule(id, layers, num_layers, NULL);
	if (IS_ERR(new_rule))
		return PTR_ERR(new_rule);
	rb_link_node(&new_rule->node, parent_node, walker_node);
	rb_insert_color(&new_rule->node, root);
	ruleset->num_rules++;
	return 0;
}
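
/*
 * Concrete effect of the two insert_rule() update modes on a rule keyed by
 * the same object (access values are illustrative):
 *
 * 1. landlock_add_rule(2) path ((*layers)[0].level == 0): the single existing
 *    layer is extended with a boolean OR:
 *	before:	{ .level = 0, .access = LANDLOCK_ACCESS_FS_READ_FILE }
 *	added:	{ .level = 0, .access = LANDLOCK_ACCESS_FS_WRITE_FILE }
 *	after:	{ .level = 0, .access = LANDLOCK_ACCESS_FS_READ_FILE |
 *					LANDLOCK_ACCESS_FS_WRITE_FILE }
 *
 * 2. Merge path ((*layers)[0].level != 0): the existing layer stack is kept
 *    and the new layer is appended, which acts as a boolean AND across layers
 *    when accesses are later checked:
 *	before:	{ .level = 1, .access = LANDLOCK_ACCESS_FS_READ_FILE }
 *	after:	{ .level = 1, .access = LANDLOCK_ACCESS_FS_READ_FILE },
 *		{ .level = 2, .access = LANDLOCK_ACCESS_FS_READ_DIR }
 */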

static void build_check_layer(void)
{
	const struct landlock_layer layer = {
		.level = ~0,
		.access = ~0,
	};

	/*
	 * Checks that .level and .access are large enough to contain their
	 * expected maximum values.
	 */
	BUILD_BUG_ON(layer.level < LANDLOCK_MAX_NUM_LAYERS);
	BUILD_BUG_ON(layer.access < LANDLOCK_MASK_ACCESS_FS);
}

/* @ruleset must be locked by the caller. */
int landlock_insert_rule(struct landlock_ruleset *const ruleset,
			 const struct landlock_id id,
			 const access_mask_t access)
{
	struct landlock_layer layers[] = { {
		.access = access,
		/* When @level is zero, insert_rule() extends @ruleset. */
		.level = 0,
	} };

	build_check_layer();
	return insert_rule(ruleset, id, &layers, ARRAY_SIZE(layers));
}
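
/*
 * Example: how a filesystem rule could be appended to a ruleset, loosely
 * based on the fs.c caller (sketch; object lookup, error handling and the
 * final landlock_put_object() are omitted, and @object is assumed to be a
 * held struct landlock_object for the target inode):
 *
 *	const struct landlock_id id = {
 *		.key.object = object,
 *		.type = LANDLOCK_KEY_INODE,
 *	};
 *	int err;
 *
 *	mutex_lock(&ruleset->lock);
 *	err = landlock_insert_rule(ruleset, id, LANDLOCK_ACCESS_FS_READ_FILE);
 *	mutex_unlock(&ruleset->lock);
 */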

static int merge_tree(struct landlock_ruleset *const dst,
		      struct landlock_ruleset *const src,
		      const enum landlock_key_type key_type)
{
	struct landlock_rule *walker_rule, *next_rule;
	struct rb_root *src_root;
	int err = 0;

	might_sleep();
	lockdep_assert_held(&dst->lock);
	lockdep_assert_held(&src->lock);

	src_root = get_root(src, key_type);
	if (IS_ERR(src_root))
		return PTR_ERR(src_root);

	/* Merges the @src tree. */
	rbtree_postorder_for_each_entry_safe(walker_rule, next_rule, src_root,
					     node) {
		struct landlock_layer layers[] = { {
			.level = dst->num_layers,
		} };
		const struct landlock_id id = {
			.key = walker_rule->key,
			.type = key_type,
		};

		if (WARN_ON_ONCE(walker_rule->num_layers != 1))
			return -EINVAL;

		if (WARN_ON_ONCE(walker_rule->layers[0].level != 0))
			return -EINVAL;

		layers[0].access = walker_rule->layers[0].access;

		err = insert_rule(dst, id, &layers, ARRAY_SIZE(layers));
		if (err)
			return err;
	}
	return err;
}

static int merge_ruleset(struct landlock_ruleset *const dst,
			 struct landlock_ruleset *const src)
{
	int err = 0;

	might_sleep();
	/* Should already be checked by landlock_merge_ruleset(). */
	if (WARN_ON_ONCE(!src))
		return 0;
	/* Only merge into a domain. */
	if (WARN_ON_ONCE(!dst || !dst->hierarchy))
		return -EINVAL;

	/* Locks @dst first because we are its only owner. */
	mutex_lock(&dst->lock);
	mutex_lock_nested(&src->lock, SINGLE_DEPTH_NESTING);

	/* Stacks the new layer. */
	if (WARN_ON_ONCE(src->num_layers != 1 || dst->num_layers < 1)) {
		err = -EINVAL;
		goto out_unlock;
	}
	dst->access_masks[dst->num_layers - 1] =
		landlock_upgrade_handled_access_masks(src->access_masks[0]);

	/* Merges the @src inode tree. */
	err = merge_tree(dst, src, LANDLOCK_KEY_INODE);
	if (err)
		goto out_unlock;

#if IS_ENABLED(CONFIG_INET)
	/* Merges the @src network port tree. */
	err = merge_tree(dst, src, LANDLOCK_KEY_NET_PORT);
	if (err)
		goto out_unlock;
#endif /* IS_ENABLED(CONFIG_INET) */

out_unlock:
	mutex_unlock(&src->lock);
	mutex_unlock(&dst->lock);
	return err;
}

static int inherit_tree(struct landlock_ruleset *const parent,
			struct landlock_ruleset *const child,
			const enum landlock_key_type key_type)
{
	struct landlock_rule *walker_rule, *next_rule;
	struct rb_root *parent_root;
	int err = 0;

	might_sleep();
	lockdep_assert_held(&parent->lock);
	lockdep_assert_held(&child->lock);

	parent_root = get_root(parent, key_type);
	if (IS_ERR(parent_root))
		return PTR_ERR(parent_root);

	/* Copies the @parent inode or network tree. */
	rbtree_postorder_for_each_entry_safe(walker_rule, next_rule,
					     parent_root, node) {
		const struct landlock_id id = {
			.key = walker_rule->key,
			.type = key_type,
		};

		err = insert_rule(child, id, &walker_rule->layers,
				  walker_rule->num_layers);
		if (err)
			return err;
	}
	return err;
}

static int inherit_ruleset(struct landlock_ruleset *const parent,
			   struct landlock_ruleset *const child)
{
	int err = 0;

	might_sleep();
	if (!parent)
		return 0;

	/* Locks @child first because we are its only owner. */
	mutex_lock(&child->lock);
	mutex_lock_nested(&parent->lock, SINGLE_DEPTH_NESTING);

	/* Copies the @parent inode tree. */
	err = inherit_tree(parent, child, LANDLOCK_KEY_INODE);
	if (err)
		goto out_unlock;

#if IS_ENABLED(CONFIG_INET)
	/* Copies the @parent network port tree. */
	err = inherit_tree(parent, child, LANDLOCK_KEY_NET_PORT);
	if (err)
		goto out_unlock;
#endif /* IS_ENABLED(CONFIG_INET) */

	if (WARN_ON_ONCE(child->num_layers <= parent->num_layers)) {
		err = -EINVAL;
		goto out_unlock;
	}
	/* Copies the parent layer stack and leaves a space for the new layer. */
	memcpy(child->access_masks, parent->access_masks,
	       flex_array_size(parent, access_masks, parent->num_layers));

	if (WARN_ON_ONCE(!parent->hierarchy)) {
		err = -EINVAL;
		goto out_unlock;
	}
	landlock_get_hierarchy(parent->hierarchy);
	child->hierarchy->parent = parent->hierarchy;

out_unlock:
	mutex_unlock(&parent->lock);
	mutex_unlock(&child->lock);
	return err;
}
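
/*
 * access_masks[] layout of a three-layer child inherited from a two-layer
 * parent (illustrative):
 *
 *	child->access_masks[0] == parent->access_masks[0]
 *	child->access_masks[1] == parent->access_masks[1]
 *	child->access_masks[2] == 0	(filled later by merge_ruleset())
 */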

static void free_ruleset(struct landlock_ruleset *const ruleset)
{
	struct landlock_rule *freeme, *next;

	might_sleep();
	rbtree_postorder_for_each_entry_safe(freeme, next, &ruleset->root_inode,
					     node)
		free_rule(freeme, LANDLOCK_KEY_INODE);

#if IS_ENABLED(CONFIG_INET)
	rbtree_postorder_for_each_entry_safe(freeme, next,
					     &ruleset->root_net_port, node)
		free_rule(freeme, LANDLOCK_KEY_NET_PORT);
#endif /* IS_ENABLED(CONFIG_INET) */

	landlock_put_hierarchy(ruleset->hierarchy);
	kfree(ruleset);
}

void landlock_put_ruleset(struct landlock_ruleset *const ruleset)
{
	might_sleep();
	if (ruleset && refcount_dec_and_test(&ruleset->usage))
		free_ruleset(ruleset);
}

static void free_ruleset_work(struct work_struct *const work)
{
	struct landlock_ruleset *ruleset;

	ruleset = container_of(work, struct landlock_ruleset, work_free);
	free_ruleset(ruleset);
}

/* Only called by hook_cred_free(). */
void landlock_put_ruleset_deferred(struct landlock_ruleset *const ruleset)
{
	if (ruleset && refcount_dec_and_test(&ruleset->usage)) {
		INIT_WORK(&ruleset->work_free, free_ruleset_work);
		schedule_work(&ruleset->work_free);
	}
}

/**
 * landlock_merge_ruleset - Merge a ruleset with a domain
 *
 * @parent: Parent domain.
 * @ruleset: New ruleset to be merged.
 *
 * The current task is requesting to be restricted.  The subjective credentials
 * must not be in an overridden state. cf. landlock_init_hierarchy_log().
 *
 * Returns the intersection of @parent and @ruleset, or returns @parent if
 * @ruleset is empty, or returns a duplicate of @ruleset if @parent is empty.
 */
struct landlock_ruleset *
landlock_merge_ruleset(struct landlock_ruleset *const parent,
		       struct landlock_ruleset *const ruleset)
{
	struct landlock_ruleset *new_dom __free(landlock_put_ruleset) = NULL;
	u32 num_layers;
	int err;

	might_sleep();
	if (WARN_ON_ONCE(!ruleset || parent == ruleset))
		return ERR_PTR(-EINVAL);

	if (parent) {
		if (parent->num_layers >= LANDLOCK_MAX_NUM_LAYERS)
			return ERR_PTR(-E2BIG);
		num_layers = parent->num_layers + 1;
	} else {
		num_layers = 1;
	}

	/* Creates a new domain... */
	new_dom = create_ruleset(num_layers);
	if (IS_ERR(new_dom))
		return new_dom;

	new_dom->hierarchy =
		kzalloc(sizeof(*new_dom->hierarchy), GFP_KERNEL_ACCOUNT);
	if (!new_dom->hierarchy)
		return ERR_PTR(-ENOMEM);

	refcount_set(&new_dom->hierarchy->usage, 1);

	/* ...as a child of @parent... */
	err = inherit_ruleset(parent, new_dom);
	if (err)
		return ERR_PTR(err);

	/* ...and including @ruleset. */
	err = merge_ruleset(new_dom, ruleset);
	if (err)
		return ERR_PTR(err);

	err = landlock_init_hierarchy_log(new_dom->hierarchy);
	if (err)
		return ERR_PTR(err);

	return no_free_ptr(new_dom);
}
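
/*
 * Example: the landlock_restrict_self(2) path is expected to create the new
 * domain roughly like this (sketch; credential preparation and installation
 * are omitted, and landlock_get_current_domain() is assumed to return the
 * caller's current domain or NULL):
 *
 *	struct landlock_ruleset *new_dom;
 *
 *	new_dom = landlock_merge_ruleset(landlock_get_current_domain(),
 *					 ruleset);
 *	if (IS_ERR(new_dom))
 *		return PTR_ERR(new_dom);
 *
 * The returned domain has parent->num_layers + 1 layers, or a single layer
 * when there is no parent domain.
 */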

/*
 * The returned rule has the same lifetime as @ruleset.
 */
const struct landlock_rule *
landlock_find_rule(const struct landlock_ruleset *const ruleset,
		   const struct landlock_id id)
{
	const struct rb_root *root;
	const struct rb_node *node;

	root = get_root((struct landlock_ruleset *)ruleset, id.type);
	if (IS_ERR(root))
		return NULL;
	node = root->rb_node;

	while (node) {
		struct landlock_rule *this =
			rb_entry(node, struct landlock_rule, node);

		if (this->key.data == id.key.data)
			return this;
		if (this->key.data < id.key.data)
			node = node->rb_right;
		else
			node = node->rb_left;
	}
	return NULL;
}

/*
 * @layer_masks is read and may be updated according to the access request and
 * the matching rule.
 * @masks_array_size must be equal to ARRAY_SIZE(*layer_masks).
 *
 * Returns true if the request is allowed (i.e. relevant layer masks for the
 * request are empty).
 */
bool landlock_unmask_layers(const struct landlock_rule *const rule,
			    const access_mask_t access_request,
			    layer_mask_t (*const layer_masks)[],
			    const size_t masks_array_size)
{
	size_t layer_level;

	if (!access_request || !layer_masks)
		return true;
	if (!rule)
		return false;

	/*
	 * An access is granted if, for each policy layer, at least one rule
	 * encountered on the pathwalk grants the requested access,
	 * regardless of its position in the layer stack.  We must then check
	 * the remaining layers for each inode, from the first added layer to
	 * the last one.  When there are multiple requested accesses, for each
	 * policy layer, the full set of requested accesses may not be granted
	 * by only one rule, but by the union (binary OR) of multiple rules.
	 * E.g. /a/b <execute> + /a <read> => /a/b <execute + read>
	 */
	for (layer_level = 0; layer_level < rule->num_layers; layer_level++) {
		const struct landlock_layer *const layer =
			&rule->layers[layer_level];
		const layer_mask_t layer_bit = BIT_ULL(layer->level - 1);
		const unsigned long access_req = access_request;
		unsigned long access_bit;
		bool is_empty;

		/*
		 * Records in @layer_masks which layer grants each requested
		 * access: a bit is cleared if the related layer grants the
		 * access.
		 */
		is_empty = true;
		for_each_set_bit(access_bit, &access_req, masks_array_size) {
			if (layer->access & BIT_ULL(access_bit))
				(*layer_masks)[access_bit] &= ~layer_bit;
			is_empty = is_empty && !(*layer_masks)[access_bit];
		}
		if (is_empty)
			return true;
	}
	return false;
}
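
/*
 * Worked example for landlock_unmask_layers() in a two-layer domain where
 * both layers handle LANDLOCK_ACCESS_FS_READ_FILE (values are illustrative):
 *
 *	initial:	(*layer_masks)[READ_FILE bit] == BIT(0) | BIT(1)
 *
 * A rule whose { .level = 1 } layer grants READ_FILE clears BIT(0); the mask
 * is still non-empty, so the function returns false and the path walk goes
 * on.  If a rule matched later on the walk grants READ_FILE in its
 * { .level = 2 } layer, BIT(1) is cleared as well, the relevant mask becomes
 * empty and the function returns true: the access is allowed.
 */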

typedef access_mask_t
get_access_mask_t(const struct landlock_ruleset *const ruleset,
		  const u16 layer_level);

/**
 * landlock_init_layer_masks - Initialize layer masks from an access request
 *
 * Populates @layer_masks such that for each access right in @access_request,
 * the bits for all the layers are set where this access right is handled.
 *
 * @domain: The domain that defines the current restrictions.
 * @access_request: The requested access rights to check.
 * @layer_masks: It must contain %LANDLOCK_NUM_ACCESS_FS or
 * %LANDLOCK_NUM_ACCESS_NET elements according to @key_type.
 * @key_type: The key type to switch between access masks of different types.
 *
 * Returns: An access mask with a bit set for each access right that is
 * handled by any of the active layers in @domain.
 */
access_mask_t
landlock_init_layer_masks(const struct landlock_ruleset *const domain,
			  const access_mask_t access_request,
			  layer_mask_t (*const layer_masks)[],
			  const enum landlock_key_type key_type)
{
	access_mask_t handled_accesses = 0;
	size_t layer_level, num_access;
	get_access_mask_t *get_access_mask;

	switch (key_type) {
	case LANDLOCK_KEY_INODE:
		get_access_mask = landlock_get_fs_access_mask;
		num_access = LANDLOCK_NUM_ACCESS_FS;
		break;

#if IS_ENABLED(CONFIG_INET)
	case LANDLOCK_KEY_NET_PORT:
		get_access_mask = landlock_get_net_access_mask;
		num_access = LANDLOCK_NUM_ACCESS_NET;
		break;
#endif /* IS_ENABLED(CONFIG_INET) */

	default:
		WARN_ON_ONCE(1);
		return 0;
	}

	memset(layer_masks, 0,
	       array_size(sizeof((*layer_masks)[0]), num_access));

	/* An empty access request can happen because of O_WRONLY | O_RDWR. */
	if (!access_request)
		return 0;

	/* Saves all handled accesses per layer. */
	for (layer_level = 0; layer_level < domain->num_layers; layer_level++) {
		const unsigned long access_req = access_request;
		const access_mask_t access_mask =
			get_access_mask(domain, layer_level);
		unsigned long access_bit;

		for_each_set_bit(access_bit, &access_req, num_access) {
			if (BIT_ULL(access_bit) & access_mask) {
				(*layer_masks)[access_bit] |=
					BIT_ULL(layer_level);
				handled_accesses |= BIT_ULL(access_bit);
			}
		}
	}
	return handled_accesses;
}
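
/*
 * Worked example for landlock_init_layer_masks(): in a two-layer domain where
 * only the second layer handles LANDLOCK_ACCESS_FS_READ_FILE and neither
 * layer handles LANDLOCK_ACCESS_FS_WRITE_FILE, a request for
 * READ_FILE | WRITE_FILE ends up as (illustrative):
 *
 *	(*layer_masks)[READ_FILE bit]  == BIT(1)
 *	(*layer_masks)[WRITE_FILE bit] == 0
 *	return value                   == LANDLOCK_ACCESS_FS_READ_FILE
 *
 * Only handled access rights show up in the returned mask, so unhandled
 * requests are simply ignored by the callers' checks.
 */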