xref: /linux/security/landlock/fs.c (revision 5f2ff33e10843ef51275c8611bdb7b49537aba5d)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Landlock LSM - Filesystem management and hooks
 *
 * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
 * Copyright © 2018-2020 ANSSI
 */

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/bits.h>
#include <linux/compiler_types.h>
#include <linux/dcache.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/lsm_hooks.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/path.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/types.h>
#include <linux/wait_bit.h>
#include <linux/workqueue.h>
#include <uapi/linux/landlock.h>

#include "common.h"
#include "cred.h"
#include "fs.h"
#include "limits.h"
#include "object.h"
#include "ruleset.h"
#include "setup.h"

/* Underlying object management */

static void release_inode(struct landlock_object *const object)
        __releases(object->lock)
{
        struct inode *const inode = object->underobj;
        struct super_block *sb;

        if (!inode) {
                spin_unlock(&object->lock);
                return;
        }

        /*
         * Protects against concurrent use by hook_sb_delete() of the reference
         * to the underlying inode.
         */
        object->underobj = NULL;
        /*
         * Makes sure that if the filesystem is concurrently unmounted,
         * hook_sb_delete() will wait for us to finish iput().
         */
        sb = inode->i_sb;
        atomic_long_inc(&landlock_superblock(sb)->inode_refs);
        spin_unlock(&object->lock);
        /*
         * Because object->underobj was not NULL, hook_sb_delete() and
         * get_inode_object() guarantee that it is safe to reset
         * landlock_inode(inode)->object while it is not NULL.  It is therefore
         * not necessary to lock inode->i_lock.
         */
        rcu_assign_pointer(landlock_inode(inode)->object, NULL);
        /*
         * Now, new rules can safely be tied to @inode with get_inode_object().
         */

        iput(inode);
        if (atomic_long_dec_and_test(&landlock_superblock(sb)->inode_refs))
                wake_up_var(&landlock_superblock(sb)->inode_refs);
}

static const struct landlock_object_underops landlock_fs_underops = {
        .release = release_inode
};
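
/*
 * The .release callback is expected to be invoked by landlock_put_object()
 * (cf. object.c) with object->lock held, once the last usage reference to the
 * object is dropped.  This is why release_inode() above is annotated with
 * __releases(object->lock) and must unlock on every path before the object
 * memory is eventually freed.
 */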

/* Ruleset management */

static struct landlock_object *get_inode_object(struct inode *const inode)
{
        struct landlock_object *object, *new_object;
        struct landlock_inode_security *inode_sec = landlock_inode(inode);

        rcu_read_lock();
retry:
        object = rcu_dereference(inode_sec->object);
        if (object) {
                if (likely(refcount_inc_not_zero(&object->usage))) {
                        rcu_read_unlock();
                        return object;
                }
                /*
                 * We are racing with release_inode(); the object is going
                 * away.  Wait for release_inode(), then retry.
                 */
                spin_lock(&object->lock);
                spin_unlock(&object->lock);
                goto retry;
        }
        rcu_read_unlock();

        /*
         * If there is no object tied to @inode, then create a new one (without
         * holding any locks).
         */
        new_object = landlock_create_object(&landlock_fs_underops, inode);
        if (IS_ERR(new_object))
                return new_object;

        /*
         * Protects against concurrent calls to get_inode_object() or
         * hook_sb_delete().
         */
        spin_lock(&inode->i_lock);
        if (unlikely(rcu_access_pointer(inode_sec->object))) {
                /* Someone else just created the object, bail out and retry. */
                spin_unlock(&inode->i_lock);
                kfree(new_object);

                rcu_read_lock();
                goto retry;
        }

        /*
         * @inode will be released by hook_sb_delete() on its superblock
         * shutdown, or by release_inode() when no more rulesets reference the
         * related object.
         */
        ihold(inode);
        rcu_assign_pointer(inode_sec->object, new_object);
        spin_unlock(&inode->i_lock);
        return new_object;
}
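
/*
 * Synchronization summary for the inode<->object binding:
 * - landlock_inode(inode)->object is published and cleared under RCU;
 *   writers serialize either on inode->i_lock (creation above and the scan
 *   in hook_sb_delete()) or on object->lock (teardown in release_inode()).
 * - The empty spin_lock()/spin_unlock() pair in the retry path only waits
 *   for a concurrent release_inode() to finish clearing the pointer, so the
 *   next rcu_dereference() either sees NULL or a live object.
 */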

/* All access rights that can be tied to files. */
/* clang-format off */
#define ACCESS_FILE ( \
        LANDLOCK_ACCESS_FS_EXECUTE | \
        LANDLOCK_ACCESS_FS_WRITE_FILE | \
        LANDLOCK_ACCESS_FS_READ_FILE)
/* clang-format on */

/*
 * @path: Should have been checked by get_path_from_fd().
 */
int landlock_append_fs_rule(struct landlock_ruleset *const ruleset,
                            const struct path *const path,
                            access_mask_t access_rights)
{
        int err;
        struct landlock_object *object;

        /* Files only get access rights that make sense. */
        if (!d_is_dir(path->dentry) &&
            (access_rights | ACCESS_FILE) != ACCESS_FILE)
                return -EINVAL;
        if (WARN_ON_ONCE(ruleset->num_layers != 1))
                return -EINVAL;

        /* Transforms relative access rights to absolute ones. */
        access_rights |= LANDLOCK_MASK_ACCESS_FS & ~ruleset->fs_access_masks[0];
        object = get_inode_object(d_backing_inode(path->dentry));
        if (IS_ERR(object))
                return PTR_ERR(object);
        mutex_lock(&ruleset->lock);
        err = landlock_insert_rule(ruleset, object, access_rights);
        mutex_unlock(&ruleset->lock);
        /*
         * No need to check for an error because landlock_insert_rule()
         * increments the refcount for the new object if needed.
         */
        landlock_put_object(object);
        return err;
}
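
/*
 * For reference, the typical user space path into this function goes through
 * the landlock_add_rule(2) syscall with a LANDLOCK_RULE_PATH_BENEATH rule,
 * roughly as sketched below (error handling omitted; the syscall usually has
 * to be issued via syscall(2) when no libc wrapper is available):
 *
 *	struct landlock_path_beneath_attr attr = {
 *		.allowed_access = LANDLOCK_ACCESS_FS_READ_FILE |
 *				  LANDLOCK_ACCESS_FS_READ_DIR,
 *		.parent_fd = open("/usr", O_PATH | O_CLOEXEC),
 *	};
 *	landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH, &attr, 0);
 *
 * The file-only check above is what rejects, e.g., a rule carrying
 * LANDLOCK_ACCESS_FS_READ_DIR whose parent_fd refers to a regular file:
 * such a request returns -EINVAL.
 */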

/* Access-control management */

static inline u64 unmask_layers(const struct landlock_ruleset *const domain,
                                const struct path *const path,
                                const access_mask_t access_request,
                                u64 layer_mask)
{
        const struct landlock_rule *rule;
        const struct inode *inode;
        size_t i;

        if (d_is_negative(path->dentry))
                /* Ignores nonexistent leaves. */
                return layer_mask;
        inode = d_backing_inode(path->dentry);
        rcu_read_lock();
        rule = landlock_find_rule(
                domain, rcu_dereference(landlock_inode(inode)->object));
        rcu_read_unlock();
        if (!rule)
                return layer_mask;

        /*
         * An access is granted if, for each policy layer, at least one rule
         * encountered on the pathwalk grants the requested accesses,
         * regardless of their position in the layer stack.  We must then check
         * the remaining layers for each inode, from the first added layer to
         * the last one.
         */
        for (i = 0; i < rule->num_layers; i++) {
                const struct landlock_layer *const layer = &rule->layers[i];
                const u64 layer_level = BIT_ULL(layer->level - 1);

                /* Checks that the layer grants access to the full request. */
                if ((layer->access & access_request) == access_request) {
                        layer_mask &= ~layer_level;

                        if (layer_mask == 0)
                                return layer_mask;
                }
        }
        return layer_mask;
}
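
/*
 * Worked example of the layer_mask bookkeeping, assuming a domain with two
 * layers that both handle LANDLOCK_ACCESS_FS_READ_FILE: the caller starts
 * with layer_mask == 0b11.  If the inode carries a rule whose layer-1 entry
 * grants the full request, bit 0 is cleared (mask becomes 0b10) and the walk
 * continues toward the root until some ancestor's rule clears bit 1 as well,
 * at which point the mask reaches 0 and access is granted.
 */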

static int check_access_path(const struct landlock_ruleset *const domain,
                             const struct path *const path,
                             const access_mask_t access_request)
{
        bool allowed = false;
        struct path walker_path;
        u64 layer_mask;
        size_t i;

        /* Makes sure all layers can be checked. */
        BUILD_BUG_ON(BITS_PER_TYPE(layer_mask) < LANDLOCK_MAX_NUM_LAYERS);

        if (!access_request)
                return 0;
        if (WARN_ON_ONCE(!domain || !path))
                return 0;
        /*
         * Allows access to pseudo filesystems that will never be mountable
         * (e.g. sockfs, pipefs), but can still be reachable through
         * /proc/<pid>/fd/<file-descriptor>.
         */
        if ((path->dentry->d_sb->s_flags & SB_NOUSER) ||
            (d_is_positive(path->dentry) &&
             unlikely(IS_PRIVATE(d_backing_inode(path->dentry)))))
                return 0;
        if (WARN_ON_ONCE(domain->num_layers < 1))
                return -EACCES;

        /* Saves all layers handling a subset of requested accesses. */
        layer_mask = 0;
        for (i = 0; i < domain->num_layers; i++) {
                if (domain->fs_access_masks[i] & access_request)
                        layer_mask |= BIT_ULL(i);
        }
        /* An access request not handled by the domain is allowed. */
        if (layer_mask == 0)
                return 0;

        walker_path = *path;
        path_get(&walker_path);
        /*
         * We need to walk through the whole hierarchy so as not to miss any
         * relevant restriction.
         */
        while (true) {
                struct dentry *parent_dentry;

                layer_mask = unmask_layers(domain, &walker_path, access_request,
                                           layer_mask);
                if (layer_mask == 0) {
                        /* Stops when a rule from each layer grants access. */
                        allowed = true;
                        break;
                }

jump_up:
                if (walker_path.dentry == walker_path.mnt->mnt_root) {
                        if (follow_up(&walker_path)) {
                                /* Ignores hidden mount points. */
                                goto jump_up;
                        } else {
                                /*
                                 * Stops at the real root.  Denies access
                                 * because not all layers have granted access.
                                 */
                                allowed = false;
                                break;
                        }
                }
                if (unlikely(IS_ROOT(walker_path.dentry))) {
                        /*
                         * Stops at disconnected root directories.  Only allows
                         * access to internal filesystems (e.g. nsfs, which is
                         * reachable through /proc/<pid>/ns/<namespace>).
                         */
                        allowed = !!(walker_path.mnt->mnt_flags & MNT_INTERNAL);
                        break;
                }
                parent_dentry = dget_parent(walker_path.dentry);
                dput(walker_path.dentry);
                walker_path.dentry = parent_dentry;
        }
        path_put(&walker_path);
        return allowed ? 0 : -EACCES;
}
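
/*
 * Example walk, assuming a check on /usr/bin/true: unmask_layers() is
 * evaluated for /usr/bin/true, then /usr/bin, /usr and finally /, stopping
 * early as soon as layer_mask reaches 0.  At a mount root, follow_up()
 * transparently jumps to the mount point in the parent mount, so bind mounts
 * and overmounted directories are walked like any other ancestor.
 */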

static inline int current_check_access_path(const struct path *const path,
                                            const access_mask_t access_request)
{
        const struct landlock_ruleset *const dom =
                landlock_get_current_domain();

        if (!dom)
                return 0;
        return check_access_path(dom, path, access_request);
}

/* Inode hooks */

static void hook_inode_free_security(struct inode *const inode)
{
        /*
         * All inodes must already have been untied from their object by
         * release_inode() or hook_sb_delete().
         */
        WARN_ON_ONCE(landlock_inode(inode)->object);
}

/* Super-block hooks */

/*
 * Releases the inodes used in a security policy.
 *
 * Cf. fsnotify_unmount_inodes() and invalidate_inodes()
 */
static void hook_sb_delete(struct super_block *const sb)
{
        struct inode *inode, *prev_inode = NULL;

        if (!landlock_initialized)
                return;

        spin_lock(&sb->s_inode_list_lock);
        list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
                struct landlock_object *object;

                /* Only handles referenced inodes. */
                if (!atomic_read(&inode->i_count))
                        continue;

                /*
                 * Protects against concurrent modification of inode (e.g.
                 * from get_inode_object()).
                 */
                spin_lock(&inode->i_lock);
                /*
                 * Checks I_FREEING and I_WILL_FREE to protect against a race
                 * condition when release_inode() just called iput(), which
                 * could lead to a NULL dereference of inode->security or a
                 * second call to iput() for the same Landlock object.  Also
                 * checks I_NEW because such an inode cannot be tied to an
                 * object.
                 */
                if (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW)) {
                        spin_unlock(&inode->i_lock);
                        continue;
                }

                rcu_read_lock();
                object = rcu_dereference(landlock_inode(inode)->object);
                if (!object) {
                        rcu_read_unlock();
                        spin_unlock(&inode->i_lock);
                        continue;
                }
                /* Keeps a reference to this inode until the next loop walk. */
                __iget(inode);
                spin_unlock(&inode->i_lock);

                /*
                 * If there is no concurrent release_inode() ongoing, then we
                 * are in charge of calling iput() on this inode, otherwise we
                 * will just wait for it to finish.
                 */
                spin_lock(&object->lock);
                if (object->underobj == inode) {
                        object->underobj = NULL;
                        spin_unlock(&object->lock);
                        rcu_read_unlock();

                        /*
                         * Because object->underobj was not NULL,
                         * release_inode() and get_inode_object() guarantee
                         * that it is safe to reset
                         * landlock_inode(inode)->object while it is not NULL.
                         * It is therefore not necessary to lock inode->i_lock.
                         */
                        rcu_assign_pointer(landlock_inode(inode)->object, NULL);
                        /*
                         * At this point, we own the ihold() reference that was
                         * originally set up by get_inode_object() and the
                         * __iget() reference that we just set in this loop
                         * walk.  Therefore the following call to iput() will
                         * not sleep nor drop the inode because there are now
                         * at least two references to it.
                         */
                        iput(inode);
                } else {
                        spin_unlock(&object->lock);
                        rcu_read_unlock();
                }

                if (prev_inode) {
                        /*
                         * At this point, we still own the __iget() reference
                         * that we just set in this loop walk.  Therefore we
                         * can drop the list lock and know that the inode won't
                         * disappear from under us until the next loop walk.
                         */
                        spin_unlock(&sb->s_inode_list_lock);
                        /*
                         * We can now actually put the inode reference from the
                         * previous loop walk, which is not needed anymore.
                         */
                        iput(prev_inode);
                        cond_resched();
                        spin_lock(&sb->s_inode_list_lock);
                }
                prev_inode = inode;
        }
        spin_unlock(&sb->s_inode_list_lock);

        /* Puts the inode reference from the last loop walk, if any. */
        if (prev_inode)
                iput(prev_inode);
        /* Waits for pending iput() in release_inode(). */
        wait_var_event(&landlock_superblock(sb)->inode_refs,
                       !atomic_long_read(&landlock_superblock(sb)->inode_refs));
}
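
/*
 * The inode_refs counter is the handshake that makes the wait above safe:
 * release_inode() increments it before dropping object->lock and only
 * decrements it after its own iput(), waking this waiter via wake_up_var().
 * The superblock therefore cannot finish shutting down while a concurrent
 * release_inode() still holds an inode reference on it.
 */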

/*
 * Because a Landlock security policy is defined according to the filesystem
 * topology (i.e. the mount namespace), changing it may grant access to files
 * not previously allowed.
 *
 * To keep things simple, deny any filesystem topology modification by
 * landlocked processes.  Non-landlocked processes may still change the
 * namespace of a landlocked process, but this kind of threat must be handled
 * by a system-wide access-control security policy.
 *
 * This could be lifted in the future if Landlock can safely handle mount
 * namespace updates requested by a landlocked process.  Indeed, we could
 * update the current domain (which is currently read-only) by taking into
 * account the accesses of the source and the destination of a new mount point.
 * However, it would also require making all the child domains dynamically
 * inherit these new constraints.  Anyway, for backward compatibility reasons,
 * a dedicated user space option would be required (e.g. as a ruleset flag).
 */
static int hook_sb_mount(const char *const dev_name,
                         const struct path *const path, const char *const type,
                         const unsigned long flags, void *const data)
{
        if (!landlock_get_current_domain())
                return 0;
        return -EPERM;
}

static int hook_move_mount(const struct path *const from_path,
                           const struct path *const to_path)
{
        if (!landlock_get_current_domain())
                return 0;
        return -EPERM;
}

/*
 * Removing a mount point may reveal a previously hidden file hierarchy, and
 * thereby grant access to files that were previously forbidden.
 */
static int hook_sb_umount(struct vfsmount *const mnt, const int flags)
{
        if (!landlock_get_current_domain())
                return 0;
        return -EPERM;
}

static int hook_sb_remount(struct super_block *const sb, void *const mnt_opts)
{
        if (!landlock_get_current_domain())
                return 0;
        return -EPERM;
}

/*
 * pivot_root(2), like mount(2), changes the current mount namespace.  It must
 * therefore be forbidden for a landlocked process.
 *
 * However, chroot(2) may be allowed because it only changes the relative root
 * directory of the current process.  Moreover, it can be used to restrict the
 * view of the filesystem.
 */
static int hook_sb_pivotroot(const struct path *const old_path,
                             const struct path *const new_path)
{
        if (!landlock_get_current_domain())
                return 0;
        return -EPERM;
}

/* Path hooks */

static inline access_mask_t get_mode_access(const umode_t mode)
{
        switch (mode & S_IFMT) {
        case S_IFLNK:
                return LANDLOCK_ACCESS_FS_MAKE_SYM;
        case 0:
                /* A zero mode translates to S_IFREG. */
        case S_IFREG:
                return LANDLOCK_ACCESS_FS_MAKE_REG;
        case S_IFDIR:
                return LANDLOCK_ACCESS_FS_MAKE_DIR;
        case S_IFCHR:
                return LANDLOCK_ACCESS_FS_MAKE_CHAR;
        case S_IFBLK:
                return LANDLOCK_ACCESS_FS_MAKE_BLOCK;
        case S_IFIFO:
                return LANDLOCK_ACCESS_FS_MAKE_FIFO;
        case S_IFSOCK:
                return LANDLOCK_ACCESS_FS_MAKE_SOCK;
        default:
                WARN_ON_ONCE(1);
                return 0;
        }
}
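
/*
 * For instance, mknod("fifo", S_IFIFO | 0600, 0) ends up checked against
 * LANDLOCK_ACCESS_FS_MAKE_FIFO, while mknod("file", 0600, 0) (a zero file
 * type) is treated as a regular file creation and therefore checked against
 * LANDLOCK_ACCESS_FS_MAKE_REG.
 */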

/*
 * Creating multiple links or renaming may lead to privilege escalations if not
 * handled properly.  Indeed, we must be sure that the source doesn't gain more
 * privileges by being accessible from the destination.  This gets more complex
 * when dealing with multiple layers.  The whole picture can be seen as a
 * multilayer partial ordering problem.  A future version of Landlock will deal
 * with that.
 */
static int hook_path_link(struct dentry *const old_dentry,
                          const struct path *const new_dir,
                          struct dentry *const new_dentry)
{
        const struct landlock_ruleset *const dom =
                landlock_get_current_domain();

        if (!dom)
                return 0;
        /* The mount points are the same for old and new paths, cf. EXDEV. */
        if (old_dentry->d_parent != new_dir->dentry)
                /* Gracefully forbids reparenting. */
                return -EXDEV;
        if (unlikely(d_is_negative(old_dentry)))
                return -ENOENT;
        return check_access_path(
                dom, new_dir,
                get_mode_access(d_backing_inode(old_dentry)->i_mode));
}

static inline access_mask_t maybe_remove(const struct dentry *const dentry)
{
        if (d_is_negative(dentry))
                return 0;
        return d_is_dir(dentry) ? LANDLOCK_ACCESS_FS_REMOVE_DIR :
                                  LANDLOCK_ACCESS_FS_REMOVE_FILE;
}

static int hook_path_rename(const struct path *const old_dir,
                            struct dentry *const old_dentry,
                            const struct path *const new_dir,
                            struct dentry *const new_dentry)
{
        const struct landlock_ruleset *const dom =
                landlock_get_current_domain();

        if (!dom)
                return 0;
        /* The mount points are the same for old and new paths, cf. EXDEV. */
        if (old_dir->dentry != new_dir->dentry)
                /* Gracefully forbids reparenting. */
                return -EXDEV;
        if (unlikely(d_is_negative(old_dentry)))
                return -ENOENT;
        /* RENAME_EXCHANGE is handled because directories are the same. */
        return check_access_path(
                dom, old_dir,
                maybe_remove(old_dentry) | maybe_remove(new_dentry) |
                        get_mode_access(d_backing_inode(old_dentry)->i_mode));
}

static int hook_path_mkdir(const struct path *const dir,
                           struct dentry *const dentry, const umode_t mode)
{
        return current_check_access_path(dir, LANDLOCK_ACCESS_FS_MAKE_DIR);
}

static int hook_path_mknod(const struct path *const dir,
                           struct dentry *const dentry, const umode_t mode,
                           const unsigned int dev)
{
        const struct landlock_ruleset *const dom =
                landlock_get_current_domain();

        if (!dom)
                return 0;
        return check_access_path(dom, dir, get_mode_access(mode));
}

static int hook_path_symlink(const struct path *const dir,
                             struct dentry *const dentry,
                             const char *const old_name)
{
        return current_check_access_path(dir, LANDLOCK_ACCESS_FS_MAKE_SYM);
}

static int hook_path_unlink(const struct path *const dir,
                            struct dentry *const dentry)
{
        return current_check_access_path(dir, LANDLOCK_ACCESS_FS_REMOVE_FILE);
}

static int hook_path_rmdir(const struct path *const dir,
                           struct dentry *const dentry)
{
        return current_check_access_path(dir, LANDLOCK_ACCESS_FS_REMOVE_DIR);
}

/* File hooks */

static inline access_mask_t get_file_access(const struct file *const file)
{
        access_mask_t access = 0;

        if (file->f_mode & FMODE_READ) {
                /* A directory can only be opened in read mode. */
                if (S_ISDIR(file_inode(file)->i_mode))
                        return LANDLOCK_ACCESS_FS_READ_DIR;
                access = LANDLOCK_ACCESS_FS_READ_FILE;
        }
        if (file->f_mode & FMODE_WRITE)
                access |= LANDLOCK_ACCESS_FS_WRITE_FILE;
        /* __FMODE_EXEC is indeed part of f_flags, not f_mode. */
        if (file->f_flags & __FMODE_EXEC)
                access |= LANDLOCK_ACCESS_FS_EXECUTE;
        return access;
}
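
/*
 * For example, open("/etc/hosts", O_RDWR) maps to LANDLOCK_ACCESS_FS_READ_FILE
 * | LANDLOCK_ACCESS_FS_WRITE_FILE, while an execve() opens the binary with
 * __FMODE_EXEC set in f_flags and is therefore typically checked against
 * LANDLOCK_ACCESS_FS_EXECUTE.
 */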

static int hook_file_open(struct file *const file)
{
        const struct landlock_ruleset *const dom =
                landlock_get_current_domain();

        if (!dom)
                return 0;
        /*
         * Because a file may be opened with O_PATH, get_file_access() may
         * return 0.  This case will be handled with a future Landlock
         * evolution.
         */
        return check_access_path(dom, &file->f_path, get_file_access(file));
}
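
/*
 * In the O_PATH case mentioned above, get_file_access() returns 0 and
 * check_access_path() bails out early on its !access_request test, so such
 * opens are currently always allowed regardless of the domain.
 */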

static struct security_hook_list landlock_hooks[] __lsm_ro_after_init = {
        LSM_HOOK_INIT(inode_free_security, hook_inode_free_security),

        LSM_HOOK_INIT(sb_delete, hook_sb_delete),
        LSM_HOOK_INIT(sb_mount, hook_sb_mount),
        LSM_HOOK_INIT(move_mount, hook_move_mount),
        LSM_HOOK_INIT(sb_umount, hook_sb_umount),
        LSM_HOOK_INIT(sb_remount, hook_sb_remount),
        LSM_HOOK_INIT(sb_pivotroot, hook_sb_pivotroot),

        LSM_HOOK_INIT(path_link, hook_path_link),
        LSM_HOOK_INIT(path_rename, hook_path_rename),
        LSM_HOOK_INIT(path_mkdir, hook_path_mkdir),
        LSM_HOOK_INIT(path_mknod, hook_path_mknod),
        LSM_HOOK_INIT(path_symlink, hook_path_symlink),
        LSM_HOOK_INIT(path_unlink, hook_path_unlink),
        LSM_HOOK_INIT(path_rmdir, hook_path_rmdir),

        LSM_HOOK_INIT(file_open, hook_file_open),
};

__init void landlock_add_fs_hooks(void)
{
        security_add_hooks(landlock_hooks, ARRAY_SIZE(landlock_hooks),
                           LANDLOCK_NAME);
}