// SPDX-License-Identifier: GPL-2.0-only
/*
 * Landlock LSM - Filesystem management and hooks
 *
 * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
 * Copyright © 2018-2020 ANSSI
 * Copyright © 2021-2022 Microsoft Corporation
 * Copyright © 2022 Günther Noack <gnoack3000@gmail.com>
 * Copyright © 2023-2024 Google LLC
 */

#include <asm/ioctls.h>
#include <kunit/test.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/bits.h>
#include <linux/compiler_types.h>
#include <linux/dcache.h>
#include <linux/err.h>
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/lsm_hooks.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/path.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/types.h>
#include <linux/wait_bit.h>
#include <linux/workqueue.h>
#include <uapi/linux/fiemap.h>
#include <uapi/linux/landlock.h>

#include "common.h"
#include "cred.h"
#include "fs.h"
#include "limits.h"
#include "object.h"
#include "ruleset.h"
#include "setup.h"

/* Underlying object management */

static void release_inode(struct landlock_object *const object)
	__releases(object->lock)
{
	struct inode *const inode = object->underobj;
	struct super_block *sb;

	if (!inode) {
		spin_unlock(&object->lock);
		return;
	}

	/*
	 * Protects against concurrent use by hook_sb_delete() of the reference
	 * to the underlying inode.
	 */
	object->underobj = NULL;
	/*
	 * Makes sure that if the filesystem is concurrently unmounted,
	 * hook_sb_delete() will wait for us to finish iput().
	 */
	sb = inode->i_sb;
	atomic_long_inc(&landlock_superblock(sb)->inode_refs);
	spin_unlock(&object->lock);
	/*
	 * Because object->underobj was not NULL, hook_sb_delete() and
	 * get_inode_object() guarantee that it is safe to reset
	 * landlock_inode(inode)->object while it is not NULL. It is therefore
	 * not necessary to lock inode->i_lock.
	 */
	rcu_assign_pointer(landlock_inode(inode)->object, NULL);
	/*
	 * Now, new rules can safely be tied to @inode with get_inode_object().
	 */

	iput(inode);
	if (atomic_long_dec_and_test(&landlock_superblock(sb)->inode_refs))
		wake_up_var(&landlock_superblock(sb)->inode_refs);
}

static const struct landlock_object_underops landlock_fs_underops = {
	.release = release_inode
};

/* IOCTL helpers */

/**
 * is_masked_device_ioctl - Determine whether an IOCTL command is always
 * permitted with Landlock for device files. These commands cannot be
 * restricted on device files by enforcing a Landlock policy.
 *
 * @cmd: The IOCTL command that is supposed to be run.
 *
 * By default, any IOCTL on a device file requires the
 * LANDLOCK_ACCESS_FS_IOCTL_DEV right. However, we blanket-permit some
 * commands, if:
 *
 * 1. The command is implemented in fs/ioctl.c's do_vfs_ioctl(),
 *    not in f_ops->unlocked_ioctl() or f_ops->compat_ioctl().
 *
 * 2. The command is harmless when invoked on devices.
 *
 * We also permit commands that do not make sense for devices, but where the
 * do_vfs_ioctl() implementation returns a more conventional error code.
 *
 * Any new IOCTL commands that are implemented in fs/ioctl.c's do_vfs_ioctl()
 * should be considered for inclusion here.
 *
 * Returns: true if the IOCTL @cmd cannot be restricted with Landlock for
 * device files.
 */
static __attribute_const__ bool is_masked_device_ioctl(const unsigned int cmd)
{
	switch (cmd) {
	/*
	 * FIOCLEX, FIONCLEX, FIONBIO and FIOASYNC manipulate the FD's
	 * close-on-exec and the file's buffered-IO and async flags. These
	 * operations are also available through fcntl(2), and are
	 * unconditionally permitted in Landlock.
	 */
	case FIOCLEX:
	case FIONCLEX:
	case FIONBIO:
	case FIOASYNC:
	/*
	 * FIOQSIZE queries the size of a regular file, directory, or link.
	 *
	 * We still permit it, because it always returns -ENOTTY for
	 * other file types.
	 */
	case FIOQSIZE:
	/*
	 * FIFREEZE and FITHAW freeze and thaw the file system which the
	 * given file belongs to. Requires CAP_SYS_ADMIN.
	 *
	 * These commands operate on the file system's superblock rather
	 * than on the file itself. The same operations can also be
	 * done through any other file or directory on the same file
	 * system, so it is safe to permit these.
	 */
	case FIFREEZE:
	case FITHAW:
	/*
	 * FS_IOC_FIEMAP queries information about the allocation of
	 * blocks within a file.
	 *
	 * This IOCTL command only makes sense for regular files and is
	 * not implemented by devices. It is harmless to permit.
	 */
	case FS_IOC_FIEMAP:
	/*
	 * FIGETBSZ queries the file system's block size for a file or
	 * directory.
	 *
	 * This command operates on the file system's superblock rather
	 * than on the file itself. The same operation can also be done
	 * through any other file or directory on the same file system,
	 * so it is safe to permit it.
	 */
	case FIGETBSZ:
	/*
	 * FICLONE, FICLONERANGE and FIDEDUPERANGE make files share
	 * their underlying storage ("reflink") between source and
	 * destination FDs, on file systems which support that.
	 *
	 * These IOCTL commands only apply to regular files
	 * and are harmless to permit for device files.
	 */
	case FICLONE:
	case FICLONERANGE:
	case FIDEDUPERANGE:
	/*
	 * FS_IOC_GETFSUUID and FS_IOC_GETFSSYSFSPATH both operate on
	 * the file system superblock, not on the specific file, so
	 * these operations are available through any other file on the
	 * same file system as well.
	 */
	case FS_IOC_GETFSUUID:
	case FS_IOC_GETFSSYSFSPATH:
		return true;

	/*
	 * FIONREAD, FS_IOC_GETFLAGS, FS_IOC_SETFLAGS, FS_IOC_FSGETXATTR and
	 * FS_IOC_FSSETXATTR are forwarded to device implementations.
	 */

	/*
	 * file_ioctl() commands (FIBMAP, FS_IOC_RESVSP, FS_IOC_RESVSP64,
	 * FS_IOC_UNRESVSP, FS_IOC_UNRESVSP64 and FS_IOC_ZERO_RANGE) are
	 * forwarded to device implementations, so not permitted.
	 */

	/* Other commands are guarded by the access right. */
	default:
		return false;
	}
}
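
/*
 * For example, with a domain that handles LANDLOCK_ACCESS_FS_IOCTL_DEV but
 * does not grant it, ioctl(fd, FIOCLEX) on a device file is still permitted
 * because is_masked_device_ioctl(FIOCLEX) returns true, whereas a
 * driver-specific command falls through to the LANDLOCK_ACCESS_FS_IOCTL_DEV
 * check in hook_file_ioctl() below and fails with -EACCES.
 */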

/*
 * is_masked_device_ioctl_compat - same as the helper above, but checking the
 * "compat" IOCTL commands.
 *
 * The IOCTL commands with special handling in compat-mode should behave the
 * same as their non-compat counterparts.
 */
static __attribute_const__ bool
is_masked_device_ioctl_compat(const unsigned int cmd)
{
	switch (cmd) {
	/* FICLONE is permitted, same as in the non-compat variant. */
	case FICLONE:
		return true;

#if defined(CONFIG_X86_64)
	/*
	 * FS_IOC_RESVSP_32, FS_IOC_RESVSP64_32, FS_IOC_UNRESVSP_32,
	 * FS_IOC_UNRESVSP64_32, FS_IOC_ZERO_RANGE_32: not blanket-permitted,
	 * for consistency with their non-compat variants.
	 */
	case FS_IOC_RESVSP_32:
	case FS_IOC_RESVSP64_32:
	case FS_IOC_UNRESVSP_32:
	case FS_IOC_UNRESVSP64_32:
	case FS_IOC_ZERO_RANGE_32:
#endif

	/*
	 * FS_IOC32_GETFLAGS, FS_IOC32_SETFLAGS are forwarded to their device
	 * implementations.
	 */
	case FS_IOC32_GETFLAGS:
	case FS_IOC32_SETFLAGS:
		return false;
	default:
		return is_masked_device_ioctl(cmd);
	}
}

/* Ruleset management */

static struct landlock_object *get_inode_object(struct inode *const inode)
{
	struct landlock_object *object, *new_object;
	struct landlock_inode_security *inode_sec = landlock_inode(inode);

	rcu_read_lock();
retry:
	object = rcu_dereference(inode_sec->object);
	if (object) {
		if (likely(refcount_inc_not_zero(&object->usage))) {
			rcu_read_unlock();
			return object;
		}
		/*
		 * We are racing with release_inode(), the object is going
		 * away. Wait for release_inode(), then retry.
		 */
		spin_lock(&object->lock);
		spin_unlock(&object->lock);
		goto retry;
	}
	rcu_read_unlock();

	/*
	 * If there is no object tied to @inode, then create a new one (without
	 * holding any locks).
	 */
	new_object = landlock_create_object(&landlock_fs_underops, inode);
	if (IS_ERR(new_object))
		return new_object;

	/*
	 * Protects against concurrent calls to get_inode_object() or
	 * hook_sb_delete().
	 */
	spin_lock(&inode->i_lock);
	if (unlikely(rcu_access_pointer(inode_sec->object))) {
		/* Someone else just created the object, bail out and retry. */
		spin_unlock(&inode->i_lock);
		kfree(new_object);

		rcu_read_lock();
		goto retry;
	}

	/*
	 * @inode will be released by hook_sb_delete() on its superblock
	 * shutdown, or by release_inode() when no more ruleset references the
	 * related object.
	 */
	ihold(inode);
	rcu_assign_pointer(inode_sec->object, new_object);
	spin_unlock(&inode->i_lock);
	return new_object;
}
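
/*
 * Lifecycle summary (informative): get_inode_object() publishes the object
 * through landlock_inode(inode)->object and takes an ihold() reference on
 * @inode. The matching iput() happens either in release_inode(), when the
 * last ruleset reference to the object is dropped, or in hook_sb_delete()
 * when the superblock is shut down.
 */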

/* All access rights that can be tied to files. */
/* clang-format off */
#define ACCESS_FILE ( \
	LANDLOCK_ACCESS_FS_EXECUTE | \
	LANDLOCK_ACCESS_FS_WRITE_FILE | \
	LANDLOCK_ACCESS_FS_READ_FILE | \
	LANDLOCK_ACCESS_FS_TRUNCATE | \
	LANDLOCK_ACCESS_FS_IOCTL_DEV)
/* clang-format on */

/*
 * @path: Should have been checked by get_path_from_fd().
 */
int landlock_append_fs_rule(struct landlock_ruleset *const ruleset,
			    const struct path *const path,
			    access_mask_t access_rights)
{
	int err;
	struct landlock_id id = {
		.type = LANDLOCK_KEY_INODE,
	};

	/* Files only get access rights that make sense. */
	if (!d_is_dir(path->dentry) &&
	    (access_rights | ACCESS_FILE) != ACCESS_FILE)
		return -EINVAL;
	if (WARN_ON_ONCE(ruleset->num_layers != 1))
		return -EINVAL;

	/* Transforms relative access rights to absolute ones. */
	access_rights |= LANDLOCK_MASK_ACCESS_FS &
			 ~landlock_get_fs_access_mask(ruleset, 0);
	id.key.object = get_inode_object(d_backing_inode(path->dentry));
	if (IS_ERR(id.key.object))
		return PTR_ERR(id.key.object);
	mutex_lock(&ruleset->lock);
	err = landlock_insert_rule(ruleset, id, access_rights);
	mutex_unlock(&ruleset->lock);
	/*
	 * No need to check for an error because landlock_insert_rule()
	 * increments the refcount for the new object if needed.
	 */
	landlock_put_object(id.key.object);
	return err;
}
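
/*
 * A minimal user-space sketch of the path that reaches
 * landlock_append_fs_rule() via landlock_add_rule(2); see
 * Documentation/userspace-api/landlock.rst and samples/landlock. The "/usr"
 * path and the access mask are arbitrary examples:
 *
 *	struct landlock_path_beneath_attr attr = {
 *		.allowed_access = LANDLOCK_ACCESS_FS_READ_FILE |
 *				  LANDLOCK_ACCESS_FS_READ_DIR,
 *	};
 *
 *	attr.parent_fd = open("/usr", O_PATH | O_CLOEXEC);
 *	landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH, &attr, 0);
 */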

/* Access-control management */

/*
 * The lifetime of the returned rule is tied to @domain.
 *
 * Returns NULL if no rule is found or if @dentry is negative.
 */
static const struct landlock_rule *
find_rule(const struct landlock_ruleset *const domain,
	  const struct dentry *const dentry)
{
	const struct landlock_rule *rule;
	const struct inode *inode;
	struct landlock_id id = {
		.type = LANDLOCK_KEY_INODE,
	};

	/* Ignores nonexistent leaves. */
	if (d_is_negative(dentry))
		return NULL;

	inode = d_backing_inode(dentry);
	rcu_read_lock();
	id.key.object = rcu_dereference(landlock_inode(inode)->object);
	rule = landlock_find_rule(domain, id);
	rcu_read_unlock();
	return rule;
}

/*
 * Allows access to pseudo filesystems that will never be mountable (e.g.
 * sockfs, pipefs), but can still be reachable through
 * /proc/<pid>/fd/<file-descriptor>.
 */
static bool is_nouser_or_private(const struct dentry *dentry)
{
	return (dentry->d_sb->s_flags & SB_NOUSER) ||
	       (d_is_positive(dentry) &&
		unlikely(IS_PRIVATE(d_backing_inode(dentry))));
}

static access_mask_t
get_handled_fs_accesses(const struct landlock_ruleset *const domain)
{
	/* Handles all access rights that are initially denied by default. */
	return landlock_union_access_masks(domain).fs |
	       LANDLOCK_ACCESS_FS_INITIALLY_DENIED;
}

static const struct access_masks any_fs = {
	.fs = ~0,
};

static const struct landlock_ruleset *get_current_fs_domain(void)
{
	return landlock_get_applicable_domain(landlock_get_current_domain(),
					      any_fs);
}

/*
 * Check that a destination file hierarchy has more restrictions than a source
 * file hierarchy. This is only used for link and rename actions.
 *
 * @layer_masks_child2: Optional child masks.
 */
static bool no_more_access(
	const layer_mask_t (*const layer_masks_parent1)[LANDLOCK_NUM_ACCESS_FS],
	const layer_mask_t (*const layer_masks_child1)[LANDLOCK_NUM_ACCESS_FS],
	const bool child1_is_directory,
	const layer_mask_t (*const layer_masks_parent2)[LANDLOCK_NUM_ACCESS_FS],
	const layer_mask_t (*const layer_masks_child2)[LANDLOCK_NUM_ACCESS_FS],
	const bool child2_is_directory)
{
	unsigned long access_bit;

	for (access_bit = 0; access_bit < ARRAY_SIZE(*layer_masks_parent2);
	     access_bit++) {
		/* Ignores accesses that only make sense for directories. */
		const bool is_file_access =
			!!(BIT_ULL(access_bit) & ACCESS_FILE);

		if (child1_is_directory || is_file_access) {
			/*
			 * Checks if the destination restrictions are a
			 * superset of the source ones (i.e. inherited access
			 * rights without child exceptions):
			 * restrictions(parent2) >= restrictions(child1)
			 */
			if ((((*layer_masks_parent1)[access_bit] &
			      (*layer_masks_child1)[access_bit]) |
			     (*layer_masks_parent2)[access_bit]) !=
			    (*layer_masks_parent2)[access_bit])
				return false;
		}

		if (!layer_masks_child2)
			continue;
		if (child2_is_directory || is_file_access) {
			/*
			 * Checks inverted restrictions for RENAME_EXCHANGE:
			 * restrictions(parent1) >= restrictions(child2)
			 */
			if ((((*layer_masks_parent2)[access_bit] &
			      (*layer_masks_child2)[access_bit]) |
			     (*layer_masks_parent1)[access_bit]) !=
			    (*layer_masks_parent1)[access_bit])
				return false;
		}
	}
	return true;
}

#define NMA_TRUE(...) KUNIT_EXPECT_TRUE(test, no_more_access(__VA_ARGS__))
#define NMA_FALSE(...) KUNIT_EXPECT_FALSE(test, no_more_access(__VA_ARGS__))

#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST

static void test_no_more_access(struct kunit *const test)
{
	const layer_mask_t rx0[LANDLOCK_NUM_ACCESS_FS] = {
		[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0),
		[BIT_INDEX(LANDLOCK_ACCESS_FS_READ_FILE)] = BIT_ULL(0),
	};
	const layer_mask_t mx0[LANDLOCK_NUM_ACCESS_FS] = {
		[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0),
		[BIT_INDEX(LANDLOCK_ACCESS_FS_MAKE_REG)] = BIT_ULL(0),
	};
	const layer_mask_t x0[LANDLOCK_NUM_ACCESS_FS] = {
		[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0),
	};
	const layer_mask_t x1[LANDLOCK_NUM_ACCESS_FS] = {
		[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(1),
	};
	const layer_mask_t x01[LANDLOCK_NUM_ACCESS_FS] = {
		[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0) |
							   BIT_ULL(1),
	};
	const layer_mask_t allows_all[LANDLOCK_NUM_ACCESS_FS] = {};

	/* Checks without restriction. */
	NMA_TRUE(&x0, &allows_all, false, &allows_all, NULL, false);
	NMA_TRUE(&allows_all, &x0, false, &allows_all, NULL, false);
	NMA_FALSE(&x0, &x0, false, &allows_all, NULL, false);

	/*
	 * Checks that we can only refer a file if no more access could be
	 * inherited.
	 */
	NMA_TRUE(&x0, &x0, false, &rx0, NULL, false);
	NMA_TRUE(&rx0, &rx0, false, &rx0, NULL, false);
	NMA_FALSE(&rx0, &rx0, false, &x0, NULL, false);
	NMA_FALSE(&rx0, &rx0, false, &x1, NULL, false);

	/* Checks allowed referring with different nested domains. */
	NMA_TRUE(&x0, &x1, false, &x0, NULL, false);
	NMA_TRUE(&x1, &x0, false, &x0, NULL, false);
	NMA_TRUE(&x0, &x01, false, &x0, NULL, false);
	NMA_TRUE(&x0, &x01, false, &rx0, NULL, false);
	NMA_TRUE(&x01, &x0, false, &x0, NULL, false);
	NMA_TRUE(&x01, &x0, false, &rx0, NULL, false);
	NMA_FALSE(&x01, &x01, false, &x0, NULL, false);

	/* Checks that file access rights are also enforced for a directory. */
	NMA_FALSE(&rx0, &rx0, true, &x0, NULL, false);

	/* Checks that directory access rights don't impact file referring... */
	NMA_TRUE(&mx0, &mx0, false, &x0, NULL, false);
	/* ...but only directory referring. */
	NMA_FALSE(&mx0, &mx0, true, &x0, NULL, false);

	/* Checks directory exchange. */
	NMA_TRUE(&mx0, &mx0, true, &mx0, &mx0, true);
	NMA_TRUE(&mx0, &mx0, true, &mx0, &x0, true);
	NMA_FALSE(&mx0, &mx0, true, &x0, &mx0, true);
	NMA_FALSE(&mx0, &mx0, true, &x0, &x0, true);
	NMA_FALSE(&mx0, &mx0, true, &x1, &x1, true);

	/* Checks file exchange with directory access rights... */
	NMA_TRUE(&mx0, &mx0, false, &mx0, &mx0, false);
	NMA_TRUE(&mx0, &mx0, false, &mx0, &x0, false);
	NMA_TRUE(&mx0, &mx0, false, &x0, &mx0, false);
	NMA_TRUE(&mx0, &mx0, false, &x0, &x0, false);
	/* ...and with file access rights. */
	NMA_TRUE(&rx0, &rx0, false, &rx0, &rx0, false);
	NMA_TRUE(&rx0, &rx0, false, &rx0, &x0, false);
	NMA_FALSE(&rx0, &rx0, false, &x0, &rx0, false);
	NMA_FALSE(&rx0, &rx0, false, &x0, &x0, false);
	NMA_FALSE(&rx0, &rx0, false, &x1, &x1, false);

	/*
	 * Allowing the following requests should not be a security risk
	 * because domain 0 denies execute access, and domain 1 is always
	 * nested with domain 0. However, adding an exception for this case
	 * would mean checking all nested domains to make sure none can get
	 * more privileges (e.g. processes only sandboxed by domain 0).
	 * Moreover, this behavior (i.e. composition of N domains) could then
	 * be inconsistent compared to domain 1's ruleset alone (e.g. it might
	 * be denied to link/rename with domain 1's ruleset, whereas it would
	 * be allowed if nested on top of domain 0). Another drawback would be
	 * to create a covert channel that could enable sandboxed processes to
	 * infer most of the filesystem restrictions from their domain. To
	 * make it simple, efficient, safe, and more consistent, this case is
	 * always denied.
	 */
	NMA_FALSE(&x1, &x1, false, &x0, NULL, false);
	NMA_FALSE(&x1, &x1, false, &rx0, NULL, false);
	NMA_FALSE(&x1, &x1, true, &x0, NULL, false);
	NMA_FALSE(&x1, &x1, true, &rx0, NULL, false);

	/* Checks the same case of exclusive domains with a file... */
	NMA_TRUE(&x1, &x1, false, &x01, NULL, false);
	NMA_FALSE(&x1, &x1, false, &x01, &x0, false);
	NMA_FALSE(&x1, &x1, false, &x01, &x01, false);
	NMA_FALSE(&x1, &x1, false, &x0, &x0, false);
	/* ...and with a directory. */
	NMA_FALSE(&x1, &x1, false, &x0, &x0, true);
	NMA_FALSE(&x1, &x1, true, &x0, &x0, false);
	NMA_FALSE(&x1, &x1, true, &x0, &x0, true);
}

#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */

#undef NMA_TRUE
#undef NMA_FALSE

/*
 * Removes @layer_masks accesses that are not requested.
 *
 * Returns true if the request is allowed, false otherwise.
 */
static bool
scope_to_request(const access_mask_t access_request,
		 layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS])
{
	const unsigned long access_req = access_request;
	unsigned long access_bit;

	if (WARN_ON_ONCE(!layer_masks))
		return true;

	for_each_clear_bit(access_bit, &access_req, ARRAY_SIZE(*layer_masks))
		(*layer_masks)[access_bit] = 0;
	return !memchr_inv(layer_masks, 0, sizeof(*layer_masks));
}

#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST

static void test_scope_to_request_with_exec_none(struct kunit *const test)
{
	/* Allows everything. */
	layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {};

	/* Checks and scopes with execute. */
	KUNIT_EXPECT_TRUE(test, scope_to_request(LANDLOCK_ACCESS_FS_EXECUTE,
						 &layer_masks));
	KUNIT_EXPECT_EQ(test, 0,
			layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)]);
	KUNIT_EXPECT_EQ(test, 0,
			layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)]);
}

static void test_scope_to_request_with_exec_some(struct kunit *const test)
{
	/* Denies execute and write. */
	layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {
		[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0),
		[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)] = BIT_ULL(1),
	};

	/* Checks and scopes with execute. */
	KUNIT_EXPECT_FALSE(test, scope_to_request(LANDLOCK_ACCESS_FS_EXECUTE,
						  &layer_masks));
	KUNIT_EXPECT_EQ(test, BIT_ULL(0),
			layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)]);
	KUNIT_EXPECT_EQ(test, 0,
			layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)]);
}

static void test_scope_to_request_without_access(struct kunit *const test)
{
	/* Denies execute and write. */
	layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {
		[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0),
		[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)] = BIT_ULL(1),
	};

	/* Checks and scopes without access request. */
	KUNIT_EXPECT_TRUE(test, scope_to_request(0, &layer_masks));
	KUNIT_EXPECT_EQ(test, 0,
			layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)]);
	KUNIT_EXPECT_EQ(test, 0,
			layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)]);
}

#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */

/*
 * Returns true if there is at least one access right other than
 * LANDLOCK_ACCESS_FS_REFER.
 */
static bool
is_eacces(const layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS],
	  const access_mask_t access_request)
{
	unsigned long access_bit;
	/* LANDLOCK_ACCESS_FS_REFER alone must return -EXDEV. */
	const unsigned long access_check = access_request &
					   ~LANDLOCK_ACCESS_FS_REFER;

	if (!layer_masks)
		return false;

	for_each_set_bit(access_bit, &access_check, ARRAY_SIZE(*layer_masks)) {
		if ((*layer_masks)[access_bit])
			return true;
	}
	return false;
}

#define IE_TRUE(...) KUNIT_EXPECT_TRUE(test, is_eacces(__VA_ARGS__))
#define IE_FALSE(...) KUNIT_EXPECT_FALSE(test, is_eacces(__VA_ARGS__))

#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST

static void test_is_eacces_with_none(struct kunit *const test)
{
	const layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {};

	IE_FALSE(&layer_masks, 0);
	IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_REFER);
	IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_EXECUTE);
	IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_WRITE_FILE);
}

static void test_is_eacces_with_refer(struct kunit *const test)
{
	const layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {
		[BIT_INDEX(LANDLOCK_ACCESS_FS_REFER)] = BIT_ULL(0),
	};

	IE_FALSE(&layer_masks, 0);
	IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_REFER);
	IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_EXECUTE);
	IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_WRITE_FILE);
}

static void test_is_eacces_with_write(struct kunit *const test)
{
	const layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {
		[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)] = BIT_ULL(0),
	};

	IE_FALSE(&layer_masks, 0);
	IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_REFER);
	IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_EXECUTE);

	IE_TRUE(&layer_masks, LANDLOCK_ACCESS_FS_WRITE_FILE);
}

#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */

#undef IE_TRUE
#undef IE_FALSE

/**
 * is_access_to_paths_allowed - Check accesses for requests with a common path
 *
 * @domain: Domain to check against.
 * @path: File hierarchy to walk through.
 * @access_request_parent1: Accesses to check, once @layer_masks_parent1 is
 *	equal to @layer_masks_parent2 (if any). This is tied to the unique
 *	requested path for most actions, or the source in case of a refer
 *	action (i.e. rename or link), or the source and destination in case
 *	of RENAME_EXCHANGE.
 * @layer_masks_parent1: Pointer to a matrix of layer masks per access
 *	masks, identifying the layers that forbid a specific access. Bits
 *	from this matrix can be unset according to the @path walk. An empty
 *	matrix means that @domain allows all possible Landlock accesses (i.e.
 *	not only those identified by @access_request_parent1). This matrix
 *	can initially refer to domain layer masks and, when the accesses for
 *	the destination and source are the same, to requested layer masks.
 * @dentry_child1: Dentry to the initial child of the parent1 path. This
 *	pointer must be NULL for non-refer actions (i.e. not link nor rename).
 * @access_request_parent2: Similar to @access_request_parent1 but for a
 *	request involving a source and a destination. This refers to the
 *	destination, except in case of RENAME_EXCHANGE where it also refers to
 *	the source. Must be set to 0 when using a simple path request.
 * @layer_masks_parent2: Similar to @layer_masks_parent1 but for a refer
 *	action. This must be NULL otherwise.
 * @dentry_child2: Dentry to the initial child of the parent2 path. This
 *	pointer is only set for RENAME_EXCHANGE actions and must be NULL
 *	otherwise.
 *
 * This helper first checks that the destination has a superset of restrictions
 * compared to the source (if any) for a common path. Because of
 * RENAME_EXCHANGE actions, source and destinations may be swapped. It then
 * checks that the collected accesses and the remaining ones are enough to
 * allow the request.
 *
 * Returns:
 * - true if the access request is granted;
 * - false otherwise.
 */
static bool is_access_to_paths_allowed(
	const struct landlock_ruleset *const domain,
	const struct path *const path,
	const access_mask_t access_request_parent1,
	layer_mask_t (*const layer_masks_parent1)[LANDLOCK_NUM_ACCESS_FS],
	const struct dentry *const dentry_child1,
	const access_mask_t access_request_parent2,
	layer_mask_t (*const layer_masks_parent2)[LANDLOCK_NUM_ACCESS_FS],
	const struct dentry *const dentry_child2)
{
	bool allowed_parent1 = false, allowed_parent2 = false, is_dom_check,
	     child1_is_directory = true, child2_is_directory = true;
	struct path walker_path;
	access_mask_t access_masked_parent1, access_masked_parent2;
	layer_mask_t _layer_masks_child1[LANDLOCK_NUM_ACCESS_FS],
		_layer_masks_child2[LANDLOCK_NUM_ACCESS_FS];
	layer_mask_t (*layer_masks_child1)[LANDLOCK_NUM_ACCESS_FS] = NULL,
		(*layer_masks_child2)[LANDLOCK_NUM_ACCESS_FS] = NULL;

	if (!access_request_parent1 && !access_request_parent2)
		return true;
	if (WARN_ON_ONCE(!domain || !path))
		return true;
	if (is_nouser_or_private(path->dentry))
		return true;
	if (WARN_ON_ONCE(domain->num_layers < 1 || !layer_masks_parent1))
		return false;

	if (unlikely(layer_masks_parent2)) {
		if (WARN_ON_ONCE(!dentry_child1))
			return false;
		/*
		 * For a double request, first check for potential privilege
		 * escalation by looking at domain handled accesses (which are
		 * a superset of the meaningful requested accesses).
		 */
		access_masked_parent1 = access_masked_parent2 =
			get_handled_fs_accesses(domain);
		is_dom_check = true;
	} else {
		if (WARN_ON_ONCE(dentry_child1 || dentry_child2))
			return false;
		/* For a simple request, only check for requested accesses. */
		access_masked_parent1 = access_request_parent1;
		access_masked_parent2 = access_request_parent2;
		is_dom_check = false;
	}

	if (unlikely(dentry_child1)) {
		landlock_unmask_layers(
			find_rule(domain, dentry_child1),
			landlock_init_layer_masks(
				domain, LANDLOCK_MASK_ACCESS_FS,
				&_layer_masks_child1, LANDLOCK_KEY_INODE),
			&_layer_masks_child1, ARRAY_SIZE(_layer_masks_child1));
		layer_masks_child1 = &_layer_masks_child1;
		child1_is_directory = d_is_dir(dentry_child1);
	}
	if (unlikely(dentry_child2)) {
		landlock_unmask_layers(
			find_rule(domain, dentry_child2),
			landlock_init_layer_masks(
				domain, LANDLOCK_MASK_ACCESS_FS,
				&_layer_masks_child2, LANDLOCK_KEY_INODE),
			&_layer_masks_child2, ARRAY_SIZE(_layer_masks_child2));
		layer_masks_child2 = &_layer_masks_child2;
		child2_is_directory = d_is_dir(dentry_child2);
	}

	walker_path = *path;
	path_get(&walker_path);
	/*
	 * We need to walk through all the hierarchy to not miss any relevant
	 * restriction.
	 */
	while (true) {
		struct dentry *parent_dentry;
		const struct landlock_rule *rule;

		/*
		 * If at least all accesses allowed on the destination are
		 * already allowed on the source, respectively if there are at
		 * least as many restrictions on the destination as on the
		 * source, then we can safely refer files from the source to
		 * the destination without risking a privilege escalation.
		 * This also applies in the case of RENAME_EXCHANGE, which
		 * implies checks on both directions. This is crucial for
		 * standalone multilayered security policies. Furthermore,
		 * this helps prevent policy writers from shooting themselves
		 * in the foot.
		 */
		if (unlikely(is_dom_check &&
			     no_more_access(
				     layer_masks_parent1, layer_masks_child1,
				     child1_is_directory, layer_masks_parent2,
				     layer_masks_child2,
				     child2_is_directory))) {
			allowed_parent1 = scope_to_request(
				access_request_parent1, layer_masks_parent1);
			allowed_parent2 = scope_to_request(
				access_request_parent2, layer_masks_parent2);

			/* Stops when all accesses are granted. */
			if (allowed_parent1 && allowed_parent2)
				break;

			/*
			 * Now, downgrades the remaining checks from domain
			 * handled accesses to requested accesses.
			 */
			is_dom_check = false;
			access_masked_parent1 = access_request_parent1;
			access_masked_parent2 = access_request_parent2;
		}

		rule = find_rule(domain, walker_path.dentry);
		allowed_parent1 = landlock_unmask_layers(
			rule, access_masked_parent1, layer_masks_parent1,
			ARRAY_SIZE(*layer_masks_parent1));
		allowed_parent2 = landlock_unmask_layers(
			rule, access_masked_parent2, layer_masks_parent2,
			ARRAY_SIZE(*layer_masks_parent2));

		/* Stops when a rule from each layer grants access. */
		if (allowed_parent1 && allowed_parent2)
			break;
jump_up:
		if (walker_path.dentry == walker_path.mnt->mnt_root) {
			if (follow_up(&walker_path)) {
				/* Ignores hidden mount points. */
				goto jump_up;
			} else {
				/*
				 * Stops at the real root. Denies access
				 * because not all layers have granted access.
				 */
				break;
			}
		}
		if (unlikely(IS_ROOT(walker_path.dentry))) {
			/*
			 * Stops at disconnected root directories. Only allows
			 * access to internal filesystems (e.g. nsfs, which is
			 * reachable through /proc/<pid>/ns/<namespace>).
			 */
			allowed_parent1 = allowed_parent2 =
				!!(walker_path.mnt->mnt_flags & MNT_INTERNAL);
			break;
		}
		parent_dentry = dget_parent(walker_path.dentry);
		dput(walker_path.dentry);
		walker_path.dentry = parent_dentry;
	}
	path_put(&walker_path);

	return allowed_parent1 && allowed_parent2;
}

static int check_access_path(const struct landlock_ruleset *const domain,
			     const struct path *const path,
			     access_mask_t access_request)
{
	layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {};

	access_request = landlock_init_layer_masks(
		domain, access_request, &layer_masks, LANDLOCK_KEY_INODE);
	if (is_access_to_paths_allowed(domain, path, access_request,
				       &layer_masks, NULL, 0, NULL, NULL))
		return 0;
	return -EACCES;
}

static int current_check_access_path(const struct path *const path,
				     const access_mask_t access_request)
{
	const struct landlock_ruleset *const dom = get_current_fs_domain();

	if (!dom)
		return 0;
	return check_access_path(dom, path, access_request);
}

static access_mask_t get_mode_access(const umode_t mode)
{
	switch (mode & S_IFMT) {
	case S_IFLNK:
		return LANDLOCK_ACCESS_FS_MAKE_SYM;
	case 0:
		/* A zero mode translates to S_IFREG. */
	case S_IFREG:
		return LANDLOCK_ACCESS_FS_MAKE_REG;
	case S_IFDIR:
		return LANDLOCK_ACCESS_FS_MAKE_DIR;
	case S_IFCHR:
		return LANDLOCK_ACCESS_FS_MAKE_CHAR;
	case S_IFBLK:
		return LANDLOCK_ACCESS_FS_MAKE_BLOCK;
	case S_IFIFO:
		return LANDLOCK_ACCESS_FS_MAKE_FIFO;
	case S_IFSOCK:
		return LANDLOCK_ACCESS_FS_MAKE_SOCK;
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}
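
/*
 * For instance, mknod(path, S_IFIFO | 0600, 0) requires
 * LANDLOCK_ACCESS_FS_MAKE_FIFO on the parent directory, and a zero file type
 * is treated as S_IFREG, requiring LANDLOCK_ACCESS_FS_MAKE_REG.
 */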

static access_mask_t maybe_remove(const struct dentry *const dentry)
{
	if (d_is_negative(dentry))
		return 0;
	return d_is_dir(dentry) ? LANDLOCK_ACCESS_FS_REMOVE_DIR :
				  LANDLOCK_ACCESS_FS_REMOVE_FILE;
}

/**
 * collect_domain_accesses - Walk through a file path and collect accesses
 *
 * @domain: Domain to check against.
 * @mnt_root: Last directory to check.
 * @dir: Directory to start the walk from.
 * @layer_masks_dom: Where to store the collected accesses.
 *
 * This helper is useful to begin a path walk from the @dir directory to a
 * @mnt_root directory used as a mount point. This mount point is the common
 * ancestor between the source and the destination of a renamed or linked
 * file. While walking from @dir to @mnt_root, we record all the domain's
 * allowed accesses in @layer_masks_dom.
 *
 * This is similar to is_access_to_paths_allowed() but much simpler because it
 * only handles walking on the same mount point and only checks one set of
 * accesses.
 *
 * Returns:
 * - true if all the domain access rights are allowed for @dir;
 * - false if the walk reached @mnt_root.
 */
static bool collect_domain_accesses(
	const struct landlock_ruleset *const domain,
	const struct dentry *const mnt_root, struct dentry *dir,
	layer_mask_t (*const layer_masks_dom)[LANDLOCK_NUM_ACCESS_FS])
{
	unsigned long access_dom;
	bool ret = false;

	if (WARN_ON_ONCE(!domain || !mnt_root || !dir || !layer_masks_dom))
		return true;
	if (is_nouser_or_private(dir))
		return true;

	access_dom = landlock_init_layer_masks(domain, LANDLOCK_MASK_ACCESS_FS,
					       layer_masks_dom,
					       LANDLOCK_KEY_INODE);

	dget(dir);
	while (true) {
		struct dentry *parent_dentry;

		/* Gets all layers allowing all domain accesses. */
		if (landlock_unmask_layers(find_rule(domain, dir), access_dom,
					   layer_masks_dom,
					   ARRAY_SIZE(*layer_masks_dom))) {
			/*
			 * Stops when all handled accesses are allowed by at
			 * least one rule in each layer.
			 */
			ret = true;
			break;
		}

		/* We should not reach a root other than @mnt_root. */
		if (dir == mnt_root || WARN_ON_ONCE(IS_ROOT(dir)))
			break;

		parent_dentry = dget_parent(dir);
		dput(dir);
		dir = parent_dentry;
	}
	dput(dir);
	return ret;
}

/**
 * current_check_refer_path - Check if a rename or link action is allowed
 *
 * @old_dentry: File or directory requested to be moved or linked.
 * @new_dir: Destination parent directory.
 * @new_dentry: Destination file or directory.
 * @removable: Set to true if it is a rename operation.
 * @exchange: Set to true if it is a rename operation with RENAME_EXCHANGE.
 *
 * Because of its unprivileged constraints, Landlock relies on file hierarchies
 * (and not only inodes) to tie access rights to files. Being able to link or
 * rename a file hierarchy brings some challenges. Indeed, moving or linking a
 * file (i.e. creating a new reference to an inode) can have an impact on the
 * actions allowed for a set of files if it would change its parent directory
 * (i.e. reparenting).
 *
 * To avoid trivial access right bypasses, Landlock first checks if the file or
 * directory requested to be moved would gain new access rights inherited from
 * its new hierarchy. Before returning any error, Landlock then checks that
 * the parent source hierarchy and the destination hierarchy would allow the
 * link or rename action. If it is not the case, an error with EACCES is
 * returned to inform user space that there is no way to remove or create the
 * requested source file type. If it should be allowed but the new inherited
 * access rights would be greater than the source access rights, then the
 * kernel returns an error with EXDEV. Prioritizing EACCES over EXDEV enables
 * user space to abort the whole operation if there is no way to do it, or to
 * manually copy the source to the destination if this remains allowed, e.g.
 * because file creation is allowed on the destination directory but not direct
 * linking.
 *
 * To achieve this goal, the kernel needs to compare two file hierarchies: the
 * one identifying the source file or directory (including itself), and the
 * destination one. This can be seen as a multilayer partial ordering problem.
 * The kernel walks through these paths and collects in a matrix the access
 * rights that are denied per layer. These matrices are then compared to see
 * if the destination one has more (or the same) restrictions as the source
 * one. If this is the case, the requested action will not return EXDEV, which
 * doesn't mean the action is allowed. The parent hierarchy of the source
 * (i.e. parent directory), and the destination hierarchy must also be checked
 * to verify that they explicitly allow such action (i.e. referencing,
 * creation and potentially removal rights). The kernel implementation is then
 * required to rely on potentially four matrices of access rights: one for the
 * source file or directory (i.e. the child), potentially another one for the
 * other source/destination (in case of RENAME_EXCHANGE), one for the source
 * parent hierarchy and a last one for the destination hierarchy. These
 * ephemeral matrices take some space on the stack, which limits the number of
 * layers to a deemed reasonable number: 16.
 *
 * Returns:
 * - 0 if access is allowed;
 * - -EXDEV if @old_dentry would inherit new access rights from @new_dir;
 * - -EACCES if file removal or creation is denied.
 */
static int current_check_refer_path(struct dentry *const old_dentry,
				    const struct path *const new_dir,
				    struct dentry *const new_dentry,
				    const bool removable, const bool exchange)
{
	const struct landlock_ruleset *const dom = get_current_fs_domain();
	bool allow_parent1, allow_parent2;
	access_mask_t access_request_parent1, access_request_parent2;
	struct path mnt_dir;
	struct dentry *old_parent;
	layer_mask_t layer_masks_parent1[LANDLOCK_NUM_ACCESS_FS] = {},
		layer_masks_parent2[LANDLOCK_NUM_ACCESS_FS] = {};

	if (!dom)
		return 0;
	if (WARN_ON_ONCE(dom->num_layers < 1))
		return -EACCES;
	if (unlikely(d_is_negative(old_dentry)))
		return -ENOENT;
	if (exchange) {
		if (unlikely(d_is_negative(new_dentry)))
			return -ENOENT;
		access_request_parent1 =
			get_mode_access(d_backing_inode(new_dentry)->i_mode);
	} else {
		access_request_parent1 = 0;
	}
	access_request_parent2 =
		get_mode_access(d_backing_inode(old_dentry)->i_mode);
	if (removable) {
		access_request_parent1 |= maybe_remove(old_dentry);
		access_request_parent2 |= maybe_remove(new_dentry);
	}

	/* The mount points are the same for old and new paths, cf. EXDEV. */
	if (old_dentry->d_parent == new_dir->dentry) {
		/*
		 * The LANDLOCK_ACCESS_FS_REFER access right is not required
		 * for a same-directory rename or link (i.e. no reparenting).
		 */
		access_request_parent1 = landlock_init_layer_masks(
			dom, access_request_parent1 | access_request_parent2,
			&layer_masks_parent1, LANDLOCK_KEY_INODE);
		if (is_access_to_paths_allowed(
			    dom, new_dir, access_request_parent1,
			    &layer_masks_parent1, NULL, 0, NULL, NULL))
			return 0;
		return -EACCES;
	}

	access_request_parent1 |= LANDLOCK_ACCESS_FS_REFER;
	access_request_parent2 |= LANDLOCK_ACCESS_FS_REFER;

	/* Saves the common mount point. */
	mnt_dir.mnt = new_dir->mnt;
	mnt_dir.dentry = new_dir->mnt->mnt_root;

	/*
	 * old_dentry may be the root of the common mount point and
	 * !IS_ROOT(old_dentry) at the same time (e.g. with open_tree() and
	 * OPEN_TREE_CLONE). We do not need to call dget(old_parent) because
	 * we keep a reference to old_dentry.
	 */
	old_parent = (old_dentry == mnt_dir.dentry) ? old_dentry :
						      old_dentry->d_parent;

	/* new_dir->dentry is equal to new_dentry->d_parent */
	allow_parent1 = collect_domain_accesses(dom, mnt_dir.dentry, old_parent,
						&layer_masks_parent1);
	allow_parent2 = collect_domain_accesses(
		dom, mnt_dir.dentry, new_dir->dentry, &layer_masks_parent2);

	if (allow_parent1 && allow_parent2)
		return 0;

	/*
	 * To be able to compare source and destination domain access rights,
	 * take into account the @old_dentry access rights aggregated with its
	 * parent access rights. This will be useful to compare with the
	 * destination parent access rights.
	 */
	if (is_access_to_paths_allowed(
		    dom, &mnt_dir, access_request_parent1, &layer_masks_parent1,
		    old_dentry, access_request_parent2, &layer_masks_parent2,
		    exchange ? new_dentry : NULL))
		return 0;

	/*
	 * This prioritizes EACCES over EXDEV for all actions, including
	 * renames with RENAME_EXCHANGE.
	 */
	if (likely(is_eacces(&layer_masks_parent1, access_request_parent1) ||
		   is_eacces(&layer_masks_parent2, access_request_parent2)))
		return -EACCES;

	/*
	 * Gracefully forbids reparenting if the destination directory
	 * hierarchy is not a superset of restrictions of the source directory
	 * hierarchy, or if LANDLOCK_ACCESS_FS_REFER is not allowed by the
	 * source or the destination.
	 */
	return -EXDEV;
}
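
/*
 * Concrete illustration: renaming a regular file into a directory where the
 * domain denies LANDLOCK_ACCESS_FS_MAKE_REG fails with -EACCES, whereas a
 * rename for which creation and removal are allowed but the file would
 * inherit broader access rights from its new parent (or lacks
 * LANDLOCK_ACCESS_FS_REFER) fails with -EXDEV, as described above.
 */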

/* Inode hooks */

static void hook_inode_free_security_rcu(void *inode_security)
{
	struct landlock_inode_security *inode_sec;

	/*
	 * All inodes must already have been untied from their object by
	 * release_inode() or hook_sb_delete().
	 */
	inode_sec = inode_security + landlock_blob_sizes.lbs_inode;
	WARN_ON_ONCE(inode_sec->object);
}

/* Super-block hooks */

/*
 * Release the inodes used in a security policy.
 *
 * Cf. fsnotify_unmount_inodes() and invalidate_inodes()
 */
static void hook_sb_delete(struct super_block *const sb)
{
	struct inode *inode, *prev_inode = NULL;

	if (!landlock_initialized)
		return;

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		struct landlock_object *object;

		/* Only handles referenced inodes. */
		if (!atomic_read(&inode->i_count))
			continue;

		/*
		 * Protects against concurrent modification of inode (e.g.
		 * from get_inode_object()).
		 */
		spin_lock(&inode->i_lock);
		/*
		 * Checks I_FREEING and I_WILL_FREE to protect against a race
		 * condition when release_inode() just called iput(), which
		 * could lead to a NULL dereference of inode->security or a
		 * second call to iput() for the same Landlock object. Also
		 * checks I_NEW because such inode cannot be tied to an object.
		 */
		if (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		rcu_read_lock();
		object = rcu_dereference(landlock_inode(inode)->object);
		if (!object) {
			rcu_read_unlock();
			spin_unlock(&inode->i_lock);
			continue;
		}
		/* Keeps a reference to this inode until the next loop walk. */
		__iget(inode);
		spin_unlock(&inode->i_lock);

		/*
		 * If there is no concurrent release_inode() ongoing, then we
		 * are in charge of calling iput() on this inode, otherwise we
		 * will just wait for it to finish.
		 */
		spin_lock(&object->lock);
		if (object->underobj == inode) {
			object->underobj = NULL;
			spin_unlock(&object->lock);
			rcu_read_unlock();

			/*
			 * Because object->underobj was not NULL,
			 * release_inode() and get_inode_object() guarantee
			 * that it is safe to reset
			 * landlock_inode(inode)->object while it is not NULL.
			 * It is therefore not necessary to lock inode->i_lock.
			 */
			rcu_assign_pointer(landlock_inode(inode)->object, NULL);
			/*
			 * At this point, we own the ihold() reference that was
			 * originally set up by get_inode_object() and the
			 * __iget() reference that we just set in this loop
			 * walk. Therefore the following call to iput() will
			 * not sleep nor drop the inode because there are now
			 * at least two references to it.
			 */
			iput(inode);
		} else {
			spin_unlock(&object->lock);
			rcu_read_unlock();
		}

		if (prev_inode) {
			/*
			 * At this point, we still own the __iget() reference
			 * that we just set in this loop walk. Therefore we
			 * can drop the list lock and know that the inode won't
			 * disappear from under us until the next loop walk.
			 */
			spin_unlock(&sb->s_inode_list_lock);
			/*
			 * We can now actually put the inode reference from the
			 * previous loop walk, which is not needed anymore.
			 */
			iput(prev_inode);
			cond_resched();
			spin_lock(&sb->s_inode_list_lock);
		}
		prev_inode = inode;
	}
	spin_unlock(&sb->s_inode_list_lock);

	/* Puts the inode reference from the last loop walk, if any. */
	if (prev_inode)
		iput(prev_inode);
	/* Waits for pending iput() in release_inode(). */
	wait_var_event(&landlock_superblock(sb)->inode_refs,
		       !atomic_long_read(&landlock_superblock(sb)->inode_refs));
}

/*
 * Because a Landlock security policy is defined according to the filesystem
 * topology (i.e. the mount namespace), changing it may grant access to files
 * not previously allowed.
 *
 * To make it simple, deny any filesystem topology modification by landlocked
 * processes. Non-landlocked processes may still change the namespace of a
 * landlocked process, but this kind of threat must be handled by a system-wide
 * access-control security policy.
 *
 * This could be lifted in the future if Landlock can safely handle mount
 * namespace updates requested by a landlocked process. Indeed, we could
 * update the current domain (which is currently read-only) by taking into
 * account the accesses of the source and the destination of a new mount point.
 * However, it would also require making all the child domains dynamically
 * inherit these new constraints. Anyway, for backward compatibility reasons,
 * a dedicated user space option would be required (e.g. as a ruleset flag).
 */
static int hook_sb_mount(const char *const dev_name,
			 const struct path *const path, const char *const type,
			 const unsigned long flags, void *const data)
{
	if (!get_current_fs_domain())
		return 0;
	return -EPERM;
}

static int hook_move_mount(const struct path *const from_path,
			   const struct path *const to_path)
{
	if (!get_current_fs_domain())
		return 0;
	return -EPERM;
}

/*
 * Removing a mount point may reveal a previously hidden file hierarchy, which
 * may then grant access to files that were previously forbidden.
 */
static int hook_sb_umount(struct vfsmount *const mnt, const int flags)
{
	if (!get_current_fs_domain())
		return 0;
	return -EPERM;
}

static int hook_sb_remount(struct super_block *const sb, void *const mnt_opts)
{
	if (!get_current_fs_domain())
		return 0;
	return -EPERM;
}

/*
 * pivot_root(2), like mount(2), changes the current mount namespace. It must
 * then be forbidden for a landlocked process.
 *
 * However, chroot(2) may be allowed because it only changes the relative root
 * directory of the current process. Moreover, it can be used to restrict the
 * view of the filesystem.
 */
static int hook_sb_pivotroot(const struct path *const old_path,
			     const struct path *const new_path)
{
	if (!get_current_fs_domain())
		return 0;
	return -EPERM;
}

/* Path hooks */

static int hook_path_link(struct dentry *const old_dentry,
			  const struct path *const new_dir,
			  struct dentry *const new_dentry)
{
	return current_check_refer_path(old_dentry, new_dir, new_dentry, false,
					false);
}

static int hook_path_rename(const struct path *const old_dir,
			    struct dentry *const old_dentry,
			    const struct path *const new_dir,
			    struct dentry *const new_dentry,
			    const unsigned int flags)
{
	/* old_dir refers to old_dentry->d_parent and new_dir->mnt */
	return current_check_refer_path(old_dentry, new_dir, new_dentry, true,
					!!(flags & RENAME_EXCHANGE));
}

static int hook_path_mkdir(const struct path *const dir,
			   struct dentry *const dentry, const umode_t mode)
{
	return current_check_access_path(dir, LANDLOCK_ACCESS_FS_MAKE_DIR);
}

static int hook_path_mknod(const struct path *const dir,
			   struct dentry *const dentry, const umode_t mode,
			   const unsigned int dev)
{
	const struct landlock_ruleset *const dom = get_current_fs_domain();

	if (!dom)
		return 0;
	return check_access_path(dom, dir, get_mode_access(mode));
}

static int hook_path_symlink(const struct path *const dir,
			     struct dentry *const dentry,
			     const char *const old_name)
{
	return current_check_access_path(dir, LANDLOCK_ACCESS_FS_MAKE_SYM);
}

static int hook_path_unlink(const struct path *const dir,
			    struct dentry *const dentry)
{
	return current_check_access_path(dir, LANDLOCK_ACCESS_FS_REMOVE_FILE);
}

static int hook_path_rmdir(const struct path *const dir,
			   struct dentry *const dentry)
{
	return current_check_access_path(dir, LANDLOCK_ACCESS_FS_REMOVE_DIR);
}

static int hook_path_truncate(const struct path *const path)
{
	return current_check_access_path(path, LANDLOCK_ACCESS_FS_TRUNCATE);
}

/* File hooks */

/**
 * get_required_file_open_access - Get access needed to open a file
 *
 * @file: File being opened.
 *
 * Returns the access rights that are required for opening the given file,
 * depending on the file type and open mode.
 */
static access_mask_t
get_required_file_open_access(const struct file *const file)
{
	access_mask_t access = 0;

	if (file->f_mode & FMODE_READ) {
		/* A directory can only be opened in read mode. */
		if (S_ISDIR(file_inode(file)->i_mode))
			return LANDLOCK_ACCESS_FS_READ_DIR;
		access = LANDLOCK_ACCESS_FS_READ_FILE;
	}
	if (file->f_mode & FMODE_WRITE)
		access |= LANDLOCK_ACCESS_FS_WRITE_FILE;
	/* __FMODE_EXEC is indeed part of f_flags, not f_mode. */
	if (file->f_flags & __FMODE_EXEC)
		access |= LANDLOCK_ACCESS_FS_EXECUTE;
	return access;
}
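
/*
 * For example, open(path, O_RDWR) on a regular file requires
 * LANDLOCK_ACCESS_FS_READ_FILE | LANDLOCK_ACCESS_FS_WRITE_FILE, while
 * open(path, O_RDONLY | O_DIRECTORY) requires LANDLOCK_ACCESS_FS_READ_DIR.
 */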

static int hook_file_alloc_security(struct file *const file)
{
	/*
	 * Grants all access rights, even if most of them are not checked later
	 * on. It is more consistent.
	 *
	 * Notably, file descriptors for regular files can also be acquired
	 * without going through the file_open hook, for example when using
	 * memfd_create(2).
	 */
	landlock_file(file)->allowed_access = LANDLOCK_MASK_ACCESS_FS;
	return 0;
}

static bool is_device(const struct file *const file)
{
	const struct inode *inode = file_inode(file);

	return S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode);
}

static int hook_file_open(struct file *const file)
{
	layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {};
	access_mask_t open_access_request, full_access_request, allowed_access,
		optional_access;
	const struct landlock_ruleset *const dom =
		landlock_get_applicable_domain(
			landlock_cred(file->f_cred)->domain, any_fs);

	if (!dom)
		return 0;

	/*
	 * Because a file may be opened with O_PATH,
	 * get_required_file_open_access() may return 0.  This case will be
	 * handled by a future Landlock evolution.
	 */
	open_access_request = get_required_file_open_access(file);

	/*
	 * We look up more access than what we immediately need for open(), so
	 * that we can later authorize operations on opened files.
	 */
	optional_access = LANDLOCK_ACCESS_FS_TRUNCATE;
	if (is_device(file))
		optional_access |= LANDLOCK_ACCESS_FS_IOCTL_DEV;

	full_access_request = open_access_request | optional_access;

	if (is_access_to_paths_allowed(
		    dom, &file->f_path,
		    landlock_init_layer_masks(dom, full_access_request,
					      &layer_masks, LANDLOCK_KEY_INODE),
		    &layer_masks, NULL, 0, NULL, NULL)) {
		allowed_access = full_access_request;
	} else {
		unsigned long access_bit;
		const unsigned long access_req = full_access_request;

		/*
		 * Calculate the actual allowed access rights from layer_masks.
		 * Add to allowed_access each access right that has not been
		 * vetoed by any layer.
		 */
		allowed_access = 0;
		for_each_set_bit(access_bit, &access_req,
				 ARRAY_SIZE(layer_masks)) {
			if (!layer_masks[access_bit])
				allowed_access |= BIT_ULL(access_bit);
		}
	}
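
	/*
	 * Worked example (illustrative): if full_access_request contains
	 * READ_FILE and TRUNCATE, and some layer still denies TRUNCATE for
	 * this path, then the TRUNCATE entry of layer_masks is nonzero after
	 * is_access_to_paths_allowed(), so only READ_FILE ends up in
	 * allowed_access.
	 */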

	/*
	 * For operations on already opened files (e.g. ftruncate()), it is the
	 * access rights at the time of open() which decide whether the
	 * operation is permitted.  Therefore, we record the relevant subset of
	 * file access rights in the opened struct file.
	 */
	landlock_file(file)->allowed_access = allowed_access;

	if ((open_access_request & allowed_access) == open_access_request)
		return 0;

	return -EACCES;
}

static int hook_file_truncate(struct file *const file)
{
	/*
	 * Allows truncation if the truncate right was available at the time
	 * of opening the file, so that the access check is consistent with
	 * the ones for read, write and execute operations.
	 *
	 * Note: For checks done based on the file's Landlock allowed access,
	 * we enforce them independently of whether the current thread is in
	 * a Landlock domain, so that open files passed between independent
	 * processes retain their behaviour.
	 */
	if (landlock_file(file)->allowed_access & LANDLOCK_ACCESS_FS_TRUNCATE)
		return 0;
	return -EACCES;
}
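
/*
 * Illustration (not part of the kernel build): in a domain that grants only
 * LANDLOCK_ACCESS_FS_READ_FILE on "file", the following is expected:
 *
 *	int fd = open("file", O_RDONLY);	// succeeds
 *	ftruncate(fd, 0);			// fails with EACCES
 *
 * The verdict is fixed at open() time: passing fd to another, unsandboxed
 * process does not make ftruncate(fd, 0) succeed there either.
 */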

static int hook_file_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	access_mask_t allowed_access = landlock_file(file)->allowed_access;

	/*
	 * It is the access rights at the time of opening the file which
	 * determine whether IOCTL can be used on the opened file later.
	 *
	 * The access right is attached to the opened file in hook_file_open().
	 */
	if (allowed_access & LANDLOCK_ACCESS_FS_IOCTL_DEV)
		return 0;

	if (!is_device(file))
		return 0;

	if (is_masked_device_ioctl(cmd))
		return 0;

	return -EACCES;
}
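
/*
 * Illustration (not part of the kernel build): on a device file opened
 * without LANDLOCK_ACCESS_FS_IOCTL_DEV, a blanket-permitted command such as
 * FIOCLEX is expected to still succeed, whereas a command forwarded to the
 * device driver, e.g. FIONREAD on a character device, fails with EACCES.
 */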

static int hook_file_ioctl_compat(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	access_mask_t allowed_access = landlock_file(file)->allowed_access;

	/*
	 * It is the access rights at the time of opening the file which
	 * determine whether IOCTL can be used on the opened file later.
	 *
	 * The access right is attached to the opened file in hook_file_open().
	 */
	if (allowed_access & LANDLOCK_ACCESS_FS_IOCTL_DEV)
		return 0;

	if (!is_device(file))
		return 0;

	if (is_masked_device_ioctl_compat(cmd))
		return 0;

	return -EACCES;
}

static void hook_file_set_fowner(struct file *file)
{
	struct landlock_ruleset *new_dom, *prev_dom;

	/*
	 * Lock already held by __f_setown(), see commit 26f204380a3c ("fs: Fix
	 * file_set_fowner LSM hook inconsistencies").
	 */
	lockdep_assert_held(&file_f_owner(file)->lock);
	new_dom = landlock_get_current_domain();
	landlock_get_ruleset(new_dom);
	prev_dom = landlock_file(file)->fown_domain;
	landlock_file(file)->fown_domain = new_dom;

	/* Called in an RCU read-side critical section. */
	landlock_put_ruleset_deferred(prev_dom);
}
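
/*
 * Note (illustrative): fown_domain snapshots the domain of the task that
 * set the file owner, e.g. with fcntl(fd, F_SETOWN, pid).  It is presumably
 * consulted when the kernel later delivers SIGIO/SIGURG for this file, so
 * that signal scoping is checked against the domain at F_SETOWN time rather
 * than at delivery time.
 */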

static void hook_file_free_security(struct file *file)
{
	landlock_put_ruleset_deferred(landlock_file(file)->fown_domain);
}

static struct security_hook_list landlock_hooks[] __ro_after_init = {
	LSM_HOOK_INIT(inode_free_security_rcu, hook_inode_free_security_rcu),

	LSM_HOOK_INIT(sb_delete, hook_sb_delete),
	LSM_HOOK_INIT(sb_mount, hook_sb_mount),
	LSM_HOOK_INIT(move_mount, hook_move_mount),
	LSM_HOOK_INIT(sb_umount, hook_sb_umount),
	LSM_HOOK_INIT(sb_remount, hook_sb_remount),
	LSM_HOOK_INIT(sb_pivotroot, hook_sb_pivotroot),

	LSM_HOOK_INIT(path_link, hook_path_link),
	LSM_HOOK_INIT(path_rename, hook_path_rename),
	LSM_HOOK_INIT(path_mkdir, hook_path_mkdir),
	LSM_HOOK_INIT(path_mknod, hook_path_mknod),
	LSM_HOOK_INIT(path_symlink, hook_path_symlink),
	LSM_HOOK_INIT(path_unlink, hook_path_unlink),
	LSM_HOOK_INIT(path_rmdir, hook_path_rmdir),
	LSM_HOOK_INIT(path_truncate, hook_path_truncate),

	LSM_HOOK_INIT(file_alloc_security, hook_file_alloc_security),
	LSM_HOOK_INIT(file_open, hook_file_open),
	LSM_HOOK_INIT(file_truncate, hook_file_truncate),
	LSM_HOOK_INIT(file_ioctl, hook_file_ioctl),
	LSM_HOOK_INIT(file_ioctl_compat, hook_file_ioctl_compat),
	LSM_HOOK_INIT(file_set_fowner, hook_file_set_fowner),
	LSM_HOOK_INIT(file_free_security, hook_file_free_security),
};

__init void landlock_add_fs_hooks(void)
{
	security_add_hooks(landlock_hooks, ARRAY_SIZE(landlock_hooks),
			   &landlock_lsmid);
}

#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST

/* clang-format off */
static struct kunit_case test_cases[] = {
	KUNIT_CASE(test_no_more_access),
	KUNIT_CASE(test_scope_to_request_with_exec_none),
	KUNIT_CASE(test_scope_to_request_with_exec_some),
	KUNIT_CASE(test_scope_to_request_without_access),
	KUNIT_CASE(test_is_eacces_with_none),
	KUNIT_CASE(test_is_eacces_with_refer),
	KUNIT_CASE(test_is_eacces_with_write),
	{}
};
/* clang-format on */

static struct kunit_suite test_suite = {
	.name = "landlock_fs",
	.test_cases = test_cases,
};

kunit_test_suite(test_suite);
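
/*
 * Illustration (not part of the kernel build): assuming the usual KUnit
 * tooling and a .kunitconfig in this directory, the suite can be run with:
 *
 *	./tools/testing/kunit/kunit.py run --kunitconfig security/landlock
 */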

#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */