1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Landlock LSM - Filesystem management and hooks 4 * 5 * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net> 6 * Copyright © 2018-2020 ANSSI 7 * Copyright © 2021-2022 Microsoft Corporation 8 * Copyright © 2022 Günther Noack <gnoack3000@gmail.com> 9 * Copyright © 2023-2024 Google LLC 10 */ 11 12 #include <asm/ioctls.h> 13 #include <kunit/test.h> 14 #include <linux/atomic.h> 15 #include <linux/bitops.h> 16 #include <linux/bits.h> 17 #include <linux/compiler_types.h> 18 #include <linux/dcache.h> 19 #include <linux/err.h> 20 #include <linux/falloc.h> 21 #include <linux/fs.h> 22 #include <linux/init.h> 23 #include <linux/kernel.h> 24 #include <linux/limits.h> 25 #include <linux/list.h> 26 #include <linux/lsm_hooks.h> 27 #include <linux/mount.h> 28 #include <linux/namei.h> 29 #include <linux/path.h> 30 #include <linux/pid.h> 31 #include <linux/rcupdate.h> 32 #include <linux/sched/signal.h> 33 #include <linux/spinlock.h> 34 #include <linux/stat.h> 35 #include <linux/types.h> 36 #include <linux/wait_bit.h> 37 #include <linux/workqueue.h> 38 #include <uapi/linux/fiemap.h> 39 #include <uapi/linux/landlock.h> 40 41 #include "access.h" 42 #include "common.h" 43 #include "cred.h" 44 #include "fs.h" 45 #include "limits.h" 46 #include "object.h" 47 #include "ruleset.h" 48 #include "setup.h" 49 50 /* Underlying object management */ 51 52 static void release_inode(struct landlock_object *const object) 53 __releases(object->lock) 54 { 55 struct inode *const inode = object->underobj; 56 struct super_block *sb; 57 58 if (!inode) { 59 spin_unlock(&object->lock); 60 return; 61 } 62 63 /* 64 * Protects against concurrent use by hook_sb_delete() of the reference 65 * to the underlying inode. 66 */ 67 object->underobj = NULL; 68 /* 69 * Makes sure that if the filesystem is concurrently unmounted, 70 * hook_sb_delete() will wait for us to finish iput(). 71 */ 72 sb = inode->i_sb; 73 atomic_long_inc(&landlock_superblock(sb)->inode_refs); 74 spin_unlock(&object->lock); 75 /* 76 * Because object->underobj was not NULL, hook_sb_delete() and 77 * get_inode_object() guarantee that it is safe to reset 78 * landlock_inode(inode)->object while it is not NULL. It is therefore 79 * not necessary to lock inode->i_lock. 80 */ 81 rcu_assign_pointer(landlock_inode(inode)->object, NULL); 82 /* 83 * Now, new rules can safely be tied to @inode with get_inode_object(). 84 */ 85 86 iput(inode); 87 if (atomic_long_dec_and_test(&landlock_superblock(sb)->inode_refs)) 88 wake_up_var(&landlock_superblock(sb)->inode_refs); 89 } 90 91 static const struct landlock_object_underops landlock_fs_underops = { 92 .release = release_inode 93 }; 94 95 /* IOCTL helpers */ 96 97 /** 98 * is_masked_device_ioctl - Determine whether an IOCTL command is always 99 * permitted with Landlock for device files. These commands can not be 100 * restricted on device files by enforcing a Landlock policy. 101 * 102 * @cmd: The IOCTL command that is supposed to be run. 103 * 104 * By default, any IOCTL on a device file requires the 105 * LANDLOCK_ACCESS_FS_IOCTL_DEV right. However, we blanket-permit some 106 * commands, if: 107 * 108 * 1. The command is implemented in fs/ioctl.c's do_vfs_ioctl(), 109 * not in f_ops->unlocked_ioctl() or f_ops->compat_ioctl(). 110 * 111 * 2. The command is harmless when invoked on devices. 112 * 113 * We also permit commands that do not make sense for devices, but where the 114 * do_vfs_ioctl() implementation returns a more conventional error code. 115 * 116 * Any new IOCTL commands that are implemented in fs/ioctl.c's do_vfs_ioctl() 117 * should be considered for inclusion here. 118 * 119 * Returns: true if the IOCTL @cmd can not be restricted with Landlock for 120 * device files. 121 */ 122 static __attribute_const__ bool is_masked_device_ioctl(const unsigned int cmd) 123 { 124 switch (cmd) { 125 /* 126 * FIOCLEX, FIONCLEX, FIONBIO and FIOASYNC manipulate the FD's 127 * close-on-exec and the file's buffered-IO and async flags. These 128 * operations are also available through fcntl(2), and are 129 * unconditionally permitted in Landlock. 130 */ 131 case FIOCLEX: 132 case FIONCLEX: 133 case FIONBIO: 134 case FIOASYNC: 135 /* 136 * FIOQSIZE queries the size of a regular file, directory, or link. 137 * 138 * We still permit it, because it always returns -ENOTTY for 139 * other file types. 140 */ 141 case FIOQSIZE: 142 /* 143 * FIFREEZE and FITHAW freeze and thaw the file system which the 144 * given file belongs to. Requires CAP_SYS_ADMIN. 145 * 146 * These commands operate on the file system's superblock rather 147 * than on the file itself. The same operations can also be 148 * done through any other file or directory on the same file 149 * system, so it is safe to permit these. 150 */ 151 case FIFREEZE: 152 case FITHAW: 153 /* 154 * FS_IOC_FIEMAP queries information about the allocation of 155 * blocks within a file. 156 * 157 * This IOCTL command only makes sense for regular files and is 158 * not implemented by devices. It is harmless to permit. 159 */ 160 case FS_IOC_FIEMAP: 161 /* 162 * FIGETBSZ queries the file system's block size for a file or 163 * directory. 164 * 165 * This command operates on the file system's superblock rather 166 * than on the file itself. The same operation can also be done 167 * through any other file or directory on the same file system, 168 * so it is safe to permit it. 169 */ 170 case FIGETBSZ: 171 /* 172 * FICLONE, FICLONERANGE and FIDEDUPERANGE make files share 173 * their underlying storage ("reflink") between source and 174 * destination FDs, on file systems which support that. 175 * 176 * These IOCTL commands only apply to regular files 177 * and are harmless to permit for device files. 178 */ 179 case FICLONE: 180 case FICLONERANGE: 181 case FIDEDUPERANGE: 182 /* 183 * FS_IOC_GETFSUUID and FS_IOC_GETFSSYSFSPATH both operate on 184 * the file system superblock, not on the specific file, so 185 * these operations are available through any other file on the 186 * same file system as well. 187 */ 188 case FS_IOC_GETFSUUID: 189 case FS_IOC_GETFSSYSFSPATH: 190 return true; 191 192 /* 193 * FIONREAD, FS_IOC_GETFLAGS, FS_IOC_SETFLAGS, FS_IOC_FSGETXATTR and 194 * FS_IOC_FSSETXATTR are forwarded to device implementations. 195 */ 196 197 /* 198 * file_ioctl() commands (FIBMAP, FS_IOC_RESVSP, FS_IOC_RESVSP64, 199 * FS_IOC_UNRESVSP, FS_IOC_UNRESVSP64 and FS_IOC_ZERO_RANGE) are 200 * forwarded to device implementations, so not permitted. 201 */ 202 203 /* Other commands are guarded by the access right. */ 204 default: 205 return false; 206 } 207 } 208 209 /* 210 * is_masked_device_ioctl_compat - same as the helper above, but checking the 211 * "compat" IOCTL commands. 212 * 213 * The IOCTL commands with special handling in compat-mode should behave the 214 * same as their non-compat counterparts. 215 */ 216 static __attribute_const__ bool 217 is_masked_device_ioctl_compat(const unsigned int cmd) 218 { 219 switch (cmd) { 220 /* FICLONE is permitted, same as in the non-compat variant. */ 221 case FICLONE: 222 return true; 223 224 #if defined(CONFIG_X86_64) 225 /* 226 * FS_IOC_RESVSP_32, FS_IOC_RESVSP64_32, FS_IOC_UNRESVSP_32, 227 * FS_IOC_UNRESVSP64_32, FS_IOC_ZERO_RANGE_32: not blanket-permitted, 228 * for consistency with their non-compat variants. 229 */ 230 case FS_IOC_RESVSP_32: 231 case FS_IOC_RESVSP64_32: 232 case FS_IOC_UNRESVSP_32: 233 case FS_IOC_UNRESVSP64_32: 234 case FS_IOC_ZERO_RANGE_32: 235 #endif 236 237 /* 238 * FS_IOC32_GETFLAGS, FS_IOC32_SETFLAGS are forwarded to their device 239 * implementations. 240 */ 241 case FS_IOC32_GETFLAGS: 242 case FS_IOC32_SETFLAGS: 243 return false; 244 default: 245 return is_masked_device_ioctl(cmd); 246 } 247 } 248 249 /* Ruleset management */ 250 251 static struct landlock_object *get_inode_object(struct inode *const inode) 252 { 253 struct landlock_object *object, *new_object; 254 struct landlock_inode_security *inode_sec = landlock_inode(inode); 255 256 rcu_read_lock(); 257 retry: 258 object = rcu_dereference(inode_sec->object); 259 if (object) { 260 if (likely(refcount_inc_not_zero(&object->usage))) { 261 rcu_read_unlock(); 262 return object; 263 } 264 /* 265 * We are racing with release_inode(), the object is going 266 * away. Wait for release_inode(), then retry. 267 */ 268 spin_lock(&object->lock); 269 spin_unlock(&object->lock); 270 goto retry; 271 } 272 rcu_read_unlock(); 273 274 /* 275 * If there is no object tied to @inode, then create a new one (without 276 * holding any locks). 277 */ 278 new_object = landlock_create_object(&landlock_fs_underops, inode); 279 if (IS_ERR(new_object)) 280 return new_object; 281 282 /* 283 * Protects against concurrent calls to get_inode_object() or 284 * hook_sb_delete(). 285 */ 286 spin_lock(&inode->i_lock); 287 if (unlikely(rcu_access_pointer(inode_sec->object))) { 288 /* Someone else just created the object, bail out and retry. */ 289 spin_unlock(&inode->i_lock); 290 kfree(new_object); 291 292 rcu_read_lock(); 293 goto retry; 294 } 295 296 /* 297 * @inode will be released by hook_sb_delete() on its superblock 298 * shutdown, or by release_inode() when no more ruleset references the 299 * related object. 300 */ 301 ihold(inode); 302 rcu_assign_pointer(inode_sec->object, new_object); 303 spin_unlock(&inode->i_lock); 304 return new_object; 305 } 306 307 /* All access rights that can be tied to files. */ 308 /* clang-format off */ 309 #define ACCESS_FILE ( \ 310 LANDLOCK_ACCESS_FS_EXECUTE | \ 311 LANDLOCK_ACCESS_FS_WRITE_FILE | \ 312 LANDLOCK_ACCESS_FS_READ_FILE | \ 313 LANDLOCK_ACCESS_FS_TRUNCATE | \ 314 LANDLOCK_ACCESS_FS_IOCTL_DEV) 315 /* clang-format on */ 316 317 /* 318 * @path: Should have been checked by get_path_from_fd(). 319 */ 320 int landlock_append_fs_rule(struct landlock_ruleset *const ruleset, 321 const struct path *const path, 322 access_mask_t access_rights) 323 { 324 int err; 325 struct landlock_id id = { 326 .type = LANDLOCK_KEY_INODE, 327 }; 328 329 /* Files only get access rights that make sense. */ 330 if (!d_is_dir(path->dentry) && 331 (access_rights | ACCESS_FILE) != ACCESS_FILE) 332 return -EINVAL; 333 if (WARN_ON_ONCE(ruleset->num_layers != 1)) 334 return -EINVAL; 335 336 /* Transforms relative access rights to absolute ones. */ 337 access_rights |= LANDLOCK_MASK_ACCESS_FS & 338 ~landlock_get_fs_access_mask(ruleset, 0); 339 id.key.object = get_inode_object(d_backing_inode(path->dentry)); 340 if (IS_ERR(id.key.object)) 341 return PTR_ERR(id.key.object); 342 mutex_lock(&ruleset->lock); 343 err = landlock_insert_rule(ruleset, id, access_rights); 344 mutex_unlock(&ruleset->lock); 345 /* 346 * No need to check for an error because landlock_insert_rule() 347 * increments the refcount for the new object if needed. 348 */ 349 landlock_put_object(id.key.object); 350 return err; 351 } 352 353 /* Access-control management */ 354 355 /* 356 * The lifetime of the returned rule is tied to @domain. 357 * 358 * Returns NULL if no rule is found or if @dentry is negative. 359 */ 360 static const struct landlock_rule * 361 find_rule(const struct landlock_ruleset *const domain, 362 const struct dentry *const dentry) 363 { 364 const struct landlock_rule *rule; 365 const struct inode *inode; 366 struct landlock_id id = { 367 .type = LANDLOCK_KEY_INODE, 368 }; 369 370 /* Ignores nonexistent leafs. */ 371 if (d_is_negative(dentry)) 372 return NULL; 373 374 inode = d_backing_inode(dentry); 375 rcu_read_lock(); 376 id.key.object = rcu_dereference(landlock_inode(inode)->object); 377 rule = landlock_find_rule(domain, id); 378 rcu_read_unlock(); 379 return rule; 380 } 381 382 /* 383 * Allows access to pseudo filesystems that will never be mountable (e.g. 384 * sockfs, pipefs), but can still be reachable through 385 * /proc/<pid>/fd/<file-descriptor> 386 */ 387 static bool is_nouser_or_private(const struct dentry *dentry) 388 { 389 return (dentry->d_sb->s_flags & SB_NOUSER) || 390 (d_is_positive(dentry) && 391 unlikely(IS_PRIVATE(d_backing_inode(dentry)))); 392 } 393 394 static const struct access_masks any_fs = { 395 .fs = ~0, 396 }; 397 398 static const struct landlock_ruleset *get_current_fs_domain(void) 399 { 400 return landlock_get_applicable_domain(landlock_get_current_domain(), 401 any_fs); 402 } 403 404 /* 405 * Check that a destination file hierarchy has more restrictions than a source 406 * file hierarchy. This is only used for link and rename actions. 407 * 408 * @layer_masks_child2: Optional child masks. 409 */ 410 static bool no_more_access( 411 const layer_mask_t (*const layer_masks_parent1)[LANDLOCK_NUM_ACCESS_FS], 412 const layer_mask_t (*const layer_masks_child1)[LANDLOCK_NUM_ACCESS_FS], 413 const bool child1_is_directory, 414 const layer_mask_t (*const layer_masks_parent2)[LANDLOCK_NUM_ACCESS_FS], 415 const layer_mask_t (*const layer_masks_child2)[LANDLOCK_NUM_ACCESS_FS], 416 const bool child2_is_directory) 417 { 418 unsigned long access_bit; 419 420 for (access_bit = 0; access_bit < ARRAY_SIZE(*layer_masks_parent2); 421 access_bit++) { 422 /* Ignores accesses that only make sense for directories. */ 423 const bool is_file_access = 424 !!(BIT_ULL(access_bit) & ACCESS_FILE); 425 426 if (child1_is_directory || is_file_access) { 427 /* 428 * Checks if the destination restrictions are a 429 * superset of the source ones (i.e. inherited access 430 * rights without child exceptions): 431 * restrictions(parent2) >= restrictions(child1) 432 */ 433 if ((((*layer_masks_parent1)[access_bit] & 434 (*layer_masks_child1)[access_bit]) | 435 (*layer_masks_parent2)[access_bit]) != 436 (*layer_masks_parent2)[access_bit]) 437 return false; 438 } 439 440 if (!layer_masks_child2) 441 continue; 442 if (child2_is_directory || is_file_access) { 443 /* 444 * Checks inverted restrictions for RENAME_EXCHANGE: 445 * restrictions(parent1) >= restrictions(child2) 446 */ 447 if ((((*layer_masks_parent2)[access_bit] & 448 (*layer_masks_child2)[access_bit]) | 449 (*layer_masks_parent1)[access_bit]) != 450 (*layer_masks_parent1)[access_bit]) 451 return false; 452 } 453 } 454 return true; 455 } 456 457 #define NMA_TRUE(...) KUNIT_EXPECT_TRUE(test, no_more_access(__VA_ARGS__)) 458 #define NMA_FALSE(...) KUNIT_EXPECT_FALSE(test, no_more_access(__VA_ARGS__)) 459 460 #ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST 461 462 static void test_no_more_access(struct kunit *const test) 463 { 464 const layer_mask_t rx0[LANDLOCK_NUM_ACCESS_FS] = { 465 [BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0), 466 [BIT_INDEX(LANDLOCK_ACCESS_FS_READ_FILE)] = BIT_ULL(0), 467 }; 468 const layer_mask_t mx0[LANDLOCK_NUM_ACCESS_FS] = { 469 [BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0), 470 [BIT_INDEX(LANDLOCK_ACCESS_FS_MAKE_REG)] = BIT_ULL(0), 471 }; 472 const layer_mask_t x0[LANDLOCK_NUM_ACCESS_FS] = { 473 [BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0), 474 }; 475 const layer_mask_t x1[LANDLOCK_NUM_ACCESS_FS] = { 476 [BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(1), 477 }; 478 const layer_mask_t x01[LANDLOCK_NUM_ACCESS_FS] = { 479 [BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0) | 480 BIT_ULL(1), 481 }; 482 const layer_mask_t allows_all[LANDLOCK_NUM_ACCESS_FS] = {}; 483 484 /* Checks without restriction. */ 485 NMA_TRUE(&x0, &allows_all, false, &allows_all, NULL, false); 486 NMA_TRUE(&allows_all, &x0, false, &allows_all, NULL, false); 487 NMA_FALSE(&x0, &x0, false, &allows_all, NULL, false); 488 489 /* 490 * Checks that we can only refer a file if no more access could be 491 * inherited. 492 */ 493 NMA_TRUE(&x0, &x0, false, &rx0, NULL, false); 494 NMA_TRUE(&rx0, &rx0, false, &rx0, NULL, false); 495 NMA_FALSE(&rx0, &rx0, false, &x0, NULL, false); 496 NMA_FALSE(&rx0, &rx0, false, &x1, NULL, false); 497 498 /* Checks allowed referring with different nested domains. */ 499 NMA_TRUE(&x0, &x1, false, &x0, NULL, false); 500 NMA_TRUE(&x1, &x0, false, &x0, NULL, false); 501 NMA_TRUE(&x0, &x01, false, &x0, NULL, false); 502 NMA_TRUE(&x0, &x01, false, &rx0, NULL, false); 503 NMA_TRUE(&x01, &x0, false, &x0, NULL, false); 504 NMA_TRUE(&x01, &x0, false, &rx0, NULL, false); 505 NMA_FALSE(&x01, &x01, false, &x0, NULL, false); 506 507 /* Checks that file access rights are also enforced for a directory. */ 508 NMA_FALSE(&rx0, &rx0, true, &x0, NULL, false); 509 510 /* Checks that directory access rights don't impact file referring... */ 511 NMA_TRUE(&mx0, &mx0, false, &x0, NULL, false); 512 /* ...but only directory referring. */ 513 NMA_FALSE(&mx0, &mx0, true, &x0, NULL, false); 514 515 /* Checks directory exchange. */ 516 NMA_TRUE(&mx0, &mx0, true, &mx0, &mx0, true); 517 NMA_TRUE(&mx0, &mx0, true, &mx0, &x0, true); 518 NMA_FALSE(&mx0, &mx0, true, &x0, &mx0, true); 519 NMA_FALSE(&mx0, &mx0, true, &x0, &x0, true); 520 NMA_FALSE(&mx0, &mx0, true, &x1, &x1, true); 521 522 /* Checks file exchange with directory access rights... */ 523 NMA_TRUE(&mx0, &mx0, false, &mx0, &mx0, false); 524 NMA_TRUE(&mx0, &mx0, false, &mx0, &x0, false); 525 NMA_TRUE(&mx0, &mx0, false, &x0, &mx0, false); 526 NMA_TRUE(&mx0, &mx0, false, &x0, &x0, false); 527 /* ...and with file access rights. */ 528 NMA_TRUE(&rx0, &rx0, false, &rx0, &rx0, false); 529 NMA_TRUE(&rx0, &rx0, false, &rx0, &x0, false); 530 NMA_FALSE(&rx0, &rx0, false, &x0, &rx0, false); 531 NMA_FALSE(&rx0, &rx0, false, &x0, &x0, false); 532 NMA_FALSE(&rx0, &rx0, false, &x1, &x1, false); 533 534 /* 535 * Allowing the following requests should not be a security risk 536 * because domain 0 denies execute access, and domain 1 is always 537 * nested with domain 0. However, adding an exception for this case 538 * would mean to check all nested domains to make sure none can get 539 * more privileges (e.g. processes only sandboxed by domain 0). 540 * Moreover, this behavior (i.e. composition of N domains) could then 541 * be inconsistent compared to domain 1's ruleset alone (e.g. it might 542 * be denied to link/rename with domain 1's ruleset, whereas it would 543 * be allowed if nested on top of domain 0). Another drawback would be 544 * to create a cover channel that could enable sandboxed processes to 545 * infer most of the filesystem restrictions from their domain. To 546 * make it simple, efficient, safe, and more consistent, this case is 547 * always denied. 548 */ 549 NMA_FALSE(&x1, &x1, false, &x0, NULL, false); 550 NMA_FALSE(&x1, &x1, false, &rx0, NULL, false); 551 NMA_FALSE(&x1, &x1, true, &x0, NULL, false); 552 NMA_FALSE(&x1, &x1, true, &rx0, NULL, false); 553 554 /* Checks the same case of exclusive domains with a file... */ 555 NMA_TRUE(&x1, &x1, false, &x01, NULL, false); 556 NMA_FALSE(&x1, &x1, false, &x01, &x0, false); 557 NMA_FALSE(&x1, &x1, false, &x01, &x01, false); 558 NMA_FALSE(&x1, &x1, false, &x0, &x0, false); 559 /* ...and with a directory. */ 560 NMA_FALSE(&x1, &x1, false, &x0, &x0, true); 561 NMA_FALSE(&x1, &x1, true, &x0, &x0, false); 562 NMA_FALSE(&x1, &x1, true, &x0, &x0, true); 563 } 564 565 #endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */ 566 567 #undef NMA_TRUE 568 #undef NMA_FALSE 569 570 static bool is_layer_masks_allowed( 571 layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS]) 572 { 573 return !memchr_inv(layer_masks, 0, sizeof(*layer_masks)); 574 } 575 576 /* 577 * Removes @layer_masks accesses that are not requested. 578 * 579 * Returns true if the request is allowed, false otherwise. 580 */ 581 static bool 582 scope_to_request(const access_mask_t access_request, 583 layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS]) 584 { 585 const unsigned long access_req = access_request; 586 unsigned long access_bit; 587 588 if (WARN_ON_ONCE(!layer_masks)) 589 return true; 590 591 for_each_clear_bit(access_bit, &access_req, ARRAY_SIZE(*layer_masks)) 592 (*layer_masks)[access_bit] = 0; 593 594 return is_layer_masks_allowed(layer_masks); 595 } 596 597 #ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST 598 599 static void test_scope_to_request_with_exec_none(struct kunit *const test) 600 { 601 /* Allows everything. */ 602 layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {}; 603 604 /* Checks and scopes with execute. */ 605 KUNIT_EXPECT_TRUE(test, scope_to_request(LANDLOCK_ACCESS_FS_EXECUTE, 606 &layer_masks)); 607 KUNIT_EXPECT_EQ(test, 0, 608 layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)]); 609 KUNIT_EXPECT_EQ(test, 0, 610 layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)]); 611 } 612 613 static void test_scope_to_request_with_exec_some(struct kunit *const test) 614 { 615 /* Denies execute and write. */ 616 layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = { 617 [BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0), 618 [BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)] = BIT_ULL(1), 619 }; 620 621 /* Checks and scopes with execute. */ 622 KUNIT_EXPECT_FALSE(test, scope_to_request(LANDLOCK_ACCESS_FS_EXECUTE, 623 &layer_masks)); 624 KUNIT_EXPECT_EQ(test, BIT_ULL(0), 625 layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)]); 626 KUNIT_EXPECT_EQ(test, 0, 627 layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)]); 628 } 629 630 static void test_scope_to_request_without_access(struct kunit *const test) 631 { 632 /* Denies execute and write. */ 633 layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = { 634 [BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0), 635 [BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)] = BIT_ULL(1), 636 }; 637 638 /* Checks and scopes without access request. */ 639 KUNIT_EXPECT_TRUE(test, scope_to_request(0, &layer_masks)); 640 KUNIT_EXPECT_EQ(test, 0, 641 layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)]); 642 KUNIT_EXPECT_EQ(test, 0, 643 layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)]); 644 } 645 646 #endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */ 647 648 /* 649 * Returns true if there is at least one access right different than 650 * LANDLOCK_ACCESS_FS_REFER. 651 */ 652 static bool 653 is_eacces(const layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS], 654 const access_mask_t access_request) 655 { 656 unsigned long access_bit; 657 /* LANDLOCK_ACCESS_FS_REFER alone must return -EXDEV. */ 658 const unsigned long access_check = access_request & 659 ~LANDLOCK_ACCESS_FS_REFER; 660 661 if (!layer_masks) 662 return false; 663 664 for_each_set_bit(access_bit, &access_check, ARRAY_SIZE(*layer_masks)) { 665 if ((*layer_masks)[access_bit]) 666 return true; 667 } 668 return false; 669 } 670 671 #define IE_TRUE(...) KUNIT_EXPECT_TRUE(test, is_eacces(__VA_ARGS__)) 672 #define IE_FALSE(...) KUNIT_EXPECT_FALSE(test, is_eacces(__VA_ARGS__)) 673 674 #ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST 675 676 static void test_is_eacces_with_none(struct kunit *const test) 677 { 678 const layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {}; 679 680 IE_FALSE(&layer_masks, 0); 681 IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_REFER); 682 IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_EXECUTE); 683 IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_WRITE_FILE); 684 } 685 686 static void test_is_eacces_with_refer(struct kunit *const test) 687 { 688 const layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = { 689 [BIT_INDEX(LANDLOCK_ACCESS_FS_REFER)] = BIT_ULL(0), 690 }; 691 692 IE_FALSE(&layer_masks, 0); 693 IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_REFER); 694 IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_EXECUTE); 695 IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_WRITE_FILE); 696 } 697 698 static void test_is_eacces_with_write(struct kunit *const test) 699 { 700 const layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = { 701 [BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)] = BIT_ULL(0), 702 }; 703 704 IE_FALSE(&layer_masks, 0); 705 IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_REFER); 706 IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_EXECUTE); 707 708 IE_TRUE(&layer_masks, LANDLOCK_ACCESS_FS_WRITE_FILE); 709 } 710 711 #endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */ 712 713 #undef IE_TRUE 714 #undef IE_FALSE 715 716 /** 717 * is_access_to_paths_allowed - Check accesses for requests with a common path 718 * 719 * @domain: Domain to check against. 720 * @path: File hierarchy to walk through. 721 * @access_request_parent1: Accesses to check, once @layer_masks_parent1 is 722 * equal to @layer_masks_parent2 (if any). This is tied to the unique 723 * requested path for most actions, or the source in case of a refer action 724 * (i.e. rename or link), or the source and destination in case of 725 * RENAME_EXCHANGE. 726 * @layer_masks_parent1: Pointer to a matrix of layer masks per access 727 * masks, identifying the layers that forbid a specific access. Bits from 728 * this matrix can be unset according to the @path walk. An empty matrix 729 * means that @domain allows all possible Landlock accesses (i.e. not only 730 * those identified by @access_request_parent1). This matrix can 731 * initially refer to domain layer masks and, when the accesses for the 732 * destination and source are the same, to requested layer masks. 733 * @dentry_child1: Dentry to the initial child of the parent1 path. This 734 * pointer must be NULL for non-refer actions (i.e. not link nor rename). 735 * @access_request_parent2: Similar to @access_request_parent1 but for a 736 * request involving a source and a destination. This refers to the 737 * destination, except in case of RENAME_EXCHANGE where it also refers to 738 * the source. Must be set to 0 when using a simple path request. 739 * @layer_masks_parent2: Similar to @layer_masks_parent1 but for a refer 740 * action. This must be NULL otherwise. 741 * @dentry_child2: Dentry to the initial child of the parent2 path. This 742 * pointer is only set for RENAME_EXCHANGE actions and must be NULL 743 * otherwise. 744 * 745 * This helper first checks that the destination has a superset of restrictions 746 * compared to the source (if any) for a common path. Because of 747 * RENAME_EXCHANGE actions, source and destinations may be swapped. It then 748 * checks that the collected accesses and the remaining ones are enough to 749 * allow the request. 750 * 751 * Returns: 752 * - true if the access request is granted; 753 * - false otherwise. 754 */ 755 static bool is_access_to_paths_allowed( 756 const struct landlock_ruleset *const domain, 757 const struct path *const path, 758 const access_mask_t access_request_parent1, 759 layer_mask_t (*const layer_masks_parent1)[LANDLOCK_NUM_ACCESS_FS], 760 const struct dentry *const dentry_child1, 761 const access_mask_t access_request_parent2, 762 layer_mask_t (*const layer_masks_parent2)[LANDLOCK_NUM_ACCESS_FS], 763 const struct dentry *const dentry_child2) 764 { 765 bool allowed_parent1 = false, allowed_parent2 = false, is_dom_check, 766 child1_is_directory = true, child2_is_directory = true; 767 struct path walker_path; 768 access_mask_t access_masked_parent1, access_masked_parent2; 769 layer_mask_t _layer_masks_child1[LANDLOCK_NUM_ACCESS_FS], 770 _layer_masks_child2[LANDLOCK_NUM_ACCESS_FS]; 771 layer_mask_t(*layer_masks_child1)[LANDLOCK_NUM_ACCESS_FS] = NULL, 772 (*layer_masks_child2)[LANDLOCK_NUM_ACCESS_FS] = NULL; 773 774 if (!access_request_parent1 && !access_request_parent2) 775 return true; 776 if (WARN_ON_ONCE(!domain || !path)) 777 return true; 778 if (is_nouser_or_private(path->dentry)) 779 return true; 780 if (WARN_ON_ONCE(domain->num_layers < 1 || !layer_masks_parent1)) 781 return false; 782 783 allowed_parent1 = is_layer_masks_allowed(layer_masks_parent1); 784 785 if (unlikely(layer_masks_parent2)) { 786 if (WARN_ON_ONCE(!dentry_child1)) 787 return false; 788 789 allowed_parent2 = is_layer_masks_allowed(layer_masks_parent2); 790 791 /* 792 * For a double request, first check for potential privilege 793 * escalation by looking at domain handled accesses (which are 794 * a superset of the meaningful requested accesses). 795 */ 796 access_masked_parent1 = access_masked_parent2 = 797 landlock_union_access_masks(domain).fs; 798 is_dom_check = true; 799 } else { 800 if (WARN_ON_ONCE(dentry_child1 || dentry_child2)) 801 return false; 802 /* For a simple request, only check for requested accesses. */ 803 access_masked_parent1 = access_request_parent1; 804 access_masked_parent2 = access_request_parent2; 805 is_dom_check = false; 806 } 807 808 if (unlikely(dentry_child1)) { 809 landlock_unmask_layers( 810 find_rule(domain, dentry_child1), 811 landlock_init_layer_masks( 812 domain, LANDLOCK_MASK_ACCESS_FS, 813 &_layer_masks_child1, LANDLOCK_KEY_INODE), 814 &_layer_masks_child1, ARRAY_SIZE(_layer_masks_child1)); 815 layer_masks_child1 = &_layer_masks_child1; 816 child1_is_directory = d_is_dir(dentry_child1); 817 } 818 if (unlikely(dentry_child2)) { 819 landlock_unmask_layers( 820 find_rule(domain, dentry_child2), 821 landlock_init_layer_masks( 822 domain, LANDLOCK_MASK_ACCESS_FS, 823 &_layer_masks_child2, LANDLOCK_KEY_INODE), 824 &_layer_masks_child2, ARRAY_SIZE(_layer_masks_child2)); 825 layer_masks_child2 = &_layer_masks_child2; 826 child2_is_directory = d_is_dir(dentry_child2); 827 } 828 829 walker_path = *path; 830 path_get(&walker_path); 831 /* 832 * We need to walk through all the hierarchy to not miss any relevant 833 * restriction. 834 */ 835 while (true) { 836 struct dentry *parent_dentry; 837 const struct landlock_rule *rule; 838 839 /* 840 * If at least all accesses allowed on the destination are 841 * already allowed on the source, respectively if there is at 842 * least as much as restrictions on the destination than on the 843 * source, then we can safely refer files from the source to 844 * the destination without risking a privilege escalation. 845 * This also applies in the case of RENAME_EXCHANGE, which 846 * implies checks on both direction. This is crucial for 847 * standalone multilayered security policies. Furthermore, 848 * this helps avoid policy writers to shoot themselves in the 849 * foot. 850 */ 851 if (unlikely(is_dom_check && 852 no_more_access( 853 layer_masks_parent1, layer_masks_child1, 854 child1_is_directory, layer_masks_parent2, 855 layer_masks_child2, 856 child2_is_directory))) { 857 /* 858 * Now, downgrades the remaining checks from domain 859 * handled accesses to requested accesses. 860 */ 861 is_dom_check = false; 862 access_masked_parent1 = access_request_parent1; 863 access_masked_parent2 = access_request_parent2; 864 865 allowed_parent1 = 866 allowed_parent1 || 867 scope_to_request(access_masked_parent1, 868 layer_masks_parent1); 869 allowed_parent2 = 870 allowed_parent2 || 871 scope_to_request(access_masked_parent2, 872 layer_masks_parent2); 873 874 /* Stops when all accesses are granted. */ 875 if (allowed_parent1 && allowed_parent2) 876 break; 877 } 878 879 rule = find_rule(domain, walker_path.dentry); 880 allowed_parent1 = allowed_parent1 || 881 landlock_unmask_layers( 882 rule, access_masked_parent1, 883 layer_masks_parent1, 884 ARRAY_SIZE(*layer_masks_parent1)); 885 allowed_parent2 = allowed_parent2 || 886 landlock_unmask_layers( 887 rule, access_masked_parent2, 888 layer_masks_parent2, 889 ARRAY_SIZE(*layer_masks_parent2)); 890 891 /* Stops when a rule from each layer grants access. */ 892 if (allowed_parent1 && allowed_parent2) 893 break; 894 jump_up: 895 if (walker_path.dentry == walker_path.mnt->mnt_root) { 896 if (follow_up(&walker_path)) { 897 /* Ignores hidden mount points. */ 898 goto jump_up; 899 } else { 900 /* 901 * Stops at the real root. Denies access 902 * because not all layers have granted access. 903 */ 904 break; 905 } 906 } 907 if (unlikely(IS_ROOT(walker_path.dentry))) { 908 /* 909 * Stops at disconnected root directories. Only allows 910 * access to internal filesystems (e.g. nsfs, which is 911 * reachable through /proc/<pid>/ns/<namespace>). 912 */ 913 if (walker_path.mnt->mnt_flags & MNT_INTERNAL) { 914 allowed_parent1 = true; 915 allowed_parent2 = true; 916 } 917 break; 918 } 919 parent_dentry = dget_parent(walker_path.dentry); 920 dput(walker_path.dentry); 921 walker_path.dentry = parent_dentry; 922 } 923 path_put(&walker_path); 924 925 return allowed_parent1 && allowed_parent2; 926 } 927 928 static int current_check_access_path(const struct path *const path, 929 access_mask_t access_request) 930 { 931 const struct landlock_ruleset *const dom = get_current_fs_domain(); 932 layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {}; 933 934 if (!dom) 935 return 0; 936 937 access_request = landlock_init_layer_masks( 938 dom, access_request, &layer_masks, LANDLOCK_KEY_INODE); 939 if (is_access_to_paths_allowed(dom, path, access_request, &layer_masks, 940 NULL, 0, NULL, NULL)) 941 return 0; 942 943 return -EACCES; 944 } 945 946 static __attribute_const__ access_mask_t get_mode_access(const umode_t mode) 947 { 948 switch (mode & S_IFMT) { 949 case S_IFLNK: 950 return LANDLOCK_ACCESS_FS_MAKE_SYM; 951 case S_IFDIR: 952 return LANDLOCK_ACCESS_FS_MAKE_DIR; 953 case S_IFCHR: 954 return LANDLOCK_ACCESS_FS_MAKE_CHAR; 955 case S_IFBLK: 956 return LANDLOCK_ACCESS_FS_MAKE_BLOCK; 957 case S_IFIFO: 958 return LANDLOCK_ACCESS_FS_MAKE_FIFO; 959 case S_IFSOCK: 960 return LANDLOCK_ACCESS_FS_MAKE_SOCK; 961 case S_IFREG: 962 case 0: 963 /* A zero mode translates to S_IFREG. */ 964 default: 965 /* Treats weird files as regular files. */ 966 return LANDLOCK_ACCESS_FS_MAKE_REG; 967 } 968 } 969 970 static access_mask_t maybe_remove(const struct dentry *const dentry) 971 { 972 if (d_is_negative(dentry)) 973 return 0; 974 return d_is_dir(dentry) ? LANDLOCK_ACCESS_FS_REMOVE_DIR : 975 LANDLOCK_ACCESS_FS_REMOVE_FILE; 976 } 977 978 /** 979 * collect_domain_accesses - Walk through a file path and collect accesses 980 * 981 * @domain: Domain to check against. 982 * @mnt_root: Last directory to check. 983 * @dir: Directory to start the walk from. 984 * @layer_masks_dom: Where to store the collected accesses. 985 * 986 * This helper is useful to begin a path walk from the @dir directory to a 987 * @mnt_root directory used as a mount point. This mount point is the common 988 * ancestor between the source and the destination of a renamed and linked 989 * file. While walking from @dir to @mnt_root, we record all the domain's 990 * allowed accesses in @layer_masks_dom. 991 * 992 * This is similar to is_access_to_paths_allowed() but much simpler because it 993 * only handles walking on the same mount point and only checks one set of 994 * accesses. 995 * 996 * Returns: 997 * - true if all the domain access rights are allowed for @dir; 998 * - false if the walk reached @mnt_root. 999 */ 1000 static bool collect_domain_accesses( 1001 const struct landlock_ruleset *const domain, 1002 const struct dentry *const mnt_root, struct dentry *dir, 1003 layer_mask_t (*const layer_masks_dom)[LANDLOCK_NUM_ACCESS_FS]) 1004 { 1005 unsigned long access_dom; 1006 bool ret = false; 1007 1008 if (WARN_ON_ONCE(!domain || !mnt_root || !dir || !layer_masks_dom)) 1009 return true; 1010 if (is_nouser_or_private(dir)) 1011 return true; 1012 1013 access_dom = landlock_init_layer_masks(domain, LANDLOCK_MASK_ACCESS_FS, 1014 layer_masks_dom, 1015 LANDLOCK_KEY_INODE); 1016 1017 dget(dir); 1018 while (true) { 1019 struct dentry *parent_dentry; 1020 1021 /* Gets all layers allowing all domain accesses. */ 1022 if (landlock_unmask_layers(find_rule(domain, dir), access_dom, 1023 layer_masks_dom, 1024 ARRAY_SIZE(*layer_masks_dom))) { 1025 /* 1026 * Stops when all handled accesses are allowed by at 1027 * least one rule in each layer. 1028 */ 1029 ret = true; 1030 break; 1031 } 1032 1033 /* We should not reach a root other than @mnt_root. */ 1034 if (dir == mnt_root || WARN_ON_ONCE(IS_ROOT(dir))) 1035 break; 1036 1037 parent_dentry = dget_parent(dir); 1038 dput(dir); 1039 dir = parent_dentry; 1040 } 1041 dput(dir); 1042 return ret; 1043 } 1044 1045 /** 1046 * current_check_refer_path - Check if a rename or link action is allowed 1047 * 1048 * @old_dentry: File or directory requested to be moved or linked. 1049 * @new_dir: Destination parent directory. 1050 * @new_dentry: Destination file or directory. 1051 * @removable: Sets to true if it is a rename operation. 1052 * @exchange: Sets to true if it is a rename operation with RENAME_EXCHANGE. 1053 * 1054 * Because of its unprivileged constraints, Landlock relies on file hierarchies 1055 * (and not only inodes) to tie access rights to files. Being able to link or 1056 * rename a file hierarchy brings some challenges. Indeed, moving or linking a 1057 * file (i.e. creating a new reference to an inode) can have an impact on the 1058 * actions allowed for a set of files if it would change its parent directory 1059 * (i.e. reparenting). 1060 * 1061 * To avoid trivial access right bypasses, Landlock first checks if the file or 1062 * directory requested to be moved would gain new access rights inherited from 1063 * its new hierarchy. Before returning any error, Landlock then checks that 1064 * the parent source hierarchy and the destination hierarchy would allow the 1065 * link or rename action. If it is not the case, an error with EACCES is 1066 * returned to inform user space that there is no way to remove or create the 1067 * requested source file type. If it should be allowed but the new inherited 1068 * access rights would be greater than the source access rights, then the 1069 * kernel returns an error with EXDEV. Prioritizing EACCES over EXDEV enables 1070 * user space to abort the whole operation if there is no way to do it, or to 1071 * manually copy the source to the destination if this remains allowed, e.g. 1072 * because file creation is allowed on the destination directory but not direct 1073 * linking. 1074 * 1075 * To achieve this goal, the kernel needs to compare two file hierarchies: the 1076 * one identifying the source file or directory (including itself), and the 1077 * destination one. This can be seen as a multilayer partial ordering problem. 1078 * The kernel walks through these paths and collects in a matrix the access 1079 * rights that are denied per layer. These matrices are then compared to see 1080 * if the destination one has more (or the same) restrictions as the source 1081 * one. If this is the case, the requested action will not return EXDEV, which 1082 * doesn't mean the action is allowed. The parent hierarchy of the source 1083 * (i.e. parent directory), and the destination hierarchy must also be checked 1084 * to verify that they explicitly allow such action (i.e. referencing, 1085 * creation and potentially removal rights). The kernel implementation is then 1086 * required to rely on potentially four matrices of access rights: one for the 1087 * source file or directory (i.e. the child), a potentially other one for the 1088 * other source/destination (in case of RENAME_EXCHANGE), one for the source 1089 * parent hierarchy and a last one for the destination hierarchy. These 1090 * ephemeral matrices take some space on the stack, which limits the number of 1091 * layers to a deemed reasonable number: 16. 1092 * 1093 * Returns: 1094 * - 0 if access is allowed; 1095 * - -EXDEV if @old_dentry would inherit new access rights from @new_dir; 1096 * - -EACCES if file removal or creation is denied. 1097 */ 1098 static int current_check_refer_path(struct dentry *const old_dentry, 1099 const struct path *const new_dir, 1100 struct dentry *const new_dentry, 1101 const bool removable, const bool exchange) 1102 { 1103 const struct landlock_ruleset *const dom = get_current_fs_domain(); 1104 bool allow_parent1, allow_parent2; 1105 access_mask_t access_request_parent1, access_request_parent2; 1106 struct path mnt_dir; 1107 struct dentry *old_parent; 1108 layer_mask_t layer_masks_parent1[LANDLOCK_NUM_ACCESS_FS] = {}, 1109 layer_masks_parent2[LANDLOCK_NUM_ACCESS_FS] = {}; 1110 1111 if (!dom) 1112 return 0; 1113 if (WARN_ON_ONCE(dom->num_layers < 1)) 1114 return -EACCES; 1115 if (unlikely(d_is_negative(old_dentry))) 1116 return -ENOENT; 1117 if (exchange) { 1118 if (unlikely(d_is_negative(new_dentry))) 1119 return -ENOENT; 1120 access_request_parent1 = 1121 get_mode_access(d_backing_inode(new_dentry)->i_mode); 1122 } else { 1123 access_request_parent1 = 0; 1124 } 1125 access_request_parent2 = 1126 get_mode_access(d_backing_inode(old_dentry)->i_mode); 1127 if (removable) { 1128 access_request_parent1 |= maybe_remove(old_dentry); 1129 access_request_parent2 |= maybe_remove(new_dentry); 1130 } 1131 1132 /* The mount points are the same for old and new paths, cf. EXDEV. */ 1133 if (old_dentry->d_parent == new_dir->dentry) { 1134 /* 1135 * The LANDLOCK_ACCESS_FS_REFER access right is not required 1136 * for same-directory referer (i.e. no reparenting). 1137 */ 1138 access_request_parent1 = landlock_init_layer_masks( 1139 dom, access_request_parent1 | access_request_parent2, 1140 &layer_masks_parent1, LANDLOCK_KEY_INODE); 1141 if (is_access_to_paths_allowed( 1142 dom, new_dir, access_request_parent1, 1143 &layer_masks_parent1, NULL, 0, NULL, NULL)) 1144 return 0; 1145 return -EACCES; 1146 } 1147 1148 access_request_parent1 |= LANDLOCK_ACCESS_FS_REFER; 1149 access_request_parent2 |= LANDLOCK_ACCESS_FS_REFER; 1150 1151 /* Saves the common mount point. */ 1152 mnt_dir.mnt = new_dir->mnt; 1153 mnt_dir.dentry = new_dir->mnt->mnt_root; 1154 1155 /* 1156 * old_dentry may be the root of the common mount point and 1157 * !IS_ROOT(old_dentry) at the same time (e.g. with open_tree() and 1158 * OPEN_TREE_CLONE). We do not need to call dget(old_parent) because 1159 * we keep a reference to old_dentry. 1160 */ 1161 old_parent = (old_dentry == mnt_dir.dentry) ? old_dentry : 1162 old_dentry->d_parent; 1163 1164 /* new_dir->dentry is equal to new_dentry->d_parent */ 1165 allow_parent1 = collect_domain_accesses(dom, mnt_dir.dentry, old_parent, 1166 &layer_masks_parent1); 1167 allow_parent2 = collect_domain_accesses( 1168 dom, mnt_dir.dentry, new_dir->dentry, &layer_masks_parent2); 1169 1170 if (allow_parent1 && allow_parent2) 1171 return 0; 1172 1173 /* 1174 * To be able to compare source and destination domain access rights, 1175 * take into account the @old_dentry access rights aggregated with its 1176 * parent access rights. This will be useful to compare with the 1177 * destination parent access rights. 1178 */ 1179 if (is_access_to_paths_allowed( 1180 dom, &mnt_dir, access_request_parent1, &layer_masks_parent1, 1181 old_dentry, access_request_parent2, &layer_masks_parent2, 1182 exchange ? new_dentry : NULL)) 1183 return 0; 1184 1185 /* 1186 * This prioritizes EACCES over EXDEV for all actions, including 1187 * renames with RENAME_EXCHANGE. 1188 */ 1189 if (likely(is_eacces(&layer_masks_parent1, access_request_parent1) || 1190 is_eacces(&layer_masks_parent2, access_request_parent2))) 1191 return -EACCES; 1192 1193 /* 1194 * Gracefully forbids reparenting if the destination directory 1195 * hierarchy is not a superset of restrictions of the source directory 1196 * hierarchy, or if LANDLOCK_ACCESS_FS_REFER is not allowed by the 1197 * source or the destination. 1198 */ 1199 return -EXDEV; 1200 } 1201 1202 /* Inode hooks */ 1203 1204 static void hook_inode_free_security_rcu(void *inode_security) 1205 { 1206 struct landlock_inode_security *inode_sec; 1207 1208 /* 1209 * All inodes must already have been untied from their object by 1210 * release_inode() or hook_sb_delete(). 1211 */ 1212 inode_sec = inode_security + landlock_blob_sizes.lbs_inode; 1213 WARN_ON_ONCE(inode_sec->object); 1214 } 1215 1216 /* Super-block hooks */ 1217 1218 /* 1219 * Release the inodes used in a security policy. 1220 * 1221 * Cf. fsnotify_unmount_inodes() and invalidate_inodes() 1222 */ 1223 static void hook_sb_delete(struct super_block *const sb) 1224 { 1225 struct inode *inode, *prev_inode = NULL; 1226 1227 if (!landlock_initialized) 1228 return; 1229 1230 spin_lock(&sb->s_inode_list_lock); 1231 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { 1232 struct landlock_object *object; 1233 1234 /* Only handles referenced inodes. */ 1235 if (!atomic_read(&inode->i_count)) 1236 continue; 1237 1238 /* 1239 * Protects against concurrent modification of inode (e.g. 1240 * from get_inode_object()). 1241 */ 1242 spin_lock(&inode->i_lock); 1243 /* 1244 * Checks I_FREEING and I_WILL_FREE to protect against a race 1245 * condition when release_inode() just called iput(), which 1246 * could lead to a NULL dereference of inode->security or a 1247 * second call to iput() for the same Landlock object. Also 1248 * checks I_NEW because such inode cannot be tied to an object. 1249 */ 1250 if (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW)) { 1251 spin_unlock(&inode->i_lock); 1252 continue; 1253 } 1254 1255 rcu_read_lock(); 1256 object = rcu_dereference(landlock_inode(inode)->object); 1257 if (!object) { 1258 rcu_read_unlock(); 1259 spin_unlock(&inode->i_lock); 1260 continue; 1261 } 1262 /* Keeps a reference to this inode until the next loop walk. */ 1263 __iget(inode); 1264 spin_unlock(&inode->i_lock); 1265 1266 /* 1267 * If there is no concurrent release_inode() ongoing, then we 1268 * are in charge of calling iput() on this inode, otherwise we 1269 * will just wait for it to finish. 1270 */ 1271 spin_lock(&object->lock); 1272 if (object->underobj == inode) { 1273 object->underobj = NULL; 1274 spin_unlock(&object->lock); 1275 rcu_read_unlock(); 1276 1277 /* 1278 * Because object->underobj was not NULL, 1279 * release_inode() and get_inode_object() guarantee 1280 * that it is safe to reset 1281 * landlock_inode(inode)->object while it is not NULL. 1282 * It is therefore not necessary to lock inode->i_lock. 1283 */ 1284 rcu_assign_pointer(landlock_inode(inode)->object, NULL); 1285 /* 1286 * At this point, we own the ihold() reference that was 1287 * originally set up by get_inode_object() and the 1288 * __iget() reference that we just set in this loop 1289 * walk. Therefore the following call to iput() will 1290 * not sleep nor drop the inode because there is now at 1291 * least two references to it. 1292 */ 1293 iput(inode); 1294 } else { 1295 spin_unlock(&object->lock); 1296 rcu_read_unlock(); 1297 } 1298 1299 if (prev_inode) { 1300 /* 1301 * At this point, we still own the __iget() reference 1302 * that we just set in this loop walk. Therefore we 1303 * can drop the list lock and know that the inode won't 1304 * disappear from under us until the next loop walk. 1305 */ 1306 spin_unlock(&sb->s_inode_list_lock); 1307 /* 1308 * We can now actually put the inode reference from the 1309 * previous loop walk, which is not needed anymore. 1310 */ 1311 iput(prev_inode); 1312 cond_resched(); 1313 spin_lock(&sb->s_inode_list_lock); 1314 } 1315 prev_inode = inode; 1316 } 1317 spin_unlock(&sb->s_inode_list_lock); 1318 1319 /* Puts the inode reference from the last loop walk, if any. */ 1320 if (prev_inode) 1321 iput(prev_inode); 1322 /* Waits for pending iput() in release_inode(). */ 1323 wait_var_event(&landlock_superblock(sb)->inode_refs, 1324 !atomic_long_read(&landlock_superblock(sb)->inode_refs)); 1325 } 1326 1327 /* 1328 * Because a Landlock security policy is defined according to the filesystem 1329 * topology (i.e. the mount namespace), changing it may grant access to files 1330 * not previously allowed. 1331 * 1332 * To make it simple, deny any filesystem topology modification by landlocked 1333 * processes. Non-landlocked processes may still change the namespace of a 1334 * landlocked process, but this kind of threat must be handled by a system-wide 1335 * access-control security policy. 1336 * 1337 * This could be lifted in the future if Landlock can safely handle mount 1338 * namespace updates requested by a landlocked process. Indeed, we could 1339 * update the current domain (which is currently read-only) by taking into 1340 * account the accesses of the source and the destination of a new mount point. 1341 * However, it would also require to make all the child domains dynamically 1342 * inherit these new constraints. Anyway, for backward compatibility reasons, 1343 * a dedicated user space option would be required (e.g. as a ruleset flag). 1344 */ 1345 static int hook_sb_mount(const char *const dev_name, 1346 const struct path *const path, const char *const type, 1347 const unsigned long flags, void *const data) 1348 { 1349 if (!get_current_fs_domain()) 1350 return 0; 1351 return -EPERM; 1352 } 1353 1354 static int hook_move_mount(const struct path *const from_path, 1355 const struct path *const to_path) 1356 { 1357 if (!get_current_fs_domain()) 1358 return 0; 1359 return -EPERM; 1360 } 1361 1362 /* 1363 * Removing a mount point may reveal a previously hidden file hierarchy, which 1364 * may then grant access to files, which may have previously been forbidden. 1365 */ 1366 static int hook_sb_umount(struct vfsmount *const mnt, const int flags) 1367 { 1368 if (!get_current_fs_domain()) 1369 return 0; 1370 return -EPERM; 1371 } 1372 1373 static int hook_sb_remount(struct super_block *const sb, void *const mnt_opts) 1374 { 1375 if (!get_current_fs_domain()) 1376 return 0; 1377 return -EPERM; 1378 } 1379 1380 /* 1381 * pivot_root(2), like mount(2), changes the current mount namespace. It must 1382 * then be forbidden for a landlocked process. 1383 * 1384 * However, chroot(2) may be allowed because it only changes the relative root 1385 * directory of the current process. Moreover, it can be used to restrict the 1386 * view of the filesystem. 1387 */ 1388 static int hook_sb_pivotroot(const struct path *const old_path, 1389 const struct path *const new_path) 1390 { 1391 if (!get_current_fs_domain()) 1392 return 0; 1393 return -EPERM; 1394 } 1395 1396 /* Path hooks */ 1397 1398 static int hook_path_link(struct dentry *const old_dentry, 1399 const struct path *const new_dir, 1400 struct dentry *const new_dentry) 1401 { 1402 return current_check_refer_path(old_dentry, new_dir, new_dentry, false, 1403 false); 1404 } 1405 1406 static int hook_path_rename(const struct path *const old_dir, 1407 struct dentry *const old_dentry, 1408 const struct path *const new_dir, 1409 struct dentry *const new_dentry, 1410 const unsigned int flags) 1411 { 1412 /* old_dir refers to old_dentry->d_parent and new_dir->mnt */ 1413 return current_check_refer_path(old_dentry, new_dir, new_dentry, true, 1414 !!(flags & RENAME_EXCHANGE)); 1415 } 1416 1417 static int hook_path_mkdir(const struct path *const dir, 1418 struct dentry *const dentry, const umode_t mode) 1419 { 1420 return current_check_access_path(dir, LANDLOCK_ACCESS_FS_MAKE_DIR); 1421 } 1422 1423 static int hook_path_mknod(const struct path *const dir, 1424 struct dentry *const dentry, const umode_t mode, 1425 const unsigned int dev) 1426 { 1427 return current_check_access_path(dir, get_mode_access(mode)); 1428 } 1429 1430 static int hook_path_symlink(const struct path *const dir, 1431 struct dentry *const dentry, 1432 const char *const old_name) 1433 { 1434 return current_check_access_path(dir, LANDLOCK_ACCESS_FS_MAKE_SYM); 1435 } 1436 1437 static int hook_path_unlink(const struct path *const dir, 1438 struct dentry *const dentry) 1439 { 1440 return current_check_access_path(dir, LANDLOCK_ACCESS_FS_REMOVE_FILE); 1441 } 1442 1443 static int hook_path_rmdir(const struct path *const dir, 1444 struct dentry *const dentry) 1445 { 1446 return current_check_access_path(dir, LANDLOCK_ACCESS_FS_REMOVE_DIR); 1447 } 1448 1449 static int hook_path_truncate(const struct path *const path) 1450 { 1451 return current_check_access_path(path, LANDLOCK_ACCESS_FS_TRUNCATE); 1452 } 1453 1454 /* File hooks */ 1455 1456 /** 1457 * get_required_file_open_access - Get access needed to open a file 1458 * 1459 * @file: File being opened. 1460 * 1461 * Returns the access rights that are required for opening the given file, 1462 * depending on the file type and open mode. 1463 */ 1464 static access_mask_t 1465 get_required_file_open_access(const struct file *const file) 1466 { 1467 access_mask_t access = 0; 1468 1469 if (file->f_mode & FMODE_READ) { 1470 /* A directory can only be opened in read mode. */ 1471 if (S_ISDIR(file_inode(file)->i_mode)) 1472 return LANDLOCK_ACCESS_FS_READ_DIR; 1473 access = LANDLOCK_ACCESS_FS_READ_FILE; 1474 } 1475 if (file->f_mode & FMODE_WRITE) 1476 access |= LANDLOCK_ACCESS_FS_WRITE_FILE; 1477 /* __FMODE_EXEC is indeed part of f_flags, not f_mode. */ 1478 if (file->f_flags & __FMODE_EXEC) 1479 access |= LANDLOCK_ACCESS_FS_EXECUTE; 1480 return access; 1481 } 1482 1483 static int hook_file_alloc_security(struct file *const file) 1484 { 1485 /* 1486 * Grants all access rights, even if most of them are not checked later 1487 * on. It is more consistent. 1488 * 1489 * Notably, file descriptors for regular files can also be acquired 1490 * without going through the file_open hook, for example when using 1491 * memfd_create(2). 1492 */ 1493 landlock_file(file)->allowed_access = LANDLOCK_MASK_ACCESS_FS; 1494 return 0; 1495 } 1496 1497 static bool is_device(const struct file *const file) 1498 { 1499 const struct inode *inode = file_inode(file); 1500 1501 return S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode); 1502 } 1503 1504 static int hook_file_open(struct file *const file) 1505 { 1506 layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {}; 1507 access_mask_t open_access_request, full_access_request, allowed_access, 1508 optional_access; 1509 const struct landlock_ruleset *const dom = 1510 landlock_get_applicable_domain( 1511 landlock_cred(file->f_cred)->domain, any_fs); 1512 1513 if (!dom) 1514 return 0; 1515 1516 /* 1517 * Because a file may be opened with O_PATH, get_required_file_open_access() 1518 * may return 0. This case will be handled with a future Landlock 1519 * evolution. 1520 */ 1521 open_access_request = get_required_file_open_access(file); 1522 1523 /* 1524 * We look up more access than what we immediately need for open(), so 1525 * that we can later authorize operations on opened files. 1526 */ 1527 optional_access = LANDLOCK_ACCESS_FS_TRUNCATE; 1528 if (is_device(file)) 1529 optional_access |= LANDLOCK_ACCESS_FS_IOCTL_DEV; 1530 1531 full_access_request = open_access_request | optional_access; 1532 1533 if (is_access_to_paths_allowed( 1534 dom, &file->f_path, 1535 landlock_init_layer_masks(dom, full_access_request, 1536 &layer_masks, LANDLOCK_KEY_INODE), 1537 &layer_masks, NULL, 0, NULL, NULL)) { 1538 allowed_access = full_access_request; 1539 } else { 1540 unsigned long access_bit; 1541 const unsigned long access_req = full_access_request; 1542 1543 /* 1544 * Calculate the actual allowed access rights from layer_masks. 1545 * Add each access right to allowed_access which has not been 1546 * vetoed by any layer. 1547 */ 1548 allowed_access = 0; 1549 for_each_set_bit(access_bit, &access_req, 1550 ARRAY_SIZE(layer_masks)) { 1551 if (!layer_masks[access_bit]) 1552 allowed_access |= BIT_ULL(access_bit); 1553 } 1554 } 1555 1556 /* 1557 * For operations on already opened files (i.e. ftruncate()), it is the 1558 * access rights at the time of open() which decide whether the 1559 * operation is permitted. Therefore, we record the relevant subset of 1560 * file access rights in the opened struct file. 1561 */ 1562 landlock_file(file)->allowed_access = allowed_access; 1563 1564 if ((open_access_request & allowed_access) == open_access_request) 1565 return 0; 1566 1567 return -EACCES; 1568 } 1569 1570 static int hook_file_truncate(struct file *const file) 1571 { 1572 /* 1573 * Allows truncation if the truncate right was available at the time of 1574 * opening the file, to get a consistent access check as for read, write 1575 * and execute operations. 1576 * 1577 * Note: For checks done based on the file's Landlock allowed access, we 1578 * enforce them independently of whether the current thread is in a 1579 * Landlock domain, so that open files passed between independent 1580 * processes retain their behaviour. 1581 */ 1582 if (landlock_file(file)->allowed_access & LANDLOCK_ACCESS_FS_TRUNCATE) 1583 return 0; 1584 return -EACCES; 1585 } 1586 1587 static int hook_file_ioctl(struct file *file, unsigned int cmd, 1588 unsigned long arg) 1589 { 1590 access_mask_t allowed_access = landlock_file(file)->allowed_access; 1591 1592 /* 1593 * It is the access rights at the time of opening the file which 1594 * determine whether IOCTL can be used on the opened file later. 1595 * 1596 * The access right is attached to the opened file in hook_file_open(). 1597 */ 1598 if (allowed_access & LANDLOCK_ACCESS_FS_IOCTL_DEV) 1599 return 0; 1600 1601 if (!is_device(file)) 1602 return 0; 1603 1604 if (is_masked_device_ioctl(cmd)) 1605 return 0; 1606 1607 return -EACCES; 1608 } 1609 1610 static int hook_file_ioctl_compat(struct file *file, unsigned int cmd, 1611 unsigned long arg) 1612 { 1613 access_mask_t allowed_access = landlock_file(file)->allowed_access; 1614 1615 /* 1616 * It is the access rights at the time of opening the file which 1617 * determine whether IOCTL can be used on the opened file later. 1618 * 1619 * The access right is attached to the opened file in hook_file_open(). 1620 */ 1621 if (allowed_access & LANDLOCK_ACCESS_FS_IOCTL_DEV) 1622 return 0; 1623 1624 if (!is_device(file)) 1625 return 0; 1626 1627 if (is_masked_device_ioctl_compat(cmd)) 1628 return 0; 1629 1630 return -EACCES; 1631 } 1632 1633 /* 1634 * Always allow sending signals between threads of the same process. This 1635 * ensures consistency with hook_task_kill(). 1636 */ 1637 static bool control_current_fowner(struct fown_struct *const fown) 1638 { 1639 struct task_struct *p; 1640 1641 /* 1642 * Lock already held by __f_setown(), see commit 26f204380a3c ("fs: Fix 1643 * file_set_fowner LSM hook inconsistencies"). 1644 */ 1645 lockdep_assert_held(&fown->lock); 1646 1647 /* 1648 * Some callers (e.g. fcntl_dirnotify) may not be in an RCU read-side 1649 * critical section. 1650 */ 1651 guard(rcu)(); 1652 p = pid_task(fown->pid, fown->pid_type); 1653 if (!p) 1654 return true; 1655 1656 return !same_thread_group(p, current); 1657 } 1658 1659 static void hook_file_set_fowner(struct file *file) 1660 { 1661 struct landlock_ruleset *prev_dom; 1662 struct landlock_ruleset *new_dom = NULL; 1663 1664 if (control_current_fowner(file_f_owner(file))) { 1665 new_dom = landlock_get_current_domain(); 1666 landlock_get_ruleset(new_dom); 1667 } 1668 1669 prev_dom = landlock_file(file)->fown_domain; 1670 landlock_file(file)->fown_domain = new_dom; 1671 1672 /* May be called in an RCU read-side critical section. */ 1673 landlock_put_ruleset_deferred(prev_dom); 1674 } 1675 1676 static void hook_file_free_security(struct file *file) 1677 { 1678 landlock_put_ruleset_deferred(landlock_file(file)->fown_domain); 1679 } 1680 1681 static struct security_hook_list landlock_hooks[] __ro_after_init = { 1682 LSM_HOOK_INIT(inode_free_security_rcu, hook_inode_free_security_rcu), 1683 1684 LSM_HOOK_INIT(sb_delete, hook_sb_delete), 1685 LSM_HOOK_INIT(sb_mount, hook_sb_mount), 1686 LSM_HOOK_INIT(move_mount, hook_move_mount), 1687 LSM_HOOK_INIT(sb_umount, hook_sb_umount), 1688 LSM_HOOK_INIT(sb_remount, hook_sb_remount), 1689 LSM_HOOK_INIT(sb_pivotroot, hook_sb_pivotroot), 1690 1691 LSM_HOOK_INIT(path_link, hook_path_link), 1692 LSM_HOOK_INIT(path_rename, hook_path_rename), 1693 LSM_HOOK_INIT(path_mkdir, hook_path_mkdir), 1694 LSM_HOOK_INIT(path_mknod, hook_path_mknod), 1695 LSM_HOOK_INIT(path_symlink, hook_path_symlink), 1696 LSM_HOOK_INIT(path_unlink, hook_path_unlink), 1697 LSM_HOOK_INIT(path_rmdir, hook_path_rmdir), 1698 LSM_HOOK_INIT(path_truncate, hook_path_truncate), 1699 1700 LSM_HOOK_INIT(file_alloc_security, hook_file_alloc_security), 1701 LSM_HOOK_INIT(file_open, hook_file_open), 1702 LSM_HOOK_INIT(file_truncate, hook_file_truncate), 1703 LSM_HOOK_INIT(file_ioctl, hook_file_ioctl), 1704 LSM_HOOK_INIT(file_ioctl_compat, hook_file_ioctl_compat), 1705 LSM_HOOK_INIT(file_set_fowner, hook_file_set_fowner), 1706 LSM_HOOK_INIT(file_free_security, hook_file_free_security), 1707 }; 1708 1709 __init void landlock_add_fs_hooks(void) 1710 { 1711 security_add_hooks(landlock_hooks, ARRAY_SIZE(landlock_hooks), 1712 &landlock_lsmid); 1713 } 1714 1715 #ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST 1716 1717 /* clang-format off */ 1718 static struct kunit_case test_cases[] = { 1719 KUNIT_CASE(test_no_more_access), 1720 KUNIT_CASE(test_scope_to_request_with_exec_none), 1721 KUNIT_CASE(test_scope_to_request_with_exec_some), 1722 KUNIT_CASE(test_scope_to_request_without_access), 1723 KUNIT_CASE(test_is_eacces_with_none), 1724 KUNIT_CASE(test_is_eacces_with_refer), 1725 KUNIT_CASE(test_is_eacces_with_write), 1726 {} 1727 }; 1728 /* clang-format on */ 1729 1730 static struct kunit_suite test_suite = { 1731 .name = "landlock_fs", 1732 .test_cases = test_cases, 1733 }; 1734 1735 kunit_test_suite(test_suite); 1736 1737 #endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */ 1738