// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/namespace.c
 *
 * (C) Copyright Al Viro 2000, 2001
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 * Heavily rewritten.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/capability.h>
#include <linux/mnt_namespace.h>
#include <linux/user_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/idr.h>
#include <linux/init.h>		/* init_rootfs */
#include <linux/fs_struct.h>	/* get_fs_root et al. */
#include <linux/fsnotify.h>	/* fsnotify_vfsmount_delete */
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/proc_ns.h>
#include <linux/magic.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/task_work.h>
#include <linux/sched/task.h>
#include <uapi/linux/mount.h>
#include <linux/fs_context.h>
#include <linux/shmem_fs.h>
#include <linux/mnt_idmapping.h>
#include <linux/pidfs.h>
#include <linux/nstree.h>

#include "pnode.h"
#include "internal.h"

/* Maximum number of mounts in a mount namespace */
static unsigned int sysctl_mount_max __read_mostly = 100000;

static unsigned int m_hash_mask __ro_after_init;
static unsigned int m_hash_shift __ro_after_init;
static unsigned int mp_hash_mask __ro_after_init;
static unsigned int mp_hash_shift __ro_after_init;

static __initdata unsigned long mhash_entries;
static int __init set_mhash_entries(char *str)
{
	if (!str)
		return 0;
	mhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mhash_entries=", set_mhash_entries);

static __initdata unsigned long mphash_entries;
static int __init set_mphash_entries(char *str)
{
	if (!str)
		return 0;
	mphash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mphash_entries=", set_mphash_entries);
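
/*
 * Illustrative example only: the two parameters above can be given on the
 * kernel command line to size the mount and mountpoint hash tables at boot,
 * e.g. "mhash_entries=131072 mphash_entries=16384". The values are hints
 * used when the tables are allocated during early init.
 */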

static char * __initdata initramfs_options;
static int __init initramfs_options_setup(char *str)
{
	initramfs_options = str;
	return 1;
}

__setup("initramfs_options=", initramfs_options_setup);

static u64 event;
static DEFINE_XARRAY_FLAGS(mnt_id_xa, XA_FLAGS_ALLOC);
static DEFINE_IDA(mnt_group_ida);

/* Don't allow confusion with old 32bit mount ID */
#define MNT_UNIQUE_ID_OFFSET (1ULL << 31)
static u64 mnt_id_ctr = MNT_UNIQUE_ID_OFFSET;

static struct hlist_head *mount_hashtable __ro_after_init;
static struct hlist_head *mountpoint_hashtable __ro_after_init;
static struct kmem_cache *mnt_cache __ro_after_init;
static DECLARE_RWSEM(namespace_sem);
static HLIST_HEAD(unmounted);	/* protected by namespace_sem */
static LIST_HEAD(ex_mountpoints); /* protected by namespace_sem */
static struct mnt_namespace *emptied_ns; /* protected by namespace_sem */

static inline void namespace_lock(void);
static void namespace_unlock(void);
DEFINE_LOCK_GUARD_0(namespace_excl, namespace_lock(), namespace_unlock())
DEFINE_LOCK_GUARD_0(namespace_shared, down_read(&namespace_sem),
		    up_read(&namespace_sem))

DEFINE_FREE(mntput, struct vfsmount *, if (!IS_ERR(_T)) mntput(_T))

#ifdef CONFIG_FSNOTIFY
LIST_HEAD(notify_list); /* protected by namespace_sem */
#endif

enum mount_kattr_flags_t {
	MOUNT_KATTR_RECURSE = (1 << 0),
	MOUNT_KATTR_IDMAP_REPLACE = (1 << 1),
};

struct mount_kattr {
	unsigned int attr_set;
	unsigned int attr_clr;
	unsigned int propagation;
	unsigned int lookup_flags;
	enum mount_kattr_flags_t kflags;
	struct user_namespace *mnt_userns;
	struct mnt_idmap *mnt_idmap;
};

/* /sys/fs */
struct kobject *fs_kobj __ro_after_init;
EXPORT_SYMBOL_GPL(fs_kobj);

/*
 * vfsmount lock may be taken for read to prevent changes to the
 * vfsmount hash, ie. during mountpoint lookups or walking back
 * up the tree.
 *
 * It should be taken for write in all cases where the vfsmount
 * tree or hash is modified or when a vfsmount structure is modified.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);

static inline struct mnt_namespace *node_to_mnt_ns(const struct rb_node *node)
{
	struct ns_common *ns;

	if (!node)
		return NULL;
	ns = rb_entry(node, struct ns_common, ns_tree_node);
	return container_of(ns, struct mnt_namespace, ns);
}

static void mnt_ns_release(struct mnt_namespace *ns)
{
	/* keep alive for {list,stat}mount() */
	if (ns && refcount_dec_and_test(&ns->passive)) {
		fsnotify_mntns_delete(ns);
		put_user_ns(ns->user_ns);
		kfree(ns);
	}
}
DEFINE_FREE(mnt_ns_release, struct mnt_namespace *, if (_T) mnt_ns_release(_T))

static void mnt_ns_release_rcu(struct rcu_head *rcu)
{
	mnt_ns_release(container_of(rcu, struct mnt_namespace, ns.ns_rcu));
}

static void mnt_ns_tree_remove(struct mnt_namespace *ns)
{
	/* remove from global mount namespace list */
	if (ns_tree_active(ns))
		ns_tree_remove(ns);

	call_rcu(&ns->ns.ns_rcu, mnt_ns_release_rcu);
}

/*
 * Lookup a mount namespace by id and take a passive reference count. Taking a
 * passive reference means the mount namespace can be emptied if e.g., the last
 * task holding an active reference exits. To access the mounts of the
 * namespace the @namespace_sem must first be acquired. If the namespace has
 * already shut down before acquiring @namespace_sem, {list,stat}mount() will
 * see that the mount rbtree of the namespace is empty.
 *
 * Note the lookup is lockless protected by a sequence counter. We only
 * need to guard against false negatives as false positives aren't
 * possible. So if we didn't find a mount namespace and the sequence
 * counter has changed we need to retry. If the sequence counter is
 * still the same we know the search actually failed.
 */
static struct mnt_namespace *lookup_mnt_ns(u64 mnt_ns_id)
{
	struct mnt_namespace *mnt_ns;
	struct ns_common *ns;

	guard(rcu)();
	ns = ns_tree_lookup_rcu(mnt_ns_id, CLONE_NEWNS);
	if (!ns)
		return NULL;

	/*
	 * The last reference count is put with RCU delay so we can
	 * unconditionally acquire a reference here.
	 */
	mnt_ns = container_of(ns, struct mnt_namespace, ns);
	refcount_inc(&mnt_ns->passive);
	return mnt_ns;
}

static inline void lock_mount_hash(void)
{
	write_seqlock(&mount_lock);
}

static inline void unlock_mount_hash(void)
{
	write_sequnlock(&mount_lock);
}

static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> m_hash_shift);
	return &mount_hashtable[tmp & m_hash_mask];
}

static inline struct hlist_head *mp_hash(struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> mp_hash_shift);
	return &mountpoint_hashtable[tmp & mp_hash_mask];
}

static int mnt_alloc_id(struct mount *mnt)
{
	int res;

	xa_lock(&mnt_id_xa);
	res = __xa_alloc(&mnt_id_xa, &mnt->mnt_id, mnt, XA_LIMIT(1, INT_MAX), GFP_KERNEL);
	if (!res)
		mnt->mnt_id_unique = ++mnt_id_ctr;
	xa_unlock(&mnt_id_xa);
	return res;
}

static void mnt_free_id(struct mount *mnt)
{
	xa_erase(&mnt_id_xa, mnt->mnt_id);
}

/*
 * Allocate a new peer group ID
 */
static int mnt_alloc_group_id(struct mount *mnt)
{
	int res = ida_alloc_min(&mnt_group_ida, 1, GFP_KERNEL);

	if (res < 0)
		return res;
	mnt->mnt_group_id = res;
	return 0;
}

/*
 * Release a peer group ID
 */
void mnt_release_group_id(struct mount *mnt)
{
	ida_free(&mnt_group_ida, mnt->mnt_group_id);
	mnt->mnt_group_id = 0;
}

/*
 * vfsmount lock must be held for read
 */
static inline void mnt_add_count(struct mount *mnt, int n)
{
#ifdef CONFIG_SMP
	this_cpu_add(mnt->mnt_pcp->mnt_count, n);
#else
	preempt_disable();
	mnt->mnt_count += n;
	preempt_enable();
#endif
}

/*
 * vfsmount lock must be held for write
 */
int mnt_get_count(struct mount *mnt)
{
#ifdef CONFIG_SMP
	int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
	}

	return count;
#else
	return mnt->mnt_count;
#endif
}

static struct mount *alloc_vfsmnt(const char *name)
{
	struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		int err;

		err = mnt_alloc_id(mnt);
		if (err)
			goto out_free_cache;

		if (name)
			mnt->mnt_devname = kstrdup_const(name,
							 GFP_KERNEL_ACCOUNT);
		else
			mnt->mnt_devname = "none";
		if (!mnt->mnt_devname)
			goto out_free_id;

#ifdef CONFIG_SMP
		mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
		if (!mnt->mnt_pcp)
			goto out_free_devname;

		this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
#else
		mnt->mnt_count = 1;
		mnt->mnt_writers = 0;
#endif

		INIT_HLIST_NODE(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_HLIST_HEAD(&mnt->mnt_slave_list);
		INIT_HLIST_NODE(&mnt->mnt_slave);
		INIT_HLIST_NODE(&mnt->mnt_mp_list);
		INIT_HLIST_HEAD(&mnt->mnt_stuck_children);
		RB_CLEAR_NODE(&mnt->mnt_node);
		mnt->mnt.mnt_idmap = &nop_mnt_idmap;
	}
	return mnt;

#ifdef CONFIG_SMP
out_free_devname:
	kfree_const(mnt->mnt_devname);
#endif
out_free_id:
	mnt_free_id(mnt);
out_free_cache:
	kmem_cache_free(mnt_cache, mnt);
	return NULL;
}

/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/*
 * __mnt_is_readonly: check whether a mount is read-only
 * @mnt: the mount to check for its write status
 *
 * This shouldn't be used directly outside of the VFS.
 * It does not guarantee that the filesystem will stay
 * r/w, just that it is right *now*. This cannot and
 * should not be used in place of IS_RDONLY(inode).
 * mnt_want/drop_write() will _keep_ the filesystem
 * r/w.
 */
bool __mnt_is_readonly(const struct vfsmount *mnt)
{
	return (mnt->mnt_flags & MNT_READONLY) || sb_rdonly(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);

static inline void mnt_inc_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_inc(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers++;
#endif
}

static inline void mnt_dec_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_dec(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers--;
#endif
}

static unsigned int mnt_get_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
	}

	return count;
#else
	return mnt->mnt_writers;
#endif
}

static int mnt_is_readonly(const struct vfsmount *mnt)
{
	if (READ_ONCE(mnt->mnt_sb->s_readonly_remount))
		return 1;
	/*
	 * The barrier pairs with the barrier in sb_start_ro_state_change()
	 * making sure if we don't see s_readonly_remount set yet, we also will
	 * not see any superblock / mount flag changes done by remount.
	 * It also pairs with the barrier in sb_end_ro_state_change()
	 * assuring that if we see s_readonly_remount already cleared, we will
	 * see the values of superblock / mount flags updated by remount.
	 */
	smp_rmb();
	return __mnt_is_readonly(mnt);
}

/*
 * Most r/o & frozen checks on a fs are for operations that take discrete
 * amounts of time, like a write() or unlink(). We must keep track of when
 * those operations start (for permission checks) and when they end, so that we
 * can determine when writes are able to occur to a filesystem.
 */
/**
 * mnt_get_write_access - get write access to a mount without freeze protection
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write) before
 * returning success. This operation does not protect against the filesystem
 * being frozen. When the write operation is finished, mnt_put_write_access()
 * must be called. This is effectively a refcount.
 */
int mnt_get_write_access(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int ret = 0;

	preempt_disable();
	mnt_inc_writers(mnt);
	/*
	 * The store to mnt_inc_writers must be visible before we pass
	 * WRITE_HOLD loop below, so that the slowpath can see our
	 * incremented count after it has set WRITE_HOLD.
	 */
	smp_mb();
	might_lock(&mount_lock.lock);
	while (__test_write_hold(READ_ONCE(mnt->mnt_pprev_for_sb))) {
		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
			cpu_relax();
		} else {
			/*
			 * This prevents priority inversion, if the task
			 * setting WRITE_HOLD got preempted on a remote
			 * CPU, and it prevents livelock if the task setting
			 * WRITE_HOLD has a lower priority and is bound to
			 * the same CPU as the task that is spinning here.
			 */
			preempt_enable();
			read_seqlock_excl(&mount_lock);
			read_sequnlock_excl(&mount_lock);
			preempt_disable();
		}
	}
	/*
	 * The barrier pairs with the barrier in sb_start_ro_state_change()
	 * making sure that if we see WRITE_HOLD cleared, we will also see
	 * s_readonly_remount set (or even SB_RDONLY / MNT_READONLY flags) in
	 * mnt_is_readonly() and bail in case we are racing with remount
	 * read-only.
	 */
	smp_rmb();
	if (mnt_is_readonly(m)) {
		mnt_dec_writers(mnt);
		ret = -EROFS;
	}
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(mnt_get_write_access);

/**
 * mnt_want_write - get write access to a mount
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write, filesystem
 * is not frozen) before returning success. When the write operation is
 * finished, mnt_drop_write() must be called. This is effectively a refcount.
 */
int mnt_want_write(struct vfsmount *m)
{
	int ret;

	sb_start_write(m->mnt_sb);
	ret = mnt_get_write_access(m);
	if (ret)
		sb_end_write(m->mnt_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);
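
/*
 * Illustrative sketch only: callers about to modify a filesystem bracket the
 * modification with the pair above/below, e.g.
 *
 *	err = mnt_want_write(path->mnt);
 *	if (err)
 *		return err;
 *	...create/unlink/write...
 *	mnt_drop_write(path->mnt);
 */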

/**
 * mnt_get_write_access_file - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like mnt_get_write_access, but if @file is already open for write it
 * skips incrementing mnt_writers (since the open file already has a reference)
 * and instead only does the check for emergency r/o remounts. This must be
 * paired with mnt_put_write_access_file.
 */
int mnt_get_write_access_file(struct file *file)
{
	if (file->f_mode & FMODE_WRITER) {
		/*
		 * Superblock may have become readonly while there are still
		 * writable fd's, e.g. due to a fs error with errors=remount-ro
		 */
		if (__mnt_is_readonly(file->f_path.mnt))
			return -EROFS;
		return 0;
	}
	return mnt_get_write_access(file->f_path.mnt);
}

/**
 * mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like mnt_want_write, but if the file is already open for writing it
 * skips incrementing mnt_writers (since the open file already has a reference)
 * and instead only does the freeze protection and the check for emergency r/o
 * remounts. This must be paired with mnt_drop_write_file.
 */
int mnt_want_write_file(struct file *file)
{
	int ret;

	sb_start_write(file_inode(file)->i_sb);
	ret = mnt_get_write_access_file(file);
	if (ret)
		sb_end_write(file_inode(file)->i_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write_file);

/**
 * mnt_put_write_access - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done
 * performing writes to it. Must be matched with
 * mnt_get_write_access() call above.
 */
void mnt_put_write_access(struct vfsmount *mnt)
{
	preempt_disable();
	mnt_dec_writers(real_mount(mnt));
	preempt_enable();
}
EXPORT_SYMBOL_GPL(mnt_put_write_access);

/**
 * mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done performing writes to it and
 * also allows filesystem to be frozen again. Must be matched with
 * mnt_want_write() call above.
 */
void mnt_drop_write(struct vfsmount *mnt)
{
	mnt_put_write_access(mnt);
	sb_end_write(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(mnt_drop_write);

void mnt_put_write_access_file(struct file *file)
{
	if (!(file->f_mode & FMODE_WRITER))
		mnt_put_write_access(file->f_path.mnt);
}

void mnt_drop_write_file(struct file *file)
{
	mnt_put_write_access_file(file);
	sb_end_write(file_inode(file)->i_sb);
}
EXPORT_SYMBOL(mnt_drop_write_file);
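
/*
 * Illustrative sketch only: handlers that modify the filesystem through an
 * already-open file typically use the file-based pair, e.g.
 *
 *	err = mnt_want_write_file(file);
 *	if (err)
 *		return err;
 *	...modify the inode...
 *	mnt_drop_write_file(file);
 */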

/**
 * mnt_hold_writers - prevent write access to the given mount
 * @mnt: mnt to prevent write access to
 *
 * Prevents write access to @mnt if there are no active writers for @mnt.
 * This function needs to be called and return successfully before changing
 * properties of @mnt that need to remain stable for callers with write access
 * to @mnt.
 *
 * After this function has been called successfully callers must pair it with
 * a call to mnt_unhold_writers() in order to stop preventing write access to
 * @mnt.
 *
 * Context: This function expects to be in mount_locked_reader scope serializing
 * setting WRITE_HOLD.
 * Return: On success 0 is returned.
 *	   On error, -EBUSY is returned.
 */
static inline int mnt_hold_writers(struct mount *mnt)
{
	set_write_hold(mnt);
	/*
	 * After storing WRITE_HOLD, we'll read the counters. This store
	 * should be visible before we do.
	 */
	smp_mb();

	/*
	 * With writers on hold, if this value is zero, then there are
	 * definitely no active writers (although held writers may subsequently
	 * increment the count, they'll have to wait, and decrement it after
	 * seeing MNT_READONLY).
	 *
	 * It is OK to have counter incremented on one CPU and decremented on
	 * another: the sum will add up correctly. The danger would be when we
	 * sum up each counter, if we read a counter before it is incremented,
	 * but then read another CPU's count which it has been subsequently
	 * decremented from -- we would see more decrements than we should.
	 * WRITE_HOLD protects against this scenario, because
	 * mnt_want_write first increments count, then smp_mb, then spins on
	 * WRITE_HOLD, so it can't be decremented by another CPU while
	 * we're counting up here.
	 */
	if (mnt_get_writers(mnt) > 0)
		return -EBUSY;

	return 0;
}

/**
 * mnt_unhold_writers - stop preventing write access to the given mount
 * @mnt: mnt to stop preventing write access to
 *
 * Stop preventing write access to @mnt allowing callers to gain write access
 * to @mnt again.
 *
 * This function can only be called after a call to mnt_hold_writers().
 *
 * Context: This function expects to be in the same mount_locked_reader scope
 * as the matching mnt_hold_writers().
 */
static inline void mnt_unhold_writers(struct mount *mnt)
{
	if (!test_write_hold(mnt))
		return;
	/*
	 * MNT_READONLY must become visible before ~WRITE_HOLD, so writers
	 * that become unheld will see MNT_READONLY.
	 */
	smp_wmb();
	clear_write_hold(mnt);
}

static inline void mnt_del_instance(struct mount *m)
{
	struct mount **p = m->mnt_pprev_for_sb;
	struct mount *next = m->mnt_next_for_sb;

	if (next)
		next->mnt_pprev_for_sb = p;
	*p = next;
}

static inline void mnt_add_instance(struct mount *m, struct super_block *s)
{
	struct mount *first = s->s_mounts;

	if (first)
		first->mnt_pprev_for_sb = &m->mnt_next_for_sb;
	m->mnt_next_for_sb = first;
	m->mnt_pprev_for_sb = &s->s_mounts;
	s->s_mounts = m;
}

static int mnt_make_readonly(struct mount *mnt)
{
	int ret;

	ret = mnt_hold_writers(mnt);
	if (!ret)
		mnt->mnt.mnt_flags |= MNT_READONLY;
	mnt_unhold_writers(mnt);
	return ret;
}

int sb_prepare_remount_readonly(struct super_block *sb)
{
	int err = 0;

	/* Racy optimization. Recheck the counter under WRITE_HOLD */
	if (atomic_long_read(&sb->s_remove_count))
		return -EBUSY;

	guard(mount_locked_reader)();

	for (struct mount *m = sb->s_mounts; m; m = m->mnt_next_for_sb) {
		if (!(m->mnt.mnt_flags & MNT_READONLY)) {
			err = mnt_hold_writers(m);
			if (err)
				break;
		}
	}
	if (!err && atomic_long_read(&sb->s_remove_count))
		err = -EBUSY;

	if (!err)
		sb_start_ro_state_change(sb);
	for (struct mount *m = sb->s_mounts; m; m = m->mnt_next_for_sb) {
		if (test_write_hold(m))
			clear_write_hold(m);
	}

	return err;
}

static void free_vfsmnt(struct mount *mnt)
{
	mnt_idmap_put(mnt_idmap(&mnt->mnt));
	kfree_const(mnt->mnt_devname);
#ifdef CONFIG_SMP
	free_percpu(mnt->mnt_pcp);
#endif
	kmem_cache_free(mnt_cache, mnt);
}

static void delayed_free_vfsmnt(struct rcu_head *head)
{
	free_vfsmnt(container_of(head, struct mount, mnt_rcu));
}

/* call under rcu_read_lock */
int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	struct mount *mnt;
	if (read_seqretry(&mount_lock, seq))
		return 1;
	if (bastard == NULL)
		return 0;
	mnt = real_mount(bastard);
	mnt_add_count(mnt, 1);
	smp_mb();	// see mntput_no_expire() and do_umount()
	if (likely(!read_seqretry(&mount_lock, seq)))
		return 0;
	lock_mount_hash();
	if (unlikely(bastard->mnt_flags & (MNT_SYNC_UMOUNT | MNT_DOOMED))) {
		mnt_add_count(mnt, -1);
		unlock_mount_hash();
		return 1;
	}
	unlock_mount_hash();
	/* caller will mntput() */
	return -1;
}

/* call under rcu_read_lock */
static bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	int res = __legitimize_mnt(bastard, seq);
	if (likely(!res))
		return true;
	if (unlikely(res < 0)) {
		rcu_read_unlock();
		mntput(bastard);
		rcu_read_lock();
	}
	return false;
}

/**
 * __lookup_mnt - mount hash lookup
 * @mnt: parent mount
 * @dentry: dentry of mountpoint
 *
 * If @mnt has a child mount @c mounted on @dentry find and return it.
 * Caller must either hold the spinlock component of @mount_lock or
 * hold rcu_read_lock(), sample the seqcount component before the call
 * and recheck it afterwards.
 *
 * Return: The child of @mnt mounted on @dentry or %NULL.
 */
struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
	struct hlist_head *head = m_hash(mnt, dentry);
	struct mount *p;

	hlist_for_each_entry_rcu(p, head, mnt_hash)
		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
			return p;
	return NULL;
}

/**
 * lookup_mnt - Return the child mount mounted at given location
 * @path: location in the namespace
 *
 * Acquires and returns a new reference to mount at given location
 * or %NULL if nothing is mounted there.
 */
struct vfsmount *lookup_mnt(const struct path *path)
{
	struct mount *child_mnt;
	struct vfsmount *m;
	unsigned seq;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		child_mnt = __lookup_mnt(path->mnt, path->dentry);
		m = child_mnt ? &child_mnt->mnt : NULL;
	} while (!legitimize_mnt(m, seq));
	rcu_read_unlock();
	return m;
}

/*
 * __is_local_mountpoint - Test to see if dentry is a mountpoint in the
 * current mount namespace.
 *
 * The common case is dentries are not mountpoints at all and that
 * test is handled inline. For the slow case when we are actually
 * dealing with a mountpoint of some kind, walk through all of the
 * mounts in the current mount namespace and test to see if the dentry
 * is a mountpoint.
 *
 * The mount_hashtable is not usable in this context because we
 * need to identify all mounts that may be in the current mount
 * namespace, not just a mount that happens to have some specified
 * parent mount.
 */
bool __is_local_mountpoint(const struct dentry *dentry)
{
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	struct mount *mnt, *n;

	guard(namespace_shared)();

	rbtree_postorder_for_each_entry_safe(mnt, n, &ns->mounts, mnt_node)
		if (mnt->mnt_mountpoint == dentry)
			return true;

	return false;
}

struct pinned_mountpoint {
	struct hlist_node node;
	struct mountpoint *mp;
	struct mount *parent;
};

static bool lookup_mountpoint(struct dentry *dentry, struct pinned_mountpoint *m)
{
	struct hlist_head *chain = mp_hash(dentry);
	struct mountpoint *mp;

	hlist_for_each_entry(mp, chain, m_hash) {
		if (mp->m_dentry == dentry) {
			hlist_add_head(&m->node, &mp->m_list);
			m->mp = mp;
			return true;
		}
	}
	return false;
}

static int get_mountpoint(struct dentry *dentry, struct pinned_mountpoint *m)
{
	struct mountpoint *mp __free(kfree) = NULL;
	bool found;
	int ret;

	if (d_mountpoint(dentry)) {
		/* might be worth a WARN_ON() */
		if (d_unlinked(dentry))
			return -ENOENT;
mountpoint:
		read_seqlock_excl(&mount_lock);
		found = lookup_mountpoint(dentry, m);
		read_sequnlock_excl(&mount_lock);
		if (found)
			return 0;
	}

	if (!mp)
		mp = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
	if (!mp)
		return -ENOMEM;

	/* Exactly one process may set d_mounted */
	ret = d_set_mounted(dentry);

	/* Someone else set d_mounted? */
	if (ret == -EBUSY)
		goto mountpoint;

	/* The dentry is not available as a mountpoint? */
	if (ret)
		return ret;

	/* Add the new mountpoint to the hash table */
	read_seqlock_excl(&mount_lock);
	mp->m_dentry = dget(dentry);
	hlist_add_head(&mp->m_hash, mp_hash(dentry));
	INIT_HLIST_HEAD(&mp->m_list);
	hlist_add_head(&m->node, &mp->m_list);
	m->mp = no_free_ptr(mp);
	read_sequnlock_excl(&mount_lock);
	return 0;
}
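
/*
 * Illustrative sketch (not a real caller) of how the pinned_mountpoint
 * helpers pair up; actual users further down in this file additionally hold
 * namespace_sem and the mount hash lock as required:
 *
 *	struct pinned_mountpoint mp = {};
 *
 *	if (!get_mountpoint(dentry, &mp)) {
 *		...attach something at mp.mp...
 *		unpin_mountpoint(&mp);
 *	}
 */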

/*
 * vfsmount lock must be held. Additionally, the caller is responsible
 * for serializing calls for given disposal list.
 */
static void maybe_free_mountpoint(struct mountpoint *mp, struct list_head *list)
{
	if (hlist_empty(&mp->m_list)) {
		struct dentry *dentry = mp->m_dentry;
		spin_lock(&dentry->d_lock);
		dentry->d_flags &= ~DCACHE_MOUNTED;
		spin_unlock(&dentry->d_lock);
		dput_to_list(dentry, list);
		hlist_del(&mp->m_hash);
		kfree(mp);
	}
}

/*
 * locks: mount_lock [read_seqlock_excl], namespace_sem [excl]
 */
static void unpin_mountpoint(struct pinned_mountpoint *m)
{
	if (m->mp) {
		hlist_del(&m->node);
		maybe_free_mountpoint(m->mp, &ex_mountpoints);
	}
}

static inline int check_mnt(const struct mount *mnt)
{
	return mnt->mnt_ns == current->nsproxy->mnt_ns;
}

static inline bool check_anonymous_mnt(struct mount *mnt)
{
	u64 seq;

	if (!is_anon_ns(mnt->mnt_ns))
		return false;

	seq = mnt->mnt_ns->seq_origin;
	return !seq || (seq == current->nsproxy->mnt_ns->ns.ns_id);
}

/*
 * vfsmount lock must be held for write
 */
static void touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * locks: mount_lock[write_seqlock]
 */
static void __umount_mnt(struct mount *mnt, struct list_head *shrink_list)
{
	struct mountpoint *mp;
	struct mount *parent = mnt->mnt_parent;
	if (unlikely(parent->overmount == mnt))
		parent->overmount = NULL;
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	list_del_init(&mnt->mnt_child);
	hlist_del_init_rcu(&mnt->mnt_hash);
	hlist_del_init(&mnt->mnt_mp_list);
	mp = mnt->mnt_mp;
	mnt->mnt_mp = NULL;
	maybe_free_mountpoint(mp, shrink_list);
}

/*
 * locks: mount_lock[write_seqlock], namespace_sem[excl] (for ex_mountpoints)
 */
static void umount_mnt(struct mount *mnt)
{
	__umount_mnt(mnt, &ex_mountpoints);
}

/*
 * vfsmount lock must be held for write
 */
void mnt_set_mountpoint(struct mount *mnt,
			struct mountpoint *mp,
			struct mount *child_mnt)
{
	child_mnt->mnt_mountpoint = mp->m_dentry;
	child_mnt->mnt_parent = mnt;
	child_mnt->mnt_mp = mp;
	hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
}

static void make_visible(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	if (unlikely(mnt->mnt_mountpoint == parent->mnt.mnt_root))
		parent->overmount = mnt;
	hlist_add_head_rcu(&mnt->mnt_hash,
			   m_hash(&parent->mnt, mnt->mnt_mountpoint));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
}

/**
 * attach_mnt - mount a mount, attach to @mount_hashtable and parent's
 *		list of child mounts
 * @parent: the parent
 * @mnt: the new mount
 * @mp: the new mountpoint
 *
 * Mount @mnt at @mp on @parent. Then attach @mnt
 * to @parent's child mount list and to @mount_hashtable.
 *
 * Note, when make_visible() is called @mnt->mnt_parent already points
 * to the correct parent.
 *
 * Context: This function expects namespace_lock() and lock_mount_hash()
 * to have been acquired in that order.
 */
static void attach_mnt(struct mount *mnt, struct mount *parent,
		       struct mountpoint *mp)
{
	mnt_set_mountpoint(parent, mp, mnt);
	make_visible(mnt);
}

void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt)
{
	struct mountpoint *old_mp = mnt->mnt_mp;

	list_del_init(&mnt->mnt_child);
	hlist_del_init(&mnt->mnt_mp_list);
	hlist_del_init_rcu(&mnt->mnt_hash);

	attach_mnt(mnt, parent, mp);

	maybe_free_mountpoint(old_mp, &ex_mountpoints);
}

static inline struct mount *node_to_mount(struct rb_node *node)
{
	return node ? rb_entry(node, struct mount, mnt_node) : NULL;
}

static void mnt_add_to_ns(struct mnt_namespace *ns, struct mount *mnt)
{
	struct rb_node **link = &ns->mounts.rb_node;
	struct rb_node *parent = NULL;
	bool mnt_first_node = true, mnt_last_node = true;

	WARN_ON(mnt_ns_attached(mnt));
	mnt->mnt_ns = ns;
	while (*link) {
		parent = *link;
		if (mnt->mnt_id_unique < node_to_mount(parent)->mnt_id_unique) {
			link = &parent->rb_left;
			mnt_last_node = false;
		} else {
			link = &parent->rb_right;
			mnt_first_node = false;
		}
	}

	if (mnt_last_node)
		ns->mnt_last_node = &mnt->mnt_node;
	if (mnt_first_node)
		ns->mnt_first_node = &mnt->mnt_node;
	rb_link_node(&mnt->mnt_node, parent, link);
	rb_insert_color(&mnt->mnt_node, &ns->mounts);

	mnt_notify_add(mnt);
}

static struct mount *next_mnt(struct mount *p, struct mount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct mount, mnt_child);
}

static struct mount *skip_mnt_tree(struct mount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct mount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}

/*
 * vfsmount lock must be held for write
 */
static void commit_tree(struct mount *mnt)
{
	struct mnt_namespace *n = mnt->mnt_parent->mnt_ns;

	if (!mnt_ns_attached(mnt)) {
		for (struct mount *m = mnt; m; m = next_mnt(m, mnt))
			mnt_add_to_ns(n, m);
		n->nr_mounts += n->pending_mounts;
		n->pending_mounts = 0;
	}

	make_visible(mnt);
	touch_mnt_namespace(n);
}

static void setup_mnt(struct mount *m, struct dentry *root)
{
	struct super_block *s = root->d_sb;

	atomic_inc(&s->s_active);
	m->mnt.mnt_sb = s;
	m->mnt.mnt_root = dget(root);
	m->mnt_mountpoint = m->mnt.mnt_root;
	m->mnt_parent = m;

	guard(mount_locked_reader)();
	mnt_add_instance(m, s);
}

/**
 * vfs_create_mount - Create a mount for a configured superblock
 * @fc: The configuration context with the superblock attached
 *
 * Create a mount to an already configured superblock. If necessary, the
 * caller should invoke vfs_get_tree() before calling this.
 *
 * Note that this does not attach the mount to anything.
 */
struct vfsmount *vfs_create_mount(struct fs_context *fc)
{
	struct mount *mnt;

	if (!fc->root)
		return ERR_PTR(-EINVAL);

	mnt = alloc_vfsmnt(fc->source);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (fc->sb_flags & SB_KERNMOUNT)
		mnt->mnt.mnt_flags = MNT_INTERNAL;

	setup_mnt(mnt, fc->root);

	return &mnt->mnt;
}
EXPORT_SYMBOL(vfs_create_mount);

struct vfsmount *fc_mount(struct fs_context *fc)
{
	int err = vfs_get_tree(fc);
	if (!err) {
		up_write(&fc->root->d_sb->s_umount);
		return vfs_create_mount(fc);
	}
	return ERR_PTR(err);
}
EXPORT_SYMBOL(fc_mount);

struct vfsmount *fc_mount_longterm(struct fs_context *fc)
{
	struct vfsmount *mnt = fc_mount(fc);
	if (!IS_ERR(mnt))
		real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL;
	return mnt;
}
EXPORT_SYMBOL(fc_mount_longterm);

struct vfsmount *vfs_kern_mount(struct file_system_type *type,
				int flags, const char *name,
				void *data)
{
	struct fs_context *fc;
	struct vfsmount *mnt;
	int ret = 0;

	if (!type)
		return ERR_PTR(-EINVAL);

	fc = fs_context_for_mount(type, flags);
	if (IS_ERR(fc))
		return ERR_CAST(fc);

	if (name)
		ret = vfs_parse_fs_string(fc, "source", name);
	if (!ret)
		ret = parse_monolithic_mount_data(fc, data);
	if (!ret)
		mnt = fc_mount(fc);
	else
		mnt = ERR_PTR(ret);

	put_fs_context(fc);
	return mnt;
}
EXPORT_SYMBOL_GPL(vfs_kern_mount);
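
/*
 * Illustrative sketch only: in-kernel users that need a private instance of
 * a filesystem typically go through the helper above, roughly like
 *
 *	struct vfsmount *mnt;
 *
 *	mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, NULL);
 *	if (IS_ERR(mnt))
 *		return PTR_ERR(mnt);
 *	...use mnt, then mntput(mnt) when done...
 */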

static struct mount *clone_mnt(struct mount *old, struct dentry *root,
			       int flag)
{
	struct mount *mnt;
	int err;

	mnt = alloc_vfsmnt(old->mnt_devname);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	mnt->mnt.mnt_flags = READ_ONCE(old->mnt.mnt_flags) &
			     ~MNT_INTERNAL_FLAGS;

	if (flag & (CL_SLAVE | CL_PRIVATE))
		mnt->mnt_group_id = 0; /* not a peer of original */
	else
		mnt->mnt_group_id = old->mnt_group_id;

	if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
		err = mnt_alloc_group_id(mnt);
		if (err)
			goto out_free;
	}

	if (mnt->mnt_group_id)
		set_mnt_shared(mnt);

	mnt->mnt.mnt_idmap = mnt_idmap_get(mnt_idmap(&old->mnt));

	setup_mnt(mnt, root);

	if (flag & CL_PRIVATE)	// we are done with it
		return mnt;

	if (peers(mnt, old))
		list_add(&mnt->mnt_share, &old->mnt_share);

	if ((flag & CL_SLAVE) && old->mnt_group_id) {
		hlist_add_head(&mnt->mnt_slave, &old->mnt_slave_list);
		mnt->mnt_master = old;
	} else if (IS_MNT_SLAVE(old)) {
		hlist_add_behind(&mnt->mnt_slave, &old->mnt_slave);
		mnt->mnt_master = old->mnt_master;
	}
	return mnt;

out_free:
	mnt_free_id(mnt);
	free_vfsmnt(mnt);
	return ERR_PTR(err);
}

static void cleanup_mnt(struct mount *mnt)
{
	struct hlist_node *p;
	struct mount *m;
	/*
	 * The warning here probably indicates that somebody messed
	 * up a mnt_want/drop_write() pair. If this happens, the
	 * filesystem was probably unable to make r/w->r/o transitions.
	 * The locking used to deal with mnt_count decrement provides barriers,
	 * so mnt_get_writers() below is safe.
	 */
	WARN_ON(mnt_get_writers(mnt));
	if (unlikely(mnt->mnt_pins.first))
		mnt_pin_kill(mnt);
	hlist_for_each_entry_safe(m, p, &mnt->mnt_stuck_children, mnt_umount) {
		hlist_del(&m->mnt_umount);
		mntput(&m->mnt);
	}
	fsnotify_vfsmount_delete(&mnt->mnt);
	dput(mnt->mnt.mnt_root);
	deactivate_super(mnt->mnt.mnt_sb);
	mnt_free_id(mnt);
	call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt);
}

static void __cleanup_mnt(struct rcu_head *head)
{
	cleanup_mnt(container_of(head, struct mount, mnt_rcu));
}

static LLIST_HEAD(delayed_mntput_list);
static void delayed_mntput(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&delayed_mntput_list);
	struct mount *m, *t;

	llist_for_each_entry_safe(m, t, node, mnt_llist)
		cleanup_mnt(m);
}
static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);

static void mntput_no_expire(struct mount *mnt)
{
	LIST_HEAD(list);
	int count;

	rcu_read_lock();
	if (likely(READ_ONCE(mnt->mnt_ns))) {
		/*
		 * Since we don't do lock_mount_hash() here,
		 * ->mnt_ns can change under us. However, if it's
		 * non-NULL, then there's a reference that won't
		 * be dropped until after an RCU delay done after
		 * turning ->mnt_ns NULL. So if we observe it
		 * non-NULL under rcu_read_lock(), the reference
		 * we are dropping is not the final one.
		 */
		mnt_add_count(mnt, -1);
		rcu_read_unlock();
		return;
	}
	lock_mount_hash();
	/*
	 * make sure that if __legitimize_mnt() has not seen us grab
	 * mount_lock, we'll see their refcount increment here.
	 */
	smp_mb();
	mnt_add_count(mnt, -1);
	count = mnt_get_count(mnt);
	if (count != 0) {
		WARN_ON(count < 0);
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) {
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	mnt->mnt.mnt_flags |= MNT_DOOMED;
	rcu_read_unlock();

	mnt_del_instance(mnt);
	if (unlikely(!list_empty(&mnt->mnt_expire)))
		list_del(&mnt->mnt_expire);

	if (unlikely(!list_empty(&mnt->mnt_mounts))) {
		struct mount *p, *tmp;
		list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
			__umount_mnt(p, &list);
			hlist_add_head(&p->mnt_umount, &mnt->mnt_stuck_children);
		}
	}
	unlock_mount_hash();
	shrink_dentry_list(&list);

	if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
		struct task_struct *task = current;
		if (likely(!(task->flags & PF_KTHREAD))) {
			init_task_work(&mnt->mnt_rcu, __cleanup_mnt);
			if (!task_work_add(task, &mnt->mnt_rcu, TWA_RESUME))
				return;
		}
		if (llist_add(&mnt->mnt_llist, &delayed_mntput_list))
			schedule_delayed_work(&delayed_mntput_work, 1);
		return;
	}
	cleanup_mnt(mnt);
}

void mntput(struct vfsmount *mnt)
{
	if (mnt) {
		struct mount *m = real_mount(mnt);
		/* avoid cacheline pingpong */
		if (unlikely(m->mnt_expiry_mark))
			WRITE_ONCE(m->mnt_expiry_mark, 0);
		mntput_no_expire(m);
	}
}
EXPORT_SYMBOL(mntput);

struct vfsmount *mntget(struct vfsmount *mnt)
{
	if (mnt)
		mnt_add_count(real_mount(mnt), 1);
	return mnt;
}
EXPORT_SYMBOL(mntget);

/*
 * Make a mount point inaccessible to new lookups.
 * Because there may still be current users, the caller MUST WAIT
 * for an RCU grace period before destroying the mount point.
 */
void mnt_make_shortterm(struct vfsmount *mnt)
{
	if (mnt)
		real_mount(mnt)->mnt_ns = NULL;
}

/**
 * path_is_mountpoint() - Check if path is a mount in the current namespace.
 * @path: path to check
 *
 * d_mountpoint() can only be used reliably to establish if a dentry is
 * not mounted in any namespace and that common case is handled inline.
 * d_mountpoint() isn't aware of the possibility there may be multiple
 * mounts using a given dentry in a different namespace. This function
 * checks if the passed in path is a mountpoint rather than the dentry
 * alone.
 */
bool path_is_mountpoint(const struct path *path)
{
	unsigned seq;
	bool res;

	if (!d_mountpoint(path->dentry))
		return false;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		res = __path_is_mountpoint(path);
	} while (read_seqretry(&mount_lock, seq));
	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL(path_is_mountpoint);

struct vfsmount *mnt_clone_internal(const struct path *path)
{
	struct mount *p;
	p = clone_mnt(real_mount(path->mnt), path->dentry, CL_PRIVATE);
	if (IS_ERR(p))
		return ERR_CAST(p);
	p->mnt.mnt_flags |= MNT_INTERNAL;
	return &p->mnt;
}

/*
 * Returns the mount which either has the specified mnt_id, or has the next
 * smallest id after the specified one.
 */
static struct mount *mnt_find_id_at(struct mnt_namespace *ns, u64 mnt_id)
{
	struct rb_node *node = ns->mounts.rb_node;
	struct mount *ret = NULL;

	while (node) {
		struct mount *m = node_to_mount(node);

		if (mnt_id <= m->mnt_id_unique) {
			ret = node_to_mount(node);
			if (mnt_id == m->mnt_id_unique)
				break;
			node = node->rb_left;
		} else {
			node = node->rb_right;
		}
	}
	return ret;
}

/*
 * Returns the mount which either has the specified mnt_id, or has the next
 * greater id before the specified one.
 */
static struct mount *mnt_find_id_at_reverse(struct mnt_namespace *ns, u64 mnt_id)
{
	struct rb_node *node = ns->mounts.rb_node;
	struct mount *ret = NULL;

	while (node) {
		struct mount *m = node_to_mount(node);

		if (mnt_id >= m->mnt_id_unique) {
			ret = node_to_mount(node);
			if (mnt_id == m->mnt_id_unique)
				break;
			node = node->rb_right;
		} else {
			node = node->rb_left;
		}
	}
	return ret;
}

#ifdef CONFIG_PROC_FS

/* iterator; we want it to have access to namespace_sem, thus here... */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_mounts *p = m->private;

	down_read(&namespace_sem);

	return mnt_find_id_at(p->ns, *pos);
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct mount *next = NULL, *mnt = v;
	struct rb_node *node = rb_next(&mnt->mnt_node);

	++*pos;
	if (node) {
		next = node_to_mount(node);
		*pos = next->mnt_id_unique;
	}
	return next;
}

static void m_stop(struct seq_file *m, void *v)
{
	up_read(&namespace_sem);
}

static int m_show(struct seq_file *m, void *v)
{
	struct proc_mounts *p = m->private;
	struct mount *r = v;
	return p->show(m, &r->mnt);
}

const struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= m_show,
};

#endif /* CONFIG_PROC_FS */

/**
 * may_umount_tree - check if a mount tree is busy
 * @m: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	bool busy = false;

	/* write lock needed for mnt_get_count */
	lock_mount_hash();
	for (struct mount *p = mnt; p; p = next_mnt(p, mnt)) {
		if (mnt_get_count(p) > (p == mnt ? 2 : 1)) {
			busy = true;
			break;
		}
	}
	unlock_mount_hash();

	return !busy;
}

EXPORT_SYMBOL(may_umount_tree);

/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts. If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	int ret = 1;
	down_read(&namespace_sem);
	lock_mount_hash();
	if (propagate_mount_busy(real_mount(mnt), 2))
		ret = 0;
	unlock_mount_hash();
	up_read(&namespace_sem);
	return ret;
}

EXPORT_SYMBOL(may_umount);

#ifdef CONFIG_FSNOTIFY
static void mnt_notify(struct mount *p)
{
	if (!p->prev_ns && p->mnt_ns) {
		fsnotify_mnt_attach(p->mnt_ns, &p->mnt);
	} else if (p->prev_ns && !p->mnt_ns) {
		fsnotify_mnt_detach(p->prev_ns, &p->mnt);
	} else if (p->prev_ns == p->mnt_ns) {
		fsnotify_mnt_move(p->mnt_ns, &p->mnt);
	} else {
		fsnotify_mnt_detach(p->prev_ns, &p->mnt);
		fsnotify_mnt_attach(p->mnt_ns, &p->mnt);
	}
	p->prev_ns = p->mnt_ns;
}

static void notify_mnt_list(void)
{
	struct mount *m, *tmp;
	/*
	 * Notify about mounts that were added/reparented/detached/remain
	 * connected after unmount.
	 */
	list_for_each_entry_safe(m, tmp, &notify_list, to_notify) {
		mnt_notify(m);
		list_del_init(&m->to_notify);
	}
}

static bool need_notify_mnt_list(void)
{
	return !list_empty(&notify_list);
}
#else
static void notify_mnt_list(void)
{
}

static bool need_notify_mnt_list(void)
{
	return false;
}
#endif

static void free_mnt_ns(struct mnt_namespace *);
static void namespace_unlock(void)
{
	struct hlist_head head;
	struct hlist_node *p;
	struct mount *m;
	struct mnt_namespace *ns = emptied_ns;
	LIST_HEAD(list);

	hlist_move_list(&unmounted, &head);
	list_splice_init(&ex_mountpoints, &list);
	emptied_ns = NULL;

	if (need_notify_mnt_list()) {
		/*
		 * No point blocking out concurrent readers while notifications
		 * are sent. This will also allow statmount()/listmount() to run
		 * concurrently.
		 */
		downgrade_write(&namespace_sem);
		notify_mnt_list();
		up_read(&namespace_sem);
	} else {
		up_write(&namespace_sem);
	}
	if (unlikely(ns)) {
		/* Make sure we notice when we leak mounts. */
		VFS_WARN_ON_ONCE(!mnt_ns_empty(ns));
		free_mnt_ns(ns);
	}

	shrink_dentry_list(&list);

	if (likely(hlist_empty(&head)))
		return;

	synchronize_rcu_expedited();

	hlist_for_each_entry_safe(m, p, &head, mnt_umount) {
		hlist_del(&m->mnt_umount);
		mntput(&m->mnt);
	}
}

static inline void namespace_lock(void)
{
	down_write(&namespace_sem);
}

enum umount_tree_flags {
	UMOUNT_SYNC = 1,
	UMOUNT_PROPAGATE = 2,
	UMOUNT_CONNECTED = 4,
};

static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how)
{
	/* Leaving mounts connected is only valid for lazy umounts */
	if (how & UMOUNT_SYNC)
		return true;

	/* A mount without a parent has nothing to be connected to */
	if (!mnt_has_parent(mnt))
		return true;

	/* Because the reference counting rules change when mounts are
	 * unmounted and connected, umounted mounts may not be
	 * connected to mounted mounts.
	 */
	if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT))
		return true;

	/* Has it been requested that the mount remain connected? */
	if (how & UMOUNT_CONNECTED)
		return false;

	/* Is the mount locked such that it needs to remain connected? */
	if (IS_MNT_LOCKED(mnt))
		return false;

	/* By default disconnect the mount */
	return true;
}

/*
 * mount_lock must be held
 * namespace_sem must be held for write
 */
static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
{
	LIST_HEAD(tmp_list);
	struct mount *p;

	if (how & UMOUNT_PROPAGATE)
		propagate_mount_unlock(mnt);

	/* Gather the mounts to umount */
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		p->mnt.mnt_flags |= MNT_UMOUNT;
		if (mnt_ns_attached(p))
			move_from_ns(p);
		list_add_tail(&p->mnt_list, &tmp_list);
	}

	/* Hide the mounts from mnt_mounts */
	list_for_each_entry(p, &tmp_list, mnt_list) {
		list_del_init(&p->mnt_child);
	}

	/* Add propagated mounts to the tmp_list */
	if (how & UMOUNT_PROPAGATE)
		propagate_umount(&tmp_list);

	bulk_make_private(&tmp_list);

	while (!list_empty(&tmp_list)) {
		struct mnt_namespace *ns;
		bool disconnect;
		p = list_first_entry(&tmp_list, struct mount, mnt_list);
		list_del_init(&p->mnt_expire);
		list_del_init(&p->mnt_list);
		ns = p->mnt_ns;
		if (ns) {
			ns->nr_mounts--;
			__touch_mnt_namespace(ns);
		}
		p->mnt_ns = NULL;
		if (how & UMOUNT_SYNC)
			p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;

		disconnect = disconnect_mount(p, how);
		if (mnt_has_parent(p)) {
			if (!disconnect) {
				/* Don't forget about p */
				list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
			} else {
				umount_mnt(p);
			}
		}
		if (disconnect)
			hlist_add_head(&p->mnt_umount, &unmounted);

		/*
		 * At this point p->mnt_ns is NULL, notification will be queued
		 * only if
		 *
		 * - p->prev_ns is non-NULL *and*
		 * - p->prev_ns->n_fsnotify_marks is non-NULL
		 *
		 * This will preclude queuing the mount if this is a cleanup
		 * after a failed copy_tree() or destruction of an anonymous
		 * namespace, etc.
		 */
		mnt_notify_add(p);
	}
}

static void shrink_submounts(struct mount *mnt);

static int do_umount_root(struct super_block *sb)
{
	int ret = 0;

	down_write(&sb->s_umount);
	if (!sb_rdonly(sb)) {
		struct fs_context *fc;

		fc = fs_context_for_reconfigure(sb->s_root, SB_RDONLY,
						SB_RDONLY);
		if (IS_ERR(fc)) {
			ret = PTR_ERR(fc);
		} else {
			ret = parse_monolithic_mount_data(fc, NULL);
			if (!ret)
				ret = reconfigure_super(fc);
			put_fs_context(fc);
		}
	}
	up_write(&sb->s_umount);
	return ret;
}

static int do_umount(struct mount *mnt, int flags)
{
	struct super_block *sb = mnt->mnt.mnt_sb;
	int retval;

	retval = security_sb_umount(&mnt->mnt, flags);
	if (retval)
		return retval;

	/*
	 * Allow userspace to request a mountpoint be expired rather than
	 * unmounting unconditionally. Unmount only happens if:
	 *  (1) the mark is already set (the mark is cleared by mntput())
	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
	 */
	if (flags & MNT_EXPIRE) {
		if (&mnt->mnt == current->fs->root.mnt ||
		    flags & (MNT_FORCE | MNT_DETACH))
			return -EINVAL;

		/*
		 * probably don't strictly need the lock here if we examined
		 * all race cases, but it's a slowpath.
		 */
		lock_mount_hash();
		if (!list_empty(&mnt->mnt_mounts) || mnt_get_count(mnt) != 2) {
			unlock_mount_hash();
			return -EBUSY;
		}
		unlock_mount_hash();

		if (!xchg(&mnt->mnt_expiry_mark, 1))
			return -EAGAIN;
	}

	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things. In the Unix tradition of
	 * 'Gee that's tricky, let's do it in userspace' the umount_begin
	 * might fail to complete on the first run through as other tasks
	 * must return, and the like. That's for the mount program to worry
	 * about for the moment.
	 */

	if (flags & MNT_FORCE && sb->s_op->umount_begin) {
		sb->s_op->umount_begin(sb);
	}

	/*
	 * No sense to grab the lock for this test, but test itself looks
	 * somewhat bogus. Suggestions for better replacement?
	 * Ho-hum... In principle, we might treat that as umount + switch
	 * to rootfs. GC would eventually take care of the old vfsmount.
	 * Actually it makes sense, especially if rootfs would contain a
	 * /reboot - static binary that would close all descriptors and
	 * call reboot(9). Then init(8) could umount root and exec /reboot.
	 */
	if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
		/*
		 * Special case for "unmounting" root ...
		 * we just try to remount it readonly.
		 */
		if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
			return -EPERM;
		return do_umount_root(sb);
	}

	namespace_lock();
	lock_mount_hash();

	/* Repeat the earlier racy checks, now that we are holding the locks */
	retval = -EINVAL;
	if (!check_mnt(mnt))
		goto out;

	if (mnt->mnt.mnt_flags & MNT_LOCKED)
		goto out;

	if (!mnt_has_parent(mnt)) /* not the absolute root */
		goto out;

	event++;
	if (flags & MNT_DETACH) {
		umount_tree(mnt, UMOUNT_PROPAGATE);
		retval = 0;
	} else {
		smp_mb(); // paired with __legitimize_mnt()
		shrink_submounts(mnt);
		retval = -EBUSY;
		if (!propagate_mount_busy(mnt, 2)) {
			umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
			retval = 0;
		}
	}
out:
	unlock_mount_hash();
	namespace_unlock();
	return retval;
}

/*
 * __detach_mounts - lazily unmount all mounts on the specified dentry
 *
 * During unlink, rmdir, and d_drop it is possible to lose the path
 * to an existing mountpoint, and wind up leaking the mount.
 * detach_mounts allows lazily unmounting those mounts instead of
 * leaking them.
 *
 * The caller may hold dentry->d_inode->i_rwsem.
 */
void __detach_mounts(struct dentry *dentry)
{
	struct pinned_mountpoint mp = {};
	struct mount *mnt;

	guard(namespace_excl)();
	guard(mount_writer)();

	if (!lookup_mountpoint(dentry, &mp))
		return;

	event++;
	while (mp.node.next) {
		mnt = hlist_entry(mp.node.next, struct mount, mnt_mp_list);
		if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
			umount_mnt(mnt);
			hlist_add_head(&mnt->mnt_umount, &unmounted);
		} else
			umount_tree(mnt, UMOUNT_CONNECTED);
	}
	unpin_mountpoint(&mp);
}

/*
 * Is the caller allowed to modify his namespace?
2000 */ 2001 bool may_mount(void) 2002 { 2003 return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN); 2004 } 2005 2006 static void warn_mandlock(void) 2007 { 2008 pr_warn_once("=======================================================\n" 2009 "WARNING: The mand mount option has been deprecated and\n" 2010 " and is ignored by this kernel. Remove the mand\n" 2011 " option from the mount to silence this warning.\n" 2012 "=======================================================\n"); 2013 } 2014 2015 static int can_umount(const struct path *path, int flags) 2016 { 2017 struct mount *mnt = real_mount(path->mnt); 2018 struct super_block *sb = path->dentry->d_sb; 2019 2020 if (!may_mount()) 2021 return -EPERM; 2022 if (!path_mounted(path)) 2023 return -EINVAL; 2024 if (!check_mnt(mnt)) 2025 return -EINVAL; 2026 if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */ 2027 return -EINVAL; 2028 if (flags & MNT_FORCE && !ns_capable(sb->s_user_ns, CAP_SYS_ADMIN)) 2029 return -EPERM; 2030 return 0; 2031 } 2032 2033 // caller is responsible for flags being sane 2034 int path_umount(const struct path *path, int flags) 2035 { 2036 struct mount *mnt = real_mount(path->mnt); 2037 int ret; 2038 2039 ret = can_umount(path, flags); 2040 if (!ret) 2041 ret = do_umount(mnt, flags); 2042 2043 /* we mustn't call path_put() as that would clear mnt_expiry_mark */ 2044 dput(path->dentry); 2045 mntput_no_expire(mnt); 2046 return ret; 2047 } 2048 2049 static int ksys_umount(char __user *name, int flags) 2050 { 2051 int lookup_flags = LOOKUP_MOUNTPOINT; 2052 struct path path; 2053 int ret; 2054 2055 // basic validity checks done first 2056 if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW)) 2057 return -EINVAL; 2058 2059 if (!(flags & UMOUNT_NOFOLLOW)) 2060 lookup_flags |= LOOKUP_FOLLOW; 2061 ret = user_path_at(AT_FDCWD, name, lookup_flags, &path); 2062 if (ret) 2063 return ret; 2064 return path_umount(&path, flags); 2065 } 2066 2067 SYSCALL_DEFINE2(umount, char __user *, name, int, flags) 2068 { 2069 return ksys_umount(name, flags); 2070 } 2071 2072 #ifdef __ARCH_WANT_SYS_OLDUMOUNT 2073 2074 /* 2075 * The 2.0 compatible umount. No flags. 2076 */ 2077 SYSCALL_DEFINE1(oldumount, char __user *, name) 2078 { 2079 return ksys_umount(name, 0); 2080 } 2081 2082 #endif 2083 2084 static bool is_mnt_ns_file(struct dentry *dentry) 2085 { 2086 struct ns_common *ns; 2087 2088 /* Is this a proxy for a mount namespace? */ 2089 if (dentry->d_op != &ns_dentry_operations) 2090 return false; 2091 2092 ns = d_inode(dentry)->i_private; 2093 2094 return ns->ops == &mntns_operations; 2095 } 2096 2097 struct ns_common *from_mnt_ns(struct mnt_namespace *mnt) 2098 { 2099 return &mnt->ns; 2100 } 2101 2102 struct mnt_namespace *get_sequential_mnt_ns(struct mnt_namespace *mntns, bool previous) 2103 { 2104 struct ns_common *ns; 2105 2106 guard(rcu)(); 2107 2108 for (;;) { 2109 ns = ns_tree_adjoined_rcu(mntns, previous); 2110 if (IS_ERR(ns)) 2111 return ERR_CAST(ns); 2112 2113 mntns = to_mnt_ns(ns); 2114 2115 /* 2116 * The last passive reference count is put with RCU 2117 * delay so accessing the mount namespace is not just 2118 * safe but all relevant members are still valid. 2119 */ 2120 if (!ns_capable_noaudit(mntns->user_ns, CAP_SYS_ADMIN)) 2121 continue; 2122 2123 /* 2124 * We need an active reference count as we're persisting 2125 * the mount namespace and it might already be on its 2126 * deathbed. 
2127 */ 2128 if (!ns_ref_get(mntns)) 2129 continue; 2130 2131 return mntns; 2132 } 2133 } 2134 2135 struct mnt_namespace *mnt_ns_from_dentry(struct dentry *dentry) 2136 { 2137 if (!is_mnt_ns_file(dentry)) 2138 return NULL; 2139 2140 return to_mnt_ns(get_proc_ns(dentry->d_inode)); 2141 } 2142 2143 static bool mnt_ns_loop(struct dentry *dentry) 2144 { 2145 /* Could bind mounting the mount namespace inode cause a 2146 * mount namespace loop? 2147 */ 2148 struct mnt_namespace *mnt_ns = mnt_ns_from_dentry(dentry); 2149 2150 if (!mnt_ns) 2151 return false; 2152 2153 return current->nsproxy->mnt_ns->ns.ns_id >= mnt_ns->ns.ns_id; 2154 } 2155 2156 struct mount *copy_tree(struct mount *src_root, struct dentry *dentry, 2157 int flag) 2158 { 2159 struct mount *res, *src_parent, *src_root_child, *src_mnt, 2160 *dst_parent, *dst_mnt; 2161 2162 if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(src_root)) 2163 return ERR_PTR(-EINVAL); 2164 2165 if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry)) 2166 return ERR_PTR(-EINVAL); 2167 2168 res = dst_mnt = clone_mnt(src_root, dentry, flag); 2169 if (IS_ERR(dst_mnt)) 2170 return dst_mnt; 2171 2172 src_parent = src_root; 2173 2174 list_for_each_entry(src_root_child, &src_root->mnt_mounts, mnt_child) { 2175 if (!is_subdir(src_root_child->mnt_mountpoint, dentry)) 2176 continue; 2177 2178 for (src_mnt = src_root_child; src_mnt; 2179 src_mnt = next_mnt(src_mnt, src_root_child)) { 2180 if (!(flag & CL_COPY_UNBINDABLE) && 2181 IS_MNT_UNBINDABLE(src_mnt)) { 2182 if (src_mnt->mnt.mnt_flags & MNT_LOCKED) { 2183 /* Both unbindable and locked. */ 2184 dst_mnt = ERR_PTR(-EPERM); 2185 goto out; 2186 } else { 2187 src_mnt = skip_mnt_tree(src_mnt); 2188 continue; 2189 } 2190 } 2191 if (!(flag & CL_COPY_MNT_NS_FILE) && 2192 is_mnt_ns_file(src_mnt->mnt.mnt_root)) { 2193 src_mnt = skip_mnt_tree(src_mnt); 2194 continue; 2195 } 2196 while (src_parent != src_mnt->mnt_parent) { 2197 src_parent = src_parent->mnt_parent; 2198 dst_mnt = dst_mnt->mnt_parent; 2199 } 2200 2201 src_parent = src_mnt; 2202 dst_parent = dst_mnt; 2203 dst_mnt = clone_mnt(src_mnt, src_mnt->mnt.mnt_root, flag); 2204 if (IS_ERR(dst_mnt)) 2205 goto out; 2206 lock_mount_hash(); 2207 if (src_mnt->mnt.mnt_flags & MNT_LOCKED) 2208 dst_mnt->mnt.mnt_flags |= MNT_LOCKED; 2209 if (unlikely(flag & CL_EXPIRE)) { 2210 /* stick the duplicate mount on the same expiry 2211 * list as the original if that was on one */ 2212 if (!list_empty(&src_mnt->mnt_expire)) 2213 list_add(&dst_mnt->mnt_expire, 2214 &src_mnt->mnt_expire); 2215 } 2216 attach_mnt(dst_mnt, dst_parent, src_parent->mnt_mp); 2217 unlock_mount_hash(); 2218 } 2219 } 2220 return res; 2221 2222 out: 2223 if (res) { 2224 lock_mount_hash(); 2225 umount_tree(res, UMOUNT_SYNC); 2226 unlock_mount_hash(); 2227 } 2228 return dst_mnt; 2229 } 2230 2231 static inline bool extend_array(struct path **res, struct path **to_free, 2232 unsigned n, unsigned *count, unsigned new_count) 2233 { 2234 struct path *p; 2235 2236 if (likely(n < *count)) 2237 return true; 2238 p = kmalloc_array(new_count, sizeof(struct path), GFP_KERNEL); 2239 if (p && *count) 2240 memcpy(p, *res, *count * sizeof(struct path)); 2241 *count = new_count; 2242 kfree(*to_free); 2243 *to_free = *res = p; 2244 return p; 2245 } 2246 2247 const struct path *collect_paths(const struct path *path, 2248 struct path *prealloc, unsigned count) 2249 { 2250 struct mount *root = real_mount(path->mnt); 2251 struct mount *child; 2252 struct path *res = prealloc, *to_free = NULL; 2253 unsigned n = 0; 2254 2255 
guard(namespace_shared)(); 2256 2257 if (!check_mnt(root)) 2258 return ERR_PTR(-EINVAL); 2259 if (!extend_array(&res, &to_free, 0, &count, 32)) 2260 return ERR_PTR(-ENOMEM); 2261 res[n++] = *path; 2262 list_for_each_entry(child, &root->mnt_mounts, mnt_child) { 2263 if (!is_subdir(child->mnt_mountpoint, path->dentry)) 2264 continue; 2265 for (struct mount *m = child; m; m = next_mnt(m, child)) { 2266 if (!extend_array(&res, &to_free, n, &count, 2 * count)) 2267 return ERR_PTR(-ENOMEM); 2268 res[n].mnt = &m->mnt; 2269 res[n].dentry = m->mnt.mnt_root; 2270 n++; 2271 } 2272 } 2273 if (!extend_array(&res, &to_free, n, &count, count + 1)) 2274 return ERR_PTR(-ENOMEM); 2275 memset(res + n, 0, (count - n) * sizeof(struct path)); 2276 for (struct path *p = res; p->mnt; p++) 2277 path_get(p); 2278 return res; 2279 } 2280 2281 void drop_collected_paths(const struct path *paths, const struct path *prealloc) 2282 { 2283 for (const struct path *p = paths; p->mnt; p++) 2284 path_put(p); 2285 if (paths != prealloc) 2286 kfree(paths); 2287 } 2288 2289 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *, bool); 2290 2291 void dissolve_on_fput(struct vfsmount *mnt) 2292 { 2293 struct mount *m = real_mount(mnt); 2294 2295 /* 2296 * m used to be the root of anon namespace; if it still is one, 2297 * we need to dissolve the mount tree and free that namespace. 2298 * Let's try to avoid taking namespace_sem if we can determine 2299 * that there's nothing to do without it - rcu_read_lock() is 2300 * enough to make anon_ns_root() memory-safe and once m has 2301 * left its namespace, it's no longer our concern, since it will 2302 * never become a root of anon ns again. 2303 */ 2304 2305 scoped_guard(rcu) { 2306 if (!anon_ns_root(m)) 2307 return; 2308 } 2309 2310 scoped_guard(namespace_excl) { 2311 if (!anon_ns_root(m)) 2312 return; 2313 2314 emptied_ns = m->mnt_ns; 2315 lock_mount_hash(); 2316 umount_tree(m, UMOUNT_CONNECTED); 2317 unlock_mount_hash(); 2318 } 2319 } 2320 2321 /* locks: namespace_shared && pinned(mnt) || mount_locked_reader */ 2322 static bool __has_locked_children(struct mount *mnt, struct dentry *dentry) 2323 { 2324 struct mount *child; 2325 2326 list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) { 2327 if (!is_subdir(child->mnt_mountpoint, dentry)) 2328 continue; 2329 2330 if (child->mnt.mnt_flags & MNT_LOCKED) 2331 return true; 2332 } 2333 return false; 2334 } 2335 2336 bool has_locked_children(struct mount *mnt, struct dentry *dentry) 2337 { 2338 guard(mount_locked_reader)(); 2339 return __has_locked_children(mnt, dentry); 2340 } 2341 2342 /* 2343 * Check that there aren't references to earlier/same mount namespaces in the 2344 * specified subtree. Such references can act as pins for mount namespaces 2345 * that aren't checked by the mount-cycle checking code, thereby allowing 2346 * cycles to be made. 2347 * 2348 * locks: mount_locked_reader || namespace_shared && pinned(subtree) 2349 */ 2350 static bool check_for_nsfs_mounts(struct mount *subtree) 2351 { 2352 for (struct mount *p = subtree; p; p = next_mnt(p, subtree)) 2353 if (mnt_ns_loop(p->mnt.mnt_root)) 2354 return false; 2355 return true; 2356 } 2357 2358 /** 2359 * clone_private_mount - create a private clone of a path 2360 * @path: path to clone 2361 * 2362 * This creates a new vfsmount, which will be the clone of @path. The new mount 2363 * will not be attached anywhere in the namespace and will be private (i.e. 2364 * changes to the originating mount won't be propagated into this). 
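 *
 * Informal usage sketch (illustrative only, not a real in-tree caller;
 * error handling abbreviated):
 *
 *	struct vfsmount *m = clone_private_mount(&path);
 *
 *	if (IS_ERR(m))
 *		return PTR_ERR(m);
 *	... use m as a detached, private copy of the tree at path ...
 *	kern_unmount(m);	// longterm mount, see the MNT_NS_INTERNAL note below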
2365 * 2366 * This assumes caller has called or done the equivalent of may_mount(). 2367 * 2368 * Release with mntput(). 2369 */ 2370 struct vfsmount *clone_private_mount(const struct path *path) 2371 { 2372 struct mount *old_mnt = real_mount(path->mnt); 2373 struct mount *new_mnt; 2374 2375 guard(namespace_shared)(); 2376 2377 if (IS_MNT_UNBINDABLE(old_mnt)) 2378 return ERR_PTR(-EINVAL); 2379 2380 /* 2381 * Make sure the source mount is acceptable. 2382 * Anything mounted in our mount namespace is allowed. 2383 * Otherwise, it must be the root of an anonymous mount 2384 * namespace, and we need to make sure no namespace 2385 * loops get created. 2386 */ 2387 if (!check_mnt(old_mnt)) { 2388 if (!anon_ns_root(old_mnt)) 2389 return ERR_PTR(-EINVAL); 2390 2391 if (!check_for_nsfs_mounts(old_mnt)) 2392 return ERR_PTR(-EINVAL); 2393 } 2394 2395 if (!ns_capable(old_mnt->mnt_ns->user_ns, CAP_SYS_ADMIN)) 2396 return ERR_PTR(-EPERM); 2397 2398 if (__has_locked_children(old_mnt, path->dentry)) 2399 return ERR_PTR(-EINVAL); 2400 2401 new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE); 2402 if (IS_ERR(new_mnt)) 2403 return ERR_PTR(-EINVAL); 2404 2405 /* Longterm mount to be removed by kern_unmount*() */ 2406 new_mnt->mnt_ns = MNT_NS_INTERNAL; 2407 return &new_mnt->mnt; 2408 } 2409 EXPORT_SYMBOL_GPL(clone_private_mount); 2410 2411 static void lock_mnt_tree(struct mount *mnt) 2412 { 2413 struct mount *p; 2414 2415 for (p = mnt; p; p = next_mnt(p, mnt)) { 2416 int flags = p->mnt.mnt_flags; 2417 /* Don't allow unprivileged users to change mount flags */ 2418 flags |= MNT_LOCK_ATIME; 2419 2420 if (flags & MNT_READONLY) 2421 flags |= MNT_LOCK_READONLY; 2422 2423 if (flags & MNT_NODEV) 2424 flags |= MNT_LOCK_NODEV; 2425 2426 if (flags & MNT_NOSUID) 2427 flags |= MNT_LOCK_NOSUID; 2428 2429 if (flags & MNT_NOEXEC) 2430 flags |= MNT_LOCK_NOEXEC; 2431 /* Don't allow unprivileged users to reveal what is under a mount */ 2432 if (list_empty(&p->mnt_expire) && p != mnt) 2433 flags |= MNT_LOCKED; 2434 p->mnt.mnt_flags = flags; 2435 } 2436 } 2437 2438 static void cleanup_group_ids(struct mount *mnt, struct mount *end) 2439 { 2440 struct mount *p; 2441 2442 for (p = mnt; p != end; p = next_mnt(p, mnt)) { 2443 if (p->mnt_group_id && !IS_MNT_SHARED(p)) 2444 mnt_release_group_id(p); 2445 } 2446 } 2447 2448 static int invent_group_ids(struct mount *mnt, bool recurse) 2449 { 2450 struct mount *p; 2451 2452 for (p = mnt; p; p = recurse ? 
next_mnt(p, mnt) : NULL) { 2453 if (!p->mnt_group_id) { 2454 int err = mnt_alloc_group_id(p); 2455 if (err) { 2456 cleanup_group_ids(mnt, p); 2457 return err; 2458 } 2459 } 2460 } 2461 2462 return 0; 2463 } 2464 2465 int count_mounts(struct mnt_namespace *ns, struct mount *mnt) 2466 { 2467 unsigned int max = READ_ONCE(sysctl_mount_max); 2468 unsigned int mounts = 0; 2469 struct mount *p; 2470 2471 if (ns->nr_mounts >= max) 2472 return -ENOSPC; 2473 max -= ns->nr_mounts; 2474 if (ns->pending_mounts >= max) 2475 return -ENOSPC; 2476 max -= ns->pending_mounts; 2477 2478 for (p = mnt; p; p = next_mnt(p, mnt)) 2479 mounts++; 2480 2481 if (mounts > max) 2482 return -ENOSPC; 2483 2484 ns->pending_mounts += mounts; 2485 return 0; 2486 } 2487 2488 enum mnt_tree_flags_t { 2489 MNT_TREE_BENEATH = BIT(0), 2490 MNT_TREE_PROPAGATION = BIT(1), 2491 }; 2492 2493 /** 2494 * attach_recursive_mnt - attach a source mount tree 2495 * @source_mnt: mount tree to be attached 2496 * @dest: the context for mounting at the place where the tree should go 2497 * 2498 * NOTE: in the table below explains the semantics when a source mount 2499 * of a given type is attached to a destination mount of a given type. 2500 * --------------------------------------------------------------------------- 2501 * | BIND MOUNT OPERATION | 2502 * |************************************************************************** 2503 * | source-->| shared | private | slave | unbindable | 2504 * | dest | | | | | 2505 * | | | | | | | 2506 * | v | | | | | 2507 * |************************************************************************** 2508 * | shared | shared (++) | shared (+) | shared(+++)| invalid | 2509 * | | | | | | 2510 * |non-shared| shared (+) | private | slave (*) | invalid | 2511 * *************************************************************************** 2512 * A bind operation clones the source mount and mounts the clone on the 2513 * destination mount. 2514 * 2515 * (++) the cloned mount is propagated to all the mounts in the propagation 2516 * tree of the destination mount and the cloned mount is added to 2517 * the peer group of the source mount. 2518 * (+) the cloned mount is created under the destination mount and is marked 2519 * as shared. The cloned mount is added to the peer group of the source 2520 * mount. 2521 * (+++) the mount is propagated to all the mounts in the propagation tree 2522 * of the destination mount and the cloned mount is made slave 2523 * of the same master as that of the source mount. The cloned mount 2524 * is marked as 'shared and slave'. 2525 * (*) the cloned mount is made a slave of the same master as that of the 2526 * source mount. 2527 * 2528 * --------------------------------------------------------------------------- 2529 * | MOVE MOUNT OPERATION | 2530 * |************************************************************************** 2531 * | source-->| shared | private | slave | unbindable | 2532 * | dest | | | | | 2533 * | | | | | | | 2534 * | v | | | | | 2535 * |************************************************************************** 2536 * | shared | shared (+) | shared (+) | shared(+++) | invalid | 2537 * | | | | | | 2538 * |non-shared| shared (+*) | private | slave (*) | unbindable | 2539 * *************************************************************************** 2540 * 2541 * (+) the mount is moved to the destination. And is then propagated to 2542 * all the mounts in the propagation tree of the destination mount. 2543 * (+*) the mount is moved to the destination. 
2544 * (+++) the mount is moved to the destination and is then propagated to 2545 * all the mounts belonging to the destination mount's propagation tree. 2546 * the mount is marked as 'shared and slave'. 2547 * (*) the mount continues to be a slave at the new location. 2548 * 2549 * if the source mount is a tree, the operations explained above is 2550 * applied to each mount in the tree. 2551 * Must be called without spinlocks held, since this function can sleep 2552 * in allocations. 2553 * 2554 * Context: The function expects namespace_lock() to be held. 2555 * Return: If @source_mnt was successfully attached 0 is returned. 2556 * Otherwise a negative error code is returned. 2557 */ 2558 static int attach_recursive_mnt(struct mount *source_mnt, 2559 const struct pinned_mountpoint *dest) 2560 { 2561 struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns; 2562 struct mount *dest_mnt = dest->parent; 2563 struct mountpoint *dest_mp = dest->mp; 2564 HLIST_HEAD(tree_list); 2565 struct mnt_namespace *ns = dest_mnt->mnt_ns; 2566 struct pinned_mountpoint root = {}; 2567 struct mountpoint *shorter = NULL; 2568 struct mount *child, *p; 2569 struct mount *top; 2570 struct hlist_node *n; 2571 int err = 0; 2572 bool moving = mnt_has_parent(source_mnt); 2573 2574 /* 2575 * Preallocate a mountpoint in case the new mounts need to be 2576 * mounted beneath mounts on the same mountpoint. 2577 */ 2578 for (top = source_mnt; unlikely(top->overmount); top = top->overmount) { 2579 if (!shorter && is_mnt_ns_file(top->mnt.mnt_root)) 2580 shorter = top->mnt_mp; 2581 } 2582 err = get_mountpoint(top->mnt.mnt_root, &root); 2583 if (err) 2584 return err; 2585 2586 /* Is there space to add these mounts to the mount namespace? */ 2587 if (!moving) { 2588 err = count_mounts(ns, source_mnt); 2589 if (err) 2590 goto out; 2591 } 2592 2593 if (IS_MNT_SHARED(dest_mnt)) { 2594 err = invent_group_ids(source_mnt, true); 2595 if (err) 2596 goto out; 2597 err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list); 2598 } 2599 lock_mount_hash(); 2600 if (err) 2601 goto out_cleanup_ids; 2602 2603 if (IS_MNT_SHARED(dest_mnt)) { 2604 for (p = source_mnt; p; p = next_mnt(p, source_mnt)) 2605 set_mnt_shared(p); 2606 } 2607 2608 if (moving) { 2609 umount_mnt(source_mnt); 2610 mnt_notify_add(source_mnt); 2611 /* if the mount is moved, it should no longer be expired 2612 * automatically */ 2613 list_del_init(&source_mnt->mnt_expire); 2614 } else { 2615 if (source_mnt->mnt_ns) { 2616 /* move from anon - the caller will destroy */ 2617 emptied_ns = source_mnt->mnt_ns; 2618 for (p = source_mnt; p; p = next_mnt(p, source_mnt)) 2619 move_from_ns(p); 2620 } 2621 } 2622 2623 mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt); 2624 /* 2625 * Now the original copy is in the same state as the secondaries - 2626 * its root attached to mountpoint, but not hashed and all mounts 2627 * in it are either in our namespace or in no namespace at all. 2628 * Add the original to the list of copies and deal with the 2629 * rest of work for all of them uniformly. 
2630 */ 2631 hlist_add_head(&source_mnt->mnt_hash, &tree_list); 2632 2633 hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) { 2634 struct mount *q; 2635 hlist_del_init(&child->mnt_hash); 2636 /* Notice when we are propagating across user namespaces */ 2637 if (child->mnt_parent->mnt_ns->user_ns != user_ns) 2638 lock_mnt_tree(child); 2639 q = __lookup_mnt(&child->mnt_parent->mnt, 2640 child->mnt_mountpoint); 2641 commit_tree(child); 2642 if (q) { 2643 struct mount *r = topmost_overmount(child); 2644 struct mountpoint *mp = root.mp; 2645 2646 if (unlikely(shorter) && child != source_mnt) 2647 mp = shorter; 2648 mnt_change_mountpoint(r, mp, q); 2649 } 2650 } 2651 unpin_mountpoint(&root); 2652 unlock_mount_hash(); 2653 2654 return 0; 2655 2656 out_cleanup_ids: 2657 while (!hlist_empty(&tree_list)) { 2658 child = hlist_entry(tree_list.first, struct mount, mnt_hash); 2659 child->mnt_parent->mnt_ns->pending_mounts = 0; 2660 umount_tree(child, UMOUNT_SYNC); 2661 } 2662 unlock_mount_hash(); 2663 cleanup_group_ids(source_mnt, NULL); 2664 out: 2665 ns->pending_mounts = 0; 2666 2667 read_seqlock_excl(&mount_lock); 2668 unpin_mountpoint(&root); 2669 read_sequnlock_excl(&mount_lock); 2670 2671 return err; 2672 } 2673 2674 static inline struct mount *where_to_mount(const struct path *path, 2675 struct dentry **dentry, 2676 bool beneath) 2677 { 2678 struct mount *m; 2679 2680 if (unlikely(beneath)) { 2681 m = topmost_overmount(real_mount(path->mnt)); 2682 *dentry = m->mnt_mountpoint; 2683 return m->mnt_parent; 2684 } 2685 m = __lookup_mnt(path->mnt, path->dentry); 2686 if (unlikely(m)) { 2687 m = topmost_overmount(m); 2688 *dentry = m->mnt.mnt_root; 2689 return m; 2690 } 2691 *dentry = path->dentry; 2692 return real_mount(path->mnt); 2693 } 2694 2695 /** 2696 * do_lock_mount - acquire environment for mounting 2697 * @path: target path 2698 * @res: context to set up 2699 * @beneath: whether the intention is to mount beneath @path 2700 * 2701 * To mount something at given location, we need 2702 * namespace_sem locked exclusive 2703 * inode of dentry we are mounting on locked exclusive 2704 * struct mountpoint for that dentry 2705 * struct mount we are mounting on 2706 * 2707 * Results are stored in caller-supplied context (pinned_mountpoint); 2708 * on success we have res->parent and res->mp pointing to parent and 2709 * mountpoint respectively and res->node inserted into the ->m_list 2710 * of the mountpoint, making sure the mountpoint won't disappear. 2711 * On failure we have res->parent set to ERR_PTR(-E...), res->mp 2712 * left NULL, res->node - empty. 2713 * In case of success do_lock_mount returns with locks acquired (in 2714 * proper order - inode lock nests outside of namespace_sem). 2715 * 2716 * Request to mount on overmounted location is treated as "mount on 2717 * top of whatever's overmounting it"; request to mount beneath 2718 * a location - "mount immediately beneath the topmost mount at that 2719 * place". 2720 * 2721 * In all cases the location must not have been unmounted and the 2722 * chosen mountpoint must be allowed to be mounted on. For "beneath" 2723 * case we also require the location to be at the root of a mount 2724 * that has a parent (i.e. is not a root of some namespace). 
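 *
 * Callers normally go through the LOCK_MOUNT*() wrappers defined below;
 * a typical pattern (informal sketch, modelled on do_loopback()) is:
 *
 *	LOCK_MOUNT(mp, path);
 *	if (IS_ERR(mp.parent))
 *		return PTR_ERR(mp.parent);
 *	... graft something onto mp.parent at mp.mp ...
 *	(unlock_mount() then runs automatically via __cleanup() in the macro)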
2725 */ 2726 static void do_lock_mount(const struct path *path, 2727 struct pinned_mountpoint *res, 2728 bool beneath) 2729 { 2730 int err; 2731 2732 if (unlikely(beneath) && !path_mounted(path)) { 2733 res->parent = ERR_PTR(-EINVAL); 2734 return; 2735 } 2736 2737 do { 2738 struct dentry *dentry, *d; 2739 struct mount *m, *n; 2740 2741 scoped_guard(mount_locked_reader) { 2742 m = where_to_mount(path, &dentry, beneath); 2743 if (&m->mnt != path->mnt) { 2744 mntget(&m->mnt); 2745 dget(dentry); 2746 } 2747 } 2748 2749 inode_lock(dentry->d_inode); 2750 namespace_lock(); 2751 2752 // check if the chain of mounts (if any) has changed. 2753 scoped_guard(mount_locked_reader) 2754 n = where_to_mount(path, &d, beneath); 2755 2756 if (unlikely(n != m || dentry != d)) 2757 err = -EAGAIN; // something moved, retry 2758 else if (unlikely(cant_mount(dentry) || !is_mounted(path->mnt))) 2759 err = -ENOENT; // not to be mounted on 2760 else if (beneath && &m->mnt == path->mnt && !m->overmount) 2761 err = -EINVAL; 2762 else 2763 err = get_mountpoint(dentry, res); 2764 2765 if (unlikely(err)) { 2766 res->parent = ERR_PTR(err); 2767 namespace_unlock(); 2768 inode_unlock(dentry->d_inode); 2769 } else { 2770 res->parent = m; 2771 } 2772 /* 2773 * Drop the temporary references. This is subtle - on success 2774 * we are doing that under namespace_sem, which would normally 2775 * be forbidden. However, in that case we are guaranteed that 2776 * refcounts won't reach zero, since we know that path->mnt 2777 * is mounted and thus all mounts reachable from it are pinned 2778 * and stable, along with their mountpoints and roots. 2779 */ 2780 if (&m->mnt != path->mnt) { 2781 dput(dentry); 2782 mntput(&m->mnt); 2783 } 2784 } while (err == -EAGAIN); 2785 } 2786 2787 static void __unlock_mount(struct pinned_mountpoint *m) 2788 { 2789 inode_unlock(m->mp->m_dentry->d_inode); 2790 read_seqlock_excl(&mount_lock); 2791 unpin_mountpoint(m); 2792 read_sequnlock_excl(&mount_lock); 2793 namespace_unlock(); 2794 } 2795 2796 static inline void unlock_mount(struct pinned_mountpoint *m) 2797 { 2798 if (!IS_ERR(m->parent)) 2799 __unlock_mount(m); 2800 } 2801 2802 #define LOCK_MOUNT_MAYBE_BENEATH(mp, path, beneath) \ 2803 struct pinned_mountpoint mp __cleanup(unlock_mount) = {}; \ 2804 do_lock_mount((path), &mp, (beneath)) 2805 #define LOCK_MOUNT(mp, path) LOCK_MOUNT_MAYBE_BENEATH(mp, (path), false) 2806 #define LOCK_MOUNT_EXACT(mp, path) \ 2807 struct pinned_mountpoint mp __cleanup(unlock_mount) = {}; \ 2808 lock_mount_exact((path), &mp) 2809 2810 static int graft_tree(struct mount *mnt, const struct pinned_mountpoint *mp) 2811 { 2812 if (mnt->mnt.mnt_sb->s_flags & SB_NOUSER) 2813 return -EINVAL; 2814 2815 if (d_is_dir(mp->mp->m_dentry) != 2816 d_is_dir(mnt->mnt.mnt_root)) 2817 return -ENOTDIR; 2818 2819 return attach_recursive_mnt(mnt, mp); 2820 } 2821 2822 static int may_change_propagation(const struct mount *m) 2823 { 2824 struct mnt_namespace *ns = m->mnt_ns; 2825 2826 // it must be mounted in some namespace 2827 if (IS_ERR_OR_NULL(ns)) // is_mounted() 2828 return -EINVAL; 2829 // and the caller must be admin in userns of that namespace 2830 if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN)) 2831 return -EPERM; 2832 return 0; 2833 } 2834 2835 /* 2836 * Sanity check the flags to change_mnt_propagation. 
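 *
 * A few informal examples of what the checks below yield:
 *
 *	MS_REC | MS_PRIVATE    -> MS_PRIVATE	(MS_REC is masked off)
 *	MS_SHARED | MS_SILENT  -> MS_SHARED	(MS_SILENT is masked off)
 *	MS_SHARED | MS_SLAVE   -> 0		(more than one type set)
 *	MS_RDONLY              -> 0		(not a propagation type)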
2837 */ 2838 2839 static int flags_to_propagation_type(int ms_flags) 2840 { 2841 int type = ms_flags & ~(MS_REC | MS_SILENT); 2842 2843 /* Fail if any non-propagation flags are set */ 2844 if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE)) 2845 return 0; 2846 /* Only one propagation flag should be set */ 2847 if (!is_power_of_2(type)) 2848 return 0; 2849 return type; 2850 } 2851 2852 /* 2853 * recursively change the type of the mountpoint. 2854 */ 2855 static int do_change_type(const struct path *path, int ms_flags) 2856 { 2857 struct mount *m; 2858 struct mount *mnt = real_mount(path->mnt); 2859 int recurse = ms_flags & MS_REC; 2860 int type; 2861 int err; 2862 2863 if (!path_mounted(path)) 2864 return -EINVAL; 2865 2866 type = flags_to_propagation_type(ms_flags); 2867 if (!type) 2868 return -EINVAL; 2869 2870 guard(namespace_excl)(); 2871 2872 err = may_change_propagation(mnt); 2873 if (err) 2874 return err; 2875 2876 if (type == MS_SHARED) { 2877 err = invent_group_ids(mnt, recurse); 2878 if (err) 2879 return err; 2880 } 2881 2882 for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL)) 2883 change_mnt_propagation(m, type); 2884 2885 return 0; 2886 } 2887 2888 /* may_copy_tree() - check if a mount tree can be copied 2889 * @path: path to the mount tree to be copied 2890 * 2891 * This helper checks if the caller may copy the mount tree starting 2892 * from @path->mnt. The caller may copy the mount tree under the 2893 * following circumstances: 2894 * 2895 * (1) The caller is located in the mount namespace of the mount tree. 2896 * This also implies that the mount does not belong to an anonymous 2897 * mount namespace. 2898 * (2) The caller tries to copy an nfs mount referring to a mount 2899 * namespace, i.e., the caller is trying to copy a mount namespace 2900 * entry from nsfs. 2901 * (3) The caller tries to copy a pidfs mount referring to a pidfd. 2902 * (4) The caller is trying to copy a mount tree that belongs to an 2903 * anonymous mount namespace. 2904 * 2905 * For that to be safe, this helper enforces that the origin mount 2906 * namespace the anonymous mount namespace was created from is the 2907 * same as the caller's mount namespace by comparing the sequence 2908 * numbers. 2909 * 2910 * This is not strictly necessary. The current semantics of the new 2911 * mount api enforce that the caller must be located in the same 2912 * mount namespace as the mount tree it interacts with. Using the 2913 * origin sequence number preserves these semantics even for 2914 * anonymous mount namespaces. However, one could envision extending 2915 * the api to directly operate across mount namespace if needed. 2916 * 2917 * The ownership of a non-anonymous mount namespace such as the 2918 * caller's cannot change. 2919 * => We know that the caller's mount namespace is stable. 2920 * 2921 * If the origin sequence number of the anonymous mount namespace is 2922 * the same as the sequence number of the caller's mount namespace. 2923 * => The owning namespaces are the same. 2924 * 2925 * ==> The earlier capability check on the owning namespace of the 2926 * caller's mount namespace ensures that the caller has the 2927 * ability to copy the mount tree. 2928 * 2929 * Returns true if the mount tree can be copied, false otherwise. 
2930 */ 2931 static inline bool may_copy_tree(const struct path *path) 2932 { 2933 struct mount *mnt = real_mount(path->mnt); 2934 const struct dentry_operations *d_op; 2935 2936 if (check_mnt(mnt)) 2937 return true; 2938 2939 d_op = path->dentry->d_op; 2940 if (d_op == &ns_dentry_operations) 2941 return true; 2942 2943 if (d_op == &pidfs_dentry_operations) 2944 return true; 2945 2946 if (!is_mounted(path->mnt)) 2947 return false; 2948 2949 return check_anonymous_mnt(mnt); 2950 } 2951 2952 2953 static struct mount *__do_loopback(const struct path *old_path, int recurse) 2954 { 2955 struct mount *old = real_mount(old_path->mnt); 2956 2957 if (IS_MNT_UNBINDABLE(old)) 2958 return ERR_PTR(-EINVAL); 2959 2960 if (!may_copy_tree(old_path)) 2961 return ERR_PTR(-EINVAL); 2962 2963 if (!recurse && __has_locked_children(old, old_path->dentry)) 2964 return ERR_PTR(-EINVAL); 2965 2966 if (recurse) 2967 return copy_tree(old, old_path->dentry, CL_COPY_MNT_NS_FILE); 2968 else 2969 return clone_mnt(old, old_path->dentry, 0); 2970 } 2971 2972 /* 2973 * do loopback mount. 2974 */ 2975 static int do_loopback(const struct path *path, const char *old_name, 2976 int recurse) 2977 { 2978 struct path old_path __free(path_put) = {}; 2979 struct mount *mnt = NULL; 2980 int err; 2981 if (!old_name || !*old_name) 2982 return -EINVAL; 2983 err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path); 2984 if (err) 2985 return err; 2986 2987 if (mnt_ns_loop(old_path.dentry)) 2988 return -EINVAL; 2989 2990 LOCK_MOUNT(mp, path); 2991 if (IS_ERR(mp.parent)) 2992 return PTR_ERR(mp.parent); 2993 2994 if (!check_mnt(mp.parent)) 2995 return -EINVAL; 2996 2997 mnt = __do_loopback(&old_path, recurse); 2998 if (IS_ERR(mnt)) 2999 return PTR_ERR(mnt); 3000 3001 err = graft_tree(mnt, &mp); 3002 if (err) { 3003 lock_mount_hash(); 3004 umount_tree(mnt, UMOUNT_SYNC); 3005 unlock_mount_hash(); 3006 } 3007 return err; 3008 } 3009 3010 static struct mnt_namespace *get_detached_copy(const struct path *path, bool recursive) 3011 { 3012 struct mnt_namespace *ns, *mnt_ns = current->nsproxy->mnt_ns, *src_mnt_ns; 3013 struct user_namespace *user_ns = mnt_ns->user_ns; 3014 struct mount *mnt, *p; 3015 3016 ns = alloc_mnt_ns(user_ns, true); 3017 if (IS_ERR(ns)) 3018 return ns; 3019 3020 guard(namespace_excl)(); 3021 3022 /* 3023 * Record the sequence number of the source mount namespace. 3024 * This needs to hold namespace_sem to ensure that the mount 3025 * doesn't get attached. 
3026 */ 3027 if (is_mounted(path->mnt)) { 3028 src_mnt_ns = real_mount(path->mnt)->mnt_ns; 3029 if (is_anon_ns(src_mnt_ns)) 3030 ns->seq_origin = src_mnt_ns->seq_origin; 3031 else 3032 ns->seq_origin = src_mnt_ns->ns.ns_id; 3033 } 3034 3035 mnt = __do_loopback(path, recursive); 3036 if (IS_ERR(mnt)) { 3037 emptied_ns = ns; 3038 return ERR_CAST(mnt); 3039 } 3040 3041 for (p = mnt; p; p = next_mnt(p, mnt)) { 3042 mnt_add_to_ns(ns, p); 3043 ns->nr_mounts++; 3044 } 3045 ns->root = mnt; 3046 return ns; 3047 } 3048 3049 static struct file *open_detached_copy(struct path *path, bool recursive) 3050 { 3051 struct mnt_namespace *ns = get_detached_copy(path, recursive); 3052 struct file *file; 3053 3054 if (IS_ERR(ns)) 3055 return ERR_CAST(ns); 3056 3057 mntput(path->mnt); 3058 path->mnt = mntget(&ns->root->mnt); 3059 file = dentry_open(path, O_PATH, current_cred()); 3060 if (IS_ERR(file)) 3061 dissolve_on_fput(path->mnt); 3062 else 3063 file->f_mode |= FMODE_NEED_UNMOUNT; 3064 return file; 3065 } 3066 3067 static struct file *vfs_open_tree(int dfd, const char __user *filename, unsigned int flags) 3068 { 3069 int ret; 3070 struct path path __free(path_put) = {}; 3071 int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW; 3072 bool detached = flags & OPEN_TREE_CLONE; 3073 3074 BUILD_BUG_ON(OPEN_TREE_CLOEXEC != O_CLOEXEC); 3075 3076 if (flags & ~(AT_EMPTY_PATH | AT_NO_AUTOMOUNT | AT_RECURSIVE | 3077 AT_SYMLINK_NOFOLLOW | OPEN_TREE_CLONE | 3078 OPEN_TREE_CLOEXEC)) 3079 return ERR_PTR(-EINVAL); 3080 3081 if ((flags & (AT_RECURSIVE | OPEN_TREE_CLONE)) == AT_RECURSIVE) 3082 return ERR_PTR(-EINVAL); 3083 3084 if (flags & AT_NO_AUTOMOUNT) 3085 lookup_flags &= ~LOOKUP_AUTOMOUNT; 3086 if (flags & AT_SYMLINK_NOFOLLOW) 3087 lookup_flags &= ~LOOKUP_FOLLOW; 3088 if (flags & AT_EMPTY_PATH) 3089 lookup_flags |= LOOKUP_EMPTY; 3090 3091 if (detached && !may_mount()) 3092 return ERR_PTR(-EPERM); 3093 3094 ret = user_path_at(dfd, filename, lookup_flags, &path); 3095 if (unlikely(ret)) 3096 return ERR_PTR(ret); 3097 3098 if (detached) 3099 return open_detached_copy(&path, flags & AT_RECURSIVE); 3100 3101 return dentry_open(&path, O_PATH, current_cred()); 3102 } 3103 3104 SYSCALL_DEFINE3(open_tree, int, dfd, const char __user *, filename, unsigned, flags) 3105 { 3106 return FD_ADD(flags, vfs_open_tree(dfd, filename, flags)); 3107 } 3108 3109 /* 3110 * Don't allow locked mount flags to be cleared. 3111 * 3112 * No locks need to be held here while testing the various MNT_LOCK 3113 * flags because those flags can never be cleared once they are set. 
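 *
 * Informal illustration (assuming mnt->mnt.mnt_flags already contains
 * MNT_LOCK_READONLY | MNT_READONLY, e.g. set by lock_mnt_tree()):
 *
 *	can_change_locked_flags(mnt, MNT_READONLY | MNT_NOSUID) -> true
 *	can_change_locked_flags(mnt, MNT_NOSUID)                -> false
 *	(the second request would drop the locked MNT_READONLY flag)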
3114 */ 3115 static bool can_change_locked_flags(struct mount *mnt, unsigned int mnt_flags) 3116 { 3117 unsigned int fl = mnt->mnt.mnt_flags; 3118 3119 if ((fl & MNT_LOCK_READONLY) && 3120 !(mnt_flags & MNT_READONLY)) 3121 return false; 3122 3123 if ((fl & MNT_LOCK_NODEV) && 3124 !(mnt_flags & MNT_NODEV)) 3125 return false; 3126 3127 if ((fl & MNT_LOCK_NOSUID) && 3128 !(mnt_flags & MNT_NOSUID)) 3129 return false; 3130 3131 if ((fl & MNT_LOCK_NOEXEC) && 3132 !(mnt_flags & MNT_NOEXEC)) 3133 return false; 3134 3135 if ((fl & MNT_LOCK_ATIME) && 3136 ((fl & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK))) 3137 return false; 3138 3139 return true; 3140 } 3141 3142 static int change_mount_ro_state(struct mount *mnt, unsigned int mnt_flags) 3143 { 3144 bool readonly_request = (mnt_flags & MNT_READONLY); 3145 3146 if (readonly_request == __mnt_is_readonly(&mnt->mnt)) 3147 return 0; 3148 3149 if (readonly_request) 3150 return mnt_make_readonly(mnt); 3151 3152 mnt->mnt.mnt_flags &= ~MNT_READONLY; 3153 return 0; 3154 } 3155 3156 static void set_mount_attributes(struct mount *mnt, unsigned int mnt_flags) 3157 { 3158 mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK; 3159 mnt->mnt.mnt_flags = mnt_flags; 3160 touch_mnt_namespace(mnt->mnt_ns); 3161 } 3162 3163 static void mnt_warn_timestamp_expiry(const struct path *mountpoint, 3164 struct vfsmount *mnt) 3165 { 3166 struct super_block *sb = mnt->mnt_sb; 3167 3168 if (!__mnt_is_readonly(mnt) && 3169 (!(sb->s_iflags & SB_I_TS_EXPIRY_WARNED)) && 3170 (ktime_get_real_seconds() + TIME_UPTIME_SEC_MAX > sb->s_time_max)) { 3171 char *buf, *mntpath; 3172 3173 buf = (char *)__get_free_page(GFP_KERNEL); 3174 if (buf) 3175 mntpath = d_path(mountpoint, buf, PAGE_SIZE); 3176 else 3177 mntpath = ERR_PTR(-ENOMEM); 3178 if (IS_ERR(mntpath)) 3179 mntpath = "(unknown)"; 3180 3181 pr_warn("%s filesystem being %s at %s supports timestamps until %ptTd (0x%llx)\n", 3182 sb->s_type->name, 3183 is_mounted(mnt) ? "remounted" : "mounted", 3184 mntpath, &sb->s_time_max, 3185 (unsigned long long)sb->s_time_max); 3186 3187 sb->s_iflags |= SB_I_TS_EXPIRY_WARNED; 3188 if (buf) 3189 free_page((unsigned long)buf); 3190 } 3191 } 3192 3193 /* 3194 * Handle reconfiguration of the mountpoint only without alteration of the 3195 * superblock it refers to. This is triggered by specifying MS_REMOUNT|MS_BIND 3196 * to mount(2). 3197 */ 3198 static int do_reconfigure_mnt(const struct path *path, unsigned int mnt_flags) 3199 { 3200 struct super_block *sb = path->mnt->mnt_sb; 3201 struct mount *mnt = real_mount(path->mnt); 3202 int ret; 3203 3204 if (!check_mnt(mnt)) 3205 return -EINVAL; 3206 3207 if (!path_mounted(path)) 3208 return -EINVAL; 3209 3210 if (!can_change_locked_flags(mnt, mnt_flags)) 3211 return -EPERM; 3212 3213 /* 3214 * We're only checking whether the superblock is read-only not 3215 * changing it, so only take down_read(&sb->s_umount). 3216 */ 3217 down_read(&sb->s_umount); 3218 lock_mount_hash(); 3219 ret = change_mount_ro_state(mnt, mnt_flags); 3220 if (ret == 0) 3221 set_mount_attributes(mnt, mnt_flags); 3222 unlock_mount_hash(); 3223 up_read(&sb->s_umount); 3224 3225 mnt_warn_timestamp_expiry(path, &mnt->mnt); 3226 3227 return ret; 3228 } 3229 3230 /* 3231 * change filesystem flags. dir should be a physical root of filesystem. 3232 * If you've mounted a non-root directory somewhere and want to do remount 3233 * on it - tough luck. 
3234 */ 3235 static int do_remount(const struct path *path, int sb_flags, 3236 int mnt_flags, void *data) 3237 { 3238 int err; 3239 struct super_block *sb = path->mnt->mnt_sb; 3240 struct mount *mnt = real_mount(path->mnt); 3241 struct fs_context *fc; 3242 3243 if (!check_mnt(mnt)) 3244 return -EINVAL; 3245 3246 if (!path_mounted(path)) 3247 return -EINVAL; 3248 3249 if (!can_change_locked_flags(mnt, mnt_flags)) 3250 return -EPERM; 3251 3252 fc = fs_context_for_reconfigure(path->dentry, sb_flags, MS_RMT_MASK); 3253 if (IS_ERR(fc)) 3254 return PTR_ERR(fc); 3255 3256 /* 3257 * Indicate to the filesystem that the remount request is coming 3258 * from the legacy mount system call. 3259 */ 3260 fc->oldapi = true; 3261 3262 err = parse_monolithic_mount_data(fc, data); 3263 if (!err) { 3264 down_write(&sb->s_umount); 3265 err = -EPERM; 3266 if (ns_capable(sb->s_user_ns, CAP_SYS_ADMIN)) { 3267 err = reconfigure_super(fc); 3268 if (!err) { 3269 lock_mount_hash(); 3270 set_mount_attributes(mnt, mnt_flags); 3271 unlock_mount_hash(); 3272 } 3273 } 3274 up_write(&sb->s_umount); 3275 } 3276 3277 mnt_warn_timestamp_expiry(path, &mnt->mnt); 3278 3279 put_fs_context(fc); 3280 return err; 3281 } 3282 3283 static inline int tree_contains_unbindable(struct mount *mnt) 3284 { 3285 struct mount *p; 3286 for (p = mnt; p; p = next_mnt(p, mnt)) { 3287 if (IS_MNT_UNBINDABLE(p)) 3288 return 1; 3289 } 3290 return 0; 3291 } 3292 3293 static int do_set_group(const struct path *from_path, const struct path *to_path) 3294 { 3295 struct mount *from = real_mount(from_path->mnt); 3296 struct mount *to = real_mount(to_path->mnt); 3297 int err; 3298 3299 guard(namespace_excl)(); 3300 3301 err = may_change_propagation(from); 3302 if (err) 3303 return err; 3304 err = may_change_propagation(to); 3305 if (err) 3306 return err; 3307 3308 /* To and From paths should be mount roots */ 3309 if (!path_mounted(from_path)) 3310 return -EINVAL; 3311 if (!path_mounted(to_path)) 3312 return -EINVAL; 3313 3314 /* Setting sharing groups is only allowed across same superblock */ 3315 if (from->mnt.mnt_sb != to->mnt.mnt_sb) 3316 return -EINVAL; 3317 3318 /* From mount root should be wider than To mount root */ 3319 if (!is_subdir(to->mnt.mnt_root, from->mnt.mnt_root)) 3320 return -EINVAL; 3321 3322 /* From mount should not have locked children in place of To's root */ 3323 if (__has_locked_children(from, to->mnt.mnt_root)) 3324 return -EINVAL; 3325 3326 /* Setting sharing groups is only allowed on private mounts */ 3327 if (IS_MNT_SHARED(to) || IS_MNT_SLAVE(to)) 3328 return -EINVAL; 3329 3330 /* From should not be private */ 3331 if (!IS_MNT_SHARED(from) && !IS_MNT_SLAVE(from)) 3332 return -EINVAL; 3333 3334 if (IS_MNT_SLAVE(from)) { 3335 hlist_add_behind(&to->mnt_slave, &from->mnt_slave); 3336 to->mnt_master = from->mnt_master; 3337 } 3338 3339 if (IS_MNT_SHARED(from)) { 3340 to->mnt_group_id = from->mnt_group_id; 3341 list_add(&to->mnt_share, &from->mnt_share); 3342 set_mnt_shared(to); 3343 } 3344 return 0; 3345 } 3346 3347 /** 3348 * path_overmounted - check if path is overmounted 3349 * @path: path to check 3350 * 3351 * Check if path is overmounted, i.e., if there's a mount on top of 3352 * @path->mnt with @path->dentry as mountpoint. 3353 * 3354 * Context: namespace_sem must be held at least shared. 3355 * MUST NOT be called under lock_mount_hash() (there one should just 3356 * call __lookup_mnt() and check if it returns NULL). 3357 * Return: If path is overmounted true is returned, false if not. 
3358 */ 3359 static inline bool path_overmounted(const struct path *path) 3360 { 3361 unsigned seq = read_seqbegin(&mount_lock); 3362 bool no_child; 3363 3364 rcu_read_lock(); 3365 no_child = !__lookup_mnt(path->mnt, path->dentry); 3366 rcu_read_unlock(); 3367 if (need_seqretry(&mount_lock, seq)) { 3368 read_seqlock_excl(&mount_lock); 3369 no_child = !__lookup_mnt(path->mnt, path->dentry); 3370 read_sequnlock_excl(&mount_lock); 3371 } 3372 return unlikely(!no_child); 3373 } 3374 3375 /* 3376 * Check if there is a possibly empty chain of descent from p1 to p2. 3377 * Locks: namespace_sem (shared) or mount_lock (read_seqlock_excl). 3378 */ 3379 static bool mount_is_ancestor(const struct mount *p1, const struct mount *p2) 3380 { 3381 while (p2 != p1 && mnt_has_parent(p2)) 3382 p2 = p2->mnt_parent; 3383 return p2 == p1; 3384 } 3385 3386 /** 3387 * can_move_mount_beneath - check that we can mount beneath the top mount 3388 * @mnt_from: mount we are trying to move 3389 * @mnt_to: mount under which to mount 3390 * @mp: mountpoint of @mnt_to 3391 * 3392 * - Make sure that nothing can be mounted beneath the caller's current 3393 * root or the rootfs of the namespace. 3394 * - Make sure that the caller can unmount the topmost mount ensuring 3395 * that the caller could reveal the underlying mountpoint. 3396 * - Ensure that nothing has been mounted on top of @mnt_from before we 3397 * grabbed @namespace_sem to avoid creating pointless shadow mounts. 3398 * - Prevent mounting beneath a mount if the propagation relationship 3399 * between the source mount, parent mount, and top mount would lead to 3400 * nonsensical mount trees. 3401 * 3402 * Context: This function expects namespace_lock() to be held. 3403 * Return: On success 0, and on error a negative error code is returned. 3404 */ 3405 static int can_move_mount_beneath(const struct mount *mnt_from, 3406 const struct mount *mnt_to, 3407 const struct mountpoint *mp) 3408 { 3409 struct mount *parent_mnt_to = mnt_to->mnt_parent; 3410 3411 if (IS_MNT_LOCKED(mnt_to)) 3412 return -EINVAL; 3413 3414 /* Avoid creating shadow mounts during mount propagation. */ 3415 if (mnt_from->overmount) 3416 return -EINVAL; 3417 3418 /* 3419 * Mounting beneath the rootfs only makes sense when the 3420 * semantics of pivot_root(".", ".") are used. 3421 */ 3422 if (&mnt_to->mnt == current->fs->root.mnt) 3423 return -EINVAL; 3424 if (parent_mnt_to == current->nsproxy->mnt_ns->root) 3425 return -EINVAL; 3426 3427 if (mount_is_ancestor(mnt_to, mnt_from)) 3428 return -EINVAL; 3429 3430 /* 3431 * If the parent mount propagates to the child mount this would 3432 * mean mounting @mnt_from on @mnt_to->mnt_parent and then 3433 * propagating a copy @c of @mnt_from on top of @mnt_to. This 3434 * defeats the whole purpose of mounting beneath another mount. 3435 */ 3436 if (propagation_would_overmount(parent_mnt_to, mnt_to, mp)) 3437 return -EINVAL; 3438 3439 /* 3440 * If @mnt_to->mnt_parent propagates to @mnt_from this would 3441 * mean propagating a copy @c of @mnt_from on top of @mnt_from. 3442 * Afterwards @mnt_from would be mounted on top of 3443 * @mnt_to->mnt_parent and @mnt_to would be unmounted from 3444 * @mnt->mnt_parent and remounted on @mnt_from. But since @c is 3445 * already mounted on @mnt_from, @mnt_to would ultimately be 3446 * remounted on top of @c. Afterwards, @mnt_from would be 3447 * covered by a copy @c of @mnt_from and @c would be covered by 3448 * @mnt_from itself. This defeats the whole purpose of mounting 3449 * @mnt_from beneath @mnt_to. 
3450 */ 3451 if (check_mnt(mnt_from) && 3452 propagation_would_overmount(parent_mnt_to, mnt_from, mp)) 3453 return -EINVAL; 3454 3455 return 0; 3456 } 3457 3458 /* may_use_mount() - check if a mount tree can be used 3459 * @mnt: vfsmount to be used 3460 * 3461 * This helper checks if the caller may use the mount tree starting 3462 * from @path->mnt. The caller may use the mount tree under the 3463 * following circumstances: 3464 * 3465 * (1) The caller is located in the mount namespace of the mount tree. 3466 * This also implies that the mount does not belong to an anonymous 3467 * mount namespace. 3468 * (2) The caller is trying to use a mount tree that belongs to an 3469 * anonymous mount namespace. 3470 * 3471 * For that to be safe, this helper enforces that the origin mount 3472 * namespace the anonymous mount namespace was created from is the 3473 * same as the caller's mount namespace by comparing the sequence 3474 * numbers. 3475 * 3476 * The ownership of a non-anonymous mount namespace such as the 3477 * caller's cannot change. 3478 * => We know that the caller's mount namespace is stable. 3479 * 3480 * If the origin sequence number of the anonymous mount namespace is 3481 * the same as the sequence number of the caller's mount namespace. 3482 * => The owning namespaces are the same. 3483 * 3484 * ==> The earlier capability check on the owning namespace of the 3485 * caller's mount namespace ensures that the caller has the 3486 * ability to use the mount tree. 3487 * 3488 * Returns true if the mount tree can be used, false otherwise. 3489 */ 3490 static inline bool may_use_mount(struct mount *mnt) 3491 { 3492 if (check_mnt(mnt)) 3493 return true; 3494 3495 /* 3496 * Make sure that noone unmounted the target path or somehow 3497 * managed to get their hands on something purely kernel 3498 * internal. 3499 */ 3500 if (!is_mounted(&mnt->mnt)) 3501 return false; 3502 3503 return check_anonymous_mnt(mnt); 3504 } 3505 3506 static int do_move_mount(const struct path *old_path, 3507 const struct path *new_path, 3508 enum mnt_tree_flags_t flags) 3509 { 3510 struct mount *old = real_mount(old_path->mnt); 3511 int err; 3512 bool beneath = flags & MNT_TREE_BENEATH; 3513 3514 if (!path_mounted(old_path)) 3515 return -EINVAL; 3516 3517 if (d_is_dir(new_path->dentry) != d_is_dir(old_path->dentry)) 3518 return -EINVAL; 3519 3520 LOCK_MOUNT_MAYBE_BENEATH(mp, new_path, beneath); 3521 if (IS_ERR(mp.parent)) 3522 return PTR_ERR(mp.parent); 3523 3524 if (check_mnt(old)) { 3525 /* if the source is in our namespace... */ 3526 /* ... it should be detachable from parent */ 3527 if (!mnt_has_parent(old) || IS_MNT_LOCKED(old)) 3528 return -EINVAL; 3529 /* ... which should not be shared */ 3530 if (IS_MNT_SHARED(old->mnt_parent)) 3531 return -EINVAL; 3532 /* ... and the target should be in our namespace */ 3533 if (!check_mnt(mp.parent)) 3534 return -EINVAL; 3535 } else { 3536 /* 3537 * otherwise the source must be the root of some anon namespace. 3538 */ 3539 if (!anon_ns_root(old)) 3540 return -EINVAL; 3541 /* 3542 * Bail out early if the target is within the same namespace - 3543 * subsequent checks would've rejected that, but they lose 3544 * some corner cases if we check it early. 3545 */ 3546 if (old->mnt_ns == mp.parent->mnt_ns) 3547 return -EINVAL; 3548 /* 3549 * Target should be either in our namespace or in an acceptable 3550 * anon namespace, sensu check_anonymous_mnt(). 
3551 */ 3552 if (!may_use_mount(mp.parent)) 3553 return -EINVAL; 3554 } 3555 3556 if (beneath) { 3557 struct mount *over = real_mount(new_path->mnt); 3558 3559 if (mp.parent != over->mnt_parent) 3560 over = mp.parent->overmount; 3561 err = can_move_mount_beneath(old, over, mp.mp); 3562 if (err) 3563 return err; 3564 } 3565 3566 /* 3567 * Don't move a mount tree containing unbindable mounts to a destination 3568 * mount which is shared. 3569 */ 3570 if (IS_MNT_SHARED(mp.parent) && tree_contains_unbindable(old)) 3571 return -EINVAL; 3572 if (!check_for_nsfs_mounts(old)) 3573 return -ELOOP; 3574 if (mount_is_ancestor(old, mp.parent)) 3575 return -ELOOP; 3576 3577 return attach_recursive_mnt(old, &mp); 3578 } 3579 3580 static int do_move_mount_old(const struct path *path, const char *old_name) 3581 { 3582 struct path old_path __free(path_put) = {}; 3583 int err; 3584 3585 if (!old_name || !*old_name) 3586 return -EINVAL; 3587 3588 err = kern_path(old_name, LOOKUP_FOLLOW, &old_path); 3589 if (err) 3590 return err; 3591 3592 return do_move_mount(&old_path, path, 0); 3593 } 3594 3595 /* 3596 * add a mount into a namespace's mount tree 3597 */ 3598 static int do_add_mount(struct mount *newmnt, const struct pinned_mountpoint *mp, 3599 int mnt_flags) 3600 { 3601 struct mount *parent = mp->parent; 3602 3603 if (IS_ERR(parent)) 3604 return PTR_ERR(parent); 3605 3606 mnt_flags &= ~MNT_INTERNAL_FLAGS; 3607 3608 if (unlikely(!check_mnt(parent))) { 3609 /* that's acceptable only for automounts done in private ns */ 3610 if (!(mnt_flags & MNT_SHRINKABLE)) 3611 return -EINVAL; 3612 /* ... and for those we'd better have mountpoint still alive */ 3613 if (!parent->mnt_ns) 3614 return -EINVAL; 3615 } 3616 3617 /* Refuse the same filesystem on the same mount point */ 3618 if (parent->mnt.mnt_sb == newmnt->mnt.mnt_sb && 3619 parent->mnt.mnt_root == mp->mp->m_dentry) 3620 return -EBUSY; 3621 3622 if (d_is_symlink(newmnt->mnt.mnt_root)) 3623 return -EINVAL; 3624 3625 newmnt->mnt.mnt_flags = mnt_flags; 3626 return graft_tree(newmnt, mp); 3627 } 3628 3629 static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags); 3630 3631 /* 3632 * Create a new mount using a superblock configuration and request it 3633 * be added to the namespace tree. 
3634 */
3635 static int do_new_mount_fc(struct fs_context *fc, const struct path *mountpoint,
3636 unsigned int mnt_flags)
3637 {
3638 struct super_block *sb;
3639 struct vfsmount *mnt __free(mntput) = fc_mount(fc);
3640 int error;
3641
3642 if (IS_ERR(mnt))
3643 return PTR_ERR(mnt);
3644
3645 sb = fc->root->d_sb;
3646 error = security_sb_kern_mount(sb);
3647 if (unlikely(error))
3648 return error;
3649
3650 if (unlikely(mount_too_revealing(sb, &mnt_flags))) {
3651 errorfcp(fc, "VFS", "Mount too revealing");
3652 return -EPERM;
3653 }
3654
3655 mnt_warn_timestamp_expiry(mountpoint, mnt);
3656
3657 LOCK_MOUNT(mp, mountpoint);
3658 error = do_add_mount(real_mount(mnt), &mp, mnt_flags);
3659 if (!error)
3660 retain_and_null_ptr(mnt); // consumed on success
3661 return error;
3662 }
3663
3664 /*
3665 * create a new mount for userspace and request it to be added into the
3666 * namespace's tree
3667 */
3668 static int do_new_mount(const struct path *path, const char *fstype,
3669 int sb_flags, int mnt_flags,
3670 const char *name, void *data)
3671 {
3672 struct file_system_type *type;
3673 struct fs_context *fc;
3674 const char *subtype = NULL;
3675 int err = 0;
3676
3677 if (!fstype)
3678 return -EINVAL;
3679
3680 type = get_fs_type(fstype);
3681 if (!type)
3682 return -ENODEV;
3683
3684 if (type->fs_flags & FS_HAS_SUBTYPE) {
3685 subtype = strchr(fstype, '.');
3686 if (subtype) {
3687 subtype++;
3688 if (!*subtype) {
3689 put_filesystem(type);
3690 return -EINVAL;
3691 }
3692 }
3693 }
3694
3695 fc = fs_context_for_mount(type, sb_flags);
3696 put_filesystem(type);
3697 if (IS_ERR(fc))
3698 return PTR_ERR(fc);
3699
3700 /*
3701 * Indicate to the filesystem that the mount request is coming
3702 * from the legacy mount system call.
3703 */
3704 fc->oldapi = true;
3705
3706 if (subtype)
3707 err = vfs_parse_fs_string(fc, "subtype", subtype);
3708 if (!err && name)
3709 err = vfs_parse_fs_string(fc, "source", name);
3710 if (!err)
3711 err = parse_monolithic_mount_data(fc, data);
3712 if (!err && !mount_capable(fc))
3713 err = -EPERM;
3714 if (!err)
3715 err = do_new_mount_fc(fc, path, mnt_flags);
3716
3717 put_fs_context(fc);
3718 return err;
3719 }
3720
3721 static void lock_mount_exact(const struct path *path,
3722 struct pinned_mountpoint *mp)
3723 {
3724 struct dentry *dentry = path->dentry;
3725 int err;
3726
3727 inode_lock(dentry->d_inode);
3728 namespace_lock();
3729 if (unlikely(cant_mount(dentry)))
3730 err = -ENOENT;
3731 else if (path_overmounted(path))
3732 err = -EBUSY;
3733 else
3734 err = get_mountpoint(dentry, mp);
3735 if (unlikely(err)) {
3736 namespace_unlock();
3737 inode_unlock(dentry->d_inode);
3738 mp->parent = ERR_PTR(err);
3739 } else {
3740 mp->parent = real_mount(path->mnt);
3741 }
3742 }
3743
3744 int finish_automount(struct vfsmount *__m, const struct path *path)
3745 {
3746 struct vfsmount *m __free(mntput) = __m;
3747 struct mount *mnt;
3748 int err;
3749
3750 if (!m)
3751 return 0;
3752 if (IS_ERR(m))
3753 return PTR_ERR(m);
3754
3755 mnt = real_mount(m);
3756
3757 if (m->mnt_root == path->dentry)
3758 return -ELOOP;
3759
3760 /*
3761 * we don't want to use LOCK_MOUNT() - in this case finding something
3762 * that overmounts our mountpoint means "quietly drop what we've
3763 * got", not "try to mount it on top".
3764 */ 3765 LOCK_MOUNT_EXACT(mp, path); 3766 if (mp.parent == ERR_PTR(-EBUSY)) 3767 return 0; 3768 3769 err = do_add_mount(mnt, &mp, path->mnt->mnt_flags | MNT_SHRINKABLE); 3770 if (likely(!err)) 3771 retain_and_null_ptr(m); 3772 return err; 3773 } 3774 3775 /** 3776 * mnt_set_expiry - Put a mount on an expiration list 3777 * @mnt: The mount to list. 3778 * @expiry_list: The list to add the mount to. 3779 */ 3780 void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list) 3781 { 3782 guard(mount_locked_reader)(); 3783 list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list); 3784 } 3785 EXPORT_SYMBOL(mnt_set_expiry); 3786 3787 /* 3788 * process a list of expirable mountpoints with the intent of discarding any 3789 * mountpoints that aren't in use and haven't been touched since last we came 3790 * here 3791 */ 3792 void mark_mounts_for_expiry(struct list_head *mounts) 3793 { 3794 struct mount *mnt, *next; 3795 LIST_HEAD(graveyard); 3796 3797 if (list_empty(mounts)) 3798 return; 3799 3800 guard(namespace_excl)(); 3801 guard(mount_writer)(); 3802 3803 /* extract from the expiration list every vfsmount that matches the 3804 * following criteria: 3805 * - already mounted 3806 * - only referenced by its parent vfsmount 3807 * - still marked for expiry (marked on the last call here; marks are 3808 * cleared by mntput()) 3809 */ 3810 list_for_each_entry_safe(mnt, next, mounts, mnt_expire) { 3811 if (!is_mounted(&mnt->mnt)) 3812 continue; 3813 if (!xchg(&mnt->mnt_expiry_mark, 1) || 3814 propagate_mount_busy(mnt, 1)) 3815 continue; 3816 list_move(&mnt->mnt_expire, &graveyard); 3817 } 3818 while (!list_empty(&graveyard)) { 3819 mnt = list_first_entry(&graveyard, struct mount, mnt_expire); 3820 touch_mnt_namespace(mnt->mnt_ns); 3821 umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC); 3822 } 3823 } 3824 3825 EXPORT_SYMBOL_GPL(mark_mounts_for_expiry); 3826 3827 /* 3828 * Ripoff of 'select_parent()' 3829 * 3830 * search the list of submounts for a given mountpoint, and move any 3831 * shrinkable submounts to the 'graveyard' list. 3832 */ 3833 static int select_submounts(struct mount *parent, struct list_head *graveyard) 3834 { 3835 struct mount *this_parent = parent; 3836 struct list_head *next; 3837 int found = 0; 3838 3839 repeat: 3840 next = this_parent->mnt_mounts.next; 3841 resume: 3842 while (next != &this_parent->mnt_mounts) { 3843 struct list_head *tmp = next; 3844 struct mount *mnt = list_entry(tmp, struct mount, mnt_child); 3845 3846 next = tmp->next; 3847 if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE)) 3848 continue; 3849 /* 3850 * Descend a level if the d_mounts list is non-empty. 3851 */ 3852 if (!list_empty(&mnt->mnt_mounts)) { 3853 this_parent = mnt; 3854 goto repeat; 3855 } 3856 3857 if (!propagate_mount_busy(mnt, 1)) { 3858 list_move_tail(&mnt->mnt_expire, graveyard); 3859 found++; 3860 } 3861 } 3862 /* 3863 * All done at this level ... 
ascend and resume the search 3864 */ 3865 if (this_parent != parent) { 3866 next = this_parent->mnt_child.next; 3867 this_parent = this_parent->mnt_parent; 3868 goto resume; 3869 } 3870 return found; 3871 } 3872 3873 /* 3874 * process a list of expirable mountpoints with the intent of discarding any 3875 * submounts of a specific parent mountpoint 3876 * 3877 * mount_lock must be held for write 3878 */ 3879 static void shrink_submounts(struct mount *mnt) 3880 { 3881 LIST_HEAD(graveyard); 3882 struct mount *m; 3883 3884 /* extract submounts of 'mountpoint' from the expiration list */ 3885 while (select_submounts(mnt, &graveyard)) { 3886 while (!list_empty(&graveyard)) { 3887 m = list_first_entry(&graveyard, struct mount, 3888 mnt_expire); 3889 touch_mnt_namespace(m->mnt_ns); 3890 umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC); 3891 } 3892 } 3893 } 3894 3895 static void *copy_mount_options(const void __user * data) 3896 { 3897 char *copy; 3898 unsigned left, offset; 3899 3900 if (!data) 3901 return NULL; 3902 3903 copy = kmalloc(PAGE_SIZE, GFP_KERNEL); 3904 if (!copy) 3905 return ERR_PTR(-ENOMEM); 3906 3907 left = copy_from_user(copy, data, PAGE_SIZE); 3908 3909 /* 3910 * Not all architectures have an exact copy_from_user(). Resort to 3911 * byte at a time. 3912 */ 3913 offset = PAGE_SIZE - left; 3914 while (left) { 3915 char c; 3916 if (get_user(c, (const char __user *)data + offset)) 3917 break; 3918 copy[offset] = c; 3919 left--; 3920 offset++; 3921 } 3922 3923 if (left == PAGE_SIZE) { 3924 kfree(copy); 3925 return ERR_PTR(-EFAULT); 3926 } 3927 3928 return copy; 3929 } 3930 3931 static char *copy_mount_string(const void __user *data) 3932 { 3933 return data ? strndup_user(data, PATH_MAX) : NULL; 3934 } 3935 3936 /* 3937 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to 3938 * be given to the mount() call (ie: read-only, no-dev, no-suid etc). 3939 * 3940 * data is a (void *) that can point to any structure up to 3941 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent 3942 * information (or be NULL). 3943 * 3944 * Pre-0.97 versions of mount() didn't have a flags word. 3945 * When the flags word was introduced its top half was required 3946 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9. 3947 * Therefore, if this magic number is present, it carries no information 3948 * and must be discarded. 
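 *
 * For example (informal): a legacy caller passing
 * flags == MS_MGC_VAL | MS_RDONLY (i.e. 0xc0ed0001) is treated by
 * path_mount() below exactly like a caller passing plain MS_RDONLY,
 * since the 0xc0ed0000 part matches MS_MGC_VAL and is masked away.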
3949 */ 3950 int path_mount(const char *dev_name, const struct path *path, 3951 const char *type_page, unsigned long flags, void *data_page) 3952 { 3953 unsigned int mnt_flags = 0, sb_flags; 3954 int ret; 3955 3956 /* Discard magic */ 3957 if ((flags & MS_MGC_MSK) == MS_MGC_VAL) 3958 flags &= ~MS_MGC_MSK; 3959 3960 /* Basic sanity checks */ 3961 if (data_page) 3962 ((char *)data_page)[PAGE_SIZE - 1] = 0; 3963 3964 if (flags & MS_NOUSER) 3965 return -EINVAL; 3966 3967 ret = security_sb_mount(dev_name, path, type_page, flags, data_page); 3968 if (ret) 3969 return ret; 3970 if (!may_mount()) 3971 return -EPERM; 3972 if (flags & SB_MANDLOCK) 3973 warn_mandlock(); 3974 3975 /* Default to relatime unless overridden */ 3976 if (!(flags & MS_NOATIME)) 3977 mnt_flags |= MNT_RELATIME; 3978 3979 /* Separate the per-mountpoint flags */ 3980 if (flags & MS_NOSUID) 3981 mnt_flags |= MNT_NOSUID; 3982 if (flags & MS_NODEV) 3983 mnt_flags |= MNT_NODEV; 3984 if (flags & MS_NOEXEC) 3985 mnt_flags |= MNT_NOEXEC; 3986 if (flags & MS_NOATIME) 3987 mnt_flags |= MNT_NOATIME; 3988 if (flags & MS_NODIRATIME) 3989 mnt_flags |= MNT_NODIRATIME; 3990 if (flags & MS_STRICTATIME) 3991 mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME); 3992 if (flags & MS_RDONLY) 3993 mnt_flags |= MNT_READONLY; 3994 if (flags & MS_NOSYMFOLLOW) 3995 mnt_flags |= MNT_NOSYMFOLLOW; 3996 3997 /* The default atime for remount is preservation */ 3998 if ((flags & MS_REMOUNT) && 3999 ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME | 4000 MS_STRICTATIME)) == 0)) { 4001 mnt_flags &= ~MNT_ATIME_MASK; 4002 mnt_flags |= path->mnt->mnt_flags & MNT_ATIME_MASK; 4003 } 4004 4005 sb_flags = flags & (SB_RDONLY | 4006 SB_SYNCHRONOUS | 4007 SB_MANDLOCK | 4008 SB_DIRSYNC | 4009 SB_SILENT | 4010 SB_POSIXACL | 4011 SB_LAZYTIME | 4012 SB_I_VERSION); 4013 4014 if ((flags & (MS_REMOUNT | MS_BIND)) == (MS_REMOUNT | MS_BIND)) 4015 return do_reconfigure_mnt(path, mnt_flags); 4016 if (flags & MS_REMOUNT) 4017 return do_remount(path, sb_flags, mnt_flags, data_page); 4018 if (flags & MS_BIND) 4019 return do_loopback(path, dev_name, flags & MS_REC); 4020 if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE)) 4021 return do_change_type(path, flags); 4022 if (flags & MS_MOVE) 4023 return do_move_mount_old(path, dev_name); 4024 4025 return do_new_mount(path, type_page, sb_flags, mnt_flags, dev_name, 4026 data_page); 4027 } 4028 4029 int do_mount(const char *dev_name, const char __user *dir_name, 4030 const char *type_page, unsigned long flags, void *data_page) 4031 { 4032 struct path path __free(path_put) = {}; 4033 int ret; 4034 4035 ret = user_path_at(AT_FDCWD, dir_name, LOOKUP_FOLLOW, &path); 4036 if (ret) 4037 return ret; 4038 return path_mount(dev_name, &path, type_page, flags, data_page); 4039 } 4040 4041 static struct ucounts *inc_mnt_namespaces(struct user_namespace *ns) 4042 { 4043 return inc_ucount(ns, current_euid(), UCOUNT_MNT_NAMESPACES); 4044 } 4045 4046 static void dec_mnt_namespaces(struct ucounts *ucounts) 4047 { 4048 dec_ucount(ucounts, UCOUNT_MNT_NAMESPACES); 4049 } 4050 4051 static void free_mnt_ns(struct mnt_namespace *ns) 4052 { 4053 if (!is_anon_ns(ns)) 4054 ns_common_free(ns); 4055 dec_mnt_namespaces(ns->ucounts); 4056 mnt_ns_tree_remove(ns); 4057 } 4058 4059 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns, bool anon) 4060 { 4061 struct mnt_namespace *new_ns; 4062 struct ucounts *ucounts; 4063 int ret; 4064 4065 ucounts = inc_mnt_namespaces(user_ns); 4066 if (!ucounts) 4067 return ERR_PTR(-ENOSPC); 4068 4069 new_ns =
kzalloc(sizeof(struct mnt_namespace), GFP_KERNEL_ACCOUNT); 4070 if (!new_ns) { 4071 dec_mnt_namespaces(ucounts); 4072 return ERR_PTR(-ENOMEM); 4073 } 4074 4075 if (anon) 4076 ret = ns_common_init_inum(new_ns, MNT_NS_ANON_INO); 4077 else 4078 ret = ns_common_init(new_ns); 4079 if (ret) { 4080 kfree(new_ns); 4081 dec_mnt_namespaces(ucounts); 4082 return ERR_PTR(ret); 4083 } 4084 if (!anon) 4085 ns_tree_gen_id(&new_ns->ns); 4086 refcount_set(&new_ns->passive, 1); 4087 new_ns->mounts = RB_ROOT; 4088 init_waitqueue_head(&new_ns->poll); 4089 new_ns->user_ns = get_user_ns(user_ns); 4090 new_ns->ucounts = ucounts; 4091 return new_ns; 4092 } 4093 4094 __latent_entropy 4095 struct mnt_namespace *copy_mnt_ns(u64 flags, struct mnt_namespace *ns, 4096 struct user_namespace *user_ns, struct fs_struct *new_fs) 4097 { 4098 struct mnt_namespace *new_ns; 4099 struct vfsmount *rootmnt __free(mntput) = NULL; 4100 struct vfsmount *pwdmnt __free(mntput) = NULL; 4101 struct mount *p, *q; 4102 struct mount *old; 4103 struct mount *new; 4104 int copy_flags; 4105 4106 BUG_ON(!ns); 4107 4108 if (likely(!(flags & CLONE_NEWNS))) { 4109 get_mnt_ns(ns); 4110 return ns; 4111 } 4112 4113 old = ns->root; 4114 4115 new_ns = alloc_mnt_ns(user_ns, false); 4116 if (IS_ERR(new_ns)) 4117 return new_ns; 4118 4119 guard(namespace_excl)(); 4120 /* First pass: copy the tree topology */ 4121 copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE; 4122 if (user_ns != ns->user_ns) 4123 copy_flags |= CL_SLAVE; 4124 new = copy_tree(old, old->mnt.mnt_root, copy_flags); 4125 if (IS_ERR(new)) { 4126 emptied_ns = new_ns; 4127 return ERR_CAST(new); 4128 } 4129 if (user_ns != ns->user_ns) { 4130 guard(mount_writer)(); 4131 lock_mnt_tree(new); 4132 } 4133 new_ns->root = new; 4134 4135 /* 4136 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts 4137 * as belonging to new namespace. We have already acquired a private 4138 * fs_struct, so tsk->fs->lock is not needed. 4139 */ 4140 p = old; 4141 q = new; 4142 while (p) { 4143 mnt_add_to_ns(new_ns, q); 4144 new_ns->nr_mounts++; 4145 if (new_fs) { 4146 if (&p->mnt == new_fs->root.mnt) { 4147 new_fs->root.mnt = mntget(&q->mnt); 4148 rootmnt = &p->mnt; 4149 } 4150 if (&p->mnt == new_fs->pwd.mnt) { 4151 new_fs->pwd.mnt = mntget(&q->mnt); 4152 pwdmnt = &p->mnt; 4153 } 4154 } 4155 p = next_mnt(p, old); 4156 q = next_mnt(q, new); 4157 if (!q) 4158 break; 4159 // an mntns binding we'd skipped? 4160 while (p->mnt.mnt_root != q->mnt.mnt_root) 4161 p = next_mnt(skip_mnt_tree(p), old); 4162 } 4163 ns_tree_add_raw(new_ns); 4164 return new_ns; 4165 } 4166 4167 struct dentry *mount_subtree(struct vfsmount *m, const char *name) 4168 { 4169 struct mount *mnt = real_mount(m); 4170 struct mnt_namespace *ns; 4171 struct super_block *s; 4172 struct path path; 4173 int err; 4174 4175 ns = alloc_mnt_ns(&init_user_ns, true); 4176 if (IS_ERR(ns)) { 4177 mntput(m); 4178 return ERR_CAST(ns); 4179 } 4180 ns->root = mnt; 4181 ns->nr_mounts++; 4182 mnt_add_to_ns(ns, mnt); 4183 4184 err = vfs_path_lookup(m->mnt_root, m, 4185 name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path); 4186 4187 put_mnt_ns(ns); 4188 4189 if (err) 4190 return ERR_PTR(err); 4191 4192 /* trade a vfsmount reference for active sb one */ 4193 s = path.mnt->mnt_sb; 4194 atomic_inc(&s->s_active); 4195 mntput(path.mnt); 4196 /* lock the sucker */ 4197 down_write(&s->s_umount); 4198 /* ... 
and return the root of (sub)tree on it */ 4199 return path.dentry; 4200 } 4201 EXPORT_SYMBOL(mount_subtree); 4202 4203 SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name, 4204 char __user *, type, unsigned long, flags, void __user *, data) 4205 { 4206 int ret; 4207 char *kernel_type; 4208 char *kernel_dev; 4209 void *options; 4210 4211 kernel_type = copy_mount_string(type); 4212 ret = PTR_ERR(kernel_type); 4213 if (IS_ERR(kernel_type)) 4214 goto out_type; 4215 4216 kernel_dev = copy_mount_string(dev_name); 4217 ret = PTR_ERR(kernel_dev); 4218 if (IS_ERR(kernel_dev)) 4219 goto out_dev; 4220 4221 options = copy_mount_options(data); 4222 ret = PTR_ERR(options); 4223 if (IS_ERR(options)) 4224 goto out_data; 4225 4226 ret = do_mount(kernel_dev, dir_name, kernel_type, flags, options); 4227 4228 kfree(options); 4229 out_data: 4230 kfree(kernel_dev); 4231 out_dev: 4232 kfree(kernel_type); 4233 out_type: 4234 return ret; 4235 } 4236 4237 #define FSMOUNT_VALID_FLAGS \ 4238 (MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOSUID | MOUNT_ATTR_NODEV | \ 4239 MOUNT_ATTR_NOEXEC | MOUNT_ATTR__ATIME | MOUNT_ATTR_NODIRATIME | \ 4240 MOUNT_ATTR_NOSYMFOLLOW) 4241 4242 #define MOUNT_SETATTR_VALID_FLAGS (FSMOUNT_VALID_FLAGS | MOUNT_ATTR_IDMAP) 4243 4244 #define MOUNT_SETATTR_PROPAGATION_FLAGS \ 4245 (MS_UNBINDABLE | MS_PRIVATE | MS_SLAVE | MS_SHARED) 4246 4247 static unsigned int attr_flags_to_mnt_flags(u64 attr_flags) 4248 { 4249 unsigned int mnt_flags = 0; 4250 4251 if (attr_flags & MOUNT_ATTR_RDONLY) 4252 mnt_flags |= MNT_READONLY; 4253 if (attr_flags & MOUNT_ATTR_NOSUID) 4254 mnt_flags |= MNT_NOSUID; 4255 if (attr_flags & MOUNT_ATTR_NODEV) 4256 mnt_flags |= MNT_NODEV; 4257 if (attr_flags & MOUNT_ATTR_NOEXEC) 4258 mnt_flags |= MNT_NOEXEC; 4259 if (attr_flags & MOUNT_ATTR_NODIRATIME) 4260 mnt_flags |= MNT_NODIRATIME; 4261 if (attr_flags & MOUNT_ATTR_NOSYMFOLLOW) 4262 mnt_flags |= MNT_NOSYMFOLLOW; 4263 4264 return mnt_flags; 4265 } 4266 4267 /* 4268 * Create a kernel mount representation for a new, prepared superblock 4269 * (specified by fs_fd) and attach to an open_tree-like file descriptor. 
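 *
 * Illustrative userspace sketch of the intended flow (not part of this
 * file; assumes raw syscall() wrappers from <sys/syscall.h> and the
 * FSOPEN_*/FSCONFIG_*/FSMOUNT_*/MOVE_MOUNT_* constants from
 * <linux/mount.h>; filesystem type, options and target path are
 * placeholders, error handling omitted):
 *
 *	int fs_fd = syscall(SYS_fsopen, "tmpfs", FSOPEN_CLOEXEC);
 *	syscall(SYS_fsconfig, fs_fd, FSCONFIG_SET_STRING, "size", "16M", 0);
 *	syscall(SYS_fsconfig, fs_fd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
 *	int mnt_fd = syscall(SYS_fsmount, fs_fd, FSMOUNT_CLOEXEC,
 *			     MOUNT_ATTR_NOSUID | MOUNT_ATTR_NODEV);
 *	syscall(SYS_move_mount, mnt_fd, "", AT_FDCWD, "/mnt/scratch",
 *		MOVE_MOUNT_F_EMPTY_PATH);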
4270 */ 4271 SYSCALL_DEFINE3(fsmount, int, fs_fd, unsigned int, flags, 4272 unsigned int, attr_flags) 4273 { 4274 struct path new_path __free(path_put) = {}; 4275 struct mnt_namespace *ns; 4276 struct fs_context *fc; 4277 struct vfsmount *new_mnt; 4278 struct mount *mnt; 4279 unsigned int mnt_flags = 0; 4280 long ret; 4281 4282 if (!may_mount()) 4283 return -EPERM; 4284 4285 if ((flags & ~(FSMOUNT_CLOEXEC)) != 0) 4286 return -EINVAL; 4287 4288 if (attr_flags & ~FSMOUNT_VALID_FLAGS) 4289 return -EINVAL; 4290 4291 mnt_flags = attr_flags_to_mnt_flags(attr_flags); 4292 4293 switch (attr_flags & MOUNT_ATTR__ATIME) { 4294 case MOUNT_ATTR_STRICTATIME: 4295 break; 4296 case MOUNT_ATTR_NOATIME: 4297 mnt_flags |= MNT_NOATIME; 4298 break; 4299 case MOUNT_ATTR_RELATIME: 4300 mnt_flags |= MNT_RELATIME; 4301 break; 4302 default: 4303 return -EINVAL; 4304 } 4305 4306 CLASS(fd, f)(fs_fd); 4307 if (fd_empty(f)) 4308 return -EBADF; 4309 4310 if (fd_file(f)->f_op != &fscontext_fops) 4311 return -EINVAL; 4312 4313 fc = fd_file(f)->private_data; 4314 4315 ACQUIRE(mutex_intr, uapi_mutex)(&fc->uapi_mutex); 4316 ret = ACQUIRE_ERR(mutex_intr, &uapi_mutex); 4317 if (ret) 4318 return ret; 4319 4320 /* There must be a valid superblock or we can't mount it */ 4321 ret = -EINVAL; 4322 if (!fc->root) 4323 return ret; 4324 4325 ret = -EPERM; 4326 if (mount_too_revealing(fc->root->d_sb, &mnt_flags)) { 4327 errorfcp(fc, "VFS", "Mount too revealing"); 4328 return ret; 4329 } 4330 4331 ret = -EBUSY; 4332 if (fc->phase != FS_CONTEXT_AWAITING_MOUNT) 4333 return ret; 4334 4335 if (fc->sb_flags & SB_MANDLOCK) 4336 warn_mandlock(); 4337 4338 new_mnt = vfs_create_mount(fc); 4339 if (IS_ERR(new_mnt)) 4340 return PTR_ERR(new_mnt); 4341 new_mnt->mnt_flags = mnt_flags; 4342 4343 new_path.dentry = dget(fc->root); 4344 new_path.mnt = new_mnt; 4345 4346 /* We've done the mount bit - now move the file context into more or 4347 * less the same state as if we'd done an fspick(). We don't want to 4348 * do any memory allocation or anything like that at this point as we 4349 * don't want to have to handle any errors incurred. 4350 */ 4351 vfs_clean_context(fc); 4352 4353 ns = alloc_mnt_ns(current->nsproxy->mnt_ns->user_ns, true); 4354 if (IS_ERR(ns)) 4355 return PTR_ERR(ns); 4356 mnt = real_mount(new_path.mnt); 4357 ns->root = mnt; 4358 ns->nr_mounts = 1; 4359 mnt_add_to_ns(ns, mnt); 4360 mntget(new_path.mnt); 4361 4362 FD_PREPARE(fdf, (flags & FSMOUNT_CLOEXEC) ? O_CLOEXEC : 0, 4363 dentry_open(&new_path, O_PATH, fc->cred)); 4364 if (fdf.err) { 4365 dissolve_on_fput(new_path.mnt); 4366 return fdf.err; 4367 } 4368 4369 /* 4370 * Attach to an apparent O_PATH fd with a note that we 4371 * need to unmount it, not just simply put it. 4372 */ 4373 fd_prepare_file(fdf)->f_mode |= FMODE_NEED_UNMOUNT; 4374 return fd_publish(fdf); 4375 } 4376 4377 static inline int vfs_move_mount(const struct path *from_path, 4378 const struct path *to_path, 4379 enum mnt_tree_flags_t mflags) 4380 { 4381 int ret; 4382 4383 ret = security_move_mount(from_path, to_path); 4384 if (ret) 4385 return ret; 4386 4387 if (mflags & MNT_TREE_PROPAGATION) 4388 return do_set_group(from_path, to_path); 4389 4390 return do_move_mount(from_path, to_path, mflags); 4391 } 4392 4393 /* 4394 * Move a mount from one place to another. In combination with 4395 * fsopen()/fsmount() this is used to install a new mount and in combination 4396 * with open_tree(OPEN_TREE_CLONE [| AT_RECURSIVE]) it can be used to copy 4397 * a mount subtree. 
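 *
 * Illustrative sketch of the copy case (assumes raw syscall() wrappers and
 * the OPEN_TREE_*/MOVE_MOUNT_* constants from <linux/mount.h>; "/src" and
 * "/dst" are placeholders, error handling omitted):
 *
 *	int tree_fd = syscall(SYS_open_tree, AT_FDCWD, "/src",
 *			      OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC | AT_RECURSIVE);
 *	syscall(SYS_move_mount, tree_fd, "", AT_FDCWD, "/dst",
 *		MOVE_MOUNT_F_EMPTY_PATH);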
4398 * 4399 * Note the flags value is a combination of MOVE_MOUNT_* flags. 4400 */ 4401 SYSCALL_DEFINE5(move_mount, 4402 int, from_dfd, const char __user *, from_pathname, 4403 int, to_dfd, const char __user *, to_pathname, 4404 unsigned int, flags) 4405 { 4406 struct path to_path __free(path_put) = {}; 4407 struct path from_path __free(path_put) = {}; 4408 struct filename *to_name __free(putname) = NULL; 4409 struct filename *from_name __free(putname) = NULL; 4410 unsigned int lflags, uflags; 4411 enum mnt_tree_flags_t mflags = 0; 4412 int ret = 0; 4413 4414 if (!may_mount()) 4415 return -EPERM; 4416 4417 if (flags & ~MOVE_MOUNT__MASK) 4418 return -EINVAL; 4419 4420 if ((flags & (MOVE_MOUNT_BENEATH | MOVE_MOUNT_SET_GROUP)) == 4421 (MOVE_MOUNT_BENEATH | MOVE_MOUNT_SET_GROUP)) 4422 return -EINVAL; 4423 4424 if (flags & MOVE_MOUNT_SET_GROUP) mflags |= MNT_TREE_PROPAGATION; 4425 if (flags & MOVE_MOUNT_BENEATH) mflags |= MNT_TREE_BENEATH; 4426 4427 uflags = 0; 4428 if (flags & MOVE_MOUNT_T_EMPTY_PATH) 4429 uflags = AT_EMPTY_PATH; 4430 4431 to_name = getname_maybe_null(to_pathname, uflags); 4432 if (IS_ERR(to_name)) 4433 return PTR_ERR(to_name); 4434 4435 if (!to_name && to_dfd >= 0) { 4436 CLASS(fd_raw, f_to)(to_dfd); 4437 if (fd_empty(f_to)) 4438 return -EBADF; 4439 4440 to_path = fd_file(f_to)->f_path; 4441 path_get(&to_path); 4442 } else { 4443 lflags = 0; 4444 if (flags & MOVE_MOUNT_T_SYMLINKS) 4445 lflags |= LOOKUP_FOLLOW; 4446 if (flags & MOVE_MOUNT_T_AUTOMOUNTS) 4447 lflags |= LOOKUP_AUTOMOUNT; 4448 ret = filename_lookup(to_dfd, to_name, lflags, &to_path, NULL); 4449 if (ret) 4450 return ret; 4451 } 4452 4453 uflags = 0; 4454 if (flags & MOVE_MOUNT_F_EMPTY_PATH) 4455 uflags = AT_EMPTY_PATH; 4456 4457 from_name = getname_maybe_null(from_pathname, uflags); 4458 if (IS_ERR(from_name)) 4459 return PTR_ERR(from_name); 4460 4461 if (!from_name && from_dfd >= 0) { 4462 CLASS(fd_raw, f_from)(from_dfd); 4463 if (fd_empty(f_from)) 4464 return -EBADF; 4465 4466 return vfs_move_mount(&fd_file(f_from)->f_path, &to_path, mflags); 4467 } 4468 4469 lflags = 0; 4470 if (flags & MOVE_MOUNT_F_SYMLINKS) 4471 lflags |= LOOKUP_FOLLOW; 4472 if (flags & MOVE_MOUNT_F_AUTOMOUNTS) 4473 lflags |= LOOKUP_AUTOMOUNT; 4474 ret = filename_lookup(from_dfd, from_name, lflags, &from_path, NULL); 4475 if (ret) 4476 return ret; 4477 4478 return vfs_move_mount(&from_path, &to_path, mflags); 4479 } 4480 4481 /* 4482 * Return true if path is reachable from root 4483 * 4484 * locks: mount_locked_reader || namespace_shared && is_mounted(mnt) 4485 */ 4486 bool is_path_reachable(struct mount *mnt, struct dentry *dentry, 4487 const struct path *root) 4488 { 4489 while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) { 4490 dentry = mnt->mnt_mountpoint; 4491 mnt = mnt->mnt_parent; 4492 } 4493 return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry); 4494 } 4495 4496 bool path_is_under(const struct path *path1, const struct path *path2) 4497 { 4498 guard(mount_locked_reader)(); 4499 return is_path_reachable(real_mount(path1->mnt), path1->dentry, path2); 4500 } 4501 EXPORT_SYMBOL(path_is_under); 4502 4503 /* 4504 * pivot_root Semantics: 4505 * Moves the root file system of the current process to the directory put_old, 4506 * makes new_root as the new root file system of the current process, and sets 4507 * root/cwd of all processes which had them on the current root to new_root. 
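 *
 * Illustrative sequence, e.g. when entering a freshly prepared container
 * root (sketch only; "/my/newroot" is a placeholder, pivot_root has no
 * glibc wrapper so a raw syscall() is used, error handling omitted):
 *
 *	mount("/my/newroot", "/my/newroot", NULL, MS_BIND | MS_REC, NULL);
 *	chdir("/my/newroot");
 *	mkdir("oldroot", 0700);
 *	syscall(SYS_pivot_root, ".", "oldroot");
 *	chdir("/");
 *	umount2("/oldroot", MNT_DETACH);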
4508 * 4509 * Restrictions: 4510 * The new_root and put_old must be directories, and must not be on the 4511 * same file system as the current process root. The put_old must be 4512 * underneath new_root, i.e. adding a non-zero number of /.. to the string 4513 * pointed to by put_old must yield the same directory as new_root. No other 4514 * file system may be mounted on put_old. After all, new_root is a mountpoint. 4515 * 4516 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem. 4517 * See Documentation/filesystems/ramfs-rootfs-initramfs.rst for alternatives 4518 * in this situation. 4519 * 4520 * Notes: 4521 * - we don't move root/cwd if they are not at the root (reason: if something 4522 * cared enough to change them, it's probably wrong to force them elsewhere) 4523 * - it's okay to pick a root that isn't the root of a file system, e.g. 4524 * /nfs/my_root where /nfs is the mount point. It must be a mountpoint, 4525 * though, so you may need to say mount --bind /nfs/my_root /nfs/my_root 4526 * first. 4527 */ 4528 SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, 4529 const char __user *, put_old) 4530 { 4531 struct path new __free(path_put) = {}; 4532 struct path old __free(path_put) = {}; 4533 struct path root __free(path_put) = {}; 4534 struct mount *new_mnt, *root_mnt, *old_mnt, *root_parent, *ex_parent; 4535 int error; 4536 4537 if (!may_mount()) 4538 return -EPERM; 4539 4540 error = user_path_at(AT_FDCWD, new_root, 4541 LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &new); 4542 if (error) 4543 return error; 4544 4545 error = user_path_at(AT_FDCWD, put_old, 4546 LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old); 4547 if (error) 4548 return error; 4549 4550 error = security_sb_pivotroot(&old, &new); 4551 if (error) 4552 return error; 4553 4554 get_fs_root(current->fs, &root); 4555 4556 LOCK_MOUNT(old_mp, &old); 4557 old_mnt = old_mp.parent; 4558 if (IS_ERR(old_mnt)) 4559 return PTR_ERR(old_mnt); 4560 4561 new_mnt = real_mount(new.mnt); 4562 root_mnt = real_mount(root.mnt); 4563 ex_parent = new_mnt->mnt_parent; 4564 root_parent = root_mnt->mnt_parent; 4565 if (IS_MNT_SHARED(old_mnt) || 4566 IS_MNT_SHARED(ex_parent) || 4567 IS_MNT_SHARED(root_parent)) 4568 return -EINVAL; 4569 if (!check_mnt(root_mnt) || !check_mnt(new_mnt)) 4570 return -EINVAL; 4571 if (new_mnt->mnt.mnt_flags & MNT_LOCKED) 4572 return -EINVAL; 4573 if (d_unlinked(new.dentry)) 4574 return -ENOENT; 4575 if (new_mnt == root_mnt || old_mnt == root_mnt) 4576 return -EBUSY; /* loop, on the same file system */ 4577 if (!path_mounted(&root)) 4578 return -EINVAL; /* not a mountpoint */ 4579 if (!mnt_has_parent(root_mnt)) 4580 return -EINVAL; /* absolute root */ 4581 if (!path_mounted(&new)) 4582 return -EINVAL; /* not a mountpoint */ 4583 if (!mnt_has_parent(new_mnt)) 4584 return -EINVAL; /* absolute root */ 4585 /* make sure we can reach put_old from new_root */ 4586 if (!is_path_reachable(old_mnt, old_mp.mp->m_dentry, &new)) 4587 return -EINVAL; 4588 /* make certain new is below the root */ 4589 if (!is_path_reachable(new_mnt, new.dentry, &root)) 4590 return -EINVAL; 4591 lock_mount_hash(); 4592 umount_mnt(new_mnt); 4593 if (root_mnt->mnt.mnt_flags & MNT_LOCKED) { 4594 new_mnt->mnt.mnt_flags |= MNT_LOCKED; 4595 root_mnt->mnt.mnt_flags &= ~MNT_LOCKED; 4596 } 4597 /* mount new_root on / */ 4598 attach_mnt(new_mnt, root_parent, root_mnt->mnt_mp); 4599 umount_mnt(root_mnt); 4600 /* mount old root on put_old */ 4601 attach_mnt(root_mnt, old_mnt, old_mp.mp); 4602 touch_mnt_namespace(current->nsproxy->mnt_ns); 4603 /* 
A moved mount should not expire automatically */ 4604 list_del_init(&new_mnt->mnt_expire); 4605 unlock_mount_hash(); 4606 mnt_notify_add(root_mnt); 4607 mnt_notify_add(new_mnt); 4608 chroot_fs_refs(&root, &new); 4609 return 0; 4610 } 4611 4612 static unsigned int recalc_flags(struct mount_kattr *kattr, struct mount *mnt) 4613 { 4614 unsigned int flags = mnt->mnt.mnt_flags; 4615 4616 /* flags to clear */ 4617 flags &= ~kattr->attr_clr; 4618 /* flags to raise */ 4619 flags |= kattr->attr_set; 4620 4621 return flags; 4622 } 4623 4624 static int can_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt) 4625 { 4626 struct vfsmount *m = &mnt->mnt; 4627 struct user_namespace *fs_userns = m->mnt_sb->s_user_ns; 4628 4629 if (!kattr->mnt_idmap) 4630 return 0; 4631 4632 /* 4633 * Creating an idmapped mount with the filesystem wide idmapping 4634 * doesn't make sense so block that. We don't allow mushy semantics. 4635 */ 4636 if (kattr->mnt_userns == m->mnt_sb->s_user_ns) 4637 return -EINVAL; 4638 4639 /* 4640 * We only allow a mount to change its idmapping if it has 4641 * never been accessible to userspace. 4642 */ 4643 if (!(kattr->kflags & MOUNT_KATTR_IDMAP_REPLACE) && is_idmapped_mnt(m)) 4644 return -EPERM; 4645 4646 /* The underlying filesystem doesn't support idmapped mounts yet. */ 4647 if (!(m->mnt_sb->s_type->fs_flags & FS_ALLOW_IDMAP)) 4648 return -EINVAL; 4649 4650 /* The filesystem has turned off idmapped mounts. */ 4651 if (m->mnt_sb->s_iflags & SB_I_NOIDMAP) 4652 return -EINVAL; 4653 4654 /* We're not controlling the superblock. */ 4655 if (!ns_capable(fs_userns, CAP_SYS_ADMIN)) 4656 return -EPERM; 4657 4658 /* Mount has already been visible in the filesystem hierarchy. */ 4659 if (!is_anon_ns(mnt->mnt_ns)) 4660 return -EINVAL; 4661 4662 return 0; 4663 } 4664 4665 /** 4666 * mnt_allow_writers() - check whether the attribute change allows writers 4667 * @kattr: the new mount attributes 4668 * @mnt: the mount to which @kattr will be applied 4669 * 4670 * Check whether the new mount attributes in @kattr allow concurrent writers. 4671 * 4672 * Return: true if the change allows concurrent writers, false if writers must be held off 4673 */ 4674 static inline bool mnt_allow_writers(const struct mount_kattr *kattr, 4675 const struct mount *mnt) 4676 { 4677 return (!(kattr->attr_set & MNT_READONLY) || 4678 (mnt->mnt.mnt_flags & MNT_READONLY)) && 4679 !kattr->mnt_idmap; 4680 } 4681 4682 static int mount_setattr_prepare(struct mount_kattr *kattr, struct mount *mnt) 4683 { 4684 struct mount *m; 4685 int err; 4686 4687 for (m = mnt; m; m = next_mnt(m, mnt)) { 4688 if (!can_change_locked_flags(m, recalc_flags(kattr, m))) { 4689 err = -EPERM; 4690 break; 4691 } 4692 4693 err = can_idmap_mount(kattr, m); 4694 if (err) 4695 break; 4696 4697 if (!mnt_allow_writers(kattr, m)) { 4698 err = mnt_hold_writers(m); 4699 if (err) { 4700 m = next_mnt(m, mnt); 4701 break; 4702 } 4703 } 4704 4705 if (!(kattr->kflags & MOUNT_KATTR_RECURSE)) 4706 return 0; 4707 } 4708 4709 if (err) { 4710 /* undo all mnt_hold_writers() we'd done */ 4711 for (struct mount *p = mnt; p != m; p = next_mnt(p, mnt)) 4712 mnt_unhold_writers(p); 4713 } 4714 return err; 4715 } 4716 4717 static void do_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt) 4718 { 4719 struct mnt_idmap *old_idmap; 4720 4721 if (!kattr->mnt_idmap) 4722 return; 4723 4724 old_idmap = mnt_idmap(&mnt->mnt); 4725 4726 /* Pairs with smp_load_acquire() in mnt_idmap().
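The release guarantees that a task observing the new mnt_idmap pointer via that acquire also sees the idmap fully initialised and its reference count already elevated.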
*/ 4727 smp_store_release(&mnt->mnt.mnt_idmap, mnt_idmap_get(kattr->mnt_idmap)); 4728 mnt_idmap_put(old_idmap); 4729 } 4730 4731 static void mount_setattr_commit(struct mount_kattr *kattr, struct mount *mnt) 4732 { 4733 struct mount *m; 4734 4735 for (m = mnt; m; m = next_mnt(m, mnt)) { 4736 unsigned int flags; 4737 4738 do_idmap_mount(kattr, m); 4739 flags = recalc_flags(kattr, m); 4740 WRITE_ONCE(m->mnt.mnt_flags, flags); 4741 4742 /* If we had to hold writers unblock them. */ 4743 mnt_unhold_writers(m); 4744 4745 if (kattr->propagation) 4746 change_mnt_propagation(m, kattr->propagation); 4747 if (!(kattr->kflags & MOUNT_KATTR_RECURSE)) 4748 break; 4749 } 4750 touch_mnt_namespace(mnt->mnt_ns); 4751 } 4752 4753 static int do_mount_setattr(const struct path *path, struct mount_kattr *kattr) 4754 { 4755 struct mount *mnt = real_mount(path->mnt); 4756 int err = 0; 4757 4758 if (!path_mounted(path)) 4759 return -EINVAL; 4760 4761 if (kattr->mnt_userns) { 4762 struct mnt_idmap *mnt_idmap; 4763 4764 mnt_idmap = alloc_mnt_idmap(kattr->mnt_userns); 4765 if (IS_ERR(mnt_idmap)) 4766 return PTR_ERR(mnt_idmap); 4767 kattr->mnt_idmap = mnt_idmap; 4768 } 4769 4770 if (kattr->propagation) { 4771 /* 4772 * Only take namespace_lock() if we're actually changing 4773 * propagation. 4774 */ 4775 namespace_lock(); 4776 if (kattr->propagation == MS_SHARED) { 4777 err = invent_group_ids(mnt, kattr->kflags & MOUNT_KATTR_RECURSE); 4778 if (err) { 4779 namespace_unlock(); 4780 return err; 4781 } 4782 } 4783 } 4784 4785 err = -EINVAL; 4786 lock_mount_hash(); 4787 4788 if (!anon_ns_root(mnt) && !check_mnt(mnt)) 4789 goto out; 4790 4791 /* 4792 * First, we get the mount tree in a shape where we can change mount 4793 * properties without failure. If we succeeded to do so we commit all 4794 * changes and if we failed we clean up. 4795 */ 4796 err = mount_setattr_prepare(kattr, mnt); 4797 if (!err) 4798 mount_setattr_commit(kattr, mnt); 4799 4800 out: 4801 unlock_mount_hash(); 4802 4803 if (kattr->propagation) { 4804 if (err) 4805 cleanup_group_ids(mnt, NULL); 4806 namespace_unlock(); 4807 } 4808 4809 return err; 4810 } 4811 4812 static int build_mount_idmapped(const struct mount_attr *attr, size_t usize, 4813 struct mount_kattr *kattr) 4814 { 4815 struct ns_common *ns; 4816 struct user_namespace *mnt_userns; 4817 4818 if (!((attr->attr_set | attr->attr_clr) & MOUNT_ATTR_IDMAP)) 4819 return 0; 4820 4821 if (attr->attr_clr & MOUNT_ATTR_IDMAP) { 4822 /* 4823 * We can only remove an idmapping if it's never been 4824 * exposed to userspace. 4825 */ 4826 if (!(kattr->kflags & MOUNT_KATTR_IDMAP_REPLACE)) 4827 return -EINVAL; 4828 4829 /* 4830 * Removal of idmappings is equivalent to setting 4831 * nop_mnt_idmap. 4832 */ 4833 if (!(attr->attr_set & MOUNT_ATTR_IDMAP)) { 4834 kattr->mnt_idmap = &nop_mnt_idmap; 4835 return 0; 4836 } 4837 } 4838 4839 if (attr->userns_fd > INT_MAX) 4840 return -EINVAL; 4841 4842 CLASS(fd, f)(attr->userns_fd); 4843 if (fd_empty(f)) 4844 return -EBADF; 4845 4846 if (!proc_ns_file(fd_file(f))) 4847 return -EINVAL; 4848 4849 ns = get_proc_ns(file_inode(fd_file(f))); 4850 if (ns->ns_type != CLONE_NEWUSER) 4851 return -EINVAL; 4852 4853 /* 4854 * The initial idmapping cannot be used to create an idmapped 4855 * mount. We use the initial idmapping as an indicator of a mount 4856 * that is not idmapped. It can simply be passed into helpers that 4857 * are aware of idmapped mounts as a convenient shortcut. A user 4858 * can just create a dedicated identity mapping to achieve the same 4859 * result. 
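 *
 * Illustrative userspace sketch of the usual pattern (assumes raw
 * syscall() wrappers; userns_fd refers to a user namespace that already
 * carries the desired uid_map/gid_map, "/srv/data" and "/mnt/idmapped"
 * are placeholders, error handling omitted):
 *
 *	struct mount_attr attr = {
 *		.attr_set  = MOUNT_ATTR_IDMAP,
 *		.userns_fd = userns_fd,
 *	};
 *	int tree_fd = syscall(SYS_open_tree, AT_FDCWD, "/srv/data",
 *			      OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC);
 *	syscall(SYS_mount_setattr, tree_fd, "", AT_EMPTY_PATH, &attr,
 *		sizeof(attr));
 *	syscall(SYS_move_mount, tree_fd, "", AT_FDCWD, "/mnt/idmapped",
 *		MOVE_MOUNT_F_EMPTY_PATH);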
4860 */ 4861 mnt_userns = container_of(ns, struct user_namespace, ns); 4862 if (mnt_userns == &init_user_ns) 4863 return -EPERM; 4864 4865 /* We're not controlling the target namespace. */ 4866 if (!ns_capable(mnt_userns, CAP_SYS_ADMIN)) 4867 return -EPERM; 4868 4869 kattr->mnt_userns = get_user_ns(mnt_userns); 4870 return 0; 4871 } 4872 4873 static int build_mount_kattr(const struct mount_attr *attr, size_t usize, 4874 struct mount_kattr *kattr) 4875 { 4876 if (attr->propagation & ~MOUNT_SETATTR_PROPAGATION_FLAGS) 4877 return -EINVAL; 4878 if (hweight32(attr->propagation & MOUNT_SETATTR_PROPAGATION_FLAGS) > 1) 4879 return -EINVAL; 4880 kattr->propagation = attr->propagation; 4881 4882 if ((attr->attr_set | attr->attr_clr) & ~MOUNT_SETATTR_VALID_FLAGS) 4883 return -EINVAL; 4884 4885 kattr->attr_set = attr_flags_to_mnt_flags(attr->attr_set); 4886 kattr->attr_clr = attr_flags_to_mnt_flags(attr->attr_clr); 4887 4888 /* 4889 * Since the MOUNT_ATTR_<atime> values are an enum, not a bitmap, 4890 * users wanting to transition to a different atime setting cannot 4891 * simply specify the atime setting in @attr_set, but must also 4892 * specify MOUNT_ATTR__ATIME in the @attr_clr field. 4893 * So ensure that MOUNT_ATTR__ATIME can't be partially set in 4894 * @attr_clr and that @attr_set can't have any atime bits set if 4895 * MOUNT_ATTR__ATIME isn't set in @attr_clr. 4896 */ 4897 if (attr->attr_clr & MOUNT_ATTR__ATIME) { 4898 if ((attr->attr_clr & MOUNT_ATTR__ATIME) != MOUNT_ATTR__ATIME) 4899 return -EINVAL; 4900 4901 /* 4902 * Clear all previous time settings as they are mutually 4903 * exclusive. 4904 */ 4905 kattr->attr_clr |= MNT_RELATIME | MNT_NOATIME; 4906 switch (attr->attr_set & MOUNT_ATTR__ATIME) { 4907 case MOUNT_ATTR_RELATIME: 4908 kattr->attr_set |= MNT_RELATIME; 4909 break; 4910 case MOUNT_ATTR_NOATIME: 4911 kattr->attr_set |= MNT_NOATIME; 4912 break; 4913 case MOUNT_ATTR_STRICTATIME: 4914 break; 4915 default: 4916 return -EINVAL; 4917 } 4918 } else { 4919 if (attr->attr_set & MOUNT_ATTR__ATIME) 4920 return -EINVAL; 4921 } 4922 4923 return build_mount_idmapped(attr, usize, kattr); 4924 } 4925 4926 static void finish_mount_kattr(struct mount_kattr *kattr) 4927 { 4928 if (kattr->mnt_userns) { 4929 put_user_ns(kattr->mnt_userns); 4930 kattr->mnt_userns = NULL; 4931 } 4932 4933 if (kattr->mnt_idmap) 4934 mnt_idmap_put(kattr->mnt_idmap); 4935 } 4936 4937 static int wants_mount_setattr(struct mount_attr __user *uattr, size_t usize, 4938 struct mount_kattr *kattr) 4939 { 4940 int ret; 4941 struct mount_attr attr; 4942 4943 BUILD_BUG_ON(sizeof(struct mount_attr) != MOUNT_ATTR_SIZE_VER0); 4944 4945 if (unlikely(usize > PAGE_SIZE)) 4946 return -E2BIG; 4947 if (unlikely(usize < MOUNT_ATTR_SIZE_VER0)) 4948 return -EINVAL; 4949 4950 if (!may_mount()) 4951 return -EPERM; 4952 4953 ret = copy_struct_from_user(&attr, sizeof(attr), uattr, usize); 4954 if (ret) 4955 return ret; 4956 4957 /* Don't bother walking through the mounts if this is a nop. */ 4958 if (attr.attr_set == 0 && 4959 attr.attr_clr == 0 && 4960 attr.propagation == 0) 4961 return 0; /* Tell caller to not bother. 
*/ 4962 4963 ret = build_mount_kattr(&attr, usize, kattr); 4964 if (ret < 0) 4965 return ret; 4966 4967 return 1; 4968 } 4969 4970 SYSCALL_DEFINE5(mount_setattr, int, dfd, const char __user *, path, 4971 unsigned int, flags, struct mount_attr __user *, uattr, 4972 size_t, usize) 4973 { 4974 int err; 4975 struct path target; 4976 struct mount_kattr kattr; 4977 unsigned int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW; 4978 4979 if (flags & ~(AT_EMPTY_PATH | 4980 AT_RECURSIVE | 4981 AT_SYMLINK_NOFOLLOW | 4982 AT_NO_AUTOMOUNT)) 4983 return -EINVAL; 4984 4985 if (flags & AT_NO_AUTOMOUNT) 4986 lookup_flags &= ~LOOKUP_AUTOMOUNT; 4987 if (flags & AT_SYMLINK_NOFOLLOW) 4988 lookup_flags &= ~LOOKUP_FOLLOW; 4989 if (flags & AT_EMPTY_PATH) 4990 lookup_flags |= LOOKUP_EMPTY; 4991 4992 kattr = (struct mount_kattr) { 4993 .lookup_flags = lookup_flags, 4994 }; 4995 4996 if (flags & AT_RECURSIVE) 4997 kattr.kflags |= MOUNT_KATTR_RECURSE; 4998 4999 err = wants_mount_setattr(uattr, usize, &kattr); 5000 if (err <= 0) 5001 return err; 5002 5003 err = user_path_at(dfd, path, kattr.lookup_flags, &target); 5004 if (!err) { 5005 err = do_mount_setattr(&target, &kattr); 5006 path_put(&target); 5007 } 5008 finish_mount_kattr(&kattr); 5009 return err; 5010 } 5011 5012 SYSCALL_DEFINE5(open_tree_attr, int, dfd, const char __user *, filename, 5013 unsigned, flags, struct mount_attr __user *, uattr, 5014 size_t, usize) 5015 { 5016 if (!uattr && usize) 5017 return -EINVAL; 5018 5019 FD_PREPARE(fdf, flags, vfs_open_tree(dfd, filename, flags)); 5020 if (fdf.err) 5021 return fdf.err; 5022 5023 if (uattr) { 5024 struct mount_kattr kattr = {}; 5025 struct file *file = fd_prepare_file(fdf); 5026 int ret; 5027 5028 if (flags & OPEN_TREE_CLONE) 5029 kattr.kflags = MOUNT_KATTR_IDMAP_REPLACE; 5030 if (flags & AT_RECURSIVE) 5031 kattr.kflags |= MOUNT_KATTR_RECURSE; 5032 5033 ret = wants_mount_setattr(uattr, usize, &kattr); 5034 if (ret > 0) { 5035 ret = do_mount_setattr(&file->f_path, &kattr); 5036 finish_mount_kattr(&kattr); 5037 } 5038 if (ret) 5039 return ret; 5040 } 5041 5042 return fd_publish(fdf); 5043 } 5044 5045 int show_path(struct seq_file *m, struct dentry *root) 5046 { 5047 if (root->d_sb->s_op->show_path) 5048 return root->d_sb->s_op->show_path(m, root); 5049 5050 seq_dentry(m, root, " \t\n\\"); 5051 return 0; 5052 } 5053 5054 static struct vfsmount *lookup_mnt_in_ns(u64 id, struct mnt_namespace *ns) 5055 { 5056 struct mount *mnt = mnt_find_id_at(ns, id); 5057 5058 if (!mnt || mnt->mnt_id_unique != id) 5059 return NULL; 5060 5061 return &mnt->mnt; 5062 } 5063 5064 struct kstatmount { 5065 struct statmount __user *buf; 5066 size_t bufsize; 5067 struct vfsmount *mnt; 5068 struct mnt_idmap *idmap; 5069 u64 mask; 5070 struct path root; 5071 struct seq_file seq; 5072 5073 /* Must be last --ends in a flexible-array member. 
*/ 5074 struct statmount sm; 5075 }; 5076 5077 static u64 mnt_to_attr_flags(struct vfsmount *mnt) 5078 { 5079 unsigned int mnt_flags = READ_ONCE(mnt->mnt_flags); 5080 u64 attr_flags = 0; 5081 5082 if (mnt_flags & MNT_READONLY) 5083 attr_flags |= MOUNT_ATTR_RDONLY; 5084 if (mnt_flags & MNT_NOSUID) 5085 attr_flags |= MOUNT_ATTR_NOSUID; 5086 if (mnt_flags & MNT_NODEV) 5087 attr_flags |= MOUNT_ATTR_NODEV; 5088 if (mnt_flags & MNT_NOEXEC) 5089 attr_flags |= MOUNT_ATTR_NOEXEC; 5090 if (mnt_flags & MNT_NODIRATIME) 5091 attr_flags |= MOUNT_ATTR_NODIRATIME; 5092 if (mnt_flags & MNT_NOSYMFOLLOW) 5093 attr_flags |= MOUNT_ATTR_NOSYMFOLLOW; 5094 5095 if (mnt_flags & MNT_NOATIME) 5096 attr_flags |= MOUNT_ATTR_NOATIME; 5097 else if (mnt_flags & MNT_RELATIME) 5098 attr_flags |= MOUNT_ATTR_RELATIME; 5099 else 5100 attr_flags |= MOUNT_ATTR_STRICTATIME; 5101 5102 if (is_idmapped_mnt(mnt)) 5103 attr_flags |= MOUNT_ATTR_IDMAP; 5104 5105 return attr_flags; 5106 } 5107 5108 static u64 mnt_to_propagation_flags(struct mount *m) 5109 { 5110 u64 propagation = 0; 5111 5112 if (IS_MNT_SHARED(m)) 5113 propagation |= MS_SHARED; 5114 if (IS_MNT_SLAVE(m)) 5115 propagation |= MS_SLAVE; 5116 if (IS_MNT_UNBINDABLE(m)) 5117 propagation |= MS_UNBINDABLE; 5118 if (!propagation) 5119 propagation |= MS_PRIVATE; 5120 5121 return propagation; 5122 } 5123 5124 static void statmount_sb_basic(struct kstatmount *s) 5125 { 5126 struct super_block *sb = s->mnt->mnt_sb; 5127 5128 s->sm.mask |= STATMOUNT_SB_BASIC; 5129 s->sm.sb_dev_major = MAJOR(sb->s_dev); 5130 s->sm.sb_dev_minor = MINOR(sb->s_dev); 5131 s->sm.sb_magic = sb->s_magic; 5132 s->sm.sb_flags = sb->s_flags & (SB_RDONLY|SB_SYNCHRONOUS|SB_DIRSYNC|SB_LAZYTIME); 5133 } 5134 5135 static void statmount_mnt_basic(struct kstatmount *s) 5136 { 5137 struct mount *m = real_mount(s->mnt); 5138 5139 s->sm.mask |= STATMOUNT_MNT_BASIC; 5140 s->sm.mnt_id = m->mnt_id_unique; 5141 s->sm.mnt_parent_id = m->mnt_parent->mnt_id_unique; 5142 s->sm.mnt_id_old = m->mnt_id; 5143 s->sm.mnt_parent_id_old = m->mnt_parent->mnt_id; 5144 s->sm.mnt_attr = mnt_to_attr_flags(&m->mnt); 5145 s->sm.mnt_propagation = mnt_to_propagation_flags(m); 5146 s->sm.mnt_peer_group = m->mnt_group_id; 5147 s->sm.mnt_master = IS_MNT_SLAVE(m) ? m->mnt_master->mnt_group_id : 0; 5148 } 5149 5150 static void statmount_propagate_from(struct kstatmount *s) 5151 { 5152 struct mount *m = real_mount(s->mnt); 5153 5154 s->sm.mask |= STATMOUNT_PROPAGATE_FROM; 5155 if (IS_MNT_SLAVE(m)) 5156 s->sm.propagate_from = get_dominating_id(m, ¤t->fs->root); 5157 } 5158 5159 static int statmount_mnt_root(struct kstatmount *s, struct seq_file *seq) 5160 { 5161 int ret; 5162 size_t start = seq->count; 5163 5164 ret = show_path(seq, s->mnt->mnt_root); 5165 if (ret) 5166 return ret; 5167 5168 if (unlikely(seq_has_overflowed(seq))) 5169 return -EAGAIN; 5170 5171 /* 5172 * Unescape the result. It would be better if supplied string was not 5173 * escaped in the first place, but that's a pretty invasive change. 5174 */ 5175 seq->buf[seq->count] = '\0'; 5176 seq->count = start; 5177 seq_commit(seq, string_unescape_inplace(seq->buf + start, UNESCAPE_OCTAL)); 5178 return 0; 5179 } 5180 5181 static int statmount_mnt_point(struct kstatmount *s, struct seq_file *seq) 5182 { 5183 struct vfsmount *mnt = s->mnt; 5184 struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt }; 5185 int err; 5186 5187 err = seq_path_root(seq, &mnt_path, &s->root, ""); 5188 return err == SEQ_SKIP ? 
0 : err; 5189 } 5190 5191 static int statmount_fs_type(struct kstatmount *s, struct seq_file *seq) 5192 { 5193 struct super_block *sb = s->mnt->mnt_sb; 5194 5195 seq_puts(seq, sb->s_type->name); 5196 return 0; 5197 } 5198 5199 static void statmount_fs_subtype(struct kstatmount *s, struct seq_file *seq) 5200 { 5201 struct super_block *sb = s->mnt->mnt_sb; 5202 5203 if (sb->s_subtype) 5204 seq_puts(seq, sb->s_subtype); 5205 } 5206 5207 static int statmount_sb_source(struct kstatmount *s, struct seq_file *seq) 5208 { 5209 struct super_block *sb = s->mnt->mnt_sb; 5210 struct mount *r = real_mount(s->mnt); 5211 5212 if (sb->s_op->show_devname) { 5213 size_t start = seq->count; 5214 int ret; 5215 5216 ret = sb->s_op->show_devname(seq, s->mnt->mnt_root); 5217 if (ret) 5218 return ret; 5219 5220 if (unlikely(seq_has_overflowed(seq))) 5221 return -EAGAIN; 5222 5223 /* Unescape the result */ 5224 seq->buf[seq->count] = '\0'; 5225 seq->count = start; 5226 seq_commit(seq, string_unescape_inplace(seq->buf + start, UNESCAPE_OCTAL)); 5227 } else { 5228 seq_puts(seq, r->mnt_devname); 5229 } 5230 return 0; 5231 } 5232 5233 static void statmount_mnt_ns_id(struct kstatmount *s, struct mnt_namespace *ns) 5234 { 5235 s->sm.mask |= STATMOUNT_MNT_NS_ID; 5236 s->sm.mnt_ns_id = ns->ns.ns_id; 5237 } 5238 5239 static int statmount_mnt_opts(struct kstatmount *s, struct seq_file *seq) 5240 { 5241 struct vfsmount *mnt = s->mnt; 5242 struct super_block *sb = mnt->mnt_sb; 5243 size_t start = seq->count; 5244 int err; 5245 5246 err = security_sb_show_options(seq, sb); 5247 if (err) 5248 return err; 5249 5250 if (sb->s_op->show_options) { 5251 err = sb->s_op->show_options(seq, mnt->mnt_root); 5252 if (err) 5253 return err; 5254 } 5255 5256 if (unlikely(seq_has_overflowed(seq))) 5257 return -EAGAIN; 5258 5259 if (seq->count == start) 5260 return 0; 5261 5262 /* skip leading comma */ 5263 memmove(seq->buf + start, seq->buf + start + 1, 5264 seq->count - start - 1); 5265 seq->count--; 5266 5267 return 0; 5268 } 5269 5270 static inline int statmount_opt_process(struct seq_file *seq, size_t start) 5271 { 5272 char *buf_end, *opt_end, *src, *dst; 5273 int count = 0; 5274 5275 if (unlikely(seq_has_overflowed(seq))) 5276 return -EAGAIN; 5277 5278 buf_end = seq->buf + seq->count; 5279 dst = seq->buf + start; 5280 src = dst + 1; /* skip initial comma */ 5281 5282 if (src >= buf_end) { 5283 seq->count = start; 5284 return 0; 5285 } 5286 5287 *buf_end = '\0'; 5288 for (; src < buf_end; src = opt_end + 1) { 5289 opt_end = strchrnul(src, ','); 5290 *opt_end = '\0'; 5291 dst += string_unescape(src, dst, 0, UNESCAPE_OCTAL) + 1; 5292 if (WARN_ON_ONCE(++count == INT_MAX)) 5293 return -EOVERFLOW; 5294 } 5295 seq->count = dst - 1 - seq->buf; 5296 return count; 5297 } 5298 5299 static int statmount_opt_array(struct kstatmount *s, struct seq_file *seq) 5300 { 5301 struct vfsmount *mnt = s->mnt; 5302 struct super_block *sb = mnt->mnt_sb; 5303 size_t start = seq->count; 5304 int err; 5305 5306 if (!sb->s_op->show_options) 5307 return 0; 5308 5309 err = sb->s_op->show_options(seq, mnt->mnt_root); 5310 if (err) 5311 return err; 5312 5313 err = statmount_opt_process(seq, start); 5314 if (err < 0) 5315 return err; 5316 5317 s->sm.opt_num = err; 5318 return 0; 5319 } 5320 5321 static int statmount_opt_sec_array(struct kstatmount *s, struct seq_file *seq) 5322 { 5323 struct vfsmount *mnt = s->mnt; 5324 struct super_block *sb = mnt->mnt_sb; 5325 size_t start = seq->count; 5326 int err; 5327 5328 err = security_sb_show_options(seq, sb); 5329 if (err) 
5330 return err; 5331 5332 err = statmount_opt_process(seq, start); 5333 if (err < 0) 5334 return err; 5335 5336 s->sm.opt_sec_num = err; 5337 return 0; 5338 } 5339 5340 static inline int statmount_mnt_uidmap(struct kstatmount *s, struct seq_file *seq) 5341 { 5342 int ret; 5343 5344 ret = statmount_mnt_idmap(s->idmap, seq, true); 5345 if (ret < 0) 5346 return ret; 5347 5348 s->sm.mnt_uidmap_num = ret; 5349 /* 5350 * Always raise STATMOUNT_MNT_UIDMAP even if there are no valid 5351 * mappings. This allows userspace to distinguish between a 5352 * non-idmapped mount and an idmapped mount where none of the 5353 * individual mappings are valid in the caller's idmapping. 5354 */ 5355 if (is_valid_mnt_idmap(s->idmap)) 5356 s->sm.mask |= STATMOUNT_MNT_UIDMAP; 5357 return 0; 5358 } 5359 5360 static inline int statmount_mnt_gidmap(struct kstatmount *s, struct seq_file *seq) 5361 { 5362 int ret; 5363 5364 ret = statmount_mnt_idmap(s->idmap, seq, false); 5365 if (ret < 0) 5366 return ret; 5367 5368 s->sm.mnt_gidmap_num = ret; 5369 /* 5370 * Always raise STATMOUNT_MNT_GIDMAP even if there are no valid 5371 * mappings. This allows userspace to distinguish between a 5372 * non-idmapped mount and an idmapped mount where none of the 5373 * individual mappings are valid in the caller's idmapping. 5374 */ 5375 if (is_valid_mnt_idmap(s->idmap)) 5376 s->sm.mask |= STATMOUNT_MNT_GIDMAP; 5377 return 0; 5378 } 5379 5380 static int statmount_string(struct kstatmount *s, u64 flag) 5381 { 5382 int ret = 0; 5383 size_t kbufsize; 5384 struct seq_file *seq = &s->seq; 5385 struct statmount *sm = &s->sm; 5386 u32 start, *offp; 5387 5388 /* Reserve an empty string at the beginning for any unset offsets */ 5389 if (!seq->count) 5390 seq_putc(seq, 0); 5391 5392 start = seq->count; 5393 5394 switch (flag) { 5395 case STATMOUNT_FS_TYPE: 5396 offp = &sm->fs_type; 5397 ret = statmount_fs_type(s, seq); 5398 break; 5399 case STATMOUNT_MNT_ROOT: 5400 offp = &sm->mnt_root; 5401 ret = statmount_mnt_root(s, seq); 5402 break; 5403 case STATMOUNT_MNT_POINT: 5404 offp = &sm->mnt_point; 5405 ret = statmount_mnt_point(s, seq); 5406 break; 5407 case STATMOUNT_MNT_OPTS: 5408 offp = &sm->mnt_opts; 5409 ret = statmount_mnt_opts(s, seq); 5410 break; 5411 case STATMOUNT_OPT_ARRAY: 5412 offp = &sm->opt_array; 5413 ret = statmount_opt_array(s, seq); 5414 break; 5415 case STATMOUNT_OPT_SEC_ARRAY: 5416 offp = &sm->opt_sec_array; 5417 ret = statmount_opt_sec_array(s, seq); 5418 break; 5419 case STATMOUNT_FS_SUBTYPE: 5420 offp = &sm->fs_subtype; 5421 statmount_fs_subtype(s, seq); 5422 break; 5423 case STATMOUNT_SB_SOURCE: 5424 offp = &sm->sb_source; 5425 ret = statmount_sb_source(s, seq); 5426 break; 5427 case STATMOUNT_MNT_UIDMAP: 5428 sm->mnt_uidmap = start; 5429 ret = statmount_mnt_uidmap(s, seq); 5430 break; 5431 case STATMOUNT_MNT_GIDMAP: 5432 sm->mnt_gidmap = start; 5433 ret = statmount_mnt_gidmap(s, seq); 5434 break; 5435 default: 5436 WARN_ON_ONCE(true); 5437 return -EINVAL; 5438 } 5439 5440 /* 5441 * If nothing was emitted, return to avoid setting the flag 5442 * and terminating the buffer. 
5443 */ 5444 if (seq->count == start) 5445 return ret; 5446 if (unlikely(check_add_overflow(sizeof(*sm), seq->count, &kbufsize))) 5447 return -EOVERFLOW; 5448 if (kbufsize >= s->bufsize) 5449 return -EOVERFLOW; 5450 5451 /* signal a retry */ 5452 if (unlikely(seq_has_overflowed(seq))) 5453 return -EAGAIN; 5454 5455 if (ret) 5456 return ret; 5457 5458 seq->buf[seq->count++] = '\0'; 5459 sm->mask |= flag; 5460 *offp = start; 5461 return 0; 5462 } 5463 5464 static int copy_statmount_to_user(struct kstatmount *s) 5465 { 5466 struct statmount *sm = &s->sm; 5467 struct seq_file *seq = &s->seq; 5468 char __user *str = ((char __user *)s->buf) + sizeof(*sm); 5469 size_t copysize = min_t(size_t, s->bufsize, sizeof(*sm)); 5470 5471 if (seq->count && copy_to_user(str, seq->buf, seq->count)) 5472 return -EFAULT; 5473 5474 /* Return the number of bytes copied to the buffer */ 5475 sm->size = copysize + seq->count; 5476 if (copy_to_user(s->buf, sm, copysize)) 5477 return -EFAULT; 5478 5479 return 0; 5480 } 5481 5482 static struct mount *listmnt_next(struct mount *curr, bool reverse) 5483 { 5484 struct rb_node *node; 5485 5486 if (reverse) 5487 node = rb_prev(&curr->mnt_node); 5488 else 5489 node = rb_next(&curr->mnt_node); 5490 5491 return node_to_mount(node); 5492 } 5493 5494 static int grab_requested_root(struct mnt_namespace *ns, struct path *root) 5495 { 5496 struct mount *first, *child; 5497 5498 rwsem_assert_held(&namespace_sem); 5499 5500 /* We're looking at our own ns, just use get_fs_root. */ 5501 if (ns == current->nsproxy->mnt_ns) { 5502 get_fs_root(current->fs, root); 5503 return 0; 5504 } 5505 5506 /* 5507 * We have to find the first mount in our ns and use that, however it 5508 * may not exist, so handle that properly. 5509 */ 5510 if (mnt_ns_empty(ns)) 5511 return -ENOENT; 5512 5513 first = child = ns->root; 5514 for (;;) { 5515 child = listmnt_next(child, false); 5516 if (!child) 5517 return -ENOENT; 5518 if (child->mnt_parent == first) 5519 break; 5520 } 5521 5522 root->mnt = mntget(&child->mnt); 5523 root->dentry = dget(root->mnt->mnt_root); 5524 return 0; 5525 } 5526 5527 /* This must be updated whenever a new flag is added */ 5528 #define STATMOUNT_SUPPORTED (STATMOUNT_SB_BASIC | \ 5529 STATMOUNT_MNT_BASIC | \ 5530 STATMOUNT_PROPAGATE_FROM | \ 5531 STATMOUNT_MNT_ROOT | \ 5532 STATMOUNT_MNT_POINT | \ 5533 STATMOUNT_FS_TYPE | \ 5534 STATMOUNT_MNT_NS_ID | \ 5535 STATMOUNT_MNT_OPTS | \ 5536 STATMOUNT_FS_SUBTYPE | \ 5537 STATMOUNT_SB_SOURCE | \ 5538 STATMOUNT_OPT_ARRAY | \ 5539 STATMOUNT_OPT_SEC_ARRAY | \ 5540 STATMOUNT_SUPPORTED_MASK | \ 5541 STATMOUNT_MNT_UIDMAP | \ 5542 STATMOUNT_MNT_GIDMAP) 5543 5544 /* locks: namespace_shared */ 5545 static int do_statmount(struct kstatmount *s, u64 mnt_id, u64 mnt_ns_id, 5546 struct mnt_namespace *ns) 5547 { 5548 struct mount *m; 5549 int err; 5550 5551 /* Has the namespace already been emptied? */ 5552 if (mnt_ns_id && mnt_ns_empty(ns)) 5553 return -ENOENT; 5554 5555 s->mnt = lookup_mnt_in_ns(mnt_id, ns); 5556 if (!s->mnt) 5557 return -ENOENT; 5558 5559 err = grab_requested_root(ns, &s->root); 5560 if (err) 5561 return err; 5562 5563 /* 5564 * Don't trigger audit denials. We just want to determine what 5565 * mounts to show users. 
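 * A mount that is not reachable from the requested root is only reported
 * to a caller that is CAP_SYS_ADMIN-capable in the namespace's owning
 * user namespace; everyone else gets -EPERM.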
5566 */ 5567 m = real_mount(s->mnt); 5568 if (!is_path_reachable(m, m->mnt.mnt_root, &s->root) && 5569 !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN)) 5570 return -EPERM; 5571 5572 err = security_sb_statfs(s->mnt->mnt_root); 5573 if (err) 5574 return err; 5575 5576 /* 5577 * Note that mount properties in mnt->mnt_flags, mnt->mnt_idmap 5578 * can change concurrently as we only hold the read-side of the 5579 * namespace semaphore and mount properties may change with only 5580 * the mount lock held. 5581 * 5582 * We could sample the mount lock sequence counter to detect 5583 * those changes and retry. But it's not worth it. Worst that 5584 * happens is that the mnt->mnt_idmap pointer is already changed 5585 * while mnt->mnt_flags isn't or vice versa. So what. 5586 * 5587 * Both mnt->mnt_flags and mnt->mnt_idmap are set and retrieved 5588 * via READ_ONCE()/WRITE_ONCE() and guard against theoretical 5589 * torn read/write. That's all we care about right now. 5590 */ 5591 s->idmap = mnt_idmap(s->mnt); 5592 if (s->mask & STATMOUNT_MNT_BASIC) 5593 statmount_mnt_basic(s); 5594 5595 if (s->mask & STATMOUNT_SB_BASIC) 5596 statmount_sb_basic(s); 5597 5598 if (s->mask & STATMOUNT_PROPAGATE_FROM) 5599 statmount_propagate_from(s); 5600 5601 if (s->mask & STATMOUNT_FS_TYPE) 5602 err = statmount_string(s, STATMOUNT_FS_TYPE); 5603 5604 if (!err && s->mask & STATMOUNT_MNT_ROOT) 5605 err = statmount_string(s, STATMOUNT_MNT_ROOT); 5606 5607 if (!err && s->mask & STATMOUNT_MNT_POINT) 5608 err = statmount_string(s, STATMOUNT_MNT_POINT); 5609 5610 if (!err && s->mask & STATMOUNT_MNT_OPTS) 5611 err = statmount_string(s, STATMOUNT_MNT_OPTS); 5612 5613 if (!err && s->mask & STATMOUNT_OPT_ARRAY) 5614 err = statmount_string(s, STATMOUNT_OPT_ARRAY); 5615 5616 if (!err && s->mask & STATMOUNT_OPT_SEC_ARRAY) 5617 err = statmount_string(s, STATMOUNT_OPT_SEC_ARRAY); 5618 5619 if (!err && s->mask & STATMOUNT_FS_SUBTYPE) 5620 err = statmount_string(s, STATMOUNT_FS_SUBTYPE); 5621 5622 if (!err && s->mask & STATMOUNT_SB_SOURCE) 5623 err = statmount_string(s, STATMOUNT_SB_SOURCE); 5624 5625 if (!err && s->mask & STATMOUNT_MNT_UIDMAP) 5626 err = statmount_string(s, STATMOUNT_MNT_UIDMAP); 5627 5628 if (!err && s->mask & STATMOUNT_MNT_GIDMAP) 5629 err = statmount_string(s, STATMOUNT_MNT_GIDMAP); 5630 5631 if (!err && s->mask & STATMOUNT_MNT_NS_ID) 5632 statmount_mnt_ns_id(s, ns); 5633 5634 if (!err && s->mask & STATMOUNT_SUPPORTED_MASK) { 5635 s->sm.mask |= STATMOUNT_SUPPORTED_MASK; 5636 s->sm.supported_mask = STATMOUNT_SUPPORTED; 5637 } 5638 5639 if (err) 5640 return err; 5641 5642 /* Are there bits in the return mask not present in STATMOUNT_SUPPORTED?
*/ 5643 WARN_ON_ONCE(~STATMOUNT_SUPPORTED & s->sm.mask); 5644 5645 return 0; 5646 } 5647 5648 static inline bool retry_statmount(const long ret, size_t *seq_size) 5649 { 5650 if (likely(ret != -EAGAIN)) 5651 return false; 5652 if (unlikely(check_mul_overflow(*seq_size, 2, seq_size))) 5653 return false; 5654 if (unlikely(*seq_size > MAX_RW_COUNT)) 5655 return false; 5656 return true; 5657 } 5658 5659 #define STATMOUNT_STRING_REQ (STATMOUNT_MNT_ROOT | STATMOUNT_MNT_POINT | \ 5660 STATMOUNT_FS_TYPE | STATMOUNT_MNT_OPTS | \ 5661 STATMOUNT_FS_SUBTYPE | STATMOUNT_SB_SOURCE | \ 5662 STATMOUNT_OPT_ARRAY | STATMOUNT_OPT_SEC_ARRAY | \ 5663 STATMOUNT_MNT_UIDMAP | STATMOUNT_MNT_GIDMAP) 5664 5665 static int prepare_kstatmount(struct kstatmount *ks, struct mnt_id_req *kreq, 5666 struct statmount __user *buf, size_t bufsize, 5667 size_t seq_size) 5668 { 5669 if (!access_ok(buf, bufsize)) 5670 return -EFAULT; 5671 5672 memset(ks, 0, sizeof(*ks)); 5673 ks->mask = kreq->param; 5674 ks->buf = buf; 5675 ks->bufsize = bufsize; 5676 5677 if (ks->mask & STATMOUNT_STRING_REQ) { 5678 if (bufsize == sizeof(ks->sm)) 5679 return -EOVERFLOW; 5680 5681 ks->seq.buf = kvmalloc(seq_size, GFP_KERNEL_ACCOUNT); 5682 if (!ks->seq.buf) 5683 return -ENOMEM; 5684 5685 ks->seq.size = seq_size; 5686 } 5687 5688 return 0; 5689 } 5690 5691 static int copy_mnt_id_req(const struct mnt_id_req __user *req, 5692 struct mnt_id_req *kreq) 5693 { 5694 int ret; 5695 size_t usize; 5696 5697 BUILD_BUG_ON(sizeof(struct mnt_id_req) != MNT_ID_REQ_SIZE_VER1); 5698 5699 ret = get_user(usize, &req->size); 5700 if (ret) 5701 return -EFAULT; 5702 if (unlikely(usize > PAGE_SIZE)) 5703 return -E2BIG; 5704 if (unlikely(usize < MNT_ID_REQ_SIZE_VER0)) 5705 return -EINVAL; 5706 memset(kreq, 0, sizeof(*kreq)); 5707 ret = copy_struct_from_user(kreq, sizeof(*kreq), req, usize); 5708 if (ret) 5709 return ret; 5710 if (kreq->spare != 0) 5711 return -EINVAL; 5712 /* The first valid unique mount id is MNT_UNIQUE_ID_OFFSET + 1. */ 5713 if (kreq->mnt_id <= MNT_UNIQUE_ID_OFFSET) 5714 return -EINVAL; 5715 return 0; 5716 } 5717 5718 /* 5719 * If the user requested a specific mount namespace id, look that up and return 5720 * that, or if not simply grab a passive reference on our mount namespace and 5721 * return that. 5722 */ 5723 static struct mnt_namespace *grab_requested_mnt_ns(const struct mnt_id_req *kreq) 5724 { 5725 struct mnt_namespace *mnt_ns; 5726 5727 if (kreq->mnt_ns_id && kreq->spare) 5728 return ERR_PTR(-EINVAL); 5729 5730 if (kreq->mnt_ns_id) 5731 return lookup_mnt_ns(kreq->mnt_ns_id); 5732 5733 if (kreq->spare) { 5734 struct ns_common *ns; 5735 5736 CLASS(fd, f)(kreq->spare); 5737 if (fd_empty(f)) 5738 return ERR_PTR(-EBADF); 5739 5740 if (!proc_ns_file(fd_file(f))) 5741 return ERR_PTR(-EINVAL); 5742 5743 ns = get_proc_ns(file_inode(fd_file(f))); 5744 if (ns->ns_type != CLONE_NEWNS) 5745 return ERR_PTR(-EINVAL); 5746 5747 mnt_ns = to_mnt_ns(ns); 5748 } else { 5749 mnt_ns = current->nsproxy->mnt_ns; 5750 } 5751 5752 refcount_inc(&mnt_ns->passive); 5753 return mnt_ns; 5754 } 5755 5756 SYSCALL_DEFINE4(statmount, const struct mnt_id_req __user *, req, 5757 struct statmount __user *, buf, size_t, bufsize, 5758 unsigned int, flags) 5759 { 5760 struct mnt_namespace *ns __free(mnt_ns_release) = NULL; 5761 struct kstatmount *ks __free(kfree) = NULL; 5762 struct mnt_id_req kreq; 5763 /* We currently support retrieval of 3 strings. 
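 This is only an
 * initial guess for the string buffer: if it turns out to be too small the
 * buffer is doubled and the request retried (see retry_statmount() above).
 *
 * Illustrative userspace usage (sketch; assumes raw syscall() wrappers and
 * <linux/mount.h>, mnt_id obtained e.g. from listmount(), error handling
 * omitted):
 *
 *	struct mnt_id_req req = {
 *		.size   = MNT_ID_REQ_SIZE_VER0,
 *		.mnt_id = mnt_id,
 *		.param  = STATMOUNT_SB_BASIC | STATMOUNT_MNT_POINT,
 *	};
 *	char buf[4096];
 *	syscall(SYS_statmount, &req, buf, sizeof(buf), 0);
 *	struct statmount *sm = (struct statmount *)buf;
 *	printf("mounted on %s\n", sm->str + sm->mnt_point);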
*/ 5764 size_t seq_size = 3 * PATH_MAX; 5765 int ret; 5766 5767 if (flags) 5768 return -EINVAL; 5769 5770 ret = copy_mnt_id_req(req, &kreq); 5771 if (ret) 5772 return ret; 5773 5774 ns = grab_requested_mnt_ns(&kreq); 5775 if (!ns) 5776 return -ENOENT; 5777 5778 if (kreq.mnt_ns_id && (ns != current->nsproxy->mnt_ns) && 5779 !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN)) 5780 return -ENOENT; 5781 5782 ks = kmalloc(sizeof(*ks), GFP_KERNEL_ACCOUNT); 5783 if (!ks) 5784 return -ENOMEM; 5785 5786 retry: 5787 ret = prepare_kstatmount(ks, &kreq, buf, bufsize, seq_size); 5788 if (ret) 5789 return ret; 5790 5791 scoped_guard(namespace_shared) 5792 ret = do_statmount(ks, kreq.mnt_id, kreq.mnt_ns_id, ns); 5793 5794 if (!ret) 5795 ret = copy_statmount_to_user(ks); 5796 kvfree(ks->seq.buf); 5797 path_put(&ks->root); 5798 if (retry_statmount(ret, &seq_size)) 5799 goto retry; 5800 return ret; 5801 } 5802 5803 struct klistmount { 5804 u64 last_mnt_id; 5805 u64 mnt_parent_id; 5806 u64 *kmnt_ids; 5807 u32 nr_mnt_ids; 5808 struct mnt_namespace *ns; 5809 struct path root; 5810 }; 5811 5812 /* locks: namespace_shared */ 5813 static ssize_t do_listmount(struct klistmount *kls, bool reverse) 5814 { 5815 struct mnt_namespace *ns = kls->ns; 5816 u64 mnt_parent_id = kls->mnt_parent_id; 5817 u64 last_mnt_id = kls->last_mnt_id; 5818 u64 *mnt_ids = kls->kmnt_ids; 5819 size_t nr_mnt_ids = kls->nr_mnt_ids; 5820 struct path orig; 5821 struct mount *r, *first; 5822 ssize_t ret; 5823 5824 rwsem_assert_held(&namespace_sem); 5825 5826 ret = grab_requested_root(ns, &kls->root); 5827 if (ret) 5828 return ret; 5829 5830 if (mnt_parent_id == LSMT_ROOT) { 5831 orig = kls->root; 5832 } else { 5833 orig.mnt = lookup_mnt_in_ns(mnt_parent_id, ns); 5834 if (!orig.mnt) 5835 return -ENOENT; 5836 orig.dentry = orig.mnt->mnt_root; 5837 } 5838 5839 /* 5840 * Don't trigger audit denials. We just want to determine what 5841 * mounts to show users. 5842 */ 5843 if (!is_path_reachable(real_mount(orig.mnt), orig.dentry, &kls->root) && 5844 !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN)) 5845 return -EPERM; 5846 5847 ret = security_sb_statfs(orig.dentry); 5848 if (ret) 5849 return ret; 5850 5851 if (!last_mnt_id) { 5852 if (reverse) 5853 first = node_to_mount(ns->mnt_last_node); 5854 else 5855 first = node_to_mount(ns->mnt_first_node); 5856 } else { 5857 if (reverse) 5858 first = mnt_find_id_at_reverse(ns, last_mnt_id - 1); 5859 else 5860 first = mnt_find_id_at(ns, last_mnt_id + 1); 5861 } 5862 5863 for (ret = 0, r = first; r && nr_mnt_ids; r = listmnt_next(r, reverse)) { 5864 if (r->mnt_id_unique == mnt_parent_id) 5865 continue; 5866 if (!is_path_reachable(r, r->mnt.mnt_root, &orig)) 5867 continue; 5868 *mnt_ids = r->mnt_id_unique; 5869 mnt_ids++; 5870 nr_mnt_ids--; 5871 ret++; 5872 } 5873 return ret; 5874 } 5875 5876 static void __free_klistmount_free(const struct klistmount *kls) 5877 { 5878 path_put(&kls->root); 5879 kvfree(kls->kmnt_ids); 5880 mnt_ns_release(kls->ns); 5881 } 5882 5883 static inline int prepare_klistmount(struct klistmount *kls, struct mnt_id_req *kreq, 5884 size_t nr_mnt_ids) 5885 { 5886 5887 u64 last_mnt_id = kreq->param; 5888 5889 /* The first valid unique mount id is MNT_UNIQUE_ID_OFFSET + 1. 
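 A value of 0 in
 * kreq->param means "iterate from the beginning"; any other value is the
 * cursor returned by a previous call and iteration resumes after it.
 *
 * Illustrative userspace iteration over a whole namespace (sketch; the
 * loop advances req.param to the last id returned so the walk resumes
 * where it left off, error handling omitted):
 *
 *	struct mnt_id_req req = {
 *		.size   = MNT_ID_REQ_SIZE_VER0,
 *		.mnt_id = LSMT_ROOT,
 *	};
 *	uint64_t ids[256];
 *	ssize_t n;
 *
 *	while ((n = syscall(SYS_listmount, &req, ids, 256, 0)) > 0)
 *		req.param = ids[n - 1];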
*/ 5890 if (last_mnt_id != 0 && last_mnt_id <= MNT_UNIQUE_ID_OFFSET) 5891 return -EINVAL; 5892 5893 kls->last_mnt_id = last_mnt_id; 5894 5895 kls->nr_mnt_ids = nr_mnt_ids; 5896 kls->kmnt_ids = kvmalloc_array(nr_mnt_ids, sizeof(*kls->kmnt_ids), 5897 GFP_KERNEL_ACCOUNT); 5898 if (!kls->kmnt_ids) 5899 return -ENOMEM; 5900 5901 kls->ns = grab_requested_mnt_ns(kreq); 5902 if (!kls->ns) 5903 return -ENOENT; 5904 5905 kls->mnt_parent_id = kreq->mnt_id; 5906 return 0; 5907 } 5908 5909 SYSCALL_DEFINE4(listmount, const struct mnt_id_req __user *, req, 5910 u64 __user *, mnt_ids, size_t, nr_mnt_ids, unsigned int, flags) 5911 { 5912 struct klistmount kls __free(klistmount_free) = {}; 5913 const size_t maxcount = 1000000; 5914 struct mnt_id_req kreq; 5915 ssize_t ret; 5916 5917 if (flags & ~LISTMOUNT_REVERSE) 5918 return -EINVAL; 5919 5920 /* 5921 * If the mount namespace really has more than 1 million mounts the 5922 * caller must iterate over the mount namespace (and reconsider their 5923 * system design...). 5924 */ 5925 if (unlikely(nr_mnt_ids > maxcount)) 5926 return -EOVERFLOW; 5927 5928 if (!access_ok(mnt_ids, nr_mnt_ids * sizeof(*mnt_ids))) 5929 return -EFAULT; 5930 5931 ret = copy_mnt_id_req(req, &kreq); 5932 if (ret) 5933 return ret; 5934 5935 ret = prepare_klistmount(&kls, &kreq, nr_mnt_ids); 5936 if (ret) 5937 return ret; 5938 5939 if (kreq.mnt_ns_id && (kls.ns != current->nsproxy->mnt_ns) && 5940 !ns_capable_noaudit(kls.ns->user_ns, CAP_SYS_ADMIN)) 5941 return -ENOENT; 5942 5943 /* 5944 * We only need to guard against mount topology changes as 5945 * listmount() doesn't care about any mount properties. 5946 */ 5947 scoped_guard(namespace_shared) 5948 ret = do_listmount(&kls, (flags & LISTMOUNT_REVERSE)); 5949 if (ret <= 0) 5950 return ret; 5951 5952 if (copy_to_user(mnt_ids, kls.kmnt_ids, ret * sizeof(*mnt_ids))) 5953 return -EFAULT; 5954 5955 return ret; 5956 } 5957 5958 struct mnt_namespace init_mnt_ns = { 5959 .ns.inum = ns_init_inum(&init_mnt_ns), 5960 .ns.ops = &mntns_operations, 5961 .user_ns = &init_user_ns, 5962 .ns.__ns_ref = REFCOUNT_INIT(1), 5963 .ns.ns_type = ns_common_type(&init_mnt_ns), 5964 .passive = REFCOUNT_INIT(1), 5965 .mounts = RB_ROOT, 5966 .poll = __WAIT_QUEUE_HEAD_INITIALIZER(init_mnt_ns.poll), 5967 }; 5968 5969 static void __init init_mount_tree(void) 5970 { 5971 struct vfsmount *mnt; 5972 struct mount *m; 5973 struct path root; 5974 5975 mnt = vfs_kern_mount(&rootfs_fs_type, 0, "rootfs", initramfs_options); 5976 if (IS_ERR(mnt)) 5977 panic("Can't create rootfs"); 5978 5979 m = real_mount(mnt); 5980 init_mnt_ns.root = m; 5981 init_mnt_ns.nr_mounts = 1; 5982 mnt_add_to_ns(&init_mnt_ns, m); 5983 init_task.nsproxy->mnt_ns = &init_mnt_ns; 5984 get_mnt_ns(&init_mnt_ns); 5985 5986 root.mnt = mnt; 5987 root.dentry = mnt->mnt_root; 5988 5989 set_fs_pwd(current->fs, &root); 5990 set_fs_root(current->fs, &root); 5991 5992 ns_tree_add(&init_mnt_ns); 5993 } 5994 5995 void __init mnt_init(void) 5996 { 5997 int err; 5998 5999 mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount), 6000 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL); 6001 6002 mount_hashtable = alloc_large_system_hash("Mount-cache", 6003 sizeof(struct hlist_head), 6004 mhash_entries, 19, 6005 HASH_ZERO, 6006 &m_hash_shift, &m_hash_mask, 0, 0); 6007 mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache", 6008 sizeof(struct hlist_head), 6009 mphash_entries, 19, 6010 HASH_ZERO, 6011 &mp_hash_shift, &mp_hash_mask, 0, 0); 6012 6013 if (!mount_hashtable || !mountpoint_hashtable) 6014 
panic("Failed to allocate mount hash table\n"); 6015 6016 kernfs_init(); 6017 6018 err = sysfs_init(); 6019 if (err) 6020 printk(KERN_WARNING "%s: sysfs_init error: %d\n", 6021 __func__, err); 6022 fs_kobj = kobject_create_and_add("fs", NULL); 6023 if (!fs_kobj) 6024 printk(KERN_WARNING "%s: kobj create error\n", __func__); 6025 shmem_init(); 6026 init_rootfs(); 6027 init_mount_tree(); 6028 } 6029 6030 void put_mnt_ns(struct mnt_namespace *ns) 6031 { 6032 if (!ns_ref_put(ns)) 6033 return; 6034 guard(namespace_excl)(); 6035 emptied_ns = ns; 6036 guard(mount_writer)(); 6037 umount_tree(ns->root, 0); 6038 } 6039 6040 struct vfsmount *kern_mount(struct file_system_type *type) 6041 { 6042 struct vfsmount *mnt; 6043 mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, NULL); 6044 if (!IS_ERR(mnt)) { 6045 /* 6046 * it is a longterm mount, don't release mnt until 6047 * we unmount before file sys is unregistered 6048 */ 6049 real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL; 6050 } 6051 return mnt; 6052 } 6053 EXPORT_SYMBOL_GPL(kern_mount); 6054 6055 void kern_unmount(struct vfsmount *mnt) 6056 { 6057 /* release long term mount so mount point can be released */ 6058 if (!IS_ERR(mnt)) { 6059 mnt_make_shortterm(mnt); 6060 synchronize_rcu(); /* yecchhh... */ 6061 mntput(mnt); 6062 } 6063 } 6064 EXPORT_SYMBOL(kern_unmount); 6065 6066 void kern_unmount_array(struct vfsmount *mnt[], unsigned int num) 6067 { 6068 unsigned int i; 6069 6070 for (i = 0; i < num; i++) 6071 mnt_make_shortterm(mnt[i]); 6072 synchronize_rcu_expedited(); 6073 for (i = 0; i < num; i++) 6074 mntput(mnt[i]); 6075 } 6076 EXPORT_SYMBOL(kern_unmount_array); 6077 6078 bool our_mnt(struct vfsmount *mnt) 6079 { 6080 return check_mnt(real_mount(mnt)); 6081 } 6082 6083 bool current_chrooted(void) 6084 { 6085 /* Does the current process have a non-standard root */ 6086 struct path fs_root __free(path_put) = {}; 6087 struct mount *root; 6088 6089 get_fs_root(current->fs, &fs_root); 6090 6091 /* Find the namespace root */ 6092 6093 guard(mount_locked_reader)(); 6094 6095 root = topmost_overmount(current->nsproxy->mnt_ns->root); 6096 6097 return fs_root.mnt != &root->mnt || !path_mounted(&fs_root); 6098 } 6099 6100 static bool mnt_already_visible(struct mnt_namespace *ns, 6101 const struct super_block *sb, 6102 int *new_mnt_flags) 6103 { 6104 int new_flags = *new_mnt_flags; 6105 struct mount *mnt, *n; 6106 6107 guard(namespace_shared)(); 6108 rbtree_postorder_for_each_entry_safe(mnt, n, &ns->mounts, mnt_node) { 6109 struct mount *child; 6110 int mnt_flags; 6111 6112 if (mnt->mnt.mnt_sb->s_type != sb->s_type) 6113 continue; 6114 6115 /* This mount is not fully visible if it's root directory 6116 * is not the root directory of the filesystem. 6117 */ 6118 if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root) 6119 continue; 6120 6121 /* A local view of the mount flags */ 6122 mnt_flags = mnt->mnt.mnt_flags; 6123 6124 /* Don't miss readonly hidden in the superblock flags */ 6125 if (sb_rdonly(mnt->mnt.mnt_sb)) 6126 mnt_flags |= MNT_LOCK_READONLY; 6127 6128 /* Verify the mount flags are equal to or more permissive 6129 * than the proposed new mount. 6130 */ 6131 if ((mnt_flags & MNT_LOCK_READONLY) && 6132 !(new_flags & MNT_READONLY)) 6133 continue; 6134 if ((mnt_flags & MNT_LOCK_ATIME) && 6135 ((mnt_flags & MNT_ATIME_MASK) != (new_flags & MNT_ATIME_MASK))) 6136 continue; 6137 6138 /* This mount is not fully visible if there are any 6139 * locked child mounts that cover anything except for 6140 * empty directories. 
bool our_mnt(struct vfsmount *mnt)
{
	return check_mnt(real_mount(mnt));
}

bool current_chrooted(void)
{
	/* Does the current process have a non-standard root */
	struct path fs_root __free(path_put) = {};
	struct mount *root;

	get_fs_root(current->fs, &fs_root);

	/* Find the namespace root */
	guard(mount_locked_reader)();

	root = topmost_overmount(current->nsproxy->mnt_ns->root);

	return fs_root.mnt != &root->mnt || !path_mounted(&fs_root);
}

static bool mnt_already_visible(struct mnt_namespace *ns,
				const struct super_block *sb,
				int *new_mnt_flags)
{
	int new_flags = *new_mnt_flags;
	struct mount *mnt, *n;

	guard(namespace_shared)();
	rbtree_postorder_for_each_entry_safe(mnt, n, &ns->mounts, mnt_node) {
		struct mount *child;
		int mnt_flags;

		if (mnt->mnt.mnt_sb->s_type != sb->s_type)
			continue;

		/* This mount is not fully visible if its root directory
		 * is not the root directory of the filesystem.
		 */
		if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
			continue;

		/* A local view of the mount flags */
		mnt_flags = mnt->mnt.mnt_flags;

		/* Don't miss readonly hidden in the superblock flags */
		if (sb_rdonly(mnt->mnt.mnt_sb))
			mnt_flags |= MNT_LOCK_READONLY;

		/* Verify the mount flags are equal to or more permissive
		 * than the proposed new mount.
		 */
		if ((mnt_flags & MNT_LOCK_READONLY) &&
		    !(new_flags & MNT_READONLY))
			continue;
		if ((mnt_flags & MNT_LOCK_ATIME) &&
		    ((mnt_flags & MNT_ATIME_MASK) != (new_flags & MNT_ATIME_MASK)))
			continue;

		/* This mount is not fully visible if there are any
		 * locked child mounts that cover anything except for
		 * empty directories.
		 */
		list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
			struct inode *inode = child->mnt_mountpoint->d_inode;
			/* Only worry about locked mounts */
			if (!(child->mnt.mnt_flags & MNT_LOCKED))
				continue;
			/* Is the directory permanently empty? */
			if (!is_empty_dir_inode(inode))
				goto next;
		}
		/* Preserve the locked attributes */
		*new_mnt_flags |= mnt_flags & (MNT_LOCK_READONLY |
					       MNT_LOCK_ATIME);
		return true;
next:	;
	}
	return false;
}

static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags)
{
	const unsigned long required_iflags = SB_I_NOEXEC | SB_I_NODEV;
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	unsigned long s_iflags;

	if (ns->user_ns == &init_user_ns)
		return false;

	/* Can this filesystem be too revealing? */
	s_iflags = sb->s_iflags;
	if (!(s_iflags & SB_I_USERNS_VISIBLE))
		return false;

	if ((s_iflags & required_iflags) != required_iflags) {
		WARN_ONCE(1, "Expected s_iflags to contain 0x%lx\n",
			  required_iflags);
		return true;
	}

	return !mnt_already_visible(ns, sb, new_mnt_flags);
}

bool mnt_may_suid(struct vfsmount *mnt)
{
	/*
	 * Foreign mounts (accessed via fchdir or through /proc
	 * symlinks) are always treated as if they are nosuid. This
	 * prevents namespaces from trusting potentially unsafe
	 * suid/sgid bits, file caps, or security labels that originate
	 * in other namespaces.
	 */
	return !(mnt->mnt_flags & MNT_NOSUID) && check_mnt(real_mount(mnt)) &&
	       current_in_userns(mnt->mnt_sb->s_user_ns);
}

static struct ns_common *mntns_get(struct task_struct *task)
{
	struct ns_common *ns = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy) {
		ns = &nsproxy->mnt_ns->ns;
		get_mnt_ns(to_mnt_ns(ns));
	}
	task_unlock(task);

	return ns;
}

static void mntns_put(struct ns_common *ns)
{
	put_mnt_ns(to_mnt_ns(ns));
}

static int mntns_install(struct nsset *nsset, struct ns_common *ns)
{
	struct nsproxy *nsproxy = nsset->nsproxy;
	struct fs_struct *fs = nsset->fs;
	struct mnt_namespace *mnt_ns = to_mnt_ns(ns), *old_mnt_ns;
	struct user_namespace *user_ns = nsset->cred->user_ns;
	struct path root;
	int err;

	if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(user_ns, CAP_SYS_CHROOT) ||
	    !ns_capable(user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	if (is_anon_ns(mnt_ns))
		return -EINVAL;

	if (fs->users != 1)
		return -EINVAL;

	get_mnt_ns(mnt_ns);
	old_mnt_ns = nsproxy->mnt_ns;
	nsproxy->mnt_ns = mnt_ns;

	/* Find the root */
	err = vfs_path_lookup(mnt_ns->root->mnt.mnt_root, &mnt_ns->root->mnt,
				"/", LOOKUP_DOWN, &root);
	if (err) {
		/* revert to old namespace */
		nsproxy->mnt_ns = old_mnt_ns;
		put_mnt_ns(mnt_ns);
		return err;
	}

	put_mnt_ns(old_mnt_ns);

	/* Update the pwd and root */
	set_fs_pwd(fs, &root);
	set_fs_root(fs, &root);

	path_put(&root);
	return 0;
}
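/*
 * Illustrative userspace sketch (not kernel code): joining another task's
 * mount namespace with setns(2), which ends up in mntns_install() above.
 * Per the checks there, the caller needs CAP_SYS_ADMIN over the target
 * namespace's owning user namespace plus CAP_SYS_CHROOT and CAP_SYS_ADMIN
 * in its own credentials' user namespace, must not share its fs_struct,
 * and cannot join an anonymous mount namespace. The target PID below is a
 * placeholder.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <sched.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/proc/12345/ns/mnt", O_RDONLY | O_CLOEXEC);
 *
 *		if (fd < 0) {
 *			perror("open");
 *			return 1;
 *		}
 *		if (setns(fd, CLONE_NEWNS) < 0) {
 *			perror("setns");
 *			return 1;
 *		}
 *		close(fd);
 *		// pwd and root now point at the joined namespace's root,
 *		// mirroring the set_fs_pwd()/set_fs_root() calls above.
 *		return 0;
 *	}
 */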
static struct user_namespace *mntns_owner(struct ns_common *ns)
{
	return to_mnt_ns(ns)->user_ns;
}

const struct proc_ns_operations mntns_operations = {
	.name		= "mnt",
	.get		= mntns_get,
	.put		= mntns_put,
	.install	= mntns_install,
	.owner		= mntns_owner,
};

#ifdef CONFIG_SYSCTL
static const struct ctl_table fs_namespace_sysctls[] = {
	{
		.procname	= "mount-max",
		.data		= &sysctl_mount_max,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ONE,
	},
};

static int __init init_fs_namespace_sysctls(void)
{
	register_sysctl_init("fs", fs_namespace_sysctls);
	return 0;
}
fs_initcall(init_fs_namespace_sysctls);

#endif /* CONFIG_SYSCTL */
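/*
 * Illustrative userspace sketch (not kernel code): the table above exposes
 * the per-namespace mount limit as /proc/sys/fs/mount-max, so it can be read
 * (or, with privilege, rewritten) like any other sysctl. Only a read is
 * shown here; writing a new value works the same way through the same file.
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/proc/sys/fs/mount-max", "r");
 *		unsigned int max;
 *
 *		if (!f || fscanf(f, "%u", &max) != 1)
 *			return 1;
 *		printf("fs.mount-max = %u\n", max);
 *		fclose(f);
 *		return 0;
 *	}
 */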