// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/namespace.c
 *
 * (C) Copyright Al Viro 2000, 2001
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 * Heavily rewritten.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/capability.h>
#include <linux/mnt_namespace.h>
#include <linux/user_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/idr.h>
#include <linux/init.h>		/* init_rootfs */
#include <linux/fs_struct.h>	/* get_fs_root et al. */
#include <linux/fsnotify.h>	/* fsnotify_vfsmount_delete */
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/proc_ns.h>
#include <linux/magic.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/task_work.h>
#include <linux/sched/task.h>
#include <uapi/linux/mount.h>
#include <linux/fs_context.h>
#include <linux/shmem_fs.h>
#include <linux/mnt_idmapping.h>
#include <linux/pidfs.h>
#include <linux/nstree.h>

#include "pnode.h"
#include "internal.h"

/* Maximum number of mounts in a mount namespace */
static unsigned int sysctl_mount_max __read_mostly = 100000;

static unsigned int m_hash_mask __ro_after_init;
static unsigned int m_hash_shift __ro_after_init;
static unsigned int mp_hash_mask __ro_after_init;
static unsigned int mp_hash_shift __ro_after_init;

static __initdata unsigned long mhash_entries;
static int __init set_mhash_entries(char *str)
{
	return kstrtoul(str, 0, &mhash_entries) == 0;
}
__setup("mhash_entries=", set_mhash_entries);

static __initdata unsigned long mphash_entries;
static int __init set_mphash_entries(char *str)
{
	return kstrtoul(str, 0, &mphash_entries) == 0;
}
__setup("mphash_entries=", set_mphash_entries);

static char * __initdata initramfs_options;
static int __init initramfs_options_setup(char *str)
{
	initramfs_options = str;
	return 1;
}

__setup("initramfs_options=", initramfs_options_setup);

static u64 event;
static DEFINE_XARRAY_FLAGS(mnt_id_xa, XA_FLAGS_ALLOC);
static DEFINE_IDA(mnt_group_ida);

/* Don't allow confusion with old 32bit mount ID */
#define MNT_UNIQUE_ID_OFFSET (1ULL << 31)
static u64 mnt_id_ctr = MNT_UNIQUE_ID_OFFSET;

static struct hlist_head *mount_hashtable __ro_after_init;
static struct hlist_head *mountpoint_hashtable __ro_after_init;
static struct kmem_cache *mnt_cache __ro_after_init;
static DECLARE_RWSEM(namespace_sem);
static HLIST_HEAD(unmounted);	/* protected by namespace_sem */
static LIST_HEAD(ex_mountpoints); /* protected by namespace_sem */
static struct mnt_namespace *emptied_ns; /* protected by namespace_sem */

static inline void namespace_lock(void);
static void namespace_unlock(void);
DEFINE_LOCK_GUARD_0(namespace_excl, namespace_lock(), namespace_unlock())
DEFINE_LOCK_GUARD_0(namespace_shared, down_read(&namespace_sem),
		    up_read(&namespace_sem))

DEFINE_FREE(mntput, struct vfsmount *, if (!IS_ERR(_T)) mntput(_T))
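/*
 * The definitions above hook into the scope-based cleanup machinery
 * from <linux/cleanup.h>: guard(namespace_excl)() takes namespace_sem
 * for write and drops it automatically when the enclosing scope ends,
 * guard(namespace_shared)() does the same for read, and a pointer
 * declared with __free(mntput) has its reference dropped on scope
 * exit.  Illustrative sketch of a hypothetical caller (not a real
 * call site in this file):
 *
 *	struct vfsmount *m __free(mntput) = fc_mount(fc);
 *
 *	if (IS_ERR(m))
 *		return PTR_ERR(m);	// IS_ERR check skips the mntput
 *	guard(namespace_excl)();
 *	// namespace_sem is held for write here
 *	return 0;			// guard unlocks, then mntput(m)
 */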
#ifdef CONFIG_FSNOTIFY
LIST_HEAD(notify_list); /* protected by namespace_sem */
#endif

enum mount_kattr_flags_t {
	MOUNT_KATTR_RECURSE		= (1 << 0),
	MOUNT_KATTR_IDMAP_REPLACE	= (1 << 1),
};

struct mount_kattr {
	unsigned int attr_set;
	unsigned int attr_clr;
	unsigned int propagation;
	unsigned int lookup_flags;
	enum mount_kattr_flags_t kflags;
	struct user_namespace *mnt_userns;
	struct mnt_idmap *mnt_idmap;
};

/* /sys/fs */
struct kobject *fs_kobj __ro_after_init;
EXPORT_SYMBOL_GPL(fs_kobj);

/*
 * vfsmount lock may be taken for read to prevent changes to the
 * vfsmount hash, i.e. during mountpoint lookups or walking back
 * up the tree.
 *
 * It should be taken for write in all cases where the vfsmount
 * tree or hash is modified or when a vfsmount structure is modified.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);

static void mnt_ns_release(struct mnt_namespace *ns)
{
	/* keep alive for {list,stat}mount() */
	if (ns && refcount_dec_and_test(&ns->passive)) {
		fsnotify_mntns_delete(ns);
		put_user_ns(ns->user_ns);
		kfree(ns);
	}
}
DEFINE_FREE(mnt_ns_release, struct mnt_namespace *,
	    if (!IS_ERR(_T)) mnt_ns_release(_T))

static void mnt_ns_release_rcu(struct rcu_head *rcu)
{
	mnt_ns_release(container_of(rcu, struct mnt_namespace, ns.ns_rcu));
}

static void mnt_ns_tree_remove(struct mnt_namespace *ns)
{
	/* remove from global mount namespace list */
	if (ns_tree_active(ns))
		ns_tree_remove(ns);

	call_rcu(&ns->ns.ns_rcu, mnt_ns_release_rcu);
}

/*
 * Look up a mount namespace by id and take a passive reference count. Taking
 * a passive reference means the mount namespace can be emptied if e.g., the
 * last task holding an active reference exits. To access the mounts of the
 * namespace the @namespace_sem must first be acquired. If the namespace has
 * already shut down before acquiring @namespace_sem, {list,stat}mount() will
 * see that the mount rbtree of the namespace is empty.
 *
 * Note that the lookup is lockless, protected by a sequence counter. We only
 * need to guard against false negatives as false positives aren't
 * possible. So if we didn't find a mount namespace and the sequence
 * counter has changed we need to retry. If the sequence counter is
 * still the same we know the search actually failed.
 */
static struct mnt_namespace *lookup_mnt_ns(u64 mnt_ns_id)
{
	struct mnt_namespace *mnt_ns;
	struct ns_common *ns;

	guard(rcu)();
	ns = ns_tree_lookup_rcu(mnt_ns_id, CLONE_NEWNS);
	if (!ns)
		return NULL;

	/*
	 * The last reference count is put with RCU delay so we can
	 * unconditionally acquire a reference here.
	 */
	mnt_ns = container_of(ns, struct mnt_namespace, ns);
	refcount_inc(&mnt_ns->passive);
	return mnt_ns;
}
static inline void lock_mount_hash(void)
{
	write_seqlock(&mount_lock);
}

static inline void unlock_mount_hash(void)
{
	write_sequnlock(&mount_lock);
}

static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> m_hash_shift);
	return &mount_hashtable[tmp & m_hash_mask];
}

static inline struct hlist_head *mp_hash(struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> mp_hash_shift);
	return &mountpoint_hashtable[tmp & mp_hash_mask];
}

static int mnt_alloc_id(struct mount *mnt)
{
	int res;

	xa_lock(&mnt_id_xa);
	res = __xa_alloc(&mnt_id_xa, &mnt->mnt_id, mnt, xa_limit_31b, GFP_KERNEL);
	if (!res)
		mnt->mnt_id_unique = ++mnt_id_ctr;
	xa_unlock(&mnt_id_xa);
	return res;
}

static void mnt_free_id(struct mount *mnt)
{
	xa_erase(&mnt_id_xa, mnt->mnt_id);
}

/*
 * Allocate a new peer group ID
 */
static int mnt_alloc_group_id(struct mount *mnt)
{
	int res = ida_alloc_min(&mnt_group_ida, 1, GFP_KERNEL);

	if (res < 0)
		return res;
	mnt->mnt_group_id = res;
	return 0;
}

/*
 * Release a peer group ID
 */
void mnt_release_group_id(struct mount *mnt)
{
	ida_free(&mnt_group_ida, mnt->mnt_group_id);
	mnt->mnt_group_id = 0;
}

/*
 * vfsmount lock must be held for read
 */
static inline void mnt_add_count(struct mount *mnt, int n)
{
#ifdef CONFIG_SMP
	this_cpu_add(mnt->mnt_pcp->mnt_count, n);
#else
	preempt_disable();
	mnt->mnt_count += n;
	preempt_enable();
#endif
}

/*
 * vfsmount lock must be held for write
 */
int mnt_get_count(struct mount *mnt)
{
#ifdef CONFIG_SMP
	int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
	}

	return count;
#else
	return mnt->mnt_count;
#endif
}
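/*
 * The two helpers above implement a distributed refcount: on SMP,
 * mnt_add_count() only touches this CPU's counter, so the fast path
 * never bounces a shared cacheline, while mnt_get_count() must sum
 * all per-CPU counters under the vfsmount write lock so the total
 * cannot be skewed by a concurrent increment/decrement pair migrating
 * between CPUs mid-sum.  The general shape of the pattern, as an
 * illustrative sketch:
 *
 *	this_cpu_add(counter, n);		// hot path, lock-free
 *
 *	lock_mount_hash();			// cold path
 *	for_each_possible_cpu(cpu)
 *		sum += *per_cpu_ptr(&counter, cpu);
 *	unlock_mount_hash();
 */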
static struct mount *alloc_vfsmnt(const char *name)
{
	struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		int err;

		err = mnt_alloc_id(mnt);
		if (err)
			goto out_free_cache;

		if (name)
			mnt->mnt_devname = kstrdup_const(name,
							 GFP_KERNEL_ACCOUNT);
		else
			mnt->mnt_devname = "none";
		if (!mnt->mnt_devname)
			goto out_free_id;

#ifdef CONFIG_SMP
		mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
		if (!mnt->mnt_pcp)
			goto out_free_devname;

		this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
#else
		mnt->mnt_count = 1;
		mnt->mnt_writers = 0;
#endif

		INIT_HLIST_NODE(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_HLIST_HEAD(&mnt->mnt_slave_list);
		INIT_HLIST_NODE(&mnt->mnt_slave);
		INIT_HLIST_NODE(&mnt->mnt_mp_list);
		INIT_HLIST_HEAD(&mnt->mnt_stuck_children);
		RB_CLEAR_NODE(&mnt->mnt_node);
		mnt->mnt.mnt_idmap = &nop_mnt_idmap;
	}
	return mnt;

#ifdef CONFIG_SMP
out_free_devname:
	kfree_const(mnt->mnt_devname);
#endif
out_free_id:
	mnt_free_id(mnt);
out_free_cache:
	kmem_cache_free(mnt_cache, mnt);
	return NULL;
}

/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/*
 * __mnt_is_readonly: check whether a mount is read-only
 * @mnt: the mount to check for its write status
 *
 * This shouldn't be used directly outside of the VFS.
 * It does not guarantee that the filesystem will stay
 * r/w, just that it is right *now*. This cannot and
 * should not be used in place of IS_RDONLY(inode).
 * mnt_want/drop_write() will _keep_ the filesystem
 * r/w.
 */
bool __mnt_is_readonly(const struct vfsmount *mnt)
{
	return (mnt->mnt_flags & MNT_READONLY) || sb_rdonly(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);

static inline void mnt_inc_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_inc(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers++;
#endif
}

static inline void mnt_dec_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_dec(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers--;
#endif
}

static unsigned int mnt_get_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
	}

	return count;
#else
	return mnt->mnt_writers;
#endif
}

static int mnt_is_readonly(const struct vfsmount *mnt)
{
	if (READ_ONCE(mnt->mnt_sb->s_readonly_remount))
		return 1;
	/*
	 * The barrier pairs with the barrier in sb_start_ro_state_change()
	 * making sure if we don't see s_readonly_remount set yet, we also will
	 * not see any superblock / mount flag changes done by remount.
	 * It also pairs with the barrier in sb_end_ro_state_change()
	 * assuring that if we see s_readonly_remount already cleared, we will
	 * see the values of superblock / mount flags updated by remount.
	 */
	smp_rmb();
	return __mnt_is_readonly(mnt);
}
/*
 * Most r/o & frozen checks on a fs are for operations that take discrete
 * amounts of time, like a write() or unlink(). We must keep track of when
 * those operations start (for permission checks) and when they end, so that we
 * can determine when writes are able to occur to a filesystem.
 */
/**
 * mnt_get_write_access - get write access to a mount without freeze protection
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (the mount is read-write) before
 * returning success. This operation does not protect against the filesystem
 * being frozen. When the write operation is finished, mnt_put_write_access()
 * must be called. This is effectively a refcount.
 */
int mnt_get_write_access(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int ret = 0;

	preempt_disable();
	mnt_inc_writers(mnt);
	/*
	 * The store done by mnt_inc_writers() must be visible before we enter
	 * the WRITE_HOLD loop below, so that the slowpath can see our
	 * incremented count after it has set WRITE_HOLD.
	 */
	smp_mb();
	might_lock(&mount_lock.lock);
	while (__test_write_hold(READ_ONCE(mnt->mnt_pprev_for_sb))) {
		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
			cpu_relax();
		} else {
			/*
			 * This prevents priority inversion if the task
			 * setting WRITE_HOLD got preempted on a remote
			 * CPU, and it prevents livelock if the task setting
			 * WRITE_HOLD has a lower priority and is bound to
			 * the same CPU as the task that is spinning here.
			 */
			preempt_enable();
			read_seqlock_excl(&mount_lock);
			read_sequnlock_excl(&mount_lock);
			preempt_disable();
		}
	}
	/*
	 * The barrier pairs with the barrier in sb_start_ro_state_change()
	 * making sure that if we see WRITE_HOLD cleared, we will also see
	 * s_readonly_remount set (or even SB_RDONLY / MNT_READONLY flags) in
	 * mnt_is_readonly() and bail in case we are racing with remount
	 * read-only.
	 */
	smp_rmb();
	if (mnt_is_readonly(m)) {
		mnt_dec_writers(mnt);
		ret = -EROFS;
	}
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(mnt_get_write_access);

/**
 * mnt_want_write - get write access to a mount
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write, filesystem
 * is not frozen) before returning success. When the write operation is
 * finished, mnt_drop_write() must be called. This is effectively a refcount.
 */
int mnt_want_write(struct vfsmount *m)
{
	int ret;

	sb_start_write(m->mnt_sb);
	ret = mnt_get_write_access(m);
	if (ret)
		sb_end_write(m->mnt_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);
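/*
 * Typical pairing for the two write-access APIs above, as an
 * illustrative sketch of a hypothetical caller:
 *
 *	err = mnt_want_write(path->mnt);	// freeze prot + writer count
 *	if (err)
 *		return err;
 *	err = do_some_write(path);		// hypothetical helper
 *	mnt_drop_write(path->mnt);		// undoes both
 *	return err;
 *
 * mnt_get_write_access()/mnt_put_write_access() form the same pair
 * minus the sb_start_write()/sb_end_write() freeze protection.
 */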
/**
 * mnt_get_write_access_file - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like mnt_get_write_access, but if @file is already open for write it
 * skips incrementing mnt_writers (since the open file already has a reference)
 * and instead only does the check for emergency r/o remounts. This must be
 * paired with mnt_put_write_access_file.
 */
int mnt_get_write_access_file(struct file *file)
{
	if (file->f_mode & FMODE_WRITER) {
		/*
		 * Superblock may have become readonly while there are still
		 * writable fd's, e.g. due to a fs error with errors=remount-ro
		 */
		if (__mnt_is_readonly(file->f_path.mnt))
			return -EROFS;
		return 0;
	}
	return mnt_get_write_access(file->f_path.mnt);
}

/**
 * mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like mnt_want_write, but if the file is already open for writing it
 * skips incrementing mnt_writers (since the open file already has a reference)
 * and instead only does the freeze protection and the check for emergency r/o
 * remounts. This must be paired with mnt_drop_write_file.
 */
int mnt_want_write_file(struct file *file)
{
	int ret;

	sb_start_write(file_inode(file)->i_sb);
	ret = mnt_get_write_access_file(file);
	if (ret)
		sb_end_write(file_inode(file)->i_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write_file);

/**
 * mnt_put_write_access - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done
 * performing writes to it.  Must be matched with
 * mnt_get_write_access() call above.
 */
void mnt_put_write_access(struct vfsmount *mnt)
{
	preempt_disable();
	mnt_dec_writers(real_mount(mnt));
	preempt_enable();
}
EXPORT_SYMBOL_GPL(mnt_put_write_access);

/**
 * mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done performing writes to it and
 * also allows filesystem to be frozen again.  Must be matched with
 * mnt_want_write() call above.
 */
void mnt_drop_write(struct vfsmount *mnt)
{
	mnt_put_write_access(mnt);
	sb_end_write(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(mnt_drop_write);

void mnt_put_write_access_file(struct file *file)
{
	if (!(file->f_mode & FMODE_WRITER))
		mnt_put_write_access(file->f_path.mnt);
}

void mnt_drop_write_file(struct file *file)
{
	mnt_put_write_access_file(file);
	sb_end_write(file_inode(file)->i_sb);
}
EXPORT_SYMBOL(mnt_drop_write_file);

/**
 * mnt_hold_writers - prevent write access to the given mount
 * @mnt: mnt to prevent write access to
 *
 * Prevents write access to @mnt if there are no active writers for @mnt.
 * This function needs to be called and return successfully before changing
 * properties of @mnt that need to remain stable for callers with write access
 * to @mnt.
 *
 * After this function has been called successfully callers must pair it with
 * a call to mnt_unhold_writers() in order to stop preventing write access to
 * @mnt.
 *
 * Context: This function expects to be called in a mount_locked_reader scope,
 * which serializes setting WRITE_HOLD.
 * Return: On success 0 is returned.
 *	   On error, -EBUSY is returned.
 */
static inline int mnt_hold_writers(struct mount *mnt)
{
	set_write_hold(mnt);
	/*
	 * After storing WRITE_HOLD, we'll read the counters. This store
	 * should be visible before we do.
	 */
	smp_mb();

	/*
	 * With writers on hold, if this value is zero, then there are
	 * definitely no active writers (although held writers may subsequently
	 * increment the count, they'll have to wait, and decrement it after
	 * seeing MNT_READONLY).
	 *
	 * It is OK to have counter incremented on one CPU and decremented on
	 * another: the sum will add up correctly. The danger would be when we
	 * sum up each counter, if we read a counter before it is incremented,
	 * but then read another CPU's count which it has been subsequently
	 * decremented from -- we would see more decrements than we should.
	 * WRITE_HOLD protects against this scenario, because
	 * mnt_want_write first increments count, then smp_mb, then spins on
	 * WRITE_HOLD, so it can't be decremented by another CPU while
	 * we're counting up here.
	 */
	if (mnt_get_writers(mnt) > 0)
		return -EBUSY;

	return 0;
}

/**
 * mnt_unhold_writers - stop preventing write access to the given mount
 * @mnt: mnt to stop preventing write access to
 *
 * Stop preventing write access to @mnt allowing callers to gain write access
 * to @mnt again.
 *
 * This function can only be called after a call to mnt_hold_writers().
 *
 * Context: This function expects to be in the same mount_locked_reader scope
 * as the matching mnt_hold_writers().
 */
static inline void mnt_unhold_writers(struct mount *mnt)
{
	if (!test_write_hold(mnt))
		return;
	/*
	 * MNT_READONLY must become visible before ~WRITE_HOLD, so writers
	 * that become unheld will see MNT_READONLY.
	 */
	smp_wmb();
	clear_write_hold(mnt);
}
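/*
 * The WRITE_HOLD handshake between mnt_hold_writers() above and
 * mnt_get_write_access(), summarized:
 *
 *	mnt_get_write_access()		mnt_hold_writers()
 *	----------------------		------------------
 *	mnt_inc_writers()		set_write_hold()
 *	smp_mb()			smp_mb()
 *	spin while WRITE_HOLD set	mnt_get_writers() > 0 -> -EBUSY
 *	smp_rmb(); mnt_is_readonly()?
 *
 * Either the writer's increment is visible to the sum (the hold side
 * returns -EBUSY), or the writer spins until the hold is released and
 * then observes MNT_READONLY and backs out with -EROFS.
 */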
static inline void mnt_del_instance(struct mount *m)
{
	struct mount **p = m->mnt_pprev_for_sb;
	struct mount *next = m->mnt_next_for_sb;

	if (next)
		next->mnt_pprev_for_sb = p;
	*p = next;
}

static inline void mnt_add_instance(struct mount *m, struct super_block *s)
{
	struct mount *first = s->s_mounts;

	if (first)
		first->mnt_pprev_for_sb = &m->mnt_next_for_sb;
	m->mnt_next_for_sb = first;
	m->mnt_pprev_for_sb = &s->s_mounts;
	s->s_mounts = m;
}

static int mnt_make_readonly(struct mount *mnt)
{
	int ret;

	ret = mnt_hold_writers(mnt);
	if (!ret)
		mnt->mnt.mnt_flags |= MNT_READONLY;
	mnt_unhold_writers(mnt);
	return ret;
}

int sb_prepare_remount_readonly(struct super_block *sb)
{
	int err = 0;

	/* Racy optimization. Recheck the counter under WRITE_HOLD */
	if (atomic_long_read(&sb->s_remove_count))
		return -EBUSY;

	guard(mount_locked_reader)();

	for (struct mount *m = sb->s_mounts; m; m = m->mnt_next_for_sb) {
		if (!(m->mnt.mnt_flags & MNT_READONLY)) {
			err = mnt_hold_writers(m);
			if (err)
				break;
		}
	}
	if (!err && atomic_long_read(&sb->s_remove_count))
		err = -EBUSY;

	if (!err)
		sb_start_ro_state_change(sb);
	for (struct mount *m = sb->s_mounts; m; m = m->mnt_next_for_sb) {
		if (test_write_hold(m))
			clear_write_hold(m);
	}

	return err;
}

static void free_vfsmnt(struct mount *mnt)
{
	mnt_idmap_put(mnt_idmap(&mnt->mnt));
	kfree_const(mnt->mnt_devname);
#ifdef CONFIG_SMP
	free_percpu(mnt->mnt_pcp);
#endif
	kmem_cache_free(mnt_cache, mnt);
}

static void delayed_free_vfsmnt(struct rcu_head *head)
{
	free_vfsmnt(container_of(head, struct mount, mnt_rcu));
}

/* call under rcu_read_lock */
int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	struct mount *mnt;
	if (read_seqretry(&mount_lock, seq))
		return 1;
	if (bastard == NULL)
		return 0;
	mnt = real_mount(bastard);
	mnt_add_count(mnt, 1);
	smp_mb();	// see mntput_no_expire() and do_umount()
	if (likely(!read_seqretry(&mount_lock, seq)))
		return 0;
	lock_mount_hash();
	if (unlikely(bastard->mnt_flags & (MNT_SYNC_UMOUNT | MNT_DOOMED))) {
		mnt_add_count(mnt, -1);
		unlock_mount_hash();
		return 1;
	}
	unlock_mount_hash();
	/* caller will mntput() */
	return -1;
}
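/*
 * Return convention of __legitimize_mnt(), relied upon by
 * legitimize_mnt() below and by the RCU pathwalk code:
 *
 *	 0 - success; if @bastard was non-NULL, a reference has been
 *	     grabbed and the mount is known to still be live
 *	 1 - raced with an update; no reference was taken and the
 *	     caller should retry its lookup
 *	-1 - a reference was grabbed, but the mount is going away;
 *	     the caller must drop it with mntput()
 */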
/* call under rcu_read_lock */
static bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	int res = __legitimize_mnt(bastard, seq);
	if (likely(!res))
		return true;
	if (unlikely(res < 0)) {
		rcu_read_unlock();
		mntput(bastard);
		rcu_read_lock();
	}
	return false;
}

/**
 * __lookup_mnt - mount hash lookup
 * @mnt: parent mount
 * @dentry: dentry of mountpoint
 *
 * If @mnt has a child mount @c mounted on @dentry, find and return it.
 * Caller must either hold the spinlock component of @mount_lock or
 * hold rcu_read_lock(), sample the seqcount component before the call
 * and recheck it afterwards.
 *
 * Return: The child of @mnt mounted on @dentry or %NULL.
 */
struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
	struct hlist_head *head = m_hash(mnt, dentry);
	struct mount *p;

	hlist_for_each_entry_rcu(p, head, mnt_hash)
		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
			return p;
	return NULL;
}

/**
 * lookup_mnt - Return the child mount mounted at given location
 * @path: location in the namespace
 *
 * Acquires and returns a new reference to mount at given location
 * or %NULL if nothing is mounted there.
 */
struct vfsmount *lookup_mnt(const struct path *path)
{
	struct mount *child_mnt;
	struct vfsmount *m;
	unsigned seq;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		child_mnt = __lookup_mnt(path->mnt, path->dentry);
		m = child_mnt ? &child_mnt->mnt : NULL;
	} while (!legitimize_mnt(m, seq));
	rcu_read_unlock();
	return m;
}
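/*
 * Typical use of lookup_mnt(), as an illustrative sketch of a
 * hypothetical caller:
 *
 *	struct vfsmount *child = lookup_mnt(&path);
 *
 *	if (child) {
 *		// reference is held; the seqlock retry against a
 *		// concurrent unmount already happened inside
 *		...
 *		mntput(child);
 *	}
 */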
/*
 * __is_local_mountpoint - Test to see if dentry is a mountpoint in the
 *			   current mount namespace.
 *
 * The common case is dentries are not mountpoints at all and that
 * test is handled inline. For the slow case when we are actually
 * dealing with a mountpoint of some kind, walk through all of the
 * mounts in the current mount namespace and test to see if the dentry
 * is a mountpoint.
 *
 * The mount_hashtable is not usable in this context because we
 * need to identify all mounts that may be in the current mount
 * namespace, not just a mount that happens to have some specified
 * parent mount.
 */
bool __is_local_mountpoint(const struct dentry *dentry)
{
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	struct mount *mnt, *n;

	guard(namespace_shared)();

	rbtree_postorder_for_each_entry_safe(mnt, n, &ns->mounts, mnt_node)
		if (mnt->mnt_mountpoint == dentry)
			return true;

	return false;
}

struct pinned_mountpoint {
	struct hlist_node node;
	struct mountpoint *mp;
	struct mount *parent;
};

static bool lookup_mountpoint(struct dentry *dentry, struct pinned_mountpoint *m)
{
	struct hlist_head *chain = mp_hash(dentry);
	struct mountpoint *mp;

	hlist_for_each_entry(mp, chain, m_hash) {
		if (mp->m_dentry == dentry) {
			hlist_add_head(&m->node, &mp->m_list);
			m->mp = mp;
			return true;
		}
	}
	return false;
}

static int get_mountpoint(struct dentry *dentry, struct pinned_mountpoint *m)
{
	struct mountpoint *mp __free(kfree) = NULL;
	bool found;
	int ret;

	if (d_mountpoint(dentry)) {
		/* might be worth a WARN_ON() */
		if (d_unlinked(dentry))
			return -ENOENT;
mountpoint:
		read_seqlock_excl(&mount_lock);
		found = lookup_mountpoint(dentry, m);
		read_sequnlock_excl(&mount_lock);
		if (found)
			return 0;
	}

	if (!mp)
		mp = kmalloc_obj(struct mountpoint);
	if (!mp)
		return -ENOMEM;

	/* Exactly one process may set d_mounted */
	ret = d_set_mounted(dentry);

	/* Someone else set d_mounted? */
	if (ret == -EBUSY)
		goto mountpoint;

	/* The dentry is not available as a mountpoint? */
	if (ret)
		return ret;

	/* Add the new mountpoint to the hash table */
	read_seqlock_excl(&mount_lock);
	mp->m_dentry = dget(dentry);
	hlist_add_head(&mp->m_hash, mp_hash(dentry));
	INIT_HLIST_HEAD(&mp->m_list);
	hlist_add_head(&m->node, &mp->m_list);
	m->mp = no_free_ptr(mp);
	read_sequnlock_excl(&mount_lock);
	return 0;
}
/*
 * vfsmount lock must be held. Additionally, the caller is responsible
 * for serializing calls for given disposal list.
 */
static void maybe_free_mountpoint(struct mountpoint *mp, struct list_head *list)
{
	if (hlist_empty(&mp->m_list)) {
		struct dentry *dentry = mp->m_dentry;
		spin_lock(&dentry->d_lock);
		dentry->d_flags &= ~DCACHE_MOUNTED;
		spin_unlock(&dentry->d_lock);
		dput_to_list(dentry, list);
		hlist_del(&mp->m_hash);
		kfree(mp);
	}
}

/*
 * locks: mount_lock [read_seqlock_excl], namespace_sem [excl]
 */
static void unpin_mountpoint(struct pinned_mountpoint *m)
{
	if (m->mp) {
		hlist_del(&m->node);
		maybe_free_mountpoint(m->mp, &ex_mountpoints);
	}
}

static inline int check_mnt(const struct mount *mnt)
{
	return mnt->mnt_ns == current->nsproxy->mnt_ns;
}

static inline bool check_anonymous_mnt(struct mount *mnt)
{
	u64 seq;

	if (!is_anon_ns(mnt->mnt_ns))
		return false;

	seq = mnt->mnt_ns->seq_origin;
	return !seq || (seq == current->nsproxy->mnt_ns->ns.ns_id);
}

/*
 * vfsmount lock must be held for write
 */
static void touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * locks: mount_lock[write_seqlock]
 */
static void __umount_mnt(struct mount *mnt, struct list_head *shrink_list)
{
	struct mountpoint *mp;
	struct mount *parent = mnt->mnt_parent;
	if (unlikely(parent->overmount == mnt))
		parent->overmount = NULL;
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	list_del_init(&mnt->mnt_child);
	hlist_del_init_rcu(&mnt->mnt_hash);
	hlist_del_init(&mnt->mnt_mp_list);
	mp = mnt->mnt_mp;
	mnt->mnt_mp = NULL;
	maybe_free_mountpoint(mp, shrink_list);
}

/*
 * locks: mount_lock[write_seqlock], namespace_sem[excl] (for ex_mountpoints)
 */
static void umount_mnt(struct mount *mnt)
{
	__umount_mnt(mnt, &ex_mountpoints);
}

/*
 * vfsmount lock must be held for write
 */
void mnt_set_mountpoint(struct mount *mnt,
			struct mountpoint *mp,
			struct mount *child_mnt)
{
	child_mnt->mnt_mountpoint = mp->m_dentry;
	child_mnt->mnt_parent = mnt;
	child_mnt->mnt_mp = mp;
	hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
}

static void make_visible(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	if (unlikely(mnt->mnt_mountpoint == parent->mnt.mnt_root))
		parent->overmount = mnt;
	hlist_add_head_rcu(&mnt->mnt_hash,
			   m_hash(&parent->mnt, mnt->mnt_mountpoint));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
}
/**
 * attach_mnt - mount a mount, attach to @mount_hashtable and parent's
 *		list of child mounts
 * @mnt: the new mount
 * @parent: the parent
 * @mp: the new mountpoint
 *
 * Mount @mnt at @mp on @parent. Then attach @mnt
 * to @parent's child mount list and to @mount_hashtable.
 *
 * Note, when make_visible() is called @mnt->mnt_parent already points
 * to the correct parent.
 *
 * Context: This function expects namespace_lock() and lock_mount_hash()
 *	    to have been acquired in that order.
 */
static void attach_mnt(struct mount *mnt, struct mount *parent,
		       struct mountpoint *mp)
{
	mnt_set_mountpoint(parent, mp, mnt);
	make_visible(mnt);
}

void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt)
{
	struct mountpoint *old_mp = mnt->mnt_mp;

	list_del_init(&mnt->mnt_child);
	hlist_del_init(&mnt->mnt_mp_list);
	hlist_del_init_rcu(&mnt->mnt_hash);

	attach_mnt(mnt, parent, mp);

	maybe_free_mountpoint(old_mp, &ex_mountpoints);
}

static inline struct mount *node_to_mount(struct rb_node *node)
{
	return node ? rb_entry(node, struct mount, mnt_node) : NULL;
}

static void mnt_add_to_ns(struct mnt_namespace *ns, struct mount *mnt)
{
	struct rb_node **link = &ns->mounts.rb_node;
	struct rb_node *parent = NULL;
	bool mnt_first_node = true, mnt_last_node = true;

	WARN_ON(mnt_ns_attached(mnt));
	mnt->mnt_ns = ns;
	while (*link) {
		parent = *link;
		if (mnt->mnt_id_unique < node_to_mount(parent)->mnt_id_unique) {
			link = &parent->rb_left;
			mnt_last_node = false;
		} else {
			link = &parent->rb_right;
			mnt_first_node = false;
		}
	}

	if (mnt_last_node)
		ns->mnt_last_node = &mnt->mnt_node;
	if (mnt_first_node)
		ns->mnt_first_node = &mnt->mnt_node;
	rb_link_node(&mnt->mnt_node, parent, link);
	rb_insert_color(&mnt->mnt_node, &ns->mounts);

	mnt_notify_add(mnt);
}

static struct mount *next_mnt(struct mount *p, struct mount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct mount, mnt_child);
}

static struct mount *skip_mnt_tree(struct mount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct mount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}

/*
 * vfsmount lock must be held for write
 */
static void commit_tree(struct mount *mnt)
{
	struct mnt_namespace *n = mnt->mnt_parent->mnt_ns;

	if (!mnt_ns_attached(mnt)) {
		for (struct mount *m = mnt; m; m = next_mnt(m, mnt))
			mnt_add_to_ns(n, m);
		n->nr_mounts += n->pending_mounts;
		n->pending_mounts = 0;
	}

	make_visible(mnt);
	touch_mnt_namespace(n);
}

static void setup_mnt(struct mount *m, struct dentry *root)
{
	struct super_block *s = root->d_sb;

	atomic_inc(&s->s_active);
	m->mnt.mnt_sb = s;
	m->mnt.mnt_root = dget(root);
	m->mnt_mountpoint = m->mnt.mnt_root;
	m->mnt_parent = m;

	guard(mount_locked_reader)();
	mnt_add_instance(m, s);
}
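/*
 * next_mnt() yields a pre-order depth-first walk of a mount tree using
 * only the mnt_child/mnt_mounts linkage, so whole-tree walks in this
 * file take the form:
 *
 *	for (p = mnt; p; p = next_mnt(p, mnt))
 *		...;
 *
 * skip_mnt_tree() returns the last mount of p's subtree in that order,
 * so assigning "p = skip_mnt_tree(p)" inside such a loop prunes the
 * whole subtree (see copy_tree() below for an example).
 */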
/**
 * vfs_create_mount - Create a mount for a configured superblock
 * @fc: The configuration context with the superblock attached
 *
 * Create a mount to an already configured superblock. If necessary, the
 * caller should invoke vfs_get_tree() before calling this.
 *
 * Note that this does not attach the mount to anything.
 */
struct vfsmount *vfs_create_mount(struct fs_context *fc)
{
	struct mount *mnt;

	if (!fc->root)
		return ERR_PTR(-EINVAL);

	mnt = alloc_vfsmnt(fc->source);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (fc->sb_flags & SB_KERNMOUNT)
		mnt->mnt.mnt_flags = MNT_INTERNAL;

	setup_mnt(mnt, fc->root);

	return &mnt->mnt;
}
EXPORT_SYMBOL(vfs_create_mount);

struct vfsmount *fc_mount(struct fs_context *fc)
{
	int err = vfs_get_tree(fc);
	if (!err) {
		up_write(&fc->root->d_sb->s_umount);
		return vfs_create_mount(fc);
	}
	return ERR_PTR(err);
}
EXPORT_SYMBOL(fc_mount);

struct vfsmount *fc_mount_longterm(struct fs_context *fc)
{
	struct vfsmount *mnt = fc_mount(fc);
	if (!IS_ERR(mnt))
		real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL;
	return mnt;
}
EXPORT_SYMBOL(fc_mount_longterm);

struct vfsmount *vfs_kern_mount(struct file_system_type *type,
				int flags, const char *name,
				void *data)
{
	struct fs_context *fc;
	struct vfsmount *mnt;
	int ret = 0;

	if (!type)
		return ERR_PTR(-EINVAL);

	fc = fs_context_for_mount(type, flags);
	if (IS_ERR(fc))
		return ERR_CAST(fc);

	if (name)
		ret = vfs_parse_fs_string(fc, "source", name);
	if (!ret)
		ret = parse_monolithic_mount_data(fc, data);
	if (!ret)
		mnt = fc_mount(fc);
	else
		mnt = ERR_PTR(ret);

	put_fs_context(fc);
	return mnt;
}
EXPORT_SYMBOL_GPL(vfs_kern_mount);
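/*
 * How a filesystem gets mounted from inside the kernel via the
 * fs_context API above, as an illustrative sketch (error handling
 * trimmed; @type is a hypothetical file_system_type):
 *
 *	struct vfsmount *m = vfs_kern_mount(type, SB_KERNMOUNT,
 *					    "none", NULL);
 *	if (IS_ERR(m))
 *		return PTR_ERR(m);
 *
 * vfs_kern_mount() is shorthand for fs_context_for_mount() +
 * parse_monolithic_mount_data() + fc_mount(), exactly as above.
 */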
static struct mount *clone_mnt(struct mount *old, struct dentry *root,
			       int flag)
{
	struct mount *mnt;
	int err;

	mnt = alloc_vfsmnt(old->mnt_devname);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	mnt->mnt.mnt_flags = READ_ONCE(old->mnt.mnt_flags) &
			     ~MNT_INTERNAL_FLAGS;

	if (flag & (CL_SLAVE | CL_PRIVATE))
		mnt->mnt_group_id = 0; /* not a peer of original */
	else
		mnt->mnt_group_id = old->mnt_group_id;

	if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
		err = mnt_alloc_group_id(mnt);
		if (err)
			goto out_free;
	}

	if (mnt->mnt_group_id)
		set_mnt_shared(mnt);

	mnt->mnt.mnt_idmap = mnt_idmap_get(mnt_idmap(&old->mnt));

	setup_mnt(mnt, root);

	if (flag & CL_PRIVATE)	// we are done with it
		return mnt;

	if (peers(mnt, old))
		list_add(&mnt->mnt_share, &old->mnt_share);

	if ((flag & CL_SLAVE) && old->mnt_group_id) {
		hlist_add_head(&mnt->mnt_slave, &old->mnt_slave_list);
		mnt->mnt_master = old;
	} else if (IS_MNT_SLAVE(old)) {
		hlist_add_behind(&mnt->mnt_slave, &old->mnt_slave);
		mnt->mnt_master = old->mnt_master;
	}
	return mnt;

out_free:
	mnt_free_id(mnt);
	free_vfsmnt(mnt);
	return ERR_PTR(err);
}

static void cleanup_mnt(struct mount *mnt)
{
	struct hlist_node *p;
	struct mount *m;
	/*
	 * The warning here probably indicates that somebody messed
	 * up a mnt_want/drop_write() pair. If this happens, the
	 * filesystem was probably unable to make r/w->r/o transitions.
	 * The locking used to deal with mnt_count decrement provides barriers,
	 * so mnt_get_writers() below is safe.
	 */
	WARN_ON(mnt_get_writers(mnt));
	if (unlikely(mnt->mnt_pins.first))
		mnt_pin_kill(mnt);
	hlist_for_each_entry_safe(m, p, &mnt->mnt_stuck_children, mnt_umount) {
		hlist_del(&m->mnt_umount);
		mntput(&m->mnt);
	}
	fsnotify_vfsmount_delete(&mnt->mnt);
	dput(mnt->mnt.mnt_root);
	deactivate_super(mnt->mnt.mnt_sb);
	mnt_free_id(mnt);
	call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt);
}

static void __cleanup_mnt(struct rcu_head *head)
{
	cleanup_mnt(container_of(head, struct mount, mnt_rcu));
}

static LLIST_HEAD(delayed_mntput_list);
static void delayed_mntput(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&delayed_mntput_list);
	struct mount *m, *t;

	llist_for_each_entry_safe(m, t, node, mnt_llist)
		cleanup_mnt(m);
}
static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);

static void noinline mntput_no_expire_slowpath(struct mount *mnt)
{
	LIST_HEAD(list);
	int count;

	VFS_BUG_ON(mnt->mnt_ns);
	lock_mount_hash();
	/*
	 * make sure that if __legitimize_mnt() has not seen us grab
	 * mount_lock, we'll see their refcount increment here.
	 */
	smp_mb();
	mnt_add_count(mnt, -1);
	count = mnt_get_count(mnt);
	if (count != 0) {
		WARN_ON(count < 0);
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) {
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	mnt->mnt.mnt_flags |= MNT_DOOMED;
	rcu_read_unlock();

	mnt_del_instance(mnt);
	if (unlikely(!list_empty(&mnt->mnt_expire)))
		list_del(&mnt->mnt_expire);

	if (unlikely(!list_empty(&mnt->mnt_mounts))) {
		struct mount *p, *tmp;
		list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
			__umount_mnt(p, &list);
			hlist_add_head(&p->mnt_umount, &mnt->mnt_stuck_children);
		}
	}
	unlock_mount_hash();
	shrink_dentry_list(&list);

	if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
		struct task_struct *task = current;
		if (likely(!(task->flags & PF_KTHREAD))) {
			init_task_work(&mnt->mnt_rcu, __cleanup_mnt);
			if (!task_work_add(task, &mnt->mnt_rcu, TWA_RESUME))
				return;
		}
		if (llist_add(&mnt->mnt_llist, &delayed_mntput_list))
			schedule_delayed_work(&delayed_mntput_work, 1);
		return;
	}
	cleanup_mnt(mnt);
}
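/*
 * Why the slowpath above defers the final cleanup: cleanup_mnt() can
 * block (deactivate_super() takes s_umount and may write data back),
 * but the last mntput() may happen in contexts that must not sleep.
 * For ordinary tasks the work is queued as task_work and runs on the
 * way back to userspace; for kernel threads it is punted to the
 * delayed_mntput() workqueue instead.
 */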
static void mntput_no_expire(struct mount *mnt)
{
	rcu_read_lock();
	if (likely(READ_ONCE(mnt->mnt_ns))) {
		/*
		 * Since we don't do lock_mount_hash() here,
		 * ->mnt_ns can change under us. However, if it's
		 * non-NULL, then there's a reference that won't
		 * be dropped until after an RCU delay done after
		 * turning ->mnt_ns NULL. So if we observe it
		 * non-NULL under rcu_read_lock(), the reference
		 * we are dropping is not the final one.
		 */
		mnt_add_count(mnt, -1);
		rcu_read_unlock();
		return;
	}
	mntput_no_expire_slowpath(mnt);
}

void mntput(struct vfsmount *mnt)
{
	if (mnt) {
		struct mount *m = real_mount(mnt);
		/* avoid cacheline pingpong */
		if (unlikely(m->mnt_expiry_mark))
			WRITE_ONCE(m->mnt_expiry_mark, 0);
		mntput_no_expire(m);
	}
}
EXPORT_SYMBOL(mntput);

struct vfsmount *mntget(struct vfsmount *mnt)
{
	if (mnt)
		mnt_add_count(real_mount(mnt), 1);
	return mnt;
}
EXPORT_SYMBOL(mntget);

/*
 * Make a mount point inaccessible to new lookups.
 * Because there may still be current users, the caller MUST WAIT
 * for an RCU grace period before destroying the mount point.
 */
void mnt_make_shortterm(struct vfsmount *mnt)
{
	if (mnt)
		real_mount(mnt)->mnt_ns = NULL;
}

/**
 * path_is_mountpoint() - Check if path is a mount in the current namespace.
 * @path: path to check
 *
 * d_mountpoint() can only be used reliably to establish if a dentry is
 * not mounted in any namespace and that common case is handled inline.
 * d_mountpoint() isn't aware of the possibility there may be multiple
 * mounts using a given dentry in a different namespace. This function
 * checks if the passed in path is a mountpoint rather than the dentry
 * alone.
 */
bool path_is_mountpoint(const struct path *path)
{
	unsigned seq;
	bool res;

	if (!d_mountpoint(path->dentry))
		return false;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		res = __path_is_mountpoint(path);
	} while (read_seqretry(&mount_lock, seq));
	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL(path_is_mountpoint);

struct vfsmount *mnt_clone_internal(const struct path *path)
{
	struct mount *p;
	p = clone_mnt(real_mount(path->mnt), path->dentry, CL_PRIVATE);
	if (IS_ERR(p))
		return ERR_CAST(p);
	p->mnt.mnt_flags |= MNT_INTERNAL;
	return &p->mnt;
}

/*
 * Returns the mount which either has the specified mnt_id, or has the next
 * smallest id after the specified one.
 */
static struct mount *mnt_find_id_at(struct mnt_namespace *ns, u64 mnt_id)
{
	struct rb_node *node = ns->mounts.rb_node;
	struct mount *ret = NULL;

	while (node) {
		struct mount *m = node_to_mount(node);

		if (mnt_id <= m->mnt_id_unique) {
			ret = node_to_mount(node);
			if (mnt_id == m->mnt_id_unique)
				break;
			node = node->rb_left;
		} else {
			node = node->rb_right;
		}
	}
	return ret;
}

/*
 * Returns the mount which either has the specified mnt_id, or has the next
 * smaller id before the specified one.
 */
static struct mount *mnt_find_id_at_reverse(struct mnt_namespace *ns, u64 mnt_id)
{
	struct rb_node *node = ns->mounts.rb_node;
	struct mount *ret = NULL;

	while (node) {
		struct mount *m = node_to_mount(node);

		if (mnt_id >= m->mnt_id_unique) {
			ret = node_to_mount(node);
			if (mnt_id == m->mnt_id_unique)
				break;
			node = node->rb_right;
		} else {
			node = node->rb_left;
		}
	}
	return ret;
}
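/*
 * The two lookups above allow an iteration over ns->mounts to be
 * resumed by 64-bit mount ID instead of by pointer, so the rbtree may
 * change between calls. Illustrative sketch of a forward scan:
 *
 *	mnt = mnt_find_id_at(ns, last_id);
 *	while (mnt) {
 *		last_id = mnt->mnt_id_unique + 1;
 *		mnt = node_to_mount(rb_next(&mnt->mnt_node));
 *	}
 *
 * m_start()/m_next() below use exactly this scheme.
 */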
#ifdef CONFIG_PROC_FS

/* iterator; we want it to have access to namespace_sem, thus here... */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_mounts *p = m->private;
	struct mount *mnt;

	down_read(&namespace_sem);

	mnt = mnt_find_id_at(p->ns, *pos);
	if (mnt)
		*pos = mnt->mnt_id_unique;
	return mnt;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct mount *mnt = v;
	struct rb_node *node = rb_next(&mnt->mnt_node);

	if (node) {
		struct mount *next = node_to_mount(node);
		*pos = next->mnt_id_unique;
		return next;
	}

	/*
	 * No more mounts. Set pos past the current mount's ID so that if
	 * iteration restarts, mnt_find_id_at() returns NULL.
	 */
	*pos = mnt->mnt_id_unique + 1;
	return NULL;
}

static void m_stop(struct seq_file *m, void *v)
{
	up_read(&namespace_sem);
}

static int m_show(struct seq_file *m, void *v)
{
	struct proc_mounts *p = m->private;
	struct mount *r = v;
	return p->show(m, &r->mnt);
}

const struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= m_show,
};

#endif /* CONFIG_PROC_FS */

/**
 * may_umount_tree - check if a mount tree is busy
 * @m: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	bool busy = false;

	/* write lock needed for mnt_get_count */
	lock_mount_hash();
	for (struct mount *p = mnt; p; p = next_mnt(p, mnt)) {
		if (mnt_get_count(p) > (p == mnt ? 2 : 1)) {
			busy = true;
			break;
		}
	}
	unlock_mount_hash();

	return !busy;
}

EXPORT_SYMBOL(may_umount_tree);

/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts. If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	int ret = 1;
	down_read(&namespace_sem);
	lock_mount_hash();
	if (propagate_mount_busy(real_mount(mnt), 2))
		ret = 0;
	unlock_mount_hash();
	up_read(&namespace_sem);
	return ret;
}

EXPORT_SYMBOL(may_umount);
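/*
 * On the reference thresholds used above: an attached mount has a
 * baseline count of 1, and the root of the tree being checked is
 * additionally pinned by the caller's own reference - hence "busy"
 * means more than 2 references on the root but more than 1 on any
 * child, and propagate_mount_busy() is likewise told to tolerate 2.
 */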
#ifdef CONFIG_FSNOTIFY
static void mnt_notify(struct mount *p)
{
	if (!p->prev_ns && p->mnt_ns) {
		fsnotify_mnt_attach(p->mnt_ns, &p->mnt);
	} else if (p->prev_ns && !p->mnt_ns) {
		fsnotify_mnt_detach(p->prev_ns, &p->mnt);
	} else if (p->prev_ns == p->mnt_ns) {
		fsnotify_mnt_move(p->mnt_ns, &p->mnt);
	} else {
		fsnotify_mnt_detach(p->prev_ns, &p->mnt);
		fsnotify_mnt_attach(p->mnt_ns, &p->mnt);
	}
	p->prev_ns = p->mnt_ns;
}

static void notify_mnt_list(void)
{
	struct mount *m, *tmp;
	/*
	 * Notify about mounts that were added/reparented/detached/remain
	 * connected after unmount.
	 */
	list_for_each_entry_safe(m, tmp, &notify_list, to_notify) {
		mnt_notify(m);
		list_del_init(&m->to_notify);
	}
}

static bool need_notify_mnt_list(void)
{
	return !list_empty(&notify_list);
}
#else
static void notify_mnt_list(void)
{
}

static bool need_notify_mnt_list(void)
{
	return false;
}
#endif

static void free_mnt_ns(struct mnt_namespace *);
static void namespace_unlock(void)
{
	struct hlist_head head;
	struct hlist_node *p;
	struct mount *m;
	struct mnt_namespace *ns = emptied_ns;
	LIST_HEAD(list);

	hlist_move_list(&unmounted, &head);
	list_splice_init(&ex_mountpoints, &list);
	emptied_ns = NULL;

	if (need_notify_mnt_list()) {
		/*
		 * No point blocking out concurrent readers while notifications
		 * are sent. This will also allow statmount()/listmount() to run
		 * concurrently.
		 */
		downgrade_write(&namespace_sem);
		notify_mnt_list();
		up_read(&namespace_sem);
	} else {
		up_write(&namespace_sem);
	}
	if (unlikely(ns)) {
		/* Make sure we notice when we leak mounts. */
		VFS_WARN_ON_ONCE(!mnt_ns_empty(ns));
		free_mnt_ns(ns);
	}

	shrink_dentry_list(&list);

	if (likely(hlist_empty(&head)))
		return;

	synchronize_rcu_expedited();

	hlist_for_each_entry_safe(m, p, &head, mnt_umount) {
		hlist_del(&m->mnt_umount);
		mntput(&m->mnt);
	}
}

static inline void namespace_lock(void)
{
	down_write(&namespace_sem);
}

enum umount_tree_flags {
	UMOUNT_SYNC = 1,
	UMOUNT_PROPAGATE = 2,
	UMOUNT_CONNECTED = 4,
};

static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how)
{
	/* Leaving mounts connected is only valid for lazy umounts */
	if (how & UMOUNT_SYNC)
		return true;

	/* A mount without a parent has nothing to be connected to */
	if (!mnt_has_parent(mnt))
		return true;

	/* Because the reference counting rules change when mounts are
	 * unmounted and connected, umounted mounts may not be
	 * connected to mounted mounts.
	 */
	if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT))
		return true;

	/* Has it been requested that the mount remain connected? */
	if (how & UMOUNT_CONNECTED)
		return false;

	/* Is the mount locked such that it needs to remain connected? */
	if (IS_MNT_LOCKED(mnt))
		return false;

	/* By default disconnect the mount */
	return true;
}
/*
 * mount_lock must be held
 * namespace_sem must be held for write
 */
static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
{
	LIST_HEAD(tmp_list);
	struct mount *p;

	if (how & UMOUNT_PROPAGATE)
		propagate_mount_unlock(mnt);

	/* Gather the mounts to umount */
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		p->mnt.mnt_flags |= MNT_UMOUNT;
		if (mnt_ns_attached(p))
			move_from_ns(p);
		list_add_tail(&p->mnt_list, &tmp_list);
	}

	/* Hide the mounts from mnt_mounts */
	list_for_each_entry(p, &tmp_list, mnt_list) {
		list_del_init(&p->mnt_child);
	}

	/* Add propagated mounts to the tmp_list */
	if (how & UMOUNT_PROPAGATE)
		propagate_umount(&tmp_list);

	bulk_make_private(&tmp_list);

	while (!list_empty(&tmp_list)) {
		struct mnt_namespace *ns;
		bool disconnect;
		p = list_first_entry(&tmp_list, struct mount, mnt_list);
		list_del_init(&p->mnt_expire);
		list_del_init(&p->mnt_list);
		ns = p->mnt_ns;
		if (ns) {
			ns->nr_mounts--;
			__touch_mnt_namespace(ns);
		}
		p->mnt_ns = NULL;
		if (how & UMOUNT_SYNC)
			p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;

		disconnect = disconnect_mount(p, how);
		if (mnt_has_parent(p)) {
			if (!disconnect) {
				/* Don't forget about p */
				list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
			} else {
				umount_mnt(p);
			}
		}
		if (disconnect)
			hlist_add_head(&p->mnt_umount, &unmounted);

		/*
		 * At this point p->mnt_ns is NULL, notification will be queued
		 * only if
		 *
		 * - p->prev_ns is non-NULL *and*
		 * - p->prev_ns->n_fsnotify_marks is non-NULL
		 *
		 * This will preclude queuing the mount if this is a cleanup
		 * after a failed copy_tree() or destruction of an anonymous
		 * namespace, etc.
		 */
		mnt_notify_add(p);
	}
}

static void shrink_submounts(struct mount *mnt);

static int do_umount_root(struct super_block *sb)
{
	int ret = 0;

	down_write(&sb->s_umount);
	if (!sb_rdonly(sb)) {
		struct fs_context *fc;

		fc = fs_context_for_reconfigure(sb->s_root, SB_RDONLY,
						SB_RDONLY);
		if (IS_ERR(fc)) {
			ret = PTR_ERR(fc);
		} else {
			ret = parse_monolithic_mount_data(fc, NULL);
			if (!ret)
				ret = reconfigure_super(fc);
			put_fs_context(fc);
		}
	}
	up_write(&sb->s_umount);
	return ret;
}
static int do_umount(struct mount *mnt, int flags)
{
	struct super_block *sb = mnt->mnt.mnt_sb;
	int retval;

	retval = security_sb_umount(&mnt->mnt, flags);
	if (retval)
		return retval;

	/*
	 * Allow userspace to request a mountpoint be expired rather than
	 * unmounting unconditionally. Unmount only happens if:
	 *  (1) the mark is already set (the mark is cleared by mntput())
	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
	 */
	if (flags & MNT_EXPIRE) {
		if (&mnt->mnt == current->fs->root.mnt ||
		    flags & (MNT_FORCE | MNT_DETACH))
			return -EINVAL;

		/*
		 * probably don't strictly need the lock here if we examined
		 * all race cases, but it's a slowpath.
		 */
		lock_mount_hash();
		if (!list_empty(&mnt->mnt_mounts) || mnt_get_count(mnt) != 2) {
			unlock_mount_hash();
			return -EBUSY;
		}
		unlock_mount_hash();

		if (!xchg(&mnt->mnt_expiry_mark, 1))
			return -EAGAIN;
	}

	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things. In the Unix tradition of
	 * 'Gee, that's tricky, let's do it in userspace' the umount_begin
	 * might fail to complete on the first run through as other tasks
	 * must return, and the like. That's for the mount program to worry
	 * about for the moment.
	 */

	if (flags & MNT_FORCE && sb->s_op->umount_begin) {
		sb->s_op->umount_begin(sb);
	}

	/*
	 * No sense in grabbing the lock for this test, but the test itself
	 * looks somewhat bogus. Suggestions for better replacement?
	 * Ho-hum... In principle, we might treat that as umount + switch
	 * to rootfs. GC would eventually take care of the old vfsmount.
	 * Actually it makes sense, especially if rootfs would contain a
	 * /reboot - static binary that would close all descriptors and
	 * call reboot(2). Then init(8) could umount root and exec /reboot.
	 */
	if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
		/*
		 * Special case for "unmounting" root ...
		 * we just try to remount it readonly.
		 */
		if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
			return -EPERM;
		return do_umount_root(sb);
	}

	namespace_lock();
	lock_mount_hash();

	/* Repeat the earlier racy checks, now that we are holding the locks */
	retval = -EINVAL;
	if (!check_mnt(mnt))
		goto out;

	if (mnt->mnt.mnt_flags & MNT_LOCKED)
		goto out;

	if (!mnt_has_parent(mnt)) /* not the absolute root */
		goto out;

	event++;
	if (flags & MNT_DETACH) {
		umount_tree(mnt, UMOUNT_PROPAGATE);
		retval = 0;
	} else {
		smp_mb(); // paired with __legitimize_mnt()
		shrink_submounts(mnt);
		retval = -EBUSY;
		if (!propagate_mount_busy(mnt, 2)) {
			umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
			retval = 0;
		}
	}
out:
	unlock_mount_hash();
	namespace_unlock();
	return retval;
}
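/*
 * Flag semantics implemented by do_umount() above, in brief:
 *
 *	MNT_DETACH - lazy umount: detach the tree from the namespace
 *		     now and let remaining references drain later;
 *		     never fails with -EBUSY.
 *	MNT_EXPIRE - unmount only if the expiry mark was already set
 *		     and nothing but the parent and the caller hold
 *		     the mount.
 *	MNT_FORCE  - ask the filesystem (->umount_begin) to abort
 *		     in-flight operations before checking busyness.
 */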
2001 */ 2002 bool may_mount(void) 2003 { 2004 return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN); 2005 } 2006 2007 static void warn_mandlock(void) 2008 { 2009 pr_warn_once("=======================================================\n" 2010 "WARNING: The mand mount option has been deprecated\n" 2011 " and is ignored by this kernel. Remove the mand\n" 2012 " option from the mount to silence this warning.\n" 2013 "=======================================================\n"); 2014 } 2015 2016 static int can_umount(const struct path *path, int flags) 2017 { 2018 struct mount *mnt = real_mount(path->mnt); 2019 struct super_block *sb = path->dentry->d_sb; 2020 2021 if (!may_mount()) 2022 return -EPERM; 2023 if (!path_mounted(path)) 2024 return -EINVAL; 2025 if (!check_mnt(mnt)) 2026 return -EINVAL; 2027 if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */ 2028 return -EINVAL; 2029 if (flags & MNT_FORCE && !ns_capable(sb->s_user_ns, CAP_SYS_ADMIN)) 2030 return -EPERM; 2031 return 0; 2032 } 2033 2034 // caller is responsible for flags being sane 2035 int path_umount(const struct path *path, int flags) 2036 { 2037 struct mount *mnt = real_mount(path->mnt); 2038 int ret; 2039 2040 ret = can_umount(path, flags); 2041 if (!ret) 2042 ret = do_umount(mnt, flags); 2043 2044 /* we mustn't call path_put() as that would clear mnt_expiry_mark */ 2045 dput(path->dentry); 2046 mntput_no_expire(mnt); 2047 return ret; 2048 } 2049 2050 static int ksys_umount(char __user *name, int flags) 2051 { 2052 int lookup_flags = LOOKUP_MOUNTPOINT; 2053 struct path path; 2054 int ret; 2055 2056 // basic validity checks done first 2057 if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW)) 2058 return -EINVAL; 2059 2060 if (!(flags & UMOUNT_NOFOLLOW)) 2061 lookup_flags |= LOOKUP_FOLLOW; 2062 ret = user_path_at(AT_FDCWD, name, lookup_flags, &path); 2063 if (ret) 2064 return ret; 2065 return path_umount(&path, flags); 2066 } 2067 2068 SYSCALL_DEFINE2(umount, char __user *, name, int, flags) 2069 { 2070 return ksys_umount(name, flags); 2071 } 2072 2073 #ifdef __ARCH_WANT_SYS_OLDUMOUNT 2074 2075 /* 2076 * The 2.0 compatible umount. No flags. 2077 */ 2078 SYSCALL_DEFINE1(oldumount, char __user *, name) 2079 { 2080 return ksys_umount(name, 0); 2081 } 2082 2083 #endif 2084 2085 static bool is_mnt_ns_file(struct dentry *dentry) 2086 { 2087 struct ns_common *ns; 2088 2089 /* Is this a proxy for a mount namespace? */ 2090 if (dentry->d_op != &ns_dentry_operations) 2091 return false; 2092 2093 ns = d_inode(dentry)->i_private; 2094 2095 return ns->ops == &mntns_operations; 2096 } 2097 2098 struct ns_common *from_mnt_ns(struct mnt_namespace *mnt) 2099 { 2100 return &mnt->ns; 2101 } 2102 2103 struct mnt_namespace *get_sequential_mnt_ns(struct mnt_namespace *mntns, bool previous) 2104 { 2105 struct ns_common *ns; 2106 2107 guard(rcu)(); 2108 2109 for (;;) { 2110 ns = ns_tree_adjoined_rcu(mntns, previous); 2111 if (IS_ERR(ns)) 2112 return ERR_CAST(ns); 2113 2114 mntns = to_mnt_ns(ns); 2115 2116 /* 2117 * The last passive reference count is put with RCU 2118 * delay so accessing the mount namespace is not just 2119 * safe but all relevant members are still valid. 2120 */ 2121 if (!ns_capable_noaudit(mntns->user_ns, CAP_SYS_ADMIN)) 2122 continue; 2123 2124 /* 2125 * We need an active reference count as we're persisting 2126 * the mount namespace and it might already be on its 2127 * deathbed.
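 * If ns_ref_get() fails the namespace is already dying, so skip it
 * and move on to the next namespace in the tree.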
2128 */ 2129 if (!ns_ref_get(mntns)) 2130 continue; 2131 2132 return mntns; 2133 } 2134 } 2135 2136 struct mnt_namespace *mnt_ns_from_dentry(struct dentry *dentry) 2137 { 2138 if (!is_mnt_ns_file(dentry)) 2139 return NULL; 2140 2141 return to_mnt_ns(get_proc_ns(dentry->d_inode)); 2142 } 2143 2144 static bool mnt_ns_loop(struct dentry *dentry) 2145 { 2146 /* Could bind mounting the mount namespace inode cause a 2147 * mount namespace loop? 2148 */ 2149 struct mnt_namespace *mnt_ns = mnt_ns_from_dentry(dentry); 2150 2151 if (!mnt_ns) 2152 return false; 2153 2154 return current->nsproxy->mnt_ns->ns.ns_id >= mnt_ns->ns.ns_id; 2155 } 2156 2157 struct mount *copy_tree(struct mount *src_root, struct dentry *dentry, 2158 int flag) 2159 { 2160 struct mount *res, *src_parent, *src_root_child, *src_mnt, 2161 *dst_parent, *dst_mnt; 2162 2163 if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(src_root)) 2164 return ERR_PTR(-EINVAL); 2165 2166 if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry)) 2167 return ERR_PTR(-EINVAL); 2168 2169 res = dst_mnt = clone_mnt(src_root, dentry, flag); 2170 if (IS_ERR(dst_mnt)) 2171 return dst_mnt; 2172 2173 src_parent = src_root; 2174 2175 list_for_each_entry(src_root_child, &src_root->mnt_mounts, mnt_child) { 2176 if (!is_subdir(src_root_child->mnt_mountpoint, dentry)) 2177 continue; 2178 2179 for (src_mnt = src_root_child; src_mnt; 2180 src_mnt = next_mnt(src_mnt, src_root_child)) { 2181 if (!(flag & CL_COPY_UNBINDABLE) && 2182 IS_MNT_UNBINDABLE(src_mnt)) { 2183 if (src_mnt->mnt.mnt_flags & MNT_LOCKED) { 2184 /* Both unbindable and locked. */ 2185 dst_mnt = ERR_PTR(-EPERM); 2186 goto out; 2187 } else { 2188 src_mnt = skip_mnt_tree(src_mnt); 2189 continue; 2190 } 2191 } 2192 if (!(flag & CL_COPY_MNT_NS_FILE) && 2193 is_mnt_ns_file(src_mnt->mnt.mnt_root)) { 2194 src_mnt = skip_mnt_tree(src_mnt); 2195 continue; 2196 } 2197 while (src_parent != src_mnt->mnt_parent) { 2198 src_parent = src_parent->mnt_parent; 2199 dst_mnt = dst_mnt->mnt_parent; 2200 } 2201 2202 src_parent = src_mnt; 2203 dst_parent = dst_mnt; 2204 dst_mnt = clone_mnt(src_mnt, src_mnt->mnt.mnt_root, flag); 2205 if (IS_ERR(dst_mnt)) 2206 goto out; 2207 lock_mount_hash(); 2208 if (src_mnt->mnt.mnt_flags & MNT_LOCKED) 2209 dst_mnt->mnt.mnt_flags |= MNT_LOCKED; 2210 if (unlikely(flag & CL_EXPIRE)) { 2211 /* stick the duplicate mount on the same expiry 2212 * list as the original if that was on one */ 2213 if (!list_empty(&src_mnt->mnt_expire)) 2214 list_add(&dst_mnt->mnt_expire, 2215 &src_mnt->mnt_expire); 2216 } 2217 attach_mnt(dst_mnt, dst_parent, src_parent->mnt_mp); 2218 unlock_mount_hash(); 2219 } 2220 } 2221 return res; 2222 2223 out: 2224 if (res) { 2225 lock_mount_hash(); 2226 umount_tree(res, UMOUNT_SYNC); 2227 unlock_mount_hash(); 2228 } 2229 return dst_mnt; 2230 } 2231 2232 static inline bool extend_array(struct path **res, struct path **to_free, 2233 unsigned n, unsigned *count, unsigned new_count) 2234 { 2235 struct path *p; 2236 2237 if (likely(n < *count)) 2238 return true; 2239 p = kmalloc_objs(struct path, new_count); 2240 if (p && *count) 2241 memcpy(p, *res, *count * sizeof(struct path)); 2242 *count = new_count; 2243 kfree(*to_free); 2244 *to_free = *res = p; 2245 return p; 2246 } 2247 2248 const struct path *collect_paths(const struct path *path, 2249 struct path *prealloc, unsigned count) 2250 { 2251 struct mount *root = real_mount(path->mnt); 2252 struct mount *child; 2253 struct path *res = prealloc, *to_free = NULL; 2254 unsigned n = 0; 2255 2256 guard(namespace_shared)(); 
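/*
 * With namespace_sem held shared the subtree below @path is stable.
 * Gather a NULL-terminated array of {mnt, dentry} pairs, growing the
 * buffer geometrically via extend_array() and grabbing a reference on
 * each entry before returning; callers drop them again via
 * drop_collected_paths().
 */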
2257 2258 if (!check_mnt(root)) 2259 return ERR_PTR(-EINVAL); 2260 if (!extend_array(&res, &to_free, 0, &count, 32)) 2261 return ERR_PTR(-ENOMEM); 2262 res[n++] = *path; 2263 list_for_each_entry(child, &root->mnt_mounts, mnt_child) { 2264 if (!is_subdir(child->mnt_mountpoint, path->dentry)) 2265 continue; 2266 for (struct mount *m = child; m; m = next_mnt(m, child)) { 2267 if (!extend_array(&res, &to_free, n, &count, 2 * count)) 2268 return ERR_PTR(-ENOMEM); 2269 res[n].mnt = &m->mnt; 2270 res[n].dentry = m->mnt.mnt_root; 2271 n++; 2272 } 2273 } 2274 if (!extend_array(&res, &to_free, n, &count, count + 1)) 2275 return ERR_PTR(-ENOMEM); 2276 memset(res + n, 0, (count - n) * sizeof(struct path)); 2277 for (struct path *p = res; p->mnt; p++) 2278 path_get(p); 2279 return res; 2280 } 2281 2282 void drop_collected_paths(const struct path *paths, const struct path *prealloc) 2283 { 2284 for (const struct path *p = paths; p->mnt; p++) 2285 path_put(p); 2286 if (paths != prealloc) 2287 kfree(paths); 2288 } 2289 2290 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *, bool); 2291 2292 void dissolve_on_fput(struct vfsmount *mnt) 2293 { 2294 struct mount *m = real_mount(mnt); 2295 2296 /* 2297 * m used to be the root of anon namespace; if it still is one, 2298 * we need to dissolve the mount tree and free that namespace. 2299 * Let's try to avoid taking namespace_sem if we can determine 2300 * that there's nothing to do without it - rcu_read_lock() is 2301 * enough to make anon_ns_root() memory-safe and once m has 2302 * left its namespace, it's no longer our concern, since it will 2303 * never become a root of anon ns again. 2304 */ 2305 2306 scoped_guard(rcu) { 2307 if (!anon_ns_root(m)) 2308 return; 2309 } 2310 2311 scoped_guard(namespace_excl) { 2312 if (!anon_ns_root(m)) 2313 return; 2314 2315 emptied_ns = m->mnt_ns; 2316 lock_mount_hash(); 2317 umount_tree(m, UMOUNT_CONNECTED); 2318 unlock_mount_hash(); 2319 } 2320 } 2321 2322 /* locks: namespace_shared && pinned(mnt) || mount_locked_reader */ 2323 static bool __has_locked_children(struct mount *mnt, struct dentry *dentry) 2324 { 2325 struct mount *child; 2326 2327 list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) { 2328 if (!is_subdir(child->mnt_mountpoint, dentry)) 2329 continue; 2330 2331 if (child->mnt.mnt_flags & MNT_LOCKED) 2332 return true; 2333 } 2334 return false; 2335 } 2336 2337 bool has_locked_children(struct mount *mnt, struct dentry *dentry) 2338 { 2339 guard(mount_locked_reader)(); 2340 return __has_locked_children(mnt, dentry); 2341 } 2342 2343 /* 2344 * Check that there aren't references to earlier/same mount namespaces in the 2345 * specified subtree. Such references can act as pins for mount namespaces 2346 * that aren't checked by the mount-cycle checking code, thereby allowing 2347 * cycles to be made. 2348 * 2349 * locks: mount_locked_reader || namespace_shared && pinned(subtree) 2350 */ 2351 static bool check_for_nsfs_mounts(struct mount *subtree) 2352 { 2353 for (struct mount *p = subtree; p; p = next_mnt(p, subtree)) 2354 if (mnt_ns_loop(p->mnt.mnt_root)) 2355 return false; 2356 return true; 2357 } 2358 2359 /** 2360 * clone_private_mount - create a private clone of a path 2361 * @path: path to clone 2362 * 2363 * This creates a new vfsmount, which will be the clone of @path. The new mount 2364 * will not be attached anywhere in the namespace and will be private (i.e. 2365 * changes to the originating mount won't be propagated into this). 
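 * In-kernel users such as overlayfs rely on this to get private
 * clones of the layer mounts.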
2366 * 2367 * This assumes caller has called or done the equivalent of may_mount(). 2368 * 2369 * Release with mntput(). 2370 */ 2371 struct vfsmount *clone_private_mount(const struct path *path) 2372 { 2373 struct mount *old_mnt = real_mount(path->mnt); 2374 struct mount *new_mnt; 2375 2376 guard(namespace_shared)(); 2377 2378 if (IS_MNT_UNBINDABLE(old_mnt)) 2379 return ERR_PTR(-EINVAL); 2380 2381 /* 2382 * Make sure the source mount is acceptable. 2383 * Anything mounted in our mount namespace is allowed. 2384 * Otherwise, it must be the root of an anonymous mount 2385 * namespace, and we need to make sure no namespace 2386 * loops get created. 2387 */ 2388 if (!check_mnt(old_mnt)) { 2389 if (!anon_ns_root(old_mnt)) 2390 return ERR_PTR(-EINVAL); 2391 2392 if (!check_for_nsfs_mounts(old_mnt)) 2393 return ERR_PTR(-EINVAL); 2394 } 2395 2396 if (!ns_capable(old_mnt->mnt_ns->user_ns, CAP_SYS_ADMIN)) 2397 return ERR_PTR(-EPERM); 2398 2399 if (__has_locked_children(old_mnt, path->dentry)) 2400 return ERR_PTR(-EINVAL); 2401 2402 new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE); 2403 if (IS_ERR(new_mnt)) 2404 return ERR_CAST(new_mnt); 2405 2406 /* Longterm mount to be removed by kern_unmount*() */ 2407 new_mnt->mnt_ns = MNT_NS_INTERNAL; 2408 return &new_mnt->mnt; 2409 } 2410 EXPORT_SYMBOL_GPL(clone_private_mount); 2411 2412 static void lock_mnt_tree(struct mount *mnt) 2413 { 2414 struct mount *p; 2415 2416 for (p = mnt; p; p = next_mnt(p, mnt)) { 2417 int flags = p->mnt.mnt_flags; 2418 /* Don't allow unprivileged users to change mount flags */ 2419 flags |= MNT_LOCK_ATIME; 2420 2421 if (flags & MNT_READONLY) 2422 flags |= MNT_LOCK_READONLY; 2423 2424 if (flags & MNT_NODEV) 2425 flags |= MNT_LOCK_NODEV; 2426 2427 if (flags & MNT_NOSUID) 2428 flags |= MNT_LOCK_NOSUID; 2429 2430 if (flags & MNT_NOEXEC) 2431 flags |= MNT_LOCK_NOEXEC; 2432 /* Don't allow unprivileged users to reveal what is under a mount */ 2433 if (list_empty(&p->mnt_expire) && p != mnt) 2434 flags |= MNT_LOCKED; 2435 p->mnt.mnt_flags = flags; 2436 } 2437 } 2438 2439 static void cleanup_group_ids(struct mount *mnt, struct mount *end) 2440 { 2441 struct mount *p; 2442 2443 for (p = mnt; p != end; p = next_mnt(p, mnt)) { 2444 if (p->mnt_group_id && !IS_MNT_SHARED(p)) 2445 mnt_release_group_id(p); 2446 } 2447 } 2448 2449 static int invent_group_ids(struct mount *mnt, bool recurse) 2450 { 2451 struct mount *p; 2452 2453 for (p = mnt; p; p = recurse ?
next_mnt(p, mnt) : NULL) { 2454 if (!p->mnt_group_id) { 2455 int err = mnt_alloc_group_id(p); 2456 if (err) { 2457 cleanup_group_ids(mnt, p); 2458 return err; 2459 } 2460 } 2461 } 2462 2463 return 0; 2464 } 2465 2466 int count_mounts(struct mnt_namespace *ns, struct mount *mnt) 2467 { 2468 unsigned int max = READ_ONCE(sysctl_mount_max); 2469 unsigned int mounts = 0; 2470 struct mount *p; 2471 2472 if (ns->nr_mounts >= max) 2473 return -ENOSPC; 2474 max -= ns->nr_mounts; 2475 if (ns->pending_mounts >= max) 2476 return -ENOSPC; 2477 max -= ns->pending_mounts; 2478 2479 for (p = mnt; p; p = next_mnt(p, mnt)) 2480 mounts++; 2481 2482 if (mounts > max) 2483 return -ENOSPC; 2484 2485 ns->pending_mounts += mounts; 2486 return 0; 2487 } 2488 2489 enum mnt_tree_flags_t { 2490 MNT_TREE_BENEATH = BIT(0), 2491 MNT_TREE_PROPAGATION = BIT(1), 2492 }; 2493 2494 /** 2495 * attach_recursive_mnt - attach a source mount tree 2496 * @source_mnt: mount tree to be attached 2497 * @dest: the context for mounting at the place where the tree should go 2498 * 2499 * NOTE: the table below explains the semantics when a source mount 2500 * of a given type is attached to a destination mount of a given type. 2501 * --------------------------------------------------------------------------- 2502 * | BIND MOUNT OPERATION | 2503 * |************************************************************************** 2504 * | source-->| shared | private | slave | unbindable | 2505 * | dest | | | | | 2506 * | | | | | | | 2507 * | v | | | | | 2508 * |************************************************************************** 2509 * | shared | shared (++) | shared (+) | shared(+++)| invalid | 2510 * | | | | | | 2511 * |non-shared| shared (+) | private | slave (*) | invalid | 2512 * *************************************************************************** 2513 * A bind operation clones the source mount and mounts the clone on the 2514 * destination mount. 2515 * 2516 * (++) the cloned mount is propagated to all the mounts in the propagation 2517 * tree of the destination mount and the cloned mount is added to 2518 * the peer group of the source mount. 2519 * (+) the cloned mount is created under the destination mount and is marked 2520 * as shared. The cloned mount is added to the peer group of the source 2521 * mount. 2522 * (+++) the mount is propagated to all the mounts in the propagation tree 2523 * of the destination mount and the cloned mount is made slave 2524 * of the same master as that of the source mount. The cloned mount 2525 * is marked as 'shared and slave'. 2526 * (*) the cloned mount is made a slave of the same master as that of the 2527 * source mount. 2528 * 2529 * --------------------------------------------------------------------------- 2530 * | MOVE MOUNT OPERATION | 2531 * |************************************************************************** 2532 * | source-->| shared | private | slave | unbindable | 2533 * | dest | | | | | 2534 * | | | | | | | 2535 * | v | | | | | 2536 * |************************************************************************** 2537 * | shared | shared (+) | shared (+) | shared(+++) | invalid | 2538 * | | | | | | 2539 * |non-shared| shared (+*) | private | slave (*) | unbindable | 2540 * *************************************************************************** 2541 * 2542 * (+) the mount is moved to the destination and is then propagated to 2543 * all the mounts in the propagation tree of the destination mount. 2544 * (+*) the mount is moved to the destination.
2545 * (+++) the mount is moved to the destination and is then propagated to 2546 * all the mounts belonging to the destination mount's propagation tree. 2547 * The mount is marked as 'shared and slave'. 2548 * (*) the mount continues to be a slave at the new location. 2549 * 2550 * If the source mount is a tree, the operations explained above are 2551 * applied to each mount in the tree. 2552 * Must be called without spinlocks held, since this function can sleep 2553 * in allocations. 2554 * 2555 * Context: The function expects namespace_lock() to be held. 2556 * Return: If @source_mnt was successfully attached 0 is returned. 2557 * Otherwise a negative error code is returned. 2558 */ 2559 static int attach_recursive_mnt(struct mount *source_mnt, 2560 const struct pinned_mountpoint *dest) 2561 { 2562 struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns; 2563 struct mount *dest_mnt = dest->parent; 2564 struct mountpoint *dest_mp = dest->mp; 2565 HLIST_HEAD(tree_list); 2566 struct mnt_namespace *ns = dest_mnt->mnt_ns; 2567 struct pinned_mountpoint root = {}; 2568 struct mountpoint *shorter = NULL; 2569 struct mount *child, *p; 2570 struct mount *top; 2571 struct hlist_node *n; 2572 int err = 0; 2573 bool moving = mnt_has_parent(source_mnt); 2574 2575 /* 2576 * Preallocate a mountpoint in case the new mounts need to be 2577 * mounted beneath mounts on the same mountpoint. 2578 */ 2579 for (top = source_mnt; unlikely(top->overmount); top = top->overmount) { 2580 if (!shorter && is_mnt_ns_file(top->mnt.mnt_root)) 2581 shorter = top->mnt_mp; 2582 } 2583 err = get_mountpoint(top->mnt.mnt_root, &root); 2584 if (err) 2585 return err; 2586 2587 /* Is there space to add these mounts to the mount namespace? */ 2588 if (!moving) { 2589 err = count_mounts(ns, source_mnt); 2590 if (err) 2591 goto out; 2592 } 2593 2594 if (IS_MNT_SHARED(dest_mnt)) { 2595 err = invent_group_ids(source_mnt, true); 2596 if (err) 2597 goto out; 2598 err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list); 2599 } 2600 lock_mount_hash(); 2601 if (err) 2602 goto out_cleanup_ids; 2603 2604 if (IS_MNT_SHARED(dest_mnt)) { 2605 for (p = source_mnt; p; p = next_mnt(p, source_mnt)) 2606 set_mnt_shared(p); 2607 } 2608 2609 if (moving) { 2610 umount_mnt(source_mnt); 2611 mnt_notify_add(source_mnt); 2612 /* if the mount is moved, it should no longer be expired 2613 * automatically */ 2614 list_del_init(&source_mnt->mnt_expire); 2615 } else { 2616 if (source_mnt->mnt_ns) { 2617 /* move from anon - the caller will destroy */ 2618 emptied_ns = source_mnt->mnt_ns; 2619 for (p = source_mnt; p; p = next_mnt(p, source_mnt)) 2620 move_from_ns(p); 2621 } 2622 } 2623 2624 mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt); 2625 /* 2626 * Now the original copy is in the same state as the secondaries - 2627 * its root attached to mountpoint, but not hashed and all mounts 2628 * in it are either in our namespace or in no namespace at all. 2629 * Add the original to the list of copies and deal with the 2630 * rest of work for all of them uniformly.
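 * (commit_tree() below hashes each copy and adds its mounts to the
 * namespace; where __lookup_mnt() finds a pre-existing mount @q on the
 * same mountpoint, mnt_change_mountpoint() re-hangs @q on top of the
 * subtree that was just attached.)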
2631 */ 2632 hlist_add_head(&source_mnt->mnt_hash, &tree_list); 2633 2634 hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) { 2635 struct mount *q; 2636 hlist_del_init(&child->mnt_hash); 2637 /* Notice when we are propagating across user namespaces */ 2638 if (child->mnt_parent->mnt_ns->user_ns != user_ns) 2639 lock_mnt_tree(child); 2640 q = __lookup_mnt(&child->mnt_parent->mnt, 2641 child->mnt_mountpoint); 2642 commit_tree(child); 2643 if (q) { 2644 struct mount *r = topmost_overmount(child); 2645 struct mountpoint *mp = root.mp; 2646 2647 if (unlikely(shorter) && child != source_mnt) 2648 mp = shorter; 2649 mnt_change_mountpoint(r, mp, q); 2650 } 2651 } 2652 unpin_mountpoint(&root); 2653 unlock_mount_hash(); 2654 2655 return 0; 2656 2657 out_cleanup_ids: 2658 while (!hlist_empty(&tree_list)) { 2659 child = hlist_entry(tree_list.first, struct mount, mnt_hash); 2660 child->mnt_parent->mnt_ns->pending_mounts = 0; 2661 umount_tree(child, UMOUNT_SYNC); 2662 } 2663 unlock_mount_hash(); 2664 cleanup_group_ids(source_mnt, NULL); 2665 out: 2666 ns->pending_mounts = 0; 2667 2668 read_seqlock_excl(&mount_lock); 2669 unpin_mountpoint(&root); 2670 read_sequnlock_excl(&mount_lock); 2671 2672 return err; 2673 } 2674 2675 static inline struct mount *where_to_mount(const struct path *path, 2676 struct dentry **dentry, 2677 bool beneath) 2678 { 2679 struct mount *m; 2680 2681 if (unlikely(beneath)) { 2682 m = topmost_overmount(real_mount(path->mnt)); 2683 *dentry = m->mnt_mountpoint; 2684 return m->mnt_parent; 2685 } 2686 m = __lookup_mnt(path->mnt, path->dentry); 2687 if (unlikely(m)) { 2688 m = topmost_overmount(m); 2689 *dentry = m->mnt.mnt_root; 2690 return m; 2691 } 2692 *dentry = path->dentry; 2693 return real_mount(path->mnt); 2694 } 2695 2696 /** 2697 * do_lock_mount - acquire environment for mounting 2698 * @path: target path 2699 * @res: context to set up 2700 * @beneath: whether the intention is to mount beneath @path 2701 * 2702 * To mount something at given location, we need 2703 * namespace_sem locked exclusive 2704 * inode of dentry we are mounting on locked exclusive 2705 * struct mountpoint for that dentry 2706 * struct mount we are mounting on 2707 * 2708 * Results are stored in caller-supplied context (pinned_mountpoint); 2709 * on success we have res->parent and res->mp pointing to parent and 2710 * mountpoint respectively and res->node inserted into the ->m_list 2711 * of the mountpoint, making sure the mountpoint won't disappear. 2712 * On failure we have res->parent set to ERR_PTR(-E...), res->mp 2713 * left NULL, res->node - empty. 2714 * In case of success do_lock_mount returns with locks acquired (in 2715 * proper order - inode lock nests outside of namespace_sem). 2716 * 2717 * Request to mount on overmounted location is treated as "mount on 2718 * top of whatever's overmounting it"; request to mount beneath 2719 * a location - "mount immediately beneath the topmost mount at that 2720 * place". 2721 * 2722 * In all cases the location must not have been unmounted and the 2723 * chosen mountpoint must be allowed to be mounted on. For "beneath" 2724 * case we also require the location to be at the root of a mount 2725 * that has a parent (i.e. is not a root of some namespace). 
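 *
 * A minimal sketch of the typical caller pattern (cf. do_loopback()
 * below), using only what the LOCK_MOUNT() macro provides:
 *
 *	LOCK_MOUNT(mp, path);
 *	if (IS_ERR(mp.parent))
 *		return PTR_ERR(mp.parent);
 *	// ... graft something onto mp.parent at mp.mp ...
 *	// unlock_mount() runs automatically via __cleanup() when mp
 *	// goes out of scope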
2726 */ 2727 static void do_lock_mount(const struct path *path, 2728 struct pinned_mountpoint *res, 2729 bool beneath) 2730 { 2731 int err; 2732 2733 if (unlikely(beneath) && !path_mounted(path)) { 2734 res->parent = ERR_PTR(-EINVAL); 2735 return; 2736 } 2737 2738 do { 2739 struct dentry *dentry, *d; 2740 struct mount *m, *n; 2741 2742 scoped_guard(mount_locked_reader) { 2743 m = where_to_mount(path, &dentry, beneath); 2744 if (&m->mnt != path->mnt) { 2745 mntget(&m->mnt); 2746 dget(dentry); 2747 } 2748 } 2749 2750 inode_lock(dentry->d_inode); 2751 namespace_lock(); 2752 2753 // check if the chain of mounts (if any) has changed. 2754 scoped_guard(mount_locked_reader) 2755 n = where_to_mount(path, &d, beneath); 2756 2757 if (unlikely(n != m || dentry != d)) 2758 err = -EAGAIN; // something moved, retry 2759 else if (unlikely(cant_mount(dentry) || !is_mounted(path->mnt))) 2760 err = -ENOENT; // not to be mounted on 2761 else if (beneath && &m->mnt == path->mnt && !m->overmount) 2762 err = -EINVAL; 2763 else 2764 err = get_mountpoint(dentry, res); 2765 2766 if (unlikely(err)) { 2767 res->parent = ERR_PTR(err); 2768 namespace_unlock(); 2769 inode_unlock(dentry->d_inode); 2770 } else { 2771 res->parent = m; 2772 } 2773 /* 2774 * Drop the temporary references. This is subtle - on success 2775 * we are doing that under namespace_sem, which would normally 2776 * be forbidden. However, in that case we are guaranteed that 2777 * refcounts won't reach zero, since we know that path->mnt 2778 * is mounted and thus all mounts reachable from it are pinned 2779 * and stable, along with their mountpoints and roots. 2780 */ 2781 if (&m->mnt != path->mnt) { 2782 dput(dentry); 2783 mntput(&m->mnt); 2784 } 2785 } while (err == -EAGAIN); 2786 } 2787 2788 static void __unlock_mount(struct pinned_mountpoint *m) 2789 { 2790 inode_unlock(m->mp->m_dentry->d_inode); 2791 read_seqlock_excl(&mount_lock); 2792 unpin_mountpoint(m); 2793 read_sequnlock_excl(&mount_lock); 2794 namespace_unlock(); 2795 } 2796 2797 static inline void unlock_mount(struct pinned_mountpoint *m) 2798 { 2799 if (!IS_ERR(m->parent)) 2800 __unlock_mount(m); 2801 } 2802 2803 static void lock_mount_exact(const struct path *path, 2804 struct pinned_mountpoint *mp, bool copy_mount, 2805 unsigned int copy_flags); 2806 2807 #define LOCK_MOUNT_MAYBE_BENEATH(mp, path, beneath) \ 2808 struct pinned_mountpoint mp __cleanup(unlock_mount) = {}; \ 2809 do_lock_mount((path), &mp, (beneath)) 2810 #define LOCK_MOUNT(mp, path) LOCK_MOUNT_MAYBE_BENEATH(mp, (path), false) 2811 #define LOCK_MOUNT_EXACT(mp, path) \ 2812 struct pinned_mountpoint mp __cleanup(unlock_mount) = {}; \ 2813 lock_mount_exact((path), &mp, false, 0) 2814 #define LOCK_MOUNT_EXACT_COPY(mp, path, copy_flags) \ 2815 struct pinned_mountpoint mp __cleanup(unlock_mount) = {}; \ 2816 lock_mount_exact((path), &mp, true, (copy_flags)) 2817 2818 static int graft_tree(struct mount *mnt, const struct pinned_mountpoint *mp) 2819 { 2820 if (mnt->mnt.mnt_sb->s_flags & SB_NOUSER) 2821 return -EINVAL; 2822 2823 if (d_is_dir(mp->mp->m_dentry) != 2824 d_is_dir(mnt->mnt.mnt_root)) 2825 return -ENOTDIR; 2826 2827 return attach_recursive_mnt(mnt, mp); 2828 } 2829 2830 static int may_change_propagation(const struct mount *m) 2831 { 2832 struct mnt_namespace *ns = m->mnt_ns; 2833 2834 // it must be mounted in some namespace 2835 if (IS_ERR_OR_NULL(ns)) // is_mounted() 2836 return -EINVAL; 2837 // and the caller must be admin in userns of that namespace 2838 if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN)) 2839 return 
-EPERM; 2840 return 0; 2841 } 2842 2843 /* 2844 * Sanity check the flags to change_mnt_propagation. 2845 */ 2846 2847 static int flags_to_propagation_type(int ms_flags) 2848 { 2849 int type = ms_flags & ~(MS_REC | MS_SILENT); 2850 2851 /* Fail if any non-propagation flags are set */ 2852 if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE)) 2853 return 0; 2854 /* Only one propagation flag should be set */ 2855 if (!is_power_of_2(type)) 2856 return 0; 2857 return type; 2858 } 2859 2860 /* 2861 * recursively change the type of the mountpoint. 2862 */ 2863 static int do_change_type(const struct path *path, int ms_flags) 2864 { 2865 struct mount *m; 2866 struct mount *mnt = real_mount(path->mnt); 2867 int recurse = ms_flags & MS_REC; 2868 int type; 2869 int err; 2870 2871 if (!path_mounted(path)) 2872 return -EINVAL; 2873 2874 type = flags_to_propagation_type(ms_flags); 2875 if (!type) 2876 return -EINVAL; 2877 2878 guard(namespace_excl)(); 2879 2880 err = may_change_propagation(mnt); 2881 if (err) 2882 return err; 2883 2884 if (type == MS_SHARED) { 2885 err = invent_group_ids(mnt, recurse); 2886 if (err) 2887 return err; 2888 } 2889 2890 for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL)) 2891 change_mnt_propagation(m, type); 2892 2893 return 0; 2894 } 2895 2896 /* may_copy_tree() - check if a mount tree can be copied 2897 * @path: path to the mount tree to be copied 2898 * 2899 * This helper checks if the caller may copy the mount tree starting 2900 * from @path->mnt. The caller may copy the mount tree under the 2901 * following circumstances: 2902 * 2903 * (1) The caller is located in the mount namespace of the mount tree. 2904 * This also implies that the mount does not belong to an anonymous 2905 * mount namespace. 2906 * (2) The caller tries to copy an nsfs mount referring to a mount 2907 * namespace, i.e., the caller is trying to copy a mount namespace 2908 * entry from nsfs. 2909 * (3) The caller tries to copy a pidfs mount referring to a pidfd. 2910 * (4) The caller is trying to copy a mount tree that belongs to an 2911 * anonymous mount namespace. 2912 * 2913 * For that to be safe, this helper enforces that the origin mount 2914 * namespace the anonymous mount namespace was created from is the 2915 * same as the caller's mount namespace by comparing the sequence 2916 * numbers. 2917 * 2918 * This is not strictly necessary. The current semantics of the new 2919 * mount api enforce that the caller must be located in the same 2920 * mount namespace as the mount tree it interacts with. Using the 2921 * origin sequence number preserves these semantics even for 2922 * anonymous mount namespaces. However, one could envision extending 2923 * the api to directly operate across mount namespaces if needed. 2924 * 2925 * The ownership of a non-anonymous mount namespace such as the 2926 * caller's cannot change. 2927 * => We know that the caller's mount namespace is stable. 2928 * 2929 * If the origin sequence number of the anonymous mount namespace is 2930 * the same as the sequence number of the caller's mount namespace, 2931 * => The owning namespaces are the same. 2932 * 2933 * ==> The earlier capability check on the owning namespace of the 2934 * caller's mount namespace ensures that the caller has the 2935 * ability to copy the mount tree. 2936 * 2937 * Returns true if the mount tree can be copied, false otherwise.
2938 */ 2939 static inline bool may_copy_tree(const struct path *path) 2940 { 2941 struct mount *mnt = real_mount(path->mnt); 2942 const struct dentry_operations *d_op; 2943 2944 if (check_mnt(mnt)) 2945 return true; 2946 2947 d_op = path->dentry->d_op; 2948 if (d_op == &ns_dentry_operations) 2949 return true; 2950 2951 if (d_op == &pidfs_dentry_operations) 2952 return true; 2953 2954 if (!is_mounted(path->mnt)) 2955 return false; 2956 2957 return check_anonymous_mnt(mnt); 2958 } 2959 2960 static struct mount *__do_loopback(const struct path *old_path, 2961 bool recurse, unsigned int copy_flags) 2962 { 2963 struct mount *old = real_mount(old_path->mnt); 2964 2965 if (IS_MNT_UNBINDABLE(old)) 2966 return ERR_PTR(-EINVAL); 2967 2968 if (!may_copy_tree(old_path)) 2969 return ERR_PTR(-EINVAL); 2970 2971 if (!recurse && __has_locked_children(old, old_path->dentry)) 2972 return ERR_PTR(-EINVAL); 2973 2974 if (recurse) 2975 return copy_tree(old, old_path->dentry, copy_flags); 2976 2977 return clone_mnt(old, old_path->dentry, copy_flags); 2978 } 2979 2980 /* 2981 * do loopback mount. 2982 */ 2983 static int do_loopback(const struct path *path, const char *old_name, 2984 int recurse) 2985 { 2986 struct path old_path __free(path_put) = {}; 2987 struct mount *mnt = NULL; 2988 int err; 2989 2990 if (!old_name || !*old_name) 2991 return -EINVAL; 2992 err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path); 2993 if (err) 2994 return err; 2995 2996 if (mnt_ns_loop(old_path.dentry)) 2997 return -EINVAL; 2998 2999 LOCK_MOUNT(mp, path); 3000 if (IS_ERR(mp.parent)) 3001 return PTR_ERR(mp.parent); 3002 3003 if (!check_mnt(mp.parent)) 3004 return -EINVAL; 3005 3006 mnt = __do_loopback(&old_path, recurse, CL_COPY_MNT_NS_FILE); 3007 if (IS_ERR(mnt)) 3008 return PTR_ERR(mnt); 3009 3010 err = graft_tree(mnt, &mp); 3011 if (err) { 3012 lock_mount_hash(); 3013 umount_tree(mnt, UMOUNT_SYNC); 3014 unlock_mount_hash(); 3015 } 3016 return err; 3017 } 3018 3019 static struct mnt_namespace *get_detached_copy(const struct path *path, unsigned int flags) 3020 { 3021 struct mnt_namespace *ns, *mnt_ns = current->nsproxy->mnt_ns, *src_mnt_ns; 3022 struct user_namespace *user_ns = mnt_ns->user_ns; 3023 struct mount *mnt, *p; 3024 3025 ns = alloc_mnt_ns(user_ns, true); 3026 if (IS_ERR(ns)) 3027 return ns; 3028 3029 guard(namespace_excl)(); 3030 3031 /* 3032 * Record the sequence number of the source mount namespace. 3033 * This needs to hold namespace_sem to ensure that the mount 3034 * doesn't get attached. 
3035 */ 3036 if (is_mounted(path->mnt)) { 3037 src_mnt_ns = real_mount(path->mnt)->mnt_ns; 3038 if (is_anon_ns(src_mnt_ns)) 3039 ns->seq_origin = src_mnt_ns->seq_origin; 3040 else 3041 ns->seq_origin = src_mnt_ns->ns.ns_id; 3042 } 3043 3044 mnt = __do_loopback(path, (flags & AT_RECURSIVE), CL_COPY_MNT_NS_FILE); 3045 if (IS_ERR(mnt)) { 3046 emptied_ns = ns; 3047 return ERR_CAST(mnt); 3048 } 3049 3050 for (p = mnt; p; p = next_mnt(p, mnt)) { 3051 mnt_add_to_ns(ns, p); 3052 ns->nr_mounts++; 3053 } 3054 ns->root = mnt; 3055 return ns; 3056 } 3057 3058 static struct file *open_detached_copy(struct path *path, unsigned int flags) 3059 { 3060 struct mnt_namespace *ns = get_detached_copy(path, flags); 3061 struct file *file; 3062 3063 if (IS_ERR(ns)) 3064 return ERR_CAST(ns); 3065 3066 mntput(path->mnt); 3067 path->mnt = mntget(&ns->root->mnt); 3068 file = dentry_open(path, O_PATH, current_cred()); 3069 if (IS_ERR(file)) 3070 dissolve_on_fput(path->mnt); 3071 else 3072 file->f_mode |= FMODE_NEED_UNMOUNT; 3073 return file; 3074 } 3075 3076 static struct mnt_namespace *create_new_namespace(struct path *path, 3077 bool recurse) 3078 { 3079 struct mnt_namespace *ns = current->nsproxy->mnt_ns; 3080 struct user_namespace *user_ns = current_user_ns(); 3081 struct mnt_namespace *new_ns; 3082 struct mount *new_ns_root, *old_ns_root; 3083 struct path to_path; 3084 struct mount *mnt; 3085 unsigned int copy_flags = 0; 3086 bool locked = false; 3087 3088 if (user_ns != ns->user_ns) 3089 copy_flags |= CL_SLAVE; 3090 3091 new_ns = alloc_mnt_ns(user_ns, false); 3092 if (IS_ERR(new_ns)) 3093 return ERR_CAST(new_ns); 3094 3095 old_ns_root = ns->root; 3096 to_path.mnt = &old_ns_root->mnt; 3097 to_path.dentry = old_ns_root->mnt.mnt_root; 3098 3099 VFS_WARN_ON_ONCE(old_ns_root->mnt.mnt_sb->s_type != &nullfs_fs_type); 3100 3101 LOCK_MOUNT_EXACT_COPY(mp, &to_path, copy_flags); 3102 if (IS_ERR(mp.parent)) { 3103 free_mnt_ns(new_ns); 3104 return ERR_CAST(mp.parent); 3105 } 3106 new_ns_root = mp.parent; 3107 3108 /* 3109 * If the real rootfs had a locked mount on top of it somewhere 3110 * in the stack, lock the new mount tree as well so it can't be 3111 * exposed. 3112 */ 3113 mnt = old_ns_root; 3114 while (mnt->overmount) { 3115 mnt = mnt->overmount; 3116 if (mnt->mnt.mnt_flags & MNT_LOCKED) 3117 locked = true; 3118 } 3119 3120 /* 3121 * We don't emulate unshare()ing a mount namespace. We stick to 3122 * the restrictions of creating detached bind-mounts. That has 3123 * much saner and simpler semantics. 3124 */ 3125 mnt = real_mount(path->mnt); 3126 if (!mnt->mnt_ns) { 3127 /* 3128 * If we're moving into a new mount namespace via 3129 * fsmount(), swap the mount ids so the nullfs mount id 3130 * is the lowest in the mount namespace, avoiding another 3131 * useless copy. This is fine: we're not attached to any 3132 * mount namespace, so the mount ids are pure decoration 3133 * at that point. 3134 */ 3135 swap(mnt->mnt_id_unique, new_ns_root->mnt_id_unique); 3136 swap(mnt->mnt_id, new_ns_root->mnt_id); 3137 mntget(&mnt->mnt); 3138 } else { 3139 mnt = __do_loopback(path, recurse, copy_flags); 3140 } 3141 scoped_guard(mount_writer) { 3142 if (IS_ERR(mnt)) { 3143 emptied_ns = new_ns; 3144 umount_tree(new_ns_root, 0); 3145 return ERR_CAST(mnt); 3146 } 3147 3148 if (locked) 3149 mnt->mnt.mnt_flags |= MNT_LOCKED; 3150 /* 3151 * now mount the detached tree on top of the copy 3152 * of the real rootfs we created.
3153 */ 3154 attach_mnt(mnt, new_ns_root, mp.mp); 3155 if (user_ns != ns->user_ns) 3156 lock_mnt_tree(new_ns_root); 3157 } 3158 3159 for (mnt = new_ns_root; mnt; mnt = next_mnt(mnt, new_ns_root)) { 3160 mnt_add_to_ns(new_ns, mnt); 3161 new_ns->nr_mounts++; 3162 } 3163 3164 new_ns->root = new_ns_root; 3165 ns_tree_add_raw(new_ns); 3166 return new_ns; 3167 } 3168 3169 static struct file *open_new_namespace(struct path *path, bool recurse) 3170 { 3171 struct mnt_namespace *new_ns; 3172 3173 new_ns = create_new_namespace(path, recurse); 3174 if (IS_ERR(new_ns)) 3175 return ERR_CAST(new_ns); 3176 return open_namespace_file(to_ns_common(new_ns)); 3177 } 3178 3179 static struct file *vfs_open_tree(int dfd, const char __user *filename, unsigned int flags) 3180 { 3181 int ret; 3182 struct path path __free(path_put) = {}; 3183 int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW; 3184 3185 BUILD_BUG_ON(OPEN_TREE_CLOEXEC != O_CLOEXEC); 3186 3187 if (flags & ~(AT_EMPTY_PATH | AT_NO_AUTOMOUNT | AT_RECURSIVE | 3188 AT_SYMLINK_NOFOLLOW | OPEN_TREE_CLONE | 3189 OPEN_TREE_CLOEXEC | OPEN_TREE_NAMESPACE)) 3190 return ERR_PTR(-EINVAL); 3191 3192 if ((flags & (AT_RECURSIVE | OPEN_TREE_CLONE | OPEN_TREE_NAMESPACE)) == 3193 AT_RECURSIVE) 3194 return ERR_PTR(-EINVAL); 3195 3196 if (hweight32(flags & (OPEN_TREE_CLONE | OPEN_TREE_NAMESPACE)) > 1) 3197 return ERR_PTR(-EINVAL); 3198 3199 if (flags & AT_NO_AUTOMOUNT) 3200 lookup_flags &= ~LOOKUP_AUTOMOUNT; 3201 if (flags & AT_SYMLINK_NOFOLLOW) 3202 lookup_flags &= ~LOOKUP_FOLLOW; 3203 3204 /* 3205 * If we create a new mount namespace with the cloned mount tree we 3206 * just care about being privileged over our current user namespace. 3207 * The new mount namespace will be owned by it. 3208 */ 3209 if ((flags & OPEN_TREE_NAMESPACE) && 3210 !ns_capable(current_user_ns(), CAP_SYS_ADMIN)) 3211 return ERR_PTR(-EPERM); 3212 3213 if ((flags & OPEN_TREE_CLONE) && !may_mount()) 3214 return ERR_PTR(-EPERM); 3215 3216 CLASS(filename_uflags, name)(filename, flags); 3217 ret = filename_lookup(dfd, name, lookup_flags, &path, NULL); 3218 if (unlikely(ret)) 3219 return ERR_PTR(ret); 3220 3221 if (flags & OPEN_TREE_NAMESPACE) 3222 return open_new_namespace(&path, (flags & AT_RECURSIVE)); 3223 3224 if (flags & OPEN_TREE_CLONE) 3225 return open_detached_copy(&path, flags); 3226 3227 return dentry_open(&path, O_PATH, current_cred()); 3228 } 3229 3230 SYSCALL_DEFINE3(open_tree, int, dfd, const char __user *, filename, unsigned, flags) 3231 { 3232 return FD_ADD(flags, vfs_open_tree(dfd, filename, flags)); 3233 } 3234 3235 /* 3236 * Don't allow locked mount flags to be cleared. 3237 * 3238 * No locks need to be held here while testing the various MNT_LOCK 3239 * flags because those flags can never be cleared once they are set. 
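 * For example, a mount propagated into a less privileged mount
 * namespace may carry MNT_LOCK_READONLY; a later remount trying to
 * clear MNT_READONLY is then rejected with -EPERM by the callers below
 * (do_reconfigure_mnt() and do_remount()).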
3240 */ 3241 static bool can_change_locked_flags(struct mount *mnt, unsigned int mnt_flags) 3242 { 3243 unsigned int fl = mnt->mnt.mnt_flags; 3244 3245 if ((fl & MNT_LOCK_READONLY) && 3246 !(mnt_flags & MNT_READONLY)) 3247 return false; 3248 3249 if ((fl & MNT_LOCK_NODEV) && 3250 !(mnt_flags & MNT_NODEV)) 3251 return false; 3252 3253 if ((fl & MNT_LOCK_NOSUID) && 3254 !(mnt_flags & MNT_NOSUID)) 3255 return false; 3256 3257 if ((fl & MNT_LOCK_NOEXEC) && 3258 !(mnt_flags & MNT_NOEXEC)) 3259 return false; 3260 3261 if ((fl & MNT_LOCK_ATIME) && 3262 ((fl & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK))) 3263 return false; 3264 3265 return true; 3266 } 3267 3268 static int change_mount_ro_state(struct mount *mnt, unsigned int mnt_flags) 3269 { 3270 bool readonly_request = (mnt_flags & MNT_READONLY); 3271 3272 if (readonly_request == __mnt_is_readonly(&mnt->mnt)) 3273 return 0; 3274 3275 if (readonly_request) 3276 return mnt_make_readonly(mnt); 3277 3278 mnt->mnt.mnt_flags &= ~MNT_READONLY; 3279 return 0; 3280 } 3281 3282 static void set_mount_attributes(struct mount *mnt, unsigned int mnt_flags) 3283 { 3284 mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK; 3285 mnt->mnt.mnt_flags = mnt_flags; 3286 touch_mnt_namespace(mnt->mnt_ns); 3287 } 3288 3289 static void mnt_warn_timestamp_expiry(const struct path *mountpoint, 3290 struct vfsmount *mnt) 3291 { 3292 struct super_block *sb = mnt->mnt_sb; 3293 3294 if (!__mnt_is_readonly(mnt) && 3295 (!(sb->s_iflags & SB_I_TS_EXPIRY_WARNED)) && 3296 (ktime_get_real_seconds() + TIME_UPTIME_SEC_MAX > sb->s_time_max)) { 3297 char *buf, *mntpath; 3298 3299 buf = (char *)__get_free_page(GFP_KERNEL); 3300 if (buf) 3301 mntpath = d_path(mountpoint, buf, PAGE_SIZE); 3302 else 3303 mntpath = ERR_PTR(-ENOMEM); 3304 if (IS_ERR(mntpath)) 3305 mntpath = "(unknown)"; 3306 3307 pr_warn("%s filesystem being %s at %s supports timestamps until %ptTd (0x%llx)\n", 3308 sb->s_type->name, 3309 is_mounted(mnt) ? "remounted" : "mounted", 3310 mntpath, &sb->s_time_max, 3311 (unsigned long long)sb->s_time_max); 3312 3313 sb->s_iflags |= SB_I_TS_EXPIRY_WARNED; 3314 if (buf) 3315 free_page((unsigned long)buf); 3316 } 3317 } 3318 3319 /* 3320 * Handle reconfiguration of the mountpoint only without alteration of the 3321 * superblock it refers to. This is triggered by specifying MS_REMOUNT|MS_BIND 3322 * to mount(2). 3323 */ 3324 static int do_reconfigure_mnt(const struct path *path, unsigned int mnt_flags) 3325 { 3326 struct super_block *sb = path->mnt->mnt_sb; 3327 struct mount *mnt = real_mount(path->mnt); 3328 int ret; 3329 3330 if (!check_mnt(mnt)) 3331 return -EINVAL; 3332 3333 if (!path_mounted(path)) 3334 return -EINVAL; 3335 3336 if (!can_change_locked_flags(mnt, mnt_flags)) 3337 return -EPERM; 3338 3339 /* 3340 * We're only checking whether the superblock is read-only not 3341 * changing it, so only take down_read(&sb->s_umount). 3342 */ 3343 down_read(&sb->s_umount); 3344 lock_mount_hash(); 3345 ret = change_mount_ro_state(mnt, mnt_flags); 3346 if (ret == 0) 3347 set_mount_attributes(mnt, mnt_flags); 3348 unlock_mount_hash(); 3349 up_read(&sb->s_umount); 3350 3351 mnt_warn_timestamp_expiry(path, &mnt->mnt); 3352 3353 return ret; 3354 } 3355 3356 /* 3357 * change filesystem flags. dir should be a physical root of filesystem. 3358 * If you've mounted a non-root directory somewhere and want to do remount 3359 * on it - tough luck. 
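 *
 * For example, from userspace (a sketch, not kernel code):
 *
 *	// reconfigures the superblock - must target the root of the mount
 *	mount(NULL, "/mnt", NULL, MS_REMOUNT | MS_RDONLY, NULL);
 *	// changes only per-mountpoint flags - handled by do_reconfigure_mnt()
 *	mount(NULL, "/mnt", NULL, MS_REMOUNT | MS_BIND | MS_RDONLY, NULL);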
3360 */ 3361 static int do_remount(const struct path *path, int sb_flags, 3362 int mnt_flags, void *data) 3363 { 3364 int err; 3365 struct super_block *sb = path->mnt->mnt_sb; 3366 struct mount *mnt = real_mount(path->mnt); 3367 struct fs_context *fc; 3368 3369 if (!check_mnt(mnt)) 3370 return -EINVAL; 3371 3372 if (!path_mounted(path)) 3373 return -EINVAL; 3374 3375 if (!can_change_locked_flags(mnt, mnt_flags)) 3376 return -EPERM; 3377 3378 fc = fs_context_for_reconfigure(path->dentry, sb_flags, MS_RMT_MASK); 3379 if (IS_ERR(fc)) 3380 return PTR_ERR(fc); 3381 3382 /* 3383 * Indicate to the filesystem that the remount request is coming 3384 * from the legacy mount system call. 3385 */ 3386 fc->oldapi = true; 3387 3388 err = parse_monolithic_mount_data(fc, data); 3389 if (!err) { 3390 down_write(&sb->s_umount); 3391 err = -EPERM; 3392 if (ns_capable(sb->s_user_ns, CAP_SYS_ADMIN)) { 3393 err = reconfigure_super(fc); 3394 if (!err) { 3395 lock_mount_hash(); 3396 set_mount_attributes(mnt, mnt_flags); 3397 unlock_mount_hash(); 3398 } 3399 } 3400 up_write(&sb->s_umount); 3401 } 3402 3403 mnt_warn_timestamp_expiry(path, &mnt->mnt); 3404 3405 put_fs_context(fc); 3406 return err; 3407 } 3408 3409 static inline int tree_contains_unbindable(struct mount *mnt) 3410 { 3411 struct mount *p; 3412 for (p = mnt; p; p = next_mnt(p, mnt)) { 3413 if (IS_MNT_UNBINDABLE(p)) 3414 return 1; 3415 } 3416 return 0; 3417 } 3418 3419 static int do_set_group(const struct path *from_path, const struct path *to_path) 3420 { 3421 struct mount *from = real_mount(from_path->mnt); 3422 struct mount *to = real_mount(to_path->mnt); 3423 int err; 3424 3425 guard(namespace_excl)(); 3426 3427 err = may_change_propagation(from); 3428 if (err) 3429 return err; 3430 err = may_change_propagation(to); 3431 if (err) 3432 return err; 3433 3434 /* To and From paths should be mount roots */ 3435 if (!path_mounted(from_path)) 3436 return -EINVAL; 3437 if (!path_mounted(to_path)) 3438 return -EINVAL; 3439 3440 /* Setting sharing groups is only allowed across same superblock */ 3441 if (from->mnt.mnt_sb != to->mnt.mnt_sb) 3442 return -EINVAL; 3443 3444 /* From mount root should be wider than To mount root */ 3445 if (!is_subdir(to->mnt.mnt_root, from->mnt.mnt_root)) 3446 return -EINVAL; 3447 3448 /* From mount should not have locked children in place of To's root */ 3449 if (__has_locked_children(from, to->mnt.mnt_root)) 3450 return -EINVAL; 3451 3452 /* Setting sharing groups is only allowed on private mounts */ 3453 if (IS_MNT_SHARED(to) || IS_MNT_SLAVE(to)) 3454 return -EINVAL; 3455 3456 /* From should not be private */ 3457 if (!IS_MNT_SHARED(from) && !IS_MNT_SLAVE(from)) 3458 return -EINVAL; 3459 3460 if (IS_MNT_SLAVE(from)) { 3461 hlist_add_behind(&to->mnt_slave, &from->mnt_slave); 3462 to->mnt_master = from->mnt_master; 3463 } 3464 3465 if (IS_MNT_SHARED(from)) { 3466 to->mnt_group_id = from->mnt_group_id; 3467 list_add(&to->mnt_share, &from->mnt_share); 3468 set_mnt_shared(to); 3469 } 3470 return 0; 3471 } 3472 3473 /** 3474 * path_overmounted - check if path is overmounted 3475 * @path: path to check 3476 * 3477 * Check if path is overmounted, i.e., if there's a mount on top of 3478 * @path->mnt with @path->dentry as mountpoint. 3479 * 3480 * Context: namespace_sem must be held at least shared. 3481 * MUST NOT be called under lock_mount_hash() (there one should just 3482 * call __lookup_mnt() and check if it returns NULL). 3483 * Return: If path is overmounted true is returned, false if not. 
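 * The implementation samples mount_lock with read_seqbegin() and does
 * an RCU-protected __lookup_mnt(); only if the sequence count changed
 * does it retry under read_seqlock_excl(), so the common case takes no
 * lock at all.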
3484 */ 3485 static inline bool path_overmounted(const struct path *path) 3486 { 3487 unsigned seq = read_seqbegin(&mount_lock); 3488 bool no_child; 3489 3490 rcu_read_lock(); 3491 no_child = !__lookup_mnt(path->mnt, path->dentry); 3492 rcu_read_unlock(); 3493 if (need_seqretry(&mount_lock, seq)) { 3494 read_seqlock_excl(&mount_lock); 3495 no_child = !__lookup_mnt(path->mnt, path->dentry); 3496 read_sequnlock_excl(&mount_lock); 3497 } 3498 return unlikely(!no_child); 3499 } 3500 3501 /* 3502 * Check if there is a possibly empty chain of descent from p1 to p2. 3503 * Locks: namespace_sem (shared) or mount_lock (read_seqlock_excl). 3504 */ 3505 static bool mount_is_ancestor(const struct mount *p1, const struct mount *p2) 3506 { 3507 while (p2 != p1 && mnt_has_parent(p2)) 3508 p2 = p2->mnt_parent; 3509 return p2 == p1; 3510 } 3511 3512 /** 3513 * can_move_mount_beneath - check that we can mount beneath the top mount 3514 * @mnt_from: mount we are trying to move 3515 * @mnt_to: mount under which to mount 3516 * @mp: mountpoint of @mnt_to 3517 * 3518 * - Make sure that nothing can be mounted beneath the caller's current 3519 * root or the rootfs of the namespace. 3520 * - Make sure that the caller can unmount the topmost mount ensuring 3521 * that the caller could reveal the underlying mountpoint. 3522 * - Ensure that nothing has been mounted on top of @mnt_from before we 3523 * grabbed @namespace_sem to avoid creating pointless shadow mounts. 3524 * - Prevent mounting beneath a mount if the propagation relationship 3525 * between the source mount, parent mount, and top mount would lead to 3526 * nonsensical mount trees. 3527 * 3528 * Context: This function expects namespace_lock() to be held. 3529 * Return: On success 0, and on error a negative error code is returned. 3530 */ 3531 static int can_move_mount_beneath(const struct mount *mnt_from, 3532 const struct mount *mnt_to, 3533 const struct mountpoint *mp) 3534 { 3535 struct mount *parent_mnt_to = mnt_to->mnt_parent; 3536 3537 if (IS_MNT_LOCKED(mnt_to)) 3538 return -EINVAL; 3539 3540 /* Avoid creating shadow mounts during mount propagation. */ 3541 if (mnt_from->overmount) 3542 return -EINVAL; 3543 3544 /* 3545 * Mounting beneath the rootfs only makes sense when the 3546 * semantics of pivot_root(".", ".") are used. 3547 */ 3548 if (&mnt_to->mnt == current->fs->root.mnt) 3549 return -EINVAL; 3550 if (parent_mnt_to == current->nsproxy->mnt_ns->root) 3551 return -EINVAL; 3552 3553 if (mount_is_ancestor(mnt_to, mnt_from)) 3554 return -EINVAL; 3555 3556 /* 3557 * If the parent mount propagates to the child mount this would 3558 * mean mounting @mnt_from on @mnt_to->mnt_parent and then 3559 * propagating a copy @c of @mnt_from on top of @mnt_to. This 3560 * defeats the whole purpose of mounting beneath another mount. 3561 */ 3562 if (propagation_would_overmount(parent_mnt_to, mnt_to, mp)) 3563 return -EINVAL; 3564 3565 /* 3566 * If @mnt_to->mnt_parent propagates to @mnt_from this would 3567 * mean propagating a copy @c of @mnt_from on top of @mnt_from. 3568 * Afterwards @mnt_from would be mounted on top of 3569 * @mnt_to->mnt_parent and @mnt_to would be unmounted from 3570 * @mnt_to->mnt_parent and remounted on @mnt_from. But since @c is 3571 * already mounted on @mnt_from, @mnt_to would ultimately be 3572 * remounted on top of @c. Afterwards, @mnt_from would be 3573 * covered by a copy @c of @mnt_from and @c would be covered by 3574 * @mnt_from itself. This defeats the whole purpose of mounting 3575 * @mnt_from beneath @mnt_to.
3576 */ 3577 if (check_mnt(mnt_from) && 3578 propagation_would_overmount(parent_mnt_to, mnt_from, mp)) 3579 return -EINVAL; 3580 3581 return 0; 3582 } 3583 3584 /* may_use_mount() - check if a mount tree can be used 3585 * @mnt: vfsmount to be used 3586 * 3587 * This helper checks if the caller may use the mount tree starting 3588 * from @mnt. The caller may use the mount tree under the 3589 * following circumstances: 3590 * 3591 * (1) The caller is located in the mount namespace of the mount tree. 3592 * This also implies that the mount does not belong to an anonymous 3593 * mount namespace. 3594 * (2) The caller is trying to use a mount tree that belongs to an 3595 * anonymous mount namespace. 3596 * 3597 * For that to be safe, this helper enforces that the origin mount 3598 * namespace the anonymous mount namespace was created from is the 3599 * same as the caller's mount namespace by comparing the sequence 3600 * numbers. 3601 * 3602 * The ownership of a non-anonymous mount namespace such as the 3603 * caller's cannot change. 3604 * => We know that the caller's mount namespace is stable. 3605 * 3606 * If the origin sequence number of the anonymous mount namespace is 3607 * the same as the sequence number of the caller's mount namespace, 3608 * => The owning namespaces are the same. 3609 * 3610 * ==> The earlier capability check on the owning namespace of the 3611 * caller's mount namespace ensures that the caller has the 3612 * ability to use the mount tree. 3613 * 3614 * Returns true if the mount tree can be used, false otherwise. 3615 */ 3616 static inline bool may_use_mount(struct mount *mnt) 3617 { 3618 if (check_mnt(mnt)) 3619 return true; 3620 3621 /* 3622 * Make sure that no one unmounted the target path or somehow 3623 * managed to get their hands on something purely kernel 3624 * internal. 3625 */ 3626 if (!is_mounted(&mnt->mnt)) 3627 return false; 3628 3629 return check_anonymous_mnt(mnt); 3630 } 3631 3632 static int do_move_mount(const struct path *old_path, 3633 const struct path *new_path, 3634 enum mnt_tree_flags_t flags) 3635 { 3636 struct mount *old = real_mount(old_path->mnt); 3637 int err; 3638 bool beneath = flags & MNT_TREE_BENEATH; 3639 3640 if (!path_mounted(old_path)) 3641 return -EINVAL; 3642 3643 if (d_is_dir(new_path->dentry) != d_is_dir(old_path->dentry)) 3644 return -EINVAL; 3645 3646 LOCK_MOUNT_MAYBE_BENEATH(mp, new_path, beneath); 3647 if (IS_ERR(mp.parent)) 3648 return PTR_ERR(mp.parent); 3649 3650 if (check_mnt(old)) { 3651 /* if the source is in our namespace... */ 3652 /* ... it should be detachable from parent */ 3653 if (!mnt_has_parent(old) || IS_MNT_LOCKED(old)) 3654 return -EINVAL; 3655 /* ... which should not be shared */ 3656 if (IS_MNT_SHARED(old->mnt_parent)) 3657 return -EINVAL; 3658 /* ... and the target should be in our namespace */ 3659 if (!check_mnt(mp.parent)) 3660 return -EINVAL; 3661 } else { 3662 /* 3663 * otherwise the source must be the root of some anon namespace. 3664 */ 3665 if (!anon_ns_root(old)) 3666 return -EINVAL; 3667 /* 3668 * Bail out early if the target is within the same namespace - 3669 * subsequent checks would've rejected that, but they'd miss 3670 * some corner cases if we didn't check it early. 3671 */ 3672 if (old->mnt_ns == mp.parent->mnt_ns) 3673 return -EINVAL; 3674 /* 3675 * Target should be either in our namespace or in an acceptable 3676 * anon namespace, sensu check_anonymous_mnt().
3677 */ 3678 if (!may_use_mount(mp.parent)) 3679 return -EINVAL; 3680 } 3681 3682 if (beneath) { 3683 struct mount *over = real_mount(new_path->mnt); 3684 3685 if (mp.parent != over->mnt_parent) 3686 over = mp.parent->overmount; 3687 err = can_move_mount_beneath(old, over, mp.mp); 3688 if (err) 3689 return err; 3690 } 3691 3692 /* 3693 * Don't move a mount tree containing unbindable mounts to a destination 3694 * mount which is shared. 3695 */ 3696 if (IS_MNT_SHARED(mp.parent) && tree_contains_unbindable(old)) 3697 return -EINVAL; 3698 if (!check_for_nsfs_mounts(old)) 3699 return -ELOOP; 3700 if (mount_is_ancestor(old, mp.parent)) 3701 return -ELOOP; 3702 3703 return attach_recursive_mnt(old, &mp); 3704 } 3705 3706 static int do_move_mount_old(const struct path *path, const char *old_name) 3707 { 3708 struct path old_path __free(path_put) = {}; 3709 int err; 3710 3711 if (!old_name || !*old_name) 3712 return -EINVAL; 3713 3714 err = kern_path(old_name, LOOKUP_FOLLOW, &old_path); 3715 if (err) 3716 return err; 3717 3718 return do_move_mount(&old_path, path, 0); 3719 } 3720 3721 /* 3722 * add a mount into a namespace's mount tree 3723 */ 3724 static int do_add_mount(struct mount *newmnt, const struct pinned_mountpoint *mp, 3725 int mnt_flags) 3726 { 3727 struct mount *parent = mp->parent; 3728 3729 if (IS_ERR(parent)) 3730 return PTR_ERR(parent); 3731 3732 mnt_flags &= ~MNT_INTERNAL_FLAGS; 3733 3734 if (unlikely(!check_mnt(parent))) { 3735 /* that's acceptable only for automounts done in private ns */ 3736 if (!(mnt_flags & MNT_SHRINKABLE)) 3737 return -EINVAL; 3738 /* ... and for those we'd better have mountpoint still alive */ 3739 if (!parent->mnt_ns) 3740 return -EINVAL; 3741 } 3742 3743 /* Refuse the same filesystem on the same mount point */ 3744 if (parent->mnt.mnt_sb == newmnt->mnt.mnt_sb && 3745 parent->mnt.mnt_root == mp->mp->m_dentry) 3746 return -EBUSY; 3747 3748 if (d_is_symlink(newmnt->mnt.mnt_root)) 3749 return -EINVAL; 3750 3751 newmnt->mnt.mnt_flags = mnt_flags; 3752 return graft_tree(newmnt, mp); 3753 } 3754 3755 static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags); 3756 3757 /* 3758 * Create a new mount using a superblock configuration and request it 3759 * be added to the namespace tree. 
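 * Note the __free(mntput) cleanup in do_new_mount_fc() below: the
 * vfsmount returned by fc_mount() is dropped automatically on every
 * error path, and retain_and_null_ptr() keeps it alive only once
 * do_add_mount() has consumed it.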
3760 */ 3761 static int do_new_mount_fc(struct fs_context *fc, const struct path *mountpoint, 3762 unsigned int mnt_flags) 3763 { 3764 struct super_block *sb; 3765 struct vfsmount *mnt __free(mntput) = fc_mount(fc); 3766 int error; 3767 3768 if (IS_ERR(mnt)) 3769 return PTR_ERR(mnt); 3770 3771 sb = fc->root->d_sb; 3772 error = security_sb_kern_mount(sb); 3773 if (unlikely(error)) 3774 return error; 3775 3776 if (unlikely(mount_too_revealing(sb, &mnt_flags))) { 3777 errorfcp(fc, "VFS", "Mount too revealing"); 3778 return -EPERM; 3779 } 3780 3781 mnt_warn_timestamp_expiry(mountpoint, mnt); 3782 3783 LOCK_MOUNT(mp, mountpoint); 3784 error = do_add_mount(real_mount(mnt), &mp, mnt_flags); 3785 if (!error) 3786 retain_and_null_ptr(mnt); // consumed on success 3787 return error; 3788 } 3789 3790 /* 3791 * create a new mount for userspace and request it to be added into the 3792 * namespace's tree 3793 */ 3794 static int do_new_mount(const struct path *path, const char *fstype, 3795 int sb_flags, int mnt_flags, 3796 const char *name, void *data) 3797 { 3798 struct file_system_type *type; 3799 struct fs_context *fc; 3800 const char *subtype = NULL; 3801 int err = 0; 3802 3803 if (!fstype) 3804 return -EINVAL; 3805 3806 type = get_fs_type(fstype); 3807 if (!type) 3808 return -ENODEV; 3809 3810 if (type->fs_flags & FS_HAS_SUBTYPE) { 3811 subtype = strchr(fstype, '.'); 3812 if (subtype) { 3813 subtype++; 3814 if (!*subtype) { 3815 put_filesystem(type); 3816 return -EINVAL; 3817 } 3818 } 3819 } 3820 3821 fc = fs_context_for_mount(type, sb_flags); 3822 put_filesystem(type); 3823 if (IS_ERR(fc)) 3824 return PTR_ERR(fc); 3825 3826 /* 3827 * Indicate to the filesystem that the mount request is coming 3828 * from the legacy mount system call. 3829 */ 3830 fc->oldapi = true; 3831 3832 if (subtype) 3833 err = vfs_parse_fs_string(fc, "subtype", subtype); 3834 if (!err && name) 3835 err = vfs_parse_fs_string(fc, "source", name); 3836 if (!err) 3837 err = parse_monolithic_mount_data(fc, data); 3838 if (!err && !mount_capable(fc)) 3839 err = -EPERM; 3840 if (!err) 3841 err = do_new_mount_fc(fc, path, mnt_flags); 3842 3843 put_fs_context(fc); 3844 return err; 3845 } 3846 3847 static void lock_mount_exact(const struct path *path, 3848 struct pinned_mountpoint *mp, bool copy_mount, 3849 unsigned int copy_flags) 3850 { 3851 struct dentry *dentry = path->dentry; 3852 int err; 3853 3854 /* A copy may only be requested for the root of a mount.
*/
3855 VFS_WARN_ON_ONCE(copy_mount && !path_mounted(path));
3856
3857 inode_lock(dentry->d_inode);
3858 namespace_lock();
3859 if (unlikely(cant_mount(dentry)))
3860 err = -ENOENT;
3861 else if (!copy_mount && path_overmounted(path))
3862 err = -EBUSY;
3863 else
3864 err = get_mountpoint(dentry, mp);
3865 if (unlikely(err)) {
3866 namespace_unlock();
3867 inode_unlock(dentry->d_inode);
3868 mp->parent = ERR_PTR(err);
3869 return;
3870 }
3871
3872 if (copy_mount)
3873 mp->parent = clone_mnt(real_mount(path->mnt), dentry, copy_flags);
3874 else
3875 mp->parent = real_mount(path->mnt);
3876 if (unlikely(IS_ERR(mp->parent)))
3877 __unlock_mount(mp);
3878 }
3879
3880 int finish_automount(struct vfsmount *__m, const struct path *path)
3881 {
3882 struct vfsmount *m __free(mntput) = __m;
3883 struct mount *mnt;
3884 int err;
3885
3886 if (!m)
3887 return 0;
3888 if (IS_ERR(m))
3889 return PTR_ERR(m);
3890
3891 mnt = real_mount(m);
3892
3893 if (m->mnt_root == path->dentry)
3894 return -ELOOP;
3895
3896 /*
3897 * we don't want to use LOCK_MOUNT() - in this case finding something
3898 * that overmounts our mountpoint means "quietly drop what we've got",
3899 * not "try to mount it on top".
3900 */
3901 LOCK_MOUNT_EXACT(mp, path);
3902 if (mp.parent == ERR_PTR(-EBUSY))
3903 return 0;
3904
3905 err = do_add_mount(mnt, &mp, path->mnt->mnt_flags | MNT_SHRINKABLE);
3906 if (likely(!err))
3907 retain_and_null_ptr(m);
3908 return err;
3909 }
3910
3911 /**
3912 * mnt_set_expiry - Put a mount on an expiration list
3913 * @mnt: The mount to list.
3914 * @expiry_list: The list to add the mount to.
3915 */
3916 void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
3917 {
3918 guard(mount_locked_reader)();
3919 list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);
3920 }
3921 EXPORT_SYMBOL(mnt_set_expiry);
3922
3923 /*
3924 * process a list of expirable mountpoints with the intent of discarding any
3925 * mountpoints that aren't in use and haven't been touched since last we came
3926 * here
3927 */
3928 void mark_mounts_for_expiry(struct list_head *mounts)
3929 {
3930 struct mount *mnt, *next;
3931 LIST_HEAD(graveyard);
3932
3933 if (list_empty(mounts))
3934 return;
3935
3936 guard(namespace_excl)();
3937 guard(mount_writer)();
3938
3939 /* extract from the expiration list every vfsmount that matches the
3940 * following criteria:
3941 * - already mounted
3942 * - only referenced by its parent vfsmount
3943 * - still marked for expiry (marked on the last call here; marks are
3944 * cleared by mntput())
3945 */
3946 list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
3947 if (!is_mounted(&mnt->mnt))
3948 continue;
3949 if (!xchg(&mnt->mnt_expiry_mark, 1) ||
3950 propagate_mount_busy(mnt, 1))
3951 continue;
3952 list_move(&mnt->mnt_expire, &graveyard);
3953 }
3954 while (!list_empty(&graveyard)) {
3955 mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
3956 touch_mnt_namespace(mnt->mnt_ns);
3957 umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
3958 }
3959 }
3960
3961 EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
3962
3963 /*
3964 * Ripoff of 'select_parent()'
3965 *
3966 * search the list of submounts for a given mountpoint, and move any
3967 * shrinkable submounts to the 'graveyard' list.
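*
* The walk is iterative (note the repeat/resume labels) rather than
* recursive; per shrink_submounts() below, mount_lock must be held for
* write while this runs.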
3968 */ 3969 static int select_submounts(struct mount *parent, struct list_head *graveyard) 3970 { 3971 struct mount *this_parent = parent; 3972 struct list_head *next; 3973 int found = 0; 3974 3975 repeat: 3976 next = this_parent->mnt_mounts.next; 3977 resume: 3978 while (next != &this_parent->mnt_mounts) { 3979 struct list_head *tmp = next; 3980 struct mount *mnt = list_entry(tmp, struct mount, mnt_child); 3981 3982 next = tmp->next; 3983 if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE)) 3984 continue; 3985 /* 3986 * Descend a level if the d_mounts list is non-empty. 3987 */ 3988 if (!list_empty(&mnt->mnt_mounts)) { 3989 this_parent = mnt; 3990 goto repeat; 3991 } 3992 3993 if (!propagate_mount_busy(mnt, 1)) { 3994 list_move_tail(&mnt->mnt_expire, graveyard); 3995 found++; 3996 } 3997 } 3998 /* 3999 * All done at this level ... ascend and resume the search 4000 */ 4001 if (this_parent != parent) { 4002 next = this_parent->mnt_child.next; 4003 this_parent = this_parent->mnt_parent; 4004 goto resume; 4005 } 4006 return found; 4007 } 4008 4009 /* 4010 * process a list of expirable mountpoints with the intent of discarding any 4011 * submounts of a specific parent mountpoint 4012 * 4013 * mount_lock must be held for write 4014 */ 4015 static void shrink_submounts(struct mount *mnt) 4016 { 4017 LIST_HEAD(graveyard); 4018 struct mount *m; 4019 4020 /* extract submounts of 'mountpoint' from the expiration list */ 4021 while (select_submounts(mnt, &graveyard)) { 4022 while (!list_empty(&graveyard)) { 4023 m = list_first_entry(&graveyard, struct mount, 4024 mnt_expire); 4025 touch_mnt_namespace(m->mnt_ns); 4026 umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC); 4027 } 4028 } 4029 } 4030 4031 static void *copy_mount_options(const void __user * data) 4032 { 4033 char *copy; 4034 unsigned left, offset; 4035 4036 if (!data) 4037 return NULL; 4038 4039 copy = kmalloc(PAGE_SIZE, GFP_KERNEL); 4040 if (!copy) 4041 return ERR_PTR(-ENOMEM); 4042 4043 left = copy_from_user(copy, data, PAGE_SIZE); 4044 4045 /* 4046 * Not all architectures have an exact copy_from_user(). Resort to 4047 * byte at a time. 4048 */ 4049 offset = PAGE_SIZE - left; 4050 while (left) { 4051 char c; 4052 if (get_user(c, (const char __user *)data + offset)) 4053 break; 4054 copy[offset] = c; 4055 left--; 4056 offset++; 4057 } 4058 4059 if (left == PAGE_SIZE) { 4060 kfree(copy); 4061 return ERR_PTR(-EFAULT); 4062 } 4063 4064 return copy; 4065 } 4066 4067 static char *copy_mount_string(const void __user *data) 4068 { 4069 return data ? strndup_user(data, PATH_MAX) : NULL; 4070 } 4071 4072 /* 4073 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to 4074 * be given to the mount() call (ie: read-only, no-dev, no-suid etc). 4075 * 4076 * data is a (void *) that can point to any structure up to 4077 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent 4078 * information (or be NULL). 4079 * 4080 * Pre-0.97 versions of mount() didn't have a flags word. 4081 * When the flags word was introduced its top half was required 4082 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9. 4083 * Therefore, if this magic number is present, it carries no information 4084 * and must be discarded. 
4085 */ 4086 int path_mount(const char *dev_name, const struct path *path, 4087 const char *type_page, unsigned long flags, void *data_page) 4088 { 4089 unsigned int mnt_flags = 0, sb_flags; 4090 int ret; 4091 4092 /* Discard magic */ 4093 if ((flags & MS_MGC_MSK) == MS_MGC_VAL) 4094 flags &= ~MS_MGC_MSK; 4095 4096 /* Basic sanity checks */ 4097 if (data_page) 4098 ((char *)data_page)[PAGE_SIZE - 1] = 0; 4099 4100 if (flags & MS_NOUSER) 4101 return -EINVAL; 4102 4103 ret = security_sb_mount(dev_name, path, type_page, flags, data_page); 4104 if (ret) 4105 return ret; 4106 if (!may_mount()) 4107 return -EPERM; 4108 if (flags & SB_MANDLOCK) 4109 warn_mandlock(); 4110 4111 /* Default to relatime unless overriden */ 4112 if (!(flags & MS_NOATIME)) 4113 mnt_flags |= MNT_RELATIME; 4114 4115 /* Separate the per-mountpoint flags */ 4116 if (flags & MS_NOSUID) 4117 mnt_flags |= MNT_NOSUID; 4118 if (flags & MS_NODEV) 4119 mnt_flags |= MNT_NODEV; 4120 if (flags & MS_NOEXEC) 4121 mnt_flags |= MNT_NOEXEC; 4122 if (flags & MS_NOATIME) 4123 mnt_flags |= MNT_NOATIME; 4124 if (flags & MS_NODIRATIME) 4125 mnt_flags |= MNT_NODIRATIME; 4126 if (flags & MS_STRICTATIME) 4127 mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME); 4128 if (flags & MS_RDONLY) 4129 mnt_flags |= MNT_READONLY; 4130 if (flags & MS_NOSYMFOLLOW) 4131 mnt_flags |= MNT_NOSYMFOLLOW; 4132 4133 /* The default atime for remount is preservation */ 4134 if ((flags & MS_REMOUNT) && 4135 ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME | 4136 MS_STRICTATIME)) == 0)) { 4137 mnt_flags &= ~MNT_ATIME_MASK; 4138 mnt_flags |= path->mnt->mnt_flags & MNT_ATIME_MASK; 4139 } 4140 4141 sb_flags = flags & (SB_RDONLY | 4142 SB_SYNCHRONOUS | 4143 SB_MANDLOCK | 4144 SB_DIRSYNC | 4145 SB_SILENT | 4146 SB_POSIXACL | 4147 SB_LAZYTIME | 4148 SB_I_VERSION); 4149 4150 if ((flags & (MS_REMOUNT | MS_BIND)) == (MS_REMOUNT | MS_BIND)) 4151 return do_reconfigure_mnt(path, mnt_flags); 4152 if (flags & MS_REMOUNT) 4153 return do_remount(path, sb_flags, mnt_flags, data_page); 4154 if (flags & MS_BIND) 4155 return do_loopback(path, dev_name, flags & MS_REC); 4156 if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE)) 4157 return do_change_type(path, flags); 4158 if (flags & MS_MOVE) 4159 return do_move_mount_old(path, dev_name); 4160 4161 return do_new_mount(path, type_page, sb_flags, mnt_flags, dev_name, 4162 data_page); 4163 } 4164 4165 int do_mount(const char *dev_name, const char __user *dir_name, 4166 const char *type_page, unsigned long flags, void *data_page) 4167 { 4168 struct path path __free(path_put) = {}; 4169 int ret; 4170 4171 ret = user_path_at(AT_FDCWD, dir_name, LOOKUP_FOLLOW, &path); 4172 if (ret) 4173 return ret; 4174 return path_mount(dev_name, &path, type_page, flags, data_page); 4175 } 4176 4177 static struct ucounts *inc_mnt_namespaces(struct user_namespace *ns) 4178 { 4179 return inc_ucount(ns, current_euid(), UCOUNT_MNT_NAMESPACES); 4180 } 4181 4182 static void dec_mnt_namespaces(struct ucounts *ucounts) 4183 { 4184 dec_ucount(ucounts, UCOUNT_MNT_NAMESPACES); 4185 } 4186 4187 static void free_mnt_ns(struct mnt_namespace *ns) 4188 { 4189 if (!is_anon_ns(ns)) 4190 ns_common_free(ns); 4191 dec_mnt_namespaces(ns->ucounts); 4192 mnt_ns_tree_remove(ns); 4193 } 4194 4195 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns, bool anon) 4196 { 4197 struct mnt_namespace *new_ns; 4198 struct ucounts *ucounts; 4199 int ret; 4200 4201 ucounts = inc_mnt_namespaces(user_ns); 4202 if (!ucounts) 4203 return ERR_PTR(-ENOSPC); 4204 4205 new_ns = 
kzalloc_obj(struct mnt_namespace, GFP_KERNEL_ACCOUNT); 4206 if (!new_ns) { 4207 dec_mnt_namespaces(ucounts); 4208 return ERR_PTR(-ENOMEM); 4209 } 4210 4211 if (anon) 4212 ret = ns_common_init_inum(new_ns, MNT_NS_ANON_INO); 4213 else 4214 ret = ns_common_init(new_ns); 4215 if (ret) { 4216 kfree(new_ns); 4217 dec_mnt_namespaces(ucounts); 4218 return ERR_PTR(ret); 4219 } 4220 ns_tree_gen_id(new_ns); 4221 4222 new_ns->is_anon = anon; 4223 refcount_set(&new_ns->passive, 1); 4224 new_ns->mounts = RB_ROOT; 4225 init_waitqueue_head(&new_ns->poll); 4226 new_ns->user_ns = get_user_ns(user_ns); 4227 new_ns->ucounts = ucounts; 4228 return new_ns; 4229 } 4230 4231 __latent_entropy 4232 struct mnt_namespace *copy_mnt_ns(u64 flags, struct mnt_namespace *ns, 4233 struct user_namespace *user_ns, struct fs_struct *new_fs) 4234 { 4235 struct mnt_namespace *new_ns; 4236 struct path old_root __free(path_put) = {}; 4237 struct path old_pwd __free(path_put) = {}; 4238 struct mount *p, *q; 4239 struct mount *old; 4240 struct mount *new; 4241 int copy_flags; 4242 4243 BUG_ON(!ns); 4244 4245 if (likely(!(flags & CLONE_NEWNS))) { 4246 get_mnt_ns(ns); 4247 return ns; 4248 } 4249 4250 old = ns->root; 4251 4252 new_ns = alloc_mnt_ns(user_ns, false); 4253 if (IS_ERR(new_ns)) 4254 return new_ns; 4255 4256 guard(namespace_excl)(); 4257 4258 if (flags & CLONE_EMPTY_MNTNS) 4259 copy_flags = 0; 4260 else 4261 copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE; 4262 if (user_ns != ns->user_ns) 4263 copy_flags |= CL_SLAVE; 4264 4265 if (flags & CLONE_EMPTY_MNTNS) 4266 new = clone_mnt(old, old->mnt.mnt_root, copy_flags); 4267 else 4268 new = copy_tree(old, old->mnt.mnt_root, copy_flags); 4269 if (IS_ERR(new)) { 4270 emptied_ns = new_ns; 4271 return ERR_CAST(new); 4272 } 4273 if (user_ns != ns->user_ns) { 4274 guard(mount_writer)(); 4275 lock_mnt_tree(new); 4276 } 4277 new_ns->root = new; 4278 4279 if (flags & CLONE_EMPTY_MNTNS) { 4280 /* 4281 * Empty mount namespace: only the root mount exists. 4282 * Reset root and pwd to the cloned mount's root dentry. 4283 */ 4284 if (new_fs) { 4285 old_root = new_fs->root; 4286 old_pwd = new_fs->pwd; 4287 4288 new_fs->root.mnt = mntget(&new->mnt); 4289 new_fs->root.dentry = dget(new->mnt.mnt_root); 4290 4291 new_fs->pwd.mnt = mntget(&new->mnt); 4292 new_fs->pwd.dentry = dget(new->mnt.mnt_root); 4293 } 4294 mnt_add_to_ns(new_ns, new); 4295 new_ns->nr_mounts++; 4296 } else { 4297 /* 4298 * Full copy: walk old and new trees in parallel, switching 4299 * the tsk->fs->* elements and marking new vfsmounts as 4300 * belonging to new namespace. We have already acquired a 4301 * private fs_struct, so tsk->fs->lock is not needed. 4302 */ 4303 p = old; 4304 q = new; 4305 while (p) { 4306 mnt_add_to_ns(new_ns, q); 4307 new_ns->nr_mounts++; 4308 if (new_fs) { 4309 if (&p->mnt == new_fs->root.mnt) { 4310 old_root.mnt = new_fs->root.mnt; 4311 new_fs->root.mnt = mntget(&q->mnt); 4312 } 4313 if (&p->mnt == new_fs->pwd.mnt) { 4314 old_pwd.mnt = new_fs->pwd.mnt; 4315 new_fs->pwd.mnt = mntget(&q->mnt); 4316 } 4317 } 4318 p = next_mnt(p, old); 4319 q = next_mnt(q, new); 4320 if (!q) 4321 break; 4322 // an mntns binding we'd skipped? 
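// copy_tree() can skip some mounts of the old tree (such as the
// mntns bindings mentioned above), leaving q ahead of p; advance
// p over the skipped subtrees until both walks are in step again.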
4323 while (p->mnt.mnt_root != q->mnt.mnt_root) 4324 p = next_mnt(skip_mnt_tree(p), old); 4325 } 4326 } 4327 ns_tree_add_raw(new_ns); 4328 return new_ns; 4329 } 4330 4331 struct dentry *mount_subtree(struct vfsmount *m, const char *name) 4332 { 4333 struct mount *mnt = real_mount(m); 4334 struct mnt_namespace *ns; 4335 struct super_block *s; 4336 struct path path; 4337 int err; 4338 4339 ns = alloc_mnt_ns(&init_user_ns, true); 4340 if (IS_ERR(ns)) { 4341 mntput(m); 4342 return ERR_CAST(ns); 4343 } 4344 ns->root = mnt; 4345 ns->nr_mounts++; 4346 mnt_add_to_ns(ns, mnt); 4347 4348 err = vfs_path_lookup(m->mnt_root, m, 4349 name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path); 4350 4351 put_mnt_ns(ns); 4352 4353 if (err) 4354 return ERR_PTR(err); 4355 4356 /* trade a vfsmount reference for active sb one */ 4357 s = path.mnt->mnt_sb; 4358 atomic_inc(&s->s_active); 4359 mntput(path.mnt); 4360 /* lock the sucker */ 4361 down_write(&s->s_umount); 4362 /* ... and return the root of (sub)tree on it */ 4363 return path.dentry; 4364 } 4365 EXPORT_SYMBOL(mount_subtree); 4366 4367 SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name, 4368 char __user *, type, unsigned long, flags, void __user *, data) 4369 { 4370 int ret; 4371 char *kernel_type; 4372 char *kernel_dev; 4373 void *options; 4374 4375 kernel_type = copy_mount_string(type); 4376 ret = PTR_ERR(kernel_type); 4377 if (IS_ERR(kernel_type)) 4378 goto out_type; 4379 4380 kernel_dev = copy_mount_string(dev_name); 4381 ret = PTR_ERR(kernel_dev); 4382 if (IS_ERR(kernel_dev)) 4383 goto out_dev; 4384 4385 options = copy_mount_options(data); 4386 ret = PTR_ERR(options); 4387 if (IS_ERR(options)) 4388 goto out_data; 4389 4390 ret = do_mount(kernel_dev, dir_name, kernel_type, flags, options); 4391 4392 kfree(options); 4393 out_data: 4394 kfree(kernel_dev); 4395 out_dev: 4396 kfree(kernel_type); 4397 out_type: 4398 return ret; 4399 } 4400 4401 #define FSMOUNT_VALID_FLAGS \ 4402 (MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOSUID | MOUNT_ATTR_NODEV | \ 4403 MOUNT_ATTR_NOEXEC | MOUNT_ATTR__ATIME | MOUNT_ATTR_NODIRATIME | \ 4404 MOUNT_ATTR_NOSYMFOLLOW) 4405 4406 #define MOUNT_SETATTR_VALID_FLAGS (FSMOUNT_VALID_FLAGS | MOUNT_ATTR_IDMAP) 4407 4408 #define MOUNT_SETATTR_PROPAGATION_FLAGS \ 4409 (MS_UNBINDABLE | MS_PRIVATE | MS_SLAVE | MS_SHARED) 4410 4411 static unsigned int attr_flags_to_mnt_flags(u64 attr_flags) 4412 { 4413 unsigned int mnt_flags = 0; 4414 4415 if (attr_flags & MOUNT_ATTR_RDONLY) 4416 mnt_flags |= MNT_READONLY; 4417 if (attr_flags & MOUNT_ATTR_NOSUID) 4418 mnt_flags |= MNT_NOSUID; 4419 if (attr_flags & MOUNT_ATTR_NODEV) 4420 mnt_flags |= MNT_NODEV; 4421 if (attr_flags & MOUNT_ATTR_NOEXEC) 4422 mnt_flags |= MNT_NOEXEC; 4423 if (attr_flags & MOUNT_ATTR_NODIRATIME) 4424 mnt_flags |= MNT_NODIRATIME; 4425 if (attr_flags & MOUNT_ATTR_NOSYMFOLLOW) 4426 mnt_flags |= MNT_NOSYMFOLLOW; 4427 4428 return mnt_flags; 4429 } 4430 4431 /* 4432 * Create a kernel mount representation for a new, prepared superblock 4433 * (specified by fs_fd) and attach to an open_tree-like file descriptor. 
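*
* A typical userspace sequence looks like this (illustrative only,
* error handling elided; see Documentation/filesystems/mount_api.rst):
*
*	fsfd = fsopen("ext4", FSOPEN_CLOEXEC);
*	fsconfig(fsfd, FSCONFIG_SET_STRING, "source", "/dev/sda1", 0);
*	fsconfig(fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
*	mfd = fsmount(fsfd, FSMOUNT_CLOEXEC, MOUNT_ATTR_NODEV);
*	move_mount(mfd, "", AT_FDCWD, "/mnt", MOVE_MOUNT_F_EMPTY_PATH);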
4434 */ 4435 SYSCALL_DEFINE3(fsmount, int, fs_fd, unsigned int, flags, 4436 unsigned int, attr_flags) 4437 { 4438 struct path new_path __free(path_put) = {}; 4439 struct mnt_namespace *ns; 4440 struct fs_context *fc; 4441 struct vfsmount *new_mnt; 4442 struct mount *mnt; 4443 unsigned int mnt_flags = 0; 4444 long ret; 4445 4446 if ((flags & ~(FSMOUNT_CLOEXEC | FSMOUNT_NAMESPACE)) != 0) 4447 return -EINVAL; 4448 4449 if ((flags & FSMOUNT_NAMESPACE) && 4450 !ns_capable(current_user_ns(), CAP_SYS_ADMIN)) 4451 return -EPERM; 4452 4453 if (!(flags & FSMOUNT_NAMESPACE) && !may_mount()) 4454 return -EPERM; 4455 4456 if (attr_flags & ~FSMOUNT_VALID_FLAGS) 4457 return -EINVAL; 4458 4459 mnt_flags = attr_flags_to_mnt_flags(attr_flags); 4460 4461 switch (attr_flags & MOUNT_ATTR__ATIME) { 4462 case MOUNT_ATTR_STRICTATIME: 4463 break; 4464 case MOUNT_ATTR_NOATIME: 4465 mnt_flags |= MNT_NOATIME; 4466 break; 4467 case MOUNT_ATTR_RELATIME: 4468 mnt_flags |= MNT_RELATIME; 4469 break; 4470 default: 4471 return -EINVAL; 4472 } 4473 4474 CLASS(fd, f)(fs_fd); 4475 if (fd_empty(f)) 4476 return -EBADF; 4477 4478 if (fd_file(f)->f_op != &fscontext_fops) 4479 return -EINVAL; 4480 4481 fc = fd_file(f)->private_data; 4482 4483 ACQUIRE(mutex_intr, uapi_mutex)(&fc->uapi_mutex); 4484 ret = ACQUIRE_ERR(mutex_intr, &uapi_mutex); 4485 if (ret) 4486 return ret; 4487 4488 /* There must be a valid superblock or we can't mount it */ 4489 ret = -EINVAL; 4490 if (!fc->root) 4491 return ret; 4492 4493 ret = -EPERM; 4494 if (mount_too_revealing(fc->root->d_sb, &mnt_flags)) { 4495 errorfcp(fc, "VFS", "Mount too revealing"); 4496 return ret; 4497 } 4498 4499 ret = -EBUSY; 4500 if (fc->phase != FS_CONTEXT_AWAITING_MOUNT) 4501 return ret; 4502 4503 if (fc->sb_flags & SB_MANDLOCK) 4504 warn_mandlock(); 4505 4506 new_mnt = vfs_create_mount(fc); 4507 if (IS_ERR(new_mnt)) 4508 return PTR_ERR(new_mnt); 4509 new_mnt->mnt_flags = mnt_flags; 4510 4511 new_path.dentry = dget(fc->root); 4512 new_path.mnt = new_mnt; 4513 4514 /* We've done the mount bit - now move the file context into more or 4515 * less the same state as if we'd done an fspick(). We don't want to 4516 * do any memory allocation or anything like that at this point as we 4517 * don't want to have to handle any errors incurred. 4518 */ 4519 vfs_clean_context(fc); 4520 4521 if (flags & FSMOUNT_NAMESPACE) 4522 return FD_ADD((flags & FSMOUNT_CLOEXEC) ? O_CLOEXEC : 0, 4523 open_new_namespace(&new_path, 0)); 4524 4525 ns = alloc_mnt_ns(current->nsproxy->mnt_ns->user_ns, true); 4526 if (IS_ERR(ns)) 4527 return PTR_ERR(ns); 4528 mnt = real_mount(new_path.mnt); 4529 ns->root = mnt; 4530 ns->nr_mounts = 1; 4531 mnt_add_to_ns(ns, mnt); 4532 mntget(new_path.mnt); 4533 4534 FD_PREPARE(fdf, (flags & FSMOUNT_CLOEXEC) ? O_CLOEXEC : 0, 4535 dentry_open(&new_path, O_PATH, fc->cred)); 4536 if (fdf.err) { 4537 dissolve_on_fput(new_path.mnt); 4538 return fdf.err; 4539 } 4540 4541 /* 4542 * Attach to an apparent O_PATH fd with a note that we 4543 * need to unmount it, not just simply put it. 
4544 */ 4545 fd_prepare_file(fdf)->f_mode |= FMODE_NEED_UNMOUNT; 4546 return fd_publish(fdf); 4547 } 4548 4549 static inline int vfs_move_mount(const struct path *from_path, 4550 const struct path *to_path, 4551 enum mnt_tree_flags_t mflags) 4552 { 4553 int ret; 4554 4555 ret = security_move_mount(from_path, to_path); 4556 if (ret) 4557 return ret; 4558 4559 if (mflags & MNT_TREE_PROPAGATION) 4560 return do_set_group(from_path, to_path); 4561 4562 return do_move_mount(from_path, to_path, mflags); 4563 } 4564 4565 /* 4566 * Move a mount from one place to another. In combination with 4567 * fsopen()/fsmount() this is used to install a new mount and in combination 4568 * with open_tree(OPEN_TREE_CLONE [| AT_RECURSIVE]) it can be used to copy 4569 * a mount subtree. 4570 * 4571 * Note the flags value is a combination of MOVE_MOUNT_* flags. 4572 */ 4573 SYSCALL_DEFINE5(move_mount, 4574 int, from_dfd, const char __user *, from_pathname, 4575 int, to_dfd, const char __user *, to_pathname, 4576 unsigned int, flags) 4577 { 4578 struct path to_path __free(path_put) = {}; 4579 struct path from_path __free(path_put) = {}; 4580 unsigned int lflags, uflags; 4581 enum mnt_tree_flags_t mflags = 0; 4582 int ret = 0; 4583 4584 if (!may_mount()) 4585 return -EPERM; 4586 4587 if (flags & ~MOVE_MOUNT__MASK) 4588 return -EINVAL; 4589 4590 if ((flags & (MOVE_MOUNT_BENEATH | MOVE_MOUNT_SET_GROUP)) == 4591 (MOVE_MOUNT_BENEATH | MOVE_MOUNT_SET_GROUP)) 4592 return -EINVAL; 4593 4594 if (flags & MOVE_MOUNT_SET_GROUP) mflags |= MNT_TREE_PROPAGATION; 4595 if (flags & MOVE_MOUNT_BENEATH) mflags |= MNT_TREE_BENEATH; 4596 4597 uflags = 0; 4598 if (flags & MOVE_MOUNT_T_EMPTY_PATH) 4599 uflags = AT_EMPTY_PATH; 4600 4601 CLASS(filename_maybe_null,to_name)(to_pathname, uflags); 4602 if (!to_name && to_dfd >= 0) { 4603 CLASS(fd_raw, f_to)(to_dfd); 4604 if (fd_empty(f_to)) 4605 return -EBADF; 4606 4607 to_path = fd_file(f_to)->f_path; 4608 path_get(&to_path); 4609 } else { 4610 lflags = 0; 4611 if (flags & MOVE_MOUNT_T_SYMLINKS) 4612 lflags |= LOOKUP_FOLLOW; 4613 if (flags & MOVE_MOUNT_T_AUTOMOUNTS) 4614 lflags |= LOOKUP_AUTOMOUNT; 4615 ret = filename_lookup(to_dfd, to_name, lflags, &to_path, NULL); 4616 if (ret) 4617 return ret; 4618 } 4619 4620 uflags = 0; 4621 if (flags & MOVE_MOUNT_F_EMPTY_PATH) 4622 uflags = AT_EMPTY_PATH; 4623 4624 CLASS(filename_maybe_null,from_name)(from_pathname, uflags); 4625 if (!from_name && from_dfd >= 0) { 4626 CLASS(fd_raw, f_from)(from_dfd); 4627 if (fd_empty(f_from)) 4628 return -EBADF; 4629 4630 return vfs_move_mount(&fd_file(f_from)->f_path, &to_path, mflags); 4631 } 4632 4633 lflags = 0; 4634 if (flags & MOVE_MOUNT_F_SYMLINKS) 4635 lflags |= LOOKUP_FOLLOW; 4636 if (flags & MOVE_MOUNT_F_AUTOMOUNTS) 4637 lflags |= LOOKUP_AUTOMOUNT; 4638 ret = filename_lookup(from_dfd, from_name, lflags, &from_path, NULL); 4639 if (ret) 4640 return ret; 4641 4642 return vfs_move_mount(&from_path, &to_path, mflags); 4643 } 4644 4645 /* 4646 * Return true if path is reachable from root 4647 * 4648 * locks: mount_locked_reader || namespace_shared && is_mounted(mnt) 4649 */ 4650 bool is_path_reachable(struct mount *mnt, struct dentry *dentry, 4651 const struct path *root) 4652 { 4653 while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) { 4654 dentry = mnt->mnt_mountpoint; 4655 mnt = mnt->mnt_parent; 4656 } 4657 return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry); 4658 } 4659 4660 bool path_is_under(const struct path *path1, const struct path *path2) 4661 { 4662 guard(mount_locked_reader)(); 4663 
return is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
4664 }
4665 EXPORT_SYMBOL(path_is_under);
4666
4667 int path_pivot_root(struct path *new, struct path *old)
4668 {
4669 struct path root __free(path_put) = {};
4670 struct mount *new_mnt, *root_mnt, *old_mnt, *root_parent, *ex_parent;
4671 int error;
4672
4673 if (!may_mount())
4674 return -EPERM;
4675
4676 error = security_sb_pivotroot(old, new);
4677 if (error)
4678 return error;
4679
4680 get_fs_root(current->fs, &root);
4681
4682 LOCK_MOUNT(old_mp, old);
4683 old_mnt = old_mp.parent;
4684 if (IS_ERR(old_mnt))
4685 return PTR_ERR(old_mnt);
4686
4687 new_mnt = real_mount(new->mnt);
4688 root_mnt = real_mount(root.mnt);
4689 ex_parent = new_mnt->mnt_parent;
4690 root_parent = root_mnt->mnt_parent;
4691 if (IS_MNT_SHARED(old_mnt) ||
4692 IS_MNT_SHARED(ex_parent) ||
4693 IS_MNT_SHARED(root_parent))
4694 return -EINVAL;
4695 if (!check_mnt(root_mnt) || !check_mnt(new_mnt))
4696 return -EINVAL;
4697 if (new_mnt->mnt.mnt_flags & MNT_LOCKED)
4698 return -EINVAL;
4699 if (d_unlinked(new->dentry))
4700 return -ENOENT;
4701 if (new_mnt == root_mnt || old_mnt == root_mnt)
4702 return -EBUSY; /* loop, on the same file system */
4703 if (!path_mounted(&root))
4704 return -EINVAL; /* not a mountpoint */
4705 if (!mnt_has_parent(root_mnt))
4706 return -EINVAL; /* absolute root */
4707 if (!path_mounted(new))
4708 return -EINVAL; /* not a mountpoint */
4709 if (!mnt_has_parent(new_mnt))
4710 return -EINVAL; /* absolute root */
4711 /* make sure we can reach put_old from new_root */
4712 if (!is_path_reachable(old_mnt, old_mp.mp->m_dentry, new))
4713 return -EINVAL;
4714 /* make certain new is below the root */
4715 if (!is_path_reachable(new_mnt, new->dentry, &root))
4716 return -EINVAL;
4717 lock_mount_hash();
4718 umount_mnt(new_mnt);
4719 if (root_mnt->mnt.mnt_flags & MNT_LOCKED) {
4720 new_mnt->mnt.mnt_flags |= MNT_LOCKED;
4721 root_mnt->mnt.mnt_flags &= ~MNT_LOCKED;
4722 }
4723 /* mount new_root on / */
4724 attach_mnt(new_mnt, root_parent, root_mnt->mnt_mp);
4725 umount_mnt(root_mnt);
4726 /* mount old root on put_old */
4727 attach_mnt(root_mnt, old_mnt, old_mp.mp);
4728 touch_mnt_namespace(current->nsproxy->mnt_ns);
4729 /* A moved mount should not expire automatically */
4730 list_del_init(&new_mnt->mnt_expire);
4731 unlock_mount_hash();
4732 mnt_notify_add(root_mnt);
4733 mnt_notify_add(new_mnt);
4734 chroot_fs_refs(&root, new);
4735 return 0;
4736 }
4737
4738 /*
4739 * pivot_root Semantics:
4740 * Moves the root file system of the current process to the directory put_old,
4741 * makes new_root the new root file system of the current process, and sets
4742 * root/cwd of all processes which had them on the current root to new_root.
4743 *
4744 * Restrictions:
4745 * The new_root and put_old must be directories, and must not be on the
4746 * same file system as the current process root. The put_old must be
4747 * underneath new_root, i.e. adding a non-zero number of /.. to the string
4748 * pointed to by put_old must yield the same directory as new_root. No other
4749 * file system may be mounted on put_old. After all, new_root is a mountpoint.
4750 *
4751 * The immutable nullfs filesystem is mounted as the true root of the VFS
4752 * hierarchy. The mutable rootfs (tmpfs/ramfs) is layered on top of this,
4753 * allowing pivot_root() to work normally from initramfs.
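*
* A typical sequence from an initramfs /init (illustrative only; the
* device, fstype and directory names are examples, and put_old must
* already exist under new_root):
*
*	mount("/dev/sda1", "/newroot", "ext4", 0, NULL);
*	chdir("/newroot");
*	pivot_root(".", "oldroot");
*	chroot(".");
*	umount2("/oldroot", MNT_DETACH);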
4754 * 4755 * Notes: 4756 * - we don't move root/cwd if they are not at the root (reason: if something 4757 * cared enough to change them, it's probably wrong to force them elsewhere) 4758 * - it's okay to pick a root that isn't the root of a file system, e.g. 4759 * /nfs/my_root where /nfs is the mount point. It must be a mountpoint, 4760 * though, so you may need to say mount --bind /nfs/my_root /nfs/my_root 4761 * first. 4762 */ 4763 SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, 4764 const char __user *, put_old) 4765 { 4766 struct path new __free(path_put) = {}; 4767 struct path old __free(path_put) = {}; 4768 int error; 4769 4770 error = user_path_at(AT_FDCWD, new_root, 4771 LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &new); 4772 if (error) 4773 return error; 4774 4775 error = user_path_at(AT_FDCWD, put_old, 4776 LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old); 4777 if (error) 4778 return error; 4779 4780 return path_pivot_root(&new, &old); 4781 } 4782 4783 static unsigned int recalc_flags(struct mount_kattr *kattr, struct mount *mnt) 4784 { 4785 unsigned int flags = mnt->mnt.mnt_flags; 4786 4787 /* flags to clear */ 4788 flags &= ~kattr->attr_clr; 4789 /* flags to raise */ 4790 flags |= kattr->attr_set; 4791 4792 return flags; 4793 } 4794 4795 static int can_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt) 4796 { 4797 struct vfsmount *m = &mnt->mnt; 4798 struct user_namespace *fs_userns = m->mnt_sb->s_user_ns; 4799 4800 if (!kattr->mnt_idmap) 4801 return 0; 4802 4803 /* 4804 * Creating an idmapped mount with the filesystem wide idmapping 4805 * doesn't make sense so block that. We don't allow mushy semantics. 4806 */ 4807 if (kattr->mnt_userns == m->mnt_sb->s_user_ns) 4808 return -EINVAL; 4809 4810 /* 4811 * We only allow an mount to change it's idmapping if it has 4812 * never been accessible to userspace. 4813 */ 4814 if (!(kattr->kflags & MOUNT_KATTR_IDMAP_REPLACE) && is_idmapped_mnt(m)) 4815 return -EPERM; 4816 4817 /* The underlying filesystem doesn't support idmapped mounts yet. */ 4818 if (!(m->mnt_sb->s_type->fs_flags & FS_ALLOW_IDMAP)) 4819 return -EINVAL; 4820 4821 /* The filesystem has turned off idmapped mounts. */ 4822 if (m->mnt_sb->s_iflags & SB_I_NOIDMAP) 4823 return -EINVAL; 4824 4825 /* We're not controlling the superblock. */ 4826 if (!ns_capable(fs_userns, CAP_SYS_ADMIN)) 4827 return -EPERM; 4828 4829 /* Mount has already been visible in the filesystem hierarchy. */ 4830 if (!is_anon_ns(mnt->mnt_ns)) 4831 return -EINVAL; 4832 4833 return 0; 4834 } 4835 4836 /** 4837 * mnt_allow_writers() - check whether the attribute change allows writers 4838 * @kattr: the new mount attributes 4839 * @mnt: the mount to which @kattr will be applied 4840 * 4841 * Check whether thew new mount attributes in @kattr allow concurrent writers. 
4842 * 4843 * Return: true if writers need to be held, false if not 4844 */ 4845 static inline bool mnt_allow_writers(const struct mount_kattr *kattr, 4846 const struct mount *mnt) 4847 { 4848 return (!(kattr->attr_set & MNT_READONLY) || 4849 (mnt->mnt.mnt_flags & MNT_READONLY)) && 4850 !kattr->mnt_idmap; 4851 } 4852 4853 static int mount_setattr_prepare(struct mount_kattr *kattr, struct mount *mnt) 4854 { 4855 struct mount *m; 4856 int err; 4857 4858 for (m = mnt; m; m = next_mnt(m, mnt)) { 4859 if (!can_change_locked_flags(m, recalc_flags(kattr, m))) { 4860 err = -EPERM; 4861 break; 4862 } 4863 4864 err = can_idmap_mount(kattr, m); 4865 if (err) 4866 break; 4867 4868 if (!mnt_allow_writers(kattr, m)) { 4869 err = mnt_hold_writers(m); 4870 if (err) { 4871 m = next_mnt(m, mnt); 4872 break; 4873 } 4874 } 4875 4876 if (!(kattr->kflags & MOUNT_KATTR_RECURSE)) 4877 return 0; 4878 } 4879 4880 if (err) { 4881 /* undo all mnt_hold_writers() we'd done */ 4882 for (struct mount *p = mnt; p != m; p = next_mnt(p, mnt)) 4883 mnt_unhold_writers(p); 4884 } 4885 return err; 4886 } 4887 4888 static void do_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt) 4889 { 4890 struct mnt_idmap *old_idmap; 4891 4892 if (!kattr->mnt_idmap) 4893 return; 4894 4895 old_idmap = mnt_idmap(&mnt->mnt); 4896 4897 /* Pairs with smp_load_acquire() in mnt_idmap(). */ 4898 smp_store_release(&mnt->mnt.mnt_idmap, mnt_idmap_get(kattr->mnt_idmap)); 4899 mnt_idmap_put(old_idmap); 4900 } 4901 4902 static void mount_setattr_commit(struct mount_kattr *kattr, struct mount *mnt) 4903 { 4904 struct mount *m; 4905 4906 for (m = mnt; m; m = next_mnt(m, mnt)) { 4907 unsigned int flags; 4908 4909 do_idmap_mount(kattr, m); 4910 flags = recalc_flags(kattr, m); 4911 WRITE_ONCE(m->mnt.mnt_flags, flags); 4912 4913 /* If we had to hold writers unblock them. */ 4914 mnt_unhold_writers(m); 4915 4916 if (kattr->propagation) 4917 change_mnt_propagation(m, kattr->propagation); 4918 if (!(kattr->kflags & MOUNT_KATTR_RECURSE)) 4919 break; 4920 } 4921 touch_mnt_namespace(mnt->mnt_ns); 4922 } 4923 4924 static int do_mount_setattr(const struct path *path, struct mount_kattr *kattr) 4925 { 4926 struct mount *mnt = real_mount(path->mnt); 4927 int err = 0; 4928 4929 if (!path_mounted(path)) 4930 return -EINVAL; 4931 4932 if (kattr->mnt_userns) { 4933 struct mnt_idmap *mnt_idmap; 4934 4935 mnt_idmap = alloc_mnt_idmap(kattr->mnt_userns); 4936 if (IS_ERR(mnt_idmap)) 4937 return PTR_ERR(mnt_idmap); 4938 kattr->mnt_idmap = mnt_idmap; 4939 } 4940 4941 if (kattr->propagation) { 4942 /* 4943 * Only take namespace_lock() if we're actually changing 4944 * propagation. 4945 */ 4946 namespace_lock(); 4947 if (kattr->propagation == MS_SHARED) { 4948 err = invent_group_ids(mnt, kattr->kflags & MOUNT_KATTR_RECURSE); 4949 if (err) { 4950 namespace_unlock(); 4951 return err; 4952 } 4953 } 4954 } 4955 4956 err = -EINVAL; 4957 lock_mount_hash(); 4958 4959 if (!anon_ns_root(mnt) && !check_mnt(mnt)) 4960 goto out; 4961 4962 /* 4963 * First, we get the mount tree in a shape where we can change mount 4964 * properties without failure. If we succeeded to do so we commit all 4965 * changes and if we failed we clean up. 
4966 */ 4967 err = mount_setattr_prepare(kattr, mnt); 4968 if (!err) 4969 mount_setattr_commit(kattr, mnt); 4970 4971 out: 4972 unlock_mount_hash(); 4973 4974 if (kattr->propagation) { 4975 if (err) 4976 cleanup_group_ids(mnt, NULL); 4977 namespace_unlock(); 4978 } 4979 4980 return err; 4981 } 4982 4983 static int build_mount_idmapped(const struct mount_attr *attr, size_t usize, 4984 struct mount_kattr *kattr) 4985 { 4986 struct ns_common *ns; 4987 struct user_namespace *mnt_userns; 4988 4989 if (!((attr->attr_set | attr->attr_clr) & MOUNT_ATTR_IDMAP)) 4990 return 0; 4991 4992 if (attr->attr_clr & MOUNT_ATTR_IDMAP) { 4993 /* 4994 * We can only remove an idmapping if it's never been 4995 * exposed to userspace. 4996 */ 4997 if (!(kattr->kflags & MOUNT_KATTR_IDMAP_REPLACE)) 4998 return -EINVAL; 4999 5000 /* 5001 * Removal of idmappings is equivalent to setting 5002 * nop_mnt_idmap. 5003 */ 5004 if (!(attr->attr_set & MOUNT_ATTR_IDMAP)) { 5005 kattr->mnt_idmap = &nop_mnt_idmap; 5006 return 0; 5007 } 5008 } 5009 5010 if (attr->userns_fd > INT_MAX) 5011 return -EINVAL; 5012 5013 CLASS(fd, f)(attr->userns_fd); 5014 if (fd_empty(f)) 5015 return -EBADF; 5016 5017 if (!proc_ns_file(fd_file(f))) 5018 return -EINVAL; 5019 5020 ns = get_proc_ns(file_inode(fd_file(f))); 5021 if (ns->ns_type != CLONE_NEWUSER) 5022 return -EINVAL; 5023 5024 /* 5025 * The initial idmapping cannot be used to create an idmapped 5026 * mount. We use the initial idmapping as an indicator of a mount 5027 * that is not idmapped. It can simply be passed into helpers that 5028 * are aware of idmapped mounts as a convenient shortcut. A user 5029 * can just create a dedicated identity mapping to achieve the same 5030 * result. 5031 */ 5032 mnt_userns = container_of(ns, struct user_namespace, ns); 5033 if (mnt_userns == &init_user_ns) 5034 return -EPERM; 5035 5036 /* We're not controlling the target namespace. */ 5037 if (!ns_capable(mnt_userns, CAP_SYS_ADMIN)) 5038 return -EPERM; 5039 5040 kattr->mnt_userns = get_user_ns(mnt_userns); 5041 return 0; 5042 } 5043 5044 static int build_mount_kattr(const struct mount_attr *attr, size_t usize, 5045 struct mount_kattr *kattr) 5046 { 5047 if (attr->propagation & ~MOUNT_SETATTR_PROPAGATION_FLAGS) 5048 return -EINVAL; 5049 if (hweight32(attr->propagation & MOUNT_SETATTR_PROPAGATION_FLAGS) > 1) 5050 return -EINVAL; 5051 kattr->propagation = attr->propagation; 5052 5053 if ((attr->attr_set | attr->attr_clr) & ~MOUNT_SETATTR_VALID_FLAGS) 5054 return -EINVAL; 5055 5056 kattr->attr_set = attr_flags_to_mnt_flags(attr->attr_set); 5057 kattr->attr_clr = attr_flags_to_mnt_flags(attr->attr_clr); 5058 5059 /* 5060 * Since the MOUNT_ATTR_<atime> values are an enum, not a bitmap, 5061 * users wanting to transition to a different atime setting cannot 5062 * simply specify the atime setting in @attr_set, but must also 5063 * specify MOUNT_ATTR__ATIME in the @attr_clr field. 5064 * So ensure that MOUNT_ATTR__ATIME can't be partially set in 5065 * @attr_clr and that @attr_set can't have any atime bits set if 5066 * MOUNT_ATTR__ATIME isn't set in @attr_clr. 5067 */ 5068 if (attr->attr_clr & MOUNT_ATTR__ATIME) { 5069 if ((attr->attr_clr & MOUNT_ATTR__ATIME) != MOUNT_ATTR__ATIME) 5070 return -EINVAL; 5071 5072 /* 5073 * Clear all previous time settings as they are mutually 5074 * exclusive. 
5075 */ 5076 kattr->attr_clr |= MNT_RELATIME | MNT_NOATIME; 5077 switch (attr->attr_set & MOUNT_ATTR__ATIME) { 5078 case MOUNT_ATTR_RELATIME: 5079 kattr->attr_set |= MNT_RELATIME; 5080 break; 5081 case MOUNT_ATTR_NOATIME: 5082 kattr->attr_set |= MNT_NOATIME; 5083 break; 5084 case MOUNT_ATTR_STRICTATIME: 5085 break; 5086 default: 5087 return -EINVAL; 5088 } 5089 } else { 5090 if (attr->attr_set & MOUNT_ATTR__ATIME) 5091 return -EINVAL; 5092 } 5093 5094 return build_mount_idmapped(attr, usize, kattr); 5095 } 5096 5097 static void finish_mount_kattr(struct mount_kattr *kattr) 5098 { 5099 if (kattr->mnt_userns) { 5100 put_user_ns(kattr->mnt_userns); 5101 kattr->mnt_userns = NULL; 5102 } 5103 5104 if (kattr->mnt_idmap) 5105 mnt_idmap_put(kattr->mnt_idmap); 5106 } 5107 5108 static int wants_mount_setattr(struct mount_attr __user *uattr, size_t usize, 5109 struct mount_kattr *kattr) 5110 { 5111 int ret; 5112 struct mount_attr attr; 5113 5114 BUILD_BUG_ON(sizeof(struct mount_attr) != MOUNT_ATTR_SIZE_VER0); 5115 5116 if (unlikely(usize > PAGE_SIZE)) 5117 return -E2BIG; 5118 if (unlikely(usize < MOUNT_ATTR_SIZE_VER0)) 5119 return -EINVAL; 5120 5121 if (!may_mount()) 5122 return -EPERM; 5123 5124 ret = copy_struct_from_user(&attr, sizeof(attr), uattr, usize); 5125 if (ret) 5126 return ret; 5127 5128 /* Don't bother walking through the mounts if this is a nop. */ 5129 if (attr.attr_set == 0 && 5130 attr.attr_clr == 0 && 5131 attr.propagation == 0) 5132 return 0; /* Tell caller to not bother. */ 5133 5134 ret = build_mount_kattr(&attr, usize, kattr); 5135 if (ret < 0) 5136 return ret; 5137 5138 return 1; 5139 } 5140 5141 SYSCALL_DEFINE5(mount_setattr, int, dfd, const char __user *, path, 5142 unsigned int, flags, struct mount_attr __user *, uattr, 5143 size_t, usize) 5144 { 5145 int err; 5146 struct path target; 5147 struct mount_kattr kattr; 5148 unsigned int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW; 5149 5150 if (flags & ~(AT_EMPTY_PATH | 5151 AT_RECURSIVE | 5152 AT_SYMLINK_NOFOLLOW | 5153 AT_NO_AUTOMOUNT)) 5154 return -EINVAL; 5155 5156 if (flags & AT_NO_AUTOMOUNT) 5157 lookup_flags &= ~LOOKUP_AUTOMOUNT; 5158 if (flags & AT_SYMLINK_NOFOLLOW) 5159 lookup_flags &= ~LOOKUP_FOLLOW; 5160 5161 kattr = (struct mount_kattr) { 5162 .lookup_flags = lookup_flags, 5163 }; 5164 5165 if (flags & AT_RECURSIVE) 5166 kattr.kflags |= MOUNT_KATTR_RECURSE; 5167 5168 err = wants_mount_setattr(uattr, usize, &kattr); 5169 if (err <= 0) 5170 return err; 5171 5172 CLASS(filename_uflags, name)(path, flags); 5173 err = filename_lookup(dfd, name, kattr.lookup_flags, &target, NULL); 5174 if (!err) { 5175 err = do_mount_setattr(&target, &kattr); 5176 path_put(&target); 5177 } 5178 finish_mount_kattr(&kattr); 5179 return err; 5180 } 5181 5182 SYSCALL_DEFINE5(open_tree_attr, int, dfd, const char __user *, filename, 5183 unsigned, flags, struct mount_attr __user *, uattr, 5184 size_t, usize) 5185 { 5186 if (!uattr && usize) 5187 return -EINVAL; 5188 5189 FD_PREPARE(fdf, flags, vfs_open_tree(dfd, filename, flags)); 5190 if (fdf.err) 5191 return fdf.err; 5192 5193 if (uattr) { 5194 struct mount_kattr kattr = {}; 5195 struct file *file = fd_prepare_file(fdf); 5196 int ret; 5197 5198 if (flags & OPEN_TREE_CLONE) 5199 kattr.kflags = MOUNT_KATTR_IDMAP_REPLACE; 5200 if (flags & AT_RECURSIVE) 5201 kattr.kflags |= MOUNT_KATTR_RECURSE; 5202 5203 ret = wants_mount_setattr(uattr, usize, &kattr); 5204 if (ret > 0) { 5205 ret = do_mount_setattr(&file->f_path, &kattr); 5206 finish_mount_kattr(&kattr); 5207 } 5208 if (ret) 5209 return ret; 
5210 } 5211 5212 return fd_publish(fdf); 5213 } 5214 5215 int show_path(struct seq_file *m, struct dentry *root) 5216 { 5217 if (root->d_sb->s_op->show_path) 5218 return root->d_sb->s_op->show_path(m, root); 5219 5220 seq_dentry(m, root, " \t\n\\"); 5221 return 0; 5222 } 5223 5224 static struct vfsmount *lookup_mnt_in_ns(u64 id, struct mnt_namespace *ns) 5225 { 5226 struct mount *mnt = mnt_find_id_at(ns, id); 5227 5228 if (!mnt || mnt->mnt_id_unique != id) 5229 return NULL; 5230 5231 return &mnt->mnt; 5232 } 5233 5234 struct kstatmount { 5235 struct statmount __user *buf; 5236 size_t bufsize; 5237 struct vfsmount *mnt; 5238 struct mnt_idmap *idmap; 5239 u64 mask; 5240 struct path root; 5241 struct seq_file seq; 5242 5243 /* Must be last --ends in a flexible-array member. */ 5244 struct statmount sm; 5245 }; 5246 5247 static u64 mnt_to_attr_flags(struct vfsmount *mnt) 5248 { 5249 unsigned int mnt_flags = READ_ONCE(mnt->mnt_flags); 5250 u64 attr_flags = 0; 5251 5252 if (mnt_flags & MNT_READONLY) 5253 attr_flags |= MOUNT_ATTR_RDONLY; 5254 if (mnt_flags & MNT_NOSUID) 5255 attr_flags |= MOUNT_ATTR_NOSUID; 5256 if (mnt_flags & MNT_NODEV) 5257 attr_flags |= MOUNT_ATTR_NODEV; 5258 if (mnt_flags & MNT_NOEXEC) 5259 attr_flags |= MOUNT_ATTR_NOEXEC; 5260 if (mnt_flags & MNT_NODIRATIME) 5261 attr_flags |= MOUNT_ATTR_NODIRATIME; 5262 if (mnt_flags & MNT_NOSYMFOLLOW) 5263 attr_flags |= MOUNT_ATTR_NOSYMFOLLOW; 5264 5265 if (mnt_flags & MNT_NOATIME) 5266 attr_flags |= MOUNT_ATTR_NOATIME; 5267 else if (mnt_flags & MNT_RELATIME) 5268 attr_flags |= MOUNT_ATTR_RELATIME; 5269 else 5270 attr_flags |= MOUNT_ATTR_STRICTATIME; 5271 5272 if (is_idmapped_mnt(mnt)) 5273 attr_flags |= MOUNT_ATTR_IDMAP; 5274 5275 return attr_flags; 5276 } 5277 5278 static u64 mnt_to_propagation_flags(struct mount *m) 5279 { 5280 u64 propagation = 0; 5281 5282 if (IS_MNT_SHARED(m)) 5283 propagation |= MS_SHARED; 5284 if (IS_MNT_SLAVE(m)) 5285 propagation |= MS_SLAVE; 5286 if (IS_MNT_UNBINDABLE(m)) 5287 propagation |= MS_UNBINDABLE; 5288 if (!propagation) 5289 propagation |= MS_PRIVATE; 5290 5291 return propagation; 5292 } 5293 5294 u64 vfsmount_to_propagation_flags(struct vfsmount *mnt) 5295 { 5296 return mnt_to_propagation_flags(real_mount(mnt)); 5297 } 5298 EXPORT_SYMBOL_GPL(vfsmount_to_propagation_flags); 5299 5300 static void statmount_sb_basic(struct kstatmount *s) 5301 { 5302 struct super_block *sb = s->mnt->mnt_sb; 5303 5304 s->sm.mask |= STATMOUNT_SB_BASIC; 5305 s->sm.sb_dev_major = MAJOR(sb->s_dev); 5306 s->sm.sb_dev_minor = MINOR(sb->s_dev); 5307 s->sm.sb_magic = sb->s_magic; 5308 s->sm.sb_flags = sb->s_flags & (SB_RDONLY|SB_SYNCHRONOUS|SB_DIRSYNC|SB_LAZYTIME); 5309 } 5310 5311 static void statmount_mnt_basic(struct kstatmount *s) 5312 { 5313 struct mount *m = real_mount(s->mnt); 5314 5315 s->sm.mask |= STATMOUNT_MNT_BASIC; 5316 s->sm.mnt_id = m->mnt_id_unique; 5317 s->sm.mnt_parent_id = m->mnt_parent->mnt_id_unique; 5318 s->sm.mnt_id_old = m->mnt_id; 5319 s->sm.mnt_parent_id_old = m->mnt_parent->mnt_id; 5320 s->sm.mnt_attr = mnt_to_attr_flags(&m->mnt); 5321 s->sm.mnt_propagation = mnt_to_propagation_flags(m); 5322 s->sm.mnt_peer_group = m->mnt_group_id; 5323 s->sm.mnt_master = IS_MNT_SLAVE(m) ? 
m->mnt_master->mnt_group_id : 0;
5324 }
5325
5326 static void statmount_propagate_from(struct kstatmount *s)
5327 {
5328 struct mount *m = real_mount(s->mnt);
5329
5330 s->sm.mask |= STATMOUNT_PROPAGATE_FROM;
5331 if (IS_MNT_SLAVE(m))
5332 s->sm.propagate_from = get_dominating_id(m, &current->fs->root);
5333 }
5334
5335 static int statmount_mnt_root(struct kstatmount *s, struct seq_file *seq)
5336 {
5337 int ret;
5338 size_t start = seq->count;
5339
5340 ret = show_path(seq, s->mnt->mnt_root);
5341 if (ret)
5342 return ret;
5343
5344 if (unlikely(seq_has_overflowed(seq)))
5345 return -EAGAIN;
5346
5347 /*
5348 * Unescape the result. It would be better if the supplied string were
5349 * not escaped in the first place, but that's a pretty invasive change.
5350 */
5351 seq->buf[seq->count] = '\0';
5352 seq->count = start;
5353 seq_commit(seq, string_unescape_inplace(seq->buf + start, UNESCAPE_OCTAL));
5354 return 0;
5355 }
5356
5357 static int statmount_mnt_point(struct kstatmount *s, struct seq_file *seq)
5358 {
5359 struct vfsmount *mnt = s->mnt;
5360 struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
5361 int err;
5362
5363 err = seq_path_root(seq, &mnt_path, &s->root, "");
5364 return err == SEQ_SKIP ? 0 : err;
5365 }
5366
5367 static int statmount_fs_type(struct kstatmount *s, struct seq_file *seq)
5368 {
5369 struct super_block *sb = s->mnt->mnt_sb;
5370
5371 seq_puts(seq, sb->s_type->name);
5372 return 0;
5373 }
5374
5375 static void statmount_fs_subtype(struct kstatmount *s, struct seq_file *seq)
5376 {
5377 struct super_block *sb = s->mnt->mnt_sb;
5378
5379 if (sb->s_subtype)
5380 seq_puts(seq, sb->s_subtype);
5381 }
5382
5383 static int statmount_sb_source(struct kstatmount *s, struct seq_file *seq)
5384 {
5385 struct super_block *sb = s->mnt->mnt_sb;
5386 struct mount *r = real_mount(s->mnt);
5387
5388 if (sb->s_op->show_devname) {
5389 size_t start = seq->count;
5390 int ret;
5391
5392 ret = sb->s_op->show_devname(seq, s->mnt->mnt_root);
5393 if (ret)
5394 return ret;
5395
5396 if (unlikely(seq_has_overflowed(seq)))
5397 return -EAGAIN;
5398
5399 /* Unescape the result */
5400 seq->buf[seq->count] = '\0';
5401 seq->count = start;
5402 seq_commit(seq, string_unescape_inplace(seq->buf + start, UNESCAPE_OCTAL));
5403 } else {
5404 seq_puts(seq, r->mnt_devname);
5405 }
5406 return 0;
5407 }
5408
5409 static void statmount_mnt_ns_id(struct kstatmount *s, struct mnt_namespace *ns)
5410 {
5411 s->sm.mask |= STATMOUNT_MNT_NS_ID;
5412 s->sm.mnt_ns_id = ns->ns.ns_id;
5413 }
5414
5415 static int statmount_mnt_opts(struct kstatmount *s, struct seq_file *seq)
5416 {
5417 struct vfsmount *mnt = s->mnt;
5418 struct super_block *sb = mnt->mnt_sb;
5419 size_t start = seq->count;
5420 int err;
5421
5422 err = security_sb_show_options(seq, sb);
5423 if (err)
5424 return err;
5425
5426 if (sb->s_op->show_options) {
5427 err = sb->s_op->show_options(seq, mnt->mnt_root);
5428 if (err)
5429 return err;
5430 }
5431
5432 if (unlikely(seq_has_overflowed(seq)))
5433 return -EAGAIN;
5434
5435 if (seq->count == start)
5436 return 0;
5437
5438 /* skip leading comma */
5439 memmove(seq->buf + start, seq->buf + start + 1,
5440 seq->count - start - 1);
5441 seq->count--;
5442
5443 return 0;
5444 }
5445
5446 static inline int statmount_opt_process(struct seq_file *seq, size_t start)
5447 {
5448 char *buf_end, *opt_end, *src, *dst;
5449 int count = 0;
5450
5451 if (unlikely(seq_has_overflowed(seq)))
5452 return -EAGAIN;
5453
5454 buf_end = seq->buf + seq->count;
5455 dst = seq->buf + start;
5456 src = dst + 1; /*
skip initial comma */ 5457 5458 if (src >= buf_end) { 5459 seq->count = start; 5460 return 0; 5461 } 5462 5463 *buf_end = '\0'; 5464 for (; src < buf_end; src = opt_end + 1) { 5465 opt_end = strchrnul(src, ','); 5466 *opt_end = '\0'; 5467 dst += string_unescape(src, dst, 0, UNESCAPE_OCTAL) + 1; 5468 if (WARN_ON_ONCE(++count == INT_MAX)) 5469 return -EOVERFLOW; 5470 } 5471 seq->count = dst - 1 - seq->buf; 5472 return count; 5473 } 5474 5475 static int statmount_opt_array(struct kstatmount *s, struct seq_file *seq) 5476 { 5477 struct vfsmount *mnt = s->mnt; 5478 struct super_block *sb = mnt->mnt_sb; 5479 size_t start = seq->count; 5480 int err; 5481 5482 if (!sb->s_op->show_options) 5483 return 0; 5484 5485 err = sb->s_op->show_options(seq, mnt->mnt_root); 5486 if (err) 5487 return err; 5488 5489 err = statmount_opt_process(seq, start); 5490 if (err < 0) 5491 return err; 5492 5493 s->sm.opt_num = err; 5494 return 0; 5495 } 5496 5497 static int statmount_opt_sec_array(struct kstatmount *s, struct seq_file *seq) 5498 { 5499 struct vfsmount *mnt = s->mnt; 5500 struct super_block *sb = mnt->mnt_sb; 5501 size_t start = seq->count; 5502 int err; 5503 5504 err = security_sb_show_options(seq, sb); 5505 if (err) 5506 return err; 5507 5508 err = statmount_opt_process(seq, start); 5509 if (err < 0) 5510 return err; 5511 5512 s->sm.opt_sec_num = err; 5513 return 0; 5514 } 5515 5516 static inline int statmount_mnt_uidmap(struct kstatmount *s, struct seq_file *seq) 5517 { 5518 int ret; 5519 5520 ret = statmount_mnt_idmap(s->idmap, seq, true); 5521 if (ret < 0) 5522 return ret; 5523 5524 s->sm.mnt_uidmap_num = ret; 5525 /* 5526 * Always raise STATMOUNT_MNT_UIDMAP even if there are no valid 5527 * mappings. This allows userspace to distinguish between a 5528 * non-idmapped mount and an idmapped mount where none of the 5529 * individual mappings are valid in the caller's idmapping. 5530 */ 5531 if (is_valid_mnt_idmap(s->idmap)) 5532 s->sm.mask |= STATMOUNT_MNT_UIDMAP; 5533 return 0; 5534 } 5535 5536 static inline int statmount_mnt_gidmap(struct kstatmount *s, struct seq_file *seq) 5537 { 5538 int ret; 5539 5540 ret = statmount_mnt_idmap(s->idmap, seq, false); 5541 if (ret < 0) 5542 return ret; 5543 5544 s->sm.mnt_gidmap_num = ret; 5545 /* 5546 * Always raise STATMOUNT_MNT_GIDMAP even if there are no valid 5547 * mappings. This allows userspace to distinguish between a 5548 * non-idmapped mount and an idmapped mount where none of the 5549 * individual mappings are valid in the caller's idmapping. 
5550 */ 5551 if (is_valid_mnt_idmap(s->idmap)) 5552 s->sm.mask |= STATMOUNT_MNT_GIDMAP; 5553 return 0; 5554 } 5555 5556 static int statmount_string(struct kstatmount *s, u64 flag) 5557 { 5558 int ret = 0; 5559 size_t kbufsize; 5560 struct seq_file *seq = &s->seq; 5561 struct statmount *sm = &s->sm; 5562 u32 start, *offp; 5563 5564 /* Reserve an empty string at the beginning for any unset offsets */ 5565 if (!seq->count) 5566 seq_putc(seq, 0); 5567 5568 start = seq->count; 5569 5570 switch (flag) { 5571 case STATMOUNT_FS_TYPE: 5572 offp = &sm->fs_type; 5573 ret = statmount_fs_type(s, seq); 5574 break; 5575 case STATMOUNT_MNT_ROOT: 5576 offp = &sm->mnt_root; 5577 ret = statmount_mnt_root(s, seq); 5578 break; 5579 case STATMOUNT_MNT_POINT: 5580 offp = &sm->mnt_point; 5581 ret = statmount_mnt_point(s, seq); 5582 break; 5583 case STATMOUNT_MNT_OPTS: 5584 offp = &sm->mnt_opts; 5585 ret = statmount_mnt_opts(s, seq); 5586 break; 5587 case STATMOUNT_OPT_ARRAY: 5588 offp = &sm->opt_array; 5589 ret = statmount_opt_array(s, seq); 5590 break; 5591 case STATMOUNT_OPT_SEC_ARRAY: 5592 offp = &sm->opt_sec_array; 5593 ret = statmount_opt_sec_array(s, seq); 5594 break; 5595 case STATMOUNT_FS_SUBTYPE: 5596 offp = &sm->fs_subtype; 5597 statmount_fs_subtype(s, seq); 5598 break; 5599 case STATMOUNT_SB_SOURCE: 5600 offp = &sm->sb_source; 5601 ret = statmount_sb_source(s, seq); 5602 break; 5603 case STATMOUNT_MNT_UIDMAP: 5604 offp = &sm->mnt_uidmap; 5605 ret = statmount_mnt_uidmap(s, seq); 5606 break; 5607 case STATMOUNT_MNT_GIDMAP: 5608 offp = &sm->mnt_gidmap; 5609 ret = statmount_mnt_gidmap(s, seq); 5610 break; 5611 default: 5612 WARN_ON_ONCE(true); 5613 return -EINVAL; 5614 } 5615 5616 /* 5617 * If nothing was emitted, return to avoid setting the flag 5618 * and terminating the buffer. 5619 */ 5620 if (seq->count == start) 5621 return ret; 5622 if (unlikely(check_add_overflow(sizeof(*sm), seq->count, &kbufsize))) 5623 return -EOVERFLOW; 5624 if (kbufsize >= s->bufsize) 5625 return -EOVERFLOW; 5626 5627 /* signal a retry */ 5628 if (unlikely(seq_has_overflowed(seq))) 5629 return -EAGAIN; 5630 5631 if (ret) 5632 return ret; 5633 5634 seq->buf[seq->count++] = '\0'; 5635 sm->mask |= flag; 5636 *offp = start; 5637 return 0; 5638 } 5639 5640 static int copy_statmount_to_user(struct kstatmount *s) 5641 { 5642 struct statmount *sm = &s->sm; 5643 struct seq_file *seq = &s->seq; 5644 char __user *str = ((char __user *)s->buf) + sizeof(*sm); 5645 size_t copysize = min_t(size_t, s->bufsize, sizeof(*sm)); 5646 5647 if (seq->count && copy_to_user(str, seq->buf, seq->count)) 5648 return -EFAULT; 5649 5650 /* Return the number of bytes copied to the buffer */ 5651 sm->size = copysize + seq->count; 5652 if (copy_to_user(s->buf, sm, copysize)) 5653 return -EFAULT; 5654 5655 return 0; 5656 } 5657 5658 static struct mount *listmnt_next(struct mount *curr, bool reverse) 5659 { 5660 struct rb_node *node; 5661 5662 if (reverse) 5663 node = rb_prev(&curr->mnt_node); 5664 else 5665 node = rb_next(&curr->mnt_node); 5666 5667 return node_to_mount(node); 5668 } 5669 5670 static int grab_requested_root(struct mnt_namespace *ns, struct path *root) 5671 { 5672 struct mount *first, *child; 5673 5674 rwsem_assert_held(&namespace_sem); 5675 5676 /* We're looking at our own ns, just use get_fs_root. */ 5677 if (ns == current->nsproxy->mnt_ns) { 5678 get_fs_root(current->fs, root); 5679 return 0; 5680 } 5681 5682 /* 5683 * We have to find the first mount in our ns and use that, however it 5684 * may not exist, so handle that properly. 
5685 */ 5686 if (mnt_ns_empty(ns)) 5687 return -ENOENT; 5688 5689 first = ns->root; 5690 for (child = node_to_mount(ns->mnt_first_node); child; 5691 child = listmnt_next(child, false)) { 5692 if (child != first && child->mnt_parent == first) 5693 break; 5694 } 5695 if (!child) 5696 return -ENOENT; 5697 5698 root->mnt = mntget(&child->mnt); 5699 root->dentry = dget(root->mnt->mnt_root); 5700 return 0; 5701 } 5702 5703 /* This must be updated whenever a new flag is added */ 5704 #define STATMOUNT_SUPPORTED (STATMOUNT_SB_BASIC | \ 5705 STATMOUNT_MNT_BASIC | \ 5706 STATMOUNT_PROPAGATE_FROM | \ 5707 STATMOUNT_MNT_ROOT | \ 5708 STATMOUNT_MNT_POINT | \ 5709 STATMOUNT_FS_TYPE | \ 5710 STATMOUNT_MNT_NS_ID | \ 5711 STATMOUNT_MNT_OPTS | \ 5712 STATMOUNT_FS_SUBTYPE | \ 5713 STATMOUNT_SB_SOURCE | \ 5714 STATMOUNT_OPT_ARRAY | \ 5715 STATMOUNT_OPT_SEC_ARRAY | \ 5716 STATMOUNT_SUPPORTED_MASK | \ 5717 STATMOUNT_MNT_UIDMAP | \ 5718 STATMOUNT_MNT_GIDMAP) 5719 5720 /* locks: namespace_shared */ 5721 static int do_statmount(struct kstatmount *s, u64 mnt_id, u64 mnt_ns_id, 5722 struct file *mnt_file, struct mnt_namespace *ns) 5723 { 5724 int err; 5725 5726 if (mnt_file) { 5727 WARN_ON_ONCE(ns != NULL); 5728 5729 s->mnt = mnt_file->f_path.mnt; 5730 ns = real_mount(s->mnt)->mnt_ns; 5731 if (IS_ERR(ns)) 5732 return PTR_ERR(ns); 5733 if (!ns) 5734 /* 5735 * We can't set mount point and mnt_ns_id since we don't have a 5736 * ns for the mount. This can happen if the mount is unmounted 5737 * with MNT_DETACH. 5738 */ 5739 s->mask &= ~(STATMOUNT_MNT_POINT | STATMOUNT_MNT_NS_ID); 5740 } else { 5741 /* Has the namespace already been emptied? */ 5742 if (mnt_ns_id && mnt_ns_empty(ns)) 5743 return -ENOENT; 5744 5745 s->mnt = lookup_mnt_in_ns(mnt_id, ns); 5746 if (!s->mnt) 5747 return -ENOENT; 5748 } 5749 5750 if (ns) { 5751 err = grab_requested_root(ns, &s->root); 5752 if (err) 5753 return err; 5754 5755 if (!mnt_file) { 5756 struct mount *m; 5757 /* 5758 * Don't trigger audit denials. We just want to determine what 5759 * mounts to show users. 5760 */ 5761 m = real_mount(s->mnt); 5762 if (!is_path_reachable(m, m->mnt.mnt_root, &s->root) && 5763 !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN)) 5764 return -EPERM; 5765 } 5766 } 5767 5768 err = security_sb_statfs(s->mnt->mnt_root); 5769 if (err) 5770 return err; 5771 5772 /* 5773 * Note that mount properties in mnt->mnt_flags, mnt->mnt_idmap 5774 * can change concurrently as we only hold the read-side of the 5775 * namespace semaphore and mount properties may change with only 5776 * the mount lock held. 5777 * 5778 * We could sample the mount lock sequence counter to detect 5779 * those changes and retry. But it's not worth it. Worst that 5780 * happens is that the mnt->mnt_idmap pointer is already changed 5781 * while mnt->mnt_flags isn't or vica versa. So what. 5782 * 5783 * Both mnt->mnt_flags and mnt->mnt_idmap are set and retrieved 5784 * via READ_ONCE()/WRITE_ONCE() and guard against theoretical 5785 * torn read/write. That's all we care about right now. 
5786 */ 5787 s->idmap = mnt_idmap(s->mnt); 5788 if (s->mask & STATMOUNT_MNT_BASIC) 5789 statmount_mnt_basic(s); 5790 5791 if (s->mask & STATMOUNT_SB_BASIC) 5792 statmount_sb_basic(s); 5793 5794 if (s->mask & STATMOUNT_PROPAGATE_FROM) 5795 statmount_propagate_from(s); 5796 5797 if (s->mask & STATMOUNT_FS_TYPE) 5798 err = statmount_string(s, STATMOUNT_FS_TYPE); 5799 5800 if (!err && s->mask & STATMOUNT_MNT_ROOT) 5801 err = statmount_string(s, STATMOUNT_MNT_ROOT); 5802 5803 if (!err && s->mask & STATMOUNT_MNT_POINT) 5804 err = statmount_string(s, STATMOUNT_MNT_POINT); 5805 5806 if (!err && s->mask & STATMOUNT_MNT_OPTS) 5807 err = statmount_string(s, STATMOUNT_MNT_OPTS); 5808 5809 if (!err && s->mask & STATMOUNT_OPT_ARRAY) 5810 err = statmount_string(s, STATMOUNT_OPT_ARRAY); 5811 5812 if (!err && s->mask & STATMOUNT_OPT_SEC_ARRAY) 5813 err = statmount_string(s, STATMOUNT_OPT_SEC_ARRAY); 5814 5815 if (!err && s->mask & STATMOUNT_FS_SUBTYPE) 5816 err = statmount_string(s, STATMOUNT_FS_SUBTYPE); 5817 5818 if (!err && s->mask & STATMOUNT_SB_SOURCE) 5819 err = statmount_string(s, STATMOUNT_SB_SOURCE); 5820 5821 if (!err && s->mask & STATMOUNT_MNT_UIDMAP) 5822 err = statmount_string(s, STATMOUNT_MNT_UIDMAP); 5823 5824 if (!err && s->mask & STATMOUNT_MNT_GIDMAP) 5825 err = statmount_string(s, STATMOUNT_MNT_GIDMAP); 5826 5827 if (!err && s->mask & STATMOUNT_MNT_NS_ID) 5828 statmount_mnt_ns_id(s, ns); 5829 5830 if (!err && s->mask & STATMOUNT_SUPPORTED_MASK) { 5831 s->sm.mask |= STATMOUNT_SUPPORTED_MASK; 5832 s->sm.supported_mask = STATMOUNT_SUPPORTED; 5833 } 5834 5835 if (err) 5836 return err; 5837 5838 /* Are there bits in the return mask not present in STATMOUNT_SUPPORTED? */ 5839 WARN_ON_ONCE(~STATMOUNT_SUPPORTED & s->sm.mask); 5840 5841 return 0; 5842 } 5843 5844 static inline bool retry_statmount(const long ret, size_t *seq_size) 5845 { 5846 if (likely(ret != -EAGAIN)) 5847 return false; 5848 if (unlikely(check_mul_overflow(*seq_size, 2, seq_size))) 5849 return false; 5850 if (unlikely(*seq_size > MAX_RW_COUNT)) 5851 return false; 5852 return true; 5853 } 5854 5855 #define STATMOUNT_STRING_REQ (STATMOUNT_MNT_ROOT | STATMOUNT_MNT_POINT | \ 5856 STATMOUNT_FS_TYPE | STATMOUNT_MNT_OPTS | \ 5857 STATMOUNT_FS_SUBTYPE | STATMOUNT_SB_SOURCE | \ 5858 STATMOUNT_OPT_ARRAY | STATMOUNT_OPT_SEC_ARRAY | \ 5859 STATMOUNT_MNT_UIDMAP | STATMOUNT_MNT_GIDMAP) 5860 5861 static int prepare_kstatmount(struct kstatmount *ks, struct mnt_id_req *kreq, 5862 struct statmount __user *buf, size_t bufsize, 5863 size_t seq_size) 5864 { 5865 if (!access_ok(buf, bufsize)) 5866 return -EFAULT; 5867 5868 memset(ks, 0, sizeof(*ks)); 5869 ks->mask = kreq->param; 5870 ks->buf = buf; 5871 ks->bufsize = bufsize; 5872 5873 if (ks->mask & STATMOUNT_STRING_REQ) { 5874 if (bufsize == sizeof(ks->sm)) 5875 return -EOVERFLOW; 5876 5877 ks->seq.buf = kvmalloc(seq_size, GFP_KERNEL_ACCOUNT); 5878 if (!ks->seq.buf) 5879 return -ENOMEM; 5880 5881 ks->seq.size = seq_size; 5882 } 5883 5884 return 0; 5885 } 5886 5887 static int copy_mnt_id_req(const struct mnt_id_req __user *req, 5888 struct mnt_id_req *kreq, unsigned int flags) 5889 { 5890 int ret; 5891 size_t usize; 5892 5893 BUILD_BUG_ON(sizeof(struct mnt_id_req) != MNT_ID_REQ_SIZE_VER1); 5894 5895 ret = get_user(usize, &req->size); 5896 if (ret) 5897 return -EFAULT; 5898 if (unlikely(usize > PAGE_SIZE)) 5899 return -E2BIG; 5900 if (unlikely(usize < MNT_ID_REQ_SIZE_VER0)) 5901 return -EINVAL; 5902 memset(kreq, 0, sizeof(*kreq)); 5903 ret = copy_struct_from_user(kreq, sizeof(*kreq), req, usize); 
static int copy_mnt_id_req(const struct mnt_id_req __user *req,
			   struct mnt_id_req *kreq, unsigned int flags)
{
	int ret;
	size_t usize;

	BUILD_BUG_ON(sizeof(struct mnt_id_req) != MNT_ID_REQ_SIZE_VER1);

	ret = get_user(usize, &req->size);
	if (ret)
		return -EFAULT;
	if (unlikely(usize > PAGE_SIZE))
		return -E2BIG;
	if (unlikely(usize < MNT_ID_REQ_SIZE_VER0))
		return -EINVAL;
	memset(kreq, 0, sizeof(*kreq));
	ret = copy_struct_from_user(kreq, sizeof(*kreq), req, usize);
	if (ret)
		return ret;

	if (flags & STATMOUNT_BY_FD) {
		if (kreq->mnt_id || kreq->mnt_ns_id)
			return -EINVAL;
	} else {
		if (kreq->mnt_ns_fd != 0 && kreq->mnt_ns_id)
			return -EINVAL;
		/* The first valid unique mount id is MNT_UNIQUE_ID_OFFSET + 1. */
		if (kreq->mnt_id <= MNT_UNIQUE_ID_OFFSET)
			return -EINVAL;
	}
	return 0;
}

/*
 * If the user requested a specific mount namespace id, look that up and
 * return it. If a mount namespace fd was supplied instead, resolve and
 * reference the namespace behind it. Otherwise simply grab a passive
 * reference on the caller's own mount namespace and return that.
 */
static struct mnt_namespace *grab_requested_mnt_ns(const struct mnt_id_req *kreq)
{
	struct mnt_namespace *mnt_ns;

	if (kreq->mnt_ns_id) {
		mnt_ns = lookup_mnt_ns(kreq->mnt_ns_id);
		if (!mnt_ns)
			return ERR_PTR(-ENOENT);
	} else if (kreq->mnt_ns_fd) {
		struct ns_common *ns;

		CLASS(fd, f)(kreq->mnt_ns_fd);
		if (fd_empty(f))
			return ERR_PTR(-EBADF);

		if (!proc_ns_file(fd_file(f)))
			return ERR_PTR(-EINVAL);

		ns = get_proc_ns(file_inode(fd_file(f)));
		if (ns->ns_type != CLONE_NEWNS)
			return ERR_PTR(-EINVAL);

		mnt_ns = to_mnt_ns(ns);
		refcount_inc(&mnt_ns->passive);
	} else {
		mnt_ns = current->nsproxy->mnt_ns;
		refcount_inc(&mnt_ns->passive);
	}

	return mnt_ns;
}
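/*
 * Illustrative userspace sketch (not kernel code) of driving statmount(2)
 * through raw syscall(2): fetch the filesystem type of the mount holding
 * "/". The unique mount id comes from statx(2) with STATX_MNT_ID_UNIQUE,
 * and the buffer is regrown on EOVERFLOW as the syscall requires. Header
 * details vary with the libc; a glibc with statx(3) and the uapi
 * <linux/mount.h> definitions is assumed.
 *
 *	#define _GNU_SOURCE
 *	#include <errno.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <sys/stat.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <linux/mount.h>
 *
 *	static struct statmount *fetch(uint64_t mnt_id, uint64_t mask)
 *	{
 *		size_t bufsize = 1 << 12;
 *		struct statmount *buf;
 *		struct mnt_id_req req = {
 *			.size	= MNT_ID_REQ_SIZE_VER0,	// older size is fine
 *			.mnt_id	= mnt_id,
 *			.param	= mask,			// STATMOUNT_* mask
 *		};
 *
 *		for (;;) {
 *			buf = malloc(bufsize);
 *			if (!buf)
 *				return NULL;
 *			if (!syscall(__NR_statmount, &req, buf, bufsize, 0))
 *				return buf;
 *			free(buf);
 *			if (errno != EOVERFLOW)	// only retry "buffer too small"
 *				return NULL;
 *			bufsize <<= 1;
 *		}
 *	}
 *
 *	int main(void)
 *	{
 *		struct statx sx;
 *		struct statmount *sm;
 *
 *		if (statx(AT_FDCWD, "/", 0, STATX_MNT_ID_UNIQUE, &sx))
 *			return 1;
 *		sm = fetch(sx.stx_mnt_id, STATMOUNT_FS_TYPE);
 *		if (sm && (sm->mask & STATMOUNT_FS_TYPE))
 *			printf("fs type: %s\n", sm->str + sm->fs_type);
 *		free(sm);
 *		return 0;
 *	}
 */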
SYSCALL_DEFINE4(statmount, const struct mnt_id_req __user *, req,
		struct statmount __user *, buf, size_t, bufsize,
		unsigned int, flags)
{
	struct mnt_namespace *ns __free(mnt_ns_release) = NULL;
	struct kstatmount *ks __free(kfree) = NULL;
	struct file *mnt_file __free(fput) = NULL;
	struct mnt_id_req kreq;
	/*
	 * Initial guess for the string scratch buffer; retry_statmount()
	 * doubles it on -EAGAIN if the requested strings don't fit.
	 */
	size_t seq_size = 3 * PATH_MAX;
	int ret;

	if (flags & ~STATMOUNT_BY_FD)
		return -EINVAL;

	ret = copy_mnt_id_req(req, &kreq, flags);
	if (ret)
		return ret;

	if (flags & STATMOUNT_BY_FD) {
		mnt_file = fget_raw(kreq.mnt_fd);
		if (!mnt_file)
			return -EBADF;
		/* do_statmount() resolves ns itself in the STATMOUNT_BY_FD case */
	} else {
		ns = grab_requested_mnt_ns(&kreq);
		if (IS_ERR(ns))
			return PTR_ERR(ns);

		if (kreq.mnt_ns_id && (ns != current->nsproxy->mnt_ns) &&
		    !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN))
			return -EPERM;
	}

	ks = kmalloc(sizeof(*ks), GFP_KERNEL_ACCOUNT);
	if (!ks)
		return -ENOMEM;

retry:
	ret = prepare_kstatmount(ks, &kreq, buf, bufsize, seq_size);
	if (ret)
		return ret;

	scoped_guard(namespace_shared)
		ret = do_statmount(ks, kreq.mnt_id, kreq.mnt_ns_id, mnt_file, ns);

	if (!ret)
		ret = copy_statmount_to_user(ks);
	kvfree(ks->seq.buf);
	path_put(&ks->root);
	if (retry_statmount(ret, &seq_size))
		goto retry;
	return ret;
}

struct klistmount {
	u64 last_mnt_id;
	u64 mnt_parent_id;
	u64 *kmnt_ids;
	u32 nr_mnt_ids;
	struct mnt_namespace *ns;
	struct path root;
};
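/*
 * Iteration contract (summary, not new behaviour): @last_mnt_id acts as a
 * resumption cursor. A value of 0 starts from the first (or, in reverse,
 * the last) mount in the namespace; otherwise iteration resumes strictly
 * after (respectively before) that id. Only mounts reachable from the
 * requested root are reported and the parent itself is skipped, so
 * callers can page through a large namespace by repeating the call with
 * the last id they received.
 */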
/* locks: namespace_shared */
static ssize_t do_listmount(struct klistmount *kls, bool reverse)
{
	struct mnt_namespace *ns = kls->ns;
	u64 mnt_parent_id = kls->mnt_parent_id;
	u64 last_mnt_id = kls->last_mnt_id;
	u64 *mnt_ids = kls->kmnt_ids;
	size_t nr_mnt_ids = kls->nr_mnt_ids;
	struct path orig;
	struct mount *r, *first;
	ssize_t ret;

	rwsem_assert_held(&namespace_sem);

	ret = grab_requested_root(ns, &kls->root);
	if (ret)
		return ret;

	if (mnt_parent_id == LSMT_ROOT) {
		orig = kls->root;
	} else {
		orig.mnt = lookup_mnt_in_ns(mnt_parent_id, ns);
		if (!orig.mnt)
			return -ENOENT;
		orig.dentry = orig.mnt->mnt_root;
	}

	/*
	 * Don't trigger audit denials. We just want to determine what
	 * mounts to show users.
	 */
	if (!is_path_reachable(real_mount(orig.mnt), orig.dentry, &kls->root) &&
	    !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	ret = security_sb_statfs(orig.dentry);
	if (ret)
		return ret;

	if (!last_mnt_id) {
		if (reverse)
			first = node_to_mount(ns->mnt_last_node);
		else
			first = node_to_mount(ns->mnt_first_node);
	} else {
		if (reverse)
			first = mnt_find_id_at_reverse(ns, last_mnt_id - 1);
		else
			first = mnt_find_id_at(ns, last_mnt_id + 1);
	}

	for (ret = 0, r = first; r && nr_mnt_ids; r = listmnt_next(r, reverse)) {
		if (r->mnt_id_unique == mnt_parent_id)
			continue;
		if (!is_path_reachable(r, r->mnt.mnt_root, &orig))
			continue;
		*mnt_ids = r->mnt_id_unique;
		mnt_ids++;
		nr_mnt_ids--;
		ret++;
	}
	return ret;
}

static void __free_klistmount_free(const struct klistmount *kls)
{
	path_put(&kls->root);
	kvfree(kls->kmnt_ids);
	mnt_ns_release(kls->ns);
}

static inline int prepare_klistmount(struct klistmount *kls, struct mnt_id_req *kreq,
				     size_t nr_mnt_ids)
{
	u64 last_mnt_id = kreq->param;
	struct mnt_namespace *ns;

	/* The first valid unique mount id is MNT_UNIQUE_ID_OFFSET + 1. */
	if (last_mnt_id != 0 && last_mnt_id <= MNT_UNIQUE_ID_OFFSET)
		return -EINVAL;

	kls->last_mnt_id = last_mnt_id;

	kls->nr_mnt_ids = nr_mnt_ids;
	kls->kmnt_ids = kvmalloc_array(nr_mnt_ids, sizeof(*kls->kmnt_ids),
				       GFP_KERNEL_ACCOUNT);
	if (!kls->kmnt_ids)
		return -ENOMEM;

	ns = grab_requested_mnt_ns(kreq);
	if (IS_ERR(ns))
		return PTR_ERR(ns);
	kls->ns = ns;

	kls->mnt_parent_id = kreq->mnt_id;
	return 0;
}

SYSCALL_DEFINE4(listmount, const struct mnt_id_req __user *, req,
		u64 __user *, mnt_ids, size_t, nr_mnt_ids, unsigned int, flags)
{
	struct klistmount kls __free(klistmount_free) = {};
	const size_t maxcount = 1000000;
	struct mnt_id_req kreq;
	ssize_t ret;

	if (flags & ~LISTMOUNT_REVERSE)
		return -EINVAL;

	/*
	 * If the mount namespace really has more than 1 million mounts the
	 * caller must iterate over the mount namespace (and reconsider their
	 * system design...).
	 */
	if (unlikely(nr_mnt_ids > maxcount))
		return -EOVERFLOW;

	if (!access_ok(mnt_ids, nr_mnt_ids * sizeof(*mnt_ids)))
		return -EFAULT;

	ret = copy_mnt_id_req(req, &kreq, 0);
	if (ret)
		return ret;

	ret = prepare_klistmount(&kls, &kreq, nr_mnt_ids);
	if (ret)
		return ret;

	if (kreq.mnt_ns_id && (kls.ns != current->nsproxy->mnt_ns) &&
	    !ns_capable_noaudit(kls.ns->user_ns, CAP_SYS_ADMIN))
		return -ENOENT;

	/*
	 * We only need to guard against mount topology changes as
	 * listmount() doesn't care about any mount properties.
	 */
	scoped_guard(namespace_shared)
		ret = do_listmount(&kls, (flags & LISTMOUNT_REVERSE));
	if (ret <= 0)
		return ret;

	if (copy_to_user(mnt_ids, kls.kmnt_ids, ret * sizeof(*mnt_ids)))
		return -EFAULT;

	return ret;
}
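/*
 * Illustrative userspace sketch (not kernel code): paging through all
 * mount ids below the root of the caller's mount namespace with
 * listmount(2). LSMT_ROOT and struct mnt_id_req come from the uapi
 * <linux/mount.h>; a libc wrapper is assumed to be absent, hence raw
 * syscall(2).
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <linux/mount.h>
 *
 *	int main(void)
 *	{
 *		uint64_t ids[256];
 *		struct mnt_id_req req = {
 *			.size	= MNT_ID_REQ_SIZE_VER0,
 *			.mnt_id	= LSMT_ROOT,	// everything below the root
 *			.param	= 0,		// cursor: start at the beginning
 *		};
 *		long n;
 *
 *		while ((n = syscall(__NR_listmount, &req, ids, 256, 0)) > 0) {
 *			for (long i = 0; i < n; i++)
 *				printf("%llu\n", (unsigned long long)ids[i]);
 *			req.param = ids[n - 1];	// resume after the last id seen
 *		}
 *		return n < 0;
 *	}
 */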
struct mnt_namespace init_mnt_ns = {
	.ns = NS_COMMON_INIT(init_mnt_ns),
	.user_ns = &init_user_ns,
	.passive = REFCOUNT_INIT(1),
	.mounts = RB_ROOT,
	.poll = __WAIT_QUEUE_HEAD_INITIALIZER(init_mnt_ns.poll),
};

static void __init init_mount_tree(void)
{
	struct vfsmount *mnt, *nullfs_mnt;
	struct mount *mnt_root;
	struct path root;

	/*
	 * We create two mounts:
	 *
	 * (1) nullfs with mount id 1
	 * (2) mutable rootfs with mount id 2
	 *
	 * with (2) mounted on top of (1).
	 */
	nullfs_mnt = vfs_kern_mount(&nullfs_fs_type, 0, "nullfs", NULL);
	if (IS_ERR(nullfs_mnt))
		panic("VFS: Failed to create nullfs");

	mnt = vfs_kern_mount(&rootfs_fs_type, 0, "rootfs", initramfs_options);
	if (IS_ERR(mnt))
		panic("Can't create rootfs");

	VFS_WARN_ON_ONCE(real_mount(nullfs_mnt)->mnt_id != 1);
	VFS_WARN_ON_ONCE(real_mount(mnt)->mnt_id != 2);

	/* The namespace root is the nullfs mnt. */
	mnt_root = real_mount(nullfs_mnt);
	init_mnt_ns.root = mnt_root;

	/* Mount mutable rootfs on top of nullfs. */
	root.mnt = nullfs_mnt;
	root.dentry = nullfs_mnt->mnt_root;

	LOCK_MOUNT_EXACT(mp, &root);
	if (unlikely(IS_ERR(mp.parent)))
		panic("VFS: Failed to mount rootfs on nullfs");
	scoped_guard(mount_writer)
		attach_mnt(real_mount(mnt), mp.parent, mp.mp);

	pr_info("VFS: Finished mounting rootfs on nullfs\n");

	/*
	 * We've dropped all locks here but that's fine. Not only are we
	 * the only task running, there's also no other mount namespace
	 * in existence, and the initial mount namespace is completely
	 * empty until we add the mounts we just created.
	 */
	for (struct mount *p = mnt_root; p; p = next_mnt(p, mnt_root)) {
		mnt_add_to_ns(&init_mnt_ns, p);
		init_mnt_ns.nr_mounts++;
	}

	init_task.nsproxy->mnt_ns = &init_mnt_ns;
	get_mnt_ns(&init_mnt_ns);

	/* The root and pwd always point to the mutable rootfs. */
	root.mnt = mnt;
	root.dentry = mnt->mnt_root;
	set_fs_pwd(current->fs, &root);
	set_fs_root(current->fs, &root);

	ns_tree_add(&init_mnt_ns);
}

void __init mnt_init(void)
{
	int err;

	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL);

	mount_hashtable = alloc_large_system_hash("Mount-cache",
				sizeof(struct hlist_head),
				mhash_entries, 19,
				HASH_ZERO,
				&m_hash_shift, &m_hash_mask, 0, 0);
	mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache",
				sizeof(struct hlist_head),
				mphash_entries, 19,
				HASH_ZERO,
				&mp_hash_shift, &mp_hash_mask, 0, 0);

	if (!mount_hashtable || !mountpoint_hashtable)
		panic("Failed to allocate mount hash table\n");

	kernfs_init();

	err = sysfs_init();
	if (err)
		printk(KERN_WARNING "%s: sysfs_init error: %d\n",
			__func__, err);
	fs_kobj = kobject_create_and_add("fs", NULL);
	if (!fs_kobj)
		printk(KERN_WARNING "%s: kobj create error\n", __func__);
	shmem_init();
	init_rootfs();
	init_mount_tree();
}

void put_mnt_ns(struct mnt_namespace *ns)
{
	if (!ns_ref_put(ns))
		return;
	guard(namespace_excl)();
	emptied_ns = ns;
	guard(mount_writer)();
	umount_tree(ns->root, 0);
}

struct vfsmount *kern_mount(struct file_system_type *type)
{
	struct vfsmount *mnt;

	mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, NULL);
	if (!IS_ERR(mnt)) {
		/*
		 * It is a long-term mount; don't release mnt until we
		 * unmount it, right before the filesystem is unregistered.
		 */
		real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL;
	}
	return mnt;
}
EXPORT_SYMBOL_GPL(kern_mount);

void kern_unmount(struct vfsmount *mnt)
{
	/* release long term mount so mount point can be released */
	if (!IS_ERR(mnt)) {
		mnt_make_shortterm(mnt);
		synchronize_rcu();	/* yecchhh... */
		mntput(mnt);
	}
}
EXPORT_SYMBOL(kern_unmount);
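/*
 * In-kernel usage pattern (sketch, with a hypothetical example_fs_type):
 * subsystems that need a private, long-term superblock pin it with
 * kern_mount() at init time and drop it with kern_unmount() on teardown:
 *
 *	static struct vfsmount *example_mnt;
 *
 *	static int __init example_init(void)
 *	{
 *		example_mnt = kern_mount(&example_fs_type);
 *		return PTR_ERR_OR_ZERO(example_mnt);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		kern_unmount(example_mnt);
 *	}
 *
 * kern_unmount() tolerates an ERR_PTR() left over from a failed
 * kern_mount(), so the error path needs no special casing.
 */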
void kern_unmount_array(struct vfsmount *mnt[], unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		mnt_make_shortterm(mnt[i]);
	synchronize_rcu_expedited();
	for (i = 0; i < num; i++)
		mntput(mnt[i]);
}
EXPORT_SYMBOL(kern_unmount_array);

bool our_mnt(struct vfsmount *mnt)
{
	return check_mnt(real_mount(mnt));
}

bool current_chrooted(void)
{
	/* Does the current process have a non-standard root? */
	struct path fs_root __free(path_put) = {};
	struct mount *root;

	get_fs_root(current->fs, &fs_root);

	/* Find the namespace root */

	guard(mount_locked_reader)();

	root = topmost_overmount(current->nsproxy->mnt_ns->root);

	return fs_root.mnt != &root->mnt || !path_mounted(&fs_root);
}

static bool mnt_already_visible(struct mnt_namespace *ns,
				const struct super_block *sb,
				int *new_mnt_flags)
{
	int new_flags = *new_mnt_flags;
	struct mount *mnt, *n;

	guard(namespace_shared)();
	rbtree_postorder_for_each_entry_safe(mnt, n, &ns->mounts, mnt_node) {
		struct mount *child;
		int mnt_flags;

		if (mnt->mnt.mnt_sb->s_type != sb->s_type)
			continue;

		/*
		 * This mount is not fully visible if its root directory
		 * is not the root directory of the filesystem.
		 */
		if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
			continue;

		/* A local view of the mount flags */
		mnt_flags = mnt->mnt.mnt_flags;

		/* Don't miss readonly hidden in the superblock flags */
		if (sb_rdonly(mnt->mnt.mnt_sb))
			mnt_flags |= MNT_LOCK_READONLY;

		/*
		 * Verify the mount flags are equal to or more permissive
		 * than the proposed new mount.
		 */
		if ((mnt_flags & MNT_LOCK_READONLY) &&
		    !(new_flags & MNT_READONLY))
			continue;
		if ((mnt_flags & MNT_LOCK_ATIME) &&
		    ((mnt_flags & MNT_ATIME_MASK) != (new_flags & MNT_ATIME_MASK)))
			continue;

		/*
		 * This mount is not fully visible if there are any
		 * locked child mounts that cover anything except for
		 * empty directories.
		 */
		list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
			struct inode *inode = child->mnt_mountpoint->d_inode;

			/* Only worry about locked mounts */
			if (!(child->mnt.mnt_flags & MNT_LOCKED))
				continue;
			/* Is the directory permanently empty? */
			if (!is_empty_dir_inode(inode))
				goto next;
		}
		/* Preserve the locked attributes */
		*new_mnt_flags |= mnt_flags & (MNT_LOCK_READONLY |
					       MNT_LOCK_ATIME);
		return true;
	next:	;
	}
	return false;
}
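/*
 * Only filesystems that tag their superblocks with SB_I_USERNS_VISIBLE
 * (e.g. proc and sysfs) are subject to the check below: for anyone else
 * a fresh mount in a user namespace reveals nothing that wasn't already
 * reachable. See mnt_already_visible() above for the actual visibility
 * test.
 */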
static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags)
{
	const unsigned long required_iflags = SB_I_NOEXEC | SB_I_NODEV;
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	unsigned long s_iflags;

	if (ns->user_ns == &init_user_ns)
		return false;

	/* Can this filesystem be too revealing? */
	s_iflags = sb->s_iflags;
	if (!(s_iflags & SB_I_USERNS_VISIBLE))
		return false;

	if ((s_iflags & required_iflags) != required_iflags) {
		WARN_ONCE(1, "Expected s_iflags to contain 0x%lx\n",
			  required_iflags);
		return true;
	}

	return !mnt_already_visible(ns, sb, new_mnt_flags);
}

bool mnt_may_suid(struct vfsmount *mnt)
{
	/*
	 * Foreign mounts (accessed via fchdir or through /proc
	 * symlinks) are always treated as if they are nosuid. This
	 * prevents namespaces from trusting potentially unsafe
	 * suid/sgid bits, file caps, or security labels that originate
	 * in other namespaces.
	 */
	return !(mnt->mnt_flags & MNT_NOSUID) && check_mnt(real_mount(mnt)) &&
	       current_in_userns(mnt->mnt_sb->s_user_ns);
}

static struct ns_common *mntns_get(struct task_struct *task)
{
	struct ns_common *ns = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy) {
		ns = &nsproxy->mnt_ns->ns;
		get_mnt_ns(to_mnt_ns(ns));
	}
	task_unlock(task);

	return ns;
}

static void mntns_put(struct ns_common *ns)
{
	put_mnt_ns(to_mnt_ns(ns));
}

static int mntns_install(struct nsset *nsset, struct ns_common *ns)
{
	struct nsproxy *nsproxy = nsset->nsproxy;
	struct fs_struct *fs = nsset->fs;
	struct mnt_namespace *mnt_ns = to_mnt_ns(ns), *old_mnt_ns;
	struct user_namespace *user_ns = nsset->cred->user_ns;
	struct path root;
	int err;

	if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(user_ns, CAP_SYS_CHROOT) ||
	    !ns_capable(user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	if (is_anon_ns(mnt_ns))
		return -EINVAL;

	if (fs->users != 1)
		return -EINVAL;

	get_mnt_ns(mnt_ns);
	old_mnt_ns = nsproxy->mnt_ns;
	nsproxy->mnt_ns = mnt_ns;

	/* Find the root */
	err = vfs_path_lookup(mnt_ns->root->mnt.mnt_root, &mnt_ns->root->mnt,
			      "/", LOOKUP_DOWN, &root);
	if (err) {
		/* revert to old namespace */
		nsproxy->mnt_ns = old_mnt_ns;
		put_mnt_ns(mnt_ns);
		return err;
	}

	put_mnt_ns(old_mnt_ns);

	/* Update the pwd and root */
	set_fs_pwd(fs, &root);
	set_fs_root(fs, &root);

	path_put(&root);
	return 0;
}

static struct user_namespace *mntns_owner(struct ns_common *ns)
{
	return to_mnt_ns(ns)->user_ns;
}

const struct proc_ns_operations mntns_operations = {
	.name		= "mnt",
	.get		= mntns_get,
	.put		= mntns_put,
	.install	= mntns_install,
	.owner		= mntns_owner,
};

#ifdef CONFIG_SYSCTL
static const struct ctl_table fs_namespace_sysctls[] = {
	{
		.procname	= "mount-max",
		.data		= &sysctl_mount_max,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ONE,
	},
};

static int __init init_fs_namespace_sysctls(void)
{
	register_sysctl_init("fs", fs_namespace_sysctls);
	return 0;
}
fs_initcall(init_fs_namespace_sysctls);

#endif /* CONFIG_SYSCTL */
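/*
 * Illustrative userspace counterpart to mntns_install() (sketch, not
 * kernel code): joining another process's mount namespace with setns(2),
 * after which root and pwd both point at the target namespace's root.
 * As enforced above, this requires CAP_SYS_ADMIN over the target mount
 * namespace plus CAP_SYS_CHROOT and CAP_SYS_ADMIN in the caller's user
 * namespace, and the caller must not share its fs_struct (multi-threaded
 * processes are rejected with -EINVAL).
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <sched.h>
 *	#include <stdio.h>
 *	#include <sys/types.h>
 *	#include <unistd.h>
 *
 *	int join_mntns(pid_t pid)
 *	{
 *		char path[64];
 *		int fd, ret;
 *
 *		snprintf(path, sizeof(path), "/proc/%d/ns/mnt", (int)pid);
 *		fd = open(path, O_RDONLY | O_CLOEXEC);
 *		if (fd < 0)
 *			return -1;
 *		ret = setns(fd, CLONE_NEWNS);	// ends up in mntns_install()
 *		close(fd);
 *		return ret;
 *	}
 */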