// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/namespace.c
 *
 * (C) Copyright Al Viro 2000, 2001
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 * Heavily rewritten.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/capability.h>
#include <linux/mnt_namespace.h>
#include <linux/user_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/idr.h>
#include <linux/init.h>		/* init_rootfs */
#include <linux/fs_struct.h>	/* get_fs_root et al. */
#include <linux/fsnotify.h>	/* fsnotify_vfsmount_delete */
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/proc_ns.h>
#include <linux/magic.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/task_work.h>
#include <linux/sched/task.h>
#include <uapi/linux/mount.h>
#include <linux/fs_context.h>
#include <linux/shmem_fs.h>
#include <linux/mnt_idmapping.h>
#include <linux/pidfs.h>

#include "pnode.h"
#include "internal.h"

/* Maximum number of mounts in a mount namespace */
static unsigned int sysctl_mount_max __read_mostly = 100000;

static unsigned int m_hash_mask __ro_after_init;
static unsigned int m_hash_shift __ro_after_init;
static unsigned int mp_hash_mask __ro_after_init;
static unsigned int mp_hash_shift __ro_after_init;

static __initdata unsigned long mhash_entries;
static int __init set_mhash_entries(char *str)
{
	if (!str)
		return 0;
	mhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mhash_entries=", set_mhash_entries);

static __initdata unsigned long mphash_entries;
static int __init set_mphash_entries(char *str)
{
	if (!str)
		return 0;
	mphash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mphash_entries=", set_mphash_entries);
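/*
 * Editor's note (illustrative, not part of the original source): the two
 * __setup() handlers above let the hash table sizes be overridden at boot,
 * e.g. by passing "mhash_entries=8192 mphash_entries=4096" on the kernel
 * command line; the parsed values are consumed later, when the mount and
 * mountpoint hash tables are allocated at init time (an assumption about
 * code outside this excerpt).
 */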

static u64 event;
static DEFINE_XARRAY_FLAGS(mnt_id_xa, XA_FLAGS_ALLOC);
static DEFINE_IDA(mnt_group_ida);

/* Don't allow confusion with old 32bit mount ID */
#define MNT_UNIQUE_ID_OFFSET (1ULL << 31)
static u64 mnt_id_ctr = MNT_UNIQUE_ID_OFFSET;

static struct hlist_head *mount_hashtable __ro_after_init;
static struct hlist_head *mountpoint_hashtable __ro_after_init;
static struct kmem_cache *mnt_cache __ro_after_init;
static DECLARE_RWSEM(namespace_sem);
static HLIST_HEAD(unmounted);	/* protected by namespace_sem */
static LIST_HEAD(ex_mountpoints); /* protected by namespace_sem */
static DEFINE_SEQLOCK(mnt_ns_tree_lock);

#ifdef CONFIG_FSNOTIFY
LIST_HEAD(notify_list); /* protected by namespace_sem */
#endif
static struct rb_root mnt_ns_tree = RB_ROOT; /* protected by mnt_ns_tree_lock */
static LIST_HEAD(mnt_ns_list); /* protected by mnt_ns_tree_lock */

struct mount_kattr {
	unsigned int attr_set;
	unsigned int attr_clr;
	unsigned int propagation;
	unsigned int lookup_flags;
	bool recurse;
	struct user_namespace *mnt_userns;
	struct mnt_idmap *mnt_idmap;
};

/* /sys/fs */
struct kobject *fs_kobj __ro_after_init;
EXPORT_SYMBOL_GPL(fs_kobj);

/*
 * vfsmount lock may be taken for read to prevent changes to the
 * vfsmount hash, ie. during mountpoint lookups or walking back
 * up the tree.
 *
 * It should be taken for write in all cases where the vfsmount
 * tree or hash is modified or when a vfsmount structure is modified.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);

static inline struct mnt_namespace *node_to_mnt_ns(const struct rb_node *node)
{
	if (!node)
		return NULL;
	return rb_entry(node, struct mnt_namespace, mnt_ns_tree_node);
}

static int mnt_ns_cmp(struct rb_node *a, const struct rb_node *b)
{
	struct mnt_namespace *ns_a = node_to_mnt_ns(a);
	struct mnt_namespace *ns_b = node_to_mnt_ns(b);
	u64 seq_a = ns_a->seq;
	u64 seq_b = ns_b->seq;

	if (seq_a < seq_b)
		return -1;
	if (seq_a > seq_b)
		return 1;
	return 0;
}

static inline void mnt_ns_tree_write_lock(void)
{
	write_seqlock(&mnt_ns_tree_lock);
}

static inline void mnt_ns_tree_write_unlock(void)
{
	write_sequnlock(&mnt_ns_tree_lock);
}

static void mnt_ns_tree_add(struct mnt_namespace *ns)
{
	struct rb_node *node, *prev;

	mnt_ns_tree_write_lock();
	node = rb_find_add_rcu(&ns->mnt_ns_tree_node, &mnt_ns_tree, mnt_ns_cmp);
	/*
	 * If there's no previous entry, add the namespace right after the
	 * list head; otherwise add it after the previous entry.
	 */
	prev = rb_prev(&ns->mnt_ns_tree_node);
	if (!prev)
		list_add_rcu(&ns->mnt_ns_list, &mnt_ns_list);
	else
		list_add_rcu(&ns->mnt_ns_list, &node_to_mnt_ns(prev)->mnt_ns_list);
	mnt_ns_tree_write_unlock();

	WARN_ON_ONCE(node);
}

static void mnt_ns_release(struct mnt_namespace *ns)
{
	/* keep alive for {list,stat}mount() */
	if (refcount_dec_and_test(&ns->passive)) {
		fsnotify_mntns_delete(ns);
		put_user_ns(ns->user_ns);
		kfree(ns);
	}
}
DEFINE_FREE(mnt_ns_release, struct mnt_namespace *, if (_T) mnt_ns_release(_T))

static void mnt_ns_release_rcu(struct rcu_head *rcu)
{
	mnt_ns_release(container_of(rcu, struct mnt_namespace, mnt_ns_rcu));
}

static void mnt_ns_tree_remove(struct mnt_namespace *ns)
{
	/* remove from global mount namespace list */
	if (!is_anon_ns(ns)) {
		mnt_ns_tree_write_lock();
		rb_erase(&ns->mnt_ns_tree_node, &mnt_ns_tree);
		list_bidir_del_rcu(&ns->mnt_ns_list);
		mnt_ns_tree_write_unlock();
	}

	call_rcu(&ns->mnt_ns_rcu, mnt_ns_release_rcu);
}

static int mnt_ns_find(const void *key, const struct rb_node *node)
{
	const u64 mnt_ns_id = *(u64 *)key;
	const struct mnt_namespace *ns = node_to_mnt_ns(node);

	if (mnt_ns_id < ns->seq)
		return -1;
	if (mnt_ns_id > ns->seq)
		return 1;
	return 0;
}
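/*
 * Editor's sketch (illustrative, not from the original source): the
 * DEFINE_FREE() above makes scope-based cleanup available for mount
 * namespaces, so a lookup (see lookup_mnt_ns() just below) can be
 * written as:
 *
 *	struct mnt_namespace *ns __free(mnt_ns_release) = lookup_mnt_ns(id);
 *
 *	if (!ns)
 *		return -ENOENT;
 *	...		// ns is released automatically on every return path
 */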

/*
 * Lookup a mount namespace by id and take a passive reference count. Taking a
 * passive reference means the mount namespace can be emptied if e.g., the last
 * task holding an active reference exits. To access the mounts of the
 * namespace the @namespace_sem must first be acquired. If the namespace has
 * already shut down before acquiring @namespace_sem, {list,stat}mount() will
 * see that the mount rbtree of the namespace is empty.
 *
 * Note the lookup is lockless, protected by a sequence counter. We only
 * need to guard against false negatives as false positives aren't
 * possible. So if we didn't find a mount namespace and the sequence
 * counter has changed we need to retry. If the sequence counter is
 * still the same we know the search actually failed.
 */
static struct mnt_namespace *lookup_mnt_ns(u64 mnt_ns_id)
{
	struct mnt_namespace *ns;
	struct rb_node *node;
	unsigned int seq;

	guard(rcu)();
	do {
		seq = read_seqbegin(&mnt_ns_tree_lock);
		node = rb_find_rcu(&mnt_ns_id, &mnt_ns_tree, mnt_ns_find);
		if (node)
			break;
	} while (read_seqretry(&mnt_ns_tree_lock, seq));

	if (!node)
		return NULL;

	/*
	 * The last reference count is put with RCU delay so we can
	 * unconditionally acquire a reference here.
	 */
	ns = node_to_mnt_ns(node);
	refcount_inc(&ns->passive);
	return ns;
}

static inline void lock_mount_hash(void)
{
	write_seqlock(&mount_lock);
}

static inline void unlock_mount_hash(void)
{
	write_sequnlock(&mount_lock);
}

static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> m_hash_shift);
	return &mount_hashtable[tmp & m_hash_mask];
}

static inline struct hlist_head *mp_hash(struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> mp_hash_shift);
	return &mountpoint_hashtable[tmp & mp_hash_mask];
}

static int mnt_alloc_id(struct mount *mnt)
{
	int res;

	xa_lock(&mnt_id_xa);
	res = __xa_alloc(&mnt_id_xa, &mnt->mnt_id, mnt, XA_LIMIT(1, INT_MAX), GFP_KERNEL);
	if (!res)
		mnt->mnt_id_unique = ++mnt_id_ctr;
	xa_unlock(&mnt_id_xa);
	return res;
}

static void mnt_free_id(struct mount *mnt)
{
	xa_erase(&mnt_id_xa, mnt->mnt_id);
}

/*
 * Allocate a new peer group ID
 */
static int mnt_alloc_group_id(struct mount *mnt)
{
	int res = ida_alloc_min(&mnt_group_ida, 1, GFP_KERNEL);

	if (res < 0)
		return res;
	mnt->mnt_group_id = res;
	return 0;
}

/*
 * Release a peer group ID
 */
void mnt_release_group_id(struct mount *mnt)
{
	ida_free(&mnt_group_ida, mnt->mnt_group_id);
	mnt->mnt_group_id = 0;
}

/*
 * vfsmount lock must be held for read
 */
static inline void mnt_add_count(struct mount *mnt, int n)
{
#ifdef CONFIG_SMP
	this_cpu_add(mnt->mnt_pcp->mnt_count, n);
#else
	preempt_disable();
	mnt->mnt_count += n;
	preempt_enable();
#endif
}

/*
 * vfsmount lock must be held for write
 */
int mnt_get_count(struct mount *mnt)
{
#ifdef CONFIG_SMP
	int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
	}

	return count;
#else
	return mnt->mnt_count;
#endif
}
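/*
 * Editor's sketch (illustrative): the generic shape of the lockless
 * seqlock read pattern used by lookup_mnt_ns() above. Only false
 * negatives need a retry; a hit can be trusted immediately:
 *
 *	unsigned int seq;
 *
 *	do {
 *		seq = read_seqbegin(&some_seqlock);
 *		node = search(...);
 *		if (node)
 *			break;		// false positives can't happen
 *	} while (read_seqretry(&some_seqlock, seq));	// retry a raced miss
 */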

static struct mount *alloc_vfsmnt(const char *name)
{
	struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		int err;

		err = mnt_alloc_id(mnt);
		if (err)
			goto out_free_cache;

		if (name) {
			mnt->mnt_devname = kstrdup_const(name,
							 GFP_KERNEL_ACCOUNT);
			if (!mnt->mnt_devname)
				goto out_free_id;
		}

#ifdef CONFIG_SMP
		mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
		if (!mnt->mnt_pcp)
			goto out_free_devname;

		this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
#else
		mnt->mnt_count = 1;
		mnt->mnt_writers = 0;
#endif

		INIT_HLIST_NODE(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
		INIT_LIST_HEAD(&mnt->mnt_slave);
		INIT_HLIST_NODE(&mnt->mnt_mp_list);
		INIT_LIST_HEAD(&mnt->mnt_umounting);
		INIT_HLIST_HEAD(&mnt->mnt_stuck_children);
		RB_CLEAR_NODE(&mnt->mnt_node);
		mnt->mnt.mnt_idmap = &nop_mnt_idmap;
	}
	return mnt;

#ifdef CONFIG_SMP
out_free_devname:
	kfree_const(mnt->mnt_devname);
#endif
out_free_id:
	mnt_free_id(mnt);
out_free_cache:
	kmem_cache_free(mnt_cache, mnt);
	return NULL;
}

/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/*
 * __mnt_is_readonly: check whether a mount is read-only
 * @mnt: the mount to check for its write status
 *
 * This shouldn't be used directly outside of the VFS.
 * It does not guarantee that the filesystem will stay
 * r/w, just that it is right *now*. This cannot and
 * should not be used in place of IS_RDONLY(inode).
 * mnt_want/drop_write() will _keep_ the filesystem
 * r/w.
 */
bool __mnt_is_readonly(struct vfsmount *mnt)
{
	return (mnt->mnt_flags & MNT_READONLY) || sb_rdonly(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);

static inline void mnt_inc_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_inc(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers++;
#endif
}

static inline void mnt_dec_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_dec(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers--;
#endif
}

static unsigned int mnt_get_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
	}

	return count;
#else
	return mnt->mnt_writers;
#endif
}

static int mnt_is_readonly(struct vfsmount *mnt)
{
	if (READ_ONCE(mnt->mnt_sb->s_readonly_remount))
		return 1;
	/*
	 * The barrier pairs with the barrier in sb_start_ro_state_change()
	 * making sure if we don't see s_readonly_remount set yet, we also will
	 * not see any superblock / mount flag changes done by remount.
	 * It also pairs with the barrier in sb_end_ro_state_change()
	 * assuring that if we see s_readonly_remount already cleared, we will
	 * see the values of superblock / mount flags updated by remount.
	 */
	smp_rmb();
	return __mnt_is_readonly(mnt);
}

/*
 * Most r/o & frozen checks on a fs are for operations that take discrete
 * amounts of time, like a write() or unlink(). We must keep track of when
 * those operations start (for permission checks) and when they end, so that we
 * can determine when writes are able to occur to a filesystem.
 */
/**
 * mnt_get_write_access - get write access to a mount without freeze protection
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write) before
 * returning success. This operation does not protect against the filesystem
 * being frozen. When the write operation is finished, mnt_put_write_access()
 * must be called. This is effectively a refcount.
 */
int mnt_get_write_access(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int ret = 0;

	preempt_disable();
	mnt_inc_writers(mnt);
	/*
	 * The store to mnt_inc_writers must be visible before we pass
	 * the MNT_WRITE_HOLD loop below, so that the slowpath can see our
	 * incremented count after it has set MNT_WRITE_HOLD.
	 */
	smp_mb();
	might_lock(&mount_lock.lock);
	while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) {
		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
			cpu_relax();
		} else {
			/*
			 * This prevents priority inversion, if the task
			 * setting MNT_WRITE_HOLD got preempted on a remote
			 * CPU, and it prevents livelock if the task setting
			 * MNT_WRITE_HOLD has a lower priority and is bound to
			 * the same CPU as the task that is spinning here.
			 */
			preempt_enable();
			lock_mount_hash();
			unlock_mount_hash();
			preempt_disable();
		}
	}
	/*
	 * The barrier pairs with the barrier in sb_start_ro_state_change()
	 * making sure that if we see MNT_WRITE_HOLD cleared, we will also see
	 * s_readonly_remount set (or even SB_RDONLY / MNT_READONLY flags) in
	 * mnt_is_readonly() and bail in case we are racing with remount
	 * read-only.
	 */
	smp_rmb();
	if (mnt_is_readonly(m)) {
		mnt_dec_writers(mnt);
		ret = -EROFS;
	}
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(mnt_get_write_access);

/**
 * mnt_want_write - get write access to a mount
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write, filesystem
 * is not frozen) before returning success. When the write operation is
 * finished, mnt_drop_write() must be called. This is effectively a refcount.
 */
int mnt_want_write(struct vfsmount *m)
{
	int ret;

	sb_start_write(m->mnt_sb);
	ret = mnt_get_write_access(m);
	if (ret)
		sb_end_write(m->mnt_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);

/**
 * mnt_get_write_access_file - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like mnt_get_write_access, but if @file is already open for write it
 * skips incrementing mnt_writers (since the open file already has a reference)
 * and instead only does the check for emergency r/o remounts. This must be
 * paired with mnt_put_write_access_file.
 */
int mnt_get_write_access_file(struct file *file)
{
	if (file->f_mode & FMODE_WRITER) {
		/*
		 * Superblock may have become readonly while there are still
		 * writable fd's, e.g. due to a fs error with errors=remount-ro
		 */
		if (__mnt_is_readonly(file->f_path.mnt))
			return -EROFS;
		return 0;
	}
	return mnt_get_write_access(file->f_path.mnt);
}
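/*
 * Editor's sketch (illustrative): the canonical pairing for the write
 * access helpers above, as used throughout the VFS (mnt_drop_write() is
 * defined just below):
 *
 *	err = mnt_want_write(path->mnt);
 *	if (err)
 *		return err;
 *	...				// perform the write operation
 *	mnt_drop_write(path->mnt);
 */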

/**
 * mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like mnt_want_write, but if the file is already open for writing it
 * skips incrementing mnt_writers (since the open file already has a reference)
 * and instead only does the freeze protection and the check for emergency r/o
 * remounts. This must be paired with mnt_drop_write_file.
 */
int mnt_want_write_file(struct file *file)
{
	int ret;

	sb_start_write(file_inode(file)->i_sb);
	ret = mnt_get_write_access_file(file);
	if (ret)
		sb_end_write(file_inode(file)->i_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write_file);

/**
 * mnt_put_write_access - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done
 * performing writes to it. Must be matched with
 * mnt_get_write_access() call above.
 */
void mnt_put_write_access(struct vfsmount *mnt)
{
	preempt_disable();
	mnt_dec_writers(real_mount(mnt));
	preempt_enable();
}
EXPORT_SYMBOL_GPL(mnt_put_write_access);

/**
 * mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done performing writes to it and
 * also allows filesystem to be frozen again. Must be matched with
 * mnt_want_write() call above.
 */
void mnt_drop_write(struct vfsmount *mnt)
{
	mnt_put_write_access(mnt);
	sb_end_write(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(mnt_drop_write);

void mnt_put_write_access_file(struct file *file)
{
	if (!(file->f_mode & FMODE_WRITER))
		mnt_put_write_access(file->f_path.mnt);
}

void mnt_drop_write_file(struct file *file)
{
	mnt_put_write_access_file(file);
	sb_end_write(file_inode(file)->i_sb);
}
EXPORT_SYMBOL(mnt_drop_write_file);

/**
 * mnt_hold_writers - prevent write access to the given mount
 * @mnt: mnt to prevent write access to
 *
 * Prevents write access to @mnt if there are no active writers for @mnt.
 * This function needs to be called and return successfully before changing
 * properties of @mnt that need to remain stable for callers with write access
 * to @mnt.
 *
 * After this function has been called successfully callers must pair it with
 * a call to mnt_unhold_writers() in order to stop preventing write access to
 * @mnt.
 *
 * Context: This function expects lock_mount_hash() to be held serializing
 *          setting MNT_WRITE_HOLD.
 * Return: On success 0 is returned.
 *	   On error, -EBUSY is returned.
 */
static inline int mnt_hold_writers(struct mount *mnt)
{
	mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
	/*
	 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
	 * should be visible before we do.
	 */
	smp_mb();

	/*
	 * With writers on hold, if this value is zero, then there are
	 * definitely no active writers (although held writers may subsequently
	 * increment the count, they'll have to wait, and decrement it after
	 * seeing MNT_READONLY).
	 *
	 * It is OK to have counter incremented on one CPU and decremented on
	 * another: the sum will add up correctly. The danger would be when we
	 * sum up each counter, if we read a counter before it is incremented,
	 * but then read another CPU's count which it has been subsequently
	 * decremented from -- we would see more decrements than we should.
	 * MNT_WRITE_HOLD protects against this scenario, because
	 * mnt_want_write first increments count, then smp_mb, then spins on
	 * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
	 * we're counting up here.
	 */
	if (mnt_get_writers(mnt) > 0)
		return -EBUSY;

	return 0;
}

/**
 * mnt_unhold_writers - stop preventing write access to the given mount
 * @mnt: mnt to stop preventing write access to
 *
 * Stop preventing write access to @mnt allowing callers to gain write access
 * to @mnt again.
 *
 * This function can only be called after a successful call to
 * mnt_hold_writers().
 *
 * Context: This function expects lock_mount_hash() to be held.
 */
static inline void mnt_unhold_writers(struct mount *mnt)
{
	/*
	 * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
	 * that become unheld will see MNT_READONLY.
	 */
	smp_wmb();
	mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
}

static int mnt_make_readonly(struct mount *mnt)
{
	int ret;

	ret = mnt_hold_writers(mnt);
	if (!ret)
		mnt->mnt.mnt_flags |= MNT_READONLY;
	mnt_unhold_writers(mnt);
	return ret;
}

int sb_prepare_remount_readonly(struct super_block *sb)
{
	struct mount *mnt;
	int err = 0;

	/* Racy optimization.  Recheck the counter under MNT_WRITE_HOLD */
	if (atomic_long_read(&sb->s_remove_count))
		return -EBUSY;

	lock_mount_hash();
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
			err = mnt_hold_writers(mnt);
			if (err)
				break;
		}
	}
	if (!err && atomic_long_read(&sb->s_remove_count))
		err = -EBUSY;

	if (!err)
		sb_start_ro_state_change(sb);
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
			mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
	}
	unlock_mount_hash();

	return err;
}

static void free_vfsmnt(struct mount *mnt)
{
	mnt_idmap_put(mnt_idmap(&mnt->mnt));
	kfree_const(mnt->mnt_devname);
#ifdef CONFIG_SMP
	free_percpu(mnt->mnt_pcp);
#endif
	kmem_cache_free(mnt_cache, mnt);
}

static void delayed_free_vfsmnt(struct rcu_head *head)
{
	free_vfsmnt(container_of(head, struct mount, mnt_rcu));
}

/* call under rcu_read_lock */
int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	struct mount *mnt;
	if (read_seqretry(&mount_lock, seq))
		return 1;
	if (bastard == NULL)
		return 0;
	mnt = real_mount(bastard);
	mnt_add_count(mnt, 1);
	smp_mb();		// see mntput_no_expire()
	if (likely(!read_seqretry(&mount_lock, seq)))
		return 0;
	if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
		mnt_add_count(mnt, -1);
		return 1;
	}
	lock_mount_hash();
	if (unlikely(bastard->mnt_flags & MNT_DOOMED)) {
		mnt_add_count(mnt, -1);
		unlock_mount_hash();
		return 1;
	}
	unlock_mount_hash();
	/* caller will mntput() */
	return -1;
}

/* call under rcu_read_lock */
static bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	int res = __legitimize_mnt(bastard, seq);
	if (likely(!res))
		return true;
	if (unlikely(res < 0)) {
		rcu_read_unlock();
		mntput(bastard);
		rcu_read_lock();
	}
	return false;
}
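/*
 * Editor's sketch (illustrative): how the hold/unhold pair above is used
 * to flip a mount read-only without racing against writers, in the spirit
 * of mnt_make_readonly(); the caller holds lock_mount_hash():
 *
 *	ret = mnt_hold_writers(mnt);	// sets MNT_WRITE_HOLD, sums writers
 *	if (!ret)
 *		mnt->mnt.mnt_flags |= MNT_READONLY;
 *	mnt_unhold_writers(mnt);	// smp_wmb(), clear MNT_WRITE_HOLD
 */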

/**
 * __lookup_mnt - find first child mount
 * @mnt: parent mount
 * @dentry: mountpoint
 *
 * If @mnt has a child mount @c mounted at @dentry, find and return it.
 *
 * Note that the child mount @c need not be unique. There are cases
 * where shadow mounts are created. For example, during mount
 * propagation when a source mount @mnt whose root got overmounted by a
 * mount @o after path lookup but before @namespace_sem could be
 * acquired gets copied and propagated. So @mnt gets copied including
 * @o. When @mnt is propagated to a destination mount @d that already
 * has another mount @n mounted at the same mountpoint then the source
 * mount @mnt will be tucked beneath @n, i.e., @n will be mounted on
 * @mnt and @mnt mounted on @d. Now both @n and @o are mounted at @mnt
 * on @dentry.
 *
 * Return: The first child of @mnt mounted at @dentry or NULL.
 */
struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
	struct hlist_head *head = m_hash(mnt, dentry);
	struct mount *p;

	hlist_for_each_entry_rcu(p, head, mnt_hash)
		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
			return p;
	return NULL;
}

/*
 * lookup_mnt - Return the first child mount mounted at path
 *
 * "First" means first mounted chronologically.  If you create the
 * following mounts:
 *
 * mount /dev/sda1 /mnt
 * mount /dev/sda2 /mnt
 * mount /dev/sda3 /mnt
 *
 * Then lookup_mnt() on the base /mnt dentry in the root mount will
 * return successively the root dentry and vfsmount of /dev/sda1, then
 * /dev/sda2, then /dev/sda3, then NULL.
 *
 * lookup_mnt takes a reference to the found vfsmount.
 */
struct vfsmount *lookup_mnt(const struct path *path)
{
	struct mount *child_mnt;
	struct vfsmount *m;
	unsigned seq;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		child_mnt = __lookup_mnt(path->mnt, path->dentry);
		m = child_mnt ? &child_mnt->mnt : NULL;
	} while (!legitimize_mnt(m, seq));
	rcu_read_unlock();
	return m;
}

/*
 * __is_local_mountpoint - Test to see if dentry is a mountpoint in the
 *                         current mount namespace.
 *
 * The common case is dentries are not mountpoints at all and that
 * test is handled inline.  For the slow case when we are actually
 * dealing with a mountpoint of some kind, walk through all of the
 * mounts in the current mount namespace and test to see if the dentry
 * is a mountpoint.
 *
 * The mount_hashtable is not usable in the context because we
 * need to identify all mounts that may be in the current mount
 * namespace not just a mount that happens to have some specified
 * parent mount.
 */
bool __is_local_mountpoint(struct dentry *dentry)
{
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	struct mount *mnt, *n;
	bool is_covered = false;

	down_read(&namespace_sem);
	rbtree_postorder_for_each_entry_safe(mnt, n, &ns->mounts, mnt_node) {
		is_covered = (mnt->mnt_mountpoint == dentry);
		if (is_covered)
			break;
	}
	up_read(&namespace_sem);

	return is_covered;
}

static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
{
	struct hlist_head *chain = mp_hash(dentry);
	struct mountpoint *mp;

	hlist_for_each_entry(mp, chain, m_hash) {
		if (mp->m_dentry == dentry) {
			mp->m_count++;
			return mp;
		}
	}
	return NULL;
}

static struct mountpoint *get_mountpoint(struct dentry *dentry)
{
	struct mountpoint *mp, *new = NULL;
	int ret;

	if (d_mountpoint(dentry)) {
		/* might be worth a WARN_ON() */
		if (d_unlinked(dentry))
			return ERR_PTR(-ENOENT);
mountpoint:
		read_seqlock_excl(&mount_lock);
		mp = lookup_mountpoint(dentry);
		read_sequnlock_excl(&mount_lock);
		if (mp)
			goto done;
	}

	if (!new)
		new = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	/* Exactly one process may set d_mounted */
	ret = d_set_mounted(dentry);

	/* Someone else set d_mounted? */
	if (ret == -EBUSY)
		goto mountpoint;

	/* The dentry is not available as a mountpoint? */
	mp = ERR_PTR(ret);
	if (ret)
		goto done;

	/* Add the new mountpoint to the hash table */
	read_seqlock_excl(&mount_lock);
	new->m_dentry = dget(dentry);
	new->m_count = 1;
	hlist_add_head(&new->m_hash, mp_hash(dentry));
	INIT_HLIST_HEAD(&new->m_list);
	read_sequnlock_excl(&mount_lock);

	mp = new;
	new = NULL;
done:
	kfree(new);
	return mp;
}

/*
 * vfsmount lock must be held.  Additionally, the caller is responsible
 * for serializing calls for a given disposal list.
 */
static void __put_mountpoint(struct mountpoint *mp, struct list_head *list)
{
	if (!--mp->m_count) {
		struct dentry *dentry = mp->m_dentry;
		BUG_ON(!hlist_empty(&mp->m_list));
		spin_lock(&dentry->d_lock);
		dentry->d_flags &= ~DCACHE_MOUNTED;
		spin_unlock(&dentry->d_lock);
		dput_to_list(dentry, list);
		hlist_del(&mp->m_hash);
		kfree(mp);
	}
}

/* called with namespace_lock and vfsmount lock */
static void put_mountpoint(struct mountpoint *mp)
{
	__put_mountpoint(mp, &ex_mountpoints);
}

static inline int check_mnt(struct mount *mnt)
{
	return mnt->mnt_ns == current->nsproxy->mnt_ns;
}

/*
 * vfsmount lock must be held for write
 */
static void touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static struct mountpoint *unhash_mnt(struct mount *mnt)
{
	struct mountpoint *mp;
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	list_del_init(&mnt->mnt_child);
	hlist_del_init_rcu(&mnt->mnt_hash);
	hlist_del_init(&mnt->mnt_mp_list);
	mp = mnt->mnt_mp;
	mnt->mnt_mp = NULL;
	return mp;
}

/*
 * vfsmount lock must be held for write
 */
static void umount_mnt(struct mount *mnt)
{
	put_mountpoint(unhash_mnt(mnt));
}

/*
 * vfsmount lock must be held for write
 */
void mnt_set_mountpoint(struct mount *mnt,
			struct mountpoint *mp,
			struct mount *child_mnt)
{
	mp->m_count++;
	mnt_add_count(mnt, 1);	/* essentially, that's mntget */
	child_mnt->mnt_mountpoint = mp->m_dentry;
	child_mnt->mnt_parent = mnt;
	child_mnt->mnt_mp = mp;
	hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
}
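/*
 * Editor's note (illustrative summary): after mnt_set_mountpoint(parent,
 * mp, child) the tree linkage is:
 *
 *	child->mnt_parent     == parent		(plus one ref on parent)
 *	child->mnt_mountpoint == mp->m_dentry	(plus one on mp->m_count)
 *	child is linked on mp->m_list
 *
 * unhash_mnt() above is the inverse: it resets the child to being its own
 * parent and returns the mountpoint for the caller to put.
 */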

/**
 * mnt_set_mountpoint_beneath - mount a mount beneath another one
 *
 * @new_parent: the source mount
 * @top_mnt:    the mount beneath which @new_parent is mounted
 * @new_mp:     the new mountpoint of @top_mnt on @new_parent
 *
 * Remove @top_mnt from its current mountpoint @top_mnt->mnt_mp and
 * parent @top_mnt->mnt_parent and mount it on top of @new_parent at
 * @new_mp. And mount @new_parent on the old parent and old
 * mountpoint of @top_mnt.
 *
 * Context: This function expects namespace_lock() and lock_mount_hash()
 *          to have been acquired in that order.
 */
static void mnt_set_mountpoint_beneath(struct mount *new_parent,
				       struct mount *top_mnt,
				       struct mountpoint *new_mp)
{
	struct mount *old_top_parent = top_mnt->mnt_parent;
	struct mountpoint *old_top_mp = top_mnt->mnt_mp;

	mnt_set_mountpoint(old_top_parent, old_top_mp, new_parent);
	mnt_change_mountpoint(new_parent, new_mp, top_mnt);
}

static void __attach_mnt(struct mount *mnt, struct mount *parent)
{
	hlist_add_head_rcu(&mnt->mnt_hash,
			   m_hash(&parent->mnt, mnt->mnt_mountpoint));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
}

/**
 * attach_mnt - mount a mount, attach to @mount_hashtable and parent's
 *              list of child mounts
 * @parent:  the parent
 * @mnt:     the new mount
 * @mp:      the new mountpoint
 * @beneath: whether to mount @mnt beneath or on top of @parent
 *
 * If @beneath is false, mount @mnt at @mp on @parent. Then attach @mnt
 * to @parent's child mount list and to @mount_hashtable.
 *
 * If @beneath is true, remove @mnt from its current parent and
 * mountpoint and mount it on @mp on @parent, and mount @parent on the
 * old parent and old mountpoint of @mnt. Finally, attach @parent to
 * @mnt_hashtable and @parent->mnt_parent->mnt_mounts.
 *
 * Note, when __attach_mnt() is called @mnt->mnt_parent already points
 * to the correct parent.
 *
 * Context: This function expects namespace_lock() and lock_mount_hash()
 *          to have been acquired in that order.
 */
static void attach_mnt(struct mount *mnt, struct mount *parent,
		       struct mountpoint *mp, bool beneath)
{
	if (beneath)
		mnt_set_mountpoint_beneath(mnt, parent, mp);
	else
		mnt_set_mountpoint(parent, mp, mnt);
	/*
	 * Note, @mnt->mnt_parent has to be used. If @mnt was mounted
	 * beneath @parent then @mnt will need to be attached to
	 * @parent's old parent, not @parent. IOW, @mnt->mnt_parent
	 * isn't the same mount as @parent.
	 */
	__attach_mnt(mnt, mnt->mnt_parent);
}

void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt)
{
	struct mountpoint *old_mp = mnt->mnt_mp;
	struct mount *old_parent = mnt->mnt_parent;

	list_del_init(&mnt->mnt_child);
	hlist_del_init(&mnt->mnt_mp_list);
	hlist_del_init_rcu(&mnt->mnt_hash);

	attach_mnt(mnt, parent, mp, false);

	put_mountpoint(old_mp);
	mnt_add_count(old_parent, -1);
}
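/*
 * Editor's sketch (illustrative): the effect of mounting @new beneath
 * @top at @mp (cf. mnt_set_mountpoint_beneath() above):
 *
 *	before:	old_parent -> top
 *	after:	old_parent -> new -> top
 *
 * @new takes over @top's old parent and mountpoint, while @top is
 * remounted at @mp inside @new.
 */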

static inline struct mount *node_to_mount(struct rb_node *node)
{
	return node ? rb_entry(node, struct mount, mnt_node) : NULL;
}

static void mnt_add_to_ns(struct mnt_namespace *ns, struct mount *mnt)
{
	struct rb_node **link = &ns->mounts.rb_node;
	struct rb_node *parent = NULL;
	bool mnt_first_node = true, mnt_last_node = true;

	WARN_ON(mnt_ns_attached(mnt));
	mnt->mnt_ns = ns;
	while (*link) {
		parent = *link;
		if (mnt->mnt_id_unique < node_to_mount(parent)->mnt_id_unique) {
			link = &parent->rb_left;
			mnt_last_node = false;
		} else {
			link = &parent->rb_right;
			mnt_first_node = false;
		}
	}

	if (mnt_last_node)
		ns->mnt_last_node = &mnt->mnt_node;
	if (mnt_first_node)
		ns->mnt_first_node = &mnt->mnt_node;
	rb_link_node(&mnt->mnt_node, parent, link);
	rb_insert_color(&mnt->mnt_node, &ns->mounts);

	mnt_notify_add(mnt);
}

/*
 * vfsmount lock must be held for write
 */
static void commit_tree(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m;
	LIST_HEAD(head);
	struct mnt_namespace *n = parent->mnt_ns;

	BUG_ON(parent == mnt);

	list_add_tail(&head, &mnt->mnt_list);
	while (!list_empty(&head)) {
		m = list_first_entry(&head, typeof(*m), mnt_list);
		list_del(&m->mnt_list);

		mnt_add_to_ns(n, m);
	}
	n->nr_mounts += n->pending_mounts;
	n->pending_mounts = 0;

	__attach_mnt(mnt, parent);
	touch_mnt_namespace(n);
}

static struct mount *next_mnt(struct mount *p, struct mount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct mount, mnt_child);
}

static struct mount *skip_mnt_tree(struct mount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct mount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}
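/*
 * Editor's sketch (illustrative): the canonical mount tree walk built on
 * next_mnt(), visiting @root first and then everything mounted below it
 * (see may_umount_tree() and umount_tree() later in this file):
 *
 *	for (p = root; p; p = next_mnt(p, root))
 *		...;		// depth-first, parents before children
 */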

/**
 * vfs_create_mount - Create a mount for a configured superblock
 * @fc: The configuration context with the superblock attached
 *
 * Create a mount to an already configured superblock.  If necessary, the
 * caller should invoke vfs_get_tree() before calling this.
 *
 * Note that this does not attach the mount to anything.
 */
struct vfsmount *vfs_create_mount(struct fs_context *fc)
{
	struct mount *mnt;

	if (!fc->root)
		return ERR_PTR(-EINVAL);

	mnt = alloc_vfsmnt(fc->source ?: "none");
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (fc->sb_flags & SB_KERNMOUNT)
		mnt->mnt.mnt_flags = MNT_INTERNAL;

	atomic_inc(&fc->root->d_sb->s_active);
	mnt->mnt.mnt_sb = fc->root->d_sb;
	mnt->mnt.mnt_root = dget(fc->root);
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;

	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &mnt->mnt.mnt_sb->s_mounts);
	unlock_mount_hash();
	return &mnt->mnt;
}
EXPORT_SYMBOL(vfs_create_mount);

struct vfsmount *fc_mount(struct fs_context *fc)
{
	int err = vfs_get_tree(fc);
	if (!err) {
		up_write(&fc->root->d_sb->s_umount);
		return vfs_create_mount(fc);
	}
	return ERR_PTR(err);
}
EXPORT_SYMBOL(fc_mount);

struct vfsmount *vfs_kern_mount(struct file_system_type *type,
				int flags, const char *name,
				void *data)
{
	struct fs_context *fc;
	struct vfsmount *mnt;
	int ret = 0;

	if (!type)
		return ERR_PTR(-EINVAL);

	fc = fs_context_for_mount(type, flags);
	if (IS_ERR(fc))
		return ERR_CAST(fc);

	if (name)
		ret = vfs_parse_fs_string(fc, "source",
					  name, strlen(name));
	if (!ret)
		ret = parse_monolithic_mount_data(fc, data);
	if (!ret)
		mnt = fc_mount(fc);
	else
		mnt = ERR_PTR(ret);

	put_fs_context(fc);
	return mnt;
}
EXPORT_SYMBOL_GPL(vfs_kern_mount);
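/*
 * Editor's note (illustrative outline, error handling elided): the
 * fs_context sequence that vfs_kern_mount() above performs:
 *
 *	fc = fs_context_for_mount(type, flags);
 *	vfs_parse_fs_string(fc, "source", name, strlen(name));
 *	parse_monolithic_mount_data(fc, data);
 *	mnt = fc_mount(fc);		// vfs_get_tree() + vfs_create_mount()
 *	put_fs_context(fc);
 */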

struct vfsmount *
vfs_submount(const struct dentry *mountpoint, struct file_system_type *type,
	     const char *name, void *data)
{
	/* Until it is worked out how to pass the user namespace
	 * through from the parent mount to the submount, don't support
	 * unprivileged mounts with submounts.
	 */
	if (mountpoint->d_sb->s_user_ns != &init_user_ns)
		return ERR_PTR(-EPERM);

	return vfs_kern_mount(type, SB_SUBMOUNT, name, data);
}
EXPORT_SYMBOL_GPL(vfs_submount);

static struct mount *clone_mnt(struct mount *old, struct dentry *root,
			       int flag)
{
	struct super_block *sb = old->mnt.mnt_sb;
	struct mount *mnt;
	int err;

	mnt = alloc_vfsmnt(old->mnt_devname);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE))
		mnt->mnt_group_id = 0; /* not a peer of original */
	else
		mnt->mnt_group_id = old->mnt_group_id;

	if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
		err = mnt_alloc_group_id(mnt);
		if (err)
			goto out_free;
	}

	mnt->mnt.mnt_flags = old->mnt.mnt_flags;
	mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL);

	atomic_inc(&sb->s_active);
	mnt->mnt.mnt_idmap = mnt_idmap_get(mnt_idmap(&old->mnt));

	mnt->mnt.mnt_sb = sb;
	mnt->mnt.mnt_root = dget(root);
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;
	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
	unlock_mount_hash();

	if ((flag & CL_SLAVE) ||
	    ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) {
		list_add(&mnt->mnt_slave, &old->mnt_slave_list);
		mnt->mnt_master = old;
		CLEAR_MNT_SHARED(mnt);
	} else if (!(flag & CL_PRIVATE)) {
		if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
			list_add(&mnt->mnt_share, &old->mnt_share);
		if (IS_MNT_SLAVE(old))
			list_add(&mnt->mnt_slave, &old->mnt_slave);
		mnt->mnt_master = old->mnt_master;
	} else {
		CLEAR_MNT_SHARED(mnt);
	}
	if (flag & CL_MAKE_SHARED)
		set_mnt_shared(mnt);

	/* stick the duplicate mount on the same expiry list
	 * as the original if that was on one */
	if (flag & CL_EXPIRE) {
		if (!list_empty(&old->mnt_expire))
			list_add(&mnt->mnt_expire, &old->mnt_expire);
	}

	return mnt;

out_free:
	mnt_free_id(mnt);
	free_vfsmnt(mnt);
	return ERR_PTR(err);
}
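/*
 * Editor's note (illustrative summary): how the clone flags above map to
 * the propagation state of the copy made by clone_mnt():
 *
 *	CL_SLAVE		-> slave of @old
 *	CL_SHARED_TO_SLAVE	-> slave, if @old was shared
 *	CL_MAKE_SHARED		-> shared (allocating a peer group if needed)
 *	CL_PRIVATE		-> private; no peer or master links
 */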

static void cleanup_mnt(struct mount *mnt)
{
	struct hlist_node *p;
	struct mount *m;
	/*
	 * The warning here probably indicates that somebody messed
	 * up a mnt_want/drop_write() pair.  If this happens, the
	 * filesystem was probably unable to make r/w->r/o transitions.
	 * The locking used to deal with mnt_count decrement provides barriers,
	 * so mnt_get_writers() below is safe.
	 */
	WARN_ON(mnt_get_writers(mnt));
	if (unlikely(mnt->mnt_pins.first))
		mnt_pin_kill(mnt);
	hlist_for_each_entry_safe(m, p, &mnt->mnt_stuck_children, mnt_umount) {
		hlist_del(&m->mnt_umount);
		mntput(&m->mnt);
	}
	fsnotify_vfsmount_delete(&mnt->mnt);
	dput(mnt->mnt.mnt_root);
	deactivate_super(mnt->mnt.mnt_sb);
	mnt_free_id(mnt);
	call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt);
}

static void __cleanup_mnt(struct rcu_head *head)
{
	cleanup_mnt(container_of(head, struct mount, mnt_rcu));
}

static LLIST_HEAD(delayed_mntput_list);
static void delayed_mntput(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&delayed_mntput_list);
	struct mount *m, *t;

	llist_for_each_entry_safe(m, t, node, mnt_llist)
		cleanup_mnt(m);
}
static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);

static void mntput_no_expire(struct mount *mnt)
{
	LIST_HEAD(list);
	int count;

	rcu_read_lock();
	if (likely(READ_ONCE(mnt->mnt_ns))) {
		/*
		 * Since we don't do lock_mount_hash() here,
		 * ->mnt_ns can change under us.  However, if it's
		 * non-NULL, then there's a reference that won't
		 * be dropped until after an RCU delay done after
		 * turning ->mnt_ns NULL.  So if we observe it
		 * non-NULL under rcu_read_lock(), the reference
		 * we are dropping is not the final one.
		 */
		mnt_add_count(mnt, -1);
		rcu_read_unlock();
		return;
	}
	lock_mount_hash();
	/*
	 * make sure that if __legitimize_mnt() has not seen us grab
	 * mount_lock, we'll see their refcount increment here.
	 */
	smp_mb();
	mnt_add_count(mnt, -1);
	count = mnt_get_count(mnt);
	if (count != 0) {
		WARN_ON(count < 0);
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) {
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	mnt->mnt.mnt_flags |= MNT_DOOMED;
	rcu_read_unlock();

	list_del(&mnt->mnt_instance);

	if (unlikely(!list_empty(&mnt->mnt_mounts))) {
		struct mount *p, *tmp;
		list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
			__put_mountpoint(unhash_mnt(p), &list);
			hlist_add_head(&p->mnt_umount, &mnt->mnt_stuck_children);
		}
	}
	unlock_mount_hash();
	shrink_dentry_list(&list);

	if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
		struct task_struct *task = current;
		if (likely(!(task->flags & PF_KTHREAD))) {
			init_task_work(&mnt->mnt_rcu, __cleanup_mnt);
			if (!task_work_add(task, &mnt->mnt_rcu, TWA_RESUME))
				return;
		}
		if (llist_add(&mnt->mnt_llist, &delayed_mntput_list))
			schedule_delayed_work(&delayed_mntput_work, 1);
		return;
	}
	cleanup_mnt(mnt);
}

void mntput(struct vfsmount *mnt)
{
	if (mnt) {
		struct mount *m = real_mount(mnt);
		/* avoid cacheline pingpong */
		if (unlikely(m->mnt_expiry_mark))
			WRITE_ONCE(m->mnt_expiry_mark, 0);
		mntput_no_expire(m);
	}
}
EXPORT_SYMBOL(mntput);

struct vfsmount *mntget(struct vfsmount *mnt)
{
	if (mnt)
		mnt_add_count(real_mount(mnt), 1);
	return mnt;
}
EXPORT_SYMBOL(mntget);
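/*
 * Editor's note (illustrative summary): where a final mntput() ends up,
 * per mntput_no_expire() above:
 *
 *	normal task context	-> task_work, runs __cleanup_mnt() on the
 *				   way back to userspace
 *	kthread (or task_work
 *	registration failed)	-> delayed_mntput_work on a workqueue
 *	MNT_INTERNAL		-> cleanup_mnt() called synchronously
 */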

/*
 * Make a mount point inaccessible to new lookups.
 * Because there may still be current users, the caller MUST WAIT
 * for an RCU grace period before destroying the mount point.
 */
void mnt_make_shortterm(struct vfsmount *mnt)
{
	if (mnt)
		real_mount(mnt)->mnt_ns = NULL;
}

/**
 * path_is_mountpoint() - Check if path is a mount in the current namespace.
 * @path: path to check
 *
 * d_mountpoint() can only be used reliably to establish if a dentry is
 * not mounted in any namespace and that common case is handled inline.
 * d_mountpoint() isn't aware of the possibility there may be multiple
 * mounts using a given dentry in a different namespace. This function
 * checks if the passed in path is a mountpoint rather than the dentry
 * alone.
 */
bool path_is_mountpoint(const struct path *path)
{
	unsigned seq;
	bool res;

	if (!d_mountpoint(path->dentry))
		return false;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		res = __path_is_mountpoint(path);
	} while (read_seqretry(&mount_lock, seq));
	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL(path_is_mountpoint);

struct vfsmount *mnt_clone_internal(const struct path *path)
{
	struct mount *p;
	p = clone_mnt(real_mount(path->mnt), path->dentry, CL_PRIVATE);
	if (IS_ERR(p))
		return ERR_CAST(p);
	p->mnt.mnt_flags |= MNT_INTERNAL;
	return &p->mnt;
}

/*
 * Returns the mount which either has the specified mnt_id, or has the next
 * smallest id after the specified one.
 */
static struct mount *mnt_find_id_at(struct mnt_namespace *ns, u64 mnt_id)
{
	struct rb_node *node = ns->mounts.rb_node;
	struct mount *ret = NULL;

	while (node) {
		struct mount *m = node_to_mount(node);

		if (mnt_id <= m->mnt_id_unique) {
			ret = node_to_mount(node);
			if (mnt_id == m->mnt_id_unique)
				break;
			node = node->rb_left;
		} else {
			node = node->rb_right;
		}
	}
	return ret;
}

/*
 * Returns the mount which either has the specified mnt_id, or has the next
 * smaller id before the specified one.
 */
static struct mount *mnt_find_id_at_reverse(struct mnt_namespace *ns, u64 mnt_id)
{
	struct rb_node *node = ns->mounts.rb_node;
	struct mount *ret = NULL;

	while (node) {
		struct mount *m = node_to_mount(node);

		if (mnt_id >= m->mnt_id_unique) {
			ret = node_to_mount(node);
			if (mnt_id == m->mnt_id_unique)
				break;
			node = node->rb_right;
		} else {
			node = node->rb_left;
		}
	}
	return ret;
}
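/*
 * Editor's sketch (illustrative): mnt_find_id_at() is a ceiling search on
 * the per-namespace rbtree keyed by mnt_id_unique, and the _reverse
 * variant is the matching floor search. With ids {10, 17, 42}:
 *
 *	mnt_find_id_at(ns, 17)		-> mount 17	(exact match)
 *	mnt_find_id_at(ns, 20)		-> mount 42	(next id above)
 *	mnt_find_id_at_reverse(ns, 20)	-> mount 17	(next id below)
 */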

#ifdef CONFIG_PROC_FS

/* iterator; we want it to have access to namespace_sem, thus here... */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_mounts *p = m->private;

	down_read(&namespace_sem);

	return mnt_find_id_at(p->ns, *pos);
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct mount *next = NULL, *mnt = v;
	struct rb_node *node = rb_next(&mnt->mnt_node);

	++*pos;
	if (node) {
		next = node_to_mount(node);
		*pos = next->mnt_id_unique;
	}
	return next;
}

static void m_stop(struct seq_file *m, void *v)
{
	up_read(&namespace_sem);
}

static int m_show(struct seq_file *m, void *v)
{
	struct proc_mounts *p = m->private;
	struct mount *r = v;
	return p->show(m, &r->mnt);
}

const struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= m_show,
};

#endif  /* CONFIG_PROC_FS */

/**
 * may_umount_tree - check if a mount tree is busy
 * @m: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int actual_refs = 0;
	int minimum_refs = 0;
	struct mount *p;
	BUG_ON(!m);

	/* write lock needed for mnt_get_count */
	lock_mount_hash();
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		actual_refs += mnt_get_count(p);
		minimum_refs += 2;
	}
	unlock_mount_hash();

	if (actual_refs > minimum_refs)
		return 0;

	return 1;
}

EXPORT_SYMBOL(may_umount_tree);

/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts.  If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	int ret = 1;
	down_read(&namespace_sem);
	lock_mount_hash();
	if (propagate_mount_busy(real_mount(mnt), 2))
		ret = 0;
	unlock_mount_hash();
	up_read(&namespace_sem);
	return ret;
}

EXPORT_SYMBOL(may_umount);

#ifdef CONFIG_FSNOTIFY
static void mnt_notify(struct mount *p)
{
	if (!p->prev_ns && p->mnt_ns) {
		fsnotify_mnt_attach(p->mnt_ns, &p->mnt);
	} else if (p->prev_ns && !p->mnt_ns) {
		fsnotify_mnt_detach(p->prev_ns, &p->mnt);
	} else if (p->prev_ns == p->mnt_ns) {
		fsnotify_mnt_move(p->mnt_ns, &p->mnt);
	} else {
		fsnotify_mnt_detach(p->prev_ns, &p->mnt);
		fsnotify_mnt_attach(p->mnt_ns, &p->mnt);
	}
	p->prev_ns = p->mnt_ns;
}

static void notify_mnt_list(void)
{
	struct mount *m, *tmp;
	/*
	 * Notify about mounts that were added, reparented or detached, or
	 * that remain connected after unmount.
	 */
	list_for_each_entry_safe(m, tmp, &notify_list, to_notify) {
		mnt_notify(m);
		list_del_init(&m->to_notify);
	}
}

static bool need_notify_mnt_list(void)
{
	return !list_empty(&notify_list);
}
#else
static void notify_mnt_list(void)
{
}

static bool need_notify_mnt_list(void)
{
	return false;
}
#endif

static void namespace_unlock(void)
{
	struct hlist_head head;
	struct hlist_node *p;
	struct mount *m;
	LIST_HEAD(list);

	hlist_move_list(&unmounted, &head);
	list_splice_init(&ex_mountpoints, &list);

	if (need_notify_mnt_list()) {
		/*
		 * No point blocking out concurrent readers while notifications
		 * are sent. This will also allow statmount()/listmount() to run
		 * concurrently.
		 */
		downgrade_write(&namespace_sem);
		notify_mnt_list();
		up_read(&namespace_sem);
	} else {
		up_write(&namespace_sem);
	}

	shrink_dentry_list(&list);

	if (likely(hlist_empty(&head)))
		return;

	synchronize_rcu_expedited();

	hlist_for_each_entry_safe(m, p, &head, mnt_umount) {
		hlist_del(&m->mnt_umount);
		mntput(&m->mnt);
	}
}

static inline void namespace_lock(void)
{
	down_write(&namespace_sem);
}
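/*
 * Editor's sketch (illustrative): the usual shape of a tree-changing
 * operation in this file, cf. do_umount() below:
 *
 *	namespace_lock();
 *	lock_mount_hash();
 *	...			// modify the mount tree
 *	unlock_mount_hash();
 *	namespace_unlock();	// also drains "unmounted" and ex_mountpoints
 */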

enum umount_tree_flags {
	UMOUNT_SYNC = 1,
	UMOUNT_PROPAGATE = 2,
	UMOUNT_CONNECTED = 4,
};

static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how)
{
	/* Leaving mounts connected is only valid for lazy umounts */
	if (how & UMOUNT_SYNC)
		return true;

	/* A mount without a parent has nothing to be connected to */
	if (!mnt_has_parent(mnt))
		return true;

	/* Because the reference counting rules change when mounts are
	 * unmounted and connected, umounted mounts may not be
	 * connected to mounted mounts.
	 */
	if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT))
		return true;

	/* Has it been requested that the mount remain connected? */
	if (how & UMOUNT_CONNECTED)
		return false;

	/* Is the mount locked such that it needs to remain connected? */
	if (IS_MNT_LOCKED(mnt))
		return false;

	/* By default disconnect the mount */
	return true;
}

/*
 * mount_lock must be held
 * namespace_sem must be held for write
 */
static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
{
	LIST_HEAD(tmp_list);
	struct mount *p;

	if (how & UMOUNT_PROPAGATE)
		propagate_mount_unlock(mnt);

	/* Gather the mounts to umount */
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		p->mnt.mnt_flags |= MNT_UMOUNT;
		if (mnt_ns_attached(p))
			move_from_ns(p, &tmp_list);
		else
			list_move(&p->mnt_list, &tmp_list);
	}

	/* Hide the mounts from mnt_mounts */
	list_for_each_entry(p, &tmp_list, mnt_list) {
		list_del_init(&p->mnt_child);
	}

	/* Add propagated mounts to the tmp_list */
	if (how & UMOUNT_PROPAGATE)
		propagate_umount(&tmp_list);

	while (!list_empty(&tmp_list)) {
		struct mnt_namespace *ns;
		bool disconnect;
		p = list_first_entry(&tmp_list, struct mount, mnt_list);
		list_del_init(&p->mnt_expire);
		list_del_init(&p->mnt_list);
		ns = p->mnt_ns;
		if (ns) {
			ns->nr_mounts--;
			__touch_mnt_namespace(ns);
		}
		p->mnt_ns = NULL;
		if (how & UMOUNT_SYNC)
			p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;

		disconnect = disconnect_mount(p, how);
		if (mnt_has_parent(p)) {
			mnt_add_count(p->mnt_parent, -1);
			if (!disconnect) {
				/* Don't forget about p */
				list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
			} else {
				umount_mnt(p);
			}
		}
		change_mnt_propagation(p, MS_PRIVATE);
		if (disconnect)
			hlist_add_head(&p->mnt_umount, &unmounted);

		/*
		 * At this point p->mnt_ns is NULL, notification will be queued
		 * only if
		 *
		 *  - p->prev_ns is non-NULL *and*
		 *  - p->prev_ns->n_fsnotify_marks is non-NULL
		 *
		 * This will preclude queuing the mount if this is a cleanup
		 * after a failed copy_tree() or destruction of an anonymous
		 * namespace, etc.
		 */
		mnt_notify_add(p);
	}
}

static void shrink_submounts(struct mount *mnt);

static int do_umount_root(struct super_block *sb)
{
	int ret = 0;

	down_write(&sb->s_umount);
	if (!sb_rdonly(sb)) {
		struct fs_context *fc;

		fc = fs_context_for_reconfigure(sb->s_root, SB_RDONLY,
						SB_RDONLY);
		if (IS_ERR(fc)) {
			ret = PTR_ERR(fc);
		} else {
			ret = parse_monolithic_mount_data(fc, NULL);
			if (!ret)
				ret = reconfigure_super(fc);
			put_fs_context(fc);
		}
	}
	up_write(&sb->s_umount);
	return ret;
}
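/*
 * Editor's note (illustrative summary): how callers combine the
 * umount_tree() flags above:
 *
 *	umount(2)			-> UMOUNT_PROPAGATE | UMOUNT_SYNC
 *	umount2(..., MNT_DETACH)	-> UMOUNT_PROPAGATE	(lazy)
 *	__detach_mounts()		-> UMOUNT_CONNECTED
 */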
1974 */ 1975 lock_mount_hash(); 1976 if (mnt_get_count(mnt) != 2) { 1977 unlock_mount_hash(); 1978 return -EBUSY; 1979 } 1980 unlock_mount_hash(); 1981 1982 if (!xchg(&mnt->mnt_expiry_mark, 1)) 1983 return -EAGAIN; 1984 } 1985 1986 /* 1987 * If we may have to abort operations to get out of this 1988 * mount, and they will themselves hold resources we must 1989 * allow the fs to do things. In the Unix tradition of 1990 * 'Gee that's tricky, let's do it in userspace' the umount_begin 1991 * might fail to complete on the first run through as other tasks 1992 * must return, and the like. That's for the mount program to worry 1993 * about for the moment. 1994 */ 1995 1996 if (flags & MNT_FORCE && sb->s_op->umount_begin) { 1997 sb->s_op->umount_begin(sb); 1998 } 1999 2000 /* 2001 * No sense to grab the lock for this test, but test itself looks 2002 * somewhat bogus. Suggestions for better replacement? 2003 * Ho-hum... In principle, we might treat that as umount + switch 2004 * to rootfs. GC would eventually take care of the old vfsmount. 2005 * Actually it makes sense, especially if rootfs would contain a 2006 * /reboot - static binary that would close all descriptors and 2007 * call reboot(2). Then init(8) could umount root and exec /reboot. 2008 */ 2009 if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) { 2010 /* 2011 * Special case for "unmounting" root ... 2012 * we just try to remount it readonly. 2013 */ 2014 if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN)) 2015 return -EPERM; 2016 return do_umount_root(sb); 2017 } 2018 2019 namespace_lock(); 2020 lock_mount_hash(); 2021 2022 /* Recheck MNT_LOCKED with the locks held */ 2023 retval = -EINVAL; 2024 if (mnt->mnt.mnt_flags & MNT_LOCKED) 2025 goto out; 2026 2027 event++; 2028 if (flags & MNT_DETACH) { 2029 if (mnt_ns_attached(mnt) || !list_empty(&mnt->mnt_list)) 2030 umount_tree(mnt, UMOUNT_PROPAGATE); 2031 retval = 0; 2032 } else { 2033 shrink_submounts(mnt); 2034 retval = -EBUSY; 2035 if (!propagate_mount_busy(mnt, 2)) { 2036 if (mnt_ns_attached(mnt) || !list_empty(&mnt->mnt_list)) 2037 umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC); 2038 retval = 0; 2039 } 2040 } 2041 out: 2042 unlock_mount_hash(); 2043 namespace_unlock(); 2044 return retval; 2045 } 2046 2047 /* 2048 * __detach_mounts - lazily unmount all mounts on the specified dentry 2049 * 2050 * During unlink, rmdir, and d_drop it is possible to lose the path 2051 * to an existing mountpoint, and wind up leaking the mount. 2052 * detach_mounts allows lazily unmounting those mounts instead of 2053 * leaking them. 2054 * 2055 * The caller may hold dentry->d_inode->i_mutex. 2056 */ 2057 void __detach_mounts(struct dentry *dentry) 2058 { 2059 struct mountpoint *mp; 2060 struct mount *mnt; 2061 2062 namespace_lock(); 2063 lock_mount_hash(); 2064 mp = lookup_mountpoint(dentry); 2065 if (!mp) 2066 goto out_unlock; 2067 2068 event++; 2069 while (!hlist_empty(&mp->m_list)) { 2070 mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list); 2071 if (mnt->mnt.mnt_flags & MNT_UMOUNT) { 2072 umount_mnt(mnt); 2073 hlist_add_head(&mnt->mnt_umount, &unmounted); 2074 } 2075 else umount_tree(mnt, UMOUNT_CONNECTED); 2076 } 2077 put_mountpoint(mp); 2078 out_unlock: 2079 unlock_mount_hash(); 2080 namespace_unlock(); 2081 } 2082 2083 /* 2084 * Is the caller allowed to modify its namespace?
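 * (Concretely, per the ns_capable() check below: CAP_SYS_ADMIN in the user namespace owning the caller's mount namespace, so a task in its own user and mount namespace pair may mount there without global privilege.)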
2085 */ 2086 bool may_mount(void) 2087 { 2088 return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN); 2089 } 2090 2091 static void warn_mandlock(void) 2092 { 2093 pr_warn_once("=======================================================\n" 2094 "WARNING: The mand mount option has been deprecated\n" 2095 " and is ignored by this kernel. Remove the mand\n" 2096 " option from the mount to silence this warning.\n" 2097 "=======================================================\n"); 2098 } 2099 2100 static int can_umount(const struct path *path, int flags) 2101 { 2102 struct mount *mnt = real_mount(path->mnt); 2103 2104 if (!may_mount()) 2105 return -EPERM; 2106 if (!path_mounted(path)) 2107 return -EINVAL; 2108 if (!check_mnt(mnt)) 2109 return -EINVAL; 2110 if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */ 2111 return -EINVAL; 2112 if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN)) 2113 return -EPERM; 2114 return 0; 2115 } 2116 2117 // caller is responsible for flags being sane 2118 int path_umount(struct path *path, int flags) 2119 { 2120 struct mount *mnt = real_mount(path->mnt); 2121 int ret; 2122 2123 ret = can_umount(path, flags); 2124 if (!ret) 2125 ret = do_umount(mnt, flags); 2126 2127 /* we mustn't call path_put() as that would clear mnt_expiry_mark */ 2128 dput(path->dentry); 2129 mntput_no_expire(mnt); 2130 return ret; 2131 } 2132 2133 static int ksys_umount(char __user *name, int flags) 2134 { 2135 int lookup_flags = LOOKUP_MOUNTPOINT; 2136 struct path path; 2137 int ret; 2138 2139 // basic validity checks done first 2140 if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW)) 2141 return -EINVAL; 2142 2143 if (!(flags & UMOUNT_NOFOLLOW)) 2144 lookup_flags |= LOOKUP_FOLLOW; 2145 ret = user_path_at(AT_FDCWD, name, lookup_flags, &path); 2146 if (ret) 2147 return ret; 2148 return path_umount(&path, flags); 2149 } 2150 2151 SYSCALL_DEFINE2(umount, char __user *, name, int, flags) 2152 { 2153 return ksys_umount(name, flags); 2154 } 2155 2156 #ifdef __ARCH_WANT_SYS_OLDUMOUNT 2157 2158 /* 2159 * The 2.0 compatible umount. No flags. 2160 */ 2161 SYSCALL_DEFINE1(oldumount, char __user *, name) 2162 { 2163 return ksys_umount(name, 0); 2164 } 2165 2166 #endif 2167 2168 static bool is_mnt_ns_file(struct dentry *dentry) 2169 { 2170 struct ns_common *ns; 2171 2172 /* Is this a proxy for a mount namespace? */ 2173 if (dentry->d_op != &ns_dentry_operations) 2174 return false; 2175 2176 ns = d_inode(dentry)->i_private; 2177 2178 return ns->ops == &mntns_operations; 2179 } 2180 2181 struct ns_common *from_mnt_ns(struct mnt_namespace *mnt) 2182 { 2183 return &mnt->ns; 2184 } 2185 2186 struct mnt_namespace *get_sequential_mnt_ns(struct mnt_namespace *mntns, bool previous) 2187 { 2188 guard(rcu)(); 2189 2190 for (;;) { 2191 struct list_head *list; 2192 2193 if (previous) 2194 list = rcu_dereference(list_bidir_prev_rcu(&mntns->mnt_ns_list)); 2195 else 2196 list = rcu_dereference(list_next_rcu(&mntns->mnt_ns_list)); 2197 if (list_is_head(list, &mnt_ns_list)) 2198 return ERR_PTR(-ENOENT); 2199 2200 mntns = list_entry_rcu(list, struct mnt_namespace, mnt_ns_list); 2201 2202 /* 2203 * The last passive reference count is put with RCU 2204 * delay so accessing the mount namespace is not just 2205 * safe but all relevant members are still valid. 2206 */ 2207 if (!ns_capable_noaudit(mntns->user_ns, CAP_SYS_ADMIN)) 2208 continue; 2209 2210 /* 2211 * We need an active reference count as we're persisting 2212 * the mount namespace and it might already be on its 2213 * deathbed.
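 * refcount_inc_not_zero() only succeeds while the active count is still non-zero, so a namespace that has already begun tearing down is skipped here rather than resurrected.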
2214 */ 2215 if (!refcount_inc_not_zero(&mntns->ns.count)) 2216 continue; 2217 2218 return mntns; 2219 } 2220 } 2221 2222 struct mnt_namespace *mnt_ns_from_dentry(struct dentry *dentry) 2223 { 2224 if (!is_mnt_ns_file(dentry)) 2225 return NULL; 2226 2227 return to_mnt_ns(get_proc_ns(dentry->d_inode)); 2228 } 2229 2230 static bool mnt_ns_loop(struct dentry *dentry) 2231 { 2232 /* Could bind mounting the mount namespace inode cause a 2233 * mount namespace loop? 2234 */ 2235 struct mnt_namespace *mnt_ns = mnt_ns_from_dentry(dentry); 2236 2237 if (!mnt_ns) 2238 return false; 2239 2240 return current->nsproxy->mnt_ns->seq >= mnt_ns->seq; 2241 } 2242 2243 struct mount *copy_tree(struct mount *src_root, struct dentry *dentry, 2244 int flag) 2245 { 2246 struct mount *res, *src_parent, *src_root_child, *src_mnt, 2247 *dst_parent, *dst_mnt; 2248 2249 if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(src_root)) 2250 return ERR_PTR(-EINVAL); 2251 2252 if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry)) 2253 return ERR_PTR(-EINVAL); 2254 2255 res = dst_mnt = clone_mnt(src_root, dentry, flag); 2256 if (IS_ERR(dst_mnt)) 2257 return dst_mnt; 2258 2259 src_parent = src_root; 2260 dst_mnt->mnt_mountpoint = src_root->mnt_mountpoint; 2261 2262 list_for_each_entry(src_root_child, &src_root->mnt_mounts, mnt_child) { 2263 if (!is_subdir(src_root_child->mnt_mountpoint, dentry)) 2264 continue; 2265 2266 for (src_mnt = src_root_child; src_mnt; 2267 src_mnt = next_mnt(src_mnt, src_root_child)) { 2268 if (!(flag & CL_COPY_UNBINDABLE) && 2269 IS_MNT_UNBINDABLE(src_mnt)) { 2270 if (src_mnt->mnt.mnt_flags & MNT_LOCKED) { 2271 /* Both unbindable and locked. */ 2272 dst_mnt = ERR_PTR(-EPERM); 2273 goto out; 2274 } else { 2275 src_mnt = skip_mnt_tree(src_mnt); 2276 continue; 2277 } 2278 } 2279 if (!(flag & CL_COPY_MNT_NS_FILE) && 2280 is_mnt_ns_file(src_mnt->mnt.mnt_root)) { 2281 src_mnt = skip_mnt_tree(src_mnt); 2282 continue; 2283 } 2284 while (src_parent != src_mnt->mnt_parent) { 2285 src_parent = src_parent->mnt_parent; 2286 dst_mnt = dst_mnt->mnt_parent; 2287 } 2288 2289 src_parent = src_mnt; 2290 dst_parent = dst_mnt; 2291 dst_mnt = clone_mnt(src_mnt, src_mnt->mnt.mnt_root, flag); 2292 if (IS_ERR(dst_mnt)) 2293 goto out; 2294 lock_mount_hash(); 2295 list_add_tail(&dst_mnt->mnt_list, &res->mnt_list); 2296 attach_mnt(dst_mnt, dst_parent, src_parent->mnt_mp, false); 2297 unlock_mount_hash(); 2298 } 2299 } 2300 return res; 2301 2302 out: 2303 if (res) { 2304 lock_mount_hash(); 2305 umount_tree(res, UMOUNT_SYNC); 2306 unlock_mount_hash(); 2307 } 2308 return dst_mnt; 2309 } 2310 2311 /* Caller should check returned pointer for errors */ 2312 2313 struct vfsmount *collect_mounts(const struct path *path) 2314 { 2315 struct mount *tree; 2316 namespace_lock(); 2317 if (!check_mnt(real_mount(path->mnt))) 2318 tree = ERR_PTR(-EINVAL); 2319 else 2320 tree = copy_tree(real_mount(path->mnt), path->dentry, 2321 CL_COPY_ALL | CL_PRIVATE); 2322 namespace_unlock(); 2323 if (IS_ERR(tree)) 2324 return ERR_CAST(tree); 2325 return &tree->mnt; 2326 } 2327 2328 static void free_mnt_ns(struct mnt_namespace *); 2329 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *, bool); 2330 2331 void dissolve_on_fput(struct vfsmount *mnt) 2332 { 2333 struct mnt_namespace *ns; 2334 namespace_lock(); 2335 lock_mount_hash(); 2336 ns = real_mount(mnt)->mnt_ns; 2337 if (ns) { 2338 if (is_anon_ns(ns)) 2339 umount_tree(real_mount(mnt), UMOUNT_CONNECTED); 2340 else 2341 ns = NULL; 2342 } 2343 unlock_mount_hash(); 2344 
namespace_unlock(); 2345 if (ns) 2346 free_mnt_ns(ns); 2347 } 2348 2349 void drop_collected_mounts(struct vfsmount *mnt) 2350 { 2351 namespace_lock(); 2352 lock_mount_hash(); 2353 umount_tree(real_mount(mnt), 0); 2354 unlock_mount_hash(); 2355 namespace_unlock(); 2356 } 2357 2358 bool has_locked_children(struct mount *mnt, struct dentry *dentry) 2359 { 2360 struct mount *child; 2361 2362 list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) { 2363 if (!is_subdir(child->mnt_mountpoint, dentry)) 2364 continue; 2365 2366 if (child->mnt.mnt_flags & MNT_LOCKED) 2367 return true; 2368 } 2369 return false; 2370 } 2371 2372 /** 2373 * clone_private_mount - create a private clone of a path 2374 * @path: path to clone 2375 * 2376 * This creates a new vfsmount, which will be the clone of @path. The new mount 2377 * will not be attached anywhere in the namespace and will be private (i.e. 2378 * changes to the originating mount won't be propagated into this). 2379 * 2380 * Release with mntput(). 2381 */ 2382 struct vfsmount *clone_private_mount(const struct path *path) 2383 { 2384 struct mount *old_mnt = real_mount(path->mnt); 2385 struct mount *new_mnt; 2386 2387 down_read(&namespace_sem); 2388 if (IS_MNT_UNBINDABLE(old_mnt)) 2389 goto invalid; 2390 2391 if (!check_mnt(old_mnt)) 2392 goto invalid; 2393 2394 if (has_locked_children(old_mnt, path->dentry)) 2395 goto invalid; 2396 2397 new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE); 2398 up_read(&namespace_sem); 2399 2400 if (IS_ERR(new_mnt)) 2401 return ERR_CAST(new_mnt); 2402 2403 /* Longterm mount to be removed by kern_unmount*() */ 2404 new_mnt->mnt_ns = MNT_NS_INTERNAL; 2405 2406 return &new_mnt->mnt; 2407 2408 invalid: 2409 up_read(&namespace_sem); 2410 return ERR_PTR(-EINVAL); 2411 } 2412 EXPORT_SYMBOL_GPL(clone_private_mount); 2413 2414 int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg, 2415 struct vfsmount *root) 2416 { 2417 struct mount *mnt; 2418 int res = f(root, arg); 2419 if (res) 2420 return res; 2421 list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) { 2422 res = f(&mnt->mnt, arg); 2423 if (res) 2424 return res; 2425 } 2426 return 0; 2427 } 2428 2429 static void lock_mnt_tree(struct mount *mnt) 2430 { 2431 struct mount *p; 2432 2433 for (p = mnt; p; p = next_mnt(p, mnt)) { 2434 int flags = p->mnt.mnt_flags; 2435 /* Don't allow unprivileged users to change mount flags */ 2436 flags |= MNT_LOCK_ATIME; 2437 2438 if (flags & MNT_READONLY) 2439 flags |= MNT_LOCK_READONLY; 2440 2441 if (flags & MNT_NODEV) 2442 flags |= MNT_LOCK_NODEV; 2443 2444 if (flags & MNT_NOSUID) 2445 flags |= MNT_LOCK_NOSUID; 2446 2447 if (flags & MNT_NOEXEC) 2448 flags |= MNT_LOCK_NOEXEC; 2449 /* Don't allow unprivileged users to reveal what is under a mount */ 2450 if (list_empty(&p->mnt_expire)) 2451 flags |= MNT_LOCKED; 2452 p->mnt.mnt_flags = flags; 2453 } 2454 } 2455 2456 static void cleanup_group_ids(struct mount *mnt, struct mount *end) 2457 { 2458 struct mount *p; 2459 2460 for (p = mnt; p != end; p = next_mnt(p, mnt)) { 2461 if (p->mnt_group_id && !IS_MNT_SHARED(p)) 2462 mnt_release_group_id(p); 2463 } 2464 } 2465 2466 static int invent_group_ids(struct mount *mnt, bool recurse) 2467 { 2468 struct mount *p; 2469 2470 for (p = mnt; p; p = recurse ? 
next_mnt(p, mnt) : NULL) { 2471 if (!p->mnt_group_id && !IS_MNT_SHARED(p)) { 2472 int err = mnt_alloc_group_id(p); 2473 if (err) { 2474 cleanup_group_ids(mnt, p); 2475 return err; 2476 } 2477 } 2478 } 2479 2480 return 0; 2481 } 2482 2483 int count_mounts(struct mnt_namespace *ns, struct mount *mnt) 2484 { 2485 unsigned int max = READ_ONCE(sysctl_mount_max); 2486 unsigned int mounts = 0; 2487 struct mount *p; 2488 2489 if (ns->nr_mounts >= max) 2490 return -ENOSPC; 2491 max -= ns->nr_mounts; 2492 if (ns->pending_mounts >= max) 2493 return -ENOSPC; 2494 max -= ns->pending_mounts; 2495 2496 for (p = mnt; p; p = next_mnt(p, mnt)) 2497 mounts++; 2498 2499 if (mounts > max) 2500 return -ENOSPC; 2501 2502 ns->pending_mounts += mounts; 2503 return 0; 2504 } 2505 2506 enum mnt_tree_flags_t { 2507 MNT_TREE_MOVE = BIT(0), 2508 MNT_TREE_BENEATH = BIT(1), 2509 }; 2510 2511 /** 2512 * attach_recursive_mnt - attach a source mount tree 2513 * @source_mnt: mount tree to be attached 2514 * @top_mnt: mount that @source_mnt will be mounted on or mounted beneath 2515 * @dest_mp: the mountpoint @source_mnt will be mounted at 2516 * @flags: modify how @source_mnt is supposed to be attached 2517 * 2518 * NOTE: the tables below explain the semantics when a source mount 2519 * of a given type is attached to a destination mount of a given type. 2520 * --------------------------------------------------------------------------- 2521 * | BIND MOUNT OPERATION | 2522 * |************************************************************************** 2523 * | source-->| shared | private | slave | unbindable | 2524 * | dest | | | | | 2525 * | | | | | | | 2526 * | v | | | | | 2527 * |************************************************************************** 2528 * | shared | shared (++) | shared (+) | shared(+++)| invalid | 2529 * | | | | | | 2530 * |non-shared| shared (+) | private | slave (*) | invalid | 2531 * *************************************************************************** 2532 * A bind operation clones the source mount and mounts the clone on the 2533 * destination mount. 2534 * 2535 * (++) the cloned mount is propagated to all the mounts in the propagation 2536 * tree of the destination mount and the cloned mount is added to 2537 * the peer group of the source mount. 2538 * (+) the cloned mount is created under the destination mount and is marked 2539 * as shared. The cloned mount is added to the peer group of the source 2540 * mount. 2541 * (+++) the mount is propagated to all the mounts in the propagation tree 2542 * of the destination mount and the cloned mount is made slave 2543 * of the same master as that of the source mount. The cloned mount 2544 * is marked as 'shared and slave'. 2545 * (*) the cloned mount is made a slave of the same master as that of the 2546 * source mount. 2547 * 2548 * --------------------------------------------------------------------------- 2549 * | MOVE MOUNT OPERATION | 2550 * |************************************************************************** 2551 * | source-->| shared | private | slave | unbindable | 2552 * | dest | | | | | 2553 * | | | | | | | 2554 * | v | | | | | 2555 * |************************************************************************** 2556 * | shared | shared (+) | shared (+) | shared(+++) | invalid | 2557 * | | | | | | 2558 * |non-shared| shared (+*) | private | slave (*) | unbindable | 2559 * *************************************************************************** 2560 * 2561 * (+) the mount is moved to the destination
and is then propagated to 2562 * all the mounts in the propagation tree of the destination mount. 2563 * (+*) the mount is moved to the destination. 2564 * (+++) the mount is moved to the destination and is then propagated to 2565 * all the mounts belonging to the destination mount's propagation tree. 2566 * The mount is marked as 'shared and slave'. 2567 * (*) the mount continues to be a slave at the new location. 2568 * 2569 * If the source mount is a tree, the operations explained above are 2570 * applied to each mount in the tree. 2571 * Must be called without spinlocks held, since this function can sleep 2572 * in allocations. 2573 * 2574 * Context: The function expects namespace_lock() to be held. 2575 * Return: If @source_mnt was successfully attached 0 is returned. 2576 * Otherwise a negative error code is returned. 2577 */ 2578 static int attach_recursive_mnt(struct mount *source_mnt, 2579 struct mount *top_mnt, 2580 struct mountpoint *dest_mp, 2581 enum mnt_tree_flags_t flags) 2582 { 2583 struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns; 2584 HLIST_HEAD(tree_list); 2585 struct mnt_namespace *ns = top_mnt->mnt_ns; 2586 struct mountpoint *smp; 2587 struct mount *child, *dest_mnt, *p; 2588 struct hlist_node *n; 2589 int err = 0; 2590 bool moving = flags & MNT_TREE_MOVE, beneath = flags & MNT_TREE_BENEATH; 2591 2592 /* 2593 * Preallocate a mountpoint in case the new mounts need to be 2594 * mounted beneath mounts on the same mountpoint. 2595 */ 2596 smp = get_mountpoint(source_mnt->mnt.mnt_root); 2597 if (IS_ERR(smp)) 2598 return PTR_ERR(smp); 2599 2600 /* Is there space to add these mounts to the mount namespace? */ 2601 if (!moving) { 2602 err = count_mounts(ns, source_mnt); 2603 if (err) 2604 goto out; 2605 } 2606 2607 if (beneath) 2608 dest_mnt = top_mnt->mnt_parent; 2609 else 2610 dest_mnt = top_mnt; 2611 2612 if (IS_MNT_SHARED(dest_mnt)) { 2613 err = invent_group_ids(source_mnt, true); 2614 if (err) 2615 goto out; 2616 err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list); 2617 } 2618 lock_mount_hash(); 2619 if (err) 2620 goto out_cleanup_ids; 2621 2622 if (IS_MNT_SHARED(dest_mnt)) { 2623 for (p = source_mnt; p; p = next_mnt(p, source_mnt)) 2624 set_mnt_shared(p); 2625 } 2626 2627 if (moving) { 2628 if (beneath) 2629 dest_mp = smp; 2630 unhash_mnt(source_mnt); 2631 attach_mnt(source_mnt, top_mnt, dest_mp, beneath); 2632 mnt_notify_add(source_mnt); 2633 touch_mnt_namespace(source_mnt->mnt_ns); 2634 } else { 2635 if (source_mnt->mnt_ns) { 2636 LIST_HEAD(head); 2637 2638 /* move from anon - the caller will destroy */ 2639 for (p = source_mnt; p; p = next_mnt(p, source_mnt)) 2640 move_from_ns(p, &head); 2641 list_del_init(&head); 2642 } 2643 if (beneath) 2644 mnt_set_mountpoint_beneath(source_mnt, top_mnt, smp); 2645 else 2646 mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt); 2647 commit_tree(source_mnt); 2648 } 2649 2650 hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) { 2651 struct mount *q; 2652 hlist_del_init(&child->mnt_hash); 2653 q = __lookup_mnt(&child->mnt_parent->mnt, 2654 child->mnt_mountpoint); 2655 if (q) 2656 mnt_change_mountpoint(child, smp, q); 2657 /* Notice when we are propagating across user namespaces */ 2658 if (child->mnt_parent->mnt_ns->user_ns != user_ns) 2659 lock_mnt_tree(child); 2660 child->mnt.mnt_flags &= ~MNT_LOCKED; 2661 commit_tree(child); 2662 } 2663 put_mountpoint(smp); 2664 unlock_mount_hash(); 2665 2666 return 0; 2667 2668 out_cleanup_ids: 2669 while (!hlist_empty(&tree_list)) { 2670 child =
hlist_entry(tree_list.first, struct mount, mnt_hash); 2671 child->mnt_parent->mnt_ns->pending_mounts = 0; 2672 umount_tree(child, UMOUNT_SYNC); 2673 } 2674 unlock_mount_hash(); 2675 cleanup_group_ids(source_mnt, NULL); 2676 out: 2677 ns->pending_mounts = 0; 2678 2679 read_seqlock_excl(&mount_lock); 2680 put_mountpoint(smp); 2681 read_sequnlock_excl(&mount_lock); 2682 2683 return err; 2684 } 2685 2686 /** 2687 * do_lock_mount - lock mount and mountpoint 2688 * @path: target path 2689 * @beneath: whether the intention is to mount beneath @path 2690 * 2691 * Follow the mount stack on @path until the top mount @mnt is found. If 2692 * the initial @path->{mnt,dentry} is a mountpoint, look up the first 2693 * mount stacked on top of it. Then simply follow @{mnt,mnt->mnt_root} 2694 * until nothing is stacked on top of it anymore. 2695 * 2696 * Acquire the inode_lock() on the top mount's ->mnt_root to protect 2697 * against concurrent removal of the new mountpoint from another mount 2698 * namespace. 2699 * 2700 * If @beneath is requested, the inode_lock() on @mnt's mountpoint 2701 * @mp on @mnt->mnt_parent must be acquired instead. This protects against a 2702 * concurrent unlink of @mp->mnt_dentry from another mount namespace 2703 * where @mnt doesn't have a child mount mounted at @mp. A concurrent 2704 * removal of @mnt->mnt_root doesn't matter as nothing will be mounted 2705 * on top of it for @beneath. 2706 * 2707 * In addition, @beneath needs to make sure that @mnt hasn't been 2708 * unmounted or moved from its current mountpoint in between dropping 2709 * @mount_lock and acquiring @namespace_sem. For the !@beneath case @mnt 2710 * being unmounted would be detected later by e.g., calling 2711 * check_mnt(mnt) in the function it's called from. For the @beneath 2712 * case however, it's useful to detect it directly in do_lock_mount(). 2713 * If @mnt hasn't been unmounted then @mnt->mnt_mountpoint still points 2714 * to @mnt->mnt_mp->m_dentry. But if @mnt has been unmounted it will 2715 * point to @mnt->mnt_root and @mnt->mnt_mp will be NULL. 2716 * 2717 * Return: Either the target mountpoint on the top mount or the top 2718 * mount's mountpoint.
2719 */ 2720 static struct mountpoint *do_lock_mount(struct path *path, bool beneath) 2721 { 2722 struct vfsmount *mnt = path->mnt; 2723 struct dentry *dentry; 2724 struct mountpoint *mp = ERR_PTR(-ENOENT); 2725 2726 for (;;) { 2727 struct mount *m; 2728 2729 if (beneath) { 2730 m = real_mount(mnt); 2731 read_seqlock_excl(&mount_lock); 2732 dentry = dget(m->mnt_mountpoint); 2733 read_sequnlock_excl(&mount_lock); 2734 } else { 2735 dentry = path->dentry; 2736 } 2737 2738 inode_lock(dentry->d_inode); 2739 if (unlikely(cant_mount(dentry))) { 2740 inode_unlock(dentry->d_inode); 2741 goto out; 2742 } 2743 2744 namespace_lock(); 2745 2746 if (beneath && (!is_mounted(mnt) || m->mnt_mountpoint != dentry)) { 2747 namespace_unlock(); 2748 inode_unlock(dentry->d_inode); 2749 goto out; 2750 } 2751 2752 mnt = lookup_mnt(path); 2753 if (likely(!mnt)) 2754 break; 2755 2756 namespace_unlock(); 2757 inode_unlock(dentry->d_inode); 2758 if (beneath) 2759 dput(dentry); 2760 path_put(path); 2761 path->mnt = mnt; 2762 path->dentry = dget(mnt->mnt_root); 2763 } 2764 2765 mp = get_mountpoint(dentry); 2766 if (IS_ERR(mp)) { 2767 namespace_unlock(); 2768 inode_unlock(dentry->d_inode); 2769 } 2770 2771 out: 2772 if (beneath) 2773 dput(dentry); 2774 2775 return mp; 2776 } 2777 2778 static inline struct mountpoint *lock_mount(struct path *path) 2779 { 2780 return do_lock_mount(path, false); 2781 } 2782 2783 static void unlock_mount(struct mountpoint *where) 2784 { 2785 struct dentry *dentry = where->m_dentry; 2786 2787 read_seqlock_excl(&mount_lock); 2788 put_mountpoint(where); 2789 read_sequnlock_excl(&mount_lock); 2790 2791 namespace_unlock(); 2792 inode_unlock(dentry->d_inode); 2793 } 2794 2795 static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp) 2796 { 2797 if (mnt->mnt.mnt_sb->s_flags & SB_NOUSER) 2798 return -EINVAL; 2799 2800 if (d_is_dir(mp->m_dentry) != 2801 d_is_dir(mnt->mnt.mnt_root)) 2802 return -ENOTDIR; 2803 2804 return attach_recursive_mnt(mnt, p, mp, 0); 2805 } 2806 2807 /* 2808 * Sanity check the flags to change_mnt_propagation. 2809 */ 2810 2811 static int flags_to_propagation_type(int ms_flags) 2812 { 2813 int type = ms_flags & ~(MS_REC | MS_SILENT); 2814 2815 /* Fail if any non-propagation flags are set */ 2816 if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE)) 2817 return 0; 2818 /* Only one propagation flag should be set */ 2819 if (!is_power_of_2(type)) 2820 return 0; 2821 return type; 2822 } 2823 2824 /* 2825 * recursively change the type of the mountpoint. 2826 */ 2827 static int do_change_type(struct path *path, int ms_flags) 2828 { 2829 struct mount *m; 2830 struct mount *mnt = real_mount(path->mnt); 2831 int recurse = ms_flags & MS_REC; 2832 int type; 2833 int err = 0; 2834 2835 if (!path_mounted(path)) 2836 return -EINVAL; 2837 2838 type = flags_to_propagation_type(ms_flags); 2839 if (!type) 2840 return -EINVAL; 2841 2842 namespace_lock(); 2843 if (type == MS_SHARED) { 2844 err = invent_group_ids(mnt, recurse); 2845 if (err) 2846 goto out_unlock; 2847 } 2848 2849 lock_mount_hash(); 2850 for (m = mnt; m; m = (recurse ? 
next_mnt(m, mnt) : NULL)) 2851 change_mnt_propagation(m, type); 2852 unlock_mount_hash(); 2853 2854 out_unlock: 2855 namespace_unlock(); 2856 return err; 2857 } 2858 2859 static struct mount *__do_loopback(struct path *old_path, int recurse) 2860 { 2861 struct mount *mnt = ERR_PTR(-EINVAL), *old = real_mount(old_path->mnt); 2862 2863 if (IS_MNT_UNBINDABLE(old)) 2864 return mnt; 2865 2866 if (!check_mnt(old)) { 2867 const struct dentry_operations *d_op = old_path->dentry->d_op; 2868 2869 if (d_op != &ns_dentry_operations && 2870 d_op != &pidfs_dentry_operations) 2871 return mnt; 2872 } 2873 2874 if (!recurse && has_locked_children(old, old_path->dentry)) 2875 return mnt; 2876 2877 if (recurse) 2878 mnt = copy_tree(old, old_path->dentry, CL_COPY_MNT_NS_FILE); 2879 else 2880 mnt = clone_mnt(old, old_path->dentry, 0); 2881 2882 if (!IS_ERR(mnt)) 2883 mnt->mnt.mnt_flags &= ~MNT_LOCKED; 2884 2885 return mnt; 2886 } 2887 2888 /* 2889 * do loopback mount. 2890 */ 2891 static int do_loopback(struct path *path, const char *old_name, 2892 int recurse) 2893 { 2894 struct path old_path; 2895 struct mount *mnt = NULL, *parent; 2896 struct mountpoint *mp; 2897 int err; 2898 if (!old_name || !*old_name) 2899 return -EINVAL; 2900 err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path); 2901 if (err) 2902 return err; 2903 2904 err = -EINVAL; 2905 if (mnt_ns_loop(old_path.dentry)) 2906 goto out; 2907 2908 mp = lock_mount(path); 2909 if (IS_ERR(mp)) { 2910 err = PTR_ERR(mp); 2911 goto out; 2912 } 2913 2914 parent = real_mount(path->mnt); 2915 if (!check_mnt(parent)) 2916 goto out2; 2917 2918 mnt = __do_loopback(&old_path, recurse); 2919 if (IS_ERR(mnt)) { 2920 err = PTR_ERR(mnt); 2921 goto out2; 2922 } 2923 2924 err = graft_tree(mnt, parent, mp); 2925 if (err) { 2926 lock_mount_hash(); 2927 umount_tree(mnt, UMOUNT_SYNC); 2928 unlock_mount_hash(); 2929 } 2930 out2: 2931 unlock_mount(mp); 2932 out: 2933 path_put(&old_path); 2934 return err; 2935 } 2936 2937 static struct file *open_detached_copy(struct path *path, bool recursive) 2938 { 2939 struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns; 2940 struct mnt_namespace *ns = alloc_mnt_ns(user_ns, true); 2941 struct mount *mnt, *p; 2942 struct file *file; 2943 2944 if (IS_ERR(ns)) 2945 return ERR_CAST(ns); 2946 2947 namespace_lock(); 2948 mnt = __do_loopback(path, recursive); 2949 if (IS_ERR(mnt)) { 2950 namespace_unlock(); 2951 free_mnt_ns(ns); 2952 return ERR_CAST(mnt); 2953 } 2954 2955 lock_mount_hash(); 2956 for (p = mnt; p; p = next_mnt(p, mnt)) { 2957 mnt_add_to_ns(ns, p); 2958 ns->nr_mounts++; 2959 } 2960 ns->root = mnt; 2961 mntget(&mnt->mnt); 2962 unlock_mount_hash(); 2963 namespace_unlock(); 2964 2965 mntput(path->mnt); 2966 path->mnt = &mnt->mnt; 2967 file = dentry_open(path, O_PATH, current_cred()); 2968 if (IS_ERR(file)) 2969 dissolve_on_fput(path->mnt); 2970 else 2971 file->f_mode |= FMODE_NEED_UNMOUNT; 2972 return file; 2973 } 2974 2975 SYSCALL_DEFINE3(open_tree, int, dfd, const char __user *, filename, unsigned, flags) 2976 { 2977 struct file *file; 2978 struct path path; 2979 int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW; 2980 bool detached = flags & OPEN_TREE_CLONE; 2981 int error; 2982 int fd; 2983 2984 BUILD_BUG_ON(OPEN_TREE_CLOEXEC != O_CLOEXEC); 2985 2986 if (flags & ~(AT_EMPTY_PATH | AT_NO_AUTOMOUNT | AT_RECURSIVE | 2987 AT_SYMLINK_NOFOLLOW | OPEN_TREE_CLONE | 2988 OPEN_TREE_CLOEXEC)) 2989 return -EINVAL; 2990 2991 if ((flags & (AT_RECURSIVE | OPEN_TREE_CLONE)) == AT_RECURSIVE) 2992 return -EINVAL; 2993 
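/* Illustrative userspace sketch (the path /mnt is hypothetical, error handling omitted; call via a libc wrapper or syscall(2)): * * fd = open_tree(AT_FDCWD, "/mnt", OPEN_TREE_CLONE | AT_RECURSIVE | OPEN_TREE_CLOEXEC); * * yields an O_PATH-style fd referring to a detached copy of the mount tree at /mnt, which can later be attached elsewhere with move_mount(2). */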
2994 if (flags & AT_NO_AUTOMOUNT) 2995 lookup_flags &= ~LOOKUP_AUTOMOUNT; 2996 if (flags & AT_SYMLINK_NOFOLLOW) 2997 lookup_flags &= ~LOOKUP_FOLLOW; 2998 if (flags & AT_EMPTY_PATH) 2999 lookup_flags |= LOOKUP_EMPTY; 3000 3001 if (detached && !may_mount()) 3002 return -EPERM; 3003 3004 fd = get_unused_fd_flags(flags & O_CLOEXEC); 3005 if (fd < 0) 3006 return fd; 3007 3008 error = user_path_at(dfd, filename, lookup_flags, &path); 3009 if (unlikely(error)) { 3010 file = ERR_PTR(error); 3011 } else { 3012 if (detached) 3013 file = open_detached_copy(&path, flags & AT_RECURSIVE); 3014 else 3015 file = dentry_open(&path, O_PATH, current_cred()); 3016 path_put(&path); 3017 } 3018 if (IS_ERR(file)) { 3019 put_unused_fd(fd); 3020 return PTR_ERR(file); 3021 } 3022 fd_install(fd, file); 3023 return fd; 3024 } 3025 3026 /* 3027 * Don't allow locked mount flags to be cleared. 3028 * 3029 * No locks need to be held here while testing the various MNT_LOCK 3030 * flags because those flags can never be cleared once they are set. 3031 */ 3032 static bool can_change_locked_flags(struct mount *mnt, unsigned int mnt_flags) 3033 { 3034 unsigned int fl = mnt->mnt.mnt_flags; 3035 3036 if ((fl & MNT_LOCK_READONLY) && 3037 !(mnt_flags & MNT_READONLY)) 3038 return false; 3039 3040 if ((fl & MNT_LOCK_NODEV) && 3041 !(mnt_flags & MNT_NODEV)) 3042 return false; 3043 3044 if ((fl & MNT_LOCK_NOSUID) && 3045 !(mnt_flags & MNT_NOSUID)) 3046 return false; 3047 3048 if ((fl & MNT_LOCK_NOEXEC) && 3049 !(mnt_flags & MNT_NOEXEC)) 3050 return false; 3051 3052 if ((fl & MNT_LOCK_ATIME) && 3053 ((fl & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK))) 3054 return false; 3055 3056 return true; 3057 } 3058 3059 static int change_mount_ro_state(struct mount *mnt, unsigned int mnt_flags) 3060 { 3061 bool readonly_request = (mnt_flags & MNT_READONLY); 3062 3063 if (readonly_request == __mnt_is_readonly(&mnt->mnt)) 3064 return 0; 3065 3066 if (readonly_request) 3067 return mnt_make_readonly(mnt); 3068 3069 mnt->mnt.mnt_flags &= ~MNT_READONLY; 3070 return 0; 3071 } 3072 3073 static void set_mount_attributes(struct mount *mnt, unsigned int mnt_flags) 3074 { 3075 mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK; 3076 mnt->mnt.mnt_flags = mnt_flags; 3077 touch_mnt_namespace(mnt->mnt_ns); 3078 } 3079 3080 static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount *mnt) 3081 { 3082 struct super_block *sb = mnt->mnt_sb; 3083 3084 if (!__mnt_is_readonly(mnt) && 3085 (!(sb->s_iflags & SB_I_TS_EXPIRY_WARNED)) && 3086 (ktime_get_real_seconds() + TIME_UPTIME_SEC_MAX > sb->s_time_max)) { 3087 char *buf, *mntpath; 3088 3089 buf = (char *)__get_free_page(GFP_KERNEL); 3090 if (buf) 3091 mntpath = d_path(mountpoint, buf, PAGE_SIZE); 3092 else 3093 mntpath = ERR_PTR(-ENOMEM); 3094 if (IS_ERR(mntpath)) 3095 mntpath = "(unknown)"; 3096 3097 pr_warn("%s filesystem being %s at %s supports timestamps until %ptTd (0x%llx)\n", 3098 sb->s_type->name, 3099 is_mounted(mnt) ? "remounted" : "mounted", 3100 mntpath, &sb->s_time_max, 3101 (unsigned long long)sb->s_time_max); 3102 3103 sb->s_iflags |= SB_I_TS_EXPIRY_WARNED; 3104 if (buf) 3105 free_page((unsigned long)buf); 3106 } 3107 } 3108 3109 /* 3110 * Handle reconfiguration of the mountpoint only without alteration of the 3111 * superblock it refers to. This is triggered by specifying MS_REMOUNT|MS_BIND 3112 * to mount(2). 
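 * * Illustrative userspace trigger (a sketch; the mountpoint path /mnt/foo is hypothetical): * * mount(NULL, "/mnt/foo", NULL, MS_REMOUNT | MS_BIND | MS_RDONLY, NULL); * * which changes only the per-mountpoint MNT_* flags (here: read-only) and leaves the shared superblock untouched.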
3113 */ 3114 static int do_reconfigure_mnt(struct path *path, unsigned int mnt_flags) 3115 { 3116 struct super_block *sb = path->mnt->mnt_sb; 3117 struct mount *mnt = real_mount(path->mnt); 3118 int ret; 3119 3120 if (!check_mnt(mnt)) 3121 return -EINVAL; 3122 3123 if (!path_mounted(path)) 3124 return -EINVAL; 3125 3126 if (!can_change_locked_flags(mnt, mnt_flags)) 3127 return -EPERM; 3128 3129 /* 3130 * We're only checking whether the superblock is read-only not 3131 * changing it, so only take down_read(&sb->s_umount). 3132 */ 3133 down_read(&sb->s_umount); 3134 lock_mount_hash(); 3135 ret = change_mount_ro_state(mnt, mnt_flags); 3136 if (ret == 0) 3137 set_mount_attributes(mnt, mnt_flags); 3138 unlock_mount_hash(); 3139 up_read(&sb->s_umount); 3140 3141 mnt_warn_timestamp_expiry(path, &mnt->mnt); 3142 3143 return ret; 3144 } 3145 3146 /* 3147 * change filesystem flags. dir should be a physical root of filesystem. 3148 * If you've mounted a non-root directory somewhere and want to do remount 3149 * on it - tough luck. 3150 */ 3151 static int do_remount(struct path *path, int ms_flags, int sb_flags, 3152 int mnt_flags, void *data) 3153 { 3154 int err; 3155 struct super_block *sb = path->mnt->mnt_sb; 3156 struct mount *mnt = real_mount(path->mnt); 3157 struct fs_context *fc; 3158 3159 if (!check_mnt(mnt)) 3160 return -EINVAL; 3161 3162 if (!path_mounted(path)) 3163 return -EINVAL; 3164 3165 if (!can_change_locked_flags(mnt, mnt_flags)) 3166 return -EPERM; 3167 3168 fc = fs_context_for_reconfigure(path->dentry, sb_flags, MS_RMT_MASK); 3169 if (IS_ERR(fc)) 3170 return PTR_ERR(fc); 3171 3172 /* 3173 * Indicate to the filesystem that the remount request is coming 3174 * from the legacy mount system call. 3175 */ 3176 fc->oldapi = true; 3177 3178 err = parse_monolithic_mount_data(fc, data); 3179 if (!err) { 3180 down_write(&sb->s_umount); 3181 err = -EPERM; 3182 if (ns_capable(sb->s_user_ns, CAP_SYS_ADMIN)) { 3183 err = reconfigure_super(fc); 3184 if (!err) { 3185 lock_mount_hash(); 3186 set_mount_attributes(mnt, mnt_flags); 3187 unlock_mount_hash(); 3188 } 3189 } 3190 up_write(&sb->s_umount); 3191 } 3192 3193 mnt_warn_timestamp_expiry(path, &mnt->mnt); 3194 3195 put_fs_context(fc); 3196 return err; 3197 } 3198 3199 static inline int tree_contains_unbindable(struct mount *mnt) 3200 { 3201 struct mount *p; 3202 for (p = mnt; p; p = next_mnt(p, mnt)) { 3203 if (IS_MNT_UNBINDABLE(p)) 3204 return 1; 3205 } 3206 return 0; 3207 } 3208 3209 /* 3210 * Check that there aren't references to earlier/same mount namespaces in the 3211 * specified subtree. Such references can act as pins for mount namespaces 3212 * that aren't checked by the mount-cycle checking code, thereby allowing 3213 * cycles to be made. 
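 * * Example of the loop being prevented (illustrative): bind-mounting /proc/self/ns/mnt somewhere inside the current namespace would let the namespace pin itself; mnt_ns_loop() therefore rejects any nsfs reference whose mount namespace is not strictly newer than the caller's.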
3214 */ 3215 static bool check_for_nsfs_mounts(struct mount *subtree) 3216 { 3217 struct mount *p; 3218 bool ret = false; 3219 3220 lock_mount_hash(); 3221 for (p = subtree; p; p = next_mnt(p, subtree)) 3222 if (mnt_ns_loop(p->mnt.mnt_root)) 3223 goto out; 3224 3225 ret = true; 3226 out: 3227 unlock_mount_hash(); 3228 return ret; 3229 } 3230 3231 static int do_set_group(struct path *from_path, struct path *to_path) 3232 { 3233 struct mount *from, *to; 3234 int err; 3235 3236 from = real_mount(from_path->mnt); 3237 to = real_mount(to_path->mnt); 3238 3239 namespace_lock(); 3240 3241 err = -EINVAL; 3242 /* To and From must be mounted */ 3243 if (!is_mounted(&from->mnt)) 3244 goto out; 3245 if (!is_mounted(&to->mnt)) 3246 goto out; 3247 3248 err = -EPERM; 3249 /* We should be allowed to modify mount namespaces of both mounts */ 3250 if (!ns_capable(from->mnt_ns->user_ns, CAP_SYS_ADMIN)) 3251 goto out; 3252 if (!ns_capable(to->mnt_ns->user_ns, CAP_SYS_ADMIN)) 3253 goto out; 3254 3255 err = -EINVAL; 3256 /* To and From paths should be mount roots */ 3257 if (!path_mounted(from_path)) 3258 goto out; 3259 if (!path_mounted(to_path)) 3260 goto out; 3261 3262 /* Setting sharing groups is only allowed across same superblock */ 3263 if (from->mnt.mnt_sb != to->mnt.mnt_sb) 3264 goto out; 3265 3266 /* From mount root should be wider than To mount root */ 3267 if (!is_subdir(to->mnt.mnt_root, from->mnt.mnt_root)) 3268 goto out; 3269 3270 /* From mount should not have locked children in place of To's root */ 3271 if (has_locked_children(from, to->mnt.mnt_root)) 3272 goto out; 3273 3274 /* Setting sharing groups is only allowed on private mounts */ 3275 if (IS_MNT_SHARED(to) || IS_MNT_SLAVE(to)) 3276 goto out; 3277 3278 /* From should not be private */ 3279 if (!IS_MNT_SHARED(from) && !IS_MNT_SLAVE(from)) 3280 goto out; 3281 3282 if (IS_MNT_SLAVE(from)) { 3283 struct mount *m = from->mnt_master; 3284 3285 list_add(&to->mnt_slave, &m->mnt_slave_list); 3286 to->mnt_master = m; 3287 } 3288 3289 if (IS_MNT_SHARED(from)) { 3290 to->mnt_group_id = from->mnt_group_id; 3291 list_add(&to->mnt_share, &from->mnt_share); 3292 lock_mount_hash(); 3293 set_mnt_shared(to); 3294 unlock_mount_hash(); 3295 } 3296 3297 err = 0; 3298 out: 3299 namespace_unlock(); 3300 return err; 3301 } 3302 3303 /** 3304 * path_overmounted - check if path is overmounted 3305 * @path: path to check 3306 * 3307 * Check if path is overmounted, i.e., if there's a mount on top of 3308 * @path->mnt with @path->dentry as mountpoint. 3309 * 3310 * Context: This function expects namespace_lock() to be held. 3311 * Return: If path is overmounted true is returned, false if not. 3312 */ 3313 static inline bool path_overmounted(const struct path *path) 3314 { 3315 rcu_read_lock(); 3316 if (unlikely(__lookup_mnt(path->mnt, path->dentry))) { 3317 rcu_read_unlock(); 3318 return true; 3319 } 3320 rcu_read_unlock(); 3321 return false; 3322 } 3323 3324 /** 3325 * can_move_mount_beneath - check that we can mount beneath the top mount 3326 * @from: mount to mount beneath 3327 * @to: mount under which to mount 3328 * @mp: mountpoint of @to 3329 * 3330 * - Make sure that @to->dentry is actually the root of a mount under 3331 * which we can mount another mount. 3332 * - Make sure that nothing can be mounted beneath the caller's current 3333 * root or the rootfs of the namespace. 3334 * - Make sure that the caller can unmount the topmost mount ensuring 3335 * that the caller could reveal the underlying mountpoint. 
3336 * - Ensure that nothing has been mounted on top of @from before we 3337 * grabbed @namespace_sem to avoid creating pointless shadow mounts. 3338 * - Prevent mounting beneath a mount if the propagation relationship 3339 * between the source mount, parent mount, and top mount would lead to 3340 * nonsensical mount trees. 3341 * 3342 * Context: This function expects namespace_lock() to be held. 3343 * Return: On success 0, and on error a negative error code is returned. 3344 */ 3345 static int can_move_mount_beneath(const struct path *from, 3346 const struct path *to, 3347 const struct mountpoint *mp) 3348 { 3349 struct mount *mnt_from = real_mount(from->mnt), 3350 *mnt_to = real_mount(to->mnt), 3351 *parent_mnt_to = mnt_to->mnt_parent; 3352 3353 if (!mnt_has_parent(mnt_to)) 3354 return -EINVAL; 3355 3356 if (!path_mounted(to)) 3357 return -EINVAL; 3358 3359 if (IS_MNT_LOCKED(mnt_to)) 3360 return -EINVAL; 3361 3362 /* Avoid creating shadow mounts during mount propagation. */ 3363 if (path_overmounted(from)) 3364 return -EINVAL; 3365 3366 /* 3367 * Mounting beneath the rootfs only makes sense when the 3368 * semantics of pivot_root(".", ".") are used. 3369 */ 3370 if (&mnt_to->mnt == current->fs->root.mnt) 3371 return -EINVAL; 3372 if (parent_mnt_to == current->nsproxy->mnt_ns->root) 3373 return -EINVAL; 3374 3375 for (struct mount *p = mnt_from; mnt_has_parent(p); p = p->mnt_parent) 3376 if (p == mnt_to) 3377 return -EINVAL; 3378 3379 /* 3380 * If the parent mount propagates to the child mount this would 3381 * mean mounting @mnt_from on @mnt_to->mnt_parent and then 3382 * propagating a copy @c of @mnt_from on top of @mnt_to. This 3383 * defeats the whole purpose of mounting beneath another mount. 3384 */ 3385 if (propagation_would_overmount(parent_mnt_to, mnt_to, mp)) 3386 return -EINVAL; 3387 3388 /* 3389 * If @mnt_to->mnt_parent propagates to @mnt_from this would 3390 * mean propagating a copy @c of @mnt_from on top of @mnt_from. 3391 * Afterwards @mnt_from would be mounted on top of 3392 * @mnt_to->mnt_parent and @mnt_to would be unmounted from 3393 * @mnt->mnt_parent and remounted on @mnt_from. But since @c is 3394 * already mounted on @mnt_from, @mnt_to would ultimately be 3395 * remounted on top of @c. Afterwards, @mnt_from would be 3396 * covered by a copy @c of @mnt_from and @c would be covered by 3397 * @mnt_from itself. This defeats the whole purpose of mounting 3398 * @mnt_from beneath @mnt_to. 3399 */ 3400 if (propagation_would_overmount(parent_mnt_to, mnt_from, mp)) 3401 return -EINVAL; 3402 3403 return 0; 3404 } 3405 3406 static int do_move_mount(struct path *old_path, struct path *new_path, 3407 bool beneath) 3408 { 3409 struct mnt_namespace *ns; 3410 struct mount *p; 3411 struct mount *old; 3412 struct mount *parent; 3413 struct mountpoint *mp, *old_mp; 3414 int err; 3415 bool attached; 3416 enum mnt_tree_flags_t flags = 0; 3417 3418 mp = do_lock_mount(new_path, beneath); 3419 if (IS_ERR(mp)) 3420 return PTR_ERR(mp); 3421 3422 old = real_mount(old_path->mnt); 3423 p = real_mount(new_path->mnt); 3424 parent = old->mnt_parent; 3425 attached = mnt_has_parent(old); 3426 if (attached) 3427 flags |= MNT_TREE_MOVE; 3428 old_mp = old->mnt_mp; 3429 ns = old->mnt_ns; 3430 3431 err = -EINVAL; 3432 /* The mountpoint must be in our namespace. */ 3433 if (!check_mnt(p)) 3434 goto out; 3435 3436 /* The thing moved must be mounted... */ 3437 if (!is_mounted(&old->mnt)) 3438 goto out; 3439 3440 /* ... and either ours or the root of anon namespace */ 3441 if (!(attached ? 
check_mnt(old) : is_anon_ns(ns))) 3442 goto out; 3443 3444 if (old->mnt.mnt_flags & MNT_LOCKED) 3445 goto out; 3446 3447 if (!path_mounted(old_path)) 3448 goto out; 3449 3450 if (d_is_dir(new_path->dentry) != 3451 d_is_dir(old_path->dentry)) 3452 goto out; 3453 /* 3454 * Don't move a mount residing in a shared parent. 3455 */ 3456 if (attached && IS_MNT_SHARED(parent)) 3457 goto out; 3458 3459 if (beneath) { 3460 err = can_move_mount_beneath(old_path, new_path, mp); 3461 if (err) 3462 goto out; 3463 3464 err = -EINVAL; 3465 p = p->mnt_parent; 3466 flags |= MNT_TREE_BENEATH; 3467 } 3468 3469 /* 3470 * Don't move a mount tree containing unbindable mounts to a destination 3471 * mount which is shared. 3472 */ 3473 if (IS_MNT_SHARED(p) && tree_contains_unbindable(old)) 3474 goto out; 3475 err = -ELOOP; 3476 if (!check_for_nsfs_mounts(old)) 3477 goto out; 3478 for (; mnt_has_parent(p); p = p->mnt_parent) 3479 if (p == old) 3480 goto out; 3481 3482 err = attach_recursive_mnt(old, real_mount(new_path->mnt), mp, flags); 3483 if (err) 3484 goto out; 3485 3486 /* if the mount is moved, it should no longer be expired 3487 * automatically */ 3488 list_del_init(&old->mnt_expire); 3489 if (attached) 3490 put_mountpoint(old_mp); 3491 out: 3492 unlock_mount(mp); 3493 if (!err) { 3494 if (attached) 3495 mntput_no_expire(parent); 3496 else 3497 free_mnt_ns(ns); 3498 } 3499 return err; 3500 } 3501 3502 static int do_move_mount_old(struct path *path, const char *old_name) 3503 { 3504 struct path old_path; 3505 int err; 3506 3507 if (!old_name || !*old_name) 3508 return -EINVAL; 3509 3510 err = kern_path(old_name, LOOKUP_FOLLOW, &old_path); 3511 if (err) 3512 return err; 3513 3514 err = do_move_mount(&old_path, path, false); 3515 path_put(&old_path); 3516 return err; 3517 } 3518 3519 /* 3520 * add a mount into a namespace's mount tree 3521 */ 3522 static int do_add_mount(struct mount *newmnt, struct mountpoint *mp, 3523 const struct path *path, int mnt_flags) 3524 { 3525 struct mount *parent = real_mount(path->mnt); 3526 3527 mnt_flags &= ~MNT_INTERNAL_FLAGS; 3528 3529 if (unlikely(!check_mnt(parent))) { 3530 /* that's acceptable only for automounts done in private ns */ 3531 if (!(mnt_flags & MNT_SHRINKABLE)) 3532 return -EINVAL; 3533 /* ... and for those we'd better have mountpoint still alive */ 3534 if (!parent->mnt_ns) 3535 return -EINVAL; 3536 } 3537 3538 /* Refuse the same filesystem on the same mount point */ 3539 if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb && path_mounted(path)) 3540 return -EBUSY; 3541 3542 if (d_is_symlink(newmnt->mnt.mnt_root)) 3543 return -EINVAL; 3544 3545 newmnt->mnt.mnt_flags = mnt_flags; 3546 return graft_tree(newmnt, parent, mp); 3547 } 3548 3549 static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags); 3550 3551 /* 3552 * Create a new mount using a superblock configuration and request it 3553 * be added to the namespace tree.
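 * * This is the tail of the legacy mount(2) path: by this point the fs_context has already produced a superblock (fc->root), so all that remains is wrapping it in a vfsmount and grafting it at the target path.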
3554 */ 3555 static int do_new_mount_fc(struct fs_context *fc, struct path *mountpoint, 3556 unsigned int mnt_flags) 3557 { 3558 struct vfsmount *mnt; 3559 struct mountpoint *mp; 3560 struct super_block *sb = fc->root->d_sb; 3561 int error; 3562 3563 error = security_sb_kern_mount(sb); 3564 if (!error && mount_too_revealing(sb, &mnt_flags)) 3565 error = -EPERM; 3566 3567 if (unlikely(error)) { 3568 fc_drop_locked(fc); 3569 return error; 3570 } 3571 3572 up_write(&sb->s_umount); 3573 3574 mnt = vfs_create_mount(fc); 3575 if (IS_ERR(mnt)) 3576 return PTR_ERR(mnt); 3577 3578 mnt_warn_timestamp_expiry(mountpoint, mnt); 3579 3580 mp = lock_mount(mountpoint); 3581 if (IS_ERR(mp)) { 3582 mntput(mnt); 3583 return PTR_ERR(mp); 3584 } 3585 error = do_add_mount(real_mount(mnt), mp, mountpoint, mnt_flags); 3586 unlock_mount(mp); 3587 if (error < 0) 3588 mntput(mnt); 3589 return error; 3590 } 3591 3592 /* 3593 * create a new mount for userspace and request it to be added into the 3594 * namespace's tree 3595 */ 3596 static int do_new_mount(struct path *path, const char *fstype, int sb_flags, 3597 int mnt_flags, const char *name, void *data) 3598 { 3599 struct file_system_type *type; 3600 struct fs_context *fc; 3601 const char *subtype = NULL; 3602 int err = 0; 3603 3604 if (!fstype) 3605 return -EINVAL; 3606 3607 type = get_fs_type(fstype); 3608 if (!type) 3609 return -ENODEV; 3610 3611 if (type->fs_flags & FS_HAS_SUBTYPE) { 3612 subtype = strchr(fstype, '.'); 3613 if (subtype) { 3614 subtype++; 3615 if (!*subtype) { 3616 put_filesystem(type); 3617 return -EINVAL; 3618 } 3619 } 3620 } 3621 3622 fc = fs_context_for_mount(type, sb_flags); 3623 put_filesystem(type); 3624 if (IS_ERR(fc)) 3625 return PTR_ERR(fc); 3626 3627 /* 3628 * Indicate to the filesystem that the mount request is coming 3629 * from the legacy mount system call. 3630 */ 3631 fc->oldapi = true; 3632 3633 if (subtype) 3634 err = vfs_parse_fs_string(fc, "subtype", 3635 subtype, strlen(subtype)); 3636 if (!err && name) 3637 err = vfs_parse_fs_string(fc, "source", name, strlen(name)); 3638 if (!err) 3639 err = parse_monolithic_mount_data(fc, data); 3640 if (!err && !mount_capable(fc)) 3641 err = -EPERM; 3642 if (!err) 3643 err = vfs_get_tree(fc); 3644 if (!err) 3645 err = do_new_mount_fc(fc, path, mnt_flags); 3646 3647 put_fs_context(fc); 3648 return err; 3649 } 3650 3651 int finish_automount(struct vfsmount *m, const struct path *path) 3652 { 3653 struct dentry *dentry = path->dentry; 3654 struct mountpoint *mp; 3655 struct mount *mnt; 3656 int err; 3657 3658 if (!m) 3659 return 0; 3660 if (IS_ERR(m)) 3661 return PTR_ERR(m); 3662 3663 mnt = real_mount(m); 3664 /* The new mount record should have at least 2 refs to prevent it being 3665 * expired before we get a chance to add it 3666 */ 3667 BUG_ON(mnt_get_count(mnt) < 2); 3668 3669 if (m->mnt_sb == path->mnt->mnt_sb && 3670 m->mnt_root == dentry) { 3671 err = -ELOOP; 3672 goto discard; 3673 } 3674 3675 /* 3676 * we don't want to use lock_mount() - in this case finding something 3677 * that overmounts our mountpoint means "quietly drop what we've 3678 * got", not "try to mount it on top".
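 * (lock_mount() would follow whatever got mounted on top and graft the automount onto it; here we take the locks directly and rely on the path_overmounted() check below to detect that case and discard the new mount.)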
3679 */ 3680 inode_lock(dentry->d_inode); 3681 namespace_lock(); 3682 if (unlikely(cant_mount(dentry))) { 3683 err = -ENOENT; 3684 goto discard_locked; 3685 } 3686 if (path_overmounted(path)) { 3687 err = 0; 3688 goto discard_locked; 3689 } 3690 mp = get_mountpoint(dentry); 3691 if (IS_ERR(mp)) { 3692 err = PTR_ERR(mp); 3693 goto discard_locked; 3694 } 3695 3696 err = do_add_mount(mnt, mp, path, path->mnt->mnt_flags | MNT_SHRINKABLE); 3697 unlock_mount(mp); 3698 if (unlikely(err)) 3699 goto discard; 3700 mntput(m); 3701 return 0; 3702 3703 discard_locked: 3704 namespace_unlock(); 3705 inode_unlock(dentry->d_inode); 3706 discard: 3707 /* remove m from any expiration list it may be on */ 3708 if (!list_empty(&mnt->mnt_expire)) { 3709 namespace_lock(); 3710 list_del_init(&mnt->mnt_expire); 3711 namespace_unlock(); 3712 } 3713 mntput(m); 3714 mntput(m); 3715 return err; 3716 } 3717 3718 /** 3719 * mnt_set_expiry - Put a mount on an expiration list 3720 * @mnt: The mount to list. 3721 * @expiry_list: The list to add the mount to. 3722 */ 3723 void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list) 3724 { 3725 namespace_lock(); 3726 3727 list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list); 3728 3729 namespace_unlock(); 3730 } 3731 EXPORT_SYMBOL(mnt_set_expiry); 3732 3733 /* 3734 * process a list of expirable mountpoints with the intent of discarding any 3735 * mountpoints that aren't in use and haven't been touched since last we came 3736 * here 3737 */ 3738 void mark_mounts_for_expiry(struct list_head *mounts) 3739 { 3740 struct mount *mnt, *next; 3741 LIST_HEAD(graveyard); 3742 3743 if (list_empty(mounts)) 3744 return; 3745 3746 namespace_lock(); 3747 lock_mount_hash(); 3748 3749 /* extract from the expiration list every vfsmount that matches the 3750 * following criteria: 3751 * - only referenced by its parent vfsmount 3752 * - still marked for expiry (marked on the last call here; marks are 3753 * cleared by mntput()) 3754 */ 3755 list_for_each_entry_safe(mnt, next, mounts, mnt_expire) { 3756 if (!xchg(&mnt->mnt_expiry_mark, 1) || 3757 propagate_mount_busy(mnt, 1)) 3758 continue; 3759 list_move(&mnt->mnt_expire, &graveyard); 3760 } 3761 while (!list_empty(&graveyard)) { 3762 mnt = list_first_entry(&graveyard, struct mount, mnt_expire); 3763 touch_mnt_namespace(mnt->mnt_ns); 3764 umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC); 3765 } 3766 unlock_mount_hash(); 3767 namespace_unlock(); 3768 } 3769 3770 EXPORT_SYMBOL_GPL(mark_mounts_for_expiry); 3771 3772 /* 3773 * Ripoff of 'select_parent()' 3774 * 3775 * search the list of submounts for a given mountpoint, and move any 3776 * shrinkable submounts to the 'graveyard' list. 3777 */ 3778 static int select_submounts(struct mount *parent, struct list_head *graveyard) 3779 { 3780 struct mount *this_parent = parent; 3781 struct list_head *next; 3782 int found = 0; 3783 3784 repeat: 3785 next = this_parent->mnt_mounts.next; 3786 resume: 3787 while (next != &this_parent->mnt_mounts) { 3788 struct list_head *tmp = next; 3789 struct mount *mnt = list_entry(tmp, struct mount, mnt_child); 3790 3791 next = tmp->next; 3792 if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE)) 3793 continue; 3794 /* 3795 * Descend a level if the mnt_mounts list is non-empty. 3796 */ 3797 if (!list_empty(&mnt->mnt_mounts)) { 3798 this_parent = mnt; 3799 goto repeat; 3800 } 3801 3802 if (!propagate_mount_busy(mnt, 1)) { 3803 list_move_tail(&mnt->mnt_expire, graveyard); 3804 found++; 3805 } 3806 } 3807 /* 3808 * All done at this level ...
ascend and resume the search 3809 */ 3810 if (this_parent != parent) { 3811 next = this_parent->mnt_child.next; 3812 this_parent = this_parent->mnt_parent; 3813 goto resume; 3814 } 3815 return found; 3816 } 3817 3818 /* 3819 * process a list of expirable mountpoints with the intent of discarding any 3820 * submounts of a specific parent mountpoint 3821 * 3822 * mount_lock must be held for write 3823 */ 3824 static void shrink_submounts(struct mount *mnt) 3825 { 3826 LIST_HEAD(graveyard); 3827 struct mount *m; 3828 3829 /* extract submounts of 'mountpoint' from the expiration list */ 3830 while (select_submounts(mnt, &graveyard)) { 3831 while (!list_empty(&graveyard)) { 3832 m = list_first_entry(&graveyard, struct mount, 3833 mnt_expire); 3834 touch_mnt_namespace(m->mnt_ns); 3835 umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC); 3836 } 3837 } 3838 } 3839 3840 static void *copy_mount_options(const void __user * data) 3841 { 3842 char *copy; 3843 unsigned left, offset; 3844 3845 if (!data) 3846 return NULL; 3847 3848 copy = kmalloc(PAGE_SIZE, GFP_KERNEL); 3849 if (!copy) 3850 return ERR_PTR(-ENOMEM); 3851 3852 left = copy_from_user(copy, data, PAGE_SIZE); 3853 3854 /* 3855 * Not all architectures have an exact copy_from_user(). Resort to 3856 * byte at a time. 3857 */ 3858 offset = PAGE_SIZE - left; 3859 while (left) { 3860 char c; 3861 if (get_user(c, (const char __user *)data + offset)) 3862 break; 3863 copy[offset] = c; 3864 left--; 3865 offset++; 3866 } 3867 3868 if (left == PAGE_SIZE) { 3869 kfree(copy); 3870 return ERR_PTR(-EFAULT); 3871 } 3872 3873 return copy; 3874 } 3875 3876 static char *copy_mount_string(const void __user *data) 3877 { 3878 return data ? strndup_user(data, PATH_MAX) : NULL; 3879 } 3880 3881 /* 3882 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to 3883 * be given to the mount() call (ie: read-only, no-dev, no-suid etc). 3884 * 3885 * data is a (void *) that can point to any structure up to 3886 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent 3887 * information (or be NULL). 3888 * 3889 * Pre-0.97 versions of mount() didn't have a flags word. 3890 * When the flags word was introduced its top half was required 3891 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9. 3892 * Therefore, if this magic number is present, it carries no information 3893 * and must be discarded. 
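 * * Worked example (constants from <uapi/linux/mount.h>): MS_MGC_VAL is 0xC0ED0000 and MS_MGC_MSK is 0xffff0000, so a legacy caller passing (0xC0ED0000 | MS_RDONLY) is reduced to plain MS_RDONLY by the "discard magic" test below.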
3894 */ 3895 int path_mount(const char *dev_name, struct path *path, 3896 const char *type_page, unsigned long flags, void *data_page) 3897 { 3898 unsigned int mnt_flags = 0, sb_flags; 3899 int ret; 3900 3901 /* Discard magic */ 3902 if ((flags & MS_MGC_MSK) == MS_MGC_VAL) 3903 flags &= ~MS_MGC_MSK; 3904 3905 /* Basic sanity checks */ 3906 if (data_page) 3907 ((char *)data_page)[PAGE_SIZE - 1] = 0; 3908 3909 if (flags & MS_NOUSER) 3910 return -EINVAL; 3911 3912 ret = security_sb_mount(dev_name, path, type_page, flags, data_page); 3913 if (ret) 3914 return ret; 3915 if (!may_mount()) 3916 return -EPERM; 3917 if (flags & SB_MANDLOCK) 3918 warn_mandlock(); 3919 3920 /* Default to relatime unless overridden */ 3921 if (!(flags & MS_NOATIME)) 3922 mnt_flags |= MNT_RELATIME; 3923 3924 /* Separate the per-mountpoint flags */ 3925 if (flags & MS_NOSUID) 3926 mnt_flags |= MNT_NOSUID; 3927 if (flags & MS_NODEV) 3928 mnt_flags |= MNT_NODEV; 3929 if (flags & MS_NOEXEC) 3930 mnt_flags |= MNT_NOEXEC; 3931 if (flags & MS_NOATIME) 3932 mnt_flags |= MNT_NOATIME; 3933 if (flags & MS_NODIRATIME) 3934 mnt_flags |= MNT_NODIRATIME; 3935 if (flags & MS_STRICTATIME) 3936 mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME); 3937 if (flags & MS_RDONLY) 3938 mnt_flags |= MNT_READONLY; 3939 if (flags & MS_NOSYMFOLLOW) 3940 mnt_flags |= MNT_NOSYMFOLLOW; 3941 3942 /* The default atime for remount is preservation */ 3943 if ((flags & MS_REMOUNT) && 3944 ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME | 3945 MS_STRICTATIME)) == 0)) { 3946 mnt_flags &= ~MNT_ATIME_MASK; 3947 mnt_flags |= path->mnt->mnt_flags & MNT_ATIME_MASK; 3948 } 3949 3950 sb_flags = flags & (SB_RDONLY | 3951 SB_SYNCHRONOUS | 3952 SB_MANDLOCK | 3953 SB_DIRSYNC | 3954 SB_SILENT | 3955 SB_POSIXACL | 3956 SB_LAZYTIME | 3957 SB_I_VERSION); 3958 3959 if ((flags & (MS_REMOUNT | MS_BIND)) == (MS_REMOUNT | MS_BIND)) 3960 return do_reconfigure_mnt(path, mnt_flags); 3961 if (flags & MS_REMOUNT) 3962 return do_remount(path, flags, sb_flags, mnt_flags, data_page); 3963 if (flags & MS_BIND) 3964 return do_loopback(path, dev_name, flags & MS_REC); 3965 if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE)) 3966 return do_change_type(path, flags); 3967 if (flags & MS_MOVE) 3968 return do_move_mount_old(path, dev_name); 3969 3970 return do_new_mount(path, type_page, sb_flags, mnt_flags, dev_name, 3971 data_page); 3972 } 3973 3974 int do_mount(const char *dev_name, const char __user *dir_name, 3975 const char *type_page, unsigned long flags, void *data_page) 3976 { 3977 struct path path; 3978 int ret; 3979 3980 ret = user_path_at(AT_FDCWD, dir_name, LOOKUP_FOLLOW, &path); 3981 if (ret) 3982 return ret; 3983 ret = path_mount(dev_name, &path, type_page, flags, data_page); 3984 path_put(&path); 3985 return ret; 3986 } 3987 3988 static struct ucounts *inc_mnt_namespaces(struct user_namespace *ns) 3989 { 3990 return inc_ucount(ns, current_euid(), UCOUNT_MNT_NAMESPACES); 3991 } 3992 3993 static void dec_mnt_namespaces(struct ucounts *ucounts) 3994 { 3995 dec_ucount(ucounts, UCOUNT_MNT_NAMESPACES); 3996 } 3997 3998 static void free_mnt_ns(struct mnt_namespace *ns) 3999 { 4000 if (!is_anon_ns(ns)) 4001 ns_free_inum(&ns->ns); 4002 dec_mnt_namespaces(ns->ucounts); 4003 mnt_ns_tree_remove(ns); 4004 } 4005 4006 /* 4007 * Assign a sequence number so we can detect when we attempt to bind 4008 * mount a reference to an older mount namespace into the current 4009 * mount namespace, preventing reference counting loops.
3987 
3988 static struct ucounts *inc_mnt_namespaces(struct user_namespace *ns)
3989 {
3990 	return inc_ucount(ns, current_euid(), UCOUNT_MNT_NAMESPACES);
3991 }
3992 
3993 static void dec_mnt_namespaces(struct ucounts *ucounts)
3994 {
3995 	dec_ucount(ucounts, UCOUNT_MNT_NAMESPACES);
3996 }
3997 
3998 static void free_mnt_ns(struct mnt_namespace *ns)
3999 {
4000 	if (!is_anon_ns(ns))
4001 		ns_free_inum(&ns->ns);
4002 	dec_mnt_namespaces(ns->ucounts);
4003 	mnt_ns_tree_remove(ns);
4004 }
4005 
4006 /*
4007  * Assign a sequence number so we can detect when we attempt to bind
4008  * mount a reference to an older mount namespace into the current
4009  * mount namespace, preventing reference counting loops. A 64bit
4010  * number incremented even at an absurd 10GHz would take over 58 years
4011  * to wrap; as it is only bumped on namespace creation, that is
4012  * effectively never, so we can ignore the possibility.
4013  */
4014 static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
4015 
4016 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns, bool anon)
4017 {
4017 	struct mnt_namespace *new_ns;
4018 	struct ucounts *ucounts;
4019 	int ret;
4020 
4021 	ucounts = inc_mnt_namespaces(user_ns);
4022 	if (!ucounts)
4023 		return ERR_PTR(-ENOSPC);
4024 
4025 	new_ns = kzalloc(sizeof(struct mnt_namespace), GFP_KERNEL_ACCOUNT);
4026 	if (!new_ns) {
4027 		dec_mnt_namespaces(ucounts);
4028 		return ERR_PTR(-ENOMEM);
4029 	}
4030 	if (!anon) {
4031 		ret = ns_alloc_inum(&new_ns->ns);
4032 		if (ret) {
4033 			kfree(new_ns);
4034 			dec_mnt_namespaces(ucounts);
4035 			return ERR_PTR(ret);
4036 		}
4037 	}
4038 	new_ns->ns.ops = &mntns_operations;
4039 	if (!anon)
4040 		new_ns->seq = atomic64_inc_return(&mnt_ns_seq);
4041 	refcount_set(&new_ns->ns.count, 1);
4042 	refcount_set(&new_ns->passive, 1);
4043 	new_ns->mounts = RB_ROOT;
4044 	INIT_LIST_HEAD(&new_ns->mnt_ns_list);
4045 	RB_CLEAR_NODE(&new_ns->mnt_ns_tree_node);
4046 	init_waitqueue_head(&new_ns->poll);
4047 	new_ns->user_ns = get_user_ns(user_ns);
4048 	new_ns->ucounts = ucounts;
4049 	return new_ns;
4050 }
4051 
4052 __latent_entropy
4053 struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
4054 		struct user_namespace *user_ns, struct fs_struct *new_fs)
4055 {
4056 	struct mnt_namespace *new_ns;
4057 	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
4058 	struct mount *p, *q;
4059 	struct mount *old;
4060 	struct mount *new;
4061 	int copy_flags;
4062 
4063 	BUG_ON(!ns);
4064 
4065 	if (likely(!(flags & CLONE_NEWNS))) {
4066 		get_mnt_ns(ns);
4067 		return ns;
4068 	}
4069 
4070 	old = ns->root;
4071 
4072 	new_ns = alloc_mnt_ns(user_ns, false);
4073 	if (IS_ERR(new_ns))
4074 		return new_ns;
4075 
4076 	namespace_lock();
4077 	/* First pass: copy the tree topology */
4078 	copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE;
4079 	if (user_ns != ns->user_ns)
4080 		copy_flags |= CL_SHARED_TO_SLAVE;
4081 	new = copy_tree(old, old->mnt.mnt_root, copy_flags);
4082 	if (IS_ERR(new)) {
4083 		namespace_unlock();
4084 		ns_free_inum(&new_ns->ns);
4085 		dec_mnt_namespaces(new_ns->ucounts);
4086 		mnt_ns_release(new_ns);
4087 		return ERR_CAST(new);
4088 	}
4089 	if (user_ns != ns->user_ns) {
4090 		lock_mount_hash();
4091 		lock_mnt_tree(new);
4092 		unlock_mount_hash();
4093 	}
4094 	new_ns->root = new;
4095 
4096 	/*
4097 	 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
4098 	 * as belonging to new namespace. We have already acquired a private
4099 	 * fs_struct, so tsk->fs->lock is not needed.
4100 	 */
4101 	p = old;
4102 	q = new;
4103 	while (p) {
4104 		mnt_add_to_ns(new_ns, q);
4105 		new_ns->nr_mounts++;
4106 		if (new_fs) {
4107 			if (&p->mnt == new_fs->root.mnt) {
4108 				new_fs->root.mnt = mntget(&q->mnt);
4109 				rootmnt = &p->mnt;
4110 			}
4111 			if (&p->mnt == new_fs->pwd.mnt) {
4112 				new_fs->pwd.mnt = mntget(&q->mnt);
4113 				pwdmnt = &p->mnt;
4114 			}
4115 		}
4116 		p = next_mnt(p, old);
4117 		q = next_mnt(q, new);
4118 		if (!q)
4119 			break;
4120 		// an mntns binding we'd skipped?
4121 while (p->mnt.mnt_root != q->mnt.mnt_root) 4122 p = next_mnt(skip_mnt_tree(p), old); 4123 } 4124 namespace_unlock(); 4125 4126 if (rootmnt) 4127 mntput(rootmnt); 4128 if (pwdmnt) 4129 mntput(pwdmnt); 4130 4131 mnt_ns_tree_add(new_ns); 4132 return new_ns; 4133 } 4134 4135 struct dentry *mount_subtree(struct vfsmount *m, const char *name) 4136 { 4137 struct mount *mnt = real_mount(m); 4138 struct mnt_namespace *ns; 4139 struct super_block *s; 4140 struct path path; 4141 int err; 4142 4143 ns = alloc_mnt_ns(&init_user_ns, true); 4144 if (IS_ERR(ns)) { 4145 mntput(m); 4146 return ERR_CAST(ns); 4147 } 4148 ns->root = mnt; 4149 ns->nr_mounts++; 4150 mnt_add_to_ns(ns, mnt); 4151 4152 err = vfs_path_lookup(m->mnt_root, m, 4153 name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path); 4154 4155 put_mnt_ns(ns); 4156 4157 if (err) 4158 return ERR_PTR(err); 4159 4160 /* trade a vfsmount reference for active sb one */ 4161 s = path.mnt->mnt_sb; 4162 atomic_inc(&s->s_active); 4163 mntput(path.mnt); 4164 /* lock the sucker */ 4165 down_write(&s->s_umount); 4166 /* ... and return the root of (sub)tree on it */ 4167 return path.dentry; 4168 } 4169 EXPORT_SYMBOL(mount_subtree); 4170 4171 SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name, 4172 char __user *, type, unsigned long, flags, void __user *, data) 4173 { 4174 int ret; 4175 char *kernel_type; 4176 char *kernel_dev; 4177 void *options; 4178 4179 kernel_type = copy_mount_string(type); 4180 ret = PTR_ERR(kernel_type); 4181 if (IS_ERR(kernel_type)) 4182 goto out_type; 4183 4184 kernel_dev = copy_mount_string(dev_name); 4185 ret = PTR_ERR(kernel_dev); 4186 if (IS_ERR(kernel_dev)) 4187 goto out_dev; 4188 4189 options = copy_mount_options(data); 4190 ret = PTR_ERR(options); 4191 if (IS_ERR(options)) 4192 goto out_data; 4193 4194 ret = do_mount(kernel_dev, dir_name, kernel_type, flags, options); 4195 4196 kfree(options); 4197 out_data: 4198 kfree(kernel_dev); 4199 out_dev: 4200 kfree(kernel_type); 4201 out_type: 4202 return ret; 4203 } 4204 4205 #define FSMOUNT_VALID_FLAGS \ 4206 (MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOSUID | MOUNT_ATTR_NODEV | \ 4207 MOUNT_ATTR_NOEXEC | MOUNT_ATTR__ATIME | MOUNT_ATTR_NODIRATIME | \ 4208 MOUNT_ATTR_NOSYMFOLLOW) 4209 4210 #define MOUNT_SETATTR_VALID_FLAGS (FSMOUNT_VALID_FLAGS | MOUNT_ATTR_IDMAP) 4211 4212 #define MOUNT_SETATTR_PROPAGATION_FLAGS \ 4213 (MS_UNBINDABLE | MS_PRIVATE | MS_SLAVE | MS_SHARED) 4214 4215 static unsigned int attr_flags_to_mnt_flags(u64 attr_flags) 4216 { 4217 unsigned int mnt_flags = 0; 4218 4219 if (attr_flags & MOUNT_ATTR_RDONLY) 4220 mnt_flags |= MNT_READONLY; 4221 if (attr_flags & MOUNT_ATTR_NOSUID) 4222 mnt_flags |= MNT_NOSUID; 4223 if (attr_flags & MOUNT_ATTR_NODEV) 4224 mnt_flags |= MNT_NODEV; 4225 if (attr_flags & MOUNT_ATTR_NOEXEC) 4226 mnt_flags |= MNT_NOEXEC; 4227 if (attr_flags & MOUNT_ATTR_NODIRATIME) 4228 mnt_flags |= MNT_NODIRATIME; 4229 if (attr_flags & MOUNT_ATTR_NOSYMFOLLOW) 4230 mnt_flags |= MNT_NOSYMFOLLOW; 4231 4232 return mnt_flags; 4233 } 4234 4235 /* 4236 * Create a kernel mount representation for a new, prepared superblock 4237 * (specified by fs_fd) and attach to an open_tree-like file descriptor. 
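 *
 * A minimal userspace sequence using the new mount API might look like
 * this (sketch; use libc wrappers where available, else syscall(2)):
 *
 *	fsfd = fsopen("ext4", FSOPEN_CLOEXEC);
 *	fsconfig(fsfd, FSCONFIG_SET_STRING, "source", "/dev/sda1", 0);
 *	fsconfig(fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
 *	mfd = fsmount(fsfd, FSMOUNT_CLOEXEC, MOUNT_ATTR_NODEV);
 *	move_mount(mfd, "", AT_FDCWD, "/mnt", MOVE_MOUNT_F_EMPTY_PATH);
 *
 * The returned fd behaves like an O_PATH fd for the new mount, which
 * stays detached until move_mount() attaches it.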
4238 */ 4239 SYSCALL_DEFINE3(fsmount, int, fs_fd, unsigned int, flags, 4240 unsigned int, attr_flags) 4241 { 4242 struct mnt_namespace *ns; 4243 struct fs_context *fc; 4244 struct file *file; 4245 struct path newmount; 4246 struct mount *mnt; 4247 unsigned int mnt_flags = 0; 4248 long ret; 4249 4250 if (!may_mount()) 4251 return -EPERM; 4252 4253 if ((flags & ~(FSMOUNT_CLOEXEC)) != 0) 4254 return -EINVAL; 4255 4256 if (attr_flags & ~FSMOUNT_VALID_FLAGS) 4257 return -EINVAL; 4258 4259 mnt_flags = attr_flags_to_mnt_flags(attr_flags); 4260 4261 switch (attr_flags & MOUNT_ATTR__ATIME) { 4262 case MOUNT_ATTR_STRICTATIME: 4263 break; 4264 case MOUNT_ATTR_NOATIME: 4265 mnt_flags |= MNT_NOATIME; 4266 break; 4267 case MOUNT_ATTR_RELATIME: 4268 mnt_flags |= MNT_RELATIME; 4269 break; 4270 default: 4271 return -EINVAL; 4272 } 4273 4274 CLASS(fd, f)(fs_fd); 4275 if (fd_empty(f)) 4276 return -EBADF; 4277 4278 if (fd_file(f)->f_op != &fscontext_fops) 4279 return -EINVAL; 4280 4281 fc = fd_file(f)->private_data; 4282 4283 ret = mutex_lock_interruptible(&fc->uapi_mutex); 4284 if (ret < 0) 4285 return ret; 4286 4287 /* There must be a valid superblock or we can't mount it */ 4288 ret = -EINVAL; 4289 if (!fc->root) 4290 goto err_unlock; 4291 4292 ret = -EPERM; 4293 if (mount_too_revealing(fc->root->d_sb, &mnt_flags)) { 4294 pr_warn("VFS: Mount too revealing\n"); 4295 goto err_unlock; 4296 } 4297 4298 ret = -EBUSY; 4299 if (fc->phase != FS_CONTEXT_AWAITING_MOUNT) 4300 goto err_unlock; 4301 4302 if (fc->sb_flags & SB_MANDLOCK) 4303 warn_mandlock(); 4304 4305 newmount.mnt = vfs_create_mount(fc); 4306 if (IS_ERR(newmount.mnt)) { 4307 ret = PTR_ERR(newmount.mnt); 4308 goto err_unlock; 4309 } 4310 newmount.dentry = dget(fc->root); 4311 newmount.mnt->mnt_flags = mnt_flags; 4312 4313 /* We've done the mount bit - now move the file context into more or 4314 * less the same state as if we'd done an fspick(). We don't want to 4315 * do any memory allocation or anything like that at this point as we 4316 * don't want to have to handle any errors incurred. 4317 */ 4318 vfs_clean_context(fc); 4319 4320 ns = alloc_mnt_ns(current->nsproxy->mnt_ns->user_ns, true); 4321 if (IS_ERR(ns)) { 4322 ret = PTR_ERR(ns); 4323 goto err_path; 4324 } 4325 mnt = real_mount(newmount.mnt); 4326 ns->root = mnt; 4327 ns->nr_mounts = 1; 4328 mnt_add_to_ns(ns, mnt); 4329 mntget(newmount.mnt); 4330 4331 /* Attach to an apparent O_PATH fd with a note that we need to unmount 4332 * it, not just simply put it. 4333 */ 4334 file = dentry_open(&newmount, O_PATH, fc->cred); 4335 if (IS_ERR(file)) { 4336 dissolve_on_fput(newmount.mnt); 4337 ret = PTR_ERR(file); 4338 goto err_path; 4339 } 4340 file->f_mode |= FMODE_NEED_UNMOUNT; 4341 4342 ret = get_unused_fd_flags((flags & FSMOUNT_CLOEXEC) ? O_CLOEXEC : 0); 4343 if (ret >= 0) 4344 fd_install(ret, file); 4345 else 4346 fput(file); 4347 4348 err_path: 4349 path_put(&newmount); 4350 err_unlock: 4351 mutex_unlock(&fc->uapi_mutex); 4352 return ret; 4353 } 4354 4355 /* 4356 * Move a mount from one place to another. In combination with 4357 * fsopen()/fsmount() this is used to install a new mount and in combination 4358 * with open_tree(OPEN_TREE_CLONE [| AT_RECURSIVE]) it can be used to copy 4359 * a mount subtree. 4360 * 4361 * Note the flags value is a combination of MOVE_MOUNT_* flags. 
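 *
 * For the subtree-copy case the userspace side is roughly (illustrative
 * sketch):
 *
 *	fd = open_tree(AT_FDCWD, "/mnt/src",
 *		       OPEN_TREE_CLONE | AT_RECURSIVE | OPEN_TREE_CLOEXEC);
 *	move_mount(fd, "", AT_FDCWD, "/mnt/dst", MOVE_MOUNT_F_EMPTY_PATH);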
4362 */ 4363 SYSCALL_DEFINE5(move_mount, 4364 int, from_dfd, const char __user *, from_pathname, 4365 int, to_dfd, const char __user *, to_pathname, 4366 unsigned int, flags) 4367 { 4368 struct path from_path, to_path; 4369 unsigned int lflags; 4370 int ret = 0; 4371 4372 if (!may_mount()) 4373 return -EPERM; 4374 4375 if (flags & ~MOVE_MOUNT__MASK) 4376 return -EINVAL; 4377 4378 if ((flags & (MOVE_MOUNT_BENEATH | MOVE_MOUNT_SET_GROUP)) == 4379 (MOVE_MOUNT_BENEATH | MOVE_MOUNT_SET_GROUP)) 4380 return -EINVAL; 4381 4382 /* If someone gives a pathname, they aren't permitted to move 4383 * from an fd that requires unmount as we can't get at the flag 4384 * to clear it afterwards. 4385 */ 4386 lflags = 0; 4387 if (flags & MOVE_MOUNT_F_SYMLINKS) lflags |= LOOKUP_FOLLOW; 4388 if (flags & MOVE_MOUNT_F_AUTOMOUNTS) lflags |= LOOKUP_AUTOMOUNT; 4389 if (flags & MOVE_MOUNT_F_EMPTY_PATH) lflags |= LOOKUP_EMPTY; 4390 4391 ret = user_path_at(from_dfd, from_pathname, lflags, &from_path); 4392 if (ret < 0) 4393 return ret; 4394 4395 lflags = 0; 4396 if (flags & MOVE_MOUNT_T_SYMLINKS) lflags |= LOOKUP_FOLLOW; 4397 if (flags & MOVE_MOUNT_T_AUTOMOUNTS) lflags |= LOOKUP_AUTOMOUNT; 4398 if (flags & MOVE_MOUNT_T_EMPTY_PATH) lflags |= LOOKUP_EMPTY; 4399 4400 ret = user_path_at(to_dfd, to_pathname, lflags, &to_path); 4401 if (ret < 0) 4402 goto out_from; 4403 4404 ret = security_move_mount(&from_path, &to_path); 4405 if (ret < 0) 4406 goto out_to; 4407 4408 if (flags & MOVE_MOUNT_SET_GROUP) 4409 ret = do_set_group(&from_path, &to_path); 4410 else 4411 ret = do_move_mount(&from_path, &to_path, 4412 (flags & MOVE_MOUNT_BENEATH)); 4413 4414 out_to: 4415 path_put(&to_path); 4416 out_from: 4417 path_put(&from_path); 4418 return ret; 4419 } 4420 4421 /* 4422 * Return true if path is reachable from root 4423 * 4424 * namespace_sem or mount_lock is held 4425 */ 4426 bool is_path_reachable(struct mount *mnt, struct dentry *dentry, 4427 const struct path *root) 4428 { 4429 while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) { 4430 dentry = mnt->mnt_mountpoint; 4431 mnt = mnt->mnt_parent; 4432 } 4433 return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry); 4434 } 4435 4436 bool path_is_under(const struct path *path1, const struct path *path2) 4437 { 4438 bool res; 4439 read_seqlock_excl(&mount_lock); 4440 res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2); 4441 read_sequnlock_excl(&mount_lock); 4442 return res; 4443 } 4444 EXPORT_SYMBOL(path_is_under); 4445 4446 /* 4447 * pivot_root Semantics: 4448 * Moves the root file system of the current process to the directory put_old, 4449 * makes new_root as the new root file system of the current process, and sets 4450 * root/cwd of all processes which had them on the current root to new_root. 4451 * 4452 * Restrictions: 4453 * The new_root and put_old must be directories, and must not be on the 4454 * same file system as the current process root. The put_old must be 4455 * underneath new_root, i.e. adding a non-zero number of /.. to the string 4456 * pointed to by put_old must yield the same directory as new_root. No other 4457 * file system may be mounted on put_old. After all, new_root is a mountpoint. 4458 * 4459 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem. 4460 * See Documentation/filesystems/ramfs-rootfs-initramfs.rst for alternatives 4461 * in this situation. 
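 *
 * A typical sequence for switching to a new root looks like this
 * (illustrative userspace sketch; glibc has no pivot_root() wrapper, so
 * syscall(2) is used, error handling is omitted, and put_old must already
 * exist under /new_root):
 *
 *	chdir("/new_root");
 *	syscall(SYS_pivot_root, ".", "put_old");
 *	chroot(".");
 *	chdir("/");
 *	umount2("/put_old", MNT_DETACH);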
4462 * 4463 * Notes: 4464 * - we don't move root/cwd if they are not at the root (reason: if something 4465 * cared enough to change them, it's probably wrong to force them elsewhere) 4466 * - it's okay to pick a root that isn't the root of a file system, e.g. 4467 * /nfs/my_root where /nfs is the mount point. It must be a mountpoint, 4468 * though, so you may need to say mount --bind /nfs/my_root /nfs/my_root 4469 * first. 4470 */ 4471 SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, 4472 const char __user *, put_old) 4473 { 4474 struct path new, old, root; 4475 struct mount *new_mnt, *root_mnt, *old_mnt, *root_parent, *ex_parent; 4476 struct mountpoint *old_mp, *root_mp; 4477 int error; 4478 4479 if (!may_mount()) 4480 return -EPERM; 4481 4482 error = user_path_at(AT_FDCWD, new_root, 4483 LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &new); 4484 if (error) 4485 goto out0; 4486 4487 error = user_path_at(AT_FDCWD, put_old, 4488 LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old); 4489 if (error) 4490 goto out1; 4491 4492 error = security_sb_pivotroot(&old, &new); 4493 if (error) 4494 goto out2; 4495 4496 get_fs_root(current->fs, &root); 4497 old_mp = lock_mount(&old); 4498 error = PTR_ERR(old_mp); 4499 if (IS_ERR(old_mp)) 4500 goto out3; 4501 4502 error = -EINVAL; 4503 new_mnt = real_mount(new.mnt); 4504 root_mnt = real_mount(root.mnt); 4505 old_mnt = real_mount(old.mnt); 4506 ex_parent = new_mnt->mnt_parent; 4507 root_parent = root_mnt->mnt_parent; 4508 if (IS_MNT_SHARED(old_mnt) || 4509 IS_MNT_SHARED(ex_parent) || 4510 IS_MNT_SHARED(root_parent)) 4511 goto out4; 4512 if (!check_mnt(root_mnt) || !check_mnt(new_mnt)) 4513 goto out4; 4514 if (new_mnt->mnt.mnt_flags & MNT_LOCKED) 4515 goto out4; 4516 error = -ENOENT; 4517 if (d_unlinked(new.dentry)) 4518 goto out4; 4519 error = -EBUSY; 4520 if (new_mnt == root_mnt || old_mnt == root_mnt) 4521 goto out4; /* loop, on the same file system */ 4522 error = -EINVAL; 4523 if (!path_mounted(&root)) 4524 goto out4; /* not a mountpoint */ 4525 if (!mnt_has_parent(root_mnt)) 4526 goto out4; /* not attached */ 4527 if (!path_mounted(&new)) 4528 goto out4; /* not a mountpoint */ 4529 if (!mnt_has_parent(new_mnt)) 4530 goto out4; /* not attached */ 4531 /* make sure we can reach put_old from new_root */ 4532 if (!is_path_reachable(old_mnt, old.dentry, &new)) 4533 goto out4; 4534 /* make certain new is below the root */ 4535 if (!is_path_reachable(new_mnt, new.dentry, &root)) 4536 goto out4; 4537 lock_mount_hash(); 4538 umount_mnt(new_mnt); 4539 root_mp = unhash_mnt(root_mnt); /* we'll need its mountpoint */ 4540 if (root_mnt->mnt.mnt_flags & MNT_LOCKED) { 4541 new_mnt->mnt.mnt_flags |= MNT_LOCKED; 4542 root_mnt->mnt.mnt_flags &= ~MNT_LOCKED; 4543 } 4544 /* mount old root on put_old */ 4545 attach_mnt(root_mnt, old_mnt, old_mp, false); 4546 /* mount new_root on / */ 4547 attach_mnt(new_mnt, root_parent, root_mp, false); 4548 mnt_add_count(root_parent, -1); 4549 touch_mnt_namespace(current->nsproxy->mnt_ns); 4550 /* A moved mount should not expire automatically */ 4551 list_del_init(&new_mnt->mnt_expire); 4552 put_mountpoint(root_mp); 4553 unlock_mount_hash(); 4554 mnt_notify_add(root_mnt); 4555 mnt_notify_add(new_mnt); 4556 chroot_fs_refs(&root, &new); 4557 error = 0; 4558 out4: 4559 unlock_mount(old_mp); 4560 if (!error) 4561 mntput_no_expire(ex_parent); 4562 out3: 4563 path_put(&root); 4564 out2: 4565 path_put(&old); 4566 out1: 4567 path_put(&new); 4568 out0: 4569 return error; 4570 } 4571 4572 static unsigned int recalc_flags(struct mount_kattr *kattr, struct mount 
*mnt)
4573 {
4574 	unsigned int flags = mnt->mnt.mnt_flags;
4575 
4576 	/* flags to clear */
4577 	flags &= ~kattr->attr_clr;
4578 	/* flags to raise */
4579 	flags |= kattr->attr_set;
4580 
4581 	return flags;
4582 }
4583 
4584 static int can_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
4585 {
4586 	struct vfsmount *m = &mnt->mnt;
4587 	struct user_namespace *fs_userns = m->mnt_sb->s_user_ns;
4588 
4589 	if (!kattr->mnt_idmap)
4590 		return 0;
4591 
4592 	/*
4593 	 * Creating an idmapped mount with the filesystem wide idmapping
4594 	 * doesn't make sense so block that. We don't allow mushy semantics.
4595 	 */
4596 	if (kattr->mnt_userns == m->mnt_sb->s_user_ns)
4597 		return -EINVAL;
4598 
4599 	/*
4600 	 * Once a mount has been idmapped we don't allow it to change its
4601 	 * mapping. It makes things simpler and callers can just create
4602 	 * another bind-mount they can idmap if they want to.
4603 	 */
4604 	if (is_idmapped_mnt(m))
4605 		return -EPERM;
4606 
4607 	/* The underlying filesystem doesn't support idmapped mounts yet. */
4608 	if (!(m->mnt_sb->s_type->fs_flags & FS_ALLOW_IDMAP))
4609 		return -EINVAL;
4610 
4611 	/* The filesystem has turned off idmapped mounts. */
4612 	if (m->mnt_sb->s_iflags & SB_I_NOIDMAP)
4613 		return -EINVAL;
4614 
4615 	/* We're not controlling the superblock. */
4616 	if (!ns_capable(fs_userns, CAP_SYS_ADMIN))
4617 		return -EPERM;
4618 
4619 	/* Mount has already been visible in the filesystem hierarchy. */
4620 	if (!is_anon_ns(mnt->mnt_ns))
4621 		return -EINVAL;
4622 
4623 	return 0;
4624 }
4625 
4626 /**
4627  * mnt_allow_writers() - check whether the attribute change allows writers
4628  * @kattr: the new mount attributes
4629  * @mnt: the mount to which @kattr will be applied
4630  *
4631  * Check whether the new mount attributes in @kattr allow concurrent writers.
4632  *
4633  * Return: true if writers are still allowed, false if they need to be held
4634  */
4635 static inline bool mnt_allow_writers(const struct mount_kattr *kattr,
4636 				     const struct mount *mnt)
4637 {
4638 	return (!(kattr->attr_set & MNT_READONLY) ||
4639 		(mnt->mnt.mnt_flags & MNT_READONLY)) &&
4640 	       !kattr->mnt_idmap;
4641 }
4642 
4643 static int mount_setattr_prepare(struct mount_kattr *kattr, struct mount *mnt)
4644 {
4645 	struct mount *m;
4646 	int err;
4647 
4648 	for (m = mnt; m; m = next_mnt(m, mnt)) {
4649 		if (!can_change_locked_flags(m, recalc_flags(kattr, m))) {
4650 			err = -EPERM;
4651 			break;
4652 		}
4653 
4654 		err = can_idmap_mount(kattr, m);
4655 		if (err)
4656 			break;
4657 
4658 		if (!mnt_allow_writers(kattr, m)) {
4659 			err = mnt_hold_writers(m);
4660 			if (err)
4661 				break;
4662 		}
4663 
4664 		if (!kattr->recurse)
4665 			return 0;
4666 	}
4667 
4668 	if (err) {
4669 		struct mount *p;
4670 
4671 		/*
4672 		 * If we had to call mnt_hold_writers() MNT_WRITE_HOLD will
4673 		 * be set in @mnt_flags. The loop unsets MNT_WRITE_HOLD for all
4674 		 * mounts and needs to take care to include the first mount.
4675 		 */
4676 		for (p = mnt; p; p = next_mnt(p, mnt)) {
4677 			/* If we had to hold writers unblock them. */
4678 			if (p->mnt.mnt_flags & MNT_WRITE_HOLD)
4679 				mnt_unhold_writers(p);
4680 
4681 			/*
4682 			 * We're done once the first mount we changed got
4683 			 * MNT_WRITE_HOLD unset.
4684 			 */
4685 			if (p == m)
4686 				break;
4687 		}
4688 	}
4689 	return err;
4690 }
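/*
 * Illustrative userspace counterpart (a sketch, not part of this file):
 * the prepare/commit split above is what makes a recursive attribute
 * change such as the following atomic, in the sense that either every
 * mount in the subtree gets the new attributes or none does:
 *
 *	struct mount_attr attr = {
 *		.attr_set = MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOSUID,
 *	};
 *
 *	mount_setattr(AT_FDCWD, "/mnt/tree", AT_RECURSIVE,
 *		      &attr, sizeof(attr));
 */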
4691 
4692 static void do_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
4693 {
4694 	if (!kattr->mnt_idmap)
4695 		return;
4696 
4697 	/*
4698 	 * Pairs with smp_load_acquire() in mnt_idmap().
4699 	 *
4700 	 * Since we only allow a mount to change the idmapping once and
4701 	 * verified this in can_idmap_mount() we know that the mount has
4702 	 * @nop_mnt_idmap attached to it. So there's no need to drop any
4703 	 * references.
4704 	 */
4705 	smp_store_release(&mnt->mnt.mnt_idmap, mnt_idmap_get(kattr->mnt_idmap));
4706 }
4707 
4708 static void mount_setattr_commit(struct mount_kattr *kattr, struct mount *mnt)
4709 {
4710 	struct mount *m;
4711 
4712 	for (m = mnt; m; m = next_mnt(m, mnt)) {
4713 		unsigned int flags;
4714 
4715 		do_idmap_mount(kattr, m);
4716 		flags = recalc_flags(kattr, m);
4717 		WRITE_ONCE(m->mnt.mnt_flags, flags);
4718 
4719 		/* If we had to hold writers unblock them. */
4720 		if (m->mnt.mnt_flags & MNT_WRITE_HOLD)
4721 			mnt_unhold_writers(m);
4722 
4723 		if (kattr->propagation)
4724 			change_mnt_propagation(m, kattr->propagation);
4725 		if (!kattr->recurse)
4726 			break;
4727 	}
4728 	touch_mnt_namespace(mnt->mnt_ns);
4729 }
4730 
4731 static int do_mount_setattr(struct path *path, struct mount_kattr *kattr)
4732 {
4733 	struct mount *mnt = real_mount(path->mnt);
4734 	int err = 0;
4735 
4736 	if (!path_mounted(path))
4737 		return -EINVAL;
4738 
4739 	if (kattr->mnt_userns) {
4740 		struct mnt_idmap *mnt_idmap;
4741 
4742 		mnt_idmap = alloc_mnt_idmap(kattr->mnt_userns);
4743 		if (IS_ERR(mnt_idmap))
4744 			return PTR_ERR(mnt_idmap);
4745 		kattr->mnt_idmap = mnt_idmap;
4746 	}
4747 
4748 	if (kattr->propagation) {
4749 		/*
4750 		 * Only take namespace_lock() if we're actually changing
4751 		 * propagation.
4752 		 */
4753 		namespace_lock();
4754 		if (kattr->propagation == MS_SHARED) {
4755 			err = invent_group_ids(mnt, kattr->recurse);
4756 			if (err) {
4757 				namespace_unlock();
4758 				return err;
4759 			}
4760 		}
4761 	}
4762 
4763 	err = -EINVAL;
4764 	lock_mount_hash();
4765 
4766 	/* Ensure that this isn't anything purely vfs internal. */
4767 	if (!is_mounted(&mnt->mnt))
4768 		goto out;
4769 
4770 	/*
4771 	 * If this is an attached mount make sure it's located in the caller's
4772 	 * mount namespace. If it's not don't let the caller interact with it.
4773 	 *
4774 	 * If this mount doesn't have a parent it's most often simply a
4775 	 * detached mount with an anonymous mount namespace. IOW, something
4776 	 * that's simply not attached yet. But there are apparently also users
4777 	 * that do change mount properties on the rootfs itself. That obviously
4778 	 * neither has a parent nor is it a detached mount so we cannot
4779 	 * unconditionally check for detached mounts.
4780 	 */
4781 	if ((mnt_has_parent(mnt) || !is_anon_ns(mnt->mnt_ns)) && !check_mnt(mnt))
4782 		goto out;
4783 
4784 	/*
4785 	 * First, we get the mount tree in a shape where we can change mount
4786 	 * properties without failure. If that succeeds we commit all changes,
4787 	 * and if it fails we clean up.
4788 	 */
4789 	err = mount_setattr_prepare(kattr, mnt);
4790 	if (!err)
4791 		mount_setattr_commit(kattr, mnt);
4792 
4793 out:
4794 	unlock_mount_hash();
4795 
4796 	if (kattr->propagation) {
4797 		if (err)
4798 			cleanup_group_ids(mnt, NULL);
4799 		namespace_unlock();
4800 	}
4801 
4802 	return err;
4803 }
4804 
4805 static int build_mount_idmapped(const struct mount_attr *attr, size_t usize,
4806 				struct mount_kattr *kattr, unsigned int flags)
4807 {
4808 	struct ns_common *ns;
4809 	struct user_namespace *mnt_userns;
4810 
4811 	if (!((attr->attr_set | attr->attr_clr) & MOUNT_ATTR_IDMAP))
4812 		return 0;
4813 
4814 	/*
4815 	 * We currently do not support clearing an idmapped mount. If this ever
4816 	 * is a use-case we can revisit this but for now let's keep it simple
4817 	 * and not allow it.
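 *
 * Creating the idmapped mount in the first place is done on a detached
 * copy, e.g. (userspace sketch, where userns_fd is assumed to be an fd
 * referring to the user namespace that supplies the mapping):
 *
 *	fd = open_tree(AT_FDCWD, "/srv",
 *		       OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC);
 *	attr.attr_set = MOUNT_ATTR_IDMAP;
 *	attr.userns_fd = userns_fd;
 *	mount_setattr(fd, "", AT_EMPTY_PATH, &attr, sizeof(attr));
 *	move_mount(fd, "", AT_FDCWD, "/mnt/idmapped",
 *		   MOVE_MOUNT_F_EMPTY_PATH);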
4818 */ 4819 if (attr->attr_clr & MOUNT_ATTR_IDMAP) 4820 return -EINVAL; 4821 4822 if (attr->userns_fd > INT_MAX) 4823 return -EINVAL; 4824 4825 CLASS(fd, f)(attr->userns_fd); 4826 if (fd_empty(f)) 4827 return -EBADF; 4828 4829 if (!proc_ns_file(fd_file(f))) 4830 return -EINVAL; 4831 4832 ns = get_proc_ns(file_inode(fd_file(f))); 4833 if (ns->ops->type != CLONE_NEWUSER) 4834 return -EINVAL; 4835 4836 /* 4837 * The initial idmapping cannot be used to create an idmapped 4838 * mount. We use the initial idmapping as an indicator of a mount 4839 * that is not idmapped. It can simply be passed into helpers that 4840 * are aware of idmapped mounts as a convenient shortcut. A user 4841 * can just create a dedicated identity mapping to achieve the same 4842 * result. 4843 */ 4844 mnt_userns = container_of(ns, struct user_namespace, ns); 4845 if (mnt_userns == &init_user_ns) 4846 return -EPERM; 4847 4848 /* We're not controlling the target namespace. */ 4849 if (!ns_capable(mnt_userns, CAP_SYS_ADMIN)) 4850 return -EPERM; 4851 4852 kattr->mnt_userns = get_user_ns(mnt_userns); 4853 return 0; 4854 } 4855 4856 static int build_mount_kattr(const struct mount_attr *attr, size_t usize, 4857 struct mount_kattr *kattr, unsigned int flags) 4858 { 4859 unsigned int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW; 4860 4861 if (flags & AT_NO_AUTOMOUNT) 4862 lookup_flags &= ~LOOKUP_AUTOMOUNT; 4863 if (flags & AT_SYMLINK_NOFOLLOW) 4864 lookup_flags &= ~LOOKUP_FOLLOW; 4865 if (flags & AT_EMPTY_PATH) 4866 lookup_flags |= LOOKUP_EMPTY; 4867 4868 *kattr = (struct mount_kattr) { 4869 .lookup_flags = lookup_flags, 4870 .recurse = !!(flags & AT_RECURSIVE), 4871 }; 4872 4873 if (attr->propagation & ~MOUNT_SETATTR_PROPAGATION_FLAGS) 4874 return -EINVAL; 4875 if (hweight32(attr->propagation & MOUNT_SETATTR_PROPAGATION_FLAGS) > 1) 4876 return -EINVAL; 4877 kattr->propagation = attr->propagation; 4878 4879 if ((attr->attr_set | attr->attr_clr) & ~MOUNT_SETATTR_VALID_FLAGS) 4880 return -EINVAL; 4881 4882 kattr->attr_set = attr_flags_to_mnt_flags(attr->attr_set); 4883 kattr->attr_clr = attr_flags_to_mnt_flags(attr->attr_clr); 4884 4885 /* 4886 * Since the MOUNT_ATTR_<atime> values are an enum, not a bitmap, 4887 * users wanting to transition to a different atime setting cannot 4888 * simply specify the atime setting in @attr_set, but must also 4889 * specify MOUNT_ATTR__ATIME in the @attr_clr field. 4890 * So ensure that MOUNT_ATTR__ATIME can't be partially set in 4891 * @attr_clr and that @attr_set can't have any atime bits set if 4892 * MOUNT_ATTR__ATIME isn't set in @attr_clr. 4893 */ 4894 if (attr->attr_clr & MOUNT_ATTR__ATIME) { 4895 if ((attr->attr_clr & MOUNT_ATTR__ATIME) != MOUNT_ATTR__ATIME) 4896 return -EINVAL; 4897 4898 /* 4899 * Clear all previous time settings as they are mutually 4900 * exclusive. 
4901 */ 4902 kattr->attr_clr |= MNT_RELATIME | MNT_NOATIME; 4903 switch (attr->attr_set & MOUNT_ATTR__ATIME) { 4904 case MOUNT_ATTR_RELATIME: 4905 kattr->attr_set |= MNT_RELATIME; 4906 break; 4907 case MOUNT_ATTR_NOATIME: 4908 kattr->attr_set |= MNT_NOATIME; 4909 break; 4910 case MOUNT_ATTR_STRICTATIME: 4911 break; 4912 default: 4913 return -EINVAL; 4914 } 4915 } else { 4916 if (attr->attr_set & MOUNT_ATTR__ATIME) 4917 return -EINVAL; 4918 } 4919 4920 return build_mount_idmapped(attr, usize, kattr, flags); 4921 } 4922 4923 static void finish_mount_kattr(struct mount_kattr *kattr) 4924 { 4925 put_user_ns(kattr->mnt_userns); 4926 kattr->mnt_userns = NULL; 4927 4928 if (kattr->mnt_idmap) 4929 mnt_idmap_put(kattr->mnt_idmap); 4930 } 4931 4932 SYSCALL_DEFINE5(mount_setattr, int, dfd, const char __user *, path, 4933 unsigned int, flags, struct mount_attr __user *, uattr, 4934 size_t, usize) 4935 { 4936 int err; 4937 struct path target; 4938 struct mount_attr attr; 4939 struct mount_kattr kattr; 4940 4941 BUILD_BUG_ON(sizeof(struct mount_attr) != MOUNT_ATTR_SIZE_VER0); 4942 4943 if (flags & ~(AT_EMPTY_PATH | 4944 AT_RECURSIVE | 4945 AT_SYMLINK_NOFOLLOW | 4946 AT_NO_AUTOMOUNT)) 4947 return -EINVAL; 4948 4949 if (unlikely(usize > PAGE_SIZE)) 4950 return -E2BIG; 4951 if (unlikely(usize < MOUNT_ATTR_SIZE_VER0)) 4952 return -EINVAL; 4953 4954 if (!may_mount()) 4955 return -EPERM; 4956 4957 err = copy_struct_from_user(&attr, sizeof(attr), uattr, usize); 4958 if (err) 4959 return err; 4960 4961 /* Don't bother walking through the mounts if this is a nop. */ 4962 if (attr.attr_set == 0 && 4963 attr.attr_clr == 0 && 4964 attr.propagation == 0) 4965 return 0; 4966 4967 err = build_mount_kattr(&attr, usize, &kattr, flags); 4968 if (err) 4969 return err; 4970 4971 err = user_path_at(dfd, path, kattr.lookup_flags, &target); 4972 if (!err) { 4973 err = do_mount_setattr(&target, &kattr); 4974 path_put(&target); 4975 } 4976 finish_mount_kattr(&kattr); 4977 return err; 4978 } 4979 4980 int show_path(struct seq_file *m, struct dentry *root) 4981 { 4982 if (root->d_sb->s_op->show_path) 4983 return root->d_sb->s_op->show_path(m, root); 4984 4985 seq_dentry(m, root, " \t\n\\"); 4986 return 0; 4987 } 4988 4989 static struct vfsmount *lookup_mnt_in_ns(u64 id, struct mnt_namespace *ns) 4990 { 4991 struct mount *mnt = mnt_find_id_at(ns, id); 4992 4993 if (!mnt || mnt->mnt_id_unique != id) 4994 return NULL; 4995 4996 return &mnt->mnt; 4997 } 4998 4999 struct kstatmount { 5000 struct statmount __user *buf; 5001 size_t bufsize; 5002 struct vfsmount *mnt; 5003 u64 mask; 5004 struct path root; 5005 struct statmount sm; 5006 struct seq_file seq; 5007 }; 5008 5009 static u64 mnt_to_attr_flags(struct vfsmount *mnt) 5010 { 5011 unsigned int mnt_flags = READ_ONCE(mnt->mnt_flags); 5012 u64 attr_flags = 0; 5013 5014 if (mnt_flags & MNT_READONLY) 5015 attr_flags |= MOUNT_ATTR_RDONLY; 5016 if (mnt_flags & MNT_NOSUID) 5017 attr_flags |= MOUNT_ATTR_NOSUID; 5018 if (mnt_flags & MNT_NODEV) 5019 attr_flags |= MOUNT_ATTR_NODEV; 5020 if (mnt_flags & MNT_NOEXEC) 5021 attr_flags |= MOUNT_ATTR_NOEXEC; 5022 if (mnt_flags & MNT_NODIRATIME) 5023 attr_flags |= MOUNT_ATTR_NODIRATIME; 5024 if (mnt_flags & MNT_NOSYMFOLLOW) 5025 attr_flags |= MOUNT_ATTR_NOSYMFOLLOW; 5026 5027 if (mnt_flags & MNT_NOATIME) 5028 attr_flags |= MOUNT_ATTR_NOATIME; 5029 else if (mnt_flags & MNT_RELATIME) 5030 attr_flags |= MOUNT_ATTR_RELATIME; 5031 else 5032 attr_flags |= MOUNT_ATTR_STRICTATIME; 5033 5034 if (is_idmapped_mnt(mnt)) 5035 attr_flags |= MOUNT_ATTR_IDMAP; 5036 
5037 	return attr_flags;
5038 }
5039 
5040 static u64 mnt_to_propagation_flags(struct mount *m)
5041 {
5042 	u64 propagation = 0;
5043 
5044 	if (IS_MNT_SHARED(m))
5045 		propagation |= MS_SHARED;
5046 	if (IS_MNT_SLAVE(m))
5047 		propagation |= MS_SLAVE;
5048 	if (IS_MNT_UNBINDABLE(m))
5049 		propagation |= MS_UNBINDABLE;
5050 	if (!propagation)
5051 		propagation |= MS_PRIVATE;
5052 
5053 	return propagation;
5054 }
5055 
5056 static void statmount_sb_basic(struct kstatmount *s)
5057 {
5058 	struct super_block *sb = s->mnt->mnt_sb;
5059 
5060 	s->sm.mask |= STATMOUNT_SB_BASIC;
5061 	s->sm.sb_dev_major = MAJOR(sb->s_dev);
5062 	s->sm.sb_dev_minor = MINOR(sb->s_dev);
5063 	s->sm.sb_magic = sb->s_magic;
5064 	s->sm.sb_flags = sb->s_flags & (SB_RDONLY|SB_SYNCHRONOUS|SB_DIRSYNC|SB_LAZYTIME);
5065 }
5066 
5067 static void statmount_mnt_basic(struct kstatmount *s)
5068 {
5069 	struct mount *m = real_mount(s->mnt);
5070 
5071 	s->sm.mask |= STATMOUNT_MNT_BASIC;
5072 	s->sm.mnt_id = m->mnt_id_unique;
5073 	s->sm.mnt_parent_id = m->mnt_parent->mnt_id_unique;
5074 	s->sm.mnt_id_old = m->mnt_id;
5075 	s->sm.mnt_parent_id_old = m->mnt_parent->mnt_id;
5076 	s->sm.mnt_attr = mnt_to_attr_flags(&m->mnt);
5077 	s->sm.mnt_propagation = mnt_to_propagation_flags(m);
5078 	s->sm.mnt_peer_group = IS_MNT_SHARED(m) ? m->mnt_group_id : 0;
5079 	s->sm.mnt_master = IS_MNT_SLAVE(m) ? m->mnt_master->mnt_group_id : 0;
5080 }
5081 
5082 static void statmount_propagate_from(struct kstatmount *s)
5083 {
5084 	struct mount *m = real_mount(s->mnt);
5085 
5086 	s->sm.mask |= STATMOUNT_PROPAGATE_FROM;
5087 	if (IS_MNT_SLAVE(m))
5088 		s->sm.propagate_from = get_dominating_id(m, &current->fs->root);
5089 }
5090 
5091 static int statmount_mnt_root(struct kstatmount *s, struct seq_file *seq)
5092 {
5093 	int ret;
5094 	size_t start = seq->count;
5095 
5096 	ret = show_path(seq, s->mnt->mnt_root);
5097 	if (ret)
5098 		return ret;
5099 
5100 	if (unlikely(seq_has_overflowed(seq)))
5101 		return -EAGAIN;
5102 
5103 	/*
5104 	 * Unescape the result. It would be better if the supplied string were
5105 	 * not escaped in the first place, but that's a pretty invasive change.
5106 	 */
5107 	seq->buf[seq->count] = '\0';
5108 	seq->count = start;
5109 	seq_commit(seq, string_unescape_inplace(seq->buf + start, UNESCAPE_OCTAL));
5110 	return 0;
5111 }
5112 
5113 static int statmount_mnt_point(struct kstatmount *s, struct seq_file *seq)
5114 {
5115 	struct vfsmount *mnt = s->mnt;
5116 	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
5117 	int err;
5118 
5119 	err = seq_path_root(seq, &mnt_path, &s->root, "");
5120 	return err == SEQ_SKIP ?
0 : err; 5121 } 5122 5123 static int statmount_fs_type(struct kstatmount *s, struct seq_file *seq) 5124 { 5125 struct super_block *sb = s->mnt->mnt_sb; 5126 5127 seq_puts(seq, sb->s_type->name); 5128 return 0; 5129 } 5130 5131 static void statmount_fs_subtype(struct kstatmount *s, struct seq_file *seq) 5132 { 5133 struct super_block *sb = s->mnt->mnt_sb; 5134 5135 if (sb->s_subtype) 5136 seq_puts(seq, sb->s_subtype); 5137 } 5138 5139 static int statmount_sb_source(struct kstatmount *s, struct seq_file *seq) 5140 { 5141 struct super_block *sb = s->mnt->mnt_sb; 5142 struct mount *r = real_mount(s->mnt); 5143 5144 if (sb->s_op->show_devname) { 5145 size_t start = seq->count; 5146 int ret; 5147 5148 ret = sb->s_op->show_devname(seq, s->mnt->mnt_root); 5149 if (ret) 5150 return ret; 5151 5152 if (unlikely(seq_has_overflowed(seq))) 5153 return -EAGAIN; 5154 5155 /* Unescape the result */ 5156 seq->buf[seq->count] = '\0'; 5157 seq->count = start; 5158 seq_commit(seq, string_unescape_inplace(seq->buf + start, UNESCAPE_OCTAL)); 5159 } else if (r->mnt_devname) { 5160 seq_puts(seq, r->mnt_devname); 5161 } 5162 return 0; 5163 } 5164 5165 static void statmount_mnt_ns_id(struct kstatmount *s, struct mnt_namespace *ns) 5166 { 5167 s->sm.mask |= STATMOUNT_MNT_NS_ID; 5168 s->sm.mnt_ns_id = ns->seq; 5169 } 5170 5171 static int statmount_mnt_opts(struct kstatmount *s, struct seq_file *seq) 5172 { 5173 struct vfsmount *mnt = s->mnt; 5174 struct super_block *sb = mnt->mnt_sb; 5175 int err; 5176 5177 if (sb->s_op->show_options) { 5178 size_t start = seq->count; 5179 5180 err = security_sb_show_options(seq, sb); 5181 if (err) 5182 return err; 5183 5184 err = sb->s_op->show_options(seq, mnt->mnt_root); 5185 if (err) 5186 return err; 5187 5188 if (unlikely(seq_has_overflowed(seq))) 5189 return -EAGAIN; 5190 5191 if (seq->count == start) 5192 return 0; 5193 5194 /* skip leading comma */ 5195 memmove(seq->buf + start, seq->buf + start + 1, 5196 seq->count - start - 1); 5197 seq->count--; 5198 } 5199 5200 return 0; 5201 } 5202 5203 static inline int statmount_opt_process(struct seq_file *seq, size_t start) 5204 { 5205 char *buf_end, *opt_end, *src, *dst; 5206 int count = 0; 5207 5208 if (unlikely(seq_has_overflowed(seq))) 5209 return -EAGAIN; 5210 5211 buf_end = seq->buf + seq->count; 5212 dst = seq->buf + start; 5213 src = dst + 1; /* skip initial comma */ 5214 5215 if (src >= buf_end) { 5216 seq->count = start; 5217 return 0; 5218 } 5219 5220 *buf_end = '\0'; 5221 for (; src < buf_end; src = opt_end + 1) { 5222 opt_end = strchrnul(src, ','); 5223 *opt_end = '\0'; 5224 dst += string_unescape(src, dst, 0, UNESCAPE_OCTAL) + 1; 5225 if (WARN_ON_ONCE(++count == INT_MAX)) 5226 return -EOVERFLOW; 5227 } 5228 seq->count = dst - 1 - seq->buf; 5229 return count; 5230 } 5231 5232 static int statmount_opt_array(struct kstatmount *s, struct seq_file *seq) 5233 { 5234 struct vfsmount *mnt = s->mnt; 5235 struct super_block *sb = mnt->mnt_sb; 5236 size_t start = seq->count; 5237 int err; 5238 5239 if (!sb->s_op->show_options) 5240 return 0; 5241 5242 err = sb->s_op->show_options(seq, mnt->mnt_root); 5243 if (err) 5244 return err; 5245 5246 err = statmount_opt_process(seq, start); 5247 if (err < 0) 5248 return err; 5249 5250 s->sm.opt_num = err; 5251 return 0; 5252 } 5253 5254 static int statmount_opt_sec_array(struct kstatmount *s, struct seq_file *seq) 5255 { 5256 struct vfsmount *mnt = s->mnt; 5257 struct super_block *sb = mnt->mnt_sb; 5258 size_t start = seq->count; 5259 int err; 5260 5261 err = security_sb_show_options(seq, 
sb); 5262 if (err) 5263 return err; 5264 5265 err = statmount_opt_process(seq, start); 5266 if (err < 0) 5267 return err; 5268 5269 s->sm.opt_sec_num = err; 5270 return 0; 5271 } 5272 5273 static int statmount_string(struct kstatmount *s, u64 flag) 5274 { 5275 int ret = 0; 5276 size_t kbufsize; 5277 struct seq_file *seq = &s->seq; 5278 struct statmount *sm = &s->sm; 5279 u32 start = seq->count; 5280 5281 switch (flag) { 5282 case STATMOUNT_FS_TYPE: 5283 sm->fs_type = start; 5284 ret = statmount_fs_type(s, seq); 5285 break; 5286 case STATMOUNT_MNT_ROOT: 5287 sm->mnt_root = start; 5288 ret = statmount_mnt_root(s, seq); 5289 break; 5290 case STATMOUNT_MNT_POINT: 5291 sm->mnt_point = start; 5292 ret = statmount_mnt_point(s, seq); 5293 break; 5294 case STATMOUNT_MNT_OPTS: 5295 sm->mnt_opts = start; 5296 ret = statmount_mnt_opts(s, seq); 5297 break; 5298 case STATMOUNT_OPT_ARRAY: 5299 sm->opt_array = start; 5300 ret = statmount_opt_array(s, seq); 5301 break; 5302 case STATMOUNT_OPT_SEC_ARRAY: 5303 sm->opt_sec_array = start; 5304 ret = statmount_opt_sec_array(s, seq); 5305 break; 5306 case STATMOUNT_FS_SUBTYPE: 5307 sm->fs_subtype = start; 5308 statmount_fs_subtype(s, seq); 5309 break; 5310 case STATMOUNT_SB_SOURCE: 5311 sm->sb_source = start; 5312 ret = statmount_sb_source(s, seq); 5313 break; 5314 default: 5315 WARN_ON_ONCE(true); 5316 return -EINVAL; 5317 } 5318 5319 /* 5320 * If nothing was emitted, return to avoid setting the flag 5321 * and terminating the buffer. 5322 */ 5323 if (seq->count == start) 5324 return ret; 5325 if (unlikely(check_add_overflow(sizeof(*sm), seq->count, &kbufsize))) 5326 return -EOVERFLOW; 5327 if (kbufsize >= s->bufsize) 5328 return -EOVERFLOW; 5329 5330 /* signal a retry */ 5331 if (unlikely(seq_has_overflowed(seq))) 5332 return -EAGAIN; 5333 5334 if (ret) 5335 return ret; 5336 5337 seq->buf[seq->count++] = '\0'; 5338 sm->mask |= flag; 5339 return 0; 5340 } 5341 5342 static int copy_statmount_to_user(struct kstatmount *s) 5343 { 5344 struct statmount *sm = &s->sm; 5345 struct seq_file *seq = &s->seq; 5346 char __user *str = ((char __user *)s->buf) + sizeof(*sm); 5347 size_t copysize = min_t(size_t, s->bufsize, sizeof(*sm)); 5348 5349 if (seq->count && copy_to_user(str, seq->buf, seq->count)) 5350 return -EFAULT; 5351 5352 /* Return the number of bytes copied to the buffer */ 5353 sm->size = copysize + seq->count; 5354 if (copy_to_user(s->buf, sm, copysize)) 5355 return -EFAULT; 5356 5357 return 0; 5358 } 5359 5360 static struct mount *listmnt_next(struct mount *curr, bool reverse) 5361 { 5362 struct rb_node *node; 5363 5364 if (reverse) 5365 node = rb_prev(&curr->mnt_node); 5366 else 5367 node = rb_next(&curr->mnt_node); 5368 5369 return node_to_mount(node); 5370 } 5371 5372 static int grab_requested_root(struct mnt_namespace *ns, struct path *root) 5373 { 5374 struct mount *first, *child; 5375 5376 rwsem_assert_held(&namespace_sem); 5377 5378 /* We're looking at our own ns, just use get_fs_root. */ 5379 if (ns == current->nsproxy->mnt_ns) { 5380 get_fs_root(current->fs, root); 5381 return 0; 5382 } 5383 5384 /* 5385 * We have to find the first mount in our ns and use that, however it 5386 * may not exist, so handle that properly. 
5387 */ 5388 if (RB_EMPTY_ROOT(&ns->mounts)) 5389 return -ENOENT; 5390 5391 first = child = ns->root; 5392 for (;;) { 5393 child = listmnt_next(child, false); 5394 if (!child) 5395 return -ENOENT; 5396 if (child->mnt_parent == first) 5397 break; 5398 } 5399 5400 root->mnt = mntget(&child->mnt); 5401 root->dentry = dget(root->mnt->mnt_root); 5402 return 0; 5403 } 5404 5405 static int do_statmount(struct kstatmount *s, u64 mnt_id, u64 mnt_ns_id, 5406 struct mnt_namespace *ns) 5407 { 5408 struct path root __free(path_put) = {}; 5409 struct mount *m; 5410 int err; 5411 5412 /* Has the namespace already been emptied? */ 5413 if (mnt_ns_id && RB_EMPTY_ROOT(&ns->mounts)) 5414 return -ENOENT; 5415 5416 s->mnt = lookup_mnt_in_ns(mnt_id, ns); 5417 if (!s->mnt) 5418 return -ENOENT; 5419 5420 err = grab_requested_root(ns, &root); 5421 if (err) 5422 return err; 5423 5424 /* 5425 * Don't trigger audit denials. We just want to determine what 5426 * mounts to show users. 5427 */ 5428 m = real_mount(s->mnt); 5429 if (!is_path_reachable(m, m->mnt.mnt_root, &root) && 5430 !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN)) 5431 return -EPERM; 5432 5433 err = security_sb_statfs(s->mnt->mnt_root); 5434 if (err) 5435 return err; 5436 5437 s->root = root; 5438 if (s->mask & STATMOUNT_SB_BASIC) 5439 statmount_sb_basic(s); 5440 5441 if (s->mask & STATMOUNT_MNT_BASIC) 5442 statmount_mnt_basic(s); 5443 5444 if (s->mask & STATMOUNT_PROPAGATE_FROM) 5445 statmount_propagate_from(s); 5446 5447 if (s->mask & STATMOUNT_FS_TYPE) 5448 err = statmount_string(s, STATMOUNT_FS_TYPE); 5449 5450 if (!err && s->mask & STATMOUNT_MNT_ROOT) 5451 err = statmount_string(s, STATMOUNT_MNT_ROOT); 5452 5453 if (!err && s->mask & STATMOUNT_MNT_POINT) 5454 err = statmount_string(s, STATMOUNT_MNT_POINT); 5455 5456 if (!err && s->mask & STATMOUNT_MNT_OPTS) 5457 err = statmount_string(s, STATMOUNT_MNT_OPTS); 5458 5459 if (!err && s->mask & STATMOUNT_OPT_ARRAY) 5460 err = statmount_string(s, STATMOUNT_OPT_ARRAY); 5461 5462 if (!err && s->mask & STATMOUNT_OPT_SEC_ARRAY) 5463 err = statmount_string(s, STATMOUNT_OPT_SEC_ARRAY); 5464 5465 if (!err && s->mask & STATMOUNT_FS_SUBTYPE) 5466 err = statmount_string(s, STATMOUNT_FS_SUBTYPE); 5467 5468 if (!err && s->mask & STATMOUNT_SB_SOURCE) 5469 err = statmount_string(s, STATMOUNT_SB_SOURCE); 5470 5471 if (!err && s->mask & STATMOUNT_MNT_NS_ID) 5472 statmount_mnt_ns_id(s, ns); 5473 5474 if (err) 5475 return err; 5476 5477 return 0; 5478 } 5479 5480 static inline bool retry_statmount(const long ret, size_t *seq_size) 5481 { 5482 if (likely(ret != -EAGAIN)) 5483 return false; 5484 if (unlikely(check_mul_overflow(*seq_size, 2, seq_size))) 5485 return false; 5486 if (unlikely(*seq_size > MAX_RW_COUNT)) 5487 return false; 5488 return true; 5489 } 5490 5491 #define STATMOUNT_STRING_REQ (STATMOUNT_MNT_ROOT | STATMOUNT_MNT_POINT | \ 5492 STATMOUNT_FS_TYPE | STATMOUNT_MNT_OPTS | \ 5493 STATMOUNT_FS_SUBTYPE | STATMOUNT_SB_SOURCE | \ 5494 STATMOUNT_OPT_ARRAY | STATMOUNT_OPT_SEC_ARRAY) 5495 5496 static int prepare_kstatmount(struct kstatmount *ks, struct mnt_id_req *kreq, 5497 struct statmount __user *buf, size_t bufsize, 5498 size_t seq_size) 5499 { 5500 if (!access_ok(buf, bufsize)) 5501 return -EFAULT; 5502 5503 memset(ks, 0, sizeof(*ks)); 5504 ks->mask = kreq->param; 5505 ks->buf = buf; 5506 ks->bufsize = bufsize; 5507 5508 if (ks->mask & STATMOUNT_STRING_REQ) { 5509 if (bufsize == sizeof(ks->sm)) 5510 return -EOVERFLOW; 5511 5512 ks->seq.buf = kvmalloc(seq_size, GFP_KERNEL_ACCOUNT); 5513 if (!ks->seq.buf) 5514 
return -ENOMEM; 5515 5516 ks->seq.size = seq_size; 5517 } 5518 5519 return 0; 5520 } 5521 5522 static int copy_mnt_id_req(const struct mnt_id_req __user *req, 5523 struct mnt_id_req *kreq) 5524 { 5525 int ret; 5526 size_t usize; 5527 5528 BUILD_BUG_ON(sizeof(struct mnt_id_req) != MNT_ID_REQ_SIZE_VER1); 5529 5530 ret = get_user(usize, &req->size); 5531 if (ret) 5532 return -EFAULT; 5533 if (unlikely(usize > PAGE_SIZE)) 5534 return -E2BIG; 5535 if (unlikely(usize < MNT_ID_REQ_SIZE_VER0)) 5536 return -EINVAL; 5537 memset(kreq, 0, sizeof(*kreq)); 5538 ret = copy_struct_from_user(kreq, sizeof(*kreq), req, usize); 5539 if (ret) 5540 return ret; 5541 if (kreq->spare != 0) 5542 return -EINVAL; 5543 /* The first valid unique mount id is MNT_UNIQUE_ID_OFFSET + 1. */ 5544 if (kreq->mnt_id <= MNT_UNIQUE_ID_OFFSET) 5545 return -EINVAL; 5546 return 0; 5547 } 5548 5549 /* 5550 * If the user requested a specific mount namespace id, look that up and return 5551 * that, or if not simply grab a passive reference on our mount namespace and 5552 * return that. 5553 */ 5554 static struct mnt_namespace *grab_requested_mnt_ns(const struct mnt_id_req *kreq) 5555 { 5556 struct mnt_namespace *mnt_ns; 5557 5558 if (kreq->mnt_ns_id && kreq->spare) 5559 return ERR_PTR(-EINVAL); 5560 5561 if (kreq->mnt_ns_id) 5562 return lookup_mnt_ns(kreq->mnt_ns_id); 5563 5564 if (kreq->spare) { 5565 struct ns_common *ns; 5566 5567 CLASS(fd, f)(kreq->spare); 5568 if (fd_empty(f)) 5569 return ERR_PTR(-EBADF); 5570 5571 if (!proc_ns_file(fd_file(f))) 5572 return ERR_PTR(-EINVAL); 5573 5574 ns = get_proc_ns(file_inode(fd_file(f))); 5575 if (ns->ops->type != CLONE_NEWNS) 5576 return ERR_PTR(-EINVAL); 5577 5578 mnt_ns = to_mnt_ns(ns); 5579 } else { 5580 mnt_ns = current->nsproxy->mnt_ns; 5581 } 5582 5583 refcount_inc(&mnt_ns->passive); 5584 return mnt_ns; 5585 } 5586 5587 SYSCALL_DEFINE4(statmount, const struct mnt_id_req __user *, req, 5588 struct statmount __user *, buf, size_t, bufsize, 5589 unsigned int, flags) 5590 { 5591 struct mnt_namespace *ns __free(mnt_ns_release) = NULL; 5592 struct kstatmount *ks __free(kfree) = NULL; 5593 struct mnt_id_req kreq; 5594 /* We currently support retrieval of 3 strings. 
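 *
 * An illustrative caller (userspace sketch; id is assumed to be a 64-bit
 * unique mount id, e.g. obtained via listmount() below):
 *
 *	struct mnt_id_req req = {
 *		.size = MNT_ID_REQ_SIZE_VER0,
 *		.mnt_id = id,
 *		.param = STATMOUNT_SB_BASIC | STATMOUNT_MNT_POINT,
 *	};
 *	char buf[4096];
 *
 *	statmount(&req, (struct statmount *)buf, sizeof(buf), 0);
 *
 * Such a caller sees EOVERFLOW when the buffer is too small for the result.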
*/ 5595 size_t seq_size = 3 * PATH_MAX; 5596 int ret; 5597 5598 if (flags) 5599 return -EINVAL; 5600 5601 ret = copy_mnt_id_req(req, &kreq); 5602 if (ret) 5603 return ret; 5604 5605 ns = grab_requested_mnt_ns(&kreq); 5606 if (!ns) 5607 return -ENOENT; 5608 5609 if (kreq.mnt_ns_id && (ns != current->nsproxy->mnt_ns) && 5610 !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN)) 5611 return -ENOENT; 5612 5613 ks = kmalloc(sizeof(*ks), GFP_KERNEL_ACCOUNT); 5614 if (!ks) 5615 return -ENOMEM; 5616 5617 retry: 5618 ret = prepare_kstatmount(ks, &kreq, buf, bufsize, seq_size); 5619 if (ret) 5620 return ret; 5621 5622 scoped_guard(rwsem_read, &namespace_sem) 5623 ret = do_statmount(ks, kreq.mnt_id, kreq.mnt_ns_id, ns); 5624 5625 if (!ret) 5626 ret = copy_statmount_to_user(ks); 5627 kvfree(ks->seq.buf); 5628 if (retry_statmount(ret, &seq_size)) 5629 goto retry; 5630 return ret; 5631 } 5632 5633 static ssize_t do_listmount(struct mnt_namespace *ns, u64 mnt_parent_id, 5634 u64 last_mnt_id, u64 *mnt_ids, size_t nr_mnt_ids, 5635 bool reverse) 5636 { 5637 struct path root __free(path_put) = {}; 5638 struct path orig; 5639 struct mount *r, *first; 5640 ssize_t ret; 5641 5642 rwsem_assert_held(&namespace_sem); 5643 5644 ret = grab_requested_root(ns, &root); 5645 if (ret) 5646 return ret; 5647 5648 if (mnt_parent_id == LSMT_ROOT) { 5649 orig = root; 5650 } else { 5651 orig.mnt = lookup_mnt_in_ns(mnt_parent_id, ns); 5652 if (!orig.mnt) 5653 return -ENOENT; 5654 orig.dentry = orig.mnt->mnt_root; 5655 } 5656 5657 /* 5658 * Don't trigger audit denials. We just want to determine what 5659 * mounts to show users. 5660 */ 5661 if (!is_path_reachable(real_mount(orig.mnt), orig.dentry, &root) && 5662 !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN)) 5663 return -EPERM; 5664 5665 ret = security_sb_statfs(orig.dentry); 5666 if (ret) 5667 return ret; 5668 5669 if (!last_mnt_id) { 5670 if (reverse) 5671 first = node_to_mount(ns->mnt_last_node); 5672 else 5673 first = node_to_mount(ns->mnt_first_node); 5674 } else { 5675 if (reverse) 5676 first = mnt_find_id_at_reverse(ns, last_mnt_id - 1); 5677 else 5678 first = mnt_find_id_at(ns, last_mnt_id + 1); 5679 } 5680 5681 for (ret = 0, r = first; r && nr_mnt_ids; r = listmnt_next(r, reverse)) { 5682 if (r->mnt_id_unique == mnt_parent_id) 5683 continue; 5684 if (!is_path_reachable(r, r->mnt.mnt_root, &orig)) 5685 continue; 5686 *mnt_ids = r->mnt_id_unique; 5687 mnt_ids++; 5688 nr_mnt_ids--; 5689 ret++; 5690 } 5691 return ret; 5692 } 5693 5694 SYSCALL_DEFINE4(listmount, const struct mnt_id_req __user *, req, 5695 u64 __user *, mnt_ids, size_t, nr_mnt_ids, unsigned int, flags) 5696 { 5697 u64 *kmnt_ids __free(kvfree) = NULL; 5698 const size_t maxcount = 1000000; 5699 struct mnt_namespace *ns __free(mnt_ns_release) = NULL; 5700 struct mnt_id_req kreq; 5701 u64 last_mnt_id; 5702 ssize_t ret; 5703 5704 if (flags & ~LISTMOUNT_REVERSE) 5705 return -EINVAL; 5706 5707 /* 5708 * If the mount namespace really has more than 1 million mounts the 5709 * caller must iterate over the mount namespace (and reconsider their 5710 * system design...). 5711 */ 5712 if (unlikely(nr_mnt_ids > maxcount)) 5713 return -EOVERFLOW; 5714 5715 if (!access_ok(mnt_ids, nr_mnt_ids * sizeof(*mnt_ids))) 5716 return -EFAULT; 5717 5718 ret = copy_mnt_id_req(req, &kreq); 5719 if (ret) 5720 return ret; 5721 5722 last_mnt_id = kreq.param; 5723 /* The first valid unique mount id is MNT_UNIQUE_ID_OFFSET + 1. 
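 *
 * From userspace the call looks roughly like this (illustrative sketch;
 * req.param carries the resume cursor, i.e. the last mount id already
 * seen, or 0 to start from the beginning):
 *
 *	struct mnt_id_req req = {
 *		.size = MNT_ID_REQ_SIZE_VER0,
 *		.mnt_id = LSMT_ROOT,	// list the whole namespace
 *	};
 *	__u64 ids[256];
 *	ssize_t n = listmount(&req, ids, 256, 0);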
*/ 5724 if (last_mnt_id != 0 && last_mnt_id <= MNT_UNIQUE_ID_OFFSET) 5725 return -EINVAL; 5726 5727 kmnt_ids = kvmalloc_array(nr_mnt_ids, sizeof(*kmnt_ids), 5728 GFP_KERNEL_ACCOUNT); 5729 if (!kmnt_ids) 5730 return -ENOMEM; 5731 5732 ns = grab_requested_mnt_ns(&kreq); 5733 if (!ns) 5734 return -ENOENT; 5735 5736 if (kreq.mnt_ns_id && (ns != current->nsproxy->mnt_ns) && 5737 !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN)) 5738 return -ENOENT; 5739 5740 scoped_guard(rwsem_read, &namespace_sem) 5741 ret = do_listmount(ns, kreq.mnt_id, last_mnt_id, kmnt_ids, 5742 nr_mnt_ids, (flags & LISTMOUNT_REVERSE)); 5743 if (ret <= 0) 5744 return ret; 5745 5746 if (copy_to_user(mnt_ids, kmnt_ids, ret * sizeof(*mnt_ids))) 5747 return -EFAULT; 5748 5749 return ret; 5750 } 5751 5752 static void __init init_mount_tree(void) 5753 { 5754 struct vfsmount *mnt; 5755 struct mount *m; 5756 struct mnt_namespace *ns; 5757 struct path root; 5758 5759 mnt = vfs_kern_mount(&rootfs_fs_type, 0, "rootfs", NULL); 5760 if (IS_ERR(mnt)) 5761 panic("Can't create rootfs"); 5762 5763 ns = alloc_mnt_ns(&init_user_ns, false); 5764 if (IS_ERR(ns)) 5765 panic("Can't allocate initial namespace"); 5766 m = real_mount(mnt); 5767 ns->root = m; 5768 ns->nr_mounts = 1; 5769 mnt_add_to_ns(ns, m); 5770 init_task.nsproxy->mnt_ns = ns; 5771 get_mnt_ns(ns); 5772 5773 root.mnt = mnt; 5774 root.dentry = mnt->mnt_root; 5775 mnt->mnt_flags |= MNT_LOCKED; 5776 5777 set_fs_pwd(current->fs, &root); 5778 set_fs_root(current->fs, &root); 5779 5780 mnt_ns_tree_add(ns); 5781 } 5782 5783 void __init mnt_init(void) 5784 { 5785 int err; 5786 5787 mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount), 5788 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL); 5789 5790 mount_hashtable = alloc_large_system_hash("Mount-cache", 5791 sizeof(struct hlist_head), 5792 mhash_entries, 19, 5793 HASH_ZERO, 5794 &m_hash_shift, &m_hash_mask, 0, 0); 5795 mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache", 5796 sizeof(struct hlist_head), 5797 mphash_entries, 19, 5798 HASH_ZERO, 5799 &mp_hash_shift, &mp_hash_mask, 0, 0); 5800 5801 if (!mount_hashtable || !mountpoint_hashtable) 5802 panic("Failed to allocate mount hash table\n"); 5803 5804 kernfs_init(); 5805 5806 err = sysfs_init(); 5807 if (err) 5808 printk(KERN_WARNING "%s: sysfs_init error: %d\n", 5809 __func__, err); 5810 fs_kobj = kobject_create_and_add("fs", NULL); 5811 if (!fs_kobj) 5812 printk(KERN_WARNING "%s: kobj create error\n", __func__); 5813 shmem_init(); 5814 init_rootfs(); 5815 init_mount_tree(); 5816 } 5817 5818 void put_mnt_ns(struct mnt_namespace *ns) 5819 { 5820 if (!refcount_dec_and_test(&ns->ns.count)) 5821 return; 5822 drop_collected_mounts(&ns->root->mnt); 5823 free_mnt_ns(ns); 5824 } 5825 5826 struct vfsmount *kern_mount(struct file_system_type *type) 5827 { 5828 struct vfsmount *mnt; 5829 mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, NULL); 5830 if (!IS_ERR(mnt)) { 5831 /* 5832 * it is a longterm mount, don't release mnt until 5833 * we unmount before file sys is unregistered 5834 */ 5835 real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL; 5836 } 5837 return mnt; 5838 } 5839 EXPORT_SYMBOL_GPL(kern_mount); 5840 5841 void kern_unmount(struct vfsmount *mnt) 5842 { 5843 /* release long term mount so mount point can be released */ 5844 if (!IS_ERR(mnt)) { 5845 mnt_make_shortterm(mnt); 5846 synchronize_rcu(); /* yecchhh... 
 */
5847 		mntput(mnt);
5848 	}
5849 }
5850 EXPORT_SYMBOL(kern_unmount);
5851 
5852 void kern_unmount_array(struct vfsmount *mnt[], unsigned int num)
5853 {
5854 	unsigned int i;
5855 
5856 	for (i = 0; i < num; i++)
5857 		mnt_make_shortterm(mnt[i]);
5858 	synchronize_rcu_expedited();
5859 	for (i = 0; i < num; i++)
5860 		mntput(mnt[i]);
5861 }
5862 EXPORT_SYMBOL(kern_unmount_array);
5863 
5864 bool our_mnt(struct vfsmount *mnt)
5865 {
5866 	return check_mnt(real_mount(mnt));
5867 }
5868 
5869 bool current_chrooted(void)
5870 {
5871 	/* Does the current process have a non-standard root */
5872 	struct path ns_root;
5873 	struct path fs_root;
5874 	bool chrooted;
5875 
5876 	/* Find the namespace root */
5877 	ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt;
5878 	ns_root.dentry = ns_root.mnt->mnt_root;
5879 	path_get(&ns_root);
5880 	while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root))
5881 		;
5882 
5883 	get_fs_root(current->fs, &fs_root);
5884 
5885 	chrooted = !path_equal(&fs_root, &ns_root);
5886 
5887 	path_put(&fs_root);
5888 	path_put(&ns_root);
5889 
5890 	return chrooted;
5891 }
5892 
5893 static bool mnt_already_visible(struct mnt_namespace *ns,
5894 				const struct super_block *sb,
5895 				int *new_mnt_flags)
5896 {
5897 	int new_flags = *new_mnt_flags;
5898 	struct mount *mnt, *n;
5899 	bool visible = false;
5900 
5901 	down_read(&namespace_sem);
5902 	rbtree_postorder_for_each_entry_safe(mnt, n, &ns->mounts, mnt_node) {
5903 		struct mount *child;
5904 		int mnt_flags;
5905 
5906 		if (mnt->mnt.mnt_sb->s_type != sb->s_type)
5907 			continue;
5908 
5909 		/* This mount is not fully visible if its root directory
5910 		 * is not the root directory of the filesystem.
5911 		 */
5912 		if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
5913 			continue;
5914 
5915 		/* A local view of the mount flags */
5916 		mnt_flags = mnt->mnt.mnt_flags;
5917 
5918 		/* Don't miss readonly hidden in the superblock flags */
5919 		if (sb_rdonly(mnt->mnt.mnt_sb))
5920 			mnt_flags |= MNT_LOCK_READONLY;
5921 
5922 		/* Verify the mount flags are equal to or more permissive
5923 		 * than the proposed new mount.
5924 		 */
5925 		if ((mnt_flags & MNT_LOCK_READONLY) &&
5926 		    !(new_flags & MNT_READONLY))
5927 			continue;
5928 		if ((mnt_flags & MNT_LOCK_ATIME) &&
5929 		    ((mnt_flags & MNT_ATIME_MASK) != (new_flags & MNT_ATIME_MASK)))
5930 			continue;
5931 
5932 		/* This mount is not fully visible if there are any
5933 		 * locked child mounts that cover anything except for
5934 		 * empty directories.
5935 		 */
5936 		list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
5937 			struct inode *inode = child->mnt_mountpoint->d_inode;
5938 			/* Only worry about locked mounts */
5939 			if (!(child->mnt.mnt_flags & MNT_LOCKED))
5940 				continue;
5941 			/* Is the directory permanently empty? */
5942 			if (!is_empty_dir_inode(inode))
5943 				goto next;
5944 		}
5945 		/* Preserve the locked attributes */
5946 		*new_mnt_flags |= mnt_flags & (MNT_LOCK_READONLY | \
5947 					       MNT_LOCK_ATIME);
5948 		visible = true;
5949 		goto found;
5950 	next:	;
5951 	}
5952 found:
5953 	up_read(&namespace_sem);
5954 	return visible;
5955 }
5956 
5957 static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags)
5958 {
5959 	const unsigned long required_iflags = SB_I_NOEXEC | SB_I_NODEV;
5960 	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
5961 	unsigned long s_iflags;
5962 
5963 	if (ns->user_ns == &init_user_ns)
5964 		return false;
5965 
5966 	/* Can this filesystem be too revealing?
*/ 5967 s_iflags = sb->s_iflags; 5968 if (!(s_iflags & SB_I_USERNS_VISIBLE)) 5969 return false; 5970 5971 if ((s_iflags & required_iflags) != required_iflags) { 5972 WARN_ONCE(1, "Expected s_iflags to contain 0x%lx\n", 5973 required_iflags); 5974 return true; 5975 } 5976 5977 return !mnt_already_visible(ns, sb, new_mnt_flags); 5978 } 5979 5980 bool mnt_may_suid(struct vfsmount *mnt) 5981 { 5982 /* 5983 * Foreign mounts (accessed via fchdir or through /proc 5984 * symlinks) are always treated as if they are nosuid. This 5985 * prevents namespaces from trusting potentially unsafe 5986 * suid/sgid bits, file caps, or security labels that originate 5987 * in other namespaces. 5988 */ 5989 return !(mnt->mnt_flags & MNT_NOSUID) && check_mnt(real_mount(mnt)) && 5990 current_in_userns(mnt->mnt_sb->s_user_ns); 5991 } 5992 5993 static struct ns_common *mntns_get(struct task_struct *task) 5994 { 5995 struct ns_common *ns = NULL; 5996 struct nsproxy *nsproxy; 5997 5998 task_lock(task); 5999 nsproxy = task->nsproxy; 6000 if (nsproxy) { 6001 ns = &nsproxy->mnt_ns->ns; 6002 get_mnt_ns(to_mnt_ns(ns)); 6003 } 6004 task_unlock(task); 6005 6006 return ns; 6007 } 6008 6009 static void mntns_put(struct ns_common *ns) 6010 { 6011 put_mnt_ns(to_mnt_ns(ns)); 6012 } 6013 6014 static int mntns_install(struct nsset *nsset, struct ns_common *ns) 6015 { 6016 struct nsproxy *nsproxy = nsset->nsproxy; 6017 struct fs_struct *fs = nsset->fs; 6018 struct mnt_namespace *mnt_ns = to_mnt_ns(ns), *old_mnt_ns; 6019 struct user_namespace *user_ns = nsset->cred->user_ns; 6020 struct path root; 6021 int err; 6022 6023 if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) || 6024 !ns_capable(user_ns, CAP_SYS_CHROOT) || 6025 !ns_capable(user_ns, CAP_SYS_ADMIN)) 6026 return -EPERM; 6027 6028 if (is_anon_ns(mnt_ns)) 6029 return -EINVAL; 6030 6031 if (fs->users != 1) 6032 return -EINVAL; 6033 6034 get_mnt_ns(mnt_ns); 6035 old_mnt_ns = nsproxy->mnt_ns; 6036 nsproxy->mnt_ns = mnt_ns; 6037 6038 /* Find the root */ 6039 err = vfs_path_lookup(mnt_ns->root->mnt.mnt_root, &mnt_ns->root->mnt, 6040 "/", LOOKUP_DOWN, &root); 6041 if (err) { 6042 /* revert to old namespace */ 6043 nsproxy->mnt_ns = old_mnt_ns; 6044 put_mnt_ns(mnt_ns); 6045 return err; 6046 } 6047 6048 put_mnt_ns(old_mnt_ns); 6049 6050 /* Update the pwd and root */ 6051 set_fs_pwd(fs, &root); 6052 set_fs_root(fs, &root); 6053 6054 path_put(&root); 6055 return 0; 6056 } 6057 6058 static struct user_namespace *mntns_owner(struct ns_common *ns) 6059 { 6060 return to_mnt_ns(ns)->user_ns; 6061 } 6062 6063 const struct proc_ns_operations mntns_operations = { 6064 .name = "mnt", 6065 .type = CLONE_NEWNS, 6066 .get = mntns_get, 6067 .put = mntns_put, 6068 .install = mntns_install, 6069 .owner = mntns_owner, 6070 }; 6071 6072 #ifdef CONFIG_SYSCTL 6073 static const struct ctl_table fs_namespace_sysctls[] = { 6074 { 6075 .procname = "mount-max", 6076 .data = &sysctl_mount_max, 6077 .maxlen = sizeof(unsigned int), 6078 .mode = 0644, 6079 .proc_handler = proc_dointvec_minmax, 6080 .extra1 = SYSCTL_ONE, 6081 }, 6082 }; 6083 6084 static int __init init_fs_namespace_sysctls(void) 6085 { 6086 register_sysctl_init("fs", fs_namespace_sysctls); 6087 return 0; 6088 } 6089 fs_initcall(init_fs_namespace_sysctls); 6090 6091 #endif /* CONFIG_SYSCTL */ 6092
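/*
 * Administration note (illustrative, userspace): sysctl_mount_max is
 * exposed as /proc/sys/fs/mount-max and can be raised at runtime, e.g.:
 *
 *	int fd = open("/proc/sys/fs/mount-max", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "200000", 6);
 *		close(fd);
 *	}
 */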