/*
 *  linux/fs/namespace.c
 *
 * (C) Copyright Al Viro 2000, 2001
 *	Released under GPL v2.
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 * Heavily rewritten.
 */

#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/acct.h>
#include <linux/capability.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/sysfs.h>
#include <linux/seq_file.h>
#include <linux/mnt_namespace.h>
#include <linux/namei.h>
#include <linux/nsproxy.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/log2.h>
#include <linux/idr.h>
#include <linux/fs_struct.h>
#include <linux/fsnotify.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include "pnode.h"
#include "internal.h"

#define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head))
#define HASH_SIZE (1UL << HASH_SHIFT)

static int event;
static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);
static DEFINE_SPINLOCK(mnt_id_lock);
static int mnt_id_start = 0;
static int mnt_group_start = 1;

static struct list_head *mount_hashtable __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
static struct rw_semaphore namespace_sem;

/* /sys/fs */
struct kobject *fs_kobj;
EXPORT_SYMBOL_GPL(fs_kobj);

/*
 * vfsmount lock may be taken for read to prevent changes to the
 * vfsmount hash, ie. during mountpoint lookups or walking back
 * up the tree.
 *
 * It should be taken for write in all cases where the vfsmount
 * tree or hash is modified or when a vfsmount structure is modified.
 */
DEFINE_BRLOCK(vfsmount_lock);

static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> HASH_SHIFT);
	return tmp & (HASH_SIZE - 1);
}

#define MNT_WRITER_UNDERFLOW_LIMIT -(1<<16)
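/*
 * A minimal sketch of the locking rule described above, kept in a disabled
 * block for illustration only: readers of the mount hash take vfsmount_lock
 * for read, and any modification of the hash or tree takes it for write.
 * The helper name below is hypothetical and is not used in this file.
 */
#if 0
static bool example_dentry_has_mount(struct vfsmount *mnt, struct dentry *dentry)
{
	struct vfsmount *p;
	bool mounted = false;

	br_read_lock(vfsmount_lock);
	list_for_each_entry(p, mount_hashtable + hash(mnt, dentry), mnt_hash) {
		if (p->mnt_parent == mnt && p->mnt_mountpoint == dentry) {
			mounted = true;
			break;
		}
	}
	br_read_unlock(vfsmount_lock);
	return mounted;
}
#endif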
/*
 * allocation is serialized by namespace_sem, but we need the spinlock to
 * serialize with freeing.
 */
static int mnt_alloc_id(struct vfsmount *mnt)
{
	int res;

retry:
	ida_pre_get(&mnt_id_ida, GFP_KERNEL);
	spin_lock(&mnt_id_lock);
	res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id);
	if (!res)
		mnt_id_start = mnt->mnt_id + 1;
	spin_unlock(&mnt_id_lock);
	if (res == -EAGAIN)
		goto retry;

	return res;
}

static void mnt_free_id(struct vfsmount *mnt)
{
	int id = mnt->mnt_id;
	spin_lock(&mnt_id_lock);
	ida_remove(&mnt_id_ida, id);
	if (mnt_id_start > id)
		mnt_id_start = id;
	spin_unlock(&mnt_id_lock);
}

/*
 * Allocate a new peer group ID
 *
 * mnt_group_ida is protected by namespace_sem
 */
static int mnt_alloc_group_id(struct vfsmount *mnt)
{
	int res;

	if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL))
		return -ENOMEM;

	res = ida_get_new_above(&mnt_group_ida,
				mnt_group_start,
				&mnt->mnt_group_id);
	if (!res)
		mnt_group_start = mnt->mnt_group_id + 1;

	return res;
}

/*
 * Release a peer group ID
 */
void mnt_release_group_id(struct vfsmount *mnt)
{
	int id = mnt->mnt_group_id;
	ida_remove(&mnt_group_ida, id);
	if (mnt_group_start > id)
		mnt_group_start = id;
	mnt->mnt_group_id = 0;
}

struct vfsmount *alloc_vfsmnt(const char *name)
{
	struct vfsmount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		int err;

		err = mnt_alloc_id(mnt);
		if (err)
			goto out_free_cache;

		if (name) {
			mnt->mnt_devname = kstrdup(name, GFP_KERNEL);
			if (!mnt->mnt_devname)
				goto out_free_id;
		}

		atomic_set(&mnt->mnt_count, 1);
		INIT_LIST_HEAD(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
		INIT_LIST_HEAD(&mnt->mnt_slave);
#ifdef CONFIG_FSNOTIFY
		INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks);
#endif
#ifdef CONFIG_SMP
		mnt->mnt_writers = alloc_percpu(int);
		if (!mnt->mnt_writers)
			goto out_free_devname;
#else
		mnt->mnt_writers = 0;
#endif
	}
	return mnt;

#ifdef CONFIG_SMP
out_free_devname:
	kfree(mnt->mnt_devname);
#endif
out_free_id:
	mnt_free_id(mnt);
out_free_cache:
	kmem_cache_free(mnt_cache, mnt);
	return NULL;
}

/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/*
 * __mnt_is_readonly: check whether a mount is read-only
 * @mnt: the mount to check for its write status
 *
 * This shouldn't be used directly outside of the VFS.
 * It does not guarantee that the filesystem will stay
 * r/w, just that it is right *now*. This can not and
 * should not be used in place of IS_RDONLY(inode).
 * mnt_want/drop_write() will _keep_ the filesystem
 * r/w.
 */
int __mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_flags & MNT_READONLY)
		return 1;
	if (mnt->mnt_sb->s_flags & MS_RDONLY)
		return 1;
	return 0;
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);

static inline void inc_mnt_writers(struct vfsmount *mnt)
{
#ifdef CONFIG_SMP
	(*per_cpu_ptr(mnt->mnt_writers, smp_processor_id()))++;
#else
	mnt->mnt_writers++;
#endif
}

static inline void dec_mnt_writers(struct vfsmount *mnt)
{
#ifdef CONFIG_SMP
	(*per_cpu_ptr(mnt->mnt_writers, smp_processor_id()))--;
#else
	mnt->mnt_writers--;
#endif
}

static unsigned int count_mnt_writers(struct vfsmount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += *per_cpu_ptr(mnt->mnt_writers, cpu);
	}

	return count;
#else
	return mnt->mnt_writers;
#endif
}

/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/**
 * mnt_want_write - get write access to a mount
 * @mnt: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is
 * about to be performed to it, and makes sure that
 * writes are allowed before returning success. When
 * the write operation is finished, mnt_drop_write()
 * must be called. This is effectively a refcount.
 */
int mnt_want_write(struct vfsmount *mnt)
{
	int ret = 0;

	preempt_disable();
	inc_mnt_writers(mnt);
	/*
	 * The store to inc_mnt_writers must be visible before we pass
	 * MNT_WRITE_HOLD loop below, so that the slowpath can see our
	 * incremented count after it has set MNT_WRITE_HOLD.
	 */
	smp_mb();
	while (mnt->mnt_flags & MNT_WRITE_HOLD)
		cpu_relax();
	/*
	 * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
	 * be set to match its requirements. So we must not load that until
	 * MNT_WRITE_HOLD is cleared.
	 */
	smp_rmb();
	if (__mnt_is_readonly(mnt)) {
		dec_mnt_writers(mnt);
		ret = -EROFS;
		goto out;
	}
out:
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);
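/*
 * A short usage sketch, disabled and illustrative only: a caller that is
 * about to write through a vfsmount brackets the operation with
 * mnt_want_write()/mnt_drop_write().  The helper names used below that are
 * not defined in this file are hypothetical.
 */
#if 0
static int example_write_op(struct path *path)
{
	int err;

	err = mnt_want_write(path->mnt);
	if (err)
		return err;	/* e.g. -EROFS on a read-only mount */
	err = example_modify_dentry(path->dentry);	/* hypothetical */
	mnt_drop_write(path->mnt);
	return err;
}
#endif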
/**
 * mnt_clone_write - get write access to a mount
 * @mnt: the mount on which to take a write
 *
 * This is effectively like mnt_want_write, except
 * it must only be used to take an extra write reference
 * on a mountpoint that we already know has a write reference
 * on it. This allows some optimisation.
 *
 * When finished, mnt_drop_write must be called as usual to
 * drop the reference.
 */
int mnt_clone_write(struct vfsmount *mnt)
{
	/* superblock may be r/o */
	if (__mnt_is_readonly(mnt))
		return -EROFS;
	preempt_disable();
	inc_mnt_writers(mnt);
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL_GPL(mnt_clone_write);

/**
 * mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount to take a write on
 *
 * This is like mnt_want_write, but it takes a file and can
 * do some optimisations if the file is open for write already
 */
int mnt_want_write_file(struct file *file)
{
	struct inode *inode = file->f_dentry->d_inode;
	if (!(file->f_mode & FMODE_WRITE) || special_file(inode->i_mode))
		return mnt_want_write(file->f_path.mnt);
	else
		return mnt_clone_write(file->f_path.mnt);
}
EXPORT_SYMBOL_GPL(mnt_want_write_file);

/**
 * mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done
 * performing writes to it.  Must be matched with
 * mnt_want_write() call above.
 */
void mnt_drop_write(struct vfsmount *mnt)
{
	preempt_disable();
	dec_mnt_writers(mnt);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(mnt_drop_write);

static int mnt_make_readonly(struct vfsmount *mnt)
{
	int ret = 0;

	br_write_lock(vfsmount_lock);
	mnt->mnt_flags |= MNT_WRITE_HOLD;
	/*
	 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
	 * should be visible before we do.
	 */
	smp_mb();

	/*
	 * With writers on hold, if this value is zero, then there are
	 * definitely no active writers (although held writers may subsequently
	 * increment the count, they'll have to wait, and decrement it after
	 * seeing MNT_READONLY).
	 *
	 * It is OK to have counter incremented on one CPU and decremented on
	 * another: the sum will add up correctly. The danger would be when we
	 * sum up each counter, if we read a counter before it is incremented,
	 * but then read another CPU's count which it has been subsequently
	 * decremented from -- we would see more decrements than we should.
	 * MNT_WRITE_HOLD protects against this scenario, because
	 * mnt_want_write first increments count, then smp_mb, then spins on
	 * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
	 * we're counting up here.
	 */
	if (count_mnt_writers(mnt) > 0)
		ret = -EBUSY;
	else
		mnt->mnt_flags |= MNT_READONLY;
	/*
	 * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
	 * that become unheld will see MNT_READONLY.
	 */
	smp_wmb();
	mnt->mnt_flags &= ~MNT_WRITE_HOLD;
	br_write_unlock(vfsmount_lock);
	return ret;
}

static void __mnt_unmake_readonly(struct vfsmount *mnt)
{
	br_write_lock(vfsmount_lock);
	mnt->mnt_flags &= ~MNT_READONLY;
	br_write_unlock(vfsmount_lock);
}

void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb)
{
	mnt->mnt_sb = sb;
	mnt->mnt_root = dget(sb->s_root);
}

EXPORT_SYMBOL(simple_set_mnt);

void free_vfsmnt(struct vfsmount *mnt)
{
	kfree(mnt->mnt_devname);
	mnt_free_id(mnt);
#ifdef CONFIG_SMP
	free_percpu(mnt->mnt_writers);
#endif
	kmem_cache_free(mnt_cache, mnt);
}

/*
 * find the first or last mount at @dentry on vfsmount @mnt depending on
 * @dir.
If @dir is set return the first mount else return the last mount. 430 * vfsmount_lock must be held for read or write. 431 */ 432 struct vfsmount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry, 433 int dir) 434 { 435 struct list_head *head = mount_hashtable + hash(mnt, dentry); 436 struct list_head *tmp = head; 437 struct vfsmount *p, *found = NULL; 438 439 for (;;) { 440 tmp = dir ? tmp->next : tmp->prev; 441 p = NULL; 442 if (tmp == head) 443 break; 444 p = list_entry(tmp, struct vfsmount, mnt_hash); 445 if (p->mnt_parent == mnt && p->mnt_mountpoint == dentry) { 446 found = p; 447 break; 448 } 449 } 450 return found; 451 } 452 453 /* 454 * lookup_mnt increments the ref count before returning 455 * the vfsmount struct. 456 */ 457 struct vfsmount *lookup_mnt(struct path *path) 458 { 459 struct vfsmount *child_mnt; 460 461 br_read_lock(vfsmount_lock); 462 if ((child_mnt = __lookup_mnt(path->mnt, path->dentry, 1))) 463 mntget(child_mnt); 464 br_read_unlock(vfsmount_lock); 465 return child_mnt; 466 } 467 468 static inline int check_mnt(struct vfsmount *mnt) 469 { 470 return mnt->mnt_ns == current->nsproxy->mnt_ns; 471 } 472 473 /* 474 * vfsmount lock must be held for write 475 */ 476 static void touch_mnt_namespace(struct mnt_namespace *ns) 477 { 478 if (ns) { 479 ns->event = ++event; 480 wake_up_interruptible(&ns->poll); 481 } 482 } 483 484 /* 485 * vfsmount lock must be held for write 486 */ 487 static void __touch_mnt_namespace(struct mnt_namespace *ns) 488 { 489 if (ns && ns->event != event) { 490 ns->event = event; 491 wake_up_interruptible(&ns->poll); 492 } 493 } 494 495 /* 496 * vfsmount lock must be held for write 497 */ 498 static void detach_mnt(struct vfsmount *mnt, struct path *old_path) 499 { 500 old_path->dentry = mnt->mnt_mountpoint; 501 old_path->mnt = mnt->mnt_parent; 502 mnt->mnt_parent = mnt; 503 mnt->mnt_mountpoint = mnt->mnt_root; 504 list_del_init(&mnt->mnt_child); 505 list_del_init(&mnt->mnt_hash); 506 old_path->dentry->d_mounted--; 507 } 508 509 /* 510 * vfsmount lock must be held for write 511 */ 512 void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry, 513 struct vfsmount *child_mnt) 514 { 515 child_mnt->mnt_parent = mntget(mnt); 516 child_mnt->mnt_mountpoint = dget(dentry); 517 dentry->d_mounted++; 518 } 519 520 /* 521 * vfsmount lock must be held for write 522 */ 523 static void attach_mnt(struct vfsmount *mnt, struct path *path) 524 { 525 mnt_set_mountpoint(path->mnt, path->dentry, mnt); 526 list_add_tail(&mnt->mnt_hash, mount_hashtable + 527 hash(path->mnt, path->dentry)); 528 list_add_tail(&mnt->mnt_child, &path->mnt->mnt_mounts); 529 } 530 531 /* 532 * vfsmount lock must be held for write 533 */ 534 static void commit_tree(struct vfsmount *mnt) 535 { 536 struct vfsmount *parent = mnt->mnt_parent; 537 struct vfsmount *m; 538 LIST_HEAD(head); 539 struct mnt_namespace *n = parent->mnt_ns; 540 541 BUG_ON(parent == mnt); 542 543 list_add_tail(&head, &mnt->mnt_list); 544 list_for_each_entry(m, &head, mnt_list) 545 m->mnt_ns = n; 546 list_splice(&head, n->list.prev); 547 548 list_add_tail(&mnt->mnt_hash, mount_hashtable + 549 hash(parent, mnt->mnt_mountpoint)); 550 list_add_tail(&mnt->mnt_child, &parent->mnt_mounts); 551 touch_mnt_namespace(n); 552 } 553 554 static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root) 555 { 556 struct list_head *next = p->mnt_mounts.next; 557 if (next == &p->mnt_mounts) { 558 while (1) { 559 if (p == root) 560 return NULL; 561 next = p->mnt_child.next; 562 if (next != &p->mnt_parent->mnt_mounts) 
563 break; 564 p = p->mnt_parent; 565 } 566 } 567 return list_entry(next, struct vfsmount, mnt_child); 568 } 569 570 static struct vfsmount *skip_mnt_tree(struct vfsmount *p) 571 { 572 struct list_head *prev = p->mnt_mounts.prev; 573 while (prev != &p->mnt_mounts) { 574 p = list_entry(prev, struct vfsmount, mnt_child); 575 prev = p->mnt_mounts.prev; 576 } 577 return p; 578 } 579 580 static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root, 581 int flag) 582 { 583 struct super_block *sb = old->mnt_sb; 584 struct vfsmount *mnt = alloc_vfsmnt(old->mnt_devname); 585 586 if (mnt) { 587 if (flag & (CL_SLAVE | CL_PRIVATE)) 588 mnt->mnt_group_id = 0; /* not a peer of original */ 589 else 590 mnt->mnt_group_id = old->mnt_group_id; 591 592 if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) { 593 int err = mnt_alloc_group_id(mnt); 594 if (err) 595 goto out_free; 596 } 597 598 mnt->mnt_flags = old->mnt_flags; 599 atomic_inc(&sb->s_active); 600 mnt->mnt_sb = sb; 601 mnt->mnt_root = dget(root); 602 mnt->mnt_mountpoint = mnt->mnt_root; 603 mnt->mnt_parent = mnt; 604 605 if (flag & CL_SLAVE) { 606 list_add(&mnt->mnt_slave, &old->mnt_slave_list); 607 mnt->mnt_master = old; 608 CLEAR_MNT_SHARED(mnt); 609 } else if (!(flag & CL_PRIVATE)) { 610 if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old)) 611 list_add(&mnt->mnt_share, &old->mnt_share); 612 if (IS_MNT_SLAVE(old)) 613 list_add(&mnt->mnt_slave, &old->mnt_slave); 614 mnt->mnt_master = old->mnt_master; 615 } 616 if (flag & CL_MAKE_SHARED) 617 set_mnt_shared(mnt); 618 619 /* stick the duplicate mount on the same expiry list 620 * as the original if that was on one */ 621 if (flag & CL_EXPIRE) { 622 if (!list_empty(&old->mnt_expire)) 623 list_add(&mnt->mnt_expire, &old->mnt_expire); 624 } 625 } 626 return mnt; 627 628 out_free: 629 free_vfsmnt(mnt); 630 return NULL; 631 } 632 633 static inline void __mntput(struct vfsmount *mnt) 634 { 635 struct super_block *sb = mnt->mnt_sb; 636 /* 637 * This probably indicates that somebody messed 638 * up a mnt_want/drop_write() pair. If this 639 * happens, the filesystem was probably unable 640 * to make r/w->r/o transitions. 641 */ 642 /* 643 * atomic_dec_and_lock() used to deal with ->mnt_count decrements 644 * provides barriers, so count_mnt_writers() below is safe. 
AV 645 */ 646 WARN_ON(count_mnt_writers(mnt)); 647 fsnotify_vfsmount_delete(mnt); 648 dput(mnt->mnt_root); 649 free_vfsmnt(mnt); 650 deactivate_super(sb); 651 } 652 653 void mntput_no_expire(struct vfsmount *mnt) 654 { 655 repeat: 656 if (atomic_add_unless(&mnt->mnt_count, -1, 1)) 657 return; 658 br_write_lock(vfsmount_lock); 659 if (!atomic_dec_and_test(&mnt->mnt_count)) { 660 br_write_unlock(vfsmount_lock); 661 return; 662 } 663 if (likely(!mnt->mnt_pinned)) { 664 br_write_unlock(vfsmount_lock); 665 __mntput(mnt); 666 return; 667 } 668 atomic_add(mnt->mnt_pinned + 1, &mnt->mnt_count); 669 mnt->mnt_pinned = 0; 670 br_write_unlock(vfsmount_lock); 671 acct_auto_close_mnt(mnt); 672 goto repeat; 673 } 674 EXPORT_SYMBOL(mntput_no_expire); 675 676 void mnt_pin(struct vfsmount *mnt) 677 { 678 br_write_lock(vfsmount_lock); 679 mnt->mnt_pinned++; 680 br_write_unlock(vfsmount_lock); 681 } 682 683 EXPORT_SYMBOL(mnt_pin); 684 685 void mnt_unpin(struct vfsmount *mnt) 686 { 687 br_write_lock(vfsmount_lock); 688 if (mnt->mnt_pinned) { 689 atomic_inc(&mnt->mnt_count); 690 mnt->mnt_pinned--; 691 } 692 br_write_unlock(vfsmount_lock); 693 } 694 695 EXPORT_SYMBOL(mnt_unpin); 696 697 static inline void mangle(struct seq_file *m, const char *s) 698 { 699 seq_escape(m, s, " \t\n\\"); 700 } 701 702 /* 703 * Simple .show_options callback for filesystems which don't want to 704 * implement more complex mount option showing. 705 * 706 * See also save_mount_options(). 707 */ 708 int generic_show_options(struct seq_file *m, struct vfsmount *mnt) 709 { 710 const char *options; 711 712 rcu_read_lock(); 713 options = rcu_dereference(mnt->mnt_sb->s_options); 714 715 if (options != NULL && options[0]) { 716 seq_putc(m, ','); 717 mangle(m, options); 718 } 719 rcu_read_unlock(); 720 721 return 0; 722 } 723 EXPORT_SYMBOL(generic_show_options); 724 725 /* 726 * If filesystem uses generic_show_options(), this function should be 727 * called from the fill_super() callback. 728 * 729 * The .remount_fs callback usually needs to be handled in a special 730 * way, to make sure, that previous options are not overwritten if the 731 * remount fails. 732 * 733 * Also note, that if the filesystem's .remount_fs function doesn't 734 * reset all options to their default value, but changes only newly 735 * given options, then the displayed options will not reflect reality 736 * any more. 
737 */ 738 void save_mount_options(struct super_block *sb, char *options) 739 { 740 BUG_ON(sb->s_options); 741 rcu_assign_pointer(sb->s_options, kstrdup(options, GFP_KERNEL)); 742 } 743 EXPORT_SYMBOL(save_mount_options); 744 745 void replace_mount_options(struct super_block *sb, char *options) 746 { 747 char *old = sb->s_options; 748 rcu_assign_pointer(sb->s_options, options); 749 if (old) { 750 synchronize_rcu(); 751 kfree(old); 752 } 753 } 754 EXPORT_SYMBOL(replace_mount_options); 755 756 #ifdef CONFIG_PROC_FS 757 /* iterator */ 758 static void *m_start(struct seq_file *m, loff_t *pos) 759 { 760 struct proc_mounts *p = m->private; 761 762 down_read(&namespace_sem); 763 return seq_list_start(&p->ns->list, *pos); 764 } 765 766 static void *m_next(struct seq_file *m, void *v, loff_t *pos) 767 { 768 struct proc_mounts *p = m->private; 769 770 return seq_list_next(v, &p->ns->list, pos); 771 } 772 773 static void m_stop(struct seq_file *m, void *v) 774 { 775 up_read(&namespace_sem); 776 } 777 778 int mnt_had_events(struct proc_mounts *p) 779 { 780 struct mnt_namespace *ns = p->ns; 781 int res = 0; 782 783 br_read_lock(vfsmount_lock); 784 if (p->event != ns->event) { 785 p->event = ns->event; 786 res = 1; 787 } 788 br_read_unlock(vfsmount_lock); 789 790 return res; 791 } 792 793 struct proc_fs_info { 794 int flag; 795 const char *str; 796 }; 797 798 static int show_sb_opts(struct seq_file *m, struct super_block *sb) 799 { 800 static const struct proc_fs_info fs_info[] = { 801 { MS_SYNCHRONOUS, ",sync" }, 802 { MS_DIRSYNC, ",dirsync" }, 803 { MS_MANDLOCK, ",mand" }, 804 { 0, NULL } 805 }; 806 const struct proc_fs_info *fs_infop; 807 808 for (fs_infop = fs_info; fs_infop->flag; fs_infop++) { 809 if (sb->s_flags & fs_infop->flag) 810 seq_puts(m, fs_infop->str); 811 } 812 813 return security_sb_show_options(m, sb); 814 } 815 816 static void show_mnt_opts(struct seq_file *m, struct vfsmount *mnt) 817 { 818 static const struct proc_fs_info mnt_info[] = { 819 { MNT_NOSUID, ",nosuid" }, 820 { MNT_NODEV, ",nodev" }, 821 { MNT_NOEXEC, ",noexec" }, 822 { MNT_NOATIME, ",noatime" }, 823 { MNT_NODIRATIME, ",nodiratime" }, 824 { MNT_RELATIME, ",relatime" }, 825 { 0, NULL } 826 }; 827 const struct proc_fs_info *fs_infop; 828 829 for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) { 830 if (mnt->mnt_flags & fs_infop->flag) 831 seq_puts(m, fs_infop->str); 832 } 833 } 834 835 static void show_type(struct seq_file *m, struct super_block *sb) 836 { 837 mangle(m, sb->s_type->name); 838 if (sb->s_subtype && sb->s_subtype[0]) { 839 seq_putc(m, '.'); 840 mangle(m, sb->s_subtype); 841 } 842 } 843 844 static int show_vfsmnt(struct seq_file *m, void *v) 845 { 846 struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list); 847 int err = 0; 848 struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt }; 849 850 mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none"); 851 seq_putc(m, ' '); 852 seq_path(m, &mnt_path, " \t\n\\"); 853 seq_putc(m, ' '); 854 show_type(m, mnt->mnt_sb); 855 seq_puts(m, __mnt_is_readonly(mnt) ? 
" ro" : " rw"); 856 err = show_sb_opts(m, mnt->mnt_sb); 857 if (err) 858 goto out; 859 show_mnt_opts(m, mnt); 860 if (mnt->mnt_sb->s_op->show_options) 861 err = mnt->mnt_sb->s_op->show_options(m, mnt); 862 seq_puts(m, " 0 0\n"); 863 out: 864 return err; 865 } 866 867 const struct seq_operations mounts_op = { 868 .start = m_start, 869 .next = m_next, 870 .stop = m_stop, 871 .show = show_vfsmnt 872 }; 873 874 static int show_mountinfo(struct seq_file *m, void *v) 875 { 876 struct proc_mounts *p = m->private; 877 struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list); 878 struct super_block *sb = mnt->mnt_sb; 879 struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt }; 880 struct path root = p->root; 881 int err = 0; 882 883 seq_printf(m, "%i %i %u:%u ", mnt->mnt_id, mnt->mnt_parent->mnt_id, 884 MAJOR(sb->s_dev), MINOR(sb->s_dev)); 885 seq_dentry(m, mnt->mnt_root, " \t\n\\"); 886 seq_putc(m, ' '); 887 seq_path_root(m, &mnt_path, &root, " \t\n\\"); 888 if (root.mnt != p->root.mnt || root.dentry != p->root.dentry) { 889 /* 890 * Mountpoint is outside root, discard that one. Ugly, 891 * but less so than trying to do that in iterator in a 892 * race-free way (due to renames). 893 */ 894 return SEQ_SKIP; 895 } 896 seq_puts(m, mnt->mnt_flags & MNT_READONLY ? " ro" : " rw"); 897 show_mnt_opts(m, mnt); 898 899 /* Tagged fields ("foo:X" or "bar") */ 900 if (IS_MNT_SHARED(mnt)) 901 seq_printf(m, " shared:%i", mnt->mnt_group_id); 902 if (IS_MNT_SLAVE(mnt)) { 903 int master = mnt->mnt_master->mnt_group_id; 904 int dom = get_dominating_id(mnt, &p->root); 905 seq_printf(m, " master:%i", master); 906 if (dom && dom != master) 907 seq_printf(m, " propagate_from:%i", dom); 908 } 909 if (IS_MNT_UNBINDABLE(mnt)) 910 seq_puts(m, " unbindable"); 911 912 /* Filesystem specific data */ 913 seq_puts(m, " - "); 914 show_type(m, sb); 915 seq_putc(m, ' '); 916 mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none"); 917 seq_puts(m, sb->s_flags & MS_RDONLY ? " ro" : " rw"); 918 err = show_sb_opts(m, sb); 919 if (err) 920 goto out; 921 if (sb->s_op->show_options) 922 err = sb->s_op->show_options(m, mnt); 923 seq_putc(m, '\n'); 924 out: 925 return err; 926 } 927 928 const struct seq_operations mountinfo_op = { 929 .start = m_start, 930 .next = m_next, 931 .stop = m_stop, 932 .show = show_mountinfo, 933 }; 934 935 static int show_vfsstat(struct seq_file *m, void *v) 936 { 937 struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list); 938 struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt }; 939 int err = 0; 940 941 /* device */ 942 if (mnt->mnt_devname) { 943 seq_puts(m, "device "); 944 mangle(m, mnt->mnt_devname); 945 } else 946 seq_puts(m, "no device"); 947 948 /* mount point */ 949 seq_puts(m, " mounted on "); 950 seq_path(m, &mnt_path, " \t\n\\"); 951 seq_putc(m, ' '); 952 953 /* file system type */ 954 seq_puts(m, "with fstype "); 955 show_type(m, mnt->mnt_sb); 956 957 /* optional statistics */ 958 if (mnt->mnt_sb->s_op->show_stats) { 959 seq_putc(m, ' '); 960 err = mnt->mnt_sb->s_op->show_stats(m, mnt); 961 } 962 963 seq_putc(m, '\n'); 964 return err; 965 } 966 967 const struct seq_operations mountstats_op = { 968 .start = m_start, 969 .next = m_next, 970 .stop = m_stop, 971 .show = show_vfsstat, 972 }; 973 #endif /* CONFIG_PROC_FS */ 974 975 /** 976 * may_umount_tree - check if a mount tree is busy 977 * @mnt: root of mount tree 978 * 979 * This is called to check if a tree of mounts has any 980 * open files, pwds, chroots or sub mounts that are 981 * busy. 
982 */ 983 int may_umount_tree(struct vfsmount *mnt) 984 { 985 int actual_refs = 0; 986 int minimum_refs = 0; 987 struct vfsmount *p; 988 989 br_read_lock(vfsmount_lock); 990 for (p = mnt; p; p = next_mnt(p, mnt)) { 991 actual_refs += atomic_read(&p->mnt_count); 992 minimum_refs += 2; 993 } 994 br_read_unlock(vfsmount_lock); 995 996 if (actual_refs > minimum_refs) 997 return 0; 998 999 return 1; 1000 } 1001 1002 EXPORT_SYMBOL(may_umount_tree); 1003 1004 /** 1005 * may_umount - check if a mount point is busy 1006 * @mnt: root of mount 1007 * 1008 * This is called to check if a mount point has any 1009 * open files, pwds, chroots or sub mounts. If the 1010 * mount has sub mounts this will return busy 1011 * regardless of whether the sub mounts are busy. 1012 * 1013 * Doesn't take quota and stuff into account. IOW, in some cases it will 1014 * give false negatives. The main reason why it's here is that we need 1015 * a non-destructive way to look for easily umountable filesystems. 1016 */ 1017 int may_umount(struct vfsmount *mnt) 1018 { 1019 int ret = 1; 1020 down_read(&namespace_sem); 1021 br_read_lock(vfsmount_lock); 1022 if (propagate_mount_busy(mnt, 2)) 1023 ret = 0; 1024 br_read_unlock(vfsmount_lock); 1025 up_read(&namespace_sem); 1026 return ret; 1027 } 1028 1029 EXPORT_SYMBOL(may_umount); 1030 1031 void release_mounts(struct list_head *head) 1032 { 1033 struct vfsmount *mnt; 1034 while (!list_empty(head)) { 1035 mnt = list_first_entry(head, struct vfsmount, mnt_hash); 1036 list_del_init(&mnt->mnt_hash); 1037 if (mnt->mnt_parent != mnt) { 1038 struct dentry *dentry; 1039 struct vfsmount *m; 1040 1041 br_write_lock(vfsmount_lock); 1042 dentry = mnt->mnt_mountpoint; 1043 m = mnt->mnt_parent; 1044 mnt->mnt_mountpoint = mnt->mnt_root; 1045 mnt->mnt_parent = mnt; 1046 m->mnt_ghosts--; 1047 br_write_unlock(vfsmount_lock); 1048 dput(dentry); 1049 mntput(m); 1050 } 1051 mntput(mnt); 1052 } 1053 } 1054 1055 /* 1056 * vfsmount lock must be held for write 1057 * namespace_sem must be held for write 1058 */ 1059 void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill) 1060 { 1061 struct vfsmount *p; 1062 1063 for (p = mnt; p; p = next_mnt(p, mnt)) 1064 list_move(&p->mnt_hash, kill); 1065 1066 if (propagate) 1067 propagate_umount(kill); 1068 1069 list_for_each_entry(p, kill, mnt_hash) { 1070 list_del_init(&p->mnt_expire); 1071 list_del_init(&p->mnt_list); 1072 __touch_mnt_namespace(p->mnt_ns); 1073 p->mnt_ns = NULL; 1074 list_del_init(&p->mnt_child); 1075 if (p->mnt_parent != p) { 1076 p->mnt_parent->mnt_ghosts++; 1077 p->mnt_mountpoint->d_mounted--; 1078 } 1079 change_mnt_propagation(p, MS_PRIVATE); 1080 } 1081 } 1082 1083 static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts); 1084 1085 static int do_umount(struct vfsmount *mnt, int flags) 1086 { 1087 struct super_block *sb = mnt->mnt_sb; 1088 int retval; 1089 LIST_HEAD(umount_list); 1090 1091 retval = security_sb_umount(mnt, flags); 1092 if (retval) 1093 return retval; 1094 1095 /* 1096 * Allow userspace to request a mountpoint be expired rather than 1097 * unmounting unconditionally. 
Unmount only happens if: 1098 * (1) the mark is already set (the mark is cleared by mntput()) 1099 * (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount] 1100 */ 1101 if (flags & MNT_EXPIRE) { 1102 if (mnt == current->fs->root.mnt || 1103 flags & (MNT_FORCE | MNT_DETACH)) 1104 return -EINVAL; 1105 1106 if (atomic_read(&mnt->mnt_count) != 2) 1107 return -EBUSY; 1108 1109 if (!xchg(&mnt->mnt_expiry_mark, 1)) 1110 return -EAGAIN; 1111 } 1112 1113 /* 1114 * If we may have to abort operations to get out of this 1115 * mount, and they will themselves hold resources we must 1116 * allow the fs to do things. In the Unix tradition of 1117 * 'Gee thats tricky lets do it in userspace' the umount_begin 1118 * might fail to complete on the first run through as other tasks 1119 * must return, and the like. Thats for the mount program to worry 1120 * about for the moment. 1121 */ 1122 1123 if (flags & MNT_FORCE && sb->s_op->umount_begin) { 1124 sb->s_op->umount_begin(sb); 1125 } 1126 1127 /* 1128 * No sense to grab the lock for this test, but test itself looks 1129 * somewhat bogus. Suggestions for better replacement? 1130 * Ho-hum... In principle, we might treat that as umount + switch 1131 * to rootfs. GC would eventually take care of the old vfsmount. 1132 * Actually it makes sense, especially if rootfs would contain a 1133 * /reboot - static binary that would close all descriptors and 1134 * call reboot(9). Then init(8) could umount root and exec /reboot. 1135 */ 1136 if (mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) { 1137 /* 1138 * Special case for "unmounting" root ... 1139 * we just try to remount it readonly. 1140 */ 1141 down_write(&sb->s_umount); 1142 if (!(sb->s_flags & MS_RDONLY)) 1143 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0); 1144 up_write(&sb->s_umount); 1145 return retval; 1146 } 1147 1148 down_write(&namespace_sem); 1149 br_write_lock(vfsmount_lock); 1150 event++; 1151 1152 if (!(flags & MNT_DETACH)) 1153 shrink_submounts(mnt, &umount_list); 1154 1155 retval = -EBUSY; 1156 if (flags & MNT_DETACH || !propagate_mount_busy(mnt, 2)) { 1157 if (!list_empty(&mnt->mnt_list)) 1158 umount_tree(mnt, 1, &umount_list); 1159 retval = 0; 1160 } 1161 br_write_unlock(vfsmount_lock); 1162 up_write(&namespace_sem); 1163 release_mounts(&umount_list); 1164 return retval; 1165 } 1166 1167 /* 1168 * Now umount can handle mount points as well as block devices. 1169 * This is important for filesystems which use unnamed block devices. 1170 * 1171 * We now support a flag for forced unmount like the other 'big iron' 1172 * unixes. 
Our API is identical to OSF/1 to avoid making a mess of AMD 1173 */ 1174 1175 SYSCALL_DEFINE2(umount, char __user *, name, int, flags) 1176 { 1177 struct path path; 1178 int retval; 1179 int lookup_flags = 0; 1180 1181 if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW)) 1182 return -EINVAL; 1183 1184 if (!(flags & UMOUNT_NOFOLLOW)) 1185 lookup_flags |= LOOKUP_FOLLOW; 1186 1187 retval = user_path_at(AT_FDCWD, name, lookup_flags, &path); 1188 if (retval) 1189 goto out; 1190 retval = -EINVAL; 1191 if (path.dentry != path.mnt->mnt_root) 1192 goto dput_and_out; 1193 if (!check_mnt(path.mnt)) 1194 goto dput_and_out; 1195 1196 retval = -EPERM; 1197 if (!capable(CAP_SYS_ADMIN)) 1198 goto dput_and_out; 1199 1200 retval = do_umount(path.mnt, flags); 1201 dput_and_out: 1202 /* we mustn't call path_put() as that would clear mnt_expiry_mark */ 1203 dput(path.dentry); 1204 mntput_no_expire(path.mnt); 1205 out: 1206 return retval; 1207 } 1208 1209 #ifdef __ARCH_WANT_SYS_OLDUMOUNT 1210 1211 /* 1212 * The 2.0 compatible umount. No flags. 1213 */ 1214 SYSCALL_DEFINE1(oldumount, char __user *, name) 1215 { 1216 return sys_umount(name, 0); 1217 } 1218 1219 #endif 1220 1221 static int mount_is_safe(struct path *path) 1222 { 1223 if (capable(CAP_SYS_ADMIN)) 1224 return 0; 1225 return -EPERM; 1226 #ifdef notyet 1227 if (S_ISLNK(path->dentry->d_inode->i_mode)) 1228 return -EPERM; 1229 if (path->dentry->d_inode->i_mode & S_ISVTX) { 1230 if (current_uid() != path->dentry->d_inode->i_uid) 1231 return -EPERM; 1232 } 1233 if (inode_permission(path->dentry->d_inode, MAY_WRITE)) 1234 return -EPERM; 1235 return 0; 1236 #endif 1237 } 1238 1239 struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry, 1240 int flag) 1241 { 1242 struct vfsmount *res, *p, *q, *r, *s; 1243 struct path path; 1244 1245 if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(mnt)) 1246 return NULL; 1247 1248 res = q = clone_mnt(mnt, dentry, flag); 1249 if (!q) 1250 goto Enomem; 1251 q->mnt_mountpoint = mnt->mnt_mountpoint; 1252 1253 p = mnt; 1254 list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) { 1255 if (!is_subdir(r->mnt_mountpoint, dentry)) 1256 continue; 1257 1258 for (s = r; s; s = next_mnt(s, r)) { 1259 if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(s)) { 1260 s = skip_mnt_tree(s); 1261 continue; 1262 } 1263 while (p != s->mnt_parent) { 1264 p = p->mnt_parent; 1265 q = q->mnt_parent; 1266 } 1267 p = s; 1268 path.mnt = q; 1269 path.dentry = p->mnt_mountpoint; 1270 q = clone_mnt(p, p->mnt_root, flag); 1271 if (!q) 1272 goto Enomem; 1273 br_write_lock(vfsmount_lock); 1274 list_add_tail(&q->mnt_list, &res->mnt_list); 1275 attach_mnt(q, &path); 1276 br_write_unlock(vfsmount_lock); 1277 } 1278 } 1279 return res; 1280 Enomem: 1281 if (res) { 1282 LIST_HEAD(umount_list); 1283 br_write_lock(vfsmount_lock); 1284 umount_tree(res, 0, &umount_list); 1285 br_write_unlock(vfsmount_lock); 1286 release_mounts(&umount_list); 1287 } 1288 return NULL; 1289 } 1290 1291 struct vfsmount *collect_mounts(struct path *path) 1292 { 1293 struct vfsmount *tree; 1294 down_write(&namespace_sem); 1295 tree = copy_tree(path->mnt, path->dentry, CL_COPY_ALL | CL_PRIVATE); 1296 up_write(&namespace_sem); 1297 return tree; 1298 } 1299 1300 void drop_collected_mounts(struct vfsmount *mnt) 1301 { 1302 LIST_HEAD(umount_list); 1303 down_write(&namespace_sem); 1304 br_write_lock(vfsmount_lock); 1305 umount_tree(mnt, 0, &umount_list); 1306 br_write_unlock(vfsmount_lock); 1307 up_write(&namespace_sem); 1308 release_mounts(&umount_list); 1309 } 1310 1311 int 
iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
	       struct vfsmount *root)
{
	struct vfsmount *mnt;
	int res = f(root, arg);
	if (res)
		return res;
	list_for_each_entry(mnt, &root->mnt_list, mnt_list) {
		res = f(mnt, arg);
		if (res)
			return res;
	}
	return 0;
}

static void cleanup_group_ids(struct vfsmount *mnt, struct vfsmount *end)
{
	struct vfsmount *p;

	for (p = mnt; p != end; p = next_mnt(p, mnt)) {
		if (p->mnt_group_id && !IS_MNT_SHARED(p))
			mnt_release_group_id(p);
	}
}

static int invent_group_ids(struct vfsmount *mnt, bool recurse)
{
	struct vfsmount *p;

	for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
		if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
			int err = mnt_alloc_group_id(p);
			if (err) {
				cleanup_group_ids(mnt, p);
				return err;
			}
		}
	}

	return 0;
}

/*
 * @source_mnt : mount tree to be attached
 * @nd         : place the mount tree @source_mnt is attached
 * @parent_nd  : if non-null, detach the source_mnt from its parent and
 *               store the parent mount and mountpoint dentry.
 *               (done when source_mnt is moved)
 *
 * NOTE: the table below explains the semantics when a source mount
 * of a given type is attached to a destination mount of a given type.
 * ---------------------------------------------------------------------------
 * |         BIND MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (++)   |     shared (+) |     shared(+++)|  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+)    |      private   |      slave (*) |  invalid   |
 * ***************************************************************************
 * A bind operation clones the source mount and mounts the clone on the
 * destination mount.
 *
 * (++)  the cloned mount is propagated to all the mounts in the propagation
 *       tree of the destination mount and the cloned mount is added to
 *       the peer group of the source mount.
 * (+)   the cloned mount is created under the destination mount and is marked
 *       as shared. The cloned mount is added to the peer group of the source
 *       mount.
 * (+++) the mount is propagated to all the mounts in the propagation tree
 *       of the destination mount and the cloned mount is made slave
 *       of the same master as that of the source mount. The cloned mount
 *       is marked as 'shared and slave'.
 * (*)   the cloned mount is made a slave of the same master as that of the
 *       source mount.
 *
 * ---------------------------------------------------------------------------
 * |         MOVE MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (+)    |     shared (+) |    shared(+++) |  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+*)   |      private   |    slave (*)   | unbindable |
 * ***************************************************************************
 *
 * (+)   the mount is moved to the destination. And is then propagated to
 *       all the mounts in the propagation tree of the destination mount.
 * (+*)  the mount is moved to the destination.
 * (+++) the mount is moved to the destination and is then propagated to
 *       all the mounts belonging to the destination mount's propagation tree.
 *       the mount is marked as 'shared and slave'.
 * (*)   the mount continues to be a slave at the new location.
 *
 * If the source mount is a tree, the operations explained above are
 * applied to each mount in the tree.
 * Must be called without spinlocks held, since this function can sleep
 * in allocations.
 */
static int attach_recursive_mnt(struct vfsmount *source_mnt,
			struct path *path, struct path *parent_path)
{
	LIST_HEAD(tree_list);
	struct vfsmount *dest_mnt = path->mnt;
	struct dentry *dest_dentry = path->dentry;
	struct vfsmount *child, *p;
	int err;

	if (IS_MNT_SHARED(dest_mnt)) {
		err = invent_group_ids(source_mnt, true);
		if (err)
			goto out;
	}
	err = propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list);
	if (err)
		goto out_cleanup_ids;

	br_write_lock(vfsmount_lock);

	if (IS_MNT_SHARED(dest_mnt)) {
		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
			set_mnt_shared(p);
	}
	if (parent_path) {
		detach_mnt(source_mnt, parent_path);
		attach_mnt(source_mnt, path);
		touch_mnt_namespace(parent_path->mnt->mnt_ns);
	} else {
		mnt_set_mountpoint(dest_mnt, dest_dentry, source_mnt);
		commit_tree(source_mnt);
	}

	list_for_each_entry_safe(child, p, &tree_list, mnt_hash) {
		list_del_init(&child->mnt_hash);
		commit_tree(child);
	}
	br_write_unlock(vfsmount_lock);

	return 0;

out_cleanup_ids:
	if (IS_MNT_SHARED(dest_mnt))
		cleanup_group_ids(source_mnt, NULL);
out:
	return err;
}

static int graft_tree(struct vfsmount *mnt, struct path *path)
{
	int err;
	if (mnt->mnt_sb->s_flags & MS_NOUSER)
		return -EINVAL;

	if (S_ISDIR(path->dentry->d_inode->i_mode) !=
	      S_ISDIR(mnt->mnt_root->d_inode->i_mode))
		return -ENOTDIR;

	err = -ENOENT;
	mutex_lock(&path->dentry->d_inode->i_mutex);
	if (cant_mount(path->dentry))
		goto out_unlock;

	if (!d_unlinked(path->dentry))
		err = attach_recursive_mnt(mnt, path, NULL);
out_unlock:
	mutex_unlock(&path->dentry->d_inode->i_mutex);
	return err;
}
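/*
 * A userspace-level sketch of how the propagation types in the tables above
 * are requested, disabled and illustrative only (error handling omitted).
 * mount(2) with one of MS_SHARED/MS_PRIVATE/MS_SLAVE/MS_UNBINDABLE and no
 * source or fstype ends up in do_change_type() below.
 */
#if 0
	/* make /mnt and everything below it a shared subtree */
	mount(NULL, "/mnt", NULL, MS_SHARED | MS_REC, NULL);
	/* a recursive bind of a shared tree then joins its peer group */
	mount("/mnt", "/srv/clone", NULL, MS_BIND | MS_REC, NULL);
#endif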
/*
 * recursively change the type of the mountpoint.
 */
static int do_change_type(struct path *path, int flag)
{
	struct vfsmount *m, *mnt = path->mnt;
	int recurse = flag & MS_REC;
	int type = flag & ~MS_REC;
	int err = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	down_write(&namespace_sem);
	if (type == MS_SHARED) {
		err = invent_group_ids(mnt, recurse);
		if (err)
			goto out_unlock;
	}

	br_write_lock(vfsmount_lock);
	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
		change_mnt_propagation(m, type);
	br_write_unlock(vfsmount_lock);

 out_unlock:
	up_write(&namespace_sem);
	return err;
}

/*
 * do loopback mount.
 */
static int do_loopback(struct path *path, char *old_name,
				int recurse)
{
	struct path old_path;
	struct vfsmount *mnt = NULL;
	int err = mount_is_safe(path);
	if (err)
		return err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
	if (err)
		return err;

	down_write(&namespace_sem);
	err = -EINVAL;
	if (IS_MNT_UNBINDABLE(old_path.mnt))
		goto out;

	if (!check_mnt(path->mnt) || !check_mnt(old_path.mnt))
		goto out;

	err = -ENOMEM;
	if (recurse)
		mnt = copy_tree(old_path.mnt, old_path.dentry, 0);
	else
		mnt = clone_mnt(old_path.mnt, old_path.dentry, 0);

	if (!mnt)
		goto out;

	err = graft_tree(mnt, path);
	if (err) {
		LIST_HEAD(umount_list);

		br_write_lock(vfsmount_lock);
		umount_tree(mnt, 0, &umount_list);
		br_write_unlock(vfsmount_lock);
		release_mounts(&umount_list);
	}

out:
	up_write(&namespace_sem);
	path_put(&old_path);
	return err;
}

static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
{
	int error = 0;
	int readonly_request = 0;

	if (ms_flags & MS_RDONLY)
		readonly_request = 1;
	if (readonly_request == __mnt_is_readonly(mnt))
		return 0;

	if (readonly_request)
		error = mnt_make_readonly(mnt);
	else
		__mnt_unmake_readonly(mnt);
	return error;
}

/*
 * change filesystem flags. dir should be a physical root of filesystem.
 * If you've mounted a non-root directory somewhere and want to do remount
 * on it - tough luck.
1590 */ 1591 static int do_remount(struct path *path, int flags, int mnt_flags, 1592 void *data) 1593 { 1594 int err; 1595 struct super_block *sb = path->mnt->mnt_sb; 1596 1597 if (!capable(CAP_SYS_ADMIN)) 1598 return -EPERM; 1599 1600 if (!check_mnt(path->mnt)) 1601 return -EINVAL; 1602 1603 if (path->dentry != path->mnt->mnt_root) 1604 return -EINVAL; 1605 1606 down_write(&sb->s_umount); 1607 if (flags & MS_BIND) 1608 err = change_mount_flags(path->mnt, flags); 1609 else 1610 err = do_remount_sb(sb, flags, data, 0); 1611 if (!err) { 1612 br_write_lock(vfsmount_lock); 1613 mnt_flags |= path->mnt->mnt_flags & MNT_PROPAGATION_MASK; 1614 path->mnt->mnt_flags = mnt_flags; 1615 br_write_unlock(vfsmount_lock); 1616 } 1617 up_write(&sb->s_umount); 1618 if (!err) { 1619 br_write_lock(vfsmount_lock); 1620 touch_mnt_namespace(path->mnt->mnt_ns); 1621 br_write_unlock(vfsmount_lock); 1622 } 1623 return err; 1624 } 1625 1626 static inline int tree_contains_unbindable(struct vfsmount *mnt) 1627 { 1628 struct vfsmount *p; 1629 for (p = mnt; p; p = next_mnt(p, mnt)) { 1630 if (IS_MNT_UNBINDABLE(p)) 1631 return 1; 1632 } 1633 return 0; 1634 } 1635 1636 static int do_move_mount(struct path *path, char *old_name) 1637 { 1638 struct path old_path, parent_path; 1639 struct vfsmount *p; 1640 int err = 0; 1641 if (!capable(CAP_SYS_ADMIN)) 1642 return -EPERM; 1643 if (!old_name || !*old_name) 1644 return -EINVAL; 1645 err = kern_path(old_name, LOOKUP_FOLLOW, &old_path); 1646 if (err) 1647 return err; 1648 1649 down_write(&namespace_sem); 1650 while (d_mountpoint(path->dentry) && 1651 follow_down(path)) 1652 ; 1653 err = -EINVAL; 1654 if (!check_mnt(path->mnt) || !check_mnt(old_path.mnt)) 1655 goto out; 1656 1657 err = -ENOENT; 1658 mutex_lock(&path->dentry->d_inode->i_mutex); 1659 if (cant_mount(path->dentry)) 1660 goto out1; 1661 1662 if (d_unlinked(path->dentry)) 1663 goto out1; 1664 1665 err = -EINVAL; 1666 if (old_path.dentry != old_path.mnt->mnt_root) 1667 goto out1; 1668 1669 if (old_path.mnt == old_path.mnt->mnt_parent) 1670 goto out1; 1671 1672 if (S_ISDIR(path->dentry->d_inode->i_mode) != 1673 S_ISDIR(old_path.dentry->d_inode->i_mode)) 1674 goto out1; 1675 /* 1676 * Don't move a mount residing in a shared parent. 1677 */ 1678 if (old_path.mnt->mnt_parent && 1679 IS_MNT_SHARED(old_path.mnt->mnt_parent)) 1680 goto out1; 1681 /* 1682 * Don't move a mount tree containing unbindable mounts to a destination 1683 * mount which is shared. 1684 */ 1685 if (IS_MNT_SHARED(path->mnt) && 1686 tree_contains_unbindable(old_path.mnt)) 1687 goto out1; 1688 err = -ELOOP; 1689 for (p = path->mnt; p->mnt_parent != p; p = p->mnt_parent) 1690 if (p == old_path.mnt) 1691 goto out1; 1692 1693 err = attach_recursive_mnt(old_path.mnt, path, &parent_path); 1694 if (err) 1695 goto out1; 1696 1697 /* if the mount is moved, it should no longer be expire 1698 * automatically */ 1699 list_del_init(&old_path.mnt->mnt_expire); 1700 out1: 1701 mutex_unlock(&path->dentry->d_inode->i_mutex); 1702 out: 1703 up_write(&namespace_sem); 1704 if (!err) 1705 path_put(&parent_path); 1706 path_put(&old_path); 1707 return err; 1708 } 1709 1710 /* 1711 * create a new mount for userspace and request it to be added into the 1712 * namespace's tree 1713 */ 1714 static int do_new_mount(struct path *path, char *type, int flags, 1715 int mnt_flags, char *name, void *data) 1716 { 1717 struct vfsmount *mnt; 1718 1719 if (!type) 1720 return -EINVAL; 1721 1722 /* we need capabilities... 
*/ 1723 if (!capable(CAP_SYS_ADMIN)) 1724 return -EPERM; 1725 1726 lock_kernel(); 1727 mnt = do_kern_mount(type, flags, name, data); 1728 unlock_kernel(); 1729 if (IS_ERR(mnt)) 1730 return PTR_ERR(mnt); 1731 1732 return do_add_mount(mnt, path, mnt_flags, NULL); 1733 } 1734 1735 /* 1736 * add a mount into a namespace's mount tree 1737 * - provide the option of adding the new mount to an expiration list 1738 */ 1739 int do_add_mount(struct vfsmount *newmnt, struct path *path, 1740 int mnt_flags, struct list_head *fslist) 1741 { 1742 int err; 1743 1744 mnt_flags &= ~(MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL); 1745 1746 down_write(&namespace_sem); 1747 /* Something was mounted here while we slept */ 1748 while (d_mountpoint(path->dentry) && 1749 follow_down(path)) 1750 ; 1751 err = -EINVAL; 1752 if (!(mnt_flags & MNT_SHRINKABLE) && !check_mnt(path->mnt)) 1753 goto unlock; 1754 1755 /* Refuse the same filesystem on the same mount point */ 1756 err = -EBUSY; 1757 if (path->mnt->mnt_sb == newmnt->mnt_sb && 1758 path->mnt->mnt_root == path->dentry) 1759 goto unlock; 1760 1761 err = -EINVAL; 1762 if (S_ISLNK(newmnt->mnt_root->d_inode->i_mode)) 1763 goto unlock; 1764 1765 newmnt->mnt_flags = mnt_flags; 1766 if ((err = graft_tree(newmnt, path))) 1767 goto unlock; 1768 1769 if (fslist) /* add to the specified expiration list */ 1770 list_add_tail(&newmnt->mnt_expire, fslist); 1771 1772 up_write(&namespace_sem); 1773 return 0; 1774 1775 unlock: 1776 up_write(&namespace_sem); 1777 mntput(newmnt); 1778 return err; 1779 } 1780 1781 EXPORT_SYMBOL_GPL(do_add_mount); 1782 1783 /* 1784 * process a list of expirable mountpoints with the intent of discarding any 1785 * mountpoints that aren't in use and haven't been touched since last we came 1786 * here 1787 */ 1788 void mark_mounts_for_expiry(struct list_head *mounts) 1789 { 1790 struct vfsmount *mnt, *next; 1791 LIST_HEAD(graveyard); 1792 LIST_HEAD(umounts); 1793 1794 if (list_empty(mounts)) 1795 return; 1796 1797 down_write(&namespace_sem); 1798 br_write_lock(vfsmount_lock); 1799 1800 /* extract from the expiration list every vfsmount that matches the 1801 * following criteria: 1802 * - only referenced by its parent vfsmount 1803 * - still marked for expiry (marked on the last call here; marks are 1804 * cleared by mntput()) 1805 */ 1806 list_for_each_entry_safe(mnt, next, mounts, mnt_expire) { 1807 if (!xchg(&mnt->mnt_expiry_mark, 1) || 1808 propagate_mount_busy(mnt, 1)) 1809 continue; 1810 list_move(&mnt->mnt_expire, &graveyard); 1811 } 1812 while (!list_empty(&graveyard)) { 1813 mnt = list_first_entry(&graveyard, struct vfsmount, mnt_expire); 1814 touch_mnt_namespace(mnt->mnt_ns); 1815 umount_tree(mnt, 1, &umounts); 1816 } 1817 br_write_unlock(vfsmount_lock); 1818 up_write(&namespace_sem); 1819 1820 release_mounts(&umounts); 1821 } 1822 1823 EXPORT_SYMBOL_GPL(mark_mounts_for_expiry); 1824 1825 /* 1826 * Ripoff of 'select_parent()' 1827 * 1828 * search the list of submounts for a given mountpoint, and move any 1829 * shrinkable submounts to the 'graveyard' list. 
1830 */ 1831 static int select_submounts(struct vfsmount *parent, struct list_head *graveyard) 1832 { 1833 struct vfsmount *this_parent = parent; 1834 struct list_head *next; 1835 int found = 0; 1836 1837 repeat: 1838 next = this_parent->mnt_mounts.next; 1839 resume: 1840 while (next != &this_parent->mnt_mounts) { 1841 struct list_head *tmp = next; 1842 struct vfsmount *mnt = list_entry(tmp, struct vfsmount, mnt_child); 1843 1844 next = tmp->next; 1845 if (!(mnt->mnt_flags & MNT_SHRINKABLE)) 1846 continue; 1847 /* 1848 * Descend a level if the d_mounts list is non-empty. 1849 */ 1850 if (!list_empty(&mnt->mnt_mounts)) { 1851 this_parent = mnt; 1852 goto repeat; 1853 } 1854 1855 if (!propagate_mount_busy(mnt, 1)) { 1856 list_move_tail(&mnt->mnt_expire, graveyard); 1857 found++; 1858 } 1859 } 1860 /* 1861 * All done at this level ... ascend and resume the search 1862 */ 1863 if (this_parent != parent) { 1864 next = this_parent->mnt_child.next; 1865 this_parent = this_parent->mnt_parent; 1866 goto resume; 1867 } 1868 return found; 1869 } 1870 1871 /* 1872 * process a list of expirable mountpoints with the intent of discarding any 1873 * submounts of a specific parent mountpoint 1874 * 1875 * vfsmount_lock must be held for write 1876 */ 1877 static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts) 1878 { 1879 LIST_HEAD(graveyard); 1880 struct vfsmount *m; 1881 1882 /* extract submounts of 'mountpoint' from the expiration list */ 1883 while (select_submounts(mnt, &graveyard)) { 1884 while (!list_empty(&graveyard)) { 1885 m = list_first_entry(&graveyard, struct vfsmount, 1886 mnt_expire); 1887 touch_mnt_namespace(m->mnt_ns); 1888 umount_tree(m, 1, umounts); 1889 } 1890 } 1891 } 1892 1893 /* 1894 * Some copy_from_user() implementations do not return the exact number of 1895 * bytes remaining to copy on a fault. But copy_mount_options() requires that. 1896 * Note that this function differs from copy_from_user() in that it will oops 1897 * on bad values of `to', rather than returning a short copy. 1898 */ 1899 static long exact_copy_from_user(void *to, const void __user * from, 1900 unsigned long n) 1901 { 1902 char *t = to; 1903 const char __user *f = from; 1904 char c; 1905 1906 if (!access_ok(VERIFY_READ, from, n)) 1907 return n; 1908 1909 while (n) { 1910 if (__get_user(c, f)) { 1911 memset(t, 0, n); 1912 break; 1913 } 1914 *t++ = c; 1915 f++; 1916 n--; 1917 } 1918 return n; 1919 } 1920 1921 int copy_mount_options(const void __user * data, unsigned long *where) 1922 { 1923 int i; 1924 unsigned long page; 1925 unsigned long size; 1926 1927 *where = 0; 1928 if (!data) 1929 return 0; 1930 1931 if (!(page = __get_free_page(GFP_KERNEL))) 1932 return -ENOMEM; 1933 1934 /* We only care that *some* data at the address the user 1935 * gave us is valid. Just in case, we'll zero 1936 * the remainder of the page. 1937 */ 1938 /* copy_from_user cannot cross TASK_SIZE ! 
 */
	size = TASK_SIZE - (unsigned long)data;
	if (size > PAGE_SIZE)
		size = PAGE_SIZE;

	i = size - exact_copy_from_user((void *)page, data, size);
	if (!i) {
		free_page(page);
		return -EFAULT;
	}
	if (i != PAGE_SIZE)
		memset((char *)page + i, 0, PAGE_SIZE - i);
	*where = page;
	return 0;
}

int copy_mount_string(const void __user *data, char **where)
{
	char *tmp;

	if (!data) {
		*where = NULL;
		return 0;
	}

	tmp = strndup_user(data, PAGE_SIZE);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);

	*where = tmp;
	return 0;
}

/*
 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
 * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
 *
 * data is a (void *) that can point to any structure up to
 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
 * information (or be NULL).
 *
 * Pre-0.97 versions of mount() didn't have a flags word.
 * When the flags word was introduced its top half was required
 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
 * Therefore, if this magic number is present, it carries no information
 * and must be discarded.
 */
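/*
 * A userspace-level sketch of the flags word described above, disabled and
 * illustrative only: MS_* flags select generic per-mount behaviour, while the
 * data argument carries fs-specific options (an assumed ext4 option here).
 */
#if 0
	mount("/dev/sda1", "/mnt", "ext4", MS_RDONLY | MS_NOSUID,
	      "errors=remount-ro");
#endif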
long do_mount(char *dev_name, char *dir_name, char *type_page,
		  unsigned long flags, void *data_page)
{
	struct path path;
	int retval = 0;
	int mnt_flags = 0;

	/* Discard magic */
	if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
		flags &= ~MS_MGC_MSK;

	/* Basic sanity checks */

	if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE))
		return -EINVAL;

	if (data_page)
		((char *)data_page)[PAGE_SIZE - 1] = 0;

	/* ... and get the mountpoint */
	retval = kern_path(dir_name, LOOKUP_FOLLOW, &path);
	if (retval)
		return retval;

	retval = security_sb_mount(dev_name, &path,
				   type_page, flags, data_page);
	if (retval)
		goto dput_out;

	/* Default to relatime unless overridden */
	if (!(flags & MS_NOATIME))
		mnt_flags |= MNT_RELATIME;

	/* Separate the per-mountpoint flags */
	if (flags & MS_NOSUID)
		mnt_flags |= MNT_NOSUID;
	if (flags & MS_NODEV)
		mnt_flags |= MNT_NODEV;
	if (flags & MS_NOEXEC)
		mnt_flags |= MNT_NOEXEC;
	if (flags & MS_NOATIME)
		mnt_flags |= MNT_NOATIME;
	if (flags & MS_NODIRATIME)
		mnt_flags |= MNT_NODIRATIME;
	if (flags & MS_STRICTATIME)
		mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME);
	if (flags & MS_RDONLY)
		mnt_flags |= MNT_READONLY;

	flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN |
		   MS_NOATIME | MS_NODIRATIME | MS_RELATIME | MS_KERNMOUNT |
		   MS_STRICTATIME);

	if (flags & MS_REMOUNT)
		retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
				    data_page);
	else if (flags & MS_BIND)
		retval = do_loopback(&path, dev_name, flags & MS_REC);
	else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		retval = do_change_type(&path, flags);
	else if (flags & MS_MOVE)
		retval = do_move_mount(&path, dev_name);
	else
		retval = do_new_mount(&path, type_page, flags, mnt_flags,
				      dev_name, data_page);
dput_out:
	path_put(&path);
	return retval;
}

static struct mnt_namespace *alloc_mnt_ns(void)
{
	struct mnt_namespace *new_ns;

	new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
	if (!new_ns)
		return ERR_PTR(-ENOMEM);
	atomic_set(&new_ns->count, 1);
	new_ns->root = NULL;
	INIT_LIST_HEAD(&new_ns->list);
	init_waitqueue_head(&new_ns->poll);
	new_ns->event = 0;
	return new_ns;
}

/*
 * Allocate a new namespace structure and populate it with contents
 * copied from the namespace of the passed in task structure.
 */
static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
		struct fs_struct *fs)
{
	struct mnt_namespace *new_ns;
	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
	struct vfsmount *p, *q;

	new_ns = alloc_mnt_ns();
	if (IS_ERR(new_ns))
		return new_ns;

	down_write(&namespace_sem);
	/* First pass: copy the tree topology */
	new_ns->root = copy_tree(mnt_ns->root, mnt_ns->root->mnt_root,
					CL_COPY_ALL | CL_EXPIRE);
	if (!new_ns->root) {
		up_write(&namespace_sem);
		kfree(new_ns);
		return ERR_PTR(-ENOMEM);
	}
	br_write_lock(vfsmount_lock);
	list_add_tail(&new_ns->list, &new_ns->root->mnt_list);
	br_write_unlock(vfsmount_lock);

	/*
	 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
	 * as belonging to new namespace.  We have already acquired a private
	 * fs_struct, so tsk->fs->lock is not needed.
/**
 * create_mnt_ns - creates a private namespace and adds a root filesystem
 * @mnt: pointer to the new root filesystem mountpoint
 */
struct mnt_namespace *create_mnt_ns(struct vfsmount *mnt)
{
	struct mnt_namespace *new_ns;

	new_ns = alloc_mnt_ns();
	if (!IS_ERR(new_ns)) {
		mnt->mnt_ns = new_ns;
		new_ns->root = mnt;
		list_add(&new_ns->list, &new_ns->root->mnt_list);
	}
	return new_ns;
}
EXPORT_SYMBOL(create_mnt_ns);
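
/*
 * Usage sketch (hypothetical in-kernel caller, not part of this file):
 * create_mnt_ns() wraps an already-mounted tree in a namespace of its own,
 * exactly as init_mount_tree() does for rootfs further down.  A caller that
 * wants a private, unattached mount for internal pathname resolution might
 * do something like the following; "somefs" and the surrounding error
 * handling are made up for illustration.
 *
 *	struct vfsmount *mnt;
 *	struct mnt_namespace *ns;
 *
 *	mnt = do_kern_mount("somefs", 0, "somefs", NULL);
 *	if (IS_ERR(mnt))
 *		return PTR_ERR(mnt);
 *
 *	ns = create_mnt_ns(mnt);
 *	if (IS_ERR(ns)) {
 *		mntput(mnt);
 *		return PTR_ERR(ns);
 *	}
 *	...
 *	put_mnt_ns(ns);		(tears the tree back down)
 */
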
SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
		char __user *, type, unsigned long, flags, void __user *, data)
{
	int ret;
	char *kernel_type;
	char *kernel_dir;
	char *kernel_dev;
	unsigned long data_page;

	ret = copy_mount_string(type, &kernel_type);
	if (ret < 0)
		goto out_type;

	kernel_dir = getname(dir_name);
	if (IS_ERR(kernel_dir)) {
		ret = PTR_ERR(kernel_dir);
		goto out_dir;
	}

	ret = copy_mount_string(dev_name, &kernel_dev);
	if (ret < 0)
		goto out_dev;

	ret = copy_mount_options(data, &data_page);
	if (ret < 0)
		goto out_data;

	ret = do_mount(kernel_dev, kernel_dir, kernel_type, flags,
		(void *) data_page);

	free_page(data_page);
out_data:
	kfree(kernel_dev);
out_dev:
	putname(kernel_dir);
out_dir:
	kfree(kernel_type);
out_type:
	return ret;
}

/*
 * pivot_root Semantics:
 * Moves the root file system of the current process to the directory put_old,
 * makes new_root as the new root file system of the current process, and sets
 * root/cwd of all processes which had them on the current root to new_root.
 *
 * Restrictions:
 * The new_root and put_old must be directories, and must not be on the
 * same file system as the current process root. The put_old must be
 * underneath new_root, i.e. adding a non-zero number of /.. to the string
 * pointed to by put_old must yield the same directory as new_root. No other
 * file system may be mounted on put_old. After all, new_root is a mountpoint.
 *
 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
 * See Documentation/filesystems/ramfs-rootfs-initramfs.txt for alternatives
 * in this situation.
 *
 * Notes:
 *  - we don't move root/cwd if they are not at the root (reason: if something
 *    cared enough to change them, it's probably wrong to force them elsewhere)
 *  - it's okay to pick a root that isn't the root of a file system, e.g.
 *    /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
 *    though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
 *    first.
 */
SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
		const char __user *, put_old)
{
	struct vfsmount *tmp;
	struct path new, old, parent_path, root_parent, root;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	error = user_path_dir(new_root, &new);
	if (error)
		goto out0;
	error = -EINVAL;
	if (!check_mnt(new.mnt))
		goto out1;

	error = user_path_dir(put_old, &old);
	if (error)
		goto out1;

	error = security_sb_pivotroot(&old, &new);
	if (error) {
		path_put(&old);
		goto out1;
	}

	get_fs_root(current->fs, &root);
	down_write(&namespace_sem);
	mutex_lock(&old.dentry->d_inode->i_mutex);
	error = -EINVAL;
	if (IS_MNT_SHARED(old.mnt) ||
		IS_MNT_SHARED(new.mnt->mnt_parent) ||
		IS_MNT_SHARED(root.mnt->mnt_parent))
		goto out2;
	if (!check_mnt(root.mnt))
		goto out2;
	error = -ENOENT;
	if (cant_mount(old.dentry))
		goto out2;
	if (d_unlinked(new.dentry))
		goto out2;
	if (d_unlinked(old.dentry))
		goto out2;
	error = -EBUSY;
	if (new.mnt == root.mnt ||
		old.mnt == root.mnt)
		goto out2; /* loop, on the same file system */
	error = -EINVAL;
	if (root.mnt->mnt_root != root.dentry)
		goto out2; /* not a mountpoint */
	if (root.mnt->mnt_parent == root.mnt)
		goto out2; /* not attached */
	if (new.mnt->mnt_root != new.dentry)
		goto out2; /* not a mountpoint */
	if (new.mnt->mnt_parent == new.mnt)
		goto out2; /* not attached */
	/* make sure we can reach put_old from new_root */
	tmp = old.mnt;
	br_write_lock(vfsmount_lock);
	if (tmp != new.mnt) {
		for (;;) {
			if (tmp->mnt_parent == tmp)
				goto out3; /* already mounted on put_old */
			if (tmp->mnt_parent == new.mnt)
				break;
			tmp = tmp->mnt_parent;
		}
		if (!is_subdir(tmp->mnt_mountpoint, new.dentry))
			goto out3;
	} else if (!is_subdir(old.dentry, new.dentry))
		goto out3;
	detach_mnt(new.mnt, &parent_path);
	detach_mnt(root.mnt, &root_parent);
	/* mount old root on put_old */
	attach_mnt(root.mnt, &old);
	/* mount new_root on / */
	attach_mnt(new.mnt, &root_parent);
	touch_mnt_namespace(current->nsproxy->mnt_ns);
	br_write_unlock(vfsmount_lock);
	chroot_fs_refs(&root, &new);
	error = 0;
	path_put(&root_parent);
	path_put(&parent_path);
out2:
	mutex_unlock(&old.dentry->d_inode->i_mutex);
	up_write(&namespace_sem);
	path_put(&root);
	path_put(&old);
out1:
	path_put(&new);
out0:
	return error;
out3:
	br_write_unlock(vfsmount_lock);
	goto out2;
}
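
/*
 * Usage sketch (userspace, not part of this file): the classic sequence an
 * init program or container runtime uses to switch roots with the system
 * call above.  The paths are made-up examples; /newroot must already be a
 * mountpoint (hence the bind mount onto itself) and /newroot/oldroot must
 * already exist underneath it, matching the restrictions described above.
 * glibc commonly provides no wrapper for pivot_root, hence syscall(2).
 *
 *	#include <sys/mount.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int switch_root(void)
 *	{
 *		if (mount("/newroot", "/newroot", NULL, MS_BIND, NULL) < 0)
 *			return -1;
 *		if (chdir("/newroot") < 0)
 *			return -1;
 *		if (syscall(SYS_pivot_root, ".", "./oldroot") < 0)
 *			return -1;
 *		if (chroot(".") < 0)
 *			return -1;
 *		return umount2("/oldroot", MNT_DETACH);
 *	}
 */
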
static void __init init_mount_tree(void)
{
	struct vfsmount *mnt;
	struct mnt_namespace *ns;
	struct path root;

	mnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
	if (IS_ERR(mnt))
		panic("Can't create rootfs");
	ns = create_mnt_ns(mnt);
	if (IS_ERR(ns))
		panic("Can't allocate initial namespace");

	init_task.nsproxy->mnt_ns = ns;
	get_mnt_ns(ns);

	root.mnt = ns->root;
	root.dentry = ns->root->mnt_root;

	set_fs_pwd(current->fs, &root);
	set_fs_root(current->fs, &root);
}

void __init mnt_init(void)
{
	unsigned u;
	int err;

	init_rwsem(&namespace_sem);

	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

	mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);

	if (!mount_hashtable)
		panic("Failed to allocate mount hash table\n");

	printk("Mount-cache hash table entries: %lu\n", HASH_SIZE);

	for (u = 0; u < HASH_SIZE; u++)
		INIT_LIST_HEAD(&mount_hashtable[u]);

	br_lock_init(vfsmount_lock);

	err = sysfs_init();
	if (err)
		printk(KERN_WARNING "%s: sysfs_init error: %d\n",
			__func__, err);
	fs_kobj = kobject_create_and_add("fs", NULL);
	if (!fs_kobj)
		printk(KERN_WARNING "%s: kobj create error\n", __func__);
	init_rootfs();
	init_mount_tree();
}

void put_mnt_ns(struct mnt_namespace *ns)
{
	LIST_HEAD(umount_list);

	if (!atomic_dec_and_test(&ns->count))
		return;
	down_write(&namespace_sem);
	br_write_lock(vfsmount_lock);
	umount_tree(ns->root, 0, &umount_list);
	br_write_unlock(vfsmount_lock);
	up_write(&namespace_sem);
	release_mounts(&umount_list);
	kfree(ns);
}
EXPORT_SYMBOL(put_mnt_ns);
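
/*
 * Lifetime sketch (hypothetical caller, not part of this file):
 * put_mnt_ns() above is the release side of the ns->count reference taken
 * with get_mnt_ns() (see copy_mnt_ns() and init_mount_tree() above).  Code
 * that stashes a pointer to a task's mount namespace across a sleeping
 * operation should pin it the same way:
 *
 *	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
 *
 *	get_mnt_ns(ns);
 *	... use ns->root, walk ns->list ...
 *	put_mnt_ns(ns);
 *
 * The final put takes namespace_sem for write and unmounts the whole tree,
 * so it may sleep and must not be called from atomic context.
 */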