/*
 *  linux/fs/namespace.c
 *
 * (C) Copyright Al Viro 2000, 2001
 *	Released under GPL v2.
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 * Heavily rewritten.
 */

#include <linux/config.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/quotaops.h>
#include <linux/acct.h>
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include "pnode.h"

extern int __init init_rootfs(void);

#ifdef CONFIG_SYSFS
extern int __init sysfs_init(void);
#else
static inline int sysfs_init(void)
{
	return 0;
}
#endif

/* spinlock for vfsmount related operations, inplace of dcache_lock */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);

static int event;

static struct list_head *mount_hashtable __read_mostly;
static int hash_mask __read_mostly, hash_bits __read_mostly;
static kmem_cache_t *mnt_cache __read_mostly;
static struct rw_semaphore namespace_sem;

/* /sys/fs */
decl_subsys(fs, NULL, NULL);
EXPORT_SYMBOL_GPL(fs_subsys);

static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> hash_bits);
	return tmp & hash_mask;
}

struct vfsmount *alloc_vfsmnt(const char *name)
{
	struct vfsmount *mnt = kmem_cache_alloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		memset(mnt, 0, sizeof(struct vfsmount));
		atomic_set(&mnt->mnt_count, 1);
		INIT_LIST_HEAD(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
		INIT_LIST_HEAD(&mnt->mnt_slave);
		if (name) {
			int size = strlen(name) + 1;
			char *newname = kmalloc(size, GFP_KERNEL);
			if (newname) {
				memcpy(newname, name, size);
				mnt->mnt_devname = newname;
			}
		}
	}
	return mnt;
}

void free_vfsmnt(struct vfsmount *mnt)
{
	kfree(mnt->mnt_devname);
	kmem_cache_free(mnt_cache, mnt);
}

/*
 * find the first or last mount at @dentry on vfsmount @mnt depending on
 * @dir. If @dir is set return the first mount else return the last mount.
 */
struct vfsmount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
			      int dir)
{
	struct list_head *head = mount_hashtable + hash(mnt, dentry);
	struct list_head *tmp = head;
	struct vfsmount *p, *found = NULL;

	for (;;) {
		tmp = dir ? tmp->next : tmp->prev;
		p = NULL;
		if (tmp == head)
			break;
		p = list_entry(tmp, struct vfsmount, mnt_hash);
		if (p->mnt_parent == mnt && p->mnt_mountpoint == dentry) {
			found = p;
			break;
		}
	}
	return found;
}

/*
 * lookup_mnt increments the ref count before returning
 * the vfsmount struct.
 */
struct vfsmount *lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
	struct vfsmount *child_mnt;
	spin_lock(&vfsmount_lock);
	if ((child_mnt = __lookup_mnt(mnt, dentry, 1)))
		mntget(child_mnt);
	spin_unlock(&vfsmount_lock);
	return child_mnt;
}

static inline int check_mnt(struct vfsmount *mnt)
{
	return mnt->mnt_namespace == current->namespace;
}

static void touch_namespace(struct namespace *ns)
{
	if (ns) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}

static void __touch_namespace(struct namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = event;
		wake_up_interruptible(&ns->poll);
	}
}

static void detach_mnt(struct vfsmount *mnt, struct nameidata *old_nd)
{
	old_nd->dentry = mnt->mnt_mountpoint;
	old_nd->mnt = mnt->mnt_parent;
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt_root;
	list_del_init(&mnt->mnt_child);
	list_del_init(&mnt->mnt_hash);
	old_nd->dentry->d_mounted--;
}

void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry,
			struct vfsmount *child_mnt)
{
	child_mnt->mnt_parent = mntget(mnt);
	child_mnt->mnt_mountpoint = dget(dentry);
	dentry->d_mounted++;
}

static void attach_mnt(struct vfsmount *mnt, struct nameidata *nd)
{
	mnt_set_mountpoint(nd->mnt, nd->dentry, mnt);
	list_add_tail(&mnt->mnt_hash, mount_hashtable +
			hash(nd->mnt, nd->dentry));
	list_add_tail(&mnt->mnt_child, &nd->mnt->mnt_mounts);
}

/*
 * the caller must hold vfsmount_lock
 */
static void commit_tree(struct vfsmount *mnt)
{
	struct vfsmount *parent = mnt->mnt_parent;
	struct vfsmount *m;
	LIST_HEAD(head);
	struct namespace *n = parent->mnt_namespace;

	BUG_ON(parent == mnt);

	list_add_tail(&head, &mnt->mnt_list);
	list_for_each_entry(m, &head, mnt_list)
		m->mnt_namespace = n;
	list_splice(&head, n->list.prev);

	list_add_tail(&mnt->mnt_hash, mount_hashtable +
			hash(parent, mnt->mnt_mountpoint));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
	touch_namespace(n);
}

static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct vfsmount, mnt_child);
}

static struct vfsmount *skip_mnt_tree(struct vfsmount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct vfsmount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}

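/*
 * Note (added for clarity, not part of the original source): @flag below is
 * a bitmask of the CL_* clone flags (CL_SLAVE, CL_PROPAGATION, CL_MAKE_SHARED,
 * CL_EXPIRE, CL_COPY_ALL, ...) defined in pnode.h; it controls how the new
 * mount relates to @old's peer group, slave list and expiry list.
 */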
static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
					int flag)
{
	struct super_block *sb = old->mnt_sb;
	struct vfsmount *mnt = alloc_vfsmnt(old->mnt_devname);

	if (mnt) {
		mnt->mnt_flags = old->mnt_flags;
		atomic_inc(&sb->s_active);
		mnt->mnt_sb = sb;
		mnt->mnt_root = dget(root);
		mnt->mnt_mountpoint = mnt->mnt_root;
		mnt->mnt_parent = mnt;

		if (flag & CL_SLAVE) {
			list_add(&mnt->mnt_slave, &old->mnt_slave_list);
			mnt->mnt_master = old;
			CLEAR_MNT_SHARED(mnt);
		} else {
			if ((flag & CL_PROPAGATION) || IS_MNT_SHARED(old))
				list_add(&mnt->mnt_share, &old->mnt_share);
			if (IS_MNT_SLAVE(old))
				list_add(&mnt->mnt_slave, &old->mnt_slave);
			mnt->mnt_master = old->mnt_master;
		}
		if (flag & CL_MAKE_SHARED)
			set_mnt_shared(mnt);

		/* stick the duplicate mount on the same expiry list
		 * as the original if that was on one */
		if (flag & CL_EXPIRE) {
			spin_lock(&vfsmount_lock);
			if (!list_empty(&old->mnt_expire))
				list_add(&mnt->mnt_expire, &old->mnt_expire);
			spin_unlock(&vfsmount_lock);
		}
	}
	return mnt;
}

static inline void __mntput(struct vfsmount *mnt)
{
	struct super_block *sb = mnt->mnt_sb;
	dput(mnt->mnt_root);
	free_vfsmnt(mnt);
	deactivate_super(sb);
}

void mntput_no_expire(struct vfsmount *mnt)
{
repeat:
	if (atomic_dec_and_lock(&mnt->mnt_count, &vfsmount_lock)) {
		if (likely(!mnt->mnt_pinned)) {
			spin_unlock(&vfsmount_lock);
			__mntput(mnt);
			return;
		}
		atomic_add(mnt->mnt_pinned + 1, &mnt->mnt_count);
		mnt->mnt_pinned = 0;
		spin_unlock(&vfsmount_lock);
		acct_auto_close_mnt(mnt);
		security_sb_umount_close(mnt);
		goto repeat;
	}
}

EXPORT_SYMBOL(mntput_no_expire);

void mnt_pin(struct vfsmount *mnt)
{
	spin_lock(&vfsmount_lock);
	mnt->mnt_pinned++;
	spin_unlock(&vfsmount_lock);
}

EXPORT_SYMBOL(mnt_pin);

void mnt_unpin(struct vfsmount *mnt)
{
	spin_lock(&vfsmount_lock);
	if (mnt->mnt_pinned) {
		atomic_inc(&mnt->mnt_count);
		mnt->mnt_pinned--;
	}
	spin_unlock(&vfsmount_lock);
}

EXPORT_SYMBOL(mnt_unpin);

/* iterator */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct namespace *n = m->private;
	struct list_head *p;
	loff_t l = *pos;

	down_read(&namespace_sem);
	list_for_each(p, &n->list)
		if (!l--)
			return list_entry(p, struct vfsmount, mnt_list);
	return NULL;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct namespace *n = m->private;
	struct list_head *p = ((struct vfsmount *)v)->mnt_list.next;
	(*pos)++;
	return p == &n->list ? NULL : list_entry(p, struct vfsmount, mnt_list);
}

static void m_stop(struct seq_file *m, void *v)
{
	up_read(&namespace_sem);
}

static inline void mangle(struct seq_file *m, const char *s)
{
	seq_escape(m, s, " \t\n\\");
}

static int show_vfsmnt(struct seq_file *m, void *v)
{
	struct vfsmount *mnt = v;
	int err = 0;
	static struct proc_fs_info {
		int flag;
		char *str;
	} fs_info[] = {
		{ MS_SYNCHRONOUS, ",sync" },
		{ MS_DIRSYNC, ",dirsync" },
		{ MS_MANDLOCK, ",mand" },
		{ 0, NULL }
	};
	static struct proc_fs_info mnt_info[] = {
		{ MNT_NOSUID, ",nosuid" },
		{ MNT_NODEV, ",nodev" },
		{ MNT_NOEXEC, ",noexec" },
		{ MNT_NOATIME, ",noatime" },
		{ MNT_NODIRATIME, ",nodiratime" },
		{ 0, NULL }
	};
	struct proc_fs_info *fs_infop;

	mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
	seq_putc(m, ' ');
	seq_path(m, mnt, mnt->mnt_root, " \t\n\\");
	seq_putc(m, ' ');
	mangle(m, mnt->mnt_sb->s_type->name);
	seq_puts(m, mnt->mnt_sb->s_flags & MS_RDONLY ? " ro" : " rw");
	for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
		if (mnt->mnt_sb->s_flags & fs_infop->flag)
			seq_puts(m, fs_infop->str);
	}
	for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) {
		if (mnt->mnt_flags & fs_infop->flag)
			seq_puts(m, fs_infop->str);
	}
	if (mnt->mnt_sb->s_op->show_options)
		err = mnt->mnt_sb->s_op->show_options(m, mnt);
	seq_puts(m, " 0 0\n");
	return err;
}

struct seq_operations mounts_op = {
	.start = m_start,
	.next = m_next,
	.stop = m_stop,
	.show = show_vfsmnt
};

static int show_vfsstat(struct seq_file *m, void *v)
{
	struct vfsmount *mnt = v;
	int err = 0;

	/* device */
	if (mnt->mnt_devname) {
		seq_puts(m, "device ");
		mangle(m, mnt->mnt_devname);
	} else
		seq_puts(m, "no device");

	/* mount point */
	seq_puts(m, " mounted on ");
	seq_path(m, mnt, mnt->mnt_root, " \t\n\\");
	seq_putc(m, ' ');

	/* file system type */
	seq_puts(m, "with fstype ");
	mangle(m, mnt->mnt_sb->s_type->name);

	/* optional statistics */
	if (mnt->mnt_sb->s_op->show_stats) {
		seq_putc(m, ' ');
		err = mnt->mnt_sb->s_op->show_stats(m, mnt);
	}

	seq_putc(m, '\n');
	return err;
}

struct seq_operations mountstats_op = {
	.start = m_start,
	.next = m_next,
	.stop = m_stop,
	.show = show_vfsstat,
};

/**
 * may_umount_tree - check if a mount tree is busy
 * @mnt: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *mnt)
{
	int actual_refs = 0;
	int minimum_refs = 0;
	struct vfsmount *p;

	spin_lock(&vfsmount_lock);
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		actual_refs += atomic_read(&p->mnt_count);
		minimum_refs += 2;
	}
	spin_unlock(&vfsmount_lock);

	if (actual_refs > minimum_refs)
		return 0;

	return 1;
}

EXPORT_SYMBOL(may_umount_tree);

/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts. If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	int ret = 1;
	spin_lock(&vfsmount_lock);
	if (propagate_mount_busy(mnt, 2))
		ret = 0;
	spin_unlock(&vfsmount_lock);
	return ret;
}

EXPORT_SYMBOL(may_umount);

void release_mounts(struct list_head *head)
{
	struct vfsmount *mnt;
	while (!list_empty(head)) {
		mnt = list_entry(head->next, struct vfsmount, mnt_hash);
		list_del_init(&mnt->mnt_hash);
		if (mnt->mnt_parent != mnt) {
			struct dentry *dentry;
			struct vfsmount *m;
			spin_lock(&vfsmount_lock);
			dentry = mnt->mnt_mountpoint;
			m = mnt->mnt_parent;
			mnt->mnt_mountpoint = mnt->mnt_root;
			mnt->mnt_parent = mnt;
			spin_unlock(&vfsmount_lock);
			dput(dentry);
			mntput(m);
		}
		mntput(mnt);
	}
}

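/*
 * Note (added for clarity, not part of the original source): umount_tree()
 * only detaches the tree rooted at @mnt and collects the victims on @kill;
 * callers hold vfsmount_lock (and namespace_sem) and drop the final
 * references later via release_mounts() once the locks have been released.
 */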
void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill)
{
	struct vfsmount *p;

	for (p = mnt; p; p = next_mnt(p, mnt)) {
		list_del(&p->mnt_hash);
		list_add(&p->mnt_hash, kill);
	}

	if (propagate)
		propagate_umount(kill);

	list_for_each_entry(p, kill, mnt_hash) {
		list_del_init(&p->mnt_expire);
		list_del_init(&p->mnt_list);
		__touch_namespace(p->mnt_namespace);
		p->mnt_namespace = NULL;
		list_del_init(&p->mnt_child);
		if (p->mnt_parent != p)
			p->mnt_mountpoint->d_mounted--;
		change_mnt_propagation(p, MS_PRIVATE);
	}
}

static int do_umount(struct vfsmount *mnt, int flags)
{
	struct super_block *sb = mnt->mnt_sb;
	int retval;
	LIST_HEAD(umount_list);

	retval = security_sb_umount(mnt, flags);
	if (retval)
		return retval;

	/*
	 * Allow userspace to request a mountpoint be expired rather than
	 * unmounting unconditionally. Unmount only happens if:
	 *  (1) the mark is already set (the mark is cleared by mntput())
	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
	 */
	if (flags & MNT_EXPIRE) {
		if (mnt == current->fs->rootmnt ||
		    flags & (MNT_FORCE | MNT_DETACH))
			return -EINVAL;

		if (atomic_read(&mnt->mnt_count) != 2)
			return -EBUSY;

		if (!xchg(&mnt->mnt_expiry_mark, 1))
			return -EAGAIN;
	}

	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things. In the Unix tradition of
	 * 'Gee thats tricky lets do it in userspace' the umount_begin
	 * might fail to complete on the first run through as other tasks
	 * must return, and the like. Thats for the mount program to worry
	 * about for the moment.
	 */

	lock_kernel();
	if ((flags & MNT_FORCE) && sb->s_op->umount_begin)
		sb->s_op->umount_begin(sb);
	unlock_kernel();

	/*
	 * No sense to grab the lock for this test, but test itself looks
	 * somewhat bogus. Suggestions for better replacement?
	 * Ho-hum... In principle, we might treat that as umount + switch
	 * to rootfs. GC would eventually take care of the old vfsmount.
	 * Actually it makes sense, especially if rootfs would contain a
	 * /reboot - static binary that would close all descriptors and
	 * call reboot(9). Then init(8) could umount root and exec /reboot.
	 */
	if (mnt == current->fs->rootmnt && !(flags & MNT_DETACH)) {
		/*
		 * Special case for "unmounting" root ...
		 * we just try to remount it readonly.
		 */
		down_write(&sb->s_umount);
		if (!(sb->s_flags & MS_RDONLY)) {
			lock_kernel();
			DQUOT_OFF(sb);
			retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
			unlock_kernel();
		}
		up_write(&sb->s_umount);
		return retval;
	}

	down_write(&namespace_sem);
	spin_lock(&vfsmount_lock);
	event++;

	retval = -EBUSY;
	if (flags & MNT_DETACH || !propagate_mount_busy(mnt, 2)) {
		if (!list_empty(&mnt->mnt_list))
			umount_tree(mnt, 1, &umount_list);
		retval = 0;
	}
	spin_unlock(&vfsmount_lock);
	if (retval)
		security_sb_umount_busy(mnt);
	up_write(&namespace_sem);
	release_mounts(&umount_list);
	return retval;
}

/*
 * Now umount can handle mount points as well as block devices.
 * This is important for filesystems which use unnamed block devices.
 *
 * We now support a flag for forced unmount like the other 'big iron'
 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
 */

asmlinkage long sys_umount(char __user * name, int flags)
{
	struct nameidata nd;
	int retval;

	retval = __user_walk(name, LOOKUP_FOLLOW, &nd);
	if (retval)
		goto out;
	retval = -EINVAL;
	if (nd.dentry != nd.mnt->mnt_root)
		goto dput_and_out;
	if (!check_mnt(nd.mnt))
		goto dput_and_out;

	retval = -EPERM;
	if (!capable(CAP_SYS_ADMIN))
		goto dput_and_out;

	retval = do_umount(nd.mnt, flags);
dput_and_out:
	path_release_on_umount(&nd);
out:
	return retval;
}

#ifdef __ARCH_WANT_SYS_OLDUMOUNT

/*
 * The 2.0 compatible umount. No flags.
 */
asmlinkage long sys_oldumount(char __user * name)
{
	return sys_umount(name, 0);
}

#endif

static int mount_is_safe(struct nameidata *nd)
{
	if (capable(CAP_SYS_ADMIN))
		return 0;
	return -EPERM;
#ifdef notyet
	if (S_ISLNK(nd->dentry->d_inode->i_mode))
		return -EPERM;
	if (nd->dentry->d_inode->i_mode & S_ISVTX) {
		if (current->uid != nd->dentry->d_inode->i_uid)
			return -EPERM;
	}
	if (vfs_permission(nd, MAY_WRITE))
		return -EPERM;
	return 0;
#endif
}

static int lives_below_in_same_fs(struct dentry *d, struct dentry *dentry)
{
	while (1) {
		if (d == dentry)
			return 1;
		if (d == NULL || d == d->d_parent)
			return 0;
		d = d->d_parent;
	}
}

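/*
 * Note (added for clarity, not part of the original source): copy_tree()
 * clones the subtree of @mnt that lies below @dentry, using clone_mnt() for
 * each mount.  It returns NULL on allocation failure, or when @mnt is
 * unbindable and CL_COPY_ALL is not set.  Callers hold namespace_sem.
 */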
struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry,
					int flag)
{
	struct vfsmount *res, *p, *q, *r, *s;
	struct nameidata nd;

	if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(mnt))
		return NULL;

	res = q = clone_mnt(mnt, dentry, flag);
	if (!q)
		goto Enomem;
	q->mnt_mountpoint = mnt->mnt_mountpoint;

	p = mnt;
	list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
		if (!lives_below_in_same_fs(r->mnt_mountpoint, dentry))
			continue;

		for (s = r; s; s = next_mnt(s, r)) {
			if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(s)) {
				s = skip_mnt_tree(s);
				continue;
			}
			while (p != s->mnt_parent) {
				p = p->mnt_parent;
				q = q->mnt_parent;
			}
			p = s;
			nd.mnt = q;
			nd.dentry = p->mnt_mountpoint;
			q = clone_mnt(p, p->mnt_root, flag);
			if (!q)
				goto Enomem;
			spin_lock(&vfsmount_lock);
			list_add_tail(&q->mnt_list, &res->mnt_list);
			attach_mnt(q, &nd);
			spin_unlock(&vfsmount_lock);
		}
	}
	return res;
Enomem:
	if (res) {
		LIST_HEAD(umount_list);
		spin_lock(&vfsmount_lock);
		umount_tree(res, 0, &umount_list);
		spin_unlock(&vfsmount_lock);
		release_mounts(&umount_list);
	}
	return NULL;
}

/*
 *  @source_mnt : mount tree to be attached
 *  @nd         : place the mount tree @source_mnt is attached
 *  @parent_nd  : if non-null, detach the source_mnt from its parent and
 *  		   store the parent mount and mountpoint dentry.
 *  		   (done when source_mnt is moved)
 *
 *  NOTE: the table below explains the semantics when a source mount
 *  of a given type is attached to a destination mount of a given type.
 * ---------------------------------------------------------------------------
 * |         BIND MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared       |  private     |  slave       | unbindable      |
 * | dest     |              |              |              |                 |
 * |   |      |              |              |              |                 |
 * |   v      |              |              |              |                 |
 * |**************************************************************************
 * |  shared  | shared (++)  |  shared (+)  | shared (+++) |  invalid        |
 * |          |              |              |              |                 |
 * |non-shared| shared (+)   |  private     |  slave (*)   |  invalid        |
 * ***************************************************************************
 * A bind operation clones the source mount and mounts the clone on the
 * destination mount.
 *
 * (++)  the cloned mount is propagated to all the mounts in the propagation
 *	 tree of the destination mount and the cloned mount is added to
 *	 the peer group of the source mount.
 * (+)   the cloned mount is created under the destination mount and is marked
 *       as shared. The cloned mount is added to the peer group of the source
 *       mount.
 * (+++) the mount is propagated to all the mounts in the propagation tree
 *       of the destination mount and the cloned mount is made slave
 *       of the same master as that of the source mount. The cloned mount
 *       is marked as 'shared and slave'.
 * (*)   the cloned mount is made a slave of the same master as that of the
 *	 source mount.
 *
 * ---------------------------------------------------------------------------
 * |         MOVE MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared       |  private     |  slave       | unbindable      |
 * | dest     |              |              |              |                 |
 * |   |      |              |              |              |                 |
 * |   v      |              |              |              |                 |
 * |**************************************************************************
 * |  shared  | shared (+)   |  shared (+)  | shared (+++) |  invalid        |
 * |          |              |              |              |                 |
 * |non-shared| shared (+*)  |  private     |  slave (*)   |  unbindable     |
 * ***************************************************************************
 *
 * (+)   the mount is moved to the destination. And is then propagated to
 *	 all the mounts in the propagation tree of the destination mount.
 * (+*)  the mount is moved to the destination.
 * (+++) the mount is moved to the destination and is then propagated to
 *	 all the mounts belonging to the destination mount's propagation tree.
 *	 the mount is marked as 'shared and slave'.
 * (*)	 the mount continues to be a slave at the new location.
 *
 * if the source mount is a tree, the operations explained above are
 * applied to each mount in the tree.
 * Must be called without spinlocks held, since this function can sleep
 * in allocations.
 */
static int attach_recursive_mnt(struct vfsmount *source_mnt,
		struct nameidata *nd, struct nameidata *parent_nd)
{
	LIST_HEAD(tree_list);
	struct vfsmount *dest_mnt = nd->mnt;
	struct dentry *dest_dentry = nd->dentry;
	struct vfsmount *child, *p;

	if (propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list))
		return -EINVAL;

	if (IS_MNT_SHARED(dest_mnt)) {
		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
			set_mnt_shared(p);
	}

	spin_lock(&vfsmount_lock);
	if (parent_nd) {
		detach_mnt(source_mnt, parent_nd);
		attach_mnt(source_mnt, nd);
		touch_namespace(current->namespace);
	} else {
		mnt_set_mountpoint(dest_mnt, dest_dentry, source_mnt);
		commit_tree(source_mnt);
	}

	list_for_each_entry_safe(child, p, &tree_list, mnt_hash) {
		list_del_init(&child->mnt_hash);
		commit_tree(child);
	}
	spin_unlock(&vfsmount_lock);
	return 0;
}

static int graft_tree(struct vfsmount *mnt, struct nameidata *nd)
{
	int err;
	if (mnt->mnt_sb->s_flags & MS_NOUSER)
		return -EINVAL;

	if (S_ISDIR(nd->dentry->d_inode->i_mode) !=
	      S_ISDIR(mnt->mnt_root->d_inode->i_mode))
		return -ENOTDIR;

	err = -ENOENT;
	mutex_lock(&nd->dentry->d_inode->i_mutex);
	if (IS_DEADDIR(nd->dentry->d_inode))
		goto out_unlock;

	err = security_sb_check_sb(mnt, nd);
	if (err)
		goto out_unlock;

	err = -ENOENT;
	if (IS_ROOT(nd->dentry) || !d_unhashed(nd->dentry))
		err = attach_recursive_mnt(mnt, nd, NULL);
out_unlock:
	mutex_unlock(&nd->dentry->d_inode->i_mutex);
	if (!err)
		security_sb_post_addmount(mnt, nd);
	return err;
}

/*
 * recursively change the type of the mountpoint.
 */
static int do_change_type(struct nameidata *nd, int flag)
{
	struct vfsmount *m, *mnt = nd->mnt;
	int recurse = flag & MS_REC;
	int type = flag & ~MS_REC;

	if (nd->dentry != nd->mnt->mnt_root)
		return -EINVAL;

	down_write(&namespace_sem);
	spin_lock(&vfsmount_lock);
	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
		change_mnt_propagation(m, type);
	spin_unlock(&vfsmount_lock);
	up_write(&namespace_sem);
	return 0;
}

/*
 * do loopback mount.
 */
static int do_loopback(struct nameidata *nd, char *old_name, unsigned long flags, int mnt_flags)
{
	struct nameidata old_nd;
	struct vfsmount *mnt = NULL;
	int recurse = flags & MS_REC;
	int err = mount_is_safe(nd);

	if (err)
		return err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = path_lookup(old_name, LOOKUP_FOLLOW, &old_nd);
	if (err)
		return err;

	down_write(&namespace_sem);
	err = -EINVAL;
	if (IS_MNT_UNBINDABLE(old_nd.mnt))
		goto out;

	if (!check_mnt(nd->mnt) || !check_mnt(old_nd.mnt))
		goto out;

	err = -ENOMEM;
	if (recurse)
		mnt = copy_tree(old_nd.mnt, old_nd.dentry, 0);
	else
		mnt = clone_mnt(old_nd.mnt, old_nd.dentry, 0);

	if (!mnt)
		goto out;

	err = graft_tree(mnt, nd);
	if (err) {
		LIST_HEAD(umount_list);
		spin_lock(&vfsmount_lock);
		umount_tree(mnt, 0, &umount_list);
		spin_unlock(&vfsmount_lock);
		release_mounts(&umount_list);
	}
	mnt->mnt_flags = mnt_flags;

out:
	up_write(&namespace_sem);
	path_release(&old_nd);
	return err;
}

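/*
 * For illustration (not part of the original source): do_loopback() is where
 * `mount --bind olddir newdir` ends up (MS_BIND), while
 * `mount --rbind olddir newdir` additionally sets MS_REC and copies the
 * whole subtree of mounts below olddir via copy_tree().
 */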
/*
 * change filesystem flags. dir should be a physical root of filesystem.
 * If you've mounted a non-root directory somewhere and want to do remount
 * on it - tough luck.
 */
static int do_remount(struct nameidata *nd, int flags, int mnt_flags,
		      void *data)
{
	int err;
	struct super_block *sb = nd->mnt->mnt_sb;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!check_mnt(nd->mnt))
		return -EINVAL;

	if (nd->dentry != nd->mnt->mnt_root)
		return -EINVAL;

	down_write(&sb->s_umount);
	err = do_remount_sb(sb, flags, data, 0);
	if (!err)
		nd->mnt->mnt_flags = mnt_flags;
	up_write(&sb->s_umount);
	if (!err)
		security_sb_post_remount(nd->mnt, flags, data);
	return err;
}

static inline int tree_contains_unbindable(struct vfsmount *mnt)
{
	struct vfsmount *p;
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		if (IS_MNT_UNBINDABLE(p))
			return 1;
	}
	return 0;
}

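/*
 * Note (added for clarity, not part of the original source): do_move_mount()
 * implements mount(2) with MS_MOVE (`mount --move olddir newdir`): the mount
 * at @old_name is detached from its current mountpoint and re-attached at
 * @nd, subject to the propagation rules in the table above
 * attach_recursive_mnt().
 */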
static int do_move_mount(struct nameidata *nd, char *old_name)
{
	struct nameidata old_nd, parent_nd;
	struct vfsmount *p;
	int err = 0;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = path_lookup(old_name, LOOKUP_FOLLOW, &old_nd);
	if (err)
		return err;

	down_write(&namespace_sem);
	while (d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
		;
	err = -EINVAL;
	if (!check_mnt(nd->mnt) || !check_mnt(old_nd.mnt))
		goto out;

	err = -ENOENT;
	mutex_lock(&nd->dentry->d_inode->i_mutex);
	if (IS_DEADDIR(nd->dentry->d_inode))
		goto out1;

	if (!IS_ROOT(nd->dentry) && d_unhashed(nd->dentry))
		goto out1;

	err = -EINVAL;
	if (old_nd.dentry != old_nd.mnt->mnt_root)
		goto out1;

	if (old_nd.mnt == old_nd.mnt->mnt_parent)
		goto out1;

	if (S_ISDIR(nd->dentry->d_inode->i_mode) !=
	      S_ISDIR(old_nd.dentry->d_inode->i_mode))
		goto out1;
	/*
	 * Don't move a mount residing in a shared parent.
	 */
	if (old_nd.mnt->mnt_parent && IS_MNT_SHARED(old_nd.mnt->mnt_parent))
		goto out1;
	/*
	 * Don't move a mount tree containing unbindable mounts to a destination
	 * mount which is shared.
	 */
	if (IS_MNT_SHARED(nd->mnt) && tree_contains_unbindable(old_nd.mnt))
		goto out1;
	err = -ELOOP;
	for (p = nd->mnt; p->mnt_parent != p; p = p->mnt_parent)
		if (p == old_nd.mnt)
			goto out1;

	if ((err = attach_recursive_mnt(old_nd.mnt, nd, &parent_nd)))
		goto out1;

	spin_lock(&vfsmount_lock);
	/* if the mount is moved, it should no longer be expire
	 * automatically */
	list_del_init(&old_nd.mnt->mnt_expire);
	spin_unlock(&vfsmount_lock);
out1:
	mutex_unlock(&nd->dentry->d_inode->i_mutex);
out:
	up_write(&namespace_sem);
	if (!err)
		path_release(&parent_nd);
	path_release(&old_nd);
	return err;
}

/*
 * create a new mount for userspace and request it to be added into the
 * namespace's tree
 */
static int do_new_mount(struct nameidata *nd, char *type, int flags,
			int mnt_flags, char *name, void *data)
{
	struct vfsmount *mnt;

	if (!type || !memchr(type, 0, PAGE_SIZE))
		return -EINVAL;

	/* we need capabilities... */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	mnt = do_kern_mount(type, flags, name, data);
	if (IS_ERR(mnt))
		return PTR_ERR(mnt);

	return do_add_mount(mnt, nd, mnt_flags, NULL);
}

/*
 * add a mount into a namespace's mount tree
 * - provide the option of adding the new mount to an expiration list
 */
int do_add_mount(struct vfsmount *newmnt, struct nameidata *nd,
		 int mnt_flags, struct list_head *fslist)
{
	int err;

	down_write(&namespace_sem);
	/* Something was mounted here while we slept */
	while (d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
		;
	err = -EINVAL;
	if (!check_mnt(nd->mnt))
		goto unlock;

	/* Refuse the same filesystem on the same mount point */
	err = -EBUSY;
	if (nd->mnt->mnt_sb == newmnt->mnt_sb &&
	    nd->mnt->mnt_root == nd->dentry)
		goto unlock;

	err = -EINVAL;
	if (S_ISLNK(newmnt->mnt_root->d_inode->i_mode))
		goto unlock;

	newmnt->mnt_flags = mnt_flags;
	if ((err = graft_tree(newmnt, nd)))
		goto unlock;

	if (fslist) {
		/* add to the specified expiration list */
		spin_lock(&vfsmount_lock);
		list_add_tail(&newmnt->mnt_expire, fslist);
		spin_unlock(&vfsmount_lock);
	}
	up_write(&namespace_sem);
	return 0;

unlock:
	up_write(&namespace_sem);
	mntput(newmnt);
	return err;
}

EXPORT_SYMBOL_GPL(do_add_mount);

static void expire_mount(struct vfsmount *mnt, struct list_head *mounts,
			 struct list_head *umounts)
{
	spin_lock(&vfsmount_lock);

	/*
	 * Check if mount is still attached, if not, let whoever holds it deal
	 * with the sucker
	 */
	if (mnt->mnt_parent == mnt) {
		spin_unlock(&vfsmount_lock);
		return;
	}

	/*
	 * Check that it is still dead: the count should now be 2 - as
	 * contributed by the vfsmount parent and the mntget above
	 */
	if (!propagate_mount_busy(mnt, 2)) {
		/* delete from the namespace */
		touch_namespace(mnt->mnt_namespace);
		list_del_init(&mnt->mnt_list);
		mnt->mnt_namespace = NULL;
		umount_tree(mnt, 1, umounts);
		spin_unlock(&vfsmount_lock);
	} else {
		/*
		 * Someone brought it back to life whilst we didn't have any
		 * locks held so return it to the expiration list
		 */
		list_add_tail(&mnt->mnt_expire, mounts);
		spin_unlock(&vfsmount_lock);
	}
}

/*
 * process a list of expirable mountpoints with the intent of discarding any
 * mountpoints that aren't in use and haven't been touched since last we came
 * here
 */
void mark_mounts_for_expiry(struct list_head *mounts)
{
	struct namespace *namespace;
	struct vfsmount *mnt, *next;
	LIST_HEAD(graveyard);

	if (list_empty(mounts))
		return;

	spin_lock(&vfsmount_lock);

	/* extract from the expiration list every vfsmount that matches the
	 * following criteria:
	 * - only referenced by its parent vfsmount
	 * - still marked for expiry (marked on the last call here; marks are
	 *   cleared by mntput())
	 */
	list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
		if (!xchg(&mnt->mnt_expiry_mark, 1) ||
		    atomic_read(&mnt->mnt_count) != 1)
			continue;

		mntget(mnt);
		list_move(&mnt->mnt_expire, &graveyard);
	}

	/*
	 * go through the vfsmounts we've just consigned to the graveyard to
	 * - check that they're still dead
	 * - delete the vfsmount from the appropriate namespace under lock
	 * - dispose of the corpse
	 */
	while (!list_empty(&graveyard)) {
		LIST_HEAD(umounts);
		mnt = list_entry(graveyard.next, struct vfsmount, mnt_expire);
		list_del_init(&mnt->mnt_expire);

		/* don't do anything if the namespace is dead - all the
		 * vfsmounts from it are going away anyway */
		namespace = mnt->mnt_namespace;
		if (!namespace || !namespace->root)
			continue;
		get_namespace(namespace);

		spin_unlock(&vfsmount_lock);
		down_write(&namespace_sem);
		expire_mount(mnt, mounts, &umounts);
		up_write(&namespace_sem);
		release_mounts(&umounts);
		mntput(mnt);
		put_namespace(namespace);
		spin_lock(&vfsmount_lock);
	}

	spin_unlock(&vfsmount_lock);
}

EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);

/*
 * Some copy_from_user() implementations do not return the exact number of
 * bytes remaining to copy on a fault. But copy_mount_options() requires that.
 * Note that this function differs from copy_from_user() in that it will oops
 * on bad values of `to', rather than returning a short copy.
 */
static long exact_copy_from_user(void *to, const void __user * from,
				 unsigned long n)
{
	char *t = to;
	const char __user *f = from;
	char c;

	if (!access_ok(VERIFY_READ, from, n))
		return n;

	while (n) {
		if (__get_user(c, f)) {
			memset(t, 0, n);
			break;
		}
		*t++ = c;
		f++;
		n--;
	}
	return n;
}

int copy_mount_options(const void __user * data, unsigned long *where)
{
	int i;
	unsigned long page;
	unsigned long size;

	*where = 0;
	if (!data)
		return 0;

	if (!(page = __get_free_page(GFP_KERNEL)))
		return -ENOMEM;

	/* We only care that *some* data at the address the user
	 * gave us is valid. Just in case, we'll zero
	 * the remainder of the page.
	 */
	/* copy_from_user cannot cross TASK_SIZE ! */
	size = TASK_SIZE - (unsigned long)data;
	if (size > PAGE_SIZE)
		size = PAGE_SIZE;

	i = size - exact_copy_from_user((void *)page, data, size);
	if (!i) {
		free_page(page);
		return -EFAULT;
	}
	if (i != PAGE_SIZE)
		memset((char *)page + i, 0, PAGE_SIZE - i);
	*where = page;
	return 0;
}

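/*
 * For illustration (not part of the original source): a typical user-space
 * call that ends up in do_mount() below is
 *
 *	mount("/dev/hda1", "/mnt", "ext3", MS_NOATIME, "errors=remount-ro");
 *
 * sys_mount() copies dev_name, type and data into kernel pages with
 * copy_mount_options() above, and dir_name with getname(), before calling
 * do_mount().
 */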
/*
 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
 * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
 *
 * data is a (void *) that can point to any structure up to
 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
 * information (or be NULL).
 *
 * Pre-0.97 versions of mount() didn't have a flags word.
 * When the flags word was introduced its top half was required
 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
 * Therefore, if this magic number is present, it carries no information
 * and must be discarded.
 */
long do_mount(char *dev_name, char *dir_name, char *type_page,
	      unsigned long flags, void *data_page)
{
	struct nameidata nd;
	int retval = 0;
	int mnt_flags = 0;

	/* Discard magic */
	if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
		flags &= ~MS_MGC_MSK;

	/* Basic sanity checks */

	if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE))
		return -EINVAL;
	if (dev_name && !memchr(dev_name, 0, PAGE_SIZE))
		return -EINVAL;

	if (data_page)
		((char *)data_page)[PAGE_SIZE - 1] = 0;

	/* Separate the per-mountpoint flags */
	if (flags & MS_NOSUID)
		mnt_flags |= MNT_NOSUID;
	if (flags & MS_NODEV)
		mnt_flags |= MNT_NODEV;
	if (flags & MS_NOEXEC)
		mnt_flags |= MNT_NOEXEC;
	if (flags & MS_NOATIME)
		mnt_flags |= MNT_NOATIME;
	if (flags & MS_NODIRATIME)
		mnt_flags |= MNT_NODIRATIME;

	flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE |
		   MS_NOATIME | MS_NODIRATIME);

	/* ... and get the mountpoint */
	retval = path_lookup(dir_name, LOOKUP_FOLLOW, &nd);
	if (retval)
		return retval;

	retval = security_sb_mount(dev_name, &nd, type_page, flags, data_page);
	if (retval)
		goto dput_out;

	if (flags & MS_REMOUNT)
		retval = do_remount(&nd, flags & ~MS_REMOUNT, mnt_flags,
				    data_page);
	else if (flags & MS_BIND)
		retval = do_loopback(&nd, dev_name, flags, mnt_flags);
	else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		retval = do_change_type(&nd, flags);
	else if (flags & MS_MOVE)
		retval = do_move_mount(&nd, dev_name);
	else
		retval = do_new_mount(&nd, type_page, flags, mnt_flags,
				      dev_name, data_page);
dput_out:
	path_release(&nd);
	return retval;
}

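/*
 * For illustration (not part of the original source): dup_namespace() below
 * is reached when a task is created with CLONE_NEWNS, e.g.
 *
 *	clone(child_fn, child_stack, CLONE_NEWNS | SIGCHLD, NULL);
 *
 * The child then gets a private copy of the parent's mount tree (see
 * copy_namespace(), which requires CAP_SYS_ADMIN).
 */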
/*
 * Allocate a new namespace structure and populate it with contents
 * copied from the namespace of the passed in task structure.
 */
struct namespace *dup_namespace(struct task_struct *tsk, struct fs_struct *fs)
{
	struct namespace *namespace = tsk->namespace;
	struct namespace *new_ns;
	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL, *altrootmnt = NULL;
	struct vfsmount *p, *q;

	new_ns = kmalloc(sizeof(struct namespace), GFP_KERNEL);
	if (!new_ns)
		return NULL;

	atomic_set(&new_ns->count, 1);
	INIT_LIST_HEAD(&new_ns->list);
	init_waitqueue_head(&new_ns->poll);
	new_ns->event = 0;

	down_write(&namespace_sem);
	/* First pass: copy the tree topology */
	new_ns->root = copy_tree(namespace->root, namespace->root->mnt_root,
					CL_COPY_ALL | CL_EXPIRE);
	if (!new_ns->root) {
		up_write(&namespace_sem);
		kfree(new_ns);
		return NULL;
	}
	spin_lock(&vfsmount_lock);
	list_add_tail(&new_ns->list, &new_ns->root->mnt_list);
	spin_unlock(&vfsmount_lock);

	/*
	 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
	 * as belonging to new namespace.  We have already acquired a private
	 * fs_struct, so tsk->fs->lock is not needed.
	 */
	p = namespace->root;
	q = new_ns->root;
	while (p) {
		q->mnt_namespace = new_ns;
		if (fs) {
			if (p == fs->rootmnt) {
				rootmnt = p;
				fs->rootmnt = mntget(q);
			}
			if (p == fs->pwdmnt) {
				pwdmnt = p;
				fs->pwdmnt = mntget(q);
			}
			if (p == fs->altrootmnt) {
				altrootmnt = p;
				fs->altrootmnt = mntget(q);
			}
		}
		p = next_mnt(p, namespace->root);
		q = next_mnt(q, new_ns->root);
	}
	up_write(&namespace_sem);

	if (rootmnt)
		mntput(rootmnt);
	if (pwdmnt)
		mntput(pwdmnt);
	if (altrootmnt)
		mntput(altrootmnt);

	return new_ns;
}

int copy_namespace(int flags, struct task_struct *tsk)
{
	struct namespace *namespace = tsk->namespace;
	struct namespace *new_ns;
	int err = 0;

	if (!namespace)
		return 0;

	get_namespace(namespace);

	if (!(flags & CLONE_NEWNS))
		return 0;

	if (!capable(CAP_SYS_ADMIN)) {
		err = -EPERM;
		goto out;
	}

	new_ns = dup_namespace(tsk, tsk->fs);
	if (!new_ns) {
		err = -ENOMEM;
		goto out;
	}

	tsk->namespace = new_ns;

out:
	put_namespace(namespace);
	return err;
}

asmlinkage long sys_mount(char __user * dev_name, char __user * dir_name,
			  char __user * type, unsigned long flags,
			  void __user * data)
{
	int retval;
	unsigned long data_page;
	unsigned long type_page;
	unsigned long dev_page;
	char *dir_page;

	retval = copy_mount_options(type, &type_page);
	if (retval < 0)
		return retval;

	dir_page = getname(dir_name);
	retval = PTR_ERR(dir_page);
	if (IS_ERR(dir_page))
		goto out1;

	retval = copy_mount_options(dev_name, &dev_page);
	if (retval < 0)
		goto out2;

	retval = copy_mount_options(data, &data_page);
	if (retval < 0)
		goto out3;

	lock_kernel();
	retval = do_mount((char *)dev_page, dir_page, (char *)type_page,
			  flags, (void *)data_page);
	unlock_kernel();
	free_page(data_page);

out3:
	free_page(dev_page);
out2:
	putname(dir_page);
out1:
	free_page(type_page);
	return retval;
}

/*
 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
 * It can block. Requires the big lock held.
 */
void set_fs_root(struct fs_struct *fs, struct vfsmount *mnt,
		 struct dentry *dentry)
{
	struct dentry *old_root;
	struct vfsmount *old_rootmnt;
	write_lock(&fs->lock);
	old_root = fs->root;
	old_rootmnt = fs->rootmnt;
	fs->rootmnt = mntget(mnt);
	fs->root = dget(dentry);
	write_unlock(&fs->lock);
	if (old_root) {
		dput(old_root);
		mntput(old_rootmnt);
	}
}

/*
 * Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values.
 * It can block. Requires the big lock held.
 */
void set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
		struct dentry *dentry)
{
	struct dentry *old_pwd;
	struct vfsmount *old_pwdmnt;

	write_lock(&fs->lock);
	old_pwd = fs->pwd;
	old_pwdmnt = fs->pwdmnt;
	fs->pwdmnt = mntget(mnt);
	fs->pwd = dget(dentry);
	write_unlock(&fs->lock);

	if (old_pwd) {
		dput(old_pwd);
		mntput(old_pwdmnt);
	}
}

static void chroot_fs_refs(struct nameidata *old_nd, struct nameidata *new_nd)
{
	struct task_struct *g, *p;
	struct fs_struct *fs;

	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		task_lock(p);
		fs = p->fs;
		if (fs) {
			atomic_inc(&fs->count);
			task_unlock(p);
			if (fs->root == old_nd->dentry
			    && fs->rootmnt == old_nd->mnt)
				set_fs_root(fs, new_nd->mnt, new_nd->dentry);
			if (fs->pwd == old_nd->dentry
			    && fs->pwdmnt == old_nd->mnt)
				set_fs_pwd(fs, new_nd->mnt, new_nd->dentry);
			put_fs_struct(fs);
		} else
			task_unlock(p);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
}

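/*
 * For illustration (not part of the original source): the classic sequence
 * that ends up in sys_pivot_root() when switching away from an initrd is
 * roughly
 *
 *	cd /new_root
 *	pivot_root . old_root
 *	exec chroot . /sbin/init
 *
 * i.e. pivot_root("/new_root", "/new_root/old_root") followed by a chroot
 * into the new root.
 */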
/*
 * pivot_root Semantics:
 * Moves the root file system of the current process to the directory put_old,
 * makes new_root as the new root file system of the current process, and sets
 * root/cwd of all processes which had them on the current root to new_root.
 *
 * Restrictions:
 * The new_root and put_old must be directories, and must not be on the
 * same file system as the current process root. The put_old must be
 * underneath new_root, i.e. adding a non-zero number of /.. to the string
 * pointed to by put_old must yield the same directory as new_root. No other
 * file system may be mounted on put_old. After all, new_root is a mountpoint.
 *
 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
 * See Documentation/filesystems/ramfs-rootfs-initramfs.txt for alternatives
 * in this situation.
 *
 * Notes:
 *  - we don't move root/cwd if they are not at the root (reason: if something
 *    cared enough to change them, it's probably wrong to force them elsewhere)
 *  - it's okay to pick a root that isn't the root of a file system, e.g.
 *    /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
 *    though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
 *    first.
 */
asmlinkage long sys_pivot_root(const char __user * new_root,
			       const char __user * put_old)
{
	struct vfsmount *tmp;
	struct nameidata new_nd, old_nd, parent_nd, root_parent, user_nd;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	lock_kernel();

	error = __user_walk(new_root, LOOKUP_FOLLOW | LOOKUP_DIRECTORY,
			    &new_nd);
	if (error)
		goto out0;
	error = -EINVAL;
	if (!check_mnt(new_nd.mnt))
		goto out1;

	error = __user_walk(put_old, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old_nd);
	if (error)
		goto out1;

	error = security_sb_pivotroot(&old_nd, &new_nd);
	if (error) {
		path_release(&old_nd);
		goto out1;
	}

	read_lock(&current->fs->lock);
	user_nd.mnt = mntget(current->fs->rootmnt);
	user_nd.dentry = dget(current->fs->root);
	read_unlock(&current->fs->lock);
	down_write(&namespace_sem);
	mutex_lock(&old_nd.dentry->d_inode->i_mutex);
	error = -EINVAL;
	if (IS_MNT_SHARED(old_nd.mnt) ||
		IS_MNT_SHARED(new_nd.mnt->mnt_parent) ||
		IS_MNT_SHARED(user_nd.mnt->mnt_parent))
		goto out2;
	if (!check_mnt(user_nd.mnt))
		goto out2;
	error = -ENOENT;
	if (IS_DEADDIR(new_nd.dentry->d_inode))
		goto out2;
	if (d_unhashed(new_nd.dentry) && !IS_ROOT(new_nd.dentry))
		goto out2;
	if (d_unhashed(old_nd.dentry) && !IS_ROOT(old_nd.dentry))
		goto out2;
	error = -EBUSY;
	if (new_nd.mnt == user_nd.mnt || old_nd.mnt == user_nd.mnt)
		goto out2; /* loop, on the same file system */
	error = -EINVAL;
	if (user_nd.mnt->mnt_root != user_nd.dentry)
		goto out2; /* not a mountpoint */
	if (user_nd.mnt->mnt_parent == user_nd.mnt)
		goto out2; /* not attached */
	if (new_nd.mnt->mnt_root != new_nd.dentry)
		goto out2; /* not a mountpoint */
	if (new_nd.mnt->mnt_parent == new_nd.mnt)
		goto out2; /* not attached */
	tmp = old_nd.mnt; /* make sure we can reach put_old from new_root */
	spin_lock(&vfsmount_lock);
	if (tmp != new_nd.mnt) {
		for (;;) {
			if (tmp->mnt_parent == tmp)
				goto out3; /* already mounted on put_old */
			if (tmp->mnt_parent == new_nd.mnt)
				break;
			tmp = tmp->mnt_parent;
		}
		if (!is_subdir(tmp->mnt_mountpoint, new_nd.dentry))
			goto out3;
	} else if (!is_subdir(old_nd.dentry, new_nd.dentry))
		goto out3;
	detach_mnt(new_nd.mnt, &parent_nd);
	detach_mnt(user_nd.mnt, &root_parent);
	attach_mnt(user_nd.mnt, &old_nd);     /* mount old root on put_old */
	attach_mnt(new_nd.mnt, &root_parent); /* mount new_root on / */
	touch_namespace(current->namespace);
	spin_unlock(&vfsmount_lock);
	chroot_fs_refs(&user_nd, &new_nd);
	security_sb_post_pivotroot(&user_nd, &new_nd);
	error = 0;
	path_release(&root_parent);
	path_release(&parent_nd);
out2:
	mutex_unlock(&old_nd.dentry->d_inode->i_mutex);
	up_write(&namespace_sem);
	path_release(&user_nd);
	path_release(&old_nd);
out1:
	path_release(&new_nd);
out0:
	unlock_kernel();
	return error;
out3:
	spin_unlock(&vfsmount_lock);
	goto out2;
}

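/*
 * Note (added for clarity, not part of the original source): called from
 * mnt_init() at boot.  Mounts rootfs, wraps it in the initial namespace,
 * attaches that namespace to every early task, and makes rootfs the root
 * and cwd of current.
 */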
static void __init init_mount_tree(void)
{
	struct vfsmount *mnt;
	struct namespace *namespace;
	struct task_struct *g, *p;

	mnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
	if (IS_ERR(mnt))
		panic("Can't create rootfs");
	namespace = kmalloc(sizeof(*namespace), GFP_KERNEL);
	if (!namespace)
		panic("Can't allocate initial namespace");
	atomic_set(&namespace->count, 1);
	INIT_LIST_HEAD(&namespace->list);
	init_waitqueue_head(&namespace->poll);
	namespace->event = 0;
	list_add(&mnt->mnt_list, &namespace->list);
	namespace->root = mnt;
	mnt->mnt_namespace = namespace;

	init_task.namespace = namespace;
	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		get_namespace(namespace);
		p->namespace = namespace;
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);

	set_fs_pwd(current->fs, namespace->root, namespace->root->mnt_root);
	set_fs_root(current->fs, namespace->root, namespace->root->mnt_root);
}

void __init mnt_init(unsigned long mempages)
{
	struct list_head *d;
	unsigned int nr_hash;
	int i;

	init_rwsem(&namespace_sem);

	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL, NULL);

	mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);

	if (!mount_hashtable)
		panic("Failed to allocate mount hash table\n");

	/*
	 * Find the power-of-two list-heads that can fit into the allocation..
	 * We don't guarantee that "sizeof(struct list_head)" is necessarily
	 * a power-of-two.
	 */
	nr_hash = PAGE_SIZE / sizeof(struct list_head);
	hash_bits = 0;
	do {
		hash_bits++;
	} while ((nr_hash >> hash_bits) != 0);
	hash_bits--;

	/*
	 * Re-calculate the actual number of entries and the mask
	 * from the number of bits we can fit.
	 */
	nr_hash = 1UL << hash_bits;
	hash_mask = nr_hash - 1;

	printk("Mount-cache hash table entries: %d\n", nr_hash);

	/* And initialize the newly allocated array */
	d = mount_hashtable;
	i = nr_hash;
	do {
		INIT_LIST_HEAD(d);
		d++;
		i--;
	} while (i);
	sysfs_init();
	subsystem_register(&fs_subsys);
	init_rootfs();
	init_mount_tree();
}

void __put_namespace(struct namespace *namespace)
{
	struct vfsmount *root = namespace->root;
	LIST_HEAD(umount_list);
	namespace->root = NULL;
	spin_unlock(&vfsmount_lock);
	down_write(&namespace_sem);
	spin_lock(&vfsmount_lock);
	umount_tree(root, 0, &umount_list);
	spin_unlock(&vfsmount_lock);
	up_write(&namespace_sem);
	release_mounts(&umount_list);
	kfree(namespace);
}