// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/namespace.c
 *
 * (C) Copyright Al Viro 2000, 2001
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 * Heavily rewritten.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/capability.h>
#include <linux/mnt_namespace.h>
#include <linux/user_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/idr.h>
#include <linux/init.h>		/* init_rootfs */
#include <linux/fs_struct.h>	/* get_fs_root et.al. */
#include <linux/fsnotify.h>	/* fsnotify_vfsmount_delete */
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/proc_ns.h>
#include <linux/magic.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/task_work.h>
#include <linux/sched/task.h>
#include <uapi/linux/mount.h>
#include <linux/fs_context.h>
#include <linux/shmem_fs.h>
#include <linux/mnt_idmapping.h>
#include <linux/pidfs.h>
#include <linux/nstree.h>

#include "pnode.h"
#include "internal.h"

/* Maximum number of mounts in a mount namespace */
static unsigned int sysctl_mount_max __read_mostly = 100000;

static unsigned int m_hash_mask __ro_after_init;
static unsigned int m_hash_shift __ro_after_init;
static unsigned int mp_hash_mask __ro_after_init;
static unsigned int mp_hash_shift __ro_after_init;

static __initdata unsigned long mhash_entries;
static int __init set_mhash_entries(char *str)
{
	return kstrtoul(str, 0, &mhash_entries) == 0;
}
__setup("mhash_entries=", set_mhash_entries);

static __initdata unsigned long mphash_entries;
static int __init set_mphash_entries(char *str)
{
	return kstrtoul(str, 0, &mphash_entries) == 0;
}
__setup("mphash_entries=", set_mphash_entries);

static char * __initdata initramfs_options;
static int __init initramfs_options_setup(char *str)
{
	initramfs_options = str;
	return 1;
}

__setup("initramfs_options=", initramfs_options_setup);
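
/*
 * These three are consumed from the kernel command line, e.g. booting
 * with "mhash_entries=8192 mphash_entries=8192" to size the mount hash
 * tables, or "initramfs_options=size=50%" to pass mount options to the
 * initramfs tmpfs (the option values here are illustrative only).
 */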

static u64 event;
static DEFINE_XARRAY_FLAGS(mnt_id_xa, XA_FLAGS_ALLOC);
static DEFINE_IDA(mnt_group_ida);

/* Don't allow confusion with old 32bit mount ID */
#define MNT_UNIQUE_ID_OFFSET (1ULL << 31)
static u64 mnt_id_ctr = MNT_UNIQUE_ID_OFFSET;

static struct hlist_head *mount_hashtable __ro_after_init;
static struct hlist_head *mountpoint_hashtable __ro_after_init;
static struct kmem_cache *mnt_cache __ro_after_init;
static DECLARE_RWSEM(namespace_sem);
static HLIST_HEAD(unmounted);	/* protected by namespace_sem */
static LIST_HEAD(ex_mountpoints); /* protected by namespace_sem */
static struct mnt_namespace *emptied_ns; /* protected by namespace_sem */

static inline void namespace_lock(void);
static void namespace_unlock(void);
DEFINE_LOCK_GUARD_0(namespace_excl, namespace_lock(), namespace_unlock())
DEFINE_LOCK_GUARD_0(namespace_shared, down_read(&namespace_sem),
		    up_read(&namespace_sem))

DEFINE_FREE(mntput, struct vfsmount *, if (!IS_ERR(_T)) mntput(_T))
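
/*
 * An illustrative sketch of the scope-based cleanup the guard above
 * enables (not a fragment of this file):
 *
 *	struct vfsmount *m __free(mntput) = vfs_kern_mount(type, 0, name, NULL);
 *	if (IS_ERR(m))
 *		return PTR_ERR(m);
 *	// use m; mntput(m) runs automatically when m goes out of scope
 *
 * The IS_ERR() check in the guard makes ERR_PTR() values safe to hold.
 */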
95
96 #ifdef CONFIG_FSNOTIFY
97 LIST_HEAD(notify_list); /* protected by namespace_sem */
98 #endif
99
100 enum mount_kattr_flags_t {
101 MOUNT_KATTR_RECURSE = (1 << 0),
102 MOUNT_KATTR_IDMAP_REPLACE = (1 << 1),
103 };
104
105 struct mount_kattr {
106 unsigned int attr_set;
107 unsigned int attr_clr;
108 unsigned int propagation;
109 unsigned int lookup_flags;
110 enum mount_kattr_flags_t kflags;
111 struct user_namespace *mnt_userns;
112 struct mnt_idmap *mnt_idmap;
113 };
114
115 /* /sys/fs */
116 struct kobject *fs_kobj __ro_after_init;
117 EXPORT_SYMBOL_GPL(fs_kobj);
118
119 /*
120 * vfsmount lock may be taken for read to prevent changes to the
121 * vfsmount hash, ie. during mountpoint lookups or walking back
122 * up the tree.
123 *
124 * It should be taken for write in all cases where the vfsmount
125 * tree or hash is modified or when a vfsmount structure is modified.
126 */
127 __cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);
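
/*
 * The typical lockless read-side pattern (see lookup_mnt() below):
 * sample the seqcount with read_seqbegin(&mount_lock), perform the
 * lookup under rcu_read_lock(), and retry if read_seqretry() reports
 * that a writer ran in the meantime.
 */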

static void mnt_ns_release(struct mnt_namespace *ns)
{
	/* keep alive for {list,stat}mount() */
	if (ns && refcount_dec_and_test(&ns->passive)) {
		fsnotify_mntns_delete(ns);
		put_user_ns(ns->user_ns);
		kfree(ns);
	}
}
DEFINE_FREE(mnt_ns_release, struct mnt_namespace *,
	    if (!IS_ERR(_T)) mnt_ns_release(_T))

static void mnt_ns_release_rcu(struct rcu_head *rcu)
{
	mnt_ns_release(container_of(rcu, struct mnt_namespace, ns.ns_rcu));
}

static void mnt_ns_tree_remove(struct mnt_namespace *ns)
{
	/* remove from global mount namespace list */
	if (ns_tree_active(ns))
		ns_tree_remove(ns);

	call_rcu(&ns->ns.ns_rcu, mnt_ns_release_rcu);
}

/*
 * Lookup a mount namespace by id and take a passive reference count. Taking a
 * passive reference means the mount namespace can be emptied if, e.g., the last
 * task holding an active reference exits. To access the mounts of the
 * namespace the @namespace_sem must first be acquired. If the namespace has
 * already shut down before acquiring @namespace_sem, {list,stat}mount() will
 * see that the mount rbtree of the namespace is empty.
 *
 * Note the lookup is lockless and protected by a sequence counter. We only
 * need to guard against false negatives as false positives aren't
 * possible. So if we didn't find a mount namespace and the sequence
 * counter has changed we need to retry. If the sequence counter is
 * still the same we know the search actually failed.
 */
static struct mnt_namespace *lookup_mnt_ns(u64 mnt_ns_id)
{
	struct mnt_namespace *mnt_ns;
	struct ns_common *ns;

	guard(rcu)();
	ns = ns_tree_lookup_rcu(mnt_ns_id, CLONE_NEWNS);
	if (!ns)
		return NULL;

	/*
	 * The last reference count is put with RCU delay so we can
	 * unconditionally acquire a reference here.
	 */
	mnt_ns = container_of(ns, struct mnt_namespace, ns);
	refcount_inc(&mnt_ns->passive);
	return mnt_ns;
}

static inline void lock_mount_hash(void)
{
	write_seqlock(&mount_lock);
}

static inline void unlock_mount_hash(void)
{
	write_sequnlock(&mount_lock);
}

static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> m_hash_shift);
	return &mount_hashtable[tmp & m_hash_mask];
}

static inline struct hlist_head *mp_hash(struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> mp_hash_shift);
	return &mountpoint_hashtable[tmp & mp_hash_mask];
}

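/*
 * Each mount carries two identifiers: a small, reusable mnt_id handed
 * out by the xarray below (compatible with the old 32-bit mount ID) and
 * a never-reused 64-bit mnt_id_unique taken from mnt_id_ctr, which
 * starts at MNT_UNIQUE_ID_OFFSET so the two ranges cannot collide.
 */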
static int mnt_alloc_id(struct mount *mnt)
{
	int res;

	xa_lock(&mnt_id_xa);
	res = __xa_alloc(&mnt_id_xa, &mnt->mnt_id, mnt, xa_limit_31b, GFP_KERNEL);
	if (!res)
		mnt->mnt_id_unique = ++mnt_id_ctr;
	xa_unlock(&mnt_id_xa);
	return res;
}

static void mnt_free_id(struct mount *mnt)
{
	xa_erase(&mnt_id_xa, mnt->mnt_id);
}

/*
 * Allocate a new peer group ID
 */
static int mnt_alloc_group_id(struct mount *mnt)
{
	int res = ida_alloc_min(&mnt_group_ida, 1, GFP_KERNEL);

	if (res < 0)
		return res;
	mnt->mnt_group_id = res;
	return 0;
}

/*
 * Release a peer group ID
 */
void mnt_release_group_id(struct mount *mnt)
{
	ida_free(&mnt_group_ida, mnt->mnt_group_id);
	mnt->mnt_group_id = 0;
}

/*
 * vfsmount lock must be held for read
 */
static inline void mnt_add_count(struct mount *mnt, int n)
{
#ifdef CONFIG_SMP
	this_cpu_add(mnt->mnt_pcp->mnt_count, n);
#else
	preempt_disable();
	mnt->mnt_count += n;
	preempt_enable();
#endif
}

/*
 * vfsmount lock must be held for write
 */
int mnt_get_count(struct mount *mnt)
{
#ifdef CONFIG_SMP
	int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
	}

	return count;
#else
	return mnt->mnt_count;
#endif
}

static struct mount *alloc_vfsmnt(const char *name)
{
	struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		int err;

		err = mnt_alloc_id(mnt);
		if (err)
			goto out_free_cache;

		if (name)
			mnt->mnt_devname = kstrdup_const(name,
							 GFP_KERNEL_ACCOUNT);
		else
			mnt->mnt_devname = "none";
		if (!mnt->mnt_devname)
			goto out_free_id;

#ifdef CONFIG_SMP
		mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
		if (!mnt->mnt_pcp)
			goto out_free_devname;

		this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
#else
		mnt->mnt_count = 1;
		mnt->mnt_writers = 0;
#endif

		INIT_HLIST_NODE(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_HLIST_HEAD(&mnt->mnt_slave_list);
		INIT_HLIST_NODE(&mnt->mnt_slave);
		INIT_HLIST_NODE(&mnt->mnt_mp_list);
		INIT_HLIST_HEAD(&mnt->mnt_stuck_children);
		RB_CLEAR_NODE(&mnt->mnt_node);
		mnt->mnt.mnt_idmap = &nop_mnt_idmap;
	}
	return mnt;

#ifdef CONFIG_SMP
out_free_devname:
	kfree_const(mnt->mnt_devname);
#endif
out_free_id:
	mnt_free_id(mnt);
out_free_cache:
	kmem_cache_free(mnt_cache, mnt);
	return NULL;
}

/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/*
 * __mnt_is_readonly: check whether a mount is read-only
 * @mnt: the mount to check for its write status
 *
 * This shouldn't be used directly outside of the VFS.
 * It does not guarantee that the filesystem will stay
 * r/w, just that it is right *now*. This cannot and
 * should not be used in place of IS_RDONLY(inode).
 * mnt_want/drop_write() will _keep_ the filesystem
 * r/w.
 */
bool __mnt_is_readonly(const struct vfsmount *mnt)
{
	return (mnt->mnt_flags & MNT_READONLY) || sb_rdonly(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);

static inline void mnt_inc_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_inc(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers++;
#endif
}

static inline void mnt_dec_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_dec(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers--;
#endif
}

static unsigned int mnt_get_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
	}

	return count;
#else
	return mnt->mnt_writers;
#endif
}

static int mnt_is_readonly(const struct vfsmount *mnt)
{
	if (READ_ONCE(mnt->mnt_sb->s_readonly_remount))
		return 1;
	/*
	 * The barrier pairs with the barrier in sb_start_ro_state_change()
	 * making sure if we don't see s_readonly_remount set yet, we also will
	 * not see any superblock / mount flag changes done by remount.
	 * It also pairs with the barrier in sb_end_ro_state_change()
	 * assuring that if we see s_readonly_remount already cleared, we will
	 * see the values of superblock / mount flags updated by remount.
	 */
	smp_rmb();
	return __mnt_is_readonly(mnt);
}

/*
 * Most r/o & frozen checks on a fs are for operations that take discrete
 * amounts of time, like a write() or unlink(). We must keep track of when
 * those operations start (for permission checks) and when they end, so that we
 * can determine when writes are able to occur to a filesystem.
 */
/**
 * mnt_get_write_access - get write access to a mount without freeze protection
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write) before
 * returning success. This operation does not protect against the filesystem
 * being frozen. When the write operation is finished, mnt_put_write_access()
 * must be called. This is effectively a refcount.
 */
int mnt_get_write_access(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int ret = 0;

	preempt_disable();
	mnt_inc_writers(mnt);
	/*
	 * The store done by mnt_inc_writers() must be visible before we enter
	 * the WRITE_HOLD loop below, so that the slowpath can see our
	 * incremented count after it has set WRITE_HOLD.
	 */
	smp_mb();
	might_lock(&mount_lock.lock);
	while (__test_write_hold(READ_ONCE(mnt->mnt_pprev_for_sb))) {
		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
			cpu_relax();
		} else {
			/*
			 * This prevents priority inversion, if the task
			 * setting WRITE_HOLD got preempted on a remote
			 * CPU, and it prevents livelock if the task setting
			 * WRITE_HOLD has a lower priority and is bound to
			 * the same CPU as the task that is spinning here.
			 */
			preempt_enable();
			read_seqlock_excl(&mount_lock);
			read_sequnlock_excl(&mount_lock);
			preempt_disable();
		}
	}
	/*
	 * The barrier pairs with the barrier in sb_start_ro_state_change()
	 * making sure that if we see WRITE_HOLD cleared, we will also see
	 * s_readonly_remount set (or even SB_RDONLY / MNT_READONLY flags) in
	 * mnt_is_readonly() and bail in case we are racing with remount
	 * read-only.
	 */
	smp_rmb();
	if (mnt_is_readonly(m)) {
		mnt_dec_writers(mnt);
		ret = -EROFS;
	}
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(mnt_get_write_access);

/**
 * mnt_want_write - get write access to a mount
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write, filesystem
 * is not frozen) before returning success. When the write operation is
 * finished, mnt_drop_write() must be called. This is effectively a refcount.
 */
int mnt_want_write(struct vfsmount *m)
{
	int ret;

	sb_start_write(m->mnt_sb);
	ret = mnt_get_write_access(m);
	if (ret)
		sb_end_write(m->mnt_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);
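
/*
 * The usual calling pattern is (illustrative sketch):
 *
 *	err = mnt_want_write(path->mnt);
 *	if (err)
 *		return err;
 *	// ... perform the write ...
 *	mnt_drop_write(path->mnt);
 *
 * mnt_get_write_access()/mnt_put_write_access() form the same pair for
 * callers that handle freeze protection themselves.
 */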

/**
 * mnt_get_write_access_file - get write access to a file's mount
 * @file: the file whose mount we should take a write on
 *
 * This is like mnt_get_write_access, but if @file is already open for write it
 * skips incrementing mnt_writers (since the open file already has a reference)
 * and instead only does the check for emergency r/o remounts. This must be
 * paired with mnt_put_write_access_file.
 */
int mnt_get_write_access_file(struct file *file)
{
	if (file->f_mode & FMODE_WRITER) {
		/*
		 * Superblock may have become readonly while there are still
		 * writable fd's, e.g. due to a fs error with errors=remount-ro
		 */
		if (__mnt_is_readonly(file->f_path.mnt))
			return -EROFS;
		return 0;
	}
	return mnt_get_write_access(file->f_path.mnt);
}

/**
 * mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount we should take a write on
 *
 * This is like mnt_want_write, but if the file is already open for writing it
 * skips incrementing mnt_writers (since the open file already has a reference)
 * and instead only does the freeze protection and the check for emergency r/o
 * remounts. This must be paired with mnt_drop_write_file.
 */
int mnt_want_write_file(struct file *file)
{
	int ret;

	sb_start_write(file_inode(file)->i_sb);
	ret = mnt_get_write_access_file(file);
	if (ret)
		sb_end_write(file_inode(file)->i_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write_file);

/**
 * mnt_put_write_access - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done
 * performing writes to it. Must be matched with
 * mnt_get_write_access() call above.
 */
void mnt_put_write_access(struct vfsmount *mnt)
{
	preempt_disable();
	mnt_dec_writers(real_mount(mnt));
	preempt_enable();
}
EXPORT_SYMBOL_GPL(mnt_put_write_access);

/**
 * mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done performing writes to it and
 * also allows the filesystem to be frozen again. Must be matched with
 * mnt_want_write() call above.
 */
void mnt_drop_write(struct vfsmount *mnt)
{
	mnt_put_write_access(mnt);
	sb_end_write(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(mnt_drop_write);

void mnt_put_write_access_file(struct file *file)
{
	if (!(file->f_mode & FMODE_WRITER))
		mnt_put_write_access(file->f_path.mnt);
}

void mnt_drop_write_file(struct file *file)
{
	mnt_put_write_access_file(file);
	sb_end_write(file_inode(file)->i_sb);
}
EXPORT_SYMBOL(mnt_drop_write_file);

/**
 * mnt_hold_writers - prevent write access to the given mount
 * @mnt: mnt to prevent write access to
 *
 * Prevents write access to @mnt if there are no active writers for @mnt.
 * This function needs to be called and return successfully before changing
 * properties of @mnt that need to remain stable for callers with write access
 * to @mnt.
 *
 * After this function has been called successfully, callers must pair it with
 * a call to mnt_unhold_writers() in order to stop preventing write access to
 * @mnt.
 *
 * Context: This function expects to be in mount_locked_reader scope, which
 *          serializes setting WRITE_HOLD.
 * Return: On success 0 is returned.
 *	   On error, -EBUSY is returned.
 */
static inline int mnt_hold_writers(struct mount *mnt)
{
	set_write_hold(mnt);
	/*
	 * After storing WRITE_HOLD, we'll read the counters. This store
	 * should be visible before we do.
	 */
	smp_mb();

	/*
	 * With writers on hold, if this value is zero, then there are
	 * definitely no active writers (although held writers may subsequently
	 * increment the count, they'll have to wait, and decrement it after
	 * seeing MNT_READONLY).
	 *
	 * It is OK to have counter incremented on one CPU and decremented on
	 * another: the sum will add up correctly. The danger would be when we
	 * sum up each counter, if we read a counter before it is incremented,
	 * but then read another CPU's count which it has been subsequently
	 * decremented from -- we would see more decrements than we should.
	 * WRITE_HOLD protects against this scenario, because
	 * mnt_want_write first increments count, then smp_mb, then spins on
	 * WRITE_HOLD, so it can't be decremented by another CPU while
	 * we're counting up here.
	 */
	if (mnt_get_writers(mnt) > 0)
		return -EBUSY;

	return 0;
}

/**
 * mnt_unhold_writers - stop preventing write access to the given mount
 * @mnt: mnt to stop preventing write access to
 *
 * Stop preventing write access to @mnt allowing callers to gain write access
 * to @mnt again.
 *
 * This function can only be called after a call to mnt_hold_writers().
 *
 * Context: This function expects to be in the same mount_locked_reader scope
 *          as the matching mnt_hold_writers().
 */
static inline void mnt_unhold_writers(struct mount *mnt)
{
	if (!test_write_hold(mnt))
		return;
	/*
	 * MNT_READONLY must become visible before ~WRITE_HOLD, so writers
	 * that become unheld will see MNT_READONLY.
	 */
	smp_wmb();
	clear_write_hold(mnt);
}

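/*
 * All mounts of a superblock are chained through mnt_next_for_sb,
 * anchored at sb->s_mounts; mnt_pprev_for_sb points back at whatever
 * points to this mount, so unlinking needs no dedicated list head.
 */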
static inline void mnt_del_instance(struct mount *m)
{
	struct mount **p = m->mnt_pprev_for_sb;
	struct mount *next = m->mnt_next_for_sb;

	if (next)
		next->mnt_pprev_for_sb = p;
	*p = next;
}

static inline void mnt_add_instance(struct mount *m, struct super_block *s)
{
	struct mount *first = s->s_mounts;

	if (first)
		first->mnt_pprev_for_sb = &m->mnt_next_for_sb;
	m->mnt_next_for_sb = first;
	m->mnt_pprev_for_sb = &s->s_mounts;
	s->s_mounts = m;
}

static int mnt_make_readonly(struct mount *mnt)
{
	int ret;

	ret = mnt_hold_writers(mnt);
	if (!ret)
		mnt->mnt.mnt_flags |= MNT_READONLY;
	mnt_unhold_writers(mnt);
	return ret;
}

int sb_prepare_remount_readonly(struct super_block *sb)
{
	int err = 0;

	/* Racy optimization. Recheck the counter under WRITE_HOLD */
	if (atomic_long_read(&sb->s_remove_count))
		return -EBUSY;

	guard(mount_locked_reader)();

	for (struct mount *m = sb->s_mounts; m; m = m->mnt_next_for_sb) {
		if (!(m->mnt.mnt_flags & MNT_READONLY)) {
			err = mnt_hold_writers(m);
			if (err)
				break;
		}
	}
	if (!err && atomic_long_read(&sb->s_remove_count))
		err = -EBUSY;

	if (!err)
		sb_start_ro_state_change(sb);
	for (struct mount *m = sb->s_mounts; m; m = m->mnt_next_for_sb) {
		if (test_write_hold(m))
			clear_write_hold(m);
	}

	return err;
}

static void free_vfsmnt(struct mount *mnt)
{
	mnt_idmap_put(mnt_idmap(&mnt->mnt));
	kfree_const(mnt->mnt_devname);
#ifdef CONFIG_SMP
	free_percpu(mnt->mnt_pcp);
#endif
	kmem_cache_free(mnt_cache, mnt);
}

static void delayed_free_vfsmnt(struct rcu_head *head)
{
	free_vfsmnt(container_of(head, struct mount, mnt_rcu));
}

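/*
 * Return values of __legitimize_mnt():
 *   1 - the seqcount is stale, the caller should retry the lookup
 *   0 - success (including a NULL mount), reference grabbed
 *  -1 - the mount is doomed or mid-umount; a reference was taken anyway
 *       and the caller must mntput() it outside of rcu_read_lock()
 *       (see legitimize_mnt() below).
 */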
/* call under rcu_read_lock */
int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	struct mount *mnt;
	if (read_seqretry(&mount_lock, seq))
		return 1;
	if (bastard == NULL)
		return 0;
	mnt = real_mount(bastard);
	mnt_add_count(mnt, 1);
	smp_mb(); // see mntput_no_expire() and do_umount()
	if (likely(!read_seqretry(&mount_lock, seq)))
		return 0;
	lock_mount_hash();
	if (unlikely(bastard->mnt_flags & (MNT_SYNC_UMOUNT | MNT_DOOMED))) {
		mnt_add_count(mnt, -1);
		unlock_mount_hash();
		return 1;
	}
	unlock_mount_hash();
	/* caller will mntput() */
	return -1;
}

/* call under rcu_read_lock */
static bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	int res = __legitimize_mnt(bastard, seq);
	if (likely(!res))
		return true;
	if (unlikely(res < 0)) {
		rcu_read_unlock();
		mntput(bastard);
		rcu_read_lock();
	}
	return false;
}

/**
 * __lookup_mnt - mount hash lookup
 * @mnt: parent mount
 * @dentry: dentry of mountpoint
 *
 * If @mnt has a child mount @c mounted on @dentry find and return it.
 * Caller must either hold the spinlock component of @mount_lock or
 * hold rcu_read_lock(), sample the seqcount component before the call
 * and recheck it afterwards.
 *
 * Return: The child of @mnt mounted on @dentry or %NULL.
 */
struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
	struct hlist_head *head = m_hash(mnt, dentry);
	struct mount *p;

	hlist_for_each_entry_rcu(p, head, mnt_hash)
		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
			return p;
	return NULL;
}

/**
 * lookup_mnt - Return the child mount mounted at given location
 * @path: location in the namespace
 *
 * Acquires and returns a new reference to mount at given location
 * or %NULL if nothing is mounted there.
 */
struct vfsmount *lookup_mnt(const struct path *path)
{
	struct mount *child_mnt;
	struct vfsmount *m;
	unsigned seq;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		child_mnt = __lookup_mnt(path->mnt, path->dentry);
		m = child_mnt ? &child_mnt->mnt : NULL;
	} while (!legitimize_mnt(m, seq));
	rcu_read_unlock();
	return m;
}

/*
 * __is_local_mountpoint - Test to see if dentry is a mountpoint in the
 *                         current mount namespace.
 *
 * The common case is dentries are not mountpoints at all and that
 * test is handled inline. For the slow case when we are actually
 * dealing with a mountpoint of some kind, walk through all of the
 * mounts in the current mount namespace and test to see if the dentry
 * is a mountpoint.
 *
 * The mount_hashtable is not usable in this context because we
 * need to identify all mounts that may be in the current mount
 * namespace not just a mount that happens to have some specified
 * parent mount.
 */
bool __is_local_mountpoint(const struct dentry *dentry)
{
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	struct mount *mnt, *n;

	guard(namespace_shared)();

	rbtree_postorder_for_each_entry_safe(mnt, n, &ns->mounts, mnt_node)
		if (mnt->mnt_mountpoint == dentry)
			return true;

	return false;
}

struct pinned_mountpoint {
	struct hlist_node node;
	struct mountpoint *mp;
	struct mount *parent;
};

static bool lookup_mountpoint(struct dentry *dentry, struct pinned_mountpoint *m)
{
	struct hlist_head *chain = mp_hash(dentry);
	struct mountpoint *mp;

	hlist_for_each_entry(mp, chain, m_hash) {
		if (mp->m_dentry == dentry) {
			hlist_add_head(&m->node, &mp->m_list);
			m->mp = mp;
			return true;
		}
	}
	return false;
}

static int get_mountpoint(struct dentry *dentry, struct pinned_mountpoint *m)
{
	struct mountpoint *mp __free(kfree) = NULL;
	bool found;
	int ret;

	if (d_mountpoint(dentry)) {
		/* might be worth a WARN_ON() */
		if (d_unlinked(dentry))
			return -ENOENT;
mountpoint:
		read_seqlock_excl(&mount_lock);
		found = lookup_mountpoint(dentry, m);
		read_sequnlock_excl(&mount_lock);
		if (found)
			return 0;
	}

	if (!mp)
		mp = kmalloc_obj(struct mountpoint);
	if (!mp)
		return -ENOMEM;

	/* Exactly one process may set d_mounted */
	ret = d_set_mounted(dentry);

	/* Someone else set d_mounted? */
	if (ret == -EBUSY)
		goto mountpoint;

	/* The dentry is not available as a mountpoint? */
	if (ret)
		return ret;

	/* Add the new mountpoint to the hash table */
	read_seqlock_excl(&mount_lock);
	mp->m_dentry = dget(dentry);
	hlist_add_head(&mp->m_hash, mp_hash(dentry));
	INIT_HLIST_HEAD(&mp->m_list);
	hlist_add_head(&m->node, &mp->m_list);
	m->mp = no_free_ptr(mp);
	read_sequnlock_excl(&mount_lock);
	return 0;
}

/*
 * vfsmount lock must be held. Additionally, the caller is responsible
 * for serializing calls for a given disposal list.
 */
static void maybe_free_mountpoint(struct mountpoint *mp, struct list_head *list)
{
	if (hlist_empty(&mp->m_list)) {
		struct dentry *dentry = mp->m_dentry;
		spin_lock(&dentry->d_lock);
		dentry->d_flags &= ~DCACHE_MOUNTED;
		spin_unlock(&dentry->d_lock);
		dput_to_list(dentry, list);
		hlist_del(&mp->m_hash);
		kfree(mp);
	}
}

/*
 * locks: mount_lock [read_seqlock_excl], namespace_sem [excl]
 */
static void unpin_mountpoint(struct pinned_mountpoint *m)
{
	if (m->mp) {
		hlist_del(&m->node);
		maybe_free_mountpoint(m->mp, &ex_mountpoints);
	}
}

static inline int check_mnt(const struct mount *mnt)
{
	return mnt->mnt_ns == current->nsproxy->mnt_ns;
}

static inline bool check_anonymous_mnt(struct mount *mnt)
{
	u64 seq;

	if (!is_anon_ns(mnt->mnt_ns))
		return false;

	seq = mnt->mnt_ns->seq_origin;
	return !seq || (seq == current->nsproxy->mnt_ns->ns.ns_id);
}

/*
 * vfsmount lock must be held for write
 */
static void touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * locks: mount_lock[write_seqlock]
 */
static void __umount_mnt(struct mount *mnt, struct list_head *shrink_list)
{
	struct mountpoint *mp;
	struct mount *parent = mnt->mnt_parent;
	if (unlikely(parent->overmount == mnt))
		parent->overmount = NULL;
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	list_del_init(&mnt->mnt_child);
	hlist_del_init_rcu(&mnt->mnt_hash);
	hlist_del_init(&mnt->mnt_mp_list);
	mp = mnt->mnt_mp;
	mnt->mnt_mp = NULL;
	maybe_free_mountpoint(mp, shrink_list);
}

/*
 * locks: mount_lock[write_seqlock], namespace_sem[excl] (for ex_mountpoints)
 */
static void umount_mnt(struct mount *mnt)
{
	__umount_mnt(mnt, &ex_mountpoints);
}

/*
 * vfsmount lock must be held for write
 */
void mnt_set_mountpoint(struct mount *mnt,
			struct mountpoint *mp,
			struct mount *child_mnt)
{
	child_mnt->mnt_mountpoint = mp->m_dentry;
	child_mnt->mnt_parent = mnt;
	child_mnt->mnt_mp = mp;
	hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
}

static void make_visible(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	if (unlikely(mnt->mnt_mountpoint == parent->mnt.mnt_root))
		parent->overmount = mnt;
	hlist_add_head_rcu(&mnt->mnt_hash,
			   m_hash(&parent->mnt, mnt->mnt_mountpoint));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
}

/**
 * attach_mnt - mount a mount, attach to @mount_hashtable and parent's
 *              list of child mounts
 * @parent: the parent
 * @mnt:    the new mount
 * @mp:     the new mountpoint
 *
 * Mount @mnt at @mp on @parent. Then attach @mnt
 * to @parent's child mount list and to @mount_hashtable.
 *
 * Note, when make_visible() is called @mnt->mnt_parent already points
 * to the correct parent.
 *
 * Context: This function expects namespace_lock() and lock_mount_hash()
 *          to have been acquired in that order.
 */
static void attach_mnt(struct mount *mnt, struct mount *parent,
		       struct mountpoint *mp)
{
	mnt_set_mountpoint(parent, mp, mnt);
	make_visible(mnt);
}

void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt)
{
	struct mountpoint *old_mp = mnt->mnt_mp;

	list_del_init(&mnt->mnt_child);
	hlist_del_init(&mnt->mnt_mp_list);
	hlist_del_init_rcu(&mnt->mnt_hash);

	attach_mnt(mnt, parent, mp);

	maybe_free_mountpoint(old_mp, &ex_mountpoints);
}

static inline struct mount *node_to_mount(struct rb_node *node)
{
	return node ? rb_entry(node, struct mount, mnt_node) : NULL;
}

static void mnt_add_to_ns(struct mnt_namespace *ns, struct mount *mnt)
{
	struct rb_node **link = &ns->mounts.rb_node;
	struct rb_node *parent = NULL;
	bool mnt_first_node = true, mnt_last_node = true;

	WARN_ON(mnt_ns_attached(mnt));
	mnt->mnt_ns = ns;
	while (*link) {
		parent = *link;
		if (mnt->mnt_id_unique < node_to_mount(parent)->mnt_id_unique) {
			link = &parent->rb_left;
			mnt_last_node = false;
		} else {
			link = &parent->rb_right;
			mnt_first_node = false;
		}
	}

	if (mnt_last_node)
		ns->mnt_last_node = &mnt->mnt_node;
	if (mnt_first_node)
		ns->mnt_first_node = &mnt->mnt_node;
	rb_link_node(&mnt->mnt_node, parent, link);
	rb_insert_color(&mnt->mnt_node, &ns->mounts);

	mnt_notify_add(mnt);
}

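/*
 * next_mnt() walks the tree of mounts rooted at @root depth-first:
 * descend into the first child if there is one, otherwise climb up
 * until a sibling exists, returning NULL once @root is exhausted.
 */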
static struct mount *next_mnt(struct mount *p, struct mount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct mount, mnt_child);
}

static struct mount *skip_mnt_tree(struct mount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct mount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}

/*
 * vfsmount lock must be held for write
 */
static void commit_tree(struct mount *mnt)
{
	struct mnt_namespace *n = mnt->mnt_parent->mnt_ns;

	if (!mnt_ns_attached(mnt)) {
		for (struct mount *m = mnt; m; m = next_mnt(m, mnt))
			mnt_add_to_ns(n, m);
		n->nr_mounts += n->pending_mounts;
		n->pending_mounts = 0;
	}

	make_visible(mnt);
	touch_mnt_namespace(n);
}

static void setup_mnt(struct mount *m, struct dentry *root)
{
	struct super_block *s = root->d_sb;

	atomic_inc(&s->s_active);
	m->mnt.mnt_sb = s;
	m->mnt.mnt_root = dget(root);
	m->mnt_mountpoint = m->mnt.mnt_root;
	m->mnt_parent = m;

	guard(mount_locked_reader)();
	mnt_add_instance(m, s);
}

/**
 * vfs_create_mount - Create a mount for a configured superblock
 * @fc: The configuration context with the superblock attached
 *
 * Create a mount to an already configured superblock. If necessary, the
 * caller should invoke vfs_get_tree() before calling this.
 *
 * Note that this does not attach the mount to anything.
 */
struct vfsmount *vfs_create_mount(struct fs_context *fc)
{
	struct mount *mnt;

	if (!fc->root)
		return ERR_PTR(-EINVAL);

	mnt = alloc_vfsmnt(fc->source);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (fc->sb_flags & SB_KERNMOUNT)
		mnt->mnt.mnt_flags = MNT_INTERNAL;

	setup_mnt(mnt, fc->root);

	return &mnt->mnt;
}
EXPORT_SYMBOL(vfs_create_mount);

struct vfsmount *fc_mount(struct fs_context *fc)
{
	int err = vfs_get_tree(fc);
	if (!err) {
		up_write(&fc->root->d_sb->s_umount);
		return vfs_create_mount(fc);
	}
	return ERR_PTR(err);
}
EXPORT_SYMBOL(fc_mount);

struct vfsmount *fc_mount_longterm(struct fs_context *fc)
{
	struct vfsmount *mnt = fc_mount(fc);
	if (!IS_ERR(mnt))
		real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL;
	return mnt;
}
EXPORT_SYMBOL(fc_mount_longterm);

struct vfsmount *vfs_kern_mount(struct file_system_type *type,
				int flags, const char *name,
				void *data)
{
	struct fs_context *fc;
	struct vfsmount *mnt;
	int ret = 0;

	if (!type)
		return ERR_PTR(-EINVAL);

	fc = fs_context_for_mount(type, flags);
	if (IS_ERR(fc))
		return ERR_CAST(fc);

	if (name)
		ret = vfs_parse_fs_string(fc, "source", name);
	if (!ret)
		ret = parse_monolithic_mount_data(fc, data);
	if (!ret)
		mnt = fc_mount(fc);
	else
		mnt = ERR_PTR(ret);

	put_fs_context(fc);
	return mnt;
}
EXPORT_SYMBOL_GPL(vfs_kern_mount);

static struct mount *clone_mnt(struct mount *old, struct dentry *root,
			       int flag)
{
	struct mount *mnt;
	int err;

	mnt = alloc_vfsmnt(old->mnt_devname);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	mnt->mnt.mnt_flags = READ_ONCE(old->mnt.mnt_flags) &
			     ~MNT_INTERNAL_FLAGS;

	if (flag & (CL_SLAVE | CL_PRIVATE))
		mnt->mnt_group_id = 0; /* not a peer of original */
	else
		mnt->mnt_group_id = old->mnt_group_id;

	if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
		err = mnt_alloc_group_id(mnt);
		if (err)
			goto out_free;
	}

	if (mnt->mnt_group_id)
		set_mnt_shared(mnt);

	mnt->mnt.mnt_idmap = mnt_idmap_get(mnt_idmap(&old->mnt));

	setup_mnt(mnt, root);

	if (flag & CL_PRIVATE)	// we are done with it
		return mnt;

	if (peers(mnt, old))
		list_add(&mnt->mnt_share, &old->mnt_share);

	if ((flag & CL_SLAVE) && old->mnt_group_id) {
		hlist_add_head(&mnt->mnt_slave, &old->mnt_slave_list);
		mnt->mnt_master = old;
	} else if (IS_MNT_SLAVE(old)) {
		hlist_add_behind(&mnt->mnt_slave, &old->mnt_slave);
		mnt->mnt_master = old->mnt_master;
	}
	return mnt;

out_free:
	mnt_free_id(mnt);
	free_vfsmnt(mnt);
	return ERR_PTR(err);
}

static void cleanup_mnt(struct mount *mnt)
{
	struct hlist_node *p;
	struct mount *m;
	/*
	 * The warning here probably indicates that somebody messed
	 * up a mnt_want/drop_write() pair. If this happens, the
	 * filesystem was probably unable to make r/w->r/o transitions.
	 * The locking used to deal with mnt_count decrement provides barriers,
	 * so mnt_get_writers() below is safe.
	 */
	WARN_ON(mnt_get_writers(mnt));
	if (unlikely(mnt->mnt_pins.first))
		mnt_pin_kill(mnt);
	hlist_for_each_entry_safe(m, p, &mnt->mnt_stuck_children, mnt_umount) {
		hlist_del(&m->mnt_umount);
		mntput(&m->mnt);
	}
	fsnotify_vfsmount_delete(&mnt->mnt);
	dput(mnt->mnt.mnt_root);
	deactivate_super(mnt->mnt.mnt_sb);
	mnt_free_id(mnt);
	call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt);
}

static void __cleanup_mnt(struct rcu_head *head)
{
	cleanup_mnt(container_of(head, struct mount, mnt_rcu));
}

static LLIST_HEAD(delayed_mntput_list);
static void delayed_mntput(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&delayed_mntput_list);
	struct mount *m, *t;

	llist_for_each_entry_safe(m, t, node, mnt_llist)
		cleanup_mnt(m);
}
static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);

static void noinline mntput_no_expire_slowpath(struct mount *mnt)
{
	LIST_HEAD(list);
	int count;

	VFS_BUG_ON(mnt->mnt_ns);
	lock_mount_hash();
	/*
	 * make sure that if __legitimize_mnt() has not seen us grab
	 * mount_lock, we'll see their refcount increment here.
	 */
	smp_mb();
	mnt_add_count(mnt, -1);
	count = mnt_get_count(mnt);
	if (count != 0) {
		WARN_ON(count < 0);
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) {
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	mnt->mnt.mnt_flags |= MNT_DOOMED;
	rcu_read_unlock();

	mnt_del_instance(mnt);
	if (unlikely(!list_empty(&mnt->mnt_expire)))
		list_del(&mnt->mnt_expire);

	if (unlikely(!list_empty(&mnt->mnt_mounts))) {
		struct mount *p, *tmp;
		list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
			__umount_mnt(p, &list);
			hlist_add_head(&p->mnt_umount, &mnt->mnt_stuck_children);
		}
	}
	unlock_mount_hash();
	shrink_dentry_list(&list);

	if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
		struct task_struct *task = current;
		if (likely(!(task->flags & PF_KTHREAD))) {
			init_task_work(&mnt->mnt_rcu, __cleanup_mnt);
			if (!task_work_add(task, &mnt->mnt_rcu, TWA_RESUME))
				return;
		}
		if (llist_add(&mnt->mnt_llist, &delayed_mntput_list))
			schedule_delayed_work(&delayed_mntput_work, 1);
		return;
	}
	cleanup_mnt(mnt);
}

static void mntput_no_expire(struct mount *mnt)
{
	rcu_read_lock();
	if (likely(READ_ONCE(mnt->mnt_ns))) {
		/*
		 * Since we don't do lock_mount_hash() here,
		 * ->mnt_ns can change under us. However, if it's
		 * non-NULL, then there's a reference that won't
		 * be dropped until after an RCU delay done after
		 * turning ->mnt_ns NULL. So if we observe it
		 * non-NULL under rcu_read_lock(), the reference
		 * we are dropping is not the final one.
		 */
		mnt_add_count(mnt, -1);
		rcu_read_unlock();
		return;
	}
	mntput_no_expire_slowpath(mnt);
}

void mntput(struct vfsmount *mnt)
{
	if (mnt) {
		struct mount *m = real_mount(mnt);
		/* avoid cacheline pingpong */
		if (unlikely(m->mnt_expiry_mark))
			WRITE_ONCE(m->mnt_expiry_mark, 0);
		mntput_no_expire(m);
	}
}
EXPORT_SYMBOL(mntput);

struct vfsmount *mntget(struct vfsmount *mnt)
{
	if (mnt)
		mnt_add_count(real_mount(mnt), 1);
	return mnt;
}
EXPORT_SYMBOL(mntget);

/*
 * Make a mount point inaccessible to new lookups.
 * Because there may still be current users, the caller MUST WAIT
 * for an RCU grace period before destroying the mount point.
 */
void mnt_make_shortterm(struct vfsmount *mnt)
{
	if (mnt)
		real_mount(mnt)->mnt_ns = NULL;
}

/**
 * path_is_mountpoint() - Check if path is a mount in the current namespace.
 * @path: path to check
 *
 * d_mountpoint() can only be used reliably to establish if a dentry is
 * not mounted in any namespace and that common case is handled inline.
 * d_mountpoint() isn't aware of the possibility there may be multiple
 * mounts using a given dentry in a different namespace. This function
 * checks if the passed in path is a mountpoint rather than the dentry
 * alone.
 */
bool path_is_mountpoint(const struct path *path)
{
	unsigned seq;
	bool res;

	if (!d_mountpoint(path->dentry))
		return false;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		res = __path_is_mountpoint(path);
	} while (read_seqretry(&mount_lock, seq));
	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL(path_is_mountpoint);

struct vfsmount *mnt_clone_internal(const struct path *path)
{
	struct mount *p;
	p = clone_mnt(real_mount(path->mnt), path->dentry, CL_PRIVATE);
	if (IS_ERR(p))
		return ERR_CAST(p);
	p->mnt.mnt_flags |= MNT_INTERNAL;
	return &p->mnt;
}

/*
 * Returns the mount which either has the specified mnt_id, or has the next
 * smallest id after the specified one.
 */
static struct mount *mnt_find_id_at(struct mnt_namespace *ns, u64 mnt_id)
{
	struct rb_node *node = ns->mounts.rb_node;
	struct mount *ret = NULL;

	while (node) {
		struct mount *m = node_to_mount(node);

		if (mnt_id <= m->mnt_id_unique) {
			ret = node_to_mount(node);
			if (mnt_id == m->mnt_id_unique)
				break;
			node = node->rb_left;
		} else {
			node = node->rb_right;
		}
	}
	return ret;
}

/*
 * Returns the mount which either has the specified mnt_id, or has the next
 * smaller id before the specified one.
 */
static struct mount *mnt_find_id_at_reverse(struct mnt_namespace *ns, u64 mnt_id)
{
	struct rb_node *node = ns->mounts.rb_node;
	struct mount *ret = NULL;

	while (node) {
		struct mount *m = node_to_mount(node);

		if (mnt_id >= m->mnt_id_unique) {
			ret = node_to_mount(node);
			if (mnt_id == m->mnt_id_unique)
				break;
			node = node->rb_right;
		} else {
			node = node->rb_left;
		}
	}
	return ret;
}

#ifdef CONFIG_PROC_FS

/* iterator; we want it to have access to namespace_sem, thus here... */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_mounts *p = m->private;

	down_read(&namespace_sem);

	return mnt_find_id_at(p->ns, *pos);
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct mount *next = NULL, *mnt = v;
	struct rb_node *node = rb_next(&mnt->mnt_node);

	++*pos;
	if (node) {
		next = node_to_mount(node);
		*pos = next->mnt_id_unique;
	}
	return next;
}

static void m_stop(struct seq_file *m, void *v)
{
	up_read(&namespace_sem);
}

static int m_show(struct seq_file *m, void *v)
{
	struct proc_mounts *p = m->private;
	struct mount *r = v;
	return p->show(m, &r->mnt);
}

const struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= m_show,
};

#endif /* CONFIG_PROC_FS */

/**
 * may_umount_tree - check if a mount tree is busy
 * @m: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	bool busy = false;

	/* write lock needed for mnt_get_count */
	lock_mount_hash();
	for (struct mount *p = mnt; p; p = next_mnt(p, mnt)) {
		if (mnt_get_count(p) > (p == mnt ? 2 : 1)) {
			busy = true;
			break;
		}
	}
	unlock_mount_hash();

	return !busy;
}

EXPORT_SYMBOL(may_umount_tree);

/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts. If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	int ret = 1;
	down_read(&namespace_sem);
	lock_mount_hash();
	if (propagate_mount_busy(real_mount(mnt), 2))
		ret = 0;
	unlock_mount_hash();
	up_read(&namespace_sem);
	return ret;
}

EXPORT_SYMBOL(may_umount);

#ifdef CONFIG_FSNOTIFY
static void mnt_notify(struct mount *p)
{
	if (!p->prev_ns && p->mnt_ns) {
		fsnotify_mnt_attach(p->mnt_ns, &p->mnt);
	} else if (p->prev_ns && !p->mnt_ns) {
		fsnotify_mnt_detach(p->prev_ns, &p->mnt);
	} else if (p->prev_ns == p->mnt_ns) {
		fsnotify_mnt_move(p->mnt_ns, &p->mnt);
	} else {
		fsnotify_mnt_detach(p->prev_ns, &p->mnt);
		fsnotify_mnt_attach(p->mnt_ns, &p->mnt);
	}
	p->prev_ns = p->mnt_ns;
}

static void notify_mnt_list(void)
{
	struct mount *m, *tmp;
	/*
	 * Notify about mounts that were added/reparented/detached or remain
	 * connected after unmount.
	 */
	list_for_each_entry_safe(m, tmp, &notify_list, to_notify) {
		mnt_notify(m);
		list_del_init(&m->to_notify);
	}
}

static bool need_notify_mnt_list(void)
{
	return !list_empty(&notify_list);
}
#else
static void notify_mnt_list(void)
{
}

static bool need_notify_mnt_list(void)
{
	return false;
}
#endif

static void free_mnt_ns(struct mnt_namespace *);
static void namespace_unlock(void)
{
	struct hlist_head head;
	struct hlist_node *p;
	struct mount *m;
	struct mnt_namespace *ns = emptied_ns;
	LIST_HEAD(list);

	hlist_move_list(&unmounted, &head);
	list_splice_init(&ex_mountpoints, &list);
	emptied_ns = NULL;

	if (need_notify_mnt_list()) {
		/*
		 * No point blocking out concurrent readers while notifications
		 * are sent. This will also allow statmount()/listmount() to run
		 * concurrently.
		 */
		downgrade_write(&namespace_sem);
		notify_mnt_list();
		up_read(&namespace_sem);
	} else {
		up_write(&namespace_sem);
	}
	if (unlikely(ns)) {
		/* Make sure we notice when we leak mounts. */
		VFS_WARN_ON_ONCE(!mnt_ns_empty(ns));
		free_mnt_ns(ns);
	}

	shrink_dentry_list(&list);

	if (likely(hlist_empty(&head)))
		return;

	synchronize_rcu_expedited();

	hlist_for_each_entry_safe(m, p, &head, mnt_umount) {
		hlist_del(&m->mnt_umount);
		mntput(&m->mnt);
	}
}

static inline void namespace_lock(void)
{
	down_write(&namespace_sem);
}

enum umount_tree_flags {
	UMOUNT_SYNC = 1,
	UMOUNT_PROPAGATE = 2,
	UMOUNT_CONNECTED = 4,
};
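
/*
 * do_umount() below passes UMOUNT_PROPAGATE alone for MNT_DETACH (lazy)
 * unmounts and UMOUNT_PROPAGATE|UMOUNT_SYNC for regular ones;
 * UMOUNT_CONNECTED requests that lazily unmounted mounts stay connected
 * to their parent (see disconnect_mount()).
 */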

static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how)
{
	/* Leaving mounts connected is only valid for lazy umounts */
	if (how & UMOUNT_SYNC)
		return true;

	/* A mount without a parent has nothing to be connected to */
	if (!mnt_has_parent(mnt))
		return true;

	/*
	 * Because the reference counting rules change when mounts are
	 * unmounted and connected, umounted mounts may not be
	 * connected to mounted mounts.
	 */
	if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT))
		return true;

	/* Has it been requested that the mount remain connected? */
	if (how & UMOUNT_CONNECTED)
		return false;

	/* Is the mount locked such that it needs to remain connected? */
	if (IS_MNT_LOCKED(mnt))
		return false;

	/* By default disconnect the mount */
	return true;
}

/*
 * mount_lock must be held
 * namespace_sem must be held for write
 */
static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
{
	LIST_HEAD(tmp_list);
	struct mount *p;

	if (how & UMOUNT_PROPAGATE)
		propagate_mount_unlock(mnt);

	/* Gather the mounts to umount */
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		p->mnt.mnt_flags |= MNT_UMOUNT;
		if (mnt_ns_attached(p))
			move_from_ns(p);
		list_add_tail(&p->mnt_list, &tmp_list);
	}

	/* Hide the mounts from mnt_mounts */
	list_for_each_entry(p, &tmp_list, mnt_list) {
		list_del_init(&p->mnt_child);
	}

	/* Add propagated mounts to the tmp_list */
	if (how & UMOUNT_PROPAGATE)
		propagate_umount(&tmp_list);

	bulk_make_private(&tmp_list);

	while (!list_empty(&tmp_list)) {
		struct mnt_namespace *ns;
		bool disconnect;
		p = list_first_entry(&tmp_list, struct mount, mnt_list);
		list_del_init(&p->mnt_expire);
		list_del_init(&p->mnt_list);
		ns = p->mnt_ns;
		if (ns) {
			ns->nr_mounts--;
			__touch_mnt_namespace(ns);
		}
		p->mnt_ns = NULL;
		if (how & UMOUNT_SYNC)
			p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;

		disconnect = disconnect_mount(p, how);
		if (mnt_has_parent(p)) {
			if (!disconnect) {
				/* Don't forget about p */
				list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
			} else {
				umount_mnt(p);
			}
		}
		if (disconnect)
			hlist_add_head(&p->mnt_umount, &unmounted);

		/*
		 * At this point p->mnt_ns is NULL, notification will be queued
		 * only if
		 *
		 *  - p->prev_ns is non-NULL *and*
		 *  - p->prev_ns->n_fsnotify_marks is non-NULL
		 *
		 * This will preclude queuing the mount if this is a cleanup
		 * after a failed copy_tree() or destruction of an anonymous
		 * namespace, etc.
		 */
		mnt_notify_add(p);
	}
}

static void shrink_submounts(struct mount *mnt);

static int do_umount_root(struct super_block *sb)
{
	int ret = 0;

	down_write(&sb->s_umount);
	if (!sb_rdonly(sb)) {
		struct fs_context *fc;

		fc = fs_context_for_reconfigure(sb->s_root, SB_RDONLY,
						SB_RDONLY);
		if (IS_ERR(fc)) {
			ret = PTR_ERR(fc);
		} else {
			ret = parse_monolithic_mount_data(fc, NULL);
			if (!ret)
				ret = reconfigure_super(fc);
			put_fs_context(fc);
		}
	}
	up_write(&sb->s_umount);
	return ret;
}
1854
static int do_umount(struct mount *mnt, int flags)
{
	struct super_block *sb = mnt->mnt.mnt_sb;
	int retval;

	retval = security_sb_umount(&mnt->mnt, flags);
	if (retval)
		return retval;

	/*
	 * Allow userspace to request a mountpoint be expired rather than
	 * unmounting unconditionally. Unmount only happens if:
	 * (1) the mark is already set (the mark is cleared by mntput())
	 * (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
	 */
	if (flags & MNT_EXPIRE) {
		if (&mnt->mnt == current->fs->root.mnt ||
		    flags & (MNT_FORCE | MNT_DETACH))
			return -EINVAL;

		/*
		 * probably don't strictly need the lock here if we examined
		 * all race cases, but it's a slowpath.
		 */
		lock_mount_hash();
		if (!list_empty(&mnt->mnt_mounts) || mnt_get_count(mnt) != 2) {
			unlock_mount_hash();
			return -EBUSY;
		}
		unlock_mount_hash();

		if (!xchg(&mnt->mnt_expiry_mark, 1))
			return -EAGAIN;
	}

	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things. In the Unix tradition of
	 * 'Gee, that's tricky, let's do it in userspace' the umount_begin
	 * might fail to complete on the first run through as other tasks
	 * must return, and the like. That's for the mount program to worry
	 * about for the moment.
	 */

	if (flags & MNT_FORCE && sb->s_op->umount_begin) {
		sb->s_op->umount_begin(sb);
	}

	/*
	 * No sense to grab the lock for this test, but test itself looks
	 * somewhat bogus. Suggestions for better replacement?
	 * Ho-hum... In principle, we might treat that as umount + switch
	 * to rootfs. GC would eventually take care of the old vfsmount.
	 * Actually it makes sense, especially if rootfs would contain a
	 * /reboot - static binary that would close all descriptors and
	 * call reboot(2). Then init(8) could umount root and exec /reboot.
	 */
	if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
		/*
		 * Special case for "unmounting" root ...
		 * we just try to remount it readonly.
		 */
		if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
			return -EPERM;
		return do_umount_root(sb);
	}

	namespace_lock();
	lock_mount_hash();

	/* Repeat the earlier racy checks, now that we are holding the locks */
	retval = -EINVAL;
	if (!check_mnt(mnt))
		goto out;

	if (mnt->mnt.mnt_flags & MNT_LOCKED)
		goto out;

	if (!mnt_has_parent(mnt)) /* not the absolute root */
		goto out;

	event++;
	if (flags & MNT_DETACH) {
		umount_tree(mnt, UMOUNT_PROPAGATE);
		retval = 0;
	} else {
		smp_mb(); // paired with __legitimize_mnt()
		shrink_submounts(mnt);
		retval = -EBUSY;
		if (!propagate_mount_busy(mnt, 2)) {
			umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
			retval = 0;
		}
	}
out:
	unlock_mount_hash();
	namespace_unlock();
	return retval;
}
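
/*
 * Illustrative userspace sketch (not taken from this file): the MNT_EXPIRE
 * two-pass protocol handled above. The first umount2(2) call marks the
 * mount and fails with EAGAIN; if nothing touches the mount in between,
 * a second call actually unmounts it. The path is hypothetical.
 *
 *	if (umount2("/mnt/cd", MNT_EXPIRE) == -1 && errno == EAGAIN) {
 *		sleep(expiry_period);
 *		if (umount2("/mnt/cd", MNT_EXPIRE) == 0)
 *			;	/* unmounted: it stayed unused */
 *	}
 */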

/*
 * __detach_mounts - lazily unmount all mounts on the specified dentry
 *
 * During unlink, rmdir, and d_drop it is possible to lose the path
 * to an existing mountpoint, and wind up leaking the mount.
 * detach_mounts allows lazily unmounting those mounts instead of
 * leaking them.
 *
 * The caller may hold dentry->d_inode->i_rwsem.
 */
void __detach_mounts(struct dentry *dentry)
{
	struct pinned_mountpoint mp = {};
	struct mount *mnt;

	guard(namespace_excl)();
	guard(mount_writer)();

	if (!lookup_mountpoint(dentry, &mp))
		return;

	event++;
	while (mp.node.next) {
		mnt = hlist_entry(mp.node.next, struct mount, mnt_mp_list);
		if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
			umount_mnt(mnt);
			hlist_add_head(&mnt->mnt_umount, &unmounted);
		} else {
			umount_tree(mnt, UMOUNT_CONNECTED);
		}
	}
	unpin_mountpoint(&mp);
}

/*
 * Is the caller allowed to modify his namespace?
 */
bool may_mount(void)
{
	return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
}

static void warn_mandlock(void)
{
	pr_warn_once("=======================================================\n"
		     "WARNING: The mand mount option has been deprecated and\n"
		     "         is ignored by this kernel. Remove the mand\n"
		     "         option from the mount to silence this warning.\n"
		     "=======================================================\n");
}

static int can_umount(const struct path *path, int flags)
{
	struct mount *mnt = real_mount(path->mnt);
	struct super_block *sb = path->dentry->d_sb;

	if (!may_mount())
		return -EPERM;
	if (!path_mounted(path))
		return -EINVAL;
	if (!check_mnt(mnt))
		return -EINVAL;
	if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */
		return -EINVAL;
	if (flags & MNT_FORCE && !ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
		return -EPERM;
	return 0;
}

// caller is responsible for flags being sane
int path_umount(const struct path *path, int flags)
{
	struct mount *mnt = real_mount(path->mnt);
	int ret;

	ret = can_umount(path, flags);
	if (!ret)
		ret = do_umount(mnt, flags);

	/* we mustn't call path_put() as that would clear mnt_expiry_mark */
	dput(path->dentry);
	mntput_no_expire(mnt);
	return ret;
}

static int ksys_umount(char __user *name, int flags)
{
	int lookup_flags = LOOKUP_MOUNTPOINT;
	struct path path;
	int ret;

	// basic validity checks done first
	if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
		return -EINVAL;

	if (!(flags & UMOUNT_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;
	ret = user_path_at(AT_FDCWD, name, lookup_flags, &path);
	if (ret)
		return ret;
	return path_umount(&path, flags);
}

SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
{
	return ksys_umount(name, flags);
}
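
/*
 * Illustrative userspace sketch: a lazy unmount via the umount2(2)
 * wrapper. MNT_DETACH detaches the mount tree immediately while the
 * underlying filesystem is only released once the last user goes away.
 * The path is hypothetical.
 *
 *	if (umount2("/mnt/data", MNT_DETACH) == -1)
 *		perror("umount2");
 */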

#ifdef __ARCH_WANT_SYS_OLDUMOUNT

/*
 * The 2.0 compatible umount. No flags.
 */
SYSCALL_DEFINE1(oldumount, char __user *, name)
{
	return ksys_umount(name, 0);
}

#endif

static bool is_mnt_ns_file(struct dentry *dentry)
{
	struct ns_common *ns;

	/* Is this a proxy for a mount namespace? */
	if (dentry->d_op != &ns_dentry_operations)
		return false;

	ns = d_inode(dentry)->i_private;

	return ns->ops == &mntns_operations;
}

struct ns_common *from_mnt_ns(struct mnt_namespace *mnt)
{
	return &mnt->ns;
}

struct mnt_namespace *get_sequential_mnt_ns(struct mnt_namespace *mntns, bool previous)
{
	struct ns_common *ns;

	guard(rcu)();

	for (;;) {
		ns = ns_tree_adjoined_rcu(mntns, previous);
		if (IS_ERR(ns))
			return ERR_CAST(ns);

		mntns = to_mnt_ns(ns);

		/*
		 * The last passive reference count is put with an RCU
		 * delay, so accessing the mount namespace is not just
		 * safe: all relevant members are still valid.
		 */
		if (!ns_capable_noaudit(mntns->user_ns, CAP_SYS_ADMIN))
			continue;

		/*
		 * We need an active reference count as we're persisting
		 * the mount namespace and it might already be on its
		 * deathbed.
		 */
		if (!ns_ref_get(mntns))
			continue;

		return mntns;
	}
}
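
/*
 * get_sequential_mnt_ns() is the backend for the nsfs ioctls that let a
 * sufficiently privileged caller walk mount namespaces in namespace-id
 * order. A rough userspace sketch, assuming the NS_MNT_GET_NEXT ioctl
 * from <linux/nsfs.h> (error handling elided):
 *
 *	int fd = open("/proc/self/ns/mnt", O_RDONLY);
 *	int next = ioctl(fd, NS_MNT_GET_NEXT);
 *	// on success, next is a namespace fd for the adjacent mount ns
 */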

struct mnt_namespace *mnt_ns_from_dentry(struct dentry *dentry)
{
	if (!is_mnt_ns_file(dentry))
		return NULL;

	return to_mnt_ns(get_proc_ns(dentry->d_inode));
}

static bool mnt_ns_loop(struct dentry *dentry)
{
	/* Could bind mounting the mount namespace inode cause a
	 * mount namespace loop?
	 */
	struct mnt_namespace *mnt_ns = mnt_ns_from_dentry(dentry);

	if (!mnt_ns)
		return false;

	return current->nsproxy->mnt_ns->ns.ns_id >= mnt_ns->ns.ns_id;
}

struct mount *copy_tree(struct mount *src_root, struct dentry *dentry,
			int flag)
{
	struct mount *res, *src_parent, *src_root_child, *src_mnt,
		*dst_parent, *dst_mnt;

	if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(src_root))
		return ERR_PTR(-EINVAL);

	if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry))
		return ERR_PTR(-EINVAL);

	res = dst_mnt = clone_mnt(src_root, dentry, flag);
	if (IS_ERR(dst_mnt))
		return dst_mnt;

	src_parent = src_root;

	list_for_each_entry(src_root_child, &src_root->mnt_mounts, mnt_child) {
		if (!is_subdir(src_root_child->mnt_mountpoint, dentry))
			continue;

		for (src_mnt = src_root_child; src_mnt;
		     src_mnt = next_mnt(src_mnt, src_root_child)) {
			if (!(flag & CL_COPY_UNBINDABLE) &&
			    IS_MNT_UNBINDABLE(src_mnt)) {
				if (src_mnt->mnt.mnt_flags & MNT_LOCKED) {
					/* Both unbindable and locked. */
					dst_mnt = ERR_PTR(-EPERM);
					goto out;
				} else {
					src_mnt = skip_mnt_tree(src_mnt);
					continue;
				}
			}
			if (!(flag & CL_COPY_MNT_NS_FILE) &&
			    is_mnt_ns_file(src_mnt->mnt.mnt_root)) {
				src_mnt = skip_mnt_tree(src_mnt);
				continue;
			}
			while (src_parent != src_mnt->mnt_parent) {
				src_parent = src_parent->mnt_parent;
				dst_mnt = dst_mnt->mnt_parent;
			}

			src_parent = src_mnt;
			dst_parent = dst_mnt;
			dst_mnt = clone_mnt(src_mnt, src_mnt->mnt.mnt_root, flag);
			if (IS_ERR(dst_mnt))
				goto out;
			lock_mount_hash();
			if (src_mnt->mnt.mnt_flags & MNT_LOCKED)
				dst_mnt->mnt.mnt_flags |= MNT_LOCKED;
			if (unlikely(flag & CL_EXPIRE)) {
				/* stick the duplicate mount on the same expiry
				 * list as the original if that was on one */
				if (!list_empty(&src_mnt->mnt_expire))
					list_add(&dst_mnt->mnt_expire,
						 &src_mnt->mnt_expire);
			}
			attach_mnt(dst_mnt, dst_parent, src_parent->mnt_mp);
			unlock_mount_hash();
		}
	}
	return res;

out:
	if (res) {
		lock_mount_hash();
		umount_tree(res, UMOUNT_SYNC);
		unlock_mount_hash();
	}
	return dst_mnt;
}

static inline bool extend_array(struct path **res, struct path **to_free,
				unsigned n, unsigned *count, unsigned new_count)
{
	struct path *p;

	if (likely(n < *count))
		return true;
	p = kmalloc_objs(struct path, new_count);
	if (p && *count)
		memcpy(p, *res, *count * sizeof(struct path));
	*count = new_count;
	kfree(*to_free);
	*to_free = *res = p;
	return p;
}

const struct path *collect_paths(const struct path *path,
				 struct path *prealloc, unsigned count)
{
	struct mount *root = real_mount(path->mnt);
	struct mount *child;
	struct path *res = prealloc, *to_free = NULL;
	unsigned n = 0;

	guard(namespace_shared)();

	if (!check_mnt(root))
		return ERR_PTR(-EINVAL);
	if (!extend_array(&res, &to_free, 0, &count, 32))
		return ERR_PTR(-ENOMEM);
	res[n++] = *path;
	list_for_each_entry(child, &root->mnt_mounts, mnt_child) {
		if (!is_subdir(child->mnt_mountpoint, path->dentry))
			continue;
		for (struct mount *m = child; m; m = next_mnt(m, child)) {
			if (!extend_array(&res, &to_free, n, &count, 2 * count))
				return ERR_PTR(-ENOMEM);
			res[n].mnt = &m->mnt;
			res[n].dentry = m->mnt.mnt_root;
			n++;
		}
	}
	if (!extend_array(&res, &to_free, n, &count, count + 1))
		return ERR_PTR(-ENOMEM);
	memset(res + n, 0, (count - n) * sizeof(struct path));
	for (struct path *p = res; p->mnt; p++)
		path_get(p);
	return res;
}

void drop_collected_paths(const struct path *paths, const struct path *prealloc)
{
	for (const struct path *p = paths; p->mnt; p++)
		path_put(p);
	if (paths != prealloc)
		kfree(paths);
}
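
/*
 * A minimal sketch of the intended pairing of collect_paths() with
 * drop_collected_paths(): a small on-stack preallocation for the common
 * few-submounts case, with the NULL ->mnt entry terminating the array.
 * This is hypothetical caller code, not taken from this file, and
 * do_something() stands in for whatever per-mount work is needed.
 *
 *	struct path prealloc[16];
 *	const struct path *paths = collect_paths(path, prealloc,
 *						 ARRAY_SIZE(prealloc));
 *	if (IS_ERR(paths))
 *		return PTR_ERR(paths);
 *	for (const struct path *p = paths; p->mnt; p++)
 *		do_something(p);
 *	drop_collected_paths(paths, prealloc);
 */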

static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *, bool);

void dissolve_on_fput(struct vfsmount *mnt)
{
	struct mount *m = real_mount(mnt);

	/*
	 * m used to be the root of anon namespace; if it still is one,
	 * we need to dissolve the mount tree and free that namespace.
	 * Let's try to avoid taking namespace_sem if we can determine
	 * that there's nothing to do without it - rcu_read_lock() is
	 * enough to make anon_ns_root() memory-safe and once m has
	 * left its namespace, it's no longer our concern, since it will
	 * never become a root of anon ns again.
	 */

	scoped_guard(rcu) {
		if (!anon_ns_root(m))
			return;
	}

	scoped_guard(namespace_excl) {
		if (!anon_ns_root(m))
			return;

		emptied_ns = m->mnt_ns;
		lock_mount_hash();
		umount_tree(m, UMOUNT_CONNECTED);
		unlock_mount_hash();
	}
}

/* locks: namespace_shared && pinned(mnt) || mount_locked_reader */
static bool __has_locked_children(struct mount *mnt, struct dentry *dentry)
{
	struct mount *child;

	list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
		if (!is_subdir(child->mnt_mountpoint, dentry))
			continue;

		if (child->mnt.mnt_flags & MNT_LOCKED)
			return true;
	}
	return false;
}

bool has_locked_children(struct mount *mnt, struct dentry *dentry)
{
	guard(mount_locked_reader)();
	return __has_locked_children(mnt, dentry);
}

/*
 * Check that there aren't references to earlier/same mount namespaces in the
 * specified subtree. Such references can act as pins for mount namespaces
 * that aren't checked by the mount-cycle checking code, thereby allowing
 * cycles to be made.
 *
 * locks: mount_locked_reader || namespace_shared && pinned(subtree)
 */
static bool check_for_nsfs_mounts(struct mount *subtree)
{
	for (struct mount *p = subtree; p; p = next_mnt(p, subtree))
		if (mnt_ns_loop(p->mnt.mnt_root))
			return false;
	return true;
}

/**
 * clone_private_mount - create a private clone of a path
 * @path: path to clone
 *
 * This creates a new vfsmount, which will be the clone of @path. The new mount
 * will not be attached anywhere in the namespace and will be private (i.e.
 * changes to the originating mount won't be propagated into this).
 *
 * This assumes caller has called or done the equivalent of may_mount().
 *
 * Release with mntput().
 */
struct vfsmount *clone_private_mount(const struct path *path)
{
	struct mount *old_mnt = real_mount(path->mnt);
	struct mount *new_mnt;

	guard(namespace_shared)();

	if (IS_MNT_UNBINDABLE(old_mnt))
		return ERR_PTR(-EINVAL);

	/*
	 * Make sure the source mount is acceptable.
	 * Anything mounted in our mount namespace is allowed.
	 * Otherwise, it must be the root of an anonymous mount
	 * namespace, and we need to make sure no namespace
	 * loops get created.
	 */
	if (!check_mnt(old_mnt)) {
		if (!anon_ns_root(old_mnt))
			return ERR_PTR(-EINVAL);

		if (!check_for_nsfs_mounts(old_mnt))
			return ERR_PTR(-EINVAL);
	}

	if (!ns_capable(old_mnt->mnt_ns->user_ns, CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);

	if (__has_locked_children(old_mnt, path->dentry))
		return ERR_PTR(-EINVAL);

	new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
	if (IS_ERR(new_mnt))
		return ERR_PTR(-EINVAL);

	/* Longterm mount to be removed by kern_unmount*() */
	new_mnt->mnt_ns = MNT_NS_INTERNAL;
	return &new_mnt->mnt;
}
EXPORT_SYMBOL_GPL(clone_private_mount);
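
/*
 * A hedged usage sketch for clone_private_mount(): an in-kernel consumer
 * resolving a path and keeping a private, longterm clone of it. The
 * kern_path() target is hypothetical and error handling is trimmed; per
 * the MNT_NS_INTERNAL note above, such a mount is ultimately released
 * via kern_unmount().
 *
 *	struct path lower;
 *	struct vfsmount *m;
 *
 *	kern_path("/lower", LOOKUP_FOLLOW, &lower);
 *	m = clone_private_mount(&lower);
 *	path_put(&lower);
 *	if (IS_ERR(m))
 *		return PTR_ERR(m);
 *	// ... use m; later: kern_unmount(m);
 */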

static void lock_mnt_tree(struct mount *mnt)
{
	struct mount *p;

	for (p = mnt; p; p = next_mnt(p, mnt)) {
		int flags = p->mnt.mnt_flags;
		/* Don't allow unprivileged users to change mount flags */
		flags |= MNT_LOCK_ATIME;

		if (flags & MNT_READONLY)
			flags |= MNT_LOCK_READONLY;

		if (flags & MNT_NODEV)
			flags |= MNT_LOCK_NODEV;

		if (flags & MNT_NOSUID)
			flags |= MNT_LOCK_NOSUID;

		if (flags & MNT_NOEXEC)
			flags |= MNT_LOCK_NOEXEC;
		/* Don't allow unprivileged users to reveal what is under a mount */
		if (list_empty(&p->mnt_expire) && p != mnt)
			flags |= MNT_LOCKED;
		p->mnt.mnt_flags = flags;
	}
}

static void cleanup_group_ids(struct mount *mnt, struct mount *end)
{
	struct mount *p;

	for (p = mnt; p != end; p = next_mnt(p, mnt)) {
		if (p->mnt_group_id && !IS_MNT_SHARED(p))
			mnt_release_group_id(p);
	}
}

static int invent_group_ids(struct mount *mnt, bool recurse)
{
	struct mount *p;

	for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
		if (!p->mnt_group_id) {
			int err = mnt_alloc_group_id(p);
			if (err) {
				cleanup_group_ids(mnt, p);
				return err;
			}
		}
	}

	return 0;
}

int count_mounts(struct mnt_namespace *ns, struct mount *mnt)
{
	unsigned int max = READ_ONCE(sysctl_mount_max);
	unsigned int mounts = 0;
	struct mount *p;

	if (ns->nr_mounts >= max)
		return -ENOSPC;
	max -= ns->nr_mounts;
	if (ns->pending_mounts >= max)
		return -ENOSPC;
	max -= ns->pending_mounts;

	for (p = mnt; p; p = next_mnt(p, mnt))
		mounts++;

	if (mounts > max)
		return -ENOSPC;

	ns->pending_mounts += mounts;
	return 0;
}

enum mnt_tree_flags_t {
	MNT_TREE_BENEATH = BIT(0),
	MNT_TREE_PROPAGATION = BIT(1),
};

/**
 * attach_recursive_mnt - attach a source mount tree
 * @source_mnt: mount tree to be attached
 * @dest: the context for mounting at the place where the tree should go
 *
 * NOTE: the table below explains the semantics when a source mount
 * of a given type is attached to a destination mount of a given type.
 * ---------------------------------------------------------------------------
 * |         BIND MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared      | private     | slave       | unbindable |
 * | dest     |             |             |             |            |
 * |   |      |             |             |             |            |
 * |   v      |             |             |             |            |
 * |**************************************************************************
 * |  shared  | shared (++) | shared (+)  | shared (+++)| invalid    |
 * |          |             |             |             |            |
 * |non-shared| shared (+)  | private     | slave (*)   | invalid    |
 * ***************************************************************************
 * A bind operation clones the source mount and mounts the clone on the
 * destination mount.
 *
 * (++)  the cloned mount is propagated to all the mounts in the propagation
 *       tree of the destination mount and the cloned mount is added to
 *       the peer group of the source mount.
 * (+)   the cloned mount is created under the destination mount and is marked
 *       as shared. The cloned mount is added to the peer group of the source
 *       mount.
 * (+++) the mount is propagated to all the mounts in the propagation tree
 *       of the destination mount and the cloned mount is made slave
 *       of the same master as that of the source mount. The cloned mount
 *       is marked as 'shared and slave'.
 * (*)   the cloned mount is made a slave of the same master as that of the
 *       source mount.
 *
 * ---------------------------------------------------------------------------
 * |         MOVE MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared      | private     | slave       | unbindable |
 * | dest     |             |             |             |            |
 * |   |      |             |             |             |            |
 * |   v      |             |             |             |            |
 * |**************************************************************************
 * |  shared  | shared (+)  | shared (+)  | shared (+++)| invalid    |
 * |          |             |             |             |            |
 * |non-shared| shared (+*) | private     | slave (*)   | unbindable |
 * ***************************************************************************
 *
 * (+)   the mount is moved to the destination, and is then propagated to
 *       all the mounts in the propagation tree of the destination mount.
 * (+*)  the mount is moved to the destination.
 * (+++) the mount is moved to the destination and is then propagated to
 *       all the mounts belonging to the destination mount's propagation tree.
 *       the mount is marked as 'shared and slave'.
 * (*)   the mount continues to be a slave at the new location.
 *
 * If the source mount is a tree, the operations explained above are
 * applied to each mount in the tree.
 * Must be called without spinlocks held, since this function can sleep
 * in allocations.
 *
 * Context: The function expects namespace_lock() to be held.
 * Return: If @source_mnt was successfully attached 0 is returned.
 *         Otherwise a negative error code is returned.
 */
static int attach_recursive_mnt(struct mount *source_mnt,
				const struct pinned_mountpoint *dest)
{
	struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
	struct mount *dest_mnt = dest->parent;
	struct mountpoint *dest_mp = dest->mp;
	HLIST_HEAD(tree_list);
	struct mnt_namespace *ns = dest_mnt->mnt_ns;
	struct pinned_mountpoint root = {};
	struct mountpoint *shorter = NULL;
	struct mount *child, *p;
	struct mount *top;
	struct hlist_node *n;
	int err = 0;
	bool moving = mnt_has_parent(source_mnt);

	/*
	 * Preallocate a mountpoint in case the new mounts need to be
	 * mounted beneath mounts on the same mountpoint.
	 */
	for (top = source_mnt; unlikely(top->overmount); top = top->overmount) {
		if (!shorter && is_mnt_ns_file(top->mnt.mnt_root))
			shorter = top->mnt_mp;
	}
	err = get_mountpoint(top->mnt.mnt_root, &root);
	if (err)
		return err;

	/* Is there space to add these mounts to the mount namespace? */
	if (!moving) {
		err = count_mounts(ns, source_mnt);
		if (err)
			goto out;
	}

	if (IS_MNT_SHARED(dest_mnt)) {
		err = invent_group_ids(source_mnt, true);
		if (err)
			goto out;
		err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
	}
	lock_mount_hash();
	if (err)
		goto out_cleanup_ids;

	if (IS_MNT_SHARED(dest_mnt)) {
		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
			set_mnt_shared(p);
	}

	if (moving) {
		umount_mnt(source_mnt);
		mnt_notify_add(source_mnt);
		/* if the mount is moved, it should no longer be expired
		 * automatically */
		list_del_init(&source_mnt->mnt_expire);
	} else {
		if (source_mnt->mnt_ns) {
			/* move from anon - the caller will destroy */
			emptied_ns = source_mnt->mnt_ns;
			for (p = source_mnt; p; p = next_mnt(p, source_mnt))
				move_from_ns(p);
		}
	}

	mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
	/*
	 * Now the original copy is in the same state as the secondaries -
	 * its root attached to mountpoint, but not hashed and all mounts
	 * in it are either in our namespace or in no namespace at all.
	 * Add the original to the list of copies and deal with the
	 * rest of work for all of them uniformly.
	 */
	hlist_add_head(&source_mnt->mnt_hash, &tree_list);

	hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
		struct mount *q;
		hlist_del_init(&child->mnt_hash);
		/* Notice when we are propagating across user namespaces */
		if (child->mnt_parent->mnt_ns->user_ns != user_ns)
			lock_mnt_tree(child);
		q = __lookup_mnt(&child->mnt_parent->mnt,
				 child->mnt_mountpoint);
		commit_tree(child);
		if (q) {
			struct mount *r = topmost_overmount(child);
			struct mountpoint *mp = root.mp;

			if (unlikely(shorter) && child != source_mnt)
				mp = shorter;
			mnt_change_mountpoint(r, mp, q);
		}
	}
	unpin_mountpoint(&root);
	unlock_mount_hash();

	return 0;

out_cleanup_ids:
	while (!hlist_empty(&tree_list)) {
		child = hlist_entry(tree_list.first, struct mount, mnt_hash);
		child->mnt_parent->mnt_ns->pending_mounts = 0;
		umount_tree(child, UMOUNT_SYNC);
	}
	unlock_mount_hash();
	cleanup_group_ids(source_mnt, NULL);
out:
	ns->pending_mounts = 0;

	read_seqlock_excl(&mount_lock);
	unpin_mountpoint(&root);
	read_sequnlock_excl(&mount_lock);

	return err;
}
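
/*
 * The "shared source, shared destination" row of the bind table above,
 * as userspace would trigger it. Illustrative mount(2) calls only; the
 * paths are hypothetical and assumed to already be mount points. After
 * the bind, the clone appears under every peer of the destination and
 * joins the source's peer group (the (++) case).
 *
 *	mount(NULL, "/dst", NULL, MS_SHARED, NULL);
 *	mount(NULL, "/src", NULL, MS_SHARED, NULL);
 *	mount("/src", "/dst/m", NULL, MS_BIND, NULL);
 */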

static inline struct mount *where_to_mount(const struct path *path,
					   struct dentry **dentry,
					   bool beneath)
{
	struct mount *m;

	if (unlikely(beneath)) {
		m = topmost_overmount(real_mount(path->mnt));
		*dentry = m->mnt_mountpoint;
		return m->mnt_parent;
	}
	m = __lookup_mnt(path->mnt, path->dentry);
	if (unlikely(m)) {
		m = topmost_overmount(m);
		*dentry = m->mnt.mnt_root;
		return m;
	}
	*dentry = path->dentry;
	return real_mount(path->mnt);
}

/**
 * do_lock_mount - acquire environment for mounting
 * @path: target path
 * @res: context to set up
 * @beneath: whether the intention is to mount beneath @path
 *
 * To mount something at given location, we need
 *	namespace_sem locked exclusive
 *	inode of dentry we are mounting on locked exclusive
 *	struct mountpoint for that dentry
 *	struct mount we are mounting on
 *
 * Results are stored in caller-supplied context (pinned_mountpoint);
 * on success we have res->parent and res->mp pointing to parent and
 * mountpoint respectively and res->node inserted into the ->m_list
 * of the mountpoint, making sure the mountpoint won't disappear.
 * On failure we have res->parent set to ERR_PTR(-E...), res->mp
 * left NULL, res->node - empty.
 * In case of success do_lock_mount returns with locks acquired (in
 * proper order - inode lock nests outside of namespace_sem).
 *
 * Request to mount on overmounted location is treated as "mount on
 * top of whatever's overmounting it"; request to mount beneath
 * a location - "mount immediately beneath the topmost mount at that
 * place".
 *
 * In all cases the location must not have been unmounted and the
 * chosen mountpoint must be allowed to be mounted on. For "beneath"
 * case we also require the location to be at the root of a mount
 * that has a parent (i.e. is not a root of some namespace).
 */
static void do_lock_mount(const struct path *path,
			  struct pinned_mountpoint *res,
			  bool beneath)
{
	int err;

	if (unlikely(beneath) && !path_mounted(path)) {
		res->parent = ERR_PTR(-EINVAL);
		return;
	}

	do {
		struct dentry *dentry, *d;
		struct mount *m, *n;

		scoped_guard(mount_locked_reader) {
			m = where_to_mount(path, &dentry, beneath);
			if (&m->mnt != path->mnt) {
				mntget(&m->mnt);
				dget(dentry);
			}
		}

		inode_lock(dentry->d_inode);
		namespace_lock();

		// check if the chain of mounts (if any) has changed.
		scoped_guard(mount_locked_reader)
			n = where_to_mount(path, &d, beneath);

		if (unlikely(n != m || dentry != d))
			err = -EAGAIN;	// something moved, retry
		else if (unlikely(cant_mount(dentry) || !is_mounted(path->mnt)))
			err = -ENOENT;	// not to be mounted on
		else if (beneath && &m->mnt == path->mnt && !m->overmount)
			err = -EINVAL;
		else
			err = get_mountpoint(dentry, res);

		if (unlikely(err)) {
			res->parent = ERR_PTR(err);
			namespace_unlock();
			inode_unlock(dentry->d_inode);
		} else {
			res->parent = m;
		}
		/*
		 * Drop the temporary references. This is subtle - on success
		 * we are doing that under namespace_sem, which would normally
		 * be forbidden. However, in that case we are guaranteed that
		 * refcounts won't reach zero, since we know that path->mnt
		 * is mounted and thus all mounts reachable from it are pinned
		 * and stable, along with their mountpoints and roots.
		 */
		if (&m->mnt != path->mnt) {
			dput(dentry);
			mntput(&m->mnt);
		}
	} while (err == -EAGAIN);
}

static void __unlock_mount(struct pinned_mountpoint *m)
{
	inode_unlock(m->mp->m_dentry->d_inode);
	read_seqlock_excl(&mount_lock);
	unpin_mountpoint(m);
	read_sequnlock_excl(&mount_lock);
	namespace_unlock();
}

static inline void unlock_mount(struct pinned_mountpoint *m)
{
	if (!IS_ERR(m->parent))
		__unlock_mount(m);
}

static void lock_mount_exact(const struct path *path,
			     struct pinned_mountpoint *mp);

#define LOCK_MOUNT_MAYBE_BENEATH(mp, path, beneath)			\
	struct pinned_mountpoint mp __cleanup(unlock_mount) = {};	\
	do_lock_mount((path), &mp, (beneath))
#define LOCK_MOUNT(mp, path) LOCK_MOUNT_MAYBE_BENEATH(mp, (path), false)
#define LOCK_MOUNT_EXACT(mp, path)					\
	struct pinned_mountpoint mp __cleanup(unlock_mount) = {};	\
	lock_mount_exact((path), &mp)

static int graft_tree(struct mount *mnt, const struct pinned_mountpoint *mp)
{
	if (mnt->mnt.mnt_sb->s_flags & SB_NOUSER)
		return -EINVAL;

	if (d_is_dir(mp->mp->m_dentry) !=
	    d_is_dir(mnt->mnt.mnt_root))
		return -ENOTDIR;

	return attach_recursive_mnt(mnt, mp);
}

static int may_change_propagation(const struct mount *m)
{
	struct mnt_namespace *ns = m->mnt_ns;

	// it must be mounted in some namespace
	if (IS_ERR_OR_NULL(ns))	// is_mounted()
		return -EINVAL;
	// and the caller must be admin in userns of that namespace
	if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;
	return 0;
}

/*
 * Sanity check the flags to change_mnt_propagation.
 */

static int flags_to_propagation_type(int ms_flags)
{
	int type = ms_flags & ~(MS_REC | MS_SILENT);

	/* Fail if any non-propagation flags are set */
	if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		return 0;
	/* Only one propagation flag should be set */
	if (!is_power_of_2(type))
		return 0;
	return type;
}

/*
 * recursively change the type of the mountpoint.
 */
static int do_change_type(const struct path *path, int ms_flags)
{
	struct mount *m;
	struct mount *mnt = real_mount(path->mnt);
	int recurse = ms_flags & MS_REC;
	int type;
	int err;

	if (!path_mounted(path))
		return -EINVAL;

	type = flags_to_propagation_type(ms_flags);
	if (!type)
		return -EINVAL;

	guard(namespace_excl)();

	err = may_change_propagation(mnt);
	if (err)
		return err;

	if (type == MS_SHARED) {
		err = invent_group_ids(mnt, recurse);
		if (err)
			return err;
	}

	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
		change_mnt_propagation(m, type);

	return 0;
}
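
/*
 * do_change_type() is what services the propagation-type mount(2)
 * calls. An illustrative userspace equivalent of `mount --make-rshared /`
 * (any mount point works in place of "/"):
 *
 *	mount(NULL, "/", NULL, MS_SHARED | MS_REC, NULL);
 */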

/* may_copy_tree() - check if a mount tree can be copied
 * @path: path to the mount tree to be copied
 *
 * This helper checks if the caller may copy the mount tree starting
 * from @path->mnt. The caller may copy the mount tree under the
 * following circumstances:
 *
 * (1) The caller is located in the mount namespace of the mount tree.
 *     This also implies that the mount does not belong to an anonymous
 *     mount namespace.
 * (2) The caller tries to copy an nsfs mount referring to a mount
 *     namespace, i.e., the caller is trying to copy a mount namespace
 *     entry from nsfs.
 * (3) The caller tries to copy a pidfs mount referring to a pidfd.
 * (4) The caller is trying to copy a mount tree that belongs to an
 *     anonymous mount namespace.
 *
 *     For that to be safe, this helper enforces that the origin mount
 *     namespace the anonymous mount namespace was created from is the
 *     same as the caller's mount namespace by comparing the sequence
 *     numbers.
 *
 *     This is not strictly necessary. The current semantics of the new
 *     mount api enforce that the caller must be located in the same
 *     mount namespace as the mount tree it interacts with. Using the
 *     origin sequence number preserves these semantics even for
 *     anonymous mount namespaces. However, one could envision extending
 *     the api to directly operate across mount namespaces if needed.
 *
 *     The ownership of a non-anonymous mount namespace such as the
 *     caller's cannot change.
 *     => We know that the caller's mount namespace is stable.
 *
 *     If the origin sequence number of the anonymous mount namespace is
 *     the same as the sequence number of the caller's mount namespace,
 *     => the owning namespaces are the same.
 *
 *     ==> The earlier capability check on the owning namespace of the
 *         caller's mount namespace ensures that the caller has the
 *         ability to copy the mount tree.
 *
 * Returns true if the mount tree can be copied, false otherwise.
 */
static inline bool may_copy_tree(const struct path *path)
{
	struct mount *mnt = real_mount(path->mnt);
	const struct dentry_operations *d_op;

	if (check_mnt(mnt))
		return true;

	d_op = path->dentry->d_op;
	if (d_op == &ns_dentry_operations)
		return true;

	if (d_op == &pidfs_dentry_operations)
		return true;

	if (!is_mounted(path->mnt))
		return false;

	return check_anonymous_mnt(mnt);
}

static struct mount *__do_loopback(const struct path *old_path,
				   unsigned int flags, unsigned int copy_flags)
{
	struct mount *old = real_mount(old_path->mnt);
	bool recurse = flags & AT_RECURSIVE;

	if (IS_MNT_UNBINDABLE(old))
		return ERR_PTR(-EINVAL);

	if (!may_copy_tree(old_path))
		return ERR_PTR(-EINVAL);

	if (!recurse && __has_locked_children(old, old_path->dentry))
		return ERR_PTR(-EINVAL);

	/*
	 * When creating a new mount namespace we don't want to copy over
	 * mounts of mount namespaces to avoid the risk of cycles and also to
	 * minimize the default complex interdependencies between mount
	 * namespaces.
	 *
	 * We could of course just check whether all mount namespace files
	 * aren't creating cycles, but really, let's keep this simple.
	 */
	if (!(flags & OPEN_TREE_NAMESPACE))
		copy_flags |= CL_COPY_MNT_NS_FILE;

	if (recurse)
		return copy_tree(old, old_path->dentry, copy_flags);

	return clone_mnt(old, old_path->dentry, copy_flags);
}

/*
 * do loopback mount.
 */
static int do_loopback(const struct path *path, const char *old_name,
		       int recurse)
{
	struct path old_path __free(path_put) = {};
	struct mount *mnt = NULL;
	unsigned int flags = recurse ? AT_RECURSIVE : 0;
	int err;

	if (!old_name || !*old_name)
		return -EINVAL;
	err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
	if (err)
		return err;

	if (mnt_ns_loop(old_path.dentry))
		return -EINVAL;

	LOCK_MOUNT(mp, path);
	if (IS_ERR(mp.parent))
		return PTR_ERR(mp.parent);

	if (!check_mnt(mp.parent))
		return -EINVAL;

	mnt = __do_loopback(&old_path, flags, 0);
	if (IS_ERR(mnt))
		return PTR_ERR(mnt);

	err = graft_tree(mnt, &mp);
	if (err) {
		lock_mount_hash();
		umount_tree(mnt, UMOUNT_SYNC);
		unlock_mount_hash();
	}
	return err;
}
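
/*
 * Illustrative userspace trigger for do_loopback(): a recursive bind
 * mount via the legacy mount(2) interface (paths hypothetical).
 *
 *	mount("/src", "/dst", NULL, MS_BIND | MS_REC, NULL);
 */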

static struct mnt_namespace *get_detached_copy(const struct path *path, unsigned int flags)
{
	struct mnt_namespace *ns, *mnt_ns = current->nsproxy->mnt_ns, *src_mnt_ns;
	struct user_namespace *user_ns = mnt_ns->user_ns;
	struct mount *mnt, *p;

	ns = alloc_mnt_ns(user_ns, true);
	if (IS_ERR(ns))
		return ns;

	guard(namespace_excl)();

	/*
	 * Record the sequence number of the source mount namespace.
	 * This needs to hold namespace_sem to ensure that the mount
	 * doesn't get attached.
	 */
	if (is_mounted(path->mnt)) {
		src_mnt_ns = real_mount(path->mnt)->mnt_ns;
		if (is_anon_ns(src_mnt_ns))
			ns->seq_origin = src_mnt_ns->seq_origin;
		else
			ns->seq_origin = src_mnt_ns->ns.ns_id;
	}

	mnt = __do_loopback(path, flags, 0);
	if (IS_ERR(mnt)) {
		emptied_ns = ns;
		return ERR_CAST(mnt);
	}

	for (p = mnt; p; p = next_mnt(p, mnt)) {
		mnt_add_to_ns(ns, p);
		ns->nr_mounts++;
	}
	ns->root = mnt;
	return ns;
}

static struct file *open_detached_copy(struct path *path, unsigned int flags)
{
	struct mnt_namespace *ns = get_detached_copy(path, flags);
	struct file *file;

	if (IS_ERR(ns))
		return ERR_CAST(ns);

	mntput(path->mnt);
	path->mnt = mntget(&ns->root->mnt);
	file = dentry_open(path, O_PATH, current_cred());
	if (IS_ERR(file))
		dissolve_on_fput(path->mnt);
	else
		file->f_mode |= FMODE_NEED_UNMOUNT;
	return file;
}

DEFINE_FREE(put_empty_mnt_ns, struct mnt_namespace *,
	    if (!IS_ERR_OR_NULL(_T)) free_mnt_ns(_T))

static struct mnt_namespace *create_new_namespace(struct path *path, unsigned int flags)
{
	struct mnt_namespace *new_ns __free(put_empty_mnt_ns) = NULL;
	struct path to_path __free(path_put) = {};
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	struct user_namespace *user_ns = current_user_ns();
	struct mount *new_ns_root;
	struct mount *mnt;
	unsigned int copy_flags = 0;
	bool locked = false;

	if (user_ns != ns->user_ns)
		copy_flags |= CL_SLAVE;

	new_ns = alloc_mnt_ns(user_ns, false);
	if (IS_ERR(new_ns))
		return ERR_CAST(new_ns);

	scoped_guard(namespace_excl) {
		new_ns_root = clone_mnt(ns->root, ns->root->mnt.mnt_root, copy_flags);
		if (IS_ERR(new_ns_root))
			return ERR_CAST(new_ns_root);

		/*
		 * If the real rootfs had a locked mount on top of it somewhere
		 * in the stack, lock the new mount tree as well so it can't be
		 * exposed.
		 */
		mnt = ns->root;
		while (mnt->overmount) {
			mnt = mnt->overmount;
			if (mnt->mnt.mnt_flags & MNT_LOCKED)
				locked = true;
		}
	}

	/*
	 * We dropped the namespace semaphore so we can actually lock
	 * the copy for mounting. The copied mount isn't attached to any
	 * mount namespace and it is thus excluded from any propagation.
	 * So realistically we're isolated and the mount can't be
	 * overmounted.
	 */

	/* Borrow the reference from clone_mnt(). */
	to_path.mnt = &new_ns_root->mnt;
	to_path.dentry = dget(new_ns_root->mnt.mnt_root);

	/* Now lock for actual mounting. */
	LOCK_MOUNT_EXACT(mp, &to_path);
	if (unlikely(IS_ERR(mp.parent)))
		return ERR_CAST(mp.parent);

	/*
	 * We don't emulate unshare()ing a mount namespace. We stick to the
	 * restrictions of creating detached bind-mounts. That has much
	 * saner and simpler semantics.
	 */
	mnt = __do_loopback(path, flags, copy_flags);
	if (IS_ERR(mnt))
		return ERR_CAST(mnt);

	scoped_guard(mount_writer) {
		if (locked)
			mnt->mnt.mnt_flags |= MNT_LOCKED;
		/*
		 * Now mount the detached tree on top of the copy of the
		 * real rootfs we created.
		 */
		attach_mnt(mnt, new_ns_root, mp.mp);
		if (user_ns != ns->user_ns)
			lock_mnt_tree(new_ns_root);
	}

	/* Add all mounts to the new namespace. */
	for (struct mount *p = new_ns_root; p; p = next_mnt(p, new_ns_root)) {
		mnt_add_to_ns(new_ns, p);
		new_ns->nr_mounts++;
	}

	new_ns->root = real_mount(no_free_ptr(to_path.mnt));
	ns_tree_add_raw(new_ns);
	return no_free_ptr(new_ns);
}

static struct file *open_new_namespace(struct path *path, unsigned int flags)
{
	struct mnt_namespace *new_ns;

	new_ns = create_new_namespace(path, flags);
	if (IS_ERR(new_ns))
		return ERR_CAST(new_ns);
	return open_namespace_file(to_ns_common(new_ns));
}

static struct file *vfs_open_tree(int dfd, const char __user *filename, unsigned int flags)
{
	int ret;
	struct path path __free(path_put) = {};
	int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW;

	BUILD_BUG_ON(OPEN_TREE_CLOEXEC != O_CLOEXEC);

	if (flags & ~(AT_EMPTY_PATH | AT_NO_AUTOMOUNT | AT_RECURSIVE |
		      AT_SYMLINK_NOFOLLOW | OPEN_TREE_CLONE |
		      OPEN_TREE_CLOEXEC | OPEN_TREE_NAMESPACE))
		return ERR_PTR(-EINVAL);

	if ((flags & (AT_RECURSIVE | OPEN_TREE_CLONE | OPEN_TREE_NAMESPACE)) ==
	    AT_RECURSIVE)
		return ERR_PTR(-EINVAL);

	if (hweight32(flags & (OPEN_TREE_CLONE | OPEN_TREE_NAMESPACE)) > 1)
		return ERR_PTR(-EINVAL);

	if (flags & AT_NO_AUTOMOUNT)
		lookup_flags &= ~LOOKUP_AUTOMOUNT;
	if (flags & AT_SYMLINK_NOFOLLOW)
		lookup_flags &= ~LOOKUP_FOLLOW;

	/*
	 * If we create a new mount namespace with the cloned mount tree we
	 * just care about being privileged over our current user namespace.
	 * The new mount namespace will be owned by it.
	 */
	if ((flags & OPEN_TREE_NAMESPACE) &&
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);

	if ((flags & OPEN_TREE_CLONE) && !may_mount())
		return ERR_PTR(-EPERM);

	CLASS(filename_uflags, name)(filename, flags);
	ret = filename_lookup(dfd, name, lookup_flags, &path, NULL);
	if (unlikely(ret))
		return ERR_PTR(ret);

	if (flags & OPEN_TREE_NAMESPACE)
		return open_new_namespace(&path, flags);

	if (flags & OPEN_TREE_CLONE)
		return open_detached_copy(&path, flags);

	return dentry_open(&path, O_PATH, current_cred());
}

SYSCALL_DEFINE3(open_tree, int, dfd, const char __user *, filename, unsigned, flags)
{
	return FD_ADD(flags, vfs_open_tree(dfd, filename, flags));
}
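
/*
 * A hedged userspace sketch for open_tree(2): detach a recursive copy
 * of a subtree and attach it elsewhere with move_mount(2). Raw
 * syscall(2) is used since libc wrappers may be missing; paths are
 * hypothetical and error handling is elided.
 *
 *	int fd = syscall(SYS_open_tree, AT_FDCWD, "/src",
 *			 OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC | AT_RECURSIVE);
 *	syscall(SYS_move_mount, fd, "", AT_FDCWD, "/dst",
 *		MOVE_MOUNT_F_EMPTY_PATH);
 *	close(fd);
 */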

/*
 * Don't allow locked mount flags to be cleared.
 *
 * No locks need to be held here while testing the various MNT_LOCK
 * flags because those flags can never be cleared once they are set.
 */
static bool can_change_locked_flags(struct mount *mnt, unsigned int mnt_flags)
{
	unsigned int fl = mnt->mnt.mnt_flags;

	if ((fl & MNT_LOCK_READONLY) &&
	    !(mnt_flags & MNT_READONLY))
		return false;

	if ((fl & MNT_LOCK_NODEV) &&
	    !(mnt_flags & MNT_NODEV))
		return false;

	if ((fl & MNT_LOCK_NOSUID) &&
	    !(mnt_flags & MNT_NOSUID))
		return false;

	if ((fl & MNT_LOCK_NOEXEC) &&
	    !(mnt_flags & MNT_NOEXEC))
		return false;

	if ((fl & MNT_LOCK_ATIME) &&
	    ((fl & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK)))
		return false;

	return true;
}

static int change_mount_ro_state(struct mount *mnt, unsigned int mnt_flags)
{
	bool readonly_request = (mnt_flags & MNT_READONLY);

	if (readonly_request == __mnt_is_readonly(&mnt->mnt))
		return 0;

	if (readonly_request)
		return mnt_make_readonly(mnt);

	mnt->mnt.mnt_flags &= ~MNT_READONLY;
	return 0;
}

static void set_mount_attributes(struct mount *mnt, unsigned int mnt_flags)
{
	mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
	mnt->mnt.mnt_flags = mnt_flags;
	touch_mnt_namespace(mnt->mnt_ns);
}

static void mnt_warn_timestamp_expiry(const struct path *mountpoint,
				      struct vfsmount *mnt)
{
	struct super_block *sb = mnt->mnt_sb;

	if (!__mnt_is_readonly(mnt) &&
	    (!(sb->s_iflags & SB_I_TS_EXPIRY_WARNED)) &&
	    (ktime_get_real_seconds() + TIME_UPTIME_SEC_MAX > sb->s_time_max)) {
		char *buf, *mntpath;

		buf = (char *)__get_free_page(GFP_KERNEL);
		if (buf)
			mntpath = d_path(mountpoint, buf, PAGE_SIZE);
		else
			mntpath = ERR_PTR(-ENOMEM);
		if (IS_ERR(mntpath))
			mntpath = "(unknown)";

		pr_warn("%s filesystem being %s at %s supports timestamps until %ptTd (0x%llx)\n",
			sb->s_type->name,
			is_mounted(mnt) ? "remounted" : "mounted",
			mntpath, &sb->s_time_max,
			(unsigned long long)sb->s_time_max);

		sb->s_iflags |= SB_I_TS_EXPIRY_WARNED;
		if (buf)
			free_page((unsigned long)buf);
	}
}

/*
 * Handle reconfiguration of the mountpoint only without alteration of the
 * superblock it refers to. This is triggered by specifying MS_REMOUNT|MS_BIND
 * to mount(2).
 */
static int do_reconfigure_mnt(const struct path *path, unsigned int mnt_flags)
{
	struct super_block *sb = path->mnt->mnt_sb;
	struct mount *mnt = real_mount(path->mnt);
	int ret;

	if (!check_mnt(mnt))
		return -EINVAL;

	if (!path_mounted(path))
		return -EINVAL;

	if (!can_change_locked_flags(mnt, mnt_flags))
		return -EPERM;

	/*
	 * We're only checking whether the superblock is read-only not
	 * changing it, so only take down_read(&sb->s_umount).
	 */
	down_read(&sb->s_umount);
	lock_mount_hash();
	ret = change_mount_ro_state(mnt, mnt_flags);
	if (ret == 0)
		set_mount_attributes(mnt, mnt_flags);
	unlock_mount_hash();
	up_read(&sb->s_umount);

	mnt_warn_timestamp_expiry(path, &mnt->mnt);

	return ret;
}
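
/*
 * Illustrative userspace trigger for do_reconfigure_mnt(): make an
 * existing bind mount read-only without touching the superblock
 * (path hypothetical).
 *
 *	mount(NULL, "/dst", NULL, MS_REMOUNT | MS_BIND | MS_RDONLY, NULL);
 */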

/*
 * change filesystem flags. dir should be a physical root of filesystem.
 * If you've mounted a non-root directory somewhere and want to do remount
 * on it - tough luck.
 */
static int do_remount(const struct path *path, int sb_flags,
		      int mnt_flags, void *data)
{
	int err;
	struct super_block *sb = path->mnt->mnt_sb;
	struct mount *mnt = real_mount(path->mnt);
	struct fs_context *fc;

	if (!check_mnt(mnt))
		return -EINVAL;

	if (!path_mounted(path))
		return -EINVAL;

	if (!can_change_locked_flags(mnt, mnt_flags))
		return -EPERM;

	fc = fs_context_for_reconfigure(path->dentry, sb_flags, MS_RMT_MASK);
	if (IS_ERR(fc))
		return PTR_ERR(fc);

	/*
	 * Indicate to the filesystem that the remount request is coming
	 * from the legacy mount system call.
	 */
	fc->oldapi = true;

	err = parse_monolithic_mount_data(fc, data);
	if (!err) {
		down_write(&sb->s_umount);
		err = -EPERM;
		if (ns_capable(sb->s_user_ns, CAP_SYS_ADMIN)) {
			err = reconfigure_super(fc);
			if (!err) {
				lock_mount_hash();
				set_mount_attributes(mnt, mnt_flags);
				unlock_mount_hash();
			}
		}
		up_write(&sb->s_umount);
	}

	mnt_warn_timestamp_expiry(path, &mnt->mnt);

	put_fs_context(fc);
	return err;
}

static inline int tree_contains_unbindable(struct mount *mnt)
{
	struct mount *p;
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		if (IS_MNT_UNBINDABLE(p))
			return 1;
	}
	return 0;
}

static int do_set_group(const struct path *from_path, const struct path *to_path)
{
	struct mount *from = real_mount(from_path->mnt);
	struct mount *to = real_mount(to_path->mnt);
	int err;

	guard(namespace_excl)();

	err = may_change_propagation(from);
	if (err)
		return err;
	err = may_change_propagation(to);
	if (err)
		return err;

	/* To and From paths should be mount roots */
	if (!path_mounted(from_path))
		return -EINVAL;
	if (!path_mounted(to_path))
		return -EINVAL;

	/* Setting sharing groups is only allowed across same superblock */
	if (from->mnt.mnt_sb != to->mnt.mnt_sb)
		return -EINVAL;

	/* From mount root should be wider than To mount root */
	if (!is_subdir(to->mnt.mnt_root, from->mnt.mnt_root))
		return -EINVAL;

	/* From mount should not have locked children in place of To's root */
	if (__has_locked_children(from, to->mnt.mnt_root))
		return -EINVAL;

	/* Setting sharing groups is only allowed on private mounts */
	if (IS_MNT_SHARED(to) || IS_MNT_SLAVE(to))
		return -EINVAL;

	/* From should not be private */
	if (!IS_MNT_SHARED(from) && !IS_MNT_SLAVE(from))
		return -EINVAL;

	if (IS_MNT_SLAVE(from)) {
		hlist_add_behind(&to->mnt_slave, &from->mnt_slave);
		to->mnt_master = from->mnt_master;
	}

	if (IS_MNT_SHARED(from)) {
		to->mnt_group_id = from->mnt_group_id;
		list_add(&to->mnt_share, &from->mnt_share);
		set_mnt_shared(to);
	}
	return 0;
}
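
/*
 * do_set_group() is reached via move_mount(2) with MOVE_MOUNT_SET_GROUP:
 * it grafts the target into the source's sharing (peer/slave) group
 * instead of moving anything. A hedged userspace sketch (paths
 * hypothetical):
 *
 *	syscall(SYS_move_mount, AT_FDCWD, "/from", AT_FDCWD, "/to",
 *		MOVE_MOUNT_SET_GROUP);
 */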

/**
 * path_overmounted - check if path is overmounted
 * @path: path to check
 *
 * Check if path is overmounted, i.e., if there's a mount on top of
 * @path->mnt with @path->dentry as mountpoint.
 *
 * Context: namespace_sem must be held at least shared.
 * MUST NOT be called under lock_mount_hash() (there one should just
 * call __lookup_mnt() and check if it returns NULL).
 * Return: If path is overmounted true is returned, false if not.
 */
static inline bool path_overmounted(const struct path *path)
{
	unsigned seq = read_seqbegin(&mount_lock);
	bool no_child;

	rcu_read_lock();
	no_child = !__lookup_mnt(path->mnt, path->dentry);
	rcu_read_unlock();
	if (need_seqretry(&mount_lock, seq)) {
		read_seqlock_excl(&mount_lock);
		no_child = !__lookup_mnt(path->mnt, path->dentry);
		read_sequnlock_excl(&mount_lock);
	}
	return unlikely(!no_child);
}

/*
 * Check if there is a possibly empty chain of descent from p1 to p2.
 * Locks: namespace_sem (shared) or mount_lock (read_seqlock_excl).
 */
static bool mount_is_ancestor(const struct mount *p1, const struct mount *p2)
{
	while (p2 != p1 && mnt_has_parent(p2))
		p2 = p2->mnt_parent;
	return p2 == p1;
}

/**
 * can_move_mount_beneath - check that we can mount beneath the top mount
 * @mnt_from: mount we are trying to move
 * @mnt_to: mount under which to mount
 * @mp: mountpoint of @mnt_to
 *
 * - Make sure that nothing can be mounted beneath the caller's current
 *   root or the rootfs of the namespace.
 * - Make sure that the caller can unmount the topmost mount ensuring
 *   that the caller could reveal the underlying mountpoint.
 * - Ensure that nothing has been mounted on top of @mnt_from before we
 *   grabbed @namespace_sem to avoid creating pointless shadow mounts.
 * - Prevent mounting beneath a mount if the propagation relationship
 *   between the source mount, parent mount, and top mount would lead to
 *   nonsensical mount trees.
 *
 * Context: This function expects namespace_lock() to be held.
 * Return: On success 0, and on error a negative error code is returned.
 */
static int can_move_mount_beneath(const struct mount *mnt_from,
				  const struct mount *mnt_to,
				  const struct mountpoint *mp)
{
	struct mount *parent_mnt_to = mnt_to->mnt_parent;

	if (IS_MNT_LOCKED(mnt_to))
		return -EINVAL;

	/* Avoid creating shadow mounts during mount propagation. */
	if (mnt_from->overmount)
		return -EINVAL;

	/*
	 * Mounting beneath the rootfs only makes sense when the
	 * semantics of pivot_root(".", ".") are used.
	 */
	if (&mnt_to->mnt == current->fs->root.mnt)
		return -EINVAL;
	if (parent_mnt_to == current->nsproxy->mnt_ns->root)
		return -EINVAL;

	if (mount_is_ancestor(mnt_to, mnt_from))
		return -EINVAL;

	/*
	 * If the parent mount propagates to the child mount this would
	 * mean mounting @mnt_from on @mnt_to->mnt_parent and then
	 * propagating a copy @c of @mnt_from on top of @mnt_to. This
	 * defeats the whole purpose of mounting beneath another mount.
	 */
	if (propagation_would_overmount(parent_mnt_to, mnt_to, mp))
		return -EINVAL;

	/*
	 * If @mnt_to->mnt_parent propagates to @mnt_from this would
	 * mean propagating a copy @c of @mnt_from on top of @mnt_from.
	 * Afterwards @mnt_from would be mounted on top of
	 * @mnt_to->mnt_parent and @mnt_to would be unmounted from
	 * @mnt->mnt_parent and remounted on @mnt_from. But since @c is
	 * already mounted on @mnt_from, @mnt_to would ultimately be
	 * remounted on top of @c. Afterwards, @mnt_from would be
	 * covered by a copy @c of @mnt_from and @c would be covered by
	 * @mnt_from itself. This defeats the whole purpose of mounting
	 * @mnt_from beneath @mnt_to.
	 */
	if (check_mnt(mnt_from) &&
	    propagation_would_overmount(parent_mnt_to, mnt_from, mp))
		return -EINVAL;

	return 0;
}
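
/*
 * Mounting beneath is requested from userspace via move_mount(2) with
 * MOVE_MOUNT_BENEATH, e.g. to splice a new mount under the topmost
 * mount at a location before lazily unmounting the old one. A hedged
 * sketch (paths hypothetical, error handling elided):
 *
 *	syscall(SYS_move_mount, AT_FDCWD, "/new", AT_FDCWD, "/mnt",
 *		MOVE_MOUNT_BENEATH);
 *	umount2("/mnt", MNT_DETACH);	// reveal the mount placed beneath
 */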
3578
3579 /* may_use_mount() - check if a mount tree can be used
3580 * @mnt: vfsmount to be used
3581 *
3582 * This helper checks if the caller may use the mount tree starting
3583 * from @path->mnt. The caller may use the mount tree under the
3584 * following circumstances:
3585 *
3586 * (1) The caller is located in the mount namespace of the mount tree.
3587 * This also implies that the mount does not belong to an anonymous
3588 * mount namespace.
3589 * (2) The caller is trying to use a mount tree that belongs to an
3590 * anonymous mount namespace.
3591 *
3592 * For that to be safe, this helper enforces that the origin mount
3593 * namespace the anonymous mount namespace was created from is the
3594 * same as the caller's mount namespace by comparing the sequence
3595 * numbers.
3596 *
3597 * The ownership of a non-anonymous mount namespace such as the
3598 * caller's cannot change.
3599 * => We know that the caller's mount namespace is stable.
3600 *
3601 * If the origin sequence number of the anonymous mount namespace is
3602 * the same as the sequence number of the caller's mount namespace.
3603 * => The owning namespaces are the same.
3604 *
3605 * ==> The earlier capability check on the owning namespace of the
3606 * caller's mount namespace ensures that the caller has the
3607 * ability to use the mount tree.
3608 *
3609 * Returns true if the mount tree can be used, false otherwise.
3610 */
3611 static inline bool may_use_mount(struct mount *mnt)
3612 {
3613 if (check_mnt(mnt))
3614 return true;
3615
3616 /*
3617 * Make sure that no one unmounted the target path or somehow
3618 * managed to get their hands on something purely kernel
3619 * internal.
3620 */
3621 if (!is_mounted(&mnt->mnt))
3622 return false;
3623
3624 return check_anonymous_mnt(mnt);
3625 }
3626
3627 static int do_move_mount(const struct path *old_path,
3628 const struct path *new_path,
3629 enum mnt_tree_flags_t flags)
3630 {
3631 struct mount *old = real_mount(old_path->mnt);
3632 int err;
3633 bool beneath = flags & MNT_TREE_BENEATH;
3634
3635 if (!path_mounted(old_path))
3636 return -EINVAL;
3637
3638 if (d_is_dir(new_path->dentry) != d_is_dir(old_path->dentry))
3639 return -EINVAL;
3640
3641 LOCK_MOUNT_MAYBE_BENEATH(mp, new_path, beneath);
3642 if (IS_ERR(mp.parent))
3643 return PTR_ERR(mp.parent);
3644
3645 if (check_mnt(old)) {
3646 /* if the source is in our namespace... */
3647 /* ... it should be detachable from parent */
3648 if (!mnt_has_parent(old) || IS_MNT_LOCKED(old))
3649 return -EINVAL;
3650 /* ... which should not be shared */
3651 if (IS_MNT_SHARED(old->mnt_parent))
3652 return -EINVAL;
3653 /* ... and the target should be in our namespace */
3654 if (!check_mnt(mp.parent))
3655 return -EINVAL;
3656 } else {
3657 /*
3658 * otherwise the source must be the root of some anon namespace.
3659 */
3660 if (!anon_ns_root(old))
3661 return -EINVAL;
3662 /*
3663 * Bail out early if the target is within the same namespace -
3664 * subsequent checks would've rejected that, but they lose
3665 * some corner cases if we check it early.
3666 */
3667 if (old->mnt_ns == mp.parent->mnt_ns)
3668 return -EINVAL;
3669 /*
3670 * Target should be either in our namespace or in an acceptable
3671 * anon namespace, sensu check_anonymous_mnt().
3672 */
3673 if (!may_use_mount(mp.parent))
3674 return -EINVAL;
3675 }
3676
3677 if (beneath) {
3678 struct mount *over = real_mount(new_path->mnt);
3679
3680 if (mp.parent != over->mnt_parent)
3681 over = mp.parent->overmount;
3682 err = can_move_mount_beneath(old, over, mp.mp);
3683 if (err)
3684 return err;
3685 }
3686
3687 /*
3688 * Don't move a mount tree containing unbindable mounts to a destination
3689 * mount which is shared.
3690 */
3691 if (IS_MNT_SHARED(mp.parent) && tree_contains_unbindable(old))
3692 return -EINVAL;
3693 if (!check_for_nsfs_mounts(old))
3694 return -ELOOP;
3695 if (mount_is_ancestor(old, mp.parent))
3696 return -ELOOP;
3697
3698 return attach_recursive_mnt(old, &mp);
3699 }
3700
3701 static int do_move_mount_old(const struct path *path, const char *old_name)
3702 {
3703 struct path old_path __free(path_put) = {};
3704 int err;
3705
3706 if (!old_name || !*old_name)
3707 return -EINVAL;
3708
3709 err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
3710 if (err)
3711 return err;
3712
3713 return do_move_mount(&old_path, path, 0);
3714 }
3715
3716 /*
3717 * add a mount into a namespace's mount tree
3718 */
3719 static int do_add_mount(struct mount *newmnt, const struct pinned_mountpoint *mp,
3720 int mnt_flags)
3721 {
3722 struct mount *parent = mp->parent;
3723
3724 if (IS_ERR(parent))
3725 return PTR_ERR(parent);
3726
3727 mnt_flags &= ~MNT_INTERNAL_FLAGS;
3728
3729 if (unlikely(!check_mnt(parent))) {
3730 /* that's acceptable only for automounts done in private ns */
3731 if (!(mnt_flags & MNT_SHRINKABLE))
3732 return -EINVAL;
3733 /* ... and for those we'd better have mountpoint still alive */
3734 if (!parent->mnt_ns)
3735 return -EINVAL;
3736 }
3737
3738 /* Refuse the same filesystem on the same mount point */
3739 if (parent->mnt.mnt_sb == newmnt->mnt.mnt_sb &&
3740 parent->mnt.mnt_root == mp->mp->m_dentry)
3741 return -EBUSY;
3742
3743 if (d_is_symlink(newmnt->mnt.mnt_root))
3744 return -EINVAL;
3745
3746 newmnt->mnt.mnt_flags = mnt_flags;
3747 return graft_tree(newmnt, mp);
3748 }
3749
3750 static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags);
3751
3752 /*
3753 * Create a new mount using a superblock configuration and request it
3754 * be added to the namespace tree.
3755 */
3756 static int do_new_mount_fc(struct fs_context *fc, const struct path *mountpoint,
3757 unsigned int mnt_flags)
3758 {
3759 struct super_block *sb;
3760 struct vfsmount *mnt __free(mntput) = fc_mount(fc);
3761 int error;
3762
3763 if (IS_ERR(mnt))
3764 return PTR_ERR(mnt);
3765
3766 sb = fc->root->d_sb;
3767 error = security_sb_kern_mount(sb);
3768 if (unlikely(error))
3769 return error;
3770
3771 if (unlikely(mount_too_revealing(sb, &mnt_flags))) {
3772 errorfcp(fc, "VFS", "Mount too revealing");
3773 return -EPERM;
3774 }
3775
3776 mnt_warn_timestamp_expiry(mountpoint, mnt);
3777
3778 LOCK_MOUNT(mp, mountpoint);
3779 error = do_add_mount(real_mount(mnt), &mp, mnt_flags);
3780 if (!error)
3781 retain_and_null_ptr(mnt); // consumed on success
3782 return error;
3783 }
3784
3785 /*
3786 * create a new mount for userspace and request it to be added into the
3787 * namespace's tree
3788 */
3789 static int do_new_mount(const struct path *path, const char *fstype,
3790 int sb_flags, int mnt_flags,
3791 const char *name, void *data)
3792 {
3793 struct file_system_type *type;
3794 struct fs_context *fc;
3795 const char *subtype = NULL;
3796 int err = 0;
3797
3798 if (!fstype)
3799 return -EINVAL;
3800
3801 type = get_fs_type(fstype);
3802 if (!type)
3803 return -ENODEV;
3804
3805 if (type->fs_flags & FS_HAS_SUBTYPE) {
3806 subtype = strchr(fstype, '.');
3807 if (subtype) {
3808 subtype++;
3809 if (!*subtype) {
3810 put_filesystem(type);
3811 return -EINVAL;
3812 }
3813 }
3814 }
3815
3816 fc = fs_context_for_mount(type, sb_flags);
3817 put_filesystem(type);
3818 if (IS_ERR(fc))
3819 return PTR_ERR(fc);
3820
3821 /*
3822 * Indicate to the filesystem that the mount request is coming
3823 * from the legacy mount system call.
3824 */
3825 fc->oldapi = true;
3826
3827 if (subtype)
3828 err = vfs_parse_fs_string(fc, "subtype", subtype);
3829 if (!err && name)
3830 err = vfs_parse_fs_string(fc, "source", name);
3831 if (!err)
3832 err = parse_monolithic_mount_data(fc, data);
3833 if (!err && !mount_capable(fc))
3834 err = -EPERM;
3835 if (!err)
3836 err = do_new_mount_fc(fc, path, mnt_flags);
3837
3838 put_fs_context(fc);
3839 return err;
3840 }
3841
3842 static void lock_mount_exact(const struct path *path,
3843 struct pinned_mountpoint *mp)
3844 {
3845 struct dentry *dentry = path->dentry;
3846 int err;
3847
3848 inode_lock(dentry->d_inode);
3849 namespace_lock();
3850 if (unlikely(cant_mount(dentry)))
3851 err = -ENOENT;
3852 else if (path_overmounted(path))
3853 err = -EBUSY;
3854 else
3855 err = get_mountpoint(dentry, mp);
3856 if (unlikely(err)) {
3857 namespace_unlock();
3858 inode_unlock(dentry->d_inode);
3859 mp->parent = ERR_PTR(err);
3860 } else {
3861 mp->parent = real_mount(path->mnt);
3862 }
3863 }
3864
3865 int finish_automount(struct vfsmount *__m, const struct path *path)
3866 {
3867 struct vfsmount *m __free(mntput) = __m;
3868 struct mount *mnt;
3869 int err;
3870
3871 if (!m)
3872 return 0;
3873 if (IS_ERR(m))
3874 return PTR_ERR(m);
3875
3876 mnt = real_mount(m);
3877
3878 if (m->mnt_root == path->dentry)
3879 return -ELOOP;
3880
3881 /*
3882 * we don't want to use LOCK_MOUNT() - in this case finding something
3883 * that overmounts our mountpoint means "quietly drop what we've
3884 * got", not "try to mount it on top".
3885 */
3886 LOCK_MOUNT_EXACT(mp, path);
3887 if (mp.parent == ERR_PTR(-EBUSY))
3888 return 0;
3889
3890 err = do_add_mount(mnt, &mp, path->mnt->mnt_flags | MNT_SHRINKABLE);
3891 if (likely(!err))
3892 retain_and_null_ptr(m);
3893 return err;
3894 }
3895
3896 /**
3897 * mnt_set_expiry - Put a mount on an expiration list
3898 * @mnt: The mount to list.
3899 * @expiry_list: The list to add the mount to.
3900 */
3901 void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
3902 {
3903 guard(mount_locked_reader)();
3904 list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);
3905 }
3906 EXPORT_SYMBOL(mnt_set_expiry);
3907
3908 /*
3909 * process a list of expirable mountpoints with the intent of discarding any
3910 * mountpoints that aren't in use and haven't been touched since last we came
3911 * here
3912 */
3913 void mark_mounts_for_expiry(struct list_head *mounts)
3914 {
3915 struct mount *mnt, *next;
3916 LIST_HEAD(graveyard);
3917
3918 if (list_empty(mounts))
3919 return;
3920
3921 guard(namespace_excl)();
3922 guard(mount_writer)();
3923
3924 /* extract from the expiration list every vfsmount that matches the
3925 * following criteria:
3926 * - already mounted
3927 * - only referenced by its parent vfsmount
3928 * - still marked for expiry (marked on the last call here; marks are
3929 * cleared by mntput())
3930 */
3931 list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
3932 if (!is_mounted(&mnt->mnt))
3933 continue;
3934 if (!xchg(&mnt->mnt_expiry_mark, 1) ||
3935 propagate_mount_busy(mnt, 1))
3936 continue;
3937 list_move(&mnt->mnt_expire, &graveyard);
3938 }
3939 while (!list_empty(&graveyard)) {
3940 mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
3941 touch_mnt_namespace(mnt->mnt_ns);
3942 umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
3943 }
3944 }
3945
3946 EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
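
/*
 * Sketch of the expiry protocol as an in-kernel user might drive it
 * (names are illustrative, modelled on the NFS automount expiry):
 * submounts are registered with mnt_set_expiry() and a periodic
 * worker calls mark_mounts_for_expiry(), so a mount is unmounted on
 * the second pass on which it is still unused.
 *
 *	static LIST_HEAD(example_automount_list);
 *
 *	// at automount time, make the new mount expirable
 *	mnt_set_expiry(newmnt, &example_automount_list);
 *
 *	// later, from a periodic work item
 *	mark_mounts_for_expiry(&example_automount_list);
 */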
3947
3948 /*
3949 * Ripoff of 'select_parent()'
3950 *
3951 * search the list of submounts for a given mountpoint, and move any
3952 * shrinkable submounts to the 'graveyard' list.
3953 */
3954 static int select_submounts(struct mount *parent, struct list_head *graveyard)
3955 {
3956 struct mount *this_parent = parent;
3957 struct list_head *next;
3958 int found = 0;
3959
3960 repeat:
3961 next = this_parent->mnt_mounts.next;
3962 resume:
3963 while (next != &this_parent->mnt_mounts) {
3964 struct list_head *tmp = next;
3965 struct mount *mnt = list_entry(tmp, struct mount, mnt_child);
3966
3967 next = tmp->next;
3968 if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE))
3969 continue;
3970 /*
3971 * Descend a level if the mnt_mounts list is non-empty.
3972 */
3973 if (!list_empty(&mnt->mnt_mounts)) {
3974 this_parent = mnt;
3975 goto repeat;
3976 }
3977
3978 if (!propagate_mount_busy(mnt, 1)) {
3979 list_move_tail(&mnt->mnt_expire, graveyard);
3980 found++;
3981 }
3982 }
3983 /*
3984 * All done at this level ... ascend and resume the search
3985 */
3986 if (this_parent != parent) {
3987 next = this_parent->mnt_child.next;
3988 this_parent = this_parent->mnt_parent;
3989 goto resume;
3990 }
3991 return found;
3992 }
3993
3994 /*
3995 * process a list of expirable mountpoints with the intent of discarding any
3996 * submounts of a specific parent mountpoint
3997 *
3998 * mount_lock must be held for write
3999 */
4000 static void shrink_submounts(struct mount *mnt)
4001 {
4002 LIST_HEAD(graveyard);
4003 struct mount *m;
4004
4005 /* extract submounts of 'mountpoint' from the expiration list */
4006 while (select_submounts(mnt, &graveyard)) {
4007 while (!list_empty(&graveyard)) {
4008 m = list_first_entry(&graveyard, struct mount,
4009 mnt_expire);
4010 touch_mnt_namespace(m->mnt_ns);
4011 umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC);
4012 }
4013 }
4014 }
4015
4016 static void *copy_mount_options(const void __user *data)
4017 {
4018 char *copy;
4019 unsigned left, offset;
4020
4021 if (!data)
4022 return NULL;
4023
4024 copy = kmalloc(PAGE_SIZE, GFP_KERNEL);
4025 if (!copy)
4026 return ERR_PTR(-ENOMEM);
4027
4028 left = copy_from_user(copy, data, PAGE_SIZE);
4029
4030 /*
4031 * Not all architectures have an exact copy_from_user(). Resort to
4032 * byte at a time.
4033 */
4034 offset = PAGE_SIZE - left;
4035 while (left) {
4036 char c;
4037 if (get_user(c, (const char __user *)data + offset))
4038 break;
4039 copy[offset] = c;
4040 left--;
4041 offset++;
4042 }
4043
4044 if (left == PAGE_SIZE) {
4045 kfree(copy);
4046 return ERR_PTR(-EFAULT);
4047 }
4048
4049 return copy;
4050 }
4051
4052 static char *copy_mount_string(const void __user *data)
4053 {
4054 return data ? strndup_user(data, PATH_MAX) : NULL;
4055 }
4056
4057 /*
4058 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
4059 * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
4060 *
4061 * data is a (void *) that can point to any structure up to
4062 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
4063 * information (or be NULL).
4064 *
4065 * Pre-0.97 versions of mount() didn't have a flags word.
4066 * When the flags word was introduced its top half was required
4067 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
4068 * Therefore, if this magic number is present, it carries no information
4069 * and must be discarded.
4070 */
4071 int path_mount(const char *dev_name, const struct path *path,
4072 const char *type_page, unsigned long flags, void *data_page)
4073 {
4074 unsigned int mnt_flags = 0, sb_flags;
4075 int ret;
4076
4077 /* Discard magic */
4078 if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
4079 flags &= ~MS_MGC_MSK;
4080
4081 /* Basic sanity checks */
4082 if (data_page)
4083 ((char *)data_page)[PAGE_SIZE - 1] = 0;
4084
4085 if (flags & MS_NOUSER)
4086 return -EINVAL;
4087
4088 ret = security_sb_mount(dev_name, path, type_page, flags, data_page);
4089 if (ret)
4090 return ret;
4091 if (!may_mount())
4092 return -EPERM;
4093 if (flags & SB_MANDLOCK)
4094 warn_mandlock();
4095
4096 /* Default to relatime unless overridden */
4097 if (!(flags & MS_NOATIME))
4098 mnt_flags |= MNT_RELATIME;
4099
4100 /* Separate the per-mountpoint flags */
4101 if (flags & MS_NOSUID)
4102 mnt_flags |= MNT_NOSUID;
4103 if (flags & MS_NODEV)
4104 mnt_flags |= MNT_NODEV;
4105 if (flags & MS_NOEXEC)
4106 mnt_flags |= MNT_NOEXEC;
4107 if (flags & MS_NOATIME)
4108 mnt_flags |= MNT_NOATIME;
4109 if (flags & MS_NODIRATIME)
4110 mnt_flags |= MNT_NODIRATIME;
4111 if (flags & MS_STRICTATIME)
4112 mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME);
4113 if (flags & MS_RDONLY)
4114 mnt_flags |= MNT_READONLY;
4115 if (flags & MS_NOSYMFOLLOW)
4116 mnt_flags |= MNT_NOSYMFOLLOW;
4117
4118 /* The default atime for remount is preservation */
4119 if ((flags & MS_REMOUNT) &&
4120 ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME |
4121 MS_STRICTATIME)) == 0)) {
4122 mnt_flags &= ~MNT_ATIME_MASK;
4123 mnt_flags |= path->mnt->mnt_flags & MNT_ATIME_MASK;
4124 }
4125
4126 sb_flags = flags & (SB_RDONLY |
4127 SB_SYNCHRONOUS |
4128 SB_MANDLOCK |
4129 SB_DIRSYNC |
4130 SB_SILENT |
4131 SB_POSIXACL |
4132 SB_LAZYTIME |
4133 SB_I_VERSION);
4134
4135 if ((flags & (MS_REMOUNT | MS_BIND)) == (MS_REMOUNT | MS_BIND))
4136 return do_reconfigure_mnt(path, mnt_flags);
4137 if (flags & MS_REMOUNT)
4138 return do_remount(path, sb_flags, mnt_flags, data_page);
4139 if (flags & MS_BIND)
4140 return do_loopback(path, dev_name, flags & MS_REC);
4141 if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
4142 return do_change_type(path, flags);
4143 if (flags & MS_MOVE)
4144 return do_move_mount_old(path, dev_name);
4145
4146 return do_new_mount(path, type_page, sb_flags, mnt_flags, dev_name,
4147 data_page);
4148 }
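
/*
 * Illustrative userspace view of the dispatch above (not kernel
 * code): the single mount(2) entry point multiplexes several
 * operations through its flags argument.
 *
 *	#include <sys/mount.h>
 *
 *	// new mount:   mount("/dev/sda1", "/mnt", "ext4", MS_NOSUID, NULL);
 *	// bind mount:  mount("/mnt", "/other", NULL, MS_BIND, NULL);
 *	// remount ro:  mount(NULL, "/mnt", NULL, MS_REMOUNT | MS_RDONLY, NULL);
 *	// propagation: mount(NULL, "/mnt", NULL, MS_SHARED, NULL);
 *	// move:        mount("/mnt", "/elsewhere", NULL, MS_MOVE, NULL);
 */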
4149
4150 int do_mount(const char *dev_name, const char __user *dir_name,
4151 const char *type_page, unsigned long flags, void *data_page)
4152 {
4153 struct path path __free(path_put) = {};
4154 int ret;
4155
4156 ret = user_path_at(AT_FDCWD, dir_name, LOOKUP_FOLLOW, &path);
4157 if (ret)
4158 return ret;
4159 return path_mount(dev_name, &path, type_page, flags, data_page);
4160 }
4161
4162 static struct ucounts *inc_mnt_namespaces(struct user_namespace *ns)
4163 {
4164 return inc_ucount(ns, current_euid(), UCOUNT_MNT_NAMESPACES);
4165 }
4166
4167 static void dec_mnt_namespaces(struct ucounts *ucounts)
4168 {
4169 dec_ucount(ucounts, UCOUNT_MNT_NAMESPACES);
4170 }
4171
4172 static void free_mnt_ns(struct mnt_namespace *ns)
4173 {
4174 if (!is_anon_ns(ns))
4175 ns_common_free(ns);
4176 dec_mnt_namespaces(ns->ucounts);
4177 mnt_ns_tree_remove(ns);
4178 }
4179
4180 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns, bool anon)
4181 {
4182 struct mnt_namespace *new_ns;
4183 struct ucounts *ucounts;
4184 int ret;
4185
4186 ucounts = inc_mnt_namespaces(user_ns);
4187 if (!ucounts)
4188 return ERR_PTR(-ENOSPC);
4189
4190 new_ns = kzalloc_obj(struct mnt_namespace, GFP_KERNEL_ACCOUNT);
4191 if (!new_ns) {
4192 dec_mnt_namespaces(ucounts);
4193 return ERR_PTR(-ENOMEM);
4194 }
4195
4196 if (anon)
4197 ret = ns_common_init_inum(new_ns, MNT_NS_ANON_INO);
4198 else
4199 ret = ns_common_init(new_ns);
4200 if (ret) {
4201 kfree(new_ns);
4202 dec_mnt_namespaces(ucounts);
4203 return ERR_PTR(ret);
4204 }
4205 ns_tree_gen_id(new_ns);
4206
4207 new_ns->is_anon = anon;
4208 refcount_set(&new_ns->passive, 1);
4209 new_ns->mounts = RB_ROOT;
4210 init_waitqueue_head(&new_ns->poll);
4211 new_ns->user_ns = get_user_ns(user_ns);
4212 new_ns->ucounts = ucounts;
4213 return new_ns;
4214 }
4215
4216 __latent_entropy
4217 struct mnt_namespace *copy_mnt_ns(u64 flags, struct mnt_namespace *ns,
4218 struct user_namespace *user_ns, struct fs_struct *new_fs)
4219 {
4220 struct mnt_namespace *new_ns;
4221 struct vfsmount *rootmnt __free(mntput) = NULL;
4222 struct vfsmount *pwdmnt __free(mntput) = NULL;
4223 struct mount *p, *q;
4224 struct mount *old;
4225 struct mount *new;
4226 int copy_flags;
4227
4228 BUG_ON(!ns);
4229
4230 if (likely(!(flags & CLONE_NEWNS))) {
4231 get_mnt_ns(ns);
4232 return ns;
4233 }
4234
4235 old = ns->root;
4236
4237 new_ns = alloc_mnt_ns(user_ns, false);
4238 if (IS_ERR(new_ns))
4239 return new_ns;
4240
4241 guard(namespace_excl)();
4242 /* First pass: copy the tree topology */
4243 copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE;
4244 if (user_ns != ns->user_ns)
4245 copy_flags |= CL_SLAVE;
4246 new = copy_tree(old, old->mnt.mnt_root, copy_flags);
4247 if (IS_ERR(new)) {
4248 emptied_ns = new_ns;
4249 return ERR_CAST(new);
4250 }
4251 if (user_ns != ns->user_ns) {
4252 guard(mount_writer)();
4253 lock_mnt_tree(new);
4254 }
4255 new_ns->root = new;
4256
4257 /*
4258 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
4259 * as belonging to new namespace. We have already acquired a private
4260 * fs_struct, so tsk->fs->lock is not needed.
4261 */
4262 p = old;
4263 q = new;
4264 while (p) {
4265 mnt_add_to_ns(new_ns, q);
4266 new_ns->nr_mounts++;
4267 if (new_fs) {
4268 if (&p->mnt == new_fs->root.mnt) {
4269 new_fs->root.mnt = mntget(&q->mnt);
4270 rootmnt = &p->mnt;
4271 }
4272 if (&p->mnt == new_fs->pwd.mnt) {
4273 new_fs->pwd.mnt = mntget(&q->mnt);
4274 pwdmnt = &p->mnt;
4275 }
4276 }
4277 p = next_mnt(p, old);
4278 q = next_mnt(q, new);
4279 if (!q)
4280 break;
4281 // an mntns binding we'd skipped?
4282 while (p->mnt.mnt_root != q->mnt.mnt_root)
4283 p = next_mnt(skip_mnt_tree(p), old);
4284 }
4285 ns_tree_add_raw(new_ns);
4286 return new_ns;
4287 }
4288
4289 struct dentry *mount_subtree(struct vfsmount *m, const char *name)
4290 {
4291 struct mount *mnt = real_mount(m);
4292 struct mnt_namespace *ns;
4293 struct super_block *s;
4294 struct path path;
4295 int err;
4296
4297 ns = alloc_mnt_ns(&init_user_ns, true);
4298 if (IS_ERR(ns)) {
4299 mntput(m);
4300 return ERR_CAST(ns);
4301 }
4302 ns->root = mnt;
4303 ns->nr_mounts++;
4304 mnt_add_to_ns(ns, mnt);
4305
4306 err = vfs_path_lookup(m->mnt_root, m,
4307 name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
4308
4309 put_mnt_ns(ns);
4310
4311 if (err)
4312 return ERR_PTR(err);
4313
4314 /* trade a vfsmount reference for active sb one */
4315 s = path.mnt->mnt_sb;
4316 atomic_inc(&s->s_active);
4317 mntput(path.mnt);
4318 /* lock the sucker */
4319 down_write(&s->s_umount);
4320 /* ... and return the root of (sub)tree on it */
4321 return path.dentry;
4322 }
4323 EXPORT_SYMBOL(mount_subtree);
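
/*
 * Sketch of a typical caller (illustrative): a filesystem that needs
 * the root of a subtree of a freshly created mount trades its
 * vfsmount reference for a dentry plus an active superblock:
 *
 *	struct vfsmount *mnt = vfs_kern_mount(type, flags, name, data);
 *	struct dentry *root = mount_subtree(mnt, "/export/home");
 *
 * On success root->d_sb->s_umount is held for write and s_active has
 * been raised; on error an ERR_PTR() is returned. Either way the
 * vfsmount reference passed in has been consumed.
 */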
4324
4325 SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
4326 char __user *, type, unsigned long, flags, void __user *, data)
4327 {
4328 int ret;
4329 char *kernel_type;
4330 char *kernel_dev;
4331 void *options;
4332
4333 kernel_type = copy_mount_string(type);
4334 ret = PTR_ERR(kernel_type);
4335 if (IS_ERR(kernel_type))
4336 goto out_type;
4337
4338 kernel_dev = copy_mount_string(dev_name);
4339 ret = PTR_ERR(kernel_dev);
4340 if (IS_ERR(kernel_dev))
4341 goto out_dev;
4342
4343 options = copy_mount_options(data);
4344 ret = PTR_ERR(options);
4345 if (IS_ERR(options))
4346 goto out_data;
4347
4348 ret = do_mount(kernel_dev, dir_name, kernel_type, flags, options);
4349
4350 kfree(options);
4351 out_data:
4352 kfree(kernel_dev);
4353 out_dev:
4354 kfree(kernel_type);
4355 out_type:
4356 return ret;
4357 }
4358
4359 #define FSMOUNT_VALID_FLAGS \
4360 (MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOSUID | MOUNT_ATTR_NODEV | \
4361 MOUNT_ATTR_NOEXEC | MOUNT_ATTR__ATIME | MOUNT_ATTR_NODIRATIME | \
4362 MOUNT_ATTR_NOSYMFOLLOW)
4363
4364 #define MOUNT_SETATTR_VALID_FLAGS (FSMOUNT_VALID_FLAGS | MOUNT_ATTR_IDMAP)
4365
4366 #define MOUNT_SETATTR_PROPAGATION_FLAGS \
4367 (MS_UNBINDABLE | MS_PRIVATE | MS_SLAVE | MS_SHARED)
4368
4369 static unsigned int attr_flags_to_mnt_flags(u64 attr_flags)
4370 {
4371 unsigned int mnt_flags = 0;
4372
4373 if (attr_flags & MOUNT_ATTR_RDONLY)
4374 mnt_flags |= MNT_READONLY;
4375 if (attr_flags & MOUNT_ATTR_NOSUID)
4376 mnt_flags |= MNT_NOSUID;
4377 if (attr_flags & MOUNT_ATTR_NODEV)
4378 mnt_flags |= MNT_NODEV;
4379 if (attr_flags & MOUNT_ATTR_NOEXEC)
4380 mnt_flags |= MNT_NOEXEC;
4381 if (attr_flags & MOUNT_ATTR_NODIRATIME)
4382 mnt_flags |= MNT_NODIRATIME;
4383 if (attr_flags & MOUNT_ATTR_NOSYMFOLLOW)
4384 mnt_flags |= MNT_NOSYMFOLLOW;
4385
4386 return mnt_flags;
4387 }
4388
4389 /*
4390 * Create a kernel mount representation for a new, prepared superblock
4391 * (specified by fs_fd) and attach to an open_tree-like file descriptor.
4392 */
4393 SYSCALL_DEFINE3(fsmount, int, fs_fd, unsigned int, flags,
4394 unsigned int, attr_flags)
4395 {
4396 struct path new_path __free(path_put) = {};
4397 struct mnt_namespace *ns;
4398 struct fs_context *fc;
4399 struct vfsmount *new_mnt;
4400 struct mount *mnt;
4401 unsigned int mnt_flags = 0;
4402 long ret;
4403
4404 if (!may_mount())
4405 return -EPERM;
4406
4407 if ((flags & ~(FSMOUNT_CLOEXEC)) != 0)
4408 return -EINVAL;
4409
4410 if (attr_flags & ~FSMOUNT_VALID_FLAGS)
4411 return -EINVAL;
4412
4413 mnt_flags = attr_flags_to_mnt_flags(attr_flags);
4414
4415 switch (attr_flags & MOUNT_ATTR__ATIME) {
4416 case MOUNT_ATTR_STRICTATIME:
4417 break;
4418 case MOUNT_ATTR_NOATIME:
4419 mnt_flags |= MNT_NOATIME;
4420 break;
4421 case MOUNT_ATTR_RELATIME:
4422 mnt_flags |= MNT_RELATIME;
4423 break;
4424 default:
4425 return -EINVAL;
4426 }
4427
4428 CLASS(fd, f)(fs_fd);
4429 if (fd_empty(f))
4430 return -EBADF;
4431
4432 if (fd_file(f)->f_op != &fscontext_fops)
4433 return -EINVAL;
4434
4435 fc = fd_file(f)->private_data;
4436
4437 ACQUIRE(mutex_intr, uapi_mutex)(&fc->uapi_mutex);
4438 ret = ACQUIRE_ERR(mutex_intr, &uapi_mutex);
4439 if (ret)
4440 return ret;
4441
4442 /* There must be a valid superblock or we can't mount it */
4443 ret = -EINVAL;
4444 if (!fc->root)
4445 return ret;
4446
4447 ret = -EPERM;
4448 if (mount_too_revealing(fc->root->d_sb, &mnt_flags)) {
4449 errorfcp(fc, "VFS", "Mount too revealing");
4450 return ret;
4451 }
4452
4453 ret = -EBUSY;
4454 if (fc->phase != FS_CONTEXT_AWAITING_MOUNT)
4455 return ret;
4456
4457 if (fc->sb_flags & SB_MANDLOCK)
4458 warn_mandlock();
4459
4460 new_mnt = vfs_create_mount(fc);
4461 if (IS_ERR(new_mnt))
4462 return PTR_ERR(new_mnt);
4463 new_mnt->mnt_flags = mnt_flags;
4464
4465 new_path.dentry = dget(fc->root);
4466 new_path.mnt = new_mnt;
4467
4468 /* We've done the mount bit - now move the file context into more or
4469 * less the same state as if we'd done an fspick(). We don't want to
4470 * do any memory allocation or anything like that at this point as we
4471 * don't want to have to handle any errors incurred.
4472 */
4473 vfs_clean_context(fc);
4474
4475 ns = alloc_mnt_ns(current->nsproxy->mnt_ns->user_ns, true);
4476 if (IS_ERR(ns))
4477 return PTR_ERR(ns);
4478 mnt = real_mount(new_path.mnt);
4479 ns->root = mnt;
4480 ns->nr_mounts = 1;
4481 mnt_add_to_ns(ns, mnt);
4482 mntget(new_path.mnt);
4483
4484 FD_PREPARE(fdf, (flags & FSMOUNT_CLOEXEC) ? O_CLOEXEC : 0,
4485 dentry_open(&new_path, O_PATH, fc->cred));
4486 if (fdf.err) {
4487 dissolve_on_fput(new_path.mnt);
4488 return fdf.err;
4489 }
4490
4491 /*
4492 * Attach to an apparent O_PATH fd with a note that we
4493 * need to unmount it, not just simply put it.
4494 */
4495 fd_prepare_file(fdf)->f_mode |= FMODE_NEED_UNMOUNT;
4496 return fd_publish(fdf);
4497 }
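
/*
 * Illustrative userspace sequence for the new mount API (not kernel
 * code), using raw syscall(2) in case libc wrappers are absent:
 *
 *	int fsfd = syscall(SYS_fsopen, "ext4", FSOPEN_CLOEXEC);
 *	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "source",
 *		"/dev/sda1", 0);
 *	syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
 *	int mfd = syscall(SYS_fsmount, fsfd, FSMOUNT_CLOEXEC,
 *			  MOUNT_ATTR_NODEV);
 *	// the detached mount in mfd is attached with move_mount(2)
 *	syscall(SYS_move_mount, mfd, "", AT_FDCWD, "/mnt",
 *		MOVE_MOUNT_F_EMPTY_PATH);
 */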
4498
4499 static inline int vfs_move_mount(const struct path *from_path,
4500 const struct path *to_path,
4501 enum mnt_tree_flags_t mflags)
4502 {
4503 int ret;
4504
4505 ret = security_move_mount(from_path, to_path);
4506 if (ret)
4507 return ret;
4508
4509 if (mflags & MNT_TREE_PROPAGATION)
4510 return do_set_group(from_path, to_path);
4511
4512 return do_move_mount(from_path, to_path, mflags);
4513 }
4514
4515 /*
4516 * Move a mount from one place to another. In combination with
4517 * fsopen()/fsmount() this is used to install a new mount and in combination
4518 * with open_tree(OPEN_TREE_CLONE [| AT_RECURSIVE]) it can be used to copy
4519 * a mount subtree.
4520 *
4521 * Note the flags value is a combination of MOVE_MOUNT_* flags.
4522 */
4523 SYSCALL_DEFINE5(move_mount,
4524 int, from_dfd, const char __user *, from_pathname,
4525 int, to_dfd, const char __user *, to_pathname,
4526 unsigned int, flags)
4527 {
4528 struct path to_path __free(path_put) = {};
4529 struct path from_path __free(path_put) = {};
4530 unsigned int lflags, uflags;
4531 enum mnt_tree_flags_t mflags = 0;
4532 int ret = 0;
4533
4534 if (!may_mount())
4535 return -EPERM;
4536
4537 if (flags & ~MOVE_MOUNT__MASK)
4538 return -EINVAL;
4539
4540 if ((flags & (MOVE_MOUNT_BENEATH | MOVE_MOUNT_SET_GROUP)) ==
4541 (MOVE_MOUNT_BENEATH | MOVE_MOUNT_SET_GROUP))
4542 return -EINVAL;
4543
4544 if (flags & MOVE_MOUNT_SET_GROUP) mflags |= MNT_TREE_PROPAGATION;
4545 if (flags & MOVE_MOUNT_BENEATH) mflags |= MNT_TREE_BENEATH;
4546
4547 uflags = 0;
4548 if (flags & MOVE_MOUNT_T_EMPTY_PATH)
4549 uflags = AT_EMPTY_PATH;
4550
4551 CLASS(filename_maybe_null,to_name)(to_pathname, uflags);
4552 if (!to_name && to_dfd >= 0) {
4553 CLASS(fd_raw, f_to)(to_dfd);
4554 if (fd_empty(f_to))
4555 return -EBADF;
4556
4557 to_path = fd_file(f_to)->f_path;
4558 path_get(&to_path);
4559 } else {
4560 lflags = 0;
4561 if (flags & MOVE_MOUNT_T_SYMLINKS)
4562 lflags |= LOOKUP_FOLLOW;
4563 if (flags & MOVE_MOUNT_T_AUTOMOUNTS)
4564 lflags |= LOOKUP_AUTOMOUNT;
4565 ret = filename_lookup(to_dfd, to_name, lflags, &to_path, NULL);
4566 if (ret)
4567 return ret;
4568 }
4569
4570 uflags = 0;
4571 if (flags & MOVE_MOUNT_F_EMPTY_PATH)
4572 uflags = AT_EMPTY_PATH;
4573
4574 CLASS(filename_maybe_null,from_name)(from_pathname, uflags);
4575 if (!from_name && from_dfd >= 0) {
4576 CLASS(fd_raw, f_from)(from_dfd);
4577 if (fd_empty(f_from))
4578 return -EBADF;
4579
4580 return vfs_move_mount(&fd_file(f_from)->f_path, &to_path, mflags);
4581 }
4582
4583 lflags = 0;
4584 if (flags & MOVE_MOUNT_F_SYMLINKS)
4585 lflags |= LOOKUP_FOLLOW;
4586 if (flags & MOVE_MOUNT_F_AUTOMOUNTS)
4587 lflags |= LOOKUP_AUTOMOUNT;
4588 ret = filename_lookup(from_dfd, from_name, lflags, &from_path, NULL);
4589 if (ret)
4590 return ret;
4591
4592 return vfs_move_mount(&from_path, &to_path, mflags);
4593 }
4594
4595 /*
4596 * Return true if path is reachable from root
4597 *
4598 * locks: mount_locked_reader || namespace_shared && is_mounted(mnt)
4599 */
4600 bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
4601 const struct path *root)
4602 {
4603 while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) {
4604 dentry = mnt->mnt_mountpoint;
4605 mnt = mnt->mnt_parent;
4606 }
4607 return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry);
4608 }
4609
4610 bool path_is_under(const struct path *path1, const struct path *path2)
4611 {
4612 guard(mount_locked_reader)();
4613 return is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
4614 }
4615 EXPORT_SYMBOL(path_is_under);
4616
4617 int path_pivot_root(struct path *new, struct path *old)
4618 {
4619 struct path root __free(path_put) = {};
4620 struct mount *new_mnt, *root_mnt, *old_mnt, *root_parent, *ex_parent;
4621 int error;
4622
4623 if (!may_mount())
4624 return -EPERM;
4625
4626 error = security_sb_pivotroot(old, new);
4627 if (error)
4628 return error;
4629
4630 get_fs_root(current->fs, &root);
4631
4632 LOCK_MOUNT(old_mp, old);
4633 old_mnt = old_mp.parent;
4634 if (IS_ERR(old_mnt))
4635 return PTR_ERR(old_mnt);
4636
4637 new_mnt = real_mount(new->mnt);
4638 root_mnt = real_mount(root.mnt);
4639 ex_parent = new_mnt->mnt_parent;
4640 root_parent = root_mnt->mnt_parent;
4641 if (IS_MNT_SHARED(old_mnt) ||
4642 IS_MNT_SHARED(ex_parent) ||
4643 IS_MNT_SHARED(root_parent))
4644 return -EINVAL;
4645 if (!check_mnt(root_mnt) || !check_mnt(new_mnt))
4646 return -EINVAL;
4647 if (new_mnt->mnt.mnt_flags & MNT_LOCKED)
4648 return -EINVAL;
4649 if (d_unlinked(new->dentry))
4650 return -ENOENT;
4651 if (new_mnt == root_mnt || old_mnt == root_mnt)
4652 return -EBUSY; /* loop, on the same file system */
4653 if (!path_mounted(&root))
4654 return -EINVAL; /* not a mountpoint */
4655 if (!mnt_has_parent(root_mnt))
4656 return -EINVAL; /* absolute root */
4657 if (!path_mounted(new))
4658 return -EINVAL; /* not a mountpoint */
4659 if (!mnt_has_parent(new_mnt))
4660 return -EINVAL; /* absolute root */
4661 /* make sure we can reach put_old from new_root */
4662 if (!is_path_reachable(old_mnt, old_mp.mp->m_dentry, new))
4663 return -EINVAL;
4664 /* make certain new is below the root */
4665 if (!is_path_reachable(new_mnt, new->dentry, &root))
4666 return -EINVAL;
4667 lock_mount_hash();
4668 umount_mnt(new_mnt);
4669 if (root_mnt->mnt.mnt_flags & MNT_LOCKED) {
4670 new_mnt->mnt.mnt_flags |= MNT_LOCKED;
4671 root_mnt->mnt.mnt_flags &= ~MNT_LOCKED;
4672 }
4673 /* mount new_root on / */
4674 attach_mnt(new_mnt, root_parent, root_mnt->mnt_mp);
4675 umount_mnt(root_mnt);
4676 /* mount old root on put_old */
4677 attach_mnt(root_mnt, old_mnt, old_mp.mp);
4678 touch_mnt_namespace(current->nsproxy->mnt_ns);
4679 /* A moved mount should not expire automatically */
4680 list_del_init(&new_mnt->mnt_expire);
4681 unlock_mount_hash();
4682 mnt_notify_add(root_mnt);
4683 mnt_notify_add(new_mnt);
4684 chroot_fs_refs(&root, new);
4685 return 0;
4686 }
4687
4688 /*
4689 * pivot_root Semantics:
4690 * Moves the root file system of the current process to the directory put_old,
4691 * makes new_root the new root file system of the current process, and sets
4692 * root/cwd of all processes which had them on the current root to new_root.
4693 *
4694 * Restrictions:
4695 * The new_root and put_old must be directories, and must not be on the
4696 * same file system as the current process root. The put_old must be
4697 * underneath new_root, i.e. adding a non-zero number of /.. to the string
4698 * pointed to by put_old must yield the same directory as new_root. No other
4699 * file system may be mounted on put_old. After all, new_root is a mountpoint.
4700 *
4701 * The immutable nullfs filesystem is mounted as the true root of the VFS
4702 * hierarchy. The mutable rootfs (tmpfs/ramfs) is layered on top of this,
4703 * allowing pivot_root() to work normally from initramfs.
4704 *
4705 * Notes:
4706 * - we don't move root/cwd if they are not at the root (reason: if something
4707 * cared enough to change them, it's probably wrong to force them elsewhere)
4708 * - it's okay to pick a root that isn't the root of a file system, e.g.
4709 * /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
4710 * though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
4711 * first.
4712 */
4713 SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
4714 const char __user *, put_old)
4715 {
4716 struct path new __free(path_put) = {};
4717 struct path old __free(path_put) = {};
4718 int error;
4719
4720 error = user_path_at(AT_FDCWD, new_root,
4721 LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &new);
4722 if (error)
4723 return error;
4724
4725 error = user_path_at(AT_FDCWD, put_old,
4726 LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old);
4727 if (error)
4728 return error;
4729
4730 return path_pivot_root(&new, &old);
4731 }
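
/*
 * Illustrative userspace usage (not kernel code), following the
 * idiom from the pivot_root(2) man page as used by initramfs
 * switch-overs and container runtimes:
 *
 *	chdir("/new_root");
 *	syscall(SYS_pivot_root, ".", ".");
 *	// the old root is now stacked on top of the new one at "."
 *	umount2(".", MNT_DETACH);
 */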
4732
4733 static unsigned int recalc_flags(struct mount_kattr *kattr, struct mount *mnt)
4734 {
4735 unsigned int flags = mnt->mnt.mnt_flags;
4736
4737 /* flags to clear */
4738 flags &= ~kattr->attr_clr;
4739 /* flags to raise */
4740 flags |= kattr->attr_set;
4741
4742 return flags;
4743 }
4744
4745 static int can_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
4746 {
4747 struct vfsmount *m = &mnt->mnt;
4748 struct user_namespace *fs_userns = m->mnt_sb->s_user_ns;
4749
4750 if (!kattr->mnt_idmap)
4751 return 0;
4752
4753 /*
4754 * Creating an idmapped mount with the filesystem wide idmapping
4755 * doesn't make sense so block that. We don't allow mushy semantics.
4756 */
4757 if (kattr->mnt_userns == m->mnt_sb->s_user_ns)
4758 return -EINVAL;
4759
4760 /*
4761 * We only allow a mount to change its idmapping if it has
4762 * never been accessible to userspace.
4763 */
4764 if (!(kattr->kflags & MOUNT_KATTR_IDMAP_REPLACE) && is_idmapped_mnt(m))
4765 return -EPERM;
4766
4767 /* The underlying filesystem doesn't support idmapped mounts yet. */
4768 if (!(m->mnt_sb->s_type->fs_flags & FS_ALLOW_IDMAP))
4769 return -EINVAL;
4770
4771 /* The filesystem has turned off idmapped mounts. */
4772 if (m->mnt_sb->s_iflags & SB_I_NOIDMAP)
4773 return -EINVAL;
4774
4775 /* We're not controlling the superblock. */
4776 if (!ns_capable(fs_userns, CAP_SYS_ADMIN))
4777 return -EPERM;
4778
4779 /* Mount has already been visible in the filesystem hierarchy. */
4780 if (!is_anon_ns(mnt->mnt_ns))
4781 return -EINVAL;
4782
4783 return 0;
4784 }
4785
4786 /**
4787 * mnt_allow_writers() - check whether the attribute change allows writers
4788 * @kattr: the new mount attributes
4789 * @mnt: the mount to which @kattr will be applied
4790 *
4791 * Check whether the new mount attributes in @kattr allow concurrent writers.
4792 *
4793 * Return: true if writers need to be held, false if not
4794 */
4795 static inline bool mnt_allow_writers(const struct mount_kattr *kattr,
4796 const struct mount *mnt)
4797 {
4798 return (!(kattr->attr_set & MNT_READONLY) ||
4799 (mnt->mnt.mnt_flags & MNT_READONLY)) &&
4800 !kattr->mnt_idmap;
4801 }
4802
4803 static int mount_setattr_prepare(struct mount_kattr *kattr, struct mount *mnt)
4804 {
4805 struct mount *m;
4806 int err;
4807
4808 for (m = mnt; m; m = next_mnt(m, mnt)) {
4809 if (!can_change_locked_flags(m, recalc_flags(kattr, m))) {
4810 err = -EPERM;
4811 break;
4812 }
4813
4814 err = can_idmap_mount(kattr, m);
4815 if (err)
4816 break;
4817
4818 if (!mnt_allow_writers(kattr, m)) {
4819 err = mnt_hold_writers(m);
4820 if (err) {
4821 m = next_mnt(m, mnt);
4822 break;
4823 }
4824 }
4825
4826 if (!(kattr->kflags & MOUNT_KATTR_RECURSE))
4827 return 0;
4828 }
4829
4830 if (err) {
4831 /* undo all mnt_hold_writers() we'd done */
4832 for (struct mount *p = mnt; p != m; p = next_mnt(p, mnt))
4833 mnt_unhold_writers(p);
4834 }
4835 return err;
4836 }
4837
4838 static void do_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
4839 {
4840 struct mnt_idmap *old_idmap;
4841
4842 if (!kattr->mnt_idmap)
4843 return;
4844
4845 old_idmap = mnt_idmap(&mnt->mnt);
4846
4847 /* Pairs with smp_load_acquire() in mnt_idmap(). */
4848 smp_store_release(&mnt->mnt.mnt_idmap, mnt_idmap_get(kattr->mnt_idmap));
4849 mnt_idmap_put(old_idmap);
4850 }
4851
4852 static void mount_setattr_commit(struct mount_kattr *kattr, struct mount *mnt)
4853 {
4854 struct mount *m;
4855
4856 for (m = mnt; m; m = next_mnt(m, mnt)) {
4857 unsigned int flags;
4858
4859 do_idmap_mount(kattr, m);
4860 flags = recalc_flags(kattr, m);
4861 WRITE_ONCE(m->mnt.mnt_flags, flags);
4862
4863 /* If we had to hold writers unblock them. */
4864 mnt_unhold_writers(m);
4865
4866 if (kattr->propagation)
4867 change_mnt_propagation(m, kattr->propagation);
4868 if (!(kattr->kflags & MOUNT_KATTR_RECURSE))
4869 break;
4870 }
4871 touch_mnt_namespace(mnt->mnt_ns);
4872 }
4873
4874 static int do_mount_setattr(const struct path *path, struct mount_kattr *kattr)
4875 {
4876 struct mount *mnt = real_mount(path->mnt);
4877 int err = 0;
4878
4879 if (!path_mounted(path))
4880 return -EINVAL;
4881
4882 if (kattr->mnt_userns) {
4883 struct mnt_idmap *mnt_idmap;
4884
4885 mnt_idmap = alloc_mnt_idmap(kattr->mnt_userns);
4886 if (IS_ERR(mnt_idmap))
4887 return PTR_ERR(mnt_idmap);
4888 kattr->mnt_idmap = mnt_idmap;
4889 }
4890
4891 if (kattr->propagation) {
4892 /*
4893 * Only take namespace_lock() if we're actually changing
4894 * propagation.
4895 */
4896 namespace_lock();
4897 if (kattr->propagation == MS_SHARED) {
4898 err = invent_group_ids(mnt, kattr->kflags & MOUNT_KATTR_RECURSE);
4899 if (err) {
4900 namespace_unlock();
4901 return err;
4902 }
4903 }
4904 }
4905
4906 err = -EINVAL;
4907 lock_mount_hash();
4908
4909 if (!anon_ns_root(mnt) && !check_mnt(mnt))
4910 goto out;
4911
4912 /*
4913 * First, we get the mount tree in a shape where we can change mount
4914 * properties without failure. If we succeeded to do so we commit all
4915 * changes and if we failed we clean up.
4916 */
4917 err = mount_setattr_prepare(kattr, mnt);
4918 if (!err)
4919 mount_setattr_commit(kattr, mnt);
4920
4921 out:
4922 unlock_mount_hash();
4923
4924 if (kattr->propagation) {
4925 if (err)
4926 cleanup_group_ids(mnt, NULL);
4927 namespace_unlock();
4928 }
4929
4930 return err;
4931 }
4932
4933 static int build_mount_idmapped(const struct mount_attr *attr, size_t usize,
4934 struct mount_kattr *kattr)
4935 {
4936 struct ns_common *ns;
4937 struct user_namespace *mnt_userns;
4938
4939 if (!((attr->attr_set | attr->attr_clr) & MOUNT_ATTR_IDMAP))
4940 return 0;
4941
4942 if (attr->attr_clr & MOUNT_ATTR_IDMAP) {
4943 /*
4944 * We can only remove an idmapping if it's never been
4945 * exposed to userspace.
4946 */
4947 if (!(kattr->kflags & MOUNT_KATTR_IDMAP_REPLACE))
4948 return -EINVAL;
4949
4950 /*
4951 * Removal of idmappings is equivalent to setting
4952 * nop_mnt_idmap.
4953 */
4954 if (!(attr->attr_set & MOUNT_ATTR_IDMAP)) {
4955 kattr->mnt_idmap = &nop_mnt_idmap;
4956 return 0;
4957 }
4958 }
4959
4960 if (attr->userns_fd > INT_MAX)
4961 return -EINVAL;
4962
4963 CLASS(fd, f)(attr->userns_fd);
4964 if (fd_empty(f))
4965 return -EBADF;
4966
4967 if (!proc_ns_file(fd_file(f)))
4968 return -EINVAL;
4969
4970 ns = get_proc_ns(file_inode(fd_file(f)));
4971 if (ns->ns_type != CLONE_NEWUSER)
4972 return -EINVAL;
4973
4974 /*
4975 * The initial idmapping cannot be used to create an idmapped
4976 * mount. We use the initial idmapping as an indicator of a mount
4977 * that is not idmapped. It can simply be passed into helpers that
4978 * are aware of idmapped mounts as a convenient shortcut. A user
4979 * can just create a dedicated identity mapping to achieve the same
4980 * result.
4981 */
4982 mnt_userns = container_of(ns, struct user_namespace, ns);
4983 if (mnt_userns == &init_user_ns)
4984 return -EPERM;
4985
4986 /* We're not controlling the target namespace. */
4987 if (!ns_capable(mnt_userns, CAP_SYS_ADMIN))
4988 return -EPERM;
4989
4990 kattr->mnt_userns = get_user_ns(mnt_userns);
4991 return 0;
4992 }
4993
4994 static int build_mount_kattr(const struct mount_attr *attr, size_t usize,
4995 struct mount_kattr *kattr)
4996 {
4997 if (attr->propagation & ~MOUNT_SETATTR_PROPAGATION_FLAGS)
4998 return -EINVAL;
4999 if (hweight32(attr->propagation & MOUNT_SETATTR_PROPAGATION_FLAGS) > 1)
5000 return -EINVAL;
5001 kattr->propagation = attr->propagation;
5002
5003 if ((attr->attr_set | attr->attr_clr) & ~MOUNT_SETATTR_VALID_FLAGS)
5004 return -EINVAL;
5005
5006 kattr->attr_set = attr_flags_to_mnt_flags(attr->attr_set);
5007 kattr->attr_clr = attr_flags_to_mnt_flags(attr->attr_clr);
5008
5009 /*
5010 * Since the MOUNT_ATTR_<atime> values are an enum, not a bitmap,
5011 * users wanting to transition to a different atime setting cannot
5012 * simply specify the atime setting in @attr_set, but must also
5013 * specify MOUNT_ATTR__ATIME in the @attr_clr field.
5014 * So ensure that MOUNT_ATTR__ATIME can't be partially set in
5015 * @attr_clr and that @attr_set can't have any atime bits set if
5016 * MOUNT_ATTR__ATIME isn't set in @attr_clr.
5017 */
5018 if (attr->attr_clr & MOUNT_ATTR__ATIME) {
5019 if ((attr->attr_clr & MOUNT_ATTR__ATIME) != MOUNT_ATTR__ATIME)
5020 return -EINVAL;
5021
5022 /*
5023 * Clear all previous time settings as they are mutually
5024 * exclusive.
5025 */
5026 kattr->attr_clr |= MNT_RELATIME | MNT_NOATIME;
5027 switch (attr->attr_set & MOUNT_ATTR__ATIME) {
5028 case MOUNT_ATTR_RELATIME:
5029 kattr->attr_set |= MNT_RELATIME;
5030 break;
5031 case MOUNT_ATTR_NOATIME:
5032 kattr->attr_set |= MNT_NOATIME;
5033 break;
5034 case MOUNT_ATTR_STRICTATIME:
5035 break;
5036 default:
5037 return -EINVAL;
5038 }
5039 } else {
5040 if (attr->attr_set & MOUNT_ATTR__ATIME)
5041 return -EINVAL;
5042 }
5043
5044 return build_mount_idmapped(attr, usize, kattr);
5045 }
5046
5047 static void finish_mount_kattr(struct mount_kattr *kattr)
5048 {
5049 if (kattr->mnt_userns) {
5050 put_user_ns(kattr->mnt_userns);
5051 kattr->mnt_userns = NULL;
5052 }
5053
5054 if (kattr->mnt_idmap)
5055 mnt_idmap_put(kattr->mnt_idmap);
5056 }
5057
5058 static int wants_mount_setattr(struct mount_attr __user *uattr, size_t usize,
5059 struct mount_kattr *kattr)
5060 {
5061 int ret;
5062 struct mount_attr attr;
5063
5064 BUILD_BUG_ON(sizeof(struct mount_attr) != MOUNT_ATTR_SIZE_VER0);
5065
5066 if (unlikely(usize > PAGE_SIZE))
5067 return -E2BIG;
5068 if (unlikely(usize < MOUNT_ATTR_SIZE_VER0))
5069 return -EINVAL;
5070
5071 if (!may_mount())
5072 return -EPERM;
5073
5074 ret = copy_struct_from_user(&attr, sizeof(attr), uattr, usize);
5075 if (ret)
5076 return ret;
5077
5078 /* Don't bother walking through the mounts if this is a nop. */
5079 if (attr.attr_set == 0 &&
5080 attr.attr_clr == 0 &&
5081 attr.propagation == 0)
5082 return 0; /* Tell caller to not bother. */
5083
5084 ret = build_mount_kattr(&attr, usize, kattr);
5085 if (ret < 0)
5086 return ret;
5087
5088 return 1;
5089 }
5090
5091 SYSCALL_DEFINE5(mount_setattr, int, dfd, const char __user *, path,
5092 unsigned int, flags, struct mount_attr __user *, uattr,
5093 size_t, usize)
5094 {
5095 int err;
5096 struct path target;
5097 struct mount_kattr kattr;
5098 unsigned int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW;
5099
5100 if (flags & ~(AT_EMPTY_PATH |
5101 AT_RECURSIVE |
5102 AT_SYMLINK_NOFOLLOW |
5103 AT_NO_AUTOMOUNT))
5104 return -EINVAL;
5105
5106 if (flags & AT_NO_AUTOMOUNT)
5107 lookup_flags &= ~LOOKUP_AUTOMOUNT;
5108 if (flags & AT_SYMLINK_NOFOLLOW)
5109 lookup_flags &= ~LOOKUP_FOLLOW;
5110
5111 kattr = (struct mount_kattr) {
5112 .lookup_flags = lookup_flags,
5113 };
5114
5115 if (flags & AT_RECURSIVE)
5116 kattr.kflags |= MOUNT_KATTR_RECURSE;
5117
5118 err = wants_mount_setattr(uattr, usize, &kattr);
5119 if (err <= 0)
5120 return err;
5121
5122 CLASS(filename_uflags, name)(path, flags);
5123 err = filename_lookup(dfd, name, kattr.lookup_flags, &target, NULL);
5124 if (!err) {
5125 err = do_mount_setattr(&target, &kattr);
5126 path_put(&target);
5127 }
5128 finish_mount_kattr(&kattr);
5129 return err;
5130 }
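
/*
 * Illustrative userspace usage (not kernel code): make a whole
 * subtree read-only and switch it to noatime. As explained in
 * build_mount_kattr(), changing the atime setting additionally
 * requires MOUNT_ATTR__ATIME in @attr_clr:
 *
 *	struct mount_attr attr = {
 *		.attr_set = MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOATIME,
 *		.attr_clr = MOUNT_ATTR__ATIME,
 *	};
 *	syscall(SYS_mount_setattr, AT_FDCWD, "/mnt", AT_RECURSIVE,
 *		&attr, sizeof(attr));
 */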
5131
5132 SYSCALL_DEFINE5(open_tree_attr, int, dfd, const char __user *, filename,
5133 unsigned, flags, struct mount_attr __user *, uattr,
5134 size_t, usize)
5135 {
5136 if (!uattr && usize)
5137 return -EINVAL;
5138
5139 FD_PREPARE(fdf, flags, vfs_open_tree(dfd, filename, flags));
5140 if (fdf.err)
5141 return fdf.err;
5142
5143 if (uattr) {
5144 struct mount_kattr kattr = {};
5145 struct file *file = fd_prepare_file(fdf);
5146 int ret;
5147
5148 if (flags & OPEN_TREE_CLONE)
5149 kattr.kflags = MOUNT_KATTR_IDMAP_REPLACE;
5150 if (flags & AT_RECURSIVE)
5151 kattr.kflags |= MOUNT_KATTR_RECURSE;
5152
5153 ret = wants_mount_setattr(uattr, usize, &kattr);
5154 if (ret > 0) {
5155 ret = do_mount_setattr(&file->f_path, &kattr);
5156 finish_mount_kattr(&kattr);
5157 }
5158 if (ret)
5159 return ret;
5160 }
5161
5162 return fd_publish(fdf);
5163 }
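
/*
 * Illustrative userspace usage (not kernel code): atomically pick up
 * a detached, attribute-adjusted copy of a tree, e.g. an idmapped
 * clone of "/data", assuming a user namespace fd in @userns_fd:
 *
 *	struct mount_attr attr = {
 *		.attr_set = MOUNT_ATTR_IDMAP,
 *		.userns_fd = userns_fd,
 *	};
 *	int mfd = syscall(SYS_open_tree_attr, AT_FDCWD, "/data",
 *			  OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC,
 *			  &attr, sizeof(attr));
 */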
5164
5165 int show_path(struct seq_file *m, struct dentry *root)
5166 {
5167 if (root->d_sb->s_op->show_path)
5168 return root->d_sb->s_op->show_path(m, root);
5169
5170 seq_dentry(m, root, " \t\n\\");
5171 return 0;
5172 }
5173
5174 static struct vfsmount *lookup_mnt_in_ns(u64 id, struct mnt_namespace *ns)
5175 {
5176 struct mount *mnt = mnt_find_id_at(ns, id);
5177
5178 if (!mnt || mnt->mnt_id_unique != id)
5179 return NULL;
5180
5181 return &mnt->mnt;
5182 }
5183
5184 struct kstatmount {
5185 struct statmount __user *buf;
5186 size_t bufsize;
5187 struct vfsmount *mnt;
5188 struct mnt_idmap *idmap;
5189 u64 mask;
5190 struct path root;
5191 struct seq_file seq;
5192
5193 /* Must be last -- ends in a flexible-array member. */
5194 struct statmount sm;
5195 };
5196
5197 static u64 mnt_to_attr_flags(struct vfsmount *mnt)
5198 {
5199 unsigned int mnt_flags = READ_ONCE(mnt->mnt_flags);
5200 u64 attr_flags = 0;
5201
5202 if (mnt_flags & MNT_READONLY)
5203 attr_flags |= MOUNT_ATTR_RDONLY;
5204 if (mnt_flags & MNT_NOSUID)
5205 attr_flags |= MOUNT_ATTR_NOSUID;
5206 if (mnt_flags & MNT_NODEV)
5207 attr_flags |= MOUNT_ATTR_NODEV;
5208 if (mnt_flags & MNT_NOEXEC)
5209 attr_flags |= MOUNT_ATTR_NOEXEC;
5210 if (mnt_flags & MNT_NODIRATIME)
5211 attr_flags |= MOUNT_ATTR_NODIRATIME;
5212 if (mnt_flags & MNT_NOSYMFOLLOW)
5213 attr_flags |= MOUNT_ATTR_NOSYMFOLLOW;
5214
5215 if (mnt_flags & MNT_NOATIME)
5216 attr_flags |= MOUNT_ATTR_NOATIME;
5217 else if (mnt_flags & MNT_RELATIME)
5218 attr_flags |= MOUNT_ATTR_RELATIME;
5219 else
5220 attr_flags |= MOUNT_ATTR_STRICTATIME;
5221
5222 if (is_idmapped_mnt(mnt))
5223 attr_flags |= MOUNT_ATTR_IDMAP;
5224
5225 return attr_flags;
5226 }
5227
5228 static u64 mnt_to_propagation_flags(struct mount *m)
5229 {
5230 u64 propagation = 0;
5231
5232 if (IS_MNT_SHARED(m))
5233 propagation |= MS_SHARED;
5234 if (IS_MNT_SLAVE(m))
5235 propagation |= MS_SLAVE;
5236 if (IS_MNT_UNBINDABLE(m))
5237 propagation |= MS_UNBINDABLE;
5238 if (!propagation)
5239 propagation |= MS_PRIVATE;
5240
5241 return propagation;
5242 }
5243
5244 u64 vfsmount_to_propagation_flags(struct vfsmount *mnt)
5245 {
5246 return mnt_to_propagation_flags(real_mount(mnt));
5247 }
5248 EXPORT_SYMBOL_GPL(vfsmount_to_propagation_flags);
5249
5250 static void statmount_sb_basic(struct kstatmount *s)
5251 {
5252 struct super_block *sb = s->mnt->mnt_sb;
5253
5254 s->sm.mask |= STATMOUNT_SB_BASIC;
5255 s->sm.sb_dev_major = MAJOR(sb->s_dev);
5256 s->sm.sb_dev_minor = MINOR(sb->s_dev);
5257 s->sm.sb_magic = sb->s_magic;
5258 s->sm.sb_flags = sb->s_flags & (SB_RDONLY|SB_SYNCHRONOUS|SB_DIRSYNC|SB_LAZYTIME);
5259 }
5260
5261 static void statmount_mnt_basic(struct kstatmount *s)
5262 {
5263 struct mount *m = real_mount(s->mnt);
5264
5265 s->sm.mask |= STATMOUNT_MNT_BASIC;
5266 s->sm.mnt_id = m->mnt_id_unique;
5267 s->sm.mnt_parent_id = m->mnt_parent->mnt_id_unique;
5268 s->sm.mnt_id_old = m->mnt_id;
5269 s->sm.mnt_parent_id_old = m->mnt_parent->mnt_id;
5270 s->sm.mnt_attr = mnt_to_attr_flags(&m->mnt);
5271 s->sm.mnt_propagation = mnt_to_propagation_flags(m);
5272 s->sm.mnt_peer_group = m->mnt_group_id;
5273 s->sm.mnt_master = IS_MNT_SLAVE(m) ? m->mnt_master->mnt_group_id : 0;
5274 }
5275
5276 static void statmount_propagate_from(struct kstatmount *s)
5277 {
5278 struct mount *m = real_mount(s->mnt);
5279
5280 s->sm.mask |= STATMOUNT_PROPAGATE_FROM;
5281 if (IS_MNT_SLAVE(m))
5282 s->sm.propagate_from = get_dominating_id(m, &current->fs->root);
5283 }
5284
5285 static int statmount_mnt_root(struct kstatmount *s, struct seq_file *seq)
5286 {
5287 int ret;
5288 size_t start = seq->count;
5289
5290 ret = show_path(seq, s->mnt->mnt_root);
5291 if (ret)
5292 return ret;
5293
5294 if (unlikely(seq_has_overflowed(seq)))
5295 return -EAGAIN;
5296
5297 /*
5298 * Unescape the result. It would be better if the supplied string were not
5299 * escaped in the first place, but that's a pretty invasive change.
5300 */
5301 seq->buf[seq->count] = '\0';
5302 seq->count = start;
5303 seq_commit(seq, string_unescape_inplace(seq->buf + start, UNESCAPE_OCTAL));
5304 return 0;
5305 }
5306
5307 static int statmount_mnt_point(struct kstatmount *s, struct seq_file *seq)
5308 {
5309 struct vfsmount *mnt = s->mnt;
5310 struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
5311 int err;
5312
5313 err = seq_path_root(seq, &mnt_path, &s->root, "");
5314 return err == SEQ_SKIP ? 0 : err;
5315 }
5316
5317 static int statmount_fs_type(struct kstatmount *s, struct seq_file *seq)
5318 {
5319 struct super_block *sb = s->mnt->mnt_sb;
5320
5321 seq_puts(seq, sb->s_type->name);
5322 return 0;
5323 }
5324
5325 static void statmount_fs_subtype(struct kstatmount *s, struct seq_file *seq)
5326 {
5327 struct super_block *sb = s->mnt->mnt_sb;
5328
5329 if (sb->s_subtype)
5330 seq_puts(seq, sb->s_subtype);
5331 }
5332
5333 static int statmount_sb_source(struct kstatmount *s, struct seq_file *seq)
5334 {
5335 struct super_block *sb = s->mnt->mnt_sb;
5336 struct mount *r = real_mount(s->mnt);
5337
5338 if (sb->s_op->show_devname) {
5339 size_t start = seq->count;
5340 int ret;
5341
5342 ret = sb->s_op->show_devname(seq, s->mnt->mnt_root);
5343 if (ret)
5344 return ret;
5345
5346 if (unlikely(seq_has_overflowed(seq)))
5347 return -EAGAIN;
5348
5349 /* Unescape the result */
5350 seq->buf[seq->count] = '\0';
5351 seq->count = start;
5352 seq_commit(seq, string_unescape_inplace(seq->buf + start, UNESCAPE_OCTAL));
5353 } else {
5354 seq_puts(seq, r->mnt_devname);
5355 }
5356 return 0;
5357 }
5358
5359 static void statmount_mnt_ns_id(struct kstatmount *s, struct mnt_namespace *ns)
5360 {
5361 s->sm.mask |= STATMOUNT_MNT_NS_ID;
5362 s->sm.mnt_ns_id = ns->ns.ns_id;
5363 }
5364
5365 static int statmount_mnt_opts(struct kstatmount *s, struct seq_file *seq)
5366 {
5367 struct vfsmount *mnt = s->mnt;
5368 struct super_block *sb = mnt->mnt_sb;
5369 size_t start = seq->count;
5370 int err;
5371
5372 err = security_sb_show_options(seq, sb);
5373 if (err)
5374 return err;
5375
5376 if (sb->s_op->show_options) {
5377 err = sb->s_op->show_options(seq, mnt->mnt_root);
5378 if (err)
5379 return err;
5380 }
5381
5382 if (unlikely(seq_has_overflowed(seq)))
5383 return -EAGAIN;
5384
5385 if (seq->count == start)
5386 return 0;
5387
5388 /* skip leading comma */
5389 memmove(seq->buf + start, seq->buf + start + 1,
5390 seq->count - start - 1);
5391 seq->count--;
5392
5393 return 0;
5394 }
5395
5396 static inline int statmount_opt_process(struct seq_file *seq, size_t start)
5397 {
5398 char *buf_end, *opt_end, *src, *dst;
5399 int count = 0;
5400
5401 if (unlikely(seq_has_overflowed(seq)))
5402 return -EAGAIN;
5403
5404 buf_end = seq->buf + seq->count;
5405 dst = seq->buf + start;
5406 src = dst + 1; /* skip initial comma */
5407
5408 if (src >= buf_end) {
5409 seq->count = start;
5410 return 0;
5411 }
5412
5413 *buf_end = '\0';
5414 for (; src < buf_end; src = opt_end + 1) {
5415 opt_end = strchrnul(src, ',');
5416 *opt_end = '\0';
5417 dst += string_unescape(src, dst, 0, UNESCAPE_OCTAL) + 1;
5418 if (WARN_ON_ONCE(++count == INT_MAX))
5419 return -EOVERFLOW;
5420 }
5421 seq->count = dst - 1 - seq->buf;
5422 return count;
5423 }
5424
5425 static int statmount_opt_array(struct kstatmount *s, struct seq_file *seq)
5426 {
5427 struct vfsmount *mnt = s->mnt;
5428 struct super_block *sb = mnt->mnt_sb;
5429 size_t start = seq->count;
5430 int err;
5431
5432 if (!sb->s_op->show_options)
5433 return 0;
5434
5435 err = sb->s_op->show_options(seq, mnt->mnt_root);
5436 if (err)
5437 return err;
5438
5439 err = statmount_opt_process(seq, start);
5440 if (err < 0)
5441 return err;
5442
5443 s->sm.opt_num = err;
5444 return 0;
5445 }
5446
5447 static int statmount_opt_sec_array(struct kstatmount *s, struct seq_file *seq)
5448 {
5449 struct vfsmount *mnt = s->mnt;
5450 struct super_block *sb = mnt->mnt_sb;
5451 size_t start = seq->count;
5452 int err;
5453
5454 err = security_sb_show_options(seq, sb);
5455 if (err)
5456 return err;
5457
5458 err = statmount_opt_process(seq, start);
5459 if (err < 0)
5460 return err;
5461
5462 s->sm.opt_sec_num = err;
5463 return 0;
5464 }
5465
5466 static inline int statmount_mnt_uidmap(struct kstatmount *s, struct seq_file *seq)
5467 {
5468 int ret;
5469
5470 ret = statmount_mnt_idmap(s->idmap, seq, true);
5471 if (ret < 0)
5472 return ret;
5473
5474 s->sm.mnt_uidmap_num = ret;
5475 /*
5476 * Always raise STATMOUNT_MNT_UIDMAP even if there are no valid
5477 * mappings. This allows userspace to distinguish between a
5478 * non-idmapped mount and an idmapped mount where none of the
5479 * individual mappings are valid in the caller's idmapping.
5480 */
5481 if (is_valid_mnt_idmap(s->idmap))
5482 s->sm.mask |= STATMOUNT_MNT_UIDMAP;
5483 return 0;
5484 }
5485
5486 static inline int statmount_mnt_gidmap(struct kstatmount *s, struct seq_file *seq)
5487 {
5488 int ret;
5489
5490 ret = statmount_mnt_idmap(s->idmap, seq, false);
5491 if (ret < 0)
5492 return ret;
5493
5494 s->sm.mnt_gidmap_num = ret;
5495 /*
5496 * Always raise STATMOUNT_MNT_GIDMAP even if there are no valid
5497 * mappings. This allows userspace to distinguish between a
5498 * non-idmapped mount and an idmapped mount where none of the
5499 * individual mappings are valid in the caller's idmapping.
5500 */
5501 if (is_valid_mnt_idmap(s->idmap))
5502 s->sm.mask |= STATMOUNT_MNT_GIDMAP;
5503 return 0;
5504 }
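/*
 * Userspace view this enables (illustrative sketch; the exact format of
 * the returned mapping strings is left out here):
 *
 *	if (!(sm->mask & STATMOUNT_MNT_GIDMAP))
 *		;	// not an idmapped mount
 *	else if (sm->mnt_gidmap_num == 0)
 *		;	// idmapped, but nothing maps into the caller's idmapping
 *	else
 *		;	// sm->mnt_gidmap_num mapping strings were returned
 */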
5505
5506 static int statmount_string(struct kstatmount *s, u64 flag)
5507 {
5508 int ret = 0;
5509 size_t kbufsize;
5510 struct seq_file *seq = &s->seq;
5511 struct statmount *sm = &s->sm;
5512 u32 start, *offp;
5513
5514 /* Reserve an empty string at the beginning for any unset offsets */
5515 if (!seq->count)
5516 seq_putc(seq, 0);
5517
5518 start = seq->count;
5519
5520 switch (flag) {
5521 case STATMOUNT_FS_TYPE:
5522 offp = &sm->fs_type;
5523 ret = statmount_fs_type(s, seq);
5524 break;
5525 case STATMOUNT_MNT_ROOT:
5526 offp = &sm->mnt_root;
5527 ret = statmount_mnt_root(s, seq);
5528 break;
5529 case STATMOUNT_MNT_POINT:
5530 offp = &sm->mnt_point;
5531 ret = statmount_mnt_point(s, seq);
5532 break;
5533 case STATMOUNT_MNT_OPTS:
5534 offp = &sm->mnt_opts;
5535 ret = statmount_mnt_opts(s, seq);
5536 break;
5537 case STATMOUNT_OPT_ARRAY:
5538 offp = &sm->opt_array;
5539 ret = statmount_opt_array(s, seq);
5540 break;
5541 case STATMOUNT_OPT_SEC_ARRAY:
5542 offp = &sm->opt_sec_array;
5543 ret = statmount_opt_sec_array(s, seq);
5544 break;
5545 case STATMOUNT_FS_SUBTYPE:
5546 offp = &sm->fs_subtype;
5547 statmount_fs_subtype(s, seq);
5548 break;
5549 case STATMOUNT_SB_SOURCE:
5550 offp = &sm->sb_source;
5551 ret = statmount_sb_source(s, seq);
5552 break;
5553 case STATMOUNT_MNT_UIDMAP:
5554 offp = &sm->mnt_uidmap;
5555 ret = statmount_mnt_uidmap(s, seq);
5556 break;
5557 case STATMOUNT_MNT_GIDMAP:
5558 offp = &sm->mnt_gidmap;
5559 ret = statmount_mnt_gidmap(s, seq);
5560 break;
5561 default:
5562 WARN_ON_ONCE(true);
5563 return -EINVAL;
5564 }
5565
5566 /*
5567 * If nothing was emitted, return to avoid setting the flag
5568 * and terminating the buffer.
5569 */
5570 if (seq->count == start)
5571 return ret;
5572 if (unlikely(check_add_overflow(sizeof(*sm), seq->count, &kbufsize)))
5573 return -EOVERFLOW;
5574 if (kbufsize >= s->bufsize)
5575 return -EOVERFLOW;
5576
5577 /* signal a retry */
5578 if (unlikely(seq_has_overflowed(seq)))
5579 return -EAGAIN;
5580
5581 if (ret)
5582 return ret;
5583
5584 seq->buf[seq->count++] = '\0';
5585 sm->mask |= flag;
5586 *offp = start;
5587 return 0;
5588 }
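/*
 * The resulting buffer layout, as seen by userspace (illustrative
 * sketch; assumes the uapi flexible array member is named str[]): the
 * fixed struct statmount is followed by a string table, and each offset
 * stored via @offp above indexes into that table. Offset 0 is the
 * reserved empty string, so fields that were never set read as "":
 *
 *	const struct statmount *sm = buf;
 *
 *	if (sm->mask & STATMOUNT_FS_TYPE)
 *		printf("%s\n", sm->str + sm->fs_type);
 */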
5589
5590 static int copy_statmount_to_user(struct kstatmount *s)
5591 {
5592 struct statmount *sm = &s->sm;
5593 struct seq_file *seq = &s->seq;
5594 char __user *str = ((char __user *)s->buf) + sizeof(*sm);
5595 size_t copysize = min_t(size_t, s->bufsize, sizeof(*sm));
5596
5597 if (seq->count && copy_to_user(str, seq->buf, seq->count))
5598 return -EFAULT;
5599
5600 /* Return the number of bytes copied to the buffer */
5601 sm->size = copysize + seq->count;
5602 if (copy_to_user(s->buf, sm, copysize))
5603 return -EFAULT;
5604
5605 return 0;
5606 }
5607
5608 static struct mount *listmnt_next(struct mount *curr, bool reverse)
5609 {
5610 struct rb_node *node;
5611
5612 if (reverse)
5613 node = rb_prev(&curr->mnt_node);
5614 else
5615 node = rb_next(&curr->mnt_node);
5616
5617 return node_to_mount(node);
5618 }
5619
5620 static int grab_requested_root(struct mnt_namespace *ns, struct path *root)
5621 {
5622 struct mount *first, *child;
5623
5624 rwsem_assert_held(&namespace_sem);
5625
5626 /* We're looking at our own ns, just use get_fs_root. */
5627 if (ns == current->nsproxy->mnt_ns) {
5628 get_fs_root(current->fs, root);
5629 return 0;
5630 }
5631
5632 /*
5633 * We have to find the first mount in our ns and use that; however, it
5634 * may not exist, so handle that properly.
5635 */
5636 if (mnt_ns_empty(ns))
5637 return -ENOENT;
5638
5639 first = child = ns->root;
5640 for (;;) {
5641 child = listmnt_next(child, false);
5642 if (!child)
5643 return -ENOENT;
5644 if (child->mnt_parent == first)
5645 break;
5646 }
5647
5648 root->mnt = mntget(&child->mnt);
5649 root->dentry = dget(root->mnt->mnt_root);
5650 return 0;
5651 }
5652
5653 /* This must be updated whenever a new flag is added */
5654 #define STATMOUNT_SUPPORTED (STATMOUNT_SB_BASIC | \
5655 STATMOUNT_MNT_BASIC | \
5656 STATMOUNT_PROPAGATE_FROM | \
5657 STATMOUNT_MNT_ROOT | \
5658 STATMOUNT_MNT_POINT | \
5659 STATMOUNT_FS_TYPE | \
5660 STATMOUNT_MNT_NS_ID | \
5661 STATMOUNT_MNT_OPTS | \
5662 STATMOUNT_FS_SUBTYPE | \
5663 STATMOUNT_SB_SOURCE | \
5664 STATMOUNT_OPT_ARRAY | \
5665 STATMOUNT_OPT_SEC_ARRAY | \
5666 STATMOUNT_SUPPORTED_MASK | \
5667 STATMOUNT_MNT_UIDMAP | \
5668 STATMOUNT_MNT_GIDMAP)
5669
5670 /* locks: namespace_shared */
5671 static int do_statmount(struct kstatmount *s, u64 mnt_id, u64 mnt_ns_id,
5672 struct file *mnt_file, struct mnt_namespace *ns)
5673 {
5674 int err;
5675
5676 if (mnt_file) {
5677 WARN_ON_ONCE(ns != NULL);
5678
5679 s->mnt = mnt_file->f_path.mnt;
5680 ns = real_mount(s->mnt)->mnt_ns;
5681 if (!ns)
5682 /*
5683 * We can't set mount point and mnt_ns_id since we don't have a
5684 * ns for the mount. This can happen if the mount is unmounted
5685 * with MNT_DETACH.
5686 */
5687 s->mask &= ~(STATMOUNT_MNT_POINT | STATMOUNT_MNT_NS_ID);
5688 } else {
5689 /* Has the namespace already been emptied? */
5690 if (mnt_ns_id && mnt_ns_empty(ns))
5691 return -ENOENT;
5692
5693 s->mnt = lookup_mnt_in_ns(mnt_id, ns);
5694 if (!s->mnt)
5695 return -ENOENT;
5696 }
5697
5698 if (ns) {
5699 err = grab_requested_root(ns, &s->root);
5700 if (err)
5701 return err;
5702
5703 if (!mnt_file) {
5704 struct mount *m;
5705 /*
5706 * Don't trigger audit denials. We just want to determine what
5707 * mounts to show users.
5708 */
5709 m = real_mount(s->mnt);
5710 if (!is_path_reachable(m, m->mnt.mnt_root, &s->root) &&
5711 !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN))
5712 return -EPERM;
5713 }
5714 }
5715
5716 err = security_sb_statfs(s->mnt->mnt_root);
5717 if (err)
5718 return err;
5719
5720 /*
5721 * Note that mount properties in mnt->mnt_flags, mnt->mnt_idmap
5722 * can change concurrently as we only hold the read-side of the
5723 * namespace semaphore and mount properties may change with only
5724 * the mount lock held.
5725 *
5726 * We could sample the mount lock sequence counter to detect
5727 * those changes and retry. But it's not worth it. Worst that
5728 * happens is that the mnt->mnt_idmap pointer is already changed
5729 * while mnt->mnt_flags isn't or vice versa. So what.
5730 *
5731 * Both mnt->mnt_flags and mnt->mnt_idmap are set and retrieved
5732 * via READ_ONCE()/WRITE_ONCE() and guard against theoretical
5733 * torn read/write. That's all we care about right now.
5734 */
5735 s->idmap = mnt_idmap(s->mnt);
5736 if (s->mask & STATMOUNT_MNT_BASIC)
5737 statmount_mnt_basic(s);
5738
5739 if (s->mask & STATMOUNT_SB_BASIC)
5740 statmount_sb_basic(s);
5741
5742 if (s->mask & STATMOUNT_PROPAGATE_FROM)
5743 statmount_propagate_from(s);
5744
5745 if (s->mask & STATMOUNT_FS_TYPE)
5746 err = statmount_string(s, STATMOUNT_FS_TYPE);
5747
5748 if (!err && s->mask & STATMOUNT_MNT_ROOT)
5749 err = statmount_string(s, STATMOUNT_MNT_ROOT);
5750
5751 if (!err && s->mask & STATMOUNT_MNT_POINT)
5752 err = statmount_string(s, STATMOUNT_MNT_POINT);
5753
5754 if (!err && s->mask & STATMOUNT_MNT_OPTS)
5755 err = statmount_string(s, STATMOUNT_MNT_OPTS);
5756
5757 if (!err && s->mask & STATMOUNT_OPT_ARRAY)
5758 err = statmount_string(s, STATMOUNT_OPT_ARRAY);
5759
5760 if (!err && s->mask & STATMOUNT_OPT_SEC_ARRAY)
5761 err = statmount_string(s, STATMOUNT_OPT_SEC_ARRAY);
5762
5763 if (!err && s->mask & STATMOUNT_FS_SUBTYPE)
5764 err = statmount_string(s, STATMOUNT_FS_SUBTYPE);
5765
5766 if (!err && s->mask & STATMOUNT_SB_SOURCE)
5767 err = statmount_string(s, STATMOUNT_SB_SOURCE);
5768
5769 if (!err && s->mask & STATMOUNT_MNT_UIDMAP)
5770 err = statmount_string(s, STATMOUNT_MNT_UIDMAP);
5771
5772 if (!err && s->mask & STATMOUNT_MNT_GIDMAP)
5773 err = statmount_string(s, STATMOUNT_MNT_GIDMAP);
5774
5775 if (!err && s->mask & STATMOUNT_MNT_NS_ID)
5776 statmount_mnt_ns_id(s, ns);
5777
5778 if (!err && s->mask & STATMOUNT_SUPPORTED_MASK) {
5779 s->sm.mask |= STATMOUNT_SUPPORTED_MASK;
5780 s->sm.supported_mask = STATMOUNT_SUPPORTED;
5781 }
5782
5783 if (err)
5784 return err;
5785
5786 /* Are there bits in the return mask not present in STATMOUNT_SUPPORTED? */
5787 WARN_ON_ONCE(~STATMOUNT_SUPPORTED & s->sm.mask);
5788
5789 return 0;
5790 }
5791
5792 static inline bool retry_statmount(const long ret, size_t *seq_size)
5793 {
5794 if (likely(ret != -EAGAIN))
5795 return false;
5796 if (unlikely(check_mul_overflow(*seq_size, 2, seq_size)))
5797 return false;
5798 if (unlikely(*seq_size > MAX_RW_COUNT))
5799 return false;
5800 return true;
5801 }
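/*
 * Worked example (illustrative): statmount() starts with a seq buffer
 * of 3 * PATH_MAX (12k) bytes. Each -EAGAIN doubles it, 12k -> 24k ->
 * 48k -> ..., until either the strings fit or the next size would
 * exceed MAX_RW_COUNT, at which point the -EAGAIN is passed through to
 * userspace.
 */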
5802
5803 #define STATMOUNT_STRING_REQ (STATMOUNT_MNT_ROOT | STATMOUNT_MNT_POINT | \
5804 STATMOUNT_FS_TYPE | STATMOUNT_MNT_OPTS | \
5805 STATMOUNT_FS_SUBTYPE | STATMOUNT_SB_SOURCE | \
5806 STATMOUNT_OPT_ARRAY | STATMOUNT_OPT_SEC_ARRAY | \
5807 STATMOUNT_MNT_UIDMAP | STATMOUNT_MNT_GIDMAP)
5808
5809 static int prepare_kstatmount(struct kstatmount *ks, struct mnt_id_req *kreq,
5810 struct statmount __user *buf, size_t bufsize,
5811 size_t seq_size)
5812 {
5813 if (!access_ok(buf, bufsize))
5814 return -EFAULT;
5815
5816 memset(ks, 0, sizeof(*ks));
5817 ks->mask = kreq->param;
5818 ks->buf = buf;
5819 ks->bufsize = bufsize;
5820
5821 if (ks->mask & STATMOUNT_STRING_REQ) {
5822 if (bufsize == sizeof(ks->sm))
5823 return -EOVERFLOW;
5824
5825 ks->seq.buf = kvmalloc(seq_size, GFP_KERNEL_ACCOUNT);
5826 if (!ks->seq.buf)
5827 return -ENOMEM;
5828
5829 ks->seq.size = seq_size;
5830 }
5831
5832 return 0;
5833 }
5834
5835 static int copy_mnt_id_req(const struct mnt_id_req __user *req,
5836 struct mnt_id_req *kreq, unsigned int flags)
5837 {
5838 int ret;
5839 size_t usize;
5840
5841 BUILD_BUG_ON(sizeof(struct mnt_id_req) != MNT_ID_REQ_SIZE_VER1);
5842
5843 ret = get_user(usize, &req->size);
5844 if (ret)
5845 return -EFAULT;
5846 if (unlikely(usize > PAGE_SIZE))
5847 return -E2BIG;
5848 if (unlikely(usize < MNT_ID_REQ_SIZE_VER0))
5849 return -EINVAL;
5850 memset(kreq, 0, sizeof(*kreq));
5851 ret = copy_struct_from_user(kreq, sizeof(*kreq), req, usize);
5852 if (ret)
5853 return ret;
5854
5855 if (flags & STATMOUNT_BY_FD) {
5856 if (kreq->mnt_id || kreq->mnt_ns_id)
5857 return -EINVAL;
5858 } else {
5859 if (kreq->mnt_ns_fd != 0 && kreq->mnt_ns_id)
5860 return -EINVAL;
5861 /* The first valid unique mount id is MNT_UNIQUE_ID_OFFSET + 1. */
5862 if (kreq->mnt_id <= MNT_UNIQUE_ID_OFFSET)
5863 return -EINVAL;
5864 }
5865 return 0;
5866 }
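/*
 * A minimal request as userspace might build it (illustrative),
 * relying on the copy_struct_from_user() extensibility rules applied
 * above: an older, smaller struct is zero-extended, a newer, larger
 * one is only accepted if the tail the kernel doesn't know about is
 * all zeroes:
 *
 *	struct mnt_id_req req = {
 *		.size	= MNT_ID_REQ_SIZE_VER0,
 *		.mnt_id	= mnt_id,	// unique id, > MNT_UNIQUE_ID_OFFSET
 *		.param	= STATMOUNT_SB_BASIC | STATMOUNT_MNT_BASIC,
 *	};
 */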
5867
5868 /*
5869 * If the user requested a specific mount namespace id, look it up and
5870 * return that; otherwise simply grab a passive reference on our mount
5871 * namespace and return that.
5872 */
5873 static struct mnt_namespace *grab_requested_mnt_ns(const struct mnt_id_req *kreq)
5874 {
5875 struct mnt_namespace *mnt_ns;
5876
5877 if (kreq->mnt_ns_id) {
5878 mnt_ns = lookup_mnt_ns(kreq->mnt_ns_id);
5879 if (!mnt_ns)
5880 return ERR_PTR(-ENOENT);
5881 } else if (kreq->mnt_ns_fd) {
5882 struct ns_common *ns;
5883
5884 CLASS(fd, f)(kreq->mnt_ns_fd);
5885 if (fd_empty(f))
5886 return ERR_PTR(-EBADF);
5887
5888 if (!proc_ns_file(fd_file(f)))
5889 return ERR_PTR(-EINVAL);
5890
5891 ns = get_proc_ns(file_inode(fd_file(f)));
5892 if (ns->ns_type != CLONE_NEWNS)
5893 return ERR_PTR(-EINVAL);
5894
5895 mnt_ns = to_mnt_ns(ns);
5896 refcount_inc(&mnt_ns->passive);
5897 } else {
5898 mnt_ns = current->nsproxy->mnt_ns;
5899 refcount_inc(&mnt_ns->passive);
5900 }
5901
5902 return mnt_ns;
5903 }
5904
5905 SYSCALL_DEFINE4(statmount, const struct mnt_id_req __user *, req,
5906 struct statmount __user *, buf, size_t, bufsize,
5907 unsigned int, flags)
5908 {
5909 struct mnt_namespace *ns __free(mnt_ns_release) = NULL;
5910 struct kstatmount *ks __free(kfree) = NULL;
5911 struct file *mnt_file __free(fput) = NULL;
5912 struct mnt_id_req kreq;
5913 /* We currently support retrieval of 3 strings. */
5914 size_t seq_size = 3 * PATH_MAX;
5915 int ret;
5916
5917 if (flags & ~STATMOUNT_BY_FD)
5918 return -EINVAL;
5919
5920 ret = copy_mnt_id_req(req, &kreq, flags);
5921 if (ret)
5922 return ret;
5923
5924 if (flags & STATMOUNT_BY_FD) {
5925 mnt_file = fget_raw(kreq.mnt_fd);
5926 if (!mnt_file)
5927 return -EBADF;
5928 /* do_statmount sets ns in case of STATMOUNT_BY_FD */
5929 } else {
5930 ns = grab_requested_mnt_ns(&kreq);
5931 if (IS_ERR(ns))
5932 return PTR_ERR(ns);
5933
5934 if (kreq.mnt_ns_id && (ns != current->nsproxy->mnt_ns) &&
5935 !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN))
5936 return -EPERM;
5937 }
5938
5939 ks = kmalloc(sizeof(*ks), GFP_KERNEL_ACCOUNT);
5940 if (!ks)
5941 return -ENOMEM;
5942
5943 retry:
5944 ret = prepare_kstatmount(ks, &kreq, buf, bufsize, seq_size);
5945 if (ret)
5946 return ret;
5947
5948 scoped_guard(namespace_shared)
5949 ret = do_statmount(ks, kreq.mnt_id, kreq.mnt_ns_id, mnt_file, ns);
5950
5951 if (!ret)
5952 ret = copy_statmount_to_user(ks);
5953 kvfree(ks->seq.buf);
5954 path_put(&ks->root);
5955 if (retry_statmount(ret, &seq_size))
5956 goto retry;
5957 return ret;
5958 }
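/*
 * End-to-end usage sketch (illustrative, error handling elided),
 * invoked via the raw syscall:
 *
 *	struct mnt_id_req req = {
 *		.size	= MNT_ID_REQ_SIZE_VER0,
 *		.mnt_id	= mnt_id,
 *		.param	= STATMOUNT_MNT_ROOT | STATMOUNT_MNT_POINT,
 *	};
 *	char buf[1 << 16];
 *
 *	if (syscall(SYS_statmount, &req, buf, sizeof(buf), 0) == 0) {
 *		const struct statmount *sm = (void *)buf;
 *		// sm->mask says which requested fields were actually filled
 *	}
 */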
5959
5960 struct klistmount {
5961 u64 last_mnt_id;
5962 u64 mnt_parent_id;
5963 u64 *kmnt_ids;
5964 u32 nr_mnt_ids;
5965 struct mnt_namespace *ns;
5966 struct path root;
5967 };
5968
5969 /* locks: namespace_shared */
5970 static ssize_t do_listmount(struct klistmount *kls, bool reverse)
5971 {
5972 struct mnt_namespace *ns = kls->ns;
5973 u64 mnt_parent_id = kls->mnt_parent_id;
5974 u64 last_mnt_id = kls->last_mnt_id;
5975 u64 *mnt_ids = kls->kmnt_ids;
5976 size_t nr_mnt_ids = kls->nr_mnt_ids;
5977 struct path orig;
5978 struct mount *r, *first;
5979 ssize_t ret;
5980
5981 rwsem_assert_held(&namespace_sem);
5982
5983 ret = grab_requested_root(ns, &kls->root);
5984 if (ret)
5985 return ret;
5986
5987 if (mnt_parent_id == LSMT_ROOT) {
5988 orig = kls->root;
5989 } else {
5990 orig.mnt = lookup_mnt_in_ns(mnt_parent_id, ns);
5991 if (!orig.mnt)
5992 return -ENOENT;
5993 orig.dentry = orig.mnt->mnt_root;
5994 }
5995
5996 /*
5997 * Don't trigger audit denials. We just want to determine what
5998 * mounts to show users.
5999 */
6000 if (!is_path_reachable(real_mount(orig.mnt), orig.dentry, &kls->root) &&
6001 !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN))
6002 return -EPERM;
6003
6004 ret = security_sb_statfs(orig.dentry);
6005 if (ret)
6006 return ret;
6007
6008 if (!last_mnt_id) {
6009 if (reverse)
6010 first = node_to_mount(ns->mnt_last_node);
6011 else
6012 first = node_to_mount(ns->mnt_first_node);
6013 } else {
6014 if (reverse)
6015 first = mnt_find_id_at_reverse(ns, last_mnt_id - 1);
6016 else
6017 first = mnt_find_id_at(ns, last_mnt_id + 1);
6018 }
6019
6020 for (ret = 0, r = first; r && nr_mnt_ids; r = listmnt_next(r, reverse)) {
6021 if (r->mnt_id_unique == mnt_parent_id)
6022 continue;
6023 if (!is_path_reachable(r, r->mnt.mnt_root, &orig))
6024 continue;
6025 *mnt_ids = r->mnt_id_unique;
6026 mnt_ids++;
6027 nr_mnt_ids--;
6028 ret++;
6029 }
6030 return ret;
6031 }
6032
6033 static void __free_klistmount_free(const struct klistmount *kls)
6034 {
6035 path_put(&kls->root);
6036 kvfree(kls->kmnt_ids);
6037 mnt_ns_release(kls->ns);
6038 }
6039
6040 static inline int prepare_klistmount(struct klistmount *kls, struct mnt_id_req *kreq,
6041 size_t nr_mnt_ids)
6042 {
6043 u64 last_mnt_id = kreq->param;
6044 struct mnt_namespace *ns;
6045
6046 /* The first valid unique mount id is MNT_UNIQUE_ID_OFFSET + 1. */
6047 if (last_mnt_id != 0 && last_mnt_id <= MNT_UNIQUE_ID_OFFSET)
6048 return -EINVAL;
6049
6050 kls->last_mnt_id = last_mnt_id;
6051
6052 kls->nr_mnt_ids = nr_mnt_ids;
6053 kls->kmnt_ids = kvmalloc_array(nr_mnt_ids, sizeof(*kls->kmnt_ids),
6054 GFP_KERNEL_ACCOUNT);
6055 if (!kls->kmnt_ids)
6056 return -ENOMEM;
6057
6058 ns = grab_requested_mnt_ns(kreq);
6059 if (IS_ERR(ns))
6060 return PTR_ERR(ns);
6061 kls->ns = ns;
6062
6063 kls->mnt_parent_id = kreq->mnt_id;
6064 return 0;
6065 }
6066
6067 SYSCALL_DEFINE4(listmount, const struct mnt_id_req __user *, req,
6068 u64 __user *, mnt_ids, size_t, nr_mnt_ids, unsigned int, flags)
6069 {
6070 struct klistmount kls __free(klistmount_free) = {};
6071 const size_t maxcount = 1000000;
6072 struct mnt_id_req kreq;
6073 ssize_t ret;
6074
6075 if (flags & ~LISTMOUNT_REVERSE)
6076 return -EINVAL;
6077
6078 /*
6079 * If the mount namespace really has more than 1 million mounts the
6080 * caller must iterate over the mount namespace (and reconsider their
6081 * system design...).
6082 */
6083 if (unlikely(nr_mnt_ids > maxcount))
6084 return -EOVERFLOW;
6085
6086 if (!access_ok(mnt_ids, nr_mnt_ids * sizeof(*mnt_ids)))
6087 return -EFAULT;
6088
6089 ret = copy_mnt_id_req(req, &kreq, 0);
6090 if (ret)
6091 return ret;
6092
6093 ret = prepare_klistmount(&kls, &kreq, nr_mnt_ids);
6094 if (ret)
6095 return ret;
6096
6097 if (kreq.mnt_ns_id && (kls.ns != current->nsproxy->mnt_ns) &&
6098 !ns_capable_noaudit(kls.ns->user_ns, CAP_SYS_ADMIN))
6099 return -ENOENT;
6100
6101 /*
6102 * We only need to guard against mount topology changes as
6103 * listmount() doesn't care about any mount properties.
6104 */
6105 scoped_guard(namespace_shared)
6106 ret = do_listmount(&kls, (flags & LISTMOUNT_REVERSE));
6107 if (ret <= 0)
6108 return ret;
6109
6110 if (copy_to_user(mnt_ids, kls.kmnt_ids, ret * sizeof(*mnt_ids)))
6111 return -EFAULT;
6112
6113 return ret;
6114 }
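/*
 * Iteration sketch (illustrative): userspace pages through a mount
 * namespace by feeding the last id it saw back in via req.param:
 *
 *	uint64_t ids[256];
 *	struct mnt_id_req req = {
 *		.size	= MNT_ID_REQ_SIZE_VER0,
 *		.mnt_id	= LSMT_ROOT,	// everything reachable from the root
 *	};
 *	ssize_t n;
 *
 *	while ((n = syscall(SYS_listmount, &req, ids, 256, 0)) > 0)
 *		req.param = ids[n - 1];	// resume after the last id returned
 */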
6115
6116 struct mnt_namespace init_mnt_ns = {
6117 .ns = NS_COMMON_INIT(init_mnt_ns),
6118 .user_ns = &init_user_ns,
6119 .passive = REFCOUNT_INIT(1),
6120 .mounts = RB_ROOT,
6121 .poll = __WAIT_QUEUE_HEAD_INITIALIZER(init_mnt_ns.poll),
6122 };
6123
6124 static void __init init_mount_tree(void)
6125 {
6126 struct vfsmount *mnt, *nullfs_mnt;
6127 struct mount *mnt_root;
6128 struct path root;
6129
6130 /*
6131 * We create two mounts:
6132 *
6133 * (1) nullfs with mount id 1
6134 * (2) mutable rootfs with mount id 2
6135 *
6136 * with (2) mounted on top of (1).
6137 */
6138 nullfs_mnt = vfs_kern_mount(&nullfs_fs_type, 0, "nullfs", NULL);
6139 if (IS_ERR(nullfs_mnt))
6140 panic("VFS: Failed to create nullfs");
6141
6142 mnt = vfs_kern_mount(&rootfs_fs_type, 0, "rootfs", initramfs_options);
6143 if (IS_ERR(mnt))
6144 panic("Can't create rootfs");
6145
6146 VFS_WARN_ON_ONCE(real_mount(nullfs_mnt)->mnt_id != 1);
6147 VFS_WARN_ON_ONCE(real_mount(mnt)->mnt_id != 2);
6148
6149 /* The namespace root is the nullfs mnt. */
6150 mnt_root = real_mount(nullfs_mnt);
6151 init_mnt_ns.root = mnt_root;
6152
6153 /* Mount mutable rootfs on top of nullfs. */
6154 root.mnt = nullfs_mnt;
6155 root.dentry = nullfs_mnt->mnt_root;
6156
6157 LOCK_MOUNT_EXACT(mp, &root);
6158 if (unlikely(IS_ERR(mp.parent)))
6159 panic("VFS: Failed to mount rootfs on nullfs");
6160 scoped_guard(mount_writer)
6161 attach_mnt(real_mount(mnt), mp.parent, mp.mp);
6162
6163 pr_info("VFS: Finished mounting rootfs on nullfs\n");
6164
6165 /*
6166 * We've dropped all locks here but that's fine. Not only are we
6167 * the only task that's running, there's also no other mount
6168 * namespace in existence and the initial mount namespace is
6169 * completely empty until we add the mounts we just created.
6170 */
6171 for (struct mount *p = mnt_root; p; p = next_mnt(p, mnt_root)) {
6172 mnt_add_to_ns(&init_mnt_ns, p);
6173 init_mnt_ns.nr_mounts++;
6174 }
6175
6176 init_task.nsproxy->mnt_ns = &init_mnt_ns;
6177 get_mnt_ns(&init_mnt_ns);
6178
6179 /* The root and pwd always point to the mutable rootfs. */
6180 root.mnt = mnt;
6181 root.dentry = mnt->mnt_root;
6182 set_fs_pwd(current->fs, &root);
6183 set_fs_root(current->fs, &root);
6184
6185 ns_tree_add(&init_mnt_ns);
6186 }
6187
6188 void __init mnt_init(void)
6189 {
6190 int err;
6191
6192 mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
6193 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL);
6194
6195 mount_hashtable = alloc_large_system_hash("Mount-cache",
6196 sizeof(struct hlist_head),
6197 mhash_entries, 19,
6198 HASH_ZERO,
6199 &m_hash_shift, &m_hash_mask, 0, 0);
6200 mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache",
6201 sizeof(struct hlist_head),
6202 mphash_entries, 19,
6203 HASH_ZERO,
6204 &mp_hash_shift, &mp_hash_mask, 0, 0);
6205
6206 if (!mount_hashtable || !mountpoint_hashtable)
6207 panic("Failed to allocate mount hash table\n");
6208
6209 kernfs_init();
6210
6211 err = sysfs_init();
6212 if (err)
6213 printk(KERN_WARNING "%s: sysfs_init error: %d\n",
6214 __func__, err);
6215 fs_kobj = kobject_create_and_add("fs", NULL);
6216 if (!fs_kobj)
6217 printk(KERN_WARNING "%s: kobj create error\n", __func__);
6218 shmem_init();
6219 init_rootfs();
6220 init_mount_tree();
6221 }
6222
6223 void put_mnt_ns(struct mnt_namespace *ns)
6224 {
6225 if (!ns_ref_put(ns))
6226 return;
6227 guard(namespace_excl)();
6228 emptied_ns = ns;
6229 guard(mount_writer)();
6230 umount_tree(ns->root, 0);
6231 }
6232
6233 struct vfsmount *kern_mount(struct file_system_type *type)
6234 {
6235 struct vfsmount *mnt;
6236 mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, NULL);
6237 if (!IS_ERR(mnt)) {
6238 /*
6239 * It is a longterm mount; don't release mnt until
6240 * we unmount it, just before the filesystem is unregistered.
6241 */
6242 real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL;
6243 }
6244 return mnt;
6245 }
6246 EXPORT_SYMBOL_GPL(kern_mount);
6247
6248 void kern_unmount(struct vfsmount *mnt)
6249 {
6250 /* release long term mount so mount point can be released */
6251 if (!IS_ERR(mnt)) {
6252 mnt_make_shortterm(mnt);
6253 synchronize_rcu(); /* yecchhh... */
6254 mntput(mnt);
6255 }
6256 }
6257 EXPORT_SYMBOL(kern_unmount);
6258
6259 void kern_unmount_array(struct vfsmount *mnt[], unsigned int num)
6260 {
6261 unsigned int i;
6262
6263 for (i = 0; i < num; i++)
6264 mnt_make_shortterm(mnt[i]);
6265 synchronize_rcu_expedited();
6266 for (i = 0; i < num; i++)
6267 mntput(mnt[i]);
6268 }
6269 EXPORT_SYMBOL(kern_unmount_array);
6270
6271 bool our_mnt(struct vfsmount *mnt)
6272 {
6273 return check_mnt(real_mount(mnt));
6274 }
6275
6276 bool current_chrooted(void)
6277 {
6278 /* Does the current process have a non-standard root */
6279 struct path fs_root __free(path_put) = {};
6280 struct mount *root;
6281
6282 get_fs_root(current->fs, &fs_root);
6283
6284 /* Find the namespace root */
6285
6286 guard(mount_locked_reader)();
6287
6288 root = topmost_overmount(current->nsproxy->mnt_ns->root);
6289
6290 return fs_root.mnt != &root->mnt || !path_mounted(&fs_root);
6291 }
6292
6293 static bool mnt_already_visible(struct mnt_namespace *ns,
6294 const struct super_block *sb,
6295 int *new_mnt_flags)
6296 {
6297 int new_flags = *new_mnt_flags;
6298 struct mount *mnt, *n;
6299
6300 guard(namespace_shared)();
6301 rbtree_postorder_for_each_entry_safe(mnt, n, &ns->mounts, mnt_node) {
6302 struct mount *child;
6303 int mnt_flags;
6304
6305 if (mnt->mnt.mnt_sb->s_type != sb->s_type)
6306 continue;
6307
6308 /* This mount is not fully visible if its root directory
6309 * is not the root directory of the filesystem.
6310 */
6311 if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
6312 continue;
6313
6314 /* A local view of the mount flags */
6315 mnt_flags = mnt->mnt.mnt_flags;
6316
6317 /* Don't miss readonly hidden in the superblock flags */
6318 if (sb_rdonly(mnt->mnt.mnt_sb))
6319 mnt_flags |= MNT_LOCK_READONLY;
6320
6321 /* Verify the mount flags are equal to or more permissive
6322 * than the proposed new mount.
6323 */
6324 if ((mnt_flags & MNT_LOCK_READONLY) &&
6325 !(new_flags & MNT_READONLY))
6326 continue;
6327 if ((mnt_flags & MNT_LOCK_ATIME) &&
6328 ((mnt_flags & MNT_ATIME_MASK) != (new_flags & MNT_ATIME_MASK)))
6329 continue;
6330
6331 /* This mount is not fully visible if there are any
6332 * locked child mounts that cover anything except for
6333 * empty directories.
6334 */
6335 list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
6336 struct inode *inode = child->mnt_mountpoint->d_inode;
6337 /* Only worry about locked mounts */
6338 if (!(child->mnt.mnt_flags & MNT_LOCKED))
6339 continue;
6340 /* Is the directory permanently empty? */
6341 if (!is_empty_dir_inode(inode))
6342 goto next;
6343 }
6344 /* Preserve the locked attributes */
6345 *new_mnt_flags |= mnt_flags & (MNT_LOCK_READONLY | \
6346 MNT_LOCK_ATIME);
6347 return true;
6348 next: ;
6349 }
6350 return false;
6351 }
6352
6353 static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags)
6354 {
6355 const unsigned long required_iflags = SB_I_NOEXEC | SB_I_NODEV;
6356 struct mnt_namespace *ns = current->nsproxy->mnt_ns;
6357 unsigned long s_iflags;
6358
6359 if (ns->user_ns == &init_user_ns)
6360 return false;
6361
6362 /* Can this filesystem be too revealing? */
6363 s_iflags = sb->s_iflags;
6364 if (!(s_iflags & SB_I_USERNS_VISIBLE))
6365 return false;
6366
6367 if ((s_iflags & required_iflags) != required_iflags) {
6368 WARN_ONCE(1, "Expected s_iflags to contain 0x%lx\n",
6369 required_iflags);
6370 return true;
6371 }
6372
6373 return !mnt_already_visible(ns, sb, new_mnt_flags);
6374 }
6375
6376 bool mnt_may_suid(struct vfsmount *mnt)
6377 {
6378 /*
6379 * Foreign mounts (accessed via fchdir or through /proc
6380 * symlinks) are always treated as if they are nosuid. This
6381 * prevents namespaces from trusting potentially unsafe
6382 * suid/sgid bits, file caps, or security labels that originate
6383 * in other namespaces.
6384 */
6385 return !(mnt->mnt_flags & MNT_NOSUID) && check_mnt(real_mount(mnt)) &&
6386 current_in_userns(mnt->mnt_sb->s_user_ns);
6387 }
6388
6389 static struct ns_common *mntns_get(struct task_struct *task)
6390 {
6391 struct ns_common *ns = NULL;
6392 struct nsproxy *nsproxy;
6393
6394 task_lock(task);
6395 nsproxy = task->nsproxy;
6396 if (nsproxy) {
6397 ns = &nsproxy->mnt_ns->ns;
6398 get_mnt_ns(to_mnt_ns(ns));
6399 }
6400 task_unlock(task);
6401
6402 return ns;
6403 }
6404
6405 static void mntns_put(struct ns_common *ns)
6406 {
6407 put_mnt_ns(to_mnt_ns(ns));
6408 }
6409
6410 static int mntns_install(struct nsset *nsset, struct ns_common *ns)
6411 {
6412 struct nsproxy *nsproxy = nsset->nsproxy;
6413 struct fs_struct *fs = nsset->fs;
6414 struct mnt_namespace *mnt_ns = to_mnt_ns(ns), *old_mnt_ns;
6415 struct user_namespace *user_ns = nsset->cred->user_ns;
6416 struct path root;
6417 int err;
6418
6419 if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) ||
6420 !ns_capable(user_ns, CAP_SYS_CHROOT) ||
6421 !ns_capable(user_ns, CAP_SYS_ADMIN))
6422 return -EPERM;
6423
6424 if (is_anon_ns(mnt_ns))
6425 return -EINVAL;
6426
6427 if (fs->users != 1)
6428 return -EINVAL;
6429
6430 get_mnt_ns(mnt_ns);
6431 old_mnt_ns = nsproxy->mnt_ns;
6432 nsproxy->mnt_ns = mnt_ns;
6433
6434 /* Find the root */
6435 err = vfs_path_lookup(mnt_ns->root->mnt.mnt_root, &mnt_ns->root->mnt,
6436 "/", LOOKUP_DOWN, &root);
6437 if (err) {
6438 /* revert to old namespace */
6439 nsproxy->mnt_ns = old_mnt_ns;
6440 put_mnt_ns(mnt_ns);
6441 return err;
6442 }
6443
6444 put_mnt_ns(old_mnt_ns);
6445
6446 /* Update the pwd and root */
6447 set_fs_pwd(fs, &root);
6448 set_fs_root(fs, &root);
6449
6450 path_put(&root);
6451 return 0;
6452 }
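/*
 * This is the backend of setns(2) for mount namespaces; a typical
 * caller (illustrative) looks like:
 *
 *	int fd = open("/proc/1/ns/mnt", O_RDONLY | O_CLOEXEC);
 *
 *	if (fd >= 0 && setns(fd, CLONE_NEWNS) == 0) {
 *		// now in init's mount namespace; root and pwd were reset
 *	}
 */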
6453
6454 static struct user_namespace *mntns_owner(struct ns_common *ns)
6455 {
6456 return to_mnt_ns(ns)->user_ns;
6457 }
6458
6459 const struct proc_ns_operations mntns_operations = {
6460 .name = "mnt",
6461 .get = mntns_get,
6462 .put = mntns_put,
6463 .install = mntns_install,
6464 .owner = mntns_owner,
6465 };
6466
6467 #ifdef CONFIG_SYSCTL
6468 static const struct ctl_table fs_namespace_sysctls[] = {
6469 {
6470 .procname = "mount-max",
6471 .data = &sysctl_mount_max,
6472 .maxlen = sizeof(unsigned int),
6473 .mode = 0644,
6474 .proc_handler = proc_dointvec_minmax,
6475 .extra1 = SYSCTL_ONE,
6476 },
6477 };
6478
6479 static int __init init_fs_namespace_sysctls(void)
6480 {
6481 register_sysctl_init("fs", fs_namespace_sysctls);
6482 return 0;
6483 }
6484 fs_initcall(init_fs_namespace_sysctls);
6485
6486 #endif /* CONFIG_SYSCTL */
6487