// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/pnode.c
 *
 * (C) Copyright IBM Corporation 2005.
 *	Author : Ram Pai (linuxram@us.ibm.com)
 */
#include <linux/mnt_namespace.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/nsproxy.h>
#include <uapi/linux/mount.h>
#include "internal.h"
#include "pnode.h"

/* return the next shared peer mount of @p */
static inline struct mount *next_peer(struct mount *p)
{
	return list_entry(p->mnt_share.next, struct mount, mnt_share);
}

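/* return the first mount on @p's list of slaves */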
static inline struct mount *first_slave(struct mount *p)
{
	return hlist_entry(p->mnt_slave_list.first, struct mount, mnt_slave);
}

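/* return the mount after @p on the slave list @p is on */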
static inline struct mount *next_slave(struct mount *p)
{
	return hlist_entry(p->mnt_slave.next, struct mount, mnt_slave);
}

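/*
 * Return a peer of @mnt (possibly @mnt itself) that belongs to namespace
 * @ns and whose root is reachable from @root, or NULL if there is none.
 */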
static struct mount *get_peer_under_root(struct mount *mnt,
					 struct mnt_namespace *ns,
					 const struct path *root)
{
	struct mount *m = mnt;

	do {
		/* Check the namespace first for optimization */
		if (m->mnt_ns == ns && is_path_reachable(m, m->mnt.mnt_root, root))
			return m;

		m = next_peer(m);
	} while (m != mnt);

	return NULL;
}

/*
 * Get ID of closest dominating peer group having a representative
 * under the given root.
 *
 * Caller must hold namespace_sem
 */
int get_dominating_id(struct mount *mnt, const struct path *root)
{
	struct mount *m;

	for (m = mnt->mnt_master; m != NULL; m = m->mnt_master) {
		struct mount *d = get_peer_under_root(m, mnt->mnt_ns, root);
		if (d)
			return d->mnt_group_id;
	}

	return 0;
}

static inline bool will_be_unmounted(struct mount *m)
{
	return m->mnt.mnt_flags & MNT_UMOUNT;
}

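/*
 * Find a surviving mount to take over propagation for @mnt: the first peer
 * of @mnt that is not going away, or failing that the nearest master up
 * the chain that is not going away (NULL if there is none).
 */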
static struct mount *propagation_source(struct mount *mnt)
{
	do {
		struct mount *m;
		for (m = next_peer(mnt); m != mnt; m = next_peer(m)) {
			if (!will_be_unmounted(m))
				return m;
		}
		mnt = mnt->mnt_master;
	} while (mnt && will_be_unmounted(mnt));
	return mnt;
}

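/*
 * Hand all slaves of @mnt over to @to: every slave gets @to as its new
 * master; if @to is NULL the slaves are simply unhooked, otherwise the
 * whole list is spliced onto @to's list of slaves.
 */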
static void transfer_propagation(struct mount *mnt, struct mount *to)
{
	struct hlist_node *p = NULL, *n;
	struct mount *m;

	hlist_for_each_entry_safe(m, n, &mnt->mnt_slave_list, mnt_slave) {
		m->mnt_master = to;
		if (!to)
			hlist_del_init(&m->mnt_slave);
		else
			p = &m->mnt_slave;
	}
	if (p)
		hlist_splice_init(&mnt->mnt_slave_list, p, &to->mnt_slave_list);
}

/*
 * EXCL[namespace_sem]
 */
void change_mnt_propagation(struct mount *mnt, int type)
{
	struct mount *m = mnt->mnt_master;

	if (type == MS_SHARED) {
		set_mnt_shared(mnt);
		return;
	}
	if (IS_MNT_SHARED(mnt)) {
		if (type == MS_SLAVE || !hlist_empty(&mnt->mnt_slave_list))
			m = propagation_source(mnt);
		if (list_empty(&mnt->mnt_share)) {
			mnt_release_group_id(mnt);
		} else {
			list_del_init(&mnt->mnt_share);
			mnt->mnt_group_id = 0;
		}
		CLEAR_MNT_SHARED(mnt);
		transfer_propagation(mnt, m);
	}
	hlist_del_init(&mnt->mnt_slave);
	if (type == MS_SLAVE) {
		mnt->mnt_master = m;
		if (m)
			hlist_add_head(&mnt->mnt_slave, &m->mnt_slave_list);
	} else {
		mnt->mnt_master = NULL;
		if (type == MS_UNBINDABLE)
			mnt->mnt_t_flags |= T_UNBINDABLE;
		else
			mnt->mnt_t_flags &= ~T_UNBINDABLE;
	}
}

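/*
 * Advance the propagation walk past @m without descending into @m's own
 * slaves: move on to the next peer, or climb back up towards @origin when
 * the current peer group is exhausted.
 */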
static struct mount *__propagation_next(struct mount *m,
					 struct mount *origin)
{
	while (1) {
		struct mount *master = m->mnt_master;

		if (master == origin->mnt_master) {
			struct mount *next = next_peer(m);
			return (next == origin) ? NULL : next;
		} else if (m->mnt_slave.next)
			return next_slave(m);

		/* back at master */
		m = master;
	}
}

/*
 * get the next mount in the propagation tree.
 * @m: the mount seen last
 * @origin: the original mount from where the tree walk initiated
 *
 * Note that peer groups form contiguous segments of slave lists.
 * We rely on that in get_source() to be able to find out whether a
 * vfsmount found while iterating with propagation_next() is a peer
 * of one we'd found earlier.
 */
static struct mount *propagation_next(struct mount *m,
					 struct mount *origin)
{
	/* are there any slaves of this mount? */
	if (!IS_MNT_NEW(m) && !hlist_empty(&m->mnt_slave_list))
		return first_slave(m);

	return __propagation_next(m, origin);
}

static struct mount *skip_propagation_subtree(struct mount *m,
						struct mount *origin)
{
	/*
	 * Advance m past everything that gets propagation from it.
	 */
	struct mount *p = __propagation_next(m, origin);

	while (p && peers(m, p))
		p = __propagation_next(p, origin);

	return p;
}

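/*
 * Move from one peer group to the next in the propagation graph, depth
 * first, returning a mount in the new group from which the caller can
 * walk the rest of its peers; NULL once the walk is back at @origin's
 * group.
 */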
static struct mount *next_group(struct mount *m, struct mount *origin)
{
	while (1) {
		while (1) {
			struct mount *next;
			if (!IS_MNT_NEW(m) && !hlist_empty(&m->mnt_slave_list))
				return first_slave(m);
			next = next_peer(m);
			if (m->mnt_group_id == origin->mnt_group_id) {
				if (next == origin)
					return NULL;
			} else if (m->mnt_slave.next != &next->mnt_slave)
				break;
			m = next;
		}
		/* m is the last peer */
		while (1) {
			struct mount *master = m->mnt_master;
			if (m->mnt_slave.next)
				return next_slave(m);
			m = next_peer(master);
			if (master->mnt_group_id == origin->mnt_group_id)
				break;
			if (master->mnt_slave.next == &m->mnt_slave)
				break;
			m = master;
		}
		if (m == origin)
			return NULL;
	}
}

static bool need_secondary(struct mount *m, struct mountpoint *dest_mp)
{
	/* skip ones added by this propagate_mnt() */
	if (IS_MNT_NEW(m))
		return false;
	/* skip if mountpoint isn't visible in m */
	if (!is_subdir(dest_mp->m_dentry, m->mnt.mnt_root))
		return false;
	/* skip if m is in the anon_ns */
	if (is_anon_ns(m->mnt_ns))
		return false;
	return true;
}

static struct mount *find_master(struct mount *m,
				struct mount *last_copy,
				struct mount *original)
{
	struct mount *p;

	// ascend until there's a copy for something with the same master
	for (;;) {
		p = m->mnt_master;
		if (!p || IS_MNT_MARKED(p))
			break;
		m = p;
	}
	while (!peers(last_copy, original)) {
		struct mount *parent = last_copy->mnt_parent;
		if (parent->mnt_master == p) {
			if (!peers(parent, m))
				last_copy = last_copy->mnt_master;
			break;
		}
		last_copy = last_copy->mnt_master;
	}
	return last_copy;
}

/**
 * propagate_mnt() - create secondary copies for tree attachment
 * @dest_mnt:    destination mount.
 * @dest_mp:     destination mountpoint.
 * @source_mnt:  source mount.
 * @tree_list:   list of secondaries to be attached.
 *
 * Create secondary copies for attaching a tree with root @source_mnt
 * at mount @dest_mnt with mountpoint @dest_mp.  Link all new mounts
 * into a propagation graph.  Set mountpoints for all secondaries,
 * link their roots into @tree_list via ->mnt_hash.
 */
int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
		  struct mount *source_mnt, struct hlist_head *tree_list)
{
	struct mount *m, *n, *copy, *this;
	int err = 0, type;

	if (dest_mnt->mnt_master)
		SET_MNT_MARK(dest_mnt->mnt_master);

	/* iterate over peer groups, depth first */
	for (m = dest_mnt; m && !err; m = next_group(m, dest_mnt)) {
		if (m == dest_mnt) { // have one for dest_mnt itself
			copy = source_mnt;
			type = CL_MAKE_SHARED;
			n = next_peer(m);
			if (n == m)
				continue;
		} else {
			type = CL_SLAVE;
			/* beginning of peer group among the slaves? */
			if (IS_MNT_SHARED(m))
				type |= CL_MAKE_SHARED;
			n = m;
		}
		do {
			if (!need_secondary(n, dest_mp))
				continue;
			if (type & CL_SLAVE) // first in this peer group
				copy = find_master(n, copy, source_mnt);
			this = copy_tree(copy, copy->mnt.mnt_root, type);
			if (IS_ERR(this)) {
				err = PTR_ERR(this);
				break;
			}
			read_seqlock_excl(&mount_lock);
			mnt_set_mountpoint(n, dest_mp, this);
			read_sequnlock_excl(&mount_lock);
			if (n->mnt_master)
				SET_MNT_MARK(n->mnt_master);
			copy = this;
			hlist_add_head(&this->mnt_hash, tree_list);
			err = count_mounts(n->mnt_ns, this);
			if (err)
				break;
			type = CL_MAKE_SHARED;
		} while ((n = next_peer(n)) != m);
	}

	hlist_for_each_entry(n, tree_list, mnt_hash) {
		m = n->mnt_parent;
		if (m->mnt_master)
			CLEAR_MNT_MARK(m->mnt_master);
	}
	if (dest_mnt->mnt_master)
		CLEAR_MNT_MARK(dest_mnt->mnt_master);
	return err;
}

/*
 * return true if the refcount is greater than count
 */
static inline int do_refcount_check(struct mount *mnt, int count)
{
	return mnt_get_count(mnt) > count;
}

/**
 * propagation_would_overmount - check whether propagation from @from
 *                               would overmount @to
 * @from: shared mount
 * @to:   mount to check
 * @mp:   future mountpoint of @to on @from
 *
 * If @from propagates mounts to @to, @from and @to must either be peers
 * or one of the masters in the hierarchy of masters of @to must be a
 * peer of @from.
 *
 * If the root of the @to mount is equal to the future mountpoint @mp of
 * the @to mount on @from then @to will be overmounted by whatever is
 * propagated to it.
 *
 * Context: This function expects namespace_lock() to be held and that
 *          @mp is stable.
 * Return: If @from overmounts @to, true is returned, false if not.
 */
bool propagation_would_overmount(const struct mount *from,
				 const struct mount *to,
				 const struct mountpoint *mp)
{
	if (!IS_MNT_SHARED(from))
		return false;

	if (to->mnt.mnt_root != mp->m_dentry)
		return false;

	for (const struct mount *m = to; m; m = m->mnt_master) {
		if (peers(from, m))
			return true;
	}

	return false;
}

/*
 * check if the mount 'mnt' can be unmounted successfully.
 * @mnt: the mount to be checked for unmount
 * NOTE: unmounting 'mnt' would naturally propagate to all
 * other mounts its parent propagates to.
 * Check if any of these mounts that **do not have submounts**
 * have more references than 'refcnt'. If so, return busy.
 *
 * vfsmount lock must be held for write
 */
int propagate_mount_busy(struct mount *mnt, int refcnt)
{
	struct mount *parent = mnt->mnt_parent;

	/*
	 * quickly check if the current mount can be unmounted.
	 * If not, we don't have to go checking for all other
	 * mounts
	 */
	if (!list_empty(&mnt->mnt_mounts) || do_refcount_check(mnt, refcnt))
		return 1;

	if (mnt == parent)
		return 0;

	for (struct mount *m = propagation_next(parent, parent); m;
			m = propagation_next(m, parent)) {
		struct list_head *head;
		struct mount *child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint);

		if (!child)
			continue;

		head = &child->mnt_mounts;
		if (!list_empty(head)) {
			/*
			 * a mount that covers child completely wouldn't prevent
			 * it being pulled out; any other would.
			 */
			if (!list_is_singular(head) || !child->overmount)
				continue;
		}
		if (do_refcount_check(child, 1))
			return 1;
	}
	return 0;
}

/*
 * Clear MNT_LOCKED when it can be shown to be safe.
 *
 * mount_lock lock must be held for write
 */
void propagate_mount_unlock(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m, *child;

	BUG_ON(parent == mnt);

	for (m = propagation_next(parent, parent); m;
			m = propagation_next(m, parent)) {
		child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint);
		if (child)
			child->mnt.mnt_flags &= ~MNT_LOCKED;
	}
}

static inline bool is_candidate(struct mount *m)
{
	return m->mnt_t_flags & T_UMOUNT_CANDIDATE;
}

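/*
 * Commit @m to being unmounted: flag it MNT_UMOUNT, detach it from its
 * parent's list of children and from its namespace, and put it on the
 * @to_umount list.
 */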
static void umount_one(struct mount *m, struct list_head *to_umount)
{
	m->mnt.mnt_flags |= MNT_UMOUNT;
	list_del_init(&m->mnt_child);
	move_from_ns(m);
	list_add_tail(&m->mnt_list, to_umount);
}

static void remove_from_candidate_list(struct mount *m)
{
	m->mnt_t_flags &= ~(T_MARKED | T_UMOUNT_CANDIDATE);
	list_del_init(&m->mnt_list);
}

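/*
 * For each mount in @set find every mount that would receive propagation
 * of its unmount; flag all of those as umount candidates and put the ones
 * not already doomed on the @candidates list.  The members of @set
 * themselves are flagged only for the duration of the scan.
 */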
static void gather_candidates(struct list_head *set,
			      struct list_head *candidates)
{
	struct mount *m, *p, *q;

	list_for_each_entry(m, set, mnt_list) {
		if (is_candidate(m))
			continue;
		m->mnt_t_flags |= T_UMOUNT_CANDIDATE;
		p = m->mnt_parent;
		q = propagation_next(p, p);
		while (q) {
			struct mount *child = __lookup_mnt(&q->mnt,
							   m->mnt_mountpoint);
			if (child) {
				/*
				 * We might've already run into this one.  That
				 * must've happened on an earlier iteration of
				 * the outer loop; in that case we can skip
				 * those parents that get propagation from q -
				 * there will be nothing new on those as well.
				 */
				if (is_candidate(child)) {
					q = skip_propagation_subtree(q, p);
					continue;
				}
				child->mnt_t_flags |= T_UMOUNT_CANDIDATE;
				if (!will_be_unmounted(child))
					list_add(&child->mnt_list, candidates);
			}
			q = propagation_next(q, p);
		}
	}
	list_for_each_entry(m, set, mnt_list)
		m->mnt_t_flags &= ~T_UMOUNT_CANDIDATE;
}

/*
 * We know that some child of @m can't be unmounted.  In all places where
 * the chain of descent of @m has a child that does not overmount the root
 * of its parent, that parent can't be unmounted either.
 */
static void trim_ancestors(struct mount *m)
{
	struct mount *p;

	for (p = m->mnt_parent; is_candidate(p); m = p, p = p->mnt_parent) {
		if (IS_MNT_MARKED(m))	// all candidates beneath are overmounts
			return;
		SET_MNT_MARK(m);
		if (m != p->overmount)
			p->mnt_t_flags &= ~T_UMOUNT_CANDIDATE;
	}
}

/*
 * Find and exclude all umount candidates forbidden by @m
 * (see Documentation/filesystems/propagate_umount.txt).
 * If we can immediately tell that @m is OK to unmount (unlocked
 * and all children are already committed to unmounting) commit
 * to unmounting it.
 * Only @m itself might be taken from the candidates list;
 * anything found by trim_ancestors() is marked non-candidate
 * and left on the list.
 */
static void trim_one(struct mount *m, struct list_head *to_umount)
{
	bool remove_this = false, found = false, umount_this = false;
	struct mount *n;

	if (!is_candidate(m)) { // trim_ancestors() left it on list
		remove_from_candidate_list(m);
		return;
	}

	list_for_each_entry(n, &m->mnt_mounts, mnt_child) {
		if (!is_candidate(n)) {
			found = true;
			if (n != m->overmount) {
				remove_this = true;
				break;
			}
		}
	}
	if (found) {
		trim_ancestors(m);
	} else if (!IS_MNT_LOCKED(m) && list_empty(&m->mnt_mounts)) {
		remove_this = true;
		umount_this = true;
	}
	if (remove_this) {
		remove_from_candidate_list(m);
		if (umount_this)
			umount_one(m, to_umount);
	}
}

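/*
 * Deal with a candidate chain that may contain locked mounts: take @m and
 * its candidate ancestors off the candidates list, then commit to
 * unmounting only as much of that chain as can be removed without exposing
 * what is under a locked mount that stays.
 */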
static void handle_locked(struct mount *m, struct list_head *to_umount)
{
	struct mount *cutoff = m, *p;

	if (!is_candidate(m)) { // trim_ancestors() left it on list
		remove_from_candidate_list(m);
		return;
	}
	for (p = m; is_candidate(p); p = p->mnt_parent) {
		remove_from_candidate_list(p);
		if (!IS_MNT_LOCKED(p))
			cutoff = p->mnt_parent;
	}
	if (will_be_unmounted(p))
		cutoff = p;
	while (m != cutoff) {
		umount_one(m, to_umount);
		m = m->mnt_parent;
	}
}

/*
 * @m is not going away, and it overmounts the top of a stack of mounts
 * that are going away.  We know that all of those are fully overmounted
 * by the one above (@m being the topmost of the chain), so @m can be slid
 * in place where the bottom of the stack is attached.
 *
 * NOTE: here we temporarily violate a constraint - two mounts end up with
 * the same parent and mountpoint; that will be remedied as soon as we
 * return from propagate_umount() - its caller (umount_tree()) will detach
 * the stack from the parent it (and now @m) is attached to.  umount_tree()
 * might choose to keep unmounted pieces stuck to each other, but it always
 * detaches them from the mounts that remain in the tree.
 */
static void reparent(struct mount *m)
{
	struct mount *p = m;
	struct mountpoint *mp;

	do {
		mp = p->mnt_mp;
		p = p->mnt_parent;
	} while (will_be_unmounted(p));

	mnt_change_mountpoint(p, mp, m);
	mnt_notify_add(m);
}

/**
 * propagate_umount - apply propagation rules to the set of mounts for umount()
 * @set: the list of mounts to be unmounted.
 *
 * Collect all mounts that receive propagation from the mounts in @set and have
 * no obstacles to being unmounted.  Add these additional mounts to the set.
 *
 * See Documentation/filesystems/propagate_umount.txt if you do anything in
 * this area.
 *
 * Locks held:
 * mount_lock (write_seqlock), namespace_sem (exclusive).
 */
void propagate_umount(struct list_head *set)
{
	struct mount *m, *p;
	LIST_HEAD(to_umount);	// committed to unmounting
	LIST_HEAD(candidates);	// undecided umount candidates

	// collect all candidates
	gather_candidates(set, &candidates);

	// reduce the set until it's non-shifting
	list_for_each_entry_safe(m, p, &candidates, mnt_list)
		trim_one(m, &to_umount);

	// ... and non-revealing
	while (!list_empty(&candidates)) {
		m = list_first_entry(&candidates, struct mount, mnt_list);
		handle_locked(m, &to_umount);
	}

	// now to_umount consists of all acceptable candidates
	// deal with reparenting of surviving overmounts on those
	list_for_each_entry(m, &to_umount, mnt_list) {
		struct mount *over = m->overmount;
		if (over && !will_be_unmounted(over))
			reparent(over);
	}

	// and fold them into the set
	list_splice_tail_init(&to_umount, set);
}