// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/pnode.c
 *
 * (C) Copyright IBM Corporation 2005.
 *	Author : Ram Pai (linuxram@us.ibm.com)
 */
#include <linux/mnt_namespace.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/nsproxy.h>
#include <uapi/linux/mount.h>
#include "internal.h"
#include "pnode.h"

/* return the next shared peer mount of @p */
static inline struct mount *next_peer(struct mount *p)
{
	return list_entry(p->mnt_share.next, struct mount, mnt_share);
}

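/* return the first mount on @p's list of slaves */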
static inline struct mount *first_slave(struct mount *p)
{
	return hlist_entry(p->mnt_slave_list.first, struct mount, mnt_slave);
}

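/* return the mount that follows @p on the slave list @p is on */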
static inline struct mount *next_slave(struct mount *p)
{
	return hlist_entry(p->mnt_slave.next, struct mount, mnt_slave);
}

/* locks: namespace_shared && is_mounted(mnt) */
static struct mount *get_peer_under_root(struct mount *mnt,
					 struct mnt_namespace *ns,
					 const struct path *root)
{
	struct mount *m = mnt;

	do {
		/* Check the namespace first for optimization */
		if (m->mnt_ns == ns && is_path_reachable(m, m->mnt.mnt_root, root))
			return m;

		m = next_peer(m);
	} while (m != mnt);

	return NULL;
}

/*
 * Get ID of closest dominating peer group having a representative
 * under the given root.
 *
 * locks: namespace_shared
 */
int get_dominating_id(struct mount *mnt, const struct path *root)
{
	struct mount *m;

	for (m = mnt->mnt_master; m != NULL; m = m->mnt_master) {
		struct mount *d = get_peer_under_root(m, mnt->mnt_ns, root);
		if (d)
			return d->mnt_group_id;
	}

	return 0;
}

static inline bool will_be_unmounted(struct mount *m)
{
	return m->mnt.mnt_flags & MNT_UMOUNT;
}

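/*
 * Hand all of @mnt's slaves over to @to; if @to is NULL, they simply
 * stop being slaves of anything.
 */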
static void transfer_propagation(struct mount *mnt, struct mount *to)
{
	struct hlist_node *p = NULL, *n;
	struct mount *m;

	hlist_for_each_entry_safe(m, n, &mnt->mnt_slave_list, mnt_slave) {
		m->mnt_master = to;
		if (!to)
			hlist_del_init(&m->mnt_slave);
		else
			p = &m->mnt_slave;
	}
	if (p)
		hlist_splice_init(&mnt->mnt_slave_list, p, &to->mnt_slave_list);
}

/*
 * EXCL[namespace_sem]
 */
void change_mnt_propagation(struct mount *mnt, int type)
{
	struct mount *m = mnt->mnt_master;

	if (type == MS_SHARED) {
		set_mnt_shared(mnt);
		return;
	}
	if (IS_MNT_SHARED(mnt)) {
		if (list_empty(&mnt->mnt_share)) {
			mnt_release_group_id(mnt);
		} else {
			m = next_peer(mnt);
			list_del_init(&mnt->mnt_share);
			mnt->mnt_group_id = 0;
		}
		CLEAR_MNT_SHARED(mnt);
		transfer_propagation(mnt, m);
	}
	hlist_del_init(&mnt->mnt_slave);
	if (type == MS_SLAVE) {
		mnt->mnt_master = m;
		if (m)
			hlist_add_head(&mnt->mnt_slave, &m->mnt_slave_list);
	} else {
		mnt->mnt_master = NULL;
		if (type == MS_UNBINDABLE)
			mnt->mnt_t_flags |= T_UNBINDABLE;
		else
			mnt->mnt_t_flags &= ~T_UNBINDABLE;
	}
}

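/*
 * Starting at @m, walk the chain of mounts its propagation would be
 * handed to for as long as those are also about to be unmounted; take
 * each of them out of its peer group and slave list and mark it.
 * Return the mount that should end up as master of everything visited.
 */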
static struct mount *trace_transfers(struct mount *m)
{
	while (1) {
		struct mount *next = next_peer(m);

		if (next != m) {
			list_del_init(&m->mnt_share);
			m->mnt_group_id = 0;
			m->mnt_master = next;
		} else {
			if (IS_MNT_SHARED(m))
				mnt_release_group_id(m);
			next = m->mnt_master;
		}
		hlist_del_init(&m->mnt_slave);
		CLEAR_MNT_SHARED(m);
		SET_MNT_MARK(m);

		if (!next || !will_be_unmounted(next))
			return next;
		if (IS_MNT_MARKED(next))
			return next->mnt_master;
		m = next;
	}
}

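/*
 * Redirect the ->mnt_master chain starting at @m so that every mount
 * on it points directly at @master.
 */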
static void set_destinations(struct mount *m, struct mount *master)
{
	struct mount *next;

	while ((next = m->mnt_master) != master) {
		m->mnt_master = master;
		m = next;
	}
}

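/*
 * Make every mount in @set private, transferring the propagation each
 * of them used to provide to whatever mounts will keep receiving it.
 */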
void bulk_make_private(struct list_head *set)
{
	struct mount *m;

	list_for_each_entry(m, set, mnt_list)
		if (!IS_MNT_MARKED(m))
			set_destinations(m, trace_transfers(m));

	list_for_each_entry(m, set, mnt_list) {
		transfer_propagation(m, m->mnt_master);
		m->mnt_master = NULL;
		CLEAR_MNT_MARK(m);
	}
}

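/*
 * Step to the next mount in the propagation graph without descending
 * into slaves: advance to the next peer once we are back at @origin's
 * level, otherwise to the next mount on the current slave list,
 * climbing back up to the master whenever a slave list is exhausted.
 */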
static struct mount *__propagation_next(struct mount *m,
					 struct mount *origin)
{
	while (1) {
		struct mount *master = m->mnt_master;

		if (master == origin->mnt_master) {
			struct mount *next = next_peer(m);
			return (next == origin) ? NULL : next;
		} else if (m->mnt_slave.next)
			return next_slave(m);

		/* back at master */
		m = master;
	}
}

/*
 * get the next mount in the propagation tree.
 * @m: the mount seen last
 * @origin: the original mount from where the tree walk initiated
 *
 * Note that peer groups form contiguous segments of slave lists.
 * We rely on that in get_source() to be able to find out if
 * a vfsmount found while iterating with propagation_next() is
 * a peer of one we'd found earlier.
 */
static struct mount *propagation_next(struct mount *m,
					 struct mount *origin)
{
	/* are there any slaves of this mount? */
	if (!IS_MNT_NEW(m) && !hlist_empty(&m->mnt_slave_list))
		return first_slave(m);

	return __propagation_next(m, origin);
}

static struct mount *skip_propagation_subtree(struct mount *m,
						struct mount *origin)
{
	/*
	 * Advance m past everything that gets propagation from it.
	 */
	struct mount *p = __propagation_next(m, origin);

	while (p && peers(m, p))
		p = __propagation_next(p, origin);

	return p;
}

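/*
 * Depth-first walk of the propagation graph, one peer group at a time:
 * return a representative (the first mount) of the peer group that
 * follows @m's, or NULL once the walk is back at @origin's group.
 */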
static struct mount *next_group(struct mount *m, struct mount *origin)
{
	while (1) {
		while (1) {
			struct mount *next;
			if (!IS_MNT_NEW(m) && !hlist_empty(&m->mnt_slave_list))
				return first_slave(m);
			next = next_peer(m);
			if (m->mnt_group_id == origin->mnt_group_id) {
				if (next == origin)
					return NULL;
			} else if (m->mnt_slave.next != &next->mnt_slave)
				break;
			m = next;
		}
		/* m is the last peer */
		while (1) {
			struct mount *master = m->mnt_master;
			if (m->mnt_slave.next)
				return next_slave(m);
			m = next_peer(master);
			if (master->mnt_group_id == origin->mnt_group_id)
				break;
			if (master->mnt_slave.next == &m->mnt_slave)
				break;
			m = master;
		}
		if (m == origin)
			return NULL;
	}
}

static bool need_secondary(struct mount *m, struct mountpoint *dest_mp)
{
	/* skip ones added by this propagate_mnt() */
	if (IS_MNT_NEW(m))
		return false;
	/* skip if mountpoint isn't visible in m */
	if (!is_subdir(dest_mp->m_dentry, m->mnt.mnt_root))
		return false;
	/* skip if m is in the anon_ns */
	if (is_anon_ns(m->mnt_ns))
		return false;
	return true;
}

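/*
 * Pick which of the copies created so far the copy serving @m's peer
 * group should be made from (and slaved to).  @last_copy is the most
 * recently created copy, @original the copy corresponding to the
 * group we started from.
 */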
static struct mount *find_master(struct mount *m,
				struct mount *last_copy,
				struct mount *original)
{
	struct mount *p;

	// ascend until there's a copy for something with the same master
	for (;;) {
		p = m->mnt_master;
		if (!p || IS_MNT_MARKED(p))
			break;
		m = p;
	}
	while (!peers(last_copy, original)) {
		struct mount *parent = last_copy->mnt_parent;
		if (parent->mnt_master == p) {
			if (!peers(parent, m))
				last_copy = last_copy->mnt_master;
			break;
		}
		last_copy = last_copy->mnt_master;
	}
	return last_copy;
}

/**
 * propagate_mnt() - create secondary copies for tree attachment
 * @dest_mnt:    destination mount.
 * @dest_mp:     destination mountpoint.
 * @source_mnt:  source mount.
 * @tree_list:   list of secondaries to be attached.
 *
 * Create secondary copies for attaching a tree with root @source_mnt
 * at mount @dest_mnt with mountpoint @dest_mp.  Link all new mounts
 * into a propagation graph.  Set mountpoints for all secondaries,
 * link their roots into @tree_list via ->mnt_hash.
 */
int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
		  struct mount *source_mnt, struct hlist_head *tree_list)
{
	struct mount *m, *n, *copy, *this;
	int err = 0, type;

	if (dest_mnt->mnt_master)
		SET_MNT_MARK(dest_mnt->mnt_master);

	/* iterate over peer groups, depth first */
	for (m = dest_mnt; m && !err; m = next_group(m, dest_mnt)) {
		if (m == dest_mnt) { // have one for dest_mnt itself
			copy = source_mnt;
			type = CL_MAKE_SHARED;
			n = next_peer(m);
			if (n == m)
				continue;
		} else {
			type = CL_SLAVE;
			/* beginning of peer group among the slaves? */
			if (IS_MNT_SHARED(m))
				type |= CL_MAKE_SHARED;
			n = m;
		}
		do {
			if (!need_secondary(n, dest_mp))
				continue;
			if (type & CL_SLAVE) // first in this peer group
				copy = find_master(n, copy, source_mnt);
			this = copy_tree(copy, copy->mnt.mnt_root, type);
			if (IS_ERR(this)) {
				err = PTR_ERR(this);
				break;
			}
			scoped_guard(mount_locked_reader)
				mnt_set_mountpoint(n, dest_mp, this);
			if (n->mnt_master)
				SET_MNT_MARK(n->mnt_master);
			copy = this;
			hlist_add_head(&this->mnt_hash, tree_list);
			err = count_mounts(n->mnt_ns, this);
			if (err)
				break;
			type = CL_MAKE_SHARED;
		} while ((n = next_peer(n)) != m);
	}

	hlist_for_each_entry(n, tree_list, mnt_hash) {
		m = n->mnt_parent;
		if (m->mnt_master)
			CLEAR_MNT_MARK(m->mnt_master);
	}
	if (dest_mnt->mnt_master)
		CLEAR_MNT_MARK(dest_mnt->mnt_master);
	return err;
}

/*
 * return true if the refcount is greater than count
 */
static inline int do_refcount_check(struct mount *mnt, int count)
{
	return mnt_get_count(mnt) > count;
}

/**
 * propagation_would_overmount - check whether propagation from @from
 *                               would overmount @to
 * @from: shared mount
 * @to:   mount to check
 * @mp:   future mountpoint of @to on @from
 *
 * If @from propagates mounts to @to, @from and @to must either be peers
 * or one of the masters in the hierarchy of masters of @to must be a
 * peer of @from.
 *
 * If the root of the @to mount is equal to the future mountpoint @mp of
 * the @to mount on @from then @to will be overmounted by whatever is
 * propagated to it.
 *
 * Context: This function expects namespace_lock() to be held and that
 *          @mp is stable.
 * Return: If @from overmounts @to, true is returned, false if not.
 */
bool propagation_would_overmount(const struct mount *from,
				 const struct mount *to,
				 const struct mountpoint *mp)
{
	if (!IS_MNT_SHARED(from))
		return false;

	if (to->mnt.mnt_root != mp->m_dentry)
		return false;

	for (const struct mount *m = to; m; m = m->mnt_master) {
		if (peers(from, m))
			return true;
	}

	return false;
}

/*
 * check if the mount 'mnt' can be unmounted successfully.
 * @mnt: the mount to be checked for unmount
 * NOTE: unmounting 'mnt' would naturally propagate to all
 * other mounts its parent propagates to.
 * Check if any of these mounts that **do not have submounts**
 * have more references than 'refcnt'. If so return busy.
 *
 * vfsmount lock must be held for write
 */
int propagate_mount_busy(struct mount *mnt, int refcnt)
{
	struct mount *parent = mnt->mnt_parent;

	/*
	 * quickly check if the current mount can be unmounted.
	 * If not, we don't have to go checking for all other
	 * mounts
	 */
	if (!list_empty(&mnt->mnt_mounts) || do_refcount_check(mnt, refcnt))
		return 1;

	if (mnt == parent)
		return 0;

	for (struct mount *m = propagation_next(parent, parent); m;
	     m = propagation_next(m, parent)) {
		struct list_head *head;
		struct mount *child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint);

		if (!child)
			continue;

		head = &child->mnt_mounts;
		if (!list_empty(head)) {
			/*
			 * a mount that covers child completely wouldn't prevent
			 * it being pulled out; any other would.
			 */
			if (!list_is_singular(head) || !child->overmount)
				continue;
		}
		if (do_refcount_check(child, 1))
			return 1;
	}
	return 0;
}

/*
 * Clear MNT_LOCKED when it can be shown to be safe.
 *
 * mount_lock lock must be held for write
 */
void propagate_mount_unlock(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m, *child;

	BUG_ON(parent == mnt);

	for (m = propagation_next(parent, parent); m;
			m = propagation_next(m, parent)) {
		child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint);
		if (child)
			child->mnt.mnt_flags &= ~MNT_LOCKED;
	}
}

static inline bool is_candidate(struct mount *m)
{
	return m->mnt_t_flags & T_UMOUNT_CANDIDATE;
}

static void umount_one(struct mount *m, struct list_head *to_umount)
{
	m->mnt.mnt_flags |= MNT_UMOUNT;
	list_del_init(&m->mnt_child);
	move_from_ns(m);
	list_add_tail(&m->mnt_list, to_umount);
}

static void remove_from_candidate_list(struct mount *m)
{
	m->mnt_t_flags &= ~(T_MARKED | T_UMOUNT_CANDIDATE);
	list_del_init(&m->mnt_list);
}

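/*
 * For every mount in @set, find the mounts that would have the umount
 * propagated to them, flag them T_UMOUNT_CANDIDATE and put those that
 * are not already being unmounted on @candidates.
 */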
static void gather_candidates(struct list_head *set,
			      struct list_head *candidates)
{
	struct mount *m, *p, *q;

	list_for_each_entry(m, set, mnt_list) {
		if (is_candidate(m))
			continue;
		m->mnt_t_flags |= T_UMOUNT_CANDIDATE;
		p = m->mnt_parent;
		q = propagation_next(p, p);
		while (q) {
			struct mount *child = __lookup_mnt(&q->mnt,
							   m->mnt_mountpoint);
			if (child) {
				/*
				 * We might've already run into this one.  That
				 * must've happened on an earlier iteration of
				 * the outer loop; in that case we can skip
				 * those parents that get propagation from q -
				 * there will be nothing new on those as well.
				 */
				if (is_candidate(child)) {
					q = skip_propagation_subtree(q, p);
					continue;
				}
				child->mnt_t_flags |= T_UMOUNT_CANDIDATE;
				if (!will_be_unmounted(child))
					list_add(&child->mnt_list, candidates);
			}
			q = propagation_next(q, p);
		}
	}
	list_for_each_entry(m, set, mnt_list)
		m->mnt_t_flags &= ~T_UMOUNT_CANDIDATE;
}

/*
 * We know that some child of @m can't be unmounted.  Wherever the
 * chain of @m's ancestors has a child that is not the overmount of
 * its parent, that parent can't be unmounted either.
 */
static void trim_ancestors(struct mount *m)
{
	struct mount *p;

	for (p = m->mnt_parent; is_candidate(p); m = p, p = p->mnt_parent) {
		if (IS_MNT_MARKED(m))	// all candidates beneath are overmounts
			return;
		SET_MNT_MARK(m);
		if (m != p->overmount)
			p->mnt_t_flags &= ~T_UMOUNT_CANDIDATE;
	}
}

/*
 * Find and exclude all umount candidates forbidden by @m
 * (see Documentation/filesystems/propagate_umount.txt).
 * If we can immediately tell that @m is OK to unmount (unlocked
 * and all children are already committed to unmounting), commit
 * to unmounting it.
 * Only @m itself might be taken from the candidates list;
 * anything found by trim_ancestors() is marked non-candidate
 * and left on the list.
 */
static void trim_one(struct mount *m, struct list_head *to_umount)
{
	bool remove_this = false, found = false, umount_this = false;
	struct mount *n;

	if (!is_candidate(m)) { // trim_ancestors() left it on list
		remove_from_candidate_list(m);
		return;
	}

	list_for_each_entry(n, &m->mnt_mounts, mnt_child) {
		if (!is_candidate(n)) {
			found = true;
			if (n != m->overmount) {
				remove_this = true;
				break;
			}
		}
	}
	if (found) {
		trim_ancestors(m);
	} else if (!IS_MNT_LOCKED(m) && list_empty(&m->mnt_mounts)) {
		remove_this = true;
		umount_this = true;
	}
	if (remove_this) {
		remove_from_candidate_list(m);
		if (umount_this)
			umount_one(m, to_umount);
	}
}

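/*
 * Deal with a chain of candidates that may include locked mounts:
 * commit to unmounting the part of the chain from @m upwards that can
 * go away without revealing anything a locked mount keeps covered,
 * and take the whole chain off the candidate list.
 */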
static void handle_locked(struct mount *m, struct list_head *to_umount)
{
	struct mount *cutoff = m, *p;

	if (!is_candidate(m)) { // trim_ancestors() left it on list
		remove_from_candidate_list(m);
		return;
	}
	for (p = m; is_candidate(p); p = p->mnt_parent) {
		remove_from_candidate_list(p);
		if (!IS_MNT_LOCKED(p))
			cutoff = p->mnt_parent;
	}
	if (will_be_unmounted(p))
		cutoff = p;
	while (m != cutoff) {
		umount_one(m, to_umount);
		m = m->mnt_parent;
	}
}

/*
 * @m is not going away, and it overmounts the top of a stack of mounts
 * that are going away.  We know that all of those are fully overmounted
 * by the one above (@m being the topmost of the chain), so @m can be slid
 * in place where the bottom of the stack is attached.
 *
 * NOTE: here we temporarily violate a constraint - two mounts end up with
 * the same parent and mountpoint; that will be remedied as soon as we
 * return from propagate_umount() - its caller (umount_tree()) will detach
 * the stack from the parent it (and now @m) is attached to.  umount_tree()
 * might choose to keep unmounted pieces stuck to each other, but it always
 * detaches them from the mounts that remain in the tree.
 */
static void reparent(struct mount *m)
{
	struct mount *p = m;
	struct mountpoint *mp;

	do {
		mp = p->mnt_mp;
		p = p->mnt_parent;
	} while (will_be_unmounted(p));

	mnt_change_mountpoint(p, mp, m);
	mnt_notify_add(m);
}

/**
 * propagate_umount - apply propagation rules to the set of mounts for umount()
 * @set: the list of mounts to be unmounted.
 *
 * Collect all mounts that receive propagation from the mounts in @set and have
 * no obstacles to being unmounted.  Add these additional mounts to the set.
 *
 * See Documentation/filesystems/propagate_umount.txt if you do anything in
 * this area.
 *
 * Locks held:
 * mount_lock (write_seqlock), namespace_sem (exclusive).
 */
void propagate_umount(struct list_head *set)
{
	struct mount *m, *p;
	LIST_HEAD(to_umount);	// committed to unmounting
	LIST_HEAD(candidates);	// undecided umount candidates

	// collect all candidates
	gather_candidates(set, &candidates);

	// reduce the set until it's non-shifting
	list_for_each_entry_safe(m, p, &candidates, mnt_list)
		trim_one(m, &to_umount);

	// ... and non-revealing
	while (!list_empty(&candidates)) {
		m = list_first_entry(&candidates, struct mount, mnt_list);
		handle_locked(m, &to_umount);
	}

	// now to_umount consists of all acceptable candidates
	// deal with reparenting of surviving overmounts on those
	list_for_each_entry(m, &to_umount, mnt_list) {
		struct mount *over = m->overmount;
		if (over && !will_be_unmounted(over))
			reparent(over);
	}

	// and fold them into the set
	list_splice_tail_init(&to_umount, set);
}