// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/super.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  super.c contains code to handle: - mount structures
 *                                   - super-block tables
 *                                   - filesystem drivers list
 *                                   - mount system call
 *                                   - umount system call
 *                                   - ustat system call
 *
 * GK 2/5/95  -  Changed to support mounting the root fs via NFS
 *
 *  Added kerneld support: Jacques Gelinas and Bjorn Ekwall
 *  Added change_root: Werner Almesberger & Hans Lermen, Feb '96
 *  Added options to /proc/mounts:
 *    Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
 *  Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
 *  Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
 */

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/writeback.h>		/* for the emergency remount stuff */
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/rculist_bl.h>
#include <linux/fscrypt.h>
#include <linux/fsnotify.h>
#include <linux/lockdep.h>
#include <linux/user_namespace.h>
#include <linux/fs_context.h>
#include <uapi/linux/mount.h>
#include "internal.h"

static int thaw_super_locked(struct super_block *sb, enum freeze_holder who);

static LIST_HEAD(super_blocks);
static DEFINE_SPINLOCK(sb_lock);

static char *sb_writers_name[SB_FREEZE_LEVELS] = {
	"sb_writers",
	"sb_pagefaults",
	"sb_internal",
};

static inline void __super_lock(struct super_block *sb, bool excl)
{
	if (excl)
		down_write(&sb->s_umount);
	else
		down_read(&sb->s_umount);
}

static inline void super_unlock(struct super_block *sb, bool excl)
{
	if (excl)
		up_write(&sb->s_umount);
	else
		up_read(&sb->s_umount);
}

static inline void __super_lock_excl(struct super_block *sb)
{
	__super_lock(sb, true);
}

static inline void super_unlock_excl(struct super_block *sb)
{
	super_unlock(sb, true);
}

static inline void super_unlock_shared(struct super_block *sb)
{
	super_unlock(sb, false);
}

static bool super_flags(const struct super_block *sb, unsigned int flags)
{
	/*
	 * Pairs with smp_store_release() in super_wake() and ensures
	 * that we see @flags after we're woken.
	 */
	return smp_load_acquire(&sb->s_flags) & flags;
}

/**
 * super_lock - wait for superblock to become ready and lock it
 * @sb: superblock to wait for
 * @excl: whether exclusive access is required
 *
 * If the superblock has passed through neither vfs_get_tree() nor
 * generic_shutdown_super() yet, wait for that to happen. Either superblock
 * creation will succeed and SB_BORN will be set by vfs_get_tree(), or we're
 * woken and we'll see SB_DYING.
 *
 * The caller must have acquired a temporary reference on @sb->s_count.
 *
 * Return: The function returns true if SB_BORN was set, with s_umount
 *         held. The function returns false if SB_DYING was set, with
 *         s_umount not held.
 */
static __must_check bool super_lock(struct super_block *sb, bool excl)
{
	lockdep_assert_not_held(&sb->s_umount);

	/* wait until the superblock is ready or dying */
	wait_var_event(&sb->s_flags, super_flags(sb, SB_BORN | SB_DYING));

	/* Don't pointlessly acquire s_umount. */
	if (super_flags(sb, SB_DYING))
		return false;

	__super_lock(sb, excl);

	/*
	 * Has gone through generic_shutdown_super() in the meantime.
	 * @sb->s_root is NULL and @sb->s_active is 0. No one needs to
	 * grab a reference to this. Tell them so.
	 */
	if (sb->s_flags & SB_DYING) {
		super_unlock(sb, excl);
		return false;
	}

	WARN_ON_ONCE(!(sb->s_flags & SB_BORN));
	return true;
}
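
/*
 * Illustrative usage sketch (not part of the upstream file): the calling
 * convention super_lock() expects. The temporary @sb->s_count reference
 * must be taken under sb_lock before waiting and dropped afterwards;
 * work_on_live_sb() is a hypothetical helper:
 *
 *	spin_lock(&sb_lock);
 *	sb->s_count++;
 *	spin_unlock(&sb_lock);
 *
 *	if (super_lock(sb, false)) {
 *		work_on_live_sb(sb);
 *		super_unlock(sb, false);
 *	}
 *	put_super(sb);
 */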

/* wait and try to acquire read-side of @sb->s_umount */
static inline bool super_lock_shared(struct super_block *sb)
{
	return super_lock(sb, false);
}

/* wait and try to acquire write-side of @sb->s_umount */
static inline bool super_lock_excl(struct super_block *sb)
{
	return super_lock(sb, true);
}

/* wake waiters */
#define SUPER_WAKE_FLAGS (SB_BORN | SB_DYING | SB_DEAD)
static void super_wake(struct super_block *sb, unsigned int flag)
{
	WARN_ON_ONCE((flag & ~SUPER_WAKE_FLAGS));
	WARN_ON_ONCE(hweight32(flag & SUPER_WAKE_FLAGS) > 1);

	/*
	 * Pairs with smp_load_acquire() in super_lock() to make sure
	 * all initializations in the superblock are seen by anyone who
	 * sees SB_BORN set.
	 */
	smp_store_release(&sb->s_flags, sb->s_flags | flag);
	/*
	 * Pairs with the barrier in prepare_to_wait_event() to make sure
	 * ___wait_var_event() either sees SB_BORN set or
	 * waitqueue_active() check in wake_up_var() sees the waiter.
	 */
	smp_mb();
	wake_up_var(&sb->s_flags);
}
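
/*
 * Illustrative sketch (not from the original file) of the publish/observe
 * pairing used by super_wake() and super_flags()/super_lock():
 *
 *	waker:
 *		initialize superblock fields;
 *		smp_store_release(&sb->s_flags, sb->s_flags | SB_BORN);
 *		smp_mb();
 *		wake_up_var(&sb->s_flags);
 *
 *	waiter:
 *		wait_var_event(&sb->s_flags, super_flags(sb, SB_BORN | SB_DYING));
 *		the smp_load_acquire() inside super_flags() guarantees the
 *		waiter also sees every initialization done before the
 *		release store.
 */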

/*
 * One thing we have to be careful of with a per-sb shrinker is that we don't
 * drop the last active reference to the superblock from within the shrinker.
 * If that happens we could trigger unregistering the shrinker from within the
 * shrinker path and that leads to deadlock on the shrinker_mutex. Hence we
 * take a passive reference to the superblock to prevent this from occurring.
 */
static unsigned long super_cache_scan(struct shrinker *shrink,
				      struct shrink_control *sc)
{
	struct super_block *sb;
	long	fs_objects = 0;
	long	total_objects;
	long	freed = 0;
	long	dentries;
	long	inodes;

	sb = shrink->private_data;

	/*
	 * Deadlock avoidance.  We may hold various FS locks, and we don't want
	 * to recurse into the FS that called us in clear_inode() and friends..
	 */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	if (!super_trylock_shared(sb))
		return SHRINK_STOP;

	if (sb->s_op->nr_cached_objects)
		fs_objects = sb->s_op->nr_cached_objects(sb, sc);

	inodes = list_lru_shrink_count(&sb->s_inode_lru, sc);
	dentries = list_lru_shrink_count(&sb->s_dentry_lru, sc);
	total_objects = dentries + inodes + fs_objects + 1;
	if (!total_objects)
		total_objects = 1;

	/* proportion the scan between the caches */
	dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
	inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);
	fs_objects = mult_frac(sc->nr_to_scan, fs_objects, total_objects);

	/*
	 * prune the dcache first as the icache is pinned by it, then
	 * prune the icache, followed by the filesystem specific caches
	 *
	 * Ensure that we always scan at least one object - memcg kmem
	 * accounting uses this to fully empty the caches.
	 */
	sc->nr_to_scan = dentries + 1;
	freed = prune_dcache_sb(sb, sc);
	sc->nr_to_scan = inodes + 1;
	freed += prune_icache_sb(sb, sc);

	if (fs_objects) {
		sc->nr_to_scan = fs_objects + 1;
		freed += sb->s_op->free_cached_objects(sb, sc);
	}

	super_unlock_shared(sb);
	return freed;
}
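
/*
 * Worked example of the proportioning above (illustrative numbers only):
 * with sc->nr_to_scan = 128, dentries = 600, inodes = 300 and
 * fs_objects = 100, total_objects = 600 + 300 + 100 + 1 = 1001, so
 *
 *	dentries   = mult_frac(128, 600, 1001) = 76
 *	inodes     = mult_frac(128, 300, 1001) = 38
 *	fs_objects = mult_frac(128, 100, 1001) = 12
 *
 * and each cache is then scanned with one extra object (77, 39, 13) so
 * that memcg kmem accounting can always make forward progress.
 */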

static unsigned long super_cache_count(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	struct super_block *sb;
	long	total_objects = 0;

	sb = shrink->private_data;

	/*
	 * We don't call super_trylock_shared() here as it is a scalability
	 * bottleneck, so we're exposed to partial setup state. The shrinker
	 * rwsem does not protect filesystem operations backing
	 * list_lru_shrink_count() or s_op->nr_cached_objects(). Counts can
	 * change between super_cache_count and super_cache_scan, so we really
	 * don't need locks here.
	 *
	 * However, if we are currently mounting the superblock, the underlying
	 * filesystem might be in a state of partial construction and hence it
	 * is dangerous to access it.  super_trylock_shared() uses a SB_BORN check
	 * to avoid this situation, so do the same here. The memory barrier is
	 * matched with the one in mount_fs() as we don't hold locks here.
	 */
	if (!(sb->s_flags & SB_BORN))
		return 0;
	smp_rmb();

	if (sb->s_op && sb->s_op->nr_cached_objects)
		total_objects = sb->s_op->nr_cached_objects(sb, sc);

	total_objects += list_lru_shrink_count(&sb->s_dentry_lru, sc);
	total_objects += list_lru_shrink_count(&sb->s_inode_lru, sc);

	if (!total_objects)
		return SHRINK_EMPTY;

	total_objects = vfs_pressure_ratio(total_objects);
	return total_objects;
}

static void destroy_super_work(struct work_struct *work)
{
	struct super_block *s = container_of(work, struct super_block,
							destroy_work);
	int i;

	for (i = 0; i < SB_FREEZE_LEVELS; i++)
		percpu_free_rwsem(&s->s_writers.rw_sem[i]);
	kfree(s);
}

static void destroy_super_rcu(struct rcu_head *head)
{
	struct super_block *s = container_of(head, struct super_block, rcu);
	INIT_WORK(&s->destroy_work, destroy_super_work);
	schedule_work(&s->destroy_work);
}

/* Free a superblock that has never been seen by anyone */
static void destroy_unused_super(struct super_block *s)
{
	if (!s)
		return;
	super_unlock_excl(s);
	list_lru_destroy(&s->s_dentry_lru);
	list_lru_destroy(&s->s_inode_lru);
	security_sb_free(s);
	put_user_ns(s->s_user_ns);
	kfree(s->s_subtype);
	shrinker_free(s->s_shrink);
	/* no delays needed */
	destroy_super_work(&s->destroy_work);
}

/**
 *	alloc_super	-	create new superblock
 *	@type:	filesystem type superblock should belong to
 *	@flags: the mount flags
 *	@user_ns: User namespace for the super_block
 *
 *	Allocates and initializes a new &struct super_block.  alloc_super()
 *	returns a pointer to a new superblock or %NULL if allocation fails.
 */
static struct super_block *alloc_super(struct file_system_type *type, int flags,
				       struct user_namespace *user_ns)
{
	struct super_block *s = kzalloc(sizeof(struct super_block), GFP_KERNEL);
	static const struct super_operations default_op;
	int i;

	if (!s)
		return NULL;

	INIT_LIST_HEAD(&s->s_mounts);
	s->s_user_ns = get_user_ns(user_ns);
	init_rwsem(&s->s_umount);
	lockdep_set_class(&s->s_umount, &type->s_umount_key);
	/*
	 * sget() can have s_umount recursion.
	 *
	 * When it cannot find a suitable sb, it allocates a new
	 * one (this one), and tries again to find a suitable old
	 * one.
	 *
	 * In case that succeeds, it will acquire the s_umount
	 * lock of the old one. Since these are clearly distinct
	 * locks, and this object isn't exposed yet, there's no
	 * risk of deadlocks.
	 *
	 * Annotate this by putting this lock in a different
	 * subclass.
	 */
	down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);

	if (security_sb_alloc(s))
		goto fail;

	for (i = 0; i < SB_FREEZE_LEVELS; i++) {
		if (__percpu_init_rwsem(&s->s_writers.rw_sem[i],
					sb_writers_name[i],
					&type->s_writers_key[i]))
			goto fail;
	}
	s->s_bdi = &noop_backing_dev_info;
	s->s_flags = flags;
	if (s->s_user_ns != &init_user_ns)
		s->s_iflags |= SB_I_NODEV;
	INIT_HLIST_NODE(&s->s_instances);
	INIT_HLIST_BL_HEAD(&s->s_roots);
	mutex_init(&s->s_sync_lock);
	INIT_LIST_HEAD(&s->s_inodes);
	spin_lock_init(&s->s_inode_list_lock);
	INIT_LIST_HEAD(&s->s_inodes_wb);
	spin_lock_init(&s->s_inode_wblist_lock);

	s->s_count = 1;
	atomic_set(&s->s_active, 1);
	mutex_init(&s->s_vfs_rename_mutex);
	lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
	init_rwsem(&s->s_dquot.dqio_sem);
	s->s_maxbytes = MAX_NON_LFS;
	s->s_op = &default_op;
	s->s_time_gran = 1000000000;
	s->s_time_min = TIME64_MIN;
	s->s_time_max = TIME64_MAX;

	s->s_shrink = shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
				     "sb-%s", type->name);
	if (!s->s_shrink)
		goto fail;

	s->s_shrink->scan_objects = super_cache_scan;
	s->s_shrink->count_objects = super_cache_count;
	s->s_shrink->batch = 1024;
	s->s_shrink->private_data = s;

	if (list_lru_init_memcg(&s->s_dentry_lru, s->s_shrink))
		goto fail;
	if (list_lru_init_memcg(&s->s_inode_lru, s->s_shrink))
		goto fail;
	return s;

fail:
	destroy_unused_super(s);
	return NULL;
}

/* Superblock refcounting */

/*
 * Drop a superblock's refcount.  The caller must hold sb_lock.
 */
static void __put_super(struct super_block *s)
{
	if (!--s->s_count) {
		list_del_init(&s->s_list);
		WARN_ON(s->s_dentry_lru.node);
		WARN_ON(s->s_inode_lru.node);
		WARN_ON(!list_empty(&s->s_mounts));
		security_sb_free(s);
		put_user_ns(s->s_user_ns);
		kfree(s->s_subtype);
		call_rcu(&s->rcu, destroy_super_rcu);
	}
}

/**
 *	put_super	-	drop a temporary reference to superblock
 *	@sb: superblock in question
 *
 *	Drops a temporary reference, frees the superblock if there are no
 *	references left.
 */
void put_super(struct super_block *sb)
{
	spin_lock(&sb_lock);
	__put_super(sb);
	spin_unlock(&sb_lock);
}

static void kill_super_notify(struct super_block *sb)
{
	lockdep_assert_not_held(&sb->s_umount);

	/* already notified earlier */
	if (sb->s_flags & SB_DEAD)
		return;

	/*
	 * Remove it from @fs_supers so it isn't found by new
	 * sget{_fc}() walkers anymore. Any concurrent mounter still
	 * managing to grab a temporary reference is guaranteed to
	 * already see SB_DYING and will wait until we notify them about
	 * SB_DEAD.
	 */
	spin_lock(&sb_lock);
	hlist_del_init(&sb->s_instances);
	spin_unlock(&sb_lock);

	/*
	 * Let concurrent mounts know that this thing is really dead.
	 * We don't need @sb->s_umount here as every concurrent caller
	 * will see SB_DYING and either discard the superblock or wait
	 * for SB_DEAD.
	 */
	super_wake(sb, SB_DEAD);
}

/**
 *	deactivate_locked_super	-	drop an active reference to superblock
 *	@s: superblock to deactivate
 *
 *	Drops an active reference to superblock, converting it into a temporary
 *	one if there are no other active references left.  In that case we
 *	tell fs driver to shut it down and drop the temporary reference we
 *	had just acquired.
 *
 *	Caller holds exclusive lock on superblock; that lock is released.
 */
void deactivate_locked_super(struct super_block *s)
{
	struct file_system_type *fs = s->s_type;
	if (atomic_dec_and_test(&s->s_active)) {
		shrinker_free(s->s_shrink);
		fs->kill_sb(s);

		kill_super_notify(s);

		/*
		 * Since list_lru_destroy() may sleep, we cannot call it from
		 * put_super(), where we hold the sb_lock. Therefore we destroy
		 * the lru lists right now.
		 */
		list_lru_destroy(&s->s_dentry_lru);
		list_lru_destroy(&s->s_inode_lru);

		put_filesystem(fs);
		put_super(s);
	} else {
		super_unlock_excl(s);
	}
}

EXPORT_SYMBOL(deactivate_locked_super);

/**
 *	deactivate_super	-	drop an active reference to superblock
 *	@s: superblock to deactivate
 *
 *	Variant of deactivate_locked_super(), except that superblock is *not*
 *	locked by caller.  If we are going to drop the final active reference,
 *	lock will be acquired prior to that.
 */
void deactivate_super(struct super_block *s)
{
	if (!atomic_add_unless(&s->s_active, -1, 1)) {
		__super_lock_excl(s);
		deactivate_locked_super(s);
	}
}

EXPORT_SYMBOL(deactivate_super);

/**
 * grab_super - acquire an active reference to a superblock
 * @sb: superblock to acquire
 *
 * Acquire a temporary reference on a superblock and try to trade it for
 * an active reference. This is used in sget{_fc}() to wait for a
 * superblock to either become SB_BORN or for it to pass through
 * sb->kill() and be marked as SB_DEAD.
 *
 * Return: This returns true if an active reference could be acquired,
 *         false if not.
 */
static bool grab_super(struct super_block *sb)
{
	bool locked;

	sb->s_count++;
	spin_unlock(&sb_lock);
	locked = super_lock_excl(sb);
	if (locked) {
		if (atomic_inc_not_zero(&sb->s_active)) {
			put_super(sb);
			return true;
		}
		super_unlock_excl(sb);
	}
	wait_var_event(&sb->s_flags, super_flags(sb, SB_DEAD));
	put_super(sb);
	return false;
}

/*
 *	super_trylock_shared - try to grab ->s_umount shared
 *	@sb: reference we are trying to grab
 *
 *	Try to prevent fs shutdown.  This is used in places where we
 *	cannot take an active reference but we need to ensure that the
 *	filesystem is not shut down while we are working on it. It returns
 *	false if we cannot acquire s_umount or if we lose the race and the
 *	filesystem has already begun shutting down. It returns true, with the
 *	s_umount lock held in read mode, on success; the caller must then
 *	drop the s_umount lock when done.
 *
 *	Note that unlike get_super() et al. this one does *not* bump ->s_count.
 *	The reason why it's safe is that we are OK with doing trylock instead
 *	of down_read().  There's a couple of places that are OK with that, but
 *	it's very much not a general-purpose interface.
 */
bool super_trylock_shared(struct super_block *sb)
{
	if (down_read_trylock(&sb->s_umount)) {
		if (!(sb->s_flags & SB_DYING) && sb->s_root &&
		    (sb->s_flags & SB_BORN))
			return true;
		super_unlock_shared(sb);
	}

	return false;
}

/**
 *	retire_super	-	prevents superblock from being reused
 *	@sb: superblock to retire
 *
 *	The function marks the superblock to be ignored in the superblock test,
 *	which prevents it from being reused for any new mounts.  If the
 *	superblock has a private bdi, it also unregisters it, but doesn't reduce
 *	the refcount of the superblock to prevent potential races.  The refcount
 *	is reduced by generic_shutdown_super().  The function cannot be called
 *	concurrently with generic_shutdown_super().  It is safe to call the
 *	function multiple times; subsequent calls have no effect.
 *
 *	The marker will affect the re-use only for block-device-based
 *	superblocks.  Other superblocks will still get marked if this function
 *	is used, but that will not affect their reusability.
 */
void retire_super(struct super_block *sb)
{
	WARN_ON(!sb->s_bdev);
	__super_lock_excl(sb);
	if (sb->s_iflags & SB_I_PERSB_BDI) {
		bdi_unregister(sb->s_bdi);
		sb->s_iflags &= ~SB_I_PERSB_BDI;
	}
	sb->s_iflags |= SB_I_RETIRED;
	super_unlock_excl(sb);
}
EXPORT_SYMBOL(retire_super);

/**
 *	generic_shutdown_super	-	common helper for ->kill_sb()
 *	@sb: superblock to kill
 *
 *	generic_shutdown_super() does all fs-independent work on superblock
 *	shutdown.  Typical ->kill_sb() should pick all fs-specific objects
 *	that need destruction out of superblock, call generic_shutdown_super()
 *	and release aforementioned objects.  Note: dentries and inodes _are_
 *	taken care of and do not need specific handling.
 *
 *	Upon calling this function, the filesystem may no longer alter or
 *	rearrange the set of dentries belonging to this super_block, nor may it
 *	change the attachments of dentries to inodes.
 */
void generic_shutdown_super(struct super_block *sb)
{
	const struct super_operations *sop = sb->s_op;

	if (sb->s_root) {
		shrink_dcache_for_umount(sb);
		sync_filesystem(sb);
		sb->s_flags &= ~SB_ACTIVE;

		cgroup_writeback_umount();

		/* Evict all inodes with zero refcount. */
		evict_inodes(sb);

		/*
		 * Clean up and evict any inodes that still have references due
		 * to fsnotify or the security policy.
		 */
		fsnotify_sb_delete(sb);
		security_sb_delete(sb);

		if (sb->s_dio_done_wq) {
			destroy_workqueue(sb->s_dio_done_wq);
			sb->s_dio_done_wq = NULL;
		}

		if (sop->put_super)
			sop->put_super(sb);

		/*
		 * Now that all potentially-encrypted inodes have been evicted,
		 * the fscrypt keyring can be destroyed.
		 */
		fscrypt_destroy_keyring(sb);

		if (CHECK_DATA_CORRUPTION(!list_empty(&sb->s_inodes),
				"VFS: Busy inodes after unmount of %s (%s)",
				sb->s_id, sb->s_type->name)) {
			/*
			 * Adding a proper bailout path here would be hard, but
			 * we can at least make it more likely that a later
			 * iput_final() or such crashes cleanly.
			 */
			struct inode *inode;

			spin_lock(&sb->s_inode_list_lock);
			list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
				inode->i_op = VFS_PTR_POISON;
				inode->i_sb = VFS_PTR_POISON;
				inode->i_mapping = VFS_PTR_POISON;
			}
			spin_unlock(&sb->s_inode_list_lock);
		}
	}
	/*
	 * Broadcast to everyone that grabbed a temporary reference to this
	 * superblock before we removed it from @fs_supers that the superblock
	 * is dying. Every walker of @fs_supers outside of sget{_fc}() will now
	 * discard this superblock and treat it as dead.
	 *
	 * We leave the superblock on @fs_supers so it can be found by
	 * sget{_fc}() until we passed sb->kill_sb().
	 */
	super_wake(sb, SB_DYING);
	super_unlock_excl(sb);
	if (sb->s_bdi != &noop_backing_dev_info) {
		if (sb->s_iflags & SB_I_PERSB_BDI)
			bdi_unregister(sb->s_bdi);
		bdi_put(sb->s_bdi);
		sb->s_bdi = &noop_backing_dev_info;
	}
}

EXPORT_SYMBOL(generic_shutdown_super);

bool mount_capable(struct fs_context *fc)
{
	if (!(fc->fs_type->fs_flags & FS_USERNS_MOUNT))
		return capable(CAP_SYS_ADMIN);
	else
		return ns_capable(fc->user_ns, CAP_SYS_ADMIN);
}

/**
 * sget_fc - Find or create a superblock
 * @fc:	Filesystem context.
 * @test: Comparison callback
 * @set: Setup callback
 *
 * Create a new superblock or find an existing one.
 *
 * The @test callback is used to find a matching existing superblock.
 * Whether or not the requested parameters in @fc are taken into account
 * is specific to the @test callback that is used. They may even be
 * completely ignored.
 *
 * If an extant superblock is matched, it will be returned unless:
 *
718  *     superblock's namespace differ
719  *
 *
 * (2) the filesystem context @fc has requested that reusing an extant
 *     superblock is not allowed
 *
 * In both cases EBUSY will be returned.
 *
 * If no match is made, a new superblock will be allocated and basic
 * initialisation will be performed (s_type, s_fs_info and s_id will be
 * set and the @set callback will be invoked), the superblock will be
 * published and it will be returned in a partially constructed state
 * with SB_BORN and SB_ACTIVE as yet unset.
 *
 * Return: On success, an extant or newly created superblock is
 *         returned. On failure an error pointer is returned.
 */
struct super_block *sget_fc(struct fs_context *fc,
			    int (*test)(struct super_block *, struct fs_context *),
			    int (*set)(struct super_block *, struct fs_context *))
{
	struct super_block *s = NULL;
	struct super_block *old;
	struct user_namespace *user_ns = fc->global ? &init_user_ns : fc->user_ns;
	int err;

retry:
	spin_lock(&sb_lock);
	if (test) {
		hlist_for_each_entry(old, &fc->fs_type->fs_supers, s_instances) {
			if (test(old, fc))
				goto share_extant_sb;
		}
	}
	if (!s) {
		spin_unlock(&sb_lock);
		s = alloc_super(fc->fs_type, fc->sb_flags, user_ns);
		if (!s)
			return ERR_PTR(-ENOMEM);
		goto retry;
	}

	s->s_fs_info = fc->s_fs_info;
	err = set(s, fc);
	if (err) {
		s->s_fs_info = NULL;
		spin_unlock(&sb_lock);
		destroy_unused_super(s);
		return ERR_PTR(err);
	}
	fc->s_fs_info = NULL;
	s->s_type = fc->fs_type;
	s->s_iflags |= fc->s_iflags;
	strscpy(s->s_id, s->s_type->name, sizeof(s->s_id));
	/*
	 * Make the superblock visible on @super_blocks and @fs_supers.
	 * It's in a nascent state and users should wait on SB_BORN or
	 * SB_DYING to be set.
	 */
	list_add_tail(&s->s_list, &super_blocks);
	hlist_add_head(&s->s_instances, &s->s_type->fs_supers);
	spin_unlock(&sb_lock);
	get_filesystem(s->s_type);
	shrinker_register(s->s_shrink);
	return s;

share_extant_sb:
	if (user_ns != old->s_user_ns || fc->exclusive) {
		spin_unlock(&sb_lock);
		destroy_unused_super(s);
		if (fc->exclusive)
			warnfc(fc, "reusing existing filesystem not allowed");
		else
			warnfc(fc, "reusing existing filesystem in another namespace not allowed");
		return ERR_PTR(-EBUSY);
	}
	if (!grab_super(old))
		goto retry;
	destroy_unused_super(s);
	return old;
}
EXPORT_SYMBOL(sget_fc);
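
/*
 * Illustrative sketch (not part of the original file): the shape of the
 * @test/@set callbacks a filesystem might pass to sget_fc(). Here the
 * superblock is keyed on the fs-private pointer stashed in fc->s_fs_info;
 * example_test() and example_set() are hypothetical names:
 *
 *	static int example_test(struct super_block *sb, struct fs_context *fc)
 *	{
 *		return sb->s_fs_info == fc->s_fs_info;
 *	}
 *
 *	static int example_set(struct super_block *sb, struct fs_context *fc)
 *	{
 *		return set_anon_super(sb, NULL);
 *	}
 *
 *	sb = sget_fc(fc, example_test, example_set);
 */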

/**
 *	sget	-	find or create a superblock
 *	@type:	  filesystem type superblock should belong to
 *	@test:	  comparison callback
 *	@set:	  setup callback
 *	@flags:	  mount flags
 *	@data:	  argument to each of them
 */
struct super_block *sget(struct file_system_type *type,
			int (*test)(struct super_block *,void *),
			int (*set)(struct super_block *,void *),
			int flags,
			void *data)
{
	struct user_namespace *user_ns = current_user_ns();
	struct super_block *s = NULL;
	struct super_block *old;
	int err;

	/* We don't yet pass the user namespace of the parent
	 * mount through to here so always use &init_user_ns
	 * until that changes.
	 */
	if (flags & SB_SUBMOUNT)
		user_ns = &init_user_ns;

retry:
	spin_lock(&sb_lock);
	if (test) {
		hlist_for_each_entry(old, &type->fs_supers, s_instances) {
			if (!test(old, data))
				continue;
			if (user_ns != old->s_user_ns) {
				spin_unlock(&sb_lock);
				destroy_unused_super(s);
				return ERR_PTR(-EBUSY);
			}
			if (!grab_super(old))
				goto retry;
			destroy_unused_super(s);
			return old;
		}
	}
	if (!s) {
		spin_unlock(&sb_lock);
		s = alloc_super(type, (flags & ~SB_SUBMOUNT), user_ns);
		if (!s)
			return ERR_PTR(-ENOMEM);
		goto retry;
	}

	err = set(s, data);
	if (err) {
		spin_unlock(&sb_lock);
		destroy_unused_super(s);
		return ERR_PTR(err);
	}
	s->s_type = type;
	strscpy(s->s_id, type->name, sizeof(s->s_id));
	list_add_tail(&s->s_list, &super_blocks);
	hlist_add_head(&s->s_instances, &type->fs_supers);
	spin_unlock(&sb_lock);
	get_filesystem(type);
	shrinker_register(s->s_shrink);
	return s;
}
EXPORT_SYMBOL(sget);

void drop_super(struct super_block *sb)
{
	super_unlock_shared(sb);
	put_super(sb);
}

EXPORT_SYMBOL(drop_super);

void drop_super_exclusive(struct super_block *sb)
{
	super_unlock_excl(sb);
	put_super(sb);
}
EXPORT_SYMBOL(drop_super_exclusive);

static void __iterate_supers(void (*f)(struct super_block *))
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (super_flags(sb, SB_DYING))
			continue;
		sb->s_count++;
		spin_unlock(&sb_lock);

		f(sb);

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}

/**
 *	iterate_supers - call function for all active superblocks
 *	@f: function to call
 *	@arg: argument to pass to it
 *
 *	Scans the superblock list and calls given function, passing it
 *	locked superblock and given argument.
 */
void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		bool locked;

		sb->s_count++;
		spin_unlock(&sb_lock);

		locked = super_lock_shared(sb);
		if (locked) {
			if (sb->s_root)
				f(sb, arg);
			super_unlock_shared(sb);
		}

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}
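
/*
 * Illustrative usage sketch (not from the original file): a typical
 * iterate_supers() caller passes a callback that handles one superblock
 * at a time; the iterator supplies it locked (shared) and with a valid
 * ->s_root. sync_one_sb() is a hypothetical callback name:
 *
 *	static void sync_one_sb(struct super_block *sb, void *arg)
 *	{
 *		if (!sb_rdonly(sb))
 *			sync_filesystem(sb);
 *	}
 *
 *	iterate_supers(sync_one_sb, NULL);
 */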

/**
 *	iterate_supers_type - call function for superblocks of given type
 *	@type: fs type
 *	@f: function to call
 *	@arg: argument to pass to it
 *
 *	Scans the superblock list and calls given function, passing it
 *	locked superblock and given argument.
 */
void iterate_supers_type(struct file_system_type *type,
	void (*f)(struct super_block *, void *), void *arg)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
		bool locked;

		sb->s_count++;
		spin_unlock(&sb_lock);

		locked = super_lock_shared(sb);
		if (locked) {
			if (sb->s_root)
				f(sb, arg);
			super_unlock_shared(sb);
		}

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}

EXPORT_SYMBOL(iterate_supers_type);

struct super_block *user_get_super(dev_t dev, bool excl)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_dev == dev) {
			bool locked;

			sb->s_count++;
			spin_unlock(&sb_lock);
			/* still alive? */
			locked = super_lock(sb, excl);
			if (locked) {
				if (sb->s_root)
					return sb;
				super_unlock(sb, excl);
			}
			/* nope, got unmounted */
			spin_lock(&sb_lock);
			__put_super(sb);
			break;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}

/**
 * reconfigure_super - asks filesystem to change superblock parameters
 * @fc: The superblock and configuration
 *
 * Alters the configuration parameters of a live superblock.
 */
int reconfigure_super(struct fs_context *fc)
{
	struct super_block *sb = fc->root->d_sb;
	int retval;
	bool remount_ro = false;
	bool remount_rw = false;
	bool force = fc->sb_flags & SB_FORCE;

	if (fc->sb_flags_mask & ~MS_RMT_MASK)
		return -EINVAL;
	if (sb->s_writers.frozen != SB_UNFROZEN)
		return -EBUSY;

	retval = security_sb_remount(sb, fc->security);
	if (retval)
		return retval;

	if (fc->sb_flags_mask & SB_RDONLY) {
#ifdef CONFIG_BLOCK
		if (!(fc->sb_flags & SB_RDONLY) && sb->s_bdev &&
		    bdev_read_only(sb->s_bdev))
			return -EACCES;
#endif
		remount_rw = !(fc->sb_flags & SB_RDONLY) && sb_rdonly(sb);
		remount_ro = (fc->sb_flags & SB_RDONLY) && !sb_rdonly(sb);
	}

	if (remount_ro) {
		if (!hlist_empty(&sb->s_pins)) {
			super_unlock_excl(sb);
			group_pin_kill(&sb->s_pins);
			__super_lock_excl(sb);
			if (!sb->s_root)
				return 0;
			if (sb->s_writers.frozen != SB_UNFROZEN)
				return -EBUSY;
			remount_ro = !sb_rdonly(sb);
		}
	}
	shrink_dcache_sb(sb);

	/* If we are reconfiguring to RDONLY and current sb is read/write,
	 * make sure there are no files open for writing.
	 */
	if (remount_ro) {
		if (force) {
			sb_start_ro_state_change(sb);
		} else {
			retval = sb_prepare_remount_readonly(sb);
			if (retval)
				return retval;
		}
	} else if (remount_rw) {
		/*
		 * Protect filesystem's reconfigure code from writes from
		 * userspace until reconfigure finishes.
		 */
		sb_start_ro_state_change(sb);
	}

	if (fc->ops->reconfigure) {
		retval = fc->ops->reconfigure(fc);
		if (retval) {
			if (!force)
				goto cancel_readonly;
			/* If forced remount, go ahead despite any errors */
			WARN(1, "forced remount of a %s fs returned %i\n",
			     sb->s_type->name, retval);
		}
	}

	WRITE_ONCE(sb->s_flags, ((sb->s_flags & ~fc->sb_flags_mask) |
				 (fc->sb_flags & fc->sb_flags_mask)));
	sb_end_ro_state_change(sb);

	/*
	 * Some filesystems modify their metadata via some other path than the
	 * bdev buffer cache (eg. use a private mapping, or directories in
	 * pagecache, etc). Also file data modifications go via their own
	 * mappings. So if we try to mount readonly and then copy the filesystem
	 * from the bdev, we could get stale data, so invalidate it to give a
	 * best effort at coherency.
	 */
	if (remount_ro && sb->s_bdev)
		invalidate_bdev(sb->s_bdev);
	return 0;

cancel_readonly:
	sb_end_ro_state_change(sb);
	return retval;
}

static void do_emergency_remount_callback(struct super_block *sb)
{
	bool locked = super_lock_excl(sb);

	if (locked && sb->s_root && sb->s_bdev && !sb_rdonly(sb)) {
		struct fs_context *fc;

		fc = fs_context_for_reconfigure(sb->s_root,
					SB_RDONLY | SB_FORCE, SB_RDONLY);
		if (!IS_ERR(fc)) {
			if (parse_monolithic_mount_data(fc, NULL) == 0)
				(void)reconfigure_super(fc);
			put_fs_context(fc);
		}
	}
	if (locked)
		super_unlock_excl(sb);
}

static void do_emergency_remount(struct work_struct *work)
{
	__iterate_supers(do_emergency_remount_callback);
	kfree(work);
	printk("Emergency Remount complete\n");
}

void emergency_remount(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_emergency_remount);
		schedule_work(work);
	}
}

static void do_thaw_all_callback(struct super_block *sb)
{
	bool locked = super_lock_excl(sb);

	if (locked && sb->s_root) {
		if (IS_ENABLED(CONFIG_BLOCK))
			while (sb->s_bdev && !bdev_thaw(sb->s_bdev))
				pr_warn("Emergency Thaw on %pg\n", sb->s_bdev);
		thaw_super_locked(sb, FREEZE_HOLDER_USERSPACE);
		return;
	}
	if (locked)
		super_unlock_excl(sb);
}

static void do_thaw_all(struct work_struct *work)
{
	__iterate_supers(do_thaw_all_callback);
	kfree(work);
	printk(KERN_WARNING "Emergency Thaw complete\n");
}

/**
 * emergency_thaw_all -- forcibly thaw every frozen filesystem
 *
 * Used for emergency unfreeze of all filesystems via SysRq
 */
void emergency_thaw_all(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_thaw_all);
		schedule_work(work);
	}
}

static DEFINE_IDA(unnamed_dev_ida);

/**
 * get_anon_bdev - Allocate a block device for filesystems which don't have one.
 * @p: Pointer to a dev_t.
 *
 * Filesystems which don't use real block devices can call this function
 * to allocate a virtual block device.
 *
 * Context: Any context.  Frequently called while holding sb_lock.
 * Return: 0 on success, -EMFILE if there are no anonymous bdevs left
 * or -ENOMEM if memory allocation failed.
 */
int get_anon_bdev(dev_t *p)
{
	int dev;

	/*
	 * Many userspace utilities consider an FSID of 0 invalid.
	 * Always return at least 1 from get_anon_bdev.
	 */
	dev = ida_alloc_range(&unnamed_dev_ida, 1, (1 << MINORBITS) - 1,
			GFP_ATOMIC);
	if (dev == -ENOSPC)
		dev = -EMFILE;
	if (dev < 0)
		return dev;

	*p = MKDEV(0, dev);
	return 0;
}
EXPORT_SYMBOL(get_anon_bdev);

void free_anon_bdev(dev_t dev)
{
	ida_free(&unnamed_dev_ida, MINOR(dev));
}
EXPORT_SYMBOL(free_anon_bdev);

int set_anon_super(struct super_block *s, void *data)
{
	return get_anon_bdev(&s->s_dev);
}
EXPORT_SYMBOL(set_anon_super);

void kill_anon_super(struct super_block *sb)
{
	dev_t dev = sb->s_dev;
	generic_shutdown_super(sb);
	kill_super_notify(sb);
	free_anon_bdev(dev);
}
EXPORT_SYMBOL(kill_anon_super);

void kill_litter_super(struct super_block *sb)
{
	if (sb->s_root)
		d_genocide(sb->s_root);
	kill_anon_super(sb);
}
EXPORT_SYMBOL(kill_litter_super);

int set_anon_super_fc(struct super_block *sb, struct fs_context *fc)
{
	return set_anon_super(sb, NULL);
}
EXPORT_SYMBOL(set_anon_super_fc);

static int test_keyed_super(struct super_block *sb, struct fs_context *fc)
{
	return sb->s_fs_info == fc->s_fs_info;
}

static int test_single_super(struct super_block *s, struct fs_context *fc)
{
	return 1;
}

static int vfs_get_super(struct fs_context *fc,
		int (*test)(struct super_block *, struct fs_context *),
		int (*fill_super)(struct super_block *sb,
				  struct fs_context *fc))
{
	struct super_block *sb;
	int err;

	sb = sget_fc(fc, test, set_anon_super_fc);
	if (IS_ERR(sb))
		return PTR_ERR(sb);

	if (!sb->s_root) {
		err = fill_super(sb, fc);
		if (err)
			goto error;

		sb->s_flags |= SB_ACTIVE;
	}

	fc->root = dget(sb->s_root);
	return 0;

error:
	deactivate_locked_super(sb);
	return err;
}

int get_tree_nodev(struct fs_context *fc,
		  int (*fill_super)(struct super_block *sb,
				    struct fs_context *fc))
{
	return vfs_get_super(fc, NULL, fill_super);
}
EXPORT_SYMBOL(get_tree_nodev);

int get_tree_single(struct fs_context *fc,
		  int (*fill_super)(struct super_block *sb,
				    struct fs_context *fc))
{
	return vfs_get_super(fc, test_single_super, fill_super);
}
EXPORT_SYMBOL(get_tree_single);

int get_tree_keyed(struct fs_context *fc,
		  int (*fill_super)(struct super_block *sb,
				    struct fs_context *fc),
		void *key)
{
	fc->s_fs_info = key;
	return vfs_get_super(fc, test_keyed_super, fill_super);
}
EXPORT_SYMBOL(get_tree_keyed);
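
/*
 * Illustrative sketch (not part of the original file): how a simple
 * in-memory filesystem might wire the helpers above into its
 * fs_context_operations; example_fill_super() is hypothetical:
 *
 *	static int example_get_tree(struct fs_context *fc)
 *	{
 *		return get_tree_nodev(fc, example_fill_super);
 *	}
 *
 *	static const struct fs_context_operations example_context_ops = {
 *		.get_tree	= example_get_tree,
 *	};
 *
 * A singleton filesystem would use get_tree_single() instead, and a
 * filesystem keyed on some object (e.g. a namespace) would pass that
 * object to get_tree_keyed() as @key.
 */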

static int set_bdev_super(struct super_block *s, void *data)
{
	s->s_dev = *(dev_t *)data;
	return 0;
}

static int super_s_dev_set(struct super_block *s, struct fs_context *fc)
{
	return set_bdev_super(s, fc->sget_key);
}

static int super_s_dev_test(struct super_block *s, struct fs_context *fc)
{
	return !(s->s_iflags & SB_I_RETIRED) &&
		s->s_dev == *(dev_t *)fc->sget_key;
}

/**
 * sget_dev - Find or create a superblock by device number
 * @fc: Filesystem context.
 * @dev: device number
 *
 * Find or create a superblock using the provided device number that
 * will be stored in fc->sget_key.
 *
 * If an extant superblock is matched, then that will be returned with
 * an elevated reference count that the caller must transfer or discard.
 *
 * If no match is made, a new superblock will be allocated and basic
 * initialisation will be performed (s_type, s_fs_info, s_id, s_dev will
 * be set). The superblock will be published and it will be returned in
 * a partially constructed state with SB_BORN and SB_ACTIVE as yet
 * unset.
 *
 * Return: an existing or newly created superblock on success, an error
 *         pointer on failure.
 */
struct super_block *sget_dev(struct fs_context *fc, dev_t dev)
{
	fc->sget_key = &dev;
	return sget_fc(fc, super_s_dev_test, super_s_dev_set);
}
EXPORT_SYMBOL(sget_dev);

#ifdef CONFIG_BLOCK
/*
 * Lock the superblock that is the holder of the bdev. Returns the superblock
 * pointer if we successfully locked the superblock and it is alive. Otherwise
 * we return NULL and just unlock bdev->bd_holder_lock.
 *
 * The function must be called with bdev->bd_holder_lock and releases it.
 */
static struct super_block *bdev_super_lock(struct block_device *bdev, bool excl)
	__releases(&bdev->bd_holder_lock)
{
	struct super_block *sb = bdev->bd_holder;
	bool locked;

	lockdep_assert_held(&bdev->bd_holder_lock);
	lockdep_assert_not_held(&sb->s_umount);
	lockdep_assert_not_held(&bdev->bd_disk->open_mutex);

	/* Make sure sb doesn't go away from under us */
	spin_lock(&sb_lock);
	sb->s_count++;
	spin_unlock(&sb_lock);

	mutex_unlock(&bdev->bd_holder_lock);

	locked = super_lock(sb, excl);

	/*
	 * If the superblock wasn't already SB_DYING then we hold
	 * s_umount and can safely drop our temporary reference.
	 */
	put_super(sb);

	if (!locked)
		return NULL;

	if (!sb->s_root || !(sb->s_flags & SB_ACTIVE)) {
		super_unlock(sb, excl);
		return NULL;
	}

	return sb;
}

static void fs_bdev_mark_dead(struct block_device *bdev, bool surprise)
{
	struct super_block *sb;

	sb = bdev_super_lock(bdev, false);
	if (!sb)
		return;

	if (!surprise)
		sync_filesystem(sb);
	shrink_dcache_sb(sb);
	invalidate_inodes(sb);
	if (sb->s_op->shutdown)
		sb->s_op->shutdown(sb);

	super_unlock_shared(sb);
}

static void fs_bdev_sync(struct block_device *bdev)
{
	struct super_block *sb;

	sb = bdev_super_lock(bdev, false);
	if (!sb)
		return;

	sync_filesystem(sb);
	super_unlock_shared(sb);
}

static struct super_block *get_bdev_super(struct block_device *bdev)
{
	bool active = false;
	struct super_block *sb;

	sb = bdev_super_lock(bdev, true);
	if (sb) {
		active = atomic_inc_not_zero(&sb->s_active);
		super_unlock_excl(sb);
	}
	if (!active)
		return NULL;
	return sb;
}

/**
 * fs_bdev_freeze - freeze owning filesystem of block device
 * @bdev: block device
 *
 * Freeze the filesystem that owns this block device if it is still
 * active.
 *
 * A filesystem that owns multiple block devices may be frozen from each
 * block device and won't be unfrozen until all block devices are
 * unfrozen. Each block device can only freeze the filesystem once as we
 * nest freezes for block devices in the block layer.
 *
 * Return: If the freeze was successful zero is returned. If the freeze
 *         failed a negative error code is returned.
 */
static int fs_bdev_freeze(struct block_device *bdev)
{
	struct super_block *sb;
	int error = 0;

	lockdep_assert_held(&bdev->bd_fsfreeze_mutex);

	sb = get_bdev_super(bdev);
	if (!sb)
		return -EINVAL;

	if (sb->s_op->freeze_super)
		error = sb->s_op->freeze_super(sb,
				FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE);
	else
		error = freeze_super(sb,
				FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE);
	if (!error)
		error = sync_blockdev(bdev);
	deactivate_super(sb);
	return error;
}

/**
 * fs_bdev_thaw - thaw owning filesystem of block device
 * @bdev: block device
 *
 * Thaw the filesystem that owns this block device.
 *
 * A filesystem that owns multiple block devices may be frozen from each
 * block device and won't be unfrozen until all block devices are
 * unfrozen. Each block device can only freeze the filesystem once as we
 * nest freezes for block devices in the block layer.
 *
 * Return: If the thaw was successful zero is returned. If the thaw
 *         failed a negative error code is returned. If this function
 *         returns zero it doesn't mean that the filesystem is unfrozen
 *         as it may have been frozen multiple times (kernel may hold a
 *         freeze or might be frozen from other block devices).
 */
static int fs_bdev_thaw(struct block_device *bdev)
{
	struct super_block *sb;
	int error;

	lockdep_assert_held(&bdev->bd_fsfreeze_mutex);

	sb = get_bdev_super(bdev);
	if (WARN_ON_ONCE(!sb))
		return -EINVAL;

	if (sb->s_op->thaw_super)
		error = sb->s_op->thaw_super(sb,
				FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE);
	else
		error = thaw_super(sb,
				FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE);
	deactivate_super(sb);
	return error;
}

const struct blk_holder_ops fs_holder_ops = {
	.mark_dead		= fs_bdev_mark_dead,
	.sync			= fs_bdev_sync,
	.freeze			= fs_bdev_freeze,
	.thaw			= fs_bdev_thaw,
};
EXPORT_SYMBOL_GPL(fs_holder_ops);

int setup_bdev_super(struct super_block *sb, int sb_flags,
		struct fs_context *fc)
{
	blk_mode_t mode = sb_open_mode(sb_flags);
	struct bdev_handle *bdev_handle;
	struct block_device *bdev;

	bdev_handle = bdev_open_by_dev(sb->s_dev, mode, sb, &fs_holder_ops);
	if (IS_ERR(bdev_handle)) {
		if (fc)
			errorf(fc, "%s: Can't open blockdev", fc->source);
		return PTR_ERR(bdev_handle);
	}
	bdev = bdev_handle->bdev;

	/*
	 * This really should be in blkdev_get_by_dev, but right now can't due
	 * to legacy issues that require us to allow opening a block device node
	 * writable from userspace even for a read-only block device.
	 */
	if ((mode & BLK_OPEN_WRITE) && bdev_read_only(bdev)) {
		bdev_release(bdev_handle);
		return -EACCES;
	}

	/*
	 * It is enough to check bdev was not frozen before we set
	 * s_bdev as freezing will wait until SB_BORN is set.
	 */
	if (atomic_read(&bdev->bd_fsfreeze_count) > 0) {
		if (fc)
			warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
		bdev_release(bdev_handle);
		return -EBUSY;
	}
	spin_lock(&sb_lock);
	sb->s_bdev_handle = bdev_handle;
	sb->s_bdev = bdev;
	sb->s_bdi = bdi_get(bdev->bd_disk->bdi);
	if (bdev_stable_writes(bdev))
		sb->s_iflags |= SB_I_STABLE_WRITES;
	spin_unlock(&sb_lock);

	snprintf(sb->s_id, sizeof(sb->s_id), "%pg", bdev);
	shrinker_debugfs_rename(sb->s_shrink, "sb-%s:%s", sb->s_type->name,
				sb->s_id);
	sb_set_blocksize(sb, block_size(bdev));
	return 0;
}
EXPORT_SYMBOL_GPL(setup_bdev_super);

/**
 * get_tree_bdev - Get a superblock based on a single block device
 * @fc: The filesystem context holding the parameters
 * @fill_super: Helper to initialise a new superblock
 */
int get_tree_bdev(struct fs_context *fc,
		int (*fill_super)(struct super_block *,
				  struct fs_context *))
{
	struct super_block *s;
	int error = 0;
	dev_t dev;

	if (!fc->source)
		return invalf(fc, "No source specified");

	error = lookup_bdev(fc->source, &dev);
	if (error) {
		errorf(fc, "%s: Can't lookup blockdev", fc->source);
		return error;
	}

	fc->sb_flags |= SB_NOSEC;
	s = sget_dev(fc, dev);
	if (IS_ERR(s))
		return PTR_ERR(s);

	if (s->s_root) {
		/* Don't summarily change the RO/RW state. */
		if ((fc->sb_flags ^ s->s_flags) & SB_RDONLY) {
			warnf(fc, "%pg: Can't mount, would change RO state", s->s_bdev);
			deactivate_locked_super(s);
			return -EBUSY;
		}
	} else {
		error = setup_bdev_super(s, fc->sb_flags, fc);
		if (!error)
			error = fill_super(s, fc);
		if (error) {
			deactivate_locked_super(s);
			return error;
		}
		s->s_flags |= SB_ACTIVE;
	}

	BUG_ON(fc->root);
	fc->root = dget(s->s_root);
	return 0;
}
EXPORT_SYMBOL(get_tree_bdev);
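
/*
 * Illustrative sketch (not from the original file): a block-device-backed
 * filesystem typically calls get_tree_bdev() from its ->get_tree() hook,
 * with fc->source naming the device; example_fill_super() is hypothetical:
 *
 *	static int example_get_tree(struct fs_context *fc)
 *	{
 *		return get_tree_bdev(fc, example_fill_super);
 *	}
 */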

static int test_bdev_super(struct super_block *s, void *data)
{
	return !(s->s_iflags & SB_I_RETIRED) && s->s_dev == *(dev_t *)data;
}

struct dentry *mount_bdev(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	struct super_block *s;
	int error;
	dev_t dev;

	error = lookup_bdev(dev_name, &dev);
	if (error)
		return ERR_PTR(error);

	flags |= SB_NOSEC;
	s = sget(fs_type, test_bdev_super, set_bdev_super, flags, &dev);
	if (IS_ERR(s))
		return ERR_CAST(s);

	if (s->s_root) {
		if ((flags ^ s->s_flags) & SB_RDONLY) {
			deactivate_locked_super(s);
			return ERR_PTR(-EBUSY);
		}
	} else {
		error = setup_bdev_super(s, flags, NULL);
		if (!error)
			error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
		if (error) {
			deactivate_locked_super(s);
			return ERR_PTR(error);
		}

		s->s_flags |= SB_ACTIVE;
	}

	return dget(s->s_root);
}
EXPORT_SYMBOL(mount_bdev);
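
/*
 * Illustrative sketch (not part of the original file): the legacy
 * ->mount() entry point of a block-device-backed filesystem, as it looks
 * before conversion to fs_context; example_fill_super() is hypothetical:
 *
 *	static struct dentry *example_mount(struct file_system_type *fs_type,
 *			int flags, const char *dev_name, void *data)
 *	{
 *		return mount_bdev(fs_type, flags, dev_name, data,
 *				  example_fill_super);
 *	}
 */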

void kill_block_super(struct super_block *sb)
{
	struct block_device *bdev = sb->s_bdev;

	generic_shutdown_super(sb);
	if (bdev) {
		sync_blockdev(bdev);
		bdev_release(sb->s_bdev_handle);
	}
}

EXPORT_SYMBOL(kill_block_super);
#endif

struct dentry *mount_nodev(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	int error;
	struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL);

	if (IS_ERR(s))
		return ERR_CAST(s);

	error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
	if (error) {
		deactivate_locked_super(s);
		return ERR_PTR(error);
	}
	s->s_flags |= SB_ACTIVE;
	return dget(s->s_root);
}
EXPORT_SYMBOL(mount_nodev);

int reconfigure_single(struct super_block *s,
		       int flags, void *data)
{
	struct fs_context *fc;
	int ret;

	/* The caller really needs to be passing fc down into mount_single(),
	 * then a chunk of this can be removed.  [Bollocks -- AV]
	 * Better yet, reconfiguration shouldn't happen, but rather the second
	 * mount should be rejected if the parameters are not compatible.
	 */
	fc = fs_context_for_reconfigure(s->s_root, flags, MS_RMT_MASK);
	if (IS_ERR(fc))
		return PTR_ERR(fc);

	ret = parse_monolithic_mount_data(fc, data);
	if (ret < 0)
		goto out;

	ret = reconfigure_super(fc);
out:
	put_fs_context(fc);
	return ret;
}

static int compare_single(struct super_block *s, void *p)
{
	return 1;
}

struct dentry *mount_single(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	struct super_block *s;
	int error;

	s = sget(fs_type, compare_single, set_anon_super, flags, NULL);
	if (IS_ERR(s))
		return ERR_CAST(s);
	if (!s->s_root) {
		error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
		if (!error)
			s->s_flags |= SB_ACTIVE;
	} else {
		error = reconfigure_single(s, flags, data);
	}
	if (unlikely(error)) {
		deactivate_locked_super(s);
		return ERR_PTR(error);
	}
	return dget(s->s_root);
}
EXPORT_SYMBOL(mount_single);

/**
 * vfs_get_tree - Get the mountable root
 * @fc: The superblock configuration context.
 *
 * The filesystem is invoked to get or create a superblock which can then later
 * be used for mounting.  The filesystem places a pointer to the root to be
 * used for mounting in @fc->root.
 */
int vfs_get_tree(struct fs_context *fc)
{
	struct super_block *sb;
	int error;

	if (fc->root)
		return -EBUSY;

	/* Get the mountable root in fc->root, with a ref on the root and a ref
	 * on the superblock.
	 */
	error = fc->ops->get_tree(fc);
	if (error < 0)
		return error;

	if (!fc->root) {
		pr_err("Filesystem %s get_tree() didn't set fc->root\n",
		       fc->fs_type->name);
		/* We don't know what the locking state of the superblock is -
		 * if there is a superblock.
		 */
		BUG();
	}

	sb = fc->root->d_sb;
	WARN_ON(!sb->s_bdi);

	/*
	 * super_wake() contains a memory barrier which also takes care of
	 * ordering for super_cache_count(). We place it before setting
	 * SB_BORN as the data dependency between the two functions is
	 * the superblock structure contents that we just set up, not
	 * the SB_BORN flag.
	 */
	super_wake(sb, SB_BORN);

	error = security_sb_set_mnt_opts(sb, fc->security, 0, NULL);
	if (unlikely(error)) {
		fc_drop_locked(fc);
		return error;
	}

	/*
	 * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
	 * but s_maxbytes was an unsigned long long for many releases. Throw
	 * this warning for a little while to try and catch filesystems that
	 * violate this rule.
	 */
	WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
		"negative value (%lld)\n", fc->fs_type->name, sb->s_maxbytes);

	return 0;
}
EXPORT_SYMBOL(vfs_get_tree);

/*
 * Setup private BDI for given superblock. It gets automatically cleaned up
 * in generic_shutdown_super().
 */
int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
{
	struct backing_dev_info *bdi;
	int err;
	va_list args;

	bdi = bdi_alloc(NUMA_NO_NODE);
	if (!bdi)
		return -ENOMEM;

	va_start(args, fmt);
	err = bdi_register_va(bdi, fmt, args);
	va_end(args);
	if (err) {
		bdi_put(bdi);
		return err;
	}
	WARN_ON(sb->s_bdi != &noop_backing_dev_info);
	sb->s_bdi = bdi;
	sb->s_iflags |= SB_I_PERSB_BDI;

	return 0;
}
EXPORT_SYMBOL(super_setup_bdi_name);
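
/*
 * Illustrative usage sketch (not from the original file): a fill_super
 * implementation that wants a private, named BDI would typically do
 * something like this early in setup; "example" and some_unique_id are
 * hypothetical:
 *
 *	err = super_setup_bdi_name(sb, "example-%llu", some_unique_id);
 *	if (err)
 *		return err;
 *
 * generic_shutdown_super() will later unregister and drop the BDI
 * automatically because SB_I_PERSB_BDI is set.
 */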

/*
 * Setup private BDI for given superblock. It gets automatically cleaned up
1859  * in generic_shutdown_super().
1860  */
1861 int super_setup_bdi(struct super_block *sb)
1862 {
1863 	static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
1864 
1865 	return super_setup_bdi_name(sb, "%.28s-%ld", sb->s_type->name,
1866 				    atomic_long_inc_return(&bdi_seq));
1867 }
1868 EXPORT_SYMBOL(super_setup_bdi);
1869 
1870 /**
1871  * sb_wait_write - wait until all writers to given file system finish
1872  * @sb: the super for which we wait
1873  * @level: type of writers we wait for (normal vs page fault)
1874  *
1875  * This function waits until there are no writers of the given type to the
1876  * given file system.
1877  */
1878 static void sb_wait_write(struct super_block *sb, int level)
1879 {
1880 	percpu_down_write(sb->s_writers.rw_sem + level - 1);
1881 }
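
/*
 * The writers that sb_wait_write() waits for are the read-side holders
 * of the same percpu rwsem, i.e. (sketch of a typical writer):
 *
 *	sb_start_write(sb);	(blocks once frozen >= SB_FREEZE_WRITE)
 *	...modify the filesystem...
 *	sb_end_write(sb);
 */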
1882 
1883 /*
1884  * We are going to return to userspace and forget about these locks; the
1885  * ownership goes to the caller of thaw_super(), which does the unlocking.
1886  */
1887 static void lockdep_sb_freeze_release(struct super_block *sb)
1888 {
1889 	int level;
1890 
1891 	for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
1892 		percpu_rwsem_release(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
1893 }
1894 
1895 /*
1896  * Tell lockdep we are holding these locks before we call ->unfreeze_fs(sb).
1897  */
1898 static void lockdep_sb_freeze_acquire(struct super_block *sb)
1899 {
1900 	int level;
1901 
1902 	for (level = 0; level < SB_FREEZE_LEVELS; ++level)
1903 		percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
1904 }
1905 
1906 static void sb_freeze_unlock(struct super_block *sb, int level)
1907 {
1908 	for (level--; level >= 0; level--)
1909 		percpu_up_write(sb->s_writers.rw_sem + level);
1910 }
1911 
1912 static int wait_for_partially_frozen(struct super_block *sb)
1913 {
1914 	int ret = 0;
1915 
1916 	do {
1917 		unsigned short old = sb->s_writers.frozen;
1918 
1919 		up_write(&sb->s_umount);
1920 		ret = wait_var_event_killable(&sb->s_writers.frozen,
1921 					       sb->s_writers.frozen != old);
1922 		down_write(&sb->s_umount);
1923 	} while (ret == 0 &&
1924 		 sb->s_writers.frozen != SB_UNFROZEN &&
1925 		 sb->s_writers.frozen != SB_FREEZE_COMPLETE);
1926 
1927 	return ret;
1928 }
1929 
1930 #define FREEZE_HOLDERS (FREEZE_HOLDER_KERNEL | FREEZE_HOLDER_USERSPACE)
1931 #define FREEZE_FLAGS (FREEZE_HOLDERS | FREEZE_MAY_NEST)
1932 
1933 static inline int freeze_inc(struct super_block *sb, enum freeze_holder who)
1934 {
1935 	WARN_ON_ONCE((who & ~FREEZE_FLAGS));
1936 	WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1);
1937 
1938 	if (who & FREEZE_HOLDER_KERNEL)
1939 		++sb->s_writers.freeze_kcount;
1940 	if (who & FREEZE_HOLDER_USERSPACE)
1941 		++sb->s_writers.freeze_ucount;
1942 	return sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount;
1943 }
1944 
1945 static inline int freeze_dec(struct super_block *sb, enum freeze_holder who)
1946 {
1947 	WARN_ON_ONCE((who & ~FREEZE_FLAGS));
1948 	WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1);
1949 
1950 	if ((who & FREEZE_HOLDER_KERNEL) && sb->s_writers.freeze_kcount)
1951 		--sb->s_writers.freeze_kcount;
1952 	if ((who & FREEZE_HOLDER_USERSPACE) && sb->s_writers.freeze_ucount)
1953 		--sb->s_writers.freeze_ucount;
1954 	return sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount;
1955 }
1956 
1957 static inline bool may_freeze(struct super_block *sb, enum freeze_holder who)
1958 {
1959 	WARN_ON_ONCE((who & ~FREEZE_FLAGS));
1960 	WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1);
1961 
1962 	if (who & FREEZE_HOLDER_KERNEL)
1963 		return (who & FREEZE_MAY_NEST) ||
1964 		       sb->s_writers.freeze_kcount == 0;
1965 	if (who & FREEZE_HOLDER_USERSPACE)
1966 		return (who & FREEZE_MAY_NEST) ||
1967 		       sb->s_writers.freeze_ucount == 0;
1968 	return false;
1969 }
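
/*
 * Illustration of the rules above (a hypothetical call sequence, not
 * code from this file):
 *
 *	freeze_super(sb, FREEZE_HOLDER_USERSPACE);	returns 0
 *	freeze_super(sb, FREEZE_HOLDER_USERSPACE);	returns -EBUSY
 *	freeze_super(sb, FREEZE_HOLDER_USERSPACE |
 *			 FREEZE_MAY_NEST);		returns 0, ucount == 2
 *	freeze_super(sb, FREEZE_HOLDER_KERNEL);	returns 0, kcount == 1
 *
 * The superblock then stays frozen until every holder has issued a
 * matching thaw_super() call.
 */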
1970 
1971 /**
1972  * freeze_super - lock the filesystem and force it into a consistent state
1973  * @sb: the super to lock
1974  * @who: context that wants to freeze
1975  *
1976  * Syncs the super to make sure the filesystem is consistent and calls the fs's
1977  * freeze_fs.  Subsequent calls to this without first thawing the fs may return
1978  * -EBUSY.
1979  *
1980  * @who should be:
1981  * * %FREEZE_HOLDER_USERSPACE if userspace wants to freeze the fs;
1982  * * %FREEZE_HOLDER_KERNEL if the kernel wants to freeze the fs;
1983  * * %FREEZE_MAY_NEST whether nesting freeze and thaw requests is allowed.
1984  *
1985  * The @who argument distinguishes between the kernel and userspace trying to
1986  * freeze the filesystem.  Although there cannot be multiple kernel freezes or
1987  * multiple userspace freezes in effect at any given time, the kernel and
1988  * userspace can both hold a filesystem frozen.  The filesystem remains frozen
1989  * until there are no kernel or userspace freezes in effect.
1990  *
1991  * A filesystem may hold multiple devices and thus may be
1992  * frozen through the block layer via multiple block devices. In this
1993  * case the request is marked as being allowed to nest by passing
1994  * FREEZE_MAY_NEST. The filesystem remains frozen until all block
1995  * devices are unfrozen. If multiple freezes are attempted without
1996  * FREEZE_MAY_NEST, -EBUSY will be returned.
1997  *
1998  * During this function, sb->s_writers.frozen goes through these values:
1999  *
2000  * SB_UNFROZEN: File system is normal, all writes progress as usual.
2001  *
2002  * SB_FREEZE_WRITE: The file system is in the process of being frozen.  New
2003  * writes should be blocked, though page faults are still allowed. We wait for
2004  * all writes to complete and then proceed to the next stage.
2005  *
2006  * SB_FREEZE_PAGEFAULT: Freezing continues. Now also page faults are blocked
2007  * but internal fs threads can still modify the filesystem (although they
2008  * should not dirty new pages or inodes), writeback can run etc. After waiting
2009  * for all running page faults we sync the filesystem which will clean all
2010  * dirty pages and inodes (no new dirty pages or inodes can be created when
2011  * sync is running).
2012  *
2013  * SB_FREEZE_FS: The file system is frozen. Now all internal sources of fs
2014  * modification are blocked (e.g. XFS preallocation truncation on inode
2015  * reclaim). This is usually implemented by blocking new transactions for
2016  * filesystems that have them and need this additional guard. After all
2017  * internal writers are finished we call ->freeze_fs() to finish filesystem
2018  * freezing. Then we transition to SB_FREEZE_COMPLETE state. This state is
2019  * mostly auxiliary for filesystems to verify they do not modify frozen fs.
2020  *
2021  * sb->s_writers.frozen is protected by sb->s_umount.
2022  *
2023  * Return: If the freeze was successful zero is returned. If the freeze
2024  *         failed a negative error code is returned.
2025  */
2026 int freeze_super(struct super_block *sb, enum freeze_holder who)
2027 {
2028 	int ret;
2029 
2030 	if (!super_lock_excl(sb)) {
2031 		WARN_ONCE(1, "Dying superblock while freezing!");
2032 		return -EINVAL;
2033 	}
2034 	atomic_inc(&sb->s_active);
2035 
2036 retry:
2037 	if (sb->s_writers.frozen == SB_FREEZE_COMPLETE) {
2038 		if (may_freeze(sb, who))
2039 			ret = !!WARN_ON_ONCE(freeze_inc(sb, who) == 1);
2040 		else
2041 			ret = -EBUSY;
2042 		/* All freezers share a single active reference. */
2043 		deactivate_locked_super(sb);
2044 		return ret;
2045 	}
2046 
2047 	if (sb->s_writers.frozen != SB_UNFROZEN) {
2048 		ret = wait_for_partially_frozen(sb);
2049 		if (ret) {
2050 			deactivate_locked_super(sb);
2051 			return ret;
2052 		}
2053 
2054 		goto retry;
2055 	}
2056 
2057 	if (sb_rdonly(sb)) {
2058 		/* Nothing to do really... */
2059 		WARN_ON_ONCE(freeze_inc(sb, who) > 1);
2060 		sb->s_writers.frozen = SB_FREEZE_COMPLETE;
2061 		wake_up_var(&sb->s_writers.frozen);
2062 		super_unlock_excl(sb);
2063 		return 0;
2064 	}
2065 
2066 	sb->s_writers.frozen = SB_FREEZE_WRITE;
2067 	/* Release s_umount to preserve sb_start_write -> s_umount ordering */
2068 	super_unlock_excl(sb);
2069 	sb_wait_write(sb, SB_FREEZE_WRITE);
2070 	__super_lock_excl(sb);
2071 
2072 	/* Now we go and block page faults... */
2073 	sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
2074 	sb_wait_write(sb, SB_FREEZE_PAGEFAULT);
2075 
2076 	/* All writers are done so after syncing there won't be dirty data */
2077 	ret = sync_filesystem(sb);
2078 	if (ret) {
2079 		sb->s_writers.frozen = SB_UNFROZEN;
2080 		sb_freeze_unlock(sb, SB_FREEZE_PAGEFAULT);
2081 		wake_up_var(&sb->s_writers.frozen);
2082 		deactivate_locked_super(sb);
2083 		return ret;
2084 	}
2085 
2086 	/* Now wait for internal filesystem counter */
2087 	sb->s_writers.frozen = SB_FREEZE_FS;
2088 	sb_wait_write(sb, SB_FREEZE_FS);
2089 
2090 	if (sb->s_op->freeze_fs) {
2091 		ret = sb->s_op->freeze_fs(sb);
2092 		if (ret) {
2093 			pr_err("VFS: Filesystem freeze failed\n");
2095 			sb->s_writers.frozen = SB_UNFROZEN;
2096 			sb_freeze_unlock(sb, SB_FREEZE_FS);
2097 			wake_up_var(&sb->s_writers.frozen);
2098 			deactivate_locked_super(sb);
2099 			return ret;
2100 		}
2101 	}
2102 	/*
2103 	 * For debugging purposes so that fs can warn if it sees write activity
2104 	 * when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super().
2105 	 */
2106 	WARN_ON_ONCE(freeze_inc(sb, who) > 1);
2107 	sb->s_writers.frozen = SB_FREEZE_COMPLETE;
2108 	wake_up_var(&sb->s_writers.frozen);
2109 	lockdep_sb_freeze_release(sb);
2110 	super_unlock_excl(sb);
2111 	return 0;
2112 }
2113 EXPORT_SYMBOL(freeze_super);
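
/*
 * Example (a minimal sketch of an in-kernel user; not code from this
 * file): a kernel subsystem that must tolerate an already-frozen
 * filesystem passes FREEZE_MAY_NEST along with its holder flag:
 *
 *	error = freeze_super(sb, FREEZE_HOLDER_KERNEL | FREEZE_MAY_NEST);
 *	if (error)
 *		return error;
 *	...the filesystem is now consistent and writers are blocked...
 */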
2114 
2115 /*
2116  * Undoes the effect of a freeze_super() call.  If the filesystem is
2117  * frozen both by userspace and the kernel, a thaw call from either source
2118  * removes that state without releasing the other state or unlocking the
2119  * filesystem.
2120  */
2121 static int thaw_super_locked(struct super_block *sb, enum freeze_holder who)
2122 {
2123 	int error = -EINVAL;
2124 
2125 	if (sb->s_writers.frozen != SB_FREEZE_COMPLETE)
2126 		goto out_unlock;
2127 
2128 	/*
2129 	 * All freezers share a single active reference, so if other
2130 	 * freezers remain just unlock without dropping that reference.
2131 	 */
2132 	if (freeze_dec(sb, who))
2133 		goto out_unlock;
2134 
2135 	if (sb_rdonly(sb)) {
2136 		sb->s_writers.frozen = SB_UNFROZEN;
2137 		wake_up_var(&sb->s_writers.frozen);
2138 		goto out_deactivate;
2139 	}
2140 
2141 	lockdep_sb_freeze_acquire(sb);
2142 
2143 	if (sb->s_op->unfreeze_fs) {
2144 		error = sb->s_op->unfreeze_fs(sb);
2145 		if (error) {
2146 			pr_err("VFS: Filesystem thaw failed\n");
2147 			freeze_inc(sb, who);
2148 			lockdep_sb_freeze_release(sb);
2149 			goto out_unlock;
2150 		}
2151 	}
2152 
2153 	sb->s_writers.frozen = SB_UNFROZEN;
2154 	wake_up_var(&sb->s_writers.frozen);
2155 	sb_freeze_unlock(sb, SB_FREEZE_FS);
2156 out_deactivate:
2157 	deactivate_locked_super(sb);
2158 	return 0;
2159 
2160 out_unlock:
2161 	super_unlock_excl(sb);
2162 	return error;
2163 }
2164 
2165 /**
2166  * thaw_super - unlock filesystem
2167  * @sb: the super to thaw
2168  * @who: context that wants to thaw
2169  *
2170  * Unlocks the filesystem and marks it writeable again after freeze_super()
2171  * if there are no remaining freezes on the filesystem.
2172  *
2173  * @who should be:
2174  * * %FREEZE_HOLDER_USERSPACE if userspace wants to thaw the fs;
2175  * * %FREEZE_HOLDER_KERNEL if the kernel wants to thaw the fs;
2176  * * %FREEZE_MAY_NEST whether nesting freeze and thaw requests is allowed.
2177  *
2178  * A filesystem may hold multiple devices and thus may
2179  * have been frozen through the block layer via multiple block devices.
2180  * The filesystem remains frozen until all block devices are unfrozen.
2181  */
2182 int thaw_super(struct super_block *sb, enum freeze_holder who)
2183 {
2184 	if (!super_lock_excl(sb)) {
2185 		WARN_ONCE(1, "Dying superblock while thawing!");
2186 		return -EINVAL;
2187 	}
2188 	return thaw_super_locked(sb, who);
2189 }
2190 EXPORT_SYMBOL(thaw_super);
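
/*
 * Example (illustrative): a thaw must name the same holder as the
 * freeze it undoes. A freeze taken with FREEZE_HOLDER_USERSPACE, e.g.
 * on behalf of the FIFREEZE ioctl, is released with:
 *
 *	error = thaw_super(sb, FREEZE_HOLDER_USERSPACE);
 */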
2191 
2192 /*
2193  * Create a workqueue for deferred direct IO completions. We allocate the
2194  * workqueue when it's first needed. This avoids creating the workqueue for
2195  * filesystems that don't need it and also allows us to create it late
2196  * enough that we can include s_id in its name.
2197  */
2198 int sb_init_dio_done_wq(struct super_block *sb)
2199 {
2200 	struct workqueue_struct *old;
2201 	struct workqueue_struct *wq = alloc_workqueue("dio/%s",
2202 						      WQ_MEM_RECLAIM, 0,
2203 						      sb->s_id);
2204 	if (!wq)
2205 		return -ENOMEM;
2206 	/*
2207 	 * This has to be atomic as multiple DIOs can race to create the workqueue
2208 	 */
2209 	old = cmpxchg(&sb->s_dio_done_wq, NULL, wq);
2210 	/* Someone created workqueue before us? Free ours... */
2211 	/* Someone created the workqueue before us? Free ours... */
2212 		destroy_workqueue(wq);
2213 	return 0;
2214 }
2215 EXPORT_SYMBOL_GPL(sb_init_dio_done_wq);
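
/*
 * The cmpxchg() above is the common lock-free lazy-initialization
 * pattern (sketch with generic names):
 *
 *	new = alloc_resource();
 *	old = cmpxchg(&shared_ptr, NULL, new);
 *	if (old)
 *		free_resource(new);	(the racing winner's copy survives)
 *
 * Either way sb->s_dio_done_wq is non-NULL afterwards, which is why
 * losing the race is not an error and 0 is returned.
 */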
2216