// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/super.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  super.c contains code to handle: - mount structures
 *                                   - super-block tables
 *                                   - filesystem drivers list
 *                                   - mount system call
 *                                   - umount system call
 *                                   - ustat system call
 *
 * GK 2/5/95  -  Changed to support mounting the root fs via NFS
 *
 *  Added kerneld support: Jacques Gelinas and Bjorn Ekwall
 *  Added change_root: Werner Almesberger & Hans Lermen, Feb '96
 *  Added options to /proc/mounts:
 *    Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
 *  Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
 *  Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
 */

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/writeback.h>		/* for the emergency remount stuff */
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/rculist_bl.h>
#include <linux/fscrypt.h>
#include <linux/fsnotify.h>
#include <linux/lockdep.h>
#include <linux/user_namespace.h>
#include <linux/fs_context.h>
#include <uapi/linux/mount.h>
#include "internal.h"

static int thaw_super_locked(struct super_block *sb, enum freeze_holder who,
			     const void *freeze_owner);

static LIST_HEAD(super_blocks);
static DEFINE_SPINLOCK(sb_lock);

static char *sb_writers_name[SB_FREEZE_LEVELS] = {
	"sb_writers",
	"sb_pagefaults",
	"sb_internal",
};

static inline void __super_lock(struct super_block *sb, bool excl)
{
	if (excl)
		down_write(&sb->s_umount);
	else
		down_read(&sb->s_umount);
}

static inline void super_unlock(struct super_block *sb, bool excl)
{
	if (excl)
		up_write(&sb->s_umount);
	else
		up_read(&sb->s_umount);
}

static inline void __super_lock_excl(struct super_block *sb)
{
	__super_lock(sb, true);
}

static inline void super_unlock_excl(struct super_block *sb)
{
	super_unlock(sb, true);
}

static inline void super_unlock_shared(struct super_block *sb)
{
	super_unlock(sb, false);
}

static bool super_flags(const struct super_block *sb, unsigned int flags)
{
	/*
	 * Pairs with smp_store_release() in super_wake() and ensures
	 * that we see @flags after we're woken.
	 */
	return smp_load_acquire(&sb->s_flags) & flags;
}

/**
 * super_lock - wait for superblock to become ready and lock it
 * @sb: superblock to wait for
 * @excl: whether exclusive access is required
 *
 * If the superblock has neither passed through vfs_get_tree() nor
 * generic_shutdown_super() yet, wait for it to happen. Either superblock
 * creation will succeed and SB_BORN is set by vfs_get_tree() or we're
 * woken and we'll see SB_DYING.
 *
 * The caller must have acquired a temporary reference on @sb->s_count.
 *
 * Return: The function returns true if SB_BORN was set, with s_umount
 *         held. It returns false if SB_DYING was set, without s_umount
 *         held.
 */
static __must_check bool super_lock(struct super_block *sb, bool excl)
{
	lockdep_assert_not_held(&sb->s_umount);

	/* wait until the superblock is ready or dying */
	wait_var_event(&sb->s_flags, super_flags(sb, SB_BORN | SB_DYING));

	/* Don't pointlessly acquire s_umount. */
	if (super_flags(sb, SB_DYING))
		return false;

	__super_lock(sb, excl);

	/*
	 * Has gone through generic_shutdown_super() in the meantime.
	 * @sb->s_root is NULL and @sb->s_active is 0. No one needs to
	 * grab a reference to this. Tell them so.
	 */
	if (sb->s_flags & SB_DYING) {
		super_unlock(sb, excl);
		return false;
	}

	WARN_ON_ONCE(!(sb->s_flags & SB_BORN));
	return true;
}

/* wait and try to acquire read-side of @sb->s_umount */
static inline bool super_lock_shared(struct super_block *sb)
{
	return super_lock(sb, false);
}

/* wait and try to acquire write-side of @sb->s_umount */
static inline bool super_lock_excl(struct super_block *sb)
{
	return super_lock(sb, true);
}
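
/*
 * Illustrative sketch (not a real caller; hypothetical usage): code in
 * this file that inspects a superblock from the global lists takes a
 * temporary @s_count reference under @sb_lock so the superblock cannot
 * be freed, then uses super_lock() to wait for SB_BORN or SB_DYING;
 * see user_get_super() and __iterate_supers() for real callers.
 *
 *	spin_lock(&sb_lock);
 *	sb->s_count++;			// temporary reference
 *	spin_unlock(&sb_lock);
 *	if (super_lock_shared(sb)) {	// false if SB_DYING
 *		...inspect sb...
 *		super_unlock_shared(sb);
 *	}
 *	put_super(sb);			// drop temporary reference
 */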

/* wake waiters */
#define SUPER_WAKE_FLAGS (SB_BORN | SB_DYING | SB_DEAD)
static void super_wake(struct super_block *sb, unsigned int flag)
{
	WARN_ON_ONCE((flag & ~SUPER_WAKE_FLAGS));
	WARN_ON_ONCE(hweight32(flag & SUPER_WAKE_FLAGS) > 1);

	/*
	 * Pairs with smp_load_acquire() in super_lock() to make sure
	 * all initializations in the superblock are visible to anyone
	 * who observes SB_BORN being set.
	 */
	smp_store_release(&sb->s_flags, sb->s_flags | flag);
	/*
	 * Pairs with the barrier in prepare_to_wait_event() to make sure
	 * ___wait_var_event() either sees SB_BORN set or
	 * waitqueue_active() check in wake_up_var() sees the waiter.
	 */
	smp_mb();
	wake_up_var(&sb->s_flags);
}
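
/*
 * Informal ordering sketch for the two barrier comments above: a
 * mounter publishing SB_BORN and a waiter in super_lock() interact as
 *
 *	mounter					waiter
 *	-------					------
 *	...initialize *sb...			wait_var_event(&sb->s_flags, ...)
 *	smp_store_release(&sb->s_flags, ..)	  smp_load_acquire(&sb->s_flags)
 *	smp_mb(); wake_up_var(...)		  sees SB_BORN; *sb init visible
 */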

/*
 * One thing we have to be careful of with a per-sb shrinker is that we don't
 * drop the last active reference to the superblock from within the shrinker.
 * If that happens we could trigger unregistering the shrinker from within the
 * shrinker path and that leads to deadlock on the shrinker_mutex. Hence we
 * take a passive reference to the superblock to avoid this from occurring.
 */
static unsigned long super_cache_scan(struct shrinker *shrink,
				      struct shrink_control *sc)
{
	struct super_block *sb;
	long	fs_objects = 0;
	long	total_objects;
	long	freed = 0;
	long	dentries;
	long	inodes;

	sb = shrink->private_data;

	/*
	 * Deadlock avoidance.  We may hold various FS locks, and we don't want
	 * to recurse into the FS that called us in clear_inode() and friends..
	 */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	if (!super_trylock_shared(sb))
		return SHRINK_STOP;

	if (sb->s_op->nr_cached_objects)
		fs_objects = sb->s_op->nr_cached_objects(sb, sc);

	inodes = list_lru_shrink_count(&sb->s_inode_lru, sc);
	dentries = list_lru_shrink_count(&sb->s_dentry_lru, sc);
	total_objects = dentries + inodes + fs_objects;
	if (!total_objects)
		total_objects = 1;

	/* proportion the scan between the caches */
	dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
	inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);
	fs_objects = mult_frac(sc->nr_to_scan, fs_objects, total_objects);

	/*
	 * prune the dcache first as the icache is pinned by it, then
	 * prune the icache, followed by the filesystem specific caches
	 *
	 * Ensure that we always scan at least one object - memcg kmem
	 * accounting uses this to fully empty the caches.
	 */
	sc->nr_to_scan = dentries + 1;
	freed = prune_dcache_sb(sb, sc);
	sc->nr_to_scan = inodes + 1;
	freed += prune_icache_sb(sb, sc);

	if (fs_objects) {
		sc->nr_to_scan = fs_objects + 1;
		freed += sb->s_op->free_cached_objects(sb, sc);
	}

	super_unlock_shared(sb);
	return freed;
}

static unsigned long super_cache_count(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	struct super_block *sb;
	long	total_objects = 0;

	sb = shrink->private_data;

	/*
	 * We don't call super_trylock_shared() here as it is a scalability
	 * bottleneck, so we're exposed to partial setup state. The shrinker
	 * rwsem does not protect filesystem operations backing
	 * list_lru_shrink_count() or s_op->nr_cached_objects(). Counts can
	 * change between super_cache_count and super_cache_scan, so we really
	 * don't need locks here.
	 *
	 * However, if we are currently mounting the superblock, the underlying
	 * filesystem might be in a state of partial construction and hence it
	 * is dangerous to access it.  super_trylock_shared() uses a SB_BORN check
	 * to avoid this situation, so do the same here. The memory barrier is
	 * matched with the one in mount_fs() as we don't hold locks here.
	 */
	if (!(sb->s_flags & SB_BORN))
		return 0;
	smp_rmb();

	if (sb->s_op && sb->s_op->nr_cached_objects)
		total_objects = sb->s_op->nr_cached_objects(sb, sc);

	total_objects += list_lru_shrink_count(&sb->s_dentry_lru, sc);
	total_objects += list_lru_shrink_count(&sb->s_inode_lru, sc);

	if (!total_objects)
		return SHRINK_EMPTY;

	total_objects = vfs_pressure_ratio(total_objects);
	return total_objects;
}

static void destroy_super_work(struct work_struct *work)
{
	struct super_block *s = container_of(work, struct super_block,
							destroy_work);
	fsnotify_sb_free(s);
	security_sb_free(s);
	put_user_ns(s->s_user_ns);
	kfree(s->s_subtype);
	for (int i = 0; i < SB_FREEZE_LEVELS; i++)
		percpu_free_rwsem(&s->s_writers.rw_sem[i]);
	kfree(s);
}

static void destroy_super_rcu(struct rcu_head *head)
{
	struct super_block *s = container_of(head, struct super_block, rcu);
	INIT_WORK(&s->destroy_work, destroy_super_work);
	schedule_work(&s->destroy_work);
}

/* Free a superblock that has never been seen by anyone */
static void destroy_unused_super(struct super_block *s)
{
	if (!s)
		return;
	super_unlock_excl(s);
	list_lru_destroy(&s->s_dentry_lru);
	list_lru_destroy(&s->s_inode_lru);
	shrinker_free(s->s_shrink);
	/* no delays needed */
	destroy_super_work(&s->destroy_work);
}

/**
 *	alloc_super	-	create new superblock
 *	@type:	filesystem type superblock should belong to
 *	@flags: the mount flags
 *	@user_ns: User namespace for the super_block
 *
 *	Allocates and initializes a new &struct super_block.  alloc_super()
 *	returns a pointer to the new superblock or %NULL if allocation failed.
 */
static struct super_block *alloc_super(struct file_system_type *type, int flags,
				       struct user_namespace *user_ns)
{
	struct super_block *s = kzalloc(sizeof(struct super_block), GFP_KERNEL);
	static const struct super_operations default_op;
	int i;

	if (!s)
		return NULL;

	s->s_user_ns = get_user_ns(user_ns);
	init_rwsem(&s->s_umount);
	lockdep_set_class(&s->s_umount, &type->s_umount_key);
	/*
	 * sget() can have s_umount recursion.
	 *
	 * When it cannot find a suitable sb, it allocates a new
	 * one (this one), and tries again to find a suitable old
	 * one.
	 *
	 * In case that succeeds, it will acquire the s_umount
	 * lock of the old one. Since these are clearly distinct
	 * locks, and this object isn't exposed yet, there's no
	 * risk of deadlocks.
	 *
	 * Annotate this by putting this lock in a different
	 * subclass.
	 */
	down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);

	if (security_sb_alloc(s))
		goto fail;

	for (i = 0; i < SB_FREEZE_LEVELS; i++) {
		if (__percpu_init_rwsem(&s->s_writers.rw_sem[i],
					sb_writers_name[i],
					&type->s_writers_key[i]))
			goto fail;
	}
	s->s_bdi = &noop_backing_dev_info;
	s->s_flags = flags;
	if (s->s_user_ns != &init_user_ns)
		s->s_iflags |= SB_I_NODEV;
	INIT_HLIST_NODE(&s->s_instances);
	INIT_HLIST_BL_HEAD(&s->s_roots);
	mutex_init(&s->s_sync_lock);
	INIT_LIST_HEAD(&s->s_inodes);
	spin_lock_init(&s->s_inode_list_lock);
	INIT_LIST_HEAD(&s->s_inodes_wb);
	spin_lock_init(&s->s_inode_wblist_lock);

	s->s_count = 1;
	atomic_set(&s->s_active, 1);
	mutex_init(&s->s_vfs_rename_mutex);
	lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
	init_rwsem(&s->s_dquot.dqio_sem);
	s->s_maxbytes = MAX_NON_LFS;
	s->s_op = &default_op;
	s->s_time_gran = 1000000000;
	s->s_time_min = TIME64_MIN;
	s->s_time_max = TIME64_MAX;

	s->s_shrink = shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
				     "sb-%s", type->name);
	if (!s->s_shrink)
		goto fail;

	s->s_shrink->scan_objects = super_cache_scan;
	s->s_shrink->count_objects = super_cache_count;
	s->s_shrink->batch = 1024;
	s->s_shrink->private_data = s;

	if (list_lru_init_memcg(&s->s_dentry_lru, s->s_shrink))
		goto fail;
	if (list_lru_init_memcg(&s->s_inode_lru, s->s_shrink))
		goto fail;
	return s;

fail:
	destroy_unused_super(s);
	return NULL;
}

/* Superblock refcounting  */

/*
 * Drop a superblock's refcount.  The caller must hold sb_lock.
 */
static void __put_super(struct super_block *s)
{
	if (!--s->s_count) {
		list_del_init(&s->s_list);
		WARN_ON(s->s_dentry_lru.node);
		WARN_ON(s->s_inode_lru.node);
		WARN_ON(s->s_mounts);
		call_rcu(&s->rcu, destroy_super_rcu);
	}
}

/**
 *	put_super	-	drop a temporary reference to superblock
 *	@sb: superblock in question
 *
 *	Drops a temporary reference, frees superblock if there are no
 *	references left.
 */
void put_super(struct super_block *sb)
{
	spin_lock(&sb_lock);
	__put_super(sb);
	spin_unlock(&sb_lock);
}

static void kill_super_notify(struct super_block *sb)
{
	lockdep_assert_not_held(&sb->s_umount);

	/* already notified earlier */
	if (sb->s_flags & SB_DEAD)
		return;

	/*
	 * Remove it from @fs_supers so it isn't found by new
	 * sget{_fc}() walkers anymore. Any concurrent mounter still
	 * managing to grab a temporary reference is guaranteed to
	 * already see SB_DYING and will wait until we notify them about
	 * SB_DEAD.
	 */
	spin_lock(&sb_lock);
	hlist_del_init(&sb->s_instances);
	spin_unlock(&sb_lock);

	/*
	 * Let concurrent mounts know that this thing is really dead.
	 * We don't need @sb->s_umount here as every concurrent caller
	 * will see SB_DYING and either discard the superblock or wait
	 * for SB_DEAD.
	 */
	super_wake(sb, SB_DEAD);
}

/**
 *	deactivate_locked_super	-	drop an active reference to superblock
 *	@s: superblock to deactivate
 *
 *	Drops an active reference to superblock, converting it into a temporary
 *	one if there are no other active references left.  In that case we
 *	tell fs driver to shut it down and drop the temporary reference we
 *	had just acquired.
 *
 *	Caller holds exclusive lock on superblock; that lock is released.
 */
void deactivate_locked_super(struct super_block *s)
{
	struct file_system_type *fs = s->s_type;
	if (atomic_dec_and_test(&s->s_active)) {
		shrinker_free(s->s_shrink);
		fs->kill_sb(s);

		kill_super_notify(s);

		/*
		 * Since list_lru_destroy() may sleep, we cannot call it from
		 * put_super(), where we hold the sb_lock. Therefore we destroy
		 * the lru lists right now.
		 */
		list_lru_destroy(&s->s_dentry_lru);
		list_lru_destroy(&s->s_inode_lru);

		put_filesystem(fs);
		put_super(s);
	} else {
		super_unlock_excl(s);
	}
}

EXPORT_SYMBOL(deactivate_locked_super);

/**
 *	deactivate_super	-	drop an active reference to superblock
 *	@s: superblock to deactivate
 *
 *	Variant of deactivate_locked_super(), except that superblock is *not*
 *	locked by caller.  If we are going to drop the final active reference,
 *	lock will be acquired prior to that.
 */
void deactivate_super(struct super_block *s)
{
	if (!atomic_add_unless(&s->s_active, -1, 1)) {
		__super_lock_excl(s);
		deactivate_locked_super(s);
	}
}

EXPORT_SYMBOL(deactivate_super);

/**
 * grab_super - acquire an active reference to a superblock
 * @sb: superblock to acquire
 *
 * Acquire a temporary reference on a superblock and try to trade it for
 * an active reference. This is used in sget{_fc}() to wait for a
 * superblock to either become SB_BORN or for it to pass through
 * sb->kill() and be marked as SB_DEAD.
 *
 * Return: This returns true if an active reference could be acquired,
 *         false if not.
 */
static bool grab_super(struct super_block *sb)
{
	bool locked;

	sb->s_count++;
	spin_unlock(&sb_lock);
	locked = super_lock_excl(sb);
	if (locked) {
		if (atomic_inc_not_zero(&sb->s_active)) {
			put_super(sb);
			return true;
		}
		super_unlock_excl(sb);
	}
	wait_var_event(&sb->s_flags, super_flags(sb, SB_DEAD));
	put_super(sb);
	return false;
}

/*
 *	super_trylock_shared - try to grab ->s_umount shared
 *	@sb: reference we are trying to grab
 *
 *	Try to prevent fs shutdown.  This is used in places where we
 *	cannot take an active reference but we need to ensure that the
 *	filesystem is not shut down while we are working on it. It returns
 *	false if we cannot acquire s_umount or if we lose the race and
 *	filesystem already got into shutdown, and returns true with the s_umount
 *	lock held in read mode in case of success. On successful return,
 *	the caller must drop the s_umount lock when done.
 *
 *	Note that unlike get_super() et al. this one does *not* bump ->s_count.
 *	The reason why it's safe is that we are OK with doing trylock instead
 *	of down_read().  There's a couple of places that are OK with that, but
 *	it's very much not a general-purpose interface.
 */
bool super_trylock_shared(struct super_block *sb)
{
	if (down_read_trylock(&sb->s_umount)) {
		if (!(sb->s_flags & SB_DYING) && sb->s_root &&
		    (sb->s_flags & SB_BORN))
			return true;
		super_unlock_shared(sb);
	}

	return false;
}
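
/*
 * Illustrative pattern: super_cache_scan() above is the canonical
 * super_trylock_shared() caller, bailing out instead of sleeping when
 * the lock is contended or the superblock is being torn down:
 *
 *	if (!super_trylock_shared(sb))
 *		return SHRINK_STOP;
 *	...work on sb...
 *	super_unlock_shared(sb);
 */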

/**
 *	retire_super	-	prevents superblock from being reused
 *	@sb: superblock to retire
 *
 *	The function marks the superblock to be ignored in the superblock test,
 *	which prevents it from being reused for any new mounts.  If the
 *	superblock has a private bdi, it also unregisters it, but doesn't reduce
 *	the refcount of the superblock to prevent potential races.  The refcount
 *	is reduced by generic_shutdown_super().  The function cannot be called
 *	concurrently with generic_shutdown_super().  It is safe to call the
 *	function multiple times, subsequent calls have no effect.
 *
 *	The marker will affect the re-use only for block-device-based
 *	superblocks.  Other superblocks will still get marked if this function
 *	is used, but that will not affect their reusability.
 */
void retire_super(struct super_block *sb)
{
	WARN_ON(!sb->s_bdev);
	__super_lock_excl(sb);
	if (sb->s_iflags & SB_I_PERSB_BDI) {
		bdi_unregister(sb->s_bdi);
		sb->s_iflags &= ~SB_I_PERSB_BDI;
	}
	sb->s_iflags |= SB_I_RETIRED;
	super_unlock_excl(sb);
}
EXPORT_SYMBOL(retire_super);

/**
 *	generic_shutdown_super	-	common helper for ->kill_sb()
 *	@sb: superblock to kill
 *
 *	generic_shutdown_super() does all fs-independent work on superblock
 *	shutdown.  Typical ->kill_sb() should pick all fs-specific objects
 *	that need destruction out of superblock, call generic_shutdown_super()
 *	and release aforementioned objects.  Note: dentries and inodes _are_
 *	taken care of and do not need specific handling.
 *
 *	Upon calling this function, the filesystem may no longer alter or
 *	rearrange the set of dentries belonging to this super_block, nor may it
 *	change the attachments of dentries to inodes.
 */
void generic_shutdown_super(struct super_block *sb)
{
	const struct super_operations *sop = sb->s_op;

	if (sb->s_root) {
		shrink_dcache_for_umount(sb);
		sync_filesystem(sb);
		sb->s_flags &= ~SB_ACTIVE;

		cgroup_writeback_umount(sb);

		/* Evict all inodes with zero refcount. */
		evict_inodes(sb);

		/*
		 * Clean up and evict any inodes that still have references due
		 * to fsnotify or the security policy.
		 */
		fsnotify_sb_delete(sb);
		security_sb_delete(sb);

		if (sb->s_dio_done_wq) {
			destroy_workqueue(sb->s_dio_done_wq);
			sb->s_dio_done_wq = NULL;
		}

		if (sop->put_super)
			sop->put_super(sb);

		/*
		 * Now that all potentially-encrypted inodes have been evicted,
		 * the fscrypt keyring can be destroyed.
		 */
		fscrypt_destroy_keyring(sb);

		if (CHECK_DATA_CORRUPTION(!list_empty(&sb->s_inodes), NULL,
				"VFS: Busy inodes after unmount of %s (%s)",
				sb->s_id, sb->s_type->name)) {
			/*
			 * Adding a proper bailout path here would be hard, but
			 * we can at least make it more likely that a later
			 * iput_final() or such crashes cleanly.
			 */
			struct inode *inode;

			spin_lock(&sb->s_inode_list_lock);
			list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
				inode->i_op = VFS_PTR_POISON;
				inode->i_sb = VFS_PTR_POISON;
				inode->i_mapping = VFS_PTR_POISON;
			}
			spin_unlock(&sb->s_inode_list_lock);
		}
	}
	/*
	 * Broadcast to everyone that grabbed a temporary reference to this
	 * superblock before we removed it from @fs_supers that the superblock
	 * is dying. Every walker of @fs_supers outside of sget{_fc}() will now
	 * discard this superblock and treat it as dead.
	 *
	 * We leave the superblock on @fs_supers so it can be found by
	 * sget{_fc}() until we passed sb->kill_sb().
	 */
	super_wake(sb, SB_DYING);
	super_unlock_excl(sb);
	if (sb->s_bdi != &noop_backing_dev_info) {
		if (sb->s_iflags & SB_I_PERSB_BDI)
			bdi_unregister(sb->s_bdi);
		bdi_put(sb->s_bdi);
		sb->s_bdi = &noop_backing_dev_info;
	}
}

EXPORT_SYMBOL(generic_shutdown_super);

bool mount_capable(struct fs_context *fc)
{
	if (!(fc->fs_type->fs_flags & FS_USERNS_MOUNT))
		return capable(CAP_SYS_ADMIN);
	else
		return ns_capable(fc->user_ns, CAP_SYS_ADMIN);
}

/**
 * sget_fc - Find or create a superblock
 * @fc:	Filesystem context.
 * @test: Comparison callback
 * @set: Setup callback
 *
 * Create a new superblock or find an existing one.
 *
 * The @test callback is used to find a matching existing superblock.
 * Whether or not the requested parameters in @fc are taken into account
 * is specific to the @test callback that is used. They may even be
 * completely ignored.
 *
 * If an extant superblock is matched, it will be returned unless:
 *
 * (1) the namespace of the filesystem context @fc and the extant
 *     superblock's namespace differ
 *
 * (2) the filesystem context @fc has requested that reusing an extant
 *     superblock is not allowed
 *
 * In both cases EBUSY will be returned.
 *
 * If no match is made, a new superblock will be allocated and basic
 * initialisation will be performed (s_type, s_fs_info and s_id will be
 * set and the @set callback will be invoked), the superblock will be
 * published and it will be returned in a partially constructed state
 * with SB_BORN and SB_ACTIVE as yet unset.
 *
 * Return: On success, an extant or newly created superblock is
 *         returned. On failure an error pointer is returned.
 */
struct super_block *sget_fc(struct fs_context *fc,
			    int (*test)(struct super_block *, struct fs_context *),
			    int (*set)(struct super_block *, struct fs_context *))
{
	struct super_block *s = NULL;
	struct super_block *old;
	struct user_namespace *user_ns = fc->global ? &init_user_ns : fc->user_ns;
	int err;

	/*
	 * Never allow s_user_ns != &init_user_ns when FS_USERNS_MOUNT is
	 * not set, as the filesystem is likely unprepared to handle it.
	 * This can happen when fsconfig() is called from init_user_ns with
	 * an fs_fd opened in another user namespace.
	 */
	if (user_ns != &init_user_ns && !(fc->fs_type->fs_flags & FS_USERNS_MOUNT)) {
		errorfc(fc, "VFS: Mounting from non-initial user namespace is not allowed");
		return ERR_PTR(-EPERM);
	}

retry:
	spin_lock(&sb_lock);
	if (test) {
		hlist_for_each_entry(old, &fc->fs_type->fs_supers, s_instances) {
			if (test(old, fc))
				goto share_extant_sb;
		}
	}
	if (!s) {
		spin_unlock(&sb_lock);
		s = alloc_super(fc->fs_type, fc->sb_flags, user_ns);
		if (!s)
			return ERR_PTR(-ENOMEM);
		goto retry;
	}

	s->s_fs_info = fc->s_fs_info;
	err = set(s, fc);
	if (err) {
		s->s_fs_info = NULL;
		spin_unlock(&sb_lock);
		destroy_unused_super(s);
		return ERR_PTR(err);
	}
	fc->s_fs_info = NULL;
	s->s_type = fc->fs_type;
	s->s_iflags |= fc->s_iflags;
	strscpy(s->s_id, s->s_type->name, sizeof(s->s_id));
	/*
	 * Make the superblock visible on @super_blocks and @fs_supers.
	 * It's in a nascent state and users should wait on SB_BORN or
	 * SB_DYING to be set.
	 */
	list_add_tail(&s->s_list, &super_blocks);
	hlist_add_head(&s->s_instances, &s->s_type->fs_supers);
	spin_unlock(&sb_lock);
	get_filesystem(s->s_type);
	shrinker_register(s->s_shrink);
	return s;

share_extant_sb:
	if (user_ns != old->s_user_ns || fc->exclusive) {
		spin_unlock(&sb_lock);
		destroy_unused_super(s);
		if (fc->exclusive)
			warnfc(fc, "reusing existing filesystem not allowed");
		else
			warnfc(fc, "reusing existing filesystem in another namespace not allowed");
		return ERR_PTR(-EBUSY);
	}
	if (!grab_super(old))
		goto retry;
	destroy_unused_super(s);
	return old;
}
EXPORT_SYMBOL(sget_fc);
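
/*
 * Illustrative caller sketch: a typical sget_fc() user looks like
 * vfs_get_super() further down in this file. It passes a @test/@set
 * pair and fills in the superblock only when it is newly created,
 * i.e. when s_root is still NULL:
 *
 *	sb = sget_fc(fc, test, set_anon_super_fc);
 *	if (IS_ERR(sb))
 *		return PTR_ERR(sb);
 *	if (!sb->s_root) {
 *		err = fill_super(sb, fc);
 *		...
 *		sb->s_flags |= SB_ACTIVE;
 *	}
 */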

/**
 *	sget	-	find or create a superblock
 *	@type:	  filesystem type superblock should belong to
 *	@test:	  comparison callback
 *	@set:	  setup callback
 *	@flags:	  mount flags
 *	@data:	  argument to each of them
 */
struct super_block *sget(struct file_system_type *type,
			int (*test)(struct super_block *,void *),
			int (*set)(struct super_block *,void *),
			int flags,
			void *data)
{
	struct user_namespace *user_ns = current_user_ns();
	struct super_block *s = NULL;
	struct super_block *old;
	int err;

retry:
	spin_lock(&sb_lock);
	if (test) {
		hlist_for_each_entry(old, &type->fs_supers, s_instances) {
			if (!test(old, data))
				continue;
			if (user_ns != old->s_user_ns) {
				spin_unlock(&sb_lock);
				destroy_unused_super(s);
				return ERR_PTR(-EBUSY);
			}
			if (!grab_super(old))
				goto retry;
			destroy_unused_super(s);
			return old;
		}
	}
	if (!s) {
		spin_unlock(&sb_lock);
		s = alloc_super(type, flags, user_ns);
		if (!s)
			return ERR_PTR(-ENOMEM);
		goto retry;
	}

	err = set(s, data);
	if (err) {
		spin_unlock(&sb_lock);
		destroy_unused_super(s);
		return ERR_PTR(err);
	}
	s->s_type = type;
	strscpy(s->s_id, type->name, sizeof(s->s_id));
	list_add_tail(&s->s_list, &super_blocks);
	hlist_add_head(&s->s_instances, &type->fs_supers);
	spin_unlock(&sb_lock);
	get_filesystem(type);
	shrinker_register(s->s_shrink);
	return s;
}
EXPORT_SYMBOL(sget);

void drop_super(struct super_block *sb)
{
	super_unlock_shared(sb);
	put_super(sb);
}

EXPORT_SYMBOL(drop_super);

void drop_super_exclusive(struct super_block *sb)
{
	super_unlock_excl(sb);
	put_super(sb);
}
EXPORT_SYMBOL(drop_super_exclusive);

enum super_iter_flags_t {
	SUPER_ITER_EXCL		= (1U << 0),
	SUPER_ITER_UNLOCKED	= (1U << 1),
	SUPER_ITER_REVERSE	= (1U << 2),
};

static inline struct super_block *first_super(enum super_iter_flags_t flags)
{
	if (flags & SUPER_ITER_REVERSE)
		return list_last_entry(&super_blocks, struct super_block, s_list);
	return list_first_entry(&super_blocks, struct super_block, s_list);
}

static inline struct super_block *next_super(struct super_block *sb,
					     enum super_iter_flags_t flags)
{
	if (flags & SUPER_ITER_REVERSE)
		return list_prev_entry(sb, s_list);
	return list_next_entry(sb, s_list);
}

static void __iterate_supers(void (*f)(struct super_block *, void *), void *arg,
			     enum super_iter_flags_t flags)
{
	struct super_block *sb, *p = NULL;
	bool excl = flags & SUPER_ITER_EXCL;

	guard(spinlock)(&sb_lock);

	for (sb = first_super(flags);
	     !list_entry_is_head(sb, &super_blocks, s_list);
	     sb = next_super(sb, flags)) {
		if (super_flags(sb, SB_DYING))
			continue;
		sb->s_count++;
		spin_unlock(&sb_lock);

		if (flags & SUPER_ITER_UNLOCKED) {
			f(sb, arg);
		} else if (super_lock(sb, excl)) {
			f(sb, arg);
			super_unlock(sb, excl);
		}

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
}

void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
{
	__iterate_supers(f, arg, 0);
}

/**
 *	iterate_supers_type - call function for superblocks of given type
 *	@type: fs type
 *	@f: function to call
 *	@arg: argument to pass to it
 *
 *	Scans the superblock list and calls given function, passing it
 *	locked superblock and given argument.
 */
void iterate_supers_type(struct file_system_type *type,
	void (*f)(struct super_block *, void *), void *arg)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
		bool locked;

		if (super_flags(sb, SB_DYING))
			continue;

		sb->s_count++;
		spin_unlock(&sb_lock);

		locked = super_lock_shared(sb);
		if (locked) {
			f(sb, arg);
			super_unlock_shared(sb);
		}

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}

EXPORT_SYMBOL(iterate_supers_type);
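
/*
 * Illustrative sketch (hypothetical callback name): a callback passed
 * to iterate_supers_type() runs with @sb->s_umount held shared, so it
 * may inspect the superblock but must not re-acquire s_umount:
 *
 *	static void count_active_cb(struct super_block *sb, void *arg)
 *	{
 *		if (sb->s_flags & SB_ACTIVE)
 *			(*(unsigned long *)arg)++;
 *	}
 *
 *	unsigned long count = 0;
 *	iterate_supers_type(type, count_active_cb, &count);
 */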

struct super_block *user_get_super(dev_t dev, bool excl)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		bool locked;

		if (sb->s_dev != dev)
			continue;

		sb->s_count++;
		spin_unlock(&sb_lock);

		locked = super_lock(sb, excl);
		if (locked)
			return sb;

		spin_lock(&sb_lock);
		__put_super(sb);
		break;
	}
	spin_unlock(&sb_lock);
	return NULL;
}

/**
 * reconfigure_super - asks filesystem to change superblock parameters
 * @fc: The superblock and configuration
 *
 * Alters the configuration parameters of a live superblock.
 */
int reconfigure_super(struct fs_context *fc)
{
	struct super_block *sb = fc->root->d_sb;
	int retval;
	bool remount_ro = false;
	bool remount_rw = false;
	bool force = fc->sb_flags & SB_FORCE;

	if (fc->sb_flags_mask & ~MS_RMT_MASK)
		return -EINVAL;
	if (sb->s_writers.frozen != SB_UNFROZEN)
		return -EBUSY;

	retval = security_sb_remount(sb, fc->security);
	if (retval)
		return retval;

	if (fc->sb_flags_mask & SB_RDONLY) {
#ifdef CONFIG_BLOCK
		if (!(fc->sb_flags & SB_RDONLY) && sb->s_bdev &&
		    bdev_read_only(sb->s_bdev))
			return -EACCES;
#endif
		remount_rw = !(fc->sb_flags & SB_RDONLY) && sb_rdonly(sb);
		remount_ro = (fc->sb_flags & SB_RDONLY) && !sb_rdonly(sb);
	}

	if (remount_ro) {
		if (!hlist_empty(&sb->s_pins)) {
			super_unlock_excl(sb);
			group_pin_kill(&sb->s_pins);
			__super_lock_excl(sb);
			if (!sb->s_root)
				return 0;
			if (sb->s_writers.frozen != SB_UNFROZEN)
				return -EBUSY;
			remount_ro = !sb_rdonly(sb);
		}
	}
	shrink_dcache_sb(sb);

	/* If we are reconfiguring to RDONLY and current sb is read/write,
	 * make sure there are no files open for writing.
	 */
	if (remount_ro) {
		if (force) {
			sb_start_ro_state_change(sb);
		} else {
			retval = sb_prepare_remount_readonly(sb);
			if (retval)
				return retval;
		}
	} else if (remount_rw) {
		/*
		 * Protect filesystem's reconfigure code from writes from
		 * userspace until reconfigure finishes.
		 */
		sb_start_ro_state_change(sb);
	}

	if (fc->ops->reconfigure) {
		retval = fc->ops->reconfigure(fc);
		if (retval) {
			if (!force)
				goto cancel_readonly;
			/* If forced remount, go ahead despite any errors */
			WARN(1, "forced remount of a %s fs returned %i\n",
			     sb->s_type->name, retval);
		}
	}

	WRITE_ONCE(sb->s_flags, ((sb->s_flags & ~fc->sb_flags_mask) |
				 (fc->sb_flags & fc->sb_flags_mask)));
	sb_end_ro_state_change(sb);

	/*
	 * Some filesystems modify their metadata via some other path than the
	 * bdev buffer cache (eg. use a private mapping, or directories in
	 * pagecache, etc). Also file data modifications go via their own
	 * mappings. So if we try to mount readonly then copy the filesystem
	 * from bdev, we could get stale data, so invalidate it to give a best
	 * effort at coherency.
	 */
	if (remount_ro && sb->s_bdev)
		invalidate_bdev(sb->s_bdev);
	return 0;

cancel_readonly:
	sb_end_ro_state_change(sb);
	return retval;
}
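
/*
 * do_emergency_remount_callback() below is a complete in-tree example
 * of driving reconfigure_super() programmatically: build a context via
 * fs_context_for_reconfigure(), parse options, then reconfigure.
 */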

static void do_emergency_remount_callback(struct super_block *sb, void *unused)
{
	if (sb->s_bdev && !sb_rdonly(sb)) {
		struct fs_context *fc;

		fc = fs_context_for_reconfigure(sb->s_root,
					SB_RDONLY | SB_FORCE, SB_RDONLY);
		if (!IS_ERR(fc)) {
			if (parse_monolithic_mount_data(fc, NULL) == 0)
				(void)reconfigure_super(fc);
			put_fs_context(fc);
		}
	}
}

static void do_emergency_remount(struct work_struct *work)
{
	__iterate_supers(do_emergency_remount_callback, NULL,
			 SUPER_ITER_EXCL | SUPER_ITER_REVERSE);
	kfree(work);
	printk("Emergency Remount complete\n");
}

void emergency_remount(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_emergency_remount);
		schedule_work(work);
	}
}

static void do_thaw_all_callback(struct super_block *sb, void *unused)
{
	if (IS_ENABLED(CONFIG_BLOCK))
		while (sb->s_bdev && !bdev_thaw(sb->s_bdev))
			pr_warn("Emergency Thaw on %pg\n", sb->s_bdev);
	thaw_super_locked(sb, FREEZE_HOLDER_USERSPACE, NULL);
}

static void do_thaw_all(struct work_struct *work)
{
	__iterate_supers(do_thaw_all_callback, NULL, SUPER_ITER_EXCL);
	kfree(work);
	printk(KERN_WARNING "Emergency Thaw complete\n");
}

/**
 * emergency_thaw_all -- forcibly thaw every frozen filesystem
 *
 * Used for emergency unfreeze of all filesystems via SysRq
 */
void emergency_thaw_all(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_thaw_all);
		schedule_work(work);
	}
}

static inline bool get_active_super(struct super_block *sb)
{
	bool active = false;

	if (super_lock_excl(sb)) {
		active = atomic_inc_not_zero(&sb->s_active);
		super_unlock_excl(sb);
	}
	return active;
}

static const char *filesystems_freeze_ptr = "filesystems_freeze";

static void filesystems_freeze_callback(struct super_block *sb, void *unused)
{
	if (!sb->s_op->freeze_fs && !sb->s_op->freeze_super)
		return;

	if (!get_active_super(sb))
		return;

	if (sb->s_op->freeze_super)
		sb->s_op->freeze_super(sb, FREEZE_EXCL | FREEZE_HOLDER_KERNEL,
				       filesystems_freeze_ptr);
	else
		freeze_super(sb, FREEZE_EXCL | FREEZE_HOLDER_KERNEL,
			     filesystems_freeze_ptr);

	deactivate_super(sb);
}

void filesystems_freeze(void)
{
	__iterate_supers(filesystems_freeze_callback, NULL,
			 SUPER_ITER_UNLOCKED | SUPER_ITER_REVERSE);
}

static void filesystems_thaw_callback(struct super_block *sb, void *unused)
{
	if (!sb->s_op->freeze_fs && !sb->s_op->freeze_super)
		return;

	if (!get_active_super(sb))
		return;

	if (sb->s_op->thaw_super)
		sb->s_op->thaw_super(sb, FREEZE_EXCL | FREEZE_HOLDER_KERNEL,
				     filesystems_freeze_ptr);
	else
		thaw_super(sb, FREEZE_EXCL | FREEZE_HOLDER_KERNEL,
			   filesystems_freeze_ptr);

	deactivate_super(sb);
}

void filesystems_thaw(void)
{
	__iterate_supers(filesystems_thaw_callback, NULL, SUPER_ITER_UNLOCKED);
}

static DEFINE_IDA(unnamed_dev_ida);

/**
 * get_anon_bdev - Allocate a block device for filesystems which don't have one.
 * @p: Pointer to a dev_t.
 *
 * Filesystems which don't use real block devices can call this function
 * to allocate a virtual block device.
 *
 * Context: Any context.  Frequently called while holding sb_lock.
 * Return: 0 on success, -EMFILE if there are no anonymous bdevs left
 * or -ENOMEM if memory allocation failed.
 */
int get_anon_bdev(dev_t *p)
{
	int dev;

	/*
	 * Many userspace utilities consider an FSID of 0 invalid.
	 * Always return at least 1 from get_anon_bdev.
	 */
	dev = ida_alloc_range(&unnamed_dev_ida, 1, (1 << MINORBITS) - 1,
			GFP_ATOMIC);
	if (dev == -ENOSPC)
		dev = -EMFILE;
	if (dev < 0)
		return dev;

	*p = MKDEV(0, dev);
	return 0;
}
EXPORT_SYMBOL(get_anon_bdev);

void free_anon_bdev(dev_t dev)
{
	ida_free(&unnamed_dev_ida, MINOR(dev));
}
EXPORT_SYMBOL(free_anon_bdev);

int set_anon_super(struct super_block *s, void *data)
{
	return get_anon_bdev(&s->s_dev);
}
EXPORT_SYMBOL(set_anon_super);

void kill_anon_super(struct super_block *sb)
{
	dev_t dev = sb->s_dev;
	generic_shutdown_super(sb);
	kill_super_notify(sb);
	free_anon_bdev(dev);
}
EXPORT_SYMBOL(kill_anon_super);

void kill_litter_super(struct super_block *sb)
{
	if (sb->s_root)
		d_genocide(sb->s_root);
	kill_anon_super(sb);
}
EXPORT_SYMBOL(kill_litter_super);
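
/*
 * Illustrative sketch (hypothetical names): an in-memory filesystem
 * that keeps dentries pinned, ramfs-style, would typically pair its
 * fill_super with kill_litter_super() in its file_system_type:
 *
 *	static struct file_system_type examplefs_fs_type = {
 *		.name			= "examplefs",
 *		.init_fs_context	= examplefs_init_fs_context,
 *		.kill_sb		= kill_litter_super,
 *	};
 */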

int set_anon_super_fc(struct super_block *sb, struct fs_context *fc)
{
	return set_anon_super(sb, NULL);
}
EXPORT_SYMBOL(set_anon_super_fc);

static int test_keyed_super(struct super_block *sb, struct fs_context *fc)
{
	return sb->s_fs_info == fc->s_fs_info;
}

static int test_single_super(struct super_block *s, struct fs_context *fc)
{
	return 1;
}

static int vfs_get_super(struct fs_context *fc,
		int (*test)(struct super_block *, struct fs_context *),
		int (*fill_super)(struct super_block *sb,
				  struct fs_context *fc))
{
	struct super_block *sb;
	int err;

	sb = sget_fc(fc, test, set_anon_super_fc);
	if (IS_ERR(sb))
		return PTR_ERR(sb);

	if (!sb->s_root) {
		err = fill_super(sb, fc);
		if (err)
			goto error;

		sb->s_flags |= SB_ACTIVE;
	}

	fc->root = dget(sb->s_root);
	return 0;

error:
	deactivate_locked_super(sb);
	return err;
}

int get_tree_nodev(struct fs_context *fc,
		  int (*fill_super)(struct super_block *sb,
				    struct fs_context *fc))
{
	return vfs_get_super(fc, NULL, fill_super);
}
EXPORT_SYMBOL(get_tree_nodev);

int get_tree_single(struct fs_context *fc,
		  int (*fill_super)(struct super_block *sb,
				    struct fs_context *fc))
{
	return vfs_get_super(fc, test_single_super, fill_super);
}
EXPORT_SYMBOL(get_tree_single);

int get_tree_keyed(struct fs_context *fc,
		  int (*fill_super)(struct super_block *sb,
				    struct fs_context *fc),
		void *key)
{
	fc->s_fs_info = key;
	return vfs_get_super(fc, test_keyed_super, fill_super);
}
EXPORT_SYMBOL(get_tree_keyed);
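
/*
 * Illustrative sketch (hypothetical names): get_tree_keyed() yields one
 * superblock per key, typically a namespace pointer. A ->get_tree
 * implementation might look like:
 *
 *	static int examplefs_get_tree(struct fs_context *fc)
 *	{
 *		// one superblock per owning namespace
 *		return get_tree_keyed(fc, examplefs_fill_super,
 *				      examplefs_ns(fc));
 *	}
 */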

static int set_bdev_super(struct super_block *s, void *data)
{
	s->s_dev = *(dev_t *)data;
	return 0;
}

static int super_s_dev_set(struct super_block *s, struct fs_context *fc)
{
	return set_bdev_super(s, fc->sget_key);
}

static int super_s_dev_test(struct super_block *s, struct fs_context *fc)
{
	return !(s->s_iflags & SB_I_RETIRED) &&
		s->s_dev == *(dev_t *)fc->sget_key;
}

/**
 * sget_dev - Find or create a superblock by device number
 * @fc: Filesystem context.
 * @dev: device number
 *
 * Find or create a superblock using the provided device number that
 * will be stored in fc->sget_key.
 *
 * If an extant superblock is matched, then that will be returned with
 * an elevated reference count that the caller must transfer or discard.
 *
 * If no match is made, a new superblock will be allocated and basic
 * initialisation will be performed (s_type, s_fs_info, s_id, s_dev will
 * be set). The superblock will be published and it will be returned in
 * a partially constructed state with SB_BORN and SB_ACTIVE as yet
 * unset.
 *
 * Return: an existing or newly created superblock on success, an error
 *         pointer on failure.
 */
struct super_block *sget_dev(struct fs_context *fc, dev_t dev)
{
	fc->sget_key = &dev;
	return sget_fc(fc, super_s_dev_test, super_s_dev_set);
}
EXPORT_SYMBOL(sget_dev);

#ifdef CONFIG_BLOCK
/*
 * Lock the superblock that is holder of the bdev. Returns the superblock
 * pointer if we successfully locked the superblock and it is alive. Otherwise
 * we return NULL and just unlock bdev->bd_holder_lock.
 *
 * The function must be called with bdev->bd_holder_lock and releases it.
 */
static struct super_block *bdev_super_lock(struct block_device *bdev, bool excl)
	__releases(&bdev->bd_holder_lock)
{
	struct super_block *sb = bdev->bd_holder;
	bool locked;

	lockdep_assert_held(&bdev->bd_holder_lock);
	lockdep_assert_not_held(&sb->s_umount);
	lockdep_assert_not_held(&bdev->bd_disk->open_mutex);

	/* Make sure sb doesn't go away from under us */
	spin_lock(&sb_lock);
	sb->s_count++;
	spin_unlock(&sb_lock);

	mutex_unlock(&bdev->bd_holder_lock);

	locked = super_lock(sb, excl);

	/*
	 * If the superblock wasn't already SB_DYING then we hold
	 * s_umount and can safely drop our temporary reference.
	 */
	put_super(sb);

	if (!locked)
		return NULL;

	if (!sb->s_root || !(sb->s_flags & SB_ACTIVE)) {
		super_unlock(sb, excl);
		return NULL;
	}

	return sb;
}

static void fs_bdev_mark_dead(struct block_device *bdev, bool surprise)
{
	struct super_block *sb;

	sb = bdev_super_lock(bdev, false);
	if (!sb)
		return;

	if (sb->s_op->remove_bdev) {
		int ret;

		ret = sb->s_op->remove_bdev(sb, bdev);
		if (!ret) {
			super_unlock_shared(sb);
			return;
		}
		/* Fallback to shutdown. */
	}

	if (!surprise)
		sync_filesystem(sb);
	shrink_dcache_sb(sb);
	evict_inodes(sb);
	if (sb->s_op->shutdown)
		sb->s_op->shutdown(sb);

	super_unlock_shared(sb);
}

static void fs_bdev_sync(struct block_device *bdev)
{
	struct super_block *sb;

	sb = bdev_super_lock(bdev, false);
	if (!sb)
		return;

	sync_filesystem(sb);
	super_unlock_shared(sb);
}

static struct super_block *get_bdev_super(struct block_device *bdev)
{
	bool active = false;
	struct super_block *sb;

	sb = bdev_super_lock(bdev, true);
	if (sb) {
		active = atomic_inc_not_zero(&sb->s_active);
		super_unlock_excl(sb);
	}
	if (!active)
		return NULL;
	return sb;
}

/**
 * fs_bdev_freeze - freeze owning filesystem of block device
 * @bdev: block device
 *
 * Freeze the filesystem that owns this block device if it is still
 * active.
 *
 * A filesystem that owns multiple block devices may be frozen from each
 * block device and won't be unfrozen until all block devices are
 * unfrozen. Each block device can only freeze the filesystem once as we
 * nest freezes for block devices in the block layer.
 *
 * Return: If the freeze was successful zero is returned. If the freeze
 *         failed a negative error code is returned.
 */
static int fs_bdev_freeze(struct block_device *bdev)
{
	struct super_block *sb;
	int error = 0;

	lockdep_assert_held(&bdev->bd_fsfreeze_mutex);

	sb = get_bdev_super(bdev);
	if (!sb)
		return -EINVAL;

	if (sb->s_op->freeze_super)
		error = sb->s_op->freeze_super(sb,
				FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE, NULL);
	else
		error = freeze_super(sb,
				FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE, NULL);
	if (!error)
		error = sync_blockdev(bdev);
	deactivate_super(sb);
	return error;
}

/**
 * fs_bdev_thaw - thaw owning filesystem of block device
 * @bdev: block device
 *
 * Thaw the filesystem that owns this block device.
 *
 * A filesystem that owns multiple block devices may be frozen from each
 * block device and won't be unfrozen until all block devices are
 * unfrozen. Each block device can only freeze the filesystem once as we
 * nest freezes for block devices in the block layer.
 *
 * Return: If the thaw was successful zero is returned. If the thaw
 *         failed a negative error code is returned. If this function
 *         returns zero it doesn't mean that the filesystem is unfrozen
 *         as it may have been frozen multiple times (kernel may hold a
 *         freeze or might be frozen from other block devices).
 */
static int fs_bdev_thaw(struct block_device *bdev)
{
	struct super_block *sb;
	int error;

	lockdep_assert_held(&bdev->bd_fsfreeze_mutex);

	/*
	 * The block device may have been frozen before it was claimed by a
	 * filesystem. Concurrently another process might try to mount that
	 * frozen block device and has temporarily claimed the block device for
	 * that purpose causing a concurrent fs_bdev_thaw() to end up here. The
	 * mounter is already about to abort mounting because they still saw an
	 * elevated bdev->bd_fsfreeze_count so get_bdev_super() will return
	 * NULL in that case.
	 */
	sb = get_bdev_super(bdev);
	if (!sb)
		return -EINVAL;

	if (sb->s_op->thaw_super)
		error = sb->s_op->thaw_super(sb,
				FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE, NULL);
	else
		error = thaw_super(sb,
				FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE, NULL);
	deactivate_super(sb);
	return error;
}

const struct blk_holder_ops fs_holder_ops = {
	.mark_dead		= fs_bdev_mark_dead,
	.sync			= fs_bdev_sync,
	.freeze			= fs_bdev_freeze,
	.thaw			= fs_bdev_thaw,
};
EXPORT_SYMBOL_GPL(fs_holder_ops);
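
/*
 * fs_holder_ops is the bdev holder contract for filesystems:
 * setup_bdev_super() below passes it to bdev_file_open_by_dev() with
 * the superblock as holder, which is what lets the block layer route
 * mark_dead/sync/freeze/thaw events back to the owning superblock via
 * bdev->bd_holder.
 */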

int setup_bdev_super(struct super_block *sb, int sb_flags,
		struct fs_context *fc)
{
	blk_mode_t mode = sb_open_mode(sb_flags);
	struct file *bdev_file;
	struct block_device *bdev;

	bdev_file = bdev_file_open_by_dev(sb->s_dev, mode, sb, &fs_holder_ops);
	if (IS_ERR(bdev_file)) {
		if (fc)
			errorf(fc, "%s: Can't open blockdev", fc->source);
		return PTR_ERR(bdev_file);
	}
	bdev = file_bdev(bdev_file);

	/*
	 * This really should be in blkdev_get_by_dev, but right now can't due
	 * to legacy issues that require us to allow opening a block device node
	 * writable from userspace even for a read-only block device.
	 */
	if ((mode & BLK_OPEN_WRITE) && bdev_read_only(bdev)) {
		bdev_fput(bdev_file);
		return -EACCES;
	}

	/*
	 * It is enough to check bdev was not frozen before we set
	 * s_bdev as freezing will wait until SB_BORN is set.
	 */
	if (atomic_read(&bdev->bd_fsfreeze_count) > 0) {
		if (fc)
			warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
		bdev_fput(bdev_file);
		return -EBUSY;
	}
	spin_lock(&sb_lock);
	sb->s_bdev_file = bdev_file;
	sb->s_bdev = bdev;
	sb->s_bdi = bdi_get(bdev->bd_disk->bdi);
	if (bdev_stable_writes(bdev))
		sb->s_iflags |= SB_I_STABLE_WRITES;
	spin_unlock(&sb_lock);

	snprintf(sb->s_id, sizeof(sb->s_id), "%pg", bdev);
	shrinker_debugfs_rename(sb->s_shrink, "sb-%s:%s", sb->s_type->name,
				sb->s_id);
	sb_set_blocksize(sb, block_size(bdev));
	return 0;
}
EXPORT_SYMBOL_GPL(setup_bdev_super);
1652 
1653 /**
1654  * get_tree_bdev_flags - Get a superblock based on a single block device
1655  * @fc: The filesystem context holding the parameters
1656  * @fill_super: Helper to initialise a new superblock
1657  * @flags: GET_TREE_BDEV_* flags
1658  */
get_tree_bdev_flags(struct fs_context * fc,int (* fill_super)(struct super_block * sb,struct fs_context * fc),unsigned int flags)1659 int get_tree_bdev_flags(struct fs_context *fc,
1660 		int (*fill_super)(struct super_block *sb,
1661 				  struct fs_context *fc), unsigned int flags)
1662 {
1663 	struct super_block *s;
1664 	int error = 0;
1665 	dev_t dev;
1666 
1667 	if (!fc->source)
1668 		return invalf(fc, "No source specified");
1669 
1670 	error = lookup_bdev(fc->source, &dev);
1671 	if (error) {
1672 		if (!(flags & GET_TREE_BDEV_QUIET_LOOKUP))
1673 			errorf(fc, "%s: Can't lookup blockdev", fc->source);
1674 		return error;
1675 	}
1676 	fc->sb_flags |= SB_NOSEC;
1677 	s = sget_dev(fc, dev);
1678 	if (IS_ERR(s))
1679 		return PTR_ERR(s);
1680 
1681 	if (s->s_root) {
1682 		/* Don't summarily change the RO/RW state. */
1683 		if ((fc->sb_flags ^ s->s_flags) & SB_RDONLY) {
1684 			warnf(fc, "%pg: Can't mount, would change RO state", s->s_bdev);
1685 			deactivate_locked_super(s);
1686 			return -EBUSY;
1687 		}
1688 	} else {
1689 		error = setup_bdev_super(s, fc->sb_flags, fc);
1690 		if (!error)
1691 			error = fill_super(s, fc);
1692 		if (error) {
1693 			deactivate_locked_super(s);
1694 			return error;
1695 		}
1696 		s->s_flags |= SB_ACTIVE;
1697 	}
1698 
1699 	BUG_ON(fc->root);
1700 	fc->root = dget(s->s_root);
1701 	return 0;
1702 }
1703 EXPORT_SYMBOL_GPL(get_tree_bdev_flags);
1704 
1705 /**
1706  * get_tree_bdev - Get a superblock based on a single block device
1707  * @fc: The filesystem context holding the parameters
1708  * @fill_super: Helper to initialise a new superblock
1709  */
1710 int get_tree_bdev(struct fs_context *fc,
1711 		int (*fill_super)(struct super_block *,
1712 				  struct fs_context *))
1713 {
1714 	return get_tree_bdev_flags(fc, fill_super, 0);
1715 }
1716 EXPORT_SYMBOL(get_tree_bdev);
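/*
 * Illustrative sketch (hypothetical code, not part of this file): the
 * common pattern is a one-line ->get_tree() wired into the filesystem's
 * fs_context_operations. The example_* names are made up.
 */
#if 0
static int example_get_tree(struct fs_context *fc)
{
	return get_tree_bdev(fc, example_fill_super);
}

static const struct fs_context_operations example_context_ops = {
	.get_tree	= example_get_tree,
};
#endif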
1717 
1718 void kill_block_super(struct super_block *sb)
1719 {
1720 	struct block_device *bdev = sb->s_bdev;
1721 
1722 	generic_shutdown_super(sb);
1723 	if (bdev) {
1724 		sync_blockdev(bdev);
1725 		bdev_fput(sb->s_bdev_file);
1726 	}
1727 }
1728 
1729 EXPORT_SYMBOL(kill_block_super);
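/*
 * Illustrative sketch (hypothetical code, not part of this file): a
 * block-device filesystem typically sets kill_block_super() as its
 * ->kill_sb so that unmount shuts down the superblock and releases the
 * device. "examplefs" and example_init_fs_context() are made-up names.
 */
#if 0
static struct file_system_type example_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "examplefs",
	.init_fs_context	= example_init_fs_context,
	.kill_sb		= kill_block_super,
	.fs_flags		= FS_REQUIRES_DEV,
};
#endif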
1730 #endif
1731 
1732 /**
1733  * vfs_get_tree - Get the mountable root
1734  * @fc: The superblock configuration context.
1735  *
1736  * The filesystem is invoked to get or create a superblock which can then later
1737  * be used for mounting.  The filesystem places a pointer to the root to be
1738  * used for mounting in @fc->root.
1739  */
1740 int vfs_get_tree(struct fs_context *fc)
1741 {
1742 	struct super_block *sb;
1743 	int error;
1744 
1745 	if (fc->root)
1746 		return -EBUSY;
1747 
1748 	/* Get the mountable root in fc->root, with a ref on the root and a ref
1749 	 * on the superblock.
1750 	 */
1751 	error = fc->ops->get_tree(fc);
1752 	if (error < 0)
1753 		return error;
1754 
1755 	if (!fc->root) {
1756 		pr_err("Filesystem %s get_tree() didn't set fc->root, returned %i\n",
1757 		       fc->fs_type->name, error);
1758 		/* We don't know what the locking state of the superblock is -
1759 		 * if there is a superblock.
1760 		 */
1761 		BUG();
1762 	}
1763 
1764 	sb = fc->root->d_sb;
1765 	WARN_ON(!sb->s_bdi);
1766 
1767 	/*
1768 	 * super_wake() contains a memory barrier which also takes care of
1769 	 * ordering for super_cache_count(). We place it before setting
1770 	 * SB_BORN as the data dependency between the two functions is
1771 	 * the superblock structure contents that we just set up, not
1772 	 * the SB_BORN flag.
1773 	 */
1774 	super_wake(sb, SB_BORN);
1775 
1776 	error = security_sb_set_mnt_opts(sb, fc->security, 0, NULL);
1777 	if (unlikely(error)) {
1778 		fc_drop_locked(fc);
1779 		return error;
1780 	}
1781 
1782 	/*
1783 	 * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
1784 	 * but s_maxbytes was an unsigned long long for many releases. Throw
1785 	 * this warning for a little while to try and catch filesystems that
1786 	 * violate this rule.
1787 	 */
1788 	WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to negative value (%lld)\n",
1789 	     fc->fs_type->name, sb->s_maxbytes);
1790 
1791 	return 0;
1792 }
1793 EXPORT_SYMBOL(vfs_get_tree);
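/*
 * Illustrative sketch (hypothetical code, not part of this file): a
 * kernel-internal mount drives vfs_get_tree() through a filesystem
 * context, roughly what kern_mount()/fc_mount() do in fs/namespace.c.
 * Note that vfs_get_tree() returns with s_umount held on success.
 * example_kern_mount() is a made-up name.
 */
#if 0
static struct vfsmount *example_kern_mount(struct file_system_type *type)
{
	struct fs_context *fc;
	struct vfsmount *mnt;
	int err;

	fc = fs_context_for_mount(type, 0);
	if (IS_ERR(fc))
		return ERR_CAST(fc);

	err = vfs_get_tree(fc);		/* sets fc->root on success */
	if (!err) {
		/* Drop the lock taken by vfs_get_tree() before mounting. */
		up_write(&fc->root->d_sb->s_umount);
		mnt = vfs_create_mount(fc);
	} else {
		mnt = ERR_PTR(err);
	}
	put_fs_context(fc);
	return mnt;
}
#endif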
1794 
1795 /*
1796  * Set up a private BDI for the given superblock. It gets automatically cleaned up
1797  * in generic_shutdown_super().
1798  */
1799 int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
1800 {
1801 	struct backing_dev_info *bdi;
1802 	int err;
1803 	va_list args;
1804 
1805 	bdi = bdi_alloc(NUMA_NO_NODE);
1806 	if (!bdi)
1807 		return -ENOMEM;
1808 
1809 	va_start(args, fmt);
1810 	err = bdi_register_va(bdi, fmt, args);
1811 	va_end(args);
1812 	if (err) {
1813 		bdi_put(bdi);
1814 		return err;
1815 	}
1816 	WARN_ON(sb->s_bdi != &noop_backing_dev_info);
1817 	sb->s_bdi = bdi;
1818 	sb->s_iflags |= SB_I_PERSB_BDI;
1819 
1820 	return 0;
1821 }
1822 EXPORT_SYMBOL(super_setup_bdi_name);
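/*
 * Illustrative sketch (hypothetical code, not part of this file): a
 * fill_super implementation can give each instance its own named BDI
 * early in setup. example_fill_super() is a made-up name, and fc->source
 * may be NULL for some filesystems, hence the fallback.
 */
#if 0
static int example_fill_super(struct super_block *sb, struct fs_context *fc)
{
	int err;

	err = super_setup_bdi_name(sb, "examplefs-%s",
				   fc->source ? fc->source : "none");
	if (err)
		return err;

	/* ... remaining superblock setup ... */
	return 0;
}
#endif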
1823 
1824 /*
1825  * Set up a private BDI for the given superblock. It gets automatically cleaned up
1826  * in generic_shutdown_super().
1827  */
1828 int super_setup_bdi(struct super_block *sb)
1829 {
1830 	static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
1831 
1832 	return super_setup_bdi_name(sb, "%.28s-%ld", sb->s_type->name,
1833 				    atomic_long_inc_return(&bdi_seq));
1834 }
1835 EXPORT_SYMBOL(super_setup_bdi);
1836 
1837 /**
1838  * sb_wait_write - wait until all writers to given file system finish
1839  * @sb: the super for which we wait
1840  * @level: type of writers we wait for (normal vs page fault)
1841  *
1842  * This function waits until there are no writers of given type to given file
1843  * system.
1844  */
1845 static void sb_wait_write(struct super_block *sb, int level)
1846 {
1847 	percpu_down_write(sb->s_writers.rw_sem + level - 1);
1848 }
1849 
1850 /*
1851  * We are going to return to userspace and forget about these locks, the
1852  * ownership goes to the caller of thaw_super() which does unlock().
1853  */
1854 static void lockdep_sb_freeze_release(struct super_block *sb)
1855 {
1856 	int level;
1857 
1858 	for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
1859 		percpu_rwsem_release(sb->s_writers.rw_sem + level, _THIS_IP_);
1860 }
1861 
1862 /*
1863  * Tell lockdep we are holding these locks before we call ->unfreeze_fs(sb).
1864  */
1865 static void lockdep_sb_freeze_acquire(struct super_block *sb)
1866 {
1867 	int level;
1868 
1869 	for (level = 0; level < SB_FREEZE_LEVELS; ++level)
1870 		percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
1871 }
1872 
1873 static void sb_freeze_unlock(struct super_block *sb, int level)
1874 {
1875 	for (level--; level >= 0; level--)
1876 		percpu_up_write(sb->s_writers.rw_sem + level);
1877 }
1878 
1879 static int wait_for_partially_frozen(struct super_block *sb)
1880 {
1881 	int ret = 0;
1882 
1883 	do {
1884 		unsigned short old = sb->s_writers.frozen;
1885 
1886 		up_write(&sb->s_umount);
1887 		ret = wait_var_event_killable(&sb->s_writers.frozen,
1888 					       sb->s_writers.frozen != old);
1889 		down_write(&sb->s_umount);
1890 	} while (ret == 0 &&
1891 		 sb->s_writers.frozen != SB_UNFROZEN &&
1892 		 sb->s_writers.frozen != SB_FREEZE_COMPLETE);
1893 
1894 	return ret;
1895 }
1896 
1897 #define FREEZE_HOLDERS (FREEZE_HOLDER_KERNEL | FREEZE_HOLDER_USERSPACE)
1898 #define FREEZE_FLAGS (FREEZE_HOLDERS | FREEZE_MAY_NEST | FREEZE_EXCL)
1899 
1900 static inline int freeze_inc(struct super_block *sb, enum freeze_holder who)
1901 {
1902 	WARN_ON_ONCE((who & ~FREEZE_FLAGS));
1903 	WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1);
1904 
1905 	if (who & FREEZE_HOLDER_KERNEL)
1906 		++sb->s_writers.freeze_kcount;
1907 	if (who & FREEZE_HOLDER_USERSPACE)
1908 		++sb->s_writers.freeze_ucount;
1909 	return sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount;
1910 }
1911 
1912 static inline int freeze_dec(struct super_block *sb, enum freeze_holder who)
1913 {
1914 	WARN_ON_ONCE((who & ~FREEZE_FLAGS));
1915 	WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1);
1916 
1917 	if ((who & FREEZE_HOLDER_KERNEL) && sb->s_writers.freeze_kcount)
1918 		--sb->s_writers.freeze_kcount;
1919 	if ((who & FREEZE_HOLDER_USERSPACE) && sb->s_writers.freeze_ucount)
1920 		--sb->s_writers.freeze_ucount;
1921 	return sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount;
1922 }
1923 
1924 static inline bool may_freeze(struct super_block *sb, enum freeze_holder who,
1925 			      const void *freeze_owner)
1926 {
1927 	lockdep_assert_held(&sb->s_umount);
1928 
1929 	WARN_ON_ONCE((who & ~FREEZE_FLAGS));
1930 	WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1);
1931 
1932 	if (who & FREEZE_EXCL) {
1933 		if (WARN_ON_ONCE(!(who & FREEZE_HOLDER_KERNEL)))
1934 			return false;
1935 		if (WARN_ON_ONCE(who & ~(FREEZE_EXCL | FREEZE_HOLDER_KERNEL)))
1936 			return false;
1937 		if (WARN_ON_ONCE(!freeze_owner))
1938 			return false;
1939 		/* This freeze already has a specific owner. */
1940 		if (sb->s_writers.freeze_owner)
1941 			return false;
1942 		/*
1943 		 * This is already frozen multiple times so we're just
1944 		 * going to take a reference count and mark the freeze as
1945 		 * being owned by the caller.
1946 		 */
1947 		if (sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount)
1948 			sb->s_writers.freeze_owner = freeze_owner;
1949 		return true;
1950 	}
1951 
1952 	if (who & FREEZE_HOLDER_KERNEL)
1953 		return (who & FREEZE_MAY_NEST) ||
1954 		       sb->s_writers.freeze_kcount == 0;
1955 	if (who & FREEZE_HOLDER_USERSPACE)
1956 		return (who & FREEZE_MAY_NEST) ||
1957 		       sb->s_writers.freeze_ucount == 0;
1958 	return false;
1959 }
1960 
1961 static inline bool may_unfreeze(struct super_block *sb, enum freeze_holder who,
1962 				const void *freeze_owner)
1963 {
1964 	lockdep_assert_held(&sb->s_umount);
1965 
1966 	WARN_ON_ONCE((who & ~FREEZE_FLAGS));
1967 	WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1);
1968 
1969 	if (who & FREEZE_EXCL) {
1970 		if (WARN_ON_ONCE(!(who & FREEZE_HOLDER_KERNEL)))
1971 			return false;
1972 		if (WARN_ON_ONCE(who & ~(FREEZE_EXCL | FREEZE_HOLDER_KERNEL)))
1973 			return false;
1974 		if (WARN_ON_ONCE(!freeze_owner))
1975 			return false;
1976 		if (WARN_ON_ONCE(sb->s_writers.freeze_kcount == 0))
1977 			return false;
1978 		/* This isn't exclusively frozen. */
1979 		if (!sb->s_writers.freeze_owner)
1980 			return false;
1981 		/* This isn't exclusively frozen by us. */
1982 		if (sb->s_writers.freeze_owner != freeze_owner)
1983 			return false;
1984 		/*
1985 		 * This is still frozen multiple times so we're just
1986 		 * going to drop our reference count and undo our
1987 		 * exclusive freeze.
1988 		 */
1989 		if ((sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount) > 1)
1990 			sb->s_writers.freeze_owner = NULL;
1991 		return true;
1992 	}
1993 
1994 	if (who & FREEZE_HOLDER_KERNEL) {
1995 		/*
1996 		 * Someone's trying to steal the reference belonging to
1997 		 * @sb->s_writers.freeze_owner.
1998 		 */
1999 		if (sb->s_writers.freeze_kcount == 1 &&
2000 		    sb->s_writers.freeze_owner)
2001 			return false;
2002 		return sb->s_writers.freeze_kcount > 0;
2003 	}
2004 
2005 	if (who & FREEZE_HOLDER_USERSPACE)
2006 		return sb->s_writers.freeze_ucount > 0;
2007 
2008 	return false;
2009 }
2010 
2011 /**
2012  * freeze_super - lock the filesystem and force it into a consistent state
2013  * @sb: the super to lock
2014  * @who: context that wants to freeze
2015  * @freeze_owner: owner of the freeze
2016  *
2017  * Syncs the super to make sure the filesystem is consistent and calls the fs's
2018  * freeze_fs.  Subsequent calls to this without first thawing the fs may return
2019  * -EBUSY.
2020  *
2021  * @who should be:
2022  * * %FREEZE_HOLDER_USERSPACE if userspace wants to freeze the fs;
2023  * * %FREEZE_HOLDER_KERNEL if the kernel wants to freeze the fs;
2024  * * %FREEZE_MAY_NEST whether nesting freeze and thaw requests is allowed.
2025  *
2026  * The @who argument distinguishes between the kernel and userspace trying to
2027  * freeze the filesystem.  Although there cannot be multiple kernel freezes or
2028  * multiple userspace freezes in effect at any given time, the kernel and
2029  * userspace can both hold a filesystem frozen.  The filesystem remains frozen
2030  * until there are no kernel or userspace freezes in effect.
2031  *
2032  * A filesystem may hold multiple devices and thus a filesystem may be
2033  * frozen through the block layer via multiple block devices. In this
2034  * case the request is marked as being allowed to nest by passing
2035  * FREEZE_MAY_NEST. The filesystem remains frozen until all block
2036  * devices are unfrozen. If multiple freezes are attempted without
2037  * FREEZE_MAY_NEST -EBUSY will be returned.
2038  *
2039  * During this function, sb->s_writers.frozen goes through these values:
2040  *
2041  * SB_UNFROZEN: File system is normal, all writes progress as usual.
2042  *
2043  * SB_FREEZE_WRITE: The file system is in the process of being frozen.  New
2044  * writes should be blocked, though page faults are still allowed. We wait for
2045  * all writes to complete and then proceed to the next stage.
2046  *
2047  * SB_FREEZE_PAGEFAULT: Freezing continues. Now also page faults are blocked
2048  * but internal fs threads can still modify the filesystem (although they
2049  * should not dirty new pages or inodes), writeback can run etc. After waiting
2050  * for all running page faults we sync the filesystem which will clean all
2051  * dirty pages and inodes (no new dirty pages or inodes can be created when
2052  * sync is running).
2053  *
2054  * SB_FREEZE_FS: The file system is frozen. Now all internal sources of fs
2055  * modification are blocked (e.g. XFS preallocation truncation on inode
2056  * reclaim). This is usually implemented by blocking new transactions for
2057  * filesystems that have them and need this additional guard. After all
2058  * internal writers are finished we call ->freeze_fs() to finish filesystem
2059  * freezing. Then we transition to SB_FREEZE_COMPLETE state. This state is
2060  * mostly auxiliary for filesystems to verify they do not modify frozen fs.
2061  *
2062  * sb->s_writers.frozen is protected by sb->s_umount.
2063  *
2064  * Return: If the freeze was successful zero is returned. If the freeze
2065  *         failed a negative error code is returned.
2066  */
2067 int freeze_super(struct super_block *sb, enum freeze_holder who, const void *freeze_owner)
2068 {
2069 	int ret;
2070 
2071 	if (!super_lock_excl(sb)) {
2072 		WARN_ON_ONCE("Dying superblock while freezing!");
2073 		return -EINVAL;
2074 	}
2075 	atomic_inc(&sb->s_active);
2076 
2077 retry:
2078 	if (sb->s_writers.frozen == SB_FREEZE_COMPLETE) {
2079 		if (may_freeze(sb, who, freeze_owner))
2080 			ret = !!WARN_ON_ONCE(freeze_inc(sb, who) == 1);
2081 		else
2082 			ret = -EBUSY;
2083 		/* All freezers share a single active reference. */
2084 		deactivate_locked_super(sb);
2085 		return ret;
2086 	}
2087 
2088 	if (sb->s_writers.frozen != SB_UNFROZEN) {
2089 		ret = wait_for_partially_frozen(sb);
2090 		if (ret) {
2091 			deactivate_locked_super(sb);
2092 			return ret;
2093 		}
2094 
2095 		goto retry;
2096 	}
2097 
2098 	if (sb_rdonly(sb)) {
2099 		/* Nothing to do really... */
2100 		WARN_ON_ONCE(freeze_inc(sb, who) > 1);
2101 		sb->s_writers.freeze_owner = freeze_owner;
2102 		sb->s_writers.frozen = SB_FREEZE_COMPLETE;
2103 		wake_up_var(&sb->s_writers.frozen);
2104 		super_unlock_excl(sb);
2105 		return 0;
2106 	}
2107 
2108 	sb->s_writers.frozen = SB_FREEZE_WRITE;
2109 	/* Release s_umount to preserve sb_start_write -> s_umount ordering */
2110 	super_unlock_excl(sb);
2111 	sb_wait_write(sb, SB_FREEZE_WRITE);
2112 	__super_lock_excl(sb);
2113 
2114 	/* Now we go and block page faults... */
2115 	sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
2116 	sb_wait_write(sb, SB_FREEZE_PAGEFAULT);
2117 
2118 	/* All writers are done so after syncing there won't be dirty data */
2119 	ret = sync_filesystem(sb);
2120 	if (ret) {
2121 		sb->s_writers.frozen = SB_UNFROZEN;
2122 		sb_freeze_unlock(sb, SB_FREEZE_PAGEFAULT);
2123 		wake_up_var(&sb->s_writers.frozen);
2124 		deactivate_locked_super(sb);
2125 		return ret;
2126 	}
2127 
2128 	/* Now wait for internal filesystem counter */
2129 	sb->s_writers.frozen = SB_FREEZE_FS;
2130 	sb_wait_write(sb, SB_FREEZE_FS);
2131 
2132 	if (sb->s_op->freeze_fs) {
2133 		ret = sb->s_op->freeze_fs(sb);
2134 		if (ret) {
2135 			printk(KERN_ERR
2136 				"VFS: Filesystem freeze failed\n");
2137 			sb->s_writers.frozen = SB_UNFROZEN;
2138 			sb_freeze_unlock(sb, SB_FREEZE_FS);
2139 			wake_up_var(&sb->s_writers.frozen);
2140 			deactivate_locked_super(sb);
2141 			return ret;
2142 		}
2143 	}
2144 	/*
2145 	 * For debugging purposes so that fs can warn if it sees write activity
2146 	 * when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super().
2147 	 */
2148 	WARN_ON_ONCE(freeze_inc(sb, who) > 1);
2149 	sb->s_writers.freeze_owner = freeze_owner;
2150 	sb->s_writers.frozen = SB_FREEZE_COMPLETE;
2151 	wake_up_var(&sb->s_writers.frozen);
2152 	lockdep_sb_freeze_release(sb);
2153 	super_unlock_excl(sb);
2154 	return 0;
2155 }
2156 EXPORT_SYMBOL(freeze_super);
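/*
 * Illustrative sketch (hypothetical code, not part of this file): a
 * kernel-side freeze/thaw pair. FREEZE_MAY_NEST lets the freeze succeed
 * even if userspace, or another block device of the same filesystem,
 * already holds one. example_quiesce() is a made-up name.
 */
#if 0
static int example_quiesce(struct super_block *sb)
{
	int err;

	err = freeze_super(sb, FREEZE_HOLDER_KERNEL | FREEZE_MAY_NEST, NULL);
	if (err)
		return err;

	/* The filesystem is consistent and write-quiesced here. */

	return thaw_super(sb, FREEZE_HOLDER_KERNEL | FREEZE_MAY_NEST, NULL);
}
#endif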
2157 
2158 /*
2159  * Undoes the effect of a freeze_super() call.  If the filesystem is
2160  * frozen both by userspace and the kernel, a thaw call from either source
2161  * removes that state without releasing the other state or unlocking the
2162  * filesystem.
2163  */
2164 static int thaw_super_locked(struct super_block *sb, enum freeze_holder who,
2165 			     const void *freeze_owner)
2166 {
2167 	int error = -EINVAL;
2168 
2169 	if (sb->s_writers.frozen != SB_FREEZE_COMPLETE)
2170 		goto out_unlock;
2171 
2172 	if (!may_unfreeze(sb, who, freeze_owner))
2173 		goto out_unlock;
2174 
2175 	/*
2176 	 * All freezers share a single active reference.
2177 	 * So just unlock in case there are any left.
2178 	 */
2179 	if (freeze_dec(sb, who))
2180 		goto out_unlock;
2181 
2182 	if (sb_rdonly(sb)) {
2183 		sb->s_writers.frozen = SB_UNFROZEN;
2184 		sb->s_writers.freeze_owner = NULL;
2185 		wake_up_var(&sb->s_writers.frozen);
2186 		goto out_deactivate;
2187 	}
2188 
2189 	lockdep_sb_freeze_acquire(sb);
2190 
2191 	if (sb->s_op->unfreeze_fs) {
2192 		error = sb->s_op->unfreeze_fs(sb);
2193 		if (error) {
2194 			pr_err("VFS: Filesystem thaw failed\n");
2195 			freeze_inc(sb, who);
2196 			lockdep_sb_freeze_release(sb);
2197 			goto out_unlock;
2198 		}
2199 	}
2200 
2201 	sb->s_writers.frozen = SB_UNFROZEN;
2202 	sb->s_writers.freeze_owner = NULL;
2203 	wake_up_var(&sb->s_writers.frozen);
2204 	sb_freeze_unlock(sb, SB_FREEZE_FS);
2205 out_deactivate:
2206 	deactivate_locked_super(sb);
2207 	return 0;
2208 
2209 out_unlock:
2210 	super_unlock_excl(sb);
2211 	return error;
2212 }
2213 
2214 /**
2215  * thaw_super - unlock filesystem
2216  * @sb: the super to thaw
2217  * @who: context that wants to thaw
2218  * @freeze_owner: owner of the freeze
2219  *
2220  * Unlocks the filesystem and marks it writeable again after freeze_super()
2221  * if there are no remaining freezes on the filesystem.
2222  *
2223  * @who should be:
2224  * * %FREEZE_HOLDER_USERSPACE if userspace wants to thaw the fs;
2225  * * %FREEZE_HOLDER_KERNEL if the kernel wants to thaw the fs.
2226  * * %FREEZE_MAY_NEST whether nesting freeze and thaw requests is allowed
2227  *
2228  * A filesystem may hold multiple devices and thus a filesystem may
2229  * have been frozen through the block layer via multiple block devices.
2230  * The filesystem remains frozen until all block devices are unfrozen.
2231  */
2232 int thaw_super(struct super_block *sb, enum freeze_holder who,
2233 	       const void *freeze_owner)
2234 {
2235 	if (!super_lock_excl(sb)) {
2236 		WARN_ON_ONCE("Dying superblock while thawing!");
2237 		return -EINVAL;
2238 	}
2239 	return thaw_super_locked(sb, who, freeze_owner);
2240 }
2241 EXPORT_SYMBOL(thaw_super);
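/*
 * Illustrative sketch (hypothetical code, not part of this file): an
 * exclusive kernel freeze passes FREEZE_EXCL plus an owner cookie, and
 * only a thaw presenting the same cookie can undo it (see may_freeze()
 * and may_unfreeze() above). The owner pointer is a made-up per-caller
 * object.
 */
#if 0
static int example_excl_freeze_thaw(struct super_block *sb, const void *owner)
{
	int err;

	err = freeze_super(sb, FREEZE_HOLDER_KERNEL | FREEZE_EXCL, owner);
	if (err)
		return err;

	return thaw_super(sb, FREEZE_HOLDER_KERNEL | FREEZE_EXCL, owner);
}
#endif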
2242 
2243 /*
2244  * Create a workqueue for deferred direct IO completions. We allocate the
2245  * workqueue when it's first needed. This avoids creating a workqueue for
2246  * filesystems that don't need it and also allows us to create the workqueue
2247  * late enough so that we can include s_id in the name of the workqueue.
2248  */
2249 int sb_init_dio_done_wq(struct super_block *sb)
2250 {
2251 	struct workqueue_struct *old;
2252 	struct workqueue_struct *wq = alloc_workqueue("dio/%s",
2253 						      WQ_MEM_RECLAIM | WQ_PERCPU,
2254 						      0,
2255 						      sb->s_id);
2256 	if (!wq)
2257 		return -ENOMEM;
2258 
2259 	old = NULL;
2260 	/*
2261 	 * This has to be atomic as more DIOs can race to create the workqueue
2262 	 */
2263 	if (!try_cmpxchg(&sb->s_dio_done_wq, &old, wq)) {
2264 		/* Someone created workqueue before us? Free ours... */
2265 		destroy_workqueue(wq);
2266 	}
2267 	return 0;
2268 }
2269 EXPORT_SYMBOL_GPL(sb_init_dio_done_wq);
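/*
 * Illustrative sketch (hypothetical code, not part of this file): direct
 * IO paths create the workqueue lazily before deferring a completion, in
 * the style of the iomap and legacy direct-io code. example_prepare_dio()
 * is a made-up name.
 */
#if 0
static int example_prepare_dio(struct super_block *sb)
{
	/*
	 * Test first to skip the allocation in the common case; losing
	 * the cmpxchg race inside sb_init_dio_done_wq() is harmless.
	 */
	if (!sb->s_dio_done_wq)
		return sb_init_dio_done_wq(sb);
	return 0;
}
#endif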
2270