// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/super.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  super.c contains code to handle: - mount structures
 *                                   - super-block tables
 *                                   - filesystem drivers list
 *                                   - mount system call
 *                                   - umount system call
 *                                   - ustat system call
 *
 * GK 2/5/95  -  Changed to support mounting the root fs via NFS
 *
 *  Added kerneld support: Jacques Gelinas and Bjorn Ekwall
 *  Added change_root: Werner Almesberger & Hans Lermen, Feb '96
 *  Added options to /proc/mounts:
 *    Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
 *  Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
 *  Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
 */

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/writeback.h>		/* for the emergency remount stuff */
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/rculist_bl.h>
#include <linux/fscrypt.h>
#include <linux/fsnotify.h>
#include <linux/lockdep.h>
#include <linux/user_namespace.h>
#include <linux/fs_context.h>
#include <uapi/linux/mount.h>
#include "internal.h"

static int thaw_super_locked(struct super_block *sb, enum freeze_holder who,
			     const void *freeze_owner);

static LIST_HEAD(super_blocks);
static DEFINE_SPINLOCK(sb_lock);

static char *sb_writers_name[SB_FREEZE_LEVELS] = {
	"sb_writers",
	"sb_pagefaults",
	"sb_internal",
};

static inline void __super_lock(struct super_block *sb, bool excl)
{
	if (excl)
		down_write(&sb->s_umount);
	else
		down_read(&sb->s_umount);
}

static inline void super_unlock(struct super_block *sb, bool excl)
{
	if (excl)
		up_write(&sb->s_umount);
	else
		up_read(&sb->s_umount);
}

static inline void __super_lock_excl(struct super_block *sb)
{
	__super_lock(sb, true);
}

static inline void super_unlock_excl(struct super_block *sb)
{
	super_unlock(sb, true);
}

static inline void super_unlock_shared(struct super_block *sb)
{
	super_unlock(sb, false);
}

static bool super_flags(const struct super_block *sb, unsigned int flags)
{
	/*
	 * Pairs with smp_store_release() in super_wake() and ensures
	 * that we see @flags after we're woken.
	 */
	return smp_load_acquire(&sb->s_flags) & flags;
}

/**
 * super_lock - wait for superblock to become ready and lock it
 * @sb: superblock to wait for
 * @excl: whether exclusive access is required
 *
 * If the superblock has neither passed through vfs_get_tree() nor
 * generic_shutdown_super() yet, wait for that to happen. Either superblock
 * creation will succeed and SB_BORN is set by vfs_get_tree() or we're
 * woken and we'll see SB_DYING.
 *
 * The caller must have acquired a temporary reference on @sb->s_count.
 *
 * Return: %true if SB_BORN was set, with s_umount held; %false if
 *         SB_DYING was set, without s_umount held.
 */
static __must_check bool super_lock(struct super_block *sb, bool excl)
{
	lockdep_assert_not_held(&sb->s_umount);

	/* wait until the superblock is ready or dying */
	wait_var_event(&sb->s_flags, super_flags(sb, SB_BORN | SB_DYING));

	/* Don't pointlessly acquire s_umount. */
	if (super_flags(sb, SB_DYING))
		return false;

	__super_lock(sb, excl);

	/*
	 * Has gone through generic_shutdown_super() in the meantime.
	 * @sb->s_root is NULL and @sb->s_active is 0. No one needs to
	 * grab a reference to this. Tell them so.
	 */
	if (sb->s_flags & SB_DYING) {
		super_unlock(sb, excl);
		return false;
	}

	WARN_ON_ONCE(!(sb->s_flags & SB_BORN));
	return true;
}

/* wait and try to acquire read-side of @sb->s_umount */
static inline bool super_lock_shared(struct super_block *sb)
{
	return super_lock(sb, false);
}

/* wait and try to acquire write-side of @sb->s_umount */
static inline bool super_lock_excl(struct super_block *sb)
{
	return super_lock(sb, true);
}
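
/*
 * Example (illustrative sketch, mirroring the walkers further down in this
 * file such as __iterate_supers() and user_get_super()): a caller first
 * takes a temporary reference under @sb_lock, then waits and locks:
 *
 *	spin_lock(&sb_lock);
 *	sb->s_count++;
 *	spin_unlock(&sb_lock);
 *
 *	if (super_lock_shared(sb)) {
 *		// superblock is live (SB_BORN) and s_umount is held shared
 *		super_unlock_shared(sb);
 *	}
 *	put_super(sb);
 */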

/* wake waiters */
#define SUPER_WAKE_FLAGS (SB_BORN | SB_DYING | SB_DEAD)
static void super_wake(struct super_block *sb, unsigned int flag)
{
	WARN_ON_ONCE((flag & ~SUPER_WAKE_FLAGS));
	WARN_ON_ONCE(hweight32(flag & SUPER_WAKE_FLAGS) > 1);

	/*
	 * Pairs with smp_load_acquire() in super_lock() to make sure
	 * all initializations in the superblock are seen by a user
	 * that sees SB_BORN set.
	 */
	smp_store_release(&sb->s_flags, sb->s_flags | flag);
	/*
	 * Pairs with the barrier in prepare_to_wait_event() to make sure
	 * ___wait_var_event() either sees SB_BORN set or
	 * waitqueue_active() check in wake_up_var() sees the waiter.
	 */
	smp_mb();
	wake_up_var(&sb->s_flags);
}

/*
 * One thing we have to be careful of with a per-sb shrinker is that we don't
 * drop the last active reference to the superblock from within the shrinker.
 * If that happens we could trigger unregistering the shrinker from within the
 * shrinker path and that leads to deadlock on the shrinker_mutex. Hence we
 * take a passive reference to the superblock to avoid this from occurring.
 */
static unsigned long super_cache_scan(struct shrinker *shrink,
				      struct shrink_control *sc)
{
	struct super_block *sb;
	long	fs_objects = 0;
	long	total_objects;
	long	freed = 0;
	long	dentries;
	long	inodes;

	sb = shrink->private_data;

	/*
	 * Deadlock avoidance. We may hold various FS locks, and we don't want
	 * to recurse into the FS that called us in clear_inode() and friends.
	 */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	if (!super_trylock_shared(sb))
		return SHRINK_STOP;

	if (sb->s_op->nr_cached_objects)
		fs_objects = sb->s_op->nr_cached_objects(sb, sc);

	inodes = list_lru_shrink_count(&sb->s_inode_lru, sc);
	dentries = list_lru_shrink_count(&sb->s_dentry_lru, sc);
	total_objects = dentries + inodes + fs_objects;
	if (!total_objects)
		total_objects = 1;

	/* proportion the scan between the caches */
	dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
	inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);
	fs_objects = mult_frac(sc->nr_to_scan, fs_objects, total_objects);

	/*
	 * prune the dcache first as the icache is pinned by it, then
	 * prune the icache, followed by the filesystem specific caches
	 *
	 * Ensure that we always scan at least one object - memcg kmem
	 * accounting uses this to fully empty the caches.
	 */
	sc->nr_to_scan = dentries + 1;
	freed = prune_dcache_sb(sb, sc);
	sc->nr_to_scan = inodes + 1;
	freed += prune_icache_sb(sb, sc);

	if (fs_objects) {
		sc->nr_to_scan = fs_objects + 1;
		freed += sb->s_op->free_cached_objects(sb, sc);
	}

	super_unlock_shared(sb);
	return freed;
}
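
/*
 * Worked example of the proportioning above (illustrative numbers): with
 * sc->nr_to_scan == 128, dentries == 300, inodes == 100 and fs_objects == 0,
 * total_objects is 400, so mult_frac() yields 128*300/400 == 96 dentries and
 * 128*100/400 == 32 inodes to scan; the "+ 1" then guarantees that each
 * cache is scanned at least once even when its share rounds down to zero.
 */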

static unsigned long super_cache_count(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	struct super_block *sb;
	long	total_objects = 0;

	sb = shrink->private_data;

	/*
	 * We don't call super_trylock_shared() here as it is a scalability
	 * bottleneck, so we're exposed to partial setup state. The shrinker
	 * rwsem does not protect filesystem operations backing
	 * list_lru_shrink_count() or s_op->nr_cached_objects(). Counts can
	 * change between super_cache_count and super_cache_scan, so we really
	 * don't need locks here.
	 *
	 * However, if we are currently mounting the superblock, the underlying
	 * filesystem might be in a state of partial construction and hence it
	 * is dangerous to access it. super_trylock_shared() uses a SB_BORN check
	 * to avoid this situation, so do the same here. The memory barrier is
	 * matched with the one in mount_fs() as we don't hold locks here.
	 */
	if (!(sb->s_flags & SB_BORN))
		return 0;
	smp_rmb();

	if (sb->s_op && sb->s_op->nr_cached_objects)
		total_objects = sb->s_op->nr_cached_objects(sb, sc);

	total_objects += list_lru_shrink_count(&sb->s_dentry_lru, sc);
	total_objects += list_lru_shrink_count(&sb->s_inode_lru, sc);

	if (!total_objects)
		return SHRINK_EMPTY;

	total_objects = vfs_pressure_ratio(total_objects);
	return total_objects;
}

static void destroy_super_work(struct work_struct *work)
{
	struct super_block *s = container_of(work, struct super_block,
					     destroy_work);
	fsnotify_sb_free(s);
	security_sb_free(s);
	put_user_ns(s->s_user_ns);
	kfree(s->s_subtype);
	for (int i = 0; i < SB_FREEZE_LEVELS; i++)
		percpu_free_rwsem(&s->s_writers.rw_sem[i]);
	kfree(s);
}

static void destroy_super_rcu(struct rcu_head *head)
{
	struct super_block *s = container_of(head, struct super_block, rcu);
	INIT_WORK(&s->destroy_work, destroy_super_work);
	schedule_work(&s->destroy_work);
}

/* Free a superblock that has never been seen by anyone */
static void destroy_unused_super(struct super_block *s)
{
	if (!s)
		return;
	super_unlock_excl(s);
	list_lru_destroy(&s->s_dentry_lru);
	list_lru_destroy(&s->s_inode_lru);
	shrinker_free(s->s_shrink);
	/* no delays needed */
	destroy_super_work(&s->destroy_work);
}

/**
 * alloc_super - create new superblock
 * @type: filesystem type superblock should belong to
 * @flags: the mount flags
 * @user_ns: User namespace for the super_block
 *
 * Allocates and initializes a new &struct super_block. alloc_super()
 * returns a pointer to a new superblock or %NULL if allocation fails.
 */
static struct super_block *alloc_super(struct file_system_type *type, int flags,
				       struct user_namespace *user_ns)
{
	struct super_block *s = kzalloc(sizeof(struct super_block), GFP_KERNEL);
	static const struct super_operations default_op;
	int i;

	if (!s)
		return NULL;

	s->s_user_ns = get_user_ns(user_ns);
	init_rwsem(&s->s_umount);
	lockdep_set_class(&s->s_umount, &type->s_umount_key);
	/*
	 * sget() can have s_umount recursion.
	 *
	 * When it cannot find a suitable sb, it allocates a new
	 * one (this one), and tries again to find a suitable old
	 * one.
	 *
	 * In case that succeeds, it will acquire the s_umount
	 * lock of the old one. Since these are clearly distinct
	 * locks, and this object isn't exposed yet, there's no
	 * risk of deadlocks.
	 *
	 * Annotate this by putting this lock in a different
	 * subclass.
	 */
	down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);

	if (security_sb_alloc(s))
		goto fail;

	for (i = 0; i < SB_FREEZE_LEVELS; i++) {
		if (__percpu_init_rwsem(&s->s_writers.rw_sem[i],
					sb_writers_name[i],
					&type->s_writers_key[i]))
			goto fail;
	}
	s->s_bdi = &noop_backing_dev_info;
	s->s_flags = flags;
	if (s->s_user_ns != &init_user_ns)
		s->s_iflags |= SB_I_NODEV;
	INIT_HLIST_NODE(&s->s_instances);
	INIT_HLIST_BL_HEAD(&s->s_roots);
	mutex_init(&s->s_sync_lock);
	INIT_LIST_HEAD(&s->s_inodes);
	spin_lock_init(&s->s_inode_list_lock);
	INIT_LIST_HEAD(&s->s_inodes_wb);
	spin_lock_init(&s->s_inode_wblist_lock);

	s->s_count = 1;
	atomic_set(&s->s_active, 1);
	mutex_init(&s->s_vfs_rename_mutex);
	lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
	init_rwsem(&s->s_dquot.dqio_sem);
	s->s_maxbytes = MAX_NON_LFS;
	s->s_op = &default_op;
	s->s_time_gran = 1000000000;
	s->s_time_min = TIME64_MIN;
	s->s_time_max = TIME64_MAX;

	s->s_shrink = shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
				     "sb-%s", type->name);
	if (!s->s_shrink)
		goto fail;

	s->s_shrink->scan_objects = super_cache_scan;
	s->s_shrink->count_objects = super_cache_count;
	s->s_shrink->batch = 1024;
	s->s_shrink->private_data = s;

	if (list_lru_init_memcg(&s->s_dentry_lru, s->s_shrink))
		goto fail;
	if (list_lru_init_memcg(&s->s_inode_lru, s->s_shrink))
		goto fail;
	return s;

fail:
	destroy_unused_super(s);
	return NULL;
}

/* Superblock refcounting */

/*
 * Drop a superblock's refcount. The caller must hold sb_lock.
 */
static void __put_super(struct super_block *s)
{
	if (!--s->s_count) {
		list_del_init(&s->s_list);
		WARN_ON(s->s_dentry_lru.node);
		WARN_ON(s->s_inode_lru.node);
		WARN_ON(s->s_mounts);
		call_rcu(&s->rcu, destroy_super_rcu);
	}
}

/**
 * put_super - drop a temporary reference to superblock
 * @sb: superblock in question
 *
 * Drops a temporary reference and frees the superblock if no
 * references are left.
 */
void put_super(struct super_block *sb)
{
	spin_lock(&sb_lock);
	__put_super(sb);
	spin_unlock(&sb_lock);
}

static void kill_super_notify(struct super_block *sb)
{
	lockdep_assert_not_held(&sb->s_umount);

	/* already notified earlier */
	if (sb->s_flags & SB_DEAD)
		return;

	/*
	 * Remove it from @fs_supers so it isn't found by new
	 * sget{_fc}() walkers anymore. Any concurrent mounter still
	 * managing to grab a temporary reference is guaranteed to
	 * already see SB_DYING and will wait until we notify them about
	 * SB_DEAD.
	 */
	spin_lock(&sb_lock);
	hlist_del_init(&sb->s_instances);
	spin_unlock(&sb_lock);

	/*
	 * Let concurrent mounts know that this thing is really dead.
	 * We don't need @sb->s_umount here as every concurrent caller
	 * will see SB_DYING and either discard the superblock or wait
	 * for SB_DEAD.
	 */
	super_wake(sb, SB_DEAD);
}

/**
 * deactivate_locked_super - drop an active reference to superblock
 * @s: superblock to deactivate
 *
 * Drops an active reference to superblock, converting it into a temporary
 * one if there are no other active references left. In that case we
 * tell fs driver to shut it down and drop the temporary reference we
 * had just acquired.
 *
 * Caller holds exclusive lock on superblock; that lock is released.
 */
void deactivate_locked_super(struct super_block *s)
{
	struct file_system_type *fs = s->s_type;
	if (atomic_dec_and_test(&s->s_active)) {
		shrinker_free(s->s_shrink);
		fs->kill_sb(s);

		kill_super_notify(s);

		/*
		 * Since list_lru_destroy() may sleep, we cannot call it from
		 * put_super(), where we hold the sb_lock. Therefore we destroy
		 * the lru lists right now.
		 */
		list_lru_destroy(&s->s_dentry_lru);
		list_lru_destroy(&s->s_inode_lru);

		put_filesystem(fs);
		put_super(s);
	} else {
		super_unlock_excl(s);
	}
}

EXPORT_SYMBOL(deactivate_locked_super);

/**
 * deactivate_super - drop an active reference to superblock
 * @s: superblock to deactivate
 *
 * Variant of deactivate_locked_super(), except that superblock is *not*
 * locked by caller. If we are going to drop the final active reference,
 * lock will be acquired prior to that.
 */
void deactivate_super(struct super_block *s)
{
	if (!atomic_add_unless(&s->s_active, -1, 1)) {
		__super_lock_excl(s);
		deactivate_locked_super(s);
	}
}

EXPORT_SYMBOL(deactivate_super);

/**
 * grab_super - acquire an active reference to a superblock
 * @sb: superblock to acquire
 *
 * Acquire a temporary reference on a superblock and try to trade it for
 * an active reference. This is used in sget{_fc}() to wait for a
 * superblock to either become SB_BORN or for it to pass through
 * sb->kill() and be marked as SB_DEAD.
 *
 * Return: This returns true if an active reference could be acquired,
 *         false if not.
 */
static bool grab_super(struct super_block *sb)
{
	bool locked;

	sb->s_count++;
	spin_unlock(&sb_lock);
	locked = super_lock_excl(sb);
	if (locked) {
		if (atomic_inc_not_zero(&sb->s_active)) {
			put_super(sb);
			return true;
		}
		super_unlock_excl(sb);
	}
	wait_var_event(&sb->s_flags, super_flags(sb, SB_DEAD));
	put_super(sb);
	return false;
}

/*
 * super_trylock_shared - try to grab ->s_umount shared
 * @sb: reference we are trying to grab
 *
 * Try to prevent fs shutdown. This is used in places where we
 * cannot take an active reference but we need to ensure that the
 * filesystem is not shut down while we are working on it. It returns
 * false if we cannot acquire s_umount or if we lose the race and the
 * filesystem already got into shutdown, and returns true with the s_umount
 * lock held in read mode in case of success. On successful return,
 * the caller must drop the s_umount lock when done.
 *
 * Note that unlike get_super() et al. this one does *not* bump ->s_count.
 * The reason why it's safe is that we are OK with doing trylock instead
 * of down_read(). There are a couple of places that are OK with that, but
 * it's very much not a general-purpose interface.
 */
bool super_trylock_shared(struct super_block *sb)
{
	if (down_read_trylock(&sb->s_umount)) {
		if (!(sb->s_flags & SB_DYING) && sb->s_root &&
		    (sb->s_flags & SB_BORN))
			return true;
		super_unlock_shared(sb);
	}

	return false;
}

/**
 * retire_super - prevents superblock from being reused
 * @sb: superblock to retire
 *
 * The function marks the superblock to be ignored in the superblock test,
 * which prevents it from being reused for any new mounts. If the superblock
 * has a private bdi, it also unregisters it, but doesn't reduce the refcount
 * of the superblock to prevent potential races. The refcount is reduced
 * by generic_shutdown_super(). The function cannot be called
 * concurrently with generic_shutdown_super(). It is safe to call the
 * function multiple times, subsequent calls have no effect.
 *
 * The marker will affect the re-use only for block-device-based
 * superblocks. Other superblocks will still get marked if this function
 * is used, but that will not affect their reusability.
 */
void retire_super(struct super_block *sb)
{
	WARN_ON(!sb->s_bdev);
	__super_lock_excl(sb);
	if (sb->s_iflags & SB_I_PERSB_BDI) {
		bdi_unregister(sb->s_bdi);
		sb->s_iflags &= ~SB_I_PERSB_BDI;
	}
	sb->s_iflags |= SB_I_RETIRED;
	super_unlock_excl(sb);
}
EXPORT_SYMBOL(retire_super);

/**
 * generic_shutdown_super - common helper for ->kill_sb()
 * @sb: superblock to kill
 *
 * generic_shutdown_super() does all fs-independent work on superblock
 * shutdown. Typical ->kill_sb() should pick all fs-specific objects
 * that need destruction out of superblock, call generic_shutdown_super()
 * and release aforementioned objects. Note: dentries and inodes _are_
 * taken care of and do not need specific handling.
 *
 * Upon calling this function, the filesystem may no longer alter or
 * rearrange the set of dentries belonging to this super_block, nor may it
 * change the attachments of dentries to inodes.
 */
void generic_shutdown_super(struct super_block *sb)
{
	const struct super_operations *sop = sb->s_op;

	if (sb->s_root) {
		shrink_dcache_for_umount(sb);
		sync_filesystem(sb);
		sb->s_flags &= ~SB_ACTIVE;

		cgroup_writeback_umount(sb);

		/* Evict all inodes with zero refcount. */
		evict_inodes(sb);

		/*
		 * Clean up and evict any inodes that still have references due
		 * to fsnotify or the security policy.
		 */
		fsnotify_sb_delete(sb);
		security_sb_delete(sb);

		if (sb->s_dio_done_wq) {
			destroy_workqueue(sb->s_dio_done_wq);
			sb->s_dio_done_wq = NULL;
		}

		if (sop->put_super)
			sop->put_super(sb);

		/*
		 * Now that all potentially-encrypted inodes have been evicted,
		 * the fscrypt keyring can be destroyed.
		 */
		fscrypt_destroy_keyring(sb);

		if (CHECK_DATA_CORRUPTION(!list_empty(&sb->s_inodes), NULL,
				"VFS: Busy inodes after unmount of %s (%s)",
				sb->s_id, sb->s_type->name)) {
			/*
			 * Adding a proper bailout path here would be hard, but
			 * we can at least make it more likely that a later
			 * iput_final() or such crashes cleanly.
			 */
			struct inode *inode;

			spin_lock(&sb->s_inode_list_lock);
			list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
				inode->i_op = VFS_PTR_POISON;
				inode->i_sb = VFS_PTR_POISON;
				inode->i_mapping = VFS_PTR_POISON;
			}
			spin_unlock(&sb->s_inode_list_lock);
		}
	}
	/*
	 * Broadcast to everyone that grabbed a temporary reference to this
	 * superblock before we removed it from @fs_supers that the superblock
	 * is dying. Every walker of @fs_supers outside of sget{_fc}() will now
	 * discard this superblock and treat it as dead.
	 *
	 * We leave the superblock on @fs_supers so it can be found by
	 * sget{_fc}() until we passed sb->kill_sb().
	 */
	super_wake(sb, SB_DYING);
	super_unlock_excl(sb);
	if (sb->s_bdi != &noop_backing_dev_info) {
		if (sb->s_iflags & SB_I_PERSB_BDI)
			bdi_unregister(sb->s_bdi);
		bdi_put(sb->s_bdi);
		sb->s_bdi = &noop_backing_dev_info;
	}
}

EXPORT_SYMBOL(generic_shutdown_super);

bool mount_capable(struct fs_context *fc)
{
	if (!(fc->fs_type->fs_flags & FS_USERNS_MOUNT))
		return capable(CAP_SYS_ADMIN);
	else
		return ns_capable(fc->user_ns, CAP_SYS_ADMIN);
}

/**
 * sget_fc - Find or create a superblock
 * @fc: Filesystem context.
 * @test: Comparison callback
 * @set: Setup callback
 *
 * Create a new superblock or find an existing one.
 *
 * The @test callback is used to find a matching existing superblock.
 * Whether or not the requested parameters in @fc are taken into account
 * is specific to the @test callback that is used. They may even be
 * completely ignored.
 *
 * If an extant superblock is matched, it will be returned unless:
 *
 * (1) the namespace of the filesystem context @fc and the extant
 *     superblock's namespace differ
 *
 * (2) the filesystem context @fc has requested that reusing an extant
 *     superblock is not allowed
 *
 * In both cases EBUSY will be returned.
 *
 * If no match is made, a new superblock will be allocated and basic
 * initialisation will be performed (s_type, s_fs_info and s_id will be
 * set and the @set callback will be invoked), the superblock will be
 * published and it will be returned in a partially constructed state
 * with SB_BORN and SB_ACTIVE as yet unset.
 *
 * Return: On success, an extant or newly created superblock is
 *         returned. On failure an error pointer is returned.
 */
struct super_block *sget_fc(struct fs_context *fc,
			    int (*test)(struct super_block *, struct fs_context *),
			    int (*set)(struct super_block *, struct fs_context *))
{
	struct super_block *s = NULL;
	struct super_block *old;
	struct user_namespace *user_ns = fc->global ? &init_user_ns : fc->user_ns;
	int err;

	/*
	 * Never allow s_user_ns != &init_user_ns when FS_USERNS_MOUNT is
	 * not set, as the filesystem is likely unprepared to handle it.
	 * This can happen when fsconfig() is called from init_user_ns with
	 * an fs_fd opened in another user namespace.
	 */
	if (user_ns != &init_user_ns && !(fc->fs_type->fs_flags & FS_USERNS_MOUNT)) {
		errorfc(fc, "VFS: Mounting from non-initial user namespace is not allowed");
		return ERR_PTR(-EPERM);
	}

retry:
	spin_lock(&sb_lock);
	if (test) {
		hlist_for_each_entry(old, &fc->fs_type->fs_supers, s_instances) {
			if (test(old, fc))
				goto share_extant_sb;
		}
	}
	if (!s) {
		spin_unlock(&sb_lock);
		s = alloc_super(fc->fs_type, fc->sb_flags, user_ns);
		if (!s)
			return ERR_PTR(-ENOMEM);
		goto retry;
	}

	s->s_fs_info = fc->s_fs_info;
	err = set(s, fc);
	if (err) {
		s->s_fs_info = NULL;
		spin_unlock(&sb_lock);
		destroy_unused_super(s);
		return ERR_PTR(err);
	}
	fc->s_fs_info = NULL;
	s->s_type = fc->fs_type;
	s->s_iflags |= fc->s_iflags;
	strscpy(s->s_id, s->s_type->name, sizeof(s->s_id));
	/*
	 * Make the superblock visible on @super_blocks and @fs_supers.
	 * It's in a nascent state and users should wait on SB_BORN or
	 * SB_DYING to be set.
	 */
	list_add_tail(&s->s_list, &super_blocks);
	hlist_add_head(&s->s_instances, &s->s_type->fs_supers);
	spin_unlock(&sb_lock);
	get_filesystem(s->s_type);
	shrinker_register(s->s_shrink);
	return s;

share_extant_sb:
	if (user_ns != old->s_user_ns || fc->exclusive) {
		spin_unlock(&sb_lock);
		destroy_unused_super(s);
		if (fc->exclusive)
			warnfc(fc, "reusing existing filesystem not allowed");
		else
			warnfc(fc, "reusing existing filesystem in another namespace not allowed");
		return ERR_PTR(-EBUSY);
	}
	if (!grab_super(old))
		goto retry;
	destroy_unused_super(s);
	return old;
}
EXPORT_SYMBOL(sget_fc);
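
/*
 * Example (illustrative sketch; the my_fs_* names are hypothetical): a
 * filesystem's ->get_tree() implementation typically pairs sget_fc() with
 * a set callback, much like the anon-dev helpers further down this file:
 *
 *	static int my_fs_set_super(struct super_block *sb, struct fs_context *fc)
 *	{
 *		return set_anon_super(sb, NULL);
 *	}
 *
 *	...
 *	sb = sget_fc(fc, NULL, my_fs_set_super);
 *	if (IS_ERR(sb))
 *		return PTR_ERR(sb);
 */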

/**
 * sget - find or create a superblock
 * @type: filesystem type superblock should belong to
 * @test: comparison callback
 * @set: setup callback
 * @flags: mount flags
 * @data: argument to each of them
 */
struct super_block *sget(struct file_system_type *type,
			 int (*test)(struct super_block *, void *),
			 int (*set)(struct super_block *, void *),
			 int flags,
			 void *data)
{
	struct user_namespace *user_ns = current_user_ns();
	struct super_block *s = NULL;
	struct super_block *old;
	int err;

retry:
	spin_lock(&sb_lock);
	if (test) {
		hlist_for_each_entry(old, &type->fs_supers, s_instances) {
			if (!test(old, data))
				continue;
			if (user_ns != old->s_user_ns) {
				spin_unlock(&sb_lock);
				destroy_unused_super(s);
				return ERR_PTR(-EBUSY);
			}
			if (!grab_super(old))
				goto retry;
			destroy_unused_super(s);
			return old;
		}
	}
	if (!s) {
		spin_unlock(&sb_lock);
		s = alloc_super(type, flags, user_ns);
		if (!s)
			return ERR_PTR(-ENOMEM);
		goto retry;
	}

	err = set(s, data);
	if (err) {
		spin_unlock(&sb_lock);
		destroy_unused_super(s);
		return ERR_PTR(err);
	}
	s->s_type = type;
	strscpy(s->s_id, type->name, sizeof(s->s_id));
	list_add_tail(&s->s_list, &super_blocks);
	hlist_add_head(&s->s_instances, &type->fs_supers);
	spin_unlock(&sb_lock);
	get_filesystem(type);
	shrinker_register(s->s_shrink);
	return s;
}
EXPORT_SYMBOL(sget);

void drop_super(struct super_block *sb)
{
	super_unlock_shared(sb);
	put_super(sb);
}

EXPORT_SYMBOL(drop_super);

void drop_super_exclusive(struct super_block *sb)
{
	super_unlock_excl(sb);
	put_super(sb);
}
EXPORT_SYMBOL(drop_super_exclusive);

enum super_iter_flags_t {
	SUPER_ITER_EXCL		= (1U << 0),
	SUPER_ITER_UNLOCKED	= (1U << 1),
	SUPER_ITER_REVERSE	= (1U << 2),
};

static inline struct super_block *first_super(enum super_iter_flags_t flags)
{
	if (flags & SUPER_ITER_REVERSE)
		return list_last_entry(&super_blocks, struct super_block, s_list);
	return list_first_entry(&super_blocks, struct super_block, s_list);
}

static inline struct super_block *next_super(struct super_block *sb,
					     enum super_iter_flags_t flags)
{
	if (flags & SUPER_ITER_REVERSE)
		return list_prev_entry(sb, s_list);
	return list_next_entry(sb, s_list);
}

static void __iterate_supers(void (*f)(struct super_block *, void *), void *arg,
			     enum super_iter_flags_t flags)
{
	struct super_block *sb, *p = NULL;
	bool excl = flags & SUPER_ITER_EXCL;

	guard(spinlock)(&sb_lock);

	for (sb = first_super(flags);
	     !list_entry_is_head(sb, &super_blocks, s_list);
	     sb = next_super(sb, flags)) {
		if (super_flags(sb, SB_DYING))
			continue;
		sb->s_count++;
		spin_unlock(&sb_lock);

		if (flags & SUPER_ITER_UNLOCKED) {
			f(sb, arg);
		} else if (super_lock(sb, excl)) {
			f(sb, arg);
			super_unlock(sb, excl);
		}

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
}

void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
{
	__iterate_supers(f, arg, 0);
}

/**
 * iterate_supers_type - call function for superblocks of given type
 * @type: fs type
 * @f: function to call
 * @arg: argument to pass to it
 *
 * Scans the superblock list and calls given function, passing it
 * locked superblock and given argument.
 */
void iterate_supers_type(struct file_system_type *type,
			 void (*f)(struct super_block *, void *), void *arg)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
		bool locked;

		if (super_flags(sb, SB_DYING))
			continue;

		sb->s_count++;
		spin_unlock(&sb_lock);

		locked = super_lock_shared(sb);
		if (locked) {
			f(sb, arg);
			super_unlock_shared(sb);
		}

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}

EXPORT_SYMBOL(iterate_supers_type);

struct super_block *user_get_super(dev_t dev, bool excl)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		bool locked;

		if (sb->s_dev != dev)
			continue;

		sb->s_count++;
		spin_unlock(&sb_lock);

		locked = super_lock(sb, excl);
		if (locked)
			return sb;

		spin_lock(&sb_lock);
		__put_super(sb);
		break;
	}
	spin_unlock(&sb_lock);
	return NULL;
}

/**
 * reconfigure_super - asks filesystem to change superblock parameters
 * @fc: The superblock and configuration
 *
 * Alters the configuration parameters of a live superblock.
 */
int reconfigure_super(struct fs_context *fc)
{
	struct super_block *sb = fc->root->d_sb;
	int retval;
	bool remount_ro = false;
	bool remount_rw = false;
	bool force = fc->sb_flags & SB_FORCE;

	if (fc->sb_flags_mask & ~MS_RMT_MASK)
		return -EINVAL;
	if (sb->s_writers.frozen != SB_UNFROZEN)
		return -EBUSY;

	retval = security_sb_remount(sb, fc->security);
	if (retval)
		return retval;

	if (fc->sb_flags_mask & SB_RDONLY) {
#ifdef CONFIG_BLOCK
		if (!(fc->sb_flags & SB_RDONLY) && sb->s_bdev &&
		    bdev_read_only(sb->s_bdev))
			return -EACCES;
#endif
		remount_rw = !(fc->sb_flags & SB_RDONLY) && sb_rdonly(sb);
		remount_ro = (fc->sb_flags & SB_RDONLY) && !sb_rdonly(sb);
	}

	if (remount_ro) {
		if (!hlist_empty(&sb->s_pins)) {
			super_unlock_excl(sb);
			group_pin_kill(&sb->s_pins);
			__super_lock_excl(sb);
			if (!sb->s_root)
				return 0;
			if (sb->s_writers.frozen != SB_UNFROZEN)
				return -EBUSY;
			remount_ro = !sb_rdonly(sb);
		}
	}
	shrink_dcache_sb(sb);

	/*
	 * If we are reconfiguring to RDONLY and current sb is read/write,
	 * make sure there are no files open for writing.
	 */
	if (remount_ro) {
		if (force) {
			sb_start_ro_state_change(sb);
		} else {
			retval = sb_prepare_remount_readonly(sb);
			if (retval)
				return retval;
		}
	} else if (remount_rw) {
		/*
		 * Protect filesystem's reconfigure code from writes from
		 * userspace until reconfigure finishes.
		 */
		sb_start_ro_state_change(sb);
	}

	if (fc->ops->reconfigure) {
		retval = fc->ops->reconfigure(fc);
		if (retval) {
			if (!force)
				goto cancel_readonly;
			/* If forced remount, go ahead despite any errors */
			WARN(1, "forced remount of a %s fs returned %i\n",
			     sb->s_type->name, retval);
		}
	}

	WRITE_ONCE(sb->s_flags, ((sb->s_flags & ~fc->sb_flags_mask) |
				 (fc->sb_flags & fc->sb_flags_mask)));
	sb_end_ro_state_change(sb);

	/*
	 * Some filesystems modify their metadata via some other path than the
	 * bdev buffer cache (eg. use a private mapping, or directories in
	 * pagecache, etc). Also file data modifications go via their own
	 * mappings. So if we try to mount readonly then copy the filesystem
	 * from bdev, we could get stale data, so invalidate it to give a best
	 * effort at coherency.
	 */
	if (remount_ro && sb->s_bdev)
		invalidate_bdev(sb->s_bdev);
	return 0;

cancel_readonly:
	sb_end_ro_state_change(sb);
	return retval;
}

static void do_emergency_remount_callback(struct super_block *sb, void *unused)
{
	if (sb->s_bdev && !sb_rdonly(sb)) {
		struct fs_context *fc;

		fc = fs_context_for_reconfigure(sb->s_root,
					SB_RDONLY | SB_FORCE, SB_RDONLY);
		if (!IS_ERR(fc)) {
			if (parse_monolithic_mount_data(fc, NULL) == 0)
				(void)reconfigure_super(fc);
			put_fs_context(fc);
		}
	}
}

static void do_emergency_remount(struct work_struct *work)
{
	__iterate_supers(do_emergency_remount_callback, NULL,
			 SUPER_ITER_EXCL | SUPER_ITER_REVERSE);
	kfree(work);
	printk("Emergency Remount complete\n");
}

void emergency_remount(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_emergency_remount);
		schedule_work(work);
	}
}

static void do_thaw_all_callback(struct super_block *sb, void *unused)
{
	if (IS_ENABLED(CONFIG_BLOCK))
		while (sb->s_bdev && !bdev_thaw(sb->s_bdev))
			pr_warn("Emergency Thaw on %pg\n", sb->s_bdev);
	thaw_super_locked(sb, FREEZE_HOLDER_USERSPACE, NULL);
}

static void do_thaw_all(struct work_struct *work)
{
	__iterate_supers(do_thaw_all_callback, NULL, SUPER_ITER_EXCL);
	kfree(work);
	printk(KERN_WARNING "Emergency Thaw complete\n");
}

/**
 * emergency_thaw_all -- forcibly thaw every frozen filesystem
 *
 * Used for emergency unfreeze of all filesystems via SysRq
 */
void emergency_thaw_all(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_thaw_all);
		schedule_work(work);
	}
}

static inline bool get_active_super(struct super_block *sb)
{
	bool active = false;

	if (super_lock_excl(sb)) {
		active = atomic_inc_not_zero(&sb->s_active);
		super_unlock_excl(sb);
	}
	return active;
}

static const char *filesystems_freeze_ptr = "filesystems_freeze";

static void filesystems_freeze_callback(struct super_block *sb, void *freeze_all_ptr)
{
	if (!sb->s_op->freeze_fs && !sb->s_op->freeze_super)
		return;

	if (freeze_all_ptr && !(sb->s_type->fs_flags & FS_POWER_FREEZE))
		return;

	if (!get_active_super(sb))
		return;

	if (sb->s_op->freeze_super)
		sb->s_op->freeze_super(sb, FREEZE_EXCL | FREEZE_HOLDER_KERNEL,
				       filesystems_freeze_ptr);
	else
		freeze_super(sb, FREEZE_EXCL | FREEZE_HOLDER_KERNEL,
			     filesystems_freeze_ptr);

	deactivate_super(sb);
}

void filesystems_freeze(bool freeze_all)
{
	void *freeze_all_ptr = NULL;

	if (freeze_all)
		freeze_all_ptr = &freeze_all;
	__iterate_supers(filesystems_freeze_callback, freeze_all_ptr,
			 SUPER_ITER_UNLOCKED | SUPER_ITER_REVERSE);
}

static void filesystems_thaw_callback(struct super_block *sb, void *unused)
{
	if (!sb->s_op->freeze_fs && !sb->s_op->freeze_super)
		return;

	if (!get_active_super(sb))
		return;

	if (sb->s_op->thaw_super)
		sb->s_op->thaw_super(sb, FREEZE_EXCL | FREEZE_HOLDER_KERNEL,
				     filesystems_freeze_ptr);
	else
		thaw_super(sb, FREEZE_EXCL | FREEZE_HOLDER_KERNEL,
			   filesystems_freeze_ptr);

	deactivate_super(sb);
}

void filesystems_thaw(void)
{
	__iterate_supers(filesystems_thaw_callback, NULL, SUPER_ITER_UNLOCKED);
}

static DEFINE_IDA(unnamed_dev_ida);

/**
 * get_anon_bdev - Allocate a block device for filesystems which don't have one.
 * @p: Pointer to a dev_t.
 *
 * Filesystems which don't use real block devices can call this function
 * to allocate a virtual block device.
 *
 * Context: Any context.  Frequently called while holding sb_lock.
 * Return: 0 on success, -EMFILE if there are no anonymous bdevs left
 * or -ENOMEM if memory allocation failed.
 */
int get_anon_bdev(dev_t *p)
{
	int dev;

	/*
	 * Many userspace utilities consider an FSID of 0 invalid.
	 * Always return at least 1 from get_anon_bdev.
	 */
	dev = ida_alloc_range(&unnamed_dev_ida, 1, (1 << MINORBITS) - 1,
			      GFP_ATOMIC);
	if (dev == -ENOSPC)
		dev = -EMFILE;
	if (dev < 0)
		return dev;

	*p = MKDEV(0, dev);
	return 0;
}
EXPORT_SYMBOL(get_anon_bdev);

void free_anon_bdev(dev_t dev)
{
	ida_free(&unnamed_dev_ida, MINOR(dev));
}
EXPORT_SYMBOL(free_anon_bdev);

int set_anon_super(struct super_block *s, void *data)
{
	return get_anon_bdev(&s->s_dev);
}
EXPORT_SYMBOL(set_anon_super);

void kill_anon_super(struct super_block *sb)
{
	dev_t dev = sb->s_dev;
	generic_shutdown_super(sb);
	kill_super_notify(sb);
	free_anon_bdev(dev);
}
EXPORT_SYMBOL(kill_anon_super);

void kill_litter_super(struct super_block *sb)
{
	if (sb->s_root)
		d_genocide(sb->s_root);
	kill_anon_super(sb);
}
EXPORT_SYMBOL(kill_litter_super);
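
/*
 * Example (illustrative; the my_fs names are hypothetical): filesystems
 * without a backing block device usually point ->kill_sb at one of the
 * two helpers above, e.g.:
 *
 *	static struct file_system_type my_fs_type = {
 *		.name		 = "my_fs",
 *		.init_fs_context = my_fs_init_fs_context,
 *		.kill_sb	 = kill_litter_super,
 *	};
 */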

int set_anon_super_fc(struct super_block *sb, struct fs_context *fc)
{
	return set_anon_super(sb, NULL);
}
EXPORT_SYMBOL(set_anon_super_fc);

static int test_keyed_super(struct super_block *sb, struct fs_context *fc)
{
	return sb->s_fs_info == fc->s_fs_info;
}

static int test_single_super(struct super_block *s, struct fs_context *fc)
{
	return 1;
}

static int vfs_get_super(struct fs_context *fc,
		int (*test)(struct super_block *, struct fs_context *),
		int (*fill_super)(struct super_block *sb,
				  struct fs_context *fc))
{
	struct super_block *sb;
	int err;

	sb = sget_fc(fc, test, set_anon_super_fc);
	if (IS_ERR(sb))
		return PTR_ERR(sb);

	if (!sb->s_root) {
		err = fill_super(sb, fc);
		if (err)
			goto error;

		sb->s_flags |= SB_ACTIVE;
	}

	fc->root = dget(sb->s_root);
	return 0;

error:
	deactivate_locked_super(sb);
	return err;
}

int get_tree_nodev(struct fs_context *fc,
		   int (*fill_super)(struct super_block *sb,
				     struct fs_context *fc))
{
	return vfs_get_super(fc, NULL, fill_super);
}
EXPORT_SYMBOL(get_tree_nodev);

int get_tree_single(struct fs_context *fc,
		    int (*fill_super)(struct super_block *sb,
				      struct fs_context *fc))
{
	return vfs_get_super(fc, test_single_super, fill_super);
}
EXPORT_SYMBOL(get_tree_single);

int get_tree_keyed(struct fs_context *fc,
		   int (*fill_super)(struct super_block *sb,
				     struct fs_context *fc),
		   void *key)
{
	fc->s_fs_info = key;
	return vfs_get_super(fc, test_keyed_super, fill_super);
}
EXPORT_SYMBOL(get_tree_keyed);
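
/*
 * Example (illustrative sketch; the my_fs_* names are hypothetical): a
 * small virtual filesystem wires one of the helpers above into its
 * fs_context operations:
 *
 *	static int my_fs_get_tree(struct fs_context *fc)
 *	{
 *		return get_tree_nodev(fc, my_fs_fill_super);
 *	}
 *
 *	static const struct fs_context_operations my_fs_context_ops = {
 *		.get_tree	= my_fs_get_tree,
 *	};
 */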

static int set_bdev_super(struct super_block *s, void *data)
{
	s->s_dev = *(dev_t *)data;
	return 0;
}

static int super_s_dev_set(struct super_block *s, struct fs_context *fc)
{
	return set_bdev_super(s, fc->sget_key);
}

static int super_s_dev_test(struct super_block *s, struct fs_context *fc)
{
	return !(s->s_iflags & SB_I_RETIRED) &&
		s->s_dev == *(dev_t *)fc->sget_key;
}

/**
 * sget_dev - Find or create a superblock by device number
 * @fc: Filesystem context.
 * @dev: device number
 *
 * Find or create a superblock using the provided device number that
 * will be stored in fc->sget_key.
 *
 * If an extant superblock is matched, then that will be returned with
 * an elevated reference count that the caller must transfer or discard.
 *
 * If no match is made, a new superblock will be allocated and basic
 * initialisation will be performed (s_type, s_fs_info, s_id, s_dev will
 * be set). The superblock will be published and it will be returned in
 * a partially constructed state with SB_BORN and SB_ACTIVE as yet
 * unset.
 *
 * Return: an existing or newly created superblock on success, an error
 *         pointer on failure.
 */
struct super_block *sget_dev(struct fs_context *fc, dev_t dev)
{
	fc->sget_key = &dev;
	return sget_fc(fc, super_s_dev_test, super_s_dev_set);
}
EXPORT_SYMBOL(sget_dev);

#ifdef CONFIG_BLOCK
/*
 * Lock the superblock that is holder of the bdev. Returns the superblock
 * pointer if we successfully locked the superblock and it is alive. Otherwise
 * we return NULL and just unlock bdev->bd_holder_lock.
 *
 * The function must be called with bdev->bd_holder_lock and releases it.
 */
static struct super_block *bdev_super_lock(struct block_device *bdev, bool excl)
	__releases(&bdev->bd_holder_lock)
{
	struct super_block *sb = bdev->bd_holder;
	bool locked;

	lockdep_assert_held(&bdev->bd_holder_lock);
	lockdep_assert_not_held(&sb->s_umount);
	lockdep_assert_not_held(&bdev->bd_disk->open_mutex);

	/* Make sure sb doesn't go away from under us */
	spin_lock(&sb_lock);
	sb->s_count++;
	spin_unlock(&sb_lock);

	mutex_unlock(&bdev->bd_holder_lock);

	locked = super_lock(sb, excl);

	/*
	 * If the superblock wasn't already SB_DYING then we hold
	 * s_umount and can safely drop our temporary reference.
	 */
	put_super(sb);

	if (!locked)
		return NULL;

	if (!sb->s_root || !(sb->s_flags & SB_ACTIVE)) {
		super_unlock(sb, excl);
		return NULL;
	}

	return sb;
}

static void fs_bdev_mark_dead(struct block_device *bdev, bool surprise)
{
	struct super_block *sb;

	sb = bdev_super_lock(bdev, false);
	if (!sb)
		return;

	if (sb->s_op->remove_bdev) {
		int ret;

		ret = sb->s_op->remove_bdev(sb, bdev);
		if (!ret) {
			super_unlock_shared(sb);
			return;
		}
		/* Fallback to shutdown. */
	}

	if (!surprise)
		sync_filesystem(sb);
	shrink_dcache_sb(sb);
	evict_inodes(sb);
	if (sb->s_op->shutdown)
		sb->s_op->shutdown(sb);

	super_unlock_shared(sb);
}

static void fs_bdev_sync(struct block_device *bdev)
{
	struct super_block *sb;

	sb = bdev_super_lock(bdev, false);
	if (!sb)
		return;

	sync_filesystem(sb);
	super_unlock_shared(sb);
}

static struct super_block *get_bdev_super(struct block_device *bdev)
{
	bool active = false;
	struct super_block *sb;

	sb = bdev_super_lock(bdev, true);
	if (sb) {
		active = atomic_inc_not_zero(&sb->s_active);
		super_unlock_excl(sb);
	}
	if (!active)
		return NULL;
	return sb;
}

/**
 * fs_bdev_freeze - freeze owning filesystem of block device
 * @bdev: block device
 *
 * Freeze the filesystem that owns this block device if it is still
 * active.
 *
 * A filesystem that owns multiple block devices may be frozen from each
 * block device and won't be unfrozen until all block devices are
 * unfrozen. Each block device can only freeze the filesystem once as we
 * nest freezes for block devices in the block layer.
 *
 * Return: If the freeze was successful zero is returned. If the freeze
 *         failed a negative error code is returned.
 */
static int fs_bdev_freeze(struct block_device *bdev)
{
	struct super_block *sb;
	int error = 0;

	lockdep_assert_held(&bdev->bd_fsfreeze_mutex);

	sb = get_bdev_super(bdev);
	if (!sb)
		return -EINVAL;

	if (sb->s_op->freeze_super)
		error = sb->s_op->freeze_super(sb,
				FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE, NULL);
	else
		error = freeze_super(sb,
				FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE, NULL);
	if (!error)
		error = sync_blockdev(bdev);
	deactivate_super(sb);
	return error;
}

/**
 * fs_bdev_thaw - thaw owning filesystem of block device
 * @bdev: block device
 *
 * Thaw the filesystem that owns this block device.
 *
 * A filesystem that owns multiple block devices may be frozen from each
 * block device and won't be unfrozen until all block devices are
 * unfrozen. Each block device can only freeze the filesystem once as we
 * nest freezes for block devices in the block layer.
 *
 * Return: If the thaw was successful zero is returned. If the thaw
 *         failed a negative error code is returned. If this function
 *         returns zero it doesn't mean that the filesystem is unfrozen
 *         as it may have been frozen multiple times (kernel may hold a
 *         freeze or might be frozen from other block devices).
 */
static int fs_bdev_thaw(struct block_device *bdev)
{
	struct super_block *sb;
	int error;

	lockdep_assert_held(&bdev->bd_fsfreeze_mutex);

	/*
	 * The block device may have been frozen before it was claimed by a
	 * filesystem. Concurrently another process might try to mount that
	 * frozen block device and has temporarily claimed the block device for
	 * that purpose causing a concurrent fs_bdev_thaw() to end up here. The
	 * mounter is already about to abort mounting because they still saw an
	 * elevated bdev->bd_fsfreeze_count so get_bdev_super() will return
	 * NULL in that case.
	 */
	sb = get_bdev_super(bdev);
	if (!sb)
		return -EINVAL;

	if (sb->s_op->thaw_super)
		error = sb->s_op->thaw_super(sb,
				FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE, NULL);
	else
		error = thaw_super(sb,
				FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE, NULL);
	deactivate_super(sb);
	return error;
}

const struct blk_holder_ops fs_holder_ops = {
	.mark_dead	= fs_bdev_mark_dead,
	.sync		= fs_bdev_sync,
	.freeze		= fs_bdev_freeze,
	.thaw		= fs_bdev_thaw,
};
EXPORT_SYMBOL_GPL(fs_holder_ops);
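
/*
 * Example (illustrative sketch; extra_dev and mode are hypothetical
 * variables): a filesystem that opens additional block devices beyond the
 * main one passes its super_block as the holder together with these ops,
 * mirroring setup_bdev_super() below:
 *
 *	bdev_file = bdev_file_open_by_dev(extra_dev, mode, sb, &fs_holder_ops);
 */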

int setup_bdev_super(struct super_block *sb, int sb_flags,
		     struct fs_context *fc)
{
	blk_mode_t mode = sb_open_mode(sb_flags);
	struct file *bdev_file;
	struct block_device *bdev;

	bdev_file = bdev_file_open_by_dev(sb->s_dev, mode, sb, &fs_holder_ops);
	if (IS_ERR(bdev_file)) {
		if (fc)
			errorf(fc, "%s: Can't open blockdev", fc->source);
		return PTR_ERR(bdev_file);
	}
	bdev = file_bdev(bdev_file);

	/*
	 * This really should be in blkdev_get_by_dev, but right now can't due
	 * to legacy issues that require us to allow opening a block device node
	 * writable from userspace even for a read-only block device.
	 */
	if ((mode & BLK_OPEN_WRITE) && bdev_read_only(bdev)) {
		bdev_fput(bdev_file);
		return -EACCES;
	}

	/*
	 * It is enough to check bdev was not frozen before we set
	 * s_bdev as freezing will wait until SB_BORN is set.
	 */
	if (atomic_read(&bdev->bd_fsfreeze_count) > 0) {
		if (fc)
			warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
		bdev_fput(bdev_file);
		return -EBUSY;
	}
	spin_lock(&sb_lock);
	sb->s_bdev_file = bdev_file;
	sb->s_bdev = bdev;
	sb->s_bdi = bdi_get(bdev->bd_disk->bdi);
	if (bdev_stable_writes(bdev))
		sb->s_iflags |= SB_I_STABLE_WRITES;
	spin_unlock(&sb_lock);

	snprintf(sb->s_id, sizeof(sb->s_id), "%pg", bdev);
	shrinker_debugfs_rename(sb->s_shrink, "sb-%s:%s", sb->s_type->name,
				sb->s_id);
	sb_set_blocksize(sb, block_size(bdev));
	return 0;
}
EXPORT_SYMBOL_GPL(setup_bdev_super);

/**
 * get_tree_bdev_flags - Get a superblock based on a single block device
 * @fc: The filesystem context holding the parameters
 * @fill_super: Helper to initialise a new superblock
 * @flags: GET_TREE_BDEV_* flags
 */
int get_tree_bdev_flags(struct fs_context *fc,
			int (*fill_super)(struct super_block *sb,
					  struct fs_context *fc), unsigned int flags)
{
	struct super_block *s;
	int error = 0;
	dev_t dev;

	if (!fc->source)
		return invalf(fc, "No source specified");

	error = lookup_bdev(fc->source, &dev);
	if (error) {
		if (!(flags & GET_TREE_BDEV_QUIET_LOOKUP))
			errorf(fc, "%s: Can't lookup blockdev", fc->source);
		return error;
	}
	fc->sb_flags |= SB_NOSEC;
	s = sget_dev(fc, dev);
	if (IS_ERR(s))
		return PTR_ERR(s);

	if (s->s_root) {
		/* Don't summarily change the RO/RW state. */
		if ((fc->sb_flags ^ s->s_flags) & SB_RDONLY) {
			warnf(fc, "%pg: Can't mount, would change RO state", s->s_bdev);
			deactivate_locked_super(s);
			return -EBUSY;
		}
	} else {
		error = setup_bdev_super(s, fc->sb_flags, fc);
		if (!error)
			error = fill_super(s, fc);
		if (error) {
			deactivate_locked_super(s);
			return error;
		}
		s->s_flags |= SB_ACTIVE;
	}

	BUG_ON(fc->root);
	fc->root = dget(s->s_root);
	return 0;
}
EXPORT_SYMBOL_GPL(get_tree_bdev_flags);

/**
 * get_tree_bdev - Get a superblock based on a single block device
 * @fc: The filesystem context holding the parameters
 * @fill_super: Helper to initialise a new superblock
 */
int get_tree_bdev(struct fs_context *fc,
		  int (*fill_super)(struct super_block *,
				    struct fs_context *))
{
	return get_tree_bdev_flags(fc, fill_super, 0);
}
EXPORT_SYMBOL(get_tree_bdev);
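
/*
 * Example (illustrative; my_fs_fill_super is hypothetical): a
 * block-device-based filesystem typically calls get_tree_bdev() from its
 * ->get_tree() hook and pairs it with kill_block_super() below:
 *
 *	static int my_fs_get_tree(struct fs_context *fc)
 *	{
 *		return get_tree_bdev(fc, my_fs_fill_super);
 *	}
 */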

void kill_block_super(struct super_block *sb)
{
	struct block_device *bdev = sb->s_bdev;

	generic_shutdown_super(sb);
	if (bdev) {
		sync_blockdev(bdev);
		bdev_fput(sb->s_bdev_file);
	}
}

EXPORT_SYMBOL(kill_block_super);
#endif
1738
1739 /**
1740 * vfs_get_tree - Get the mountable root
1741 * @fc: The superblock configuration context.
1742 *
1743 * The filesystem is invoked to get or create a superblock which can then later
1744 * be used for mounting. The filesystem places a pointer to the root to be
1745 * used for mounting in @fc->root.
1746 */
vfs_get_tree(struct fs_context * fc)1747 int vfs_get_tree(struct fs_context *fc)
1748 {
1749 struct super_block *sb;
1750 int error;
1751
1752 if (fc->root)
1753 return -EBUSY;
1754
1755 /* Get the mountable root in fc->root, with a ref on the root and a ref
1756 * on the superblock.
1757 */
1758 error = fc->ops->get_tree(fc);
1759 if (error < 0)
1760 return error;
1761
1762 if (!fc->root) {
1763 pr_err("Filesystem %s get_tree() didn't set fc->root, returned %i\n",
1764 fc->fs_type->name, error);
1765 /* We don't know what the locking state of the superblock is -
1766 * if there is a superblock.
1767 */
1768 BUG();
1769 }
1770
1771 sb = fc->root->d_sb;
1772 WARN_ON(!sb->s_bdi);
1773
1774 /*
1775 * super_wake() contains a memory barrier which also care of
1776 * ordering for super_cache_count(). We place it before setting
1777 * SB_BORN as the data dependency between the two functions is
1778 * the superblock structure contents that we just set up, not
1779 * the SB_BORN flag.
1780 */
	super_wake(sb, SB_BORN);

	error = security_sb_set_mnt_opts(sb, fc->security, 0, NULL);
	if (unlikely(error)) {
		fc_drop_locked(fc);
		return error;
	}

	/*
	 * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
	 * but s_maxbytes was an unsigned long long for many releases. Throw
	 * this warning for a little while to try and catch filesystems that
	 * violate this rule.
	 */
	WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
		"negative value (%lld)\n", fc->fs_type->name, sb->s_maxbytes);

	return 0;
}
EXPORT_SYMBOL(vfs_get_tree);
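/*
 * Illustrative sketch (not part of this file): vfs_get_tree() is called
 * by a mount path once the context is configured; this is roughly what
 * fc_mount() does internally.
 *
 *	fc = fs_context_for_mount(type, sb_flags);
 *	if (IS_ERR(fc))
 *		return ERR_CAST(fc);
 *	// ... parse/apply mount options into fc ...
 *	err = vfs_get_tree(fc);
 *	if (!err) {
 *		up_write(&fc->root->d_sb->s_umount);
 *		mnt = vfs_create_mount(fc);
 *	}
 *	put_fs_context(fc);
 */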

/*
 * Setup private BDI for given superblock. It gets automatically cleaned up
 * in generic_shutdown_super().
 */
int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
{
	struct backing_dev_info *bdi;
	int err;
	va_list args;

	bdi = bdi_alloc(NUMA_NO_NODE);
	if (!bdi)
		return -ENOMEM;

	va_start(args, fmt);
	err = bdi_register_va(bdi, fmt, args);
	va_end(args);
	if (err) {
		bdi_put(bdi);
		return err;
	}
	WARN_ON(sb->s_bdi != &noop_backing_dev_info);
	sb->s_bdi = bdi;
	sb->s_iflags |= SB_I_PERSB_BDI;

	return 0;
}
EXPORT_SYMBOL(super_setup_bdi_name);

/*
 * Setup private BDI for given superblock. It gets automatically cleaned up
 * in generic_shutdown_super().
 */
int super_setup_bdi(struct super_block *sb)
{
	static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

	return super_setup_bdi_name(sb, "%.28s-%ld", sb->s_type->name,
				    atomic_long_inc_return(&bdi_seq));
}
EXPORT_SYMBOL(super_setup_bdi);
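/*
 * Illustrative sketch (not part of this file): filesystems without a
 * backing block device typically call one of the helpers above from
 * their fill_super callback; foofs_fill_super is a hypothetical name.
 *
 *	static int foofs_fill_super(struct super_block *sb,
 *				    struct fs_context *fc)
 *	{
 *		int err;
 *
 *		err = super_setup_bdi(sb);
 *		if (err)
 *			return err;
 *		// ... set up s_op, root inode, etc. ...
 *		return 0;
 *	}
 */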

/**
 * sb_wait_write - wait until all writers to given file system finish
 * @sb: the super for which we wait
 * @level: type of writers we wait for (normal vs page fault)
 *
 * This function waits until there are no writers of given type to given file
 * system.
 */
static void sb_wait_write(struct super_block *sb, int level)
{
	percpu_down_write(sb->s_writers.rw_sem + level-1);
}

/*
 * We are going to return to userspace and forget about these locks; the
 * ownership goes to the caller of thaw_super() which does unlock().
 */
static void lockdep_sb_freeze_release(struct super_block *sb)
{
	int level;

	for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
		percpu_rwsem_release(sb->s_writers.rw_sem + level, _THIS_IP_);
}

/*
 * Tell lockdep we are holding these locks before we call ->unfreeze_fs(sb).
 */
static void lockdep_sb_freeze_acquire(struct super_block *sb)
{
	int level;

	for (level = 0; level < SB_FREEZE_LEVELS; ++level)
		percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
}

static void sb_freeze_unlock(struct super_block *sb, int level)
{
	for (level--; level >= 0; level--)
		percpu_up_write(sb->s_writers.rw_sem + level);
}

static int wait_for_partially_frozen(struct super_block *sb)
{
	int ret = 0;

	do {
		unsigned short old = sb->s_writers.frozen;

		up_write(&sb->s_umount);
		ret = wait_var_event_killable(&sb->s_writers.frozen,
					      sb->s_writers.frozen != old);
		down_write(&sb->s_umount);
	} while (ret == 0 &&
		 sb->s_writers.frozen != SB_UNFROZEN &&
		 sb->s_writers.frozen != SB_FREEZE_COMPLETE);

	return ret;
}

#define FREEZE_HOLDERS (FREEZE_HOLDER_KERNEL | FREEZE_HOLDER_USERSPACE)
#define FREEZE_FLAGS (FREEZE_HOLDERS | FREEZE_MAY_NEST | FREEZE_EXCL)

static inline int freeze_inc(struct super_block *sb, enum freeze_holder who)
{
	WARN_ON_ONCE((who & ~FREEZE_FLAGS));
	WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1);

	if (who & FREEZE_HOLDER_KERNEL)
		++sb->s_writers.freeze_kcount;
	if (who & FREEZE_HOLDER_USERSPACE)
		++sb->s_writers.freeze_ucount;
	return sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount;
}

static inline int freeze_dec(struct super_block *sb, enum freeze_holder who)
{
	WARN_ON_ONCE((who & ~FREEZE_FLAGS));
	WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1);

	if ((who & FREEZE_HOLDER_KERNEL) && sb->s_writers.freeze_kcount)
		--sb->s_writers.freeze_kcount;
	if ((who & FREEZE_HOLDER_USERSPACE) && sb->s_writers.freeze_ucount)
		--sb->s_writers.freeze_ucount;
	return sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount;
}

static inline bool may_freeze(struct super_block *sb, enum freeze_holder who,
			      const void *freeze_owner)
{
	lockdep_assert_held(&sb->s_umount);

	WARN_ON_ONCE((who & ~FREEZE_FLAGS));
	WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1);

	if (who & FREEZE_EXCL) {
		if (WARN_ON_ONCE(!(who & FREEZE_HOLDER_KERNEL)))
			return false;
		if (WARN_ON_ONCE(who & ~(FREEZE_EXCL | FREEZE_HOLDER_KERNEL)))
			return false;
		if (WARN_ON_ONCE(!freeze_owner))
			return false;
		/* This freeze already has a specific owner. */
		if (sb->s_writers.freeze_owner)
			return false;
		/*
		 * This is already frozen multiple times so we're just
		 * going to take a reference count and mark the freeze as
		 * being owned by the caller.
		 */
		if (sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount)
			sb->s_writers.freeze_owner = freeze_owner;
		return true;
	}

	if (who & FREEZE_HOLDER_KERNEL)
		return (who & FREEZE_MAY_NEST) ||
		       sb->s_writers.freeze_kcount == 0;
	if (who & FREEZE_HOLDER_USERSPACE)
		return (who & FREEZE_MAY_NEST) ||
		       sb->s_writers.freeze_ucount == 0;
	return false;
}

static inline bool may_unfreeze(struct super_block *sb, enum freeze_holder who,
				const void *freeze_owner)
{
	lockdep_assert_held(&sb->s_umount);

	WARN_ON_ONCE((who & ~FREEZE_FLAGS));
	WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1);

	if (who & FREEZE_EXCL) {
		if (WARN_ON_ONCE(!(who & FREEZE_HOLDER_KERNEL)))
			return false;
		if (WARN_ON_ONCE(who & ~(FREEZE_EXCL | FREEZE_HOLDER_KERNEL)))
			return false;
		if (WARN_ON_ONCE(!freeze_owner))
			return false;
		if (WARN_ON_ONCE(sb->s_writers.freeze_kcount == 0))
			return false;
		/* This isn't exclusively frozen. */
		if (!sb->s_writers.freeze_owner)
			return false;
		/* This isn't exclusively frozen by us. */
		if (sb->s_writers.freeze_owner != freeze_owner)
			return false;
		/*
		 * This is still frozen multiple times so we're just
		 * going to drop our reference count and undo our
		 * exclusive freeze.
		 */
		if ((sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount) > 1)
			sb->s_writers.freeze_owner = NULL;
		return true;
	}

	if (who & FREEZE_HOLDER_KERNEL) {
		/*
		 * Someone's trying to steal the reference belonging to
		 * @sb->s_writers.freeze_owner.
		 */
		if (sb->s_writers.freeze_kcount == 1 &&
		    sb->s_writers.freeze_owner)
			return false;
		return sb->s_writers.freeze_kcount > 0;
	}

	if (who & FREEZE_HOLDER_USERSPACE)
		return sb->s_writers.freeze_ucount > 0;

	return false;
}

/**
 * freeze_super - lock the filesystem and force it into a consistent state
 * @sb: the super to lock
 * @who: context that wants to freeze
 * @freeze_owner: owner of the freeze
 *
 * Syncs the super to make sure the filesystem is consistent and calls the fs's
 * freeze_fs. Subsequent calls to this without first thawing the fs may return
 * -EBUSY.
 *
 * @who should be:
 * * %FREEZE_HOLDER_USERSPACE if userspace wants to freeze the fs;
 * * %FREEZE_HOLDER_KERNEL if the kernel wants to freeze the fs;
 * * %FREEZE_MAY_NEST whether nesting freeze and thaw requests is allowed.
 *
 * The @who argument distinguishes between the kernel and userspace trying to
 * freeze the filesystem. Although there cannot be multiple kernel freezes or
 * multiple userspace freezes in effect at any given time, the kernel and
 * userspace can both hold a filesystem frozen. The filesystem remains frozen
 * until there are no kernel or userspace freezes in effect.
 *
 * A filesystem may hold multiple devices and thus a filesystem may be
 * frozen through the block layer via multiple block devices. In this
 * case the request is marked as being allowed to nest by passing
 * FREEZE_MAY_NEST. The filesystem remains frozen until all block
 * devices are unfrozen. If multiple freezes are attempted without
 * FREEZE_MAY_NEST, -EBUSY will be returned.
 *
 * During this function, sb->s_writers.frozen goes through these values:
 *
 * SB_UNFROZEN: File system is normal, all writes progress as usual.
 *
 * SB_FREEZE_WRITE: The file system is in the process of being frozen. New
 * writes should be blocked, though page faults are still allowed. We wait for
 * all writes to complete and then proceed to the next stage.
 *
 * SB_FREEZE_PAGEFAULT: Freezing continues. Now also page faults are blocked
 * but internal fs threads can still modify the filesystem (although they
 * should not dirty new pages or inodes), writeback can run etc. After waiting
 * for all running page faults we sync the filesystem which will clean all
 * dirty pages and inodes (no new dirty pages or inodes can be created when
 * sync is running).
 *
 * SB_FREEZE_FS: The file system is frozen. Now all internal sources of fs
 * modification are blocked (e.g. XFS preallocation truncation on inode
 * reclaim). This is usually implemented by blocking new transactions for
 * filesystems that have them and need this additional guard. After all
 * internal writers are finished we call ->freeze_fs() to finish filesystem
 * freezing. Then we transition to SB_FREEZE_COMPLETE state. This state is
 * mostly auxiliary for filesystems to verify they do not modify frozen fs.
 *
 * sb->s_writers.frozen is protected by sb->s_umount.
 *
 * Return: If the freeze was successful zero is returned. If the freeze
 *         failed a negative error code is returned.
 */
int freeze_super(struct super_block *sb, enum freeze_holder who, const void *freeze_owner)
{
	int ret;

	if (!super_lock_excl(sb)) {
		WARN_ON_ONCE("Dying superblock while freezing!");
		return -EINVAL;
	}
	atomic_inc(&sb->s_active);

retry:
	if (sb->s_writers.frozen == SB_FREEZE_COMPLETE) {
		if (may_freeze(sb, who, freeze_owner))
			ret = !!WARN_ON_ONCE(freeze_inc(sb, who) == 1);
		else
			ret = -EBUSY;
		/* All freezers share a single active reference. */
		deactivate_locked_super(sb);
		return ret;
	}

	if (sb->s_writers.frozen != SB_UNFROZEN) {
		ret = wait_for_partially_frozen(sb);
		if (ret) {
			deactivate_locked_super(sb);
			return ret;
		}

		goto retry;
	}

	if (sb_rdonly(sb)) {
		/* Nothing to do really... */
		WARN_ON_ONCE(freeze_inc(sb, who) > 1);
		sb->s_writers.freeze_owner = freeze_owner;
		sb->s_writers.frozen = SB_FREEZE_COMPLETE;
		wake_up_var(&sb->s_writers.frozen);
		super_unlock_excl(sb);
		return 0;
	}

	sb->s_writers.frozen = SB_FREEZE_WRITE;
	/* Release s_umount to preserve sb_start_write -> s_umount ordering */
	super_unlock_excl(sb);
	sb_wait_write(sb, SB_FREEZE_WRITE);
	__super_lock_excl(sb);

	/* Now we go and block page faults... */
	sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
	sb_wait_write(sb, SB_FREEZE_PAGEFAULT);

	/* All writers are done so after syncing there won't be dirty data */
	ret = sync_filesystem(sb);
	if (ret) {
		sb->s_writers.frozen = SB_UNFROZEN;
		sb_freeze_unlock(sb, SB_FREEZE_PAGEFAULT);
		wake_up_var(&sb->s_writers.frozen);
		deactivate_locked_super(sb);
		return ret;
	}

	/* Now wait for internal filesystem counter */
	sb->s_writers.frozen = SB_FREEZE_FS;
	sb_wait_write(sb, SB_FREEZE_FS);

	if (sb->s_op->freeze_fs) {
		ret = sb->s_op->freeze_fs(sb);
		if (ret) {
			printk(KERN_ERR "VFS: Filesystem freeze failed\n");
			sb->s_writers.frozen = SB_UNFROZEN;
			sb_freeze_unlock(sb, SB_FREEZE_FS);
			wake_up_var(&sb->s_writers.frozen);
			deactivate_locked_super(sb);
			return ret;
		}
	}
	/*
	 * For debugging purposes so that fs can warn if it sees write activity
	 * when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super().
	 */
	WARN_ON_ONCE(freeze_inc(sb, who) > 1);
	sb->s_writers.freeze_owner = freeze_owner;
	sb->s_writers.frozen = SB_FREEZE_COMPLETE;
	wake_up_var(&sb->s_writers.frozen);
	lockdep_sb_freeze_release(sb);
	super_unlock_excl(sb);
	return 0;
}
EXPORT_SYMBOL(freeze_super);
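/*
 * Illustrative sketch (not part of this file): an in-kernel user that may
 * race with other freezers (e.g. a block-layer path) would typically
 * freeze with nesting allowed and pair it with a matching thaw:
 *
 *	err = freeze_super(sb, FREEZE_HOLDER_KERNEL | FREEZE_MAY_NEST, NULL);
 *	if (err)
 *		return err;
 *	// ... filesystem is consistent and write-protected here ...
 *	err = thaw_super(sb, FREEZE_HOLDER_KERNEL | FREEZE_MAY_NEST, NULL);
 */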

/*
 * Undoes the effect of a freeze_super() call. If the filesystem is
 * frozen both by userspace and the kernel, a thaw call from either source
 * removes that state without releasing the other state or unlocking the
 * filesystem.
 */
static int thaw_super_locked(struct super_block *sb, enum freeze_holder who,
			     const void *freeze_owner)
{
	int error = -EINVAL;

	if (sb->s_writers.frozen != SB_FREEZE_COMPLETE)
		goto out_unlock;

	if (!may_unfreeze(sb, who, freeze_owner))
		goto out_unlock;

	/*
	 * All freezers share a single active reference.
	 * So just unlock in case there are any left.
	 */
	if (freeze_dec(sb, who))
		goto out_unlock;

	if (sb_rdonly(sb)) {
		sb->s_writers.frozen = SB_UNFROZEN;
		sb->s_writers.freeze_owner = NULL;
		wake_up_var(&sb->s_writers.frozen);
		goto out_deactivate;
	}

	lockdep_sb_freeze_acquire(sb);

	if (sb->s_op->unfreeze_fs) {
		error = sb->s_op->unfreeze_fs(sb);
		if (error) {
			pr_err("VFS: Filesystem thaw failed\n");
			freeze_inc(sb, who);
			lockdep_sb_freeze_release(sb);
			goto out_unlock;
		}
	}

	sb->s_writers.frozen = SB_UNFROZEN;
	sb->s_writers.freeze_owner = NULL;
	wake_up_var(&sb->s_writers.frozen);
	sb_freeze_unlock(sb, SB_FREEZE_FS);
out_deactivate:
	deactivate_locked_super(sb);
	return 0;

out_unlock:
	super_unlock_excl(sb);
	return error;
}

/**
 * thaw_super -- unlock filesystem
 * @sb: the super to thaw
 * @who: context that wants to thaw
 * @freeze_owner: owner of the freeze
 *
 * Unlocks the filesystem and marks it writeable again after freeze_super()
 * if there are no remaining freezes on the filesystem.
 *
 * @who should be:
 * * %FREEZE_HOLDER_USERSPACE if userspace wants to thaw the fs;
 * * %FREEZE_HOLDER_KERNEL if the kernel wants to thaw the fs;
 * * %FREEZE_MAY_NEST whether nesting freeze and thaw requests is allowed.
 *
 * A filesystem may hold multiple devices and thus may have been frozen
 * through the block layer via multiple block devices. The filesystem
 * remains frozen until all block devices are unfrozen.
 */
int thaw_super(struct super_block *sb, enum freeze_holder who,
	       const void *freeze_owner)
{
	if (!super_lock_excl(sb)) {
		WARN_ON_ONCE("Dying superblock while thawing!");
		return -EINVAL;
	}
	return thaw_super_locked(sb, who, freeze_owner);
}
EXPORT_SYMBOL(thaw_super);
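/*
 * Illustrative sketch (not part of this file): the FREEZE_EXCL variant
 * lets a single kernel holder claim the freeze; the same owner cookie
 * must then be passed to thaw. The "owner" pointer here is any stable
 * address identifying the caller (an assumption for illustration).
 *
 *	err = freeze_super(sb, FREEZE_EXCL | FREEZE_HOLDER_KERNEL, owner);
 *	...
 *	err = thaw_super(sb, FREEZE_EXCL | FREEZE_HOLDER_KERNEL, owner);
 *
 * With FREEZE_EXCL, may_unfreeze() above rejects thaw attempts whose
 * freeze_owner does not match the recorded owner.
 */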

/*
 * Create workqueue for deferred direct IO completions. We allocate the
 * workqueue when it's first needed. This avoids creating workqueue for
 * filesystems that don't need it and also allows us to create the workqueue
 * late enough so that we can include s_id in the name of the workqueue.
 */
int sb_init_dio_done_wq(struct super_block *sb)
{
	struct workqueue_struct *old;
	struct workqueue_struct *wq = alloc_workqueue("dio/%s",
						      WQ_MEM_RECLAIM | WQ_PERCPU,
						      0,
						      sb->s_id);
	if (!wq)
		return -ENOMEM;

	old = NULL;
	/*
	 * This has to be atomic as more DIOs can race to create the workqueue
	 */
	if (!try_cmpxchg(&sb->s_dio_done_wq, &old, wq)) {
		/* Someone created workqueue before us? Free ours... */
		destroy_workqueue(wq);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(sb_init_dio_done_wq);
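/*
 * Illustrative sketch (not part of this file): direct IO code creates the
 * workqueue lazily before deferring a completion, along these lines:
 *
 *	if (!sb->s_dio_done_wq) {
 *		err = sb_init_dio_done_wq(sb);
 *		if (err)
 *			return err;
 *	}
 */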