// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/super.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  super.c contains code to handle: - mount structures
 *                                   - super-block tables
 *                                   - filesystem drivers list
 *                                   - mount system call
 *                                   - umount system call
 *                                   - ustat system call
 *
 * GK 2/5/95  -  Changed to support mounting the root fs via NFS
 *
 *  Added kerneld support: Jacques Gelinas and Bjorn Ekwall
 *  Added change_root: Werner Almesberger & Hans Lermen, Feb '96
 *  Added options to /proc/mounts:
 *    Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
 *  Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
 *  Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
 */

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/writeback.h>		/* for the emergency remount stuff */
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/rculist_bl.h>
#include <linux/fscrypt.h>
#include <linux/fsnotify.h>
#include <linux/lockdep.h>
#include <linux/user_namespace.h>
#include <linux/fs_context.h>
#include <uapi/linux/mount.h>
#include "internal.h"

static int thaw_super_locked(struct super_block *sb, enum freeze_holder who,
                             const void *freeze_owner);

static LIST_HEAD(super_blocks);
static DEFINE_SPINLOCK(sb_lock);

static char *sb_writers_name[SB_FREEZE_LEVELS] = {
        "sb_writers",
        "sb_pagefaults",
        "sb_internal",
};
static inline void __super_lock(struct super_block *sb, bool excl)
{
        if (excl)
                down_write(&sb->s_umount);
        else
                down_read(&sb->s_umount);
}

static inline void super_unlock(struct super_block *sb, bool excl)
{
        if (excl)
                up_write(&sb->s_umount);
        else
                up_read(&sb->s_umount);
}

static inline void __super_lock_excl(struct super_block *sb)
{
        __super_lock(sb, true);
}

static inline void super_unlock_excl(struct super_block *sb)
{
        super_unlock(sb, true);
}

static inline void super_unlock_shared(struct super_block *sb)
{
        super_unlock(sb, false);
}

static bool super_flags(const struct super_block *sb, unsigned int flags)
{
        /*
         * Pairs with smp_store_release() in super_wake() and ensures
         * that we see @flags after we're woken.
         */
        return smp_load_acquire(&sb->s_flags) & flags;
}
/**
 * super_lock - wait for superblock to become ready and lock it
 * @sb: superblock to wait for
 * @excl: whether exclusive access is required
 *
 * If the superblock has neither passed through vfs_get_tree() nor
 * generic_shutdown_super() yet, wait for that to happen. Either superblock
 * creation will succeed and SB_BORN is set by vfs_get_tree() or we're
 * woken and we'll see SB_DYING.
 *
 * The caller must have acquired a temporary reference on @sb->s_count.
 *
 * Return: The function returns true if SB_BORN was set and with
 *         s_umount held. The function returns false if SB_DYING was
 *         set and without s_umount held.
 */
static __must_check bool super_lock(struct super_block *sb, bool excl)
{
        lockdep_assert_not_held(&sb->s_umount);

        /* wait until the superblock is ready or dying */
        wait_var_event(&sb->s_flags, super_flags(sb, SB_BORN | SB_DYING));

        /* Don't pointlessly acquire s_umount. */
        if (super_flags(sb, SB_DYING))
                return false;

        __super_lock(sb, excl);

        /*
         * Has gone through generic_shutdown_super() in the meantime.
         * @sb->s_root is NULL and @sb->s_active is 0. No one needs to
         * grab a reference to this. Tell them so.
         */
        if (sb->s_flags & SB_DYING) {
                super_unlock(sb, excl);
                return false;
        }

        WARN_ON_ONCE(!(sb->s_flags & SB_BORN));
        return true;
}

/* wait and try to acquire read-side of @sb->s_umount */
static inline bool super_lock_shared(struct super_block *sb)
{
        return super_lock(sb, false);
}

/* wait and try to acquire write-side of @sb->s_umount */
static inline bool super_lock_excl(struct super_block *sb)
{
        return super_lock(sb, true);
}
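
/*
 * Illustrative sketch (not compiled, not part of this file): the
 * canonical pattern for the helpers above, as followed by
 * __iterate_supers() and user_get_super() further down. A walker pins
 * the superblock with a temporary reference before sleeping in
 * super_lock(). The function name is hypothetical.
 */
#if 0
static void example_walk_one_sb(struct super_block *sb)
{
        spin_lock(&sb_lock);
        sb->s_count++;          /* temporary reference, see put_super() */
        spin_unlock(&sb_lock);

        if (super_lock_shared(sb)) {
                /* @sb is SB_BORN here and s_umount is held shared. */
                super_unlock_shared(sb);
        }
        put_super(sb);
}
#endif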

/* wake waiters */
#define SUPER_WAKE_FLAGS (SB_BORN | SB_DYING | SB_DEAD)
static void super_wake(struct super_block *sb, unsigned int flag)
{
        WARN_ON_ONCE((flag & ~SUPER_WAKE_FLAGS));
        WARN_ON_ONCE(hweight32(flag & SUPER_WAKE_FLAGS) > 1);

        /*
         * Pairs with smp_load_acquire() in super_lock() to make sure
         * all initializations in the superblock are seen by the user
         * seeing SB_BORN set.
         */
        smp_store_release(&sb->s_flags, sb->s_flags | flag);
        /*
         * Pairs with the barrier in prepare_to_wait_event() to make sure
         * ___wait_var_event() either sees SB_BORN set or
         * waitqueue_active() check in wake_up_var() sees the waiter.
         */
        smp_mb();
        wake_up_var(&sb->s_flags);
}
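
/*
 * Lifecycle summary of the flag protocol above (descriptive only):
 *
 *   vfs_get_tree()           -> super_wake(sb, SB_BORN)
 *   generic_shutdown_super() -> super_wake(sb, SB_DYING)
 *   kill_super_notify()      -> super_wake(sb, SB_DEAD)
 *
 * super_lock() sleeps in wait_var_event() until SB_BORN or SB_DYING is
 * visible; grab_super() additionally waits for SB_DEAD so a failed
 * mount attempt only retries after ->kill_sb() has fully finished.
 */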

/*
 * One thing we have to be careful of with a per-sb shrinker is that we don't
 * drop the last active reference to the superblock from within the shrinker.
 * If that happens we could trigger unregistering the shrinker from within the
 * shrinker path and that leads to deadlock on the shrinker_mutex. Hence we
 * take a passive reference to the superblock to avoid this from occurring.
 */
static unsigned long super_cache_scan(struct shrinker *shrink,
                                      struct shrink_control *sc)
{
        struct super_block *sb;
        long fs_objects = 0;
        long total_objects;
        long freed = 0;
        long dentries;
        long inodes;

        sb = shrink->private_data;

        /*
         * Deadlock avoidance. We may hold various FS locks, and we don't want
         * to recurse into the FS that called us in clear_inode() and friends..
         */
        if (!(sc->gfp_mask & __GFP_FS))
                return SHRINK_STOP;

        if (!super_trylock_shared(sb))
                return SHRINK_STOP;

        if (sb->s_op->nr_cached_objects)
                fs_objects = sb->s_op->nr_cached_objects(sb, sc);

        inodes = list_lru_shrink_count(&sb->s_inode_lru, sc);
        dentries = list_lru_shrink_count(&sb->s_dentry_lru, sc);
        total_objects = dentries + inodes + fs_objects;
        if (!total_objects)
                total_objects = 1;

        /* proportion the scan between the caches */
        dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
        inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);
        fs_objects = mult_frac(sc->nr_to_scan, fs_objects, total_objects);

        /*
         * prune the dcache first as the icache is pinned by it, then
         * prune the icache, followed by the filesystem specific caches
         *
         * Ensure that we always scan at least one object - memcg kmem
         * accounting uses this to fully empty the caches.
         */
        sc->nr_to_scan = dentries + 1;
        freed = prune_dcache_sb(sb, sc);
        sc->nr_to_scan = inodes + 1;
        freed += prune_icache_sb(sb, sc);

        if (fs_objects) {
                sc->nr_to_scan = fs_objects + 1;
                freed += sb->s_op->free_cached_objects(sb, sc);
        }

        super_unlock_shared(sb);
        return freed;
}
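
/*
 * Worked example for the proportioning above (illustrative numbers):
 * with sc->nr_to_scan = 128, dentries = 300, inodes = 100 and
 * fs_objects = 0, total_objects is 400, so the dcache is asked to scan
 * mult_frac(128, 300, 400) = 96 objects and the icache
 * mult_frac(128, 100, 400) = 32, preserving the 3:1 cache ratio. The
 * "+ 1" added to each batch guarantees forward progress, which memcg
 * kmem accounting relies on to fully empty the caches.
 */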

static unsigned long super_cache_count(struct shrinker *shrink,
                                       struct shrink_control *sc)
{
        struct super_block *sb;
        long total_objects = 0;

        sb = shrink->private_data;

        /*
         * We don't call super_trylock_shared() here as it is a scalability
         * bottleneck, so we're exposed to partial setup state. The shrinker
         * rwsem does not protect filesystem operations backing
         * list_lru_shrink_count() or s_op->nr_cached_objects(). Counts can
         * change between super_cache_count and super_cache_scan, so we really
         * don't need locks here.
         *
         * However, if we are currently mounting the superblock, the underlying
         * filesystem might be in a state of partial construction and hence it
         * is dangerous to access it. super_trylock_shared() uses a SB_BORN check
         * to avoid this situation, so do the same here. The memory barrier is
         * matched with the one in mount_fs() as we don't hold locks here.
         */
        if (!(sb->s_flags & SB_BORN))
                return 0;
        smp_rmb();

        if (sb->s_op && sb->s_op->nr_cached_objects)
                total_objects = sb->s_op->nr_cached_objects(sb, sc);

        total_objects += list_lru_shrink_count(&sb->s_dentry_lru, sc);
        total_objects += list_lru_shrink_count(&sb->s_inode_lru, sc);

        if (!total_objects)
                return SHRINK_EMPTY;

        total_objects = vfs_pressure_ratio(total_objects);
        return total_objects;
}

static void destroy_super_work(struct work_struct *work)
{
        struct super_block *s = container_of(work, struct super_block,
                                             destroy_work);
        fsnotify_sb_free(s);
        security_sb_free(s);
        put_user_ns(s->s_user_ns);
        kfree(s->s_subtype);
        for (int i = 0; i < SB_FREEZE_LEVELS; i++)
                percpu_free_rwsem(&s->s_writers.rw_sem[i]);
        kfree(s);
}

static void destroy_super_rcu(struct rcu_head *head)
{
        struct super_block *s = container_of(head, struct super_block, rcu);
        INIT_WORK(&s->destroy_work, destroy_super_work);
        schedule_work(&s->destroy_work);
}

/* Free a superblock that has never been seen by anyone */
static void destroy_unused_super(struct super_block *s)
{
        if (!s)
                return;
        super_unlock_excl(s);
        list_lru_destroy(&s->s_dentry_lru);
        list_lru_destroy(&s->s_inode_lru);
        shrinker_free(s->s_shrink);
        /* no delays needed */
        destroy_super_work(&s->destroy_work);
}

/**
 * alloc_super - create new superblock
 * @type: filesystem type superblock should belong to
 * @flags: the mount flags
 * @user_ns: User namespace for the super_block
 *
 * Allocates and initializes a new &struct super_block. alloc_super()
 * returns a pointer to the new superblock or %NULL if allocation fails.
 */
static struct super_block *alloc_super(struct file_system_type *type, int flags,
                                       struct user_namespace *user_ns)
{
        struct super_block *s = kzalloc(sizeof(struct super_block), GFP_KERNEL);
        static const struct super_operations default_op;
        int i;

        if (!s)
                return NULL;

        INIT_LIST_HEAD(&s->s_mounts);
        s->s_user_ns = get_user_ns(user_ns);
        init_rwsem(&s->s_umount);
        lockdep_set_class(&s->s_umount, &type->s_umount_key);
        /*
         * sget() can have s_umount recursion.
         *
         * When it cannot find a suitable sb, it allocates a new
         * one (this one), and tries again to find a suitable old
         * one.
         *
         * In case that succeeds, it will acquire the s_umount
         * lock of the old one. Since these are clearly distinct
         * locks, and this object isn't exposed yet, there's no
         * risk of deadlocks.
         *
         * Annotate this by putting this lock in a different
         * subclass.
         */
        down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);

        if (security_sb_alloc(s))
                goto fail;

        for (i = 0; i < SB_FREEZE_LEVELS; i++) {
                if (__percpu_init_rwsem(&s->s_writers.rw_sem[i],
                                        sb_writers_name[i],
                                        &type->s_writers_key[i]))
                        goto fail;
        }
        s->s_bdi = &noop_backing_dev_info;
        s->s_flags = flags;
        if (s->s_user_ns != &init_user_ns)
                s->s_iflags |= SB_I_NODEV;
        INIT_HLIST_NODE(&s->s_instances);
        INIT_HLIST_BL_HEAD(&s->s_roots);
        mutex_init(&s->s_sync_lock);
        INIT_LIST_HEAD(&s->s_inodes);
        spin_lock_init(&s->s_inode_list_lock);
        INIT_LIST_HEAD(&s->s_inodes_wb);
        spin_lock_init(&s->s_inode_wblist_lock);

        s->s_count = 1;
        atomic_set(&s->s_active, 1);
        mutex_init(&s->s_vfs_rename_mutex);
        lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
        init_rwsem(&s->s_dquot.dqio_sem);
        s->s_maxbytes = MAX_NON_LFS;
        s->s_op = &default_op;
        s->s_time_gran = 1000000000;
        s->s_time_min = TIME64_MIN;
        s->s_time_max = TIME64_MAX;

        s->s_shrink = shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
                                     "sb-%s", type->name);
        if (!s->s_shrink)
                goto fail;

        s->s_shrink->scan_objects = super_cache_scan;
        s->s_shrink->count_objects = super_cache_count;
        s->s_shrink->batch = 1024;
        s->s_shrink->private_data = s;

        if (list_lru_init_memcg(&s->s_dentry_lru, s->s_shrink))
                goto fail;
        if (list_lru_init_memcg(&s->s_inode_lru, s->s_shrink))
                goto fail;
        return s;

fail:
        destroy_unused_super(s);
        return NULL;
}

/* Superblock refcounting */

/*
 * Drop a superblock's refcount. The caller must hold sb_lock.
 */
static void __put_super(struct super_block *s)
{
        if (!--s->s_count) {
                list_del_init(&s->s_list);
                WARN_ON(s->s_dentry_lru.node);
                WARN_ON(s->s_inode_lru.node);
                WARN_ON(!list_empty(&s->s_mounts));
                call_rcu(&s->rcu, destroy_super_rcu);
        }
}

/**
 * put_super - drop a temporary reference to superblock
 * @sb: superblock in question
 *
 * Drops a temporary reference, frees superblock if there are no
 * references left.
 */
void put_super(struct super_block *sb)
{
        spin_lock(&sb_lock);
        __put_super(sb);
        spin_unlock(&sb_lock);
}

static void kill_super_notify(struct super_block *sb)
{
        lockdep_assert_not_held(&sb->s_umount);

        /* already notified earlier */
        if (sb->s_flags & SB_DEAD)
                return;

        /*
         * Remove it from @fs_supers so it isn't found by new
         * sget{_fc}() walkers anymore. Any concurrent mounter still
         * managing to grab a temporary reference is guaranteed to
         * already see SB_DYING and will wait until we notify them about
         * SB_DEAD.
         */
        spin_lock(&sb_lock);
        hlist_del_init(&sb->s_instances);
        spin_unlock(&sb_lock);

        /*
         * Let concurrent mounts know that this thing is really dead.
         * We don't need @sb->s_umount here as every concurrent caller
         * will see SB_DYING and either discard the superblock or wait
         * for SB_DEAD.
         */
        super_wake(sb, SB_DEAD);
}

/**
 * deactivate_locked_super - drop an active reference to superblock
 * @s: superblock to deactivate
 *
 * Drops an active reference to superblock, converting it into a temporary
 * one if there are no other active references left. In that case we
 * tell fs driver to shut it down and drop the temporary reference we
 * had just acquired.
 *
 * Caller holds exclusive lock on superblock; that lock is released.
 */
void deactivate_locked_super(struct super_block *s)
{
        struct file_system_type *fs = s->s_type;
        if (atomic_dec_and_test(&s->s_active)) {
                shrinker_free(s->s_shrink);
                fs->kill_sb(s);

                kill_super_notify(s);

                /*
                 * Since list_lru_destroy() may sleep, we cannot call it from
                 * put_super(), where we hold the sb_lock. Therefore we destroy
                 * the lru lists right now.
                 */
                list_lru_destroy(&s->s_dentry_lru);
                list_lru_destroy(&s->s_inode_lru);

                put_filesystem(fs);
                put_super(s);
        } else {
                super_unlock_excl(s);
        }
}

EXPORT_SYMBOL(deactivate_locked_super);

/**
 * deactivate_super - drop an active reference to superblock
 * @s: superblock to deactivate
 *
 * Variant of deactivate_locked_super(), except that superblock is *not*
 * locked by caller. If we are going to drop the final active reference,
 * lock will be acquired prior to that.
 */
void deactivate_super(struct super_block *s)
{
        if (!atomic_add_unless(&s->s_active, -1, 1)) {
                __super_lock_excl(s);
                deactivate_locked_super(s);
        }
}

EXPORT_SYMBOL(deactivate_super);

/**
 * grab_super - acquire an active reference to a superblock
 * @sb: superblock to acquire
 *
 * Acquire a temporary reference on a superblock and try to trade it for
 * an active reference. This is used in sget{_fc}() to wait for a
 * superblock to either become SB_BORN or for it to pass through
 * sb->kill() and be marked as SB_DEAD.
 *
 * Return: This returns true if an active reference could be acquired,
 *         false if not.
 */
static bool grab_super(struct super_block *sb)
{
        bool locked;

        sb->s_count++;
        spin_unlock(&sb_lock);
        locked = super_lock_excl(sb);
        if (locked) {
                if (atomic_inc_not_zero(&sb->s_active)) {
                        put_super(sb);
                        return true;
                }
                super_unlock_excl(sb);
        }
        wait_var_event(&sb->s_flags, super_flags(sb, SB_DEAD));
        put_super(sb);
        return false;
}

/*
 * super_trylock_shared - try to grab ->s_umount shared
 * @sb: reference we are trying to grab
 *
 * Try to prevent fs shutdown. This is used in places where we
 * cannot take an active reference but we need to ensure that the
 * filesystem is not shut down while we are working on it. It returns
 * false if we cannot acquire s_umount or if we lose the race and the
 * filesystem already got into shutdown, and returns true with the s_umount
 * lock held in read mode in case of success. On successful return,
 * the caller must drop the s_umount lock when done.
 *
 * Note that unlike get_super() et al. this one does *not* bump ->s_count.
 * The reason why it's safe is that we are OK with doing trylock instead
 * of down_read(). There's a couple of places that are OK with that, but
 * it's very much not a general-purpose interface.
 */
bool super_trylock_shared(struct super_block *sb)
{
        if (down_read_trylock(&sb->s_umount)) {
                if (!(sb->s_flags & SB_DYING) && sb->s_root &&
                    (sb->s_flags & SB_BORN))
                        return true;
                super_unlock_shared(sb);
        }

        return false;
}
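
/*
 * Illustrative sketch (not compiled): how a trylock user such as the
 * writeback or shrinker path is expected to use the helper above; the
 * function name and the elided work are hypothetical.
 */
#if 0
static void example_try_work(struct super_block *sb)
{
        if (!super_trylock_shared(sb))
                return;         /* shutting down or contended, just skip */
        /* ... operate on a fully born, not-dying superblock ... */
        super_unlock_shared(sb);
}
#endif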

/**
 * retire_super - prevents superblock from being reused
 * @sb: superblock to retire
 *
 * The function marks superblock to be ignored in superblock test, which
 * prevents it from being reused for any new mounts. If the superblock has
 * a private bdi, it also unregisters it, but doesn't reduce the refcount
 * of the superblock to prevent potential races. The refcount is reduced
 * by generic_shutdown_super(). The function cannot be called
 * concurrently with generic_shutdown_super(). It is safe to call the
 * function multiple times, subsequent calls have no effect.
 *
 * The marker will affect the re-use only for block-device-based
 * superblocks. Other superblocks will still get marked if this function
 * is used, but that will not affect their reusability.
 */
void retire_super(struct super_block *sb)
{
        WARN_ON(!sb->s_bdev);
        __super_lock_excl(sb);
        if (sb->s_iflags & SB_I_PERSB_BDI) {
                bdi_unregister(sb->s_bdi);
                sb->s_iflags &= ~SB_I_PERSB_BDI;
        }
        sb->s_iflags |= SB_I_RETIRED;
        super_unlock_excl(sb);
}
EXPORT_SYMBOL(retire_super);

/**
 * generic_shutdown_super - common helper for ->kill_sb()
 * @sb: superblock to kill
 *
 * generic_shutdown_super() does all fs-independent work on superblock
 * shutdown. Typical ->kill_sb() should pick all fs-specific objects
 * that need destruction out of superblock, call generic_shutdown_super()
 * and release aforementioned objects. Note: dentries and inodes _are_
 * taken care of and do not need specific handling.
 *
 * Upon calling this function, the filesystem may no longer alter or
 * rearrange the set of dentries belonging to this super_block, nor may it
 * change the attachments of dentries to inodes.
 */
void generic_shutdown_super(struct super_block *sb)
{
        const struct super_operations *sop = sb->s_op;

        if (sb->s_root) {
                shrink_dcache_for_umount(sb);
                sync_filesystem(sb);
                sb->s_flags &= ~SB_ACTIVE;

                cgroup_writeback_umount(sb);

                /* Evict all inodes with zero refcount. */
                evict_inodes(sb);

                /*
                 * Clean up and evict any inodes that still have references due
                 * to fsnotify or the security policy.
                 */
                fsnotify_sb_delete(sb);
                security_sb_delete(sb);

                if (sb->s_dio_done_wq) {
                        destroy_workqueue(sb->s_dio_done_wq);
                        sb->s_dio_done_wq = NULL;
                }

                if (sop->put_super)
                        sop->put_super(sb);

                /*
                 * Now that all potentially-encrypted inodes have been evicted,
                 * the fscrypt keyring can be destroyed.
                 */
                fscrypt_destroy_keyring(sb);

                if (CHECK_DATA_CORRUPTION(!list_empty(&sb->s_inodes), NULL,
                                "VFS: Busy inodes after unmount of %s (%s)",
                                sb->s_id, sb->s_type->name)) {
                        /*
                         * Adding a proper bailout path here would be hard, but
                         * we can at least make it more likely that a later
                         * iput_final() or such crashes cleanly.
                         */
                        struct inode *inode;

                        spin_lock(&sb->s_inode_list_lock);
                        list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
                                inode->i_op = VFS_PTR_POISON;
                                inode->i_sb = VFS_PTR_POISON;
                                inode->i_mapping = VFS_PTR_POISON;
                        }
                        spin_unlock(&sb->s_inode_list_lock);
                }
        }
        /*
         * Broadcast to everyone that grabbed a temporary reference to this
         * superblock before we removed it from @fs_supers that the superblock
         * is dying. Every walker of @fs_supers outside of sget{_fc}() will now
         * discard this superblock and treat it as dead.
         *
         * We leave the superblock on @fs_supers so it can be found by
         * sget{_fc}() until we passed sb->kill_sb().
         */
        super_wake(sb, SB_DYING);
        super_unlock_excl(sb);
        if (sb->s_bdi != &noop_backing_dev_info) {
                if (sb->s_iflags & SB_I_PERSB_BDI)
                        bdi_unregister(sb->s_bdi);
                bdi_put(sb->s_bdi);
                sb->s_bdi = &noop_backing_dev_info;
        }
}

EXPORT_SYMBOL(generic_shutdown_super);

bool mount_capable(struct fs_context *fc)
{
        if (!(fc->fs_type->fs_flags & FS_USERNS_MOUNT))
                return capable(CAP_SYS_ADMIN);
        else
                return ns_capable(fc->user_ns, CAP_SYS_ADMIN);
}

/**
 * sget_fc - Find or create a superblock
 * @fc: Filesystem context.
 * @test: Comparison callback
 * @set: Setup callback
 *
 * Create a new superblock or find an existing one.
 *
 * The @test callback is used to find a matching existing superblock.
 * Whether or not the requested parameters in @fc are taken into account
 * is specific to the @test callback that is used. They may even be
 * completely ignored.
 *
 * If an extant superblock is matched, it will be returned unless:
 *
 * (1) the namespace of the filesystem context @fc and the extant
 *     superblock's namespace differ
 *
 * (2) the filesystem context @fc has requested that reusing an extant
 *     superblock is not allowed
 *
 * In both cases EBUSY will be returned.
 *
 * If no match is made, a new superblock will be allocated and basic
 * initialisation will be performed (s_type, s_fs_info and s_id will be
 * set and the @set callback will be invoked), the superblock will be
 * published and it will be returned in a partially constructed state
 * with SB_BORN and SB_ACTIVE as yet unset.
 *
 * Return: On success, an extant or newly created superblock is
 *         returned. On failure an error pointer is returned.
 */
struct super_block *sget_fc(struct fs_context *fc,
                            int (*test)(struct super_block *, struct fs_context *),
                            int (*set)(struct super_block *, struct fs_context *))
{
        struct super_block *s = NULL;
        struct super_block *old;
        struct user_namespace *user_ns = fc->global ? &init_user_ns : fc->user_ns;
        int err;

        /*
         * Never allow s_user_ns != &init_user_ns when FS_USERNS_MOUNT is
         * not set, as the filesystem is likely unprepared to handle it.
         * This can happen when fsconfig() is called from init_user_ns with
         * an fs_fd opened in another user namespace.
         */
        if (user_ns != &init_user_ns && !(fc->fs_type->fs_flags & FS_USERNS_MOUNT)) {
                errorfc(fc, "VFS: Mounting from non-initial user namespace is not allowed");
                return ERR_PTR(-EPERM);
        }

retry:
        spin_lock(&sb_lock);
        if (test) {
                hlist_for_each_entry(old, &fc->fs_type->fs_supers, s_instances) {
                        if (test(old, fc))
                                goto share_extant_sb;
                }
        }
        if (!s) {
                spin_unlock(&sb_lock);
                s = alloc_super(fc->fs_type, fc->sb_flags, user_ns);
                if (!s)
                        return ERR_PTR(-ENOMEM);
                goto retry;
        }

        s->s_fs_info = fc->s_fs_info;
        err = set(s, fc);
        if (err) {
                s->s_fs_info = NULL;
                spin_unlock(&sb_lock);
                destroy_unused_super(s);
                return ERR_PTR(err);
        }
        fc->s_fs_info = NULL;
        s->s_type = fc->fs_type;
        s->s_iflags |= fc->s_iflags;
        strscpy(s->s_id, s->s_type->name, sizeof(s->s_id));
        /*
         * Make the superblock visible on @super_blocks and @fs_supers.
         * It's in a nascent state and users should wait on SB_BORN or
         * SB_DYING to be set.
         */
        list_add_tail(&s->s_list, &super_blocks);
        hlist_add_head(&s->s_instances, &s->s_type->fs_supers);
        spin_unlock(&sb_lock);
        get_filesystem(s->s_type);
        shrinker_register(s->s_shrink);
        return s;

share_extant_sb:
        if (user_ns != old->s_user_ns || fc->exclusive) {
                spin_unlock(&sb_lock);
                destroy_unused_super(s);
                if (fc->exclusive)
                        warnfc(fc, "reusing existing filesystem not allowed");
                else
                        warnfc(fc, "reusing existing filesystem in another namespace not allowed");
                return ERR_PTR(-EBUSY);
        }
        if (!grab_super(old))
                goto retry;
        destroy_unused_super(s);
        return old;
}
EXPORT_SYMBOL(sget_fc);
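
/*
 * Illustrative sketch (not compiled): a minimal ->get_tree() built
 * directly on sget_fc(), mirroring vfs_get_super() below. Real
 * filesystems normally use the get_tree_*() helpers instead;
 * example_fill_super() is hypothetical and set_anon_super_fc() is
 * defined further down in this file.
 */
#if 0
static int example_get_tree(struct fs_context *fc)
{
        struct super_block *sb;
        int err;

        sb = sget_fc(fc, NULL, set_anon_super_fc);
        if (IS_ERR(sb))
                return PTR_ERR(sb);

        err = example_fill_super(sb, fc);
        if (err) {
                deactivate_locked_super(sb);
                return err;
        }
        sb->s_flags |= SB_ACTIVE;
        fc->root = dget(sb->s_root);
        return 0;
}
#endif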

/**
 * sget - find or create a superblock
 * @type: filesystem type superblock should belong to
 * @test: comparison callback
 * @set: setup callback
 * @flags: mount flags
 * @data: argument to each of them
 */
struct super_block *sget(struct file_system_type *type,
                         int (*test)(struct super_block *, void *),
                         int (*set)(struct super_block *, void *),
                         int flags,
                         void *data)
{
        struct user_namespace *user_ns = current_user_ns();
        struct super_block *s = NULL;
        struct super_block *old;
        int err;

retry:
        spin_lock(&sb_lock);
        if (test) {
                hlist_for_each_entry(old, &type->fs_supers, s_instances) {
                        if (!test(old, data))
                                continue;
                        if (user_ns != old->s_user_ns) {
                                spin_unlock(&sb_lock);
                                destroy_unused_super(s);
                                return ERR_PTR(-EBUSY);
                        }
                        if (!grab_super(old))
                                goto retry;
                        destroy_unused_super(s);
                        return old;
                }
        }
        if (!s) {
                spin_unlock(&sb_lock);
                s = alloc_super(type, flags, user_ns);
                if (!s)
                        return ERR_PTR(-ENOMEM);
                goto retry;
        }

        err = set(s, data);
        if (err) {
                spin_unlock(&sb_lock);
                destroy_unused_super(s);
                return ERR_PTR(err);
        }
        s->s_type = type;
        strscpy(s->s_id, type->name, sizeof(s->s_id));
        list_add_tail(&s->s_list, &super_blocks);
        hlist_add_head(&s->s_instances, &type->fs_supers);
        spin_unlock(&sb_lock);
        get_filesystem(type);
        shrinker_register(s->s_shrink);
        return s;
}
EXPORT_SYMBOL(sget);

void drop_super(struct super_block *sb)
{
        super_unlock_shared(sb);
        put_super(sb);
}

EXPORT_SYMBOL(drop_super);

void drop_super_exclusive(struct super_block *sb)
{
        super_unlock_excl(sb);
        put_super(sb);
}
EXPORT_SYMBOL(drop_super_exclusive);

enum super_iter_flags_t {
        SUPER_ITER_EXCL         = (1U << 0),
        SUPER_ITER_UNLOCKED     = (1U << 1),
        SUPER_ITER_REVERSE      = (1U << 2),
};

static inline struct super_block *first_super(enum super_iter_flags_t flags)
{
        if (flags & SUPER_ITER_REVERSE)
                return list_last_entry(&super_blocks, struct super_block, s_list);
        return list_first_entry(&super_blocks, struct super_block, s_list);
}

static inline struct super_block *next_super(struct super_block *sb,
                                             enum super_iter_flags_t flags)
{
        if (flags & SUPER_ITER_REVERSE)
                return list_prev_entry(sb, s_list);
        return list_next_entry(sb, s_list);
}

static void __iterate_supers(void (*f)(struct super_block *, void *), void *arg,
                             enum super_iter_flags_t flags)
{
        struct super_block *sb, *p = NULL;
        bool excl = flags & SUPER_ITER_EXCL;

        guard(spinlock)(&sb_lock);

        for (sb = first_super(flags);
             !list_entry_is_head(sb, &super_blocks, s_list);
             sb = next_super(sb, flags)) {
                if (super_flags(sb, SB_DYING))
                        continue;
                sb->s_count++;
                spin_unlock(&sb_lock);

                if (flags & SUPER_ITER_UNLOCKED) {
                        f(sb, arg);
                } else if (super_lock(sb, excl)) {
                        f(sb, arg);
                        super_unlock(sb, excl);
                }

                spin_lock(&sb_lock);
                if (p)
                        __put_super(p);
                p = sb;
        }
        if (p)
                __put_super(p);
}

void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
{
        __iterate_supers(f, arg, 0);
}
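
/*
 * Illustrative sketch (not compiled): iterate_supers() is the canonical
 * "do something to every live superblock" helper; the callback runs
 * with s_umount held shared, which is what sync_filesystem() expects.
 * This mirrors the pattern used by fs/sync.c; the callback name is
 * hypothetical.
 */
#if 0
static void example_sync_one_sb(struct super_block *sb, void *arg)
{
        if (sb->s_root && !sb_rdonly(sb))
                sync_filesystem(sb);
}
/* ... iterate_supers(example_sync_one_sb, NULL); ... */
#endif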

/**
 * iterate_supers_type - call function for superblocks of given type
 * @type: fs type
 * @f: function to call
 * @arg: argument to pass to it
 *
 * Scans the superblock list and calls given function, passing it
 * locked superblock and given argument.
 */
void iterate_supers_type(struct file_system_type *type,
                         void (*f)(struct super_block *, void *), void *arg)
{
        struct super_block *sb, *p = NULL;

        spin_lock(&sb_lock);
        hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
                bool locked;

                if (super_flags(sb, SB_DYING))
                        continue;

                sb->s_count++;
                spin_unlock(&sb_lock);

                locked = super_lock_shared(sb);
                if (locked) {
                        f(sb, arg);
                        super_unlock_shared(sb);
                }

                spin_lock(&sb_lock);
                if (p)
                        __put_super(p);
                p = sb;
        }
        if (p)
                __put_super(p);
        spin_unlock(&sb_lock);
}

EXPORT_SYMBOL(iterate_supers_type);

struct super_block *user_get_super(dev_t dev, bool excl)
{
        struct super_block *sb;

        spin_lock(&sb_lock);
        list_for_each_entry(sb, &super_blocks, s_list) {
                bool locked;

                if (sb->s_dev != dev)
                        continue;

                sb->s_count++;
                spin_unlock(&sb_lock);

                locked = super_lock(sb, excl);
                if (locked)
                        return sb;

                spin_lock(&sb_lock);
                __put_super(sb);
                break;
        }
        spin_unlock(&sb_lock);
        return NULL;
}

/**
 * reconfigure_super - asks filesystem to change superblock parameters
 * @fc: The superblock and configuration
 *
 * Alters the configuration parameters of a live superblock.
 */
int reconfigure_super(struct fs_context *fc)
{
        struct super_block *sb = fc->root->d_sb;
        int retval;
        bool remount_ro = false;
        bool remount_rw = false;
        bool force = fc->sb_flags & SB_FORCE;

        if (fc->sb_flags_mask & ~MS_RMT_MASK)
                return -EINVAL;
        if (sb->s_writers.frozen != SB_UNFROZEN)
                return -EBUSY;

        retval = security_sb_remount(sb, fc->security);
        if (retval)
                return retval;

        if (fc->sb_flags_mask & SB_RDONLY) {
#ifdef CONFIG_BLOCK
                if (!(fc->sb_flags & SB_RDONLY) && sb->s_bdev &&
                    bdev_read_only(sb->s_bdev))
                        return -EACCES;
#endif
                remount_rw = !(fc->sb_flags & SB_RDONLY) && sb_rdonly(sb);
                remount_ro = (fc->sb_flags & SB_RDONLY) && !sb_rdonly(sb);
        }

        if (remount_ro) {
                if (!hlist_empty(&sb->s_pins)) {
                        super_unlock_excl(sb);
                        group_pin_kill(&sb->s_pins);
                        __super_lock_excl(sb);
                        if (!sb->s_root)
                                return 0;
                        if (sb->s_writers.frozen != SB_UNFROZEN)
                                return -EBUSY;
                        remount_ro = !sb_rdonly(sb);
                }
        }
        shrink_dcache_sb(sb);

        /* If we are reconfiguring to RDONLY and current sb is read/write,
         * make sure there are no files open for writing.
         */
        if (remount_ro) {
                if (force) {
                        sb_start_ro_state_change(sb);
                } else {
                        retval = sb_prepare_remount_readonly(sb);
                        if (retval)
                                return retval;
                }
        } else if (remount_rw) {
                /*
                 * Protect filesystem's reconfigure code from writes from
                 * userspace until reconfigure finishes.
                 */
                sb_start_ro_state_change(sb);
        }

        if (fc->ops->reconfigure) {
                retval = fc->ops->reconfigure(fc);
                if (retval) {
                        if (!force)
                                goto cancel_readonly;
                        /* If forced remount, go ahead despite any errors */
                        WARN(1, "forced remount of a %s fs returned %i\n",
                             sb->s_type->name, retval);
                }
        }

        WRITE_ONCE(sb->s_flags, ((sb->s_flags & ~fc->sb_flags_mask) |
                                 (fc->sb_flags & fc->sb_flags_mask)));
        sb_end_ro_state_change(sb);

        /*
         * Some filesystems modify their metadata via some other path than the
         * bdev buffer cache (eg. use a private mapping, or directories in
         * pagecache, etc). Also file data modifications go via their own
         * mappings. So if we try to mount readonly then copy the filesystem
         * from bdev, we could get stale data, so invalidate it to give a best
         * effort at coherency.
         */
        if (remount_ro && sb->s_bdev)
                invalidate_bdev(sb->s_bdev);
        return 0;

cancel_readonly:
        sb_end_ro_state_change(sb);
        return retval;
}

static void do_emergency_remount_callback(struct super_block *sb, void *unused)
{
        if (sb->s_bdev && !sb_rdonly(sb)) {
                struct fs_context *fc;

                fc = fs_context_for_reconfigure(sb->s_root,
                                        SB_RDONLY | SB_FORCE, SB_RDONLY);
                if (!IS_ERR(fc)) {
                        if (parse_monolithic_mount_data(fc, NULL) == 0)
                                (void)reconfigure_super(fc);
                        put_fs_context(fc);
                }
        }
}

static void do_emergency_remount(struct work_struct *work)
{
        __iterate_supers(do_emergency_remount_callback, NULL,
                         SUPER_ITER_EXCL | SUPER_ITER_REVERSE);
        kfree(work);
        printk("Emergency Remount complete\n");
}

void emergency_remount(void)
{
        struct work_struct *work;

        work = kmalloc(sizeof(*work), GFP_ATOMIC);
        if (work) {
                INIT_WORK(work, do_emergency_remount);
                schedule_work(work);
        }
}

static void do_thaw_all_callback(struct super_block *sb, void *unused)
{
        if (IS_ENABLED(CONFIG_BLOCK))
                while (sb->s_bdev && !bdev_thaw(sb->s_bdev))
                        pr_warn("Emergency Thaw on %pg\n", sb->s_bdev);
        thaw_super_locked(sb, FREEZE_HOLDER_USERSPACE, NULL);
}

static void do_thaw_all(struct work_struct *work)
{
        __iterate_supers(do_thaw_all_callback, NULL, SUPER_ITER_EXCL);
        kfree(work);
        printk(KERN_WARNING "Emergency Thaw complete\n");
}

/**
 * emergency_thaw_all -- forcibly thaw every frozen filesystem
 *
 * Used for emergency unfreeze of all filesystems via SysRq
 */
void emergency_thaw_all(void)
{
        struct work_struct *work;

        work = kmalloc(sizeof(*work), GFP_ATOMIC);
        if (work) {
                INIT_WORK(work, do_thaw_all);
                schedule_work(work);
        }
}

static inline bool get_active_super(struct super_block *sb)
{
        bool active = false;

        if (super_lock_excl(sb)) {
                active = atomic_inc_not_zero(&sb->s_active);
                super_unlock_excl(sb);
        }
        return active;
}

static const char *filesystems_freeze_ptr = "filesystems_freeze";

static void filesystems_freeze_callback(struct super_block *sb, void *unused)
{
        if (!sb->s_op->freeze_fs && !sb->s_op->freeze_super)
                return;

        if (!get_active_super(sb))
                return;

        if (sb->s_op->freeze_super)
                sb->s_op->freeze_super(sb, FREEZE_EXCL | FREEZE_HOLDER_KERNEL,
                                       filesystems_freeze_ptr);
        else
                freeze_super(sb, FREEZE_EXCL | FREEZE_HOLDER_KERNEL,
                             filesystems_freeze_ptr);

        deactivate_super(sb);
}

void filesystems_freeze(void)
{
        __iterate_supers(filesystems_freeze_callback, NULL,
                         SUPER_ITER_UNLOCKED | SUPER_ITER_REVERSE);
}

static void filesystems_thaw_callback(struct super_block *sb, void *unused)
{
        if (!sb->s_op->freeze_fs && !sb->s_op->freeze_super)
                return;

        if (!get_active_super(sb))
                return;

        if (sb->s_op->thaw_super)
                sb->s_op->thaw_super(sb, FREEZE_EXCL | FREEZE_HOLDER_KERNEL,
                                     filesystems_freeze_ptr);
        else
                thaw_super(sb, FREEZE_EXCL | FREEZE_HOLDER_KERNEL,
                           filesystems_freeze_ptr);

        deactivate_super(sb);
}

void filesystems_thaw(void)
{
        __iterate_supers(filesystems_thaw_callback, NULL, SUPER_ITER_UNLOCKED);
}

static DEFINE_IDA(unnamed_dev_ida);

/**
 * get_anon_bdev - Allocate a block device for filesystems which don't have one.
 * @p: Pointer to a dev_t.
 *
 * Filesystems which don't use real block devices can call this function
 * to allocate a virtual block device.
 *
 * Context: Any context. Frequently called while holding sb_lock.
 * Return: 0 on success, -EMFILE if there are no anonymous bdevs left
 * or -ENOMEM if memory allocation failed.
 */
int get_anon_bdev(dev_t *p)
{
        int dev;

        /*
         * Many userspace utilities consider an FSID of 0 invalid.
         * Always return at least 1 from get_anon_bdev.
         */
        dev = ida_alloc_range(&unnamed_dev_ida, 1, (1 << MINORBITS) - 1,
                              GFP_ATOMIC);
        if (dev == -ENOSPC)
                dev = -EMFILE;
        if (dev < 0)
                return dev;

        *p = MKDEV(0, dev);
        return 0;
}
EXPORT_SYMBOL(get_anon_bdev);

void free_anon_bdev(dev_t dev)
{
        ida_free(&unnamed_dev_ida, MINOR(dev));
}
EXPORT_SYMBOL(free_anon_bdev);

int set_anon_super(struct super_block *s, void *data)
{
        return get_anon_bdev(&s->s_dev);
}
EXPORT_SYMBOL(set_anon_super);

void kill_anon_super(struct super_block *sb)
{
        dev_t dev = sb->s_dev;
        generic_shutdown_super(sb);
        kill_super_notify(sb);
        free_anon_bdev(dev);
}
EXPORT_SYMBOL(kill_anon_super);

void kill_litter_super(struct super_block *sb)
{
        if (sb->s_root)
                d_genocide(sb->s_root);
        kill_anon_super(sb);
}
EXPORT_SYMBOL(kill_litter_super);
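
/*
 * Illustrative sketch (not compiled): how a simple in-memory filesystem
 * typically wires kill_litter_super() into its file_system_type, in the
 * style of ramfs; the name and init_fs_context callback are
 * hypothetical.
 */
#if 0
static struct file_system_type example_fs_type = {
        .owner           = THIS_MODULE,
        .name            = "examplefs",
        .init_fs_context = example_init_fs_context,
        .kill_sb         = kill_litter_super,
};
#endif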

int set_anon_super_fc(struct super_block *sb, struct fs_context *fc)
{
        return set_anon_super(sb, NULL);
}
EXPORT_SYMBOL(set_anon_super_fc);

static int test_keyed_super(struct super_block *sb, struct fs_context *fc)
{
        return sb->s_fs_info == fc->s_fs_info;
}

static int test_single_super(struct super_block *s, struct fs_context *fc)
{
        return 1;
}

static int vfs_get_super(struct fs_context *fc,
                int (*test)(struct super_block *, struct fs_context *),
                int (*fill_super)(struct super_block *sb,
                                  struct fs_context *fc))
{
        struct super_block *sb;
        int err;

        sb = sget_fc(fc, test, set_anon_super_fc);
        if (IS_ERR(sb))
                return PTR_ERR(sb);

        if (!sb->s_root) {
                err = fill_super(sb, fc);
                if (err)
                        goto error;

                sb->s_flags |= SB_ACTIVE;
        }

        fc->root = dget(sb->s_root);
        return 0;

error:
        deactivate_locked_super(sb);
        return err;
}

int get_tree_nodev(struct fs_context *fc,
                   int (*fill_super)(struct super_block *sb,
                                     struct fs_context *fc))
{
        return vfs_get_super(fc, NULL, fill_super);
}
EXPORT_SYMBOL(get_tree_nodev);

int get_tree_single(struct fs_context *fc,
                    int (*fill_super)(struct super_block *sb,
                                      struct fs_context *fc))
{
        return vfs_get_super(fc, test_single_super, fill_super);
}
EXPORT_SYMBOL(get_tree_single);

int get_tree_keyed(struct fs_context *fc,
                   int (*fill_super)(struct super_block *sb,
                                     struct fs_context *fc),
                   void *key)
{
        fc->s_fs_info = key;
        return vfs_get_super(fc, test_keyed_super, fill_super);
}
EXPORT_SYMBOL(get_tree_keyed);
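
/*
 * Illustrative sketch (not compiled): a filesystem that wants one
 * superblock per namespace-like object can key on that object's
 * address; test_keyed_super() compares it against sb->s_fs_info. Both
 * example_fill_super() and the use of fc->fs_private as the key are
 * hypothetical.
 */
#if 0
static int example_keyed_get_tree(struct fs_context *fc)
{
        return get_tree_keyed(fc, example_fill_super, fc->fs_private);
}
#endif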

static int set_bdev_super(struct super_block *s, void *data)
{
        s->s_dev = *(dev_t *)data;
        return 0;
}

static int super_s_dev_set(struct super_block *s, struct fs_context *fc)
{
        return set_bdev_super(s, fc->sget_key);
}

static int super_s_dev_test(struct super_block *s, struct fs_context *fc)
{
        return !(s->s_iflags & SB_I_RETIRED) &&
                s->s_dev == *(dev_t *)fc->sget_key;
}

/**
 * sget_dev - Find or create a superblock by device number
 * @fc: Filesystem context.
 * @dev: device number
 *
 * Find or create a superblock using the provided device number that
 * will be stored in fc->sget_key.
 *
 * If an extant superblock is matched, then that will be returned with
 * an elevated reference count that the caller must transfer or discard.
 *
 * If no match is made, a new superblock will be allocated and basic
 * initialisation will be performed (s_type, s_fs_info, s_id, s_dev will
 * be set). The superblock will be published and it will be returned in
 * a partially constructed state with SB_BORN and SB_ACTIVE as yet
 * unset.
 *
 * Return: an existing or newly created superblock on success, an error
 *         pointer on failure.
 */
struct super_block *sget_dev(struct fs_context *fc, dev_t dev)
{
        fc->sget_key = &dev;
        return sget_fc(fc, super_s_dev_test, super_s_dev_set);
}
EXPORT_SYMBOL(sget_dev);

#ifdef CONFIG_BLOCK
/*
 * Lock the superblock that is holder of the bdev. Returns the superblock
 * pointer if we successfully locked the superblock and it is alive. Otherwise
 * we return NULL and just unlock bdev->bd_holder_lock.
 *
 * The function must be called with bdev->bd_holder_lock and releases it.
 */
static struct super_block *bdev_super_lock(struct block_device *bdev, bool excl)
        __releases(&bdev->bd_holder_lock)
{
        struct super_block *sb = bdev->bd_holder;
        bool locked;

        lockdep_assert_held(&bdev->bd_holder_lock);
        lockdep_assert_not_held(&sb->s_umount);
        lockdep_assert_not_held(&bdev->bd_disk->open_mutex);

        /* Make sure sb doesn't go away from under us */
        spin_lock(&sb_lock);
        sb->s_count++;
        spin_unlock(&sb_lock);

        mutex_unlock(&bdev->bd_holder_lock);

        locked = super_lock(sb, excl);

        /*
         * If the superblock wasn't already SB_DYING then we hold
         * s_umount and can safely drop our temporary reference.
         */
        put_super(sb);

        if (!locked)
                return NULL;

        if (!sb->s_root || !(sb->s_flags & SB_ACTIVE)) {
                super_unlock(sb, excl);
                return NULL;
        }

        return sb;
}

static void fs_bdev_mark_dead(struct block_device *bdev, bool surprise)
{
        struct super_block *sb;

        sb = bdev_super_lock(bdev, false);
        if (!sb)
                return;

        if (sb->s_op->remove_bdev) {
                int ret;

                ret = sb->s_op->remove_bdev(sb, bdev);
                if (!ret) {
                        super_unlock_shared(sb);
                        return;
                }
                /* Fallback to shutdown. */
        }

        if (!surprise)
                sync_filesystem(sb);
        shrink_dcache_sb(sb);
        evict_inodes(sb);
        if (sb->s_op->shutdown)
                sb->s_op->shutdown(sb);

        super_unlock_shared(sb);
}

static void fs_bdev_sync(struct block_device *bdev)
{
        struct super_block *sb;

        sb = bdev_super_lock(bdev, false);
        if (!sb)
                return;

        sync_filesystem(sb);
        super_unlock_shared(sb);
}

static struct super_block *get_bdev_super(struct block_device *bdev)
{
        bool active = false;
        struct super_block *sb;

        sb = bdev_super_lock(bdev, true);
        if (sb) {
                active = atomic_inc_not_zero(&sb->s_active);
                super_unlock_excl(sb);
        }
        if (!active)
                return NULL;
        return sb;
}

/**
 * fs_bdev_freeze - freeze owning filesystem of block device
 * @bdev: block device
 *
 * Freeze the filesystem that owns this block device if it is still
 * active.
 *
 * A filesystem that owns multiple block devices may be frozen from each
 * block device and won't be unfrozen until all block devices are
 * unfrozen. Each block device can only freeze the filesystem once as we
 * nest freezes for block devices in the block layer.
 *
 * Return: If the freeze was successful zero is returned. If the freeze
 *         failed a negative error code is returned.
 */
static int fs_bdev_freeze(struct block_device *bdev)
{
        struct super_block *sb;
        int error = 0;

        lockdep_assert_held(&bdev->bd_fsfreeze_mutex);

        sb = get_bdev_super(bdev);
        if (!sb)
                return -EINVAL;

        if (sb->s_op->freeze_super)
                error = sb->s_op->freeze_super(sb,
                                FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE, NULL);
        else
                error = freeze_super(sb,
                                FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE, NULL);
        if (!error)
                error = sync_blockdev(bdev);
        deactivate_super(sb);
        return error;
}

/**
 * fs_bdev_thaw - thaw owning filesystem of block device
 * @bdev: block device
 *
 * Thaw the filesystem that owns this block device.
 *
 * A filesystem that owns multiple block devices may be frozen from each
 * block device and won't be unfrozen until all block devices are
 * unfrozen. Each block device can only freeze the filesystem once as we
 * nest freezes for block devices in the block layer.
 *
 * Return: If the thaw was successful zero is returned. If the thaw
 *         failed a negative error code is returned. If this function
 *         returns zero it doesn't mean that the filesystem is unfrozen
 *         as it may have been frozen multiple times (kernel may hold a
 *         freeze or might be frozen from other block devices).
 */
static int fs_bdev_thaw(struct block_device *bdev)
{
        struct super_block *sb;
        int error;

        lockdep_assert_held(&bdev->bd_fsfreeze_mutex);

        /*
         * The block device may have been frozen before it was claimed by a
         * filesystem. Concurrently another process might try to mount that
         * frozen block device and has temporarily claimed the block device for
         * that purpose causing a concurrent fs_bdev_thaw() to end up here. The
         * mounter is already about to abort mounting because they still saw an
         * elevated bdev->bd_fsfreeze_count so get_bdev_super() will return
         * NULL in that case.
         */
        sb = get_bdev_super(bdev);
        if (!sb)
                return -EINVAL;

        if (sb->s_op->thaw_super)
                error = sb->s_op->thaw_super(sb,
                                FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE, NULL);
        else
                error = thaw_super(sb,
                                FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE, NULL);
        deactivate_super(sb);
        return error;
}

const struct blk_holder_ops fs_holder_ops = {
        .mark_dead      = fs_bdev_mark_dead,
        .sync           = fs_bdev_sync,
        .freeze         = fs_bdev_freeze,
        .thaw           = fs_bdev_thaw,
};
EXPORT_SYMBOL_GPL(fs_holder_ops);
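
/*
 * Descriptive note (assumption based on the block layer's holder-ops
 * contract): device removal ends up in ->mark_dead(), bdev sync ends up
 * in ->sync(), and the bdev freeze/thaw paths in ->freeze() and
 * ->thaw(). Each handler stabilises the owning superblock through
 * bdev_super_lock() before touching it.
 */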

int setup_bdev_super(struct super_block *sb, int sb_flags,
                     struct fs_context *fc)
{
        blk_mode_t mode = sb_open_mode(sb_flags);
        struct file *bdev_file;
        struct block_device *bdev;

        bdev_file = bdev_file_open_by_dev(sb->s_dev, mode, sb, &fs_holder_ops);
        if (IS_ERR(bdev_file)) {
                if (fc)
                        errorf(fc, "%s: Can't open blockdev", fc->source);
                return PTR_ERR(bdev_file);
        }
        bdev = file_bdev(bdev_file);

        /*
         * This really should be in blkdev_get_by_dev, but right now can't due
         * to legacy issues that require us to allow opening a block device node
         * writable from userspace even for a read-only block device.
         */
        if ((mode & BLK_OPEN_WRITE) && bdev_read_only(bdev)) {
                bdev_fput(bdev_file);
                return -EACCES;
        }

        /*
         * It is enough to check bdev was not frozen before we set
         * s_bdev as freezing will wait until SB_BORN is set.
         */
        if (atomic_read(&bdev->bd_fsfreeze_count) > 0) {
                if (fc)
                        warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
                bdev_fput(bdev_file);
                return -EBUSY;
        }
        spin_lock(&sb_lock);
        sb->s_bdev_file = bdev_file;
        sb->s_bdev = bdev;
        sb->s_bdi = bdi_get(bdev->bd_disk->bdi);
        if (bdev_stable_writes(bdev))
                sb->s_iflags |= SB_I_STABLE_WRITES;
        spin_unlock(&sb_lock);

        snprintf(sb->s_id, sizeof(sb->s_id), "%pg", bdev);
        shrinker_debugfs_rename(sb->s_shrink, "sb-%s:%s", sb->s_type->name,
                                sb->s_id);
        sb_set_blocksize(sb, block_size(bdev));
        return 0;
}
EXPORT_SYMBOL_GPL(setup_bdev_super);

/**
 * get_tree_bdev_flags - Get a superblock based on a single block device
 * @fc: The filesystem context holding the parameters
 * @fill_super: Helper to initialise a new superblock
 * @flags: GET_TREE_BDEV_* flags
 */
int get_tree_bdev_flags(struct fs_context *fc,
                        int (*fill_super)(struct super_block *sb,
                                          struct fs_context *fc), unsigned int flags)
{
        struct super_block *s;
        int error = 0;
        dev_t dev;

        if (!fc->source)
                return invalf(fc, "No source specified");

        error = lookup_bdev(fc->source, &dev);
        if (error) {
                if (!(flags & GET_TREE_BDEV_QUIET_LOOKUP))
                        errorf(fc, "%s: Can't lookup blockdev", fc->source);
                return error;
        }
        fc->sb_flags |= SB_NOSEC;
        s = sget_dev(fc, dev);
        if (IS_ERR(s))
                return PTR_ERR(s);

        if (s->s_root) {
                /* Don't summarily change the RO/RW state. */
                if ((fc->sb_flags ^ s->s_flags) & SB_RDONLY) {
                        warnf(fc, "%pg: Can't mount, would change RO state", s->s_bdev);
                        deactivate_locked_super(s);
                        return -EBUSY;
                }
        } else {
                error = setup_bdev_super(s, fc->sb_flags, fc);
                if (!error)
                        error = fill_super(s, fc);
                if (error) {
                        deactivate_locked_super(s);
                        return error;
                }
                s->s_flags |= SB_ACTIVE;
        }

        BUG_ON(fc->root);
        fc->root = dget(s->s_root);
        return 0;
}
EXPORT_SYMBOL_GPL(get_tree_bdev_flags);

/**
 * get_tree_bdev - Get a superblock based on a single block device
 * @fc: The filesystem context holding the parameters
 * @fill_super: Helper to initialise a new superblock
 */
int get_tree_bdev(struct fs_context *fc,
                  int (*fill_super)(struct super_block *,
                                    struct fs_context *))
{
        return get_tree_bdev_flags(fc, fill_super, 0);
}
EXPORT_SYMBOL(get_tree_bdev);
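
/*
 * Illustrative sketch (not compiled): a typical block-device filesystem
 * wires its ->get_tree to the helper above, as many disk filesystems
 * do; example_fill_super() is hypothetical.
 */
#if 0
static int example_bdev_get_tree(struct fs_context *fc)
{
        return get_tree_bdev(fc, example_fill_super);
}
#endif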

static int test_bdev_super(struct super_block *s, void *data)
{
        return !(s->s_iflags & SB_I_RETIRED) && s->s_dev == *(dev_t *)data;
}

struct dentry *mount_bdev(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data,
        int (*fill_super)(struct super_block *, void *, int))
{
        struct super_block *s;
        int error;
        dev_t dev;

        error = lookup_bdev(dev_name, &dev);
        if (error)
                return ERR_PTR(error);

        flags |= SB_NOSEC;
        s = sget(fs_type, test_bdev_super, set_bdev_super, flags, &dev);
        if (IS_ERR(s))
                return ERR_CAST(s);

        if (s->s_root) {
                if ((flags ^ s->s_flags) & SB_RDONLY) {
                        deactivate_locked_super(s);
                        return ERR_PTR(-EBUSY);
                }
        } else {
                error = setup_bdev_super(s, flags, NULL);
                if (!error)
                        error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
                if (error) {
                        deactivate_locked_super(s);
                        return ERR_PTR(error);
                }

                s->s_flags |= SB_ACTIVE;
        }

        return dget(s->s_root);
}
EXPORT_SYMBOL(mount_bdev);

void kill_block_super(struct super_block *sb)
{
        struct block_device *bdev = sb->s_bdev;

        generic_shutdown_super(sb);
        if (bdev) {
                sync_blockdev(bdev);
                bdev_fput(sb->s_bdev_file);
        }
}

EXPORT_SYMBOL(kill_block_super);
#endif
1775
mount_nodev(struct file_system_type * fs_type,int flags,void * data,int (* fill_super)(struct super_block *,void *,int))1776 struct dentry *mount_nodev(struct file_system_type *fs_type,
1777 int flags, void *data,
1778 int (*fill_super)(struct super_block *, void *, int))
1779 {
1780 int error;
1781 struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL);
1782
1783 if (IS_ERR(s))
1784 return ERR_CAST(s);
1785
1786 error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
1787 if (error) {
1788 deactivate_locked_super(s);
1789 return ERR_PTR(error);
1790 }
1791 s->s_flags |= SB_ACTIVE;
1792 return dget(s->s_root);
1793 }
1794 EXPORT_SYMBOL(mount_nodev);
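
/*
 * Usage sketch (hypothetical "examplefs"): virtual filesystems with no
 * backing device pair mount_nodev() with kill_anon_super(), which undoes
 * the anonymous s_dev that set_anon_super() allocated.
 *
 *	static struct dentry *examplefs_mount(struct file_system_type *fs_type,
 *			int flags, const char *dev_name, void *data)
 *	{
 *		return mount_nodev(fs_type, flags, data, examplefs_fill_super);
 *	}
 */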

/**
 * vfs_get_tree - Get the mountable root
 * @fc: The superblock configuration context.
 *
 * The filesystem is invoked to get or create a superblock which can then later
 * be used for mounting.  The filesystem places a pointer to the root to be
 * used for mounting in @fc->root.
 */
int vfs_get_tree(struct fs_context *fc)
{
	struct super_block *sb;
	int error;

	if (fc->root)
		return -EBUSY;

	/* Get the mountable root in fc->root, with a ref on the root and a ref
	 * on the superblock.
	 */
	error = fc->ops->get_tree(fc);
	if (error < 0)
		return error;

	if (!fc->root) {
		pr_err("Filesystem %s get_tree() didn't set fc->root, returned %i\n",
		       fc->fs_type->name, error);
		/* We don't know what the locking state of the superblock is -
		 * if there is a superblock.
		 */
		BUG();
	}

	sb = fc->root->d_sb;
	WARN_ON(!sb->s_bdi);

	/*
	 * super_wake() contains a memory barrier which also takes care of
	 * ordering for super_cache_count(). We place it before setting
	 * SB_BORN as the data dependency between the two functions is
	 * the superblock structure contents that we just set up, not
	 * the SB_BORN flag.
	 */
	super_wake(sb, SB_BORN);

	error = security_sb_set_mnt_opts(sb, fc->security, 0, NULL);
	if (unlikely(error)) {
		fc_drop_locked(fc);
		return error;
	}

	/*
	 * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
	 * but s_maxbytes was an unsigned long long for many releases. Throw
	 * this warning for a little while to try and catch filesystems that
	 * violate this rule.
	 */
	WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
		"negative value (%lld)\n", fc->fs_type->name, sb->s_maxbytes);

	return 0;
}
EXPORT_SYMBOL(vfs_get_tree);
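
/*
 * Usage sketch: vfs_get_tree() is normally driven from the mount path
 * once a configured fs_context exists. A minimal in-kernel mount might
 * look like the following (all local variables hypothetical):
 *
 *	struct fs_context *fc = fs_context_for_mount(type, 0);
 *	if (IS_ERR(fc))
 *		return ERR_CAST(fc);
 *	err = vfs_get_tree(fc);
 *	if (!err) {
 *		// on success sb->s_umount is held; drop it before mounting
 *		up_write(&fc->root->d_sb->s_umount);
 *		mnt = vfs_create_mount(fc);
 *	}
 *	put_fs_context(fc);
 *
 * This mirrors what fc_mount() does; real callers usually just use
 * fc_mount() or kern_mount().
 */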

/*
 * Setup private BDI for given superblock. It gets automatically cleaned up
 * in generic_shutdown_super().
 */
int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
{
	struct backing_dev_info *bdi;
	int err;
	va_list args;

	bdi = bdi_alloc(NUMA_NO_NODE);
	if (!bdi)
		return -ENOMEM;

	va_start(args, fmt);
	err = bdi_register_va(bdi, fmt, args);
	va_end(args);
	if (err) {
		bdi_put(bdi);
		return err;
	}
	WARN_ON(sb->s_bdi != &noop_backing_dev_info);
	sb->s_bdi = bdi;
	sb->s_iflags |= SB_I_PERSB_BDI;

	return 0;
}
EXPORT_SYMBOL(super_setup_bdi_name);
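
/*
 * Usage sketch (hypothetical "examplefs"): filesystems without a backing
 * block device typically register a private BDI from their fill_super so
 * writeback has a per-instance device to account against. The instance
 * counter here is an assumption, mirroring what super_setup_bdi() does.
 *
 *	static int examplefs_fill_super(struct super_block *sb,
 *					struct fs_context *fc)
 *	{
 *		static atomic_t instance = ATOMIC_INIT(0);
 *		int err = super_setup_bdi_name(sb, "examplefs-%d",
 *					       atomic_inc_return(&instance));
 *		if (err)
 *			return err;
 *		...
 *	}
 */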

/*
 * Setup private BDI for given superblock. It gets automatically cleaned up
 * in generic_shutdown_super().
 */
int super_setup_bdi(struct super_block *sb)
{
	static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

	return super_setup_bdi_name(sb, "%.28s-%ld", sb->s_type->name,
				    atomic_long_inc_return(&bdi_seq));
}
EXPORT_SYMBOL(super_setup_bdi);

/**
 * sb_wait_write - wait until all writers to given file system finish
 * @sb: the super for which we wait
 * @level: type of writers we wait for (normal vs page fault)
 *
 * This function waits until there are no writers of given type to given file
 * system.
 */
static void sb_wait_write(struct super_block *sb, int level)
{
	percpu_down_write(sb->s_writers.rw_sem + level-1);
}

/*
 * We are going to return to userspace and forget about these locks, the
 * ownership goes to the caller of thaw_super() which does unlock().
 */
static void lockdep_sb_freeze_release(struct super_block *sb)
{
	int level;

	for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
		percpu_rwsem_release(sb->s_writers.rw_sem + level, _THIS_IP_);
}

/*
 * Tell lockdep we are holding these locks before we call ->unfreeze_fs(sb).
 */
static void lockdep_sb_freeze_acquire(struct super_block *sb)
{
	int level;

	for (level = 0; level < SB_FREEZE_LEVELS; ++level)
		percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
}

static void sb_freeze_unlock(struct super_block *sb, int level)
{
	for (level--; level >= 0; level--)
		percpu_up_write(sb->s_writers.rw_sem + level);
}

static int wait_for_partially_frozen(struct super_block *sb)
{
	int ret = 0;

	do {
		unsigned short old = sb->s_writers.frozen;

		up_write(&sb->s_umount);
		ret = wait_var_event_killable(&sb->s_writers.frozen,
					      sb->s_writers.frozen != old);
		down_write(&sb->s_umount);
	} while (ret == 0 &&
		 sb->s_writers.frozen != SB_UNFROZEN &&
		 sb->s_writers.frozen != SB_FREEZE_COMPLETE);

	return ret;
}

#define FREEZE_HOLDERS (FREEZE_HOLDER_KERNEL | FREEZE_HOLDER_USERSPACE)
#define FREEZE_FLAGS (FREEZE_HOLDERS | FREEZE_MAY_NEST | FREEZE_EXCL)

static inline int freeze_inc(struct super_block *sb, enum freeze_holder who)
{
	WARN_ON_ONCE((who & ~FREEZE_FLAGS));
	WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1);

	if (who & FREEZE_HOLDER_KERNEL)
		++sb->s_writers.freeze_kcount;
	if (who & FREEZE_HOLDER_USERSPACE)
		++sb->s_writers.freeze_ucount;
	return sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount;
}

static inline int freeze_dec(struct super_block *sb, enum freeze_holder who)
{
	WARN_ON_ONCE((who & ~FREEZE_FLAGS));
	WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1);

	if ((who & FREEZE_HOLDER_KERNEL) && sb->s_writers.freeze_kcount)
		--sb->s_writers.freeze_kcount;
	if ((who & FREEZE_HOLDER_USERSPACE) && sb->s_writers.freeze_ucount)
		--sb->s_writers.freeze_ucount;
	return sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount;
}

static inline bool may_freeze(struct super_block *sb, enum freeze_holder who,
			      const void *freeze_owner)
{
	lockdep_assert_held(&sb->s_umount);

	WARN_ON_ONCE((who & ~FREEZE_FLAGS));
	WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1);

	if (who & FREEZE_EXCL) {
		if (WARN_ON_ONCE(!(who & FREEZE_HOLDER_KERNEL)))
			return false;
		if (WARN_ON_ONCE(who & ~(FREEZE_EXCL | FREEZE_HOLDER_KERNEL)))
			return false;
		if (WARN_ON_ONCE(!freeze_owner))
			return false;
		/* This freeze already has a specific owner. */
		if (sb->s_writers.freeze_owner)
			return false;
		/*
		 * This is already frozen multiple times so we're just
		 * going to take a reference count and mark the freeze as
		 * being owned by the caller.
		 */
		if (sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount)
			sb->s_writers.freeze_owner = freeze_owner;
		return true;
	}

	if (who & FREEZE_HOLDER_KERNEL)
		return (who & FREEZE_MAY_NEST) ||
		       sb->s_writers.freeze_kcount == 0;
	if (who & FREEZE_HOLDER_USERSPACE)
		return (who & FREEZE_MAY_NEST) ||
		       sb->s_writers.freeze_ucount == 0;
	return false;
}

static inline bool may_unfreeze(struct super_block *sb, enum freeze_holder who,
				const void *freeze_owner)
{
	lockdep_assert_held(&sb->s_umount);

	WARN_ON_ONCE((who & ~FREEZE_FLAGS));
	WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1);

	if (who & FREEZE_EXCL) {
		if (WARN_ON_ONCE(!(who & FREEZE_HOLDER_KERNEL)))
			return false;
		if (WARN_ON_ONCE(who & ~(FREEZE_EXCL | FREEZE_HOLDER_KERNEL)))
			return false;
		if (WARN_ON_ONCE(!freeze_owner))
			return false;
		if (WARN_ON_ONCE(sb->s_writers.freeze_kcount == 0))
			return false;
		/* This isn't exclusively frozen. */
		if (!sb->s_writers.freeze_owner)
			return false;
		/* This isn't exclusively frozen by us. */
		if (sb->s_writers.freeze_owner != freeze_owner)
			return false;
		/*
		 * This is still frozen multiple times so we're just
		 * going to drop our reference count and undo our
		 * exclusive freeze.
		 */
		if ((sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount) > 1)
			sb->s_writers.freeze_owner = NULL;
		return true;
	}

	if (who & FREEZE_HOLDER_KERNEL) {
		/*
		 * Someone's trying to steal the reference belonging to
		 * @sb->s_writers.freeze_owner.
		 */
		if (sb->s_writers.freeze_kcount == 1 &&
		    sb->s_writers.freeze_owner)
			return false;
		return sb->s_writers.freeze_kcount > 0;
	}

	if (who & FREEZE_HOLDER_USERSPACE)
		return sb->s_writers.freeze_ucount > 0;

	return false;
}

/**
 * freeze_super - lock the filesystem and force it into a consistent state
 * @sb: the super to lock
 * @who: context that wants to freeze
 * @freeze_owner: owner of the freeze
 *
 * Syncs the super to make sure the filesystem is consistent and calls the fs's
 * freeze_fs.  Subsequent calls to this without first thawing the fs may return
 * -EBUSY.
 *
 * @who should be:
 * * %FREEZE_HOLDER_USERSPACE if userspace wants to freeze the fs;
 * * %FREEZE_HOLDER_KERNEL if the kernel wants to freeze the fs.
 * * %FREEZE_MAY_NEST whether nesting freeze and thaw requests is allowed.
 *
 * The @who argument distinguishes between the kernel and userspace trying to
 * freeze the filesystem.  Although there cannot be multiple kernel freezes or
 * multiple userspace freezes in effect at any given time, the kernel and
 * userspace can both hold a filesystem frozen.  The filesystem remains frozen
 * until there are no kernel or userspace freezes in effect.
 *
 * A filesystem may hold multiple devices and thus a filesystem may be
 * frozen through the block layer via multiple block devices. In this
 * case the request is marked as being allowed to nest by passing
 * FREEZE_MAY_NEST. The filesystem remains frozen until all block
 * devices are unfrozen. If multiple freezes are attempted without
 * FREEZE_MAY_NEST -EBUSY will be returned.
 *
 * During this function, sb->s_writers.frozen goes through these values:
 *
 * SB_UNFROZEN: File system is normal, all writes progress as usual.
 *
 * SB_FREEZE_WRITE: The file system is in the process of being frozen.  New
 * writes should be blocked, though page faults are still allowed. We wait for
 * all writes to complete and then proceed to the next stage.
 *
 * SB_FREEZE_PAGEFAULT: Freezing continues. Now also page faults are blocked
 * but internal fs threads can still modify the filesystem (although they
 * should not dirty new pages or inodes), writeback can run etc. After waiting
 * for all running page faults we sync the filesystem which will clean all
 * dirty pages and inodes (no new dirty pages or inodes can be created when
 * sync is running).
 *
 * SB_FREEZE_FS: The file system is frozen. Now all internal sources of fs
 * modification are blocked (e.g. XFS preallocation truncation on inode
 * reclaim). This is usually implemented by blocking new transactions for
 * filesystems that have them and need this additional guard. After all
 * internal writers are finished we call ->freeze_fs() to finish filesystem
 * freezing. Then we transition to SB_FREEZE_COMPLETE state. This state is
 * mostly auxiliary for filesystems to verify they do not modify frozen fs.
 *
 * sb->s_writers.frozen is protected by sb->s_umount.
 *
 * Return: If the freeze was successful zero is returned. If the freeze
 *         failed a negative error code is returned.
 */
int freeze_super(struct super_block *sb, enum freeze_holder who, const void *freeze_owner)
{
	int ret;

	if (!super_lock_excl(sb)) {
		WARN_ON_ONCE("Dying superblock while freezing!");
		return -EINVAL;
	}
	atomic_inc(&sb->s_active);

retry:
	if (sb->s_writers.frozen == SB_FREEZE_COMPLETE) {
		if (may_freeze(sb, who, freeze_owner))
			ret = !!WARN_ON_ONCE(freeze_inc(sb, who) == 1);
		else
			ret = -EBUSY;
		/* All freezers share a single active reference. */
		deactivate_locked_super(sb);
		return ret;
	}

	if (sb->s_writers.frozen != SB_UNFROZEN) {
		ret = wait_for_partially_frozen(sb);
		if (ret) {
			deactivate_locked_super(sb);
			return ret;
		}

		goto retry;
	}

	if (sb_rdonly(sb)) {
		/* Nothing to do really... */
		WARN_ON_ONCE(freeze_inc(sb, who) > 1);
		sb->s_writers.freeze_owner = freeze_owner;
		sb->s_writers.frozen = SB_FREEZE_COMPLETE;
		wake_up_var(&sb->s_writers.frozen);
		super_unlock_excl(sb);
		return 0;
	}

	sb->s_writers.frozen = SB_FREEZE_WRITE;
	/* Release s_umount to preserve sb_start_write -> s_umount ordering */
	super_unlock_excl(sb);
	sb_wait_write(sb, SB_FREEZE_WRITE);
	__super_lock_excl(sb);

	/* Now we go and block page faults... */
	sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
	sb_wait_write(sb, SB_FREEZE_PAGEFAULT);

	/* All writers are done so after syncing there won't be dirty data */
	ret = sync_filesystem(sb);
	if (ret) {
		sb->s_writers.frozen = SB_UNFROZEN;
		sb_freeze_unlock(sb, SB_FREEZE_PAGEFAULT);
		wake_up_var(&sb->s_writers.frozen);
		deactivate_locked_super(sb);
		return ret;
	}

	/* Now wait for internal filesystem counter */
	sb->s_writers.frozen = SB_FREEZE_FS;
	sb_wait_write(sb, SB_FREEZE_FS);

	if (sb->s_op->freeze_fs) {
		ret = sb->s_op->freeze_fs(sb);
		if (ret) {
			printk(KERN_ERR
				"VFS: Filesystem freeze failed\n");
			sb->s_writers.frozen = SB_UNFROZEN;
			sb_freeze_unlock(sb, SB_FREEZE_FS);
			wake_up_var(&sb->s_writers.frozen);
			deactivate_locked_super(sb);
			return ret;
		}
	}
	/*
	 * For debugging purposes so that fs can warn if it sees write activity
	 * when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super().
	 */
	WARN_ON_ONCE(freeze_inc(sb, who) > 1);
	sb->s_writers.freeze_owner = freeze_owner;
	sb->s_writers.frozen = SB_FREEZE_COMPLETE;
	wake_up_var(&sb->s_writers.frozen);
	lockdep_sb_freeze_release(sb);
	super_unlock_excl(sb);
	return 0;
}
EXPORT_SYMBOL(freeze_super);
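
/*
 * Usage sketch: a kernel-internal user (e.g. a snapshot path; the caller
 * shown here is hypothetical) freezes and later thaws the filesystem
 * with matching holder flags:
 *
 *	int err = freeze_super(sb, FREEZE_HOLDER_KERNEL | FREEZE_MAY_NEST,
 *			       NULL);
 *	if (err)
 *		return err;
 *	// ... take the snapshot while no new writes can enter ...
 *	thaw_super(sb, FREEZE_HOLDER_KERNEL | FREEZE_MAY_NEST, NULL);
 *
 * FREEZE_MAY_NEST lets this nest with other freezers; the fs stays
 * frozen until every holder has issued its matching thaw.
 */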

/*
 * Undoes the effect of a freeze_super_locked call.  If the filesystem is
 * frozen both by userspace and the kernel, a thaw call from either source
 * removes that state without releasing the other state or unlocking the
 * filesystem.
 */
static int thaw_super_locked(struct super_block *sb, enum freeze_holder who,
			     const void *freeze_owner)
{
	int error = -EINVAL;

	if (sb->s_writers.frozen != SB_FREEZE_COMPLETE)
		goto out_unlock;

	if (!may_unfreeze(sb, who, freeze_owner))
		goto out_unlock;

	/*
	 * All freezers share a single active reference.
	 * So just unlock in case there are any left.
	 */
	if (freeze_dec(sb, who))
		goto out_unlock;

	if (sb_rdonly(sb)) {
		sb->s_writers.frozen = SB_UNFROZEN;
		sb->s_writers.freeze_owner = NULL;
		wake_up_var(&sb->s_writers.frozen);
		goto out_deactivate;
	}

	lockdep_sb_freeze_acquire(sb);

	if (sb->s_op->unfreeze_fs) {
		error = sb->s_op->unfreeze_fs(sb);
		if (error) {
			pr_err("VFS: Filesystem thaw failed\n");
			freeze_inc(sb, who);
			lockdep_sb_freeze_release(sb);
			goto out_unlock;
		}
	}

	sb->s_writers.frozen = SB_UNFROZEN;
	sb->s_writers.freeze_owner = NULL;
	wake_up_var(&sb->s_writers.frozen);
	sb_freeze_unlock(sb, SB_FREEZE_FS);
out_deactivate:
	deactivate_locked_super(sb);
	return 0;

out_unlock:
	super_unlock_excl(sb);
	return error;
}

/**
 * thaw_super -- unlock filesystem
 * @sb: the super to thaw
 * @who: context that wants to thaw
 * @freeze_owner: owner of the freeze
 *
 * Unlocks the filesystem and marks it writeable again after freeze_super()
 * if there are no remaining freezes on the filesystem.
 *
 * @who should be:
 * * %FREEZE_HOLDER_USERSPACE if userspace wants to thaw the fs;
 * * %FREEZE_HOLDER_KERNEL if the kernel wants to thaw the fs.
 * * %FREEZE_MAY_NEST whether nesting freeze and thaw requests is allowed
 *
 * A filesystem may hold multiple devices and thus may have been frozen
 * through the block layer via multiple block devices.  The filesystem
 * remains frozen until all block devices are unfrozen.
 */
int thaw_super(struct super_block *sb, enum freeze_holder who,
	       const void *freeze_owner)
{
	if (!super_lock_excl(sb)) {
		WARN_ON_ONCE("Dying superblock while thawing!");
		return -EINVAL;
	}
	return thaw_super_locked(sb, who, freeze_owner);
}
EXPORT_SYMBOL(thaw_super);
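
/*
 * Usage sketch: the FIFREEZE/FITHAW ioctls are the userspace-facing
 * holders, roughly (modulo the permission and error handling that lives
 * in fs/ioctl.c):
 *
 *	// FIFREEZE
 *	freeze_super(sb, FREEZE_HOLDER_USERSPACE, NULL);
 *	// FITHAW
 *	thaw_super(sb, FREEZE_HOLDER_USERSPACE, NULL);
 *
 * Because kernel and userspace counts are tracked separately, a FITHAW
 * cannot drop a freeze taken with FREEZE_HOLDER_KERNEL.
 */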

/*
 * Create workqueue for deferred direct IO completions. We allocate the
 * workqueue when it's first needed. This avoids creating workqueue for
 * filesystems that don't need it and also allows us to create the workqueue
 * late enough so that we can include s_id in the name of the workqueue.
 */
int sb_init_dio_done_wq(struct super_block *sb)
{
	struct workqueue_struct *old;
	struct workqueue_struct *wq = alloc_workqueue("dio/%s",
						      WQ_MEM_RECLAIM, 0,
						      sb->s_id);
	if (!wq)
		return -ENOMEM;
	/*
	 * This has to be atomic as more DIOs can race to create the workqueue
	 */
	old = cmpxchg(&sb->s_dio_done_wq, NULL, wq);
	/* Someone created workqueue before us? Free ours... */
	if (old)
		destroy_workqueue(wq);
	return 0;
}
EXPORT_SYMBOL_GPL(sb_init_dio_done_wq);
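
/*
 * Usage sketch: direct-IO implementations call this lazily before they
 * first need to defer a completion to process context, condensed from
 * the pattern used by the in-kernel DIO paths:
 *
 *	if (!inode->i_sb->s_dio_done_wq) {
 *		ret = sb_init_dio_done_wq(inode->i_sb);
 *		if (ret < 0)
 *			return ret;
 *	}
 *
 * The cmpxchg() above makes concurrent callers safe: at most one
 * workqueue survives, so this check-then-create pattern needs no lock.
 */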