// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/super.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  super.c contains code to handle: - mount structures
 *                                   - super-block tables
 *                                   - filesystem drivers list
 *                                   - mount system call
 *                                   - umount system call
 *                                   - ustat system call
 *
 * GK 2/5/95  -  Changed to support mounting the root fs via NFS
 *
 *  Added kerneld support: Jacques Gelinas and Bjorn Ekwall
 *  Added change_root: Werner Almesberger & Hans Lermen, Feb '96
 *  Added options to /proc/mounts:
 *    Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
 *  Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
 *  Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
 */

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/writeback.h>		/* for the emergency remount stuff */
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/rculist_bl.h>
#include <linux/fscrypt.h>
#include <linux/fsnotify.h>
#include <linux/lockdep.h>
#include <linux/user_namespace.h>
#include <linux/fs_context.h>
#include <linux/fserror.h>
#include <uapi/linux/mount.h>
#include "internal.h"

static int thaw_super_locked(struct super_block *sb, enum freeze_holder who,
			     const void *freeze_owner);

static LIST_HEAD(super_blocks);
static DEFINE_SPINLOCK(sb_lock);

static char *sb_writers_name[SB_FREEZE_LEVELS] = {
	"sb_writers",
	"sb_pagefaults",
	"sb_internal",
};

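/*
 * Lock/unlock helpers for @sb->s_umount: @excl selects the write
 * (exclusive) side of the rwsem, otherwise the read (shared) side.
 */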
static inline void __super_lock(struct super_block *sb, bool excl)
{
	if (excl)
		down_write(&sb->s_umount);
	else
		down_read(&sb->s_umount);
}

static inline void super_unlock(struct super_block *sb, bool excl)
{
	if (excl)
		up_write(&sb->s_umount);
	else
		up_read(&sb->s_umount);
}

static inline void __super_lock_excl(struct super_block *sb)
{
	__super_lock(sb, true);
}

static inline void super_unlock_excl(struct super_block *sb)
{
	super_unlock(sb, true);
}

static inline void super_unlock_shared(struct super_block *sb)
{
	super_unlock(sb, false);
}

static bool super_flags(const struct super_block *sb, unsigned int flags)
{
	/*
	 * Pairs with smp_store_release() in super_wake() and ensures
	 * that we see @flags after we're woken.
	 */
	return smp_load_acquire(&sb->s_flags) & flags;
}

/**
 * super_lock - wait for superblock to become ready and lock it
 * @sb: superblock to wait for
 * @excl: whether exclusive access is required
 *
 * If the superblock has passed through neither vfs_get_tree() nor
 * generic_shutdown_super() yet, wait for that to happen. Either superblock
 * creation will succeed and SB_BORN is set by vfs_get_tree() or we're
 * woken and we'll see SB_DYING.
 *
 * The caller must have acquired a temporary reference on @sb->s_count.
 *
 * Return: The function returns true if SB_BORN was set and with
 *         s_umount held. The function returns false if SB_DYING was
 *         set and without s_umount held.
 */
static __must_check bool super_lock(struct super_block *sb, bool excl)
{
	lockdep_assert_not_held(&sb->s_umount);

	/* wait until the superblock is ready or dying */
	wait_var_event(&sb->s_flags, super_flags(sb, SB_BORN | SB_DYING));

	/* Don't pointlessly acquire s_umount. */
	if (super_flags(sb, SB_DYING))
		return false;

	__super_lock(sb, excl);

	/*
	 * Has gone through generic_shutdown_super() in the meantime.
	 * @sb->s_root is NULL and @sb->s_active is 0. No one needs to
	 * grab a reference to this. Tell them so.
	 */
	if (sb->s_flags & SB_DYING) {
		super_unlock(sb, excl);
		return false;
	}

	WARN_ON_ONCE(!(sb->s_flags & SB_BORN));
	return true;
}

/* wait and try to acquire read-side of @sb->s_umount */
static inline bool super_lock_shared(struct super_block *sb)
{
	return super_lock(sb, false);
}

/* wait and try to acquire write-side of @sb->s_umount */
static inline bool super_lock_excl(struct super_block *sb)
{
	return super_lock(sb, true);
}

/* wake waiters */
#define SUPER_WAKE_FLAGS (SB_BORN | SB_DYING | SB_DEAD)
static void super_wake(struct super_block *sb, unsigned int flag)
{
	WARN_ON_ONCE((flag & ~SUPER_WAKE_FLAGS));
	WARN_ON_ONCE(hweight32(flag & SUPER_WAKE_FLAGS) > 1);

	/*
	 * Pairs with smp_load_acquire() in super_lock() to make sure
	 * all initializations in the superblock are visible to anyone
	 * who sees SB_BORN set.
	 */
	smp_store_release(&sb->s_flags, sb->s_flags | flag);
	/*
	 * Pairs with the barrier in prepare_to_wait_event() to make sure
	 * ___wait_var_event() either sees SB_BORN set or
	 * waitqueue_active() check in wake_up_var() sees the waiter.
	 */
	smp_mb();
	wake_up_var(&sb->s_flags);
}

/*
 * One thing we have to be careful of with a per-sb shrinker is that we don't
 * drop the last active reference to the superblock from within the shrinker.
 * If that happens we could trigger unregistering the shrinker from within the
 * shrinker path and that leads to deadlock on the shrinker_mutex. Hence we
 * take a passive reference to the superblock to avoid this from occurring.
 */
static unsigned long super_cache_scan(struct shrinker *shrink,
				      struct shrink_control *sc)
{
	struct super_block *sb;
	long fs_objects = 0;
	long total_objects;
	long freed = 0;
	long dentries;
	long inodes;

	sb = shrink->private_data;

	/*
	 * Deadlock avoidance. We may hold various FS locks, and we don't want
	 * to recurse into the FS that called us in clear_inode() and friends.
	 */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	if (!super_trylock_shared(sb))
		return SHRINK_STOP;

	if (sb->s_op->nr_cached_objects)
		fs_objects = sb->s_op->nr_cached_objects(sb, sc);

	inodes = list_lru_shrink_count(&sb->s_inode_lru, sc);
	dentries = list_lru_shrink_count(&sb->s_dentry_lru, sc);
	total_objects = dentries + inodes + fs_objects;
	if (!total_objects)
		total_objects = 1;

	/* proportion the scan between the caches */
	dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
	inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);
	fs_objects = mult_frac(sc->nr_to_scan, fs_objects, total_objects);

	/*
	 * prune the dcache first as the icache is pinned by it, then
	 * prune the icache, followed by the filesystem specific caches
	 *
	 * Ensure that we always scan at least one object - memcg kmem
	 * accounting uses this to fully empty the caches.
	 */
	sc->nr_to_scan = dentries + 1;
	freed = prune_dcache_sb(sb, sc);
	sc->nr_to_scan = inodes + 1;
	freed += prune_icache_sb(sb, sc);

	if (fs_objects) {
		sc->nr_to_scan = fs_objects + 1;
		freed += sb->s_op->free_cached_objects(sb, sc);
	}

	super_unlock_shared(sb);
	return freed;
}

static unsigned long super_cache_count(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	struct super_block *sb;
	long total_objects = 0;

	sb = shrink->private_data;

	/*
	 * We don't call super_trylock_shared() here as it is a scalability
	 * bottleneck, so we're exposed to partial setup state. The shrinker
	 * rwsem does not protect filesystem operations backing
	 * list_lru_shrink_count() or s_op->nr_cached_objects(). Counts can
	 * change between super_cache_count and super_cache_scan, so we really
	 * don't need locks here.
	 *
	 * However, if we are currently mounting the superblock, the underlying
	 * filesystem might be in a state of partial construction and hence it
	 * is dangerous to access it. super_trylock_shared() uses a SB_BORN check
	 * to avoid this situation, so do the same here. The memory barrier is
	 * matched with the one in mount_fs() as we don't hold locks here.
	 */
	if (!(sb->s_flags & SB_BORN))
		return 0;
	smp_rmb();

	if (sb->s_op && sb->s_op->nr_cached_objects)
		total_objects = sb->s_op->nr_cached_objects(sb, sc);

	total_objects += list_lru_shrink_count(&sb->s_dentry_lru, sc);
	total_objects += list_lru_shrink_count(&sb->s_inode_lru, sc);

	if (!total_objects)
		return SHRINK_EMPTY;

	total_objects = vfs_pressure_ratio(total_objects);
	return total_objects;
}

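/*
 * Final teardown of a superblock. This runs from a workqueue so the
 * freeing happens in process context rather than from the RCU callback.
 */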
static void destroy_super_work(struct work_struct *work)
{
	struct super_block *s = container_of(work, struct super_block,
							destroy_work);
	fsnotify_sb_free(s);
	security_sb_free(s);
	put_user_ns(s->s_user_ns);
	kfree(s->s_subtype);
	for (int i = 0; i < SB_FREEZE_LEVELS; i++)
		percpu_free_rwsem(&s->s_writers.rw_sem[i]);
	kfree(s);
}

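/* RCU callback: defer the actual freeing to process context. */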
static void destroy_super_rcu(struct rcu_head *head)
{
	struct super_block *s = container_of(head, struct super_block, rcu);
	INIT_WORK(&s->destroy_work, destroy_super_work);
	schedule_work(&s->destroy_work);
}

/* Free a superblock that has never been seen by anyone */
static void destroy_unused_super(struct super_block *s)
{
	if (!s)
		return;
	super_unlock_excl(s);
	list_lru_destroy(&s->s_dentry_lru);
	list_lru_destroy(&s->s_inode_lru);
	shrinker_free(s->s_shrink);
	/* no delays needed */
	destroy_super_work(&s->destroy_work);
}

/**
 * alloc_super - create new superblock
 * @type: filesystem type superblock should belong to
 * @flags: the mount flags
 * @user_ns: User namespace for the super_block
 *
 * Allocates and initializes a new &struct super_block. alloc_super()
 * returns a pointer to the new superblock or %NULL if allocation fails.
 */
static struct super_block *alloc_super(struct file_system_type *type, int flags,
				       struct user_namespace *user_ns)
{
	struct super_block *s = kzalloc_obj(struct super_block);
	static const struct super_operations default_op;
	int i;

	if (!s)
		return NULL;

	s->s_user_ns = get_user_ns(user_ns);
	init_rwsem(&s->s_umount);
	lockdep_set_class(&s->s_umount, &type->s_umount_key);
	/*
	 * sget() can have s_umount recursion.
	 *
	 * When it cannot find a suitable sb, it allocates a new
	 * one (this one), and tries again to find a suitable old
	 * one.
	 *
	 * In case that succeeds, it will acquire the s_umount
	 * lock of the old one. Since these are clearly distinct
	 * locks, and this object isn't exposed yet, there's no
	 * risk of deadlocks.
	 *
	 * Annotate this by putting this lock in a different
	 * subclass.
	 */
	down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);

	if (security_sb_alloc(s))
		goto fail;

	for (i = 0; i < SB_FREEZE_LEVELS; i++) {
		if (__percpu_init_rwsem(&s->s_writers.rw_sem[i],
					sb_writers_name[i],
					&type->s_writers_key[i]))
			goto fail;
	}
	s->s_bdi = &noop_backing_dev_info;
	s->s_flags = flags;
	if (s->s_user_ns != &init_user_ns)
		s->s_iflags |= SB_I_NODEV;
	INIT_HLIST_NODE(&s->s_instances);
	INIT_HLIST_BL_HEAD(&s->s_roots);
	mutex_init(&s->s_sync_lock);
	INIT_LIST_HEAD(&s->s_inodes);
	spin_lock_init(&s->s_inode_list_lock);
	INIT_LIST_HEAD(&s->s_inodes_wb);
	spin_lock_init(&s->s_inode_wblist_lock);
	fserror_mount(s);

	s->s_count = 1;
	atomic_set(&s->s_active, 1);
	mutex_init(&s->s_vfs_rename_mutex);
	lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
	init_rwsem(&s->s_dquot.dqio_sem);
	s->s_maxbytes = MAX_NON_LFS;
	s->s_op = &default_op;
	s->s_time_gran = 1000000000;
	s->s_time_min = TIME64_MIN;
	s->s_time_max = TIME64_MAX;

	s->s_shrink = shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
				     "sb-%s", type->name);
	if (!s->s_shrink)
		goto fail;

	s->s_shrink->scan_objects = super_cache_scan;
	s->s_shrink->count_objects = super_cache_count;
	s->s_shrink->batch = 1024;
	s->s_shrink->private_data = s;

	if (list_lru_init_memcg(&s->s_dentry_lru, s->s_shrink))
		goto fail;
	if (list_lru_init_memcg(&s->s_inode_lru, s->s_shrink))
		goto fail;
	s->s_min_writeback_pages = MIN_WRITEBACK_PAGES;
	return s;

fail:
	destroy_unused_super(s);
	return NULL;
}

/* Superblock refcounting  */

/*
 * Drop a superblock's refcount.  The caller must hold sb_lock.
 */
static void __put_super(struct super_block *s)
{
	if (!--s->s_count) {
		list_del_init(&s->s_list);
		WARN_ON(s->s_dentry_lru.node);
		WARN_ON(s->s_inode_lru.node);
		WARN_ON(s->s_mounts);
		call_rcu(&s->rcu, destroy_super_rcu);
	}
}

/**
 * put_super - drop a temporary reference to superblock
 * @sb: superblock in question
 *
 * Drops a temporary reference, frees superblock if there are no
 * references left.
 */
void put_super(struct super_block *sb)
{
	spin_lock(&sb_lock);
	__put_super(sb);
	spin_unlock(&sb_lock);
}

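/*
 * Unhash the superblock and broadcast SB_DEAD so concurrent sget{_fc}()
 * callers holding a temporary reference can proceed. Safe to call more
 * than once.
 */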
static void kill_super_notify(struct super_block *sb)
{
	lockdep_assert_not_held(&sb->s_umount);

	/* already notified earlier */
	if (sb->s_flags & SB_DEAD)
		return;

	/*
	 * Remove it from @fs_supers so it isn't found by new
	 * sget{_fc}() walkers anymore. Any concurrent mounter still
	 * managing to grab a temporary reference is guaranteed to
	 * already see SB_DYING and will wait until we notify them about
	 * SB_DEAD.
	 */
	spin_lock(&sb_lock);
	hlist_del_init(&sb->s_instances);
	spin_unlock(&sb_lock);

	/*
	 * Let concurrent mounts know that this thing is really dead.
	 * We don't need @sb->s_umount here as every concurrent caller
	 * will see SB_DYING and either discard the superblock or wait
	 * for SB_DEAD.
	 */
	super_wake(sb, SB_DEAD);
}

/**
 * deactivate_locked_super - drop an active reference to superblock
 * @s: superblock to deactivate
 *
 * Drops an active reference to superblock, converting it into a temporary
 * one if there are no other active references left. In that case we
 * tell fs driver to shut it down and drop the temporary reference we
 * had just acquired.
 *
 * Caller holds exclusive lock on superblock; that lock is released.
 */
void deactivate_locked_super(struct super_block *s)
{
	struct file_system_type *fs = s->s_type;

	if (atomic_dec_and_test(&s->s_active)) {
		shrinker_free(s->s_shrink);
		fs->kill_sb(s);

		kill_super_notify(s);

		/*
		 * Since list_lru_destroy() may sleep, we cannot call it from
		 * put_super(), where we hold the sb_lock. Therefore we destroy
		 * the lru lists right now.
		 */
		list_lru_destroy(&s->s_dentry_lru);
		list_lru_destroy(&s->s_inode_lru);

		put_filesystem(fs);
		put_super(s);
	} else {
		super_unlock_excl(s);
	}
}
EXPORT_SYMBOL(deactivate_locked_super);

/**
 * deactivate_super - drop an active reference to superblock
 * @s: superblock to deactivate
 *
 * Variant of deactivate_locked_super(), except that superblock is *not*
 * locked by caller. If we are going to drop the final active reference,
 * lock will be acquired prior to that.
 */
void deactivate_super(struct super_block *s)
{
	if (!atomic_add_unless(&s->s_active, -1, 1)) {
		__super_lock_excl(s);
		deactivate_locked_super(s);
	}
}
EXPORT_SYMBOL(deactivate_super);

/**
 * grab_super - acquire an active reference to a superblock
 * @sb: superblock to acquire
 *
 * Acquire a temporary reference on a superblock and try to trade it for
 * an active reference. This is used in sget{_fc}() to wait for a
 * superblock to either become SB_BORN or for it to pass through
 * sb->kill() and be marked as SB_DEAD.
 *
 * Return: This returns true if an active reference could be acquired,
 *         false if not.
 */
static bool grab_super(struct super_block *sb)
{
	bool locked;

	sb->s_count++;
	spin_unlock(&sb_lock);
	locked = super_lock_excl(sb);
	if (locked) {
		if (atomic_inc_not_zero(&sb->s_active)) {
			put_super(sb);
			return true;
		}
		super_unlock_excl(sb);
	}
	wait_var_event(&sb->s_flags, super_flags(sb, SB_DEAD));
	put_super(sb);
	return false;
}

/*
 * super_trylock_shared - try to grab ->s_umount shared
 * @sb: reference we are trying to grab
 *
 * Try to prevent fs shutdown. This is used in places where we
 * cannot take an active reference but we need to ensure that the
 * filesystem is not shut down while we are working on it. It returns
 * false if we cannot acquire s_umount or if we lose the race and
 * filesystem already got into shutdown, and returns true with the s_umount
 * lock held in read mode in case of success. On successful return,
 * the caller must drop the s_umount lock when done.
 *
 * Note that unlike get_super() et al. this one does *not* bump ->s_count.
 * The reason why it's safe is that we are OK with doing trylock instead
 * of down_read(). There are a couple of places that are OK with that, but
 * it's very much not a general-purpose interface.
 */
bool super_trylock_shared(struct super_block *sb)
{
	if (down_read_trylock(&sb->s_umount)) {
		if (!(sb->s_flags & SB_DYING) && sb->s_root &&
		    (sb->s_flags & SB_BORN))
			return true;
		super_unlock_shared(sb);
	}

	return false;
}

/**
 * retire_super - prevents superblock from being reused
 * @sb: superblock to retire
 *
 * The function marks the superblock to be ignored in the superblock test,
 * which prevents it from being reused for any new mounts. If the superblock
 * has a private bdi, it also unregisters it, but doesn't reduce the refcount
 * of the superblock to prevent potential races. The refcount is reduced by
 * generic_shutdown_super(). The function cannot be called concurrently with
 * generic_shutdown_super(). It is safe to call the function multiple times,
 * subsequent calls have no effect.
 *
 * The marker will affect the re-use only for block-device-based
 * superblocks. Other superblocks will still get marked if this function
 * is used, but that will not affect their reusability.
 */
void retire_super(struct super_block *sb)
{
	WARN_ON(!sb->s_bdev);
	__super_lock_excl(sb);
	if (sb->s_iflags & SB_I_PERSB_BDI) {
		bdi_unregister(sb->s_bdi);
		sb->s_iflags &= ~SB_I_PERSB_BDI;
	}
	sb->s_iflags |= SB_I_RETIRED;
	super_unlock_excl(sb);
}
EXPORT_SYMBOL(retire_super);

/**
 * generic_shutdown_super - common helper for ->kill_sb()
 * @sb: superblock to kill
 *
 * generic_shutdown_super() does all fs-independent work on superblock
 * shutdown.  Typical ->kill_sb() should pick all fs-specific objects
 * that need destruction out of superblock, call generic_shutdown_super()
 * and release aforementioned objects.  Note: dentries and inodes _are_
 * taken care of and do not need specific handling.
 *
 * Upon calling this function, the filesystem may no longer alter or
 * rearrange the set of dentries belonging to this super_block, nor may it
 * change the attachments of dentries to inodes.
 */
void generic_shutdown_super(struct super_block *sb)
{
	const struct super_operations *sop = sb->s_op;

	if (sb->s_root) {
		fsnotify_sb_delete(sb);
		shrink_dcache_for_umount(sb);
		sync_filesystem(sb);
		sb->s_flags &= ~SB_ACTIVE;

		fserror_unmount(sb);
		cgroup_writeback_umount(sb);

		/* Evict all inodes with zero refcount. */
		evict_inodes(sb);

		/*
		 * Clean up and evict any inodes that still have references due
		 * to the security policy.
		 */
		security_sb_delete(sb);

		if (sb->s_dio_done_wq) {
			destroy_workqueue(sb->s_dio_done_wq);
			sb->s_dio_done_wq = NULL;
		}

		if (sop->put_super)
			sop->put_super(sb);

		/*
		 * Now that all potentially-encrypted inodes have been evicted,
		 * the fscrypt keyring can be destroyed.
		 */
		fscrypt_destroy_keyring(sb);

		if (CHECK_DATA_CORRUPTION(!list_empty(&sb->s_inodes), NULL,
				"VFS: Busy inodes after unmount of %s (%s)",
				sb->s_id, sb->s_type->name)) {
			/*
			 * Adding a proper bailout path here would be hard, but
			 * we can at least make it more likely that a later
			 * iput_final() or such crashes cleanly.
			 */
			struct inode *inode;

			spin_lock(&sb->s_inode_list_lock);
			list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
				inode->i_op = VFS_PTR_POISON;
				inode->i_sb = VFS_PTR_POISON;
				inode->i_mapping = VFS_PTR_POISON;
			}
			spin_unlock(&sb->s_inode_list_lock);
		}
	}
	/*
	 * Broadcast to everyone that grabbed a temporary reference to this
	 * superblock before we removed it from @fs_supers that the superblock
	 * is dying. Every walker of @fs_supers outside of sget{_fc}() will now
	 * discard this superblock and treat it as dead.
	 *
	 * We leave the superblock on @fs_supers so it can be found by
	 * sget{_fc}() until we passed sb->kill_sb().
	 */
	super_wake(sb, SB_DYING);
	super_unlock_excl(sb);
	if (sb->s_bdi != &noop_backing_dev_info) {
		if (sb->s_iflags & SB_I_PERSB_BDI)
			bdi_unregister(sb->s_bdi);
		bdi_put(sb->s_bdi);
		sb->s_bdi = &noop_backing_dev_info;
	}
}
EXPORT_SYMBOL(generic_shutdown_super);

bool mount_capable(struct fs_context *fc)
{
	if (!(fc->fs_type->fs_flags & FS_USERNS_MOUNT))
		return capable(CAP_SYS_ADMIN);
	else
		return ns_capable(fc->user_ns, CAP_SYS_ADMIN);
}

/**
 * sget_fc - Find or create a superblock
 * @fc: Filesystem context.
 * @test: Comparison callback
 * @set: Setup callback
 *
 * Create a new superblock or find an existing one.
 *
 * The @test callback is used to find a matching existing superblock.
 * Whether or not the requested parameters in @fc are taken into account
 * is specific to the @test callback that is used. They may even be
 * completely ignored.
 *
 * If an extant superblock is matched, it will be returned unless:
 *
 * (1) the namespace of the filesystem context @fc and the extant
 *     superblock's namespace differ
 *
 * (2) the filesystem context @fc has requested that reusing an extant
 *     superblock is not allowed
 *
 * In both cases EBUSY will be returned.
 *
 * If no match is made, a new superblock will be allocated and basic
 * initialisation will be performed (s_type, s_fs_info and s_id will be
 * set and the @set callback will be invoked), the superblock will be
 * published and it will be returned in a partially constructed state
 * with SB_BORN and SB_ACTIVE as yet unset.
 *
 * Return: On success, an extant or newly created superblock is
 *         returned. On failure an error pointer is returned.
 */
struct super_block *sget_fc(struct fs_context *fc,
			    int (*test)(struct super_block *, struct fs_context *),
			    int (*set)(struct super_block *, struct fs_context *))
{
	struct super_block *s = NULL;
	struct super_block *old;
	struct user_namespace *user_ns = fc->global ? &init_user_ns : fc->user_ns;
	int err;

	/*
	 * Never allow s_user_ns != &init_user_ns when FS_USERNS_MOUNT is
	 * not set, as the filesystem is likely unprepared to handle it.
	 * This can happen when fsconfig() is called from init_user_ns with
	 * an fs_fd opened in another user namespace.
	 */
	if (user_ns != &init_user_ns && !(fc->fs_type->fs_flags & FS_USERNS_MOUNT)) {
		errorfc(fc, "VFS: Mounting from non-initial user namespace is not allowed");
		return ERR_PTR(-EPERM);
	}

retry:
	spin_lock(&sb_lock);
	if (test) {
		hlist_for_each_entry(old, &fc->fs_type->fs_supers, s_instances) {
			if (test(old, fc))
				goto share_extant_sb;
		}
	}
	if (!s) {
		spin_unlock(&sb_lock);
		s = alloc_super(fc->fs_type, fc->sb_flags, user_ns);
		if (!s)
			return ERR_PTR(-ENOMEM);
		goto retry;
	}

	s->s_fs_info = fc->s_fs_info;
	err = set(s, fc);
	if (err) {
		s->s_fs_info = NULL;
		spin_unlock(&sb_lock);
		destroy_unused_super(s);
		return ERR_PTR(err);
	}
	fc->s_fs_info = NULL;
	s->s_type = fc->fs_type;
	s->s_iflags |= fc->s_iflags;
	strscpy(s->s_id, s->s_type->name, sizeof(s->s_id));
	/*
	 * Make the superblock visible on @super_blocks and @fs_supers.
	 * It's in a nascent state and users should wait on SB_BORN or
	 * SB_DYING to be set.
	 */
	list_add_tail(&s->s_list, &super_blocks);
	hlist_add_head(&s->s_instances, &s->s_type->fs_supers);
	spin_unlock(&sb_lock);
	get_filesystem(s->s_type);
	shrinker_register(s->s_shrink);
	return s;

share_extant_sb:
	if (user_ns != old->s_user_ns || fc->exclusive) {
		spin_unlock(&sb_lock);
		destroy_unused_super(s);
		if (fc->exclusive)
			warnfc(fc, "reusing existing filesystem not allowed");
		else
			warnfc(fc, "reusing existing filesystem in another namespace not allowed");
		return ERR_PTR(-EBUSY);
	}
	if (!grab_super(old))
		goto retry;
	destroy_unused_super(s);
	return old;
}
EXPORT_SYMBOL(sget_fc);

/**
 * sget - find or create a superblock
 * @type: filesystem type superblock should belong to
 * @test: comparison callback
 * @set: setup callback
 * @flags: mount flags
 * @data: argument to each of them
 */
struct super_block *sget(struct file_system_type *type,
			 int (*test)(struct super_block *, void *),
			 int (*set)(struct super_block *, void *),
			 int flags,
			 void *data)
{
	struct user_namespace *user_ns = current_user_ns();
	struct super_block *s = NULL;
	struct super_block *old;
	int err;

retry:
	spin_lock(&sb_lock);
	if (test) {
		hlist_for_each_entry(old, &type->fs_supers, s_instances) {
			if (!test(old, data))
				continue;
			if (user_ns != old->s_user_ns) {
				spin_unlock(&sb_lock);
				destroy_unused_super(s);
				return ERR_PTR(-EBUSY);
			}
			if (!grab_super(old))
				goto retry;
			destroy_unused_super(s);
			return old;
		}
	}
	if (!s) {
		spin_unlock(&sb_lock);
		s = alloc_super(type, flags, user_ns);
		if (!s)
			return ERR_PTR(-ENOMEM);
		goto retry;
	}

	err = set(s, data);
	if (err) {
		spin_unlock(&sb_lock);
		destroy_unused_super(s);
		return ERR_PTR(err);
	}
	s->s_type = type;
	strscpy(s->s_id, type->name, sizeof(s->s_id));
	list_add_tail(&s->s_list, &super_blocks);
	hlist_add_head(&s->s_instances, &type->fs_supers);
	spin_unlock(&sb_lock);
	get_filesystem(type);
	shrinker_register(s->s_shrink);
	return s;
}
EXPORT_SYMBOL(sget);

void drop_super(struct super_block *sb)
{
	super_unlock_shared(sb);
	put_super(sb);
}
EXPORT_SYMBOL(drop_super);

void drop_super_exclusive(struct super_block *sb)
{
	super_unlock_excl(sb);
	put_super(sb);
}
EXPORT_SYMBOL(drop_super_exclusive);

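/* Flags controlling the behavior of __iterate_supers(). */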
enum super_iter_flags_t {
	SUPER_ITER_EXCL		= (1U << 0),
	SUPER_ITER_UNLOCKED	= (1U << 1),
	SUPER_ITER_REVERSE	= (1U << 2),
};

static inline struct super_block *first_super(enum super_iter_flags_t flags)
{
	if (flags & SUPER_ITER_REVERSE)
		return list_last_entry(&super_blocks, struct super_block, s_list);
	return list_first_entry(&super_blocks, struct super_block, s_list);
}

static inline struct super_block *next_super(struct super_block *sb,
					     enum super_iter_flags_t flags)
{
	if (flags & SUPER_ITER_REVERSE)
		return list_prev_entry(sb, s_list);
	return list_next_entry(sb, s_list);
}

static void __iterate_supers(void (*f)(struct super_block *, void *), void *arg,
			     enum super_iter_flags_t flags)
{
	struct super_block *sb, *p = NULL;
	bool excl = flags & SUPER_ITER_EXCL;

	guard(spinlock)(&sb_lock);

	for (sb = first_super(flags);
	     !list_entry_is_head(sb, &super_blocks, s_list);
	     sb = next_super(sb, flags)) {
		if (super_flags(sb, SB_DYING))
			continue;
		sb->s_count++;
		spin_unlock(&sb_lock);

		if (flags & SUPER_ITER_UNLOCKED) {
			f(sb, arg);
		} else if (super_lock(sb, excl)) {
			f(sb, arg);
			super_unlock(sb, excl);
		}

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
}

void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
{
	__iterate_supers(f, arg, 0);
}

/**
 * iterate_supers_type - call function for superblocks of given type
 * @type: fs type
 * @f: function to call
 * @arg: argument to pass to it
 *
 * Scans the superblock list and calls given function, passing it
 * locked superblock and given argument.
 */
void iterate_supers_type(struct file_system_type *type,
			 void (*f)(struct super_block *, void *), void *arg)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
		bool locked;

		if (super_flags(sb, SB_DYING))
			continue;

		sb->s_count++;
		spin_unlock(&sb_lock);

		locked = super_lock_shared(sb);
		if (locked) {
			f(sb, arg);
			super_unlock_shared(sb);
		}

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}
EXPORT_SYMBOL(iterate_supers_type);

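/*
 * Find the superblock for the given device number, lock it and return
 * it with a temporary reference held, or return NULL if no live
 * superblock matches.
 */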
struct super_block *user_get_super(dev_t dev, bool excl)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		bool locked;

		if (sb->s_dev != dev)
			continue;

		sb->s_count++;
		spin_unlock(&sb_lock);

		locked = super_lock(sb, excl);
		if (locked)
			return sb;

		spin_lock(&sb_lock);
		__put_super(sb);
		break;
	}
	spin_unlock(&sb_lock);
	return NULL;
}

/**
 * reconfigure_super - asks filesystem to change superblock parameters
 * @fc: The superblock and configuration
 *
 * Alters the configuration parameters of a live superblock.
 */
int reconfigure_super(struct fs_context *fc)
{
	struct super_block *sb = fc->root->d_sb;
	int retval;
	bool remount_ro = false;
	bool remount_rw = false;
	bool force = fc->sb_flags & SB_FORCE;

	if (fc->sb_flags_mask & ~MS_RMT_MASK)
		return -EINVAL;
	if (sb->s_writers.frozen != SB_UNFROZEN)
		return -EBUSY;

	retval = security_sb_remount(sb, fc->security);
	if (retval)
		return retval;

	if (fc->sb_flags_mask & SB_RDONLY) {
#ifdef CONFIG_BLOCK
		if (!(fc->sb_flags & SB_RDONLY) && sb->s_bdev &&
		    bdev_read_only(sb->s_bdev))
			return -EACCES;
#endif
		remount_rw = !(fc->sb_flags & SB_RDONLY) && sb_rdonly(sb);
		remount_ro = (fc->sb_flags & SB_RDONLY) && !sb_rdonly(sb);
	}

	if (remount_ro) {
		if (!hlist_empty(&sb->s_pins)) {
			super_unlock_excl(sb);
			group_pin_kill(&sb->s_pins);
			__super_lock_excl(sb);
			if (!sb->s_root)
				return 0;
			if (sb->s_writers.frozen != SB_UNFROZEN)
				return -EBUSY;
			remount_ro = !sb_rdonly(sb);
		}
	}
	shrink_dcache_sb(sb);

	/* If we are reconfiguring to RDONLY and current sb is read/write,
	 * make sure there are no files open for writing.
	 */
	if (remount_ro) {
		if (force) {
			sb_start_ro_state_change(sb);
		} else {
			retval = sb_prepare_remount_readonly(sb);
			if (retval)
				return retval;
		}
	} else if (remount_rw) {
		/*
		 * Protect filesystem's reconfigure code from writes from
		 * userspace until reconfigure finishes.
		 */
		sb_start_ro_state_change(sb);
	}

	if (fc->ops->reconfigure) {
		retval = fc->ops->reconfigure(fc);
		if (retval) {
			if (!force)
				goto cancel_readonly;
			/* If forced remount, go ahead despite any errors */
			WARN(1, "forced remount of a %s fs returned %i\n",
			     sb->s_type->name, retval);
		}
	}

	WRITE_ONCE(sb->s_flags, ((sb->s_flags & ~fc->sb_flags_mask) |
				 (fc->sb_flags & fc->sb_flags_mask)));
	sb_end_ro_state_change(sb);

	/*
	 * Some filesystems modify their metadata via some other path than the
	 * bdev buffer cache (eg. use a private mapping, or directories in
	 * pagecache, etc). Also file data modifications go via their own
	 * mappings. So if we try to mount readonly then copy the filesystem
	 * from bdev, we could get stale data, so invalidate it to give a best
	 * effort at coherency.
	 */
	if (remount_ro && sb->s_bdev)
		invalidate_bdev(sb->s_bdev);
	return 0;

cancel_readonly:
	sb_end_ro_state_change(sb);
	return retval;
}

static void do_emergency_remount_callback(struct super_block *sb, void *unused)
{
	if (sb->s_bdev && !sb_rdonly(sb)) {
		struct fs_context *fc;

		fc = fs_context_for_reconfigure(sb->s_root,
					SB_RDONLY | SB_FORCE, SB_RDONLY);
		if (!IS_ERR(fc)) {
			if (parse_monolithic_mount_data(fc, NULL) == 0)
				(void)reconfigure_super(fc);
			put_fs_context(fc);
		}
	}
}

static void do_emergency_remount(struct work_struct *work)
{
	__iterate_supers(do_emergency_remount_callback, NULL,
			 SUPER_ITER_EXCL | SUPER_ITER_REVERSE);
	kfree(work);
	printk("Emergency Remount complete\n");
}

void emergency_remount(void)
{
	struct work_struct *work;

	work = kmalloc_obj(*work, GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_emergency_remount);
		schedule_work(work);
	}
}

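/* Thaw the bdev (if any) first, then the filesystem itself. */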
static void do_thaw_all_callback(struct super_block *sb, void *unused)
{
	if (IS_ENABLED(CONFIG_BLOCK))
		while (sb->s_bdev && !bdev_thaw(sb->s_bdev))
			pr_warn("Emergency Thaw on %pg\n", sb->s_bdev);
	thaw_super_locked(sb, FREEZE_HOLDER_USERSPACE, NULL);
}

static void do_thaw_all(struct work_struct *work)
{
	__iterate_supers(do_thaw_all_callback, NULL, SUPER_ITER_EXCL);
	kfree(work);
	printk(KERN_WARNING "Emergency Thaw complete\n");
}

/**
 * emergency_thaw_all -- forcibly thaw every frozen filesystem
 *
 * Used for emergency unfreeze of all filesystems via SysRq
 */
void emergency_thaw_all(void)
{
	struct work_struct *work;

	work = kmalloc_obj(*work, GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_thaw_all);
		schedule_work(work);
	}
}

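/*
 * Try to promote our temporary reference to an active one. Fails if the
 * superblock is dying or its active count has already dropped to zero.
 */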
static inline bool get_active_super(struct super_block *sb)
{
	bool active = false;

	if (super_lock_excl(sb)) {
		active = atomic_inc_not_zero(&sb->s_active);
		super_unlock_excl(sb);
	}
	return active;
}

static const char *filesystems_freeze_ptr = "filesystems_freeze";

static void filesystems_freeze_callback(struct super_block *sb, void *freeze_all_ptr)
{
	if (!sb->s_op->freeze_fs && !sb->s_op->freeze_super)
		return;

	if (!freeze_all_ptr && !(sb->s_type->fs_flags & FS_POWER_FREEZE))
		return;

	if (!get_active_super(sb))
		return;

	if (sb->s_op->freeze_super)
		sb->s_op->freeze_super(sb, FREEZE_EXCL | FREEZE_HOLDER_KERNEL,
				       filesystems_freeze_ptr);
	else
		freeze_super(sb, FREEZE_EXCL | FREEZE_HOLDER_KERNEL,
			     filesystems_freeze_ptr);

	deactivate_super(sb);
}

void filesystems_freeze(bool freeze_all)
{
	void *freeze_all_ptr = NULL;

	if (freeze_all)
		freeze_all_ptr = &freeze_all;
	__iterate_supers(filesystems_freeze_callback, freeze_all_ptr,
			 SUPER_ITER_UNLOCKED | SUPER_ITER_REVERSE);
}

static void filesystems_thaw_callback(struct super_block *sb, void *unused)
{
	if (!sb->s_op->freeze_fs && !sb->s_op->freeze_super)
		return;

	if (!get_active_super(sb))
		return;

	if (sb->s_op->thaw_super)
		sb->s_op->thaw_super(sb, FREEZE_EXCL | FREEZE_HOLDER_KERNEL,
				     filesystems_freeze_ptr);
	else
		thaw_super(sb, FREEZE_EXCL | FREEZE_HOLDER_KERNEL,
			   filesystems_freeze_ptr);

	deactivate_super(sb);
}

void filesystems_thaw(void)
{
	__iterate_supers(filesystems_thaw_callback, NULL, SUPER_ITER_UNLOCKED);
}

static DEFINE_IDA(unnamed_dev_ida);

/**
 * get_anon_bdev - Allocate a block device for filesystems which don't have one.
 * @p: Pointer to a dev_t.
 *
 * Filesystems which don't use real block devices can call this function
 * to allocate a virtual block device.
 *
 * Context: Any context.  Frequently called while holding sb_lock.
 * Return: 0 on success, -EMFILE if there are no anonymous bdevs left
 * or -ENOMEM if memory allocation failed.
 */
int get_anon_bdev(dev_t *p)
{
	int dev;

	/*
	 * Many userspace utilities consider an FSID of 0 invalid.
	 * Always return at least 1 from get_anon_bdev.
	 */
	dev = ida_alloc_range(&unnamed_dev_ida, 1, (1 << MINORBITS) - 1,
			GFP_ATOMIC);
	if (dev == -ENOSPC)
		dev = -EMFILE;
	if (dev < 0)
		return dev;

	*p = MKDEV(0, dev);
	return 0;
}
EXPORT_SYMBOL(get_anon_bdev);

void free_anon_bdev(dev_t dev)
{
	ida_free(&unnamed_dev_ida, MINOR(dev));
}
EXPORT_SYMBOL(free_anon_bdev);

int set_anon_super(struct super_block *s, void *data)
{
	return get_anon_bdev(&s->s_dev);
}
EXPORT_SYMBOL(set_anon_super);

void kill_anon_super(struct super_block *sb)
{
	dev_t dev = sb->s_dev;

	generic_shutdown_super(sb);
	kill_super_notify(sb);
	free_anon_bdev(dev);
}
EXPORT_SYMBOL(kill_anon_super);

int set_anon_super_fc(struct super_block *sb, struct fs_context *fc)
{
	return set_anon_super(sb, NULL);
}
EXPORT_SYMBOL(set_anon_super_fc);

static int test_keyed_super(struct super_block *sb, struct fs_context *fc)
{
	return sb->s_fs_info == fc->s_fs_info;
}

static int test_single_super(struct super_block *s, struct fs_context *fc)
{
	return 1;
}

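/*
 * Common helper for the get_tree_*() variants below: find or create an
 * anonymous superblock, fill it on first use and return its root in
 * @fc->root.
 */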
static int vfs_get_super(struct fs_context *fc,
		int (*test)(struct super_block *, struct fs_context *),
		int (*fill_super)(struct super_block *sb,
				  struct fs_context *fc))
{
	struct super_block *sb;
	int err;

	sb = sget_fc(fc, test, set_anon_super_fc);
	if (IS_ERR(sb))
		return PTR_ERR(sb);

	if (!sb->s_root) {
		err = fill_super(sb, fc);
		if (err)
			goto error;

		sb->s_flags |= SB_ACTIVE;
	}

	fc->root = dget(sb->s_root);
	return 0;

error:
	deactivate_locked_super(sb);
	return err;
}

int get_tree_nodev(struct fs_context *fc,
		  int (*fill_super)(struct super_block *sb,
				    struct fs_context *fc))
{
	return vfs_get_super(fc, NULL, fill_super);
}
EXPORT_SYMBOL(get_tree_nodev);

int get_tree_single(struct fs_context *fc,
		  int (*fill_super)(struct super_block *sb,
				    struct fs_context *fc))
{
	return vfs_get_super(fc, test_single_super, fill_super);
}
EXPORT_SYMBOL(get_tree_single);

int get_tree_keyed(struct fs_context *fc,
		  int (*fill_super)(struct super_block *sb,
				    struct fs_context *fc),
		void *key)
{
	fc->s_fs_info = key;
	return vfs_get_super(fc, test_keyed_super, fill_super);
}
EXPORT_SYMBOL(get_tree_keyed);

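/* Record the device number of the backing block device. */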
static int set_bdev_super(struct super_block *s, void *data)
{
	s->s_dev = *(dev_t *)data;
	return 0;
}

static int super_s_dev_set(struct super_block *s, struct fs_context *fc)
{
	return set_bdev_super(s, fc->sget_key);
}

static int super_s_dev_test(struct super_block *s, struct fs_context *fc)
{
	return !(s->s_iflags & SB_I_RETIRED) &&
		s->s_dev == *(dev_t *)fc->sget_key;
}

/**
 * sget_dev - Find or create a superblock by device number
 * @fc: Filesystem context.
 * @dev: device number
 *
 * Find or create a superblock using the provided device number that
 * will be stored in fc->sget_key.
 *
 * If an extant superblock is matched, then that will be returned with
 * an elevated reference count that the caller must transfer or discard.
 *
 * If no match is made, a new superblock will be allocated and basic
 * initialisation will be performed (s_type, s_fs_info, s_id, s_dev will
 * be set). The superblock will be published and it will be returned in
 * a partially constructed state with SB_BORN and SB_ACTIVE as yet
 * unset.
 *
 * Return: an existing or newly created superblock on success, an error
 *         pointer on failure.
 */
struct super_block *sget_dev(struct fs_context *fc, dev_t dev)
{
	fc->sget_key = &dev;
	return sget_fc(fc, super_s_dev_test, super_s_dev_set);
}
EXPORT_SYMBOL(sget_dev);

#ifdef CONFIG_BLOCK
/*
 * Lock the superblock that is holder of the bdev. Returns the superblock
 * pointer if we successfully locked the superblock and it is alive. Otherwise
 * we return NULL and just unlock bdev->bd_holder_lock.
 *
 * The function must be called with bdev->bd_holder_lock and releases it.
 */
static struct super_block *bdev_super_lock(struct block_device *bdev, bool excl)
	__releases(&bdev->bd_holder_lock)
{
	struct super_block *sb = bdev->bd_holder;
	bool locked;

	lockdep_assert_held(&bdev->bd_holder_lock);
	lockdep_assert_not_held(&sb->s_umount);
	lockdep_assert_not_held(&bdev->bd_disk->open_mutex);

	/* Make sure sb doesn't go away from under us */
	spin_lock(&sb_lock);
	sb->s_count++;
	spin_unlock(&sb_lock);

	mutex_unlock(&bdev->bd_holder_lock);

	locked = super_lock(sb, excl);

	/*
	 * If the superblock wasn't already SB_DYING then we hold
	 * s_umount and can safely drop our temporary reference.
	 */
	put_super(sb);

	if (!locked)
		return NULL;

	if (!sb->s_root || !(sb->s_flags & SB_ACTIVE)) {
		super_unlock(sb, excl);
		return NULL;
	}

	return sb;
}

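/*
 * The block layer calls this when the device is marked dead. Give the
 * filesystem a chance to handle the loss of just this device via
 * ->remove_bdev(); if that fails or isn't implemented, shut the whole
 * filesystem down.
 */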
static void fs_bdev_mark_dead(struct block_device *bdev, bool surprise)
{
	struct super_block *sb;

	sb = bdev_super_lock(bdev, false);
	if (!sb)
		return;

	if (sb->s_op->remove_bdev) {
		int ret;

		ret = sb->s_op->remove_bdev(sb, bdev);
		if (!ret) {
			super_unlock_shared(sb);
			return;
		}
		/* Fallback to shutdown. */
	}

	if (!surprise)
		sync_filesystem(sb);
	shrink_dcache_sb(sb);
	evict_inodes(sb);
	if (sb->s_op->shutdown)
		sb->s_op->shutdown(sb);

	super_unlock_shared(sb);
}

static void fs_bdev_sync(struct block_device *bdev)
{
	struct super_block *sb;

	sb = bdev_super_lock(bdev, false);
	if (!sb)
		return;

	sync_filesystem(sb);
	super_unlock_shared(sb);
}

static struct super_block *get_bdev_super(struct block_device *bdev)
{
	bool active = false;
	struct super_block *sb;

	sb = bdev_super_lock(bdev, true);
	if (sb) {
		active = atomic_inc_not_zero(&sb->s_active);
		super_unlock_excl(sb);
	}
	if (!active)
		return NULL;
	return sb;
}

/**
 * fs_bdev_freeze - freeze owning filesystem of block device
 * @bdev: block device
 *
 * Freeze the filesystem that owns this block device if it is still
 * active.
 *
 * A filesystem that owns multiple block devices may be frozen from each
 * block device and won't be unfrozen until all block devices are
 * unfrozen. Each block device can only freeze the filesystem once as we
 * nest freezes for block devices in the block layer.
 *
 * Return: If the freeze was successful zero is returned. If the freeze
 *         failed a negative error code is returned.
 */
static int fs_bdev_freeze(struct block_device *bdev)
{
	struct super_block *sb;
	int error = 0;

	lockdep_assert_held(&bdev->bd_fsfreeze_mutex);

	sb = get_bdev_super(bdev);
	if (!sb)
		return -EINVAL;

	if (sb->s_op->freeze_super)
		error = sb->s_op->freeze_super(sb,
				FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE, NULL);
	else
		error = freeze_super(sb,
				FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE, NULL);
	if (!error)
		error = sync_blockdev(bdev);
	deactivate_super(sb);
	return error;
}

/**
 * fs_bdev_thaw - thaw owning filesystem of block device
 * @bdev: block device
 *
 * Thaw the filesystem that owns this block device.
 *
 * A filesystem that owns multiple block devices may be frozen from each
 * block device and won't be unfrozen until all block devices are
 * unfrozen. Each block device can only freeze the filesystem once as we
 * nest freezes for block devices in the block layer.
 *
 * Return: If the thaw was successful zero is returned. If the thaw
 *         failed a negative error code is returned. If this function
 *         returns zero it doesn't mean that the filesystem is unfrozen
 *         as it may have been frozen multiple times (kernel may hold a
 *         freeze or might be frozen from other block devices).
 */
static int fs_bdev_thaw(struct block_device *bdev)
{
	struct super_block *sb;
	int error;

	lockdep_assert_held(&bdev->bd_fsfreeze_mutex);

	/*
	 * The block device may have been frozen before it was claimed by a
	 * filesystem. Concurrently another process might try to mount that
	 * frozen block device and has temporarily claimed the block device for
	 * that purpose causing a concurrent fs_bdev_thaw() to end up here. The
	 * mounter is already about to abort mounting because they still saw an
	 * elevated bdev->bd_fsfreeze_count so get_bdev_super() will return
	 * NULL in that case.
	 */
	sb = get_bdev_super(bdev);
	if (!sb)
		return -EINVAL;

	if (sb->s_op->thaw_super)
		error = sb->s_op->thaw_super(sb,
				FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE, NULL);
	else
		error = thaw_super(sb,
				FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE, NULL);
	deactivate_super(sb);
	return error;
}

const struct blk_holder_ops fs_holder_ops = {
	.mark_dead		= fs_bdev_mark_dead,
	.sync			= fs_bdev_sync,
	.freeze			= fs_bdev_freeze,
	.thaw			= fs_bdev_thaw,
};
EXPORT_SYMBOL_GPL(fs_holder_ops);

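/*
 * Open the backing block device for @sb and wire it up: s_bdev,
 * s_bdev_file, s_bdi, s_id and the block size are all set here.
 */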
int setup_bdev_super(struct super_block *sb, int sb_flags,
		struct fs_context *fc)
{
	blk_mode_t mode = sb_open_mode(sb_flags);
	struct file *bdev_file;
	struct block_device *bdev;

	bdev_file = bdev_file_open_by_dev(sb->s_dev, mode, sb, &fs_holder_ops);
	if (IS_ERR(bdev_file)) {
		if (fc)
			errorf(fc, "%s: Can't open blockdev", fc->source);
		return PTR_ERR(bdev_file);
	}
	bdev = file_bdev(bdev_file);

	/*
	 * This really should be in blkdev_get_by_dev, but right now can't due
	 * to legacy issues that require us to allow opening a block device node
	 * writable from userspace even for a read-only block device.
	 */
	if ((mode & BLK_OPEN_WRITE) && bdev_read_only(bdev)) {
		bdev_fput(bdev_file);
		return -EACCES;
	}

	/*
	 * It is enough to check bdev was not frozen before we set
	 * s_bdev as freezing will wait until SB_BORN is set.
	 */
	if (atomic_read(&bdev->bd_fsfreeze_count) > 0) {
		if (fc)
			warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
		bdev_fput(bdev_file);
		return -EBUSY;
	}
	spin_lock(&sb_lock);
	sb->s_bdev_file = bdev_file;
	sb->s_bdev = bdev;
	sb->s_bdi = bdi_get(bdev->bd_disk->bdi);
	if (bdev_stable_writes(bdev))
		sb->s_iflags |= SB_I_STABLE_WRITES;
	spin_unlock(&sb_lock);

	snprintf(sb->s_id, sizeof(sb->s_id), "%pg", bdev);
	shrinker_debugfs_rename(sb->s_shrink, "sb-%s:%s", sb->s_type->name,
				sb->s_id);
	sb_set_blocksize(sb, block_size(bdev));
	return 0;
}
EXPORT_SYMBOL_GPL(setup_bdev_super);

/**
 * get_tree_bdev_flags - Get a superblock based on a single block device
 * @fc: The filesystem context holding the parameters
 * @fill_super: Helper to initialise a new superblock
 * @flags: GET_TREE_BDEV_* flags
 */
int get_tree_bdev_flags(struct fs_context *fc,
		int (*fill_super)(struct super_block *sb,
				  struct fs_context *fc), unsigned int flags)
{
	struct super_block *s;
	int error = 0;
	dev_t dev;

	if (!fc->source)
		return invalf(fc, "No source specified");

	error = lookup_bdev(fc->source, &dev);
	if (error) {
		if (!(flags & GET_TREE_BDEV_QUIET_LOOKUP))
			errorf(fc, "%s: Can't lookup blockdev", fc->source);
		return error;
	}
	fc->sb_flags |= SB_NOSEC;
	s = sget_dev(fc, dev);
	if (IS_ERR(s))
		return PTR_ERR(s);

	if (s->s_root) {
		/* Don't summarily change the RO/RW state. */
		if ((fc->sb_flags ^ s->s_flags) & SB_RDONLY) {
			warnf(fc, "%pg: Can't mount, would change RO state", s->s_bdev);
			deactivate_locked_super(s);
			return -EBUSY;
		}
	} else {
		error = setup_bdev_super(s, fc->sb_flags, fc);
		if (!error)
			error = fill_super(s, fc);
		if (error) {
			deactivate_locked_super(s);
			return error;
		}
		s->s_flags |= SB_ACTIVE;
	}

	BUG_ON(fc->root);
	fc->root = dget(s->s_root);
	return 0;
}
EXPORT_SYMBOL_GPL(get_tree_bdev_flags);

/**
 * get_tree_bdev - Get a superblock based on a single block device
 * @fc: The filesystem context holding the parameters
 * @fill_super: Helper to initialise a new superblock
 */
int get_tree_bdev(struct fs_context *fc,
		int (*fill_super)(struct super_block *,
				  struct fs_context *))
{
	return get_tree_bdev_flags(fc, fill_super, 0);
}
EXPORT_SYMBOL(get_tree_bdev);

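/*
 * Counterpart of setup_bdev_super(): shut the filesystem down and
 * release the backing block device.
 */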
kill_block_super(struct super_block * sb)1721 void kill_block_super(struct super_block *sb)
1722 {
1723 struct block_device *bdev = sb->s_bdev;
1724
1725 generic_shutdown_super(sb);
1726 if (bdev) {
1727 sync_blockdev(bdev);
1728 bdev_fput(sb->s_bdev_file);
1729 }
1730 }
1731
1732 EXPORT_SYMBOL(kill_block_super);
1733 #endif
1734
1735 /**
1736 * vfs_get_tree - Get the mountable root
1737 * @fc: The superblock configuration context.
1738 *
1739 * The filesystem is invoked to get or create a superblock which can then later
1740 * be used for mounting. The filesystem places a pointer to the root to be
1741 * used for mounting in @fc->root.
1742 */
vfs_get_tree(struct fs_context * fc)1743 int vfs_get_tree(struct fs_context *fc)
1744 {
1745 struct super_block *sb;
1746 int error;
1747
1748 if (fc->root)
1749 return -EBUSY;
1750
1751 /* Get the mountable root in fc->root, with a ref on the root and a ref
1752 * on the superblock.
1753 */
1754 error = fc->ops->get_tree(fc);
1755 if (error < 0)
1756 return error;
1757
1758 if (!fc->root) {
1759 pr_err("Filesystem %s get_tree() didn't set fc->root, returned %i\n",
1760 fc->fs_type->name, error);
1761 /* We don't know what the locking state of the superblock is -
1762 * if there is a superblock.
1763 */
1764 BUG();
1765 }
1766
1767 sb = fc->root->d_sb;
1768 WARN_ON(!sb->s_bdi);
1769
1770 /*
1771 * super_wake() contains a memory barrier which also care of
1772 * ordering for super_cache_count(). We place it before setting
1773 * SB_BORN as the data dependency between the two functions is
1774 * the superblock structure contents that we just set up, not
1775 * the SB_BORN flag.
1776 */
1777 super_wake(sb, SB_BORN);
1778
1779 error = security_sb_set_mnt_opts(sb, fc->security, 0, NULL);
1780 if (unlikely(error)) {
1781 fc_drop_locked(fc);
1782 return error;
1783 }
1784
1785 /*
1786 * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
1787 * but s_maxbytes was an unsigned long long for many releases. Throw
1788 * this warning for a little while to try and catch filesystems that
1789 * violate this rule.
1790 */
1791 WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
1792 "negative value (%lld)\n", fc->fs_type->name, sb->s_maxbytes);
1793
1794 return 0;
1795 }
1796 EXPORT_SYMBOL(vfs_get_tree);
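
/*
 * Example: a condensed sketch of how the mount path drives vfs_get_tree(),
 * in the spirit of fc_mount(). On success vfs_get_tree() returns with
 * s_umount held, so the caller drops it before creating the mount; error
 * handling is elided here.
 *
 *	struct fs_context *fc = fs_context_for_mount(type, sb_flags);
 *	// ... parse mount options into fc ...
 *	err = vfs_get_tree(fc);
 *	if (!err) {
 *		up_write(&fc->root->d_sb->s_umount);
 *		mnt = vfs_create_mount(fc);
 *	}
 *	put_fs_context(fc);
 */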
1797
1798 /*
1799 * Set up a private BDI for the given superblock. It gets automatically
1800 * cleaned up in generic_shutdown_super().
1801 */
1802 int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
1803 {
1804 struct backing_dev_info *bdi;
1805 int err;
1806 va_list args;
1807
1808 bdi = bdi_alloc(NUMA_NO_NODE);
1809 if (!bdi)
1810 return -ENOMEM;
1811
1812 va_start(args, fmt);
1813 err = bdi_register_va(bdi, fmt, args);
1814 va_end(args);
1815 if (err) {
1816 bdi_put(bdi);
1817 return err;
1818 }
1819 WARN_ON(sb->s_bdi != &noop_backing_dev_info);
1820 sb->s_bdi = bdi;
1821 sb->s_iflags |= SB_I_PERSB_BDI;
1822
1823 return 0;
1824 }
1825 EXPORT_SYMBOL(super_setup_bdi_name);
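
/*
 * Example: a filesystem with no backing block device would typically call
 * this from its fill_super, naming the BDI after something stable. A
 * hedged sketch; the "myfs" name and fsid variable are hypothetical:
 *
 *	err = super_setup_bdi_name(sb, "myfs-%llu", fsid);
 *	if (err)
 *		return err;
 *	sb->s_bdi->ra_pages = 0;	// e.g. a network fs disabling readahead
 */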
1826
1827 /*
1828 * Set up a private BDI for the given superblock. It gets automatically
1829 * cleaned up in generic_shutdown_super().
1830 */
1831 int super_setup_bdi(struct super_block *sb)
1832 {
1833 static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
1834
1835 return super_setup_bdi_name(sb, "%.28s-%ld", sb->s_type->name,
1836 atomic_long_inc_return(&bdi_seq));
1837 }
1838 EXPORT_SYMBOL(super_setup_bdi);
1839
1840 /**
1841 * sb_wait_write - wait until all writers to the given file system finish
1842 * @sb: the super for which we wait
1843 * @level: type of writers we wait for (normal vs page fault)
1844 *
1845 * This function waits until there are no writers of the given type to the
1846 * given file system.
1847 */
1848 static void sb_wait_write(struct super_block *sb, int level)
1849 {
1850 percpu_down_write(sb->s_writers.rw_sem + level-1);
1851 }
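
/*
 * The writer side of this handshake is sb_start_write()/sb_end_write()
 * (with the _pagefault and _intwrite variants for the deeper levels),
 * which take the matching percpu rwsem for read. A sketch of a write
 * path that a freeze at SB_FREEZE_WRITE would hold off:
 *
 *	sb_start_write(sb);	// sleeps while this level is frozen
 *	// ... dirty pages, start a transaction, etc. ...
 *	sb_end_write(sb);
 */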
1852
1853 /*
1854 * We are going to return to userspace and forget about these locks; the
1855 * ownership goes to the caller of thaw_super(), which does unlock().
1856 */
1857 static void lockdep_sb_freeze_release(struct super_block *sb)
1858 {
1859 int level;
1860
1861 for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
1862 percpu_rwsem_release(sb->s_writers.rw_sem + level, _THIS_IP_);
1863 }
1864
1865 /*
1866 * Tell lockdep we are holding these locks before we call ->unfreeze_fs(sb).
1867 */
1868 static void lockdep_sb_freeze_acquire(struct super_block *sb)
1869 {
1870 int level;
1871
1872 for (level = 0; level < SB_FREEZE_LEVELS; ++level)
1873 percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
1874 }
1875
1876 static void sb_freeze_unlock(struct super_block *sb, int level)
1877 {
1878 for (level--; level >= 0; level--)
1879 percpu_up_write(sb->s_writers.rw_sem + level);
1880 }
1881
1882 static int wait_for_partially_frozen(struct super_block *sb)
1883 {
1884 int ret = 0;
1885
1886 do {
1887 unsigned short old = sb->s_writers.frozen;
1888
1889 up_write(&sb->s_umount);
1890 ret = wait_var_event_killable(&sb->s_writers.frozen,
1891 sb->s_writers.frozen != old);
1892 down_write(&sb->s_umount);
1893 } while (ret == 0 &&
1894 sb->s_writers.frozen != SB_UNFROZEN &&
1895 sb->s_writers.frozen != SB_FREEZE_COMPLETE);
1896
1897 return ret;
1898 }
1899
1900 #define FREEZE_HOLDERS (FREEZE_HOLDER_KERNEL | FREEZE_HOLDER_USERSPACE)
1901 #define FREEZE_FLAGS (FREEZE_HOLDERS | FREEZE_MAY_NEST | FREEZE_EXCL)
1902
1903 static inline int freeze_inc(struct super_block *sb, enum freeze_holder who)
1904 {
1905 WARN_ON_ONCE((who & ~FREEZE_FLAGS));
1906 WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1);
1907
1908 if (who & FREEZE_HOLDER_KERNEL)
1909 ++sb->s_writers.freeze_kcount;
1910 if (who & FREEZE_HOLDER_USERSPACE)
1911 ++sb->s_writers.freeze_ucount;
1912 return sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount;
1913 }
1914
1915 static inline int freeze_dec(struct super_block *sb, enum freeze_holder who)
1916 {
1917 WARN_ON_ONCE((who & ~FREEZE_FLAGS));
1918 WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1);
1919
1920 if ((who & FREEZE_HOLDER_KERNEL) && sb->s_writers.freeze_kcount)
1921 --sb->s_writers.freeze_kcount;
1922 if ((who & FREEZE_HOLDER_USERSPACE) && sb->s_writers.freeze_ucount)
1923 --sb->s_writers.freeze_ucount;
1924 return sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount;
1925 }
1926
1927 static inline bool may_freeze(struct super_block *sb, enum freeze_holder who,
1928 const void *freeze_owner)
1929 {
1930 lockdep_assert_held(&sb->s_umount);
1931
1932 WARN_ON_ONCE((who & ~FREEZE_FLAGS));
1933 WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1);
1934
1935 if (who & FREEZE_EXCL) {
1936 if (WARN_ON_ONCE(!(who & FREEZE_HOLDER_KERNEL)))
1937 return false;
1938 if (WARN_ON_ONCE(who & ~(FREEZE_EXCL | FREEZE_HOLDER_KERNEL)))
1939 return false;
1940 if (WARN_ON_ONCE(!freeze_owner))
1941 return false;
1942 /* This freeze already has a specific owner. */
1943 if (sb->s_writers.freeze_owner)
1944 return false;
1945 /*
1946 * This is already frozen multiple times so we're just
1947 * going to take a reference count and mark the freeze as
1948 * being owned by the caller.
1949 */
1950 if (sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount)
1951 sb->s_writers.freeze_owner = freeze_owner;
1952 return true;
1953 }
1954
1955 if (who & FREEZE_HOLDER_KERNEL)
1956 return (who & FREEZE_MAY_NEST) ||
1957 sb->s_writers.freeze_kcount == 0;
1958 if (who & FREEZE_HOLDER_USERSPACE)
1959 return (who & FREEZE_MAY_NEST) ||
1960 sb->s_writers.freeze_ucount == 0;
1961 return false;
1962 }
1963
1964 static inline bool may_unfreeze(struct super_block *sb, enum freeze_holder who,
1965 const void *freeze_owner)
1966 {
1967 lockdep_assert_held(&sb->s_umount);
1968
1969 WARN_ON_ONCE((who & ~FREEZE_FLAGS));
1970 WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1);
1971
1972 if (who & FREEZE_EXCL) {
1973 if (WARN_ON_ONCE(!(who & FREEZE_HOLDER_KERNEL)))
1974 return false;
1975 if (WARN_ON_ONCE(who & ~(FREEZE_EXCL | FREEZE_HOLDER_KERNEL)))
1976 return false;
1977 if (WARN_ON_ONCE(!freeze_owner))
1978 return false;
1979 if (WARN_ON_ONCE(sb->s_writers.freeze_kcount == 0))
1980 return false;
1981 /* This isn't exclusively frozen. */
1982 if (!sb->s_writers.freeze_owner)
1983 return false;
1984 /* This isn't exclusively frozen by us. */
1985 if (sb->s_writers.freeze_owner != freeze_owner)
1986 return false;
1987 /*
1988 * This is still frozen multiple times so we're just
1989 * going to drop our reference count and undo our
1990 * exclusive freeze.
1991 */
1992 if ((sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount) > 1)
1993 sb->s_writers.freeze_owner = NULL;
1994 return true;
1995 }
1996
1997 if (who & FREEZE_HOLDER_KERNEL) {
1998 /*
1999 * Someone's trying to steal the reference belonging to
2000 * @sb->s_writers.freeze_owner.
2001 */
2002 if (sb->s_writers.freeze_kcount == 1 &&
2003 sb->s_writers.freeze_owner)
2004 return false;
2005 return sb->s_writers.freeze_kcount > 0;
2006 }
2007
2008 if (who & FREEZE_HOLDER_USERSPACE)
2009 return sb->s_writers.freeze_ucount > 0;
2010
2011 return false;
2012 }
2013
2014 /**
2015 * freeze_super - lock the filesystem and force it into a consistent state
2016 * @sb: the super to lock
2017 * @who: context that wants to freeze
2018 * @freeze_owner: owner of the freeze
2019 *
2020 * Syncs the super to make sure the filesystem is consistent and calls the fs's
2021 * freeze_fs. Subsequent calls to this without first thawing the fs may return
2022 * -EBUSY.
2023 *
2024 * @who should be:
2025 * * %FREEZE_HOLDER_USERSPACE if userspace wants to freeze the fs;
2026 * * %FREEZE_HOLDER_KERNEL if the kernel wants to freeze the fs;
2027 * * %FREEZE_MAY_NEST whether nesting freeze and thaw requests is allowed.
2028 *
2029 * The @who argument distinguishes between the kernel and userspace trying to
2030 * freeze the filesystem. Although there cannot be multiple kernel freezes or
2031 * multiple userspace freezes in effect at any given time, the kernel and
2032 * userspace can both hold a filesystem frozen. The filesystem remains frozen
2033 * until there are no kernel or userspace freezes in effect.
2034 *
2035 * A filesystem may hold multiple devices and thus a filesystem may be
2036 * frozen through the block layer via multiple block devices. In this
2037 * case the request is marked as being allowed to nest by passing
2038 * FREEZE_MAY_NEST. The filesystem remains frozen until all block
2039 * devices are unfrozen. If multiple freezes are attempted without
2040 * FREEZE_MAY_NEST, -EBUSY will be returned.
2041 *
2042 * During this function, sb->s_writers.frozen goes through these values:
2043 *
2044 * SB_UNFROZEN: File system is normal, all writes progress as usual.
2045 *
2046 * SB_FREEZE_WRITE: The file system is in the process of being frozen. New
2047 * writes should be blocked, though page faults are still allowed. We wait for
2048 * all writes to complete and then proceed to the next stage.
2049 *
2050 * SB_FREEZE_PAGEFAULT: Freezing continues. Now also page faults are blocked
2051 * but internal fs threads can still modify the filesystem (although they
2052 * should not dirty new pages or inodes), writeback can run etc. After waiting
2053 * for all running page faults we sync the filesystem which will clean all
2054 * dirty pages and inodes (no new dirty pages or inodes can be created when
2055 * sync is running).
2056 *
2057 * SB_FREEZE_FS: The file system is frozen. Now all internal sources of fs
2058 * modification are blocked (e.g. XFS preallocation truncation on inode
2059 * reclaim). This is usually implemented by blocking new transactions for
2060 * filesystems that have them and need this additional guard. After all
2061 * internal writers are finished we call ->freeze_fs() to finish filesystem
2062 * freezing. Then we transition to SB_FREEZE_COMPLETE state. This state is
2063 * mostly auxiliary for filesystems to verify they do not modify frozen fs.
2064 *
2065 * sb->s_writers.frozen is protected by sb->s_umount.
2066 *
2067 * Return: If the freeze was successful zero is returned. If the freeze
2068 * failed a negative error code is returned.
2069 */
2070 int freeze_super(struct super_block *sb, enum freeze_holder who, const void *freeze_owner)
2071 {
2072 int ret;
2073
2074 if (!super_lock_excl(sb)) {
2075 WARN_ON_ONCE("Dying superblock while freezing!");
2076 return -EINVAL;
2077 }
2078 atomic_inc(&sb->s_active);
2079
2080 retry:
2081 if (sb->s_writers.frozen == SB_FREEZE_COMPLETE) {
2082 if (may_freeze(sb, who, freeze_owner))
2083 ret = !!WARN_ON_ONCE(freeze_inc(sb, who) == 1);
2084 else
2085 ret = -EBUSY;
2086 /* All freezers share a single active reference. */
2087 deactivate_locked_super(sb);
2088 return ret;
2089 }
2090
2091 if (sb->s_writers.frozen != SB_UNFROZEN) {
2092 ret = wait_for_partially_frozen(sb);
2093 if (ret) {
2094 deactivate_locked_super(sb);
2095 return ret;
2096 }
2097
2098 goto retry;
2099 }
2100
2101 if (sb_rdonly(sb)) {
2102 /* Nothing to do really... */
2103 WARN_ON_ONCE(freeze_inc(sb, who) > 1);
2104 sb->s_writers.freeze_owner = freeze_owner;
2105 sb->s_writers.frozen = SB_FREEZE_COMPLETE;
2106 wake_up_var(&sb->s_writers.frozen);
2107 super_unlock_excl(sb);
2108 return 0;
2109 }
2110
2111 sb->s_writers.frozen = SB_FREEZE_WRITE;
2112 /* Release s_umount to preserve sb_start_write -> s_umount ordering */
2113 super_unlock_excl(sb);
2114 sb_wait_write(sb, SB_FREEZE_WRITE);
2115 __super_lock_excl(sb);
2116
2117 /* Now we go and block page faults... */
2118 sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
2119 sb_wait_write(sb, SB_FREEZE_PAGEFAULT);
2120
2121 /* All writers are done so after syncing there won't be dirty data */
2122 ret = sync_filesystem(sb);
2123 if (ret) {
2124 sb->s_writers.frozen = SB_UNFROZEN;
2125 sb_freeze_unlock(sb, SB_FREEZE_PAGEFAULT);
2126 wake_up_var(&sb->s_writers.frozen);
2127 deactivate_locked_super(sb);
2128 return ret;
2129 }
2130
2131 /* Now wait for internal filesystem counter */
2132 sb->s_writers.frozen = SB_FREEZE_FS;
2133 sb_wait_write(sb, SB_FREEZE_FS);
2134
2135 if (sb->s_op->freeze_fs) {
2136 ret = sb->s_op->freeze_fs(sb);
2137 if (ret) {
2138 printk(KERN_ERR
2139 "VFS: Filesystem freeze failed\n");
2140 sb->s_writers.frozen = SB_UNFROZEN;
2141 sb_freeze_unlock(sb, SB_FREEZE_FS);
2142 wake_up_var(&sb->s_writers.frozen);
2143 deactivate_locked_super(sb);
2144 return ret;
2145 }
2146 }
2147 /*
2148 * For debugging purposes so that the fs can warn if it sees write activity
2149 * when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super().
2150 */
2151 WARN_ON_ONCE(freeze_inc(sb, who) > 1);
2152 sb->s_writers.freeze_owner = freeze_owner;
2153 sb->s_writers.frozen = SB_FREEZE_COMPLETE;
2154 wake_up_var(&sb->s_writers.frozen);
2155 lockdep_sb_freeze_release(sb);
2156 super_unlock_excl(sb);
2157 return 0;
2158 }
2159 EXPORT_SYMBOL(freeze_super);
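
/*
 * Example: a minimal sketch of a kernel-internal freeze/thaw cycle, e.g.
 * around taking a device snapshot. The freeze_owner cookie is only
 * required together with FREEZE_EXCL, so NULL is passed here, and
 * FREEZE_MAY_NEST lets this nest with freezes arriving via other block
 * devices of the same filesystem:
 *
 *	ret = freeze_super(sb, FREEZE_HOLDER_KERNEL | FREEZE_MAY_NEST, NULL);
 *	if (ret)
 *		return ret;
 *	// ... the filesystem is now quiescent; take the snapshot ...
 *	ret = thaw_super(sb, FREEZE_HOLDER_KERNEL | FREEZE_MAY_NEST, NULL);
 */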
2160
2161 /*
2162 * Undoes the effect of a freeze_super() call. If the filesystem is
2163 * frozen both by userspace and the kernel, a thaw call from either source
2164 * removes that state without releasing the other state or unlocking the
2165 * filesystem.
2166 */
2167 static int thaw_super_locked(struct super_block *sb, enum freeze_holder who,
2168 const void *freeze_owner)
2169 {
2170 int error = -EINVAL;
2171
2172 if (sb->s_writers.frozen != SB_FREEZE_COMPLETE)
2173 goto out_unlock;
2174
2175 if (!may_unfreeze(sb, who, freeze_owner))
2176 goto out_unlock;
2177
2178 /*
2179 * All freezers share a single active reference.
2180 * So just unlock in case there are any left.
2181 */
2182 if (freeze_dec(sb, who))
2183 goto out_unlock;
2184
2185 if (sb_rdonly(sb)) {
2186 sb->s_writers.frozen = SB_UNFROZEN;
2187 sb->s_writers.freeze_owner = NULL;
2188 wake_up_var(&sb->s_writers.frozen);
2189 goto out_deactivate;
2190 }
2191
2192 lockdep_sb_freeze_acquire(sb);
2193
2194 if (sb->s_op->unfreeze_fs) {
2195 error = sb->s_op->unfreeze_fs(sb);
2196 if (error) {
2197 pr_err("VFS: Filesystem thaw failed\n");
2198 freeze_inc(sb, who);
2199 lockdep_sb_freeze_release(sb);
2200 goto out_unlock;
2201 }
2202 }
2203
2204 sb->s_writers.frozen = SB_UNFROZEN;
2205 sb->s_writers.freeze_owner = NULL;
2206 wake_up_var(&sb->s_writers.frozen);
2207 sb_freeze_unlock(sb, SB_FREEZE_FS);
2208 out_deactivate:
2209 deactivate_locked_super(sb);
2210 return 0;
2211
2212 out_unlock:
2213 super_unlock_excl(sb);
2214 return error;
2215 }
2216
2217 /**
2218 * thaw_super - unlock filesystem
2219 * @sb: the super to thaw
2220 * @who: context that wants to thaw
2221 * @freeze_owner: owner of the freeze
2222 *
2223 * Unlocks the filesystem and marks it writeable again after freeze_super()
2224 * if there are no remaining freezes on the filesystem.
2225 *
2226 * @who should be:
2227 * * %FREEZE_HOLDER_USERSPACE if userspace wants to thaw the fs;
2228 * * %FREEZE_HOLDER_KERNEL if the kernel wants to thaw the fs;
2229 * * %FREEZE_MAY_NEST whether nesting freeze and thaw requests is allowed
2230 *
2231 * A filesystem may hold multiple devices and thus a filesystem may
2232 * have been frozen through the block layer via multiple block devices.
2233 * The filesystem remains frozen until all block devices are unfrozen.
2234 */
2235 int thaw_super(struct super_block *sb, enum freeze_holder who,
2236 const void *freeze_owner)
2237 {
2238 if (!super_lock_excl(sb)) {
2239 WARN_ON_ONCE("Dying superblock while thawing!");
2240 return -EINVAL;
2241 }
2242 return thaw_super_locked(sb, who, freeze_owner);
2243 }
2244 EXPORT_SYMBOL(thaw_super);
2245
2246 /*
2247 * Create a workqueue for deferred direct IO completions. We allocate the
2248 * workqueue when it's first needed. This avoids creating a workqueue for
2249 * filesystems that don't need it and also allows us to create the workqueue
2250 * late enough that we can include s_id in the name of the workqueue.
2251 */
2252 int sb_init_dio_done_wq(struct super_block *sb)
2253 {
2254 struct workqueue_struct *old;
2255 struct workqueue_struct *wq = alloc_workqueue("dio/%s",
2256 WQ_MEM_RECLAIM | WQ_PERCPU,
2257 0,
2258 sb->s_id);
2259 if (!wq)
2260 return -ENOMEM;
2261
2262 old = NULL;
2263 /*
2264 * This has to be atomic as more DIOs can race to create the workqueue
2265 */
2266 if (!try_cmpxchg(&sb->s_dio_done_wq, &old, wq)) {
2267 /* Someone created workqueue before us? Free ours... */
2268 destroy_workqueue(wq);
2269 }
2270 return 0;
2271 }
2272 EXPORT_SYMBOL_GPL(sb_init_dio_done_wq);
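
/*
 * Example: callers create the workqueue lazily on the first deferred DIO
 * completion and then queue their completion work on it, in the spirit of
 * the iomap direct-io code. The dio structure and its work member here
 * are hypothetical:
 *
 *	if (!READ_ONCE(sb->s_dio_done_wq)) {
 *		err = sb_init_dio_done_wq(sb);
 *		if (err)
 *			return err;
 *	}
 *	queue_work(sb->s_dio_done_wq, &dio->complete_work);
 */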
2273