/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes, i.e. data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include "internal.h"


/**
 * writeback_acquire - attempt to get exclusive writeback access to a device
 * @bdi: the device's backing_dev_info structure
 *
 * It is a waste of resources to have more than one pdflush thread blocked on
 * a single request queue.  Exclusion at the request_queue level is obtained
 * via a flag in the request_queue's backing_dev_info.state.
 *
 * Non-request_queue-backed address_spaces will share default_backing_dev_info
 * unless they implement their own, which is somewhat inefficient, as it may
 * prevent concurrent writeback against multiple devices.
 */
static int writeback_acquire(struct backing_dev_info *bdi)
{
	return !test_and_set_bit(BDI_pdflush, &bdi->state);
}

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure
 *
 * Determine whether there is writeback in progress against a backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return test_bit(BDI_pdflush, &bdi->state);
}

/**
 * writeback_release - relinquish exclusive writeback access to a device
 * @bdi: the device's backing_dev_info structure
 */
static void writeback_release(struct backing_dev_info *bdi)
{
	BUG_ON(!writeback_in_progress(bdi));
	clear_bit(BDI_pdflush, &bdi->state);
}
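
/*
 * Usage sketch (hypothetical caller, not part of this file): the
 * acquire/release pair is a plain try-lock with no blocking, so a
 * flusher must be prepared to back off when another thread already
 * owns the device:
 *
 *	if (writeback_acquire(bdi)) {
 *		... write back inodes against bdi ...
 *		writeback_release(bdi);
 *	} else {
 *		... skip: another pdflush thread is flushing this queue ...
 *	}
 */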

/**
 * __mark_inode_dirty - internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (e.g. I_DIRTY_SYNC)
 *
 * Mark an inode as dirty.  Callers should use mark_inode_dirty() or
 * mark_inode_dirty_sync().
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * This function *must* be atomic for the I_DIRTY_PAGES case -
 * set_page_dirty() is called under spinlock in several places.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode);
	}

	/*
	 * Make sure that changes are seen by all CPUs before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* Avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump)) {
		struct dentry *dentry = NULL;
		const char *name = "?";

		if (!list_empty(&inode->i_dentry)) {
			dentry = list_entry(inode->i_dentry.next,
					    struct dentry, d_alias);
			if (dentry && dentry->d_name.name)
				name = (const char *) dentry->d_name.name;
		}

		if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev"))
			printk(KERN_DEBUG
			       "%s(%d): dirtied inode %lu (%s) on %s\n",
			       current->comm, task_pid_nr(current), inode->i_ino,
			       name, inode->i_sb->s_id);
	}

	spin_lock(&inode_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (hlist_unhashed(&inode->i_hash))
				goto out;
		}
		if (inode->i_state & (I_FREEING|I_CLEAR))
			goto out;

		/*
		 * If the inode was already on s_dirty/s_io/s_more_io, don't
		 * reposition it (that would break s_dirty time-ordering).
		 */
		if (!was_dirty) {
			inode->dirtied_when = jiffies;
			list_move(&inode->i_list, &sb->s_dirty);
		}
	}
out:
	spin_unlock(&inode_lock);
}

EXPORT_SYMBOL(__mark_inode_dirty);
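
/*
 * Usage sketch (hypothetical caller, not part of this file): a
 * filesystem that has updated an inode's timestamps dirties the inode
 * itself, not its pages:
 *
 *	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 *	__mark_inode_dirty(inode, I_DIRTY_SYNC);
 *
 * whereas set_page_dirty() arrives here with I_DIRTY_PAGES, which only
 * records that the inode's data pages need writeback.
 */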

static int write_inode(struct inode *inode, int sync)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, sync);
	return 0;
}

/*
 * Redirty an inode: set its when-it-was-dirtied timestamp and move it to the
 * most-recently-dirtied end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the s_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (!list_empty(&sb->s_dirty)) {
		struct inode *tail_inode;

		tail_inode = list_entry(sb->s_dirty.next, struct inode, i_list);
		if (!time_after_eq(inode->dirtied_when,
				tail_inode->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_list, &sb->s_dirty);
}
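
/*
 * Ordering sketch (illustrative values, not from a real trace): s_dirty
 * is kept in reverse chronological order, newest at the head:
 *
 *	head -> inode C (dirtied_when = 1030)
 *	        inode B (dirtied_when = 1020)
 *	tail -> inode A (dirtied_when = 1000)
 *
 * redirty_tail() above inserts at the head; move_expired_inodes() below
 * peels expired inodes off the tail, eldest first.
 */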

/*
 * Requeue an inode for re-scanning after the sb->s_io list is exhausted.
 */
static void requeue_io(struct inode *inode)
{
	list_move(&inode->i_list, &inode->i_sb->s_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	/*
	 * Prevent speculative execution through spin_unlock(&inode_lock);
	 */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 */
static void move_expired_inodes(struct list_head *delaying_queue,
				struct list_head *dispatch_queue,
				unsigned long *older_than_this)
{
	while (!list_empty(delaying_queue)) {
		struct inode *inode = list_entry(delaying_queue->prev,
						struct inode, i_list);
		if (older_than_this &&
		    time_after(inode->dirtied_when, *older_than_this))
			break;
		list_move(&inode->i_list, dispatch_queue);
	}
}

/*
 * Queue all expired dirty inodes for I/O, eldest first.
 */
static void queue_io(struct super_block *sb,
				unsigned long *older_than_this)
{
	list_splice_init(&sb->s_more_io, sb->s_io.prev);
	move_expired_inodes(&sb->s_dirty, &sb->s_io, older_than_this);
}
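
/*
 * Flow sketch of the per-superblock writeback lists (a summary of the
 * helpers above, not new mechanism):
 *
 *	s_dirty  --queue_io()------>  s_io  --clean-->  inode_in_use/unused
 *	s_io     --requeue_io()---->  s_more_io  (spliced back into s_io
 *	                                          on the next queue_io())
 *	s_io     --redirty_tail()-->  s_dirty  (freshly restamped)
 */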

int sb_has_dirty_inodes(struct super_block *sb)
{
	return !list_empty(&sb->s_dirty) ||
	       !list_empty(&sb->s_io) ||
	       !list_empty(&sb->s_more_io);
}
EXPORT_SYMBOL(sb_has_dirty_inodes);

/*
 * Write a single inode's dirty pages and inode data out to disk.
 * If `wait' is set, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 *
 * Called under inode_lock.
 */
static int
__sync_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	unsigned dirty;
	struct address_space *mapping = inode->i_mapping;
	int wait = wbc->sync_mode == WB_SYNC_ALL;
	int ret;

	BUG_ON(inode->i_state & I_SYNC);

	/* Set I_SYNC, reset I_DIRTY */
	dirty = inode->i_state & I_DIRTY;
	inode->i_state |= I_SYNC;
	inode->i_state &= ~I_DIRTY;

	spin_unlock(&inode_lock);

	ret = do_writepages(mapping, wbc);

	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wait);
		if (ret == 0)
			ret = err;
	}

	if (wait) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&inode_lock);
	inode->i_state &= ~I_SYNC;
	if (!(inode->i_state & I_FREEING)) {
		if (!(inode->i_state & I_DIRTY) &&
		    mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages.  nfs_writepages()
			 * sometimes bails out without doing anything.  Redirty
			 * the inode; move it from s_io onto s_more_io/s_dirty.
			 */
			/*
			 * akpm: if the caller was the kupdate function we put
			 * this inode at the head of s_dirty so it gets first
			 * consideration.  Otherwise, move it to the tail, for
			 * the reasons described there.  I'm not really sure
			 * how much sense this makes.  Presumably I had good
			 * reasons for doing it this way, and I'd rather not
			 * muck with it at present.
			 */
			if (wbc->for_kupdate) {
				/*
				 * For the kupdate function we move the inode
				 * to s_more_io so it will get more writeout as
				 * soon as the queue becomes uncongested.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				if (wbc->nr_to_write <= 0) {
					/*
					 * Slice used up: queue for next turn.
					 */
					requeue_io(inode);
				} else {
					/*
					 * Somehow blocked: retry later.
					 */
					redirty_tail(inode);
				}
			} else {
				/*
				 * Otherwise fully redirty the inode so that
				 * other inodes on this superblock will get some
				 * writeout.  Otherwise heavy writing to one
				 * file would indefinitely suspend writeout of
				 * all the other files.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				redirty_tail(inode);
			}
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * Someone redirtied the inode while we were writing
			 * back the pages.
			 */
			redirty_tail(inode);
		} else if (atomic_read(&inode->i_count)) {
			/*
			 * The inode is clean and in use.
			 */
			list_move(&inode->i_list, &inode_in_use);
		} else {
			/*
			 * The inode is clean and unused.
			 */
			list_move(&inode->i_list, &inode_unused);
		}
	}
	inode_sync_complete(inode);
	return ret;
}
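
/*
 * Writeout pass sketch (a summary of __sync_single_inode() above, not
 * new mechanism).  One pass over an inode looks like:
 *
 *	lock inode_lock; I_DIRTY* bits -> local `dirty'; set I_SYNC
 *	unlock; do_writepages(); write_inode(); optionally fdatawait()
 *	relock; clear I_SYNC; decide requeue based on current i_state
 *
 * A racing __mark_inode_dirty() sees I_SYNC set and leaves the list
 * placement to the requeue logic here, so no dirtying is lost.
 */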

/*
 * Write out an inode's dirty pages.  Called under inode_lock.  Either the
 * caller has a ref on the inode (either via __iget or via a syscall against
 * an fd) or the inode has I_WILL_FREE set (via generic_forget_inode).
 */
static int
__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	wait_queue_head_t *wqh;

	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if ((wbc->sync_mode != WB_SYNC_ALL) && (inode->i_state & I_SYNC)) {
		/*
		 * We're skipping this inode because it's locked, and we're not
		 * doing writeback-for-data-integrity.  Move it to s_more_io so
		 * that writeback can proceed with the other inodes on s_io.
		 * We'll have another go at writing back this inode when we
		 * have completed a full scan of s_io.
		 */
		requeue_io(inode);
		return 0;
	}

	/*
	 * It's a data-integrity sync.  We must wait.
	 */
	if (inode->i_state & I_SYNC) {
		DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);

		wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
		do {
			spin_unlock(&inode_lock);
			__wait_on_bit(wqh, &wq, inode_wait,
							TASK_UNINTERRUPTIBLE);
			spin_lock(&inode_lock);
		} while (inode->i_state & I_SYNC);
	}
	return __sync_single_inode(inode, wbc);
}
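
/*
 * Handshake sketch: the sleep above pairs with inode_sync_complete().
 * The flusher clears I_SYNC under inode_lock and then wakes the bit
 * waitqueue; the waiter rechecks under the lock before proceeding:
 *
 *	waiter (here)                   flusher (__sync_single_inode)
 *	-------------                   -----------------------------
 *	see I_SYNC set                  i_state &= ~I_SYNC;
 *	__wait_on_bit(__I_SYNC)         smp_mb();
 *	recheck I_SYNC, loop or go      wake_up_bit(..., __I_SYNC);
 */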

/*
 * Write out a superblock's list of dirty inodes.  A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If we're a pdflush thread, then implement pdflush collision avoidance
 * against the entire list.
 *
 * If `bdi' is non-NULL then we're being asked to write back a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched.  For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * FIXME: this linear search could get expensive with many filesystems.  But
 * how to fix?  We need to go from an address_space to all inodes which share
 * a queue with that address_space.  (Easy: have a global "dirty superblocks"
 * list).
 *
 * The inodes to be written are parked on sb->s_io.  They are moved back onto
 * sb->s_dirty as they are selected for writing.  This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on inode_sync_wait.
 */
void generic_sync_sb_inodes(struct super_block *sb,
				struct writeback_control *wbc)
{
	const unsigned long start = jiffies;	/* livelock avoidance */
	int sync = wbc->sync_mode == WB_SYNC_ALL;

	spin_lock(&inode_lock);
	if (!wbc->for_kupdate || list_empty(&sb->s_io))
		queue_io(sb, wbc->older_than_this);

	while (!list_empty(&sb->s_io)) {
		struct inode *inode = list_entry(sb->s_io.prev,
						struct inode, i_list);
		struct address_space *mapping = inode->i_mapping;
		struct backing_dev_info *bdi = mapping->backing_dev_info;
		long pages_skipped;

		if (!bdi_cap_writeback_dirty(bdi)) {
			redirty_tail(inode);
			if (sb_is_blkdev_sb(sb)) {
				/*
				 * Dirty memory-backed blockdev: the ramdisk
				 * driver does this.  Skip just this inode.
				 */
				continue;
			}
			/*
			 * Dirty memory-backed inode against a filesystem other
			 * than the kernel-internal bdev filesystem.  Skip the
			 * entire superblock.
			 */
			break;
		}

		if (wbc->nonblocking && bdi_write_congested(bdi)) {
			wbc->encountered_congestion = 1;
			if (!sb_is_blkdev_sb(sb))
				break;		/* Skip a congested fs */
			requeue_io(inode);
			continue;		/* Skip a congested blockdev */
		}

		if (wbc->bdi && bdi != wbc->bdi) {
			if (!sb_is_blkdev_sb(sb))
				break;		/* fs has the wrong queue */
			requeue_io(inode);
			continue;		/* blockdev has wrong queue */
		}

		/* Was this inode dirtied after this function was called? */
		if (time_after(inode->dirtied_when, start))
			break;

		/* Is another pdflush already flushing this queue? */
		if (current_is_pdflush() && !writeback_acquire(bdi))
			break;

		BUG_ON(inode->i_state & I_FREEING);
		__iget(inode);
		pages_skipped = wbc->pages_skipped;
		__writeback_single_inode(inode, wbc);
		if (current_is_pdflush())
			writeback_release(bdi);
		if (wbc->pages_skipped != pages_skipped) {
			/*
			 * Writeback is not making progress due to locked
			 * buffers.  Skip this inode for now.
			 */
			redirty_tail(inode);
		}
		spin_unlock(&inode_lock);
		iput(inode);
		cond_resched();
		spin_lock(&inode_lock);
		if (wbc->nr_to_write <= 0) {
			wbc->more_io = 1;
			break;
		}
		if (!list_empty(&sb->s_more_io))
			wbc->more_io = 1;
	}

	if (sync) {
		struct inode *inode, *old_inode = NULL;

		/*
		 * Data integrity sync.  Must wait for all pages under
		 * writeback, because there may have been pages dirtied
		 * before our sync call whose writeout started before we
		 * got to them.  In that case the inode may no longer be
		 * on the dirty list, but we still have to wait for that
		 * writeout.
		 */
		list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
			struct address_space *mapping;

			if (inode->i_state & (I_FREEING|I_WILL_FREE))
				continue;
			mapping = inode->i_mapping;
			if (mapping->nrpages == 0)
				continue;
			__iget(inode);
			spin_unlock(&inode_lock);
			/*
			 * We hold a reference to 'inode' so it couldn't have
			 * been removed from the s_inodes list while we dropped
			 * the inode_lock.  We cannot iput the inode now as we
			 * can be holding the last reference and we cannot iput
			 * it under inode_lock.  So we keep the reference and
			 * iput it later.
			 */
			iput(old_inode);
			old_inode = inode;

			filemap_fdatawait(mapping);

			cond_resched();

			spin_lock(&inode_lock);
		}
		spin_unlock(&inode_lock);
		iput(old_inode);
	} else
		spin_unlock(&inode_lock);

	return;		/* Leave any unwritten inodes on s_io */
}
EXPORT_SYMBOL_GPL(generic_sync_sb_inodes);
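
/*
 * Example (sketch, hypothetical caller, not part of this file): writing
 * back up to 1024 pages of one superblock's dirty data without waiting:
 *
 *	struct writeback_control wbc = {
 *		.sync_mode	= WB_SYNC_NONE,
 *		.nr_to_write	= 1024,
 *		.range_start	= 0,
 *		.range_end	= LLONG_MAX,
 *	};
 *
 *	generic_sync_sb_inodes(sb, &wbc);
 *	if (wbc.more_io)
 *		... more inodes wait on s_more_io; make another pass later ...
 */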

static void sync_sb_inodes(struct super_block *sb,
				struct writeback_control *wbc)
{
	generic_sync_sb_inodes(sb, wbc);
}

/*
 * Start writeback of dirty pagecache data against all unlocked inodes.
 *
 * Note:
 * We don't need to grab a reference to the superblock here.  If it has a
 * non-empty ->s_dirty list it hasn't been killed yet, and kill_super() won't
 * proceed past sync_inodes_sb() until the ->s_dirty/s_io/s_more_io lists are
 * all empty.  Since __sync_single_inode() regains inode_lock before it finally
 * moves the inode off the superblock lists, we are OK.
 *
 * If `older_than_this' is non-NULL then only flush inodes which have a
 * flushtime older than *older_than_this.
 *
 * If `bdi' is non-NULL then we will scan the first inode against each
 * superblock until we find the matching ones.  One group will be the dirty
 * inodes against a filesystem.  Then when we hit the dummy blockdev superblock,
 * sync_sb_inodes will seek out the blockdev which matches `bdi'.  Maybe not
 * super-efficient but we're about to do a ton of I/O...
 */
void
writeback_inodes(struct writeback_control *wbc)
{
	struct super_block *sb;

	might_sleep();
	spin_lock(&sb_lock);
restart:
	list_for_each_entry_reverse(sb, &super_blocks, s_list) {
		if (sb_has_dirty_inodes(sb)) {
			/* we're making our own get_super here */
			sb->s_count++;
			spin_unlock(&sb_lock);
			/*
			 * If we can't get the readlock, there's no sense in
			 * waiting around, most of the time the FS is going to
			 * be unmounted by the time it is released.
			 */
			if (down_read_trylock(&sb->s_umount)) {
				if (sb->s_root)
					sync_sb_inodes(sb, wbc);
				up_read(&sb->s_umount);
			}
			spin_lock(&sb_lock);
			if (__put_super_and_need_restart(sb))
				goto restart;
		}
		if (wbc->nr_to_write <= 0)
			break;
	}
	spin_unlock(&sb_lock);
}
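
/*
 * Example (sketch, hypothetical caller, not part of this file): a
 * pdflush-style background flusher asking for a batch of pages across
 * all superblocks, backing off from congested queues:
 *
 *	struct writeback_control wbc = {
 *		.bdi		 = NULL,	(all queues)
 *		.sync_mode	 = WB_SYNC_NONE,
 *		.older_than_this = NULL,
 *		.nr_to_write	 = 1024,
 *		.nonblocking	 = 1,
 *	};
 *	writeback_inodes(&wbc);
 */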

/*
 * Writeback and wait upon the filesystem's dirty inodes.  The caller will
 * do this in two passes - one to write, and one to wait.
 *
 * A finite limit is set on the number of pages which will be written, to
 * prevent infinite livelock of sys_sync().
 *
 * We add in the number of potentially dirty inodes, because each inode write
 * can dirty pagecache in the underlying blockdev.
 */
void sync_inodes_sb(struct super_block *sb, int wait)
{
	struct writeback_control wbc = {
		.sync_mode	= wait ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};

	if (!wait) {
		unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
		unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);

		wbc.nr_to_write = nr_dirty + nr_unstable +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
	} else
		wbc.nr_to_write = LONG_MAX; /* doesn't actually matter */

	sync_sb_inodes(sb, &wbc);
}

/**
 * sync_inodes - writes all inodes to disk
 * @wait: wait for completion
 *
 * sync_inodes() goes through each super block's dirty inode list, writes the
 * inodes out, waits on the writeout and puts the inodes back on the normal
 * list.
 *
 * This is for sys_sync().  fsync_dev() uses the same algorithm.  The subtle
 * part of the sync functions is that the blockdev "superblock" is processed
 * last.  This is because the write_inode() function of a typical fs will
 * perform no I/O, but will mark buffers in the blockdev mapping as dirty.
 * What we want to do is to perform all that dirtying first, and then write
 * back all those inode blocks via the blockdev mapping in one sweep.  So the
 * additional (somewhat redundant) sync_blockdev() calls here are to make
 * sure that really happens.  Because if we call sync_inodes_sb(wait=1) with
 * outstanding dirty inodes, the writeback goes block-at-a-time within the
 * filesystem's write_inode().  This is extremely slow.
 */
static void __sync_inodes(int wait)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
restart:
	list_for_each_entry(sb, &super_blocks, s_list) {
		sb->s_count++;
		spin_unlock(&sb_lock);
		down_read(&sb->s_umount);
		if (sb->s_root) {
			sync_inodes_sb(sb, wait);
			sync_blockdev(sb->s_bdev);
		}
		up_read(&sb->s_umount);
		spin_lock(&sb_lock);
		if (__put_super_and_need_restart(sb))
			goto restart;
	}
	spin_unlock(&sb_lock);
}

void sync_inodes(int wait)
{
	/* One pass to get all the writeout started... */
	__sync_inodes(0);

	/* ...and, for sync(2)-style semantics, a second pass to wait. */
	if (wait)
		__sync_inodes(1);
}

/**
 * write_inode_now - write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty.  This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	int ret;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	spin_lock(&inode_lock);
	ret = __writeback_single_inode(inode, &wbc);
	spin_unlock(&inode_lock);
	if (sync)
		inode_sync_wait(inode);
	return ret;
}
EXPORT_SYMBOL(write_inode_now);
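
/*
 * Example (sketch, hypothetical caller, not part of this file): an
 * nfsd-style user flushing an inode before replying to a client, with
 * sync=1 so the call does not return until the writeout completes:
 *
 *	err = write_inode_now(inode, 1);
 *	if (err)
 *		... report the write error to the client ...
 */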

/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	spin_lock(&inode_lock);
	ret = __writeback_single_inode(inode, wbc);
	spin_unlock(&inode_lock);
	return ret;
}
EXPORT_SYMBOL(sync_inode);
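
/*
 * Example (sketch, hypothetical caller, not part of this file): a
 * data-integrity writeout of one inode, like write_inode_now(inode, 1)
 * but with the writeback_control in the caller's hands:
 *
 *	struct writeback_control wbc = {
 *		.sync_mode	= WB_SYNC_ALL,
 *		.nr_to_write	= LONG_MAX,
 *		.range_start	= 0,
 *		.range_end	= LLONG_MAX,
 *	};
 *	err = sync_inode(inode, &wbc);
 */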

/**
 * generic_osync_inode - flush all dirty data for a given inode to disk
 * @inode: inode to write
 * @mapping: the address_space that should be flushed
 * @what: what to write and wait upon
 *
 * This can be called by file_write functions for files which have the
 * O_SYNC flag set, to flush dirty writes to disk.
 *
 * @what is a bitmask, specifying which part of the inode's data should be
 * written and waited upon.
 *
 *    OSYNC_DATA:     i_mapping's dirty data
 *    OSYNC_METADATA: the buffers at i_mapping->private_list
 *    OSYNC_INODE:    the inode itself
 */
int generic_osync_inode(struct inode *inode, struct address_space *mapping, int what)
{
	int err = 0;
	int need_write_inode_now = 0;
	int err2;

	if (what & OSYNC_DATA)
		err = filemap_fdatawrite(mapping);
	if (what & (OSYNC_METADATA|OSYNC_DATA)) {
		err2 = sync_mapping_buffers(mapping);
		if (!err)
			err = err2;
	}
	if (what & OSYNC_DATA) {
		err2 = filemap_fdatawait(mapping);
		if (!err)
			err = err2;
	}

	spin_lock(&inode_lock);
	if ((inode->i_state & I_DIRTY) &&
	    ((what & OSYNC_INODE) || (inode->i_state & I_DIRTY_DATASYNC)))
		need_write_inode_now = 1;
	spin_unlock(&inode_lock);

	if (need_write_inode_now) {
		err2 = write_inode_now(inode, 1);
		if (!err)
			err = err2;
	} else
		inode_sync_wait(inode);

	return err;
}
EXPORT_SYMBOL(generic_osync_inode);
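
/*
 * Example (sketch, hypothetical caller, not part of this file): an
 * O_SYNC buffered-write path flushing both data and metadata after
 * copying in the user's bytes:
 *
 *	written = ... perform the buffered write ...;
 *	if (written >= 0 && (file->f_flags & O_SYNC))
 *		err = generic_osync_inode(inode, mapping,
 *					  OSYNC_METADATA | OSYNC_DATA);
 */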
812