/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include "internal.h"

/**
 * writeback_acquire - attempt to get exclusive writeback access to a device
 * @bdi: the device's backing_dev_info structure
 *
 * It is a waste of resources to have more than one pdflush thread blocked on
 * a single request queue.  Exclusion at the request_queue level is obtained
 * via a flag in the request_queue's backing_dev_info.state.
 *
 * Non-request_queue-backed address_spaces will share default_backing_dev_info,
 * unless they implement their own.  This is somewhat inefficient, as it may
 * prevent concurrent writeback against multiple devices.
 */
static int writeback_acquire(struct backing_dev_info *bdi)
{
	return !test_and_set_bit(BDI_pdflush, &bdi->state);
}

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback in progress against a backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return test_bit(BDI_pdflush, &bdi->state);
}

/**
 * writeback_release - relinquish exclusive writeback access against a device.
 * @bdi: the device's backing_dev_info structure
 */
static void writeback_release(struct backing_dev_info *bdi)
{
	BUG_ON(!writeback_in_progress(bdi));
	clear_bit(BDI_pdflush, &bdi->state);
}

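/*
 * A minimal usage sketch of the exclusion API above (illustration only,
 * not part of this file): a pdflush-style flusher takes the per-bdi
 * writeback slot, does its work and releases it.  flush_one_bdi() and
 * do_flush_work() are hypothetical names.
 */
#if 0
static void flush_one_bdi(struct backing_dev_info *bdi)
{
	if (!writeback_acquire(bdi))
		return;			/* someone else is already flushing */
	do_flush_work(bdi);		/* hypothetical writeback work */
	writeback_release(bdi);		/* clears BDI_pdflush, lets others in */
}
#endif
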
static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}

/**
 * __mark_inode_dirty - internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *
 * Mark an inode as dirty.  Callers should use mark_inode_dirty() or
 * mark_inode_dirty_sync().
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * This function *must* be atomic for the I_DIRTY_PAGES case -
 * set_page_dirty() is called under spinlock in several places.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (hlist_unhashed(&inode->i_hash))
				goto out;
		}
		if (inode->i_state & (I_FREEING|I_CLEAR))
			goto out;

		/*
		 * If the inode was already on s_dirty/s_io/s_more_io, don't
		 * reposition it (that would break s_dirty time-ordering).
		 */
		if (!was_dirty) {
			inode->dirtied_when = jiffies;
			list_move(&inode->i_list, &sb->s_dirty);
		}
	}
out:
	spin_unlock(&inode_lock);
}

EXPORT_SYMBOL(__mark_inode_dirty);

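/*
 * A minimal sketch (illustration only, not part of this file): filesystems
 * normally reach __mark_inode_dirty() through the mark_inode_dirty() and
 * mark_inode_dirty_sync() wrappers in <linux/fs.h>, e.g. after updating
 * timestamps:
 */
#if 0
static void example_touch_inode(struct inode *inode)
{
	inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
	mark_inode_dirty(inode);	/* __mark_inode_dirty(inode, I_DIRTY) */
}
#endif
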
static int write_inode(struct inode *inode, int sync)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, sync);
	return 0;
}

/*
 * Redirty an inode: set its when-it-was-dirtied timestamp and move it to
 * the end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the s_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (!list_empty(&sb->s_dirty)) {
		struct inode *tail_inode;

		tail_inode = list_entry(sb->s_dirty.next, struct inode, i_list);
		if (time_before(inode->dirtied_when,
				tail_inode->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_list, &sb->s_dirty);
}

/*
 * Requeue an inode for re-scanning after the sb->s_io list is exhausted.
 */
static void requeue_io(struct inode *inode)
{
	list_move(&inode->i_list, &inode->i_sb->s_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	/*
	 * Prevent speculative execution through spin_unlock(&inode_lock);
	 */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in the distant
	 * past.  This test is necessary to prevent such wrapped-around
	 * relative times from permanently stopping the whole pdflush
	 * writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}

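/*
 * Worked example for the check above (illustration only): on 32-bit, with
 * HZ=1000 jiffies wraps about every 49.7 days.  Suppose an inode was
 * dirtied at dirtied_when = 0x7fffffff and jiffies has since wrapped to a
 * small value: time_after(dirtied_when, t) stays true -- the stamp
 * "appears" to be in the future -- so without the extra
 * time_before_eq(dirtied_when, jiffies) test such an inode would never be
 * treated as expired by move_expired_inodes() below.
 */
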
/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 */
static void move_expired_inodes(struct list_head *delaying_queue,
				struct list_head *dispatch_queue,
				unsigned long *older_than_this)
{
	while (!list_empty(delaying_queue)) {
		struct inode *inode = list_entry(delaying_queue->prev,
						struct inode, i_list);
		if (older_than_this &&
		    inode_dirtied_after(inode, *older_than_this))
			break;
		list_move(&inode->i_list, dispatch_queue);
	}
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 */
static void queue_io(struct super_block *sb,
				unsigned long *older_than_this)
{
	list_splice_init(&sb->s_more_io, sb->s_io.prev);
	move_expired_inodes(&sb->s_dirty, &sb->s_io, older_than_this);
}

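/*
 * Worked example for queue_io() above (illustration only): s_dirty is
 * kept most-recently-dirtied-first (redirty_tail() and __mark_inode_dirty()
 * put inodes at its head), so move_expired_inodes() scans from the tail,
 * i.e. from the eldest entry, and stops at the first inode that is still
 * too young.  The writeback loop below likewise consumes s_io from its
 * tail, so the eldest expired inode is written first.
 */
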
int sb_has_dirty_inodes(struct super_block *sb)
{
	return !list_empty(&sb->s_dirty) ||
	       !list_empty(&sb->s_io) ||
	       !list_empty(&sb->s_more_io);
}
EXPORT_SYMBOL(sb_has_dirty_inodes);

/*
 * Wait for writeback on an inode to complete.
 */
static void inode_wait_for_writeback(struct inode *inode)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	do {
		spin_unlock(&inode_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&inode_lock);
	} while (inode->i_state & I_SYNC);
}

/*
 * Write out an inode's dirty pages.  Called under inode_lock.  Either the
 * caller has a ref on the inode (either via __iget or via syscall against
 * an fd) or the inode has I_WILL_FREE set (via generic_forget_inode).
 *
 * If `wait' is set, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, to
 * prevent livelocks, etc.
 */
static int
writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	int wait = wbc->sync_mode == WB_SYNC_ALL;
	unsigned dirty;
	int ret;

	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		/*
		 * If this inode is locked for writeback and we are not doing
		 * writeback-for-data-integrity, move it to s_more_io so that
		 * writeback can proceed with the other inodes on s_io.
		 *
		 * We'll have another go at writing back this inode once we
		 * have completed a full scan of s_io.
		 */
		if (!wait) {
			requeue_io(inode);
			return 0;
		}

		/*
		 * It's a data-integrity sync.  We must wait.
		 */
		inode_wait_for_writeback(inode);
	}

	BUG_ON(inode->i_state & I_SYNC);

	/* Set I_SYNC, reset I_DIRTY */
	dirty = inode->i_state & I_DIRTY;
	inode->i_state |= I_SYNC;
	inode->i_state &= ~I_DIRTY;

	spin_unlock(&inode_lock);

	ret = do_writepages(mapping, wbc);

	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wait);
		if (ret == 0)
			ret = err;
	}

	if (wait) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&inode_lock);
	inode->i_state &= ~I_SYNC;
	if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
		if (!(inode->i_state & I_DIRTY) &&
		    mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages.  nfs_writepages()
			 * sometimes bails out without doing anything.  Redirty
			 * the inode; move it from s_io onto s_more_io/s_dirty.
			 */
			/*
			 * akpm: if the caller was the kupdate function we put
			 * this inode at the head of s_dirty so it gets first
			 * consideration.  Otherwise, move it to the tail, for
			 * the reasons described there.  I'm not really sure
			 * how much sense this makes.  Presumably I had a good
			 * reason for doing it this way, and I'd rather not
			 * muck with it at present.
			 */
			if (wbc->for_kupdate) {
				/*
				 * For the kupdate function we move the inode
				 * to s_more_io so it will get more writeout as
				 * soon as the queue becomes uncongested.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				if (wbc->nr_to_write <= 0) {
					/*
					 * slice used up: queue for next turn
					 */
					requeue_io(inode);
				} else {
					/*
					 * somehow blocked: retry later
					 */
					redirty_tail(inode);
				}
			} else {
				/*
				 * Otherwise fully redirty the inode so that
				 * other inodes on this superblock will get some
				 * writeout.  Without this, heavy writing to one
				 * file would indefinitely suspend writeout of
				 * all the other files.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				redirty_tail(inode);
			}
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * Someone redirtied the inode while we were writing
			 * back the pages.
			 */
			redirty_tail(inode);
		} else if (atomic_read(&inode->i_count)) {
			/*
			 * The inode is clean, inuse
			 */
			list_move(&inode->i_list, &inode_in_use);
		} else {
			/*
			 * The inode is clean, unused
			 */
			list_move(&inode->i_list, &inode_unused);
		}
	}
	inode_sync_complete(inode);
	return ret;
}

/*
 * Write out a superblock's list of dirty inodes.  A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If we're a pdflush thread, then implement pdflush collision avoidance
 * against the entire list.
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched.  For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * FIXME: this linear search could get expensive with many filesystems.  But
 * how to fix?  We need to go from an address_space to all inodes which share
 * a queue with that address_space.  (Easy: have a global "dirty superblocks"
 * list).
 *
 * The inodes to be written are parked on sb->s_io.  They are moved back onto
 * sb->s_dirty as they are selected for writing.  This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on inode_sync_wait.
 */
void generic_sync_sb_inodes(struct super_block *sb,
				struct writeback_control *wbc)
{
	const unsigned long start = jiffies;	/* livelock avoidance */
	int sync = wbc->sync_mode == WB_SYNC_ALL;

	spin_lock(&inode_lock);
	if (!wbc->for_kupdate || list_empty(&sb->s_io))
		queue_io(sb, wbc->older_than_this);

	while (!list_empty(&sb->s_io)) {
		struct inode *inode = list_entry(sb->s_io.prev,
						struct inode, i_list);
		struct address_space *mapping = inode->i_mapping;
		struct backing_dev_info *bdi = mapping->backing_dev_info;
		long pages_skipped;

		if (!bdi_cap_writeback_dirty(bdi)) {
			redirty_tail(inode);
			if (sb_is_blkdev_sb(sb)) {
				/*
				 * Dirty memory-backed blockdev: the ramdisk
				 * driver does this.  Skip just this inode
				 */
				continue;
			}
			/*
			 * Dirty memory-backed inode against a filesystem other
			 * than the kernel-internal bdev filesystem.  Skip the
			 * entire superblock.
			 */
			break;
		}

		if (inode->i_state & (I_NEW | I_WILL_FREE)) {
			requeue_io(inode);
			continue;
		}

		if (wbc->nonblocking && bdi_write_congested(bdi)) {
			wbc->encountered_congestion = 1;
			if (!sb_is_blkdev_sb(sb))
				break;		/* Skip a congested fs */
			requeue_io(inode);
			continue;		/* Skip a congested blockdev */
		}

		if (wbc->bdi && bdi != wbc->bdi) {
			if (!sb_is_blkdev_sb(sb))
				break;		/* fs has the wrong queue */
			requeue_io(inode);
			continue;		/* blockdev has wrong queue */
		}

		/*
		 * Was this inode dirtied after sync_sb_inodes was called?
		 * This keeps sync from doing extra jobs and livelocking.
		 */
		if (inode_dirtied_after(inode, start))
			break;

		/* Is another pdflush already flushing this queue? */
		if (current_is_pdflush() && !writeback_acquire(bdi))
			break;

		BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));
		__iget(inode);
		pages_skipped = wbc->pages_skipped;
		writeback_single_inode(inode, wbc);
		if (current_is_pdflush())
			writeback_release(bdi);
		if (wbc->pages_skipped != pages_skipped) {
			/*
			 * writeback is not making progress due to locked
			 * buffers.  Skip this inode for now.
			 */
			redirty_tail(inode);
		}
		spin_unlock(&inode_lock);
		iput(inode);
		cond_resched();
		spin_lock(&inode_lock);
		if (wbc->nr_to_write <= 0) {
			wbc->more_io = 1;
			break;
		}
		if (!list_empty(&sb->s_more_io))
			wbc->more_io = 1;
	}

	if (sync) {
		struct inode *inode, *old_inode = NULL;

		/*
		 * Data integrity sync.  We must wait for all pages under
		 * writeback, because there may have been pages dirtied
		 * before our sync call whose writeout was started before
		 * we could write them out ourselves.  In that case the
		 * inode may no longer be on the dirty list, but we still
		 * have to wait for that writeout.
		 */
		list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
			struct address_space *mapping;

			if (inode->i_state &
					(I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
				continue;
			mapping = inode->i_mapping;
			if (mapping->nrpages == 0)
				continue;
			__iget(inode);
			spin_unlock(&inode_lock);
			/*
			 * We hold a reference to 'inode' so it couldn't have
			 * been removed from s_inodes list while we dropped the
			 * inode_lock.  We cannot iput the inode now as we can
			 * be holding the last reference and we cannot iput it
			 * under inode_lock. So we keep the reference and iput
			 * it later.
			 */
			iput(old_inode);
			old_inode = inode;

			filemap_fdatawait(mapping);

			cond_resched();

			spin_lock(&inode_lock);
		}
		spin_unlock(&inode_lock);
		iput(old_inode);
	} else
		spin_unlock(&inode_lock);

	return;		/* Leave any unwritten inodes on s_io */
}
EXPORT_SYMBOL_GPL(generic_sync_sb_inodes);

static void sync_sb_inodes(struct super_block *sb,
				struct writeback_control *wbc)
{
	generic_sync_sb_inodes(sb, wbc);
}

/*
 * Start writeback of dirty pagecache data against all unlocked inodes.
 *
 * Note:
 * We don't need to grab a reference to the superblock here.  If it has a
 * non-empty ->s_dirty list it hasn't been killed yet, and kill_super() won't
 * proceed past sync_inodes_sb() until the ->s_dirty/s_io/s_more_io lists are
 * all empty.  Since writeback_single_inode() regains inode_lock before it
 * finally moves the inode off the superblock lists, we are OK.
 *
 * If `older_than_this' is non-zero then only flush inodes which have a
 * flushtime older than *older_than_this.
 *
 * If `bdi' is non-zero then we will scan the first inode against each
 * superblock until we find the matching ones.  One group will be the dirty
 * inodes against a filesystem.  Then when we hit the dummy blockdev superblock,
 * sync_sb_inodes will seek out the blockdev which matches `bdi'.  Maybe not
 * super-efficient but we're about to do a ton of I/O...
 */
void writeback_inodes(struct writeback_control *wbc)
{
	struct super_block *sb;

	might_sleep();
	spin_lock(&sb_lock);
restart:
	list_for_each_entry_reverse(sb, &super_blocks, s_list) {
		if (sb_has_dirty_inodes(sb)) {
			/* we're making our own get_super here */
			sb->s_count++;
			spin_unlock(&sb_lock);
			/*
			 * If we can't get the readlock, there's no sense in
			 * waiting around, most of the time the FS is going to
			 * be unmounted by the time it is released.
			 */
			if (down_read_trylock(&sb->s_umount)) {
				if (sb->s_root)
					sync_sb_inodes(sb, wbc);
				up_read(&sb->s_umount);
			}
			spin_lock(&sb_lock);
			if (__put_super_and_need_restart(sb))
				goto restart;
		}
		if (wbc->nr_to_write <= 0)
			break;
	}
	spin_unlock(&sb_lock);
}

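/*
 * A minimal sketch (illustration only, not part of this file) of the kind
 * of writeback_control a pdflush-style caller passes in; compare
 * background_writeout() in mm/page-writeback.c.  The value 1024 here is an
 * arbitrary batch size for the example.
 */
#if 0
	struct writeback_control wbc = {
		.bdi		= NULL,		/* all queues */
		.sync_mode	= WB_SYNC_NONE,
		.older_than_this = NULL,	/* no age cutoff */
		.nr_to_write	= 1024,		/* arbitrary batch */
		.range_cyclic	= 1,
	};
	writeback_inodes(&wbc);
#endif
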
/*
 * writeback and wait upon the filesystem's dirty inodes.  The caller will
 * do this in two passes - one to write, and one to wait.
 *
 * A finite limit is set on the number of pages which will be written, to
 * prevent infinite livelock of sys_sync().
 *
 * We add in the number of potentially dirty inodes, because each inode write
 * can dirty pagecache in the underlying blockdev.
 */
void sync_inodes_sb(struct super_block *sb, int wait)
{
	struct writeback_control wbc = {
		.sync_mode	= wait ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};

	if (!wait) {
		unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
		unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);

		wbc.nr_to_write = nr_dirty + nr_unstable +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
	} else
		wbc.nr_to_write = LONG_MAX; /* doesn't actually matter */

	sync_sb_inodes(sb, &wbc);
}

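/*
 * A minimal sketch (illustration only) of the two-pass protocol described
 * above, as done on the sys_sync() path: one non-waiting pass to start
 * I/O, then a waiting pass to catch everything.
 */
#if 0
	sync_inodes_sb(sb, 0);	/* pass 1: start writeout, don't wait */
	sync_inodes_sb(sb, 1);	/* pass 2: write and wait upon everything */
#endif
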
/**
 * write_inode_now - write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	int ret;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	spin_lock(&inode_lock);
	ret = writeback_single_inode(inode, &wbc);
	spin_unlock(&inode_lock);
	if (sync)
		inode_sync_wait(inode);
	return ret;
}
EXPORT_SYMBOL(write_inode_now);

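/*
 * A minimal usage sketch (illustration only): knfsd-style code that needs
 * an inode committed before replying calls it synchronously and checks the
 * result.
 */
#if 0
	int err = write_inode_now(inode, 1);	/* write and wait */
	if (err)
		printk(KERN_WARNING "inode %lu commit failed: %d\n",
		       inode->i_ino, err);
#endif
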
/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	spin_lock(&inode_lock);
	ret = writeback_single_inode(inode, wbc);
	spin_unlock(&inode_lock);
	return ret;
}
EXPORT_SYMBOL(sync_inode);

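/*
 * A minimal sketch (illustration only): unlike write_inode_now(),
 * sync_inode() lets the caller supply the writeback_control, e.g. to bound
 * how many pages get written in one go (the bound below is arbitrary).
 */
#if 0
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_NONE,
		.nr_to_write	= 64,		/* arbitrary example bound */
	};
	int err = sync_inode(inode, &wbc);
#endif
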
/**
 * generic_osync_inode - flush all dirty data for a given inode to disk
 * @inode: inode to write
 * @mapping: the address_space that should be flushed
 * @what:  what to write and wait upon
 *
 * This can be called by file_write functions for files which have the
 * O_SYNC flag set, to flush dirty writes to disk.
 *
 * @what is a bitmask, specifying which part of the inode's data should be
 * written and waited upon.
 *
 *    OSYNC_DATA:     i_mapping's dirty data
 *    OSYNC_METADATA: the buffers at i_mapping->private_list
 *    OSYNC_INODE:    the inode itself
 */
int generic_osync_inode(struct inode *inode, struct address_space *mapping, int what)
{
	int err = 0;
	int need_write_inode_now = 0;
	int err2;

	if (what & OSYNC_DATA)
		err = filemap_fdatawrite(mapping);
	if (what & (OSYNC_METADATA|OSYNC_DATA)) {
		err2 = sync_mapping_buffers(mapping);
		if (!err)
			err = err2;
	}
	if (what & OSYNC_DATA) {
		err2 = filemap_fdatawait(mapping);
		if (!err)
			err = err2;
	}

	spin_lock(&inode_lock);
	if ((inode->i_state & I_DIRTY) &&
	    ((what & OSYNC_INODE) || (inode->i_state & I_DIRTY_DATASYNC)))
		need_write_inode_now = 1;
	spin_unlock(&inode_lock);

	if (need_write_inode_now) {
		err2 = write_inode_now(inode, 1);
		if (!err)
			err = err2;
	} else
		inode_sync_wait(inode);

	return err;
}
EXPORT_SYMBOL(generic_osync_inode);
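
/*
 * A minimal sketch (illustration only) of the O_SYNC write path described
 * above: after a buffered write, flush the data and any private_list
 * buffers; generic_osync_inode() writes the inode too when it is
 * I_DIRTY_DATASYNC-dirty.
 */
#if 0
	if (written > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode)))
		err = generic_osync_inode(inode, mapping,
					  OSYNC_METADATA | OSYNC_DATA);
#endif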