1 /*
2  * fs/fs-writeback.c
3  *
4  * Copyright (C) 2002, Linus Torvalds.
5  *
6  * Contains all the functions related to writing back and waiting
7  * upon dirty inodes against superblocks, and writing back dirty
8  * pages against inodes.  ie: data writeback.  Writeout of the
9  * inode itself is not handled here.
10  *
11  * 10Apr2002	Andrew Morton
12  *		Split out of fs/inode.c
13  *		Additions for address_space-based writeback
14  */
15 
16 #include <linux/kernel.h>
17 #include <linux/module.h>
18 #include <linux/spinlock.h>
19 #include <linux/slab.h>
20 #include <linux/sched.h>
21 #include <linux/fs.h>
22 #include <linux/mm.h>
23 #include <linux/kthread.h>
24 #include <linux/freezer.h>
25 #include <linux/writeback.h>
26 #include <linux/blkdev.h>
27 #include <linux/backing-dev.h>
28 #include <linux/buffer_head.h>
29 #include "internal.h"
30 
31 #define inode_to_bdi(inode)	((inode)->i_mapping->backing_dev_info)
32 
33 /*
34  * We don't actually have pdflush, but this one is exported through /proc...
35  */
36 int nr_pdflush_threads;
37 
38 /*
39  * Passed into wb_writeback(), essentially a subset of writeback_control
40  */
41 struct wb_writeback_args {
42 	long nr_pages;
43 	struct super_block *sb;
44 	enum writeback_sync_modes sync_mode;
45 	unsigned int for_kupdate:1;
46 	unsigned int range_cyclic:1;
47 	unsigned int for_background:1;
48 	unsigned int sb_pinned:1;
49 };
50 
51 /*
52  * Work items for the bdi_writeback threads
53  */
54 struct bdi_work {
55 	struct list_head list;		/* pending work list */
56 	struct rcu_head rcu_head;	/* for RCU free/clear of work */
57 
58 	unsigned long seen;		/* threads that have seen this work */
59 	atomic_t pending;		/* number of threads still to do work */
60 
61 	struct wb_writeback_args args;	/* writeback arguments */
62 
63 	unsigned long state;		/* flag bits, see WS_* */
64 };
65 
66 enum {
67 	WS_USED_B = 0,
68 	WS_ONSTACK_B,
69 };
70 
71 #define WS_USED (1 << WS_USED_B)
72 #define WS_ONSTACK (1 << WS_ONSTACK_B)
73 
74 static inline bool bdi_work_on_stack(struct bdi_work *work)
75 {
76 	return test_bit(WS_ONSTACK_B, &work->state);
77 }
78 
79 static inline void bdi_work_init(struct bdi_work *work,
80 				 struct wb_writeback_args *args)
81 {
82 	INIT_RCU_HEAD(&work->rcu_head);
83 	work->args = *args;
84 	work->state = WS_USED;
85 }
86 
87 /**
88  * writeback_in_progress - determine whether there is writeback in progress
89  * @bdi: the device's backing_dev_info structure.
90  *
91  * Determine whether there is writeback waiting to be handled against a
92  * backing device.
93  */
94 int writeback_in_progress(struct backing_dev_info *bdi)
95 {
96 	return !list_empty(&bdi->work_list);
97 }
98 
99 static void bdi_work_clear(struct bdi_work *work)
100 {
101 	clear_bit(WS_USED_B, &work->state);
102 	smp_mb__after_clear_bit();
103 	/*
104 	 * work can have disappeared at this point. bit waitq functions
105 	 * should be able to tolerate this, provided bdi_sched_wait does
106 	 * not dereference its pointer argument.
107 	 */
108 	wake_up_bit(&work->state, WS_USED_B);
109 }
110 
111 static void bdi_work_free(struct rcu_head *head)
112 {
113 	struct bdi_work *work = container_of(head, struct bdi_work, rcu_head);
114 
115 	if (!bdi_work_on_stack(work))
116 		kfree(work);
117 	else
118 		bdi_work_clear(work);
119 }
120 
121 static void wb_work_complete(struct bdi_work *work)
122 {
123 	const enum writeback_sync_modes sync_mode = work->args.sync_mode;
124 	int onstack = bdi_work_on_stack(work);
125 
126 	/*
127 	 * For allocated work, we can clear the done/seen bit right here.
128 	 * For on-stack work, we need to postpone both the clear and free
129 	 * to after the RCU grace period, since the stack could be invalidated
130 	 * as soon as bdi_work_clear() has done the wakeup.
131 	 */
132 	if (!onstack)
133 		bdi_work_clear(work);
134 	if (sync_mode == WB_SYNC_NONE || onstack)
135 		call_rcu(&work->rcu_head, bdi_work_free);
136 }
137 
138 static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work)
139 {
140 	/*
141 	 * The caller has retrieved the work arguments from this work,
142 	 * drop our reference. If this is the last ref, delete and free it
143 	 */
144 	if (atomic_dec_and_test(&work->pending)) {
145 		struct backing_dev_info *bdi = wb->bdi;
146 
147 		spin_lock(&bdi->wb_lock);
148 		list_del_rcu(&work->list);
149 		spin_unlock(&bdi->wb_lock);
150 
151 		wb_work_complete(work);
152 	}
153 }
154 
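/*
 * Add a work item to the bdi's work list and wake up a flusher thread to
 * process it.
 */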
155 static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
156 {
157 	work->seen = bdi->wb_mask;
158 	BUG_ON(!work->seen);
159 	atomic_set(&work->pending, bdi->wb_cnt);
160 	BUG_ON(!bdi->wb_cnt);
161 
162 	/*
163 	 * list_add_tail_rcu() contains the necessary barriers to
164 	 * make sure the above stores are seen before the item is
165 	 * noticed on the list
166 	 */
167 	spin_lock(&bdi->wb_lock);
168 	list_add_tail_rcu(&work->list, &bdi->work_list);
169 	spin_unlock(&bdi->wb_lock);
170 
171 	/*
172 	 * If the default thread isn't there, make sure we add it. When
173 	 * it gets created and wakes up, we'll run this work.
174 	 */
175 	if (unlikely(list_empty_careful(&bdi->wb_list)))
176 		wake_up_process(default_backing_dev_info.wb.task);
177 	else {
178 		struct bdi_writeback *wb = &bdi->wb;
179 
180 		if (wb->task)
181 			wake_up_process(wb->task);
182 	}
183 }
184 
185 /*
186  * Used for on-stack allocated work items. The caller needs to wait until
187  * the wb threads have acked the work before it's safe to continue.
188  */
189 static void bdi_wait_on_work_clear(struct bdi_work *work)
190 {
191 	wait_on_bit(&work->state, WS_USED_B, bdi_sched_wait,
192 		    TASK_UNINTERRUPTIBLE);
193 }
194 
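/*
 * Allocate a work item, queue it on @bdi and optionally wait for the
 * flusher threads to pick it up.
 */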
195 static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
196 				 struct wb_writeback_args *args,
197 				 int wait)
198 {
199 	struct bdi_work *work;
200 
201 	/*
202 	 * This is WB_SYNC_NONE writeback, so if allocation fails just
203 	 * wake up the flusher thread to do old dirty data writeback
204 	 */
205 	work = kmalloc(sizeof(*work), GFP_ATOMIC);
206 	if (work) {
207 		bdi_work_init(work, args);
208 		bdi_queue_work(bdi, work);
209 		if (wait)
210 			bdi_wait_on_work_clear(work);
211 	} else {
212 		struct bdi_writeback *wb = &bdi->wb;
213 
214 		if (wb->task)
215 			wake_up_process(wb->task);
216 	}
217 }
218 
219 /**
220  * bdi_sync_writeback - start and wait for writeback
221  * @bdi: the backing device to write from
222  * @sb: write inodes from this super_block
223  *
224  * Description:
225  *   This does WB_SYNC_ALL data integrity writeback and waits for the
226  *   IO to complete. Callers must hold the sb s_umount semaphore for
227  *   reading, to avoid having the super disappear before we are done.
228  */
229 static void bdi_sync_writeback(struct backing_dev_info *bdi,
230 			       struct super_block *sb)
231 {
232 	struct wb_writeback_args args = {
233 		.sb		= sb,
234 		.sync_mode	= WB_SYNC_ALL,
235 		.nr_pages	= LONG_MAX,
236 		.range_cyclic	= 0,
237 		/*
238 		 * Setting sb_pinned is not necessary for WB_SYNC_ALL, but
239 		 * let's make it explicit.
240 		 */
241 		.sb_pinned	= 1,
242 	};
243 	struct bdi_work work;
244 
245 	bdi_work_init(&work, &args);
246 	work.state |= WS_ONSTACK;
247 
248 	bdi_queue_work(bdi, &work);
249 	bdi_wait_on_work_clear(&work);
250 }
251 
252 /**
253  * bdi_start_writeback - start writeback
254  * @bdi: the backing device to write from
255  * @sb: write inodes from this super_block
256  * @nr_pages: the number of pages to write
257  * @sb_locked: caller already holds sb umount sem.
258  *
259  * Description:
260  *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
261  *   started when this function returns; we make no guarantees on
262  *   completion. Caller specifies whether the sb umount sem is already held or not.
263  *
264  */
265 void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb,
266 			 long nr_pages, int sb_locked)
267 {
268 	struct wb_writeback_args args = {
269 		.sb		= sb,
270 		.sync_mode	= WB_SYNC_NONE,
271 		.nr_pages	= nr_pages,
272 		.range_cyclic	= 1,
273 		.sb_pinned	= sb_locked,
274 	};
275 
276 	/*
277 	 * We treat @nr_pages=0 as the special case to do background writeback,
278 	 * ie. to sync pages until the background dirty threshold is reached.
279 	 */
280 	if (!nr_pages) {
281 		args.nr_pages = LONG_MAX;
282 		args.for_background = 1;
283 	}
284 
285 	bdi_alloc_queue_work(bdi, &args, sb_locked);
286 }
287 
288 /*
289  * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
290  * furthest end of its superblock's dirty-inode list.
291  *
292  * Before stamping the inode's ->dirtied_when, we check to see whether it is
293  * already the most-recently-dirtied inode on the b_dirty list.  If that is
294  * the case then the inode must have been redirtied while it was being written
295  * out and we don't reset its dirtied_when.
296  */
297 static void redirty_tail(struct inode *inode)
298 {
299 	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
300 
301 	if (!list_empty(&wb->b_dirty)) {
302 		struct inode *tail;
303 
304 		tail = list_entry(wb->b_dirty.next, struct inode, i_list);
305 		if (time_before(inode->dirtied_when, tail->dirtied_when))
306 			inode->dirtied_when = jiffies;
307 	}
308 	list_move(&inode->i_list, &wb->b_dirty);
309 }
310 
311 /*
312  * requeue inode for re-scanning after bdi->b_io list is exhausted.
313  */
314 static void requeue_io(struct inode *inode)
315 {
316 	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
317 
318 	list_move(&inode->i_list, &wb->b_more_io);
319 }
320 
321 static void inode_sync_complete(struct inode *inode)
322 {
323 	/*
324 	 * Prevent speculative execution through spin_unlock(&inode_lock);
325 	 */
326 	smp_mb();
327 	wake_up_bit(&inode->i_state, __I_SYNC);
328 }
329 
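/*
 * Did @inode get dirtied after time @t?  On 32-bit, also guard against a
 * wrapped-around dirtied_when (see below).
 */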
330 static bool inode_dirtied_after(struct inode *inode, unsigned long t)
331 {
332 	bool ret = time_after(inode->dirtied_when, t);
333 #ifndef CONFIG_64BIT
334 	/*
335 	 * For inodes being constantly redirtied, dirtied_when can get stuck.
336 	 * It _appears_ to be in the future, but is actually in the distant past.
337 	 * This test is necessary to prevent such wrapped-around relative times
338 	 * from permanently stopping the whole bdi writeback.
339 	 */
340 	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
341 #endif
342 	return ret;
343 }
344 
345 /*
346  * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
347  */
348 static void move_expired_inodes(struct list_head *delaying_queue,
349 			       struct list_head *dispatch_queue,
350 				unsigned long *older_than_this)
351 {
352 	LIST_HEAD(tmp);
353 	struct list_head *pos, *node;
354 	struct super_block *sb = NULL;
355 	struct inode *inode;
356 	int do_sb_sort = 0;
357 
358 	while (!list_empty(delaying_queue)) {
359 		inode = list_entry(delaying_queue->prev, struct inode, i_list);
360 		if (older_than_this &&
361 		    inode_dirtied_after(inode, *older_than_this))
362 			break;
363 		if (sb && sb != inode->i_sb)
364 			do_sb_sort = 1;
365 		sb = inode->i_sb;
366 		list_move(&inode->i_list, &tmp);
367 	}
368 
369 	/* just one sb in list, splice to dispatch_queue and we're done */
370 	if (!do_sb_sort) {
371 		list_splice(&tmp, dispatch_queue);
372 		return;
373 	}
374 
375 	/* Move inodes from one superblock together */
376 	while (!list_empty(&tmp)) {
377 		inode = list_entry(tmp.prev, struct inode, i_list);
378 		sb = inode->i_sb;
379 		list_for_each_prev_safe(pos, node, &tmp) {
380 			inode = list_entry(pos, struct inode, i_list);
381 			if (inode->i_sb == sb)
382 				list_move(&inode->i_list, dispatch_queue);
383 		}
384 	}
385 }
386 
387 /*
388  * Queue all expired dirty inodes for io, eldest first.
389  */
390 static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
391 {
392 	list_splice_init(&wb->b_more_io, wb->b_io.prev);
393 	move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
394 }
395 
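/*
 * Write the inode itself (its metadata) via the filesystem's ->write_inode
 * method, if one exists and the inode is not bad.
 */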
396 static int write_inode(struct inode *inode, struct writeback_control *wbc)
397 {
398 	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
399 		return inode->i_sb->s_op->write_inode(inode, wbc);
400 	return 0;
401 }
402 
403 /*
404  * Wait for writeback on an inode to complete.
405  */
406 static void inode_wait_for_writeback(struct inode *inode)
407 {
408 	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
409 	wait_queue_head_t *wqh;
410 
411 	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
412 	do {
413 		spin_unlock(&inode_lock);
414 		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
415 		spin_lock(&inode_lock);
416 	} while (inode->i_state & I_SYNC);
417 }
418 
419 /*
420  * Write out an inode's dirty pages.  Called under inode_lock.  Either the
421  * caller has a ref on the inode (either via __iget or via syscall against an fd)
422  * or the inode has I_WILL_FREE set (via generic_forget_inode).
423  *
424  * If wbc->sync_mode is WB_SYNC_ALL, wait on the writeout.
425  *
426  * The whole writeout design is quite complex and fragile.  We want to avoid
427  * starvation of particular inodes when others are being redirtied, prevent
428  * livelocks, etc.
429  *
430  * Called under inode_lock.
431  */
432 static int
433 writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
434 {
435 	struct address_space *mapping = inode->i_mapping;
436 	unsigned dirty;
437 	int ret;
438 
439 	if (!atomic_read(&inode->i_count))
440 		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
441 	else
442 		WARN_ON(inode->i_state & I_WILL_FREE);
443 
444 	if (inode->i_state & I_SYNC) {
445 		/*
446 		 * If this inode is locked for writeback and we are not doing
447 		 * writeback-for-data-integrity, move it to b_more_io so that
448 		 * writeback can proceed with the other inodes on b_io.
449 		 *
450 		 * We'll have another go at writing back this inode once we
451 		 * have completed a full scan of b_io.
452 		 */
453 		if (wbc->sync_mode != WB_SYNC_ALL) {
454 			requeue_io(inode);
455 			return 0;
456 		}
457 
458 		/*
459 		 * It's a data-integrity sync.  We must wait.
460 		 */
461 		inode_wait_for_writeback(inode);
462 	}
463 
464 	BUG_ON(inode->i_state & I_SYNC);
465 
466 	/* Set I_SYNC, reset I_DIRTY_PAGES */
467 	inode->i_state |= I_SYNC;
468 	inode->i_state &= ~I_DIRTY_PAGES;
469 	spin_unlock(&inode_lock);
470 
471 	ret = do_writepages(mapping, wbc);
472 
473 	/*
474 	 * Make sure to wait on the data before writing out the metadata.
475 	 * This is important for filesystems that modify metadata on data
476 	 * I/O completion.
477 	 */
478 	if (wbc->sync_mode == WB_SYNC_ALL) {
479 		int err = filemap_fdatawait(mapping);
480 		if (ret == 0)
481 			ret = err;
482 	}
483 
484 	/*
485 	 * Some filesystems may redirty the inode during the writeback
486 	 * due to delalloc, clear dirty metadata flags right before
487 	 * write_inode()
488 	 */
489 	spin_lock(&inode_lock);
490 	dirty = inode->i_state & I_DIRTY;
491 	inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
492 	spin_unlock(&inode_lock);
493 	/* Don't write the inode if only I_DIRTY_PAGES was set */
494 	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
495 		int err = write_inode(inode, wbc);
496 		if (ret == 0)
497 			ret = err;
498 	}
499 
500 	spin_lock(&inode_lock);
501 	inode->i_state &= ~I_SYNC;
502 	if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
503 		if ((inode->i_state & I_DIRTY_PAGES) && wbc->for_kupdate) {
504 			/*
505 			 * More pages get dirtied by a fast dirtier.
506 			 */
507 			goto select_queue;
508 		} else if (inode->i_state & I_DIRTY) {
509 			/*
510 			 * At least XFS will redirty the inode during the
511 			 * writeback (delalloc) and on io completion (isize).
512 			 */
513 			redirty_tail(inode);
514 		} else if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
515 			/*
516 			 * We didn't write back all the pages.  nfs_writepages()
517 			 * sometimes bails out without doing anything. Redirty
518 			 * the inode; Move it from b_io onto b_more_io/b_dirty.
519 			 */
520 			/*
521 			 * akpm: if the caller was the kupdate function we put
522 			 * this inode at the head of b_dirty so it gets first
523 			 * consideration.  Otherwise, move it to the tail, for
524 			 * the reasons described there.  I'm not really sure
525 			 * how much sense this makes.  Presumably I had a good
526 			 * reason for doing it this way, and I'd rather not
527 			 * muck with it at present.
528 			 */
529 			if (wbc->for_kupdate) {
530 				/*
531 				 * For the kupdate function we move the inode
532 				 * to b_more_io so it will get more writeout as
533 				 * soon as the queue becomes uncongested.
534 				 */
535 				inode->i_state |= I_DIRTY_PAGES;
536 select_queue:
537 				if (wbc->nr_to_write <= 0) {
538 					/*
539 					 * slice used up: queue for next turn
540 					 */
541 					requeue_io(inode);
542 				} else {
543 					/*
544 					 * somehow blocked: retry later
545 					 */
546 					redirty_tail(inode);
547 				}
548 			} else {
549 				/*
550 				 * Otherwise fully redirty the inode so that
551 				 * other inodes on this superblock will get some
552 				 * writeout.  Otherwise heavy writing to one
553 				 * file would indefinitely suspend writeout of
554 				 * all the other files.
555 				 */
556 				inode->i_state |= I_DIRTY_PAGES;
557 				redirty_tail(inode);
558 			}
559 		} else if (atomic_read(&inode->i_count)) {
560 			/*
561 			 * The inode is clean, in use
562 			 */
563 			list_move(&inode->i_list, &inode_in_use);
564 		} else {
565 			/*
566 			 * The inode is clean, unused
567 			 */
568 			list_move(&inode->i_list, &inode_unused);
569 		}
570 	}
571 	inode_sync_complete(inode);
572 	return ret;
573 }
574 
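/*
 * Undo pin_sb_for_writeback(): drop the s_umount read lock and the
 * superblock reference.
 */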
575 static void unpin_sb_for_writeback(struct super_block *sb)
576 {
577 	up_read(&sb->s_umount);
578 	put_super(sb);
579 }
580 
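/*
 * Whether pin_sb_for_writeback() took a reference that must be dropped again
 * (SB_PINNED), relied on one the caller already holds (SB_NOT_PINNED), or
 * failed to pin the superblock at all (SB_PIN_FAILED).
 */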
581 enum sb_pin_state {
582 	SB_PINNED,
583 	SB_NOT_PINNED,
584 	SB_PIN_FAILED
585 };
586 
587 /*
588  * For WB_SYNC_NONE writeback, the caller does not have the sb pinned
589  * before calling writeback. So make sure that we do pin it, so it doesn't
590  * go away while we are writing inodes from it.
591  */
592 static enum sb_pin_state pin_sb_for_writeback(struct writeback_control *wbc,
593 					      struct super_block *sb)
594 {
595 	/*
596 	 * Caller must already hold the ref for this
597 	 */
598 	if (wbc->sync_mode == WB_SYNC_ALL || wbc->sb_pinned) {
599 		WARN_ON(!rwsem_is_locked(&sb->s_umount));
600 		return SB_NOT_PINNED;
601 	}
602 	spin_lock(&sb_lock);
603 	sb->s_count++;
604 	if (down_read_trylock(&sb->s_umount)) {
605 		if (sb->s_root) {
606 			spin_unlock(&sb_lock);
607 			return SB_PINNED;
608 		}
609 		/*
610 		 * umounted, drop rwsem again and fall through to failure
611 		 */
612 		up_read(&sb->s_umount);
613 	}
614 	sb->s_count--;
615 	spin_unlock(&sb_lock);
616 	return SB_PIN_FAILED;
617 }
618 
619 /*
620  * Write a portion of b_io inodes which belong to @sb.
621  * If @wbc->sb != NULL, then find and write all such
622  * inodes. Otherwise write only ones which go sequentially
623  * in reverse order.
624  * Return 1, if the caller writeback routine should be
625  * interrupted. Otherwise return 0.
626  */
627 static int writeback_sb_inodes(struct super_block *sb,
628 			       struct bdi_writeback *wb,
629 			       struct writeback_control *wbc)
630 {
631 	while (!list_empty(&wb->b_io)) {
632 		long pages_skipped;
633 		struct inode *inode = list_entry(wb->b_io.prev,
634 						 struct inode, i_list);
635 		if (wbc->sb && sb != inode->i_sb) {
636 			/* super block given and doesn't
637 			   match, skip this inode */
638 			redirty_tail(inode);
639 			continue;
640 		}
641 		if (sb != inode->i_sb)
642 			/* finish with this superblock */
643 			return 0;
644 		if (inode->i_state & (I_NEW | I_WILL_FREE)) {
645 			requeue_io(inode);
646 			continue;
647 		}
648 		/*
649 		 * Was this inode dirtied after this writeback pass started?
650 		 * This keeps sync from doing extra work and avoids livelock.
651 		 */
652 		if (inode_dirtied_after(inode, wbc->wb_start))
653 			return 1;
654 
655 		BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));
656 		__iget(inode);
657 		pages_skipped = wbc->pages_skipped;
658 		writeback_single_inode(inode, wbc);
659 		if (wbc->pages_skipped != pages_skipped) {
660 			/*
661 			 * writeback is not making progress due to locked
662 			 * buffers.  Skip this inode for now.
663 			 */
664 			redirty_tail(inode);
665 		}
666 		spin_unlock(&inode_lock);
667 		iput(inode);
668 		cond_resched();
669 		spin_lock(&inode_lock);
670 		if (wbc->nr_to_write <= 0) {
671 			wbc->more_io = 1;
672 			return 1;
673 		}
674 		if (!list_empty(&wb->b_more_io))
675 			wbc->more_io = 1;
676 	}
677 	/* b_io is empty */
678 	return 1;
679 }
680 
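/*
 * Write back a batch of b_io inodes for this bdi, refilling b_io from the
 * dirty lists first and pinning each inode's superblock as needed.
 */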
681 static void writeback_inodes_wb(struct bdi_writeback *wb,
682 				struct writeback_control *wbc)
683 {
684 	int ret = 0;
685 
686 	wbc->wb_start = jiffies; /* livelock avoidance */
687 	spin_lock(&inode_lock);
688 	if (!wbc->for_kupdate || list_empty(&wb->b_io))
689 		queue_io(wb, wbc->older_than_this);
690 
691 	while (!list_empty(&wb->b_io)) {
692 		struct inode *inode = list_entry(wb->b_io.prev,
693 						 struct inode, i_list);
694 		struct super_block *sb = inode->i_sb;
695 		enum sb_pin_state state;
696 
697 		if (wbc->sb && sb != wbc->sb) {
698 			/* super block given and doesn't
699 			   match, skip this inode */
700 			redirty_tail(inode);
701 			continue;
702 		}
703 		state = pin_sb_for_writeback(wbc, sb);
704 
705 		if (state == SB_PIN_FAILED) {
706 			requeue_io(inode);
707 			continue;
708 		}
709 		ret = writeback_sb_inodes(sb, wb, wbc);
710 
711 		if (state == SB_PINNED)
712 			unpin_sb_for_writeback(sb);
713 		if (ret)
714 			break;
715 	}
716 	spin_unlock(&inode_lock);
717 	/* Leave any unwritten inodes on b_io */
718 }
719 
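/*
 * Helper for callers that already have a writeback_control set up: write
 * back inodes on the bdi named in @wbc.
 */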
720 void writeback_inodes_wbc(struct writeback_control *wbc)
721 {
722 	struct backing_dev_info *bdi = wbc->bdi;
723 
724 	writeback_inodes_wb(&bdi->wb, wbc);
725 }
726 
727 /*
728  * The maximum number of pages to writeout in a single bdi flush/kupdate
729  * operation.  We do this so we don't hold I_SYNC against an inode for
730  * enormous amounts of time, which would block a userspace task which has
731  * been forced to throttle against that inode.  Also, the code reevaluates
732  * the dirty each time it has written this many pages.
733  * the dirty limits each time it has written this many pages.
734 #define MAX_WRITEBACK_PAGES     1024
735 
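/*
 * Are we over the background dirty threshold?  Used to decide whether
 * background writeback should keep going.
 */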
736 static inline bool over_bground_thresh(void)
737 {
738 	unsigned long background_thresh, dirty_thresh;
739 
740 	get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
741 
742 	return (global_page_state(NR_FILE_DIRTY) +
743 		global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
744 }
745 
746 /*
747  * Explicit flushing or periodic writeback of "old" data.
748  *
749  * Define "old": the first time one of an inode's pages is dirtied, we mark the
750  * dirtying-time in the inode (->dirtied_when).  So this periodic writeback code
751  * just walks the superblock inode list, writing back any inodes which are
752  * older than a specific point in time.
753  *
754  * Try to run once per dirty_writeback_interval.  But if a writeback event
755  * takes longer than dirty_writeback_interval, then leave a
756  * one-second gap.
757  *
758  * older_than_this takes precedence over nr_to_write.  So we'll only write back
759  * all dirty pages if they are all attached to "old" mappings.
760  */
761 static long wb_writeback(struct bdi_writeback *wb,
762 			 struct wb_writeback_args *args)
763 {
764 	struct writeback_control wbc = {
765 		.bdi			= wb->bdi,
766 		.sb			= args->sb,
767 		.sync_mode		= args->sync_mode,
768 		.older_than_this	= NULL,
769 		.for_kupdate		= args->for_kupdate,
770 		.for_background		= args->for_background,
771 		.range_cyclic		= args->range_cyclic,
772 		.sb_pinned		= args->sb_pinned,
773 	};
774 	unsigned long oldest_jif;
775 	long wrote = 0;
776 	struct inode *inode;
777 
778 	if (wbc.for_kupdate) {
779 		wbc.older_than_this = &oldest_jif;
780 		oldest_jif = jiffies -
781 				msecs_to_jiffies(dirty_expire_interval * 10);
782 	}
783 	if (!wbc.range_cyclic) {
784 		wbc.range_start = 0;
785 		wbc.range_end = LLONG_MAX;
786 	}
787 
788 	for (;;) {
789 		/*
790 		 * Stop writeback when nr_pages has been consumed
791 		 */
792 		if (args->nr_pages <= 0)
793 			break;
794 
795 		/*
796 		 * For background writeout, stop when we are below the
797 		 * background dirty threshold
798 		 */
799 		if (args->for_background && !over_bground_thresh())
800 			break;
801 
802 		wbc.more_io = 0;
803 		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
804 		wbc.pages_skipped = 0;
805 		writeback_inodes_wb(wb, &wbc);
806 		args->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
807 		wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;
808 
809 		/*
810 		 * If we consumed everything, see if we have more
811 		 */
812 		if (wbc.nr_to_write <= 0)
813 			continue;
814 		/*
815 		 * Didn't write everything and we don't have more IO, bail
816 		 */
817 		if (!wbc.more_io)
818 			break;
819 		/*
820 		 * Did we write something? Try for more
821 		 */
822 		if (wbc.nr_to_write < MAX_WRITEBACK_PAGES)
823 			continue;
824 		/*
825 		 * Nothing written. Wait for some inode to
826 		 * become available for writeback. Otherwise
827 		 * we'll just busyloop.
828 		 */
829 		spin_lock(&inode_lock);
830 		if (!list_empty(&wb->b_more_io))  {
831 			inode = list_entry(wb->b_more_io.prev,
832 						struct inode, i_list);
833 			inode_wait_for_writeback(inode);
834 		}
835 		spin_unlock(&inode_lock);
836 	}
837 
838 	return wrote;
839 }
840 
841 /*
842  * Return the next bdi_work struct that hasn't been processed by this
843  * wb thread yet. ->seen is initially set for each thread that exists
844  * for this device, when a thread first notices a piece of work it
845  * clears its bit. Depending on writeback type, the thread will notify
846  * completion on either receiving the work (WB_SYNC_NONE) or after
847  * it is done (WB_SYNC_ALL).
848  */
849 static struct bdi_work *get_next_work_item(struct backing_dev_info *bdi,
850 					   struct bdi_writeback *wb)
851 {
852 	struct bdi_work *work, *ret = NULL;
853 
854 	rcu_read_lock();
855 
856 	list_for_each_entry_rcu(work, &bdi->work_list, list) {
857 		if (!test_bit(wb->nr, &work->seen))
858 			continue;
859 		clear_bit(wb->nr, &work->seen);
860 
861 		ret = work;
862 		break;
863 	}
864 
865 	rcu_read_unlock();
866 	return ret;
867 }
868 
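/*
 * kupdate-style periodic writeback: at most once per dirty_writeback_interval,
 * kick off writeback of dirty data and inodes that are old enough.
 */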
869 static long wb_check_old_data_flush(struct bdi_writeback *wb)
870 {
871 	unsigned long expired;
872 	long nr_pages;
873 
874 	/*
875 	 * When set to zero, disable periodic writeback
876 	 */
877 	if (!dirty_writeback_interval)
878 		return 0;
879 
880 	expired = wb->last_old_flush +
881 			msecs_to_jiffies(dirty_writeback_interval * 10);
882 	if (time_before(jiffies, expired))
883 		return 0;
884 
885 	wb->last_old_flush = jiffies;
886 	nr_pages = global_page_state(NR_FILE_DIRTY) +
887 			global_page_state(NR_UNSTABLE_NFS) +
888 			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
889 
890 	if (nr_pages) {
891 		struct wb_writeback_args args = {
892 			.nr_pages	= nr_pages,
893 			.sync_mode	= WB_SYNC_NONE,
894 			.for_kupdate	= 1,
895 			.range_cyclic	= 1,
896 		};
897 
898 		return wb_writeback(wb, &args);
899 	}
900 
901 	return 0;
902 }
903 
904 /*
905  * Retrieve work items and do the writeback they describe
906  */
907 long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
908 {
909 	struct backing_dev_info *bdi = wb->bdi;
910 	struct bdi_work *work;
911 	long wrote = 0;
912 
913 	while ((work = get_next_work_item(bdi, wb)) != NULL) {
914 		struct wb_writeback_args args = work->args;
915 		int post_clear;
916 
917 		/*
918 		 * Override sync mode, in case we must wait for completion
919 		 */
920 		if (force_wait)
921 			work->args.sync_mode = args.sync_mode = WB_SYNC_ALL;
922 
923 		post_clear = args.sync_mode == WB_SYNC_ALL || args.sb_pinned;
924 
925 		/*
926 		 * If this isn't a data integrity operation, just notify
927 		 * that we have seen this work and we are now starting it.
928 		 */
929 		if (!post_clear)
930 			wb_clear_pending(wb, work);
931 
932 		wrote += wb_writeback(wb, &args);
933 
934 		/*
935 		 * This is a data integrity writeback, so only do the
936 		 * notification when we have completed the work.
937 		 */
938 		if (post_clear)
939 			wb_clear_pending(wb, work);
940 	}
941 
942 	/*
943 	 * Check for periodic writeback, kupdated() style
944 	 */
945 	wrote += wb_check_old_data_flush(wb);
946 
947 	return wrote;
948 }
949 
950 /*
951  * Handle writeback of dirty data for the device backed by this bdi. Also
952  * wakes up periodically and does kupdated style flushing.
953  */
954 int bdi_writeback_task(struct bdi_writeback *wb)
955 {
956 	unsigned long last_active = jiffies;
957 	unsigned long wait_jiffies = -1UL;
958 	long pages_written;
959 
960 	while (!kthread_should_stop()) {
961 		pages_written = wb_do_writeback(wb, 0);
962 
963 		if (pages_written)
964 			last_active = jiffies;
965 		else if (wait_jiffies != -1UL) {
966 			unsigned long max_idle;
967 
968 			/*
969 			 * Longest period of inactivity that we tolerate. If we
970 			 * see dirty data again later, the task will get
971 			 * recreated automatically.
972 			 */
973 			max_idle = max(5UL * 60 * HZ, wait_jiffies);
974 			if (time_after(jiffies, max_idle + last_active))
975 				break;
976 		}
977 
978 		if (dirty_writeback_interval) {
979 			wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
980 			schedule_timeout_interruptible(wait_jiffies);
981 		} else {
982 			set_current_state(TASK_INTERRUPTIBLE);
983 			if (list_empty_careful(&wb->bdi->work_list) &&
984 			    !kthread_should_stop())
985 				schedule();
986 			__set_current_state(TASK_RUNNING);
987 		}
988 
989 		try_to_freeze();
990 	}
991 
992 	return 0;
993 }
994 
995 /*
996  * Schedule writeback for all backing devices. This does WB_SYNC_NONE
997  * writeback, for integrity writeback see bdi_sync_writeback().
998  */
999 static void bdi_writeback_all(struct super_block *sb, long nr_pages)
1000 {
1001 	struct wb_writeback_args args = {
1002 		.sb		= sb,
1003 		.nr_pages	= nr_pages,
1004 		.sync_mode	= WB_SYNC_NONE,
1005 	};
1006 	struct backing_dev_info *bdi;
1007 
1008 	rcu_read_lock();
1009 
1010 	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
1011 		if (!bdi_has_dirty_io(bdi))
1012 			continue;
1013 
1014 		bdi_alloc_queue_work(bdi, &args, 0);
1015 	}
1016 
1017 	rcu_read_unlock();
1018 }
1019 
1020 /*
1021  * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
1022  * the whole world.
1023  */
1024 void wakeup_flusher_threads(long nr_pages)
1025 {
1026 	if (nr_pages == 0)
1027 		nr_pages = global_page_state(NR_FILE_DIRTY) +
1028 				global_page_state(NR_UNSTABLE_NFS);
1029 	bdi_writeback_all(NULL, nr_pages);
1030 }
1031 
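/*
 * When the block_dump sysctl is enabled, log which task dirtied which inode
 * so that write activity can be traced.
 */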
1032 static noinline void block_dump___mark_inode_dirty(struct inode *inode)
1033 {
1034 	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
1035 		struct dentry *dentry;
1036 		const char *name = "?";
1037 
1038 		dentry = d_find_alias(inode);
1039 		if (dentry) {
1040 			spin_lock(&dentry->d_lock);
1041 			name = (const char *) dentry->d_name.name;
1042 		}
1043 		printk(KERN_DEBUG
1044 		       "%s(%d): dirtied inode %lu (%s) on %s\n",
1045 		       current->comm, task_pid_nr(current), inode->i_ino,
1046 		       name, inode->i_sb->s_id);
1047 		if (dentry) {
1048 			spin_unlock(&dentry->d_lock);
1049 			dput(dentry);
1050 		}
1051 	}
1052 }
1053 
1054 /**
1055  *	__mark_inode_dirty -	internal function
1056  *	@inode: inode to mark
1057  *	@flags: what kind of dirty (i.e. I_DIRTY_SYNC)
1058  *	Mark an inode as dirty. Callers should use mark_inode_dirty or
1059  *	mark_inode_dirty_sync.
1060  *
1061  * Put the inode on the super block's dirty list.
1062  *
1063  * CAREFUL! We mark it dirty unconditionally, but move it onto the
1064  * dirty list only if it is hashed or if it refers to a blockdev.
1065  * If it was not hashed, it will never be added to the dirty list
1066  * even if it is later hashed, as it will have been marked dirty already.
1067  *
1068  * In short, make sure you hash any inodes _before_ you start marking
1069  * them dirty.
1070  *
1071  * This function *must* be atomic for the I_DIRTY_PAGES case -
1072  * set_page_dirty() is called under spinlock in several places.
1073  *
1074  * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
1075  * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
1076  * the kernel-internal blockdev inode represents the dirtying time of the
1077  * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
1078  * page->mapping->host, so the page-dirtying time is recorded in the internal
1079  * blockdev inode.
1080  */
1081 void __mark_inode_dirty(struct inode *inode, int flags)
1082 {
1083 	struct super_block *sb = inode->i_sb;
1084 
1085 	/*
1086 	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
1087 	 * dirty the inode itself
1088 	 */
1089 	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
1090 		if (sb->s_op->dirty_inode)
1091 			sb->s_op->dirty_inode(inode);
1092 	}
1093 
1094 	/*
1095 	 * make sure that changes are seen by all cpus before we test i_state
1096 	 * -- mikulas
1097 	 */
1098 	smp_mb();
1099 
1100 	/* avoid the locking if we can */
1101 	if ((inode->i_state & flags) == flags)
1102 		return;
1103 
1104 	if (unlikely(block_dump))
1105 		block_dump___mark_inode_dirty(inode);
1106 
1107 	spin_lock(&inode_lock);
1108 	if ((inode->i_state & flags) != flags) {
1109 		const int was_dirty = inode->i_state & I_DIRTY;
1110 
1111 		inode->i_state |= flags;
1112 
1113 		/*
1114 		 * If the inode is being synced, just update its dirty state.
1115 		 * The unlocker will place the inode on the appropriate
1116 		 * superblock list, based upon its state.
1117 		 */
1118 		if (inode->i_state & I_SYNC)
1119 			goto out;
1120 
1121 		/*
1122 		 * Only add valid (hashed) inodes to the superblock's
1123 		 * dirty list.  Add blockdev inodes as well.
1124 		 */
1125 		if (!S_ISBLK(inode->i_mode)) {
1126 			if (hlist_unhashed(&inode->i_hash))
1127 				goto out;
1128 		}
1129 		if (inode->i_state & (I_FREEING|I_CLEAR))
1130 			goto out;
1131 
1132 		/*
1133 		 * If the inode was already on b_dirty/b_io/b_more_io, don't
1134 		 * reposition it (that would break b_dirty time-ordering).
1135 		 */
1136 		if (!was_dirty) {
1137 			struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
1138 			struct backing_dev_info *bdi = wb->bdi;
1139 
1140 			if (bdi_cap_writeback_dirty(bdi) &&
1141 			    !test_bit(BDI_registered, &bdi->state)) {
1142 				WARN_ON(1);
1143 				printk(KERN_ERR "bdi-%s not registered\n",
1144 								bdi->name);
1145 			}
1146 
1147 			inode->dirtied_when = jiffies;
1148 			list_move(&inode->i_list, &wb->b_dirty);
1149 		}
1150 	}
1151 out:
1152 	spin_unlock(&inode_lock);
1153 }
1154 EXPORT_SYMBOL(__mark_inode_dirty);
1155 
1156 /*
1157  * Write out a superblock's list of dirty inodes.  A wait will be performed
1158  * upon no inodes, all inodes or the final one, depending upon sync_mode.
1159  *
1160  * If older_than_this is non-NULL, then only write out inodes which
1161  * had their first dirtying at a time earlier than *older_than_this.
1162  *
1163  * If `bdi' is non-zero then we're being asked to writeback a specific queue.
1164  * This function assumes that the blockdev superblock's inodes are backed by
1165  * a variety of queues, so all inodes are searched.  For other superblocks,
1166  * assume that all inodes are backed by the same queue.
1167  *
1168  * The inodes to be written are parked on bdi->b_io.  They are moved back onto
1169  * bdi->b_dirty as they are selected for writing.  This way, none can be missed
1170  * on the writer throttling path, and we get decent balancing between many
1171  * throttled threads: we don't want them all piling up on inode_sync_wait.
1172  */
1173 static void wait_sb_inodes(struct super_block *sb)
1174 {
1175 	struct inode *inode, *old_inode = NULL;
1176 
1177 	/*
1178 	 * We need to be protected against the filesystem going from
1179 	 * r/o to r/w or vice versa.
1180 	 */
1181 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
1182 
1183 	spin_lock(&inode_lock);
1184 
1185 	/*
1186 	 * Data integrity sync. Must wait for all pages under writeback,
1187 	 * because there may have been pages dirtied before our sync
1188 	 * call, but which had writeout started before we write it out.
1189 	 * In which case, the inode may not be on the dirty list, but
1190 	 * we still have to wait for that writeout.
1191 	 */
1192 	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
1193 		struct address_space *mapping;
1194 
1195 		if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
1196 			continue;
1197 		mapping = inode->i_mapping;
1198 		if (mapping->nrpages == 0)
1199 			continue;
1200 		__iget(inode);
1201 		spin_unlock(&inode_lock);
1202 		/*
1203 		 * We hold a reference to 'inode' so it couldn't have
1204 		 * been removed from s_inodes list while we dropped the
1205 		 * inode_lock.  We cannot iput the inode now as we can
1206 		 * be holding the last reference and we cannot iput it
1207 		 * under inode_lock. So we keep the reference and iput
1208 		 * it later.
1209 		 */
1210 		iput(old_inode);
1211 		old_inode = inode;
1212 
1213 		filemap_fdatawait(mapping);
1214 
1215 		cond_resched();
1216 
1217 		spin_lock(&inode_lock);
1218 	}
1219 	spin_unlock(&inode_lock);
1220 	iput(old_inode);
1221 }
1222 
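/*
 * Kick off WB_SYNC_NONE writeback for @sb, sized to roughly cover all
 * currently dirty pages and inodes.
 */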
1223 static void __writeback_inodes_sb(struct super_block *sb, int sb_locked)
1224 {
1225 	unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
1226 	unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
1227 	long nr_to_write;
1228 
1229 	nr_to_write = nr_dirty + nr_unstable +
1230 			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
1231 
1232 	bdi_start_writeback(sb->s_bdi, sb, nr_to_write, sb_locked);
1233 }
1234 
1235 /**
1236  * writeback_inodes_sb	-	writeback dirty inodes from given super_block
1237  * @sb: the superblock
1238  *
1239  * Start writeback on some inodes on this super_block. No guarantees are made
1240  * on how many (if any) will be written, and this function does not wait
1241  * for IO completion of the submitted IO; it only schedules the writeback
1242  * and returns.
1243  */
1244 void writeback_inodes_sb(struct super_block *sb)
1245 {
1246 	__writeback_inodes_sb(sb, 0);
1247 }
1248 EXPORT_SYMBOL(writeback_inodes_sb);
1249 
1250 /**
1251  * writeback_inodes_sb_locked	- writeback dirty inodes from given super_block
1252  * @sb: the superblock
1253  *
1254  * Like writeback_inodes_sb(), except the caller already holds the
1255  * sb umount sem.
1256  */
1257 void writeback_inodes_sb_locked(struct super_block *sb)
1258 {
1259 	__writeback_inodes_sb(sb, 1);
1260 }
1261 
1262 /**
1263  * writeback_inodes_sb_if_idle	-	start writeback if none underway
1264  * @sb: the superblock
1265  *
1266  * Invoke writeback_inodes_sb if no writeback is currently underway.
1267  * Returns 1 if writeback was started, 0 if not.
1268  */
1269 int writeback_inodes_sb_if_idle(struct super_block *sb)
1270 {
1271 	if (!writeback_in_progress(sb->s_bdi)) {
1272 		writeback_inodes_sb(sb);
1273 		return 1;
1274 	} else
1275 		return 0;
1276 }
1277 EXPORT_SYMBOL(writeback_inodes_sb_if_idle);
1278 
1279 /**
1280  * sync_inodes_sb	-	sync sb inode pages
1281  * @sb: the superblock
1282  *
1283  * This function writes and waits on any dirty inode belonging to this
1284  * super_block and does not return until the IO has completed.
1285  */
1286 void sync_inodes_sb(struct super_block *sb)
1287 {
1288 	bdi_sync_writeback(sb->s_bdi, sb);
1289 	wait_sb_inodes(sb);
1290 }
1291 EXPORT_SYMBOL(sync_inodes_sb);
1292 
1293 /**
1294  * write_inode_now	-	write an inode to disk
1295  * @inode: inode to write to disk
1296  * @sync: whether the write should be synchronous or not
1297  *
1298  * This function commits an inode to disk immediately if it is dirty. This is
1299  * primarily needed by knfsd.
1300  *
1301  * The caller must either have a ref on the inode or must have set I_WILL_FREE.
1302  */
1303 int write_inode_now(struct inode *inode, int sync)
1304 {
1305 	int ret;
1306 	struct writeback_control wbc = {
1307 		.nr_to_write = LONG_MAX,
1308 		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
1309 		.range_start = 0,
1310 		.range_end = LLONG_MAX,
1311 	};
1312 
1313 	if (!mapping_cap_writeback_dirty(inode->i_mapping))
1314 		wbc.nr_to_write = 0;
1315 
1316 	might_sleep();
1317 	spin_lock(&inode_lock);
1318 	ret = writeback_single_inode(inode, &wbc);
1319 	spin_unlock(&inode_lock);
1320 	if (sync)
1321 		inode_sync_wait(inode);
1322 	return ret;
1323 }
1324 EXPORT_SYMBOL(write_inode_now);
1325 
1326 /**
1327  * sync_inode - write an inode and its pages to disk.
1328  * @inode: the inode to sync
1329  * @wbc: controls the writeback mode
1330  *
1331  * sync_inode() will write an inode and its pages to disk.  It will also
1332  * correctly update the inode on its superblock's dirty inode lists and will
1333  * update inode->i_state.
1334  *
1335  * The caller must have a ref on the inode.
1336  */
1337 int sync_inode(struct inode *inode, struct writeback_control *wbc)
1338 {
1339 	int ret;
1340 
1341 	spin_lock(&inode_lock);
1342 	ret = writeback_single_inode(inode, wbc);
1343 	spin_unlock(&inode_lock);
1344 	return ret;
1345 }
1346 EXPORT_SYMBOL(sync_inode);
1347