xref: /linux/fs/fs-writeback.c (revision e190bfe56841551b1ad5abb42ebd0c4798cc8c01)
1 /*
2  * fs/fs-writeback.c
3  *
4  * Copyright (C) 2002, Linus Torvalds.
5  *
6  * Contains all the functions related to writing back and waiting
7  * upon dirty inodes against superblocks, and writing back dirty
8  * pages against inodes.  ie: data writeback.  Writeout of the
9  * inode itself is not handled here.
10  *
11  * 10Apr2002	Andrew Morton
12  *		Split out of fs/inode.c
13  *		Additions for address_space-based writeback
14  */
15 
16 #include <linux/kernel.h>
17 #include <linux/module.h>
18 #include <linux/spinlock.h>
19 #include <linux/slab.h>
20 #include <linux/sched.h>
21 #include <linux/fs.h>
22 #include <linux/mm.h>
23 #include <linux/kthread.h>
24 #include <linux/freezer.h>
25 #include <linux/writeback.h>
26 #include <linux/blkdev.h>
27 #include <linux/backing-dev.h>
28 #include <linux/buffer_head.h>
29 #include "internal.h"
30 
31 #define inode_to_bdi(inode)	((inode)->i_mapping->backing_dev_info)
32 
33 /*
34  * We don't actually have pdflush, but this one is exported through /proc...
35  */
36 int nr_pdflush_threads;
37 
38 /*
39  * Passed into wb_writeback(), essentially a subset of writeback_control
40  */
41 struct wb_writeback_args {
42 	long nr_pages;
43 	struct super_block *sb;
44 	enum writeback_sync_modes sync_mode;
45 	unsigned int for_kupdate:1;
46 	unsigned int range_cyclic:1;
47 	unsigned int for_background:1;
48 };
49 
50 /*
51  * Work items for the bdi_writeback threads
52  */
53 struct bdi_work {
54 	struct list_head list;		/* pending work list */
55 	struct rcu_head rcu_head;	/* for RCU free/clear of work */
56 
57 	unsigned long seen;		/* threads that have seen this work */
58 	atomic_t pending;		/* number of threads still to do work */
59 
60 	struct wb_writeback_args args;	/* writeback arguments */
61 
62 	unsigned long state;		/* flag bits, see WS_* */
63 };
64 
65 enum {
66 	WS_INPROGRESS = 0,
67 	WS_ONSTACK,
68 };
69 
70 static inline void bdi_work_init(struct bdi_work *work,
71 				 struct wb_writeback_args *args)
72 {
73 	INIT_RCU_HEAD(&work->rcu_head);
74 	work->args = *args;
75 	__set_bit(WS_INPROGRESS, &work->state);
76 }
77 
78 /**
79  * writeback_in_progress - determine whether there is writeback in progress
80  * @bdi: the device's backing_dev_info structure.
81  *
82  * Determine whether there is writeback waiting to be handled against a
83  * backing device.
84  */
85 int writeback_in_progress(struct backing_dev_info *bdi)
86 {
87 	return !list_empty(&bdi->work_list);
88 }
89 
90 static void bdi_work_free(struct rcu_head *head)
91 {
92 	struct bdi_work *work = container_of(head, struct bdi_work, rcu_head);
93 
94 	clear_bit(WS_INPROGRESS, &work->state);
95 	smp_mb__after_clear_bit();
96 	wake_up_bit(&work->state, WS_INPROGRESS);
97 
98 	if (!test_bit(WS_ONSTACK, &work->state))
99 		kfree(work);
100 }
101 
102 static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work)
103 {
104 	/*
105 	 * The caller has retrieved the work arguments from this work,
106 	 * so drop our reference. If this is the last ref, delete and free it.
107 	 */
108 	if (atomic_dec_and_test(&work->pending)) {
109 		struct backing_dev_info *bdi = wb->bdi;
110 
111 		spin_lock(&bdi->wb_lock);
112 		list_del_rcu(&work->list);
113 		spin_unlock(&bdi->wb_lock);
114 
115 		call_rcu(&work->rcu_head, bdi_work_free);
116 	}
117 }
118 
119 static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
120 {
121 	work->seen = bdi->wb_mask;
122 	BUG_ON(!work->seen);
123 	atomic_set(&work->pending, bdi->wb_cnt);
124 	BUG_ON(!bdi->wb_cnt);
125 
126 	/*
127 	 * list_add_tail_rcu() contains the necessary barriers to
128 	 * make sure the above stores are seen before the item is
129 	 * noticed on the list
130 	 */
131 	spin_lock(&bdi->wb_lock);
132 	list_add_tail_rcu(&work->list, &bdi->work_list);
133 	spin_unlock(&bdi->wb_lock);
134 
135 	/*
136 	 * If this bdi has no flusher thread yet, wake the default bdi
137 	 * thread so one gets forked. Once it wakes up, it will run this work.
138 	 */
139 	if (unlikely(list_empty_careful(&bdi->wb_list)))
140 		wake_up_process(default_backing_dev_info.wb.task);
141 	else {
142 		struct bdi_writeback *wb = &bdi->wb;
143 
144 		if (wb->task)
145 			wake_up_process(wb->task);
146 	}
147 }
148 
149 /*
150  * Used for on-stack allocated work items. The caller needs to wait until
151  * the wb threads have acked the work before it's safe to continue.
152  */
153 static void bdi_wait_on_work_done(struct bdi_work *work)
154 {
155 	wait_on_bit(&work->state, WS_INPROGRESS, bdi_sched_wait,
156 		    TASK_UNINTERRUPTIBLE);
157 }
158 
159 static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
160 				 struct wb_writeback_args *args)
161 {
162 	struct bdi_work *work;
163 
164 	/*
165 	 * This is WB_SYNC_NONE writeback, so if the allocation fails just
166 	 * wake up the thread for old dirty data writeback.
167 	 */
168 	work = kmalloc(sizeof(*work), GFP_ATOMIC);
169 	if (work) {
170 		bdi_work_init(work, args);
171 		bdi_queue_work(bdi, work);
172 	} else {
173 		struct bdi_writeback *wb = &bdi->wb;
174 
175 		if (wb->task)
176 			wake_up_process(wb->task);
177 	}
178 }
179 
180 /**
181  * bdi_queue_work_onstack - start and wait for writeback
182  * @args: writeback arguments, including the super_block to write inodes from
183  *
184  * Description:
185  *   This function initiates writeback and waits for the operation to
186  *   complete. Callers must hold the sb s_umount semaphore for
187  *   reading, to avoid having the super disappear before we are done.
188  */
189 static void bdi_queue_work_onstack(struct wb_writeback_args *args)
190 {
191 	struct bdi_work work;
192 
193 	bdi_work_init(&work, args);
194 	__set_bit(WS_ONSTACK, &work.state);
195 
196 	bdi_queue_work(args->sb->s_bdi, &work);
197 	bdi_wait_on_work_done(&work);
198 }
199 
200 /**
201  * bdi_start_writeback - start writeback
202  * @bdi: the backing device to write from
203  * @nr_pages: the number of pages to write
204  *
205  * Description:
206  *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
207  *   started when this function returns; we make no guarantees on
208  *   completion. The caller need not hold the sb s_umount semaphore.
209  *
210  */
211 void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
212 {
213 	struct wb_writeback_args args = {
214 		.sync_mode	= WB_SYNC_NONE,
215 		.nr_pages	= nr_pages,
216 		.range_cyclic	= 1,
217 	};
218 
219 	bdi_alloc_queue_work(bdi, &args);
220 }
221 
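/*
 * A minimal caller-side sketch (hypothetical helper, not part of this file):
 * ask the flusher thread of an inode's backing device to opportunistically
 * write back up to 1024 pages. bdi_start_writeback() only queues the work;
 * it returns without waiting for any IO.
 */
static void example_kick_writeback(struct inode *inode)
{
	struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;

	if (bdi_has_dirty_io(bdi))
		bdi_start_writeback(bdi, 1024);
}
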
222 /**
223  * bdi_start_background_writeback - start background writeback
224  * @bdi: the backing device to write from
225  *
226  * Description:
227  *   This does WB_SYNC_NONE background writeback. The IO is only
228  *   started when this function returns; we make no guarantees on
229  *   completion. The caller need not hold the sb s_umount semaphore.
230  */
231 void bdi_start_background_writeback(struct backing_dev_info *bdi)
232 {
233 	struct wb_writeback_args args = {
234 		.sync_mode	= WB_SYNC_NONE,
235 		.nr_pages	= LONG_MAX,
236 		.for_background = 1,
237 		.range_cyclic	= 1,
238 	};
239 	bdi_alloc_queue_work(bdi, &args);
240 }
241 
242 /*
243  * Redirty an inode: set its when-it-was-dirtied timestamp and move it to the
244  * furthest end of its superblock's dirty-inode list.
245  *
246  * Before stamping the inode's ->dirtied_when, we check to see whether it is
247  * already the most-recently-dirtied inode on the b_dirty list.  If that is
248  * the case then the inode must have been redirtied while it was being written
249  * out and we don't reset its dirtied_when.
250  */
251 static void redirty_tail(struct inode *inode)
252 {
253 	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
254 
255 	if (!list_empty(&wb->b_dirty)) {
256 		struct inode *tail;
257 
258 		tail = list_entry(wb->b_dirty.next, struct inode, i_list);
259 		if (time_before(inode->dirtied_when, tail->dirtied_when))
260 			inode->dirtied_when = jiffies;
261 	}
262 	list_move(&inode->i_list, &wb->b_dirty);
263 }
264 
265 /*
266  * Requeue an inode for re-scanning after the bdi->b_io list is exhausted.
267  */
268 static void requeue_io(struct inode *inode)
269 {
270 	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
271 
272 	list_move(&inode->i_list, &wb->b_more_io);
273 }
274 
275 static void inode_sync_complete(struct inode *inode)
276 {
277 	/*
278 	 * Prevent speculative execution through spin_unlock(&inode_lock);
279 	 */
280 	smp_mb();
281 	wake_up_bit(&inode->i_state, __I_SYNC);
282 }
283 
284 static bool inode_dirtied_after(struct inode *inode, unsigned long t)
285 {
286 	bool ret = time_after(inode->dirtied_when, t);
287 #ifndef CONFIG_64BIT
288 	/*
289 	 * For inodes being constantly redirtied, dirtied_when can get stuck.
290  * It _appears_ to be in the future, but it is actually in the distant past.
291 	 * This test is necessary to prevent such wrapped-around relative times
292 	 * from permanently stopping the whole bdi writeback.
293 	 */
294 	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
295 #endif
296 	return ret;
297 }
298 
299 /*
300  * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
301  */
302 static void move_expired_inodes(struct list_head *delaying_queue,
303 			       struct list_head *dispatch_queue,
304 				unsigned long *older_than_this)
305 {
306 	LIST_HEAD(tmp);
307 	struct list_head *pos, *node;
308 	struct super_block *sb = NULL;
309 	struct inode *inode;
310 	int do_sb_sort = 0;
311 
312 	while (!list_empty(delaying_queue)) {
313 		inode = list_entry(delaying_queue->prev, struct inode, i_list);
314 		if (older_than_this &&
315 		    inode_dirtied_after(inode, *older_than_this))
316 			break;
317 		if (sb && sb != inode->i_sb)
318 			do_sb_sort = 1;
319 		sb = inode->i_sb;
320 		list_move(&inode->i_list, &tmp);
321 	}
322 
323 	/* just one sb in list, splice to dispatch_queue and we're done */
324 	if (!do_sb_sort) {
325 		list_splice(&tmp, dispatch_queue);
326 		return;
327 	}
328 
329 	/* Move inodes from one superblock together */
330 	while (!list_empty(&tmp)) {
331 		inode = list_entry(tmp.prev, struct inode, i_list);
332 		sb = inode->i_sb;
333 		list_for_each_prev_safe(pos, node, &tmp) {
334 			inode = list_entry(pos, struct inode, i_list);
335 			if (inode->i_sb == sb)
336 				list_move(&inode->i_list, dispatch_queue);
337 		}
338 	}
339 }
340 
341 /*
342  * Queue all expired dirty inodes for io, eldest first.
343  */
344 static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
345 {
346 	list_splice_init(&wb->b_more_io, wb->b_io.prev);
347 	move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
348 }
349 
350 static int write_inode(struct inode *inode, struct writeback_control *wbc)
351 {
352 	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
353 		return inode->i_sb->s_op->write_inode(inode, wbc);
354 	return 0;
355 }
356 
357 /*
358  * Wait for writeback on an inode to complete.
359  */
360 static void inode_wait_for_writeback(struct inode *inode)
361 {
362 	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
363 	wait_queue_head_t *wqh;
364 
365 	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
366 	while (inode->i_state & I_SYNC) {
367 		spin_unlock(&inode_lock);
368 		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
369 		spin_lock(&inode_lock);
370 	}
371 }
372 
373 /*
374  * Write out an inode's dirty pages.  Called under inode_lock.  Either the
375  * caller has ref on the inode (either via __iget or via syscall against an fd)
376  * or the inode has I_WILL_FREE set (via generic_forget_inode)
377  *
378  * If wbc->sync_mode is WB_SYNC_ALL, wait on the writeout.
379  *
380  * The whole writeout design is quite complex and fragile.  We want to avoid
381  * starvation of particular inodes when others are being redirtied, prevent
382  * livelocks, etc.
383  *
384  * Called under inode_lock.
385  */
386 static int
387 writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
388 {
389 	struct address_space *mapping = inode->i_mapping;
390 	unsigned dirty;
391 	int ret;
392 
393 	if (!atomic_read(&inode->i_count))
394 		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
395 	else
396 		WARN_ON(inode->i_state & I_WILL_FREE);
397 
398 	if (inode->i_state & I_SYNC) {
399 		/*
400 		 * If this inode is locked for writeback and we are not doing
401 		 * writeback-for-data-integrity, move it to b_more_io so that
402 		 * writeback can proceed with the other inodes on b_io.
403 		 *
404 		 * We'll have another go at writing back this inode when we
405 		 * have completed a full scan of b_io.
406 		 */
407 		if (wbc->sync_mode != WB_SYNC_ALL) {
408 			requeue_io(inode);
409 			return 0;
410 		}
411 
412 		/*
413 		 * It's a data-integrity sync.  We must wait.
414 		 */
415 		inode_wait_for_writeback(inode);
416 	}
417 
418 	BUG_ON(inode->i_state & I_SYNC);
419 
420 	/* Set I_SYNC, reset I_DIRTY_PAGES */
421 	inode->i_state |= I_SYNC;
422 	inode->i_state &= ~I_DIRTY_PAGES;
423 	spin_unlock(&inode_lock);
424 
425 	ret = do_writepages(mapping, wbc);
426 
427 	/*
428 	 * Make sure to wait on the data before writing out the metadata.
429 	 * This is important for filesystems that modify metadata on data
430 	 * I/O completion.
431 	 */
432 	if (wbc->sync_mode == WB_SYNC_ALL) {
433 		int err = filemap_fdatawait(mapping);
434 		if (ret == 0)
435 			ret = err;
436 	}
437 
438 	/*
439 	 * Some filesystems may redirty the inode during the writeback
440 	 * due to delalloc, so clear the dirty metadata flags right before
441 	 * write_inode().
442 	 */
443 	spin_lock(&inode_lock);
444 	dirty = inode->i_state & I_DIRTY;
445 	inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
446 	spin_unlock(&inode_lock);
447 	/* Don't write the inode if only I_DIRTY_PAGES was set */
448 	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
449 		int err = write_inode(inode, wbc);
450 		if (ret == 0)
451 			ret = err;
452 	}
453 
454 	spin_lock(&inode_lock);
455 	inode->i_state &= ~I_SYNC;
456 	if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
457 		if ((inode->i_state & I_DIRTY_PAGES) && wbc->for_kupdate) {
458 			/*
459 			 * More pages get dirtied by a fast dirtier.
460 			 */
461 			goto select_queue;
462 		} else if (inode->i_state & I_DIRTY) {
463 			/*
464 			 * At least XFS will redirty the inode during the
465 			 * writeback (delalloc) and on io completion (isize).
466 			 */
467 			redirty_tail(inode);
468 		} else if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
469 			/*
470 			 * We didn't write back all the pages.  nfs_writepages()
471 			 * sometimes bails out without doing anything. Redirty
472 			 * the inode; move it from b_io onto b_more_io/b_dirty.
473 			 */
474 			/*
475 			 * akpm: if the caller was the kupdate function we put
476 			 * this inode at the head of b_dirty so it gets first
477 			 * consideration.  Otherwise, move it to the tail, for
478 			 * the reasons described there.  I'm not really sure
479 			 * how much sense this makes.  Presumably I had a good
480 			 * reasons for doing it this way, and I'd rather not
481 			 * reason for doing it this way, and I'd rather not
482 			 */
483 			if (wbc->for_kupdate) {
484 				/*
485 				 * For the kupdate function we move the inode
486 				 * to b_more_io so it will get more writeout as
487 				 * soon as the queue becomes uncongested.
488 				 */
489 				inode->i_state |= I_DIRTY_PAGES;
490 select_queue:
491 				if (wbc->nr_to_write <= 0) {
492 					/*
493 					 * slice used up: queue for next turn
494 					 */
495 					requeue_io(inode);
496 				} else {
497 					/*
498 					 * somehow blocked: retry later
499 					 */
500 					redirty_tail(inode);
501 				}
502 			} else {
503 				/*
504 				 * Otherwise fully redirty the inode so that
505 				 * writeout.  Without this, heavy writing to one
506 				 * writeout.  Otherwise heavy writing to one
507 				 * file would indefinitely suspend writeout of
508 				 * all the other files.
509 				 */
510 				inode->i_state |= I_DIRTY_PAGES;
511 				redirty_tail(inode);
512 			}
513 		} else if (atomic_read(&inode->i_count)) {
514 			/*
515 			 * The inode is clean, in use
516 			 */
517 			list_move(&inode->i_list, &inode_in_use);
518 		} else {
519 			/*
520 			 * The inode is clean, unused
521 			 */
522 			list_move(&inode->i_list, &inode_unused);
523 		}
524 	}
525 	inode_sync_complete(inode);
526 	return ret;
527 }
528 
529 /*
530  * For background writeback the caller does not have the sb pinned
531  * before calling writeback. So make sure that we do pin it, so it doesn't
532  * go away while we are writing inodes from it.
533  */
534 static bool pin_sb_for_writeback(struct super_block *sb)
535 {
536 	spin_lock(&sb_lock);
537 	if (list_empty(&sb->s_instances)) {
538 		spin_unlock(&sb_lock);
539 		return false;
540 	}
541 
542 	sb->s_count++;
543 	spin_unlock(&sb_lock);
544 
545 	if (down_read_trylock(&sb->s_umount)) {
546 		if (sb->s_root)
547 			return true;
548 		up_read(&sb->s_umount);
549 	}
550 
551 	put_super(sb);
552 	return false;
553 }
554 
555 /*
556  * Write a portion of b_io inodes which belong to @sb.
557  * If @wbc->sb != NULL, then find and write all such
558  * inodes. Otherwise write only ones which go sequentially
559  * in reverse order.
560  * Return 1 if the caller's writeback routine should be
561  * interrupted; otherwise return 0.
562  */
563 static int writeback_sb_inodes(struct super_block *sb,
564 			       struct bdi_writeback *wb,
565 			       struct writeback_control *wbc)
566 {
567 	while (!list_empty(&wb->b_io)) {
568 		long pages_skipped;
569 		struct inode *inode = list_entry(wb->b_io.prev,
570 						 struct inode, i_list);
571 		if (wbc->sb && sb != inode->i_sb) {
572 			/* super block given and doesn't
573 			   match, skip this inode */
574 			redirty_tail(inode);
575 			continue;
576 		}
577 		if (sb != inode->i_sb)
578 			/* finish with this superblock */
579 			return 0;
580 		if (inode->i_state & (I_NEW | I_WILL_FREE)) {
581 			requeue_io(inode);
582 			continue;
583 		}
584 		/*
585 		 * Was this inode dirtied after this writeback pass started?
586 		 * This keeps sync from doing extra work and prevents livelock.
587 		 */
588 		if (inode_dirtied_after(inode, wbc->wb_start))
589 			return 1;
590 
591 		BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));
592 		__iget(inode);
593 		pages_skipped = wbc->pages_skipped;
594 		writeback_single_inode(inode, wbc);
595 		if (wbc->pages_skipped != pages_skipped) {
596 			/*
597 			 * writeback is not making progress due to locked
598 			 * buffers.  Skip this inode for now.
599 			 */
600 			redirty_tail(inode);
601 		}
602 		spin_unlock(&inode_lock);
603 		iput(inode);
604 		cond_resched();
605 		spin_lock(&inode_lock);
606 		if (wbc->nr_to_write <= 0) {
607 			wbc->more_io = 1;
608 			return 1;
609 		}
610 		if (!list_empty(&wb->b_more_io))
611 			wbc->more_io = 1;
612 	}
613 	/* b_io is empty */
614 	return 1;
615 }
616 
617 static void writeback_inodes_wb(struct bdi_writeback *wb,
618 				struct writeback_control *wbc)
619 {
620 	int ret = 0;
621 
622 	wbc->wb_start = jiffies; /* livelock avoidance */
623 	spin_lock(&inode_lock);
624 	if (!wbc->for_kupdate || list_empty(&wb->b_io))
625 		queue_io(wb, wbc->older_than_this);
626 
627 	while (!list_empty(&wb->b_io)) {
628 		struct inode *inode = list_entry(wb->b_io.prev,
629 						 struct inode, i_list);
630 		struct super_block *sb = inode->i_sb;
631 
632 		if (wbc->sb) {
633 			/*
634 			 * We are requested to write out inodes for a specific
635 			 * superblock.  This means we already have s_umount
636 			 * taken by the caller which also waits for us to
637 			 * complete the writeout.
638 			 */
639 			if (sb != wbc->sb) {
640 				redirty_tail(inode);
641 				continue;
642 			}
643 
644 			WARN_ON(!rwsem_is_locked(&sb->s_umount));
645 
646 			ret = writeback_sb_inodes(sb, wb, wbc);
647 		} else {
648 			if (!pin_sb_for_writeback(sb)) {
649 				requeue_io(inode);
650 				continue;
651 			}
652 			ret = writeback_sb_inodes(sb, wb, wbc);
653 			drop_super(sb);
654 		}
655 
656 		if (ret)
657 			break;
658 	}
659 	spin_unlock(&inode_lock);
660 	/* Leave any unwritten inodes on b_io */
661 }
662 
663 void writeback_inodes_wbc(struct writeback_control *wbc)
664 {
665 	struct backing_dev_info *bdi = wbc->bdi;
666 
667 	writeback_inodes_wb(&bdi->wb, wbc);
668 }
669 
670 /*
671  * The maximum number of pages to write out in a single bdi flush/kupdate
672  * operation.  We do this so we don't hold I_SYNC against an inode for
673  * enormous amounts of time, which would block a userspace task which has
674  * been forced to throttle against that inode.  Also, the code reevaluates
675  * the dirty limits each time it has written this many pages.
676  */
677 #define MAX_WRITEBACK_PAGES     1024
678 
679 static inline bool over_bground_thresh(void)
680 {
681 	unsigned long background_thresh, dirty_thresh;
682 
683 	get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
684 
685 	return (global_page_state(NR_FILE_DIRTY) +
686 		global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
687 }
688 
689 /*
690  * Explicit flushing or periodic writeback of "old" data.
691  *
692  * Define "old": the first time one of an inode's pages is dirtied, we mark the
693  * dirtying-time in the inode's address_space.  So this periodic writeback code
694  * just walks the superblock inode list, writing back any inodes which are
695  * older than a specific point in time.
696  *
697  * Try to run once per dirty_writeback_interval.  But if a writeback event
698  * takes longer than a dirty_writeback_interval, then leave a
699  * one-second gap.
700  *
701  * older_than_this takes precedence over nr_to_write.  So we'll only write back
702  * all dirty pages if they are all attached to "old" mappings.
703  */
704 static long wb_writeback(struct bdi_writeback *wb,
705 			 struct wb_writeback_args *args)
706 {
707 	struct writeback_control wbc = {
708 		.bdi			= wb->bdi,
709 		.sb			= args->sb,
710 		.sync_mode		= args->sync_mode,
711 		.older_than_this	= NULL,
712 		.for_kupdate		= args->for_kupdate,
713 		.for_background		= args->for_background,
714 		.range_cyclic		= args->range_cyclic,
715 	};
716 	unsigned long oldest_jif;
717 	long wrote = 0;
718 	struct inode *inode;
719 
720 	if (wbc.for_kupdate) {
721 		wbc.older_than_this = &oldest_jif;
722 		oldest_jif = jiffies -
723 				msecs_to_jiffies(dirty_expire_interval * 10);
724 	}
725 	if (!wbc.range_cyclic) {
726 		wbc.range_start = 0;
727 		wbc.range_end = LLONG_MAX;
728 	}
729 
730 	for (;;) {
731 		/*
732 		 * Stop writeback when nr_pages has been consumed
733 		 */
734 		if (args->nr_pages <= 0)
735 			break;
736 
737 		/*
738 		 * For background writeout, stop when we are below the
739 		 * background dirty threshold
740 		 */
741 		if (args->for_background && !over_bground_thresh())
742 			break;
743 
744 		wbc.more_io = 0;
745 		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
746 		wbc.pages_skipped = 0;
747 		writeback_inodes_wb(wb, &wbc);
748 		args->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
749 		wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;
750 
751 		/*
752 		 * If we consumed everything, see if we have more
753 		 */
754 		if (wbc.nr_to_write <= 0)
755 			continue;
756 		/*
757 		 * Didn't write everything and we don't have more IO, bail
758 		 */
759 		if (!wbc.more_io)
760 			break;
761 		/*
762 		 * Did we write something? Try for more
763 		 */
764 		if (wbc.nr_to_write < MAX_WRITEBACK_PAGES)
765 			continue;
766 		/*
767 		 * Nothing written. Wait for some inode to
768 		 * become available for writeback. Otherwise
769 		 * we'll just busyloop.
770 		 */
771 		spin_lock(&inode_lock);
772 		if (!list_empty(&wb->b_more_io))  {
773 			inode = list_entry(wb->b_more_io.prev,
774 						struct inode, i_list);
775 			inode_wait_for_writeback(inode);
776 		}
777 		spin_unlock(&inode_lock);
778 	}
779 
780 	return wrote;
781 }
782 
783 /*
784  * Return the next bdi_work struct that hasn't been processed by this
785  * wb thread yet. ->seen is initially set for each thread that exists
786  * for this device, when a thread first notices a piece of work it
787  * clears its bit. Depending on writeback type, the thread will notify
788  * completion on either receiving the work (WB_SYNC_NONE) or after
789  * it is done (WB_SYNC_ALL).
790  */
791 static struct bdi_work *get_next_work_item(struct backing_dev_info *bdi,
792 					   struct bdi_writeback *wb)
793 {
794 	struct bdi_work *work, *ret = NULL;
795 
796 	rcu_read_lock();
797 
798 	list_for_each_entry_rcu(work, &bdi->work_list, list) {
799 		if (!test_bit(wb->nr, &work->seen))
800 			continue;
801 		clear_bit(wb->nr, &work->seen);
802 
803 		ret = work;
804 		break;
805 	}
806 
807 	rcu_read_unlock();
808 	return ret;
809 }
810 
811 static long wb_check_old_data_flush(struct bdi_writeback *wb)
812 {
813 	unsigned long expired;
814 	long nr_pages;
815 
816 	/*
817 	 * When set to zero, disable periodic writeback
818 	 */
819 	if (!dirty_writeback_interval)
820 		return 0;
821 
822 	expired = wb->last_old_flush +
823 			msecs_to_jiffies(dirty_writeback_interval * 10);
824 	if (time_before(jiffies, expired))
825 		return 0;
826 
827 	wb->last_old_flush = jiffies;
828 	nr_pages = global_page_state(NR_FILE_DIRTY) +
829 			global_page_state(NR_UNSTABLE_NFS) +
830 			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
831 
832 	if (nr_pages) {
833 		struct wb_writeback_args args = {
834 			.nr_pages	= nr_pages,
835 			.sync_mode	= WB_SYNC_NONE,
836 			.for_kupdate	= 1,
837 			.range_cyclic	= 1,
838 		};
839 
840 		return wb_writeback(wb, &args);
841 	}
842 
843 	return 0;
844 }
845 
846 /*
847  * Retrieve work items and do the writeback they describe
848  */
849 long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
850 {
851 	struct backing_dev_info *bdi = wb->bdi;
852 	struct bdi_work *work;
853 	long wrote = 0;
854 
855 	while ((work = get_next_work_item(bdi, wb)) != NULL) {
856 		struct wb_writeback_args args = work->args;
857 
858 		/*
859 		 * Override sync mode, in case we must wait for completion
860 		 */
861 		if (force_wait)
862 			work->args.sync_mode = args.sync_mode = WB_SYNC_ALL;
863 
864 		/*
865 		 * If this isn't a data integrity operation, just notify
866 		 * that we have seen this work and we are now starting it.
867 		 */
868 		if (!test_bit(WS_ONSTACK, &work->state))
869 			wb_clear_pending(wb, work);
870 
871 		wrote += wb_writeback(wb, &args);
872 
873 		/*
874 		 * This is a data integrity writeback, so only do the
875 		 * notification when we have completed the work.
876 		 */
877 		if (test_bit(WS_ONSTACK, &work->state))
878 			wb_clear_pending(wb, work);
879 	}
880 
881 	/*
882 	 * Check for periodic writeback, kupdated() style
883 	 */
884 	wrote += wb_check_old_data_flush(wb);
885 
886 	return wrote;
887 }
888 
889 /*
890  * Handle writeback of dirty data for the device backed by this bdi. Also
891  * wakes up periodically and does kupdated style flushing.
892  */
893 int bdi_writeback_task(struct bdi_writeback *wb)
894 {
895 	unsigned long last_active = jiffies;
896 	unsigned long wait_jiffies = -1UL;
897 	long pages_written;
898 
899 	while (!kthread_should_stop()) {
900 		pages_written = wb_do_writeback(wb, 0);
901 
902 		if (pages_written)
903 			last_active = jiffies;
904 		else if (wait_jiffies != -1UL) {
905 			unsigned long max_idle;
906 
907 			/*
908 			 * Longest period of inactivity that we tolerate. If we
909 			 * see dirty data again later, the task will get
910 			 * recreated automatically.
911 			 */
912 			max_idle = max(5UL * 60 * HZ, wait_jiffies);
913 			if (time_after(jiffies, max_idle + last_active))
914 				break;
915 		}
916 
917 		if (dirty_writeback_interval) {
918 			wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
919 			schedule_timeout_interruptible(wait_jiffies);
920 		} else {
921 			set_current_state(TASK_INTERRUPTIBLE);
922 			if (list_empty_careful(&wb->bdi->work_list) &&
923 			    !kthread_should_stop())
924 				schedule();
925 			__set_current_state(TASK_RUNNING);
926 		}
927 
928 		try_to_freeze();
929 	}
930 
931 	return 0;
932 }
933 
934 /*
935  * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
936  * the whole world.
937  */
938 void wakeup_flusher_threads(long nr_pages)
939 {
940 	struct backing_dev_info *bdi;
941 	struct wb_writeback_args args = {
942 		.sync_mode	= WB_SYNC_NONE,
943 	};
944 
945 	if (nr_pages) {
946 		args.nr_pages = nr_pages;
947 	} else {
948 		args.nr_pages = global_page_state(NR_FILE_DIRTY) +
949 				global_page_state(NR_UNSTABLE_NFS);
950 	}
951 
952 	rcu_read_lock();
953 	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
954 		if (!bdi_has_dirty_io(bdi))
955 			continue;
956 		bdi_alloc_queue_work(bdi, &args);
957 	}
958 	rcu_read_unlock();
959 }
960 
961 static noinline void block_dump___mark_inode_dirty(struct inode *inode)
962 {
963 	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
964 		struct dentry *dentry;
965 		const char *name = "?";
966 
967 		dentry = d_find_alias(inode);
968 		if (dentry) {
969 			spin_lock(&dentry->d_lock);
970 			name = (const char *) dentry->d_name.name;
971 		}
972 		printk(KERN_DEBUG
973 		       "%s(%d): dirtied inode %lu (%s) on %s\n",
974 		       current->comm, task_pid_nr(current), inode->i_ino,
975 		       name, inode->i_sb->s_id);
976 		if (dentry) {
977 			spin_unlock(&dentry->d_lock);
978 			dput(dentry);
979 		}
980 	}
981 }
982 
983 /**
984  *	__mark_inode_dirty -	internal function
985  *	@inode: inode to mark
986  *	@flags: what kind of dirty (e.g. I_DIRTY_SYNC)
987  *	Mark an inode as dirty. Callers should use mark_inode_dirty or
988  *  	mark_inode_dirty_sync.
989  *
990  * Put the inode on the super block's dirty list.
991  *
992  * CAREFUL! We mark it dirty unconditionally, but move it onto the
993  * dirty list only if it is hashed or if it refers to a blockdev.
994  * If it was not hashed, it will never be added to the dirty list
995  * even if it is later hashed, as it will have been marked dirty already.
996  *
997  * In short, make sure you hash any inodes _before_ you start marking
998  * them dirty.
999  *
1000  * This function *must* be atomic for the I_DIRTY_PAGES case -
1001  * set_page_dirty() is called under spinlock in several places.
1002  *
1003  * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
1004  * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
1005  * the kernel-internal blockdev inode represents the dirtying time of the
1006  * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
1007  * page->mapping->host, so the page-dirtying time is recorded in the internal
1008  * blockdev inode.
1009  */
1010 void __mark_inode_dirty(struct inode *inode, int flags)
1011 {
1012 	struct super_block *sb = inode->i_sb;
1013 
1014 	/*
1015 	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
1016 	 * dirty the inode itself
1017 	 */
1018 	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
1019 		if (sb->s_op->dirty_inode)
1020 			sb->s_op->dirty_inode(inode);
1021 	}
1022 
1023 	/*
1024 	 * make sure that changes are seen by all cpus before we test i_state
1025 	 * -- mikulas
1026 	 */
1027 	smp_mb();
1028 
1029 	/* avoid the locking if we can */
1030 	if ((inode->i_state & flags) == flags)
1031 		return;
1032 
1033 	if (unlikely(block_dump))
1034 		block_dump___mark_inode_dirty(inode);
1035 
1036 	spin_lock(&inode_lock);
1037 	if ((inode->i_state & flags) != flags) {
1038 		const int was_dirty = inode->i_state & I_DIRTY;
1039 
1040 		inode->i_state |= flags;
1041 
1042 		/*
1043 		 * If the inode is being synced, just update its dirty state.
1044 		 * The unlocker will place the inode on the appropriate
1045 		 * superblock list, based upon its state.
1046 		 */
1047 		if (inode->i_state & I_SYNC)
1048 			goto out;
1049 
1050 		/*
1051 		 * Only add valid (hashed) inodes to the superblock's
1052 		 * dirty list.  Add blockdev inodes as well.
1053 		 */
1054 		if (!S_ISBLK(inode->i_mode)) {
1055 			if (hlist_unhashed(&inode->i_hash))
1056 				goto out;
1057 		}
1058 		if (inode->i_state & (I_FREEING|I_CLEAR))
1059 			goto out;
1060 
1061 		/*
1062 		 * If the inode was already on b_dirty/b_io/b_more_io, don't
1063 		 * reposition it (that would break b_dirty time-ordering).
1064 		 */
1065 		if (!was_dirty) {
1066 			struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
1067 			struct backing_dev_info *bdi = wb->bdi;
1068 
1069 			if (bdi_cap_writeback_dirty(bdi) &&
1070 			    !test_bit(BDI_registered, &bdi->state)) {
1071 				WARN_ON(1);
1072 				printk(KERN_ERR "bdi-%s not registered\n",
1073 								bdi->name);
1074 			}
1075 
1076 			inode->dirtied_when = jiffies;
1077 			list_move(&inode->i_list, &wb->b_dirty);
1078 		}
1079 	}
1080 out:
1081 	spin_unlock(&inode_lock);
1082 }
1083 EXPORT_SYMBOL(__mark_inode_dirty);
1084 
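/*
 * A minimal usage sketch (hypothetical filesystem code, not from this file):
 * after updating in-core metadata such as i_size, mark the inode dirty so
 * the flusher threads will eventually write it back via ->write_inode().
 * mark_inode_dirty() is the usual wrapper around __mark_inode_dirty().
 */
static void example_update_size(struct inode *inode, loff_t newsize)
{
	i_size_write(inode, newsize);	/* update the in-core size */
	mark_inode_dirty(inode);	/* __mark_inode_dirty(inode, I_DIRTY) */
}
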
1085 /*
1086  * Write out a superblock's list of dirty inodes.  A wait will be performed
1087  * upon no inodes, all inodes or the final one, depending upon sync_mode.
1088  *
1089  * If older_than_this is non-NULL, then only write out inodes which
1090  * had their first dirtying at a time earlier than *older_than_this.
1091  *
1092  * If `bdi' is non-zero then we're being asked to writeback a specific queue.
1093  * This function assumes that the blockdev superblock's inodes are backed by
1094  * a variety of queues, so all inodes are searched.  For other superblocks,
1095  * assume that all inodes are backed by the same queue.
1096  *
1097  * The inodes to be written are parked on bdi->b_io.  They are moved back onto
1098  * bdi->b_dirty as they are selected for writing.  This way, none can be missed
1099  * on the writer throttling path, and we get decent balancing between many
1100  * throttled threads: we don't want them all piling up on inode_sync_wait.
1101  */
1102 static void wait_sb_inodes(struct super_block *sb)
1103 {
1104 	struct inode *inode, *old_inode = NULL;
1105 
1106 	/*
1107 	 * We need to be protected against the filesystem going from
1108 	 * r/o to r/w or vice versa.
1109 	 */
1110 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
1111 
1112 	spin_lock(&inode_lock);
1113 
1114 	/*
1115 	 * Data integrity sync. Must wait for all pages under writeback,
1116 	 * because there may have been pages dirtied before our sync
1117 	 * call, but which had writeout started before we write it out.
1118 	 * In which case, the inode may not be on the dirty list, but
1119 	 * we still have to wait for that writeout.
1120 	 */
1121 	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
1122 		struct address_space *mapping;
1123 
1124 		if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
1125 			continue;
1126 		mapping = inode->i_mapping;
1127 		if (mapping->nrpages == 0)
1128 			continue;
1129 		__iget(inode);
1130 		spin_unlock(&inode_lock);
1131 		/*
1132 		 * We hold a reference to 'inode' so it couldn't have
1133 		 * been removed from s_inodes list while we dropped the
1134 		 * inode_lock.  We cannot iput the inode now as we can
1135 		 * be holding the last reference and we cannot iput it
1136 		 * under inode_lock. So we keep the reference and iput
1137 		 * it later.
1138 		 */
1139 		iput(old_inode);
1140 		old_inode = inode;
1141 
1142 		filemap_fdatawait(mapping);
1143 
1144 		cond_resched();
1145 
1146 		spin_lock(&inode_lock);
1147 	}
1148 	spin_unlock(&inode_lock);
1149 	iput(old_inode);
1150 }
1151 
1152 /**
1153  * writeback_inodes_sb	-	writeback dirty inodes from given super_block
1154  * @sb: the superblock
1155  *
1156  * Start writeback on some inodes on this super_block. No guarantees are made
1157  * on how many (if any) will be written, and this function does not wait
1158  * for IO completion of submitted IO. The work is handed off to the
1159  * per-bdi flusher thread, which performs the actual writeout.
1160  */
1161 void writeback_inodes_sb(struct super_block *sb)
1162 {
1163 	unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
1164 	unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
1165 	struct wb_writeback_args args = {
1166 		.sb		= sb,
1167 		.sync_mode	= WB_SYNC_NONE,
1168 	};
1169 
1170 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
1171 
1172 	args.nr_pages = nr_dirty + nr_unstable +
1173 			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
1174 
1175 	bdi_queue_work_onstack(&args);
1176 }
1177 EXPORT_SYMBOL(writeback_inodes_sb);
1178 
1179 /**
1180  * writeback_inodes_sb_if_idle	-	start writeback if none underway
1181  * @sb: the superblock
1182  *
1183  * Invoke writeback_inodes_sb if no writeback is currently underway.
1184  * Returns 1 if writeback was started, 0 if not.
1185  */
1186 int writeback_inodes_sb_if_idle(struct super_block *sb)
1187 {
1188 	if (!writeback_in_progress(sb->s_bdi)) {
1189 		down_read(&sb->s_umount);
1190 		writeback_inodes_sb(sb);
1191 		up_read(&sb->s_umount);
1192 		return 1;
1193 	} else
1194 		return 0;
1195 }
1196 EXPORT_SYMBOL(writeback_inodes_sb_if_idle);
1197 
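/*
 * A minimal usage sketch (hypothetical filesystem code): when an operation
 * runs short of free space, push dirty data for this superblock once before
 * giving up, but only if the backing device is not already flushing.
 * writeback_inodes_sb_if_idle() returns 1 when it actually started writeback,
 * which tells the caller a retry may be worthwhile.
 */
static int example_flush_before_retry(struct super_block *sb)
{
	return writeback_inodes_sb_if_idle(sb);
}
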
1198 /**
1199  * sync_inodes_sb	-	sync sb inode pages
1200  * @sb: the superblock
1201  *
1202  * This function writes and waits on any dirty inode belonging to this
1203  * super_block. This is a data integrity (WB_SYNC_ALL) operation.
1204  */
1205 void sync_inodes_sb(struct super_block *sb)
1206 {
1207 	struct wb_writeback_args args = {
1208 		.sb		= sb,
1209 		.sync_mode	= WB_SYNC_ALL,
1210 		.nr_pages	= LONG_MAX,
1211 		.range_cyclic	= 0,
1212 	};
1213 
1214 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
1215 
1216 	bdi_queue_work_onstack(&args);
1217 	wait_sb_inodes(sb);
1218 }
1219 EXPORT_SYMBOL(sync_inodes_sb);
1220 
1221 /**
1222  * write_inode_now	-	write an inode to disk
1223  * @inode: inode to write to disk
1224  * @sync: whether the write should be synchronous or not
1225  *
1226  * This function commits an inode to disk immediately if it is dirty. This is
1227  * primarily needed by knfsd.
1228  *
1229  * The caller must either have a ref on the inode or must have set I_WILL_FREE.
1230  */
1231 int write_inode_now(struct inode *inode, int sync)
1232 {
1233 	int ret;
1234 	struct writeback_control wbc = {
1235 		.nr_to_write = LONG_MAX,
1236 		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
1237 		.range_start = 0,
1238 		.range_end = LLONG_MAX,
1239 	};
1240 
1241 	if (!mapping_cap_writeback_dirty(inode->i_mapping))
1242 		wbc.nr_to_write = 0;
1243 
1244 	might_sleep();
1245 	spin_lock(&inode_lock);
1246 	ret = writeback_single_inode(inode, &wbc);
1247 	spin_unlock(&inode_lock);
1248 	if (sync)
1249 		inode_sync_wait(inode);
1250 	return ret;
1251 }
1252 EXPORT_SYMBOL(write_inode_now);
1253 
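/*
 * A minimal usage sketch (hypothetical caller, in the spirit of knfsd): force
 * one inode's dirty pages and metadata to disk and wait for the result. The
 * caller is assumed to hold a reference on the inode.
 */
static int example_commit_inode(struct inode *inode)
{
	return write_inode_now(inode, 1);	/* sync != 0 means WB_SYNC_ALL */
}
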
1254 /**
1255  * sync_inode - write an inode and its pages to disk.
1256  * @inode: the inode to sync
1257  * @wbc: controls the writeback mode
1258  *
1259  * sync_inode() will write an inode and its pages to disk.  It will also
1260  * correctly update the inode on its superblock's dirty inode lists and will
1261  * update inode->i_state.
1262  *
1263  * The caller must have a ref on the inode.
1264  */
1265 int sync_inode(struct inode *inode, struct writeback_control *wbc)
1266 {
1267 	int ret;
1268 
1269 	spin_lock(&inode_lock);
1270 	ret = writeback_single_inode(inode, wbc);
1271 	spin_unlock(&inode_lock);
1272 	return ret;
1273 }
1274 EXPORT_SYMBOL(sync_inode);
1275
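/*
 * A minimal usage sketch (hypothetical caller): write back at most 16 pages
 * of a single inode without waiting on the IO, by passing sync_inode() an
 * explicit writeback_control.
 */
static int example_write_a_little(struct inode *inode)
{
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_NONE,
		.nr_to_write	= 16,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};

	return sync_inode(inode, &wbc);
}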