/* linux/mm/backing-dev.c (revision b43ab901d671e3e3cad425ea5e9a3c74e266dcdd) */

#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>

static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

struct backing_dev_info default_backing_dev_info = {
	.name		= "default",
	.ra_pages	= VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
	.state		= 0,
	.capabilities	= BDI_CAP_MAP_COPY,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);

struct backing_dev_info noop_backing_dev_info = {
	.name		= "noop",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static struct class *bdi_class;

/*
 * bdi_lock protects updates to bdi_list and bdi_pending_list, and also
 * provides reader-side protection for bdi_pending_list. Readers of
 * bdi_list instead rely on RCU.
 */
DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);
LIST_HEAD(bdi_pending_list);

static struct task_struct *sync_supers_tsk;
static struct timer_list sync_supers_timer;

static int bdi_sync_supers(void *);
static void sync_supers_timer_fn(unsigned long);

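/*
 * Lock two writebacks' list_locks, always in address order, so that any
 * two callers locking the same pair take the locks in the same order and
 * cannot ABBA-deadlock against each other. spin_lock_nested() is used
 * for the second lock to keep lockdep happy about nesting two locks of
 * the same lock class.
 */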
void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
{
	if (wb1 < wb2) {
		spin_lock(&wb1->list_lock);
		spin_lock_nested(&wb2->list_lock, 1);
	} else {
		spin_lock(&wb2->list_lock);
		spin_lock_nested(&wb1->list_lock, 1);
	}
}

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
	bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
	struct backing_dev_info *bdi = m->private;
	struct bdi_writeback *wb = &bdi->wb;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long bdi_thresh;
	unsigned long nr_dirty, nr_io, nr_more_io;
	struct inode *inode;

	nr_dirty = nr_io = nr_more_io = 0;
	spin_lock(&wb->list_lock);
	list_for_each_entry(inode, &wb->b_dirty, i_wb_list)
		nr_dirty++;
	list_for_each_entry(inode, &wb->b_io, i_wb_list)
		nr_io++;
	list_for_each_entry(inode, &wb->b_more_io, i_wb_list)
		nr_more_io++;
	spin_unlock(&wb->list_lock);

	global_dirty_limits(&background_thresh, &dirty_thresh);
	bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);

#define K(x) ((x) << (PAGE_SHIFT - 10))
	seq_printf(m,
		   "BdiWriteback:       %10lu kB\n"
		   "BdiReclaimable:     %10lu kB\n"
		   "BdiDirtyThresh:     %10lu kB\n"
		   "DirtyThresh:        %10lu kB\n"
		   "BackgroundThresh:   %10lu kB\n"
		   "BdiDirtied:         %10lu kB\n"
		   "BdiWritten:         %10lu kB\n"
		   "BdiWriteBandwidth:  %10lu kBps\n"
		   "b_dirty:            %10lu\n"
		   "b_io:               %10lu\n"
		   "b_more_io:          %10lu\n"
		   "bdi_list:           %10u\n"
		   "state:              %10lx\n",
		   (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
		   (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
		   K(bdi_thresh),
		   K(dirty_thresh),
		   K(background_thresh),
		   (unsigned long) K(bdi_stat(bdi, BDI_DIRTIED)),
		   (unsigned long) K(bdi_stat(bdi, BDI_WRITTEN)),
		   (unsigned long) K(bdi->write_bandwidth),
		   nr_dirty,
		   nr_io,
		   nr_more_io,
		   !list_empty(&bdi->bdi_list), bdi->state);
#undef K

	return 0;
}

static int bdi_debug_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, bdi_debug_stats_show, inode->i_private);
}

static const struct file_operations bdi_debug_stats_fops = {
	.open		= bdi_debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

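/*
 * Creates /sys/kernel/debug/bdi/<name>/stats (with debugfs mounted at
 * the conventional /sys/kernel/debug), backed by the seq_file above.
 */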
static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
	bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
					       bdi, &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
	debugfs_remove(bdi->debug_stats);
	debugfs_remove(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
				      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif

static ssize_t read_ahead_kb_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	char *end;
	unsigned long read_ahead_kb;
	ssize_t ret = -EINVAL;

	read_ahead_kb = simple_strtoul(buf, &end, 10);
	if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
		bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);
		ret = count;
	}
	return ret;
}
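
/*
 * Note on the conversion above: the attribute is in KiB while ->ra_pages
 * counts pages, hence the shift by (PAGE_SHIFT - 10). With 4 KiB pages
 * (PAGE_SHIFT == 12) that is a shift by 2, so writing "512" results in
 * ra_pages == 128.
 */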

#define K(pages) ((pages) << (PAGE_SHIFT - 10))

#define BDI_SHOW(name, expr)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr, char *page)	\
{									\
	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
									\
	return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);	\
}

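/*
 * For instance, BDI_SHOW(read_ahead_kb, K(bdi->ra_pages)) below expands
 * to roughly:
 *
 *	static ssize_t read_ahead_kb_show(struct device *dev,
 *					  struct device_attribute *attr,
 *					  char *page)
 *	{
 *		struct backing_dev_info *bdi = dev_get_drvdata(dev);
 *
 *		return snprintf(page, PAGE_SIZE-1, "%lld\n",
 *				(long long)K(bdi->ra_pages));
 *	}
 */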
BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))

static ssize_t min_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	char *end;
	unsigned int ratio;
	ssize_t ret = -EINVAL;

	ratio = simple_strtoul(buf, &end, 10);
	if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
		ret = bdi_set_min_ratio(bdi, ratio);
		if (!ret)
			ret = count;
	}
	return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	char *end;
	unsigned int ratio;
	ssize_t ret = -EINVAL;

	ratio = simple_strtoul(buf, &end, 10);
	if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
		ret = bdi_set_max_ratio(bdi, ratio);
		if (!ret)
			ret = count;
	}
	return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)

#define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store)

static struct device_attribute bdi_dev_attrs[] = {
	__ATTR_RW(read_ahead_kb),
	__ATTR_RW(min_ratio),
	__ATTR_RW(max_ratio),
	__ATTR_NULL,
};
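
/*
 * Together with bdi_class->dev_attrs below, these attributes appear as
 * per-bdi sysfs knobs, e.g. /sys/class/bdi/<name>/read_ahead_kb,
 * min_ratio and max_ratio.
 */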

static __init int bdi_class_init(void)
{
	bdi_class = class_create(THIS_MODULE, "bdi");
	if (IS_ERR(bdi_class))
		return PTR_ERR(bdi_class);

	bdi_class->dev_attrs = bdi_dev_attrs;
	bdi_debug_init();
	return 0;
}
postcore_initcall(bdi_class_init);

static int __init default_bdi_init(void)
{
	int err;

	sync_supers_tsk = kthread_run(bdi_sync_supers, NULL, "sync_supers");
	BUG_ON(IS_ERR(sync_supers_tsk));

	setup_timer(&sync_supers_timer, sync_supers_timer_fn, 0);
	bdi_arm_supers_timer();

	err = bdi_init(&default_backing_dev_info);
	if (!err)
		bdi_register(&default_backing_dev_info, NULL, "default");
	err = bdi_init(&noop_backing_dev_info);

	return err;
}
subsys_initcall(default_bdi_init);

int bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	return wb_has_dirty_io(&bdi->wb);
}

/*
 * kupdated() used to do this. We cannot do it from the bdi_forker_thread()
 * or we risk deadlocking on ->s_umount. The longer-term solution would be
 * to implement sync_supers_bdi() or similar and simply do this from each
 * bdi writeback thread individually.
 */
static int bdi_sync_supers(void *unused)
{
	set_user_nice(current, 0);

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();

		/*
		 * Do this periodically, like kupdated() did before.
		 */
		sync_supers();
	}

	return 0;
}

void bdi_arm_supers_timer(void)
{
	unsigned long next;

	if (!dirty_writeback_interval)
		return;

	next = msecs_to_jiffies(dirty_writeback_interval * 10) + jiffies;
	mod_timer(&sync_supers_timer, round_jiffies_up(next));
}
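
/*
 * Arithmetic note: dirty_writeback_interval is in centiseconds, so the
 * "* 10" above converts it to milliseconds. With the default value of
 * 500 the timer fires about every 5 seconds; round_jiffies_up() nudges
 * the expiry to a whole-second boundary so system timers tend to fire
 * together.
 */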

static void sync_supers_timer_fn(unsigned long unused)
{
	wake_up_process(sync_supers_tsk);
	bdi_arm_supers_timer();
}

static void wakeup_timer_fn(unsigned long data)
{
	struct backing_dev_info *bdi = (struct backing_dev_info *)data;

	spin_lock_bh(&bdi->wb_lock);
	if (bdi->wb.task) {
		trace_writeback_wake_thread(bdi);
		wake_up_process(bdi->wb.task);
	} else {
		/*
		 * When bdi threads are inactive for a long time, they are
		 * killed. In this case we have to wake up the forker thread,
		 * which should create and run the bdi thread.
		 */
		trace_writeback_wake_forker_thread(bdi);
		wake_up_process(default_backing_dev_info.wb.task);
	}
	spin_unlock_bh(&bdi->wb_lock);
}

/*
 * This function is used when the first inode for this bdi is marked dirty. It
 * wakes up the corresponding bdi thread, which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out would
 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 * set up a timer which wakes the bdi thread up later.
 *
 * Note, we wouldn't bother setting up the timer, but this function is on the
 * fast path (used by '__mark_inode_dirty()'), so we save a few context
 * switches by delaying the wake-up.
 */
void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi)
{
	unsigned long timeout;

	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
	mod_timer(&bdi->wb.wakeup_timer, jiffies + timeout);
}
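
/*
 * Note that mod_timer() on an already-pending timer just moves its
 * expiry forward, so repeated calls from the dirtying fast path keep
 * re-arming a single timer rather than queueing extra wake-ups.
 */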

/*
 * Calculate the longest interval (jiffies) bdi threads are allowed to be
 * inactive.
 */
static unsigned long bdi_longest_inactive(void)
{
	unsigned long interval;

	interval = msecs_to_jiffies(dirty_writeback_interval * 10);
	return max(5UL * 60 * HZ, interval);
}
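
/*
 * E.g. with the default dirty_writeback_interval of 500 centiseconds the
 * interval is 5 seconds, so the 5-minute floor dominates and an idle bdi
 * thread survives for at least 5 minutes before it may be killed.
 */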

/*
 * Clear the pending bit and wake up anybody waiting for flusher thread
 * creation or shutdown.
 */
static void bdi_clear_pending(struct backing_dev_info *bdi)
{
	clear_bit(BDI_pending, &bdi->state);
	smp_mb__after_clear_bit();
	wake_up_bit(&bdi->state, BDI_pending);
}
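
/*
 * The barrier above is the usual missed-wakeup guard: it orders the bit
 * clearing before wake_up_bit()'s waitqueue check, so a sleeper in
 * wait_on_bit() cannot re-check the bit, still see it set, and then miss
 * the wake-up.
 */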

static int bdi_forker_thread(void *ptr)
{
	struct bdi_writeback *me = ptr;

	current->flags |= PF_SWAPWRITE;
	set_freezable();

	/*
	 * Our parent may run at a different priority, just set us to normal
	 */
	set_user_nice(current, 0);

	for (;;) {
		struct task_struct *task = NULL;
		struct backing_dev_info *bdi;
		enum {
			NO_ACTION,   /* Nothing to do */
			FORK_THREAD, /* Fork bdi thread */
			KILL_THREAD, /* Kill inactive bdi thread */
		} action = NO_ACTION;

		/*
		 * Temporary measure, we want to make sure we don't see
		 * dirty data on the default backing_dev_info
		 */
		if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list)) {
			del_timer(&me->wakeup_timer);
			wb_do_writeback(me, 0);
		}

		spin_lock_bh(&bdi_lock);
		/*
		 * In the following loop we are going to check whether we have
		 * some work to do without any synchronization with tasks
		 * waking us up to do work for them. Set the task state here
		 * so that we don't miss wakeups after verifying conditions.
		 */
		set_current_state(TASK_INTERRUPTIBLE);

		list_for_each_entry(bdi, &bdi_list, bdi_list) {
			bool have_dirty_io;

			if (!bdi_cap_writeback_dirty(bdi) ||
			     bdi_cap_flush_forker(bdi))
				continue;

			WARN(!test_bit(BDI_registered, &bdi->state),
			     "bdi %p/%s is not registered!\n", bdi, bdi->name);

			have_dirty_io = !list_empty(&bdi->work_list) ||
					wb_has_dirty_io(&bdi->wb);

			/*
			 * If the bdi has work to do, but the thread does not
			 * exist - create it.
			 */
			if (!bdi->wb.task && have_dirty_io) {
				/*
				 * Set the pending bit - if someone tries to
				 * unregister this bdi, it will wait on this
				 * bit.
				 */
				set_bit(BDI_pending, &bdi->state);
				action = FORK_THREAD;
				break;
			}

			spin_lock(&bdi->wb_lock);

			/*
			 * If there is no work to do and the bdi thread was
			 * inactive long enough - kill it. The wb_lock is taken
			 * to make sure no-one adds more work to this bdi and
			 * wakes the bdi thread up.
			 */
			if (bdi->wb.task && !have_dirty_io &&
			    time_after(jiffies, bdi->wb.last_active +
						bdi_longest_inactive())) {
				task = bdi->wb.task;
				bdi->wb.task = NULL;
				spin_unlock(&bdi->wb_lock);
				set_bit(BDI_pending, &bdi->state);
				action = KILL_THREAD;
				break;
			}
			spin_unlock(&bdi->wb_lock);
		}
		spin_unlock_bh(&bdi_lock);

		/* Keep working if default bdi still has things to do */
		if (!list_empty(&me->bdi->work_list))
			__set_current_state(TASK_RUNNING);

		switch (action) {
		case FORK_THREAD:
			__set_current_state(TASK_RUNNING);
			task = kthread_create(bdi_writeback_thread, &bdi->wb,
					      "flush-%s", dev_name(bdi->dev));
			if (IS_ERR(task)) {
				/*
				 * If thread creation fails, force writeout of
				 * the bdi from the thread. Hopefully 1024 is
				 * large enough for efficient IO.
				 */
				writeback_inodes_wb(&bdi->wb, 1024,
						    WB_REASON_FORKER_THREAD);
			} else {
				/*
				 * The spinlock makes sure we do not lose
				 * wake-ups when racing with 'bdi_queue_work()'.
				 * And as soon as the bdi thread is visible, we
				 * can start it.
				 */
				spin_lock_bh(&bdi->wb_lock);
				bdi->wb.task = task;
				spin_unlock_bh(&bdi->wb_lock);
				wake_up_process(task);
			}
			bdi_clear_pending(bdi);
			break;

		case KILL_THREAD:
			__set_current_state(TASK_RUNNING);
			kthread_stop(task);
			bdi_clear_pending(bdi);
			break;

		case NO_ACTION:
			if (!wb_has_dirty_io(me) || !dirty_writeback_interval)
				/*
				 * There is no dirty data. The only thing we
				 * should now care about is checking for
				 * inactive bdi threads and killing them. Thus,
				 * let's sleep for a longer time, save energy
				 * and be friendly to battery-powered devices.
				 */
				schedule_timeout(bdi_longest_inactive());
			else
				schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
			try_to_freeze();
			break;
		}
	}

	return 0;
}

/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi_lock);
	list_del_rcu(&bdi->bdi_list);
	spin_unlock_bh(&bdi_lock);

	synchronize_rcu_expedited();
}

int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...)
{
	va_list args;
	struct device *dev;

	if (bdi->dev)	/* The driver needs to use separate queues per device */
		return 0;

	va_start(args, fmt);
	dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
	va_end(args);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	bdi->dev = dev;

	/*
	 * Just start the forker thread for our default backing_dev_info,
	 * and add other bdis to the list. They will get a thread created
	 * on demand when they need it.
	 */
	if (bdi_cap_flush_forker(bdi)) {
		struct bdi_writeback *wb = &bdi->wb;

		wb->task = kthread_run(bdi_forker_thread, wb, "bdi-%s",
						dev_name(dev));
		if (IS_ERR(wb->task))
			return PTR_ERR(wb->task);
	}

	bdi_debug_register(bdi, dev_name(dev));
	set_bit(BDI_registered, &bdi->state);

	spin_lock_bh(&bdi_lock);
	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
	spin_unlock_bh(&bdi_lock);

	trace_writeback_bdi_register(bdi);
	return 0;
}
EXPORT_SYMBOL(bdi_register);

int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
{
	return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
}
EXPORT_SYMBOL(bdi_register_dev);
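
/*
 * A block driver's request queue typically ends up registered via
 * something like the following (an illustrative sketch with assumed
 * variable names, not code from this file):
 *
 *	bdi_register_dev(&q->backing_dev_info, disk_devt(disk));
 *
 * which names the bdi after the device's major:minor pair.
 */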

/*
 * Remove bdi from the global list and shut down any threads we have running
 */
static void bdi_wb_shutdown(struct backing_dev_info *bdi)
{
	if (!bdi_cap_writeback_dirty(bdi))
		return;

	/*
	 * Make sure nobody finds us on the bdi_list anymore
	 */
	bdi_remove_from_list(bdi);

	/*
	 * If setup is pending, wait for that to complete first
	 */
	wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
			TASK_UNINTERRUPTIBLE);

	/*
	 * Finally, kill the kernel thread. We don't need to be RCU
	 * safe anymore, since the bdi is gone from visibility.
	 */
	if (bdi->wb.task)
		kthread_stop(bdi->wb.task);
}

/*
 * This bdi is going away now, make sure that no super_blocks point to it
 */
static void bdi_prune_sb(struct backing_dev_info *bdi)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_bdi == bdi)
			sb->s_bdi = &default_backing_dev_info;
	}
	spin_unlock(&sb_lock);
}

void bdi_unregister(struct backing_dev_info *bdi)
{
	if (bdi->dev) {
		bdi_set_min_ratio(bdi, 0);
		trace_writeback_bdi_unregister(bdi);
		bdi_prune_sb(bdi);
		del_timer_sync(&bdi->wb.wakeup_timer);

		if (!bdi_cap_flush_forker(bdi))
			bdi_wb_shutdown(bdi);
		bdi_debug_unregister(bdi);
		device_unregister(bdi->dev);
		bdi->dev = NULL;
	}
}
EXPORT_SYMBOL(bdi_unregister);

static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
{
	memset(wb, 0, sizeof(*wb));

	wb->bdi = bdi;
	wb->last_old_flush = jiffies;
	INIT_LIST_HEAD(&wb->b_dirty);
	INIT_LIST_HEAD(&wb->b_io);
	INIT_LIST_HEAD(&wb->b_more_io);
	spin_lock_init(&wb->list_lock);
	setup_timer(&wb->wakeup_timer, wakeup_timer_fn, (unsigned long)bdi);
}

/*
 * Initial write bandwidth: 100 MB/s
 */
#define INIT_BW		(100 << (20 - PAGE_SHIFT))
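/*
 * i.e. 100 MB expressed in pages: 100 << 20 bytes divided by the page
 * size. With 4 KiB pages this is 100 << 8 == 25600 pages per second.
 */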

int bdi_init(struct backing_dev_info *bdi)
{
	int i, err;

	bdi->dev = NULL;

	bdi->min_ratio = 0;
	bdi->max_ratio = 100;
	bdi->max_prop_frac = PROP_FRAC_BASE;
	spin_lock_init(&bdi->wb_lock);
	INIT_LIST_HEAD(&bdi->bdi_list);
	INIT_LIST_HEAD(&bdi->work_list);

	bdi_wb_init(&bdi->wb, bdi);

	for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
		err = percpu_counter_init(&bdi->bdi_stat[i], 0);
		if (err)
			goto err;
	}

	bdi->dirty_exceeded = 0;

	bdi->bw_time_stamp = jiffies;
	bdi->written_stamp = 0;

	bdi->balanced_dirty_ratelimit = INIT_BW;
	bdi->dirty_ratelimit = INIT_BW;
	bdi->write_bandwidth = INIT_BW;
	bdi->avg_write_bandwidth = INIT_BW;

	err = prop_local_init_percpu(&bdi->completions);

	if (err) {
err:
		while (i--)
			percpu_counter_destroy(&bdi->bdi_stat[i]);
	}

	return err;
}
EXPORT_SYMBOL(bdi_init);

void bdi_destroy(struct backing_dev_info *bdi)
{
	int i;

	/*
	 * Splice our entries to the default_backing_dev_info, since this
	 * bdi is going away.
	 */
	if (bdi_has_dirty_io(bdi)) {
		struct bdi_writeback *dst = &default_backing_dev_info.wb;

		bdi_lock_two(&bdi->wb, dst);
		list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
		list_splice(&bdi->wb.b_io, &dst->b_io);
		list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
		spin_unlock(&bdi->wb.list_lock);
		spin_unlock(&dst->list_lock);
	}

	bdi_unregister(bdi);

	/*
	 * If bdi_unregister() had already been called earlier, the
	 * wakeup_timer could still be armed because bdi_prune_sb()
	 * can race with the bdi_wakeup_thread_delayed() calls from
	 * __mark_inode_dirty().
	 */
	del_timer_sync(&bdi->wb.wakeup_timer);

	for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
		percpu_counter_destroy(&bdi->bdi_stat[i]);

	prop_local_destroy_percpu(&bdi->completions);
}
EXPORT_SYMBOL(bdi_destroy);

/*
 * For use from filesystems to quickly init and register a bdi associated
 * with dirty writeback
 */
int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
			   unsigned int cap)
{
	char tmp[32];
	int err;

	bdi->name = name;
	bdi->capabilities = cap;
	err = bdi_init(bdi);
	if (err)
		return err;

	sprintf(tmp, "%.28s%s", name, "-%d");
	err = bdi_register(bdi, NULL, tmp, atomic_long_inc_return(&bdi_seq));
	if (err) {
		bdi_destroy(bdi);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(bdi_setup_and_register);
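
/*
 * A filesystem would typically call this from its mount path; an
 * illustrative sketch (the names "sbi" and "myfs" are assumed, not from
 * this file):
 *
 *	err = bdi_setup_and_register(&sbi->bdi, "myfs", BDI_CAP_MAP_COPY);
 *	if (err)
 *		return err;
 *	sb->s_bdi = &sbi->bdi;
 *
 * The "-%d" suffix built above, fed by the global bdi_seq counter, keeps
 * concurrently mounted instances from colliding in sysfs.
 */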

static wait_queue_head_t congestion_wqh[2] = {
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
	};
static atomic_t nr_bdi_congested[2];

void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	enum bdi_state bit;
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	bit = sync ? BDI_sync_congested : BDI_async_congested;
	if (test_and_clear_bit(bit, &bdi->state))
		atomic_dec(&nr_bdi_congested[sync]);
	smp_mb__after_clear_bit();
	if (waitqueue_active(wqh))
		wake_up(wqh);
}
EXPORT_SYMBOL(clear_bdi_congested);

void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	enum bdi_state bit;

	bit = sync ? BDI_sync_congested : BDI_async_congested;
	if (!test_and_set_bit(bit, &bdi->state))
		atomic_inc(&nr_bdi_congested[sync]);
}
EXPORT_SYMBOL(set_bdi_congested);

/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion.  If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

	trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
					jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(congestion_wait);
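
/*
 * Callers in the reclaim and writeback paths typically back off with
 * something like (illustrative):
 *
 *	congestion_wait(BLK_RW_ASYNC, HZ/10);
 *
 * i.e. sleep for up to a tenth of a second unless a congested bdi clears
 * its congestion bit or a write completes first.
 */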

/**
 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a zone to complete writes
 * @zone: A zone to check if it is heavily congested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * If any backing_dev is congested and the given @zone has experienced
 * recent congestion, this waits for up to @timeout jiffies for either a
 * BDI to exit congestion of the given @sync queue or a write to complete.
 *
 * In the absence of zone congestion, cond_resched() is called to yield
 * the processor if necessary but otherwise does not sleep.
 *
 * The return value is 0 if the sleep is for the full timeout. Otherwise,
 * it is the number of jiffies that were still remaining when the function
 * returned. return_value == timeout implies the function did not sleep.
 */
long wait_iff_congested(struct zone *zone, int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	/*
	 * If there is no congestion, or heavy congestion is not being
	 * encountered in the current zone, yield if necessary instead
	 * of sleeping on the congestion queue
	 */
	if (atomic_read(&nr_bdi_congested[sync]) == 0 ||
			!zone_is_reclaim_congested(zone)) {
		cond_resched();

		/* In case we scheduled, work out time remaining */
		ret = timeout - (jiffies - start);
		if (ret < 0)
			ret = 0;

		goto out;
	}

	/* Sleep until uncongested or a write happens */
	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

out:
	trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
					jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(wait_iff_congested);
876