// SPDX-License-Identifier: GPL-2.0-only

#include <linux/blkdev.h>
#include <linux/wait.h>
#include <linux/rbtree.h>
#include <linux/kthread.h>
#include <linux/backing-dev.h>
#include <linux/blk-cgroup.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>

struct backing_dev_info noop_backing_dev_info;
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static struct class *bdi_class;
static const char *bdi_unknown_name = "(unknown)";

/*
 * bdi_lock protects bdi_tree and updates to bdi_list. bdi_list has RCU
 * reader side locking.
 */
DEFINE_SPINLOCK(bdi_lock);
static u64 bdi_id_cursor;
static struct rb_root bdi_tree = RB_ROOT;
LIST_HEAD(bdi_list);

/* bdi_wq serves all asynchronous writeback tasks */
struct workqueue_struct *bdi_wq;

#define K(x) ((x) << (PAGE_SHIFT - 10))
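/*
 * K(x) converts a page count to KiB: each page is 2^(PAGE_SHIFT - 10) KiB,
 * so with 4 KiB pages (PAGE_SHIFT == 12) this is simply x << 2, i.e. x * 4.
 */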

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
	bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
	struct backing_dev_info *bdi = m->private;
	struct bdi_writeback *wb = &bdi->wb;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long wb_thresh;
	unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time;
	struct inode *inode;

	nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0;
	spin_lock(&wb->list_lock);
	list_for_each_entry(inode, &wb->b_dirty, i_io_list)
		nr_dirty++;
	list_for_each_entry(inode, &wb->b_io, i_io_list)
		nr_io++;
	list_for_each_entry(inode, &wb->b_more_io, i_io_list)
		nr_more_io++;
	list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
		if (inode->i_state & I_DIRTY_TIME)
			nr_dirty_time++;
	spin_unlock(&wb->list_lock);

	global_dirty_limits(&background_thresh, &dirty_thresh);
	wb_thresh = wb_calc_thresh(wb, dirty_thresh);

	seq_printf(m,
		   "BdiWriteback:       %10lu kB\n"
		   "BdiReclaimable:     %10lu kB\n"
		   "BdiDirtyThresh:     %10lu kB\n"
		   "DirtyThresh:        %10lu kB\n"
		   "BackgroundThresh:   %10lu kB\n"
		   "BdiDirtied:         %10lu kB\n"
		   "BdiWritten:         %10lu kB\n"
		   "BdiWriteBandwidth:  %10lu kBps\n"
		   "b_dirty:            %10lu\n"
		   "b_io:               %10lu\n"
		   "b_more_io:          %10lu\n"
		   "b_dirty_time:       %10lu\n"
		   "bdi_list:           %10u\n"
		   "state:              %10lx\n",
		   (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
		   (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
		   K(wb_thresh),
		   K(dirty_thresh),
		   K(background_thresh),
		   (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
		   (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
		   (unsigned long) K(wb->write_bandwidth),
		   nr_dirty,
		   nr_io,
		   nr_more_io,
		   nr_dirty_time,
		   !list_empty(&bdi->bdi_list), bdi->wb.state);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(bdi_debug_stats);
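/*
 * Illustrative example (not part of this file): assuming debugfs is mounted
 * at /sys/kernel/debug, the per-bdi "stats" file registered by
 * bdi_debug_register() below can be read with e.g.
 * "cat /sys/kernel/debug/bdi/8:0/stats" for an example device named "8:0".
 * The output follows the seq_printf() format above, with all byte counters
 * reported in kB; the values here are made up:
 *
 *	BdiWriteback:                0 kB
 *	BdiReclaimable:            768 kB
 *	...
 *	b_dirty:                     3
 *	state:                       5
 */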

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);

	debugfs_create_file("stats", 0444, bdi->debug_dir, bdi,
			    &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
	debugfs_remove_recursive(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
				      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif

static ssize_t read_ahead_kb_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned long read_ahead_kb;
	ssize_t ret;

	ret = kstrtoul(buf, 10, &read_ahead_kb);
	if (ret < 0)
		return ret;

	bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);

	return count;
}

#define BDI_SHOW(name, expr)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
									\
	return sysfs_emit(buf, "%lld\n", (long long)expr);		\
}									\
static DEVICE_ATTR_RW(name);

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
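/*
 * Illustrative example (not part of this file): the attributes above are
 * exposed through the "bdi" device class, so for an example device "8:0"
 *
 *	echo 512 > /sys/class/bdi/8:0/read_ahead_kb
 *
 * sets bdi->ra_pages to 512 KiB worth of pages (128 pages with 4 KiB pages).
 */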

static ssize_t min_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_min_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_max_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)
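/*
 * Illustrative example (not part of this file): min_ratio guarantees a
 * device a minimum percentage of the write-back cache, while max_ratio caps
 * the percentage it may consume, e.g.
 *
 *	echo 10 > /sys/class/bdi/8:0/max_ratio
 *
 * Validation (range checks, min <= max) is done by bdi_set_min_ratio() and
 * bdi_set_max_ratio() in mm/page-writeback.c; a rejected value makes the
 * write return that error instead of the byte count.
 */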

static ssize_t stable_pages_required_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	dev_warn_once(dev,
		"the stable_pages_required attribute has been removed. Use the stable_writes queue attribute instead.\n");
	return sysfs_emit(buf, "%d\n", 0);
}
static DEVICE_ATTR_RO(stable_pages_required);

static struct attribute *bdi_dev_attrs[] = {
	&dev_attr_read_ahead_kb.attr,
	&dev_attr_min_ratio.attr,
	&dev_attr_max_ratio.attr,
	&dev_attr_stable_pages_required.attr,
	NULL,
};
ATTRIBUTE_GROUPS(bdi_dev);

static __init int bdi_class_init(void)
{
	bdi_class = class_create(THIS_MODULE, "bdi");
	if (IS_ERR(bdi_class))
		return PTR_ERR(bdi_class);

	bdi_class->dev_groups = bdi_dev_groups;
	bdi_debug_init();

	return 0;
}
postcore_initcall(bdi_class_init);

static int bdi_init(struct backing_dev_info *bdi);

static int __init default_bdi_init(void)
{
	int err;

	bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_UNBOUND |
				 WQ_SYSFS, 0);
	if (!bdi_wq)
		return -ENOMEM;

	err = bdi_init(&noop_backing_dev_info);

	return err;
}
subsys_initcall(default_bdi_init);

/*
 * This function is used when the first inode for this wb is marked dirty. It
 * wakes up the corresponding bdi thread which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out would
 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 * set up a timer which wakes the bdi thread up later.
 *
 * Note, we wouldn't bother setting up a timer at all, but this function is
 * on the fast-path (used by '__mark_inode_dirty()'), so delaying the wake-up
 * saves a few context switches.
 *
 * We have to be careful not to postpone flush work if it is scheduled for
 * earlier. Thus we use queue_delayed_work().
 */
void wb_wakeup_delayed(struct bdi_writeback *wb)
{
	unsigned long timeout;

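	/* dirty_writeback_interval is in centiseconds; multiply by 10 to get msecs */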
	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
	spin_lock_bh(&wb->work_lock);
	if (test_bit(WB_registered, &wb->state))
		queue_delayed_work(bdi_wq, &wb->dwork, timeout);
	spin_unlock_bh(&wb->work_lock);
}

static void wb_update_bandwidth_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(to_delayed_work(work),
						struct bdi_writeback, bw_dwork);

	wb_update_bandwidth(wb);
}

/*
 * Initial write bandwidth: 100 MB/s
 */
#define INIT_BW		(100 << (20 - PAGE_SHIFT))
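/*
 * i.e. 100 MB/s expressed in pages per second: (100 << 20) bytes divided by
 * the page size, which is 25600 pages/s with 4 KiB pages.
 */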

static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
		   gfp_t gfp)
{
	int i, err;

	memset(wb, 0, sizeof(*wb));

	wb->bdi = bdi;
	wb->last_old_flush = jiffies;
	INIT_LIST_HEAD(&wb->b_dirty);
	INIT_LIST_HEAD(&wb->b_io);
	INIT_LIST_HEAD(&wb->b_more_io);
	INIT_LIST_HEAD(&wb->b_dirty_time);
	spin_lock_init(&wb->list_lock);

	atomic_set(&wb->writeback_inodes, 0);
	wb->bw_time_stamp = jiffies;
	wb->balanced_dirty_ratelimit = INIT_BW;
	wb->dirty_ratelimit = INIT_BW;
	wb->write_bandwidth = INIT_BW;
	wb->avg_write_bandwidth = INIT_BW;

	spin_lock_init(&wb->work_lock);
	INIT_LIST_HEAD(&wb->work_list);
	INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
	INIT_DELAYED_WORK(&wb->bw_dwork, wb_update_bandwidth_workfn);
	wb->dirty_sleep = jiffies;

	err = fprop_local_init_percpu(&wb->completions, gfp);
	if (err)
		return err;

	for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
		err = percpu_counter_init(&wb->stat[i], 0, gfp);
		if (err)
			goto out_destroy_stat;
	}

	return 0;

out_destroy_stat:
	while (i--)
		percpu_counter_destroy(&wb->stat[i]);
	fprop_local_destroy_percpu(&wb->completions);
	return err;
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb);

/*
 * Remove bdi from the global list and shut down any threads we have running
 */
static void wb_shutdown(struct bdi_writeback *wb)
{
	/* Make sure nobody queues further work */
	spin_lock_bh(&wb->work_lock);
	if (!test_and_clear_bit(WB_registered, &wb->state)) {
		spin_unlock_bh(&wb->work_lock);
		return;
	}
	spin_unlock_bh(&wb->work_lock);

	cgwb_remove_from_bdi_list(wb);
	/*
	 * Drain the work list and shut down the delayed_work.  !WB_registered
	 * tells wb_workfn() that @wb is dying and its work_list needs to
	 * be drained no matter what.
	 */
	mod_delayed_work(bdi_wq, &wb->dwork, 0);
	flush_delayed_work(&wb->dwork);
	WARN_ON(!list_empty(&wb->work_list));
	flush_delayed_work(&wb->bw_dwork);
}

static void wb_exit(struct bdi_writeback *wb)
{
	int i;

	WARN_ON(delayed_work_pending(&wb->dwork));

	for (i = 0; i < NR_WB_STAT_ITEMS; i++)
		percpu_counter_destroy(&wb->stat[i]);

	fprop_local_destroy_percpu(&wb->completions);
}

#ifdef CONFIG_CGROUP_WRITEBACK

#include <linux/memcontrol.h>

/*
 * cgwb_lock protects bdi->cgwb_tree, blkcg->cgwb_list, offline_cgwbs and
 * memcg->cgwb_list.  bdi->cgwb_tree is also RCU protected.
 */
static DEFINE_SPINLOCK(cgwb_lock);
static struct workqueue_struct *cgwb_release_wq;

static LIST_HEAD(offline_cgwbs);
static void cleanup_offline_cgwbs_workfn(struct work_struct *work);
static DECLARE_WORK(cleanup_offline_cgwbs_work, cleanup_offline_cgwbs_workfn);

static void cgwb_release_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
						release_work);
	struct backing_dev_info *bdi = wb->bdi;

	mutex_lock(&wb->bdi->cgwb_release_mutex);
	wb_shutdown(wb);

	css_put(wb->memcg_css);
	css_put(wb->blkcg_css);
	mutex_unlock(&wb->bdi->cgwb_release_mutex);

	/* triggers blkg destruction if no online users left */
	blkcg_unpin_online(wb->blkcg_css);

	fprop_local_destroy_percpu(&wb->memcg_completions);

	spin_lock_irq(&cgwb_lock);
	list_del(&wb->offline_node);
	spin_unlock_irq(&cgwb_lock);

	percpu_ref_exit(&wb->refcnt);
	wb_exit(wb);
	bdi_put(bdi);
	WARN_ON_ONCE(!list_empty(&wb->b_attached));
	kfree_rcu(wb, rcu);
}

static void cgwb_release(struct percpu_ref *refcnt)
{
	struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
						refcnt);
	queue_work(cgwb_release_wq, &wb->release_work);
}

static void cgwb_kill(struct bdi_writeback *wb)
{
	lockdep_assert_held(&cgwb_lock);

	WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
	list_del(&wb->memcg_node);
	list_del(&wb->blkcg_node);
	list_add(&wb->offline_node, &offline_cgwbs);
	percpu_ref_kill(&wb->refcnt);
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
	spin_lock_irq(&cgwb_lock);
	list_del_rcu(&wb->bdi_node);
	spin_unlock_irq(&cgwb_lock);
}

static int cgwb_create(struct backing_dev_info *bdi,
		       struct cgroup_subsys_state *memcg_css, gfp_t gfp)
{
	struct mem_cgroup *memcg;
	struct cgroup_subsys_state *blkcg_css;
	struct list_head *memcg_cgwb_list, *blkcg_cgwb_list;
	struct bdi_writeback *wb;
	unsigned long flags;
	int ret = 0;

	memcg = mem_cgroup_from_css(memcg_css);
	blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
	memcg_cgwb_list = &memcg->cgwb_list;
	blkcg_cgwb_list = blkcg_get_cgwb_list(blkcg_css);

	/* look up again under lock and discard on blkcg mismatch */
	spin_lock_irqsave(&cgwb_lock, flags);
	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
	if (wb && wb->blkcg_css != blkcg_css) {
		cgwb_kill(wb);
		wb = NULL;
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (wb)
		goto out_put;

	/* need to create a new one */
	wb = kmalloc(sizeof(*wb), gfp);
	if (!wb) {
		ret = -ENOMEM;
		goto out_put;
	}

	ret = wb_init(wb, bdi, gfp);
	if (ret)
		goto err_free;

	ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);
	if (ret)
		goto err_wb_exit;

	ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);
	if (ret)
		goto err_ref_exit;

	wb->memcg_css = memcg_css;
	wb->blkcg_css = blkcg_css;
	INIT_LIST_HEAD(&wb->b_attached);
	INIT_WORK(&wb->release_work, cgwb_release_workfn);
	set_bit(WB_registered, &wb->state);
	bdi_get(bdi);

	/*
	 * The root wb determines the registered state of the whole bdi, and
	 * the next pointers of memcg_cgwb_list and blkcg_cgwb_list indicate
	 * whether the corresponding memcg and blkcg are still online.  Don't
	 * link @wb if any of them is dead.  See wb_memcg_offline() and
	 * wb_blkcg_offline().
	 */
	ret = -ENODEV;
	spin_lock_irqsave(&cgwb_lock, flags);
	if (test_bit(WB_registered, &bdi->wb.state) &&
	    blkcg_cgwb_list->next && memcg_cgwb_list->next) {
		/* we might have raced another instance of this function */
		ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
		if (!ret) {
			list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
			list_add(&wb->memcg_node, memcg_cgwb_list);
			list_add(&wb->blkcg_node, blkcg_cgwb_list);
			blkcg_pin_online(blkcg_css);
			css_get(memcg_css);
			css_get(blkcg_css);
		}
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (ret) {
		if (ret == -EEXIST)
			ret = 0;
		goto err_fprop_exit;
	}
	goto out_put;

err_fprop_exit:
	bdi_put(bdi);
	fprop_local_destroy_percpu(&wb->memcg_completions);
err_ref_exit:
	percpu_ref_exit(&wb->refcnt);
err_wb_exit:
	wb_exit(wb);
err_free:
	kfree(wb);
out_put:
	css_put(blkcg_css);
	return ret;
}

/**
 * wb_get_lookup - get wb for a given memcg
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 *
 * Try to get the wb for @memcg_css on @bdi.  The returned wb has its
 * refcount incremented.
 *
 * This function uses css_get() on @memcg_css and thus expects its refcnt
 * to be positive on invocation.  IOW, rcu_read_lock() protection on
 * @memcg_css isn't enough.  try_get it before calling this function.
 *
 * A wb is keyed by its associated memcg.  As blkcg implicitly enables
 * memcg on the default hierarchy, the memcg association is guaranteed to be
 * more specific (equal to or a descendant of the associated blkcg) and thus
 * can identify both the memcg and blkcg associations.
 *
 * Because the blkcg associated with a memcg may change as blkcg is enabled
 * and disabled closer to root in the hierarchy, each wb keeps track of
 * both the memcg and blkcg associated with it and verifies the blkcg on
 * each lookup.  On mismatch, the existing wb is discarded and a new one is
 * created.
 */
struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css)
{
	struct bdi_writeback *wb;

	if (!memcg_css->parent)
		return &bdi->wb;

	rcu_read_lock();
	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
	if (wb) {
		struct cgroup_subsys_state *blkcg_css;

		/* see whether the blkcg association has changed */
		blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
		if (unlikely(wb->blkcg_css != blkcg_css || !wb_tryget(wb)))
			wb = NULL;
		css_put(blkcg_css);
	}
	rcu_read_unlock();

	return wb;
}

/**
 * wb_get_create - get wb for a given memcg, create if necessary
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 * @gfp: allocation mask to use
 *
 * Try to get the wb for @memcg_css on @bdi.  If it doesn't exist, try to
 * create one.  See wb_get_lookup() for more details.
 */
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp)
{
	struct bdi_writeback *wb;

	might_alloc(gfp);

	if (!memcg_css->parent)
		return &bdi->wb;

	do {
		wb = wb_get_lookup(bdi, memcg_css);
	} while (!wb && !cgwb_create(bdi, memcg_css, gfp));

	return wb;
}
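/*
 * Usage sketch (illustrative, not part of this file): a caller holding a
 * reference on a memcg css would typically do
 *
 *	wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
 *	if (!wb)
 *		wb = &bdi->wb;
 *
 * falling back to the root wb if the allocation fails.  Except for the root
 * wb, the returned wb carries a reference (taken via wb_tryget() in
 * wb_get_lookup()) which the caller drops with wb_put() when done.
 */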

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	int ret;

	INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
	mutex_init(&bdi->cgwb_release_mutex);
	init_rwsem(&bdi->wb_switch_rwsem);

	ret = wb_init(&bdi->wb, bdi, GFP_KERNEL);
	if (!ret) {
		bdi->wb.memcg_css = &root_mem_cgroup->css;
		bdi->wb.blkcg_css = blkcg_root_css;
	}
	return ret;
}

static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
{
	struct radix_tree_iter iter;
	void **slot;
	struct bdi_writeback *wb;

	WARN_ON(test_bit(WB_registered, &bdi->wb.state));

	spin_lock_irq(&cgwb_lock);
	radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
		cgwb_kill(*slot);
	spin_unlock_irq(&cgwb_lock);

	mutex_lock(&bdi->cgwb_release_mutex);
	spin_lock_irq(&cgwb_lock);
	while (!list_empty(&bdi->wb_list)) {
		wb = list_first_entry(&bdi->wb_list, struct bdi_writeback,
				      bdi_node);
		spin_unlock_irq(&cgwb_lock);
		wb_shutdown(wb);
		spin_lock_irq(&cgwb_lock);
	}
	spin_unlock_irq(&cgwb_lock);
	mutex_unlock(&bdi->cgwb_release_mutex);
}

/*
 * cleanup_offline_cgwbs_workfn - try to release dying cgwbs
 *
 * Try to release dying cgwbs by switching attached inodes to the nearest
 * living ancestor's writeback. Processed wbs are placed at the end
 * of the list to guarantee forward progress.
 */
static void cleanup_offline_cgwbs_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb;
	LIST_HEAD(processed);

	spin_lock_irq(&cgwb_lock);

	while (!list_empty(&offline_cgwbs)) {
		wb = list_first_entry(&offline_cgwbs, struct bdi_writeback,
				      offline_node);
		list_move(&wb->offline_node, &processed);

		/*
		 * If wb is dirty, cleaning up the writeback by switching
		 * attached inodes would effectively remove any bandwidth
		 * restrictions, which isn't the goal.  Instead, the cleanup
		 * can be postponed until the next run, by which time the io
		 * will likely have completed.  If some inodes get re-dirtied
		 * in the meantime, they should eventually be switched to a
		 * new cgwb.
		 */
		if (wb_has_dirty_io(wb))
			continue;

		if (!wb_tryget(wb))
			continue;

		spin_unlock_irq(&cgwb_lock);
		while (cleanup_offline_cgwb(wb))
			cond_resched();
		spin_lock_irq(&cgwb_lock);

		wb_put(wb);
	}

	if (!list_empty(&processed))
		list_splice_tail(&processed, &offline_cgwbs);

	spin_unlock_irq(&cgwb_lock);
}

/**
 * wb_memcg_offline - kill all wb's associated with a memcg being offlined
 * @memcg: memcg being offlined
 *
 * Also prevents creation of any new wb's associated with @memcg.
 */
void wb_memcg_offline(struct mem_cgroup *memcg)
{
	struct list_head *memcg_cgwb_list = &memcg->cgwb_list;
	struct bdi_writeback *wb, *next;

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)
		cgwb_kill(wb);
	memcg_cgwb_list->next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);

	queue_work(system_unbound_wq, &cleanup_offline_cgwbs_work);
}

/**
 * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined
 * @css: blkcg being offlined
 *
 * Also prevents creation of any new wb's associated with @css.
 */
void wb_blkcg_offline(struct cgroup_subsys_state *css)
{
	struct bdi_writeback *wb, *next;
	struct list_head *list = blkcg_get_cgwb_list(css);

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, list, blkcg_node)
		cgwb_kill(wb);
	list->next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);
}

static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
	spin_lock_irq(&cgwb_lock);
	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
	spin_unlock_irq(&cgwb_lock);
}

static int __init cgwb_init(void)
{
	/*
	 * There can be many concurrent release work items overwhelming
	 * system_wq.  Put them in a separate wq and limit concurrency.
	 * There's no point in executing many of these in parallel.
	 */
	cgwb_release_wq = alloc_workqueue("cgwb_release", 0, 1);
	if (!cgwb_release_wq)
		return -ENOMEM;

	return 0;
}
subsys_initcall(cgwb_init);

#else	/* CONFIG_CGROUP_WRITEBACK */

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	return wb_init(&bdi->wb, bdi, GFP_KERNEL);
}

static void cgwb_bdi_unregister(struct backing_dev_info *bdi) { }

static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
	list_del_rcu(&wb->bdi_node);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

static int bdi_init(struct backing_dev_info *bdi)
{
	int ret;

	bdi->dev = NULL;

	kref_init(&bdi->refcnt);
	bdi->min_ratio = 0;
	bdi->max_ratio = 100;
	bdi->max_prop_frac = FPROP_FRAC_BASE;
	INIT_LIST_HEAD(&bdi->bdi_list);
	INIT_LIST_HEAD(&bdi->wb_list);
	init_waitqueue_head(&bdi->wb_waitq);

	ret = cgwb_bdi_init(bdi);

	return ret;
}

struct backing_dev_info *bdi_alloc(int node_id)
{
	struct backing_dev_info *bdi;

	bdi = kzalloc_node(sizeof(*bdi), GFP_KERNEL, node_id);
	if (!bdi)
		return NULL;

	if (bdi_init(bdi)) {
		kfree(bdi);
		return NULL;
	}
	bdi->capabilities = BDI_CAP_WRITEBACK | BDI_CAP_WRITEBACK_ACCT;
	bdi->ra_pages = VM_READAHEAD_PAGES;
	bdi->io_pages = VM_READAHEAD_PAGES;
	timer_setup(&bdi->laptop_mode_wb_timer, laptop_mode_timer_fn, 0);
	return bdi;
}
EXPORT_SYMBOL(bdi_alloc);
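/*
 * Lifecycle sketch (illustrative, not part of this file): a driver that
 * manages its own bdi typically does something like
 *
 *	bdi = bdi_alloc(NUMA_NO_NODE);
 *	if (!bdi)
 *		return -ENOMEM;
 *	err = bdi_register(bdi, "mydev%d", instance);
 *	...
 *	bdi_unregister(bdi);
 *	bdi_put(bdi);
 *
 * "mydev%d" and "instance" are made-up names.  bdi_alloc() returns with one
 * reference held (kref_init() in bdi_init()); bdi_put() drops it and the bdi
 * is freed by release_bdi() once the last reference goes away.
 */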

static struct rb_node **bdi_lookup_rb_node(u64 id, struct rb_node **parentp)
{
	struct rb_node **p = &bdi_tree.rb_node;
	struct rb_node *parent = NULL;
	struct backing_dev_info *bdi;

	lockdep_assert_held(&bdi_lock);

	while (*p) {
		parent = *p;
		bdi = rb_entry(parent, struct backing_dev_info, rb_node);

		if (bdi->id > id)
			p = &(*p)->rb_left;
		else if (bdi->id < id)
			p = &(*p)->rb_right;
		else
			break;
	}

	if (parentp)
		*parentp = parent;
	return p;
}

/**
 * bdi_get_by_id - lookup and get bdi from its id
 * @id: bdi id to lookup
 *
 * Find bdi matching @id and get it.  Returns NULL if the matching bdi
 * doesn't exist or is already unregistered.
 */
struct backing_dev_info *bdi_get_by_id(u64 id)
{
	struct backing_dev_info *bdi = NULL;
	struct rb_node **p;

	spin_lock_bh(&bdi_lock);
	p = bdi_lookup_rb_node(id, NULL);
	if (*p) {
		bdi = rb_entry(*p, struct backing_dev_info, rb_node);
		bdi_get(bdi);
	}
	spin_unlock_bh(&bdi_lock);

	return bdi;
}

int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args)
{
	struct device *dev;
	struct rb_node *parent, **p;

	if (bdi->dev)	/* The driver needs to use separate queues per device */
		return 0;

	vsnprintf(bdi->dev_name, sizeof(bdi->dev_name), fmt, args);
	dev = device_create(bdi_class, NULL, MKDEV(0, 0), bdi, bdi->dev_name);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	cgwb_bdi_register(bdi);
	bdi->dev = dev;

	bdi_debug_register(bdi, dev_name(dev));
	set_bit(WB_registered, &bdi->wb.state);

	spin_lock_bh(&bdi_lock);

	bdi->id = ++bdi_id_cursor;

	p = bdi_lookup_rb_node(bdi->id, &parent);
	rb_link_node(&bdi->rb_node, parent, p);
	rb_insert_color(&bdi->rb_node, &bdi_tree);

	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);

	spin_unlock_bh(&bdi_lock);

	trace_writeback_bdi_register(bdi);
	return 0;
}

int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = bdi_register_va(bdi, fmt, args);
	va_end(args);
	return ret;
}
EXPORT_SYMBOL(bdi_register);

void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner)
{
	WARN_ON_ONCE(bdi->owner);
	bdi->owner = owner;
	get_device(owner);
}

/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi_lock);
	rb_erase(&bdi->rb_node, &bdi_tree);
	list_del_rcu(&bdi->bdi_list);
	spin_unlock_bh(&bdi_lock);

	synchronize_rcu_expedited();
}

void bdi_unregister(struct backing_dev_info *bdi)
{
	del_timer_sync(&bdi->laptop_mode_wb_timer);

	/* make sure nobody finds us on the bdi_list anymore */
	bdi_remove_from_list(bdi);
	wb_shutdown(&bdi->wb);
	cgwb_bdi_unregister(bdi);

	/*
	 * If this BDI's min ratio has been set, use bdi_set_min_ratio() to
	 * update the global bdi_min_ratio.
	 */
	if (bdi->min_ratio)
		bdi_set_min_ratio(bdi, 0);

	if (bdi->dev) {
		bdi_debug_unregister(bdi);
		device_unregister(bdi->dev);
		bdi->dev = NULL;
	}

	if (bdi->owner) {
		put_device(bdi->owner);
		bdi->owner = NULL;
	}
}
EXPORT_SYMBOL(bdi_unregister);

static void release_bdi(struct kref *ref)
{
	struct backing_dev_info *bdi =
			container_of(ref, struct backing_dev_info, refcnt);

	WARN_ON_ONCE(test_bit(WB_registered, &bdi->wb.state));
	WARN_ON_ONCE(bdi->dev);
	wb_exit(&bdi->wb);
	kfree(bdi);
}

void bdi_put(struct backing_dev_info *bdi)
{
	kref_put(&bdi->refcnt, release_bdi);
}
EXPORT_SYMBOL(bdi_put);

struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb;

	if (!inode)
		return &noop_backing_dev_info;

	sb = inode->i_sb;
#ifdef CONFIG_BLOCK
	if (sb_is_blkdev_sb(sb))
		return I_BDEV(inode)->bd_disk->bdi;
#endif
	return sb->s_bdi;
}
EXPORT_SYMBOL(inode_to_bdi);
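/*
 * E.g. an inode on an ordinary filesystem resolves to its super_block's
 * s_bdi, an inode of a block device special file resolves to the disk's bdi
 * (bd_disk->bdi), and a NULL inode falls back to noop_backing_dev_info.
 */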

const char *bdi_dev_name(struct backing_dev_info *bdi)
{
	if (!bdi || !bdi->dev)
		return bdi_unknown_name;
	return bdi->dev_name;
}
EXPORT_SYMBOL_GPL(bdi_dev_name);
1007