xref: /linux/block/blk-cgroup.c (revision 19f2e267a5d0d26282a64f8f788c482852c95324)
1 /*
2  * Common Block IO controller cgroup interface
3  *
4  * Based on ideas and code from CFQ, CFS and BFQ:
5  * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
6  *
7  * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
8  *		      Paolo Valente <paolo.valente@unimore.it>
9  *
10  * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
11  * 	              Nauman Rafique <nauman@google.com>
12  *
13  * For policy-specific per-blkcg data:
14  * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
15  *                    Arianna Avanzini <avanzini.arianna@gmail.com>
16  */
17 #include <linux/ioprio.h>
18 #include <linux/kdev_t.h>
19 #include <linux/module.h>
20 #include <linux/sched/signal.h>
21 #include <linux/err.h>
22 #include <linux/blkdev.h>
23 #include <linux/backing-dev.h>
24 #include <linux/slab.h>
25 #include <linux/genhd.h>
26 #include <linux/delay.h>
27 #include <linux/atomic.h>
28 #include <linux/ctype.h>
29 #include <linux/blk-cgroup.h>
30 #include <linux/tracehook.h>
31 #include "blk.h"
32 
33 #define MAX_KEY_LEN 100
34 
35 /*
36  * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
37  * blkcg_pol_register_mutex nests outside of it and synchronizes entire
38  * policy [un]register operations including cgroup file additions /
39  * removals.  Putting cgroup file registration outside blkcg_pol_mutex
40  * allows grabbing it from cgroup callbacks.
41  */
42 static DEFINE_MUTEX(blkcg_pol_register_mutex);
43 static DEFINE_MUTEX(blkcg_pol_mutex);
44 
45 struct blkcg blkcg_root;
46 EXPORT_SYMBOL_GPL(blkcg_root);
47 
48 struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;
49 
50 static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
51 
52 static LIST_HEAD(all_blkcgs);		/* protected by blkcg_pol_mutex */
53 
54 static bool blkcg_debug_stats = false;
55 
56 static bool blkcg_policy_enabled(struct request_queue *q,
57 				 const struct blkcg_policy *pol)
58 {
59 	return pol && test_bit(pol->plid, q->blkcg_pols);
60 }
61 
62 /**
63  * blkg_free - free a blkg
64  * @blkg: blkg to free
65  *
66  * Free @blkg which may be partially allocated.
67  */
68 static void blkg_free(struct blkcg_gq *blkg)
69 {
70 	int i;
71 
72 	if (!blkg)
73 		return;
74 
75 	for (i = 0; i < BLKCG_MAX_POLS; i++)
76 		if (blkg->pd[i])
77 			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
78 
79 	blkg_rwstat_exit(&blkg->stat_ios);
80 	blkg_rwstat_exit(&blkg->stat_bytes);
81 	kfree(blkg);
82 }
83 
84 static void __blkg_release(struct rcu_head *rcu)
85 {
86 	struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);
87 
88 	percpu_ref_exit(&blkg->refcnt);
89 
90 	/* release the blkcg and parent blkg refs this blkg has been holding */
91 	css_put(&blkg->blkcg->css);
92 	if (blkg->parent)
93 		blkg_put(blkg->parent);
94 
95 	wb_congested_put(blkg->wb_congested);
96 
97 	blkg_free(blkg);
98 }
99 
100 /*
101  * A group is RCU protected, but holding the RCU read lock alone does not
102  * mean one can access all the fields of a blkg and assume they are valid.
103  * For example, don't try to follow the throtl_data and request_queue links.
104  *
105  * Holding a reference to a blkg under RCU only allows access to values
106  * local to the group, such as group stats and group rate limits.
107  */
108 static void blkg_release(struct percpu_ref *ref)
109 {
110 	struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt);
111 
112 	call_rcu(&blkg->rcu_head, __blkg_release);
113 }
114 
115 /**
116  * blkg_alloc - allocate a blkg
117  * @blkcg: block cgroup the new blkg is associated with
118  * @q: request_queue the new blkg is associated with
119  * @gfp_mask: allocation mask to use
120  *
121  * Allocate a new blkg associating @blkcg and @q.
122  */
123 static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
124 				   gfp_t gfp_mask)
125 {
126 	struct blkcg_gq *blkg;
127 	int i;
128 
129 	/* alloc and init base part */
130 	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
131 	if (!blkg)
132 		return NULL;
133 
134 	if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) ||
135 	    blkg_rwstat_init(&blkg->stat_ios, gfp_mask))
136 		goto err_free;
137 
138 	blkg->q = q;
139 	INIT_LIST_HEAD(&blkg->q_node);
140 	blkg->blkcg = blkcg;
141 
142 	for (i = 0; i < BLKCG_MAX_POLS; i++) {
143 		struct blkcg_policy *pol = blkcg_policy[i];
144 		struct blkg_policy_data *pd;
145 
146 		if (!blkcg_policy_enabled(q, pol))
147 			continue;
148 
149 		/* alloc per-policy data and attach it to blkg */
150 		pd = pol->pd_alloc_fn(gfp_mask, q->node);
151 		if (!pd)
152 			goto err_free;
153 
154 		blkg->pd[i] = pd;
155 		pd->blkg = blkg;
156 		pd->plid = i;
157 	}
158 
159 	return blkg;
160 
161 err_free:
162 	blkg_free(blkg);
163 	return NULL;
164 }
165 
166 struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
167 				      struct request_queue *q, bool update_hint)
168 {
169 	struct blkcg_gq *blkg;
170 
171 	/*
172 	 * Hint didn't match.  Look up from the radix tree.  Note that the
173 	 * hint can only be updated under queue_lock as otherwise @blkg
174 	 * could have already been removed from blkg_tree.  The caller is
175 	 * responsible for grabbing queue_lock if @update_hint.
176 	 */
177 	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
178 	if (blkg && blkg->q == q) {
179 		if (update_hint) {
180 			lockdep_assert_held(&q->queue_lock);
181 			rcu_assign_pointer(blkcg->blkg_hint, blkg);
182 		}
183 		return blkg;
184 	}
185 
186 	return NULL;
187 }
188 EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);
189 
190 /*
191  * If @new_blkg is %NULL, this function tries to allocate a new one as
192  * necessary using %GFP_NOWAIT.  @new_blkg is always consumed on return.
193  */
194 static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
195 				    struct request_queue *q,
196 				    struct blkcg_gq *new_blkg)
197 {
198 	struct blkcg_gq *blkg;
199 	struct bdi_writeback_congested *wb_congested;
200 	int i, ret;
201 
202 	WARN_ON_ONCE(!rcu_read_lock_held());
203 	lockdep_assert_held(&q->queue_lock);
204 
205 	/* request_queue is dying, do not create/recreate a blkg */
206 	if (blk_queue_dying(q)) {
207 		ret = -ENODEV;
208 		goto err_free_blkg;
209 	}
210 
211 	/* blkg holds a reference to blkcg */
212 	if (!css_tryget_online(&blkcg->css)) {
213 		ret = -ENODEV;
214 		goto err_free_blkg;
215 	}
216 
217 	wb_congested = wb_congested_get_create(q->backing_dev_info,
218 					       blkcg->css.id,
219 					       GFP_NOWAIT | __GFP_NOWARN);
220 	if (!wb_congested) {
221 		ret = -ENOMEM;
222 		goto err_put_css;
223 	}
224 
225 	/* allocate */
226 	if (!new_blkg) {
227 		new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
228 		if (unlikely(!new_blkg)) {
229 			ret = -ENOMEM;
230 			goto err_put_congested;
231 		}
232 	}
233 	blkg = new_blkg;
234 	blkg->wb_congested = wb_congested;
235 
236 	/* link parent */
237 	if (blkcg_parent(blkcg)) {
238 		blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
239 		if (WARN_ON_ONCE(!blkg->parent)) {
240 			ret = -ENODEV;
241 			goto err_put_congested;
242 		}
243 		blkg_get(blkg->parent);
244 	}
245 
246 	ret = percpu_ref_init(&blkg->refcnt, blkg_release, 0,
247 			      GFP_NOWAIT | __GFP_NOWARN);
248 	if (ret)
249 		goto err_cancel_ref;
250 
251 	/* invoke per-policy init */
252 	for (i = 0; i < BLKCG_MAX_POLS; i++) {
253 		struct blkcg_policy *pol = blkcg_policy[i];
254 
255 		if (blkg->pd[i] && pol->pd_init_fn)
256 			pol->pd_init_fn(blkg->pd[i]);
257 	}
258 
259 	/* insert */
260 	spin_lock(&blkcg->lock);
261 	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
262 	if (likely(!ret)) {
263 		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
264 		list_add(&blkg->q_node, &q->blkg_list);
265 
266 		for (i = 0; i < BLKCG_MAX_POLS; i++) {
267 			struct blkcg_policy *pol = blkcg_policy[i];
268 
269 			if (blkg->pd[i] && pol->pd_online_fn)
270 				pol->pd_online_fn(blkg->pd[i]);
271 		}
272 	}
273 	blkg->online = true;
274 	spin_unlock(&blkcg->lock);
275 
276 	if (!ret)
277 		return blkg;
278 
279 	/* @blkg failed to be fully initialized, use the usual release path */
280 	blkg_put(blkg);
281 	return ERR_PTR(ret);
282 
283 err_cancel_ref:
284 	percpu_ref_exit(&blkg->refcnt);
285 err_put_congested:
286 	wb_congested_put(wb_congested);
287 err_put_css:
288 	css_put(&blkcg->css);
289 err_free_blkg:
290 	blkg_free(new_blkg);
291 	return ERR_PTR(ret);
292 }
293 
294 /**
295  * __blkg_lookup_create - lookup blkg, try to create one if not there
296  * @blkcg: blkcg of interest
297  * @q: request_queue of interest
298  *
299  * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
300  * create one.  blkg creation is performed recursively from blkcg_root such
301  * that all non-root blkg's have access to the parent blkg.  This function
302  * should be called under RCU read lock and @q->queue_lock.
303  *
304  * Returns the blkg or the closest blkg if blkg_create() fails as it walks
305  * down from root.
306  */
307 struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
308 				      struct request_queue *q)
309 {
310 	struct blkcg_gq *blkg;
311 
312 	WARN_ON_ONCE(!rcu_read_lock_held());
313 	lockdep_assert_held(&q->queue_lock);
314 
315 	blkg = __blkg_lookup(blkcg, q, true);
316 	if (blkg)
317 		return blkg;
318 
319 	/*
320 	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
321 	 * non-root blkgs have access to their parents.  Returns the closest
322 	 * blkg to the intended blkg should blkg_create() fail.
323 	 */
324 	while (true) {
325 		struct blkcg *pos = blkcg;
326 		struct blkcg *parent = blkcg_parent(blkcg);
327 		struct blkcg_gq *ret_blkg = q->root_blkg;
328 
329 		while (parent) {
330 			blkg = __blkg_lookup(parent, q, false);
331 			if (blkg) {
332 				/* remember closest blkg */
333 				ret_blkg = blkg;
334 				break;
335 			}
336 			pos = parent;
337 			parent = blkcg_parent(parent);
338 		}
339 
340 		blkg = blkg_create(pos, q, NULL);
341 		if (IS_ERR(blkg))
342 			return ret_blkg;
343 		if (pos == blkcg)
344 			return blkg;
345 	}
346 }
347 
348 /**
349  * blkg_lookup_create - find or create a blkg
350  * @blkcg: target block cgroup
351  * @q: target request_queue
352  *
353  * This looks up or creates the blkg representing the unique pair
354  * of the blkcg and the request_queue.
355  */
356 struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
357 				    struct request_queue *q)
358 {
359 	struct blkcg_gq *blkg = blkg_lookup(blkcg, q);
360 
361 	if (unlikely(!blkg)) {
362 		unsigned long flags;
363 
364 		spin_lock_irqsave(&q->queue_lock, flags);
365 		blkg = __blkg_lookup_create(blkcg, q);
366 		spin_unlock_irqrestore(&q->queue_lock, flags);
367 	}
368 
369 	return blkg;
370 }
371 
372 static void blkg_destroy(struct blkcg_gq *blkg)
373 {
374 	struct blkcg *blkcg = blkg->blkcg;
375 	struct blkcg_gq *parent = blkg->parent;
376 	int i;
377 
378 	lockdep_assert_held(&blkg->q->queue_lock);
379 	lockdep_assert_held(&blkcg->lock);
380 
381 	/* Something is wrong if we are trying to remove the same group twice */
382 	WARN_ON_ONCE(list_empty(&blkg->q_node));
383 	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
384 
385 	for (i = 0; i < BLKCG_MAX_POLS; i++) {
386 		struct blkcg_policy *pol = blkcg_policy[i];
387 
388 		if (blkg->pd[i] && pol->pd_offline_fn)
389 			pol->pd_offline_fn(blkg->pd[i]);
390 	}
391 
392 	if (parent) {
393 		blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
394 		blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
395 	}
396 
397 	blkg->online = false;
398 
399 	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
400 	list_del_init(&blkg->q_node);
401 	hlist_del_init_rcu(&blkg->blkcg_node);
402 
403 	/*
404 	 * Both setting lookup hint to and clearing it from @blkg are done
405 	 * under queue_lock.  If it's not pointing to @blkg now, it never
406 	 * will.  Hint assignment itself can race safely.
407 	 */
408 	if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
409 		rcu_assign_pointer(blkcg->blkg_hint, NULL);
410 
411 	/*
412 	 * Put the reference taken at the time of creation so that when all
413 	 * queues are gone, group can be destroyed.
414 	 */
415 	percpu_ref_kill(&blkg->refcnt);
416 }
417 
418 /**
419  * blkg_destroy_all - destroy all blkgs associated with a request_queue
420  * @q: request_queue of interest
421  *
422  * Destroy all blkgs associated with @q.
423  */
424 static void blkg_destroy_all(struct request_queue *q)
425 {
426 	struct blkcg_gq *blkg, *n;
427 
428 	spin_lock_irq(&q->queue_lock);
429 	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
430 		struct blkcg *blkcg = blkg->blkcg;
431 
432 		spin_lock(&blkcg->lock);
433 		blkg_destroy(blkg);
434 		spin_unlock(&blkcg->lock);
435 	}
436 
437 	q->root_blkg = NULL;
438 	spin_unlock_irq(&q->queue_lock);
439 }
440 
441 /*
442  * A group is RCU protected, but holding the RCU read lock alone does not
443  * mean one can access all the fields of a blkg and assume they are valid.
444  * For example, don't try to follow the throtl_data and request_queue links.
445  *
446  * Holding a reference to a blkg under RCU only allows access to values
447  * local to the group, such as group stats and group rate limits.
448  */
449 void __blkg_release_rcu(struct rcu_head *rcu_head)
450 {
451 	struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head);
452 
453 	/* release the blkcg and parent blkg refs this blkg has been holding */
454 	css_put(&blkg->blkcg->css);
455 	if (blkg->parent)
456 		blkg_put(blkg->parent);
457 
458 	wb_congested_put(blkg->wb_congested);
459 
460 	blkg_free(blkg);
461 }
462 EXPORT_SYMBOL_GPL(__blkg_release_rcu);
463 
464 static int blkcg_reset_stats(struct cgroup_subsys_state *css,
465 			     struct cftype *cftype, u64 val)
466 {
467 	struct blkcg *blkcg = css_to_blkcg(css);
468 	struct blkcg_gq *blkg;
469 	int i;
470 
471 	mutex_lock(&blkcg_pol_mutex);
472 	spin_lock_irq(&blkcg->lock);
473 
474 	/*
475 	 * Note that stat reset is racy - it doesn't synchronize against
476 	 * stat updates.  This is a debug feature which shouldn't exist
477 	 * anyway.  If you get hit by a race, retry.
478 	 */
479 	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
480 		blkg_rwstat_reset(&blkg->stat_bytes);
481 		blkg_rwstat_reset(&blkg->stat_ios);
482 
483 		for (i = 0; i < BLKCG_MAX_POLS; i++) {
484 			struct blkcg_policy *pol = blkcg_policy[i];
485 
486 			if (blkg->pd[i] && pol->pd_reset_stats_fn)
487 				pol->pd_reset_stats_fn(blkg->pd[i]);
488 		}
489 	}
490 
491 	spin_unlock_irq(&blkcg->lock);
492 	mutex_unlock(&blkcg_pol_mutex);
493 	return 0;
494 }
495 
496 const char *blkg_dev_name(struct blkcg_gq *blkg)
497 {
498 	/* some drivers (floppy) instantiate a queue w/o disk registered */
499 	if (blkg->q->backing_dev_info->dev)
500 		return dev_name(blkg->q->backing_dev_info->dev);
501 	return NULL;
502 }
503 
504 /**
505  * blkcg_print_blkgs - helper for printing per-blkg data
506  * @sf: seq_file to print to
507  * @blkcg: blkcg of interest
508  * @prfill: fill function to print out a blkg
509  * @pol: policy in question
510  * @data: data to be passed to @prfill
511  * @show_total: to print out sum of prfill return values or not
512  *
513  * This function invokes @prfill on each blkg of @blkcg if pd for the
514  * policy specified by @pol exists.  @prfill is invoked with @sf, the
515  * policy data and @data and the matching queue lock held.  If @show_total
516  * is %true, the sum of the return values from @prfill is printed with
517  * "Total" label at the end.
518  *
519  * This is to be used to construct print functions for
520  * cftype->read_seq_string method.
521  */
522 void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
523 		       u64 (*prfill)(struct seq_file *,
524 				     struct blkg_policy_data *, int),
525 		       const struct blkcg_policy *pol, int data,
526 		       bool show_total)
527 {
528 	struct blkcg_gq *blkg;
529 	u64 total = 0;
530 
531 	rcu_read_lock();
532 	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
533 		spin_lock_irq(&blkg->q->queue_lock);
534 		if (blkcg_policy_enabled(blkg->q, pol))
535 			total += prfill(sf, blkg->pd[pol->plid], data);
536 		spin_unlock_irq(&blkg->q->queue_lock);
537 	}
538 	rcu_read_unlock();
539 
540 	if (show_total)
541 		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
542 }
543 EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
544 
545 /**
546  * __blkg_prfill_u64 - prfill helper for a single u64 value
547  * @sf: seq_file to print to
548  * @pd: policy private data of interest
549  * @v: value to print
550  *
551  * Print @v to @sf for the device associated with @pd.
552  */
553 u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
554 {
555 	const char *dname = blkg_dev_name(pd->blkg);
556 
557 	if (!dname)
558 		return 0;
559 
560 	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
561 	return v;
562 }
563 EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
564 
565 /**
566  * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
567  * @sf: seq_file to print to
568  * @pd: policy private data of interest
569  * @rwstat: rwstat to print
570  *
571  * Print @rwstat to @sf for the device associated with @pd.
572  */
573 u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
574 			 const struct blkg_rwstat *rwstat)
575 {
576 	static const char *rwstr[] = {
577 		[BLKG_RWSTAT_READ]	= "Read",
578 		[BLKG_RWSTAT_WRITE]	= "Write",
579 		[BLKG_RWSTAT_SYNC]	= "Sync",
580 		[BLKG_RWSTAT_ASYNC]	= "Async",
581 		[BLKG_RWSTAT_DISCARD]	= "Discard",
582 	};
583 	const char *dname = blkg_dev_name(pd->blkg);
584 	u64 v;
585 	int i;
586 
587 	if (!dname)
588 		return 0;
589 
590 	for (i = 0; i < BLKG_RWSTAT_NR; i++)
591 		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
592 			   (unsigned long long)atomic64_read(&rwstat->aux_cnt[i]));
593 
594 	v = atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_READ]) +
595 		atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_WRITE]) +
596 		atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_DISCARD]);
597 	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
598 	return v;
599 }
600 EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);
601 
602 /**
603  * blkg_prfill_stat - prfill callback for blkg_stat
604  * @sf: seq_file to print to
605  * @pd: policy private data of interest
606  * @off: offset to the blkg_stat in @pd
607  *
608  * prfill callback for printing a blkg_stat.
609  */
610 u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
611 {
612 	return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
613 }
614 EXPORT_SYMBOL_GPL(blkg_prfill_stat);
615 
616 /**
617  * blkg_prfill_rwstat - prfill callback for blkg_rwstat
618  * @sf: seq_file to print to
619  * @pd: policy private data of interest
620  * @off: offset to the blkg_rwstat in @pd
621  *
622  * prfill callback for printing a blkg_rwstat.
623  */
624 u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
625 		       int off)
626 {
627 	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);
628 
629 	return __blkg_prfill_rwstat(sf, pd, &rwstat);
630 }
631 EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
632 
633 static u64 blkg_prfill_rwstat_field(struct seq_file *sf,
634 				    struct blkg_policy_data *pd, int off)
635 {
636 	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd->blkg + off);
637 
638 	return __blkg_prfill_rwstat(sf, pd, &rwstat);
639 }
640 
641 /**
642  * blkg_print_stat_bytes - seq_show callback for blkg->stat_bytes
643  * @sf: seq_file to print to
644  * @v: unused
645  *
646  * To be used as cftype->seq_show to print blkg->stat_bytes.
647  * cftype->private must be set to the blkcg_policy.
648  */
649 int blkg_print_stat_bytes(struct seq_file *sf, void *v)
650 {
651 	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
652 			  blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
653 			  offsetof(struct blkcg_gq, stat_bytes), true);
654 	return 0;
655 }
656 EXPORT_SYMBOL_GPL(blkg_print_stat_bytes);
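/*
 * Example (illustrative sketch, not part of this file): a policy wires
 * this up roughly as below, with ->private carrying the blkcg_policy as
 * required above.  foo_policy and the file name are hypothetical.
 *
 *	static struct cftype foo_legacy_files[] = {
 *		{
 *			.name		= "io_service_bytes",
 *			.private	= (unsigned long)&foo_policy,
 *			.seq_show	= blkg_print_stat_bytes,
 *		},
 *		{ }	(terminator)
 *	};
 */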
657 
658 /**
659  * blkg_print_stat_ios - seq_show callback for blkg->stat_ios
660  * @sf: seq_file to print to
661  * @v: unused
662  *
663  * To be used as cftype->seq_show to print blkg->stat_ios.  cftype->private
664  * must be set to the blkcg_policy.
665  */
666 int blkg_print_stat_ios(struct seq_file *sf, void *v)
667 {
668 	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
669 			  blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
670 			  offsetof(struct blkcg_gq, stat_ios), true);
671 	return 0;
672 }
673 EXPORT_SYMBOL_GPL(blkg_print_stat_ios);
674 
675 static u64 blkg_prfill_rwstat_field_recursive(struct seq_file *sf,
676 					      struct blkg_policy_data *pd,
677 					      int off)
678 {
679 	struct blkg_rwstat rwstat = blkg_rwstat_recursive_sum(pd->blkg,
680 							      NULL, off);
681 	return __blkg_prfill_rwstat(sf, pd, &rwstat);
682 }
683 
684 /**
685  * blkg_print_stat_bytes_recursive - recursive version of blkg_print_stat_bytes
686  * @sf: seq_file to print to
687  * @v: unused
688  */
689 int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v)
690 {
691 	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
692 			  blkg_prfill_rwstat_field_recursive,
693 			  (void *)seq_cft(sf)->private,
694 			  offsetof(struct blkcg_gq, stat_bytes), true);
695 	return 0;
696 }
697 EXPORT_SYMBOL_GPL(blkg_print_stat_bytes_recursive);
698 
699 /**
700  * blkg_print_stat_ios_recursive - recursive version of blkg_print_stat_ios
701  * @sf: seq_file to print to
702  * @v: unused
703  */
704 int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v)
705 {
706 	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
707 			  blkg_prfill_rwstat_field_recursive,
708 			  (void *)seq_cft(sf)->private,
709 			  offsetof(struct blkcg_gq, stat_ios), true);
710 	return 0;
711 }
712 EXPORT_SYMBOL_GPL(blkg_print_stat_ios_recursive);
713 
714 /**
715  * blkg_stat_recursive_sum - collect hierarchical blkg_stat
716  * @blkg: blkg of interest
717  * @pol: blkcg_policy which contains the blkg_stat
718  * @off: offset to the blkg_stat in blkg_policy_data or @blkg
719  *
720  * Collect the blkg_stat specified by @blkg, @pol and @off and all its
721  * online descendants and their aux counts.  The caller must be holding the
722  * queue lock for online tests.
723  *
724  * If @pol is NULL, blkg_stat is at @off bytes into @blkg; otherwise, it is
725  * at @off bytes into @blkg's blkg_policy_data of the policy.
726  */
727 u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
728 			    struct blkcg_policy *pol, int off)
729 {
730 	struct blkcg_gq *pos_blkg;
731 	struct cgroup_subsys_state *pos_css;
732 	u64 sum = 0;
733 
734 	lockdep_assert_held(&blkg->q->queue_lock);
735 
736 	rcu_read_lock();
737 	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
738 		struct blkg_stat *stat;
739 
740 		if (!pos_blkg->online)
741 			continue;
742 
743 		if (pol)
744 			stat = (void *)blkg_to_pd(pos_blkg, pol) + off;
745 		else
746 			stat = (void *)blkg + off;
747 
748 		sum += blkg_stat_read(stat) + atomic64_read(&stat->aux_cnt);
749 	}
750 	rcu_read_unlock();
751 
752 	return sum;
753 }
754 EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);
755 
756 /**
757  * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
758  * @blkg: blkg of interest
759  * @pol: blkcg_policy which contains the blkg_rwstat
760  * @off: offset to the blkg_rwstat in blkg_policy_data or @blkg
761  *
762  * Collect the blkg_rwstat specified by @blkg, @pol and @off and all its
763  * online descendants and their aux counts.  The caller must be holding the
764  * queue lock for online tests.
765  *
766  * If @pol is NULL, blkg_rwstat is at @off bytes into @blkg; otherwise, it
767  * is at @off bytes into @blkg's blkg_policy_data of the policy.
768  */
769 struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
770 					     struct blkcg_policy *pol, int off)
771 {
772 	struct blkcg_gq *pos_blkg;
773 	struct cgroup_subsys_state *pos_css;
774 	struct blkg_rwstat sum = { };
775 	int i;
776 
777 	lockdep_assert_held(&blkg->q->queue_lock);
778 
779 	rcu_read_lock();
780 	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
781 		struct blkg_rwstat *rwstat;
782 
783 		if (!pos_blkg->online)
784 			continue;
785 
786 		if (pol)
787 			rwstat = (void *)blkg_to_pd(pos_blkg, pol) + off;
788 		else
789 			rwstat = (void *)pos_blkg + off;
790 
791 		for (i = 0; i < BLKG_RWSTAT_NR; i++)
792 			atomic64_add(atomic64_read(&rwstat->aux_cnt[i]) +
793 				percpu_counter_sum_positive(&rwstat->cpu_cnt[i]),
794 				&sum.aux_cnt[i]);
795 	}
796 	rcu_read_unlock();
797 
798 	return sum;
799 }
800 EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);
801 
802 /* Performs queue bypass and policy enabled checks then looks up blkg. */
803 static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
804 					  const struct blkcg_policy *pol,
805 					  struct request_queue *q)
806 {
807 	WARN_ON_ONCE(!rcu_read_lock_held());
808 	lockdep_assert_held(&q->queue_lock);
809 
810 	if (!blkcg_policy_enabled(q, pol))
811 		return ERR_PTR(-EOPNOTSUPP);
812 	return __blkg_lookup(blkcg, q, true /* update_hint */);
813 }
814 
815 /**
816  * blkg_conf_prep - parse and prepare for per-blkg config update
817  * @blkcg: target block cgroup
818  * @pol: target policy
819  * @input: input string
820  * @ctx: blkg_conf_ctx to be filled
821  *
822  * Parse per-blkg config update from @input and initialize @ctx with the
823  * result.  @ctx->blkg points to the blkg to be updated and @ctx->body the
824  * part of @input following MAJ:MIN.  This function returns with RCU read
825  * lock and queue lock held and must be paired with blkg_conf_finish().
826  */
827 int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
828 		   char *input, struct blkg_conf_ctx *ctx)
829 	__acquires(rcu) __acquires(&disk->queue->queue_lock)
830 {
831 	struct gendisk *disk;
832 	struct request_queue *q;
833 	struct blkcg_gq *blkg;
834 	unsigned int major, minor;
835 	int key_len, part, ret;
836 	char *body;
837 
838 	if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
839 		return -EINVAL;
840 
841 	body = input + key_len;
842 	if (!isspace(*body))
843 		return -EINVAL;
844 	body = skip_spaces(body);
845 
846 	disk = get_gendisk(MKDEV(major, minor), &part);
847 	if (!disk)
848 		return -ENODEV;
849 	if (part) {
850 		ret = -ENODEV;
851 		goto fail;
852 	}
853 
854 	q = disk->queue;
855 
856 	rcu_read_lock();
857 	spin_lock_irq(&q->queue_lock);
858 
859 	blkg = blkg_lookup_check(blkcg, pol, q);
860 	if (IS_ERR(blkg)) {
861 		ret = PTR_ERR(blkg);
862 		goto fail_unlock;
863 	}
864 
865 	if (blkg)
866 		goto success;
867 
868 	/*
869 	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
870 	 * non-root blkgs have access to their parents.
871 	 */
872 	while (true) {
873 		struct blkcg *pos = blkcg;
874 		struct blkcg *parent;
875 		struct blkcg_gq *new_blkg;
876 
877 		parent = blkcg_parent(blkcg);
878 		while (parent && !__blkg_lookup(parent, q, false)) {
879 			pos = parent;
880 			parent = blkcg_parent(parent);
881 		}
882 
883 		/* Drop locks to do new blkg allocation with GFP_KERNEL. */
884 		spin_unlock_irq(&q->queue_lock);
885 		rcu_read_unlock();
886 
887 		new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
888 		if (unlikely(!new_blkg)) {
889 			ret = -ENOMEM;
890 			goto fail;
891 		}
892 
893 		rcu_read_lock();
894 		spin_lock_irq(&q->queue_lock);
895 
896 		blkg = blkg_lookup_check(pos, pol, q);
897 		if (IS_ERR(blkg)) {
898 			ret = PTR_ERR(blkg);
899 			goto fail_unlock;
900 		}
901 
902 		if (blkg) {
903 			blkg_free(new_blkg);
904 		} else {
905 			blkg = blkg_create(pos, q, new_blkg);
906 			if (unlikely(IS_ERR(blkg))) {
907 				ret = PTR_ERR(blkg);
908 				goto fail_unlock;
909 			}
910 		}
911 
912 		if (pos == blkcg)
913 			goto success;
914 	}
915 success:
916 	ctx->disk = disk;
917 	ctx->blkg = blkg;
918 	ctx->body = body;
919 	return 0;
920 
921 fail_unlock:
922 	spin_unlock_irq(&q->queue_lock);
923 	rcu_read_unlock();
924 fail:
925 	put_disk_and_module(disk);
926 	/*
927 	 * If queue was bypassing, we should retry.  Do so after a
928 	 * short msleep().  It isn't strictly necessary but queue
929 	 * can be bypassing for some time and it's always nice to
930 	 * avoid busy looping.
931 	 */
932 	if (ret == -EBUSY) {
933 		msleep(10);
934 		ret = restart_syscall();
935 	}
936 	return ret;
937 }
938 
939 /**
940  * blkg_conf_finish - finish up per-blkg config update
941  * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
942  *
943  * Finish up after per-blkg config update.  This function must be paired
944  * with blkg_conf_prep().
945  */
946 void blkg_conf_finish(struct blkg_conf_ctx *ctx)
947 	__releases(&ctx->disk->queue->queue_lock) __releases(rcu)
948 {
949 	spin_unlock_irq(&ctx->disk->queue->queue_lock);
950 	rcu_read_unlock();
951 	put_disk_and_module(ctx->disk);
952 }
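/*
 * Example (illustrative sketch, not part of this file): a policy's cftype
 * write handler typically brackets its config update with the pair above.
 * foo_policy, struct foo_grp and blkg_to_foog() are hypothetical.
 *
 *	static ssize_t foo_set_weight(struct kernfs_open_file *of,
 *				      char *buf, size_t nbytes, loff_t off)
 *	{
 *		struct blkcg *blkcg = css_to_blkcg(of_css(of));
 *		struct blkg_conf_ctx ctx;
 *		u64 weight;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &foo_policy, buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		ret = -EINVAL;
 *		if (sscanf(ctx.body, "%llu", &weight) == 1) {
 *			blkg_to_foog(ctx.blkg)->weight = weight;
 *			ret = 0;
 *		}
 *
 *		blkg_conf_finish(&ctx);
 *		return ret ?: nbytes;
 *	}
 */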
953 
954 static int blkcg_print_stat(struct seq_file *sf, void *v)
955 {
956 	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
957 	struct blkcg_gq *blkg;
958 
959 	rcu_read_lock();
960 
961 	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
962 		const char *dname;
963 		char *buf;
964 		struct blkg_rwstat rwstat;
965 		u64 rbytes, wbytes, rios, wios, dbytes, dios;
966 		size_t size = seq_get_buf(sf, &buf), off = 0;
967 		int i;
968 		bool has_stats = false;
969 
970 		dname = blkg_dev_name(blkg);
971 		if (!dname)
972 			continue;
973 
974 		/*
975 		 * Hooray string manipulation: scnprintf() returns the number
976 		 * of bytes written NOT INCLUDING the \0, so the remaining
977 		 * space is now count+1 less than before, but we want the next
978 		 * write to start at the \0, so we only advance off by count.
979 		 */
980 		off += scnprintf(buf+off, size-off, "%s ", dname);
981 
982 		spin_lock_irq(&blkg->q->queue_lock);
983 
984 		rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
985 					offsetof(struct blkcg_gq, stat_bytes));
986 		rbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
987 		wbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
988 		dbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_DISCARD]);
989 
990 		rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
991 					offsetof(struct blkcg_gq, stat_ios));
992 		rios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
993 		wios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
994 		dios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_DISCARD]);
995 
996 		spin_unlock_irq(&blkg->q->queue_lock);
997 
998 		if (rbytes || wbytes || rios || wios) {
999 			has_stats = true;
1000 			off += scnprintf(buf+off, size-off,
1001 					 "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
1002 					 rbytes, wbytes, rios, wios,
1003 					 dbytes, dios);
1004 		}
1005 
1006 		if (!blkcg_debug_stats)
1007 			goto next;
1008 
1009 		if (atomic_read(&blkg->use_delay)) {
1010 			has_stats = true;
1011 			off += scnprintf(buf+off, size-off,
1012 					 " use_delay=%d delay_nsec=%llu",
1013 					 atomic_read(&blkg->use_delay),
1014 					(unsigned long long)atomic64_read(&blkg->delay_nsec));
1015 		}
1016 
1017 		for (i = 0; i < BLKCG_MAX_POLS; i++) {
1018 			struct blkcg_policy *pol = blkcg_policy[i];
1019 			size_t written;
1020 
1021 			if (!blkg->pd[i] || !pol->pd_stat_fn)
1022 				continue;
1023 
1024 			written = pol->pd_stat_fn(blkg->pd[i], buf+off, size-off);
1025 			if (written)
1026 				has_stats = true;
1027 			off += written;
1028 		}
1029 next:
1030 		if (has_stats) {
1031 			off += scnprintf(buf+off, size-off, "\n");
1032 			seq_commit(sf, off);
1033 		}
1034 	}
1035 
1036 	rcu_read_unlock();
1037 	return 0;
1038 }
1039 
1040 static struct cftype blkcg_files[] = {
1041 	{
1042 		.name = "stat",
1043 		.flags = CFTYPE_NOT_ON_ROOT,
1044 		.seq_show = blkcg_print_stat,
1045 	},
1046 	{ }	/* terminate */
1047 };
1048 
1049 static struct cftype blkcg_legacy_files[] = {
1050 	{
1051 		.name = "reset_stats",
1052 		.write_u64 = blkcg_reset_stats,
1053 	},
1054 	{ }	/* terminate */
1055 };
1056 
1057 /*
1058  * blkcg destruction is a three-stage process.
1059  *
1060  * 1. Destruction starts.  The blkcg_css_offline() callback is invoked
1061  *    which offlines writeback.  Here we tie the next stage of blkg destruction
1062  *    to the completion of writeback associated with the blkcg.  This lets us
1063  *    avoid punting potentially large amounts of outstanding writeback to root
1064  *    while maintaining any ongoing policies.  The next stage is triggered when
1065  *    the nr_cgwbs count goes to zero.
1066  *
1067  * 2. When the nr_cgwbs count goes to zero, blkcg_destroy_blkgs() is called
1068  *    and handles the destruction of blkgs.  Here the css reference held by
1069  *    the blkg is put back eventually allowing blkcg_css_free() to be called.
1070  *    This work may occur in cgwb_release_workfn() on the cgwb_release
1071  *    workqueue.  Any submitted ios that fail to get the blkg ref will be
1072  *    punted to the root_blkg.
1073  *
1074  * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called.
1075  *    This finally frees the blkcg.
1076  */
1077 
1078 /**
1079  * blkcg_css_offline - cgroup css_offline callback
1080  * @css: css of interest
1081  *
1082  * This function is called when @css is about to go away.  Here the cgwbs are
1083  * offlined first and only once writeback associated with the blkcg has
1084  * finished do we start step 2 (see above).
1085  */
1086 static void blkcg_css_offline(struct cgroup_subsys_state *css)
1087 {
1088 	struct blkcg *blkcg = css_to_blkcg(css);
1089 
1090 	/* this prevents anyone from attaching or migrating to this blkcg */
1091 	wb_blkcg_offline(blkcg);
1092 
1093 	/* put the base cgwb reference allowing step 2 to be triggered */
1094 	blkcg_cgwb_put(blkcg);
1095 }
1096 
1097 /**
1098  * blkcg_destroy_blkgs - responsible for shooting down blkgs
1099  * @blkcg: blkcg of interest
1100  *
1101  * blkgs should be removed while holding both q and blkcg locks.  As blkcg lock
1102  * is nested inside q lock, this function performs reverse double lock dancing.
1103  * Destroying the blkgs releases the reference held on the blkcg's css allowing
1104  * blkcg_css_free to eventually be called.
1105  *
1106  * This is the blkcg counterpart of ioc_release_fn().
1107  */
1108 void blkcg_destroy_blkgs(struct blkcg *blkcg)
1109 {
1110 	spin_lock_irq(&blkcg->lock);
1111 
1112 	while (!hlist_empty(&blkcg->blkg_list)) {
1113 		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
1114 						struct blkcg_gq, blkcg_node);
1115 		struct request_queue *q = blkg->q;
1116 
1117 		if (spin_trylock(&q->queue_lock)) {
1118 			blkg_destroy(blkg);
1119 			spin_unlock(&q->queue_lock);
1120 		} else {
1121 			spin_unlock_irq(&blkcg->lock);
1122 			cpu_relax();
1123 			spin_lock_irq(&blkcg->lock);
1124 		}
1125 	}
1126 
1127 	spin_unlock_irq(&blkcg->lock);
1128 }
1129 
1130 static void blkcg_css_free(struct cgroup_subsys_state *css)
1131 {
1132 	struct blkcg *blkcg = css_to_blkcg(css);
1133 	int i;
1134 
1135 	mutex_lock(&blkcg_pol_mutex);
1136 
1137 	list_del(&blkcg->all_blkcgs_node);
1138 
1139 	for (i = 0; i < BLKCG_MAX_POLS; i++)
1140 		if (blkcg->cpd[i])
1141 			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
1142 
1143 	mutex_unlock(&blkcg_pol_mutex);
1144 
1145 	kfree(blkcg);
1146 }
1147 
1148 static struct cgroup_subsys_state *
1149 blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
1150 {
1151 	struct blkcg *blkcg;
1152 	struct cgroup_subsys_state *ret;
1153 	int i;
1154 
1155 	mutex_lock(&blkcg_pol_mutex);
1156 
1157 	if (!parent_css) {
1158 		blkcg = &blkcg_root;
1159 	} else {
1160 		blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
1161 		if (!blkcg) {
1162 			ret = ERR_PTR(-ENOMEM);
1163 			goto unlock;
1164 		}
1165 	}
1166 
1167 	for (i = 0; i < BLKCG_MAX_POLS; i++) {
1168 		struct blkcg_policy *pol = blkcg_policy[i];
1169 		struct blkcg_policy_data *cpd;
1170 
1171 		/*
1172 		 * If the policy hasn't been attached yet, wait for it
1173 		 * to be attached before doing anything else. Otherwise,
1174 		 * check if the policy requires any specific per-cgroup
1175 		 * data: if it does, allocate and initialize it.
1176 		 */
1177 		if (!pol || !pol->cpd_alloc_fn)
1178 			continue;
1179 
1180 		cpd = pol->cpd_alloc_fn(GFP_KERNEL);
1181 		if (!cpd) {
1182 			ret = ERR_PTR(-ENOMEM);
1183 			goto free_pd_blkcg;
1184 		}
1185 		blkcg->cpd[i] = cpd;
1186 		cpd->blkcg = blkcg;
1187 		cpd->plid = i;
1188 		if (pol->cpd_init_fn)
1189 			pol->cpd_init_fn(cpd);
1190 	}
1191 
1192 	spin_lock_init(&blkcg->lock);
1193 	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
1194 	INIT_HLIST_HEAD(&blkcg->blkg_list);
1195 #ifdef CONFIG_CGROUP_WRITEBACK
1196 	INIT_LIST_HEAD(&blkcg->cgwb_list);
1197 	refcount_set(&blkcg->cgwb_refcnt, 1);
1198 #endif
1199 	list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);
1200 
1201 	mutex_unlock(&blkcg_pol_mutex);
1202 	return &blkcg->css;
1203 
1204 free_pd_blkcg:
1205 	for (i--; i >= 0; i--)
1206 		if (blkcg->cpd[i])
1207 			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
1208 
1209 	if (blkcg != &blkcg_root)
1210 		kfree(blkcg);
1211 unlock:
1212 	mutex_unlock(&blkcg_pol_mutex);
1213 	return ret;
1214 }
1215 
1216 /**
1217  * blkcg_init_queue - initialize blkcg part of request queue
1218  * @q: request_queue to initialize
1219  *
1220  * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
1221  * part of new request_queue @q.
1222  *
1223  * RETURNS:
1224  * 0 on success, -errno on failure.
1225  */
1226 int blkcg_init_queue(struct request_queue *q)
1227 {
1228 	struct blkcg_gq *new_blkg, *blkg;
1229 	bool preloaded;
1230 	int ret;
1231 
1232 	new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
1233 	if (!new_blkg)
1234 		return -ENOMEM;
1235 
1236 	preloaded = !radix_tree_preload(GFP_KERNEL);
1237 
1238 	/* Make sure the root blkg exists. */
1239 	rcu_read_lock();
1240 	spin_lock_irq(&q->queue_lock);
1241 	blkg = blkg_create(&blkcg_root, q, new_blkg);
1242 	if (IS_ERR(blkg))
1243 		goto err_unlock;
1244 	q->root_blkg = blkg;
1245 	spin_unlock_irq(&q->queue_lock);
1246 	rcu_read_unlock();
1247 
1248 	if (preloaded)
1249 		radix_tree_preload_end();
1250 
1251 	ret = blk_iolatency_init(q);
1252 	if (ret)
1253 		goto err_destroy_all;
1254 
1255 	ret = blk_throtl_init(q);
1256 	if (ret)
1257 		goto err_destroy_all;
1258 	return 0;
1259 
1260 err_destroy_all:
1261 	blkg_destroy_all(q);
1262 	return ret;
1263 err_unlock:
1264 	spin_unlock_irq(&q->queue_lock);
1265 	rcu_read_unlock();
1266 	if (preloaded)
1267 		radix_tree_preload_end();
1268 	return PTR_ERR(blkg);
1269 }
1270 
1271 /**
1272  * blkcg_drain_queue - drain blkcg part of request_queue
1273  * @q: request_queue to drain
1274  *
1275  * Called from blk_drain_queue().  Responsible for draining blkcg part.
1276  */
1277 void blkcg_drain_queue(struct request_queue *q)
1278 {
1279 	lockdep_assert_held(&q->queue_lock);
1280 
1281 	/*
1282 	 * @q could be exiting and already have destroyed all blkgs as
1283 	 * indicated by NULL root_blkg.  If so, don't confuse policies.
1284 	 */
1285 	if (!q->root_blkg)
1286 		return;
1287 
1288 	blk_throtl_drain(q);
1289 }
1290 
1291 /**
1292  * blkcg_exit_queue - exit and release blkcg part of request_queue
1293  * @q: request_queue being released
1294  *
1295  * Called from blk_release_queue().  Responsible for exiting blkcg part.
1296  */
1297 void blkcg_exit_queue(struct request_queue *q)
1298 {
1299 	blkg_destroy_all(q);
1300 	blk_throtl_exit(q);
1301 }
1302 
1303 /*
1304  * We cannot support shared io contexts, as we have no means to support
1305  * two tasks with the same ioc in two different groups without major rework
1306  * of the main cic data structures.  For now we allow a task to change
1307  * its cgroup only if it's the only owner of its ioc.
1308  */
1309 static int blkcg_can_attach(struct cgroup_taskset *tset)
1310 {
1311 	struct task_struct *task;
1312 	struct cgroup_subsys_state *dst_css;
1313 	struct io_context *ioc;
1314 	int ret = 0;
1315 
1316 	/* task_lock() is needed to avoid races with exit_io_context() */
1317 	cgroup_taskset_for_each(task, dst_css, tset) {
1318 		task_lock(task);
1319 		ioc = task->io_context;
1320 		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
1321 			ret = -EINVAL;
1322 		task_unlock(task);
1323 		if (ret)
1324 			break;
1325 	}
1326 	return ret;
1327 }
1328 
1329 static void blkcg_bind(struct cgroup_subsys_state *root_css)
1330 {
1331 	int i;
1332 
1333 	mutex_lock(&blkcg_pol_mutex);
1334 
1335 	for (i = 0; i < BLKCG_MAX_POLS; i++) {
1336 		struct blkcg_policy *pol = blkcg_policy[i];
1337 		struct blkcg *blkcg;
1338 
1339 		if (!pol || !pol->cpd_bind_fn)
1340 			continue;
1341 
1342 		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node)
1343 			if (blkcg->cpd[pol->plid])
1344 				pol->cpd_bind_fn(blkcg->cpd[pol->plid]);
1345 	}
1346 	mutex_unlock(&blkcg_pol_mutex);
1347 }
1348 
1349 static void blkcg_exit(struct task_struct *tsk)
1350 {
1351 	if (tsk->throttle_queue)
1352 		blk_put_queue(tsk->throttle_queue);
1353 	tsk->throttle_queue = NULL;
1354 }
1355 
1356 struct cgroup_subsys io_cgrp_subsys = {
1357 	.css_alloc = blkcg_css_alloc,
1358 	.css_offline = blkcg_css_offline,
1359 	.css_free = blkcg_css_free,
1360 	.can_attach = blkcg_can_attach,
1361 	.bind = blkcg_bind,
1362 	.dfl_cftypes = blkcg_files,
1363 	.legacy_cftypes = blkcg_legacy_files,
1364 	.legacy_name = "blkio",
1365 	.exit = blkcg_exit,
1366 #ifdef CONFIG_MEMCG
1367 	/*
1368 	 * This ensures that, if available, memcg is automatically enabled
1369 	 * together on the default hierarchy so that the owner cgroup can
1370 	 * be retrieved from writeback pages.
1371 	 */
1372 	.depends_on = 1 << memory_cgrp_id,
1373 #endif
1374 };
1375 EXPORT_SYMBOL_GPL(io_cgrp_subsys);
1376 
1377 /**
1378  * blkcg_activate_policy - activate a blkcg policy on a request_queue
1379  * @q: request_queue of interest
1380  * @pol: blkcg policy to activate
1381  *
1382  * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
1383  * bypass mode to populate its blkgs with policy_data for @pol.
1384  *
1385  * Activation happens with @q bypassed, so nobody would be accessing blkgs
1386  * from IO path.  Update of each blkg is protected by both queue and blkcg
1387  * locks so that holding either lock and testing blkcg_policy_enabled() is
1388  * always enough for dereferencing policy data.
1389  *
1390  * The caller is responsible for synchronizing [de]activations and policy
1391  * [un]registerations.  Returns 0 on success, -errno on failure.
1392  */
1393 int blkcg_activate_policy(struct request_queue *q,
1394 			  const struct blkcg_policy *pol)
1395 {
1396 	struct blkg_policy_data *pd_prealloc = NULL;
1397 	struct blkcg_gq *blkg;
1398 	int ret;
1399 
1400 	if (blkcg_policy_enabled(q, pol))
1401 		return 0;
1402 
1403 	if (queue_is_mq(q))
1404 		blk_mq_freeze_queue(q);
1405 pd_prealloc:
1406 	if (!pd_prealloc) {
1407 		pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q->node);
1408 		if (!pd_prealloc) {
1409 			ret = -ENOMEM;
1410 			goto out_bypass_end;
1411 		}
1412 	}
1413 
1414 	spin_lock_irq(&q->queue_lock);
1415 
1416 	list_for_each_entry(blkg, &q->blkg_list, q_node) {
1417 		struct blkg_policy_data *pd;
1418 
1419 		if (blkg->pd[pol->plid])
1420 			continue;
1421 
1422 		pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q->node);
1423 		if (!pd)
1424 			swap(pd, pd_prealloc);
1425 		if (!pd) {
1426 			spin_unlock_irq(&q->queue_lock);
1427 			goto pd_prealloc;
1428 		}
1429 
1430 		blkg->pd[pol->plid] = pd;
1431 		pd->blkg = blkg;
1432 		pd->plid = pol->plid;
1433 		if (pol->pd_init_fn)
1434 			pol->pd_init_fn(pd);
1435 	}
1436 
1437 	__set_bit(pol->plid, q->blkcg_pols);
1438 	ret = 0;
1439 
1440 	spin_unlock_irq(&q->queue_lock);
1441 out_bypass_end:
1442 	if (queue_is_mq(q))
1443 		blk_mq_unfreeze_queue(q);
1444 	if (pd_prealloc)
1445 		pol->pd_free_fn(pd_prealloc);
1446 	return ret;
1447 }
1448 EXPORT_SYMBOL_GPL(blkcg_activate_policy);
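/*
 * Example (illustrative sketch, not part of this file): a policy that
 * needs per-queue data activates itself when attaching to a device, e.g.
 * from an rq-qos or elevator init path (foo_* names are hypothetical):
 *
 *	static int foo_init_queue(struct request_queue *q)
 *	{
 *		int ret;
 *
 *		ret = blkcg_activate_policy(q, &foo_policy);
 *		if (ret)
 *			return ret;
 *		... set up the rest of the per-queue state ...
 *		return 0;
 *	}
 */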
1449 
1450 /**
1451  * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
1452  * @q: request_queue of interest
1453  * @pol: blkcg policy to deactivate
1454  *
1455  * Deactivate @pol on @q.  Follows the same synchronization rules as
1456  * blkcg_activate_policy().
1457  */
1458 void blkcg_deactivate_policy(struct request_queue *q,
1459 			     const struct blkcg_policy *pol)
1460 {
1461 	struct blkcg_gq *blkg;
1462 
1463 	if (!blkcg_policy_enabled(q, pol))
1464 		return;
1465 
1466 	if (queue_is_mq(q))
1467 		blk_mq_freeze_queue(q);
1468 
1469 	spin_lock_irq(&q->queue_lock);
1470 
1471 	__clear_bit(pol->plid, q->blkcg_pols);
1472 
1473 	list_for_each_entry(blkg, &q->blkg_list, q_node) {
1474 		if (blkg->pd[pol->plid]) {
1475 			if (pol->pd_offline_fn)
1476 				pol->pd_offline_fn(blkg->pd[pol->plid]);
1477 			pol->pd_free_fn(blkg->pd[pol->plid]);
1478 			blkg->pd[pol->plid] = NULL;
1479 		}
1480 	}
1481 
1482 	spin_unlock_irq(&q->queue_lock);
1483 
1484 	if (queue_is_mq(q))
1485 		blk_mq_unfreeze_queue(q);
1486 }
1487 EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
1488 
1489 /**
1490  * blkcg_policy_register - register a blkcg policy
1491  * @pol: blkcg policy to register
1492  *
1493  * Register @pol with blkcg core.  Might sleep and @pol may be modified on
1494  * successful registration.  Returns 0 on success and -errno on failure.
1495  */
1496 int blkcg_policy_register(struct blkcg_policy *pol)
1497 {
1498 	struct blkcg *blkcg;
1499 	int i, ret;
1500 
1501 	mutex_lock(&blkcg_pol_register_mutex);
1502 	mutex_lock(&blkcg_pol_mutex);
1503 
1504 	/* find an empty slot */
1505 	ret = -ENOSPC;
1506 	for (i = 0; i < BLKCG_MAX_POLS; i++)
1507 		if (!blkcg_policy[i])
1508 			break;
1509 	if (i >= BLKCG_MAX_POLS) {
1510 		pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n");
1511 		goto err_unlock;
1512 	}
1513 
1514 	/* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn come in pairs */
1515 	if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
1516 		(!pol->pd_alloc_fn ^ !pol->pd_free_fn))
1517 		goto err_unlock;
1518 
1519 	/* register @pol */
1520 	pol->plid = i;
1521 	blkcg_policy[pol->plid] = pol;
1522 
1523 	/* allocate and install cpd's */
1524 	if (pol->cpd_alloc_fn) {
1525 		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1526 			struct blkcg_policy_data *cpd;
1527 
1528 			cpd = pol->cpd_alloc_fn(GFP_KERNEL);
1529 			if (!cpd)
1530 				goto err_free_cpds;
1531 
1532 			blkcg->cpd[pol->plid] = cpd;
1533 			cpd->blkcg = blkcg;
1534 			cpd->plid = pol->plid;
1535 			pol->cpd_init_fn(cpd);
1536 		}
1537 	}
1538 
1539 	mutex_unlock(&blkcg_pol_mutex);
1540 
1541 	/* everything is in place, add intf files for the new policy */
1542 	if (pol->dfl_cftypes)
1543 		WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
1544 					       pol->dfl_cftypes));
1545 	if (pol->legacy_cftypes)
1546 		WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
1547 						  pol->legacy_cftypes));
1548 	mutex_unlock(&blkcg_pol_register_mutex);
1549 	return 0;
1550 
1551 err_free_cpds:
1552 	if (pol->cpd_free_fn) {
1553 		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1554 			if (blkcg->cpd[pol->plid]) {
1555 				pol->cpd_free_fn(blkcg->cpd[pol->plid]);
1556 				blkcg->cpd[pol->plid] = NULL;
1557 			}
1558 		}
1559 	}
1560 	blkcg_policy[pol->plid] = NULL;
1561 err_unlock:
1562 	mutex_unlock(&blkcg_pol_mutex);
1563 	mutex_unlock(&blkcg_pol_register_mutex);
1564 	return ret;
1565 }
1566 EXPORT_SYMBOL_GPL(blkcg_policy_register);
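/*
 * Example (illustrative sketch, not part of this file): a typical policy
 * registers once at init and unregisters on exit; per-device state is
 * then populated via blkcg_activate_policy().  All foo_* names are
 * hypothetical.
 *
 *	static struct blkcg_policy foo_policy = {
 *		.dfl_cftypes	= foo_dfl_files,
 *		.pd_alloc_fn	= foo_pd_alloc,
 *		.pd_init_fn	= foo_pd_init,
 *		.pd_free_fn	= foo_pd_free,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return blkcg_policy_register(&foo_policy);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		blkcg_policy_unregister(&foo_policy);
 *	}
 */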
1567 
1568 /**
1569  * blkcg_policy_unregister - unregister a blkcg policy
1570  * @pol: blkcg policy to unregister
1571  *
1572  * Undo blkcg_policy_register(@pol).  Might sleep.
1573  */
1574 void blkcg_policy_unregister(struct blkcg_policy *pol)
1575 {
1576 	struct blkcg *blkcg;
1577 
1578 	mutex_lock(&blkcg_pol_register_mutex);
1579 
1580 	if (WARN_ON(blkcg_policy[pol->plid] != pol))
1581 		goto out_unlock;
1582 
1583 	/* kill the intf files first */
1584 	if (pol->dfl_cftypes)
1585 		cgroup_rm_cftypes(pol->dfl_cftypes);
1586 	if (pol->legacy_cftypes)
1587 		cgroup_rm_cftypes(pol->legacy_cftypes);
1588 
1589 	/* remove cpds and unregister */
1590 	mutex_lock(&blkcg_pol_mutex);
1591 
1592 	if (pol->cpd_free_fn) {
1593 		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1594 			if (blkcg->cpd[pol->plid]) {
1595 				pol->cpd_free_fn(blkcg->cpd[pol->plid]);
1596 				blkcg->cpd[pol->plid] = NULL;
1597 			}
1598 		}
1599 	}
1600 	blkcg_policy[pol->plid] = NULL;
1601 
1602 	mutex_unlock(&blkcg_pol_mutex);
1603 out_unlock:
1604 	mutex_unlock(&blkcg_pol_register_mutex);
1605 }
1606 EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
1607 
1608 /*
1609  * Scale the accumulated delay based on how long it has been since we last
1610  * updated the delay.  We call this when adding delay, in case it has been a
1611  * while since delay was last added, and when checking whether a task needs
1612  * to be delayed, to account for any delay that has already elapsed.
1613  */
1614 static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now)
1615 {
1616 	u64 old = atomic64_read(&blkg->delay_start);
1617 
1618 	/*
1619 	 * We only want to scale down every second.  The idea here is that we
1620 	 * want to delay people for min(delay_nsec, NSEC_PER_SEC) in a certain
1621 	 * time window.  We only want to throttle tasks for recent delay that
1622 	 * has occurred, in 1 second time windows since that's the maximum
1623 	 * things can be throttled.  We save the current delay window in
1624 	 * blkg->last_delay so we know what amount is still left to be charged
1625 	 * to the blkg from this point onward.  blkg->last_use keeps track of
1626 	 * the use_delay counter.  The idea is if we're unthrottling the blkg we
1627 	 * are ok with whatever is happening now, and we can take away more of
1628 	 * the accumulated delay as we've already throttled enough that
1629 	 * everybody is happy with their IO latencies.
1630 	 */
1631 	if (time_before64(old + NSEC_PER_SEC, now) &&
1632 	    atomic64_cmpxchg(&blkg->delay_start, old, now) == old) {
1633 		u64 cur = atomic64_read(&blkg->delay_nsec);
1634 		u64 sub = min_t(u64, blkg->last_delay, now - old);
1635 		int cur_use = atomic_read(&blkg->use_delay);
1636 
1637 		/*
1638 		 * We've been unthrottled, subtract a larger chunk of our
1639 		 * accumulated delay.
1640 		 */
1641 		if (cur_use < blkg->last_use)
1642 			sub = max_t(u64, sub, blkg->last_delay >> 1);
1643 
1644 		/*
1645 		 * This shouldn't happen, but handle it anyway.  Our delay_nsec
1646 		 * should only ever be growing except here where we subtract out
1647 		 * min(last_delay, 1 second), but lord knows bugs happen and I'd
1648 		 * rather not end up with negative numbers.
1649 		 */
1650 		if (unlikely(cur < sub)) {
1651 			atomic64_set(&blkg->delay_nsec, 0);
1652 			blkg->last_delay = 0;
1653 		} else {
1654 			atomic64_sub(sub, &blkg->delay_nsec);
1655 			blkg->last_delay = cur - sub;
1656 		}
1657 		blkg->last_use = cur_use;
1658 	}
1659 }
1660 
1661 /*
1662  * This is called when we want to actually walk up the hierarchy and check to
1663  * see if we need to throttle, and then actually throttle if there is some
1664  * accumulated delay.  This should only be called upon return to user space so
1665  * we're not holding some lock that would induce a priority inversion.
1666  */
1667 static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
1668 {
1669 	u64 now = ktime_to_ns(ktime_get());
1670 	u64 exp;
1671 	u64 delay_nsec = 0;
1672 	int tok;
1673 
1674 	while (blkg->parent) {
1675 		if (atomic_read(&blkg->use_delay)) {
1676 			blkcg_scale_delay(blkg, now);
1677 			delay_nsec = max_t(u64, delay_nsec,
1678 					   atomic64_read(&blkg->delay_nsec));
1679 		}
1680 		blkg = blkg->parent;
1681 	}
1682 
1683 	if (!delay_nsec)
1684 		return;
1685 
1686 	/*
1687 	 * Let's not sleep for all eternity if we've amassed a huge delay.
1688 	 * Swapping or metadata IO can accumulate tens of seconds worth of
1689 	 * delay, and we want userspace to be able to do _something_, so cap
1690 	 * the delay at 250ms to match the min_t() below.  With tens of
1691 	 * seconds of accumulated delay, tasks are delayed 250ms per syscall.
1692 	 */
1693 	delay_nsec = min_t(u64, delay_nsec, 250 * NSEC_PER_MSEC);
1694 
1695 	/*
1696 	 * TODO: the use_memdelay flag is going to be for the upcoming psi stuff
1697 	 * that hasn't landed upstream yet.  Once that stuff is in place we need
1698 	 * to do a psi_memstall_enter/leave if memdelay is set.
1699 	 */
1700 
1701 	exp = ktime_add_ns(now, delay_nsec);
1702 	tok = io_schedule_prepare();
1703 	do {
1704 		__set_current_state(TASK_KILLABLE);
1705 		if (!schedule_hrtimeout(&exp, HRTIMER_MODE_ABS))
1706 			break;
1707 	} while (!fatal_signal_pending(current));
1708 	io_schedule_finish(tok);
1709 }
1710 
1711 /**
1712  * blkcg_maybe_throttle_current - throttle the current task if it has been marked
1713  *
1714  * This is only called if we've been marked with set_notify_resume().  Obviously
1715  * we can be set_notify_resume() for reasons other than blkcg throttling, so we
1716  * check to see if current->throttle_queue is set and if not this doesn't do
1717  * anything.  This should only ever be called by the resume code, it's not meant
1718  * to be called by people willy-nilly as it will actually do the work to
1719  * throttle the task if it is setup for throttling.
1720  */
1721 void blkcg_maybe_throttle_current(void)
1722 {
1723 	struct request_queue *q = current->throttle_queue;
1724 	struct cgroup_subsys_state *css;
1725 	struct blkcg *blkcg;
1726 	struct blkcg_gq *blkg;
1727 	bool use_memdelay = current->use_memdelay;
1728 
1729 	if (!q)
1730 		return;
1731 
1732 	current->throttle_queue = NULL;
1733 	current->use_memdelay = false;
1734 
1735 	rcu_read_lock();
1736 	css = kthread_blkcg();
1737 	if (css)
1738 		blkcg = css_to_blkcg(css);
1739 	else
1740 		blkcg = css_to_blkcg(task_css(current, io_cgrp_id));
1741 
1742 	if (!blkcg)
1743 		goto out;
1744 	blkg = blkg_lookup(blkcg, q);
1745 	if (!blkg)
1746 		goto out;
1747 	if (!blkg_tryget(blkg))
1748 		goto out;
1749 	rcu_read_unlock();
1750 
1751 	blkcg_maybe_throttle_blkg(blkg, use_memdelay);
1752 	blkg_put(blkg);
1753 	blk_put_queue(q);
1754 	return;
1755 out:
1756 	rcu_read_unlock();
1757 	blk_put_queue(q);
1758 }
1759 
1760 /**
1761  * blkcg_schedule_throttle - this task needs to check for throttling
1762  * @q: the request_queue the IO was submitted on
1763  * @use_memdelay: whether to charge this delay to memory delay for PSI
1764  *
1765  * This is called by the IO controller when we know there's delay accumulated
1766  * for the blkg for this task.  We do not pass the blkg because there are
1767  * places we call this that may not have that information; the swapping code,
1768  * for instance, will only have a request_queue at that point.  This sets
1769  * notify_resume for the task so that it checks whether it requires
1770  * throttling before returning to user space.
1771  *
1772  * We will only schedule once per syscall.  You can call this over and over
1773  * again and it will only do the check once upon return to user space, and only
1774  * throttle once.  If the task needs to be throttled again it'll need to be
1775  * re-set at the next time we see the task.
1776  */
1777 void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
1778 {
1779 	if (unlikely(current->flags & PF_KTHREAD))
1780 		return;
1781 
1782 	if (!blk_get_queue(q))
1783 		return;
1784 
1785 	if (current->throttle_queue)
1786 		blk_put_queue(current->throttle_queue);
1787 	current->throttle_queue = q;
1788 	if (use_memdelay)
1789 		current->use_memdelay = use_memdelay;
1790 	set_notify_resume(current);
1791 }
1792 
1793 /**
1794  * blkcg_add_delay - add delay to this blkg
1795  * @blkg: blkg of interest
1796  * @now: the current time in nanoseconds
1797  * @delta: how many nanoseconds of delay to add
1797  *
1798  * Charge @delta to the blkg's current delay accumulation.  This is used to
1799  * throttle tasks if an IO controller thinks we need more throttling.
1800  */
1801 void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta)
1802 {
1803 	blkcg_scale_delay(blkg, now);
1804 	atomic64_add(delta, &blkg->delay_nsec);
1805 }
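/*
 * Example (illustrative sketch, not part of this file): an IO controller
 * that has decided @blkg owes @delta nanoseconds of delay charges it and
 * then arms the resume-time throttle for the issuing task:
 *
 *	blkcg_add_delay(blkg, ktime_to_ns(ktime_get()), delta);
 *	blkcg_schedule_throttle(blkg->q, true);
 *
 * The task is then actually throttled by blkcg_maybe_throttle_current()
 * on its next return to user space.
 */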
1806 
1807 module_param(blkcg_debug_stats, bool, 0644);
1808 MODULE_PARM_DESC(blkcg_debug_stats, "True if you want debug stats, false if not");
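/*
 * With blk-cgroup built in, the parameter can be flipped at runtime; the
 * path below assumes the usual built-in module_param sysfs layout:
 *
 *	echo 1 > /sys/module/blk_cgroup/parameters/blkcg_debug_stats
 *
 * after which io.stat includes the debug fields printed by
 * blkcg_print_stat() above.
 */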
1809