xref: /linux/block/blk-cgroup.c (revision 2dbc0838bcf24ca59cabc3130cf3b1d6809cdcd4)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Common Block IO controller cgroup interface
4  *
5  * Based on ideas and code from CFQ, CFS and BFQ:
6  * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
7  *
8  * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
9  *		      Paolo Valente <paolo.valente@unimore.it>
10  *
11  * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
12  * 	              Nauman Rafique <nauman@google.com>
13  *
14  * For policy-specific per-blkcg data:
15  * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
16  *                    Arianna Avanzini <avanzini.arianna@gmail.com>
17  */
18 #include <linux/ioprio.h>
19 #include <linux/kdev_t.h>
20 #include <linux/module.h>
21 #include <linux/sched/signal.h>
22 #include <linux/err.h>
23 #include <linux/blkdev.h>
24 #include <linux/backing-dev.h>
25 #include <linux/slab.h>
26 #include <linux/genhd.h>
27 #include <linux/delay.h>
28 #include <linux/atomic.h>
29 #include <linux/ctype.h>
30 #include <linux/blk-cgroup.h>
31 #include <linux/tracehook.h>
32 #include "blk.h"
33 
34 #define MAX_KEY_LEN 100
35 
36 /*
37  * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
38  * blkcg_pol_register_mutex nests outside of it and synchronizes entire
39  * policy [un]register operations including cgroup file additions /
40  * removals.  Putting cgroup file registration outside blkcg_pol_mutex
41  * allows grabbing it from cgroup callbacks.
42  */
43 static DEFINE_MUTEX(blkcg_pol_register_mutex);
44 static DEFINE_MUTEX(blkcg_pol_mutex);
45 
46 struct blkcg blkcg_root;
47 EXPORT_SYMBOL_GPL(blkcg_root);
48 
49 struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;
50 
51 static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
52 
53 static LIST_HEAD(all_blkcgs);		/* protected by blkcg_pol_mutex */
54 
55 static bool blkcg_debug_stats = false;
56 
57 static bool blkcg_policy_enabled(struct request_queue *q,
58 				 const struct blkcg_policy *pol)
59 {
60 	return pol && test_bit(pol->plid, q->blkcg_pols);
61 }
62 
63 /**
64  * blkg_free - free a blkg
65  * @blkg: blkg to free
66  *
67  * Free @blkg which may be partially allocated.
68  */
69 static void blkg_free(struct blkcg_gq *blkg)
70 {
71 	int i;
72 
73 	if (!blkg)
74 		return;
75 
76 	for (i = 0; i < BLKCG_MAX_POLS; i++)
77 		if (blkg->pd[i])
78 			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
79 
80 	blkg_rwstat_exit(&blkg->stat_ios);
81 	blkg_rwstat_exit(&blkg->stat_bytes);
82 	percpu_ref_exit(&blkg->refcnt);
83 	kfree(blkg);
84 }
85 
86 static void __blkg_release(struct rcu_head *rcu)
87 {
88 	struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);
89 
90 	/* release the blkcg and parent blkg refs this blkg has been holding */
91 	css_put(&blkg->blkcg->css);
92 	if (blkg->parent)
93 		blkg_put(blkg->parent);
94 
95 	wb_congested_put(blkg->wb_congested);
96 
97 	blkg_free(blkg);
98 }
99 
100 /*
101  * A group is RCU protected, but holding the RCU read lock does not mean
102  * that one can access all the fields of a blkg and assume they are valid.
103  * For example, don't try to follow throtl_data and request queue links.
104  *
105  * Holding a reference to a blkg under RCU only allows access to values
106  * local to the group, like group stats and group rate limits.
107  */
108 static void blkg_release(struct percpu_ref *ref)
109 {
110 	struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt);
111 
112 	call_rcu(&blkg->rcu_head, __blkg_release);
113 }
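
/*
 * Illustrative sketch (not part of this file; error handling elided): with
 * only rcu_read_lock() held, a reader may sample group-local state such as
 * the stat counters, but it must pin the blkg before using it outside the
 * RCU section:
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg && blkg_tryget(blkg)) {
 *		rcu_read_unlock();
 *		... use @blkg, then blkg_put(blkg) ...
 *	} else {
 *		rcu_read_unlock();
 *	}
 */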
114 
115 /**
116  * blkg_alloc - allocate a blkg
117  * @blkcg: block cgroup the new blkg is associated with
118  * @q: request_queue the new blkg is associated with
119  * @gfp_mask: allocation mask to use
120  *
121  * Allocate a new blkg associating @blkcg and @q.
122  */
123 static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
124 				   gfp_t gfp_mask)
125 {
126 	struct blkcg_gq *blkg;
127 	int i;
128 
129 	/* alloc and init base part */
130 	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
131 	if (!blkg)
132 		return NULL;
133 
134 	if (percpu_ref_init(&blkg->refcnt, blkg_release, 0, gfp_mask))
135 		goto err_free;
136 
137 	if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) ||
138 	    blkg_rwstat_init(&blkg->stat_ios, gfp_mask))
139 		goto err_free;
140 
141 	blkg->q = q;
142 	INIT_LIST_HEAD(&blkg->q_node);
143 	blkg->blkcg = blkcg;
144 
145 	for (i = 0; i < BLKCG_MAX_POLS; i++) {
146 		struct blkcg_policy *pol = blkcg_policy[i];
147 		struct blkg_policy_data *pd;
148 
149 		if (!blkcg_policy_enabled(q, pol))
150 			continue;
151 
152 		/* alloc per-policy data and attach it to blkg */
153 		pd = pol->pd_alloc_fn(gfp_mask, q->node);
154 		if (!pd)
155 			goto err_free;
156 
157 		blkg->pd[i] = pd;
158 		pd->blkg = blkg;
159 		pd->plid = i;
160 	}
161 
162 	return blkg;
163 
164 err_free:
165 	blkg_free(blkg);
166 	return NULL;
167 }
168 
169 struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
170 				      struct request_queue *q, bool update_hint)
171 {
172 	struct blkcg_gq *blkg;
173 
174 	/*
175 	 * Hint didn't match.  Look up from the radix tree.  Note that the
176 	 * hint can only be updated under queue_lock as otherwise @blkg
177 	 * could have already been removed from blkg_tree.  The caller is
178 	 * responsible for grabbing queue_lock if @update_hint.
179 	 */
180 	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
181 	if (blkg && blkg->q == q) {
182 		if (update_hint) {
183 			lockdep_assert_held(&q->queue_lock);
184 			rcu_assign_pointer(blkcg->blkg_hint, blkg);
185 		}
186 		return blkg;
187 	}
188 
189 	return NULL;
190 }
191 EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);
192 
193 /*
194  * If @new_blkg is %NULL, this function tries to allocate a new one as
195  * necessary using %GFP_NOWAIT.  @new_blkg is always consumed on return.
196  */
197 static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
198 				    struct request_queue *q,
199 				    struct blkcg_gq *new_blkg)
200 {
201 	struct blkcg_gq *blkg;
202 	struct bdi_writeback_congested *wb_congested;
203 	int i, ret;
204 
205 	WARN_ON_ONCE(!rcu_read_lock_held());
206 	lockdep_assert_held(&q->queue_lock);
207 
208 	/* request_queue is dying, do not create/recreate a blkg */
209 	if (blk_queue_dying(q)) {
210 		ret = -ENODEV;
211 		goto err_free_blkg;
212 	}
213 
214 	/* blkg holds a reference to blkcg */
215 	if (!css_tryget_online(&blkcg->css)) {
216 		ret = -ENODEV;
217 		goto err_free_blkg;
218 	}
219 
220 	wb_congested = wb_congested_get_create(q->backing_dev_info,
221 					       blkcg->css.id,
222 					       GFP_NOWAIT | __GFP_NOWARN);
223 	if (!wb_congested) {
224 		ret = -ENOMEM;
225 		goto err_put_css;
226 	}
227 
228 	/* allocate */
229 	if (!new_blkg) {
230 		new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
231 		if (unlikely(!new_blkg)) {
232 			ret = -ENOMEM;
233 			goto err_put_congested;
234 		}
235 	}
236 	blkg = new_blkg;
237 	blkg->wb_congested = wb_congested;
238 
239 	/* link parent */
240 	if (blkcg_parent(blkcg)) {
241 		blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
242 		if (WARN_ON_ONCE(!blkg->parent)) {
243 			ret = -ENODEV;
244 			goto err_put_congested;
245 		}
246 		blkg_get(blkg->parent);
247 	}
248 
249 	/* invoke per-policy init */
250 	for (i = 0; i < BLKCG_MAX_POLS; i++) {
251 		struct blkcg_policy *pol = blkcg_policy[i];
252 
253 		if (blkg->pd[i] && pol->pd_init_fn)
254 			pol->pd_init_fn(blkg->pd[i]);
255 	}
256 
257 	/* insert */
258 	spin_lock(&blkcg->lock);
259 	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
260 	if (likely(!ret)) {
261 		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
262 		list_add(&blkg->q_node, &q->blkg_list);
263 
264 		for (i = 0; i < BLKCG_MAX_POLS; i++) {
265 			struct blkcg_policy *pol = blkcg_policy[i];
266 
267 			if (blkg->pd[i] && pol->pd_online_fn)
268 				pol->pd_online_fn(blkg->pd[i]);
269 		}
270 	}
271 	blkg->online = true;
272 	spin_unlock(&blkcg->lock);
273 
274 	if (!ret)
275 		return blkg;
276 
277 	/* @blkg failed to be fully initialized, use the usual release path */
278 	blkg_put(blkg);
279 	return ERR_PTR(ret);
280 
281 err_put_congested:
282 	wb_congested_put(wb_congested);
283 err_put_css:
284 	css_put(&blkcg->css);
285 err_free_blkg:
286 	blkg_free(new_blkg);
287 	return ERR_PTR(ret);
288 }
289 
290 /**
291  * __blkg_lookup_create - lookup blkg, try to create one if not there
292  * @blkcg: blkcg of interest
293  * @q: request_queue of interest
294  *
295  * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
296  * create one.  blkg creation is performed recursively from blkcg_root such
297  * that all non-root blkg's have access to the parent blkg.  This function
298  * should be called under RCU read lock and @q->queue_lock.
299  *
300  * Returns the blkg or the closest blkg if blkg_create() fails as it walks
301  * down from root.
302  */
303 struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
304 				      struct request_queue *q)
305 {
306 	struct blkcg_gq *blkg;
307 
308 	WARN_ON_ONCE(!rcu_read_lock_held());
309 	lockdep_assert_held(&q->queue_lock);
310 
311 	blkg = __blkg_lookup(blkcg, q, true);
312 	if (blkg)
313 		return blkg;
314 
315 	/*
316 	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
317 	 * non-root blkgs have access to their parents.  Returns the closest
318 	 * blkg to the intended blkg should blkg_create() fail.
319 	 */
320 	while (true) {
321 		struct blkcg *pos = blkcg;
322 		struct blkcg *parent = blkcg_parent(blkcg);
323 		struct blkcg_gq *ret_blkg = q->root_blkg;
324 
325 		while (parent) {
326 			blkg = __blkg_lookup(parent, q, false);
327 			if (blkg) {
328 				/* remember closest blkg */
329 				ret_blkg = blkg;
330 				break;
331 			}
332 			pos = parent;
333 			parent = blkcg_parent(parent);
334 		}
335 
336 		blkg = blkg_create(pos, q, NULL);
337 		if (IS_ERR(blkg))
338 			return ret_blkg;
339 		if (pos == blkcg)
340 			return blkg;
341 	}
342 }
343 
344 /**
345  * blkg_lookup_create - find or create a blkg
346  * @blkcg: target block cgroup
347  * @q: target request_queue
348  *
349  * This looks up or creates the blkg representing the unique pair
350  * of the blkcg and the request_queue.
351  */
352 struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
353 				    struct request_queue *q)
354 {
355 	struct blkcg_gq *blkg = blkg_lookup(blkcg, q);
356 
357 	if (unlikely(!blkg)) {
358 		unsigned long flags;
359 
360 		spin_lock_irqsave(&q->queue_lock, flags);
361 		blkg = __blkg_lookup_create(blkcg, q);
362 		spin_unlock_irqrestore(&q->queue_lock, flags);
363 	}
364 
365 	return blkg;
366 }
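
/*
 * Illustrative usage sketch (not part of this file): a bio submission path
 * caller typically resolves the blkg for the bio's cgroup under RCU, e.g.:
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup_create(bio_blkcg(bio), q);
 *	... charge the bio to @blkg; if creation failed this is the closest
 *	ancestor blkg that does exist, possibly q->root_blkg ...
 *	rcu_read_unlock();
 */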
367 
368 static void blkg_destroy(struct blkcg_gq *blkg)
369 {
370 	struct blkcg *blkcg = blkg->blkcg;
371 	struct blkcg_gq *parent = blkg->parent;
372 	int i;
373 
374 	lockdep_assert_held(&blkg->q->queue_lock);
375 	lockdep_assert_held(&blkcg->lock);
376 
377 	/* Something is wrong if we are trying to remove the same group twice */
378 	WARN_ON_ONCE(list_empty(&blkg->q_node));
379 	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
380 
381 	for (i = 0; i < BLKCG_MAX_POLS; i++) {
382 		struct blkcg_policy *pol = blkcg_policy[i];
383 
384 		if (blkg->pd[i] && pol->pd_offline_fn)
385 			pol->pd_offline_fn(blkg->pd[i]);
386 	}
387 
388 	if (parent) {
389 		blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
390 		blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
391 	}
392 
393 	blkg->online = false;
394 
395 	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
396 	list_del_init(&blkg->q_node);
397 	hlist_del_init_rcu(&blkg->blkcg_node);
398 
399 	/*
400 	 * Both setting lookup hint to and clearing it from @blkg are done
401 	 * under queue_lock.  If it's not pointing to @blkg now, it never
402 	 * will.  Hint assignment itself can race safely.
403 	 */
404 	if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
405 		rcu_assign_pointer(blkcg->blkg_hint, NULL);
406 
407 	/*
408 	 * Put the reference taken at the time of creation so that when all
409 	 * queues are gone, group can be destroyed.
410 	 */
411 	percpu_ref_kill(&blkg->refcnt);
412 }
413 
414 /**
415  * blkg_destroy_all - destroy all blkgs associated with a request_queue
416  * @q: request_queue of interest
417  *
418  * Destroy all blkgs associated with @q.
419  */
420 static void blkg_destroy_all(struct request_queue *q)
421 {
422 	struct blkcg_gq *blkg, *n;
423 
424 	spin_lock_irq(&q->queue_lock);
425 	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
426 		struct blkcg *blkcg = blkg->blkcg;
427 
428 		spin_lock(&blkcg->lock);
429 		blkg_destroy(blkg);
430 		spin_unlock(&blkcg->lock);
431 	}
432 
433 	q->root_blkg = NULL;
434 	spin_unlock_irq(&q->queue_lock);
435 }
436 
437 static int blkcg_reset_stats(struct cgroup_subsys_state *css,
438 			     struct cftype *cftype, u64 val)
439 {
440 	struct blkcg *blkcg = css_to_blkcg(css);
441 	struct blkcg_gq *blkg;
442 	int i;
443 
444 	mutex_lock(&blkcg_pol_mutex);
445 	spin_lock_irq(&blkcg->lock);
446 
447 	/*
448 	 * Note that stat reset is racy - it doesn't synchronize against
449 	 * stat updates.  This is a debug feature which shouldn't exist
450 	 * anyway.  If you get hit by a race, retry.
451 	 */
452 	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
453 		blkg_rwstat_reset(&blkg->stat_bytes);
454 		blkg_rwstat_reset(&blkg->stat_ios);
455 
456 		for (i = 0; i < BLKCG_MAX_POLS; i++) {
457 			struct blkcg_policy *pol = blkcg_policy[i];
458 
459 			if (blkg->pd[i] && pol->pd_reset_stats_fn)
460 				pol->pd_reset_stats_fn(blkg->pd[i]);
461 		}
462 	}
463 
464 	spin_unlock_irq(&blkcg->lock);
465 	mutex_unlock(&blkcg_pol_mutex);
466 	return 0;
467 }
468 
469 const char *blkg_dev_name(struct blkcg_gq *blkg)
470 {
471 	/* some drivers (floppy) instantiate a queue w/o disk registered */
472 	if (blkg->q->backing_dev_info->dev)
473 		return dev_name(blkg->q->backing_dev_info->dev);
474 	return NULL;
475 }
476 
477 /**
478  * blkcg_print_blkgs - helper for printing per-blkg data
479  * @sf: seq_file to print to
480  * @blkcg: blkcg of interest
481  * @prfill: fill function to print out a blkg
482  * @pol: policy in question
483  * @data: data to be passed to @prfill
484  * @show_total: to print out sum of prfill return values or not
485  *
486  * This function invokes @prfill on each blkg of @blkcg if pd for the
487  * policy specified by @pol exists.  @prfill is invoked with @sf, the
488  * policy data and @data and the matching queue lock held.  If @show_total
489  * is %true, the sum of the return values from @prfill is printed with
490  * "Total" label at the end.
491  *
492  * This is to be used to construct print functions for
493  * cftype->read_seq_string method.
494  */
495 void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
496 		       u64 (*prfill)(struct seq_file *,
497 				     struct blkg_policy_data *, int),
498 		       const struct blkcg_policy *pol, int data,
499 		       bool show_total)
500 {
501 	struct blkcg_gq *blkg;
502 	u64 total = 0;
503 
504 	rcu_read_lock();
505 	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
506 		spin_lock_irq(&blkg->q->queue_lock);
507 		if (blkcg_policy_enabled(blkg->q, pol))
508 			total += prfill(sf, blkg->pd[pol->plid], data);
509 		spin_unlock_irq(&blkg->q->queue_lock);
510 	}
511 	rcu_read_unlock();
512 
513 	if (show_total)
514 		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
515 }
516 EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
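
/*
 * Illustrative sketch (not part of this file; the "mypol" names are
 * hypothetical): a policy usually pairs blkcg_print_blkgs() with a small
 * prfill callback and wires the result up as a cftype->seq_show method:
 *
 *	static u64 mypol_prfill_weight(struct seq_file *sf,
 *				       struct blkg_policy_data *pd, int off)
 *	{
 *		struct mypol_pd *mypd = pd_to_mypd(pd);
 *
 *		return __blkg_prfill_u64(sf, pd, mypd->weight);
 *	}
 *
 *	static int mypol_print_weight(struct seq_file *sf, void *v)
 *	{
 *		blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 *				  mypol_prfill_weight, &blkcg_policy_mypol,
 *				  0, false);
 *		return 0;
 *	}
 */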
517 
518 /**
519  * __blkg_prfill_u64 - prfill helper for a single u64 value
520  * @sf: seq_file to print to
521  * @pd: policy private data of interest
522  * @v: value to print
523  *
524  * Print @v to @sf for the device associated with @pd.
525  */
526 u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
527 {
528 	const char *dname = blkg_dev_name(pd->blkg);
529 
530 	if (!dname)
531 		return 0;
532 
533 	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
534 	return v;
535 }
536 EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
537 
538 /**
539  * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
540  * @sf: seq_file to print to
541  * @pd: policy private data of interest
542  * @rwstat: rwstat to print
543  *
544  * Print @rwstat to @sf for the device associated with @pd.
545  */
546 u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
547 			 const struct blkg_rwstat_sample *rwstat)
548 {
549 	static const char *rwstr[] = {
550 		[BLKG_RWSTAT_READ]	= "Read",
551 		[BLKG_RWSTAT_WRITE]	= "Write",
552 		[BLKG_RWSTAT_SYNC]	= "Sync",
553 		[BLKG_RWSTAT_ASYNC]	= "Async",
554 		[BLKG_RWSTAT_DISCARD]	= "Discard",
555 	};
556 	const char *dname = blkg_dev_name(pd->blkg);
557 	u64 v;
558 	int i;
559 
560 	if (!dname)
561 		return 0;
562 
563 	for (i = 0; i < BLKG_RWSTAT_NR; i++)
564 		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
565 			   rwstat->cnt[i]);
566 
567 	v = rwstat->cnt[BLKG_RWSTAT_READ] +
568 		rwstat->cnt[BLKG_RWSTAT_WRITE] +
569 		rwstat->cnt[BLKG_RWSTAT_DISCARD];
570 	seq_printf(sf, "%s Total %llu\n", dname, v);
571 	return v;
572 }
573 EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);
574 
575 /**
576  * blkg_prfill_rwstat - prfill callback for blkg_rwstat
577  * @sf: seq_file to print to
578  * @pd: policy private data of interest
579  * @off: offset to the blkg_rwstat in @pd
580  *
581  * prfill callback for printing a blkg_rwstat.
582  */
583 u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
584 		       int off)
585 {
586 	struct blkg_rwstat_sample rwstat = { };
587 
588 	blkg_rwstat_read((void *)pd + off, &rwstat);
589 	return __blkg_prfill_rwstat(sf, pd, &rwstat);
590 }
591 EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
592 
593 static u64 blkg_prfill_rwstat_field(struct seq_file *sf,
594 				    struct blkg_policy_data *pd, int off)
595 {
596 	struct blkg_rwstat_sample rwstat = { };
597 
598 	blkg_rwstat_read((void *)pd->blkg + off, &rwstat);
599 	return __blkg_prfill_rwstat(sf, pd, &rwstat);
600 }
601 
602 /**
603  * blkg_print_stat_bytes - seq_show callback for blkg->stat_bytes
604  * @sf: seq_file to print to
605  * @v: unused
606  *
607  * To be used as cftype->seq_show to print blkg->stat_bytes.
608  * cftype->private must be set to the blkcg_policy.
609  */
610 int blkg_print_stat_bytes(struct seq_file *sf, void *v)
611 {
612 	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
613 			  blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
614 			  offsetof(struct blkcg_gq, stat_bytes), true);
615 	return 0;
616 }
617 EXPORT_SYMBOL_GPL(blkg_print_stat_bytes);
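
/*
 * Illustrative sketch (not part of this file; "mypol" names are
 * hypothetical): to expose the common byte counters, a policy points a
 * cftype's seq_show at this helper and stashes its blkcg_policy in
 * cftype->private:
 *
 *	static struct cftype mypol_files[] = {
 *		{
 *			.name = "mypol.io_service_bytes",
 *			.private = (unsigned long)&blkcg_policy_mypol,
 *			.seq_show = blkg_print_stat_bytes,
 *		},
 *		{ }
 *	};
 */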
618 
619 /**
620  * blkg_print_stat_ios - seq_show callback for blkg->stat_ios
621  * @sf: seq_file to print to
622  * @v: unused
623  *
624  * To be used as cftype->seq_show to print blkg->stat_ios.  cftype->private
625  * must be set to the blkcg_policy.
626  */
627 int blkg_print_stat_ios(struct seq_file *sf, void *v)
628 {
629 	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
630 			  blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
631 			  offsetof(struct blkcg_gq, stat_ios), true);
632 	return 0;
633 }
634 EXPORT_SYMBOL_GPL(blkg_print_stat_ios);
635 
636 static u64 blkg_prfill_rwstat_field_recursive(struct seq_file *sf,
637 					      struct blkg_policy_data *pd,
638 					      int off)
639 {
640 	struct blkg_rwstat_sample rwstat;
641 
642 	blkg_rwstat_recursive_sum(pd->blkg, NULL, off, &rwstat);
643 	return __blkg_prfill_rwstat(sf, pd, &rwstat);
644 }
645 
646 /**
647  * blkg_print_stat_bytes_recursive - recursive version of blkg_print_stat_bytes
648  * @sf: seq_file to print to
649  * @v: unused
650  */
651 int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v)
652 {
653 	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
654 			  blkg_prfill_rwstat_field_recursive,
655 			  (void *)seq_cft(sf)->private,
656 			  offsetof(struct blkcg_gq, stat_bytes), true);
657 	return 0;
658 }
659 EXPORT_SYMBOL_GPL(blkg_print_stat_bytes_recursive);
660 
661 /**
662  * blkg_print_stat_ios_recursive - recursive version of blkg_print_stat_ios
663  * @sf: seq_file to print to
664  * @v: unused
665  */
666 int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v)
667 {
668 	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
669 			  blkg_prfill_rwstat_field_recursive,
670 			  (void *)seq_cft(sf)->private,
671 			  offsetof(struct blkcg_gq, stat_ios), true);
672 	return 0;
673 }
674 EXPORT_SYMBOL_GPL(blkg_print_stat_ios_recursive);
675 
676 /**
677  * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
678  * @blkg: blkg of interest
679  * @pol: blkcg_policy which contains the blkg_rwstat
680  * @off: offset to the blkg_rwstat in blkg_policy_data or @blkg
681  * @sum: blkg_rwstat_sample structure containing the results
682  *
683  * Collect the blkg_rwstat specified by @blkg, @pol and @off and all its
684  * online descendants and their aux counts.  The caller must be holding the
685  * queue lock for online tests.
686  *
687  * If @pol is NULL, blkg_rwstat is at @off bytes into @blkg; otherwise, it
688  * is at @off bytes into @blkg's blkg_policy_data of the policy.
689  */
690 void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
691 		int off, struct blkg_rwstat_sample *sum)
692 {
693 	struct blkcg_gq *pos_blkg;
694 	struct cgroup_subsys_state *pos_css;
695 	unsigned int i;
696 
697 	lockdep_assert_held(&blkg->q->queue_lock);
698 
699 	rcu_read_lock();
700 	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
701 		struct blkg_rwstat *rwstat;
702 
703 		if (!pos_blkg->online)
704 			continue;
705 
706 		if (pol)
707 			rwstat = (void *)blkg_to_pd(pos_blkg, pol) + off;
708 		else
709 			rwstat = (void *)pos_blkg + off;
710 
711 		for (i = 0; i < BLKG_RWSTAT_NR; i++)
712 			sum->cnt[i] = blkg_rwstat_read_counter(rwstat, i);
713 	}
714 	rcu_read_unlock();
715 }
716 EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);
717 
718 /* Performs the policy enabled check, then looks up the blkg. */
719 static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
720 					  const struct blkcg_policy *pol,
721 					  struct request_queue *q)
722 {
723 	WARN_ON_ONCE(!rcu_read_lock_held());
724 	lockdep_assert_held(&q->queue_lock);
725 
726 	if (!blkcg_policy_enabled(q, pol))
727 		return ERR_PTR(-EOPNOTSUPP);
728 	return __blkg_lookup(blkcg, q, true /* update_hint */);
729 }
730 
731 /**
732  * blkg_conf_prep - parse and prepare for per-blkg config update
733  * @blkcg: target block cgroup
734  * @pol: target policy
735  * @input: input string
736  * @ctx: blkg_conf_ctx to be filled
737  *
738  * Parse per-blkg config update from @input and initialize @ctx with the
739  * result.  @ctx->blkg points to the blkg to be updated and @ctx->body the
740  * part of @input following MAJ:MIN.  This function returns with RCU read
741  * lock and queue lock held and must be paired with blkg_conf_finish().
742  */
743 int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
744 		   char *input, struct blkg_conf_ctx *ctx)
745 	__acquires(rcu) __acquires(&disk->queue->queue_lock)
746 {
747 	struct gendisk *disk;
748 	struct request_queue *q;
749 	struct blkcg_gq *blkg;
750 	unsigned int major, minor;
751 	int key_len, part, ret;
752 	char *body;
753 
754 	if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
755 		return -EINVAL;
756 
757 	body = input + key_len;
758 	if (!isspace(*body))
759 		return -EINVAL;
760 	body = skip_spaces(body);
761 
762 	disk = get_gendisk(MKDEV(major, minor), &part);
763 	if (!disk)
764 		return -ENODEV;
765 	if (part) {
766 		ret = -ENODEV;
767 		goto fail;
768 	}
769 
770 	q = disk->queue;
771 
772 	rcu_read_lock();
773 	spin_lock_irq(&q->queue_lock);
774 
775 	blkg = blkg_lookup_check(blkcg, pol, q);
776 	if (IS_ERR(blkg)) {
777 		ret = PTR_ERR(blkg);
778 		goto fail_unlock;
779 	}
780 
781 	if (blkg)
782 		goto success;
783 
784 	/*
785 	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
786 	 * non-root blkgs have access to their parents.
787 	 */
788 	while (true) {
789 		struct blkcg *pos = blkcg;
790 		struct blkcg *parent;
791 		struct blkcg_gq *new_blkg;
792 
793 		parent = blkcg_parent(blkcg);
794 		while (parent && !__blkg_lookup(parent, q, false)) {
795 			pos = parent;
796 			parent = blkcg_parent(parent);
797 		}
798 
799 		/* Drop locks to do new blkg allocation with GFP_KERNEL. */
800 		spin_unlock_irq(&q->queue_lock);
801 		rcu_read_unlock();
802 
803 		new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
804 		if (unlikely(!new_blkg)) {
805 			ret = -ENOMEM;
806 			goto fail;
807 		}
808 
809 		rcu_read_lock();
810 		spin_lock_irq(&q->queue_lock);
811 
812 		blkg = blkg_lookup_check(pos, pol, q);
813 		if (IS_ERR(blkg)) {
814 			ret = PTR_ERR(blkg);
815 			goto fail_unlock;
816 		}
817 
818 		if (blkg) {
819 			blkg_free(new_blkg);
820 		} else {
821 			blkg = blkg_create(pos, q, new_blkg);
822 			if (IS_ERR(blkg)) {
823 				ret = PTR_ERR(blkg);
824 				goto fail_unlock;
825 			}
826 		}
827 
828 		if (pos == blkcg)
829 			goto success;
830 	}
831 success:
832 	ctx->disk = disk;
833 	ctx->blkg = blkg;
834 	ctx->body = body;
835 	return 0;
836 
837 fail_unlock:
838 	spin_unlock_irq(&q->queue_lock);
839 	rcu_read_unlock();
840 fail:
841 	put_disk_and_module(disk);
842 	/*
843 	 * If queue was bypassing, we should retry.  Do so after a
844 	 * short msleep().  It isn't strictly necessary but queue
845 	 * can be bypassing for some time and it's always nice to
846 	 * avoid busy looping.
847 	 */
848 	if (ret == -EBUSY) {
849 		msleep(10);
850 		ret = restart_syscall();
851 	}
852 	return ret;
853 }
854 
855 /**
856  * blkg_conf_finish - finish up per-blkg config update
857  * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
858  *
859  * Finish up after per-blkg config update.  This function must be paired
860  * with blkg_conf_prep().
861  */
862 void blkg_conf_finish(struct blkg_conf_ctx *ctx)
863 	__releases(&ctx->disk->queue->queue_lock) __releases(rcu)
864 {
865 	spin_unlock_irq(&ctx->disk->queue->queue_lock);
866 	rcu_read_unlock();
867 	put_disk_and_module(ctx->disk);
868 }
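
/*
 * Illustrative sketch (not part of this file; the "mypol" names are
 * hypothetical): a policy's cftype write handler typically brackets its
 * per-blkg config update with blkg_conf_prep()/blkg_conf_finish():
 *
 *	static ssize_t mypol_set_limit(struct kernfs_open_file *of,
 *				       char *buf, size_t nbytes, loff_t off)
 *	{
 *		struct blkcg *blkcg = css_to_blkcg(of_css(of));
 *		struct blkg_conf_ctx ctx;
 *		u64 v;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &blkcg_policy_mypol, buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		ret = -EINVAL;
 *		if (sscanf(ctx.body, "%llu", &v) == 1) {
 *			... apply @v to ctx.blkg's policy data ...
 *			ret = 0;
 *		}
 *
 *		blkg_conf_finish(&ctx);
 *		return ret ?: nbytes;
 *	}
 */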
869 
870 static int blkcg_print_stat(struct seq_file *sf, void *v)
871 {
872 	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
873 	struct blkcg_gq *blkg;
874 
875 	rcu_read_lock();
876 
877 	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
878 		const char *dname;
879 		char *buf;
880 		struct blkg_rwstat_sample rwstat;
881 		u64 rbytes, wbytes, rios, wios, dbytes, dios;
882 		size_t size = seq_get_buf(sf, &buf), off = 0;
883 		int i;
884 		bool has_stats = false;
885 
886 		dname = blkg_dev_name(blkg);
887 		if (!dname)
888 			continue;
889 
890 		/*
891 		 * Hooray string manipulation.  scnprintf() returns the number
892 		 * of characters written NOT INCLUDING the trailing \0, and we
893 		 * want the next write to start right at that \0, so we only
894 		 * advance @off by the returned count.
895 		 */
896 		off += scnprintf(buf+off, size-off, "%s ", dname);
897 
898 		spin_lock_irq(&blkg->q->queue_lock);
899 
900 		blkg_rwstat_recursive_sum(blkg, NULL,
901 				offsetof(struct blkcg_gq, stat_bytes), &rwstat);
902 		rbytes = rwstat.cnt[BLKG_RWSTAT_READ];
903 		wbytes = rwstat.cnt[BLKG_RWSTAT_WRITE];
904 		dbytes = rwstat.cnt[BLKG_RWSTAT_DISCARD];
905 
906 		blkg_rwstat_recursive_sum(blkg, NULL,
907 					offsetof(struct blkcg_gq, stat_ios), &rwstat);
908 		rios = rwstat.cnt[BLKG_RWSTAT_READ];
909 		wios = rwstat.cnt[BLKG_RWSTAT_WRITE];
910 		dios = rwstat.cnt[BLKG_RWSTAT_DISCARD];
911 
912 		spin_unlock_irq(&blkg->q->queue_lock);
913 
914 		if (rbytes || wbytes || rios || wios) {
915 			has_stats = true;
916 			off += scnprintf(buf+off, size-off,
917 					 "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
918 					 rbytes, wbytes, rios, wios,
919 					 dbytes, dios);
920 		}
921 
922 		if (!blkcg_debug_stats)
923 			goto next;
924 
925 		if (atomic_read(&blkg->use_delay)) {
926 			has_stats = true;
927 			off += scnprintf(buf+off, size-off,
928 					 " use_delay=%d delay_nsec=%llu",
929 					 atomic_read(&blkg->use_delay),
930 					(unsigned long long)atomic64_read(&blkg->delay_nsec));
931 		}
932 
933 		for (i = 0; i < BLKCG_MAX_POLS; i++) {
934 			struct blkcg_policy *pol = blkcg_policy[i];
935 			size_t written;
936 
937 			if (!blkg->pd[i] || !pol->pd_stat_fn)
938 				continue;
939 
940 			written = pol->pd_stat_fn(blkg->pd[i], buf+off, size-off);
941 			if (written)
942 				has_stats = true;
943 			off += written;
944 		}
945 next:
946 		if (has_stats) {
947 			if (off < size - 1) {
948 				off += scnprintf(buf+off, size-off, "\n");
949 				seq_commit(sf, off);
950 			} else {
951 				seq_commit(sf, -1);
952 			}
953 		}
954 	}
955 
956 	rcu_read_unlock();
957 	return 0;
958 }
959 
960 static struct cftype blkcg_files[] = {
961 	{
962 		.name = "stat",
963 		.flags = CFTYPE_NOT_ON_ROOT,
964 		.seq_show = blkcg_print_stat,
965 	},
966 	{ }	/* terminate */
967 };
968 
969 static struct cftype blkcg_legacy_files[] = {
970 	{
971 		.name = "reset_stats",
972 		.write_u64 = blkcg_reset_stats,
973 	},
974 	{ }	/* terminate */
975 };
976 
977 /*
978  * blkcg destruction is a three-stage process.
979  *
980  * 1. Destruction starts.  The blkcg_css_offline() callback is invoked
981  *    which offlines writeback.  Here we tie the next stage of blkg destruction
982  *    to the completion of writeback associated with the blkcg.  This lets us
983  *    avoid punting potentially large amounts of outstanding writeback to root
984  *    while maintaining any ongoing policies.  The next stage is triggered when
985  *    the nr_cgwbs count goes to zero.
986  *
987  * 2. When the nr_cgwbs count goes to zero, blkcg_destroy_blkgs() is called
988  *    and handles the destruction of blkgs.  Here the css reference held by
989  *    the blkg is put back eventually allowing blkcg_css_free() to be called.
990  *    This work may occur in cgwb_release_workfn() on the cgwb_release
991  *    workqueue.  Any submitted ios that fail to get the blkg ref will be
992  *    punted to the root_blkg.
993  *
994  * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called.
995  *    This finally frees the blkcg.
996  */
997 
998 /**
999  * blkcg_css_offline - cgroup css_offline callback
1000  * @css: css of interest
1001  *
1002  * This function is called when @css is about to go away.  Here the cgwbs are
1003  * offlined first and only once writeback associated with the blkcg has
1004  * finished do we start step 2 (see above).
1005  */
1006 static void blkcg_css_offline(struct cgroup_subsys_state *css)
1007 {
1008 	struct blkcg *blkcg = css_to_blkcg(css);
1009 
1010 	/* this prevents anyone from attaching or migrating to this blkcg */
1011 	wb_blkcg_offline(blkcg);
1012 
1013 	/* put the base cgwb reference allowing step 2 to be triggered */
1014 	blkcg_cgwb_put(blkcg);
1015 }
1016 
1017 /**
1018  * blkcg_destroy_blkgs - responsible for shooting down blkgs
1019  * @blkcg: blkcg of interest
1020  *
1021  * blkgs should be removed while holding both q and blkcg locks.  As blkcg lock
1022  * is nested inside q lock, this function performs reverse double lock dancing.
1023  * Destroying the blkgs releases the reference held on the blkcg's css allowing
1024  * blkcg_css_free to eventually be called.
1025  *
1026  * This is the blkcg counterpart of ioc_release_fn().
1027  */
1028 void blkcg_destroy_blkgs(struct blkcg *blkcg)
1029 {
1030 	spin_lock_irq(&blkcg->lock);
1031 
1032 	while (!hlist_empty(&blkcg->blkg_list)) {
1033 		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
1034 						struct blkcg_gq, blkcg_node);
1035 		struct request_queue *q = blkg->q;
1036 
1037 		if (spin_trylock(&q->queue_lock)) {
1038 			blkg_destroy(blkg);
1039 			spin_unlock(&q->queue_lock);
1040 		} else {
1041 			spin_unlock_irq(&blkcg->lock);
1042 			cpu_relax();
1043 			spin_lock_irq(&blkcg->lock);
1044 		}
1045 	}
1046 
1047 	spin_unlock_irq(&blkcg->lock);
1048 }
1049 
1050 static void blkcg_css_free(struct cgroup_subsys_state *css)
1051 {
1052 	struct blkcg *blkcg = css_to_blkcg(css);
1053 	int i;
1054 
1055 	mutex_lock(&blkcg_pol_mutex);
1056 
1057 	list_del(&blkcg->all_blkcgs_node);
1058 
1059 	for (i = 0; i < BLKCG_MAX_POLS; i++)
1060 		if (blkcg->cpd[i])
1061 			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
1062 
1063 	mutex_unlock(&blkcg_pol_mutex);
1064 
1065 	kfree(blkcg);
1066 }
1067 
1068 static struct cgroup_subsys_state *
1069 blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
1070 {
1071 	struct blkcg *blkcg;
1072 	struct cgroup_subsys_state *ret;
1073 	int i;
1074 
1075 	mutex_lock(&blkcg_pol_mutex);
1076 
1077 	if (!parent_css) {
1078 		blkcg = &blkcg_root;
1079 	} else {
1080 		blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
1081 		if (!blkcg) {
1082 			ret = ERR_PTR(-ENOMEM);
1083 			goto unlock;
1084 		}
1085 	}
1086 
1087 	for (i = 0; i < BLKCG_MAX_POLS ; i++) {
1088 		struct blkcg_policy *pol = blkcg_policy[i];
1089 		struct blkcg_policy_data *cpd;
1090 
1091 		/*
1092 		 * If the policy isn't registered yet, skip it for now;
1093 		 * blkcg_policy_register() allocates per-cgroup data for
1094 		 * existing blkcgs.  Otherwise, check if the policy needs
1095 		 * per-cgroup data: if it does, allocate and initialize it.
1096 		 */
1097 		if (!pol || !pol->cpd_alloc_fn)
1098 			continue;
1099 
1100 		cpd = pol->cpd_alloc_fn(GFP_KERNEL);
1101 		if (!cpd) {
1102 			ret = ERR_PTR(-ENOMEM);
1103 			goto free_pd_blkcg;
1104 		}
1105 		blkcg->cpd[i] = cpd;
1106 		cpd->blkcg = blkcg;
1107 		cpd->plid = i;
1108 		if (pol->cpd_init_fn)
1109 			pol->cpd_init_fn(cpd);
1110 	}
1111 
1112 	spin_lock_init(&blkcg->lock);
1113 	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
1114 	INIT_HLIST_HEAD(&blkcg->blkg_list);
1115 #ifdef CONFIG_CGROUP_WRITEBACK
1116 	INIT_LIST_HEAD(&blkcg->cgwb_list);
1117 	refcount_set(&blkcg->cgwb_refcnt, 1);
1118 #endif
1119 	list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);
1120 
1121 	mutex_unlock(&blkcg_pol_mutex);
1122 	return &blkcg->css;
1123 
1124 free_pd_blkcg:
1125 	for (i--; i >= 0; i--)
1126 		if (blkcg->cpd[i])
1127 			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
1128 
1129 	if (blkcg != &blkcg_root)
1130 		kfree(blkcg);
1131 unlock:
1132 	mutex_unlock(&blkcg_pol_mutex);
1133 	return ret;
1134 }
1135 
1136 /**
1137  * blkcg_init_queue - initialize blkcg part of request queue
1138  * @q: request_queue to initialize
1139  *
1140  * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
1141  * part of new request_queue @q.
1142  *
1143  * RETURNS:
1144  * 0 on success, -errno on failure.
1145  */
1146 int blkcg_init_queue(struct request_queue *q)
1147 {
1148 	struct blkcg_gq *new_blkg, *blkg;
1149 	bool preloaded;
1150 	int ret;
1151 
1152 	new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
1153 	if (!new_blkg)
1154 		return -ENOMEM;
1155 
1156 	preloaded = !radix_tree_preload(GFP_KERNEL);
1157 
1158 	/* Make sure the root blkg exists. */
1159 	rcu_read_lock();
1160 	spin_lock_irq(&q->queue_lock);
1161 	blkg = blkg_create(&blkcg_root, q, new_blkg);
1162 	if (IS_ERR(blkg))
1163 		goto err_unlock;
1164 	q->root_blkg = blkg;
1165 	spin_unlock_irq(&q->queue_lock);
1166 	rcu_read_unlock();
1167 
1168 	if (preloaded)
1169 		radix_tree_preload_end();
1170 
1171 	ret = blk_iolatency_init(q);
1172 	if (ret)
1173 		goto err_destroy_all;
1174 
1175 	ret = blk_throtl_init(q);
1176 	if (ret)
1177 		goto err_destroy_all;
1178 	return 0;
1179 
1180 err_destroy_all:
1181 	blkg_destroy_all(q);
1182 	return ret;
1183 err_unlock:
1184 	spin_unlock_irq(&q->queue_lock);
1185 	rcu_read_unlock();
1186 	if (preloaded)
1187 		radix_tree_preload_end();
1188 	return PTR_ERR(blkg);
1189 }
1190 
1191 /**
1192  * blkcg_drain_queue - drain blkcg part of request_queue
1193  * @q: request_queue to drain
1194  *
1195  * Called from blk_drain_queue().  Responsible for draining blkcg part.
1196  */
1197 void blkcg_drain_queue(struct request_queue *q)
1198 {
1199 	lockdep_assert_held(&q->queue_lock);
1200 
1201 	/*
1202 	 * @q could be exiting and already have destroyed all blkgs as
1203 	 * indicated by NULL root_blkg.  If so, don't confuse policies.
1204 	 */
1205 	if (!q->root_blkg)
1206 		return;
1207 
1208 	blk_throtl_drain(q);
1209 }
1210 
1211 /**
1212  * blkcg_exit_queue - exit and release blkcg part of request_queue
1213  * @q: request_queue being released
1214  *
1215  * Called from blk_exit_queue().  Responsible for exiting blkcg part.
1216  */
1217 void blkcg_exit_queue(struct request_queue *q)
1218 {
1219 	blkg_destroy_all(q);
1220 	blk_throtl_exit(q);
1221 }
1222 
1223 /*
1224  * We cannot support shared io contexts, as we have no mean to support
1225  * two tasks with the same ioc in two different groups without major rework
1226  * of the main cic data structures.  For now we allow a task to change
1227  * its cgroup only if it's the only owner of its ioc.
1228  */
1229 static int blkcg_can_attach(struct cgroup_taskset *tset)
1230 {
1231 	struct task_struct *task;
1232 	struct cgroup_subsys_state *dst_css;
1233 	struct io_context *ioc;
1234 	int ret = 0;
1235 
1236 	/* task_lock() is needed to avoid races with exit_io_context() */
1237 	cgroup_taskset_for_each(task, dst_css, tset) {
1238 		task_lock(task);
1239 		ioc = task->io_context;
1240 		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
1241 			ret = -EINVAL;
1242 		task_unlock(task);
1243 		if (ret)
1244 			break;
1245 	}
1246 	return ret;
1247 }
1248 
1249 static void blkcg_bind(struct cgroup_subsys_state *root_css)
1250 {
1251 	int i;
1252 
1253 	mutex_lock(&blkcg_pol_mutex);
1254 
1255 	for (i = 0; i < BLKCG_MAX_POLS; i++) {
1256 		struct blkcg_policy *pol = blkcg_policy[i];
1257 		struct blkcg *blkcg;
1258 
1259 		if (!pol || !pol->cpd_bind_fn)
1260 			continue;
1261 
1262 		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node)
1263 			if (blkcg->cpd[pol->plid])
1264 				pol->cpd_bind_fn(blkcg->cpd[pol->plid]);
1265 	}
1266 	mutex_unlock(&blkcg_pol_mutex);
1267 }
1268 
1269 static void blkcg_exit(struct task_struct *tsk)
1270 {
1271 	if (tsk->throttle_queue)
1272 		blk_put_queue(tsk->throttle_queue);
1273 	tsk->throttle_queue = NULL;
1274 }
1275 
1276 struct cgroup_subsys io_cgrp_subsys = {
1277 	.css_alloc = blkcg_css_alloc,
1278 	.css_offline = blkcg_css_offline,
1279 	.css_free = blkcg_css_free,
1280 	.can_attach = blkcg_can_attach,
1281 	.bind = blkcg_bind,
1282 	.dfl_cftypes = blkcg_files,
1283 	.legacy_cftypes = blkcg_legacy_files,
1284 	.legacy_name = "blkio",
1285 	.exit = blkcg_exit,
1286 #ifdef CONFIG_MEMCG
1287 	/*
1288 	 * This ensures that, if available, memcg is automatically enabled
1289 	 * together on the default hierarchy so that the owner cgroup can
1290 	 * be retrieved from writeback pages.
1291 	 */
1292 	.depends_on = 1 << memory_cgrp_id,
1293 #endif
1294 };
1295 EXPORT_SYMBOL_GPL(io_cgrp_subsys);
1296 
1297 /**
1298  * blkcg_activate_policy - activate a blkcg policy on a request_queue
1299  * @q: request_queue of interest
1300  * @pol: blkcg policy to activate
1301  *
1302  * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q is frozen while
1303  * its blkgs are populated with policy_data for @pol.
1304  *
1305  * Activation happens with @q frozen, so nobody would be accessing blkgs
1306  * from the IO path.  Update of each blkg is protected by both queue and
1307  * blkcg locks so that holding either lock and testing blkcg_policy_enabled()
1308  * is always enough for dereferencing policy data.
1309  *
1310  * The caller is responsible for synchronizing [de]activations and policy
1311  * [un]registrations.  Returns 0 on success, -errno on failure.
1312  */
1313 int blkcg_activate_policy(struct request_queue *q,
1314 			  const struct blkcg_policy *pol)
1315 {
1316 	struct blkg_policy_data *pd_prealloc = NULL;
1317 	struct blkcg_gq *blkg;
1318 	int ret;
1319 
1320 	if (blkcg_policy_enabled(q, pol))
1321 		return 0;
1322 
1323 	if (queue_is_mq(q))
1324 		blk_mq_freeze_queue(q);
1325 pd_prealloc:
1326 	if (!pd_prealloc) {
1327 		pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q->node);
1328 		if (!pd_prealloc) {
1329 			ret = -ENOMEM;
1330 			goto out_bypass_end;
1331 		}
1332 	}
1333 
1334 	spin_lock_irq(&q->queue_lock);
1335 
1336 	/* blkg_list is pushed at the head, reverse walk to init parents first */
1337 	list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
1338 		struct blkg_policy_data *pd;
1339 
1340 		if (blkg->pd[pol->plid])
1341 			continue;
1342 
1343 		pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q->node);
1344 		if (!pd)
1345 			swap(pd, pd_prealloc);
1346 		if (!pd) {
1347 			spin_unlock_irq(&q->queue_lock);
1348 			goto pd_prealloc;
1349 		}
1350 
1351 		blkg->pd[pol->plid] = pd;
1352 		pd->blkg = blkg;
1353 		pd->plid = pol->plid;
1354 		if (pol->pd_init_fn)
1355 			pol->pd_init_fn(pd);
1356 	}
1357 
1358 	__set_bit(pol->plid, q->blkcg_pols);
1359 	ret = 0;
1360 
1361 	spin_unlock_irq(&q->queue_lock);
1362 out_bypass_end:
1363 	if (queue_is_mq(q))
1364 		blk_mq_unfreeze_queue(q);
1365 	if (pd_prealloc)
1366 		pol->pd_free_fn(pd_prealloc);
1367 	return ret;
1368 }
1369 EXPORT_SYMBOL_GPL(blkcg_activate_policy);
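
/*
 * Illustrative sketch (not part of this file; "blkcg_policy_mypol" is
 * hypothetical): a controller typically activates its policy when it is
 * set up on a queue and deactivates it on teardown:
 *
 *	ret = blkcg_activate_policy(q, &blkcg_policy_mypol);
 *	if (ret)
 *		return ret;
 *	...
 *	blkcg_deactivate_policy(q, &blkcg_policy_mypol);
 */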
1370 
1371 /**
1372  * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
1373  * @q: request_queue of interest
1374  * @pol: blkcg policy to deactivate
1375  *
1376  * Deactivate @pol on @q.  Follows the same synchronization rules as
1377  * blkcg_activate_policy().
1378  */
1379 void blkcg_deactivate_policy(struct request_queue *q,
1380 			     const struct blkcg_policy *pol)
1381 {
1382 	struct blkcg_gq *blkg;
1383 
1384 	if (!blkcg_policy_enabled(q, pol))
1385 		return;
1386 
1387 	if (queue_is_mq(q))
1388 		blk_mq_freeze_queue(q);
1389 
1390 	spin_lock_irq(&q->queue_lock);
1391 
1392 	__clear_bit(pol->plid, q->blkcg_pols);
1393 
1394 	list_for_each_entry(blkg, &q->blkg_list, q_node) {
1395 		if (blkg->pd[pol->plid]) {
1396 			if (pol->pd_offline_fn)
1397 				pol->pd_offline_fn(blkg->pd[pol->plid]);
1398 			pol->pd_free_fn(blkg->pd[pol->plid]);
1399 			blkg->pd[pol->plid] = NULL;
1400 		}
1401 	}
1402 
1403 	spin_unlock_irq(&q->queue_lock);
1404 
1405 	if (queue_is_mq(q))
1406 		blk_mq_unfreeze_queue(q);
1407 }
1408 EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
1409 
1410 /**
1411  * blkcg_policy_register - register a blkcg policy
1412  * @pol: blkcg policy to register
1413  *
1414  * Register @pol with blkcg core.  Might sleep and @pol may be modified on
1415  * successful registration.  Returns 0 on success and -errno on failure.
1416  */
1417 int blkcg_policy_register(struct blkcg_policy *pol)
1418 {
1419 	struct blkcg *blkcg;
1420 	int i, ret;
1421 
1422 	mutex_lock(&blkcg_pol_register_mutex);
1423 	mutex_lock(&blkcg_pol_mutex);
1424 
1425 	/* find an empty slot */
1426 	ret = -ENOSPC;
1427 	for (i = 0; i < BLKCG_MAX_POLS; i++)
1428 		if (!blkcg_policy[i])
1429 			break;
1430 	if (i >= BLKCG_MAX_POLS) {
1431 		pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n");
1432 		goto err_unlock;
1433 	}
1434 
1435 	/* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn come in pairs */
1436 	if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
1437 		(!pol->pd_alloc_fn ^ !pol->pd_free_fn))
1438 		goto err_unlock;
1439 
1440 	/* register @pol */
1441 	pol->plid = i;
1442 	blkcg_policy[pol->plid] = pol;
1443 
1444 	/* allocate and install cpd's */
1445 	if (pol->cpd_alloc_fn) {
1446 		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1447 			struct blkcg_policy_data *cpd;
1448 
1449 			cpd = pol->cpd_alloc_fn(GFP_KERNEL);
1450 			if (!cpd)
1451 				goto err_free_cpds;
1452 
1453 			blkcg->cpd[pol->plid] = cpd;
1454 			cpd->blkcg = blkcg;
1455 			cpd->plid = pol->plid;
1456 			pol->cpd_init_fn(cpd);
1457 		}
1458 	}
1459 
1460 	mutex_unlock(&blkcg_pol_mutex);
1461 
1462 	/* everything is in place, add intf files for the new policy */
1463 	if (pol->dfl_cftypes)
1464 		WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
1465 					       pol->dfl_cftypes));
1466 	if (pol->legacy_cftypes)
1467 		WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
1468 						  pol->legacy_cftypes));
1469 	mutex_unlock(&blkcg_pol_register_mutex);
1470 	return 0;
1471 
1472 err_free_cpds:
1473 	if (pol->cpd_free_fn) {
1474 		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1475 			if (blkcg->cpd[pol->plid]) {
1476 				pol->cpd_free_fn(blkcg->cpd[pol->plid]);
1477 				blkcg->cpd[pol->plid] = NULL;
1478 			}
1479 		}
1480 	}
1481 	blkcg_policy[pol->plid] = NULL;
1482 err_unlock:
1483 	mutex_unlock(&blkcg_pol_mutex);
1484 	mutex_unlock(&blkcg_pol_register_mutex);
1485 	return ret;
1486 }
1487 EXPORT_SYMBOL_GPL(blkcg_policy_register);
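
/*
 * Illustrative sketch (not part of this file; all "mypol" names are
 * hypothetical): a policy supplies matching alloc/free hooks and registers
 * itself once, usually from its module init:
 *
 *	static struct blkcg_policy blkcg_policy_mypol = {
 *		.dfl_cftypes	= mypol_files,
 *		.legacy_cftypes	= mypol_legacy_files,
 *		.pd_alloc_fn	= mypol_pd_alloc,
 *		.pd_init_fn	= mypol_pd_init,
 *		.pd_offline_fn	= mypol_pd_offline,
 *		.pd_free_fn	= mypol_pd_free,
 *	};
 *
 *	static int __init mypol_init(void)
 *	{
 *		return blkcg_policy_register(&blkcg_policy_mypol);
 *	}
 *
 *	static void __exit mypol_exit(void)
 *	{
 *		blkcg_policy_unregister(&blkcg_policy_mypol);
 *	}
 */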
1488 
1489 /**
1490  * blkcg_policy_unregister - unregister a blkcg policy
1491  * @pol: blkcg policy to unregister
1492  *
1493  * Undo blkcg_policy_register(@pol).  Might sleep.
1494  */
1495 void blkcg_policy_unregister(struct blkcg_policy *pol)
1496 {
1497 	struct blkcg *blkcg;
1498 
1499 	mutex_lock(&blkcg_pol_register_mutex);
1500 
1501 	if (WARN_ON(blkcg_policy[pol->plid] != pol))
1502 		goto out_unlock;
1503 
1504 	/* kill the intf files first */
1505 	if (pol->dfl_cftypes)
1506 		cgroup_rm_cftypes(pol->dfl_cftypes);
1507 	if (pol->legacy_cftypes)
1508 		cgroup_rm_cftypes(pol->legacy_cftypes);
1509 
1510 	/* remove cpds and unregister */
1511 	mutex_lock(&blkcg_pol_mutex);
1512 
1513 	if (pol->cpd_free_fn) {
1514 		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1515 			if (blkcg->cpd[pol->plid]) {
1516 				pol->cpd_free_fn(blkcg->cpd[pol->plid]);
1517 				blkcg->cpd[pol->plid] = NULL;
1518 			}
1519 		}
1520 	}
1521 	blkcg_policy[pol->plid] = NULL;
1522 
1523 	mutex_unlock(&blkcg_pol_mutex);
1524 out_unlock:
1525 	mutex_unlock(&blkcg_pol_register_mutex);
1526 }
1527 EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
1528 
1529 /*
1530  * Scale the accumulated delay based on how long it has been since we updated
1531  * the delay.  We only call this when we are adding delay (in case it's been a
1532  * while since we last added any) and when we are checking whether we need to
1533  * delay a task, to account for any delays that may have occurred in between.
1534  */
1535 static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now)
1536 {
1537 	u64 old = atomic64_read(&blkg->delay_start);
1538 
1539 	/*
1540 	 * We only want to scale down every second.  The idea here is that we
1541 	 * want to delay people for min(delay_nsec, NSEC_PER_SEC) in a certain
1542 	 * time window.  We only want to throttle tasks for recent delay that
1543 	 * has occurred, in 1 second time windows since that's the maximum
1544 	 * things can be throttled.  We save the current delay window in
1545 	 * blkg->last_delay so we know what amount is still left to be charged
1546 	 * to the blkg from this point onward.  blkg->last_use keeps track of
1547 	 * the use_delay counter.  The idea is if we're unthrottling the blkg we
1548 	 * are ok with whatever is happening now, and we can take away more of
1549 	 * the accumulated delay as we've already throttled enough that
1550 	 * everybody is happy with their IO latencies.
1551 	 */
1552 	if (time_before64(old + NSEC_PER_SEC, now) &&
1553 	    atomic64_cmpxchg(&blkg->delay_start, old, now) == old) {
1554 		u64 cur = atomic64_read(&blkg->delay_nsec);
1555 		u64 sub = min_t(u64, blkg->last_delay, now - old);
1556 		int cur_use = atomic_read(&blkg->use_delay);
1557 
1558 		/*
1559 		 * We've been unthrottled, subtract a larger chunk of our
1560 		 * accumulated delay.
1561 		 */
1562 		if (cur_use < blkg->last_use)
1563 			sub = max_t(u64, sub, blkg->last_delay >> 1);
1564 
1565 		/*
1566 		 * This shouldn't happen, but handle it anyway.  Our delay_nsec
1567 		 * should only ever be growing except here where we subtract out
1568 		 * min(last_delay, 1 second), but lord knows bugs happen and I'd
1569 		 * rather not end up with negative numbers.
1570 		 */
1571 		if (unlikely(cur < sub)) {
1572 			atomic64_set(&blkg->delay_nsec, 0);
1573 			blkg->last_delay = 0;
1574 		} else {
1575 			atomic64_sub(sub, &blkg->delay_nsec);
1576 			blkg->last_delay = cur - sub;
1577 		}
1578 		blkg->last_use = cur_use;
1579 	}
1580 }
1581 
1582 /*
1583  * This is called when we want to actually walk up the hierarchy and check to
1584  * see if we need to throttle, and then actually throttle if there is some
1585  * accumulated delay.  This should only be called upon return to user space so
1586  * we're not holding some lock that would induce a priority inversion.
1587  */
1588 static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
1589 {
1590 	u64 now = ktime_to_ns(ktime_get());
1591 	u64 exp;
1592 	u64 delay_nsec = 0;
1593 	int tok;
1594 
1595 	while (blkg->parent) {
1596 		if (atomic_read(&blkg->use_delay)) {
1597 			blkcg_scale_delay(blkg, now);
1598 			delay_nsec = max_t(u64, delay_nsec,
1599 					   atomic64_read(&blkg->delay_nsec));
1600 		}
1601 		blkg = blkg->parent;
1602 	}
1603 
1604 	if (!delay_nsec)
1605 		return;
1606 
1607 	/*
1608 	 * Let's not sleep for all eternity if we've amassed a huge delay.
1609 	 * Swapping or metadata IO can accumulate 10's of seconds worth of
1610 	 * delay, and we want userspace to be able to do _something_ so cap the
1611 	 * delays at 0.25s.  If there's 10's of seconds worth of delay then
1612 	 * the tasks will be delayed for 0.25 seconds for every syscall.
1613 	 */
1614 	delay_nsec = min_t(u64, delay_nsec, 250 * NSEC_PER_MSEC);
1615 
1616 	/*
1617 	 * TODO: the use_memdelay flag is going to be for the upcoming psi stuff
1618 	 * that hasn't landed upstream yet.  Once that stuff is in place we need
1619 	 * to do a psi_memstall_enter/leave if memdelay is set.
1620 	 */
1621 
1622 	exp = ktime_add_ns(now, delay_nsec);
1623 	tok = io_schedule_prepare();
1624 	do {
1625 		__set_current_state(TASK_KILLABLE);
1626 		if (!schedule_hrtimeout(&exp, HRTIMER_MODE_ABS))
1627 			break;
1628 	} while (!fatal_signal_pending(current));
1629 	io_schedule_finish(tok);
1630 }
1631 
1632 /**
1633  * blkcg_maybe_throttle_current - throttle the current task if it has been marked
1634  *
1635  * This is only called if we've been marked with set_notify_resume().  Obviously
1636  * we can be marked with set_notify_resume() for reasons other than blkcg
1637  * throttling, so we check to see if current->throttle_queue is set and if not
1638  * this doesn't do anything.  This should only ever be called by the resume
1639  * code; it's not meant to be called willy-nilly as it will actually do the
1640  * work to throttle the task if it is set up for throttling.
1641  */
1642 void blkcg_maybe_throttle_current(void)
1643 {
1644 	struct request_queue *q = current->throttle_queue;
1645 	struct cgroup_subsys_state *css;
1646 	struct blkcg *blkcg;
1647 	struct blkcg_gq *blkg;
1648 	bool use_memdelay = current->use_memdelay;
1649 
1650 	if (!q)
1651 		return;
1652 
1653 	current->throttle_queue = NULL;
1654 	current->use_memdelay = false;
1655 
1656 	rcu_read_lock();
1657 	css = kthread_blkcg();
1658 	if (css)
1659 		blkcg = css_to_blkcg(css);
1660 	else
1661 		blkcg = css_to_blkcg(task_css(current, io_cgrp_id));
1662 
1663 	if (!blkcg)
1664 		goto out;
1665 	blkg = blkg_lookup(blkcg, q);
1666 	if (!blkg)
1667 		goto out;
1668 	if (!blkg_tryget(blkg))
1669 		goto out;
1670 	rcu_read_unlock();
1671 
1672 	blkcg_maybe_throttle_blkg(blkg, use_memdelay);
1673 	blkg_put(blkg);
1674 	blk_put_queue(q);
1675 	return;
1676 out:
1677 	rcu_read_unlock();
1678 	blk_put_queue(q);
1679 }
1680 
1681 /**
1682  * blkcg_schedule_throttle - this task needs to check for throttling
1683  * @q: the request queue IO was submitted on
1684  * @use_memdelay: do we charge this to memory delay for PSI
1685  *
1686  * This is called by the IO controller when we know there's delay accumulated
1687  * for the blkg for this task.  We do not pass the blkg because there are places
1688  * we call this that may not have that information; the swapping code, for
1689  * instance, will only have a request_queue at that point.  This sets the
1690  * notify_resume for the task to check and see if it requires throttling before
1691  * returning to user space.
1692  *
1693  * We will only schedule once per syscall.  You can call this over and over
1694  * again and it will only do the check once upon return to user space, and only
1695  * throttle once.  If the task needs to be throttled again it'll need to be
1696  * re-set the next time we see the task.
1697  */
1698 void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
1699 {
1700 	if (unlikely(current->flags & PF_KTHREAD))
1701 		return;
1702 
1703 	if (!blk_get_queue(q))
1704 		return;
1705 
1706 	if (current->throttle_queue)
1707 		blk_put_queue(current->throttle_queue);
1708 	current->throttle_queue = q;
1709 	if (use_memdelay)
1710 		current->use_memdelay = use_memdelay;
1711 	set_notify_resume(current);
1712 }
1713 
1714 /**
1715  * blkcg_add_delay - add delay to this blkg
1716  * @blkg: blkg of interest
1717  * @now: the current time in nanoseconds
1718  * @delta: how many nanoseconds of delay to add
1719  *
1720  * Charge @delta to the blkg's current delay accumulation.  This is used to
1721  * throttle tasks if an IO controller thinks we need more throttling.
1722  */
1723 void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta)
1724 {
1725 	blkcg_scale_delay(blkg, now);
1726 	atomic64_add(delta, &blkg->delay_nsec);
1727 }
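
/*
 * Illustrative sketch (not part of this file; "mypd" is hypothetical): an
 * IO controller that finds a group over its latency target typically
 * charges the overage and arms the return-to-userspace throttle together:
 *
 *	if (latency > mypd->target) {
 *		blkcg_add_delay(blkg, now, latency - mypd->target);
 *		blkcg_schedule_throttle(blkg->q, true);
 *	}
 */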
1728 
1729 module_param(blkcg_debug_stats, bool, 0644);
1730 MODULE_PARM_DESC(blkcg_debug_stats, "True if you want debug stats, false if not");
1731