/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *	              Nauman Rafique <nauman@google.com>
 *
 * For policy-specific per-blkcg data:
 * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
 *                    Arianna Avanzini <avanzini.arianna@gmail.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/blk-cgroup.h>
#include "blk.h"

#define MAX_KEY_LEN 100

/*
 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
 * blkcg_pol_register_mutex nests outside of it and synchronizes entire
 * policy [un]register operations including cgroup file additions /
 * removals.  Putting cgroup file registration outside blkcg_pol_mutex
 * allows grabbing it from cgroup callbacks.
 */
static DEFINE_MUTEX(blkcg_pol_register_mutex);
static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root;
EXPORT_SYMBOL_GPL(blkcg_root);

struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static LIST_HEAD(all_blkcgs);		/* protected by blkcg_pol_mutex */

static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkg->pd[i])
			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);

	if (blkg->blkcg != &blkcg_root)
		blk_exit_rl(&blkg->rl);

	blkg_rwstat_exit(&blkg->stat_ios);
	blkg_rwstat_exit(&blkg->stat_bytes);
	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
				   gfp_t gfp_mask)
{
	struct blkcg_gq *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
	if (!blkg)
		return NULL;

	if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) ||
	    blkg_rwstat_init(&blkg->stat_ios, gfp_mask))
		goto err_free;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	atomic_set(&blkg->refcnt, 1);

	/* root blkg uses @q->root_rl, init rl only for !root blkgs */
	if (blkcg != &blkcg_root) {
		if (blk_init_rl(&blkg->rl, q, gfp_mask))
			goto err_free;
		blkg->rl.blkg = blkg;
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(q, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = pol->pd_alloc_fn(gfp_mask, q->node);
		if (!pd)
			goto err_free;

		blkg->pd[i] = pd;
		pd->blkg = blkg;
		pd->plid = i;
	}

	return blkg;

err_free:
	blkg_free(blkg);
	return NULL;
}

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint)
{
	struct blkcg_gq *blkg;

	/*
	 * Hint didn't match.  Look up from the radix tree.  Note that the
	 * hint can only be updated under queue_lock as otherwise @blkg
	 * could have already been removed from blkg_tree.  The caller is
	 * responsible for grabbing queue_lock if @update_hint.
	 */
	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q == q) {
		if (update_hint) {
			lockdep_assert_held(q->queue_lock);
			rcu_assign_pointer(blkcg->blkg_hint, blkg);
		}
		return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);
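
/*
 * Example (illustrative sketch, not part of the original file): the fast
 * path in blk-cgroup.h checks blkcg->blkg_hint first and only falls back
 * to this slowpath.  A typical reader looks like:
 *
 *	struct blkg_rwstat ios;
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg)
 *		ios = blkg_rwstat_read(&blkg->stat_ios);	// group-local data
 *	rcu_read_unlock();
 *
 * The RCU read lock only keeps @blkg from being freed; anything beyond
 * group-local values still needs @q->queue_lock.
 */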

/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_NOWAIT.  @new_blkg is always consumed on return.
 */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
				    struct request_queue *q,
				    struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	struct bdi_writeback_congested *wb_congested;
	int i, ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/* blkg holds a reference to blkcg */
	if (!css_tryget_online(&blkcg->css)) {
		ret = -ENODEV;
		goto err_free_blkg;
	}

	wb_congested = wb_congested_get_create(&q->backing_dev_info,
					       blkcg->css.id,
					       GFP_NOWAIT | __GFP_NOWARN);
	if (!wb_congested) {
		ret = -ENOMEM;
		goto err_put_css;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto err_put_congested;
		}
	}
	blkg = new_blkg;
	blkg->wb_congested = wb_congested;

	/* link parent */
	if (blkcg_parent(blkcg)) {
		blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
		if (WARN_ON_ONCE(!blkg->parent)) {
			ret = -ENODEV;
			goto err_put_congested;
		}
		blkg_get(blkg->parent);
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_init_fn)
			pol->pd_init_fn(blkg->pd[i]);
	}

	/* insert */
	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &q->blkg_list);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_online_fn)
				pol->pd_online_fn(blkg->pd[i]);
		}
	}
	blkg->online = true;
	spin_unlock(&blkcg->lock);

	if (!ret)
		return blkg;

	/* insertion failed after full init, use the usual release path */
	blkg_put(blkg);
	return ERR_PTR(ret);

err_put_congested:
	wb_congested_put(wb_congested);
err_put_css:
	css_put(&blkcg->css);
err_free_blkg:
	blkg_free(new_blkg);
	return ERR_PTR(ret);
}

/**
 * blkg_lookup_create - lookup blkg, try to create one if not there
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
 * create one.  blkg creation is performed recursively from blkcg_root such
 * that all non-root blkg's have access to the parent blkg.  This function
 * should be called under RCU read lock and @q->queue_lock.
 *
 * Returns a pointer to the looked-up or created blkg on success, an
 * ERR_PTR() value on error.  If @q is dying, returns ERR_PTR(-ENODEV);
 * if @q is bypassing but not dying, returns ERR_PTR(-EBUSY).
 */
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q)
{
	struct blkcg_gq *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);

	blkg = __blkg_lookup(blkcg, q, true);
	if (blkg)
		return blkg;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent = blkcg_parent(blkcg);

		while (parent && !__blkg_lookup(parent, q, false)) {
			pos = parent;
			parent = blkcg_parent(parent);
		}

		blkg = blkg_create(pos, q, NULL);
		if (pos == blkcg || IS_ERR(blkg))
			return blkg;
	}
}

static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct blkcg *blkcg = blkg->blkcg;
	struct blkcg_gq *parent = blkg->parent;
	int i;

	lockdep_assert_held(blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something is wrong if we are trying to remove the same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_offline_fn)
			pol->pd_offline_fn(blkg->pd[i]);
	}

	if (parent) {
		blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
		blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
	}

	blkg->online = false;

	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Both setting lookup hint to and clearing it from @blkg are done
	 * under queue_lock.  If it's not pointing to @blkg now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
		rcu_assign_pointer(blkcg->blkg_hint, NULL);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
	struct blkcg_gq *blkg, *n;

	lockdep_assert_held(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	q->root_blkg = NULL;
	q->root_rl.blkg = NULL;
}

/*
 * A group is RCU protected, but having an rcu lock does not mean that one
 * can access all the fields of blkg and assume these are valid.  For
 * example, don't try to follow throtl_data and request queue links.
 *
 * Having a reference to blkg under an rcu allows accesses to only values
 * local to groups like group stats and group rate limits.
 */
void __blkg_release_rcu(struct rcu_head *rcu_head)
{
	struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head);

	/* release the blkcg and parent blkg refs this blkg has been holding */
	css_put(&blkg->blkcg->css);
	if (blkg->parent)
		blkg_put(blkg->parent);

	wb_congested_put(blkg->wb_congested);

	blkg_free(blkg);
}
EXPORT_SYMBOL_GPL(__blkg_release_rcu);

/*
 * The "next" function used by blk_queue_for_each_rl().  It's a bit tricky
 * because the root blkg uses @q->root_rl instead of its own rl.
 */
struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q)
{
	struct list_head *ent;
	struct blkcg_gq *blkg;

	/*
	 * Determine the current blkg list_head.  The first entry is
	 * root_rl which is off @q->blkg_list and mapped to the head.
	 */
	if (rl == &q->root_rl) {
		ent = &q->blkg_list;
		/* There are no more block groups, hence no request lists */
		if (list_empty(ent))
			return NULL;
	} else {
		blkg = container_of(rl, struct blkcg_gq, rl);
		ent = &blkg->q_node;
	}

	/* walk to the next list_head, skip root blkcg */
	ent = ent->next;
	if (ent == &q->root_blkg->q_node)
		ent = ent->next;
	if (ent == &q->blkg_list)
		return NULL;

	blkg = container_of(ent, struct blkcg_gq, q_node);
	return &blkg->rl;
}

static int blkcg_reset_stats(struct cgroup_subsys_state *css,
			     struct cftype *cftype, u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkcg_gq *blkg;
	int i;

	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		blkg_rwstat_reset(&blkg->stat_bytes);
		blkg_rwstat_reset(&blkg->stat_ios);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg->pd[i]);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_dev_name);

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data and the matching queue lock held.  If @show_total
 * is %true, the sum of the return values from @prfill is printed with
 * "Total" label at the end.
 *
 * This is to be used to construct print functions for the
 * cftype->seq_show method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	u64 total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		spin_lock_irq(blkg->q->queue_lock);
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
		spin_unlock_irq(blkg->q->queue_lock);
	}
	rcu_read_unlock();

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
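
/*
 * Example (illustrative sketch): a policy builds its seq_show callback
 * from blkcg_print_blkgs() and one of the prfill helpers below.  The
 * "my_pol" names and the blkg_stat member are hypothetical:
 *
 *	static int my_pol_print_stat(struct seq_file *sf, void *v)
 *	{
 *		blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 *				  blkg_prfill_stat, &my_pol_policy,
 *				  offsetof(struct my_pol_pd, stat), false);
 *		return 0;
 *	}
 */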

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
	};
	const char *dname = blkg_dev_name(pd->blkg);
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)atomic64_read(&rwstat->aux_cnt[i]));

	v = atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_READ]) +
		atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_WRITE]);
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);

/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
	return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);

static u64 blkg_prfill_rwstat_field(struct seq_file *sf,
				    struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd->blkg + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/**
 * blkg_print_stat_bytes - seq_show callback for blkg->stat_bytes
 * @sf: seq_file to print to
 * @v: unused
 *
 * To be used as cftype->seq_show to print blkg->stat_bytes.
 * cftype->private must be set to the blkcg_policy.
 */
int blkg_print_stat_bytes(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_bytes), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_bytes);

/**
 * blkg_print_stat_ios - seq_show callback for blkg->stat_ios
 * @sf: seq_file to print to
 * @v: unused
 *
 * To be used as cftype->seq_show to print blkg->stat_ios.  cftype->private
 * must be set to the blkcg_policy.
 */
int blkg_print_stat_ios(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_ios), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_ios);
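
/*
 * Example (illustrative sketch): wiring the two helpers above into a
 * policy's cftype table.  cftype->private carries the blkcg_policy so
 * blkcg_print_blkgs() can test blkcg_policy_enabled(); the "my_pol"
 * names are hypothetical:
 *
 *	static struct cftype my_pol_files[] = {
 *		{
 *			.name = "my_pol.io_service_bytes",
 *			.private = (unsigned long)&my_pol_policy,
 *			.seq_show = blkg_print_stat_bytes,
 *		},
 *		{ }	(terminating entry)
 *	};
 */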

static u64 blkg_prfill_rwstat_field_recursive(struct seq_file *sf,
					      struct blkg_policy_data *pd,
					      int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_recursive_sum(pd->blkg,
							      NULL, off);
	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/**
 * blkg_print_stat_bytes_recursive - recursive version of blkg_print_stat_bytes
 * @sf: seq_file to print to
 * @v: unused
 */
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field_recursive,
			  (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_bytes), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_bytes_recursive);

/**
 * blkg_print_stat_ios_recursive - recursive version of blkg_print_stat_ios
 * @sf: seq_file to print to
 * @v: unused
 */
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field_recursive,
			  (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_ios), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_ios_recursive);

/**
 * blkg_stat_recursive_sum - collect hierarchical blkg_stat
 * @blkg: blkg of interest
 * @pol: blkcg_policy which contains the blkg_stat
 * @off: offset to the blkg_stat in blkg_policy_data or @blkg
 *
 * Collect the blkg_stat specified by @blkg, @pol and @off and all its
 * online descendants and their aux counts.  The caller must be holding the
 * queue lock for online tests.
 *
 * If @pol is NULL, blkg_stat is at @off bytes into @blkg; otherwise, it is
 * at @off bytes into @blkg's blkg_policy_data of the policy.
 */
u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
			    struct blkcg_policy *pol, int off)
{
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	u64 sum = 0;

	lockdep_assert_held(blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
		struct blkg_stat *stat;

		if (!pos_blkg->online)
			continue;

		if (pol)
			stat = (void *)blkg_to_pd(pos_blkg, pol) + off;
		else
			stat = (void *)pos_blkg + off;

		sum += blkg_stat_read(stat) + atomic64_read(&stat->aux_cnt);
	}
	rcu_read_unlock();

	return sum;
}
EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);
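
/*
 * Example (illustrative sketch): a policy-private recursive prfill
 * callback built on blkg_stat_recursive_sum(); the "my_pol" names are
 * hypothetical:
 *
 *	static u64 my_pol_prfill_time_recursive(struct seq_file *sf,
 *						struct blkg_policy_data *pd,
 *						int off)
 *	{
 *		u64 sum = blkg_stat_recursive_sum(pd_to_blkg(pd),
 *						  &my_pol_policy, off);
 *		return __blkg_prfill_u64(sf, pd, sum);
 *	}
 */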

/**
 * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
 * @blkg: blkg of interest
 * @pol: blkcg_policy which contains the blkg_rwstat
 * @off: offset to the blkg_rwstat in blkg_policy_data or @blkg
 *
 * Collect the blkg_rwstat specified by @blkg, @pol and @off and all its
 * online descendants and their aux counts.  The caller must be holding the
 * queue lock for online tests.
 *
 * If @pol is NULL, blkg_rwstat is at @off bytes into @blkg; otherwise, it
 * is at @off bytes into @blkg's blkg_policy_data of the policy.
 */
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
					     struct blkcg_policy *pol, int off)
{
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	struct blkg_rwstat sum = { };
	int i;

	lockdep_assert_held(blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
		struct blkg_rwstat *rwstat;

		if (!pos_blkg->online)
			continue;

		if (pol)
			rwstat = (void *)blkg_to_pd(pos_blkg, pol) + off;
		else
			rwstat = (void *)pos_blkg + off;

		for (i = 0; i < BLKG_RWSTAT_NR; i++)
			atomic64_add(atomic64_read(&rwstat->aux_cnt[i]) +
				percpu_counter_sum_positive(&rwstat->cpu_cnt[i]),
				&sum.aux_cnt[i]);
	}
	rcu_read_unlock();

	return sum;
}
EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->body the
 * part of @input following MAJ:MIN.  This function returns with RCU read
 * lock and queue lock held and must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx)
	__acquires(rcu) __acquires(disk->queue->queue_lock)
{
	struct gendisk *disk;
	struct blkcg_gq *blkg;
	struct module *owner;
	unsigned int major, minor;
	int key_len, part, ret;
	char *body;

	if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
		return -EINVAL;

	body = input + key_len;
	if (!isspace(*body))
		return -EINVAL;
	body = skip_spaces(body);

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk)
		return -ENODEV;
	if (part) {
		owner = disk->fops->owner;
		put_disk(disk);
		module_put(owner);
		return -ENODEV;
	}

	rcu_read_lock();
	spin_lock_irq(disk->queue->queue_lock);

	if (blkcg_policy_enabled(disk->queue, pol))
		blkg = blkg_lookup_create(blkcg, disk->queue);
	else
		blkg = ERR_PTR(-EOPNOTSUPP);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		rcu_read_unlock();
		spin_unlock_irq(disk->queue->queue_lock);
		owner = disk->fops->owner;
		put_disk(disk);
		module_put(owner);
		/*
		 * If queue was bypassing, we should retry.  Do so after a
		 * short msleep().  It isn't strictly necessary but queue
		 * can be bypassing for some time and it's always nice to
		 * avoid busy looping.
		 */
		if (ret == -EBUSY) {
			msleep(10);
			ret = restart_syscall();
		}
		return ret;
	}

	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->body = body;
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
	struct module *owner;

	spin_unlock_irq(ctx->disk->queue->queue_lock);
	rcu_read_unlock();
	owner = ctx->disk->fops->owner;
	put_disk(ctx->disk);
	module_put(owner);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
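
/*
 * Example (illustrative sketch): the canonical shape of a cftype write
 * handler built on the prep/finish pair above; the "my_pol" names and
 * the apply helper are hypothetical:
 *
 *	static ssize_t my_pol_set_limit(struct kernfs_open_file *of,
 *					char *buf, size_t nbytes, loff_t off)
 *	{
 *		struct blkcg *blkcg = css_to_blkcg(of_css(of));
 *		struct blkg_conf_ctx ctx;
 *		u64 limit;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &my_pol_policy, buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		ret = kstrtou64(ctx.body, 10, &limit);	// parse body after MAJ:MIN
 *		if (!ret)
 *			my_pol_apply_limit(ctx.blkg, limit);
 *
 *		blkg_conf_finish(&ctx);
 *		return ret ?: nbytes;
 *	}
 */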

static int blkcg_print_stat(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct blkcg_gq *blkg;

	rcu_read_lock();

	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		const char *dname;
		struct blkg_rwstat rwstat;
		u64 rbytes, wbytes, rios, wios;

		dname = blkg_dev_name(blkg);
		if (!dname)
			continue;

		spin_lock_irq(blkg->q->queue_lock);

		rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
					offsetof(struct blkcg_gq, stat_bytes));
		rbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
		wbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);

		rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
					offsetof(struct blkcg_gq, stat_ios));
		rios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
		wios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);

		spin_unlock_irq(blkg->q->queue_lock);

		if (rbytes || wbytes || rios || wios)
			seq_printf(sf, "%s rbytes=%llu wbytes=%llu rios=%llu wios=%llu\n",
				   dname, rbytes, wbytes, rios, wios);
	}

	rcu_read_unlock();
	return 0;
}

static struct cftype blkcg_files[] = {
	{
		.name = "stat",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = blkcg_print_stat,
	},
	{ }	/* terminate */
};

static struct cftype blkcg_legacy_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};

/**
 * blkcg_css_offline - cgroup css_offline callback
 * @css: css of interest
 *
 * This function is called when @css is about to go away and responsible
 * for shooting down all blkgs associated with @css.  blkgs should be
 * removed while holding both q and blkcg locks.  As blkcg lock is nested
 * inside q lock, this function performs reverse double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static void blkcg_css_offline(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						struct blkcg_gq, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);

	wb_blkcg_offline(blkcg);
}

static void blkcg_css_free(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	int i;

	mutex_lock(&blkcg_pol_mutex);

	list_del(&blkcg->all_blkcgs_node);

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);

	mutex_unlock(&blkcg_pol_mutex);

	kfree(blkcg);
}

static struct cgroup_subsys_state *
blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct blkcg *blkcg;
	struct cgroup_subsys_state *ret;
	int i;

	mutex_lock(&blkcg_pol_mutex);

	if (!parent_css) {
		blkcg = &blkcg_root;
	} else {
		blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
		if (!blkcg) {
			ret = ERR_PTR(-ENOMEM);
			goto free_blkcg;
		}
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkcg_policy_data *cpd;

		/*
		 * If the policy hasn't been registered yet, skip it.
		 * Otherwise, check whether the policy requires any
		 * specific per-cgroup data: if it does, allocate and
		 * initialize it.
		 */
		if (!pol || !pol->cpd_alloc_fn)
			continue;

		cpd = pol->cpd_alloc_fn(GFP_KERNEL);
		if (!cpd) {
			ret = ERR_PTR(-ENOMEM);
			goto free_pd_blkcg;
		}
		blkcg->cpd[i] = cpd;
		cpd->blkcg = blkcg;
		cpd->plid = i;
		if (pol->cpd_init_fn)
			pol->cpd_init_fn(cpd);
	}

	spin_lock_init(&blkcg->lock);
	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
	INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
	INIT_LIST_HEAD(&blkcg->cgwb_list);
#endif
	list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);

	mutex_unlock(&blkcg_pol_mutex);
	return &blkcg->css;

free_pd_blkcg:
	for (i--; i >= 0; i--)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
free_blkcg:
	if (blkcg != &blkcg_root)	/* root blkcg is statically allocated */
		kfree(blkcg);
	mutex_unlock(&blkcg_pol_mutex);
	return ret;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	struct blkcg_gq *new_blkg, *blkg;
	bool preloaded;
	int ret;

	new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
	if (!new_blkg)
		return -ENOMEM;

	preloaded = !radix_tree_preload(GFP_KERNEL);

	/*
	 * Make sure the root blkg exists.  As @q is bypassing at this
	 * point, blkg_lookup_create() can't be used.  Open code insertion.
	 */
	rcu_read_lock();
	spin_lock_irq(q->queue_lock);
	blkg = blkg_create(&blkcg_root, q, new_blkg);
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	if (preloaded)
		radix_tree_preload_end();

	/* @new_blkg is always consumed by blkg_create(), don't free it again */
	if (IS_ERR(blkg))
		return PTR_ERR(blkg);

	q->root_blkg = blkg;
	q->root_rl.blkg = blkg;

	ret = blk_throtl_init(q);
	if (ret) {
		spin_lock_irq(q->queue_lock);
		blkg_destroy_all(q);
		spin_unlock_irq(q->queue_lock);
	}
	return ret;
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	/*
	 * @q could be exiting and already have destroyed all blkgs as
	 * indicated by NULL root_blkg.  If so, don't confuse policies.
	 */
	if (!q->root_blkg)
		return;

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	blkg_destroy_all(q);
	spin_unlock_irq(q->queue_lock);

	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct cgroup_subsys_state *dst_css;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, dst_css, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

static void blkcg_bind(struct cgroup_subsys_state *root_css)
{
	int i;

	mutex_lock(&blkcg_pol_mutex);

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkcg *blkcg;

		if (!pol || !pol->cpd_bind_fn)
			continue;

		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node)
			if (blkcg->cpd[pol->plid])
				pol->cpd_bind_fn(blkcg->cpd[pol->plid]);
	}
	mutex_unlock(&blkcg_pol_mutex);
}

struct cgroup_subsys io_cgrp_subsys = {
	.css_alloc = blkcg_css_alloc,
	.css_offline = blkcg_css_offline,
	.css_free = blkcg_css_free,
	.can_attach = blkcg_can_attach,
	.bind = blkcg_bind,
	.dfl_cftypes = blkcg_files,
	.legacy_cftypes = blkcg_legacy_files,
	.legacy_name = "blkio",
#ifdef CONFIG_MEMCG
	/*
	 * This ensures that, if available, memcg is automatically enabled
	 * together on the default hierarchy so that the owner cgroup can
	 * be retrieved from writeback pages.
	 */
	.depends_on = 1 << memory_cgrp_id,
#endif
};
EXPORT_SYMBOL_GPL(io_cgrp_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from IO path.  Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol)
{
	struct blkg_policy_data *pd_prealloc = NULL;
	struct blkcg_gq *blkg;
	int ret;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	blk_queue_bypass_start(q);
pd_prealloc:
	if (!pd_prealloc) {
		pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q->node);
		if (!pd_prealloc) {
			ret = -ENOMEM;
			goto out_bypass_end;
		}
	}

	spin_lock_irq(q->queue_lock);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		struct blkg_policy_data *pd;

		if (blkg->pd[pol->plid])
			continue;

		pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q->node);
		if (!pd)
			swap(pd, pd_prealloc);
		if (!pd) {
			spin_unlock_irq(q->queue_lock);
			goto pd_prealloc;
		}

		blkg->pd[pol->plid] = pd;
		pd->blkg = blkg;
		pd->plid = pol->plid;
		if (pol->pd_init_fn)
			pol->pd_init_fn(pd);
	}

	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;

	spin_unlock_irq(q->queue_lock);
out_bypass_end:
	blk_queue_bypass_end(q);
	if (pd_prealloc)
		pol->pd_free_fn(pd_prealloc);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);
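
/*
 * Example (illustrative sketch): a policy typically activates itself on a
 * queue from its attach path and deactivates on teardown; the "my_pol"
 * names are hypothetical:
 *
 *	ret = blkcg_activate_policy(q, &my_pol_policy);
 *	if (ret)
 *		return ret;
 *	...
 *	blkcg_deactivate_policy(q, &my_pol_policy);
 */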

/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q.  Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol)
{
	struct blkcg_gq *blkg;

	if (!blkcg_policy_enabled(q, pol))
		return;

	blk_queue_bypass_start(q);
	spin_lock_irq(q->queue_lock);

	__clear_bit(pol->plid, q->blkcg_pols);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		/* grab blkcg lock too while removing @pd from @blkg */
		spin_lock(&blkg->blkcg->lock);

		if (blkg->pd[pol->plid]) {
			if (pol->pd_offline_fn)
				pol->pd_offline_fn(blkg->pd[pol->plid]);
			pol->pd_free_fn(blkg->pd[pol->plid]);
			blkg->pd[pol->plid] = NULL;
		}

		spin_unlock(&blkg->blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
	blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);

/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
 * successful registration.  Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
	struct blkcg *blkcg;
	int i, ret;

	mutex_lock(&blkcg_pol_register_mutex);
	mutex_lock(&blkcg_pol_mutex);

	/* find an empty slot */
	ret = -ENOSPC;
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!blkcg_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS)
		goto err_unlock;

	/* register @pol */
	pol->plid = i;
	blkcg_policy[pol->plid] = pol;

	/* allocate and install cpd's */
	if (pol->cpd_alloc_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			struct blkcg_policy_data *cpd;

			cpd = pol->cpd_alloc_fn(GFP_KERNEL);
			if (!cpd)
				goto err_free_cpds;

			blkcg->cpd[pol->plid] = cpd;
			cpd->blkcg = blkcg;
			cpd->plid = pol->plid;
			if (pol->cpd_init_fn)
				pol->cpd_init_fn(cpd);
		}
	}

	mutex_unlock(&blkcg_pol_mutex);

	/* everything is in place, add intf files for the new policy */
	if (pol->dfl_cftypes)
		WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
					       pol->dfl_cftypes));
	if (pol->legacy_cftypes)
		WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
						  pol->legacy_cftypes));
	mutex_unlock(&blkcg_pol_register_mutex);
	return 0;

err_free_cpds:
	if (pol->cpd_alloc_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			if (blkcg->cpd[pol->plid]) {
				pol->cpd_free_fn(blkcg->cpd[pol->plid]);
				blkcg->cpd[pol->plid] = NULL;
			}
		}
	}
	blkcg_policy[pol->plid] = NULL;
err_unlock:
	mutex_unlock(&blkcg_pol_mutex);
	mutex_unlock(&blkcg_pol_register_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);
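
/*
 * Example (illustrative sketch): a policy registers once at module init
 * and unregisters on exit; the "my_pol" names are hypothetical:
 *
 *	static struct blkcg_policy my_pol_policy = {
 *		.dfl_cftypes	= my_pol_files,
 *		.legacy_cftypes	= my_pol_legacy_files,
 *		.pd_alloc_fn	= my_pol_pd_alloc,
 *		.pd_init_fn	= my_pol_pd_init,
 *		.pd_free_fn	= my_pol_pd_free,
 *	};
 *
 *	static int __init my_pol_init(void)
 *	{
 *		return blkcg_policy_register(&my_pol_policy);
 *	}
 *	module_init(my_pol_init);
 *
 *	static void __exit my_pol_exit(void)
 *	{
 *		blkcg_policy_unregister(&my_pol_policy);
 *	}
 *	module_exit(my_pol_exit);
 */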

/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
	struct blkcg *blkcg;

	mutex_lock(&blkcg_pol_register_mutex);

	if (WARN_ON(blkcg_policy[pol->plid] != pol))
		goto out_unlock;

	/* kill the intf files first */
	if (pol->dfl_cftypes)
		cgroup_rm_cftypes(pol->dfl_cftypes);
	if (pol->legacy_cftypes)
		cgroup_rm_cftypes(pol->legacy_cftypes);

	/* remove cpds and unregister */
	mutex_lock(&blkcg_pol_mutex);

	if (pol->cpd_alloc_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			if (blkcg->cpd[pol->plid]) {
				pol->cpd_free_fn(blkcg->cpd[pol->plid]);
				blkcg->cpd[pol->plid] = NULL;
			}
		}
	}
	blkcg_policy[pol->plid] = NULL;

	mutex_unlock(&blkcg_pol_mutex);
out_unlock:
	mutex_unlock(&blkcg_pol_register_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
1422