xref: /linux/block/blk-cgroup.c (revision 08ec212c0f92cbf30e3ecc7349f18151714041d6)
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 * 	              Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkcg_root);

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd = blkg->pd[i];

		if (!pd)
			continue;

		if (pol && pol->pd_exit_fn)
			pol->pd_exit_fn(blkg);

		kfree(pd);
	}

	blk_exit_rl(&blkg->rl);
	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
				   gfp_t gfp_mask)
{
	struct blkcg_gq *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
	if (!blkg)
		return NULL;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	blkg->refcnt = 1;

	/* root blkg uses @q->root_rl, init rl only for !root blkgs */
	if (blkcg != &blkcg_root) {
		if (blk_init_rl(&blkg->rl, q, gfp_mask))
			goto err_free;
		blkg->rl.blkg = blkg;
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(q, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(pol->pd_size, gfp_mask, q->node);
		if (!pd)
			goto err_free;

		blkg->pd[i] = pd;
		pd->blkg = blkg;

		/* invoke per-policy init */
		if (blkcg_policy_enabled(blkg->q, pol))
			pol->pd_init_fn(blkg);
	}

	return blkg;

err_free:
	blkg_free(blkg);
	return NULL;
}

static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
				      struct request_queue *q)
{
	struct blkcg_gq *blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	/*
	 * Hint didn't match.  Look up from the radix tree.  Note that we
	 * may not be holding queue_lock and thus are not sure whether
	 * @blkg from blkg_tree has already been removed or not, so we
	 * can't update hint to the lookup result.  Leave it to the caller.
	 */
	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q == q)
		return blkg;

	return NULL;
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(blk_queue_bypass(q)))
		return NULL;
	return __blkg_lookup(blkcg, q);
}
EXPORT_SYMBOL_GPL(blkg_lookup);
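
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * looks up the blkg for a bio's blkcg under the RCU read lock, e.g. from
 * the bio submission or request allocation path.  @example_pol is a
 * hypothetical policy used only for illustration.
 *
 *	rcu_read_lock();
 *	blkcg = bio_blkcg(bio);
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg)
 *		pd = blkg->pd[example_pol.plid];
 *	rcu_read_unlock();
 */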

/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_ATOMIC.  @new_blkg is always consumed on return.
 */
static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
					     struct request_queue *q,
					     struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	int ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/* lookup and update hint on success, see __blkg_lookup() for details */
	blkg = __blkg_lookup(blkcg, q);
	if (blkg) {
		rcu_assign_pointer(blkcg->blkg_hint, blkg);
		goto out_free;
	}

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css)) {
		blkg = ERR_PTR(-EINVAL);
		goto out_free;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC);
		if (unlikely(!new_blkg)) {
			blkg = ERR_PTR(-ENOMEM);
			goto out_put;
		}
	}
	blkg = new_blkg;

	/* insert */
	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &q->blkg_list);
	}
	spin_unlock(&blkcg->lock);

	if (!ret)
		return blkg;

	blkg = ERR_PTR(ret);
out_put:
	css_put(&blkcg->css);
out_free:
	blkg_free(new_blkg);
	return blkg;
}

struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q)
{
	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
	return __blkg_lookup_create(blkcg, q, NULL);
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);

static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct blkcg *blkcg = blkg->blkcg;

	lockdep_assert_held(blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something is wrong if we are trying to remove the same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Both setting lookup hint to and clearing it from @blkg are done
	 * under queue_lock.  If it's not pointing to @blkg now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_dereference_raw(blkcg->blkg_hint) == blkg)
		rcu_assign_pointer(blkcg->blkg_hint, NULL);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
	struct blkcg_gq *blkg, *n;

	lockdep_assert_held(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}
}

static void blkg_rcu_free(struct rcu_head *rcu_head)
{
	blkg_free(container_of(rcu_head, struct blkcg_gq, rcu_head));
}

void __blkg_release(struct blkcg_gq *blkg)
{
	/* release the extra blkcg reference this blkg has been holding */
	css_put(&blkg->blkcg->css);

	/*
	 * A group is RCU-freed, but holding the RCU read lock alone does not
	 * mean that one can access all the fields of blkg and assume they
	 * are valid.  For example, don't try to follow throtl_data and
	 * request queue links.
	 *
	 * Having a reference to blkg under RCU only allows access to values
	 * local to the group, like group stats and group rate limits.
	 */
	call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);

/*
 * The "next" function used by blk_queue_for_each_rl().  It's a bit tricky
 * because the root blkg uses @q->root_rl instead of its own rl.
 */
struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q)
{
	struct list_head *ent;
	struct blkcg_gq *blkg;

	/*
	 * Determine the current blkg list_head.  The first entry is
	 * root_rl which is off @q->blkg_list and mapped to the head.
	 */
	if (rl == &q->root_rl) {
		ent = &q->blkg_list;
	} else {
		blkg = container_of(rl, struct blkcg_gq, rl);
		ent = &blkg->q_node;
	}

	/* walk to the next list_head, skip root blkcg */
	ent = ent->next;
	if (ent == &q->root_blkg->q_node)
		ent = ent->next;
	if (ent == &q->blkg_list)
		return NULL;

	blkg = container_of(ent, struct blkcg_gq, q_node);
	return &blkg->rl;
}
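
/*
 * For reference, the blk_queue_for_each_rl() iterator built on top of
 * __blk_queue_next_rl() lives in blk.h and, at this revision, looks
 * roughly like the sketch below: start from @q->root_rl and keep calling
 * __blk_queue_next_rl() until it returns NULL.
 *
 *	#define blk_queue_for_each_rl(rl, q)	\
 *		for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
 */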

static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
			     u64 val)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
	struct blkcg_gq *blkg;
	struct hlist_node *n;
	int i;

	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkcg_policy_enabled(blkg->q, pol) &&
			    pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

static const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: whether to print out the sum of the prfill return values
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data.  If @show_total is %true, the sum of the return
 * values from @prfill is printed with "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	struct hlist_node *n;
	u64 total = 0;

	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
	spin_unlock_irq(&blkcg->lock);

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
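
/*
 * Illustrative sketch (not part of the original file): a policy would
 * typically wrap this helper in its cftype->read_seq_string method.
 * blkcg_policy_example and the use of cft->private as the stat offset are
 * assumptions for the example only.
 *
 *	static int example_print_stat(struct cgroup *cgrp, struct cftype *cft,
 *				      struct seq_file *sf)
 *	{
 *		struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 *
 *		blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat,
 *				  &blkcg_policy_example, cft->private, false);
 *		return 0;
 *	}
 */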

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
	};
	const char *dname = blkg_dev_name(pd->blkg);
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)rwstat->cnt[i]);

	v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}
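
/*
 * For a device whose backing_dev_info is registered as "8:16", the output
 * produced by __blkg_prfill_rwstat() would look roughly like the sample
 * below (values are made up for illustration; "Total" is the sum of the
 * Read and Write counters):
 *
 *	8:16 Read 8192
 *	8:16 Write 4096
 *	8:16 Sync 12288
 *	8:16 Async 0
 *	8:16 Total 12288
 */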

/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
	return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->v the new
 * value.  This function returns with RCU read lock and queue lock held and
 * must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx)
	__acquires(rcu) __acquires(disk->queue->queue_lock)
{
	struct gendisk *disk;
	struct blkcg_gq *blkg;
	unsigned int major, minor;
	unsigned long long v;
	int part, ret;

	if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
		return -EINVAL;

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk || part)
		return -EINVAL;

	rcu_read_lock();
	spin_lock_irq(disk->queue->queue_lock);

	if (blkcg_policy_enabled(disk->queue, pol))
		blkg = blkg_lookup_create(blkcg, disk->queue);
	else
		blkg = ERR_PTR(-EINVAL);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		rcu_read_unlock();
		spin_unlock_irq(disk->queue->queue_lock);
		put_disk(disk);
		/*
		 * If queue was bypassing, we should retry.  Do so after a
		 * short msleep().  It isn't strictly necessary but queue
		 * can be bypassing for some time and it's always nice to
		 * avoid busy looping.
		 */
		if (ret == -EBUSY) {
			msleep(10);
			ret = restart_syscall();
		}
		return ret;
	}

	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->v = v;
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
	spin_unlock_irq(ctx->disk->queue->queue_lock);
	rcu_read_unlock();
	put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
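
/*
 * Illustrative sketch (not part of the original file): a policy's
 * per-device configuration write handler parses "MAJ:MIN VAL" input via
 * blkg_conf_prep(), updates its private data, and then drops the locks
 * with blkg_conf_finish().  blkcg_policy_example, struct example_group
 * and blkg_to_example() are assumptions for the example only.
 *
 *	static int example_set_limit(struct cgroup *cgrp, struct cftype *cft,
 *				     const char *buf)
 *	{
 *		struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 *		struct blkg_conf_ctx ctx;
 *		struct example_group *eg;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &blkcg_policy_example, buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		eg = blkg_to_example(ctx.blkg);
 *		eg->limit = ctx.v;
 *
 *		blkg_conf_finish(&ctx);
 *		return 0;
 *	}
 */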

struct cftype blkcg_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};

/**
 * blkcg_pre_destroy - cgroup pre_destroy callback
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and is responsible
 * for shooting down all blkgs associated with @cgroup.  blkgs should be
 * removed while holding both q and blkcg locks.  As blkcg lock is nested
 * inside q lock, this function performs reverse double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static int blkcg_pre_destroy(struct cgroup *cgroup)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						struct blkcg_gq, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}

static void blkcg_destroy(struct cgroup *cgroup)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);

	if (blkcg != &blkcg_root)
		kfree(blkcg);
}

static struct cgroup_subsys_state *blkcg_create(struct cgroup *cgroup)
{
	static atomic64_t id_seq = ATOMIC64_INIT(0);
	struct blkcg *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkcg_root;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
	blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
	spin_lock_init(&blkcg->lock);
	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	might_sleep();

	return blk_throtl_init(q);
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	blkg_destroy_all(q);
	spin_unlock_irq(q->queue_lock);

	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkcg_create,
	.can_attach = blkcg_can_attach,
	.pre_destroy = blkcg_pre_destroy,
	.destroy = blkcg_destroy,
	.subsys_id = blkio_subsys_id,
	.base_cftypes = blkcg_files,
	.module = THIS_MODULE,

	/*
	 * blkio subsystem is utterly broken in terms of hierarchy support.
	 * It treats all cgroups equally regardless of where they're
	 * located in the hierarchy - all cgroups are treated as if they're
	 * right below the root.  Fix it and remove the following.
	 */
	.broken_hierarchy = true,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from IO path.  Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol)
{
	LIST_HEAD(pds);
	struct blkcg_gq *blkg;
	struct blkg_policy_data *pd, *n;
	int cnt = 0, ret;
	bool preloaded;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	/* preallocations for root blkg */
	blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
	if (!blkg)
		return -ENOMEM;

	preloaded = !radix_tree_preload(GFP_KERNEL);

	blk_queue_bypass_start(q);

	/* make sure the root blkg exists and count the existing blkgs */
	spin_lock_irq(q->queue_lock);

	rcu_read_lock();
	blkg = __blkg_lookup_create(&blkcg_root, q, blkg);
	rcu_read_unlock();

	if (preloaded)
		radix_tree_preload_end();

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto out_unlock;
	}
	q->root_blkg = blkg;
	q->root_rl.blkg = blkg;

	list_for_each_entry(blkg, &q->blkg_list, q_node)
		cnt++;

	spin_unlock_irq(q->queue_lock);

	/* allocate policy_data for all existing blkgs */
	while (cnt--) {
		pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
		if (!pd) {
			ret = -ENOMEM;
			goto out_free;
		}
		list_add_tail(&pd->alloc_node, &pds);
	}

	/*
	 * Install the allocated pds.  With @q bypassing, no new blkg
	 * should have been created while the queue lock was dropped.
	 */
	spin_lock_irq(q->queue_lock);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		if (WARN_ON(list_empty(&pds))) {
			/* umm... this shouldn't happen, just abort */
			ret = -ENOMEM;
			goto out_unlock;
		}
		pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
		list_del_init(&pd->alloc_node);

		/* grab blkcg lock too while installing @pd on @blkg */
		spin_lock(&blkg->blkcg->lock);

		blkg->pd[pol->plid] = pd;
		pd->blkg = blkg;
		pol->pd_init_fn(blkg);

		spin_unlock(&blkg->blkcg->lock);
	}

	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;
out_unlock:
	spin_unlock_irq(q->queue_lock);
out_free:
	blk_queue_bypass_end(q);
	list_for_each_entry_safe(pd, n, &pds, alloc_node)
		kfree(pd);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);

/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q.  Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol)
{
	struct blkcg_gq *blkg;

	if (!blkcg_policy_enabled(q, pol))
		return;

	blk_queue_bypass_start(q);
	spin_lock_irq(q->queue_lock);

	__clear_bit(pol->plid, q->blkcg_pols);

	/* if no policy is left, no need for blkgs - shoot them down */
	if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS))
		blkg_destroy_all(q);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		/* grab blkcg lock too while removing @pd from @blkg */
		spin_lock(&blkg->blkcg->lock);

		if (pol->pd_exit_fn)
			pol->pd_exit_fn(blkg);

		kfree(blkg->pd[pol->plid]);
		blkg->pd[pol->plid] = NULL;

		spin_unlock(&blkg->blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
	blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
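
/*
 * Illustrative sketch (not part of the original file): a policy activates
 * itself on a queue from its per-queue init path and deactivates on exit;
 * blk-throttle, for example, does this from blk_throtl_init() and
 * blk_throtl_exit() at this revision.  blkcg_policy_example is an
 * assumption for the example only.
 *
 *	int example_init_queue(struct request_queue *q)
 *	{
 *		return blkcg_activate_policy(q, &blkcg_policy_example);
 *	}
 *
 *	void example_exit_queue(struct request_queue *q)
 *	{
 *		blkcg_deactivate_policy(q, &blkcg_policy_example);
 *	}
 */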

/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
 * successful registration.  Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
	int i, ret;

	if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
		return -EINVAL;

	mutex_lock(&blkcg_pol_mutex);

	/* find an empty slot */
	ret = -ENOSPC;
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!blkcg_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS)
		goto out_unlock;

	/* register and update blkgs */
	pol->plid = i;
	blkcg_policy[i] = pol;

	/* everything is in place, add intf files for the new policy */
	if (pol->cftypes)
		WARN_ON(cgroup_add_cftypes(&blkio_subsys, pol->cftypes));
	ret = 0;
out_unlock:
	mutex_unlock(&blkcg_pol_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);
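
/*
 * Illustrative sketch (not part of the original file): a policy describes
 * itself with a struct blkcg_policy whose pd_size covers its per-blkg data
 * (which embeds struct blkg_policy_data), and registers/unregisters it from
 * its module init/exit.  All example_* names are assumptions.
 *
 *	struct example_group {
 *		struct blkg_policy_data pd;
 *		u64 limit;
 *	};
 *
 *	static struct blkcg_policy blkcg_policy_example = {
 *		.pd_size	= sizeof(struct example_group),
 *		.cftypes	= example_files,
 *		.pd_init_fn	= example_pd_init,
 *		.pd_exit_fn	= example_pd_exit,
 *		.pd_reset_stats_fn = example_pd_reset_stats,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return blkcg_policy_register(&blkcg_policy_example);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		blkcg_policy_unregister(&blkcg_policy_example);
 *	}
 */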

/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
	mutex_lock(&blkcg_pol_mutex);

	if (WARN_ON(blkcg_policy[pol->plid] != pol))
		goto out_unlock;

	/* kill the intf files first */
	if (pol->cftypes)
		cgroup_rm_cftypes(&blkio_subsys, pol->cftypes);

	/* unregister and update blkgs */
	blkcg_policy[pol->plid] = NULL;
out_unlock:
	mutex_unlock(&blkcg_pol_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
961