xref: /linux/block/blk-cgroup.c (revision f2ee442115c9b6219083c019939a9cc0c9abb2f8)
1 /*
2  * Common Block IO controller cgroup interface
3  *
4  * Based on ideas and code from CFQ, CFS and BFQ:
5  * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
6  *
7  * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
8  *		      Paolo Valente <paolo.valente@unimore.it>
9  *
10  * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
11  * 	              Nauman Rafique <nauman@google.com>
12  */
13 #include <linux/ioprio.h>
14 #include <linux/seq_file.h>
15 #include <linux/kdev_t.h>
16 #include <linux/module.h>
17 #include <linux/err.h>
18 #include <linux/blkdev.h>
19 #include <linux/slab.h>
20 #include "blk-cgroup.h"
21 #include <linux/genhd.h>
22 
23 #define MAX_KEY_LEN 100
24 
25 static DEFINE_SPINLOCK(blkio_list_lock);
26 static LIST_HEAD(blkio_list);
27 
28 struct blkio_cgroup blkio_root_cgroup = { .weight = 2 * BLKIO_WEIGHT_DEFAULT };
29 EXPORT_SYMBOL_GPL(blkio_root_cgroup);
30 
31 static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
32 						  struct cgroup *);
33 static int blkiocg_can_attach_task(struct cgroup *, struct task_struct *);
34 static void blkiocg_attach_task(struct cgroup *, struct task_struct *);
35 static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
36 static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
37 
38 /* for encoding the cft->private value of a cgroup file */
39 #define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
40 /* Which policy owns the file: proportional or throttle */
41 #define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
42 #define BLKIOFILE_ATTR(val)		((val) & 0xffff)
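
/*
 * Example (illustrative): for the blkio.throttle.read_bps_device file
 * defined in blkio_files[] below, cft->private is
 * BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL, BLKIO_THROTL_read_bps_device),
 * i.e. the policy id in the upper 16 bits and the per-file attribute in
 * the lower 16 bits; BLKIOFILE_POLICY() and BLKIOFILE_ATTR() unpack the
 * two halves again when the file is read or written.
 */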
43 
44 struct cgroup_subsys blkio_subsys = {
45 	.name = "blkio",
46 	.create = blkiocg_create,
47 	.can_attach_task = blkiocg_can_attach_task,
48 	.attach_task = blkiocg_attach_task,
49 	.destroy = blkiocg_destroy,
50 	.populate = blkiocg_populate,
51 #ifdef CONFIG_BLK_CGROUP
52 	/* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
53 	.subsys_id = blkio_subsys_id,
54 #endif
55 	.use_id = 1,
56 	.module = THIS_MODULE,
57 };
58 EXPORT_SYMBOL_GPL(blkio_subsys);
59 
60 static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
61 					    struct blkio_policy_node *pn)
62 {
63 	list_add(&pn->node, &blkcg->policy_list);
64 }
65 
66 static inline bool cftype_blkg_same_policy(struct cftype *cft,
67 			struct blkio_group *blkg)
68 {
69 	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
70 
71 	if (blkg->plid == plid)
72 		return 1;
73 
74 	return 0;
75 }
76 
77 /* Determines if policy node matches cgroup file being accessed */
78 static inline bool pn_matches_cftype(struct cftype *cft,
79 			struct blkio_policy_node *pn)
80 {
81 	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
82 	int fileid = BLKIOFILE_ATTR(cft->private);
83 
84 	return (plid == pn->plid && fileid == pn->fileid);
85 }
86 
87 /* Must be called with blkcg->lock held */
88 static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
89 {
90 	list_del(&pn->node);
91 }
92 
93 /* Must be called with blkcg->lock held */
94 static struct blkio_policy_node *
95 blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev,
96 		enum blkio_policy_id plid, int fileid)
97 {
98 	struct blkio_policy_node *pn;
99 
100 	list_for_each_entry(pn, &blkcg->policy_list, node) {
101 		if (pn->dev == dev && pn->plid == plid && pn->fileid == fileid)
102 			return pn;
103 	}
104 
105 	return NULL;
106 }
107 
108 struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
109 {
110 	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
111 			    struct blkio_cgroup, css);
112 }
113 EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
114 
115 struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
116 {
117 	return container_of(task_subsys_state(tsk, blkio_subsys_id),
118 			    struct blkio_cgroup, css);
119 }
120 EXPORT_SYMBOL_GPL(task_blkio_cgroup);
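
/*
 * Both helpers above only unpack the subsystem state; the caller must keep
 * the css alive, typically by holding rcu_read_lock(), e.g. (illustrative):
 *
 *	rcu_read_lock();
 *	blkcg = task_blkio_cgroup(current);
 *	... use blkcg ...
 *	rcu_read_unlock();
 */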
121 
122 static inline void
123 blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
124 {
125 	struct blkio_policy_type *blkiop;
126 
127 	list_for_each_entry(blkiop, &blkio_list, list) {
128 		/* If this policy does not own the blkg, do not send updates */
129 		if (blkiop->plid != blkg->plid)
130 			continue;
131 		if (blkiop->ops.blkio_update_group_weight_fn)
132 			blkiop->ops.blkio_update_group_weight_fn(blkg->key,
133 							blkg, weight);
134 	}
135 }
136 
137 static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
138 				int fileid)
139 {
140 	struct blkio_policy_type *blkiop;
141 
142 	list_for_each_entry(blkiop, &blkio_list, list) {
143 
144 		/* If this policy does not own the blkg, do not send updates */
145 		if (blkiop->plid != blkg->plid)
146 			continue;
147 
148 		if (fileid == BLKIO_THROTL_read_bps_device
149 		    && blkiop->ops.blkio_update_group_read_bps_fn)
150 			blkiop->ops.blkio_update_group_read_bps_fn(blkg->key,
151 								blkg, bps);
152 
153 		if (fileid == BLKIO_THROTL_write_bps_device
154 		    && blkiop->ops.blkio_update_group_write_bps_fn)
155 			blkiop->ops.blkio_update_group_write_bps_fn(blkg->key,
156 								blkg, bps);
157 	}
158 }
159 
160 static inline void blkio_update_group_iops(struct blkio_group *blkg,
161 			unsigned int iops, int fileid)
162 {
163 	struct blkio_policy_type *blkiop;
164 
165 	list_for_each_entry(blkiop, &blkio_list, list) {
166 
167 		/* If this policy does not own the blkg, do not send updates */
168 		if (blkiop->plid != blkg->plid)
169 			continue;
170 
171 		if (fileid == BLKIO_THROTL_read_iops_device
172 		    && blkiop->ops.blkio_update_group_read_iops_fn)
173 			blkiop->ops.blkio_update_group_read_iops_fn(blkg->key,
174 								blkg, iops);
175 
176 		if (fileid == BLKIO_THROTL_write_iops_device
177 		    && blkiop->ops.blkio_update_group_write_iops_fn)
178 			blkiop->ops.blkio_update_group_write_iops_fn(blkg->key,
179 								blkg, iops);
180 	}
181 }
182 
183 /*
184  * Add to the appropriate stat variable depending on the request type.
185  * This should be called with the blkg->stats_lock held.
186  */
187 static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
188 				bool sync)
189 {
190 	if (direction)
191 		stat[BLKIO_STAT_WRITE] += add;
192 	else
193 		stat[BLKIO_STAT_READ] += add;
194 	if (sync)
195 		stat[BLKIO_STAT_SYNC] += add;
196 	else
197 		stat[BLKIO_STAT_ASYNC] += add;
198 }
199 
200 /*
201  * Decrements the appropriate stat variable, depending on the request type,
202  * and BUGs if the value is already zero.
203  * This should be called with the blkg->stats_lock held.
204  */
205 static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
206 {
207 	if (direction) {
208 		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
209 		stat[BLKIO_STAT_WRITE]--;
210 	} else {
211 		BUG_ON(stat[BLKIO_STAT_READ] == 0);
212 		stat[BLKIO_STAT_READ]--;
213 	}
214 	if (sync) {
215 		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
216 		stat[BLKIO_STAT_SYNC]--;
217 	} else {
218 		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
219 		stat[BLKIO_STAT_ASYNC]--;
220 	}
221 }
222 
223 #ifdef CONFIG_DEBUG_BLK_CGROUP
224 /* This should be called with the blkg->stats_lock held. */
225 static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
226 						struct blkio_group *curr_blkg)
227 {
228 	if (blkio_blkg_waiting(&blkg->stats))
229 		return;
230 	if (blkg == curr_blkg)
231 		return;
232 	blkg->stats.start_group_wait_time = sched_clock();
233 	blkio_mark_blkg_waiting(&blkg->stats);
234 }
235 
236 /* This should be called with the blkg->stats_lock held. */
237 static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
238 {
239 	unsigned long long now;
240 
241 	if (!blkio_blkg_waiting(stats))
242 		return;
243 
244 	now = sched_clock();
245 	if (time_after64(now, stats->start_group_wait_time))
246 		stats->group_wait_time += now - stats->start_group_wait_time;
247 	blkio_clear_blkg_waiting(stats);
248 }
249 
250 /* This should be called with the blkg->stats_lock held. */
251 static void blkio_end_empty_time(struct blkio_group_stats *stats)
252 {
253 	unsigned long long now;
254 
255 	if (!blkio_blkg_empty(stats))
256 		return;
257 
258 	now = sched_clock();
259 	if (time_after64(now, stats->start_empty_time))
260 		stats->empty_time += now - stats->start_empty_time;
261 	blkio_clear_blkg_empty(stats);
262 }
263 
264 void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
265 {
266 	unsigned long flags;
267 
268 	spin_lock_irqsave(&blkg->stats_lock, flags);
269 	BUG_ON(blkio_blkg_idling(&blkg->stats));
270 	blkg->stats.start_idle_time = sched_clock();
271 	blkio_mark_blkg_idling(&blkg->stats);
272 	spin_unlock_irqrestore(&blkg->stats_lock, flags);
273 }
274 EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);
275 
276 void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
277 {
278 	unsigned long flags;
279 	unsigned long long now;
280 	struct blkio_group_stats *stats;
281 
282 	spin_lock_irqsave(&blkg->stats_lock, flags);
283 	stats = &blkg->stats;
284 	if (blkio_blkg_idling(stats)) {
285 		now = sched_clock();
286 		if (time_after64(now, stats->start_idle_time))
287 			stats->idle_time += now - stats->start_idle_time;
288 		blkio_clear_blkg_idling(stats);
289 	}
290 	spin_unlock_irqrestore(&blkg->stats_lock, flags);
291 }
292 EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
293 
294 void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
295 {
296 	unsigned long flags;
297 	struct blkio_group_stats *stats;
298 
299 	spin_lock_irqsave(&blkg->stats_lock, flags);
300 	stats = &blkg->stats;
301 	stats->avg_queue_size_sum +=
302 			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
303 			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
304 	stats->avg_queue_size_samples++;
305 	blkio_update_group_wait_time(stats);
306 	spin_unlock_irqrestore(&blkg->stats_lock, flags);
307 }
308 EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
309 
310 void blkiocg_set_start_empty_time(struct blkio_group *blkg)
311 {
312 	unsigned long flags;
313 	struct blkio_group_stats *stats;
314 
315 	spin_lock_irqsave(&blkg->stats_lock, flags);
316 	stats = &blkg->stats;
317 
318 	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
319 			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
320 		spin_unlock_irqrestore(&blkg->stats_lock, flags);
321 		return;
322 	}
323 
324 	/*
325 	 * The group is already marked empty. This can happen if a cfqq got a
326 	 * new request in the parent group and moved to this group while being
327 	 * added to the service tree. Just ignore the event and move on.
328 	 */
329 	if (blkio_blkg_empty(stats)) {
330 		spin_unlock_irqrestore(&blkg->stats_lock, flags);
331 		return;
332 	}
333 
334 	stats->start_empty_time = sched_clock();
335 	blkio_mark_blkg_empty(stats);
336 	spin_unlock_irqrestore(&blkg->stats_lock, flags);
337 }
338 EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
339 
340 void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
341 			unsigned long dequeue)
342 {
343 	blkg->stats.dequeue += dequeue;
344 }
345 EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
346 #else
347 static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
348 					struct blkio_group *curr_blkg) {}
349 static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
350 #endif
351 
352 void blkiocg_update_io_add_stats(struct blkio_group *blkg,
353 			struct blkio_group *curr_blkg, bool direction,
354 			bool sync)
355 {
356 	unsigned long flags;
357 
358 	spin_lock_irqsave(&blkg->stats_lock, flags);
359 	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
360 			sync);
361 	blkio_end_empty_time(&blkg->stats);
362 	blkio_set_start_group_wait_time(blkg, curr_blkg);
363 	spin_unlock_irqrestore(&blkg->stats_lock, flags);
364 }
365 EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);
366 
367 void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
368 						bool direction, bool sync)
369 {
370 	unsigned long flags;
371 
372 	spin_lock_irqsave(&blkg->stats_lock, flags);
373 	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
374 					direction, sync);
375 	spin_unlock_irqrestore(&blkg->stats_lock, flags);
376 }
377 EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
378 
379 void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
380 				unsigned long unaccounted_time)
381 {
382 	unsigned long flags;
383 
384 	spin_lock_irqsave(&blkg->stats_lock, flags);
385 	blkg->stats.time += time;
386 #ifdef CONFIG_DEBUG_BLK_CGROUP
387 	blkg->stats.unaccounted_time += unaccounted_time;
388 #endif
389 	spin_unlock_irqrestore(&blkg->stats_lock, flags);
390 }
391 EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
392 
393 /*
394  * Should be called under the RCU read lock or the queue lock to make sure
395  * the blkg pointer is valid.
396  */
397 void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
398 				uint64_t bytes, bool direction, bool sync)
399 {
400 	struct blkio_group_stats_cpu *stats_cpu;
401 	unsigned long flags;
402 
403 	/*
404 	 * Disable interrupts to provide mutual exclusion between two writes
405 	 * on the same CPU. This is probably not needed on 64-bit; that case
406 	 * is not optimized yet.
407 	 */
408 	local_irq_save(flags);
409 
410 	stats_cpu = this_cpu_ptr(blkg->stats_cpu);
411 
412 	u64_stats_update_begin(&stats_cpu->syncp);
413 	stats_cpu->sectors += bytes >> 9;
414 	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
415 			1, direction, sync);
416 	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
417 			bytes, direction, sync);
418 	u64_stats_update_end(&stats_cpu->syncp);
419 	local_irq_restore(flags);
420 }
421 EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
422 
423 void blkiocg_update_completion_stats(struct blkio_group *blkg,
424 	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
425 {
426 	struct blkio_group_stats *stats;
427 	unsigned long flags;
428 	unsigned long long now = sched_clock();
429 
430 	spin_lock_irqsave(&blkg->stats_lock, flags);
431 	stats = &blkg->stats;
432 	if (time_after64(now, io_start_time))
433 		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
434 				now - io_start_time, direction, sync);
435 	if (time_after64(io_start_time, start_time))
436 		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
437 				io_start_time - start_time, direction, sync);
438 	spin_unlock_irqrestore(&blkg->stats_lock, flags);
439 }
440 EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
441 
442 /* Merged stats are per-cpu. */
443 void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
444 					bool sync)
445 {
446 	struct blkio_group_stats_cpu *stats_cpu;
447 	unsigned long flags;
448 
449 	/*
450 	 * Disable interrupts to provide mutual exclusion between two writes
451 	 * on the same CPU. This is probably not needed on 64-bit; that case
452 	 * is not optimized yet.
453 	 */
454 	local_irq_save(flags);
455 
456 	stats_cpu = this_cpu_ptr(blkg->stats_cpu);
457 
458 	u64_stats_update_begin(&stats_cpu->syncp);
459 	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_MERGED], 1,
460 				direction, sync);
461 	u64_stats_update_end(&stats_cpu->syncp);
462 	local_irq_restore(flags);
463 }
464 EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
465 
466 /*
467  * This function allocates the per-cpu stats for a blkio_group. It must be
468  * called from a sleepable context, as alloc_percpu() requires that.
469  */
470 int blkio_alloc_blkg_stats(struct blkio_group *blkg)
471 {
472 	/* Allocate memory for per cpu stats */
473 	blkg->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
474 	if (!blkg->stats_cpu)
475 		return -ENOMEM;
476 	return 0;
477 }
478 EXPORT_SYMBOL_GPL(blkio_alloc_blkg_stats);
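
/*
 * Note: the per-cpu area allocated here is owned by the policy's group;
 * the policy is expected to free_percpu(blkg->stats_cpu) when it frees the
 * group (a convention of the callers, not enforced in this file).
 */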
479 
480 void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
481 		struct blkio_group *blkg, void *key, dev_t dev,
482 		enum blkio_policy_id plid)
483 {
484 	unsigned long flags;
485 
486 	spin_lock_irqsave(&blkcg->lock, flags);
487 	spin_lock_init(&blkg->stats_lock);
488 	rcu_assign_pointer(blkg->key, key);
489 	blkg->blkcg_id = css_id(&blkcg->css);
490 	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
491 	blkg->plid = plid;
492 	spin_unlock_irqrestore(&blkcg->lock, flags);
493 	/* Need to take css reference ? */
494 	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
495 	blkg->dev = dev;
496 }
497 EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);
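
/*
 * Illustrative sketch (hypothetical names) of how an I/O policy such as
 * CFQ or blk-throttle typically instantiates and registers a group:
 *
 *	struct blkio_group *blkg = &my_group->blkg;	/* embedded blkio_group */
 *
 *	if (blkio_alloc_blkg_stats(blkg))		/* sleepable context */
 *		goto err;
 *	...
 *	blkiocg_add_blkio_group(blkcg, blkg, (void *)my_policy_data,
 *				MKDEV(major, minor), BLKIO_POLICY_PROP);
 *
 * The key (my_policy_data above) is whatever pointer the policy later
 * passes to blkiocg_lookup_group() to find the group again.
 */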
498 
499 static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
500 {
501 	hlist_del_init_rcu(&blkg->blkcg_node);
502 	blkg->blkcg_id = 0;
503 }
504 
505 /*
506  * Returns 0 if the blkio_group was still on the cgroup list. Otherwise
507  * returns 1: the blkio_group was already unhashed by the time we got to it.
508  */
509 int blkiocg_del_blkio_group(struct blkio_group *blkg)
510 {
511 	struct blkio_cgroup *blkcg;
512 	unsigned long flags;
513 	struct cgroup_subsys_state *css;
514 	int ret = 1;
515 
516 	rcu_read_lock();
517 	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
518 	if (css) {
519 		blkcg = container_of(css, struct blkio_cgroup, css);
520 		spin_lock_irqsave(&blkcg->lock, flags);
521 		if (!hlist_unhashed(&blkg->blkcg_node)) {
522 			__blkiocg_del_blkio_group(blkg);
523 			ret = 0;
524 		}
525 		spin_unlock_irqrestore(&blkcg->lock, flags);
526 	}
527 
528 	rcu_read_unlock();
529 	return ret;
530 }
531 EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);
532 
533 /* called under rcu_read_lock(). */
534 struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
535 {
536 	struct blkio_group *blkg;
537 	struct hlist_node *n;
538 	void *__key;
539 
540 	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
541 		__key = blkg->key;
542 		if (__key == key)
543 			return blkg;
544 	}
545 
546 	return NULL;
547 }
548 EXPORT_SYMBOL_GPL(blkiocg_lookup_group);
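
/*
 * Illustrative lookup pattern (assumed caller code):
 *
 *	rcu_read_lock();
 *	blkg = blkiocg_lookup_group(blkcg, key);
 *	if (blkg)
 *		... blkg stays valid while the RCU read side is held ...
 *	rcu_read_unlock();
 */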
549 
550 static void blkio_reset_stats_cpu(struct blkio_group *blkg)
551 {
552 	struct blkio_group_stats_cpu *stats_cpu;
553 	int i, j, k;
554 	/*
555 	 * Note: on 64-bit architectures this should not be an issue. On
556 	 * 32-bit it may return an inconsistent value, because a 64-bit
557 	 * update is not atomic there. Handling this corner case would make
558 	 * the code very complicated (sending IPIs to CPUs, taking care of
559 	 * the stats of offline CPUs, etc.).
560 	 *
561 	 * Resetting stats is more of a debug feature anyway, and this is a
562 	 * corner case, so the code is not being complicated until and
563 	 * unless this becomes a real issue.
564 	 */
565 	for_each_possible_cpu(i) {
566 		stats_cpu = per_cpu_ptr(blkg->stats_cpu, i);
567 		stats_cpu->sectors = 0;
568 		for (j = 0; j < BLKIO_STAT_CPU_NR; j++)
569 			for (k = 0; k < BLKIO_STAT_TOTAL; k++)
570 				stats_cpu->stat_arr_cpu[j][k] = 0;
571 	}
572 }
573 
574 static int
575 blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
576 {
577 	struct blkio_cgroup *blkcg;
578 	struct blkio_group *blkg;
579 	struct blkio_group_stats *stats;
580 	struct hlist_node *n;
581 	uint64_t queued[BLKIO_STAT_TOTAL];
582 	int i;
583 #ifdef CONFIG_DEBUG_BLK_CGROUP
584 	bool idling, waiting, empty;
585 	unsigned long long now = sched_clock();
586 #endif
587 
588 	blkcg = cgroup_to_blkio_cgroup(cgroup);
589 	spin_lock_irq(&blkcg->lock);
590 	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
591 		spin_lock(&blkg->stats_lock);
592 		stats = &blkg->stats;
593 #ifdef CONFIG_DEBUG_BLK_CGROUP
594 		idling = blkio_blkg_idling(stats);
595 		waiting = blkio_blkg_waiting(stats);
596 		empty = blkio_blkg_empty(stats);
597 #endif
598 		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
599 			queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
600 		memset(stats, 0, sizeof(struct blkio_group_stats));
601 		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
602 			stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
603 #ifdef CONFIG_DEBUG_BLK_CGROUP
604 		if (idling) {
605 			blkio_mark_blkg_idling(stats);
606 			stats->start_idle_time = now;
607 		}
608 		if (waiting) {
609 			blkio_mark_blkg_waiting(stats);
610 			stats->start_group_wait_time = now;
611 		}
612 		if (empty) {
613 			blkio_mark_blkg_empty(stats);
614 			stats->start_empty_time = now;
615 		}
616 #endif
617 		spin_unlock(&blkg->stats_lock);
618 
619 		/* Reset per-cpu stats, which are not protected by blkg->stats_lock */
620 		blkio_reset_stats_cpu(blkg);
621 	}
622 
623 	spin_unlock_irq(&blkcg->lock);
624 	return 0;
625 }
626 
627 static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
628 				int chars_left, bool diskname_only)
629 {
630 	snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
631 	chars_left -= strlen(str);
632 	if (chars_left <= 0) {
633 		printk(KERN_WARNING
634 			"Possibly incorrect cgroup stat display format\n");
635 		return;
636 	}
637 	if (diskname_only)
638 		return;
639 	switch (type) {
640 	case BLKIO_STAT_READ:
641 		strlcat(str, " Read", chars_left);
642 		break;
643 	case BLKIO_STAT_WRITE:
644 		strlcat(str, " Write", chars_left);
645 		break;
646 	case BLKIO_STAT_SYNC:
647 		strlcat(str, " Sync", chars_left);
648 		break;
649 	case BLKIO_STAT_ASYNC:
650 		strlcat(str, " Async", chars_left);
651 		break;
652 	case BLKIO_STAT_TOTAL:
653 		strlcat(str, " Total", chars_left);
654 		break;
655 	default:
656 		strlcat(str, " Invalid", chars_left);
657 	}
658 }
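
/*
 * The resulting keys look like "8:16 Read", "8:16 Write", "8:16 Sync",
 * "8:16 Async" and "8:16 Total" (device numbers are illustrative), or just
 * "8:16" when diskname_only is set.
 */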
659 
660 static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
661 				struct cgroup_map_cb *cb, dev_t dev)
662 {
663 	blkio_get_key_name(0, dev, str, chars_left, true);
664 	cb->fill(cb, str, val);
665 	return val;
666 }
667 
668 
669 static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
670 			enum stat_type_cpu type, enum stat_sub_type sub_type)
671 {
672 	int cpu;
673 	struct blkio_group_stats_cpu *stats_cpu;
674 	u64 val = 0, tval;
675 
676 	for_each_possible_cpu(cpu) {
677 		unsigned int start;
678 		stats_cpu = per_cpu_ptr(blkg->stats_cpu, cpu);
679 
680 		do {
681 			start = u64_stats_fetch_begin(&stats_cpu->syncp);
682 			if (type == BLKIO_STAT_CPU_SECTORS)
683 				tval = stats_cpu->sectors;
684 			else
685 				tval = stats_cpu->stat_arr_cpu[type][sub_type];
686 		} while (u64_stats_fetch_retry(&stats_cpu->syncp, start));
687 
688 		val += tval;
689 	}
690 
691 	return val;
692 }
693 
694 static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
695 		struct cgroup_map_cb *cb, dev_t dev, enum stat_type_cpu type)
696 {
697 	uint64_t disk_total, val;
698 	char key_str[MAX_KEY_LEN];
699 	enum stat_sub_type sub_type;
700 
701 	if (type == BLKIO_STAT_CPU_SECTORS) {
702 		val = blkio_read_stat_cpu(blkg, type, 0);
703 		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb, dev);
704 	}
705 
706 	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
707 			sub_type++) {
708 		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
709 		val = blkio_read_stat_cpu(blkg, type, sub_type);
710 		cb->fill(cb, key_str, val);
711 	}
712 
713 	disk_total = blkio_read_stat_cpu(blkg, type, BLKIO_STAT_READ) +
714 			blkio_read_stat_cpu(blkg, type, BLKIO_STAT_WRITE);
715 
716 	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
717 	cb->fill(cb, key_str, disk_total);
718 	return disk_total;
719 }
720 
721 /* This should be called with blkg->stats_lock held */
722 static uint64_t blkio_get_stat(struct blkio_group *blkg,
723 		struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
724 {
725 	uint64_t disk_total;
726 	char key_str[MAX_KEY_LEN];
727 	enum stat_sub_type sub_type;
728 
729 	if (type == BLKIO_STAT_TIME)
730 		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
731 					blkg->stats.time, cb, dev);
732 #ifdef CONFIG_DEBUG_BLK_CGROUP
733 	if (type == BLKIO_STAT_UNACCOUNTED_TIME)
734 		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
735 					blkg->stats.unaccounted_time, cb, dev);
736 	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
737 		uint64_t sum = blkg->stats.avg_queue_size_sum;
738 		uint64_t samples = blkg->stats.avg_queue_size_samples;
739 		if (samples)
740 			do_div(sum, samples);
741 		else
742 			sum = 0;
743 		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
744 	}
745 	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
746 		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
747 					blkg->stats.group_wait_time, cb, dev);
748 	if (type == BLKIO_STAT_IDLE_TIME)
749 		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
750 					blkg->stats.idle_time, cb, dev);
751 	if (type == BLKIO_STAT_EMPTY_TIME)
752 		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
753 					blkg->stats.empty_time, cb, dev);
754 	if (type == BLKIO_STAT_DEQUEUE)
755 		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
756 					blkg->stats.dequeue, cb, dev);
757 #endif
758 
759 	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
760 			sub_type++) {
761 		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
762 		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
763 	}
764 	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
765 			blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
766 	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
767 	cb->fill(cb, key_str, disk_total);
768 	return disk_total;
769 }
770 
771 static int blkio_policy_parse_and_set(char *buf,
772 	struct blkio_policy_node *newpn, enum blkio_policy_id plid, int fileid)
773 {
774 	struct gendisk *disk = NULL;
775 	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
776 	unsigned long major, minor;
777 	int i = 0, ret = -EINVAL;
778 	int part;
779 	dev_t dev;
780 	u64 temp;
781 
782 	memset(s, 0, sizeof(s));
783 
784 	while ((p = strsep(&buf, " ")) != NULL) {
785 		if (!*p)
786 			continue;
787 
788 		s[i++] = p;
789 
790 		/* Don't parse more fields than we expect */
791 		if (i == 3)
792 			break;
793 	}
794 
795 	if (i != 2)
796 		goto out;
797 
798 	p = strsep(&s[0], ":");
799 	if (p != NULL)
800 		major_s = p;
801 	else
802 		goto out;
803 
804 	minor_s = s[0];
805 	if (!minor_s)
806 		goto out;
807 
808 	if (strict_strtoul(major_s, 10, &major))
809 		goto out;
810 
811 	if (strict_strtoul(minor_s, 10, &minor))
812 		goto out;
813 
814 	dev = MKDEV(major, minor);
815 
816 	if (strict_strtoull(s[1], 10, &temp))
817 		goto out;
818 
819 	/* For rule removal, do not check for device presence. */
820 	if (temp) {
821 		disk = get_gendisk(dev, &part);
822 		if (!disk || part) {
823 			ret = -ENODEV;
824 			goto out;
825 		}
826 	}
827 
828 	newpn->dev = dev;
829 
830 	switch (plid) {
831 	case BLKIO_POLICY_PROP:
832 		if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
833 		     temp > BLKIO_WEIGHT_MAX)
834 			goto out;
835 
836 		newpn->plid = plid;
837 		newpn->fileid = fileid;
838 		newpn->val.weight = temp;
839 		break;
840 	case BLKIO_POLICY_THROTL:
841 		switch (fileid) {
842 		case BLKIO_THROTL_read_bps_device:
843 		case BLKIO_THROTL_write_bps_device:
844 			newpn->plid = plid;
845 			newpn->fileid = fileid;
846 			newpn->val.bps = temp;
847 			break;
848 		case BLKIO_THROTL_read_iops_device:
849 		case BLKIO_THROTL_write_iops_device:
850 			if (temp > THROTL_IOPS_MAX)
851 				goto out;
852 
853 			newpn->plid = plid;
854 			newpn->fileid = fileid;
855 			newpn->val.iops = (unsigned int)temp;
856 			break;
857 		}
858 		break;
859 	default:
860 		BUG();
861 	}
862 	ret = 0;
863 out:
864 	put_disk(disk);
865 	return ret;
866 }
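
/*
 * The accepted input is "<major>:<minor> <value>". From a shell, for
 * example (device numbers and limits are illustrative):
 *
 *	echo "8:16 1048576" > blkio.throttle.read_bps_device	# 1 MB/s limit
 *	echo "8:16 300" > blkio.weight_device			# per-device weight
 *	echo "8:16 0" > blkio.throttle.read_bps_device		# remove the rule
 *
 * A value of 0 is a rule-removal request, handled via
 * blkio_delete_rule_command() below.
 */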
867 
868 unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
869 			      dev_t dev)
870 {
871 	struct blkio_policy_node *pn;
872 	unsigned long flags;
873 	unsigned int weight;
874 
875 	spin_lock_irqsave(&blkcg->lock, flags);
876 
877 	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_PROP,
878 				BLKIO_PROP_weight_device);
879 	if (pn)
880 		weight = pn->val.weight;
881 	else
882 		weight = blkcg->weight;
883 
884 	spin_unlock_irqrestore(&blkcg->lock, flags);
885 
886 	return weight;
887 }
888 EXPORT_SYMBOL_GPL(blkcg_get_weight);
889 
890 uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg, dev_t dev)
891 {
892 	struct blkio_policy_node *pn;
893 	unsigned long flags;
894 	uint64_t bps = -1;
895 
896 	spin_lock_irqsave(&blkcg->lock, flags);
897 	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
898 				BLKIO_THROTL_read_bps_device);
899 	if (pn)
900 		bps = pn->val.bps;
901 	spin_unlock_irqrestore(&blkcg->lock, flags);
902 
903 	return bps;
904 }
905 
906 uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg, dev_t dev)
907 {
908 	struct blkio_policy_node *pn;
909 	unsigned long flags;
910 	uint64_t bps = -1;
911 
912 	spin_lock_irqsave(&blkcg->lock, flags);
913 	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
914 				BLKIO_THROTL_write_bps_device);
915 	if (pn)
916 		bps = pn->val.bps;
917 	spin_unlock_irqrestore(&blkcg->lock, flags);
918 
919 	return bps;
920 }
921 
922 unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg, dev_t dev)
923 {
924 	struct blkio_policy_node *pn;
925 	unsigned long flags;
926 	unsigned int iops = -1;
927 
928 	spin_lock_irqsave(&blkcg->lock, flags);
929 	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
930 				BLKIO_THROTL_read_iops_device);
931 	if (pn)
932 		iops = pn->val.iops;
933 	spin_unlock_irqrestore(&blkcg->lock, flags);
934 
935 	return iops;
936 }
937 
938 unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg, dev_t dev)
939 {
940 	struct blkio_policy_node *pn;
941 	unsigned long flags;
942 	unsigned int iops = -1;
943 
944 	spin_lock_irqsave(&blkcg->lock, flags);
945 	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
946 				BLKIO_THROTL_write_iops_device);
947 	if (pn)
948 		iops = pn->val.iops;
949 	spin_unlock_irqrestore(&blkcg->lock, flags);
950 
951 	return iops;
952 }
953 
954 /* Checks whether the user asked to delete a policy rule */
955 static bool blkio_delete_rule_command(struct blkio_policy_node *pn)
956 {
957 	switch (pn->plid) {
958 	case BLKIO_POLICY_PROP:
959 		if (pn->val.weight == 0)
960 			return 1;
961 		break;
962 	case BLKIO_POLICY_THROTL:
963 		switch (pn->fileid) {
964 		case BLKIO_THROTL_read_bps_device:
965 		case BLKIO_THROTL_write_bps_device:
966 			if (pn->val.bps == 0)
967 				return 1;
968 			break;
969 		case BLKIO_THROTL_read_iops_device:
970 		case BLKIO_THROTL_write_iops_device:
971 			if (pn->val.iops == 0)
972 				return 1;
973 		}
974 		break;
975 	default:
976 		BUG();
977 	}
978 
979 	return 0;
980 }
981 
982 static void blkio_update_policy_rule(struct blkio_policy_node *oldpn,
983 					struct blkio_policy_node *newpn)
984 {
985 	switch (oldpn->plid) {
986 	case BLKIO_POLICY_PROP:
987 		oldpn->val.weight = newpn->val.weight;
988 		break;
989 	case BLKIO_POLICY_THROTL:
990 		switch (newpn->fileid) {
991 		case BLKIO_THROTL_read_bps_device:
992 		case BLKIO_THROTL_write_bps_device:
993 			oldpn->val.bps = newpn->val.bps;
994 			break;
995 		case BLKIO_THROTL_read_iops_device:
996 		case BLKIO_THROTL_write_iops_device:
997 			oldpn->val.iops = newpn->val.iops;
998 		}
999 		break;
1000 	default:
1001 		BUG();
1002 	}
1003 }
1004 
1005 /*
1006  * Some rules/values in blkg have changed. Propagate those to respective
1007  * policies.
1008  */
1009 static void blkio_update_blkg_policy(struct blkio_cgroup *blkcg,
1010 		struct blkio_group *blkg, struct blkio_policy_node *pn)
1011 {
1012 	unsigned int weight, iops;
1013 	u64 bps;
1014 
1015 	switch (pn->plid) {
1016 	case BLKIO_POLICY_PROP:
1017 		weight = pn->val.weight ? pn->val.weight :
1018 				blkcg->weight;
1019 		blkio_update_group_weight(blkg, weight);
1020 		break;
1021 	case BLKIO_POLICY_THROTL:
1022 		switch (pn->fileid) {
1023 		case BLKIO_THROTL_read_bps_device:
1024 		case BLKIO_THROTL_write_bps_device:
1025 			bps = pn->val.bps ? pn->val.bps : (-1);
1026 			blkio_update_group_bps(blkg, bps, pn->fileid);
1027 			break;
1028 		case BLKIO_THROTL_read_iops_device:
1029 		case BLKIO_THROTL_write_iops_device:
1030 			iops = pn->val.iops ? pn->val.iops : (-1);
1031 			blkio_update_group_iops(blkg, iops, pn->fileid);
1032 			break;
1033 		}
1034 		break;
1035 	default:
1036 		BUG();
1037 	}
1038 }
1039 
1040 /*
1041  * A policy node rule has been updated. Propagate the change to all the
1042  * block groups that might be affected by it.
1043  */
1044 static void blkio_update_policy_node_blkg(struct blkio_cgroup *blkcg,
1045 				struct blkio_policy_node *pn)
1046 {
1047 	struct blkio_group *blkg;
1048 	struct hlist_node *n;
1049 
1050 	spin_lock(&blkio_list_lock);
1051 	spin_lock_irq(&blkcg->lock);
1052 
1053 	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
1054 		if (pn->dev != blkg->dev || pn->plid != blkg->plid)
1055 			continue;
1056 		blkio_update_blkg_policy(blkcg, blkg, pn);
1057 	}
1058 
1059 	spin_unlock_irq(&blkcg->lock);
1060 	spin_unlock(&blkio_list_lock);
1061 }
1062 
1063 static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
1064  				       const char *buffer)
1065 {
1066 	int ret = 0;
1067 	char *buf;
1068 	struct blkio_policy_node *newpn, *pn;
1069 	struct blkio_cgroup *blkcg;
1070 	int keep_newpn = 0;
1071 	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
1072 	int fileid = BLKIOFILE_ATTR(cft->private);
1073 
1074 	buf = kstrdup(buffer, GFP_KERNEL);
1075 	if (!buf)
1076 		return -ENOMEM;
1077 
1078 	newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
1079 	if (!newpn) {
1080 		ret = -ENOMEM;
1081 		goto free_buf;
1082 	}
1083 
1084 	ret = blkio_policy_parse_and_set(buf, newpn, plid, fileid);
1085 	if (ret)
1086 		goto free_newpn;
1087 
1088 	blkcg = cgroup_to_blkio_cgroup(cgrp);
1089 
1090 	spin_lock_irq(&blkcg->lock);
1091 
1092 	pn = blkio_policy_search_node(blkcg, newpn->dev, plid, fileid);
1093 	if (!pn) {
1094 		if (!blkio_delete_rule_command(newpn)) {
1095 			blkio_policy_insert_node(blkcg, newpn);
1096 			keep_newpn = 1;
1097 		}
1098 		spin_unlock_irq(&blkcg->lock);
1099 		goto update_io_group;
1100 	}
1101 
1102 	if (blkio_delete_rule_command(newpn)) {
1103 		blkio_policy_delete_node(pn);
1104 		kfree(pn);
1105 		spin_unlock_irq(&blkcg->lock);
1106 		goto update_io_group;
1107 	}
1108 	spin_unlock_irq(&blkcg->lock);
1109 
1110 	blkio_update_policy_rule(pn, newpn);
1111 
1112 update_io_group:
1113 	blkio_update_policy_node_blkg(blkcg, newpn);
1114 
1115 free_newpn:
1116 	if (!keep_newpn)
1117 		kfree(newpn);
1118 free_buf:
1119 	kfree(buf);
1120 	return ret;
1121 }
1122 
1123 static void
1124 blkio_print_policy_node(struct seq_file *m, struct blkio_policy_node *pn)
1125 {
1126 	switch (pn->plid) {
1127 		case BLKIO_POLICY_PROP:
1128 			if (pn->fileid == BLKIO_PROP_weight_device)
1129 				seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
1130 					MINOR(pn->dev), pn->val.weight);
1131 			break;
1132 		case BLKIO_POLICY_THROTL:
1133 			switch (pn->fileid) {
1134 			case BLKIO_THROTL_read_bps_device:
1135 			case BLKIO_THROTL_write_bps_device:
1136 				seq_printf(m, "%u:%u\t%llu\n", MAJOR(pn->dev),
1137 					MINOR(pn->dev), pn->val.bps);
1138 				break;
1139 			case BLKIO_THROTL_read_iops_device:
1140 			case BLKIO_THROTL_write_iops_device:
1141 				seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
1142 					MINOR(pn->dev), pn->val.iops);
1143 				break;
1144 			}
1145 			break;
1146 		default:
1147 			BUG();
1148 	}
1149 }
1150 
1151 /* cgroup files which read their data from policy nodes end up here */
1152 static void blkio_read_policy_node_files(struct cftype *cft,
1153 			struct blkio_cgroup *blkcg, struct seq_file *m)
1154 {
1155 	struct blkio_policy_node *pn;
1156 
1157 	if (!list_empty(&blkcg->policy_list)) {
1158 		spin_lock_irq(&blkcg->lock);
1159 		list_for_each_entry(pn, &blkcg->policy_list, node) {
1160 			if (!pn_matches_cftype(cft, pn))
1161 				continue;
1162 			blkio_print_policy_node(m, pn);
1163 		}
1164 		spin_unlock_irq(&blkcg->lock);
1165 	}
1166 }
1167 
1168 static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
1169 				struct seq_file *m)
1170 {
1171 	struct blkio_cgroup *blkcg;
1172 	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
1173 	int name = BLKIOFILE_ATTR(cft->private);
1174 
1175 	blkcg = cgroup_to_blkio_cgroup(cgrp);
1176 
1177 	switch (plid) {
1178 	case BLKIO_POLICY_PROP:
1179 		switch (name) {
1180 		case BLKIO_PROP_weight_device:
1181 			blkio_read_policy_node_files(cft, blkcg, m);
1182 			return 0;
1183 		default:
1184 			BUG();
1185 		}
1186 		break;
1187 	case BLKIO_POLICY_THROTL:
1188 		switch (name) {
1189 		case BLKIO_THROTL_read_bps_device:
1190 		case BLKIO_THROTL_write_bps_device:
1191 		case BLKIO_THROTL_read_iops_device:
1192 		case BLKIO_THROTL_write_iops_device:
1193 			blkio_read_policy_node_files(cft, blkcg, m);
1194 			return 0;
1195 		default:
1196 			BUG();
1197 		}
1198 		break;
1199 	default:
1200 		BUG();
1201 	}
1202 
1203 	return 0;
1204 }
1205 
1206 static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
1207 		struct cftype *cft, struct cgroup_map_cb *cb,
1208 		enum stat_type type, bool show_total, bool pcpu)
1209 {
1210 	struct blkio_group *blkg;
1211 	struct hlist_node *n;
1212 	uint64_t cgroup_total = 0;
1213 
1214 	rcu_read_lock();
1215 	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
1216 		if (blkg->dev) {
1217 			if (!cftype_blkg_same_policy(cft, blkg))
1218 				continue;
1219 			if (pcpu)
1220 				cgroup_total += blkio_get_stat_cpu(blkg, cb,
1221 						blkg->dev, type);
1222 			else {
1223 				spin_lock_irq(&blkg->stats_lock);
1224 				cgroup_total += blkio_get_stat(blkg, cb,
1225 						blkg->dev, type);
1226 				spin_unlock_irq(&blkg->stats_lock);
1227 			}
1228 		}
1229 	}
1230 	if (show_total)
1231 		cb->fill(cb, "Total", cgroup_total);
1232 	rcu_read_unlock();
1233 	return 0;
1234 }
1235 
1236 /* All map-type cgroup files are serviced by this function */
1237 static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
1238 				struct cgroup_map_cb *cb)
1239 {
1240 	struct blkio_cgroup *blkcg;
1241 	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
1242 	int name = BLKIOFILE_ATTR(cft->private);
1243 
1244 	blkcg = cgroup_to_blkio_cgroup(cgrp);
1245 
1246 	switch (plid) {
1247 	case BLKIO_POLICY_PROP:
1248 		switch (name) {
1249 		case BLKIO_PROP_time:
1250 			return blkio_read_blkg_stats(blkcg, cft, cb,
1251 						BLKIO_STAT_TIME, 0, 0);
1252 		case BLKIO_PROP_sectors:
1253 			return blkio_read_blkg_stats(blkcg, cft, cb,
1254 						BLKIO_STAT_CPU_SECTORS, 0, 1);
1255 		case BLKIO_PROP_io_service_bytes:
1256 			return blkio_read_blkg_stats(blkcg, cft, cb,
1257 					BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
1258 		case BLKIO_PROP_io_serviced:
1259 			return blkio_read_blkg_stats(blkcg, cft, cb,
1260 						BLKIO_STAT_CPU_SERVICED, 1, 1);
1261 		case BLKIO_PROP_io_service_time:
1262 			return blkio_read_blkg_stats(blkcg, cft, cb,
1263 						BLKIO_STAT_SERVICE_TIME, 1, 0);
1264 		case BLKIO_PROP_io_wait_time:
1265 			return blkio_read_blkg_stats(blkcg, cft, cb,
1266 						BLKIO_STAT_WAIT_TIME, 1, 0);
1267 		case BLKIO_PROP_io_merged:
1268 			return blkio_read_blkg_stats(blkcg, cft, cb,
1269 						BLKIO_STAT_CPU_MERGED, 1, 1);
1270 		case BLKIO_PROP_io_queued:
1271 			return blkio_read_blkg_stats(blkcg, cft, cb,
1272 						BLKIO_STAT_QUEUED, 1, 0);
1273 #ifdef CONFIG_DEBUG_BLK_CGROUP
1274 		case BLKIO_PROP_unaccounted_time:
1275 			return blkio_read_blkg_stats(blkcg, cft, cb,
1276 					BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
1277 		case BLKIO_PROP_dequeue:
1278 			return blkio_read_blkg_stats(blkcg, cft, cb,
1279 						BLKIO_STAT_DEQUEUE, 0, 0);
1280 		case BLKIO_PROP_avg_queue_size:
1281 			return blkio_read_blkg_stats(blkcg, cft, cb,
1282 					BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
1283 		case BLKIO_PROP_group_wait_time:
1284 			return blkio_read_blkg_stats(blkcg, cft, cb,
1285 					BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
1286 		case BLKIO_PROP_idle_time:
1287 			return blkio_read_blkg_stats(blkcg, cft, cb,
1288 						BLKIO_STAT_IDLE_TIME, 0, 0);
1289 		case BLKIO_PROP_empty_time:
1290 			return blkio_read_blkg_stats(blkcg, cft, cb,
1291 						BLKIO_STAT_EMPTY_TIME, 0, 0);
1292 #endif
1293 		default:
1294 			BUG();
1295 		}
1296 		break;
1297 	case BLKIO_POLICY_THROTL:
1298 		switch (name) {
1299 		case BLKIO_THROTL_io_service_bytes:
1300 			return blkio_read_blkg_stats(blkcg, cft, cb,
1301 						BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
1302 		case BLKIO_THROTL_io_serviced:
1303 			return blkio_read_blkg_stats(blkcg, cft, cb,
1304 						BLKIO_STAT_CPU_SERVICED, 1, 1);
1305 		default:
1306 			BUG();
1307 		}
1308 		break;
1309 	default:
1310 		BUG();
1311 	}
1312 
1313 	return 0;
1314 }
1315 
1316 static int blkio_weight_write(struct blkio_cgroup *blkcg, u64 val)
1317 {
1318 	struct blkio_group *blkg;
1319 	struct hlist_node *n;
1320 	struct blkio_policy_node *pn;
1321 
1322 	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
1323 		return -EINVAL;
1324 
1325 	spin_lock(&blkio_list_lock);
1326 	spin_lock_irq(&blkcg->lock);
1327 	blkcg->weight = (unsigned int)val;
1328 
1329 	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
1330 		pn = blkio_policy_search_node(blkcg, blkg->dev,
1331 				BLKIO_POLICY_PROP, BLKIO_PROP_weight_device);
1332 		if (pn)
1333 			continue;
1334 
1335 		blkio_update_group_weight(blkg, blkcg->weight);
1336 	}
1337 	spin_unlock_irq(&blkcg->lock);
1338 	spin_unlock(&blkio_list_lock);
1339 	return 0;
1340 }
1341 
1342 static u64 blkiocg_file_read_u64(struct cgroup *cgrp, struct cftype *cft) {
1343 	struct blkio_cgroup *blkcg;
1344 	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
1345 	int name = BLKIOFILE_ATTR(cft->private);
1346 
1347 	blkcg = cgroup_to_blkio_cgroup(cgrp);
1348 
1349 	switch (plid) {
1350 	case BLKIO_POLICY_PROP:
1351 		switch (name) {
1352 		case BLKIO_PROP_weight:
1353 			return (u64)blkcg->weight;
1354 		}
1355 		break;
1356 	default:
1357 		BUG();
1358 	}
1359 	return 0;
1360 }
1361 
1362 static int
1363 blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
1364 {
1365 	struct blkio_cgroup *blkcg;
1366 	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
1367 	int name = BLKIOFILE_ATTR(cft->private);
1368 
1369 	blkcg = cgroup_to_blkio_cgroup(cgrp);
1370 
1371 	switch (plid) {
1372 	case BLKIO_POLICY_PROP:
1373 		switch (name) {
1374 		case BLKIO_PROP_weight:
1375 			return blkio_weight_write(blkcg, val);
1376 		}
1377 		break;
1378 	default:
1379 		BUG();
1380 	}
1381 
1382 	return 0;
1383 }
1384 
1385 struct cftype blkio_files[] = {
1386 	{
1387 		.name = "weight_device",
1388 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1389 				BLKIO_PROP_weight_device),
1390 		.read_seq_string = blkiocg_file_read,
1391 		.write_string = blkiocg_file_write,
1392 		.max_write_len = 256,
1393 	},
1394 	{
1395 		.name = "weight",
1396 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1397 				BLKIO_PROP_weight),
1398 		.read_u64 = blkiocg_file_read_u64,
1399 		.write_u64 = blkiocg_file_write_u64,
1400 	},
1401 	{
1402 		.name = "time",
1403 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1404 				BLKIO_PROP_time),
1405 		.read_map = blkiocg_file_read_map,
1406 	},
1407 	{
1408 		.name = "sectors",
1409 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1410 				BLKIO_PROP_sectors),
1411 		.read_map = blkiocg_file_read_map,
1412 	},
1413 	{
1414 		.name = "io_service_bytes",
1415 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1416 				BLKIO_PROP_io_service_bytes),
1417 		.read_map = blkiocg_file_read_map,
1418 	},
1419 	{
1420 		.name = "io_serviced",
1421 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1422 				BLKIO_PROP_io_serviced),
1423 		.read_map = blkiocg_file_read_map,
1424 	},
1425 	{
1426 		.name = "io_service_time",
1427 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1428 				BLKIO_PROP_io_service_time),
1429 		.read_map = blkiocg_file_read_map,
1430 	},
1431 	{
1432 		.name = "io_wait_time",
1433 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1434 				BLKIO_PROP_io_wait_time),
1435 		.read_map = blkiocg_file_read_map,
1436 	},
1437 	{
1438 		.name = "io_merged",
1439 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1440 				BLKIO_PROP_io_merged),
1441 		.read_map = blkiocg_file_read_map,
1442 	},
1443 	{
1444 		.name = "io_queued",
1445 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1446 				BLKIO_PROP_io_queued),
1447 		.read_map = blkiocg_file_read_map,
1448 	},
1449 	{
1450 		.name = "reset_stats",
1451 		.write_u64 = blkiocg_reset_stats,
1452 	},
1453 #ifdef CONFIG_BLK_DEV_THROTTLING
1454 	{
1455 		.name = "throttle.read_bps_device",
1456 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1457 				BLKIO_THROTL_read_bps_device),
1458 		.read_seq_string = blkiocg_file_read,
1459 		.write_string = blkiocg_file_write,
1460 		.max_write_len = 256,
1461 	},
1462 
1463 	{
1464 		.name = "throttle.write_bps_device",
1465 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1466 				BLKIO_THROTL_write_bps_device),
1467 		.read_seq_string = blkiocg_file_read,
1468 		.write_string = blkiocg_file_write,
1469 		.max_write_len = 256,
1470 	},
1471 
1472 	{
1473 		.name = "throttle.read_iops_device",
1474 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1475 				BLKIO_THROTL_read_iops_device),
1476 		.read_seq_string = blkiocg_file_read,
1477 		.write_string = blkiocg_file_write,
1478 		.max_write_len = 256,
1479 	},
1480 
1481 	{
1482 		.name = "throttle.write_iops_device",
1483 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1484 				BLKIO_THROTL_write_iops_device),
1485 		.read_seq_string = blkiocg_file_read,
1486 		.write_string = blkiocg_file_write,
1487 		.max_write_len = 256,
1488 	},
1489 	{
1490 		.name = "throttle.io_service_bytes",
1491 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1492 				BLKIO_THROTL_io_service_bytes),
1493 		.read_map = blkiocg_file_read_map,
1494 	},
1495 	{
1496 		.name = "throttle.io_serviced",
1497 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1498 				BLKIO_THROTL_io_serviced),
1499 		.read_map = blkiocg_file_read_map,
1500 	},
1501 #endif /* CONFIG_BLK_DEV_THROTTLING */
1502 
1503 #ifdef CONFIG_DEBUG_BLK_CGROUP
1504 	{
1505 		.name = "avg_queue_size",
1506 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1507 				BLKIO_PROP_avg_queue_size),
1508 		.read_map = blkiocg_file_read_map,
1509 	},
1510 	{
1511 		.name = "group_wait_time",
1512 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1513 				BLKIO_PROP_group_wait_time),
1514 		.read_map = blkiocg_file_read_map,
1515 	},
1516 	{
1517 		.name = "idle_time",
1518 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1519 				BLKIO_PROP_idle_time),
1520 		.read_map = blkiocg_file_read_map,
1521 	},
1522 	{
1523 		.name = "empty_time",
1524 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1525 				BLKIO_PROP_empty_time),
1526 		.read_map = blkiocg_file_read_map,
1527 	},
1528 	{
1529 		.name = "dequeue",
1530 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1531 				BLKIO_PROP_dequeue),
1532 		.read_map = blkiocg_file_read_map,
1533 	},
1534 	{
1535 		.name = "unaccounted_time",
1536 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1537 				BLKIO_PROP_unaccounted_time),
1538 		.read_map = blkiocg_file_read_map,
1539 	},
1540 #endif
1541 };
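
/*
 * blkiocg_populate() instantiates the table above in every blkio cgroup
 * directory; with the subsystem name prefixed, the files appear as
 * blkio.weight, blkio.weight_device, blkio.time, blkio.io_serviced and,
 * with CONFIG_BLK_DEV_THROTTLING, blkio.throttle.read_bps_device etc.
 */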
1542 
1543 static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
1544 {
1545 	return cgroup_add_files(cgroup, subsys, blkio_files,
1546 				ARRAY_SIZE(blkio_files));
1547 }
1548 
1549 static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
1550 {
1551 	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
1552 	unsigned long flags;
1553 	struct blkio_group *blkg;
1554 	void *key;
1555 	struct blkio_policy_type *blkiop;
1556 	struct blkio_policy_node *pn, *pntmp;
1557 
1558 	rcu_read_lock();
1559 	do {
1560 		spin_lock_irqsave(&blkcg->lock, flags);
1561 
1562 		if (hlist_empty(&blkcg->blkg_list)) {
1563 			spin_unlock_irqrestore(&blkcg->lock, flags);
1564 			break;
1565 		}
1566 
1567 		blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
1568 					blkcg_node);
1569 		key = rcu_dereference(blkg->key);
1570 		__blkiocg_del_blkio_group(blkg);
1571 
1572 		spin_unlock_irqrestore(&blkcg->lock, flags);
1573 
1574 		/*
1575 		 * This blkio_group is being unlinked as the associated cgroup is
1576 		 * going away. Let all the I/O controlling policies know about
1577 		 * this event.
1578 		 */
1579 		spin_lock(&blkio_list_lock);
1580 		list_for_each_entry(blkiop, &blkio_list, list) {
1581 			if (blkiop->plid != blkg->plid)
1582 				continue;
1583 			blkiop->ops.blkio_unlink_group_fn(key, blkg);
1584 		}
1585 		spin_unlock(&blkio_list_lock);
1586 	} while (1);
1587 
1588 	list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
1589 		blkio_policy_delete_node(pn);
1590 		kfree(pn);
1591 	}
1592 
1593 	free_css_id(&blkio_subsys, &blkcg->css);
1594 	rcu_read_unlock();
1595 	if (blkcg != &blkio_root_cgroup)
1596 		kfree(blkcg);
1597 }
1598 
1599 static struct cgroup_subsys_state *
1600 blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
1601 {
1602 	struct blkio_cgroup *blkcg;
1603 	struct cgroup *parent = cgroup->parent;
1604 
1605 	if (!parent) {
1606 		blkcg = &blkio_root_cgroup;
1607 		goto done;
1608 	}
1609 
1610 	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
1611 	if (!blkcg)
1612 		return ERR_PTR(-ENOMEM);
1613 
1614 	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
1615 done:
1616 	spin_lock_init(&blkcg->lock);
1617 	INIT_HLIST_HEAD(&blkcg->blkg_list);
1618 
1619 	INIT_LIST_HEAD(&blkcg->policy_list);
1620 	return &blkcg->css;
1621 }
1622 
1623 /*
1624  * We cannot support shared io contexts, as we have no means of supporting
1625  * two tasks with the same ioc in two different groups without major rework
1626  * of the main cic data structures. For now we allow a task to change
1627  * its cgroup only if it is the only owner of its ioc.
1628  */
1629 static int blkiocg_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
1630 {
1631 	struct io_context *ioc;
1632 	int ret = 0;
1633 
1634 	/* task_lock() is needed to avoid races with exit_io_context() */
1635 	task_lock(tsk);
1636 	ioc = tsk->io_context;
1637 	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
1638 		ret = -EINVAL;
1639 	task_unlock(tsk);
1640 
1641 	return ret;
1642 }
1643 
1644 static void blkiocg_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
1645 {
1646 	struct io_context *ioc;
1647 
1648 	task_lock(tsk);
1649 	ioc = tsk->io_context;
1650 	if (ioc)
1651 		ioc->cgroup_changed = 1;
1652 	task_unlock(tsk);
1653 }
1654 
1655 void blkio_policy_register(struct blkio_policy_type *blkiop)
1656 {
1657 	spin_lock(&blkio_list_lock);
1658 	list_add_tail(&blkiop->list, &blkio_list);
1659 	spin_unlock(&blkio_list_lock);
1660 }
1661 EXPORT_SYMBOL_GPL(blkio_policy_register);
1662 
1663 void blkio_policy_unregister(struct blkio_policy_type *blkiop)
1664 {
1665 	spin_lock(&blkio_list_lock);
1666 	list_del_init(&blkiop->list);
1667 	spin_unlock(&blkio_list_lock);
1668 }
1669 EXPORT_SYMBOL_GPL(blkio_policy_unregister);
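
/*
 * Illustrative (hypothetical) registration by an I/O policy, mirroring what
 * CFQ and blk-throttle do from their init/exit paths:
 *
 *	static struct blkio_policy_type blkio_policy_example = {
 *		.ops = {
 *			.blkio_unlink_group_fn		= example_unlink_group,
 *			.blkio_update_group_weight_fn	= example_update_weight,
 *		},
 *		.plid = BLKIO_POLICY_PROP,
 *	};
 *
 *	blkio_policy_register(&blkio_policy_example);	/* e.g. from module init */
 *	...
 *	blkio_policy_unregister(&blkio_policy_example);	/* from module exit */
 */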
1670 
1671 static int __init init_cgroup_blkio(void)
1672 {
1673 	return cgroup_load_subsys(&blkio_subsys);
1674 }
1675 
1676 static void __exit exit_cgroup_blkio(void)
1677 {
1678 	cgroup_unload_subsys(&blkio_subsys);
1679 }
1680 
1681 module_init(init_cgroup_blkio);
1682 module_exit(exit_cgroup_blkio);
1683 MODULE_LICENSE("GPL");
1684