xref: /linux/block/blk-cgroup.c (revision 12871a0bd67dd4db4418e1daafcd46e9d329ef10)
1 /*
2  * Common Block IO controller cgroup interface
3  *
4  * Based on ideas and code from CFQ, CFS and BFQ:
5  * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
6  *
7  * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
8  *		      Paolo Valente <paolo.valente@unimore.it>
9  *
10  * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
11  * 	              Nauman Rafique <nauman@google.com>
12  */
13 #include <linux/ioprio.h>
14 #include <linux/seq_file.h>
15 #include <linux/kdev_t.h>
16 #include <linux/module.h>
17 #include <linux/err.h>
18 #include <linux/blkdev.h>
19 #include <linux/slab.h>
20 #include "blk-cgroup.h"
21 #include <linux/genhd.h>
22 
23 #define MAX_KEY_LEN 100
24 
25 static DEFINE_SPINLOCK(blkio_list_lock);
26 static LIST_HEAD(blkio_list);
27 
28 struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
29 EXPORT_SYMBOL_GPL(blkio_root_cgroup);
30 
31 static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
32 						  struct cgroup *);
33 static int blkiocg_can_attach_task(struct cgroup *, struct task_struct *);
34 static void blkiocg_attach_task(struct cgroup *, struct task_struct *);
35 static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
36 static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
37 
38 /* for encoding the cft->private value of a cgroup file */
39 #define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
40 /* What policy owns the file, proportional or throttle */
41 #define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
42 #define BLKIOFILE_ATTR(val)		((val) & 0xffff)
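/*
 * Example (illustrative): the blkio_files[] entry for
 * "throttle.read_bps_device" below uses
 *
 *	.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
 *				BLKIO_THROTL_read_bps_device)
 *
 * which packs the policy id into the upper 16 bits and the per-policy file
 * id into the lower 16 bits; the read/write handlers recover them again via
 * BLKIOFILE_POLICY(cft->private) and BLKIOFILE_ATTR(cft->private).
 */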
43 
44 struct cgroup_subsys blkio_subsys = {
45 	.name = "blkio",
46 	.create = blkiocg_create,
47 	.can_attach_task = blkiocg_can_attach_task,
48 	.attach_task = blkiocg_attach_task,
49 	.destroy = blkiocg_destroy,
50 	.populate = blkiocg_populate,
51 #ifdef CONFIG_BLK_CGROUP
52 	/* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
53 	.subsys_id = blkio_subsys_id,
54 #endif
55 	.use_id = 1,
56 	.module = THIS_MODULE,
57 };
58 EXPORT_SYMBOL_GPL(blkio_subsys);
59 
60 static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
61 					    struct blkio_policy_node *pn)
62 {
63 	list_add(&pn->node, &blkcg->policy_list);
64 }
65 
66 static inline bool cftype_blkg_same_policy(struct cftype *cft,
67 			struct blkio_group *blkg)
68 {
69 	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
70 
71 	if (blkg->plid == plid)
72 		return 1;
73 
74 	return 0;
75 }
76 
77 /* Determines if policy node matches cgroup file being accessed */
78 static inline bool pn_matches_cftype(struct cftype *cft,
79 			struct blkio_policy_node *pn)
80 {
81 	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
82 	int fileid = BLKIOFILE_ATTR(cft->private);
83 
84 	return (plid == pn->plid && fileid == pn->fileid);
85 }
86 
87 /* Must be called with blkcg->lock held */
88 static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
89 {
90 	list_del(&pn->node);
91 }
92 
93 /* Must be called with blkcg->lock held */
94 static struct blkio_policy_node *
95 blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev,
96 		enum blkio_policy_id plid, int fileid)
97 {
98 	struct blkio_policy_node *pn;
99 
100 	list_for_each_entry(pn, &blkcg->policy_list, node) {
101 		if (pn->dev == dev && pn->plid == plid && pn->fileid == fileid)
102 			return pn;
103 	}
104 
105 	return NULL;
106 }
107 
108 struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
109 {
110 	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
111 			    struct blkio_cgroup, css);
112 }
113 EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
114 
115 struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
116 {
117 	return container_of(task_subsys_state(tsk, blkio_subsys_id),
118 			    struct blkio_cgroup, css);
119 }
120 EXPORT_SYMBOL_GPL(task_blkio_cgroup);
121 
122 static inline void
123 blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
124 {
125 	struct blkio_policy_type *blkiop;
126 
127 	list_for_each_entry(blkiop, &blkio_list, list) {
128 		/* If this policy does not own the blkg, do not send updates */
129 		if (blkiop->plid != blkg->plid)
130 			continue;
131 		if (blkiop->ops.blkio_update_group_weight_fn)
132 			blkiop->ops.blkio_update_group_weight_fn(blkg->key,
133 							blkg, weight);
134 	}
135 }
136 
137 static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
138 				int fileid)
139 {
140 	struct blkio_policy_type *blkiop;
141 
142 	list_for_each_entry(blkiop, &blkio_list, list) {
143 
144 		/* If this policy does not own the blkg, do not send updates */
145 		if (blkiop->plid != blkg->plid)
146 			continue;
147 
148 		if (fileid == BLKIO_THROTL_read_bps_device
149 		    && blkiop->ops.blkio_update_group_read_bps_fn)
150 			blkiop->ops.blkio_update_group_read_bps_fn(blkg->key,
151 								blkg, bps);
152 
153 		if (fileid == BLKIO_THROTL_write_bps_device
154 		    && blkiop->ops.blkio_update_group_write_bps_fn)
155 			blkiop->ops.blkio_update_group_write_bps_fn(blkg->key,
156 								blkg, bps);
157 	}
158 }
159 
160 static inline void blkio_update_group_iops(struct blkio_group *blkg,
161 			unsigned int iops, int fileid)
162 {
163 	struct blkio_policy_type *blkiop;
164 
165 	list_for_each_entry(blkiop, &blkio_list, list) {
166 
167 		/* If this policy does not own the blkg, do not send updates */
168 		if (blkiop->plid != blkg->plid)
169 			continue;
170 
171 		if (fileid == BLKIO_THROTL_read_iops_device
172 		    && blkiop->ops.blkio_update_group_read_iops_fn)
173 			blkiop->ops.blkio_update_group_read_iops_fn(blkg->key,
174 								blkg, iops);
175 
176 		if (fileid == BLKIO_THROTL_write_iops_device
177 		    && blkiop->ops.blkio_update_group_write_iops_fn)
178 			blkiop->ops.blkio_update_group_write_iops_fn(blkg->key,
179 								blkg, iops);
180 	}
181 }
182 
183 /*
184  * Add to the appropriate stat variable depending on the request type.
185  * This should be called with the blkg->stats_lock held.
186  */
187 static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
188 				bool sync)
189 {
190 	if (direction)
191 		stat[BLKIO_STAT_WRITE] += add;
192 	else
193 		stat[BLKIO_STAT_READ] += add;
194 	if (sync)
195 		stat[BLKIO_STAT_SYNC] += add;
196 	else
197 		stat[BLKIO_STAT_ASYNC] += add;
198 }
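/*
 * For instance, queueing one synchronous write is accounted as
 *
 *	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, true, true);
 *
 * which bumps both stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE] and
 * stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_SYNC] (see
 * blkiocg_update_io_add_stats() below).
 */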
199 
200 /*
201  * Decrements the appropriate stat variable depending on the request type
202  * and triggers a BUG if the value is already zero.
203  * This should be called with the blkg->stats_lock held.
204  */
205 static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
206 {
207 	if (direction) {
208 		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
209 		stat[BLKIO_STAT_WRITE]--;
210 	} else {
211 		BUG_ON(stat[BLKIO_STAT_READ] == 0);
212 		stat[BLKIO_STAT_READ]--;
213 	}
214 	if (sync) {
215 		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
216 		stat[BLKIO_STAT_SYNC]--;
217 	} else {
218 		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
219 		stat[BLKIO_STAT_ASYNC]--;
220 	}
221 }
222 
223 #ifdef CONFIG_DEBUG_BLK_CGROUP
224 /* This should be called with the blkg->stats_lock held. */
225 static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
226 						struct blkio_group *curr_blkg)
227 {
228 	if (blkio_blkg_waiting(&blkg->stats))
229 		return;
230 	if (blkg == curr_blkg)
231 		return;
232 	blkg->stats.start_group_wait_time = sched_clock();
233 	blkio_mark_blkg_waiting(&blkg->stats);
234 }
235 
236 /* This should be called with the blkg->stats_lock held. */
237 static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
238 {
239 	unsigned long long now;
240 
241 	if (!blkio_blkg_waiting(stats))
242 		return;
243 
244 	now = sched_clock();
245 	if (time_after64(now, stats->start_group_wait_time))
246 		stats->group_wait_time += now - stats->start_group_wait_time;
247 	blkio_clear_blkg_waiting(stats);
248 }
249 
250 /* This should be called with the blkg->stats_lock held. */
251 static void blkio_end_empty_time(struct blkio_group_stats *stats)
252 {
253 	unsigned long long now;
254 
255 	if (!blkio_blkg_empty(stats))
256 		return;
257 
258 	now = sched_clock();
259 	if (time_after64(now, stats->start_empty_time))
260 		stats->empty_time += now - stats->start_empty_time;
261 	blkio_clear_blkg_empty(stats);
262 }
263 
264 void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
265 {
266 	unsigned long flags;
267 
268 	spin_lock_irqsave(&blkg->stats_lock, flags);
269 	BUG_ON(blkio_blkg_idling(&blkg->stats));
270 	blkg->stats.start_idle_time = sched_clock();
271 	blkio_mark_blkg_idling(&blkg->stats);
272 	spin_unlock_irqrestore(&blkg->stats_lock, flags);
273 }
274 EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);
275 
276 void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
277 {
278 	unsigned long flags;
279 	unsigned long long now;
280 	struct blkio_group_stats *stats;
281 
282 	spin_lock_irqsave(&blkg->stats_lock, flags);
283 	stats = &blkg->stats;
284 	if (blkio_blkg_idling(stats)) {
285 		now = sched_clock();
286 		if (time_after64(now, stats->start_idle_time))
287 			stats->idle_time += now - stats->start_idle_time;
288 		blkio_clear_blkg_idling(stats);
289 	}
290 	spin_unlock_irqrestore(&blkg->stats_lock, flags);
291 }
292 EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
293 
294 void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
295 {
296 	unsigned long flags;
297 	struct blkio_group_stats *stats;
298 
299 	spin_lock_irqsave(&blkg->stats_lock, flags);
300 	stats = &blkg->stats;
301 	stats->avg_queue_size_sum +=
302 			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
303 			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
304 	stats->avg_queue_size_samples++;
305 	blkio_update_group_wait_time(stats);
306 	spin_unlock_irqrestore(&blkg->stats_lock, flags);
307 }
308 EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
309 
310 void blkiocg_set_start_empty_time(struct blkio_group *blkg)
311 {
312 	unsigned long flags;
313 	struct blkio_group_stats *stats;
314 
315 	spin_lock_irqsave(&blkg->stats_lock, flags);
316 	stats = &blkg->stats;
317 
318 	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
319 			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
320 		spin_unlock_irqrestore(&blkg->stats_lock, flags);
321 		return;
322 	}
323 
324 	/*
325 	 * The group is already marked empty. This can happen if a cfqq got a
326 	 * new request in the parent group and was moved to this group while
327 	 * being added to the service tree. Just ignore the event and move on.
328 	 */
329 	if (blkio_blkg_empty(stats)) {
330 		spin_unlock_irqrestore(&blkg->stats_lock, flags);
331 		return;
332 	}
333 
334 	stats->start_empty_time = sched_clock();
335 	blkio_mark_blkg_empty(stats);
336 	spin_unlock_irqrestore(&blkg->stats_lock, flags);
337 }
338 EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
339 
340 void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
341 			unsigned long dequeue)
342 {
343 	blkg->stats.dequeue += dequeue;
344 }
345 EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
346 #else
347 static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
348 					struct blkio_group *curr_blkg) {}
349 static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
350 #endif
351 
352 void blkiocg_update_io_add_stats(struct blkio_group *blkg,
353 			struct blkio_group *curr_blkg, bool direction,
354 			bool sync)
355 {
356 	unsigned long flags;
357 
358 	spin_lock_irqsave(&blkg->stats_lock, flags);
359 	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
360 			sync);
361 	blkio_end_empty_time(&blkg->stats);
362 	blkio_set_start_group_wait_time(blkg, curr_blkg);
363 	spin_unlock_irqrestore(&blkg->stats_lock, flags);
364 }
365 EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);
366 
367 void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
368 						bool direction, bool sync)
369 {
370 	unsigned long flags;
371 
372 	spin_lock_irqsave(&blkg->stats_lock, flags);
373 	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
374 					direction, sync);
375 	spin_unlock_irqrestore(&blkg->stats_lock, flags);
376 }
377 EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
378 
379 void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
380 				unsigned long unaccounted_time)
381 {
382 	unsigned long flags;
383 
384 	spin_lock_irqsave(&blkg->stats_lock, flags);
385 	blkg->stats.time += time;
386 #ifdef CONFIG_DEBUG_BLK_CGROUP
387 	blkg->stats.unaccounted_time += unaccounted_time;
388 #endif
389 	spin_unlock_irqrestore(&blkg->stats_lock, flags);
390 }
391 EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
392 
393 /*
394  * Should be called under the rcu read lock or the queue lock to make sure
395  * the blkg pointer is valid.
396  */
397 void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
398 				uint64_t bytes, bool direction, bool sync)
399 {
400 	struct blkio_group_stats_cpu *stats_cpu;
401 	unsigned long flags;
402 
403 	/*
404 	 * Disable interrupts to provide mutual exclusion between two writes
405 	 * on the same cpu. This is probably not needed on 64-bit, but that
406 	 * case is not being optimized yet.
407 	 */
408 	local_irq_save(flags);
409 
410 	stats_cpu = this_cpu_ptr(blkg->stats_cpu);
411 
412 	u64_stats_update_begin(&stats_cpu->syncp);
413 	stats_cpu->sectors += bytes >> 9;
414 	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
415 			1, direction, sync);
416 	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
417 			bytes, direction, sync);
418 	u64_stats_update_end(&stats_cpu->syncp);
419 	local_irq_restore(flags);
420 }
421 EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
422 
423 void blkiocg_update_completion_stats(struct blkio_group *blkg,
424 	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
425 {
426 	struct blkio_group_stats *stats;
427 	unsigned long flags;
428 	unsigned long long now = sched_clock();
429 
430 	spin_lock_irqsave(&blkg->stats_lock, flags);
431 	stats = &blkg->stats;
432 	if (time_after64(now, io_start_time))
433 		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
434 				now - io_start_time, direction, sync);
435 	if (time_after64(io_start_time, start_time))
436 		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
437 				io_start_time - start_time, direction, sync);
438 	spin_unlock_irqrestore(&blkg->stats_lock, flags);
439 }
440 EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
441 
442 /*  Merged stats are per cpu.  */
443 void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
444 					bool sync)
445 {
446 	struct blkio_group_stats_cpu *stats_cpu;
447 	unsigned long flags;
448 
449 	/*
450 	 * Disable interrupts to provide mutual exclusion between two writes
451 	 * on the same cpu. This is probably not needed on 64-bit, but that
452 	 * case is not being optimized yet.
453 	 */
454 	local_irq_save(flags);
455 
456 	stats_cpu = this_cpu_ptr(blkg->stats_cpu);
457 
458 	u64_stats_update_begin(&stats_cpu->syncp);
459 	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_MERGED], 1,
460 				direction, sync);
461 	u64_stats_update_end(&stats_cpu->syncp);
462 	local_irq_restore(flags);
463 }
464 EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
465 
466 /*
467  * This function allocates the per-cpu stats for a blkio_group. It must be
468  * called from a sleepable context, as alloc_percpu() requires that.
469  */
470 int blkio_alloc_blkg_stats(struct blkio_group *blkg)
471 {
472 	/* Allocate memory for per cpu stats */
473 	blkg->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
474 	if (!blkg->stats_cpu)
475 		return -ENOMEM;
476 	return 0;
477 }
478 EXPORT_SYMBOL_GPL(blkio_alloc_blkg_stats);
479 
480 void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
481 		struct blkio_group *blkg, void *key, dev_t dev,
482 		enum blkio_policy_id plid)
483 {
484 	unsigned long flags;
485 
486 	spin_lock_irqsave(&blkcg->lock, flags);
487 	spin_lock_init(&blkg->stats_lock);
488 	rcu_assign_pointer(blkg->key, key);
489 	blkg->blkcg_id = css_id(&blkcg->css);
490 	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
491 	blkg->plid = plid;
492 	spin_unlock_irqrestore(&blkcg->lock, flags);
493 	/* Need to take css reference ? */
494 	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
495 	blkg->dev = dev;
496 }
497 EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);
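/*
 * Illustrative sketch of the registration sequence an IO policy is expected
 * to follow when it creates a group for a cgroup (the "my_*" names are
 * hypothetical, only the exported helpers are real):
 *
 *	blkg = &my_group->blkg;
 *	if (blkio_alloc_blkg_stats(blkg))	// needs a sleepable context
 *		goto err;
 *	blkiocg_add_blkio_group(blkcg, blkg, my_key, dev, BLKIO_POLICY_THROTL);
 *
 * "my_key" is the pointer the policy later passes to blkiocg_lookup_group()
 * (typically its per-queue private data) and "dev" is the device number
 * reported back through the per-device stat files.
 */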
498 
499 static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
500 {
501 	hlist_del_init_rcu(&blkg->blkcg_node);
502 	blkg->blkcg_id = 0;
503 }
504 
505 /*
506  * Returns 0 if the blkio_group was still on the cgroup list. Otherwise
507  * returns 1: the blkio_group was unhashed by the time we got to it.
508  */
509 int blkiocg_del_blkio_group(struct blkio_group *blkg)
510 {
511 	struct blkio_cgroup *blkcg;
512 	unsigned long flags;
513 	struct cgroup_subsys_state *css;
514 	int ret = 1;
515 
516 	rcu_read_lock();
517 	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
518 	if (css) {
519 		blkcg = container_of(css, struct blkio_cgroup, css);
520 		spin_lock_irqsave(&blkcg->lock, flags);
521 		if (!hlist_unhashed(&blkg->blkcg_node)) {
522 			__blkiocg_del_blkio_group(blkg);
523 			ret = 0;
524 		}
525 		spin_unlock_irqrestore(&blkcg->lock, flags);
526 	}
527 
528 	rcu_read_unlock();
529 	return ret;
530 }
531 EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);
532 
533 /* called under rcu_read_lock(). */
534 struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
535 {
536 	struct blkio_group *blkg;
537 	struct hlist_node *n;
538 	void *__key;
539 
540 	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
541 		__key = blkg->key;
542 		if (__key == key)
543 			return blkg;
544 	}
545 
546 	return NULL;
547 }
548 EXPORT_SYMBOL_GPL(blkiocg_lookup_group);
549 
550 static void blkio_reset_stats_cpu(struct blkio_group *blkg)
551 {
552 	struct blkio_group_stats_cpu *stats_cpu;
553 	int i, j, k;
554 	/*
555 	 * Note: on a 64-bit arch this is not an issue. On a 32-bit arch this
556 	 * may produce some inconsistent values, as a 64-bit update is not
557 	 * atomic there. Taking care of that corner case would make the code
558 	 * very complicated (sending IPIs to cpus, handling the stats of
559 	 * offline cpus, etc.).
560 	 *
561 	 * Resetting stats is more of a debug feature anyway and this is a
562 	 * corner case, so the code is not being complicated until and unless
563 	 * this becomes a real issue.
564 	 */
565 	for_each_possible_cpu(i) {
566 		stats_cpu = per_cpu_ptr(blkg->stats_cpu, i);
567 		stats_cpu->sectors = 0;
568 		for (j = 0; j < BLKIO_STAT_CPU_NR; j++)
569 			for (k = 0; k < BLKIO_STAT_TOTAL; k++)
570 				stats_cpu->stat_arr_cpu[j][k] = 0;
571 	}
572 }
573 
574 static int
575 blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
576 {
577 	struct blkio_cgroup *blkcg;
578 	struct blkio_group *blkg;
579 	struct blkio_group_stats *stats;
580 	struct hlist_node *n;
581 	uint64_t queued[BLKIO_STAT_TOTAL];
582 	int i;
583 #ifdef CONFIG_DEBUG_BLK_CGROUP
584 	bool idling, waiting, empty;
585 	unsigned long long now = sched_clock();
586 #endif
587 
588 	blkcg = cgroup_to_blkio_cgroup(cgroup);
589 	spin_lock_irq(&blkcg->lock);
590 	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
591 		spin_lock(&blkg->stats_lock);
592 		stats = &blkg->stats;
593 #ifdef CONFIG_DEBUG_BLK_CGROUP
594 		idling = blkio_blkg_idling(stats);
595 		waiting = blkio_blkg_waiting(stats);
596 		empty = blkio_blkg_empty(stats);
597 #endif
598 		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
599 			queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
600 		memset(stats, 0, sizeof(struct blkio_group_stats));
601 		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
602 			stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
603 #ifdef CONFIG_DEBUG_BLK_CGROUP
604 		if (idling) {
605 			blkio_mark_blkg_idling(stats);
606 			stats->start_idle_time = now;
607 		}
608 		if (waiting) {
609 			blkio_mark_blkg_waiting(stats);
610 			stats->start_group_wait_time = now;
611 		}
612 		if (empty) {
613 			blkio_mark_blkg_empty(stats);
614 			stats->start_empty_time = now;
615 		}
616 #endif
617 		spin_unlock(&blkg->stats_lock);
618 
619 		/* Reset per-cpu stats which don't take blkg->stats_lock */
620 		blkio_reset_stats_cpu(blkg);
621 	}
622 
623 	spin_unlock_irq(&blkcg->lock);
624 	return 0;
625 }
626 
627 static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
628 				int chars_left, bool diskname_only)
629 {
630 	snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
631 	chars_left -= strlen(str);
632 	if (chars_left <= 0) {
633 		printk(KERN_WARNING
634 			"Possibly incorrect cgroup stat display format");
635 		return;
636 	}
637 	if (diskname_only)
638 		return;
639 	switch (type) {
640 	case BLKIO_STAT_READ:
641 		strlcat(str, " Read", chars_left);
642 		break;
643 	case BLKIO_STAT_WRITE:
644 		strlcat(str, " Write", chars_left);
645 		break;
646 	case BLKIO_STAT_SYNC:
647 		strlcat(str, " Sync", chars_left);
648 		break;
649 	case BLKIO_STAT_ASYNC:
650 		strlcat(str, " Async", chars_left);
651 		break;
652 	case BLKIO_STAT_TOTAL:
653 		strlcat(str, " Total", chars_left);
654 		break;
655 	default:
656 		strlcat(str, " Invalid", chars_left);
657 	}
658 }
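/*
 * The generated key is what user space sees in the map-style stat files.
 * Illustrative output of blkio.io_service_bytes for device 8:16 (the
 * numbers are made up):
 *
 *	8:16 Read 1310720
 *	8:16 Write 0
 *	8:16 Sync 1310720
 *	8:16 Async 0
 *	8:16 Total 1310720
 *	Total 1310720
 *
 * With diskname_only the key is just "8:16", as used by blkio_fill_stat().
 */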
659 
660 static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
661 				struct cgroup_map_cb *cb, dev_t dev)
662 {
663 	blkio_get_key_name(0, dev, str, chars_left, true);
664 	cb->fill(cb, str, val);
665 	return val;
666 }
667 
668 
669 static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
670 			enum stat_type_cpu type, enum stat_sub_type sub_type)
671 {
672 	int cpu;
673 	struct blkio_group_stats_cpu *stats_cpu;
674 	u64 val = 0, tval;
675 
676 	for_each_possible_cpu(cpu) {
677 		unsigned int start;
678 		stats_cpu  = per_cpu_ptr(blkg->stats_cpu, cpu);
679 
680 		do {
681 			start = u64_stats_fetch_begin(&stats_cpu->syncp);
682 			if (type == BLKIO_STAT_CPU_SECTORS)
683 				tval = stats_cpu->sectors;
684 			else
685 				tval = stats_cpu->stat_arr_cpu[type][sub_type];
686 		} while(u64_stats_fetch_retry(&stats_cpu->syncp, start));
687 
688 		val += tval;
689 	}
690 
691 	return val;
692 }
693 
694 static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
695 		struct cgroup_map_cb *cb, dev_t dev, enum stat_type_cpu type)
696 {
697 	uint64_t disk_total, val;
698 	char key_str[MAX_KEY_LEN];
699 	enum stat_sub_type sub_type;
700 
701 	if (type == BLKIO_STAT_CPU_SECTORS) {
702 		val = blkio_read_stat_cpu(blkg, type, 0);
703 		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb, dev);
704 	}
705 
706 	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
707 			sub_type++) {
708 		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
709 		val = blkio_read_stat_cpu(blkg, type, sub_type);
710 		cb->fill(cb, key_str, val);
711 	}
712 
713 	disk_total = blkio_read_stat_cpu(blkg, type, BLKIO_STAT_READ) +
714 			blkio_read_stat_cpu(blkg, type, BLKIO_STAT_WRITE);
715 
716 	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
717 	cb->fill(cb, key_str, disk_total);
718 	return disk_total;
719 }
720 
721 /* This should be called with blkg->stats_lock held */
722 static uint64_t blkio_get_stat(struct blkio_group *blkg,
723 		struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
724 {
725 	uint64_t disk_total;
726 	char key_str[MAX_KEY_LEN];
727 	enum stat_sub_type sub_type;
728 
729 	if (type == BLKIO_STAT_TIME)
730 		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
731 					blkg->stats.time, cb, dev);
732 #ifdef CONFIG_DEBUG_BLK_CGROUP
733 	if (type == BLKIO_STAT_UNACCOUNTED_TIME)
734 		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
735 					blkg->stats.unaccounted_time, cb, dev);
736 	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
737 		uint64_t sum = blkg->stats.avg_queue_size_sum;
738 		uint64_t samples = blkg->stats.avg_queue_size_samples;
739 		if (samples)
740 			do_div(sum, samples);
741 		else
742 			sum = 0;
743 		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
744 	}
745 	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
746 		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
747 					blkg->stats.group_wait_time, cb, dev);
748 	if (type == BLKIO_STAT_IDLE_TIME)
749 		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
750 					blkg->stats.idle_time, cb, dev);
751 	if (type == BLKIO_STAT_EMPTY_TIME)
752 		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
753 					blkg->stats.empty_time, cb, dev);
754 	if (type == BLKIO_STAT_DEQUEUE)
755 		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
756 					blkg->stats.dequeue, cb, dev);
757 #endif
758 
759 	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
760 			sub_type++) {
761 		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
762 		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
763 	}
764 	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
765 			blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
766 	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
767 	cb->fill(cb, key_str, disk_total);
768 	return disk_total;
769 }
770 
771 static int blkio_check_dev_num(dev_t dev)
772 {
773 	int part = 0;
774 	struct gendisk *disk;
775 
776 	disk = get_gendisk(dev, &part);
777 	if (!disk || part)
778 		return -ENODEV;
779 
780 	return 0;
781 }
782 
783 static int blkio_policy_parse_and_set(char *buf,
784 	struct blkio_policy_node *newpn, enum blkio_policy_id plid, int fileid)
785 {
786 	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
787 	int ret;
788 	unsigned long major, minor, temp;
789 	int i = 0;
790 	dev_t dev;
791 	u64 bps, iops;
792 
793 	memset(s, 0, sizeof(s));
794 
795 	while ((p = strsep(&buf, " ")) != NULL) {
796 		if (!*p)
797 			continue;
798 
799 		s[i++] = p;
800 
801 		/* Prevent the user from passing in too many fields */
802 		if (i == 3)
803 			break;
804 	}
805 
806 	if (i != 2)
807 		return -EINVAL;
808 
809 	p = strsep(&s[0], ":");
810 	if (p != NULL)
811 		major_s = p;
812 	else
813 		return -EINVAL;
814 
815 	minor_s = s[0];
816 	if (!minor_s)
817 		return -EINVAL;
818 
819 	ret = strict_strtoul(major_s, 10, &major);
820 	if (ret)
821 		return -EINVAL;
822 
823 	ret = strict_strtoul(minor_s, 10, &minor);
824 	if (ret)
825 		return -EINVAL;
826 
827 	dev = MKDEV(major, minor);
828 
829 	ret = blkio_check_dev_num(dev);
830 	if (ret)
831 		return ret;
832 
833 	newpn->dev = dev;
834 
835 	if (s[1] == NULL)
836 		return -EINVAL;
837 
838 	switch (plid) {
839 	case BLKIO_POLICY_PROP:
840 		ret = strict_strtoul(s[1], 10, &temp);
841 		if (ret || (temp < BLKIO_WEIGHT_MIN && temp > 0) ||
842 			temp > BLKIO_WEIGHT_MAX)
843 			return -EINVAL;
844 
845 		newpn->plid = plid;
846 		newpn->fileid = fileid;
847 		newpn->val.weight = temp;
848 		break;
849 	case BLKIO_POLICY_THROTL:
850 		switch(fileid) {
851 		case BLKIO_THROTL_read_bps_device:
852 		case BLKIO_THROTL_write_bps_device:
853 			ret = strict_strtoull(s[1], 10, &bps);
854 			if (ret)
855 				return -EINVAL;
856 
857 			newpn->plid = plid;
858 			newpn->fileid = fileid;
859 			newpn->val.bps = bps;
860 			break;
861 		case BLKIO_THROTL_read_iops_device:
862 		case BLKIO_THROTL_write_iops_device:
863 			ret = strict_strtoull(s[1], 10, &iops);
864 			if (ret)
865 				return -EINVAL;
866 
867 			if (iops > THROTL_IOPS_MAX)
868 				return -EINVAL;
869 
870 			newpn->plid = plid;
871 			newpn->fileid = fileid;
872 			newpn->val.iops = (unsigned int)iops;
873 			break;
874 		}
875 		break;
876 	default:
877 		BUG();
878 	}
879 
880 	return 0;
881 }
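/*
 * The accepted input is "<major>:<minor> <value>". Illustrative examples of
 * writes this parser handles:
 *
 *	echo "8:16 1048576" > blkio.throttle.read_bps_device
 *	echo "8:16 500" > blkio.weight_device
 *
 * The device must exist (blkio_check_dev_num()) and the value must be valid
 * for the given policy/file, otherwise -EINVAL is returned.
 */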
882 
883 unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
884 			      dev_t dev)
885 {
886 	struct blkio_policy_node *pn;
887 
888 	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_PROP,
889 				BLKIO_PROP_weight_device);
890 	if (pn)
891 		return pn->val.weight;
892 	else
893 		return blkcg->weight;
894 }
895 EXPORT_SYMBOL_GPL(blkcg_get_weight);
896 
897 uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg, dev_t dev)
898 {
899 	struct blkio_policy_node *pn;
900 
901 	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
902 				BLKIO_THROTL_read_bps_device);
903 	if (pn)
904 		return pn->val.bps;
905 	else
906 		return -1;
907 }
908 
909 uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg, dev_t dev)
910 {
911 	struct blkio_policy_node *pn;
912 	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
913 				BLKIO_THROTL_write_bps_device);
914 	if (pn)
915 		return pn->val.bps;
916 	else
917 		return -1;
918 }
919 
920 unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg, dev_t dev)
921 {
922 	struct blkio_policy_node *pn;
923 
924 	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
925 				BLKIO_THROTL_read_iops_device);
926 	if (pn)
927 		return pn->val.iops;
928 	else
929 		return -1;
930 }
931 
932 unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg, dev_t dev)
933 {
934 	struct blkio_policy_node *pn;
935 	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
936 				BLKIO_THROTL_write_iops_device);
937 	if (pn)
938 		return pn->val.iops;
939 	else
940 		return -1;
941 }
942 
943 /* Checks whether the user asked to delete a policy rule */
944 static bool blkio_delete_rule_command(struct blkio_policy_node *pn)
945 {
946 	switch(pn->plid) {
947 	case BLKIO_POLICY_PROP:
948 		if (pn->val.weight == 0)
949 			return 1;
950 		break;
951 	case BLKIO_POLICY_THROTL:
952 		switch(pn->fileid) {
953 		case BLKIO_THROTL_read_bps_device:
954 		case BLKIO_THROTL_write_bps_device:
955 			if (pn->val.bps == 0)
956 				return 1;
957 			break;
958 		case BLKIO_THROTL_read_iops_device:
959 		case BLKIO_THROTL_write_iops_device:
960 			if (pn->val.iops == 0)
961 				return 1;
962 		}
963 		break;
964 	default:
965 		BUG();
966 	}
967 
968 	return 0;
969 }
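/*
 * Illustrative example: writing a zero value, e.g.
 *
 *	echo "8:16 0" > blkio.throttle.read_bps_device
 *
 * is treated as a delete command, so blkiocg_file_write() removes the
 * matching policy node instead of updating it.
 */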
970 
971 static void blkio_update_policy_rule(struct blkio_policy_node *oldpn,
972 					struct blkio_policy_node *newpn)
973 {
974 	switch(oldpn->plid) {
975 	case BLKIO_POLICY_PROP:
976 		oldpn->val.weight = newpn->val.weight;
977 		break;
978 	case BLKIO_POLICY_THROTL:
979 		switch(newpn->fileid) {
980 		case BLKIO_THROTL_read_bps_device:
981 		case BLKIO_THROTL_write_bps_device:
982 			oldpn->val.bps = newpn->val.bps;
983 			break;
984 		case BLKIO_THROTL_read_iops_device:
985 		case BLKIO_THROTL_write_iops_device:
986 			oldpn->val.iops = newpn->val.iops;
987 		}
988 		break;
989 	default:
990 		BUG();
991 	}
992 }
993 
994 /*
995  * Some rules/values in blkg have changed. Propagate those to respective
996  * policies.
997  */
998 static void blkio_update_blkg_policy(struct blkio_cgroup *blkcg,
999 		struct blkio_group *blkg, struct blkio_policy_node *pn)
1000 {
1001 	unsigned int weight, iops;
1002 	u64 bps;
1003 
1004 	switch(pn->plid) {
1005 	case BLKIO_POLICY_PROP:
1006 		weight = pn->val.weight ? pn->val.weight :
1007 				blkcg->weight;
1008 		blkio_update_group_weight(blkg, weight);
1009 		break;
1010 	case BLKIO_POLICY_THROTL:
1011 		switch(pn->fileid) {
1012 		case BLKIO_THROTL_read_bps_device:
1013 		case BLKIO_THROTL_write_bps_device:
1014 			bps = pn->val.bps ? pn->val.bps : (-1);
1015 			blkio_update_group_bps(blkg, bps, pn->fileid);
1016 			break;
1017 		case BLKIO_THROTL_read_iops_device:
1018 		case BLKIO_THROTL_write_iops_device:
1019 			iops = pn->val.iops ? pn->val.iops : (-1);
1020 			blkio_update_group_iops(blkg, iops, pn->fileid);
1021 			break;
1022 		}
1023 		break;
1024 	default:
1025 		BUG();
1026 	}
1027 }
1028 
1029 /*
1030  * A policy node rule has been updated. Propagate this update to all the
1031  * block groups which might be affected by it.
1032  */
1033 static void blkio_update_policy_node_blkg(struct blkio_cgroup *blkcg,
1034 				struct blkio_policy_node *pn)
1035 {
1036 	struct blkio_group *blkg;
1037 	struct hlist_node *n;
1038 
1039 	spin_lock(&blkio_list_lock);
1040 	spin_lock_irq(&blkcg->lock);
1041 
1042 	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
1043 		if (pn->dev != blkg->dev || pn->plid != blkg->plid)
1044 			continue;
1045 		blkio_update_blkg_policy(blkcg, blkg, pn);
1046 	}
1047 
1048 	spin_unlock_irq(&blkcg->lock);
1049 	spin_unlock(&blkio_list_lock);
1050 }
1051 
1052 static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
1053  				       const char *buffer)
1054 {
1055 	int ret = 0;
1056 	char *buf;
1057 	struct blkio_policy_node *newpn, *pn;
1058 	struct blkio_cgroup *blkcg;
1059 	int keep_newpn = 0;
1060 	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
1061 	int fileid = BLKIOFILE_ATTR(cft->private);
1062 
1063 	buf = kstrdup(buffer, GFP_KERNEL);
1064 	if (!buf)
1065 		return -ENOMEM;
1066 
1067 	newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
1068 	if (!newpn) {
1069 		ret = -ENOMEM;
1070 		goto free_buf;
1071 	}
1072 
1073 	ret = blkio_policy_parse_and_set(buf, newpn, plid, fileid);
1074 	if (ret)
1075 		goto free_newpn;
1076 
1077 	blkcg = cgroup_to_blkio_cgroup(cgrp);
1078 
1079 	spin_lock_irq(&blkcg->lock);
1080 
1081 	pn = blkio_policy_search_node(blkcg, newpn->dev, plid, fileid);
1082 	if (!pn) {
1083 		if (!blkio_delete_rule_command(newpn)) {
1084 			blkio_policy_insert_node(blkcg, newpn);
1085 			keep_newpn = 1;
1086 		}
1087 		spin_unlock_irq(&blkcg->lock);
1088 		goto update_io_group;
1089 	}
1090 
1091 	if (blkio_delete_rule_command(newpn)) {
1092 		blkio_policy_delete_node(pn);
1093 		spin_unlock_irq(&blkcg->lock);
1094 		goto update_io_group;
1095 	}
1096 	spin_unlock_irq(&blkcg->lock);
1097 
1098 	blkio_update_policy_rule(pn, newpn);
1099 
1100 update_io_group:
1101 	blkio_update_policy_node_blkg(blkcg, newpn);
1102 
1103 free_newpn:
1104 	if (!keep_newpn)
1105 		kfree(newpn);
1106 free_buf:
1107 	kfree(buf);
1108 	return ret;
1109 }
1110 
1111 static void
1112 blkio_print_policy_node(struct seq_file *m, struct blkio_policy_node *pn)
1113 {
1114 	switch(pn->plid) {
1115 		case BLKIO_POLICY_PROP:
1116 			if (pn->fileid == BLKIO_PROP_weight_device)
1117 				seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
1118 					MINOR(pn->dev), pn->val.weight);
1119 			break;
1120 		case BLKIO_POLICY_THROTL:
1121 			switch(pn->fileid) {
1122 			case BLKIO_THROTL_read_bps_device:
1123 			case BLKIO_THROTL_write_bps_device:
1124 				seq_printf(m, "%u:%u\t%llu\n", MAJOR(pn->dev),
1125 					MINOR(pn->dev), pn->val.bps);
1126 				break;
1127 			case BLKIO_THROTL_read_iops_device:
1128 			case BLKIO_THROTL_write_iops_device:
1129 				seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
1130 					MINOR(pn->dev), pn->val.iops);
1131 				break;
1132 			}
1133 			break;
1134 		default:
1135 			BUG();
1136 	}
1137 }
1138 
1139 /* cgroup files which read their data from policy nodes end up here */
1140 static void blkio_read_policy_node_files(struct cftype *cft,
1141 			struct blkio_cgroup *blkcg, struct seq_file *m)
1142 {
1143 	struct blkio_policy_node *pn;
1144 
1145 	if (!list_empty(&blkcg->policy_list)) {
1146 		spin_lock_irq(&blkcg->lock);
1147 		list_for_each_entry(pn, &blkcg->policy_list, node) {
1148 			if (!pn_matches_cftype(cft, pn))
1149 				continue;
1150 			blkio_print_policy_node(m, pn);
1151 		}
1152 		spin_unlock_irq(&blkcg->lock);
1153 	}
1154 }
1155 
1156 static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
1157 				struct seq_file *m)
1158 {
1159 	struct blkio_cgroup *blkcg;
1160 	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
1161 	int name = BLKIOFILE_ATTR(cft->private);
1162 
1163 	blkcg = cgroup_to_blkio_cgroup(cgrp);
1164 
1165 	switch(plid) {
1166 	case BLKIO_POLICY_PROP:
1167 		switch(name) {
1168 		case BLKIO_PROP_weight_device:
1169 			blkio_read_policy_node_files(cft, blkcg, m);
1170 			return 0;
1171 		default:
1172 			BUG();
1173 		}
1174 		break;
1175 	case BLKIO_POLICY_THROTL:
1176 		switch(name){
1177 		case BLKIO_THROTL_read_bps_device:
1178 		case BLKIO_THROTL_write_bps_device:
1179 		case BLKIO_THROTL_read_iops_device:
1180 		case BLKIO_THROTL_write_iops_device:
1181 			blkio_read_policy_node_files(cft, blkcg, m);
1182 			return 0;
1183 		default:
1184 			BUG();
1185 		}
1186 		break;
1187 	default:
1188 		BUG();
1189 	}
1190 
1191 	return 0;
1192 }
1193 
1194 static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
1195 		struct cftype *cft, struct cgroup_map_cb *cb,
1196 		enum stat_type type, bool show_total, bool pcpu)
1197 {
1198 	struct blkio_group *blkg;
1199 	struct hlist_node *n;
1200 	uint64_t cgroup_total = 0;
1201 
1202 	rcu_read_lock();
1203 	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
1204 		if (blkg->dev) {
1205 			if (!cftype_blkg_same_policy(cft, blkg))
1206 				continue;
1207 			if (pcpu)
1208 				cgroup_total += blkio_get_stat_cpu(blkg, cb,
1209 						blkg->dev, type);
1210 			else {
1211 				spin_lock_irq(&blkg->stats_lock);
1212 				cgroup_total += blkio_get_stat(blkg, cb,
1213 						blkg->dev, type);
1214 				spin_unlock_irq(&blkg->stats_lock);
1215 			}
1216 		}
1217 	}
1218 	if (show_total)
1219 		cb->fill(cb, "Total", cgroup_total);
1220 	rcu_read_unlock();
1221 	return 0;
1222 }
1223 
1224 /* All map-type cgroup files are serviced by this function */
1225 static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
1226 				struct cgroup_map_cb *cb)
1227 {
1228 	struct blkio_cgroup *blkcg;
1229 	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
1230 	int name = BLKIOFILE_ATTR(cft->private);
1231 
1232 	blkcg = cgroup_to_blkio_cgroup(cgrp);
1233 
1234 	switch(plid) {
1235 	case BLKIO_POLICY_PROP:
1236 		switch(name) {
1237 		case BLKIO_PROP_time:
1238 			return blkio_read_blkg_stats(blkcg, cft, cb,
1239 						BLKIO_STAT_TIME, 0, 0);
1240 		case BLKIO_PROP_sectors:
1241 			return blkio_read_blkg_stats(blkcg, cft, cb,
1242 						BLKIO_STAT_CPU_SECTORS, 0, 1);
1243 		case BLKIO_PROP_io_service_bytes:
1244 			return blkio_read_blkg_stats(blkcg, cft, cb,
1245 					BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
1246 		case BLKIO_PROP_io_serviced:
1247 			return blkio_read_blkg_stats(blkcg, cft, cb,
1248 						BLKIO_STAT_CPU_SERVICED, 1, 1);
1249 		case BLKIO_PROP_io_service_time:
1250 			return blkio_read_blkg_stats(blkcg, cft, cb,
1251 						BLKIO_STAT_SERVICE_TIME, 1, 0);
1252 		case BLKIO_PROP_io_wait_time:
1253 			return blkio_read_blkg_stats(blkcg, cft, cb,
1254 						BLKIO_STAT_WAIT_TIME, 1, 0);
1255 		case BLKIO_PROP_io_merged:
1256 			return blkio_read_blkg_stats(blkcg, cft, cb,
1257 						BLKIO_STAT_CPU_MERGED, 1, 1);
1258 		case BLKIO_PROP_io_queued:
1259 			return blkio_read_blkg_stats(blkcg, cft, cb,
1260 						BLKIO_STAT_QUEUED, 1, 0);
1261 #ifdef CONFIG_DEBUG_BLK_CGROUP
1262 		case BLKIO_PROP_unaccounted_time:
1263 			return blkio_read_blkg_stats(blkcg, cft, cb,
1264 					BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
1265 		case BLKIO_PROP_dequeue:
1266 			return blkio_read_blkg_stats(blkcg, cft, cb,
1267 						BLKIO_STAT_DEQUEUE, 0, 0);
1268 		case BLKIO_PROP_avg_queue_size:
1269 			return blkio_read_blkg_stats(blkcg, cft, cb,
1270 					BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
1271 		case BLKIO_PROP_group_wait_time:
1272 			return blkio_read_blkg_stats(blkcg, cft, cb,
1273 					BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
1274 		case BLKIO_PROP_idle_time:
1275 			return blkio_read_blkg_stats(blkcg, cft, cb,
1276 						BLKIO_STAT_IDLE_TIME, 0, 0);
1277 		case BLKIO_PROP_empty_time:
1278 			return blkio_read_blkg_stats(blkcg, cft, cb,
1279 						BLKIO_STAT_EMPTY_TIME, 0, 0);
1280 #endif
1281 		default:
1282 			BUG();
1283 		}
1284 		break;
1285 	case BLKIO_POLICY_THROTL:
1286 		switch(name){
1287 		case BLKIO_THROTL_io_service_bytes:
1288 			return blkio_read_blkg_stats(blkcg, cft, cb,
1289 						BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
1290 		case BLKIO_THROTL_io_serviced:
1291 			return blkio_read_blkg_stats(blkcg, cft, cb,
1292 						BLKIO_STAT_CPU_SERVICED, 1, 1);
1293 		default:
1294 			BUG();
1295 		}
1296 		break;
1297 	default:
1298 		BUG();
1299 	}
1300 
1301 	return 0;
1302 }
1303 
1304 static int blkio_weight_write(struct blkio_cgroup *blkcg, u64 val)
1305 {
1306 	struct blkio_group *blkg;
1307 	struct hlist_node *n;
1308 	struct blkio_policy_node *pn;
1309 
1310 	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
1311 		return -EINVAL;
1312 
1313 	spin_lock(&blkio_list_lock);
1314 	spin_lock_irq(&blkcg->lock);
1315 	blkcg->weight = (unsigned int)val;
1316 
1317 	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
1318 		pn = blkio_policy_search_node(blkcg, blkg->dev,
1319 				BLKIO_POLICY_PROP, BLKIO_PROP_weight_device);
1320 		if (pn)
1321 			continue;
1322 
1323 		blkio_update_group_weight(blkg, blkcg->weight);
1324 	}
1325 	spin_unlock_irq(&blkcg->lock);
1326 	spin_unlock(&blkio_list_lock);
1327 	return 0;
1328 }
1329 
1330 static u64 blkiocg_file_read_u64(struct cgroup *cgrp, struct cftype *cft) {
1331 	struct blkio_cgroup *blkcg;
1332 	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
1333 	int name = BLKIOFILE_ATTR(cft->private);
1334 
1335 	blkcg = cgroup_to_blkio_cgroup(cgrp);
1336 
1337 	switch(plid) {
1338 	case BLKIO_POLICY_PROP:
1339 		switch(name) {
1340 		case BLKIO_PROP_weight:
1341 			return (u64)blkcg->weight;
1342 		}
1343 		break;
1344 	default:
1345 		BUG();
1346 	}
1347 	return 0;
1348 }
1349 
1350 static int
1351 blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
1352 {
1353 	struct blkio_cgroup *blkcg;
1354 	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
1355 	int name = BLKIOFILE_ATTR(cft->private);
1356 
1357 	blkcg = cgroup_to_blkio_cgroup(cgrp);
1358 
1359 	switch(plid) {
1360 	case BLKIO_POLICY_PROP:
1361 		switch(name) {
1362 		case BLKIO_PROP_weight:
1363 			return blkio_weight_write(blkcg, val);
1364 		}
1365 		break;
1366 	default:
1367 		BUG();
1368 	}
1369 
1370 	return 0;
1371 }
1372 
1373 struct cftype blkio_files[] = {
1374 	{
1375 		.name = "weight_device",
1376 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1377 				BLKIO_PROP_weight_device),
1378 		.read_seq_string = blkiocg_file_read,
1379 		.write_string = blkiocg_file_write,
1380 		.max_write_len = 256,
1381 	},
1382 	{
1383 		.name = "weight",
1384 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1385 				BLKIO_PROP_weight),
1386 		.read_u64 = blkiocg_file_read_u64,
1387 		.write_u64 = blkiocg_file_write_u64,
1388 	},
1389 	{
1390 		.name = "time",
1391 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1392 				BLKIO_PROP_time),
1393 		.read_map = blkiocg_file_read_map,
1394 	},
1395 	{
1396 		.name = "sectors",
1397 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1398 				BLKIO_PROP_sectors),
1399 		.read_map = blkiocg_file_read_map,
1400 	},
1401 	{
1402 		.name = "io_service_bytes",
1403 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1404 				BLKIO_PROP_io_service_bytes),
1405 		.read_map = blkiocg_file_read_map,
1406 	},
1407 	{
1408 		.name = "io_serviced",
1409 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1410 				BLKIO_PROP_io_serviced),
1411 		.read_map = blkiocg_file_read_map,
1412 	},
1413 	{
1414 		.name = "io_service_time",
1415 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1416 				BLKIO_PROP_io_service_time),
1417 		.read_map = blkiocg_file_read_map,
1418 	},
1419 	{
1420 		.name = "io_wait_time",
1421 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1422 				BLKIO_PROP_io_wait_time),
1423 		.read_map = blkiocg_file_read_map,
1424 	},
1425 	{
1426 		.name = "io_merged",
1427 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1428 				BLKIO_PROP_io_merged),
1429 		.read_map = blkiocg_file_read_map,
1430 	},
1431 	{
1432 		.name = "io_queued",
1433 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1434 				BLKIO_PROP_io_queued),
1435 		.read_map = blkiocg_file_read_map,
1436 	},
1437 	{
1438 		.name = "reset_stats",
1439 		.write_u64 = blkiocg_reset_stats,
1440 	},
1441 #ifdef CONFIG_BLK_DEV_THROTTLING
1442 	{
1443 		.name = "throttle.read_bps_device",
1444 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1445 				BLKIO_THROTL_read_bps_device),
1446 		.read_seq_string = blkiocg_file_read,
1447 		.write_string = blkiocg_file_write,
1448 		.max_write_len = 256,
1449 	},
1450 
1451 	{
1452 		.name = "throttle.write_bps_device",
1453 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1454 				BLKIO_THROTL_write_bps_device),
1455 		.read_seq_string = blkiocg_file_read,
1456 		.write_string = blkiocg_file_write,
1457 		.max_write_len = 256,
1458 	},
1459 
1460 	{
1461 		.name = "throttle.read_iops_device",
1462 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1463 				BLKIO_THROTL_read_iops_device),
1464 		.read_seq_string = blkiocg_file_read,
1465 		.write_string = blkiocg_file_write,
1466 		.max_write_len = 256,
1467 	},
1468 
1469 	{
1470 		.name = "throttle.write_iops_device",
1471 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1472 				BLKIO_THROTL_write_iops_device),
1473 		.read_seq_string = blkiocg_file_read,
1474 		.write_string = blkiocg_file_write,
1475 		.max_write_len = 256,
1476 	},
1477 	{
1478 		.name = "throttle.io_service_bytes",
1479 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1480 				BLKIO_THROTL_io_service_bytes),
1481 		.read_map = blkiocg_file_read_map,
1482 	},
1483 	{
1484 		.name = "throttle.io_serviced",
1485 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1486 				BLKIO_THROTL_io_serviced),
1487 		.read_map = blkiocg_file_read_map,
1488 	},
1489 #endif /* CONFIG_BLK_DEV_THROTTLING */
1490 
1491 #ifdef CONFIG_DEBUG_BLK_CGROUP
1492 	{
1493 		.name = "avg_queue_size",
1494 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1495 				BLKIO_PROP_avg_queue_size),
1496 		.read_map = blkiocg_file_read_map,
1497 	},
1498 	{
1499 		.name = "group_wait_time",
1500 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1501 				BLKIO_PROP_group_wait_time),
1502 		.read_map = blkiocg_file_read_map,
1503 	},
1504 	{
1505 		.name = "idle_time",
1506 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1507 				BLKIO_PROP_idle_time),
1508 		.read_map = blkiocg_file_read_map,
1509 	},
1510 	{
1511 		.name = "empty_time",
1512 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1513 				BLKIO_PROP_empty_time),
1514 		.read_map = blkiocg_file_read_map,
1515 	},
1516 	{
1517 		.name = "dequeue",
1518 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1519 				BLKIO_PROP_dequeue),
1520 		.read_map = blkiocg_file_read_map,
1521 	},
1522 	{
1523 		.name = "unaccounted_time",
1524 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1525 				BLKIO_PROP_unaccounted_time),
1526 		.read_map = blkiocg_file_read_map,
1527 	},
1528 #endif
1529 };
1530 
1531 static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
1532 {
1533 	return cgroup_add_files(cgroup, subsys, blkio_files,
1534 				ARRAY_SIZE(blkio_files));
1535 }
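/*
 * cgroup_add_files() exposes each entry of blkio_files[] as a per-cgroup
 * file named "<subsys>.<name>", so a mounted blkio hierarchy contains e.g.
 * blkio.weight, blkio.io_service_bytes and blkio.throttle.read_bps_device.
 */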
1536 
1537 static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
1538 {
1539 	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
1540 	unsigned long flags;
1541 	struct blkio_group *blkg;
1542 	void *key;
1543 	struct blkio_policy_type *blkiop;
1544 	struct blkio_policy_node *pn, *pntmp;
1545 
1546 	rcu_read_lock();
1547 	do {
1548 		spin_lock_irqsave(&blkcg->lock, flags);
1549 
1550 		if (hlist_empty(&blkcg->blkg_list)) {
1551 			spin_unlock_irqrestore(&blkcg->lock, flags);
1552 			break;
1553 		}
1554 
1555 		blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
1556 					blkcg_node);
1557 		key = rcu_dereference(blkg->key);
1558 		__blkiocg_del_blkio_group(blkg);
1559 
1560 		spin_unlock_irqrestore(&blkcg->lock, flags);
1561 
1562 		/*
1563 		 * This blkio_group is being unlinked as the associated cgroup is
1564 		 * going away. Let all the IO controlling policies know about
1565 		 * this event.
1566 		 */
1567 		spin_lock(&blkio_list_lock);
1568 		list_for_each_entry(blkiop, &blkio_list, list) {
1569 			if (blkiop->plid != blkg->plid)
1570 				continue;
1571 			blkiop->ops.blkio_unlink_group_fn(key, blkg);
1572 		}
1573 		spin_unlock(&blkio_list_lock);
1574 	} while (1);
1575 
1576 	list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
1577 		blkio_policy_delete_node(pn);
1578 		kfree(pn);
1579 	}
1580 
1581 	free_css_id(&blkio_subsys, &blkcg->css);
1582 	rcu_read_unlock();
1583 	if (blkcg != &blkio_root_cgroup)
1584 		kfree(blkcg);
1585 }
1586 
1587 static struct cgroup_subsys_state *
1588 blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
1589 {
1590 	struct blkio_cgroup *blkcg;
1591 	struct cgroup *parent = cgroup->parent;
1592 
1593 	if (!parent) {
1594 		blkcg = &blkio_root_cgroup;
1595 		goto done;
1596 	}
1597 
1598 	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
1599 	if (!blkcg)
1600 		return ERR_PTR(-ENOMEM);
1601 
1602 	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
1603 done:
1604 	spin_lock_init(&blkcg->lock);
1605 	INIT_HLIST_HEAD(&blkcg->blkg_list);
1606 
1607 	INIT_LIST_HEAD(&blkcg->policy_list);
1608 	return &blkcg->css;
1609 }
1610 
1611 /*
1612  * We cannot support shared io contexts, as we have no means to support
1613  * two tasks with the same ioc in two different groups without major rework
1614  * of the main cic data structures.  For now we allow a task to change
1615  * its cgroup only if it's the only owner of its ioc.
1616  */
1617 static int blkiocg_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
1618 {
1619 	struct io_context *ioc;
1620 	int ret = 0;
1621 
1622 	/* task_lock() is needed to avoid races with exit_io_context() */
1623 	task_lock(tsk);
1624 	ioc = tsk->io_context;
1625 	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
1626 		ret = -EINVAL;
1627 	task_unlock(tsk);
1628 
1629 	return ret;
1630 }
1631 
1632 static void blkiocg_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
1633 {
1634 	struct io_context *ioc;
1635 
1636 	task_lock(tsk);
1637 	ioc = tsk->io_context;
1638 	if (ioc)
1639 		ioc->cgroup_changed = 1;
1640 	task_unlock(tsk);
1641 }
1642 
1643 void blkio_policy_register(struct blkio_policy_type *blkiop)
1644 {
1645 	spin_lock(&blkio_list_lock);
1646 	list_add_tail(&blkiop->list, &blkio_list);
1647 	spin_unlock(&blkio_list_lock);
1648 }
1649 EXPORT_SYMBOL_GPL(blkio_policy_register);
1650 
1651 void blkio_policy_unregister(struct blkio_policy_type *blkiop)
1652 {
1653 	spin_lock(&blkio_list_lock);
1654 	list_del_init(&blkiop->list);
1655 	spin_unlock(&blkio_list_lock);
1656 }
1657 EXPORT_SYMBOL_GPL(blkio_policy_unregister);
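/*
 * Minimal, illustrative sketch of how an IO policy plugs in (every "my_*"
 * name is hypothetical; the fields are the ones this file dereferences):
 *
 *	static struct blkio_policy_type my_policy = {
 *		.ops = {
 *			.blkio_unlink_group_fn		= my_unlink_group,
 *			.blkio_update_group_weight_fn	= my_update_weight,
 *		},
 *		.plid = BLKIO_POLICY_PROP,
 *	};
 *
 *	blkio_policy_register(&my_policy);	// e.g. from the policy's init
 *	...
 *	blkio_policy_unregister(&my_policy);	// and from its exit path
 */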
1658 
1659 static int __init init_cgroup_blkio(void)
1660 {
1661 	return cgroup_load_subsys(&blkio_subsys);
1662 }
1663 
1664 static void __exit exit_cgroup_blkio(void)
1665 {
1666 	cgroup_unload_subsys(&blkio_subsys);
1667 }
1668 
1669 module_init(init_cgroup_blkio);
1670 module_exit(exit_cgroup_blkio);
1671 MODULE_LICENSE("GPL");
1672