/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include "blk-cgroup.h"
#include "blk.h"

/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;

/* Throttling is performed over a 100ms slice, after which the slice is renewed */
static unsigned long throtl_slice = HZ/10;	/* 100 ms */

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;
static void throtl_schedule_delayed_work(struct throtl_data *td,
				unsigned long delay);

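/*
 * Service tree of active throtl groups, keyed and sorted by disptime.
 * ->left caches the leftmost (earliest disptime) node so the next group
 * to dispatch can be found without rewalking the rbtree, and
 * ->min_disptime caches that group's dispatch time.
 */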
struct throtl_rb_root {
	struct rb_root rb;
	struct rb_node *left;
	unsigned int count;
	unsigned long min_disptime;
};

#define THROTL_RB_ROOT	(struct throtl_rb_root) { .rb = RB_ROOT, .left = NULL, \
			.count = 0, .min_disptime = 0}

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)

struct throtl_grp {
	/* List of throtl groups on the request queue */
	struct hlist_node tg_node;

	/* active throtl group service_tree member */
	struct rb_node rb_node;

	/*
	 * Dispatch time in jiffies. This is the estimated time when the
	 * group will unthrottle and be ready to dispatch more bios. It is
	 * used as the key to sort active groups in the service tree.
	 */
	unsigned long disptime;

	struct blkio_group blkg;
	atomic_t ref;
	unsigned int flags;

	/* Two lists for READ and WRITE */
	struct bio_list bio_lists[2];

	/* Number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/* bytes per second rate limits */
	uint64_t bps[2];

	/* IOPS limits */
	unsigned int iops[2];

	/* Number of bytes dispatched in current slice */
	uint64_t bytes_disp[2];
	/* Number of bios dispatched in current slice */
	unsigned int io_disp[2];

	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	/* Some throttle limits got updated for the group */
	int limits_changed;

	struct rcu_head rcu_head;
};

struct throtl_data
{
	/* List of throtl groups */
	struct hlist_head tg_list;

	/* service tree for active throtl groups */
	struct throtl_rb_root tg_service_tree;

	struct throtl_grp *root_tg;
	struct request_queue *queue;

	/* Total number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/* Total number of undestroyed groups */
	unsigned int nr_undestroyed_grps;

	/* Work for dispatching throttled bios */
	struct delayed_work throtl_work;

	int limits_changed;
};

enum tg_state_flags {
	THROTL_TG_FLAG_on_rr = 0,	/* on round-robin busy list */
};


#define THROTL_TG_FNS(name)						\
static inline void throtl_mark_tg_##name(struct throtl_grp *tg)	\
{									\
	(tg)->flags |= (1 << THROTL_TG_FLAG_##name);			\
}									\
static inline void throtl_clear_tg_##name(struct throtl_grp *tg)	\
{									\
	(tg)->flags &= ~(1 << THROTL_TG_FLAG_##name);			\
}									\
static inline int throtl_tg_##name(const struct throtl_grp *tg)	\
{									\
	return ((tg)->flags & (1 << THROTL_TG_FLAG_##name)) != 0;	\
}

THROTL_TG_FNS(on_rr);
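
/*
 * For reference, THROTL_TG_FNS(on_rr) expands to three helpers:
 *
 *	throtl_mark_tg_on_rr(tg)  - set THROTL_TG_FLAG_on_rr in tg->flags
 *	throtl_clear_tg_on_rr(tg) - clear the flag
 *	throtl_tg_on_rr(tg)       - test the flag, returning 0 or 1
 */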

#define throtl_log_tg(td, tg, fmt, args...)				\
	blk_add_trace_msg((td)->queue, "throtl %s " fmt,		\
				blkg_path(&(tg)->blkg), ##args);

#define throtl_log(td, fmt, args...)	\
	blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)

static inline struct throtl_grp *tg_of_blkg(struct blkio_group *blkg)
{
	if (blkg)
		return container_of(blkg, struct throtl_grp, blkg);

	return NULL;
}

static inline unsigned int total_nr_queued(struct throtl_data *td)
{
	return td->nr_queued[0] + td->nr_queued[1];
}

static inline struct throtl_grp *throtl_ref_get_tg(struct throtl_grp *tg)
{
	atomic_inc(&tg->ref);
	return tg;
}

static void throtl_free_tg(struct rcu_head *head)
{
	struct throtl_grp *tg;

	tg = container_of(head, struct throtl_grp, rcu_head);
	free_percpu(tg->blkg.stats_cpu);
	kfree(tg);
}

static void throtl_put_tg(struct throtl_grp *tg)
{
	BUG_ON(atomic_read(&tg->ref) <= 0);
	if (!atomic_dec_and_test(&tg->ref))
		return;

	/*
	 * A group is freed in an rcu manner. But having an rcu lock does not
	 * mean that one can access all the fields of blkg and assume these
	 * are valid. For example, don't try to follow the throtl_data and
	 * request queue links.
	 *
	 * Having a reference to blkg under an rcu lock allows access only to
	 * values local to groups, like group stats and group rate limits.
	 */
	call_rcu(&tg->rcu_head, throtl_free_tg);
}

static void throtl_init_group(struct throtl_grp *tg)
{
	INIT_HLIST_NODE(&tg->tg_node);
	RB_CLEAR_NODE(&tg->rb_node);
	bio_list_init(&tg->bio_lists[0]);
	bio_list_init(&tg->bio_lists[1]);
	tg->limits_changed = false;

	/* Practically unlimited BW */
	tg->bps[0] = tg->bps[1] = -1;
	tg->iops[0] = tg->iops[1] = -1;

	/*
	 * Take the initial reference that will be released on destroy.
	 * This can be thought of as a joint reference by the cgroup and
	 * the request queue, which will be dropped by whichever of the
	 * request queue exit or cgroup deletion paths runs first.
	 */
	atomic_set(&tg->ref, 1);
}

/* Should be called with rcu read lock held (needed for blkcg) */
static void
throtl_add_group_to_td_list(struct throtl_data *td, struct throtl_grp *tg)
{
	hlist_add_head(&tg->tg_node, &td->tg_list);
	td->nr_undestroyed_grps++;
}

static void
__throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
{
	struct backing_dev_info *bdi = &td->queue->backing_dev_info;
	unsigned int major, minor;

	if (!tg || tg->blkg.dev)
		return;

	/*
	 * Fill in device details for a group which might not have been
	 * filled at group creation time, as the queue was being instantiated
	 * and the driver had not attached a device yet.
	 */
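	/*
	 * For example, a bdi device named "8:16" (disk sdb) parses to
	 * major 8 / minor 16 below, and blkg.dev becomes MKDEV(8, 16).
	 */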
	if (bdi->dev && dev_name(bdi->dev)) {
		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
		tg->blkg.dev = MKDEV(major, minor);
	}
}

/*
 * Should be called without queue lock held. Here the queue lock will be
 * taken rarely. It will be taken only once during the lifetime of a group,
 * if need be.
 */
static void
throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
{
	if (!tg || tg->blkg.dev)
		return;

	spin_lock_irq(td->queue->queue_lock);
	__throtl_tg_fill_dev_details(td, tg);
	spin_unlock_irq(td->queue->queue_lock);
}

static void throtl_init_add_tg_lists(struct throtl_data *td,
			struct throtl_grp *tg, struct blkio_cgroup *blkcg)
{
	__throtl_tg_fill_dev_details(td, tg);

	/* Add group onto cgroup list */
	blkiocg_add_blkio_group(blkcg, &tg->blkg, (void *)td,
				tg->blkg.dev, BLKIO_POLICY_THROTL);

	tg->bps[READ] = blkcg_get_read_bps(blkcg, tg->blkg.dev);
	tg->bps[WRITE] = blkcg_get_write_bps(blkcg, tg->blkg.dev);
	tg->iops[READ] = blkcg_get_read_iops(blkcg, tg->blkg.dev);
	tg->iops[WRITE] = blkcg_get_write_iops(blkcg, tg->blkg.dev);

	throtl_add_group_to_td_list(td, tg);
}

/* Should be called without queue lock and outside of rcu period */
static struct throtl_grp *throtl_alloc_tg(struct throtl_data *td)
{
	struct throtl_grp *tg = NULL;
	int ret;

	tg = kzalloc_node(sizeof(*tg), GFP_ATOMIC, td->queue->node);
	if (!tg)
		return NULL;

	ret = blkio_alloc_blkg_stats(&tg->blkg);

	if (ret) {
		kfree(tg);
		return NULL;
	}

	throtl_init_group(tg);
	return tg;
}

static struct
throtl_grp *throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
{
	struct throtl_grp *tg = NULL;
	void *key = td;

	/*
	 * This is the common case when there are no blkio cgroups.
	 * Avoid a lookup in this case.
	 */
	if (blkcg == &blkio_root_cgroup)
		tg = td->root_tg;
	else
		tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));

	__throtl_tg_fill_dev_details(td, tg);
	return tg;
}

static struct throtl_grp *throtl_get_tg(struct throtl_data *td)
{
	struct throtl_grp *tg = NULL, *__tg = NULL;
	struct blkio_cgroup *blkcg;
	struct request_queue *q = td->queue;

	/* no throttling for dead queue */
	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
		return NULL;

	rcu_read_lock();
	blkcg = task_blkio_cgroup(current);
	tg = throtl_find_tg(td, blkcg);
	if (tg) {
		rcu_read_unlock();
		return tg;
	}

	/*
	 * Need to allocate a group. Allocation of a group also needs
	 * allocation of per cpu stats which in turn takes a mutex() and can
	 * block. Hence we need to drop the rcu lock and queue_lock before
	 * we call alloc.
	 */
	rcu_read_unlock();
	spin_unlock_irq(q->queue_lock);

	tg = throtl_alloc_tg(td);

	/* Allocation attempted. Re-take the queue lock */
	spin_lock_irq(q->queue_lock);

	/* Make sure @q is still alive */
	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
		kfree(tg);
		return NULL;
	}

	/*
	 * Initialize the new group. After sleeping, read the blkcg again.
	 */
	rcu_read_lock();
	blkcg = task_blkio_cgroup(current);

	/*
	 * If some other thread already allocated the group while we were
	 * not holding the queue lock, free up the group.
	 */
	__tg = throtl_find_tg(td, blkcg);

	if (__tg) {
		kfree(tg);
		rcu_read_unlock();
		return __tg;
	}

	/* Group allocation failed. Account the IO to the root group */
	if (!tg) {
		tg = td->root_tg;
		rcu_read_unlock();
		return tg;
	}

	throtl_init_add_tg_lists(td, tg, blkcg);
	rcu_read_unlock();
	return tg;
}

static struct throtl_grp *throtl_rb_first(struct throtl_rb_root *root)
{
	/* Service tree is empty */
	if (!root->count)
		return NULL;

	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry_tg(root->left);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void throtl_rb_erase(struct rb_node *n, struct throtl_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;
	rb_erase_init(n, &root->rb);
	--root->count;
}

static void update_min_dispatch_time(struct throtl_rb_root *st)
{
	struct throtl_grp *tg;

	tg = throtl_rb_first(st);
	if (!tg)
		return;

	st->min_disptime = tg->disptime;
}

static void
tg_service_tree_add(struct throtl_rb_root *st, struct throtl_grp *tg)
{
	struct rb_node **node = &st->rb.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		st->left = &tg->rb_node;

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color(&tg->rb_node, &st->rb);
}

static void __throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	struct throtl_rb_root *st = &td->tg_service_tree;

	tg_service_tree_add(st, tg);
	throtl_mark_tg_on_rr(tg);
	st->count++;
}

static void throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	if (!throtl_tg_on_rr(tg))
		__throtl_enqueue_tg(td, tg);
}

static void __throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	throtl_rb_erase(&tg->rb_node, &td->tg_service_tree);
	throtl_clear_tg_on_rr(tg);
}

static void throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	if (throtl_tg_on_rr(tg))
		__throtl_dequeue_tg(td, tg);
}

static void throtl_schedule_next_dispatch(struct throtl_data *td)
{
	struct throtl_rb_root *st = &td->tg_service_tree;

	/*
	 * If there are more bios pending, schedule more work.
	 */
	if (!total_nr_queued(td))
		return;

	BUG_ON(!st->count);

	update_min_dispatch_time(st);

	if (time_before_eq(st->min_disptime, jiffies))
		throtl_schedule_delayed_work(td, 0);
	else
		throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
}

static inline void
throtl_start_new_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + throtl_slice;
	throtl_log_tg(td, tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);
}

static inline void throtl_set_slice_end(struct throtl_data *td,
		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
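	/*
	 * E.g. with HZ=1000 (throtl_slice=100): roundup(1234, 100) = 1300,
	 * so the slice end always lands on a throtl_slice boundary.
	 */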
}

static inline void throtl_extend_slice(struct throtl_data *td,
		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
	throtl_log_tg(td, tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);
}

/* Determine if the previously allocated or extended slice is complete or not */
static bool
throtl_slice_used(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
		return 0;

	return 1;
}

/* Trim the used slices and adjust slice start accordingly */
static inline void
throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	unsigned long nr_slices, time_elapsed, io_trim;
	u64 bytes_trim, tmp;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps are unlimited (-1), then the time slice doesn't get
	 * renewed. Don't try to trim the slice if the slice is used up.
	 * A new slice will start when appropriate.
	 */
	if (throtl_slice_used(td, tg, rw))
		return;

	/*
	 * A bio has been dispatched. Also adjust slice_end. It might happen
	 * that initially the cgroup limit was very low resulting in a high
	 * slice_end, but later the limit was bumped up and the bio was
	 * dispatched sooner, then we need to reduce slice_end. A high bogus
	 * slice_end is bad because it does not allow a new slice to start.
	 */

	throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice);

	time_elapsed = jiffies - tg->slice_start[rw];

	nr_slices = time_elapsed / throtl_slice;

	if (!nr_slices)
		return;
	tmp = tg->bps[rw] * throtl_slice * nr_slices;
	do_div(tmp, HZ);
	bytes_trim = tmp;

	io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;
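	/*
	 * Worked example, assuming HZ=1000 (throtl_slice=100 jiffies):
	 * with bps=1048576 (1 MiB/s), iops=10 and nr_slices=2, we trim
	 * bytes_trim = 1048576 * 100 * 2 / 1000 = 209715 bytes and
	 * io_trim = 10 * 100 * 2 / 1000 = 2 ios, i.e. exactly what the
	 * limits allowed for the two expired slices.
	 */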

	if (!bytes_trim && !io_trim)
		return;

	if (tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
	else
		tg->bytes_disp[rw] = 0;

	if (tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;
	else
		tg->io_disp[rw] = 0;

	tg->slice_start[rw] += nr_slices * throtl_slice;

	throtl_log_tg(td, tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
			" start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
			tg->slice_start[rw], tg->slice_end[rw], jiffies);
}

static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	u64 tmp;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	/*
	 * jiffy_elapsed_rnd should not be a large value: the minimum iops
	 * is 1, so at most the elapsed jiffies are the equivalent of one
	 * second, as we allow dispatch after one second and by then the
	 * slice should have been trimmed.
	 */

	tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);

	if (tmp > UINT_MAX)
		io_allowed = UINT_MAX;
	else
		io_allowed = tmp;
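	/*
	 * E.g. with HZ=1000, iops=100 and jiffy_elapsed_rnd=100 (one
	 * 100ms slice): io_allowed = 100 * 100 / 1000 = 10 ios.
	 */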

	if (tg->io_disp[rw] + 1 <= io_allowed) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/* Calc approx time to dispatch */
	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;

	if (jiffy_wait > jiffy_elapsed)
		jiffy_wait = jiffy_wait - jiffy_elapsed;
	else
		jiffy_wait = 1;

	if (wait)
		*wait = jiffy_wait;
	return 0;
}

static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes, tmp;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	tmp = tg->bps[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);
	bytes_allowed = tmp;
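	/*
	 * E.g. with HZ=1000, bps=1048576 (1 MiB/s) and
	 * jiffy_elapsed_rnd=100: bytes_allowed = 1048576 * 100 / 1000
	 * = 104857 bytes (~100 KiB) for the elapsed part of the slice.
	 */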

	if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);

	if (!jiffy_wait)
		jiffy_wait = 1;

	/*
	 * This wait time does not take into account the rounding up we
	 * did above. Add that time as well.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
	if (wait)
		*wait = jiffy_wait;
	return 0;
}

static bool tg_no_rule_group(struct throtl_grp *tg, bool rw)
{
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1)
		return 1;
	return 0;
}

/*
 * Returns whether one can dispatch a bio or not. Also returns the approx
 * number of jiffies to wait before this bio is within the IO rate and can
 * be dispatched.
 */
static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
				struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

	/*
	 * Currently the whole state machine of the group depends on the
	 * first bio queued in the group's bio list. So one should not be
	 * calling this function with a different bio if there are other
	 * bios queued.
	 */
	BUG_ON(tg->nr_queued[rw] && bio != bio_list_peek(&tg->bio_lists[rw]));

	/* If tg->bps = -1, then BW is unlimited */
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/*
	 * If the previous slice expired, start a new one, otherwise
	 * renew/extend the existing slice to make sure it is at least
	 * throtl_slice long since now.
	 */
	if (throtl_slice_used(td, tg, rw))
		throtl_start_new_slice(td, tg, rw);
	else {
		if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
			throtl_extend_slice(td, tg, rw, jiffies + throtl_slice);
	}

	if (tg_with_in_bps_limit(td, tg, bio, &bps_wait)
	    && tg_with_in_iops_limit(td, tg, bio, &iops_wait)) {
		if (wait)
			*wait = 0;
		return 1;
	}

	max_wait = max(bps_wait, iops_wait);

	if (wait)
		*wait = max_wait;

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(td, tg, rw, jiffies + max_wait);

	return 0;
}

static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);
	bool sync = rw_is_sync(bio->bi_rw);

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio->bi_size;
	tg->io_disp[rw]++;

	blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, rw, sync);
}

static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
			struct bio *bio)
{
	bool rw = bio_data_dir(bio);

	bio_list_add(&tg->bio_lists[rw], bio);
	/* Take a bio reference on tg */
	throtl_ref_get_tg(tg);
	tg->nr_queued[rw]++;
	td->nr_queued[rw]++;
	throtl_enqueue_tg(td, tg);
}

static void tg_update_disptime(struct throtl_data *td, struct throtl_grp *tg)
{
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
	struct bio *bio;

	if ((bio = bio_list_peek(&tg->bio_lists[READ])))
		tg_may_dispatch(td, tg, bio, &read_wait);

	if ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
		tg_may_dispatch(td, tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_dequeue_tg(td, tg);
	tg->disptime = disptime;
	throtl_enqueue_tg(td, tg);
}

static void tg_dispatch_one_bio(struct throtl_data *td, struct throtl_grp *tg,
				bool rw, struct bio_list *bl)
{
	struct bio *bio;

	bio = bio_list_pop(&tg->bio_lists[rw]);
	tg->nr_queued[rw]--;
	/* Drop bio reference on tg */
	throtl_put_tg(tg);

	BUG_ON(td->nr_queued[rw] <= 0);
	td->nr_queued[rw]--;

	throtl_charge_bio(tg, bio);
	bio_list_add(bl, bio);
	bio->bi_rw |= REQ_THROTTLED;

	throtl_trim_slice(td, tg, rw);
}

static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
				struct bio_list *bl)
{
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = throtl_grp_quantum*3/4;
	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
	struct bio *bio;
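
	/*
	 * With the default throtl_grp_quantum of 8, this works out to
	 * max_nr_reads = 8 * 3 / 4 = 6 and max_nr_writes = 8 - 6 = 2,
	 * i.e. the 75%/25% read/write split noted below.
	 */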

	/* Try to dispatch 75% READS and 25% WRITES */

	while ((bio = bio_list_peek(&tg->bio_lists[READ]))
		&& tg_may_dispatch(td, tg, bio, NULL)) {

		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
		nr_reads++;

		if (nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = bio_list_peek(&tg->bio_lists[WRITE]))
		&& tg_may_dispatch(td, tg, bio, NULL)) {

		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
		nr_writes++;

		if (nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}

static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
{
	unsigned int nr_disp = 0;
	struct throtl_grp *tg;
	struct throtl_rb_root *st = &td->tg_service_tree;

	while (1) {
		tg = throtl_rb_first(st);

		if (!tg)
			break;

		if (time_before(jiffies, tg->disptime))
			break;

		throtl_dequeue_tg(td, tg);

		nr_disp += throtl_dispatch_tg(td, tg, bl);

		if (tg->nr_queued[0] || tg->nr_queued[1]) {
			tg_update_disptime(td, tg);
			throtl_enqueue_tg(td, tg);
		}

		if (nr_disp >= throtl_quantum)
			break;
	}

	return nr_disp;
}

static void throtl_process_limit_change(struct throtl_data *td)
{
	struct throtl_grp *tg;
	struct hlist_node *pos, *n;

	if (!td->limits_changed)
		return;

	xchg(&td->limits_changed, false);

	throtl_log(td, "limits changed");

	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
		if (!tg->limits_changed)
			continue;

		if (!xchg(&tg->limits_changed, false))
			continue;

		throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
			" riops=%u wiops=%u", tg->bps[READ], tg->bps[WRITE],
			tg->iops[READ], tg->iops[WRITE]);

		/*
		 * Restart the slices for both READ and WRITE. It might
		 * happen that a group's limits are dropped suddenly and
		 * we don't want to account recently dispatched IO at the
		 * new low rate.
		 */
		throtl_start_new_slice(td, tg, 0);
		throtl_start_new_slice(td, tg, 1);

		if (throtl_tg_on_rr(tg))
			tg_update_disptime(td, tg);
	}
}

/* Dispatch throttled bios. Should be called without queue lock held. */
static int throtl_dispatch(struct request_queue *q)
{
	struct throtl_data *td = q->td;
	unsigned int nr_disp = 0;
	struct bio_list bio_list_on_stack;
	struct bio *bio;
	struct blk_plug plug;

	spin_lock_irq(q->queue_lock);

	throtl_process_limit_change(td);

	if (!total_nr_queued(td))
		goto out;

	bio_list_init(&bio_list_on_stack);

	throtl_log(td, "dispatch nr_queued=%u read=%u write=%u",
			total_nr_queued(td), td->nr_queued[READ],
			td->nr_queued[WRITE]);

	nr_disp = throtl_select_dispatch(td, &bio_list_on_stack);

	if (nr_disp)
		throtl_log(td, "bios disp=%u", nr_disp);

	throtl_schedule_next_dispatch(td);
out:
	spin_unlock_irq(q->queue_lock);

	/*
	 * If we dispatched some requests, unplug the queue to ensure
	 * immediate dispatch.
	 */
	if (nr_disp) {
		blk_start_plug(&plug);
		while ((bio = bio_list_pop(&bio_list_on_stack)))
			generic_make_request(bio);
		blk_finish_plug(&plug);
	}
	return nr_disp;
}

void blk_throtl_work(struct work_struct *work)
{
	struct throtl_data *td = container_of(work, struct throtl_data,
					throtl_work.work);
	struct request_queue *q = td->queue;

	throtl_dispatch(q);
}

/* Call with queue lock held */
static void
throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
{
	struct delayed_work *dwork = &td->throtl_work;

	/* schedule work if limits changed even if no bio is queued */
	if (total_nr_queued(td) || td->limits_changed) {
		/*
		 * We might have a work scheduled to be executed in the
		 * future. Cancel that and schedule a new one.
		 */
		__cancel_delayed_work(dwork);
		queue_delayed_work(kthrotld_workqueue, dwork, delay);
		throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
				delay, jiffies);
	}
}

static void
throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	/* Something is wrong if we are trying to remove the same group twice */
	BUG_ON(hlist_unhashed(&tg->tg_node));

	hlist_del_init(&tg->tg_node);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, the group can be destroyed.
	 */
	throtl_put_tg(tg);
	td->nr_undestroyed_grps--;
}

static void throtl_release_tgs(struct throtl_data *td)
{
	struct hlist_node *pos, *n;
	struct throtl_grp *tg;

	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
		/*
		 * If the cgroup removal path got to the blkio_group first
		 * and removed it from the cgroup list, then it will take
		 * care of destroying the throtl group too.
		 */
		if (!blkiocg_del_blkio_group(&tg->blkg))
			throtl_destroy_tg(td, tg);
	}
}

/*
 * Blk cgroup controller notification saying that the blkio_group object is
 * being delinked as the associated cgroup object is going away. That also
 * means that no new IO will come to this group. So get rid of this group as
 * soon as any pending IO in the group is finished.
 *
 * This function is called under rcu_read_lock(). "key" is the rcu protected
 * pointer. That means "key" is a valid throtl_data pointer as long as we are
 * under the rcu read lock.
 *
 * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
 * it should not be NULL, as even if the queue was going away, the cgroup
 * deletion path got to it first.
 */
void throtl_unlink_blkio_group(void *key, struct blkio_group *blkg)
{
	unsigned long flags;
	struct throtl_data *td = key;

	spin_lock_irqsave(td->queue->queue_lock, flags);
	throtl_destroy_tg(td, tg_of_blkg(blkg));
	spin_unlock_irqrestore(td->queue->queue_lock, flags);
}

static void throtl_update_blkio_group_common(struct throtl_data *td,
				struct throtl_grp *tg)
{
	xchg(&tg->limits_changed, true);
	xchg(&td->limits_changed, true);
	/* Schedule a work now to process the limit change */
	throtl_schedule_delayed_work(td, 0);
}

/*
 * For all update functions, "key" should be a valid pointer because these
 * update functions are called under blkcg_lock; that means blkg is valid
 * and in turn key is valid. The queue exit path cannot race because of
 * blkcg_lock.
 *
 * Cannot take the queue lock in the update functions, as taking the queue
 * lock under blkcg_lock is not allowed. On other paths we take blkcg_lock
 * under queue_lock.
 */
static void throtl_update_blkio_group_read_bps(void *key,
				struct blkio_group *blkg, u64 read_bps)
{
	struct throtl_data *td = key;
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->bps[READ] = read_bps;
	throtl_update_blkio_group_common(td, tg);
}

static void throtl_update_blkio_group_write_bps(void *key,
				struct blkio_group *blkg, u64 write_bps)
{
	struct throtl_data *td = key;
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->bps[WRITE] = write_bps;
	throtl_update_blkio_group_common(td, tg);
}

static void throtl_update_blkio_group_read_iops(void *key,
			struct blkio_group *blkg, unsigned int read_iops)
{
	struct throtl_data *td = key;
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->iops[READ] = read_iops;
	throtl_update_blkio_group_common(td, tg);
}

static void throtl_update_blkio_group_write_iops(void *key,
			struct blkio_group *blkg, unsigned int write_iops)
{
	struct throtl_data *td = key;
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->iops[WRITE] = write_iops;
	throtl_update_blkio_group_common(td, tg);
}

static void throtl_shutdown_wq(struct request_queue *q)
{
	struct throtl_data *td = q->td;

	cancel_delayed_work_sync(&td->throtl_work);
}

static struct blkio_policy_type blkio_policy_throtl = {
	.ops = {
		.blkio_unlink_group_fn = throtl_unlink_blkio_group,
		.blkio_update_group_read_bps_fn =
					throtl_update_blkio_group_read_bps,
		.blkio_update_group_write_bps_fn =
					throtl_update_blkio_group_write_bps,
		.blkio_update_group_read_iops_fn =
					throtl_update_blkio_group_read_iops,
		.blkio_update_group_write_iops_fn =
					throtl_update_blkio_group_write_iops,
	},
	.plid = BLKIO_POLICY_THROTL,
};

bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
{
	struct throtl_data *td = q->td;
	struct throtl_grp *tg;
	bool rw = bio_data_dir(bio), update_disptime = true;
	struct blkio_cgroup *blkcg;
	bool throttled = false;

	if (bio->bi_rw & REQ_THROTTLED) {
		bio->bi_rw &= ~REQ_THROTTLED;
		goto out;
	}

	/*
	 * A throtl_grp pointer retrieved under rcu can be used to access
	 * basic fields like stats and io rates. If a group has no rules,
	 * just update the dispatch stats in a lockless manner and return.
	 */

	rcu_read_lock();
	blkcg = task_blkio_cgroup(current);
	tg = throtl_find_tg(td, blkcg);
	if (tg) {
		throtl_tg_fill_dev_details(td, tg);

		if (tg_no_rule_group(tg, rw)) {
			blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size,
					rw, rw_is_sync(bio->bi_rw));
			rcu_read_unlock();
			goto out;
		}
	}
	rcu_read_unlock();

	/*
	 * Either the group has not been allocated yet or it is not an
	 * unlimited IO group.
	 */
	spin_lock_irq(q->queue_lock);
	tg = throtl_get_tg(td);
	if (unlikely(!tg))
		goto out_unlock;

	if (tg->nr_queued[rw]) {
		/*
		 * There is already another bio queued in the same dir. No
		 * need to update the dispatch time.
		 */
		update_disptime = false;
		goto queue_bio;
	}

	/* Bio is within the rate limit of the group */
	if (tg_may_dispatch(td, tg, bio, NULL)) {
		throtl_charge_bio(tg, bio);

		/*
		 * We need to trim the slice even when bios are not being
		 * queued, otherwise it might happen that a bio is not queued
		 * for a long time and the slice keeps on extending and trim
		 * is not called for a long time. Now if limits are reduced
		 * suddenly we take into account all the IO dispatched so far
		 * at the new low rate and newly queued IO gets a really long
		 * dispatch time.
		 *
		 * So keep on trimming the slice even if a bio is not queued.
		 */
		throtl_trim_slice(td, tg, rw);
		goto out_unlock;
	}

queue_bio:
	throtl_log_tg(td, tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu"
			" iodisp=%u iops=%u queued=%d/%d",
			rw == READ ? 'R' : 'W',
			tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
			tg->io_disp[rw], tg->iops[rw],
			tg->nr_queued[READ], tg->nr_queued[WRITE]);

	throtl_add_bio_tg(q->td, tg, bio);
	throttled = true;

	if (update_disptime) {
		tg_update_disptime(td, tg);
		throtl_schedule_next_dispatch(td);
	}

out_unlock:
	spin_unlock_irq(q->queue_lock);
out:
	return throttled;
}
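
/*
 * A sketch of the expected call site (in the request_queue's make_request
 * path; nothing beyond this file is assumed here). Bios that
 * blk_throtl_bio() throttles are held back and resubmitted later via
 * generic_make_request(), tagged with REQ_THROTTLED so they pass straight
 * through on the second trip:
 *
 *	if (blk_throtl_bio(q, bio))
 *		return;		(bio was queued for later dispatch)
 */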

/**
 * blk_throtl_drain - drain throttled bios
 * @q: request_queue to drain throttled bios for
 *
 * Dispatch all currently throttled bios on @q through ->make_request_fn().
 */
void blk_throtl_drain(struct request_queue *q)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct throtl_data *td = q->td;
	struct throtl_rb_root *st = &td->tg_service_tree;
	struct throtl_grp *tg;
	struct bio_list bl;
	struct bio *bio;

	WARN_ON_ONCE(!queue_is_locked(q));

	bio_list_init(&bl);

	while ((tg = throtl_rb_first(st))) {
		throtl_dequeue_tg(td, tg);

		while ((bio = bio_list_peek(&tg->bio_lists[READ])))
			tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
		while ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
			tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
	}
	spin_unlock_irq(q->queue_lock);

	while ((bio = bio_list_pop(&bl)))
		generic_make_request(bio);

	spin_lock_irq(q->queue_lock);
}

int blk_throtl_init(struct request_queue *q)
{
	struct throtl_data *td;
	struct throtl_grp *tg;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;

	INIT_HLIST_HEAD(&td->tg_list);
	td->tg_service_tree = THROTL_RB_ROOT;
	td->limits_changed = false;
	INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);

	/* Alloc and init the root group */
	td->queue = q;
	tg = throtl_alloc_tg(td);

	if (!tg) {
		kfree(td);
		return -ENOMEM;
	}

	td->root_tg = tg;

	rcu_read_lock();
	throtl_init_add_tg_lists(td, tg, &blkio_root_cgroup);
	rcu_read_unlock();

	/* Attach throtl data to request queue */
	q->td = td;
	return 0;
}

void blk_throtl_exit(struct request_queue *q)
{
	struct throtl_data *td = q->td;
	bool wait = false;

	BUG_ON(!td);

	throtl_shutdown_wq(q);

	spin_lock_irq(q->queue_lock);
	throtl_release_tgs(td);

	/* If there are other groups */
	if (td->nr_undestroyed_grps > 0)
		wait = true;

	spin_unlock_irq(q->queue_lock);

	/*
	 * Wait for tg->blkg->key accessors to exit their grace periods.
	 * Do this wait only if there are other undestroyed groups out
	 * there (other than the root group). This can happen if the cgroup
	 * deletion path claimed the responsibility of cleaning up a group
	 * before the queue cleanup code gets to the group.
	 *
	 * Do not call synchronize_rcu() unconditionally as there are drivers
	 * which create/delete request queues hundreds of times during
	 * scan/boot, and synchronize_rcu() can take significant time and
	 * slow down boot.
	 */
	if (wait)
		synchronize_rcu();

	/*
	 * Just being safe: if, after the previous flush, somebody updated
	 * limits through the cgroup and another work item got queued,
	 * cancel it.
	 */
	throtl_shutdown_wq(q);
}

void blk_throtl_release(struct request_queue *q)
{
	kfree(q->td);
}

static int __init throtl_init(void)
{
	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
	if (!kthrotld_workqueue)
		panic("Failed to create kthrotld\n");

	blkio_policy_register(&blkio_policy_throtl);
	return 0;
}

module_init(throtl_init);