xref: /linux/block/blk-throttle.c (revision 42466b9f29b415c254dc4c2f4618e2a96951a406)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Interface for controlling IO bandwidth on a request queue
4  *
5  * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
6  */
7 
8 #include <linux/module.h>
9 #include <linux/slab.h>
10 #include <linux/blkdev.h>
11 #include <linux/bio.h>
12 #include <linux/blktrace_api.h>
13 #include <linux/blk-cgroup.h>
14 #include "blk.h"
15 
16 /* Max dispatch from a group in 1 round */
17 static int throtl_grp_quantum = 8;
18 
19 /* Total max dispatch from all groups in one round */
20 static int throtl_quantum = 32;
21 
22 /* Throttling is performed over a slice and after that slice is renewed */
23 #define DFL_THROTL_SLICE_HD (HZ / 10)
24 #define DFL_THROTL_SLICE_SSD (HZ / 50)
25 #define MAX_THROTL_SLICE (HZ)
26 #define MAX_IDLE_TIME (5L * 1000 * 1000) /* 5 s */
27 #define MIN_THROTL_BPS (320 * 1024)
28 #define MIN_THROTL_IOPS (10)
29 #define DFL_LATENCY_TARGET (-1L)
30 #define DFL_IDLE_THRESHOLD (0)
31 #define DFL_HD_BASELINE_LATENCY (4000L) /* 4ms */
32 #define LATENCY_FILTERED_SSD (0)
33 /*
34  * For HD, very small latency comes from sequential IO. Such IO is not useful
35  * for determining whether a cgroup's IO is impacted by others, hence we ignore it
36  */
37 #define LATENCY_FILTERED_HD (1000L) /* 1ms */
38 
39 static struct blkcg_policy blkcg_policy_throtl;
40 
41 /* A workqueue to queue throttle related work */
42 static struct workqueue_struct *kthrotld_workqueue;
43 
44 /*
45  * To implement hierarchical throttling, throtl_grps form a tree and bios
46  * are dispatched upwards level by level until they reach the top and get
47  * issued.  When dispatching bios from the children and local group at each
48  * level, if the bios are dispatched into a single bio_list, there's a risk
49  * level, if the bios are dispatched into a single bio_list, there's a risk
50  * that a local or child group which can queue many bios at once fills up
51  * the list, starving the others.
52  * To avoid such starvation, dispatched bios are queued separately
53  * according to where they came from.  When they are again dispatched to
54  * the parent, they're popped in round-robin order so that no single source
55  * hogs the dispatch window.
56  *
57  * throtl_qnode is used to keep the queued bios separated by their sources.
58  * Bios are queued to throtl_qnode which in turn is queued to
59  * throtl_service_queue and then dispatched in round-robin order.
60  *
61  * It's also used to track the reference counts on blkg's.  A qnode always
62  * belongs to a throtl_grp and gets queued on itself or the parent, so
63  * incrementing the reference of the associated throtl_grp when a qnode is
64  * queued and decrementing when dequeued is enough to keep the whole blkg
65  * tree pinned while bios are in flight.
66  */
67 struct throtl_qnode {
68 	struct list_head	node;		/* service_queue->queued[] */
69 	struct bio_list		bios;		/* queued bios */
70 	struct throtl_grp	*tg;		/* tg this qnode belongs to */
71 };
72 
73 struct throtl_service_queue {
74 	struct throtl_service_queue *parent_sq;	/* the parent service_queue */
75 
76 	/*
77 	 * Bios queued directly to this service_queue or dispatched from
78 	 * children throtl_grp's.
79 	 */
80 	struct list_head	queued[2];	/* throtl_qnode [READ/WRITE] */
81 	unsigned int		nr_queued[2];	/* number of queued bios */
82 
83 	/*
84 	 * RB tree of active children throtl_grp's, which are sorted by
85 	 * their ->disptime.
86 	 */
87 	struct rb_root_cached	pending_tree;	/* RB tree of active tgs */
88 	unsigned int		nr_pending;	/* # queued in the tree */
89 	unsigned long		first_pending_disptime;	/* disptime of the first tg */
90 	struct timer_list	pending_timer;	/* fires on first_pending_disptime */
91 };
92 
93 enum tg_state_flags {
94 	THROTL_TG_PENDING	= 1 << 0,	/* on parent's pending tree */
95 	THROTL_TG_WAS_EMPTY	= 1 << 1,	/* bio_lists[] became non-empty */
96 };
97 
98 #define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)
99 
100 enum {
101 	LIMIT_LOW,
102 	LIMIT_MAX,
103 	LIMIT_CNT,
104 };
105 
106 struct throtl_grp {
107 	/* must be the first member */
108 	struct blkg_policy_data pd;
109 
110 	/* active throtl group service_queue member */
111 	struct rb_node rb_node;
112 
113 	/* throtl_data this group belongs to */
114 	struct throtl_data *td;
115 
116 	/* this group's service queue */
117 	struct throtl_service_queue service_queue;
118 
119 	/*
120 	 * qnode_on_self is used when bios are directly queued to this
121 	 * throtl_grp so that local bios compete fairly with bios
122 	 * dispatched from children.  qnode_on_parent is used when bios are
123 	 * dispatched from this throtl_grp into its parent and will compete
124 	 * with the sibling qnode_on_parents and the parent's
125 	 * qnode_on_self.
126 	 */
127 	struct throtl_qnode qnode_on_self[2];
128 	struct throtl_qnode qnode_on_parent[2];
129 
130 	/*
131 	 * Dispatch time in jiffies. This is the estimated time when the group
132 	 * will unthrottle and be ready to dispatch more bios. It is used as the
133 	 * key to sort active groups in the service tree.
134 	 */
135 	unsigned long disptime;
136 
137 	unsigned int flags;
138 
139 	/* are there any throtl rules between this group and td? */
140 	bool has_rules[2];
141 
142 	/* internally used bytes per second rate limits */
143 	uint64_t bps[2][LIMIT_CNT];
144 	/* user configured bps limits */
145 	uint64_t bps_conf[2][LIMIT_CNT];
146 
147 	/* internally used IOPS limits */
148 	unsigned int iops[2][LIMIT_CNT];
149 	/* user configured IOPS limits */
150 	unsigned int iops_conf[2][LIMIT_CNT];
151 
152 	/* Number of bytes dispatched in current slice */
153 	uint64_t bytes_disp[2];
154 	/* Number of bio's dispatched in current slice */
155 	unsigned int io_disp[2];
156 
157 	unsigned long last_low_overflow_time[2];
158 
159 	uint64_t last_bytes_disp[2];
160 	unsigned int last_io_disp[2];
161 
162 	unsigned long last_check_time;
163 
164 	unsigned long latency_target; /* us */
165 	unsigned long latency_target_conf; /* us */
166 	/* When did we start a new slice */
167 	unsigned long slice_start[2];
168 	unsigned long slice_end[2];
169 
170 	unsigned long last_finish_time; /* ns / 1024 */
171 	unsigned long checked_last_finish_time; /* ns / 1024 */
172 	unsigned long avg_idletime; /* ns / 1024 */
173 	unsigned long idletime_threshold; /* us */
174 	unsigned long idletime_threshold_conf; /* us */
175 
176 	unsigned int bio_cnt; /* total bios */
177 	unsigned int bad_bio_cnt; /* bios exceeding latency threshold */
178 	unsigned long bio_cnt_reset_time;
179 };
180 
181 /* We measure latency for request size from <= 4k to >= 1M */
182 #define LATENCY_BUCKET_SIZE 9
183 
184 struct latency_bucket {
185 	unsigned long total_latency; /* ns / 1024 */
186 	int samples;
187 };
188 
189 struct avg_latency_bucket {
190 	unsigned long latency; /* ns / 1024 */
191 	bool valid;
192 };
193 
194 struct throtl_data
195 {
196 	/* service tree for active throtl groups */
197 	struct throtl_service_queue service_queue;
198 
199 	struct request_queue *queue;
200 
201 	/* Total Number of queued bios on READ and WRITE lists */
202 	unsigned int nr_queued[2];
203 
204 	unsigned int throtl_slice;
205 
206 	/* Work for dispatching throttled bios */
207 	struct work_struct dispatch_work;
208 	unsigned int limit_index;
209 	bool limit_valid[LIMIT_CNT];
210 
211 	unsigned long low_upgrade_time;
212 	unsigned long low_downgrade_time;
213 
214 	unsigned int scale;
215 
216 	struct latency_bucket tmp_buckets[2][LATENCY_BUCKET_SIZE];
217 	struct avg_latency_bucket avg_buckets[2][LATENCY_BUCKET_SIZE];
218 	struct latency_bucket __percpu *latency_buckets[2];
219 	unsigned long last_calculate_time;
220 	unsigned long filtered_latency;
221 
222 	bool track_bio_latency;
223 };
224 
225 static void throtl_pending_timer_fn(struct timer_list *t);
226 
227 static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
228 {
229 	return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
230 }
231 
232 static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
233 {
234 	return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
235 }
236 
237 static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
238 {
239 	return pd_to_blkg(&tg->pd);
240 }
241 
242 /**
243  * sq_to_tg - return the throtl_grp the specified service queue belongs to
244  * @sq: the throtl_service_queue of interest
245  *
246  * Return the throtl_grp @sq belongs to.  If @sq is the top-level one
247  * embedded in throtl_data, %NULL is returned.
248  */
249 static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
250 {
251 	if (sq && sq->parent_sq)
252 		return container_of(sq, struct throtl_grp, service_queue);
253 	else
254 		return NULL;
255 }
256 
257 /**
258  * sq_to_td - return throtl_data the specified service queue belongs to
259  * @sq: the throtl_service_queue of interest
260  *
261  * A service_queue can be embedded in either a throtl_grp or throtl_data.
262  * Determine the associated throtl_data accordingly and return it.
263  */
264 static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
265 {
266 	struct throtl_grp *tg = sq_to_tg(sq);
267 
268 	if (tg)
269 		return tg->td;
270 	else
271 		return container_of(sq, struct throtl_data, service_queue);
272 }
273 
274 /*
275  * A cgroup's limit in LIMIT_MAX is scaled if a low limit is set. This scaling
276  * makes the IO dispatch smoother.
277  * Scale up: scale up linearly according to the time elapsed since the upgrade.
278  *           For every throtl_slice, the limit scales up by 1/2 of the .low
279  *           limit until it hits the .max limit
280  * Scale down: exponentially scale down if a cgroup doesn't hit its .low limit
281  */
282 static uint64_t throtl_adjusted_limit(uint64_t low, struct throtl_data *td)
283 {
284 	/* arbitrary value to avoid too big scale */
285 	if (td->scale < 4096 && time_after_eq(jiffies,
286 	    td->low_upgrade_time + td->scale * td->throtl_slice))
287 		td->scale = (jiffies - td->low_upgrade_time) / td->throtl_slice;
288 
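	/*
	 * e.g. a 100 MB/s .low limit with td->scale == 2 becomes
	 * 100 + 50 * 2 = 200 MB/s; the result is capped by the .max
	 * limit in the callers.
	 */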
289 	return low + (low >> 1) * td->scale;
290 }
291 
292 static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
293 {
294 	struct blkcg_gq *blkg = tg_to_blkg(tg);
295 	struct throtl_data *td;
296 	uint64_t ret;
297 
298 	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
299 		return U64_MAX;
300 
301 	td = tg->td;
302 	ret = tg->bps[rw][td->limit_index];
303 	if (ret == 0 && td->limit_index == LIMIT_LOW) {
304 		/* intermediate node or iops isn't 0 */
305 		if (!list_empty(&blkg->blkcg->css.children) ||
306 		    tg->iops[rw][td->limit_index])
307 			return U64_MAX;
308 		else
309 			return MIN_THROTL_BPS;
310 	}
311 
312 	if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] &&
313 	    tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) {
314 		uint64_t adjusted;
315 
316 		adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td);
317 		ret = min(tg->bps[rw][LIMIT_MAX], adjusted);
318 	}
319 	return ret;
320 }
321 
322 static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
323 {
324 	struct blkcg_gq *blkg = tg_to_blkg(tg);
325 	struct throtl_data *td;
326 	unsigned int ret;
327 
328 	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
329 		return UINT_MAX;
330 
331 	td = tg->td;
332 	ret = tg->iops[rw][td->limit_index];
333 	if (ret == 0 && tg->td->limit_index == LIMIT_LOW) {
334 		/* intermediate node or bps isn't 0 */
335 		if (!list_empty(&blkg->blkcg->css.children) ||
336 		    tg->bps[rw][td->limit_index])
337 			return UINT_MAX;
338 		else
339 			return MIN_THROTL_IOPS;
340 	}
341 
342 	if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] &&
343 	    tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) {
344 		uint64_t adjusted;
345 
346 		adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td);
347 		if (adjusted > UINT_MAX)
348 			adjusted = UINT_MAX;
349 		ret = min_t(unsigned int, tg->iops[rw][LIMIT_MAX], adjusted);
350 	}
351 	return ret;
352 }
353 
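/*
 * e.g. a 4k request (8 sectors) maps to bucket 0 and anything of 1M
 * (2048 sectors) or more maps to the last bucket (index 8).
 */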
354 #define request_bucket_index(sectors) \
355 	clamp_t(int, order_base_2(sectors) - 3, 0, LATENCY_BUCKET_SIZE - 1)
356 
357 /**
358  * throtl_log - log debug message via blktrace
359  * @sq: the service_queue being reported
360  * @fmt: printf format string
361  * @args: printf args
362  *
363  * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
364  * throtl_grp; otherwise, just "throtl".
365  */
366 #define throtl_log(sq, fmt, args...)	do {				\
367 	struct throtl_grp *__tg = sq_to_tg((sq));			\
368 	struct throtl_data *__td = sq_to_td((sq));			\
369 									\
370 	(void)__td;							\
371 	if (likely(!blk_trace_note_message_enabled(__td->queue)))	\
372 		break;							\
373 	if ((__tg)) {							\
374 		blk_add_cgroup_trace_msg(__td->queue,			\
375 			tg_to_blkg(__tg)->blkcg, "throtl " fmt, ##args);\
376 	} else {							\
377 		blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);	\
378 	}								\
379 } while (0)
380 
381 static inline unsigned int throtl_bio_data_size(struct bio *bio)
382 {
383 	/* assume it's one sector */
384 	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
385 		return 512;
386 	return bio->bi_iter.bi_size;
387 }
388 
389 static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
390 {
391 	INIT_LIST_HEAD(&qn->node);
392 	bio_list_init(&qn->bios);
393 	qn->tg = tg;
394 }
395 
396 /**
397  * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
398  * @bio: bio being added
399  * @qn: qnode to add bio to
400  * @queued: the service_queue->queued[] list @qn belongs to
401  *
402  * Add @bio to @qn and put @qn on @queued if it's not already on.
403  * @qn->tg's reference count is bumped when @qn is activated.  See the
404  * comment on top of throtl_qnode definition for details.
405  */
406 static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
407 				 struct list_head *queued)
408 {
409 	bio_list_add(&qn->bios, bio);
410 	if (list_empty(&qn->node)) {
411 		list_add_tail(&qn->node, queued);
412 		blkg_get(tg_to_blkg(qn->tg));
413 	}
414 }
415 
416 /**
417  * throtl_peek_queued - peek the first bio on a qnode list
418  * @queued: the qnode list to peek
419  */
420 static struct bio *throtl_peek_queued(struct list_head *queued)
421 {
422 	struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
423 	struct bio *bio;
424 
425 	if (list_empty(queued))
426 		return NULL;
427 
428 	bio = bio_list_peek(&qn->bios);
429 	WARN_ON_ONCE(!bio);
430 	return bio;
431 }
432 
433 /**
434  * throtl_pop_queued - pop the first bio from a qnode list
435  * @queued: the qnode list to pop a bio from
436  * @tg_to_put: optional out argument for throtl_grp to put
437  *
438  * Pop the first bio from the qnode list @queued.  After popping, the first
439  * qnode is removed from @queued if empty or moved to the end of @queued so
440  * that the popping order is round-robin.
441  *
442  * When the first qnode is removed, its associated throtl_grp should be put
443  * too.  If @tg_to_put is NULL, this function automatically puts it;
444  * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
445  * responsible for putting it.
446  */
447 static struct bio *throtl_pop_queued(struct list_head *queued,
448 				     struct throtl_grp **tg_to_put)
449 {
450 	struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
451 	struct bio *bio;
452 
453 	if (list_empty(queued))
454 		return NULL;
455 
456 	bio = bio_list_pop(&qn->bios);
457 	WARN_ON_ONCE(!bio);
458 
459 	if (bio_list_empty(&qn->bios)) {
460 		list_del_init(&qn->node);
461 		if (tg_to_put)
462 			*tg_to_put = qn->tg;
463 		else
464 			blkg_put(tg_to_blkg(qn->tg));
465 	} else {
466 		list_move_tail(&qn->node, queued);
467 	}
468 
469 	return bio;
470 }
471 
472 /* init a service_queue, assumes the caller zeroed it */
473 static void throtl_service_queue_init(struct throtl_service_queue *sq)
474 {
475 	INIT_LIST_HEAD(&sq->queued[0]);
476 	INIT_LIST_HEAD(&sq->queued[1]);
477 	sq->pending_tree = RB_ROOT_CACHED;
478 	timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0);
479 }
480 
481 static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp,
482 						struct request_queue *q,
483 						struct blkcg *blkcg)
484 {
485 	struct throtl_grp *tg;
486 	int rw;
487 
488 	tg = kzalloc_node(sizeof(*tg), gfp, q->node);
489 	if (!tg)
490 		return NULL;
491 
492 	throtl_service_queue_init(&tg->service_queue);
493 
494 	for (rw = READ; rw <= WRITE; rw++) {
495 		throtl_qnode_init(&tg->qnode_on_self[rw], tg);
496 		throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
497 	}
498 
499 	RB_CLEAR_NODE(&tg->rb_node);
500 	tg->bps[READ][LIMIT_MAX] = U64_MAX;
501 	tg->bps[WRITE][LIMIT_MAX] = U64_MAX;
502 	tg->iops[READ][LIMIT_MAX] = UINT_MAX;
503 	tg->iops[WRITE][LIMIT_MAX] = UINT_MAX;
504 	tg->bps_conf[READ][LIMIT_MAX] = U64_MAX;
505 	tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX;
506 	tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX;
507 	tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX;
508 	/* LIMIT_LOW will have default value 0 */
509 
510 	tg->latency_target = DFL_LATENCY_TARGET;
511 	tg->latency_target_conf = DFL_LATENCY_TARGET;
512 	tg->idletime_threshold = DFL_IDLE_THRESHOLD;
513 	tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD;
514 
515 	return &tg->pd;
516 }
517 
518 static void throtl_pd_init(struct blkg_policy_data *pd)
519 {
520 	struct throtl_grp *tg = pd_to_tg(pd);
521 	struct blkcg_gq *blkg = tg_to_blkg(tg);
522 	struct throtl_data *td = blkg->q->td;
523 	struct throtl_service_queue *sq = &tg->service_queue;
524 
525 	/*
526 	 * If on the default hierarchy, we switch to properly hierarchical
527 	 * behavior where limits on a given throtl_grp are applied to the
528 	 * whole subtree rather than just the group itself.  e.g. If 16M
529 	 * read_bps limit is set on the root group, the whole system can't
530 	 * exceed 16M for the device.
531 	 *
532 	 * If not on the default hierarchy, the broken flat hierarchy
533 	 * behavior is retained where all throtl_grps are treated as if
534 	 * they're all separate root groups right below throtl_data.
535 	 * Limits of a group don't interact with limits of other groups
536 	 * regardless of the position of the group in the hierarchy.
537 	 */
538 	sq->parent_sq = &td->service_queue;
539 	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
540 		sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
541 	tg->td = td;
542 }
543 
544 /*
545  * Set has_rules[] if @tg or any of its parents have limits configured.
546  * This doesn't require walking up to the top of the hierarchy as the
547  * parent's has_rules[] is guaranteed to be correct.
548  */
549 static void tg_update_has_rules(struct throtl_grp *tg)
550 {
551 	struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
552 	struct throtl_data *td = tg->td;
553 	int rw;
554 
555 	for (rw = READ; rw <= WRITE; rw++)
556 		tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
557 			(td->limit_valid[td->limit_index] &&
558 			 (tg_bps_limit(tg, rw) != U64_MAX ||
559 			  tg_iops_limit(tg, rw) != UINT_MAX));
560 }
561 
562 static void throtl_pd_online(struct blkg_policy_data *pd)
563 {
564 	struct throtl_grp *tg = pd_to_tg(pd);
565 	/*
566 	 * We don't want new groups to escape the limits of their ancestors.
567 	 * Update has_rules[] after a new group is brought online.
568 	 */
569 	tg_update_has_rules(tg);
570 }
571 
572 static void blk_throtl_update_limit_valid(struct throtl_data *td)
573 {
574 	struct cgroup_subsys_state *pos_css;
575 	struct blkcg_gq *blkg;
576 	bool low_valid = false;
577 
578 	rcu_read_lock();
579 	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
580 		struct throtl_grp *tg = blkg_to_tg(blkg);
581 
582 		if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
583 		    tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) {
584 			low_valid = true;
585 			break;
586 		}
587 	}
588 	rcu_read_unlock();
589 
590 	td->limit_valid[LIMIT_LOW] = low_valid;
591 }
592 
593 static void throtl_upgrade_state(struct throtl_data *td);
594 static void throtl_pd_offline(struct blkg_policy_data *pd)
595 {
596 	struct throtl_grp *tg = pd_to_tg(pd);
597 
598 	tg->bps[READ][LIMIT_LOW] = 0;
599 	tg->bps[WRITE][LIMIT_LOW] = 0;
600 	tg->iops[READ][LIMIT_LOW] = 0;
601 	tg->iops[WRITE][LIMIT_LOW] = 0;
602 
603 	blk_throtl_update_limit_valid(tg->td);
604 
605 	if (!tg->td->limit_valid[tg->td->limit_index])
606 		throtl_upgrade_state(tg->td);
607 }
608 
609 static void throtl_pd_free(struct blkg_policy_data *pd)
610 {
611 	struct throtl_grp *tg = pd_to_tg(pd);
612 
613 	del_timer_sync(&tg->service_queue.pending_timer);
614 	kfree(tg);
615 }
616 
617 static struct throtl_grp *
618 throtl_rb_first(struct throtl_service_queue *parent_sq)
619 {
620 	struct rb_node *n;
621 	/* Service tree is empty */
622 	if (!parent_sq->nr_pending)
623 		return NULL;
624 
625 	n = rb_first_cached(&parent_sq->pending_tree);
626 	WARN_ON_ONCE(!n);
627 	if (!n)
628 		return NULL;
629 	return rb_entry_tg(n);
630 }
631 
632 static void throtl_rb_erase(struct rb_node *n,
633 			    struct throtl_service_queue *parent_sq)
634 {
635 	rb_erase_cached(n, &parent_sq->pending_tree);
636 	RB_CLEAR_NODE(n);
637 	--parent_sq->nr_pending;
638 }
639 
640 static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
641 {
642 	struct throtl_grp *tg;
643 
644 	tg = throtl_rb_first(parent_sq);
645 	if (!tg)
646 		return;
647 
648 	parent_sq->first_pending_disptime = tg->disptime;
649 }
650 
651 static void tg_service_queue_add(struct throtl_grp *tg)
652 {
653 	struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
654 	struct rb_node **node = &parent_sq->pending_tree.rb_root.rb_node;
655 	struct rb_node *parent = NULL;
656 	struct throtl_grp *__tg;
657 	unsigned long key = tg->disptime;
658 	bool leftmost = true;
659 
660 	while (*node != NULL) {
661 		parent = *node;
662 		__tg = rb_entry_tg(parent);
663 
664 		if (time_before(key, __tg->disptime))
665 			node = &parent->rb_left;
666 		else {
667 			node = &parent->rb_right;
668 			leftmost = false;
669 		}
670 	}
671 
672 	rb_link_node(&tg->rb_node, parent, node);
673 	rb_insert_color_cached(&tg->rb_node, &parent_sq->pending_tree,
674 			       leftmost);
675 }
676 
677 static void __throtl_enqueue_tg(struct throtl_grp *tg)
678 {
679 	tg_service_queue_add(tg);
680 	tg->flags |= THROTL_TG_PENDING;
681 	tg->service_queue.parent_sq->nr_pending++;
682 }
683 
684 static void throtl_enqueue_tg(struct throtl_grp *tg)
685 {
686 	if (!(tg->flags & THROTL_TG_PENDING))
687 		__throtl_enqueue_tg(tg);
688 }
689 
690 static void __throtl_dequeue_tg(struct throtl_grp *tg)
691 {
692 	throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
693 	tg->flags &= ~THROTL_TG_PENDING;
694 }
695 
696 static void throtl_dequeue_tg(struct throtl_grp *tg)
697 {
698 	if (tg->flags & THROTL_TG_PENDING)
699 		__throtl_dequeue_tg(tg);
700 }
701 
702 /* Call with queue lock held */
703 static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
704 					  unsigned long expires)
705 {
706 	unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;
707 
708 	/*
709 	 * Since we are adjusting the throttle limit dynamically, the sleep
710 	 * time calculated according to the previous limit might be invalid. It's
711 	 * possible the cgroup's sleep time is very long and no other cgroup
712 	 * has IO running to notice the limit change. Make sure the cgroup
713 	 * doesn't sleep too long so that the change isn't missed.
714 	 */
715 	if (time_after(expires, max_expire))
716 		expires = max_expire;
717 	mod_timer(&sq->pending_timer, expires);
718 	throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
719 		   expires - jiffies, jiffies);
720 }
721 
722 /**
723  * throtl_schedule_next_dispatch - schedule the next dispatch cycle
724  * @sq: the service_queue to schedule dispatch for
725  * @force: force scheduling
726  *
727  * Arm @sq->pending_timer so that the next dispatch cycle starts on the
728  * dispatch time of the first pending child.  Returns %true if either timer
729  * is armed or there's no pending child left.  %false if the current
730  * dispatch window is still open and the caller should continue
731  * dispatching.
732  *
733  * If @force is %true, the dispatch timer is always scheduled and this
734  * function is guaranteed to return %true.  This is to be used when the
735  * caller can't dispatch itself and needs to invoke pending_timer
736  * unconditionally.  Note that forced scheduling is likely to induce a short
737  * delay before dispatch starts even if @sq->first_pending_disptime is not
738  * in the future and thus shouldn't be used in hot paths.
739  */
740 static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
741 					  bool force)
742 {
743 	/* any pending children left? */
744 	if (!sq->nr_pending)
745 		return true;
746 
747 	update_min_dispatch_time(sq);
748 
749 	/* is the next dispatch time in the future? */
750 	if (force || time_after(sq->first_pending_disptime, jiffies)) {
751 		throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
752 		return true;
753 	}
754 
755 	/* tell the caller to continue dispatching */
756 	return false;
757 }
758 
759 static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
760 		bool rw, unsigned long start)
761 {
762 	tg->bytes_disp[rw] = 0;
763 	tg->io_disp[rw] = 0;
764 
765 	/*
766 	 * The previous slice has expired. We must have trimmed it after the last
767 	 * bio dispatch. That means that since the start of the last slice, we never
768 	 * used that bandwidth. Do try to make use of that bandwidth while giving
769 	 * credit.
770 	 */
771 	if (time_after_eq(start, tg->slice_start[rw]))
772 		tg->slice_start[rw] = start;
773 
774 	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
775 	throtl_log(&tg->service_queue,
776 		   "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
777 		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
778 		   tg->slice_end[rw], jiffies);
779 }
780 
781 static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
782 {
783 	tg->bytes_disp[rw] = 0;
784 	tg->io_disp[rw] = 0;
785 	tg->slice_start[rw] = jiffies;
786 	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
787 	throtl_log(&tg->service_queue,
788 		   "[%c] new slice start=%lu end=%lu jiffies=%lu",
789 		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
790 		   tg->slice_end[rw], jiffies);
791 }
792 
793 static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
794 					unsigned long jiffy_end)
795 {
796 	tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
797 }
798 
799 static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
800 				       unsigned long jiffy_end)
801 {
802 	tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
803 	throtl_log(&tg->service_queue,
804 		   "[%c] extend slice start=%lu end=%lu jiffies=%lu",
805 		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
806 		   tg->slice_end[rw], jiffies);
807 }
808 
809 /* Determine if previously allocated or extended slice is complete or not */
810 static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
811 {
812 	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
813 		return false;
814 
815 	return true;
816 }
817 
818 /* Trim the used slices and adjust slice start accordingly */
819 static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
820 {
821 	unsigned long nr_slices, time_elapsed, io_trim;
822 	u64 bytes_trim, tmp;
823 
824 	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
825 
826 	/*
827 	 * If bps are unlimited (-1), then the time slice doesn't get
828 	 * renewed. Don't try to trim the slice if the slice has expired. A new
829 	 * slice will start when appropriate.
830 	 */
831 	if (throtl_slice_used(tg, rw))
832 		return;
833 
834 	/*
835 	 * A bio has been dispatched. Also adjust slice_end. It might happen
836 	 * that initially the cgroup limit was very low, resulting in a high
837 	 * slice_end, but later the limit was bumped up and the bio was dispatched
838 	 * sooner, then we need to reduce slice_end. A high bogus slice_end
839 	 * is bad because it does not allow a new slice to start.
840 	 */
841 
842 	throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);
843 
844 	time_elapsed = jiffies - tg->slice_start[rw];
845 
846 	nr_slices = time_elapsed / tg->td->throtl_slice;
847 
848 	if (!nr_slices)
849 		return;
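	/*
	 * Bytes and IOs the group was entitled to dispatch during the
	 * nr_slices whole slices that have elapsed, at the current limits.
	 */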
850 	tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices;
851 	do_div(tmp, HZ);
852 	bytes_trim = tmp;
853 
854 	io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) /
855 		HZ;
856 
857 	if (!bytes_trim && !io_trim)
858 		return;
859 
860 	if (tg->bytes_disp[rw] >= bytes_trim)
861 		tg->bytes_disp[rw] -= bytes_trim;
862 	else
863 		tg->bytes_disp[rw] = 0;
864 
865 	if (tg->io_disp[rw] >= io_trim)
866 		tg->io_disp[rw] -= io_trim;
867 	else
868 		tg->io_disp[rw] = 0;
869 
870 	tg->slice_start[rw] += nr_slices * tg->td->throtl_slice;
871 
872 	throtl_log(&tg->service_queue,
873 		   "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
874 		   rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
875 		   tg->slice_start[rw], tg->slice_end[rw], jiffies);
876 }
877 
878 static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
879 				  unsigned long *wait)
880 {
881 	bool rw = bio_data_dir(bio);
882 	unsigned int io_allowed;
883 	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
884 	u64 tmp;
885 
886 	jiffy_elapsed = jiffies - tg->slice_start[rw];
887 
888 	/* Round up to the next throttle slice, wait time must be nonzero */
889 	jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice);
890 
891 	/*
892 	 * jiffy_elapsed_rnd should not be a big value: the minimum iops is 1,
893 	 * so at most the elapsed jiffies should be equivalent to 1 second as we
894 	 * will allow a dispatch after 1 second and after that the slice should
895 	 * have been trimmed.
896 	 */
897 
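	/* IOs allowed in the rounded-up elapsed time at the current iops limit */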
898 	tmp = (u64)tg_iops_limit(tg, rw) * jiffy_elapsed_rnd;
899 	do_div(tmp, HZ);
900 
901 	if (tmp > UINT_MAX)
902 		io_allowed = UINT_MAX;
903 	else
904 		io_allowed = tmp;
905 
906 	if (tg->io_disp[rw] + 1 <= io_allowed) {
907 		if (wait)
908 			*wait = 0;
909 		return true;
910 	}
911 
912 	/* Calc approx time to dispatch */
913 	jiffy_wait = jiffy_elapsed_rnd - jiffy_elapsed;
914 
915 	if (wait)
916 		*wait = jiffy_wait;
917 	return false;
918 }
919 
920 static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
921 				 unsigned long *wait)
922 {
923 	bool rw = bio_data_dir(bio);
924 	u64 bytes_allowed, extra_bytes, tmp;
925 	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
926 	unsigned int bio_size = throtl_bio_data_size(bio);
927 
928 	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
929 
930 	/* Slice has just started. Consider one slice interval */
931 	if (!jiffy_elapsed)
932 		jiffy_elapsed_rnd = tg->td->throtl_slice;
933 
934 	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
935 
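	/* bytes allowed in the rounded-up elapsed time at the current bps limit */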
936 	tmp = tg_bps_limit(tg, rw) * jiffy_elapsed_rnd;
937 	do_div(tmp, HZ);
938 	bytes_allowed = tmp;
939 
940 	if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
941 		if (wait)
942 			*wait = 0;
943 		return true;
944 	}
945 
946 	/* Calc approx time to dispatch */
947 	extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
948 	jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw));
949 
950 	if (!jiffy_wait)
951 		jiffy_wait = 1;
952 
953 	/*
954 	 * This wait time is without taking into consideration the rounding
955 	 * up we did. Add that time also.
956 	 */
957 	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
958 	if (wait)
959 		*wait = jiffy_wait;
960 	return false;
961 }
962 
963 /*
964  * Returns whether one can dispatch a bio or not. Also returns approx number
965  * of jiffies to wait before this bio is within the IO rate and can be dispatched
966  */
967 static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
968 			    unsigned long *wait)
969 {
970 	bool rw = bio_data_dir(bio);
971 	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
972 
973 	/*
974 	 * Currently the whole state machine of the group depends on the first
975 	 * bio queued in the group's bio list. So one should not be calling
976 	 * this function with a different bio if there are other bios
977 	 * queued.
978 	 */
979 	BUG_ON(tg->service_queue.nr_queued[rw] &&
980 	       bio != throtl_peek_queued(&tg->service_queue.queued[rw]));
981 
982 	/* If tg->bps = -1, then BW is unlimited */
983 	if (tg_bps_limit(tg, rw) == U64_MAX &&
984 	    tg_iops_limit(tg, rw) == UINT_MAX) {
985 		if (wait)
986 			*wait = 0;
987 		return true;
988 	}
989 
990 	/*
991 	 * If previous slice expired, start a new one otherwise renew/extend
992 	 * existing slice to make sure it is at least throtl_slice interval
993 	 * long since now. New slice is started only for empty throttle group.
994 	 * If there is queued bio, that means there should be an active
995 	 * slice and it should be extended instead.
996 	 */
997 	if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
998 		throtl_start_new_slice(tg, rw);
999 	else {
1000 		if (time_before(tg->slice_end[rw],
1001 		    jiffies + tg->td->throtl_slice))
1002 			throtl_extend_slice(tg, rw,
1003 				jiffies + tg->td->throtl_slice);
1004 	}
1005 
1006 	if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
1007 	    tg_with_in_iops_limit(tg, bio, &iops_wait)) {
1008 		if (wait)
1009 			*wait = 0;
1010 		return true;
1011 	}
1012 
1013 	max_wait = max(bps_wait, iops_wait);
1014 
1015 	if (wait)
1016 		*wait = max_wait;
1017 
1018 	if (time_before(tg->slice_end[rw], jiffies + max_wait))
1019 		throtl_extend_slice(tg, rw, jiffies + max_wait);
1020 
1021 	return false;
1022 }
1023 
1024 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
1025 {
1026 	bool rw = bio_data_dir(bio);
1027 	unsigned int bio_size = throtl_bio_data_size(bio);
1028 
1029 	/* Charge the bio to the group */
1030 	tg->bytes_disp[rw] += bio_size;
1031 	tg->io_disp[rw]++;
1032 	tg->last_bytes_disp[rw] += bio_size;
1033 	tg->last_io_disp[rw]++;
1034 
1035 	/*
1036 	 * BIO_THROTTLED is used to prevent the same bio from being throttled
1037 	 * more than once as a throttled bio will go through blk-throtl the
1038 	 * second time when it eventually gets issued.  Set it when a bio
1039 	 * is being charged to a tg.
1040 	 */
1041 	if (!bio_flagged(bio, BIO_THROTTLED))
1042 		bio_set_flag(bio, BIO_THROTTLED);
1043 }
1044 
1045 /**
1046  * throtl_add_bio_tg - add a bio to the specified throtl_grp
1047  * @bio: bio to add
1048  * @qn: qnode to use
1049  * @tg: the target throtl_grp
1050  *
1051  * Add @bio to @tg's service_queue using @qn.  If @qn is not specified,
1052  * tg->qnode_on_self[] is used.
1053  */
1054 static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
1055 			      struct throtl_grp *tg)
1056 {
1057 	struct throtl_service_queue *sq = &tg->service_queue;
1058 	bool rw = bio_data_dir(bio);
1059 
1060 	if (!qn)
1061 		qn = &tg->qnode_on_self[rw];
1062 
1063 	/*
1064 	 * If @tg doesn't currently have any bios queued in the same
1065 	 * direction, queueing @bio can change when @tg should be
1066 	 * dispatched.  Mark that @tg was empty.  This is automatically
1067 	 * cleared on the next tg_update_disptime().
1068 	 */
1069 	if (!sq->nr_queued[rw])
1070 		tg->flags |= THROTL_TG_WAS_EMPTY;
1071 
1072 	throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);
1073 
1074 	sq->nr_queued[rw]++;
1075 	throtl_enqueue_tg(tg);
1076 }
1077 
1078 static void tg_update_disptime(struct throtl_grp *tg)
1079 {
1080 	struct throtl_service_queue *sq = &tg->service_queue;
1081 	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
1082 	struct bio *bio;
1083 
1084 	bio = throtl_peek_queued(&sq->queued[READ]);
1085 	if (bio)
1086 		tg_may_dispatch(tg, bio, &read_wait);
1087 
1088 	bio = throtl_peek_queued(&sq->queued[WRITE]);
1089 	if (bio)
1090 		tg_may_dispatch(tg, bio, &write_wait);
1091 
1092 	min_wait = min(read_wait, write_wait);
1093 	disptime = jiffies + min_wait;
1094 
1095 	/* Update dispatch time */
1096 	throtl_dequeue_tg(tg);
1097 	tg->disptime = disptime;
1098 	throtl_enqueue_tg(tg);
1099 
1100 	/* see throtl_add_bio_tg() */
1101 	tg->flags &= ~THROTL_TG_WAS_EMPTY;
1102 }
1103 
1104 static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
1105 					struct throtl_grp *parent_tg, bool rw)
1106 {
1107 	if (throtl_slice_used(parent_tg, rw)) {
1108 		throtl_start_new_slice_with_credit(parent_tg, rw,
1109 				child_tg->slice_start[rw]);
1110 	}
1111 
1112 }
1113 
1114 static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
1115 {
1116 	struct throtl_service_queue *sq = &tg->service_queue;
1117 	struct throtl_service_queue *parent_sq = sq->parent_sq;
1118 	struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
1119 	struct throtl_grp *tg_to_put = NULL;
1120 	struct bio *bio;
1121 
1122 	/*
1123 	 * @bio is being transferred from @tg to @parent_sq.  Popping a bio
1124 	 * from @tg may put its reference and @parent_sq might end up
1125 	 * getting released prematurely.  Remember the tg to put and put it
1126 	 * after @bio is transferred to @parent_sq.
1127 	 */
1128 	bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
1129 	sq->nr_queued[rw]--;
1130 
1131 	throtl_charge_bio(tg, bio);
1132 
1133 	/*
1134 	 * If our parent is another tg, we just need to transfer @bio to
1135 	 * the parent using throtl_add_bio_tg().  If our parent is
1136 	 * @td->service_queue, @bio is ready to be issued.  Put it on its
1137 	 * bio_lists[] and decrease total number queued.  The caller is
1138 	 * responsible for issuing these bios.
1139 	 */
1140 	if (parent_tg) {
1141 		throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
1142 		start_parent_slice_with_credit(tg, parent_tg, rw);
1143 	} else {
1144 		throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
1145 				     &parent_sq->queued[rw]);
1146 		BUG_ON(tg->td->nr_queued[rw] <= 0);
1147 		tg->td->nr_queued[rw]--;
1148 	}
1149 
1150 	throtl_trim_slice(tg, rw);
1151 
1152 	if (tg_to_put)
1153 		blkg_put(tg_to_blkg(tg_to_put));
1154 }
1155 
1156 static int throtl_dispatch_tg(struct throtl_grp *tg)
1157 {
1158 	struct throtl_service_queue *sq = &tg->service_queue;
1159 	unsigned int nr_reads = 0, nr_writes = 0;
1160 	unsigned int max_nr_reads = throtl_grp_quantum*3/4;
1161 	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
1162 	struct bio *bio;
1163 
1164 	/* Try to dispatch 75% READS and 25% WRITES */
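	/* with the default throtl_grp_quantum of 8, that is 6 reads and 2 writes per round */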
1165 
1166 	while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
1167 	       tg_may_dispatch(tg, bio, NULL)) {
1168 
1169 		tg_dispatch_one_bio(tg, bio_data_dir(bio));
1170 		nr_reads++;
1171 
1172 		if (nr_reads >= max_nr_reads)
1173 			break;
1174 	}
1175 
1176 	while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
1177 	       tg_may_dispatch(tg, bio, NULL)) {
1178 
1179 		tg_dispatch_one_bio(tg, bio_data_dir(bio));
1180 		nr_writes++;
1181 
1182 		if (nr_writes >= max_nr_writes)
1183 			break;
1184 	}
1185 
1186 	return nr_reads + nr_writes;
1187 }
1188 
1189 static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
1190 {
1191 	unsigned int nr_disp = 0;
1192 
1193 	while (1) {
1194 		struct throtl_grp *tg = throtl_rb_first(parent_sq);
1195 		struct throtl_service_queue *sq;
1196 
1197 		if (!tg)
1198 			break;
1199 
1200 		if (time_before(jiffies, tg->disptime))
1201 			break;
1202 
1203 		throtl_dequeue_tg(tg);
1204 
1205 		nr_disp += throtl_dispatch_tg(tg);
1206 
1207 		sq = &tg->service_queue;
1208 		if (sq->nr_queued[0] || sq->nr_queued[1])
1209 			tg_update_disptime(tg);
1210 
1211 		if (nr_disp >= throtl_quantum)
1212 			break;
1213 	}
1214 
1215 	return nr_disp;
1216 }
1217 
1218 static bool throtl_can_upgrade(struct throtl_data *td,
1219 	struct throtl_grp *this_tg);
1220 /**
1221  * throtl_pending_timer_fn - timer function for service_queue->pending_timer
1222  * @t: the pending_timer member of the throtl_service_queue being serviced
1223  *
1224  * This timer is armed when a child throtl_grp with active bios becomes
1225  * pending and queued on the service_queue's pending_tree and expires when
1226  * the first child throtl_grp should be dispatched.  This function
1227  * dispatches bio's from the children throtl_grps to the parent
1228  * service_queue.
1229  *
1230  * If the parent's parent is another throtl_grp, dispatching is propagated
1231  * by either arming its pending_timer or repeating dispatch directly.  If
1232  * the top-level service_tree is reached, throtl_data->dispatch_work is
1233  * kicked so that the ready bio's are issued.
1234  */
1235 static void throtl_pending_timer_fn(struct timer_list *t)
1236 {
1237 	struct throtl_service_queue *sq = from_timer(sq, t, pending_timer);
1238 	struct throtl_grp *tg = sq_to_tg(sq);
1239 	struct throtl_data *td = sq_to_td(sq);
1240 	struct request_queue *q = td->queue;
1241 	struct throtl_service_queue *parent_sq;
1242 	bool dispatched;
1243 	int ret;
1244 
1245 	spin_lock_irq(&q->queue_lock);
1246 	if (throtl_can_upgrade(td, NULL))
1247 		throtl_upgrade_state(td);
1248 
1249 again:
1250 	parent_sq = sq->parent_sq;
1251 	dispatched = false;
1252 
1253 	while (true) {
1254 		throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
1255 			   sq->nr_queued[READ] + sq->nr_queued[WRITE],
1256 			   sq->nr_queued[READ], sq->nr_queued[WRITE]);
1257 
1258 		ret = throtl_select_dispatch(sq);
1259 		if (ret) {
1260 			throtl_log(sq, "bios disp=%u", ret);
1261 			dispatched = true;
1262 		}
1263 
1264 		if (throtl_schedule_next_dispatch(sq, false))
1265 			break;
1266 
1267 		/* this dispatch window is still open, relax and repeat */
1268 		spin_unlock_irq(&q->queue_lock);
1269 		cpu_relax();
1270 		spin_lock_irq(&q->queue_lock);
1271 	}
1272 
1273 	if (!dispatched)
1274 		goto out_unlock;
1275 
1276 	if (parent_sq) {
1277 		/* @parent_sq is another throtl_grp, propagate dispatch */
1278 		if (tg->flags & THROTL_TG_WAS_EMPTY) {
1279 			tg_update_disptime(tg);
1280 			if (!throtl_schedule_next_dispatch(parent_sq, false)) {
1281 				/* window is already open, repeat dispatching */
1282 				sq = parent_sq;
1283 				tg = sq_to_tg(sq);
1284 				goto again;
1285 			}
1286 		}
1287 	} else {
1288 		/* reached the top-level, queue issuing */
1289 		queue_work(kthrotld_workqueue, &td->dispatch_work);
1290 	}
1291 out_unlock:
1292 	spin_unlock_irq(&q->queue_lock);
1293 }
1294 
1295 /**
1296  * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
1297  * @work: work item being executed
1298  *
1299  * This function is queued for execution when bio's reach the bio_lists[]
1300  * of throtl_data->service_queue.  Those bio's are ready and issued by this
1301  * function.
1302  */
1303 static void blk_throtl_dispatch_work_fn(struct work_struct *work)
1304 {
1305 	struct throtl_data *td = container_of(work, struct throtl_data,
1306 					      dispatch_work);
1307 	struct throtl_service_queue *td_sq = &td->service_queue;
1308 	struct request_queue *q = td->queue;
1309 	struct bio_list bio_list_on_stack;
1310 	struct bio *bio;
1311 	struct blk_plug plug;
1312 	int rw;
1313 
1314 	bio_list_init(&bio_list_on_stack);
1315 
1316 	spin_lock_irq(&q->queue_lock);
1317 	for (rw = READ; rw <= WRITE; rw++)
1318 		while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
1319 			bio_list_add(&bio_list_on_stack, bio);
1320 	spin_unlock_irq(&q->queue_lock);
1321 
1322 	if (!bio_list_empty(&bio_list_on_stack)) {
1323 		blk_start_plug(&plug);
1324 		while((bio = bio_list_pop(&bio_list_on_stack)))
1325 			generic_make_request(bio);
1326 		blk_finish_plug(&plug);
1327 	}
1328 }
1329 
1330 static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
1331 			      int off)
1332 {
1333 	struct throtl_grp *tg = pd_to_tg(pd);
1334 	u64 v = *(u64 *)((void *)tg + off);
1335 
1336 	if (v == U64_MAX)
1337 		return 0;
1338 	return __blkg_prfill_u64(sf, pd, v);
1339 }
1340 
1341 static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
1342 			       int off)
1343 {
1344 	struct throtl_grp *tg = pd_to_tg(pd);
1345 	unsigned int v = *(unsigned int *)((void *)tg + off);
1346 
1347 	if (v == UINT_MAX)
1348 		return 0;
1349 	return __blkg_prfill_u64(sf, pd, v);
1350 }
1351 
1352 static int tg_print_conf_u64(struct seq_file *sf, void *v)
1353 {
1354 	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
1355 			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
1356 	return 0;
1357 }
1358 
1359 static int tg_print_conf_uint(struct seq_file *sf, void *v)
1360 {
1361 	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
1362 			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
1363 	return 0;
1364 }
1365 
1366 static void tg_conf_updated(struct throtl_grp *tg, bool global)
1367 {
1368 	struct throtl_service_queue *sq = &tg->service_queue;
1369 	struct cgroup_subsys_state *pos_css;
1370 	struct blkcg_gq *blkg;
1371 
1372 	throtl_log(&tg->service_queue,
1373 		   "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
1374 		   tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
1375 		   tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));
1376 
1377 	/*
1378 	 * Update has_rules[] flags for the updated tg's subtree.  A tg is
1379 	 * considered to have rules if either the tg itself or any of its
1380 	 * ancestors has rules.  This identifies groups without any
1381 	 * restrictions in the whole hierarchy and allows them to bypass
1382 	 * blk-throttle.
1383 	 */
1384 	blkg_for_each_descendant_pre(blkg, pos_css,
1385 			global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
1386 		struct throtl_grp *this_tg = blkg_to_tg(blkg);
1387 		struct throtl_grp *parent_tg;
1388 
1389 		tg_update_has_rules(this_tg);
1390 		/* ignore root/second level */
1391 		if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
1392 		    !blkg->parent->parent)
1393 			continue;
1394 		parent_tg = blkg_to_tg(blkg->parent);
1395 		/*
1396 		 * make sure all children have a lower idle time threshold and
1397 		 * a higher latency target
1398 		 */
1399 		this_tg->idletime_threshold = min(this_tg->idletime_threshold,
1400 				parent_tg->idletime_threshold);
1401 		this_tg->latency_target = max(this_tg->latency_target,
1402 				parent_tg->latency_target);
1403 	}
1404 
1405 	/*
1406 	 * We're already holding queue_lock and know @tg is valid.  Let's
1407 	 * apply the new config directly.
1408 	 *
1409 	 * Restart the slices for both READ and WRITE. It might happen
1410 	 * that a group's limits are dropped suddenly and we don't want to
1411 	 * account recently dispatched IO against the new, lower rate.
1412 	 */
1413 	throtl_start_new_slice(tg, 0);
1414 	throtl_start_new_slice(tg, 1);
1415 
1416 	if (tg->flags & THROTL_TG_PENDING) {
1417 		tg_update_disptime(tg);
1418 		throtl_schedule_next_dispatch(sq->parent_sq, true);
1419 	}
1420 }
1421 
1422 static ssize_t tg_set_conf(struct kernfs_open_file *of,
1423 			   char *buf, size_t nbytes, loff_t off, bool is_u64)
1424 {
1425 	struct blkcg *blkcg = css_to_blkcg(of_css(of));
1426 	struct blkg_conf_ctx ctx;
1427 	struct throtl_grp *tg;
1428 	int ret;
1429 	u64 v;
1430 
1431 	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1432 	if (ret)
1433 		return ret;
1434 
1435 	ret = -EINVAL;
1436 	if (sscanf(ctx.body, "%llu", &v) != 1)
1437 		goto out_finish;
1438 	if (!v)
1439 		v = U64_MAX;
1440 
1441 	tg = blkg_to_tg(ctx.blkg);
1442 
1443 	if (is_u64)
1444 		*(u64 *)((void *)tg + of_cft(of)->private) = v;
1445 	else
1446 		*(unsigned int *)((void *)tg + of_cft(of)->private) = v;
1447 
1448 	tg_conf_updated(tg, false);
1449 	ret = 0;
1450 out_finish:
1451 	blkg_conf_finish(&ctx);
1452 	return ret ?: nbytes;
1453 }
1454 
1455 static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
1456 			       char *buf, size_t nbytes, loff_t off)
1457 {
1458 	return tg_set_conf(of, buf, nbytes, off, true);
1459 }
1460 
1461 static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
1462 				char *buf, size_t nbytes, loff_t off)
1463 {
1464 	return tg_set_conf(of, buf, nbytes, off, false);
1465 }
1466 
1467 static struct cftype throtl_legacy_files[] = {
1468 	{
1469 		.name = "throttle.read_bps_device",
1470 		.private = offsetof(struct throtl_grp, bps[READ][LIMIT_MAX]),
1471 		.seq_show = tg_print_conf_u64,
1472 		.write = tg_set_conf_u64,
1473 	},
1474 	{
1475 		.name = "throttle.write_bps_device",
1476 		.private = offsetof(struct throtl_grp, bps[WRITE][LIMIT_MAX]),
1477 		.seq_show = tg_print_conf_u64,
1478 		.write = tg_set_conf_u64,
1479 	},
1480 	{
1481 		.name = "throttle.read_iops_device",
1482 		.private = offsetof(struct throtl_grp, iops[READ][LIMIT_MAX]),
1483 		.seq_show = tg_print_conf_uint,
1484 		.write = tg_set_conf_uint,
1485 	},
1486 	{
1487 		.name = "throttle.write_iops_device",
1488 		.private = offsetof(struct throtl_grp, iops[WRITE][LIMIT_MAX]),
1489 		.seq_show = tg_print_conf_uint,
1490 		.write = tg_set_conf_uint,
1491 	},
1492 	{
1493 		.name = "throttle.io_service_bytes",
1494 		.private = (unsigned long)&blkcg_policy_throtl,
1495 		.seq_show = blkg_print_stat_bytes,
1496 	},
1497 	{
1498 		.name = "throttle.io_service_bytes_recursive",
1499 		.private = (unsigned long)&blkcg_policy_throtl,
1500 		.seq_show = blkg_print_stat_bytes_recursive,
1501 	},
1502 	{
1503 		.name = "throttle.io_serviced",
1504 		.private = (unsigned long)&blkcg_policy_throtl,
1505 		.seq_show = blkg_print_stat_ios,
1506 	},
1507 	{
1508 		.name = "throttle.io_serviced_recursive",
1509 		.private = (unsigned long)&blkcg_policy_throtl,
1510 		.seq_show = blkg_print_stat_ios_recursive,
1511 	},
1512 	{ }	/* terminate */
1513 };
1514 
1515 static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
1516 			 int off)
1517 {
1518 	struct throtl_grp *tg = pd_to_tg(pd);
1519 	const char *dname = blkg_dev_name(pd->blkg);
1520 	char bufs[4][21] = { "max", "max", "max", "max" };
1521 	u64 bps_dft;
1522 	unsigned int iops_dft;
1523 	char idle_time[26] = "";
1524 	char latency_time[26] = "";
1525 
1526 	if (!dname)
1527 		return 0;
1528 
1529 	if (off == LIMIT_LOW) {
1530 		bps_dft = 0;
1531 		iops_dft = 0;
1532 	} else {
1533 		bps_dft = U64_MAX;
1534 		iops_dft = UINT_MAX;
1535 	}
1536 
1537 	if (tg->bps_conf[READ][off] == bps_dft &&
1538 	    tg->bps_conf[WRITE][off] == bps_dft &&
1539 	    tg->iops_conf[READ][off] == iops_dft &&
1540 	    tg->iops_conf[WRITE][off] == iops_dft &&
1541 	    (off != LIMIT_LOW ||
1542 	     (tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD &&
1543 	      tg->latency_target_conf == DFL_LATENCY_TARGET)))
1544 		return 0;
1545 
1546 	if (tg->bps_conf[READ][off] != U64_MAX)
1547 		snprintf(bufs[0], sizeof(bufs[0]), "%llu",
1548 			tg->bps_conf[READ][off]);
1549 	if (tg->bps_conf[WRITE][off] != U64_MAX)
1550 		snprintf(bufs[1], sizeof(bufs[1]), "%llu",
1551 			tg->bps_conf[WRITE][off]);
1552 	if (tg->iops_conf[READ][off] != UINT_MAX)
1553 		snprintf(bufs[2], sizeof(bufs[2]), "%u",
1554 			tg->iops_conf[READ][off]);
1555 	if (tg->iops_conf[WRITE][off] != UINT_MAX)
1556 		snprintf(bufs[3], sizeof(bufs[3]), "%u",
1557 			tg->iops_conf[WRITE][off]);
1558 	if (off == LIMIT_LOW) {
1559 		if (tg->idletime_threshold_conf == ULONG_MAX)
1560 			strcpy(idle_time, " idle=max");
1561 		else
1562 			snprintf(idle_time, sizeof(idle_time), " idle=%lu",
1563 				tg->idletime_threshold_conf);
1564 
1565 		if (tg->latency_target_conf == ULONG_MAX)
1566 			strcpy(latency_time, " latency=max");
1567 		else
1568 			snprintf(latency_time, sizeof(latency_time),
1569 				" latency=%lu", tg->latency_target_conf);
1570 	}
1571 
1572 	seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s%s%s\n",
1573 		   dname, bufs[0], bufs[1], bufs[2], bufs[3], idle_time,
1574 		   latency_time);
1575 	return 0;
1576 }
1577 
1578 static int tg_print_limit(struct seq_file *sf, void *v)
1579 {
1580 	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit,
1581 			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
1582 	return 0;
1583 }
1584 
1585 static ssize_t tg_set_limit(struct kernfs_open_file *of,
1586 			  char *buf, size_t nbytes, loff_t off)
1587 {
1588 	struct blkcg *blkcg = css_to_blkcg(of_css(of));
1589 	struct blkg_conf_ctx ctx;
1590 	struct throtl_grp *tg;
1591 	u64 v[4];
1592 	unsigned long idle_time;
1593 	unsigned long latency_time;
1594 	int ret;
1595 	int index = of_cft(of)->private;
1596 
1597 	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1598 	if (ret)
1599 		return ret;
1600 
1601 	tg = blkg_to_tg(ctx.blkg);
1602 
1603 	v[0] = tg->bps_conf[READ][index];
1604 	v[1] = tg->bps_conf[WRITE][index];
1605 	v[2] = tg->iops_conf[READ][index];
1606 	v[3] = tg->iops_conf[WRITE][index];
1607 
1608 	idle_time = tg->idletime_threshold_conf;
1609 	latency_time = tg->latency_target_conf;
1610 	while (true) {
1611 		char tok[27];	/* wiops=18446744073709551615 */
1612 		char *p;
1613 		u64 val = U64_MAX;
1614 		int len;
1615 
1616 		if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
1617 			break;
1618 		if (tok[0] == '\0')
1619 			break;
1620 		ctx.body += len;
1621 
1622 		ret = -EINVAL;
1623 		p = tok;
1624 		strsep(&p, "=");
1625 		if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
1626 			goto out_finish;
1627 
1628 		ret = -ERANGE;
1629 		if (!val)
1630 			goto out_finish;
1631 
1632 		ret = -EINVAL;
1633 		if (!strcmp(tok, "rbps"))
1634 			v[0] = val;
1635 		else if (!strcmp(tok, "wbps"))
1636 			v[1] = val;
1637 		else if (!strcmp(tok, "riops"))
1638 			v[2] = min_t(u64, val, UINT_MAX);
1639 		else if (!strcmp(tok, "wiops"))
1640 			v[3] = min_t(u64, val, UINT_MAX);
1641 		else if (off == LIMIT_LOW && !strcmp(tok, "idle"))
1642 			idle_time = val;
1643 		else if (off == LIMIT_LOW && !strcmp(tok, "latency"))
1644 			latency_time = val;
1645 		else
1646 			goto out_finish;
1647 	}
1648 
1649 	tg->bps_conf[READ][index] = v[0];
1650 	tg->bps_conf[WRITE][index] = v[1];
1651 	tg->iops_conf[READ][index] = v[2];
1652 	tg->iops_conf[WRITE][index] = v[3];
1653 
1654 	if (index == LIMIT_MAX) {
1655 		tg->bps[READ][index] = v[0];
1656 		tg->bps[WRITE][index] = v[1];
1657 		tg->iops[READ][index] = v[2];
1658 		tg->iops[WRITE][index] = v[3];
1659 	}
1660 	tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW],
1661 		tg->bps_conf[READ][LIMIT_MAX]);
1662 	tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW],
1663 		tg->bps_conf[WRITE][LIMIT_MAX]);
1664 	tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW],
1665 		tg->iops_conf[READ][LIMIT_MAX]);
1666 	tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],
1667 		tg->iops_conf[WRITE][LIMIT_MAX]);
1668 	tg->idletime_threshold_conf = idle_time;
1669 	tg->latency_target_conf = latency_time;
1670 
1671 	/* force the user to configure all settings for the low limit */
1672 	if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] ||
1673 	      tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) ||
1674 	    tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD ||
1675 	    tg->latency_target_conf == DFL_LATENCY_TARGET) {
1676 		tg->bps[READ][LIMIT_LOW] = 0;
1677 		tg->bps[WRITE][LIMIT_LOW] = 0;
1678 		tg->iops[READ][LIMIT_LOW] = 0;
1679 		tg->iops[WRITE][LIMIT_LOW] = 0;
1680 		tg->idletime_threshold = DFL_IDLE_THRESHOLD;
1681 		tg->latency_target = DFL_LATENCY_TARGET;
1682 	} else if (index == LIMIT_LOW) {
1683 		tg->idletime_threshold = tg->idletime_threshold_conf;
1684 		tg->latency_target = tg->latency_target_conf;
1685 	}
1686 
1687 	blk_throtl_update_limit_valid(tg->td);
1688 	if (tg->td->limit_valid[LIMIT_LOW]) {
1689 		if (index == LIMIT_LOW)
1690 			tg->td->limit_index = LIMIT_LOW;
1691 	} else
1692 		tg->td->limit_index = LIMIT_MAX;
1693 	tg_conf_updated(tg, index == LIMIT_LOW &&
1694 		tg->td->limit_valid[LIMIT_LOW]);
1695 	ret = 0;
1696 out_finish:
1697 	blkg_conf_finish(&ctx);
1698 	return ret ?: nbytes;
1699 }
1700 
1701 static struct cftype throtl_files[] = {
1702 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
1703 	{
1704 		.name = "low",
1705 		.flags = CFTYPE_NOT_ON_ROOT,
1706 		.seq_show = tg_print_limit,
1707 		.write = tg_set_limit,
1708 		.private = LIMIT_LOW,
1709 	},
1710 #endif
1711 	{
1712 		.name = "max",
1713 		.flags = CFTYPE_NOT_ON_ROOT,
1714 		.seq_show = tg_print_limit,
1715 		.write = tg_set_limit,
1716 		.private = LIMIT_MAX,
1717 	},
1718 	{ }	/* terminate */
1719 };
1720 
1721 static void throtl_shutdown_wq(struct request_queue *q)
1722 {
1723 	struct throtl_data *td = q->td;
1724 
1725 	cancel_work_sync(&td->dispatch_work);
1726 }
1727 
1728 static struct blkcg_policy blkcg_policy_throtl = {
1729 	.dfl_cftypes		= throtl_files,
1730 	.legacy_cftypes		= throtl_legacy_files,
1731 
1732 	.pd_alloc_fn		= throtl_pd_alloc,
1733 	.pd_init_fn		= throtl_pd_init,
1734 	.pd_online_fn		= throtl_pd_online,
1735 	.pd_offline_fn		= throtl_pd_offline,
1736 	.pd_free_fn		= throtl_pd_free,
1737 };
1738 
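/*
 * Return the oldest time at which this group last hit one of its
 * configured low limits.  Directions without a low limit contribute the
 * current time and are therefore effectively ignored by the min().
 */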
1739 static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg)
1740 {
1741 	unsigned long rtime = jiffies, wtime = jiffies;
1742 
1743 	if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW])
1744 		rtime = tg->last_low_overflow_time[READ];
1745 	if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
1746 		wtime = tg->last_low_overflow_time[WRITE];
1747 	return min(rtime, wtime);
1748 }
1749 
1750 /* tg should not be an intermediate node */
1751 static unsigned long tg_last_low_overflow_time(struct throtl_grp *tg)
1752 {
1753 	struct throtl_service_queue *parent_sq;
1754 	struct throtl_grp *parent = tg;
1755 	unsigned long ret = __tg_last_low_overflow_time(tg);
1756 
1757 	while (true) {
1758 		parent_sq = parent->service_queue.parent_sq;
1759 		parent = sq_to_tg(parent_sq);
1760 		if (!parent)
1761 			break;
1762 
1763 		/*
1764 		 * The parent doesn't have a low limit, so it always reaches
1765 		 * its low limit. Its overflow time is useless for its children.
1766 		 */
1767 		if (!parent->bps[READ][LIMIT_LOW] &&
1768 		    !parent->iops[READ][LIMIT_LOW] &&
1769 		    !parent->bps[WRITE][LIMIT_LOW] &&
1770 		    !parent->iops[WRITE][LIMIT_LOW])
1771 			continue;
1772 		if (time_after(__tg_last_low_overflow_time(parent), ret))
1773 			ret = __tg_last_low_overflow_time(parent);
1774 	}
1775 	return ret;
1776 }
1777 
1778 static bool throtl_tg_is_idle(struct throtl_grp *tg)
1779 {
1780 	/*
1781 	 * cgroup is idle if:
1782 	 * - a single idle period is too long: longer than a fixed cap (in case
1783 	 *   the user configures too big a threshold) or 4x the idletime threshold
1784 	 * - the average think time is longer than the threshold
1785 	 * - the IO latency is largely below the target
1786 	 */
1787 	unsigned long time;
1788 	bool ret;
1789 
1790 	time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold);
1791 	ret = tg->latency_target == DFL_LATENCY_TARGET ||
1792 	      tg->idletime_threshold == DFL_IDLE_THRESHOLD ||
1793 	      (ktime_get_ns() >> 10) - tg->last_finish_time > time ||
1794 	      tg->avg_idletime > tg->idletime_threshold ||
1795 	      (tg->latency_target && tg->bio_cnt &&
1796 		tg->bad_bio_cnt * 5 < tg->bio_cnt);
1797 	throtl_log(&tg->service_queue,
1798 		"avg_idle=%ld, idle_threshold=%ld, bad_bio=%d, total_bio=%d, is_idle=%d, scale=%d",
1799 		tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt,
1800 		tg->bio_cnt, ret, tg->td->scale);
1801 	return ret;
1802 }
1803 
1804 static bool throtl_tg_can_upgrade(struct throtl_grp *tg)
1805 {
1806 	struct throtl_service_queue *sq = &tg->service_queue;
1807 	bool read_limit, write_limit;
1808 
1809 	/*
1810 	 * If the cgroup reaches its low limit (a low limit of 0 is always
1811 	 * considered reached), it's ok to upgrade to the next limit.
1812 	 */
1813 	read_limit = tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW];
1814 	write_limit = tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW];
1815 	if (!read_limit && !write_limit)
1816 		return true;
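	/*
	 * Bios queued in every direction that has a low limit means the
	 * group has reached its low limit, so it's ok to upgrade.
	 */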
1817 	if (read_limit && sq->nr_queued[READ] &&
1818 	    (!write_limit || sq->nr_queued[WRITE]))
1819 		return true;
1820 	if (write_limit && sq->nr_queued[WRITE] &&
1821 	    (!read_limit || sq->nr_queued[READ]))
1822 		return true;
1823 
1824 	if (time_after_eq(jiffies,
1825 		tg_last_low_overflow_time(tg) + tg->td->throtl_slice) &&
1826 	    throtl_tg_is_idle(tg))
1827 		return true;
1828 	return false;
1829 }
1830 
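/*
 * Returns true if @tg or any of its non-root ancestors can be upgraded.
 */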
1831 static bool throtl_hierarchy_can_upgrade(struct throtl_grp *tg)
1832 {
1833 	while (true) {
1834 		if (throtl_tg_can_upgrade(tg))
1835 			return true;
1836 		tg = sq_to_tg(tg->service_queue.parent_sq);
1837 		if (!tg || !tg_to_blkg(tg)->parent)
1838 			return false;
1839 	}
1840 	return false;
1841 }
1842 
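/*
 * The queue can leave LIMIT_LOW only if every leaf cgroup (other than
 * @this_tg) can be upgraded somewhere in its hierarchy and at least one
 * throtl_slice has passed since the last downgrade.
 */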
1843 static bool throtl_can_upgrade(struct throtl_data *td,
1844 	struct throtl_grp *this_tg)
1845 {
1846 	struct cgroup_subsys_state *pos_css;
1847 	struct blkcg_gq *blkg;
1848 
1849 	if (td->limit_index != LIMIT_LOW)
1850 		return false;
1851 
1852 	if (time_before(jiffies, td->low_downgrade_time + td->throtl_slice))
1853 		return false;
1854 
1855 	rcu_read_lock();
1856 	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
1857 		struct throtl_grp *tg = blkg_to_tg(blkg);
1858 
1859 		if (tg == this_tg)
1860 			continue;
1861 		if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
1862 			continue;
1863 		if (!throtl_hierarchy_can_upgrade(tg)) {
1864 			rcu_read_unlock();
1865 			return false;
1866 		}
1867 	}
1868 	rcu_read_unlock();
1869 	return true;
1870 }
1871 
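/*
 * Per-bio check while the queue runs under LIMIT_LOW, rate limited to
 * once per throtl_slice per group: if this group hasn't hit its low
 * limit for a full slice, see whether the whole queue can be upgraded
 * back to LIMIT_MAX.
 */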
1872 static void throtl_upgrade_check(struct throtl_grp *tg)
1873 {
1874 	unsigned long now = jiffies;
1875 
1876 	if (tg->td->limit_index != LIMIT_LOW)
1877 		return;
1878 
1879 	if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
1880 		return;
1881 
1882 	tg->last_check_time = now;
1883 
1884 	if (!time_after_eq(now,
1885 	     __tg_last_low_overflow_time(tg) + tg->td->throtl_slice))
1886 		return;
1887 
1888 	if (throtl_can_upgrade(tg->td, NULL))
1889 		throtl_upgrade_state(tg->td);
1890 }
1891 
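/*
 * Switch the whole queue to LIMIT_MAX and kick dispatch on every group
 * so bios queued under the low limits are re-evaluated against the max
 * limits right away.
 */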
1892 static void throtl_upgrade_state(struct throtl_data *td)
1893 {
1894 	struct cgroup_subsys_state *pos_css;
1895 	struct blkcg_gq *blkg;
1896 
1897 	throtl_log(&td->service_queue, "upgrade to max");
1898 	td->limit_index = LIMIT_MAX;
1899 	td->low_upgrade_time = jiffies;
1900 	td->scale = 0;
1901 	rcu_read_lock();
1902 	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
1903 		struct throtl_grp *tg = blkg_to_tg(blkg);
1904 		struct throtl_service_queue *sq = &tg->service_queue;
1905 
1906 		tg->disptime = jiffies - 1;
1907 		throtl_select_dispatch(sq);
1908 		throtl_schedule_next_dispatch(sq, true);
1909 	}
1910 	rcu_read_unlock();
1911 	throtl_select_dispatch(&td->service_queue);
1912 	throtl_schedule_next_dispatch(&td->service_queue, true);
1913 	queue_work(kthrotld_workqueue, &td->dispatch_work);
1914 }
1915 
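/*
 * Move towards @new (LIMIT_LOW).  The switch is damped by td->scale:
 * each call halves the scale and, while it is still non-zero, only pulls
 * low_upgrade_time back instead of actually changing the limit, so a
 * queue bouncing between states settles down gradually.
 */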
1916 static void throtl_downgrade_state(struct throtl_data *td, int new)
1917 {
1918 	td->scale /= 2;
1919 
1920 	throtl_log(&td->service_queue, "downgrade, scale %d", td->scale);
1921 	if (td->scale) {
1922 		td->low_upgrade_time = jiffies - td->scale * td->throtl_slice;
1923 		return;
1924 	}
1925 
1926 	td->limit_index = new;
1927 	td->low_downgrade_time = jiffies;
1928 }
1929 
1930 static bool throtl_tg_can_downgrade(struct throtl_grp *tg)
1931 {
1932 	struct throtl_data *td = tg->td;
1933 	unsigned long now = jiffies;
1934 
1935 	/*
1936 	 * If the cgroup is below its low limit, consider a downgrade so the
1937 	 * other cgroups get throttled.
1938 	 */
1939 	if (time_after_eq(now, td->low_upgrade_time + td->throtl_slice) &&
1940 	    time_after_eq(now, tg_last_low_overflow_time(tg) +
1941 					td->throtl_slice) &&
1942 	    (!throtl_tg_is_idle(tg) ||
1943 	     !list_empty(&tg_to_blkg(tg)->blkcg->css.children)))
1944 		return true;
1945 	return false;
1946 }
1947 
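/*
 * A downgrade happens only if @tg and all of its non-root ancestors can
 * downgrade; any level that still wants its max limit blocks it.
 */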
1948 static bool throtl_hierarchy_can_downgrade(struct throtl_grp *tg)
1949 {
1950 	while (true) {
1951 		if (!throtl_tg_can_downgrade(tg))
1952 			return false;
1953 		tg = sq_to_tg(tg->service_queue.parent_sq);
1954 		if (!tg || !tg_to_blkg(tg)->parent)
1955 			break;
1956 	}
1957 	return true;
1958 }
1959 
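/*
 * Per-bio bookkeeping while the queue runs under LIMIT_MAX, rate limited
 * to once per throtl_slice per group: compute the group's dispatch rate
 * over the elapsed window, record when it reaches its low limits, and
 * downgrade the queue to LIMIT_LOW if the whole hierarchy has stayed
 * below its low limits for a slice.
 */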
1960 static void throtl_downgrade_check(struct throtl_grp *tg)
1961 {
1962 	uint64_t bps;
1963 	unsigned int iops;
1964 	unsigned long elapsed_time;
1965 	unsigned long now = jiffies;
1966 
1967 	if (tg->td->limit_index != LIMIT_MAX ||
1968 	    !tg->td->limit_valid[LIMIT_LOW])
1969 		return;
1970 	if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
1971 		return;
1972 	if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
1973 		return;
1974 
1975 	elapsed_time = now - tg->last_check_time;
1976 	tg->last_check_time = now;
1977 
1978 	if (time_before(now, tg_last_low_overflow_time(tg) +
1979 			tg->td->throtl_slice))
1980 		return;
1981 
1982 	if (tg->bps[READ][LIMIT_LOW]) {
1983 		bps = tg->last_bytes_disp[READ] * HZ;
1984 		do_div(bps, elapsed_time);
1985 		if (bps >= tg->bps[READ][LIMIT_LOW])
1986 			tg->last_low_overflow_time[READ] = now;
1987 	}
1988 
1989 	if (tg->bps[WRITE][LIMIT_LOW]) {
1990 		bps = tg->last_bytes_disp[WRITE] * HZ;
1991 		do_div(bps, elapsed_time);
1992 		if (bps >= tg->bps[WRITE][LIMIT_LOW])
1993 			tg->last_low_overflow_time[WRITE] = now;
1994 	}
1995 
1996 	if (tg->iops[READ][LIMIT_LOW]) {
1997 		iops = tg->last_io_disp[READ] * HZ / elapsed_time;
1998 		if (iops >= tg->iops[READ][LIMIT_LOW])
1999 			tg->last_low_overflow_time[READ] = now;
2000 	}
2001 
2002 	if (tg->iops[WRITE][LIMIT_LOW]) {
2003 		iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
2004 		if (iops >= tg->iops[WRITE][LIMIT_LOW])
2005 			tg->last_low_overflow_time[WRITE] = now;
2006 	}
2007 
2008 	/*
2009 	 * If the cgroup is below its low limit, consider a downgrade so the
2010 	 * other cgroups get throttled.
2011 	 */
2012 	if (throtl_hierarchy_can_downgrade(tg))
2013 		throtl_downgrade_state(tg->td, LIMIT_LOW);
2014 
2015 	tg->last_bytes_disp[READ] = 0;
2016 	tg->last_bytes_disp[WRITE] = 0;
2017 	tg->last_io_disp[READ] = 0;
2018 	tg->last_io_disp[WRITE] = 0;
2019 }
2020 
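/*
 * Track the group's think time (the gap between its last IO completion
 * and the arrival of a new bio, roughly in microseconds since times are
 * ns >> 10) as an exponential moving average: avg = (7 * avg + sample) / 8.
 */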
2021 static void blk_throtl_update_idletime(struct throtl_grp *tg)
2022 {
2023 	unsigned long now = ktime_get_ns() >> 10;
2024 	unsigned long last_finish_time = tg->last_finish_time;
2025 
2026 	if (now <= last_finish_time || last_finish_time == 0 ||
2027 	    last_finish_time == tg->checked_last_finish_time)
2028 		return;
2029 
2030 	tg->avg_idletime = (tg->avg_idletime * 7 + now - last_finish_time) >> 3;
2031 	tg->checked_last_finish_time = last_finish_time;
2032 }
2033 
2034 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
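/*
 * Once per second, fold the per-cpu latency samples into per-size-bucket
 * running averages: avg = (7 * avg + new) / 8 once a bucket has enough
 * samples, and a bucket's average is never allowed to drop below that of
 * the next smaller request-size bucket.
 */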
2035 static void throtl_update_latency_buckets(struct throtl_data *td)
2036 {
2037 	struct avg_latency_bucket avg_latency[2][LATENCY_BUCKET_SIZE];
2038 	int i, cpu, rw;
2039 	unsigned long last_latency[2] = { 0 };
2040 	unsigned long latency[2];
2041 
2042 	if (!blk_queue_nonrot(td->queue))
2043 		return;
2044 	if (time_before(jiffies, td->last_calculate_time + HZ))
2045 		return;
2046 	td->last_calculate_time = jiffies;
2047 
2048 	memset(avg_latency, 0, sizeof(avg_latency));
2049 	for (rw = READ; rw <= WRITE; rw++) {
2050 		for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2051 			struct latency_bucket *tmp = &td->tmp_buckets[rw][i];
2052 
2053 			for_each_possible_cpu(cpu) {
2054 				struct latency_bucket *bucket;
2055 
2056 				/* this isn't race free, but ok in practice */
2057 				bucket = per_cpu_ptr(td->latency_buckets[rw],
2058 					cpu);
2059 				tmp->total_latency += bucket[i].total_latency;
2060 				tmp->samples += bucket[i].samples;
2061 				bucket[i].total_latency = 0;
2062 				bucket[i].samples = 0;
2063 			}
2064 
2065 			if (tmp->samples >= 32) {
2066 				int samples = tmp->samples;
2067 
2068 				latency[rw] = tmp->total_latency;
2069 
2070 				tmp->total_latency = 0;
2071 				tmp->samples = 0;
2072 				latency[rw] /= samples;
2073 				if (latency[rw] == 0)
2074 					continue;
2075 				avg_latency[rw][i].latency = latency[rw];
2076 			}
2077 		}
2078 	}
2079 
2080 	for (rw = READ; rw <= WRITE; rw++) {
2081 		for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2082 			if (!avg_latency[rw][i].latency) {
2083 				if (td->avg_buckets[rw][i].latency < last_latency[rw])
2084 					td->avg_buckets[rw][i].latency =
2085 						last_latency[rw];
2086 				continue;
2087 			}
2088 
2089 			if (!td->avg_buckets[rw][i].valid)
2090 				latency[rw] = avg_latency[rw][i].latency;
2091 			else
2092 				latency[rw] = (td->avg_buckets[rw][i].latency * 7 +
2093 					avg_latency[rw][i].latency) >> 3;
2094 
2095 			td->avg_buckets[rw][i].latency = max(latency[rw],
2096 				last_latency[rw]);
2097 			td->avg_buckets[rw][i].valid = true;
2098 			last_latency[rw] = td->avg_buckets[rw][i].latency;
2099 		}
2100 	}
2101 
2102 	for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
2103 		throtl_log(&td->service_queue,
2104 			"Latency bucket %d: read latency=%ld, read valid=%d, "
2105 			"write latency=%ld, write valid=%d", i,
2106 			td->avg_buckets[READ][i].latency,
2107 			td->avg_buckets[READ][i].valid,
2108 			td->avg_buckets[WRITE][i].latency,
2109 			td->avg_buckets[WRITE][i].valid);
2110 }
2111 #else
2112 static inline void throtl_update_latency_buckets(struct throtl_data *td)
2113 {
2114 }
2115 #endif
2116 
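/*
 * Main throttling entry point.  Walk @bio up the throtl_grp hierarchy:
 * at each level the bio is either charged to the group and handed to the
 * parent, or queued on the group's service queue for later dispatch.
 * Returns true if the bio was queued; the caller must not issue it now.
 */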
2117 bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
2118 		    struct bio *bio)
2119 {
2120 	struct throtl_qnode *qn = NULL;
2121 	struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
2122 	struct throtl_service_queue *sq;
2123 	bool rw = bio_data_dir(bio);
2124 	bool throttled = false;
2125 	struct throtl_data *td = tg->td;
2126 
2127 	WARN_ON_ONCE(!rcu_read_lock_held());
2128 
2129 	/* see throtl_charge_bio() */
2130 	if (bio_flagged(bio, BIO_THROTTLED) || !tg->has_rules[rw])
2131 		goto out;
2132 
2133 	spin_lock_irq(&q->queue_lock);
2134 
2135 	throtl_update_latency_buckets(td);
2136 
2137 	blk_throtl_update_idletime(tg);
2138 
2139 	sq = &tg->service_queue;
2140 
2141 again:
2142 	while (true) {
2143 		if (tg->last_low_overflow_time[rw] == 0)
2144 			tg->last_low_overflow_time[rw] = jiffies;
2145 		throtl_downgrade_check(tg);
2146 		throtl_upgrade_check(tg);
2147 		/* throtl is FIFO - if bios are already queued, should queue */
2148 		if (sq->nr_queued[rw])
2149 			break;
2150 
2151 		/* if above limits, break to queue */
2152 		if (!tg_may_dispatch(tg, bio, NULL)) {
2153 			tg->last_low_overflow_time[rw] = jiffies;
2154 			if (throtl_can_upgrade(td, tg)) {
2155 				throtl_upgrade_state(td);
2156 				goto again;
2157 			}
2158 			break;
2159 		}
2160 
2161 		/* within limits, let's charge and dispatch directly */
2162 		throtl_charge_bio(tg, bio);
2163 
2164 		/*
2165 		 * We need to trim the slice even when bios are not being
2166 		 * queued; otherwise a bio might not be queued for a long
2167 		 * time, the slice keeps on extending, and trim is never
2168 		 * called. If the limits are then reduced suddenly, all the
2169 		 * IO dispatched so far is taken into account at the new low
2170 		 * rate and newly queued IO gets a really long dispatch
2171 		 * time.
2172 		 *
2173 		 * So keep on trimming slice even if bio is not queued.
2174 		 */
2175 		throtl_trim_slice(tg, rw);
2176 
2177 		/*
2178 		 * @bio passed through this layer without being throttled.
2179 		 * Climb up the ladder.  If we're already at the top, it
2180 		 * can be executed directly.
2181 		 */
2182 		qn = &tg->qnode_on_parent[rw];
2183 		sq = sq->parent_sq;
2184 		tg = sq_to_tg(sq);
2185 		if (!tg)
2186 			goto out_unlock;
2187 	}
2188 
2189 	/* out-of-limit, queue to @tg */
2190 	throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
2191 		   rw == READ ? 'R' : 'W',
2192 		   tg->bytes_disp[rw], bio->bi_iter.bi_size,
2193 		   tg_bps_limit(tg, rw),
2194 		   tg->io_disp[rw], tg_iops_limit(tg, rw),
2195 		   sq->nr_queued[READ], sq->nr_queued[WRITE]);
2196 
2197 	tg->last_low_overflow_time[rw] = jiffies;
2198 
2199 	td->nr_queued[rw]++;
2200 	throtl_add_bio_tg(bio, qn, tg);
2201 	throttled = true;
2202 
2203 	/*
2204 	 * Update @tg's dispatch time and force schedule dispatch if @tg
2205 	 * was empty before @bio.  The forced scheduling isn't likely to
2206 	 * cause undue delay as @bio is likely to be dispatched directly if
2207 	 * its @tg's disptime is not in the future.
2208 	 */
2209 	if (tg->flags & THROTL_TG_WAS_EMPTY) {
2210 		tg_update_disptime(tg);
2211 		throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
2212 	}
2213 
2214 out_unlock:
2215 	spin_unlock_irq(&q->queue_lock);
2216 out:
2217 	bio_set_flag(bio, BIO_THROTTLED);
2218 
2219 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2220 	if (throttled || !td->track_bio_latency)
2221 		bio->bi_issue.value |= BIO_ISSUE_THROTL_SKIP_LATENCY;
2222 #endif
2223 	return throttled;
2224 }
2225 
2226 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
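/*
 * Record one completion latency sample (in usecs) in the per-cpu bucket
 * matching the request size.  Samples are only collected while the queue
 * runs under LIMIT_LOW on a non-rotational device.
 */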
2227 static void throtl_track_latency(struct throtl_data *td, sector_t size,
2228 	int op, unsigned long time)
2229 {
2230 	struct latency_bucket *latency;
2231 	int index;
2232 
2233 	if (!td || td->limit_index != LIMIT_LOW ||
2234 	    !(op == REQ_OP_READ || op == REQ_OP_WRITE) ||
2235 	    !blk_queue_nonrot(td->queue))
2236 		return;
2237 
2238 	index = request_bucket_index(size);
2239 
2240 	latency = get_cpu_ptr(td->latency_buckets[op]);
2241 	latency[index].total_latency += time;
2242 	latency[index].samples++;
2243 	put_cpu_ptr(td->latency_buckets[op]);
2244 }
2245 
2246 void blk_throtl_stat_add(struct request *rq, u64 time_ns)
2247 {
2248 	struct request_queue *q = rq->q;
2249 	struct throtl_data *td = q->td;
2250 
2251 	throtl_track_latency(td, blk_rq_stats_sectors(rq), req_op(rq),
2252 			     time_ns >> 10);
2253 }
2254 
2255 void blk_throtl_bio_endio(struct bio *bio)
2256 {
2257 	struct blkcg_gq *blkg;
2258 	struct throtl_grp *tg;
2259 	u64 finish_time_ns;
2260 	unsigned long finish_time;
2261 	unsigned long start_time;
2262 	unsigned long lat;
2263 	int rw = bio_data_dir(bio);
2264 
2265 	blkg = bio->bi_blkg;
2266 	if (!blkg)
2267 		return;
2268 	tg = blkg_to_tg(blkg);
2269 
2270 	finish_time_ns = ktime_get_ns();
2271 	tg->last_finish_time = finish_time_ns >> 10;
2272 
2273 	start_time = bio_issue_time(&bio->bi_issue) >> 10;
2274 	finish_time = __bio_issue_time(finish_time_ns) >> 10;
2275 	if (!start_time || finish_time <= start_time)
2276 		return;
2277 
2278 	lat = finish_time - start_time;
2279 	/* this is only for bio based driver */
2280 	if (!(bio->bi_issue.value & BIO_ISSUE_THROTL_SKIP_LATENCY))
2281 		throtl_track_latency(tg->td, bio_issue_size(&bio->bi_issue),
2282 				     bio_op(bio), lat);
2283 
2284 	if (tg->latency_target && lat >= tg->td->filtered_latency) {
2285 		int bucket;
2286 		unsigned int threshold;
2287 
2288 		bucket = request_bucket_index(bio_issue_size(&bio->bi_issue));
2289 		threshold = tg->td->avg_buckets[rw][bucket].latency +
2290 			tg->latency_target;
2291 		if (lat > threshold)
2292 			tg->bad_bio_cnt++;
2293 		/*
2294 		 * Not race free; the counts could be slightly wrong, which
2295 		 * means cgroups may be throttled slightly inaccurately.
2296 		 */
2297 		tg->bio_cnt++;
2298 	}
2299 
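	/*
	 * Periodically decay the counters so stale history doesn't dominate
	 * the good/bad bio ratio used by throtl_tg_is_idle().
	 */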
2300 	if (time_after(jiffies, tg->bio_cnt_reset_time) || tg->bio_cnt > 1024) {
2301 		tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies;
2302 		tg->bio_cnt /= 2;
2303 		tg->bad_bio_cnt /= 2;
2304 	}
2305 }
2306 #endif
2307 
2308 /*
2309  * Dispatch all bios from all children tg's queued on @parent_sq.  On
2310  * return, @parent_sq is guaranteed to not have any active children tg's
2311  * and all bios from previously active tg's are on @parent_sq->bio_lists[].
2312  */
2313 static void tg_drain_bios(struct throtl_service_queue *parent_sq)
2314 {
2315 	struct throtl_grp *tg;
2316 
2317 	while ((tg = throtl_rb_first(parent_sq))) {
2318 		struct throtl_service_queue *sq = &tg->service_queue;
2319 		struct bio *bio;
2320 
2321 		throtl_dequeue_tg(tg);
2322 
2323 		while ((bio = throtl_peek_queued(&sq->queued[READ])))
2324 			tg_dispatch_one_bio(tg, bio_data_dir(bio));
2325 		while ((bio = throtl_peek_queued(&sq->queued[WRITE])))
2326 			tg_dispatch_one_bio(tg, bio_data_dir(bio));
2327 	}
2328 }
2329 
2330 /**
2331  * blk_throtl_drain - drain throttled bios
2332  * @q: request_queue to drain throttled bios for
2333  *
2334  * Dispatch all currently throttled bios on @q through ->make_request_fn().
2335  */
2336 void blk_throtl_drain(struct request_queue *q)
2337 	__releases(&q->queue_lock) __acquires(&q->queue_lock)
2338 {
2339 	struct throtl_data *td = q->td;
2340 	struct blkcg_gq *blkg;
2341 	struct cgroup_subsys_state *pos_css;
2342 	struct bio *bio;
2343 	int rw;
2344 
2345 	rcu_read_lock();
2346 
2347 	/*
2348 	 * Drain each tg while doing post-order walk on the blkg tree, so
2349 	 * that all bios are propagated to td->service_queue.  It'd be
2350 	 * better to walk service_queue tree directly but blkg walk is
2351 	 * easier.
2352 	 */
2353 	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
2354 		tg_drain_bios(&blkg_to_tg(blkg)->service_queue);
2355 
2356 	/* finally, transfer bios from top-level tg's into the td */
2357 	tg_drain_bios(&td->service_queue);
2358 
2359 	rcu_read_unlock();
2360 	spin_unlock_irq(&q->queue_lock);
2361 
2362 	/* all bios now should be in td->service_queue, issue them */
2363 	for (rw = READ; rw <= WRITE; rw++)
2364 		while ((bio = throtl_pop_queued(&td->service_queue.queued[rw],
2365 						NULL)))
2366 			generic_make_request(bio);
2367 
2368 	spin_lock_irq(&q->queue_lock);
2369 }
2370 
2371 int blk_throtl_init(struct request_queue *q)
2372 {
2373 	struct throtl_data *td;
2374 	int ret;
2375 
2376 	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
2377 	if (!td)
2378 		return -ENOMEM;
2379 	td->latency_buckets[READ] = __alloc_percpu(sizeof(struct latency_bucket) *
2380 		LATENCY_BUCKET_SIZE, __alignof__(u64));
2381 	if (!td->latency_buckets[READ]) {
2382 		kfree(td);
2383 		return -ENOMEM;
2384 	}
2385 	td->latency_buckets[WRITE] = __alloc_percpu(sizeof(struct latency_bucket) *
2386 		LATENCY_BUCKET_SIZE, __alignof__(u64));
2387 	if (!td->latency_buckets[WRITE]) {
2388 		free_percpu(td->latency_buckets[READ]);
2389 		kfree(td);
2390 		return -ENOMEM;
2391 	}
2392 
2393 	INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
2394 	throtl_service_queue_init(&td->service_queue);
2395 
2396 	q->td = td;
2397 	td->queue = q;
2398 
2399 	td->limit_valid[LIMIT_MAX] = true;
2400 	td->limit_index = LIMIT_MAX;
2401 	td->low_upgrade_time = jiffies;
2402 	td->low_downgrade_time = jiffies;
2403 
2404 	/* activate policy */
2405 	ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
2406 	if (ret) {
2407 		free_percpu(td->latency_buckets[READ]);
2408 		free_percpu(td->latency_buckets[WRITE]);
2409 		kfree(td);
2410 	}
2411 	return ret;
2412 }
2413 
2414 void blk_throtl_exit(struct request_queue *q)
2415 {
2416 	BUG_ON(!q->td);
2417 	throtl_shutdown_wq(q);
2418 	blkcg_deactivate_policy(q, &blkcg_policy_throtl);
2419 	free_percpu(q->td->latency_buckets[READ]);
2420 	free_percpu(q->td->latency_buckets[WRITE]);
2421 	kfree(q->td);
2422 }
2423 
2424 void blk_throtl_register_queue(struct request_queue *q)
2425 {
2426 	struct throtl_data *td;
2427 	int i;
2428 
2429 	td = q->td;
2430 	BUG_ON(!td);
2431 
2432 	if (blk_queue_nonrot(q)) {
2433 		td->throtl_slice = DFL_THROTL_SLICE_SSD;
2434 		td->filtered_latency = LATENCY_FILTERED_SSD;
2435 	} else {
2436 		td->throtl_slice = DFL_THROTL_SLICE_HD;
2437 		td->filtered_latency = LATENCY_FILTERED_HD;
2438 		for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2439 			td->avg_buckets[READ][i].latency = DFL_HD_BASELINE_LATENCY;
2440 			td->avg_buckets[WRITE][i].latency = DFL_HD_BASELINE_LATENCY;
2441 		}
2442 	}
2443 #ifndef CONFIG_BLK_DEV_THROTTLING_LOW
2444 	/* if no low limit, use previous default */
2445 	td->throtl_slice = DFL_THROTL_SLICE_HD;
2446 #endif
2447 
2448 	td->track_bio_latency = !queue_is_mq(q);
2449 	if (!td->track_bio_latency)
2450 		blk_stat_enable_accounting(q);
2451 }
2452 
2453 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2454 ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page)
2455 {
2456 	if (!q->td)
2457 		return -EINVAL;
2458 	return sprintf(page, "%u\n", jiffies_to_msecs(q->td->throtl_slice));
2459 }
2460 
2461 ssize_t blk_throtl_sample_time_store(struct request_queue *q,
2462 	const char *page, size_t count)
2463 {
2464 	unsigned long v;
2465 	unsigned long t;
2466 
2467 	if (!q->td)
2468 		return -EINVAL;
2469 	if (kstrtoul(page, 10, &v))
2470 		return -EINVAL;
2471 	t = msecs_to_jiffies(v);
2472 	if (t == 0 || t > MAX_THROTL_SLICE)
2473 		return -EINVAL;
2474 	q->td->throtl_slice = t;
2475 	return count;
2476 }
2477 #endif
2478 
2479 static int __init throtl_init(void)
2480 {
2481 	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
2482 	if (!kthrotld_workqueue)
2483 		panic("Failed to create kthrotld\n");
2484 
2485 	return blkcg_policy_register(&blkcg_policy_throtl);
2486 }
2487 
2488 module_init(throtl_init);
2489