xref: /linux/block/blk-throttle.c (revision be54f8c558027a218423134dd9b8c7c46d92204a)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Interface for controlling IO bandwidth on a request queue
4  *
5  * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
6  */
7 
8 #include <linux/module.h>
9 #include <linux/slab.h>
10 #include <linux/blkdev.h>
11 #include <linux/bio.h>
12 #include <linux/blktrace_api.h>
13 #include "blk.h"
14 #include "blk-cgroup-rwstat.h"
15 #include "blk-stat.h"
16 #include "blk-throttle.h"
17 
18 /* Max dispatch from a group in 1 round */
19 #define THROTL_GRP_QUANTUM 8
20 
21 /* Total max dispatch from all groups in one round */
22 #define THROTL_QUANTUM 32
23 
24 /* Throttling is performed over a slice and after that slice is renewed */
25 #define DFL_THROTL_SLICE_HD (HZ / 10)
26 #define DFL_THROTL_SLICE_SSD (HZ / 50)
27 #define MAX_THROTL_SLICE (HZ)
28 
29 /* A workqueue to queue throttle related work */
30 static struct workqueue_struct *kthrotld_workqueue;
31 
32 #define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)
33 
34 struct throtl_data
35 {
36 	/* service tree for active throtl groups */
37 	struct throtl_service_queue service_queue;
38 
39 	struct request_queue *queue;
40 
41 	/* Total number of queued bios on READ and WRITE lists */
42 	unsigned int nr_queued[2];
43 
44 	unsigned int throtl_slice;
45 
46 	/* Work for dispatching throttled bios */
47 	struct work_struct dispatch_work;
48 
49 	bool track_bio_latency;
50 };
51 
52 static void throtl_pending_timer_fn(struct timer_list *t);
53 
54 static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
55 {
56 	return pd_to_blkg(&tg->pd);
57 }
58 
59 /**
60  * sq_to_tg - return the throtl_grp the specified service queue belongs to
61  * @sq: the throtl_service_queue of interest
62  *
63  * Return the throtl_grp @sq belongs to.  If @sq is the top-level one
64  * embedded in throtl_data, %NULL is returned.
65  */
66 static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
67 {
68 	if (sq && sq->parent_sq)
69 		return container_of(sq, struct throtl_grp, service_queue);
70 	else
71 		return NULL;
72 }
73 
74 /**
75  * sq_to_td - return throtl_data the specified service queue belongs to
76  * @sq: the throtl_service_queue of interest
77  *
78  * A service_queue can be embedded in either a throtl_grp or throtl_data.
79  * Determine the associated throtl_data accordingly and return it.
80  */
81 static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
82 {
83 	struct throtl_grp *tg = sq_to_tg(sq);
84 
85 	if (tg)
86 		return tg->td;
87 	else
88 		return container_of(sq, struct throtl_data, service_queue);
89 }
90 
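/*
 * Return the configured bps limit for @tg in direction @rw.  On the
 * default (cgroup v2) hierarchy the root group is never throttled, so it
 * always reports "no limit".  tg_iops_limit() below follows the same
 * rule for iops.
 */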
91 static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
92 {
93 	struct blkcg_gq *blkg = tg_to_blkg(tg);
94 
95 	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
96 		return U64_MAX;
97 
98 	return tg->bps[rw];
99 }
100 
101 static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
102 {
103 	struct blkcg_gq *blkg = tg_to_blkg(tg);
104 
105 	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
106 		return UINT_MAX;
107 
108 	return tg->iops[rw];
109 }
110 
111 /**
112  * throtl_log - log debug message via blktrace
113  * @sq: the service_queue being reported
114  * @fmt: printf format string
115  * @args: printf args
116  *
117  * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
118  * throtl_grp; otherwise, just "throtl".
119  */
120 #define throtl_log(sq, fmt, args...)	do {				\
121 	struct throtl_grp *__tg = sq_to_tg((sq));			\
122 	struct throtl_data *__td = sq_to_td((sq));			\
123 									\
124 	(void)__td;							\
125 	if (likely(!blk_trace_note_message_enabled(__td->queue)))	\
126 		break;							\
127 	if ((__tg)) {							\
128 		blk_add_cgroup_trace_msg(__td->queue,			\
129 			&tg_to_blkg(__tg)->blkcg->css, "throtl " fmt, ##args);\
130 	} else {							\
131 		blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);	\
132 	}								\
133 } while (0)
134 
135 static inline unsigned int throtl_bio_data_size(struct bio *bio)
136 {
137 	/* assume it's one sector */
138 	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
139 		return 512;
140 	return bio->bi_iter.bi_size;
141 }
142 
143 static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
144 {
145 	INIT_LIST_HEAD(&qn->node);
146 	bio_list_init(&qn->bios_bps);
147 	bio_list_init(&qn->bios_iops);
148 	qn->tg = tg;
149 }
150 
151 /**
152  * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
153  * @bio: bio being added
154  * @qn: qnode to add bio to
155  * @sq: the service_queue @qn belongs to
156  *
157  * Add @bio to @qn and put @qn on @sq->queued if it's not already on.
158  * @qn->tg's reference count is bumped when @qn is activated.  See the
159  * comment on top of throtl_qnode definition for details.
160  */
161 static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
162 				 struct throtl_service_queue *sq)
163 {
164 	bool rw = bio_data_dir(bio);
165 
166 	/*
167 	 * Split bios have already been throttled by bps, so they are
168 	 * directly queued into the iops path.
169 	 */
170 	if (bio_flagged(bio, BIO_TG_BPS_THROTTLED) ||
171 	    bio_flagged(bio, BIO_BPS_THROTTLED)) {
172 		bio_list_add(&qn->bios_iops, bio);
173 		sq->nr_queued_iops[rw]++;
174 	} else {
175 		bio_list_add(&qn->bios_bps, bio);
176 		sq->nr_queued_bps[rw]++;
177 	}
178 
179 	if (list_empty(&qn->node)) {
180 		list_add_tail(&qn->node, &sq->queued[rw]);
181 		blkg_get(tg_to_blkg(qn->tg));
182 	}
183 }
184 
185 /**
186  * throtl_peek_queued - peek the first bio on a qnode list
187  * @queued: the qnode list to peek
188  *
189  * Always take a bio from the head of the iops queue first. If the queue is
190  * empty, we then take it from the bps queue to maintain the overall idea of
191  * fetching bios from the head.
192  */
193 static struct bio *throtl_peek_queued(struct list_head *queued)
194 {
195 	struct throtl_qnode *qn;
196 	struct bio *bio;
197 
198 	if (list_empty(queued))
199 		return NULL;
200 
201 	qn = list_first_entry(queued, struct throtl_qnode, node);
202 	bio = bio_list_peek(&qn->bios_iops);
203 	if (!bio)
204 		bio = bio_list_peek(&qn->bios_bps);
205 	WARN_ON_ONCE(!bio);
206 	return bio;
207 }
208 
209 /**
210  * throtl_pop_queued - pop the first bio from a qnode list
211  * @sq: the service_queue to pop a bio from
212  * @tg_to_put: optional out argument for throtl_grp to put
213  * @rw: read/write
214  *
215  * Pop the first bio from the qnode list @sq->queued. Note that the iops
216  * list is checked first because bios are ultimately dispatched from it.
217  * After popping, the first qnode is removed from @sq->queued if empty or moved
218  * to the end of @sq->queued so that the popping order is round-robin.
219  *
220  * When the first qnode is removed, its associated throtl_grp should be put
221  * too.  If @tg_to_put is NULL, this function automatically puts it;
222  * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
223  * responsible for putting it.
224  */
225 static struct bio *throtl_pop_queued(struct throtl_service_queue *sq,
226 				     struct throtl_grp **tg_to_put, bool rw)
227 {
228 	struct list_head *queued = &sq->queued[rw];
229 	struct throtl_qnode *qn;
230 	struct bio *bio;
231 
232 	if (list_empty(queued))
233 		return NULL;
234 
235 	qn = list_first_entry(queued, struct throtl_qnode, node);
236 	bio = bio_list_pop(&qn->bios_iops);
237 	if (bio) {
238 		sq->nr_queued_iops[rw]--;
239 	} else {
240 		bio = bio_list_pop(&qn->bios_bps);
241 		if (bio)
242 			sq->nr_queued_bps[rw]--;
243 	}
244 	WARN_ON_ONCE(!bio);
245 
246 	if (bio_list_empty(&qn->bios_bps) && bio_list_empty(&qn->bios_iops)) {
247 		list_del_init(&qn->node);
248 		if (tg_to_put)
249 			*tg_to_put = qn->tg;
250 		else
251 			blkg_put(tg_to_blkg(qn->tg));
252 	} else {
253 		list_move_tail(&qn->node, queued);
254 	}
255 
256 	return bio;
257 }
258 
259 /* init a service_queue, assumes the caller zeroed it */
260 static void throtl_service_queue_init(struct throtl_service_queue *sq)
261 {
262 	INIT_LIST_HEAD(&sq->queued[READ]);
263 	INIT_LIST_HEAD(&sq->queued[WRITE]);
264 	sq->pending_tree = RB_ROOT_CACHED;
265 	timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0);
266 }
267 
268 static struct blkg_policy_data *throtl_pd_alloc(struct gendisk *disk,
269 		struct blkcg *blkcg, gfp_t gfp)
270 {
271 	struct throtl_grp *tg;
272 	int rw;
273 
274 	tg = kzalloc_node(sizeof(*tg), gfp, disk->node_id);
275 	if (!tg)
276 		return NULL;
277 
278 	if (blkg_rwstat_init(&tg->stat_bytes, gfp))
279 		goto err_free_tg;
280 
281 	if (blkg_rwstat_init(&tg->stat_ios, gfp))
282 		goto err_exit_stat_bytes;
283 
284 	throtl_service_queue_init(&tg->service_queue);
285 
286 	for (rw = READ; rw <= WRITE; rw++) {
287 		throtl_qnode_init(&tg->qnode_on_self[rw], tg);
288 		throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
289 	}
290 
291 	RB_CLEAR_NODE(&tg->rb_node);
292 	tg->bps[READ] = U64_MAX;
293 	tg->bps[WRITE] = U64_MAX;
294 	tg->iops[READ] = UINT_MAX;
295 	tg->iops[WRITE] = UINT_MAX;
296 
297 	return &tg->pd;
298 
299 err_exit_stat_bytes:
300 	blkg_rwstat_exit(&tg->stat_bytes);
301 err_free_tg:
302 	kfree(tg);
303 	return NULL;
304 }
305 
306 static void throtl_pd_init(struct blkg_policy_data *pd)
307 {
308 	struct throtl_grp *tg = pd_to_tg(pd);
309 	struct blkcg_gq *blkg = tg_to_blkg(tg);
310 	struct throtl_data *td = blkg->q->td;
311 	struct throtl_service_queue *sq = &tg->service_queue;
312 
313 	/*
314 	 * If on the default hierarchy, we switch to properly hierarchical
315 	 * behavior where limits on a given throtl_grp are applied to the
316 	 * whole subtree rather than just the group itself.  e.g. If 16M
317 	 * read_bps limit is set on a parent group, summary bps of
318 	 * parent group and its subtree groups can't exceed 16M for the
319 	 * device.
320 	 *
321 	 * If not on the default hierarchy, the broken flat hierarchy
322 	 * behavior is retained where all throtl_grps are treated as if
323 	 * they're all separate root groups right below throtl_data.
324 	 * Limits of a group don't interact with limits of other groups
325 	 * regardless of the position of the group in the hierarchy.
326 	 */
327 	sq->parent_sq = &td->service_queue;
328 	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
329 		sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
330 	tg->td = td;
331 }
332 
333 /*
334  * Set has_rules[] if @tg or any of its parents have limits configured.
335  * This doesn't require walking up to the top of the hierarchy as the
336  * parent's has_rules[] is guaranteed to be correct.
337  */
338 static void tg_update_has_rules(struct throtl_grp *tg)
339 {
340 	struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
341 	int rw;
342 
343 	for (rw = READ; rw <= WRITE; rw++) {
344 		tg->has_rules_iops[rw] =
345 			(parent_tg && parent_tg->has_rules_iops[rw]) ||
346 			tg_iops_limit(tg, rw) != UINT_MAX;
347 		tg->has_rules_bps[rw] =
348 			(parent_tg && parent_tg->has_rules_bps[rw]) ||
349 			tg_bps_limit(tg, rw) != U64_MAX;
350 	}
351 }
352 
353 static void throtl_pd_online(struct blkg_policy_data *pd)
354 {
355 	struct throtl_grp *tg = pd_to_tg(pd);
356 	/*
357 	 * We don't want new groups to escape the limits of their ancestors.
358 	 * Update has_rules[] after a new group is brought online.
359 	 */
360 	tg_update_has_rules(tg);
361 }
362 
363 static void throtl_pd_free(struct blkg_policy_data *pd)
364 {
365 	struct throtl_grp *tg = pd_to_tg(pd);
366 
367 	timer_delete_sync(&tg->service_queue.pending_timer);
368 	blkg_rwstat_exit(&tg->stat_bytes);
369 	blkg_rwstat_exit(&tg->stat_ios);
370 	kfree(tg);
371 }
372 
373 static struct throtl_grp *
374 throtl_rb_first(struct throtl_service_queue *parent_sq)
375 {
376 	struct rb_node *n;
377 
378 	n = rb_first_cached(&parent_sq->pending_tree);
379 	WARN_ON_ONCE(!n);
380 	if (!n)
381 		return NULL;
382 	return rb_entry_tg(n);
383 }
384 
385 static void throtl_rb_erase(struct rb_node *n,
386 			    struct throtl_service_queue *parent_sq)
387 {
388 	rb_erase_cached(n, &parent_sq->pending_tree);
389 	RB_CLEAR_NODE(n);
390 }
391 
392 static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
393 {
394 	struct throtl_grp *tg;
395 
396 	tg = throtl_rb_first(parent_sq);
397 	if (!tg)
398 		return;
399 
400 	parent_sq->first_pending_disptime = tg->disptime;
401 }
402 
403 static void tg_service_queue_add(struct throtl_grp *tg)
404 {
405 	struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
406 	struct rb_node **node = &parent_sq->pending_tree.rb_root.rb_node;
407 	struct rb_node *parent = NULL;
408 	struct throtl_grp *__tg;
409 	unsigned long key = tg->disptime;
410 	bool leftmost = true;
411 
412 	while (*node != NULL) {
413 		parent = *node;
414 		__tg = rb_entry_tg(parent);
415 
416 		if (time_before(key, __tg->disptime))
417 			node = &parent->rb_left;
418 		else {
419 			node = &parent->rb_right;
420 			leftmost = false;
421 		}
422 	}
423 
424 	rb_link_node(&tg->rb_node, parent, node);
425 	rb_insert_color_cached(&tg->rb_node, &parent_sq->pending_tree,
426 			       leftmost);
427 }
428 
429 static void throtl_enqueue_tg(struct throtl_grp *tg)
430 {
431 	if (!(tg->flags & THROTL_TG_PENDING)) {
432 		tg_service_queue_add(tg);
433 		tg->flags |= THROTL_TG_PENDING;
434 		tg->service_queue.parent_sq->nr_pending++;
435 	}
436 }
437 
438 static void throtl_dequeue_tg(struct throtl_grp *tg)
439 {
440 	if (tg->flags & THROTL_TG_PENDING) {
441 		struct throtl_service_queue *parent_sq =
442 			tg->service_queue.parent_sq;
443 
444 		throtl_rb_erase(&tg->rb_node, parent_sq);
445 		--parent_sq->nr_pending;
446 		tg->flags &= ~THROTL_TG_PENDING;
447 	}
448 }
449 
450 /* Call with queue lock held */
451 static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
452 					  unsigned long expires)
453 {
454 	unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;
455 
456 	/*
457 	 * Since we are adjusting the throttle limit dynamically, the sleep
458 	 * time calculated according to the previous limit might be invalid. It's
459 	 * possible the cgroup's sleep time is very long and no other cgroups
460 	 * have IO running, so there is nothing to notify it of the limit change.
461 	 * Make sure the cgroup doesn't sleep too long so the change isn't missed.
462 	 */
463 	if (time_after(expires, max_expire))
464 		expires = max_expire;
465 	mod_timer(&sq->pending_timer, expires);
466 	throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
467 		   expires - jiffies, jiffies);
468 }
469 
470 /**
471  * throtl_schedule_next_dispatch - schedule the next dispatch cycle
472  * @sq: the service_queue to schedule dispatch for
473  * @force: force scheduling
474  *
475  * Arm @sq->pending_timer so that the next dispatch cycle starts on the
476  * dispatch time of the first pending child.  Returns %true if either timer
477  * is armed or there's no pending child left.  %false if the current
478  * dispatch window is still open and the caller should continue
479  * dispatching.
480  *
481  * If @force is %true, the dispatch timer is always scheduled and this
482  * function is guaranteed to return %true.  This is to be used when the
483  * caller can't dispatch itself and needs to invoke pending_timer
484  * unconditionally.  Note that forced scheduling is likely to induce short
485  * delay before dispatch starts even if @sq->first_pending_disptime is not
486  * in the future and thus shouldn't be used in hot paths.
487  */
488 static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
489 					  bool force)
490 {
491 	/* any pending children left? */
492 	if (!sq->nr_pending)
493 		return true;
494 
495 	update_min_dispatch_time(sq);
496 
497 	/* is the next dispatch time in the future? */
498 	if (force || time_after(sq->first_pending_disptime, jiffies)) {
499 		throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
500 		return true;
501 	}
502 
503 	/* tell the caller to continue dispatching */
504 	return false;
505 }
506 
507 static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
508 		bool rw, unsigned long start)
509 {
510 	tg->bytes_disp[rw] = 0;
511 	tg->io_disp[rw] = 0;
512 
513 	/*
514 	 * Previous slice has expired. We must have trimmed it after last
515 	 * bio dispatch. That means since start of last slice, we never used
516 	 * that bandwidth. Do try to make use of that bandwidth while giving
517 	 * credit.
518 	 */
519 	if (time_after(start, tg->slice_start[rw]))
520 		tg->slice_start[rw] = start;
521 
522 	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
523 	throtl_log(&tg->service_queue,
524 		   "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
525 		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
526 		   tg->slice_end[rw], jiffies);
527 }
528 
529 static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw,
530 					  bool clear)
531 {
532 	if (clear) {
533 		tg->bytes_disp[rw] = 0;
534 		tg->io_disp[rw] = 0;
535 	}
536 	tg->slice_start[rw] = jiffies;
537 	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
538 
539 	throtl_log(&tg->service_queue,
540 		   "[%c] new slice start=%lu end=%lu jiffies=%lu",
541 		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
542 		   tg->slice_end[rw], jiffies);
543 }
544 
545 static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
546 					unsigned long jiffy_end)
547 {
548 	tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
549 }
550 
551 static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
552 				       unsigned long jiffy_end)
553 {
554 	if (!time_before(tg->slice_end[rw], jiffy_end))
555 		return;
556 
557 	throtl_set_slice_end(tg, rw, jiffy_end);
558 	throtl_log(&tg->service_queue,
559 		   "[%c] extend slice start=%lu end=%lu jiffies=%lu",
560 		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
561 		   tg->slice_end[rw], jiffies);
562 }
563 
564 /* Determine if previously allocated or extended slice is complete or not */
565 static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
566 {
567 	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
568 		return false;
569 
570 	return true;
571 }
572 
573 static unsigned int sq_queued(struct throtl_service_queue *sq, int type)
574 {
575 	return sq->nr_queued_bps[type] + sq->nr_queued_iops[type];
576 }
577 
578 static unsigned int calculate_io_allowed(u32 iops_limit,
579 					 unsigned long jiffy_elapsed)
580 {
581 	unsigned int io_allowed;
582 	u64 tmp;
583 
584 	/*
585 	 * jiffy_elapsed should not be a big value as minimum iops can be
586 	 * 1 then at max jiffy elapsed should be equivalent of 1 second as we
587 	 * will allow dispatch after 1 second and after that slice should
588 	 * have been trimmed.
589 	 */
590 
591 	tmp = (u64)iops_limit * jiffy_elapsed;
592 	do_div(tmp, HZ);
593 
594 	if (tmp > UINT_MAX)
595 		io_allowed = UINT_MAX;
596 	else
597 		io_allowed = tmp;
598 
599 	return io_allowed;
600 }
601 
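/*
 * Bytes allowed over @jiffy_elapsed at @bps_limit:
 * bps_limit * jiffy_elapsed / HZ, saturating to U64_MAX when the
 * product might not fit in 64 bits.
 */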
602 static u64 calculate_bytes_allowed(u64 bps_limit, unsigned long jiffy_elapsed)
603 {
604 	/*
605 	 * Can result be wider than 64 bits?
606 	 * We check against 62, not 64, due to ilog2 truncation.
607 	 */
608 	if (ilog2(bps_limit) + ilog2(jiffy_elapsed) - ilog2(HZ) > 62)
609 		return U64_MAX;
610 	return mul_u64_u64_div_u64(bps_limit, (u64)jiffy_elapsed, (u64)HZ);
611 }
612 
613 static long long throtl_trim_bps(struct throtl_grp *tg, bool rw,
614 				 unsigned long time_elapsed)
615 {
616 	u64 bps_limit = tg_bps_limit(tg, rw);
617 	long long bytes_trim;
618 
619 	if (bps_limit == U64_MAX)
620 		return 0;
621 
622 	/* Need to consider the case of bytes_allowed overflow. */
623 	bytes_trim = calculate_bytes_allowed(bps_limit, time_elapsed);
624 	if (bytes_trim <= 0 || tg->bytes_disp[rw] < bytes_trim) {
625 		bytes_trim = tg->bytes_disp[rw];
626 		tg->bytes_disp[rw] = 0;
627 	} else {
628 		tg->bytes_disp[rw] -= bytes_trim;
629 	}
630 
631 	return bytes_trim;
632 }
633 
634 static int throtl_trim_iops(struct throtl_grp *tg, bool rw,
635 			    unsigned long time_elapsed)
636 {
637 	u32 iops_limit = tg_iops_limit(tg, rw);
638 	int io_trim;
639 
640 	if (iops_limit == UINT_MAX)
641 		return 0;
642 
643 	/* Need to consider the case of io_allowed overflow. */
644 	io_trim = calculate_io_allowed(iops_limit, time_elapsed);
645 	if (io_trim <= 0 || tg->io_disp[rw] < io_trim) {
646 		io_trim = tg->io_disp[rw];
647 		tg->io_disp[rw] = 0;
648 	} else {
649 		tg->io_disp[rw] -= io_trim;
650 	}
651 
652 	return io_trim;
653 }
654 
655 /* Trim the used slices and adjust slice start accordingly */
656 static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
657 {
658 	unsigned long time_elapsed;
659 	long long bytes_trim;
660 	int io_trim;
661 
662 	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
663 
664 	/*
665 	 * If bps is unlimited (-1), the time slice doesn't get
666 	 * renewed. Don't try to trim the slice if it has already been fully
667 	 * used. A new slice will start when appropriate.
668 	 */
669 	if (throtl_slice_used(tg, rw))
670 		return;
671 
672 	/*
673 	 * A bio has been dispatched. Also adjust slice_end. It might happen
674 	 * that initially cgroup limit was very low resulting in high
675 	 * slice_end, but later limit was bumped up and bio was dispatched
676 	 * sooner, then we need to reduce slice_end. A high bogus slice_end
677 	 * is bad because it does not allow new slice to start.
678 	 */
679 	throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);
680 
681 	time_elapsed = rounddown(jiffies - tg->slice_start[rw],
682 				 tg->td->throtl_slice);
683 	/* Don't trim slice until at least 2 slices are used */
684 	if (time_elapsed < tg->td->throtl_slice * 2)
685 		return;
686 
687 	/*
688 	 * The bio submission time may be a few jiffies more than the expected
689 	 * waiting time, because 'extra_bytes' can't be divided evenly in
690 	 * tg_within_bps_limit(), and also because of timer wakeup delay. In this
691 	 * case, adjusting slice_start would discard the extra wait time, causing
692 	 * a lower rate than expected. Therefore, in addition to the above rounddown,
693 	 * one extra slice is preserved to absorb the deviation.
694 	 */
695 	time_elapsed -= tg->td->throtl_slice;
696 	bytes_trim = throtl_trim_bps(tg, rw, time_elapsed);
697 	io_trim = throtl_trim_iops(tg, rw, time_elapsed);
698 	if (!bytes_trim && !io_trim)
699 		return;
700 
701 	tg->slice_start[rw] += time_elapsed;
702 
703 	throtl_log(&tg->service_queue,
704 		   "[%c] trim slice nr=%lu bytes=%lld io=%d start=%lu end=%lu jiffies=%lu",
705 		   rw == READ ? 'R' : 'W', time_elapsed / tg->td->throtl_slice,
706 		   bytes_trim, io_trim, tg->slice_start[rw], tg->slice_end[rw],
707 		   jiffies);
708 }
709 
710 static void __tg_update_carryover(struct throtl_grp *tg, bool rw,
711 				  long long *bytes, int *ios)
712 {
713 	unsigned long jiffy_elapsed = jiffies - tg->slice_start[rw];
714 	u64 bps_limit = tg_bps_limit(tg, rw);
715 	u32 iops_limit = tg_iops_limit(tg, rw);
716 	long long bytes_allowed;
717 	int io_allowed;
718 
719 	/*
720 	 * If the queue is empty, carryover handling is not needed. In such cases,
721 	 * tg->[bytes/io]_disp should be reset to 0 to avoid impacting the dispatch
722 	 * of subsequent bios. The same handling applies when the previous BPS/IOPS
723 	 * limit was set to max.
724 	 */
725 	if (sq_queued(&tg->service_queue, rw) == 0) {
726 		tg->bytes_disp[rw] = 0;
727 		tg->io_disp[rw] = 0;
728 		return;
729 	}
730 
731 	/*
732 	 * If the config is updated while bios are still throttled, calculate and
733 	 * accumulate how many bytes/ios have waited across the changes. Use the
734 	 * calculated carryover (@bytes/@ios) to update [bytes/io]_disp, which
735 	 * will be used to calculate new wait time under new configuration.
736 	 * And we need to consider the case of bytes/io_allowed overflow.
737 	 */
738 	if (bps_limit != U64_MAX) {
739 		bytes_allowed = calculate_bytes_allowed(bps_limit, jiffy_elapsed);
740 		if (bytes_allowed > 0)
741 			*bytes = bytes_allowed - tg->bytes_disp[rw];
742 	}
743 	if (iops_limit != UINT_MAX) {
744 		io_allowed = calculate_io_allowed(iops_limit, jiffy_elapsed);
745 		if (io_allowed > 0)
746 			*ios = io_allowed - tg->io_disp[rw];
747 	}
748 
749 	tg->bytes_disp[rw] = -*bytes;
750 	tg->io_disp[rw] = -*ios;
751 }
752 
753 static void tg_update_carryover(struct throtl_grp *tg)
754 {
755 	long long bytes[2] = {0};
756 	int ios[2] = {0};
757 
758 	__tg_update_carryover(tg, READ, &bytes[READ], &ios[READ]);
759 	__tg_update_carryover(tg, WRITE, &bytes[WRITE], &ios[WRITE]);
760 
761 	/* see comments in struct throtl_grp for meaning of carryover. */
762 	throtl_log(&tg->service_queue, "%s: %lld %lld %d %d\n", __func__,
763 		   bytes[READ], bytes[WRITE], ios[READ], ios[WRITE]);
764 }
765 
766 static unsigned long tg_within_iops_limit(struct throtl_grp *tg, struct bio *bio,
767 				 u32 iops_limit)
768 {
769 	bool rw = bio_data_dir(bio);
770 	int io_allowed;
771 	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
772 
773 	jiffy_elapsed = jiffies - tg->slice_start[rw];
774 
775 	/* Round up to the next throttle slice, wait time must be nonzero */
776 	jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice);
777 	io_allowed = calculate_io_allowed(iops_limit, jiffy_elapsed_rnd);
778 	if (io_allowed > 0 && tg->io_disp[rw] + 1 <= io_allowed)
779 		return 0;
780 
781 	/* Calc approx time to dispatch */
782 	jiffy_wait = jiffy_elapsed_rnd - jiffy_elapsed;
783 
784 	/* make sure at least one io can be dispatched after waiting */
785 	jiffy_wait = max(jiffy_wait, HZ / iops_limit + 1);
786 	return jiffy_wait;
787 }
788 
789 static unsigned long tg_within_bps_limit(struct throtl_grp *tg, struct bio *bio,
790 				u64 bps_limit)
791 {
792 	bool rw = bio_data_dir(bio);
793 	long long bytes_allowed;
794 	u64 extra_bytes;
795 	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
796 	unsigned int bio_size = throtl_bio_data_size(bio);
797 
798 	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
799 
800 	/* Slice has just started. Consider one slice interval */
801 	if (!jiffy_elapsed)
802 		jiffy_elapsed_rnd = tg->td->throtl_slice;
803 
804 	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
805 	bytes_allowed = calculate_bytes_allowed(bps_limit, jiffy_elapsed_rnd);
806 	/* Need to consider the case of bytes_allowed overflow. */
807 	if ((bytes_allowed > 0 && tg->bytes_disp[rw] + bio_size <= bytes_allowed)
808 	    || bytes_allowed < 0)
809 		return 0;
810 
811 	/* Calc approx time to dispatch */
812 	extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
813 	jiffy_wait = div64_u64(extra_bytes * HZ, bps_limit);
814 
815 	if (!jiffy_wait)
816 		jiffy_wait = 1;
817 
818 	/*
819 	 * This wait time is without taking into consideration the rounding
820 	 * up we did. Add that time also.
821 	 */
822 	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
823 	return jiffy_wait;
824 }
825 
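/*
 * Charge @bio's bytes against @tg's bps budget, unless the bio was
 * already bps-accounted (BIO_BPS_THROTTLED) or already charged at this
 * level (BIO_TG_BPS_THROTTLED).
 */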
826 static void throtl_charge_bps_bio(struct throtl_grp *tg, struct bio *bio)
827 {
828 	unsigned int bio_size = throtl_bio_data_size(bio);
829 
830 	/* Charge the bio to the group */
831 	if (!bio_flagged(bio, BIO_BPS_THROTTLED) &&
832 	    !bio_flagged(bio, BIO_TG_BPS_THROTTLED)) {
833 		bio_set_flag(bio, BIO_TG_BPS_THROTTLED);
834 		tg->bytes_disp[bio_data_dir(bio)] += bio_size;
835 	}
836 }
837 
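/*
 * Charge one IO against @tg's iops budget.  The per-group bps flag is
 * cleared so the next level up can do its own bps accounting.
 */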
838 static void throtl_charge_iops_bio(struct throtl_grp *tg, struct bio *bio)
839 {
840 	bio_clear_flag(bio, BIO_TG_BPS_THROTTLED);
841 	tg->io_disp[bio_data_dir(bio)]++;
842 }
843 
844 /*
845  * If the previous slice expired, start a new one, otherwise renew/extend the
846  * existing slice to make sure it is at least throtl_slice long from now. A new
847  * slice is started only for an empty throttle group. If there is a queued bio,
848  * that means there should be an active slice and it should be extended instead.
849  */
850 static void tg_update_slice(struct throtl_grp *tg, bool rw)
851 {
852 	if (throtl_slice_used(tg, rw) &&
853 	    sq_queued(&tg->service_queue, rw) == 0)
854 		throtl_start_new_slice(tg, rw, true);
855 	else
856 		throtl_extend_slice(tg, rw, jiffies + tg->td->throtl_slice);
857 }
858 
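/*
 * Return the number of jiffies to wait before @bio fits within @tg's bps
 * limit.  Returns 0 right away when there is no bps limit, the group is
 * being cancelled, or the bio's bytes were already accounted; otherwise
 * the slice is extended to cover the calculated wait.
 */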
859 static unsigned long tg_dispatch_bps_time(struct throtl_grp *tg, struct bio *bio)
860 {
861 	bool rw = bio_data_dir(bio);
862 	u64 bps_limit = tg_bps_limit(tg, rw);
863 	unsigned long bps_wait;
864 
865 	/* no need to throttle if this bio's bytes have been accounted */
866 	if (bps_limit == U64_MAX || tg->flags & THROTL_TG_CANCELING ||
867 	    bio_flagged(bio, BIO_BPS_THROTTLED) ||
868 	    bio_flagged(bio, BIO_TG_BPS_THROTTLED))
869 		return 0;
870 
871 	tg_update_slice(tg, rw);
872 	bps_wait = tg_within_bps_limit(tg, bio, bps_limit);
873 	throtl_extend_slice(tg, rw, jiffies + bps_wait);
874 
875 	return bps_wait;
876 }
877 
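/*
 * Return the number of jiffies to wait before @bio fits within @tg's iops
 * limit, or 0 when there is no iops limit or the group is being cancelled.
 */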
878 static unsigned long tg_dispatch_iops_time(struct throtl_grp *tg, struct bio *bio)
879 {
880 	bool rw = bio_data_dir(bio);
881 	u32 iops_limit = tg_iops_limit(tg, rw);
882 	unsigned long iops_wait;
883 
884 	if (iops_limit == UINT_MAX || tg->flags & THROTL_TG_CANCELING)
885 		return 0;
886 
887 	tg_update_slice(tg, rw);
888 	iops_wait = tg_within_iops_limit(tg, bio, iops_limit);
889 	throtl_extend_slice(tg, rw, jiffies + iops_wait);
890 
891 	return iops_wait;
892 }
893 
894 /*
895  * Returns approx number of jiffies to wait before this bio is within IO rate
896  * and can be moved to other queue or dispatched.
897  */
898 static unsigned long tg_dispatch_time(struct throtl_grp *tg, struct bio *bio)
899 {
900 	bool rw = bio_data_dir(bio);
901 	unsigned long wait;
902 
903 	/*
904 	 * Currently the whole state machine of the group depends on the first
905 	 * bio queued in the group's bio list. So one should not call this
906 	 * function with a different bio if there are other bios
907 	 * queued.
908 	 */
909 	BUG_ON(sq_queued(&tg->service_queue, rw) &&
910 	       bio != throtl_peek_queued(&tg->service_queue.queued[rw]));
911 
912 	wait = tg_dispatch_bps_time(tg, bio);
913 	if (wait != 0)
914 		return wait;
915 
916 	/*
917 	 * Charge bps here because @bio will be directly placed into the
918 	 * iops queue afterward.
919 	 */
920 	throtl_charge_bps_bio(tg, bio);
921 
922 	return tg_dispatch_iops_time(tg, bio);
923 }
924 
925 /**
926  * throtl_add_bio_tg - add a bio to the specified throtl_grp
927  * @bio: bio to add
928  * @qn: qnode to use
929  * @tg: the target throtl_grp
930  *
931  * Add @bio to @tg's service_queue using @qn.  If @qn is not specified,
932  * tg->qnode_on_self[] is used.
933  */
934 static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
935 			      struct throtl_grp *tg)
936 {
937 	struct throtl_service_queue *sq = &tg->service_queue;
938 	bool rw = bio_data_dir(bio);
939 
940 	if (!qn)
941 		qn = &tg->qnode_on_self[rw];
942 
943 	/*
944 	 * If @tg doesn't currently have any bios queued in the same
945 	 * direction, queueing @bio can change when @tg should be
946 	 * dispatched.  Mark that @tg was empty.  This is automatically
947 	 * cleared on the next tg_update_disptime().
948 	 */
949 	if (sq_queued(sq, rw) == 0)
950 		tg->flags |= THROTL_TG_WAS_EMPTY;
951 
952 	throtl_qnode_add_bio(bio, qn, sq);
953 
954 	/*
955 	 * Since we have split the queues, when the iops queue is
956 	 * previously empty and a new @bio is added into the first @qn,
957 	 * we also need to update the @tg->disptime.
958 	 */
959 	if (bio_flagged(bio, BIO_BPS_THROTTLED) &&
960 	    bio == throtl_peek_queued(&sq->queued[rw]))
961 		tg->flags |= THROTL_TG_IOPS_WAS_EMPTY;
962 
963 	throtl_enqueue_tg(tg);
964 }
965 
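/*
 * Recompute @tg->disptime from the shorter of the wait times of the first
 * queued READ and WRITE bios, and re-insert @tg into the parent's pending
 * tree at the new position.
 */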
966 static void tg_update_disptime(struct throtl_grp *tg)
967 {
968 	struct throtl_service_queue *sq = &tg->service_queue;
969 	unsigned long read_wait = -1, write_wait = -1, min_wait, disptime;
970 	struct bio *bio;
971 
972 	bio = throtl_peek_queued(&sq->queued[READ]);
973 	if (bio)
974 		read_wait = tg_dispatch_time(tg, bio);
975 
976 	bio = throtl_peek_queued(&sq->queued[WRITE]);
977 	if (bio)
978 		write_wait = tg_dispatch_time(tg, bio);
979 
980 	min_wait = min(read_wait, write_wait);
981 	disptime = jiffies + min_wait;
982 
983 	/* Update dispatch time */
984 	throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
985 	tg->disptime = disptime;
986 	tg_service_queue_add(tg);
987 
988 	/* see throtl_add_bio_tg() */
989 	tg->flags &= ~THROTL_TG_WAS_EMPTY;
990 	tg->flags &= ~THROTL_TG_IOPS_WAS_EMPTY;
991 }
992 
993 static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
994 					struct throtl_grp *parent_tg, bool rw)
995 {
996 	if (throtl_slice_used(parent_tg, rw)) {
997 		throtl_start_new_slice_with_credit(parent_tg, rw,
998 				child_tg->slice_start[rw]);
999 	}
1000 
1001 }
1002 
1003 static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
1004 {
1005 	struct throtl_service_queue *sq = &tg->service_queue;
1006 	struct throtl_service_queue *parent_sq = sq->parent_sq;
1007 	struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
1008 	struct throtl_grp *tg_to_put = NULL;
1009 	struct bio *bio;
1010 
1011 	/*
1012 	 * @bio is being transferred from @tg to @parent_sq.  Popping a bio
1013 	 * from @tg may put its reference and @parent_sq might end up
1014 	 * getting released prematurely.  Remember the tg to put and put it
1015 	 * after @bio is transferred to @parent_sq.
1016 	 */
1017 	bio = throtl_pop_queued(sq, &tg_to_put, rw);
1018 
1019 	throtl_charge_iops_bio(tg, bio);
1020 
1021 	/*
1022 	 * If our parent is another tg, we just need to transfer @bio to
1023 	 * the parent using throtl_add_bio_tg().  If our parent is
1024 	 * @td->service_queue, @bio is ready to be issued.  Put it on its
1025 	 * bio_lists[] and decrease total number queued.  The caller is
1026 	 * responsible for issuing these bios.
1027 	 */
1028 	if (parent_tg) {
1029 		throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
1030 		start_parent_slice_with_credit(tg, parent_tg, rw);
1031 	} else {
1032 		bio_set_flag(bio, BIO_BPS_THROTTLED);
1033 		throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
1034 				     parent_sq);
1035 		BUG_ON(tg->td->nr_queued[rw] <= 0);
1036 		tg->td->nr_queued[rw]--;
1037 	}
1038 
1039 	throtl_trim_slice(tg, rw);
1040 
1041 	if (tg_to_put)
1042 		blkg_put(tg_to_blkg(tg_to_put));
1043 }
1044 
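/*
 * Dispatch queued bios from @tg while its slice allows, up to
 * THROTL_GRP_QUANTUM per call, split roughly 75% reads / 25% writes.
 * Returns the number of bios dispatched.
 */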
1045 static int throtl_dispatch_tg(struct throtl_grp *tg)
1046 {
1047 	struct throtl_service_queue *sq = &tg->service_queue;
1048 	unsigned int nr_reads = 0, nr_writes = 0;
1049 	unsigned int max_nr_reads = THROTL_GRP_QUANTUM * 3 / 4;
1050 	unsigned int max_nr_writes = THROTL_GRP_QUANTUM - max_nr_reads;
1051 	struct bio *bio;
1052 
1053 	/* Try to dispatch 75% READS and 25% WRITES */
1054 
1055 	while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
1056 	       tg_dispatch_time(tg, bio) == 0) {
1057 
1058 		tg_dispatch_one_bio(tg, READ);
1059 		nr_reads++;
1060 
1061 		if (nr_reads >= max_nr_reads)
1062 			break;
1063 	}
1064 
1065 	while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
1066 	       tg_dispatch_time(tg, bio) == 0) {
1067 
1068 		tg_dispatch_one_bio(tg, WRITE);
1069 		nr_writes++;
1070 
1071 		if (nr_writes >= max_nr_writes)
1072 			break;
1073 	}
1074 
1075 	return nr_reads + nr_writes;
1076 }
1077 
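/*
 * Dispatch from the pending children of @parent_sq in order of their
 * disptime, stopping when no child is due yet or THROTL_QUANTUM bios
 * have been moved.  Returns the number of bios dispatched.
 */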
1078 static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
1079 {
1080 	unsigned int nr_disp = 0;
1081 
1082 	while (1) {
1083 		struct throtl_grp *tg;
1084 		struct throtl_service_queue *sq;
1085 
1086 		if (!parent_sq->nr_pending)
1087 			break;
1088 
1089 		tg = throtl_rb_first(parent_sq);
1090 		if (!tg)
1091 			break;
1092 
1093 		if (time_before(jiffies, tg->disptime))
1094 			break;
1095 
1096 		nr_disp += throtl_dispatch_tg(tg);
1097 
1098 		sq = &tg->service_queue;
1099 		if (sq_queued(sq, READ) || sq_queued(sq, WRITE))
1100 			tg_update_disptime(tg);
1101 		else
1102 			throtl_dequeue_tg(tg);
1103 
1104 		if (nr_disp >= THROTL_QUANTUM)
1105 			break;
1106 	}
1107 
1108 	return nr_disp;
1109 }
1110 
1111 /**
1112  * throtl_pending_timer_fn - timer function for service_queue->pending_timer
1113  * @t: the pending_timer member of the throtl_service_queue being serviced
1114  *
1115  * This timer is armed when a child throtl_grp with active bios becomes
1116  * pending and queued on the service_queue's pending_tree and expires when
1117  * the first child throtl_grp should be dispatched.  This function
1118  * dispatches bios from the children throtl_grps to the parent
1119  * service_queue.
1120  *
1121  * If the parent's parent is another throtl_grp, dispatching is propagated
1122  * by either arming its pending_timer or repeating dispatch directly.  If
1123  * the top-level service_tree is reached, throtl_data->dispatch_work is
1124  * kicked so that the ready bios are issued.
1125  */
1126 static void throtl_pending_timer_fn(struct timer_list *t)
1127 {
1128 	struct throtl_service_queue *sq = timer_container_of(sq, t,
1129 							     pending_timer);
1130 	struct throtl_grp *tg = sq_to_tg(sq);
1131 	struct throtl_data *td = sq_to_td(sq);
1132 	struct throtl_service_queue *parent_sq;
1133 	struct request_queue *q;
1134 	bool dispatched;
1135 	int ret;
1136 
1137 	/* throtl_data may be gone, so figure out request queue by blkg */
1138 	if (tg)
1139 		q = tg->pd.blkg->q;
1140 	else
1141 		q = td->queue;
1142 
1143 	spin_lock_irq(&q->queue_lock);
1144 
1145 	if (!q->root_blkg)
1146 		goto out_unlock;
1147 
1148 again:
1149 	parent_sq = sq->parent_sq;
1150 	dispatched = false;
1151 
1152 	while (true) {
1153 		unsigned int __maybe_unused bio_cnt_r = sq_queued(sq, READ);
1154 		unsigned int __maybe_unused bio_cnt_w = sq_queued(sq, WRITE);
1155 
1156 		throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
1157 			   bio_cnt_r + bio_cnt_w, bio_cnt_r, bio_cnt_w);
1158 
1159 		ret = throtl_select_dispatch(sq);
1160 		if (ret) {
1161 			throtl_log(sq, "bios disp=%u", ret);
1162 			dispatched = true;
1163 		}
1164 
1165 		if (throtl_schedule_next_dispatch(sq, false))
1166 			break;
1167 
1168 		/* this dispatch window is still open, relax and repeat */
1169 		spin_unlock_irq(&q->queue_lock);
1170 		cpu_relax();
1171 		spin_lock_irq(&q->queue_lock);
1172 	}
1173 
1174 	if (!dispatched)
1175 		goto out_unlock;
1176 
1177 	if (parent_sq) {
1178 		/* @parent_sq is another throtl_grp, propagate dispatch */
1179 		if (tg->flags & THROTL_TG_WAS_EMPTY ||
1180 		    tg->flags & THROTL_TG_IOPS_WAS_EMPTY) {
1181 			tg_update_disptime(tg);
1182 			if (!throtl_schedule_next_dispatch(parent_sq, false)) {
1183 				/* window is already open, repeat dispatching */
1184 				sq = parent_sq;
1185 				tg = sq_to_tg(sq);
1186 				goto again;
1187 			}
1188 		}
1189 	} else {
1190 		/* reached the top-level, queue issuing */
1191 		queue_work(kthrotld_workqueue, &td->dispatch_work);
1192 	}
1193 out_unlock:
1194 	spin_unlock_irq(&q->queue_lock);
1195 }
1196 
1197 /**
1198  * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
1199  * @work: work item being executed
1200  *
1201  * This function is queued for execution when bios reach the bio_lists[]
1202  * of throtl_data->service_queue.  Those bios are ready and issued by this
1203  * function.
1204  */
1205 static void blk_throtl_dispatch_work_fn(struct work_struct *work)
1206 {
1207 	struct throtl_data *td = container_of(work, struct throtl_data,
1208 					      dispatch_work);
1209 	struct throtl_service_queue *td_sq = &td->service_queue;
1210 	struct request_queue *q = td->queue;
1211 	struct bio_list bio_list_on_stack;
1212 	struct bio *bio;
1213 	struct blk_plug plug;
1214 	int rw;
1215 
1216 	bio_list_init(&bio_list_on_stack);
1217 
1218 	spin_lock_irq(&q->queue_lock);
1219 	for (rw = READ; rw <= WRITE; rw++)
1220 		while ((bio = throtl_pop_queued(td_sq, NULL, rw)))
1221 			bio_list_add(&bio_list_on_stack, bio);
1222 	spin_unlock_irq(&q->queue_lock);
1223 
1224 	if (!bio_list_empty(&bio_list_on_stack)) {
1225 		blk_start_plug(&plug);
1226 		while ((bio = bio_list_pop(&bio_list_on_stack)))
1227 			submit_bio_noacct_nocheck(bio);
1228 		blk_finish_plug(&plug);
1229 	}
1230 }
1231 
1232 static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
1233 			      int off)
1234 {
1235 	struct throtl_grp *tg = pd_to_tg(pd);
1236 	u64 v = *(u64 *)((void *)tg + off);
1237 
1238 	if (v == U64_MAX)
1239 		return 0;
1240 	return __blkg_prfill_u64(sf, pd, v);
1241 }
1242 
1243 static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
1244 			       int off)
1245 {
1246 	struct throtl_grp *tg = pd_to_tg(pd);
1247 	unsigned int v = *(unsigned int *)((void *)tg + off);
1248 
1249 	if (v == UINT_MAX)
1250 		return 0;
1251 	return __blkg_prfill_u64(sf, pd, v);
1252 }
1253 
1254 static int tg_print_conf_u64(struct seq_file *sf, void *v)
1255 {
1256 	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
1257 			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
1258 	return 0;
1259 }
1260 
1261 static int tg_print_conf_uint(struct seq_file *sf, void *v)
1262 {
1263 	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
1264 			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
1265 	return 0;
1266 }
1267 
1268 static void tg_conf_updated(struct throtl_grp *tg, bool global)
1269 {
1270 	struct throtl_service_queue *sq = &tg->service_queue;
1271 	struct cgroup_subsys_state *pos_css;
1272 	struct blkcg_gq *blkg;
1273 
1274 	throtl_log(&tg->service_queue,
1275 		   "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
1276 		   tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
1277 		   tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));
1278 
1279 	rcu_read_lock();
1280 	/*
1281 	 * Update has_rules[] flags for the updated tg's subtree.  A tg is
1282 	 * considered to have rules if either the tg itself or any of its
1283 	 * ancestors has rules.  This identifies groups without any
1284 	 * restrictions in the whole hierarchy and allows them to bypass
1285 	 * blk-throttle.
1286 	 */
1287 	blkg_for_each_descendant_pre(blkg, pos_css,
1288 			global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
1289 		struct throtl_grp *this_tg = blkg_to_tg(blkg);
1290 
1291 		tg_update_has_rules(this_tg);
1292 		/* ignore root/second level */
1293 		if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
1294 		    !blkg->parent->parent)
1295 			continue;
1296 	}
1297 	rcu_read_unlock();
1298 
1299 	/*
1300 	 * We're already holding queue_lock and know @tg is valid.  Let's
1301 	 * apply the new config directly.
1302 	 *
1303 	 * Restart the slices for both READ and WRITE. It might happen
1304 	 * that a group's limits are dropped suddenly and we don't want to
1305 	 * account recently dispatched IO at the new low rate.
1306 	 */
1307 	throtl_start_new_slice(tg, READ, false);
1308 	throtl_start_new_slice(tg, WRITE, false);
1309 
1310 	if (tg->flags & THROTL_TG_PENDING) {
1311 		tg_update_disptime(tg);
1312 		throtl_schedule_next_dispatch(sq->parent_sq, true);
1313 	}
1314 }
1315 
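/*
 * Allocate throtl_data and activate the throttling policy on @disk.  Done
 * lazily on first configuration (see tg_set_conf()/tg_set_limit()); the
 * queue is frozen and quiesced to keep the switch-over out of the IO path.
 */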
1316 static int blk_throtl_init(struct gendisk *disk)
1317 {
1318 	struct request_queue *q = disk->queue;
1319 	struct throtl_data *td;
1320 	unsigned int memflags;
1321 	int ret;
1322 
1323 	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
1324 	if (!td)
1325 		return -ENOMEM;
1326 
1327 	INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
1328 	throtl_service_queue_init(&td->service_queue);
1329 
1330 	/*
1331 	 * Freeze queue before activating policy, to synchronize with IO path,
1332 	 * which is protected by 'q_usage_counter'.
1333 	 */
1334 	memflags = blk_mq_freeze_queue(disk->queue);
1335 	blk_mq_quiesce_queue(disk->queue);
1336 
1337 	q->td = td;
1338 	td->queue = q;
1339 
1340 	/* activate policy */
1341 	ret = blkcg_activate_policy(disk, &blkcg_policy_throtl);
1342 	if (ret) {
1343 		q->td = NULL;
1344 		kfree(td);
1345 		goto out;
1346 	}
1347 
1348 	if (blk_queue_nonrot(q))
1349 		td->throtl_slice = DFL_THROTL_SLICE_SSD;
1350 	else
1351 		td->throtl_slice = DFL_THROTL_SLICE_HD;
1352 	td->track_bio_latency = !queue_is_mq(q);
1353 	if (!td->track_bio_latency)
1354 		blk_stat_enable_accounting(q);
1355 
1356 out:
1357 	blk_mq_unquiesce_queue(disk->queue);
1358 	blk_mq_unfreeze_queue(disk->queue, memflags);
1359 
1360 	return ret;
1361 }
1362 
1363 
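/*
 * Write handler for the legacy (cgroup v1) per-device files such as
 * "blkio.throttle.read_bps_device".  The body is "MAJ:MIN <value>";
 * writing 0 clears the limit.  For example (the device numbers are only
 * illustrative):
 *
 *	echo "8:16 1048576" > blkio.throttle.read_bps_device
 */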
1364 static ssize_t tg_set_conf(struct kernfs_open_file *of,
1365 			   char *buf, size_t nbytes, loff_t off, bool is_u64)
1366 {
1367 	struct blkcg *blkcg = css_to_blkcg(of_css(of));
1368 	struct blkg_conf_ctx ctx;
1369 	struct throtl_grp *tg;
1370 	int ret;
1371 	u64 v;
1372 
1373 	blkg_conf_init(&ctx, buf);
1374 
1375 	ret = blkg_conf_open_bdev(&ctx);
1376 	if (ret)
1377 		goto out_finish;
1378 
1379 	if (!blk_throtl_activated(ctx.bdev->bd_queue)) {
1380 		ret = blk_throtl_init(ctx.bdev->bd_disk);
1381 		if (ret)
1382 			goto out_finish;
1383 	}
1384 
1385 	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, &ctx);
1386 	if (ret)
1387 		goto out_finish;
1388 
1389 	ret = -EINVAL;
1390 	if (sscanf(ctx.body, "%llu", &v) != 1)
1391 		goto out_finish;
1392 	if (!v)
1393 		v = U64_MAX;
1394 
1395 	tg = blkg_to_tg(ctx.blkg);
1396 	tg_update_carryover(tg);
1397 
1398 	if (is_u64)
1399 		*(u64 *)((void *)tg + of_cft(of)->private) = v;
1400 	else
1401 		*(unsigned int *)((void *)tg + of_cft(of)->private) = v;
1402 
1403 	tg_conf_updated(tg, false);
1404 	ret = 0;
1405 out_finish:
1406 	blkg_conf_exit(&ctx);
1407 	return ret ?: nbytes;
1408 }
1409 
1410 static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
1411 			       char *buf, size_t nbytes, loff_t off)
1412 {
1413 	return tg_set_conf(of, buf, nbytes, off, true);
1414 }
1415 
1416 static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
1417 				char *buf, size_t nbytes, loff_t off)
1418 {
1419 	return tg_set_conf(of, buf, nbytes, off, false);
1420 }
1421 
1422 static int tg_print_rwstat(struct seq_file *sf, void *v)
1423 {
1424 	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1425 			  blkg_prfill_rwstat, &blkcg_policy_throtl,
1426 			  seq_cft(sf)->private, true);
1427 	return 0;
1428 }
1429 
1430 static u64 tg_prfill_rwstat_recursive(struct seq_file *sf,
1431 				      struct blkg_policy_data *pd, int off)
1432 {
1433 	struct blkg_rwstat_sample sum;
1434 
1435 	blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_throtl, off,
1436 				  &sum);
1437 	return __blkg_prfill_rwstat(sf, pd, &sum);
1438 }
1439 
1440 static int tg_print_rwstat_recursive(struct seq_file *sf, void *v)
1441 {
1442 	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1443 			  tg_prfill_rwstat_recursive, &blkcg_policy_throtl,
1444 			  seq_cft(sf)->private, true);
1445 	return 0;
1446 }
1447 
1448 static struct cftype throtl_legacy_files[] = {
1449 	{
1450 		.name = "throttle.read_bps_device",
1451 		.private = offsetof(struct throtl_grp, bps[READ]),
1452 		.seq_show = tg_print_conf_u64,
1453 		.write = tg_set_conf_u64,
1454 	},
1455 	{
1456 		.name = "throttle.write_bps_device",
1457 		.private = offsetof(struct throtl_grp, bps[WRITE]),
1458 		.seq_show = tg_print_conf_u64,
1459 		.write = tg_set_conf_u64,
1460 	},
1461 	{
1462 		.name = "throttle.read_iops_device",
1463 		.private = offsetof(struct throtl_grp, iops[READ]),
1464 		.seq_show = tg_print_conf_uint,
1465 		.write = tg_set_conf_uint,
1466 	},
1467 	{
1468 		.name = "throttle.write_iops_device",
1469 		.private = offsetof(struct throtl_grp, iops[WRITE]),
1470 		.seq_show = tg_print_conf_uint,
1471 		.write = tg_set_conf_uint,
1472 	},
1473 	{
1474 		.name = "throttle.io_service_bytes",
1475 		.private = offsetof(struct throtl_grp, stat_bytes),
1476 		.seq_show = tg_print_rwstat,
1477 	},
1478 	{
1479 		.name = "throttle.io_service_bytes_recursive",
1480 		.private = offsetof(struct throtl_grp, stat_bytes),
1481 		.seq_show = tg_print_rwstat_recursive,
1482 	},
1483 	{
1484 		.name = "throttle.io_serviced",
1485 		.private = offsetof(struct throtl_grp, stat_ios),
1486 		.seq_show = tg_print_rwstat,
1487 	},
1488 	{
1489 		.name = "throttle.io_serviced_recursive",
1490 		.private = offsetof(struct throtl_grp, stat_ios),
1491 		.seq_show = tg_print_rwstat_recursive,
1492 	},
1493 	{ }	/* terminate */
1494 };
1495 
1496 static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
1497 			 int off)
1498 {
1499 	struct throtl_grp *tg = pd_to_tg(pd);
1500 	const char *dname = blkg_dev_name(pd->blkg);
1501 	u64 bps_dft;
1502 	unsigned int iops_dft;
1503 
1504 	if (!dname)
1505 		return 0;
1506 
1507 	bps_dft = U64_MAX;
1508 	iops_dft = UINT_MAX;
1509 
1510 	if (tg->bps[READ] == bps_dft &&
1511 	    tg->bps[WRITE] == bps_dft &&
1512 	    tg->iops[READ] == iops_dft &&
1513 	    tg->iops[WRITE] == iops_dft)
1514 		return 0;
1515 
1516 	seq_printf(sf, "%s", dname);
1517 	if (tg->bps[READ] == U64_MAX)
1518 		seq_printf(sf, " rbps=max");
1519 	else
1520 		seq_printf(sf, " rbps=%llu", tg->bps[READ]);
1521 
1522 	if (tg->bps[WRITE] == U64_MAX)
1523 		seq_printf(sf, " wbps=max");
1524 	else
1525 		seq_printf(sf, " wbps=%llu", tg->bps[WRITE]);
1526 
1527 	if (tg->iops[READ] == UINT_MAX)
1528 		seq_printf(sf, " riops=max");
1529 	else
1530 		seq_printf(sf, " riops=%u", tg->iops[READ]);
1531 
1532 	if (tg->iops[WRITE] == UINT_MAX)
1533 		seq_printf(sf, " wiops=max");
1534 	else
1535 		seq_printf(sf, " wiops=%u", tg->iops[WRITE]);
1536 
1537 	seq_printf(sf, "\n");
1538 	return 0;
1539 }
1540 
1541 static int tg_print_limit(struct seq_file *sf, void *v)
1542 {
1543 	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit,
1544 			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
1545 	return 0;
1546 }
1547 
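/*
 * Write handler for the cgroup v2 "io.max" file.  The body is
 * "MAJ:MIN key=value ..." with keys rbps/wbps/riops/wiops; an omitted key
 * keeps its current value and "max" removes the limit.  For example (the
 * device numbers are only illustrative):
 *
 *	echo "8:16 rbps=2097152 wiops=max" > io.max
 */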
1548 static ssize_t tg_set_limit(struct kernfs_open_file *of,
1549 			  char *buf, size_t nbytes, loff_t off)
1550 {
1551 	struct blkcg *blkcg = css_to_blkcg(of_css(of));
1552 	struct blkg_conf_ctx ctx;
1553 	struct throtl_grp *tg;
1554 	u64 v[4];
1555 	int ret;
1556 
1557 	blkg_conf_init(&ctx, buf);
1558 
1559 	ret = blkg_conf_open_bdev(&ctx);
1560 	if (ret)
1561 		goto out_finish;
1562 
1563 	if (!blk_throtl_activated(ctx.bdev->bd_queue)) {
1564 		ret = blk_throtl_init(ctx.bdev->bd_disk);
1565 		if (ret)
1566 			goto out_finish;
1567 	}
1568 
1569 	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, &ctx);
1570 	if (ret)
1571 		goto out_finish;
1572 
1573 	tg = blkg_to_tg(ctx.blkg);
1574 	tg_update_carryover(tg);
1575 
1576 	v[0] = tg->bps[READ];
1577 	v[1] = tg->bps[WRITE];
1578 	v[2] = tg->iops[READ];
1579 	v[3] = tg->iops[WRITE];
1580 
1581 	while (true) {
1582 		char tok[27];	/* wiops=18446744073709551616 */
1583 		char *p;
1584 		u64 val = U64_MAX;
1585 		int len;
1586 
1587 		if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
1588 			break;
1589 		if (tok[0] == '\0')
1590 			break;
1591 		ctx.body += len;
1592 
1593 		ret = -EINVAL;
1594 		p = tok;
1595 		strsep(&p, "=");
1596 		if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
1597 			goto out_finish;
1598 
1599 		ret = -ERANGE;
1600 		if (!val)
1601 			goto out_finish;
1602 
1603 		ret = -EINVAL;
1604 		if (!strcmp(tok, "rbps"))
1605 			v[0] = val;
1606 		else if (!strcmp(tok, "wbps"))
1607 			v[1] = val;
1608 		else if (!strcmp(tok, "riops"))
1609 			v[2] = min_t(u64, val, UINT_MAX);
1610 		else if (!strcmp(tok, "wiops"))
1611 			v[3] = min_t(u64, val, UINT_MAX);
1612 		else
1613 			goto out_finish;
1614 	}
1615 
1616 	tg->bps[READ] = v[0];
1617 	tg->bps[WRITE] = v[1];
1618 	tg->iops[READ] = v[2];
1619 	tg->iops[WRITE] = v[3];
1620 
1621 	tg_conf_updated(tg, false);
1622 	ret = 0;
1623 out_finish:
1624 	blkg_conf_exit(&ctx);
1625 	return ret ?: nbytes;
1626 }
1627 
1628 static struct cftype throtl_files[] = {
1629 	{
1630 		.name = "max",
1631 		.flags = CFTYPE_NOT_ON_ROOT,
1632 		.seq_show = tg_print_limit,
1633 		.write = tg_set_limit,
1634 	},
1635 	{ }	/* terminate */
1636 };
1637 
1638 static void throtl_shutdown_wq(struct request_queue *q)
1639 {
1640 	struct throtl_data *td = q->td;
1641 
1642 	cancel_work_sync(&td->dispatch_work);
1643 }
1644 
1645 static void tg_flush_bios(struct throtl_grp *tg)
1646 {
1647 	struct throtl_service_queue *sq = &tg->service_queue;
1648 
1649 	if (tg->flags & THROTL_TG_CANCELING)
1650 		return;
1651 	/*
1652 	 * Set the flag to make sure throtl_pending_timer_fn() won't
1653 	 * stop until all throttled bios are dispatched.
1654 	 */
1655 	tg->flags |= THROTL_TG_CANCELING;
1656 
1657 	/*
1658 	 * Do not dispatch a cgroup without THROTL_TG_PENDING, or the cgroup
1659 	 * will be inserted into the service queue without THROTL_TG_PENDING
1660 	 * set in tg_update_disptime() below. IO dispatched from a
1661 	 * child in tg_dispatch_one_bio() would then trigger a double insertion
1662 	 * and corrupt the tree.
1663 	 */
1664 	if (!(tg->flags & THROTL_TG_PENDING))
1665 		return;
1666 
1667 	/*
1668 	 * Update disptime after setting the above flag to make sure
1669 	 * throtl_select_dispatch() won't exit without dispatching.
1670 	 */
1671 	tg_update_disptime(tg);
1672 
1673 	throtl_schedule_pending_timer(sq, jiffies + 1);
1674 }
1675 
1676 static void throtl_pd_offline(struct blkg_policy_data *pd)
1677 {
1678 	tg_flush_bios(pd_to_tg(pd));
1679 }
1680 
1681 struct blkcg_policy blkcg_policy_throtl = {
1682 	.dfl_cftypes		= throtl_files,
1683 	.legacy_cftypes		= throtl_legacy_files,
1684 
1685 	.pd_alloc_fn		= throtl_pd_alloc,
1686 	.pd_init_fn		= throtl_pd_init,
1687 	.pd_online_fn		= throtl_pd_online,
1688 	.pd_offline_fn		= throtl_pd_offline,
1689 	.pd_free_fn		= throtl_pd_free,
1690 };
1691 
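/*
 * blk_throtl_cancel_bios - flush out all bios throttled on @disk
 *
 * Called on disk deletion so that no throttled bios remain in flight once
 * the disk is gone.
 */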
1692 void blk_throtl_cancel_bios(struct gendisk *disk)
1693 {
1694 	struct request_queue *q = disk->queue;
1695 	struct cgroup_subsys_state *pos_css;
1696 	struct blkcg_gq *blkg;
1697 
1698 	if (!blk_throtl_activated(q))
1699 		return;
1700 
1701 	spin_lock_irq(&q->queue_lock);
1702 	/*
1703 	 * queue_lock is held, so the rcu lock is technically not needed here.
1704 	 * However, the rcu lock is still held to emphasize that the following
1705 	 * path needs RCU protection and to prevent a warning from lockdep.
1706 	 */
1707 	rcu_read_lock();
1708 	blkg_for_each_descendant_post(blkg, pos_css, q->root_blkg) {
1709 		/*
1710 		 * disk_release will call pd_offline_fn to cancel bios.
1711 		 * However, disk_release can't be called if someone holds
1712 		 * a reference to the device and issues bios which are
1713 		 * in flight after del_gendisk.
1714 		 * Cancel bios here to ensure no bios are in flight after
1715 		 * del_gendisk.
1716 		 */
1717 		tg_flush_bios(blkg_to_tg(blkg));
1718 	}
1719 	rcu_read_unlock();
1720 	spin_unlock_irq(&q->queue_lock);
1721 }
1722 
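/*
 * Check whether @bio can be dispatched from @tg right now without being
 * queued.  Bios that were already bps-throttled (e.g. splits) only need an
 * empty iops queue and available iops budget; everything else must find
 * the group's queues empty and fit both the bps and iops budgets.
 */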
1723 static bool tg_within_limit(struct throtl_grp *tg, struct bio *bio, bool rw)
1724 {
1725 	struct throtl_service_queue *sq = &tg->service_queue;
1726 
1727 	/*
1728 	 * For a split bio, we need to specifically distinguish whether the
1729 	 * iops queue is empty.
1730 	 */
1731 	if (bio_flagged(bio, BIO_BPS_THROTTLED))
1732 		return sq->nr_queued_iops[rw] == 0 &&
1733 				tg_dispatch_iops_time(tg, bio) == 0;
1734 
1735 	/*
1736 	 * Throttling is FIFO - if bios are already queued, this bio should be queued too.
1737 	 * If the bps queue is empty and @bio is within the bps limit, charge
1738 	 * bps here for direct placement into the iops queue.
1739 	 */
1740 	if (sq_queued(&tg->service_queue, rw)) {
1741 		if (sq->nr_queued_bps[rw] == 0 &&
1742 		    tg_dispatch_bps_time(tg, bio) == 0)
1743 			throtl_charge_bps_bio(tg, bio);
1744 
1745 		return false;
1746 	}
1747 
1748 	return tg_dispatch_time(tg, bio) == 0;
1749 }
1750 
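/*
 * __blk_throtl_bio - throttle entry point for a bio
 *
 * Walk up the blkg hierarchy starting at @bio's group.  At each level the
 * bio is either charged and passed on, or queued on that group and the
 * walk stops.  Returns true when the bio was queued and will be
 * resubmitted later by the dispatch machinery.
 */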
1751 bool __blk_throtl_bio(struct bio *bio)
1752 {
1753 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
1754 	struct blkcg_gq *blkg = bio->bi_blkg;
1755 	struct throtl_qnode *qn = NULL;
1756 	struct throtl_grp *tg = blkg_to_tg(blkg);
1757 	struct throtl_service_queue *sq;
1758 	bool rw = bio_data_dir(bio);
1759 	bool throttled = false;
1760 	struct throtl_data *td = tg->td;
1761 
1762 	rcu_read_lock();
1763 	spin_lock_irq(&q->queue_lock);
1764 	sq = &tg->service_queue;
1765 
1766 	while (true) {
1767 		if (tg_within_limit(tg, bio, rw)) {
1768 			/* within limits, let's charge and dispatch directly */
1769 			throtl_charge_iops_bio(tg, bio);
1770 
1771 			/*
1772 			 * We need to trim slice even when bios are not being
1773 			 * queued otherwise it might happen that a bio is not
1774 			 * queued for a long time and slice keeps on extending
1775 			 * and trim is not called for a long time. Now if limits
1776 			 * are reduced suddenly we take into account all the IO
1777 			 * dispatched so far at the new low rate and newly queued
1778 			 * IO gets a really long dispatch time.
1779 			 *
1780 			 * So keep on trimming slice even if bio is not queued.
1781 			 */
1782 			throtl_trim_slice(tg, rw);
1783 		} else if (bio_issue_as_root_blkg(bio)) {
1784 			/*
1785 			 * IOs which may cause priority inversions are
1786 			 * dispatched directly, even if they're over limit.
1787 			 *
1788 			 * Charge and dispatch directly; since our throttle
1789 			 * control algorithm is adaptive, the extra IO bytes
1790 			 * will be throttled later to pay back the debt.
1791 			 */
1792 			throtl_charge_bps_bio(tg, bio);
1793 			throtl_charge_iops_bio(tg, bio);
1794 		} else {
1795 			/* if above limits, break to queue */
1796 			break;
1797 		}
1798 
1799 		/*
1800 		 * @bio passed through this layer without being throttled.
1801 		 * Climb up the ladder.  If we're already at the top, it
1802 		 * can be executed directly.
1803 		 */
1804 		qn = &tg->qnode_on_parent[rw];
1805 		sq = sq->parent_sq;
1806 		tg = sq_to_tg(sq);
1807 		if (!tg) {
1808 			bio_set_flag(bio, BIO_BPS_THROTTLED);
1809 			goto out_unlock;
1810 		}
1811 	}
1812 
1813 	/* out-of-limit, queue to @tg */
1814 	throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
1815 		   rw == READ ? 'R' : 'W',
1816 		   tg->bytes_disp[rw], bio->bi_iter.bi_size,
1817 		   tg_bps_limit(tg, rw),
1818 		   tg->io_disp[rw], tg_iops_limit(tg, rw),
1819 		   sq_queued(sq, READ), sq_queued(sq, WRITE));
1820 
1821 	td->nr_queued[rw]++;
1822 	throtl_add_bio_tg(bio, qn, tg);
1823 	throttled = true;
1824 
1825 	/*
1826 	 * Update @tg's dispatch time and force schedule dispatch if @tg
1827 	 * was empty before @bio, or the iops queue was empty and @bio was
1828 	 * added to it.  The forced scheduling isn't likely to cause undue
1829 	 * delay as @bio is likely to be dispatched directly if its @tg's
1830 	 * disptime is not in the future.
1831 	 */
1832 	if (tg->flags & THROTL_TG_WAS_EMPTY ||
1833 	    tg->flags & THROTL_TG_IOPS_WAS_EMPTY) {
1834 		tg_update_disptime(tg);
1835 		throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
1836 	}
1837 
1838 out_unlock:
1839 	spin_unlock_irq(&q->queue_lock);
1840 
1841 	rcu_read_unlock();
1842 	return throttled;
1843 }
1844 
1845 void blk_throtl_exit(struct gendisk *disk)
1846 {
1847 	struct request_queue *q = disk->queue;
1848 
1849 	if (!blk_throtl_activated(q))
1850 		return;
1851 
1852 	timer_delete_sync(&q->td->service_queue.pending_timer);
1853 	throtl_shutdown_wq(q);
1854 	blkcg_deactivate_policy(disk, &blkcg_policy_throtl);
1855 	kfree(q->td);
1856 }
1857 
1858 static int __init throtl_init(void)
1859 {
1860 	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
1861 	if (!kthrotld_workqueue)
1862 		panic("Failed to create kthrotld\n");
1863 
1864 	return blkcg_policy_register(&blkcg_policy_throtl);
1865 }
1866 
1867 module_init(throtl_init);
1868