xref: /linux/block/blk-iolatency.c (revision 5ff328836dfde0cef9f28c8b8791a90a36d7a183)
1 /*
2  * Block rq-qos base io controller
3  *
4  * This works similarly to wbt, with a few exceptions:
5  *
6  * - It's bio based, so the latency covers the whole block layer in addition to
7  *   the actual io.
8  * - We will throttle all IO that comes in here if we need to.
9  * - We use the mean latency over the 100ms window.  This is because writes can
10  *   be particularly fast, which could give us a false sense of the impact of
11  *   other workloads on our protected workload.
12  * - By default there's no throttling; we set the queue_depth to UINT_MAX so
13  *   that we can have as many outstanding bio's as we're allowed to.  Only at
14  *   throttle time do we pay attention to the actual queue depth.
15  *
16  * The hierarchy works like the cpu controller does, we track the latency at
17  * every configured node, and each configured node has its own independent
18  * queue depth.  This means that we only care about our latency targets at the
19  * peer level.  Some group at the bottom of the hierarchy isn't going to affect
20  *   a group at the end of some other path if we're only configured at leaf level.
21  *
22  * Consider the following
23  *
24  *                   root blkg
25  *             /                     \
26  *        fast (target=5ms)     slow (target=10ms)
27  *         /     \                  /        \
28  *       a        b          normal(15ms)   unloved
29  *
30  * "a" and "b" have no target, but their combined io under "fast" cannot exceed
31  * an average latency of 5ms.  If it does then we will throttle the "slow"
32  * group.  In the case of "normal", if it exceeds its 15ms target, we will
33  * throttle "unloved", but nobody else.
34  *
35  * In this example "fast", "slow", and "normal" will be the only groups actually
36  * accounting their io latencies.  We have to walk up the hierarchy to the root
37  * on every submit and complete so we can do the appropriate stat recording and
38  * adjust our own queue depth if needed.
39  *
40  * There are 2 ways we throttle IO.
41  *
42  * 1) Queue depth throttling.  As we throttle down we will adjust the maximum
43  * number of IO's we're allowed to have in flight.  This starts at UINT_MAX down
44  * to 1.  If the group is only ever submitting IO for itself then this is the
45  * only way we throttle.
46  *
47  * 2) Induced delay throttling.  This is for the case where a group is
48  * generating IO that has to be issued by the root cg to avoid priority
49  * inversion, such as REQ_META or REQ_SWAP.  If we are already at qd == 1,
50  * a lot of work is being done on our behalf by the root cg, and we are being
51  * asked to scale down further, then we induce a latency at userspace return.
52  * We accumulate the total amount of time we need to be punished by doing
53  *
54  * total_time += min_lat_nsec - actual_io_completion
55  *
56  * and then at throttle time will do
57  *
58  * throttle_time = min(total_time, NSEC_PER_SEC)
59  *
60  * This induced delay will throttle back the activity that is generating the
61  * root cg issued io's, whether that's some metadata-intensive operation or the
62  * group is using so much memory that it is pushing us into swap.
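 *
 * As an illustrative example (the numbers are made up): with a min_lat_nsec of
 * 5ms and a root-issued io that completed in 2ms on our behalf, we accumulate
 * 3ms of punishment.  A thousand such io's accumulate 3 seconds of total_time,
 * but the delay actually applied at any one throttle point is capped at
 * NSEC_PER_SEC.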
63  *
64  * Copyright (C) 2018 Josef Bacik
65  */
66 #include <linux/kernel.h>
67 #include <linux/blk_types.h>
68 #include <linux/backing-dev.h>
69 #include <linux/module.h>
70 #include <linux/timer.h>
71 #include <linux/memcontrol.h>
72 #include <linux/sched/loadavg.h>
73 #include <linux/sched/signal.h>
74 #include <trace/events/block.h>
75 #include "blk-rq-qos.h"
76 #include "blk-stat.h"
77 
78 #define DEFAULT_SCALE_COOKIE 1000000U
79 
80 static struct blkcg_policy blkcg_policy_iolatency;
81 struct iolatency_grp;
82 
83 struct blk_iolatency {
84 	struct rq_qos rqos;
85 	struct timer_list timer;
86 	atomic_t enabled;
87 };
88 
89 static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos)
90 {
91 	return container_of(rqos, struct blk_iolatency, rqos);
92 }
93 
94 static inline bool blk_iolatency_enabled(struct blk_iolatency *blkiolat)
95 {
96 	return atomic_read(&blkiolat->enabled) > 0;
97 }
98 
99 struct child_latency_info {
100 	spinlock_t lock;
101 
102 	/* Last time we adjusted the scale of everybody. */
103 	u64 last_scale_event;
104 
105 	/* The latency that we missed. */
106 	u64 scale_lat;
107 
108 	/* Total io's from all of our children for the last summation. */
109 	u64 nr_samples;
110 
111 	/* The guy who actually changed the latency numbers. */
112 	struct iolatency_grp *scale_grp;
113 
114 	/* Cookie to tell if we need to scale up or down. */
115 	atomic_t scale_cookie;
116 };
117 
118 struct percentile_stats {
119 	u64 total;
120 	u64 missed;
121 };
122 
123 struct latency_stat {
124 	union {
125 		struct percentile_stats ps;
126 		struct blk_rq_stat rqs;
127 	};
128 };
129 
130 struct iolatency_grp {
131 	struct blkg_policy_data pd;
132 	struct latency_stat __percpu *stats;
133 	struct latency_stat cur_stat;
134 	struct blk_iolatency *blkiolat;
135 	struct rq_depth rq_depth;
136 	struct rq_wait rq_wait;
137 	atomic64_t window_start;
138 	atomic_t scale_cookie;
139 	u64 min_lat_nsec;
140 	u64 cur_win_nsec;
141 
142 	/* total running average of our io latency. */
143 	u64 lat_avg;
144 
145 	/* Our current number of IO's for the last summation. */
146 	u64 nr_samples;
147 
148 	bool ssd;
149 	struct child_latency_info child_lat;
150 };
151 
152 #define BLKIOLATENCY_MIN_WIN_SIZE (100 * NSEC_PER_MSEC)
153 #define BLKIOLATENCY_MAX_WIN_SIZE NSEC_PER_SEC
154 /*
155  * These are the constants used to fake the fixed-point moving average
156  * calculation just like load average.  The call to calc_load() folds
157  * (FIXED_1 (2048) - exp_factor) * new_sample into lat_avg.  The sampling
158  * window size is bucketed to try to approximately calculate average
159  * latency such that 1/exp (decay rate) is [1 min, 2.5 min) when windows
160  * elapse immediately.  Note, windows only elapse with IO activity.  Idle
161  * periods extend the most recent window.
162  */
163 #define BLKIOLATENCY_NR_EXP_FACTORS 5
164 #define BLKIOLATENCY_EXP_BUCKET_SIZE (BLKIOLATENCY_MAX_WIN_SIZE / \
165 				      (BLKIOLATENCY_NR_EXP_FACTORS - 1))
166 static const u64 iolatency_exp_factors[BLKIOLATENCY_NR_EXP_FACTORS] = {
167 	2045, // exp(1/600) - 600 samples
168 	2039, // exp(1/240) - 240 samples
169 	2031, // exp(1/120) - 120 samples
170 	2023, // exp(1/80)  - 80 samples
171 	2014, // exp(1/60)  - 60 samples
172 };
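
/*
 * Illustrative arithmetic for the bucketing above (the window sizes are just
 * examples): BLKIOLATENCY_EXP_BUCKET_SIZE is 1s / 4 == 250ms, so a 100ms
 * window maps to exp_idx 0 (factor 2045, 1/600 decay) and decays over roughly
 * 600 * 100ms == 1 min, while a window just under 250ms in the same bucket
 * decays over roughly 600 * 250ms == 2.5 min, matching the [1 min, 2.5 min)
 * range noted above.
 */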
173 
174 static inline struct iolatency_grp *pd_to_lat(struct blkg_policy_data *pd)
175 {
176 	return pd ? container_of(pd, struct iolatency_grp, pd) : NULL;
177 }
178 
179 static inline struct iolatency_grp *blkg_to_lat(struct blkcg_gq *blkg)
180 {
181 	return pd_to_lat(blkg_to_pd(blkg, &blkcg_policy_iolatency));
182 }
183 
184 static inline struct blkcg_gq *lat_to_blkg(struct iolatency_grp *iolat)
185 {
186 	return pd_to_blkg(&iolat->pd);
187 }
188 
189 static inline void latency_stat_init(struct iolatency_grp *iolat,
190 				     struct latency_stat *stat)
191 {
192 	if (iolat->ssd) {
193 		stat->ps.total = 0;
194 		stat->ps.missed = 0;
195 	} else
196 		blk_rq_stat_init(&stat->rqs);
197 }
198 
199 static inline void latency_stat_sum(struct iolatency_grp *iolat,
200 				    struct latency_stat *sum,
201 				    struct latency_stat *stat)
202 {
203 	if (iolat->ssd) {
204 		sum->ps.total += stat->ps.total;
205 		sum->ps.missed += stat->ps.missed;
206 	} else
207 		blk_rq_stat_sum(&sum->rqs, &stat->rqs);
208 }
209 
210 static inline void latency_stat_record_time(struct iolatency_grp *iolat,
211 					    u64 req_time)
212 {
213 	struct latency_stat *stat = get_cpu_ptr(iolat->stats);
214 	if (iolat->ssd) {
215 		if (req_time >= iolat->min_lat_nsec)
216 			stat->ps.missed++;
217 		stat->ps.total++;
218 	} else
219 		blk_rq_stat_add(&stat->rqs, req_time);
220 	put_cpu_ptr(stat);
221 }
222 
223 static inline bool latency_sum_ok(struct iolatency_grp *iolat,
224 				  struct latency_stat *stat)
225 {
226 	if (iolat->ssd) {
227 		u64 thresh = div64_u64(stat->ps.total, 10);
228 		thresh = max(thresh, 1ULL);
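		/*
		 * Illustrative numbers: with 200 total io's the threshold is
		 * 20, so the sum is "ok" only if fewer than 10% of them (at
		 * most 19) missed min_lat_nsec.
		 */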
229 		return stat->ps.missed < thresh;
230 	}
231 	return stat->rqs.mean <= iolat->min_lat_nsec;
232 }
233 
234 static inline u64 latency_stat_samples(struct iolatency_grp *iolat,
235 				       struct latency_stat *stat)
236 {
237 	if (iolat->ssd)
238 		return stat->ps.total;
239 	return stat->rqs.nr_samples;
240 }
241 
242 static inline void iolat_update_total_lat_avg(struct iolatency_grp *iolat,
243 					      struct latency_stat *stat)
244 {
245 	int exp_idx;
246 
247 	if (iolat->ssd)
248 		return;
249 
250 	/*
251 	 * calc_load() takes in a number stored in fixed point representation.
252 	 * Because we are using this for IO time in ns, the values stored
253 	 * are significantly larger than the FIXED_1 denominator (2048).
254 	 * Therefore, rounding errors in the calculation are negligible and
255 	 * can be ignored.
256 	 */
257 	exp_idx = min_t(int, BLKIOLATENCY_NR_EXP_FACTORS - 1,
258 			div64_u64(iolat->cur_win_nsec,
259 				  BLKIOLATENCY_EXP_BUCKET_SIZE));
260 	iolat->lat_avg = calc_load(iolat->lat_avg,
261 				   iolatency_exp_factors[exp_idx],
262 				   stat->rqs.mean);
263 }
264 
265 static void iolat_cleanup_cb(struct rq_wait *rqw, void *private_data)
266 {
267 	atomic_dec(&rqw->inflight);
268 	wake_up(&rqw->wait);
269 }
270 
271 static bool iolat_acquire_inflight(struct rq_wait *rqw, void *private_data)
272 {
273 	struct iolatency_grp *iolat = private_data;
274 	return rq_wait_inc_below(rqw, iolat->rq_depth.max_depth);
275 }
276 
277 static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
278 				       struct iolatency_grp *iolat,
279 				       bool issue_as_root,
280 				       bool use_memdelay)
281 {
282 	struct rq_wait *rqw = &iolat->rq_wait;
283 	unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);
284 
285 	if (use_delay)
286 		blkcg_schedule_throttle(rqos->q, use_memdelay);
287 
288 	/*
289 	 * To avoid priority inversions we want to just take a slot if we are
290 	 * issuing as root.  If we're being killed off there's no point in
291 	 * delaying things; we may have been killed by the OOM killer, so
292 	 * throttling would only make recovery take longer.  Just let the IO's
293 	 * through so the task can go away.
294 	 */
295 	if (issue_as_root || fatal_signal_pending(current)) {
296 		atomic_inc(&rqw->inflight);
297 		return;
298 	}
299 
300 	rq_qos_wait(rqw, iolat, iolat_acquire_inflight, iolat_cleanup_cb);
301 }
302 
303 #define SCALE_DOWN_FACTOR 2
304 #define SCALE_UP_FACTOR 4
305 
306 static inline unsigned long scale_amount(unsigned long qd, bool up)
307 {
308 	return max(up ? qd >> SCALE_UP_FACTOR : qd >> SCALE_DOWN_FACTOR, 1UL);
309 }
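
/*
 * Illustrative step sizes (the nr_requests value is made up): with qd == 128,
 * scale_amount() returns max(128 >> 4, 1) == 8 when scaling up and
 * max(128 >> 2, 1) == 32 when scaling down, so the scale cookie falls roughly
 * four times faster than it recovers.
 */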
310 
311 /*
312  * We scale the qd down faster than we scale up, so we need to use this helper
313  * to adjust the scale_cookie accordingly; otherwise we would prematurely bring
314  * the scale_cookie back to DEFAULT_SCALE_COOKIE and unthrottle too much.
315  *
316  * Each group has their own local copy of the last scale cookie they saw, so if
317  * the global scale cookie goes up or down they know which way they need to go
318  * based on their last knowledge of it.
319  */
320 static void scale_cookie_change(struct blk_iolatency *blkiolat,
321 				struct child_latency_info *lat_info,
322 				bool up)
323 {
324 	unsigned long qd = blkiolat->rqos.q->nr_requests;
325 	unsigned long scale = scale_amount(qd, up);
326 	unsigned long old = atomic_read(&lat_info->scale_cookie);
327 	unsigned long max_scale = qd << 1;
328 	unsigned long diff = 0;
329 
330 	if (old < DEFAULT_SCALE_COOKIE)
331 		diff = DEFAULT_SCALE_COOKIE - old;
332 
333 	if (up) {
334 		if (scale + old > DEFAULT_SCALE_COOKIE)
335 			atomic_set(&lat_info->scale_cookie,
336 				   DEFAULT_SCALE_COOKIE);
337 		else if (diff > qd)
338 			atomic_inc(&lat_info->scale_cookie);
339 		else
340 			atomic_add(scale, &lat_info->scale_cookie);
341 	} else {
342 		/*
343 		 * We don't want to dig a hole so deep that it takes us hours to
344 		 * dig out of it.  Just enough that we don't throttle/unthrottle
345 		 * with jagged workloads but can still unthrottle once pressure
346 		 * has sufficiently dissipated.
347 		 */
348 		if (diff > qd) {
349 			if (diff < max_scale)
350 				atomic_dec(&lat_info->scale_cookie);
351 		} else {
352 			atomic_sub(scale, &lat_info->scale_cookie);
353 		}
354 	}
355 }
356 
357 /*
358  * Change the queue depth of the iolatency_grp.  We add 1/16th of the queue
359  * depth when scaling up and halve it when scaling down, so we don't get wild
360  * swings and hopefully dial in to a fairer share of the overall queue depth.
361  */
362 static void scale_change(struct iolatency_grp *iolat, bool up)
363 {
364 	unsigned long qd = iolat->blkiolat->rqos.q->nr_requests;
365 	unsigned long scale = scale_amount(qd, up);
366 	unsigned long old = iolat->rq_depth.max_depth;
367 
368 	if (old > qd)
369 		old = qd;
370 
371 	if (up) {
372 		if (old == 1 && blkcg_unuse_delay(lat_to_blkg(iolat)))
373 			return;
374 
375 		if (old < qd) {
376 			old += scale;
377 			old = min(old, qd);
378 			iolat->rq_depth.max_depth = old;
379 			wake_up_all(&iolat->rq_wait.wait);
380 		}
381 	} else {
382 		old >>= 1;
383 		iolat->rq_depth.max_depth = max(old, 1UL);
384 	}
385 }
386 
387 /* Check our parent and see if the scale cookie has changed. */
388 static void check_scale_change(struct iolatency_grp *iolat)
389 {
390 	struct iolatency_grp *parent;
391 	struct child_latency_info *lat_info;
392 	unsigned int cur_cookie;
393 	unsigned int our_cookie = atomic_read(&iolat->scale_cookie);
394 	u64 scale_lat;
395 	unsigned int old;
396 	int direction = 0;
397 
398 	if (lat_to_blkg(iolat)->parent == NULL)
399 		return;
400 
401 	parent = blkg_to_lat(lat_to_blkg(iolat)->parent);
402 	if (!parent)
403 		return;
404 
405 	lat_info = &parent->child_lat;
406 	cur_cookie = atomic_read(&lat_info->scale_cookie);
407 	scale_lat = READ_ONCE(lat_info->scale_lat);
408 
409 	if (cur_cookie < our_cookie)
410 		direction = -1;
411 	else if (cur_cookie > our_cookie)
412 		direction = 1;
413 	else
414 		return;
415 
416 	old = atomic_cmpxchg(&iolat->scale_cookie, our_cookie, cur_cookie);
417 
418 	/* Somebody beat us to the punch, just bail. */
419 	if (old != our_cookie)
420 		return;
421 
422 	if (direction < 0 && iolat->min_lat_nsec) {
423 		u64 samples_thresh;
424 
425 		if (!scale_lat || iolat->min_lat_nsec <= scale_lat)
426 			return;
427 
428 		/*
429 		 * Sometimes high priority groups are their own worst enemy, so
430 		 * instead of taking it out on some poor other group that did 5%
431 		 * or less of the IO's for the last summation, just skip this
432 		 * scale-down event.
433 		 */
434 		samples_thresh = lat_info->nr_samples * 5;
435 		samples_thresh = max(1ULL, div64_u64(samples_thresh, 100));
436 		if (iolat->nr_samples <= samples_thresh)
437 			return;
438 	}
439 
440 	/* We're as low as we can go. */
441 	if (iolat->rq_depth.max_depth == 1 && direction < 0) {
442 		blkcg_use_delay(lat_to_blkg(iolat));
443 		return;
444 	}
445 
446 	/* We're back to the default cookie, unthrottle all the things. */
447 	if (cur_cookie == DEFAULT_SCALE_COOKIE) {
448 		blkcg_clear_delay(lat_to_blkg(iolat));
449 		iolat->rq_depth.max_depth = UINT_MAX;
450 		wake_up_all(&iolat->rq_wait.wait);
451 		return;
452 	}
453 
454 	scale_change(iolat, direction > 0);
455 }
456 
457 static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio)
458 {
459 	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
460 	struct blkcg_gq *blkg = bio->bi_blkg;
461 	bool issue_as_root = bio_issue_as_root_blkg(bio);
462 
463 	if (!blk_iolatency_enabled(blkiolat))
464 		return;
465 
466 	while (blkg && blkg->parent) {
467 		struct iolatency_grp *iolat = blkg_to_lat(blkg);
468 		if (!iolat) {
469 			blkg = blkg->parent;
470 			continue;
471 		}
472 
473 		check_scale_change(iolat);
474 		__blkcg_iolatency_throttle(rqos, iolat, issue_as_root,
475 				     (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
476 		blkg = blkg->parent;
477 	}
478 	if (!timer_pending(&blkiolat->timer))
479 		mod_timer(&blkiolat->timer, jiffies + HZ);
480 }
481 
482 static void iolatency_record_time(struct iolatency_grp *iolat,
483 				  struct bio_issue *issue, u64 now,
484 				  bool issue_as_root)
485 {
486 	u64 start = bio_issue_time(issue);
487 	u64 req_time;
488 
489 	/*
490 	 * Truncate 'now' the same way the bio's issue time was truncated so
491 	 * the two timestamps are directly comparable.
492 	 */
493 	now = __bio_issue_time(now);
494 
495 	if (now <= start)
496 		return;
497 
498 	req_time = now - start;
499 
500 	/*
501 	 * We don't want to count issue_as_root bio's in the cgroup's latency
502 	 * statistics as it could skew the numbers downwards.
503 	 */
504 	if (unlikely(issue_as_root && iolat->rq_depth.max_depth != UINT_MAX)) {
505 		u64 sub = iolat->min_lat_nsec;
506 		if (req_time < sub)
507 			blkcg_add_delay(lat_to_blkg(iolat), now, sub - req_time);
508 		return;
509 	}
510 
511 	latency_stat_record_time(iolat, req_time);
512 }
513 
514 #define BLKIOLATENCY_MIN_ADJUST_TIME (500 * NSEC_PER_MSEC)
515 #define BLKIOLATENCY_MIN_GOOD_SAMPLES 5
516 
517 static void iolatency_check_latencies(struct iolatency_grp *iolat, u64 now)
518 {
519 	struct blkcg_gq *blkg = lat_to_blkg(iolat);
520 	struct iolatency_grp *parent;
521 	struct child_latency_info *lat_info;
522 	struct latency_stat stat;
523 	unsigned long flags;
524 	int cpu;
525 
526 	latency_stat_init(iolat, &stat);
527 	preempt_disable();
528 	for_each_online_cpu(cpu) {
529 		struct latency_stat *s;
530 		s = per_cpu_ptr(iolat->stats, cpu);
531 		latency_stat_sum(iolat, &stat, s);
532 		latency_stat_init(iolat, s);
533 	}
534 	preempt_enable();
535 
536 	parent = blkg_to_lat(blkg->parent);
537 	if (!parent)
538 		return;
539 
540 	lat_info = &parent->child_lat;
541 
542 	iolat_update_total_lat_avg(iolat, &stat);
543 
544 	/* Everything is ok and we don't need to adjust the scale. */
545 	if (latency_sum_ok(iolat, &stat) &&
546 	    atomic_read(&lat_info->scale_cookie) == DEFAULT_SCALE_COOKIE)
547 		return;
548 
549 	/* Somebody beat us to the punch, just bail. */
550 	spin_lock_irqsave(&lat_info->lock, flags);
551 
552 	latency_stat_sum(iolat, &iolat->cur_stat, &stat);
553 	lat_info->nr_samples -= iolat->nr_samples;
554 	lat_info->nr_samples += latency_stat_samples(iolat, &iolat->cur_stat);
555 	iolat->nr_samples = latency_stat_samples(iolat, &iolat->cur_stat);
556 
557 	if ((lat_info->last_scale_event >= now ||
558 	    now - lat_info->last_scale_event < BLKIOLATENCY_MIN_ADJUST_TIME))
559 		goto out;
560 
561 	if (latency_sum_ok(iolat, &iolat->cur_stat) &&
562 	    latency_sum_ok(iolat, &stat)) {
563 		if (latency_stat_samples(iolat, &iolat->cur_stat) <
564 		    BLKIOLATENCY_MIN_GOOD_SAMPLES)
565 			goto out;
566 		if (lat_info->scale_grp == iolat) {
567 			lat_info->last_scale_event = now;
568 			scale_cookie_change(iolat->blkiolat, lat_info, true);
569 		}
570 	} else if (lat_info->scale_lat == 0 ||
571 		   lat_info->scale_lat >= iolat->min_lat_nsec) {
572 		lat_info->last_scale_event = now;
573 		if (!lat_info->scale_grp ||
574 		    lat_info->scale_lat > iolat->min_lat_nsec) {
575 			WRITE_ONCE(lat_info->scale_lat, iolat->min_lat_nsec);
576 			lat_info->scale_grp = iolat;
577 		}
578 		scale_cookie_change(iolat->blkiolat, lat_info, false);
579 	}
580 	latency_stat_init(iolat, &iolat->cur_stat);
581 out:
582 	spin_unlock_irqrestore(&lat_info->lock, flags);
583 }
584 
585 static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
586 {
587 	struct blkcg_gq *blkg;
588 	struct rq_wait *rqw;
589 	struct iolatency_grp *iolat;
590 	u64 window_start;
591 	u64 now = ktime_to_ns(ktime_get());
592 	bool issue_as_root = bio_issue_as_root_blkg(bio);
593 	bool enabled = false;
594 
595 	blkg = bio->bi_blkg;
596 	if (!blkg || !bio_flagged(bio, BIO_TRACKED))
597 		return;
598 
599 	iolat = blkg_to_lat(bio->bi_blkg);
600 	if (!iolat)
601 		return;
602 
603 	enabled = blk_iolatency_enabled(iolat->blkiolat);
604 	while (blkg && blkg->parent) {
605 		iolat = blkg_to_lat(blkg);
606 		if (!iolat) {
607 			blkg = blkg->parent;
608 			continue;
609 		}
610 		rqw = &iolat->rq_wait;
611 
612 		atomic_dec(&rqw->inflight);
613 		if (!enabled || iolat->min_lat_nsec == 0)
614 			goto next;
615 		iolatency_record_time(iolat, &bio->bi_issue, now,
616 				      issue_as_root);
617 		window_start = atomic64_read(&iolat->window_start);
618 		if (now > window_start &&
619 		    (now - window_start) >= iolat->cur_win_nsec) {
620 			if (atomic64_cmpxchg(&iolat->window_start,
621 					window_start, now) == window_start)
622 				iolatency_check_latencies(iolat, now);
623 		}
624 next:
625 		wake_up(&rqw->wait);
626 		blkg = blkg->parent;
627 	}
628 }
629 
630 static void blkcg_iolatency_cleanup(struct rq_qos *rqos, struct bio *bio)
631 {
632 	struct blkcg_gq *blkg;
633 
634 	blkg = bio->bi_blkg;
635 	while (blkg && blkg->parent) {
636 		struct rq_wait *rqw;
637 		struct iolatency_grp *iolat;
638 
639 		iolat = blkg_to_lat(blkg);
640 		if (!iolat)
641 			goto next;
642 
643 		rqw = &iolat->rq_wait;
644 		atomic_dec(&rqw->inflight);
645 		wake_up(&rqw->wait);
646 next:
647 		blkg = blkg->parent;
648 	}
649 }
650 
651 static void blkcg_iolatency_exit(struct rq_qos *rqos)
652 {
653 	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
654 
655 	del_timer_sync(&blkiolat->timer);
656 	blkcg_deactivate_policy(rqos->q, &blkcg_policy_iolatency);
657 	kfree(blkiolat);
658 }
659 
660 static struct rq_qos_ops blkcg_iolatency_ops = {
661 	.throttle = blkcg_iolatency_throttle,
662 	.cleanup = blkcg_iolatency_cleanup,
663 	.done_bio = blkcg_iolatency_done_bio,
664 	.exit = blkcg_iolatency_exit,
665 };
666 
667 static void blkiolatency_timer_fn(struct timer_list *t)
668 {
669 	struct blk_iolatency *blkiolat = from_timer(blkiolat, t, timer);
670 	struct blkcg_gq *blkg;
671 	struct cgroup_subsys_state *pos_css;
672 	u64 now = ktime_to_ns(ktime_get());
673 
674 	rcu_read_lock();
675 	blkg_for_each_descendant_pre(blkg, pos_css,
676 				     blkiolat->rqos.q->root_blkg) {
677 		struct iolatency_grp *iolat;
678 		struct child_latency_info *lat_info;
679 		unsigned long flags;
680 		u64 cookie;
681 
682 		/*
683 		 * We could be exiting; don't access the pd unless we have a
684 		 * ref on the blkg.
685 		 */
686 		if (!blkg_tryget(blkg))
687 			continue;
688 
689 		iolat = blkg_to_lat(blkg);
690 		if (!iolat)
691 			goto next;
692 
693 		lat_info = &iolat->child_lat;
694 		cookie = atomic_read(&lat_info->scale_cookie);
695 
696 		if (cookie >= DEFAULT_SCALE_COOKIE)
697 			goto next;
698 
699 		spin_lock_irqsave(&lat_info->lock, flags);
700 		if (lat_info->last_scale_event >= now)
701 			goto next_lock;
702 
703 		/*
704 		 * We scaled down but don't have a scale_grp, scale up and carry
705 		 * on.
706 		 */
707 		if (lat_info->scale_grp == NULL) {
708 			scale_cookie_change(iolat->blkiolat, lat_info, true);
709 			goto next_lock;
710 		}
711 
712 		/*
713 		 * It's been 5 seconds since our last scale event, clear the
714 		 * scale grp in case the group that needed the scale down isn't
715 		 * doing any IO currently.
716 		 */
717 		if (now - lat_info->last_scale_event >=
718 		    ((u64)NSEC_PER_SEC * 5))
719 			lat_info->scale_grp = NULL;
720 next_lock:
721 		spin_unlock_irqrestore(&lat_info->lock, flags);
722 next:
723 		blkg_put(blkg);
724 	}
725 	rcu_read_unlock();
726 }
727 
728 int blk_iolatency_init(struct request_queue *q)
729 {
730 	struct blk_iolatency *blkiolat;
731 	struct rq_qos *rqos;
732 	int ret;
733 
734 	blkiolat = kzalloc(sizeof(*blkiolat), GFP_KERNEL);
735 	if (!blkiolat)
736 		return -ENOMEM;
737 
738 	rqos = &blkiolat->rqos;
739 	rqos->id = RQ_QOS_CGROUP;
740 	rqos->ops = &blkcg_iolatency_ops;
741 	rqos->q = q;
742 
743 	rq_qos_add(q, rqos);
744 
745 	ret = blkcg_activate_policy(q, &blkcg_policy_iolatency);
746 	if (ret) {
747 		rq_qos_del(q, rqos);
748 		kfree(blkiolat);
749 		return ret;
750 	}
751 
752 	timer_setup(&blkiolat->timer, blkiolatency_timer_fn, 0);
753 
754 	return 0;
755 }
756 
757 static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
758 {
759 	struct iolatency_grp *iolat = blkg_to_lat(blkg);
760 	struct blk_iolatency *blkiolat = iolat->blkiolat;
761 	u64 oldval = iolat->min_lat_nsec;
762 
763 	iolat->min_lat_nsec = val;
764 	iolat->cur_win_nsec = max_t(u64, val << 4, BLKIOLATENCY_MIN_WIN_SIZE);
765 	iolat->cur_win_nsec = min_t(u64, iolat->cur_win_nsec,
766 				    BLKIOLATENCY_MAX_WIN_SIZE);
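	/*
	 * Illustrative arithmetic (the targets are made up): a 5ms target gives
	 * 5ms << 4 == 80ms, clamped up to the 100ms minimum window, while a
	 * 100ms target gives 1.6s, clamped down to the 1s maximum window.
	 */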
767 
768 	if (!oldval && val)
769 		atomic_inc(&blkiolat->enabled);
770 	if (oldval && !val)
771 		atomic_dec(&blkiolat->enabled);
772 }
773 
774 static void iolatency_clear_scaling(struct blkcg_gq *blkg)
775 {
776 	if (blkg->parent) {
777 		struct iolatency_grp *iolat = blkg_to_lat(blkg->parent);
778 		struct child_latency_info *lat_info;
779 		if (!iolat)
780 			return;
781 
782 		lat_info = &iolat->child_lat;
783 		spin_lock(&lat_info->lock);
784 		atomic_set(&lat_info->scale_cookie, DEFAULT_SCALE_COOKIE);
785 		lat_info->last_scale_event = 0;
786 		lat_info->scale_grp = NULL;
787 		lat_info->scale_lat = 0;
788 		spin_unlock(&lat_info->lock);
789 	}
790 }
791 
792 static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
793 			     size_t nbytes, loff_t off)
794 {
795 	struct blkcg *blkcg = css_to_blkcg(of_css(of));
796 	struct blkcg_gq *blkg;
797 	struct blkg_conf_ctx ctx;
798 	struct iolatency_grp *iolat;
799 	char *p, *tok;
800 	u64 lat_val = 0;
801 	u64 oldval;
802 	int ret;
803 
804 	ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
805 	if (ret)
806 		return ret;
807 
808 	iolat = blkg_to_lat(ctx.blkg);
809 	p = ctx.body;
810 
811 	ret = -EINVAL;
812 	while ((tok = strsep(&p, " "))) {
813 		char key[16];
814 		char val[21];	/* 18446744073709551616 */
815 
816 		if (sscanf(tok, "%15[^=]=%20s", key, val) != 2)
817 			goto out;
818 
819 		if (!strcmp(key, "target")) {
820 			u64 v;
821 
822 			if (!strcmp(val, "max"))
823 				lat_val = 0;
824 			else if (sscanf(val, "%llu", &v) == 1)
825 				lat_val = v * NSEC_PER_USEC;
826 			else
827 				goto out;
828 		} else {
829 			goto out;
830 		}
831 	}
832 
833 	/* Walk up the tree to see if our new val is lower than it should be. */
834 	blkg = ctx.blkg;
835 	oldval = iolat->min_lat_nsec;
836 
837 	iolatency_set_min_lat_nsec(blkg, lat_val);
838 	if (oldval != iolat->min_lat_nsec) {
839 		iolatency_clear_scaling(blkg);
840 	}
841 
842 	ret = 0;
843 out:
844 	blkg_conf_finish(&ctx);
845 	return ret ?: nbytes;
846 }
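
/*
 * A sketch of how this interface is driven from userspace, assuming a cgroup2
 * hierarchy mounted at /sys/fs/cgroup and a made-up device number:
 *
 *   # set a 2ms target on device 8:16 for the "protected" group
 *   echo "8:16 target=2000" > /sys/fs/cgroup/protected/io.latency
 *
 *   # clear the target again
 *   echo "8:16 target=max" > /sys/fs/cgroup/protected/io.latency
 *
 * The target is given in usecs (converted to nsecs above); "max" clears it.
 */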
847 
848 static u64 iolatency_prfill_limit(struct seq_file *sf,
849 				  struct blkg_policy_data *pd, int off)
850 {
851 	struct iolatency_grp *iolat = pd_to_lat(pd);
852 	const char *dname = blkg_dev_name(pd->blkg);
853 
854 	if (!dname || !iolat->min_lat_nsec)
855 		return 0;
856 	seq_printf(sf, "%s target=%llu\n",
857 		   dname, div_u64(iolat->min_lat_nsec, NSEC_PER_USEC));
858 	return 0;
859 }
860 
861 static int iolatency_print_limit(struct seq_file *sf, void *v)
862 {
863 	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
864 			  iolatency_prfill_limit,
865 			  &blkcg_policy_iolatency, seq_cft(sf)->private, false);
866 	return 0;
867 }
868 
869 static size_t iolatency_ssd_stat(struct iolatency_grp *iolat, char *buf,
870 				 size_t size)
871 {
872 	struct latency_stat stat;
873 	int cpu;
874 
875 	latency_stat_init(iolat, &stat);
876 	preempt_disable();
877 	for_each_online_cpu(cpu) {
878 		struct latency_stat *s;
879 		s = per_cpu_ptr(iolat->stats, cpu);
880 		latency_stat_sum(iolat, &stat, s);
881 	}
882 	preempt_enable();
883 
884 	if (iolat->rq_depth.max_depth == UINT_MAX)
885 		return scnprintf(buf, size, " missed=%llu total=%llu depth=max",
886 				 (unsigned long long)stat.ps.missed,
887 				 (unsigned long long)stat.ps.total);
888 	return scnprintf(buf, size, " missed=%llu total=%llu depth=%u",
889 			 (unsigned long long)stat.ps.missed,
890 			 (unsigned long long)stat.ps.total,
891 			 iolat->rq_depth.max_depth);
892 }
893 
894 static size_t iolatency_pd_stat(struct blkg_policy_data *pd, char *buf,
895 				size_t size)
896 {
897 	struct iolatency_grp *iolat = pd_to_lat(pd);
898 	unsigned long long avg_lat;
899 	unsigned long long cur_win;
900 
901 	if (iolat->ssd)
902 		return iolatency_ssd_stat(iolat, buf, size);
903 
904 	avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
905 	cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);
906 	if (iolat->rq_depth.max_depth == UINT_MAX)
907 		return scnprintf(buf, size, " depth=max avg_lat=%llu win=%llu",
908 				 avg_lat, cur_win);
909 
910 	return scnprintf(buf, size, " depth=%u avg_lat=%llu win=%llu",
911 			 iolat->rq_depth.max_depth, avg_lat, cur_win);
912 }
913 
914 
915 static struct blkg_policy_data *iolatency_pd_alloc(gfp_t gfp, int node)
916 {
917 	struct iolatency_grp *iolat;
918 
919 	iolat = kzalloc_node(sizeof(*iolat), gfp, node);
920 	if (!iolat)
921 		return NULL;
922 	iolat->stats = __alloc_percpu_gfp(sizeof(struct latency_stat),
923 				       __alignof__(struct latency_stat), gfp);
924 	if (!iolat->stats) {
925 		kfree(iolat);
926 		return NULL;
927 	}
928 	return &iolat->pd;
929 }
930 
931 static void iolatency_pd_init(struct blkg_policy_data *pd)
932 {
933 	struct iolatency_grp *iolat = pd_to_lat(pd);
934 	struct blkcg_gq *blkg = lat_to_blkg(iolat);
935 	struct rq_qos *rqos = blkcg_rq_qos(blkg->q);
936 	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
937 	u64 now = ktime_to_ns(ktime_get());
938 	int cpu;
939 
940 	if (blk_queue_nonrot(blkg->q))
941 		iolat->ssd = true;
942 	else
943 		iolat->ssd = false;
944 
945 	for_each_possible_cpu(cpu) {
946 		struct latency_stat *stat;
947 		stat = per_cpu_ptr(iolat->stats, cpu);
948 		latency_stat_init(iolat, stat);
949 	}
950 
951 	latency_stat_init(iolat, &iolat->cur_stat);
952 	rq_wait_init(&iolat->rq_wait);
953 	spin_lock_init(&iolat->child_lat.lock);
954 	iolat->rq_depth.queue_depth = blkg->q->nr_requests;
955 	iolat->rq_depth.max_depth = UINT_MAX;
956 	iolat->rq_depth.default_depth = iolat->rq_depth.queue_depth;
957 	iolat->blkiolat = blkiolat;
958 	iolat->cur_win_nsec = 100 * NSEC_PER_MSEC;
959 	atomic64_set(&iolat->window_start, now);
960 
961 	/*
962 	 * We init things in list order, so the pd for the parent may not be
963 	 * init'ed yet for whatever reason.
964 	 */
965 	if (blkg->parent && blkg_to_pd(blkg->parent, &blkcg_policy_iolatency)) {
966 		struct iolatency_grp *parent = blkg_to_lat(blkg->parent);
967 		atomic_set(&iolat->scale_cookie,
968 			   atomic_read(&parent->child_lat.scale_cookie));
969 	} else {
970 		atomic_set(&iolat->scale_cookie, DEFAULT_SCALE_COOKIE);
971 	}
972 
973 	atomic_set(&iolat->child_lat.scale_cookie, DEFAULT_SCALE_COOKIE);
974 }
975 
976 static void iolatency_pd_offline(struct blkg_policy_data *pd)
977 {
978 	struct iolatency_grp *iolat = pd_to_lat(pd);
979 	struct blkcg_gq *blkg = lat_to_blkg(iolat);
980 
981 	iolatency_set_min_lat_nsec(blkg, 0);
982 	iolatency_clear_scaling(blkg);
983 }
984 
985 static void iolatency_pd_free(struct blkg_policy_data *pd)
986 {
987 	struct iolatency_grp *iolat = pd_to_lat(pd);
988 	free_percpu(iolat->stats);
989 	kfree(iolat);
990 }
991 
992 static struct cftype iolatency_files[] = {
993 	{
994 		.name = "latency",
995 		.flags = CFTYPE_NOT_ON_ROOT,
996 		.seq_show = iolatency_print_limit,
997 		.write = iolatency_set_limit,
998 	},
999 	{}
1000 };
1001 
1002 static struct blkcg_policy blkcg_policy_iolatency = {
1003 	.dfl_cftypes	= iolatency_files,
1004 	.pd_alloc_fn	= iolatency_pd_alloc,
1005 	.pd_init_fn	= iolatency_pd_init,
1006 	.pd_offline_fn	= iolatency_pd_offline,
1007 	.pd_free_fn	= iolatency_pd_free,
1008 	.pd_stat_fn	= iolatency_pd_stat,
1009 };
1010 
1011 static int __init iolatency_init(void)
1012 {
1013 	return blkcg_policy_register(&blkcg_policy_iolatency);
1014 }
1015 
1016 static void __exit iolatency_exit(void)
1017 {
1018 	return blkcg_policy_unregister(&blkcg_policy_iolatency);
1019 }
1020 
1021 module_init(iolatency_init);
1022 module_exit(iolatency_exit);
1023