xref: /linux/block/blk-rq-qos.c (revision e9f0878c4b2004ac19581274c1ae4c61ae3ca70e)
#include "blk-rq-qos.h"

/*
 * Increment 'v' if 'v' is below 'below'. Returns true if the increment
 * succeeded, false if 'v' + 1 would be bigger than 'below'.
 */
static bool atomic_inc_below(atomic_t *v, unsigned int below)
{
	unsigned int cur = atomic_read(v);

	for (;;) {
		unsigned int old;

		if (cur >= below)
			return false;
		old = atomic_cmpxchg(v, cur, cur + 1);
		if (old == cur)
			break;
		cur = old;
	}

	return true;
}
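
/*
 * Example of the boundary semantics (a worked trace, not taken from
 * the source): with atomic_read(v) == 2 and below == 3, the cmpxchg
 * publishes 3 and we return true; a second caller then reads 3, fails
 * the 'cur >= below' test and returns false. 'below' is thus an
 * inclusive ceiling on the post-increment value.
 */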

bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit)
{
	return atomic_inc_below(&rq_wait->inflight, limit);
}
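
/*
 * A minimal sketch of the caller-side pattern (hypothetical code,
 * modeled on how a throttler such as blk-wbt gates submissions):
 * sleep on rqw->wait until an inflight slot can be claimed, using
 * rq_wait_inc_below() as the wakeup condition.
 *
 *	DEFINE_WAIT(wait);
 *
 *	do {
 *		prepare_to_wait_exclusive(&rqw->wait, &wait,
 *					  TASK_UNINTERRUPTIBLE);
 *		if (rq_wait_inc_below(rqw, limit))
 *			break;
 *		io_schedule();
 *	} while (1);
 *	finish_wait(&rqw->wait, &wait);
 *
 * The completion side decrements rqw->inflight and wakes &rqw->wait.
 */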

void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
{
	struct rq_qos *rqos;

	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->ops->cleanup)
			rqos->ops->cleanup(rqos, bio);
	}
}

void rq_qos_done(struct request_queue *q, struct request *rq)
{
	struct rq_qos *rqos;

	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->ops->done)
			rqos->ops->done(rqos, rq);
	}
}

void rq_qos_issue(struct request_queue *q, struct request *rq)
{
	struct rq_qos *rqos;

	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->ops->issue)
			rqos->ops->issue(rqos, rq);
	}
}

void rq_qos_requeue(struct request_queue *q, struct request *rq)
{
	struct rq_qos *rqos;

	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->ops->requeue)
			rqos->ops->requeue(rqos, rq);
	}
}

void rq_qos_throttle(struct request_queue *q, struct bio *bio,
		     spinlock_t *lock)
{
	struct rq_qos *rqos;

	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->ops->throttle)
			rqos->ops->throttle(rqos, bio, lock);
	}
}

void rq_qos_track(struct request_queue *q, struct request *rq, struct bio *bio)
{
	struct rq_qos *rqos;

	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->ops->track)
			rqos->ops->track(rqos, rq, bio);
	}
}

void rq_qos_done_bio(struct request_queue *q, struct bio *bio)
{
	struct rq_qos *rqos;

	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->ops->done_bio)
			rqos->ops->done_bio(rqos, bio);
	}
}
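
/*
 * All of the hooks above dispatch the same way: walk the singly-linked
 * q->rq_qos list and invoke the op if the policy implements it, with
 * the NULL checks skipping unimplemented hooks. A hypothetical policy
 * (the example_* names are made up; struct rq_qos_ops and rq_qos_add()
 * come from blk-rq-qos.h) would plug itself in roughly like this:
 *
 *	static struct rq_qos_ops example_rqos_ops = {
 *		.throttle	= example_throttle,
 *		.done		= example_done,
 *		.exit		= example_exit,
 *	};
 *
 *	struct example_qos *eq = kzalloc(sizeof(*eq), GFP_KERNEL);
 *
 *	eq->rqos.ops = &example_rqos_ops;
 *	eq->rqos.q = q;
 *	rq_qos_add(q, &eq->rqos);
 */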

/*
 * Return true if we can't increase the depth further by scaling.
 */
bool rq_depth_calc_max_depth(struct rq_depth *rqd)
{
	unsigned int depth;
	bool ret = false;

	/*
	 * For QD=1 devices, this is a special case. It's important for those
	 * to have one request ready when one completes, so force a depth of
	 * 2 for those devices. On the backend, it'll be a depth of 1 anyway,
	 * since the device can't have more than that in flight. If we're
	 * scaling down, then keep a setting of 1/1/1.
	 */
	if (rqd->queue_depth == 1) {
		if (rqd->scale_step > 0)
			rqd->max_depth = 1;
		else {
			rqd->max_depth = 2;
			ret = true;
		}
	} else {
		/*
		 * scale_step == 0 is our default state. If we have suffered
		 * latency spikes, step will be > 0, and we shrink the
		 * allowed write depths. If step is < 0, we're only doing
		 * writes, and we allow a temporarily higher depth to
		 * increase performance.
		 */
		depth = min_t(unsigned int, rqd->default_depth,
			      rqd->queue_depth);
		if (rqd->scale_step > 0)
			depth = 1 + ((depth - 1) >> min(31, rqd->scale_step));
		else if (rqd->scale_step < 0) {
			unsigned int maxd = 3 * rqd->queue_depth / 4;

			depth = 1 + ((depth - 1) << -rqd->scale_step);
			if (depth > maxd) {
				depth = maxd;
				ret = true;
			}
		}

		rqd->max_depth = depth;
	}

	return ret;
}
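
/*
 * Worked example of the scaling math (numbers are illustrative, not
 * from the source): with default_depth == queue_depth == 64, the base
 * depth is 64 and:
 *
 *	scale_step  1:	depth = 1 + (63 >> 1) = 32
 *	scale_step  2:	depth = 1 + (63 >> 2) = 16
 *	scale_step -1:	depth = 1 + (63 << 1) = 127, capped to
 *			3 * 64 / 4 = 48, so we return true
 *
 * Each positive step roughly halves the depth, each negative step
 * roughly doubles it, bounded above by 3/4 of the device depth.
 */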

void rq_depth_scale_up(struct rq_depth *rqd)
{
	/*
	 * Hit max in previous round, stop here
	 */
	if (rqd->scaled_max)
		return;

	rqd->scale_step--;

	rqd->scaled_max = rq_depth_calc_max_depth(rqd);
}

/*
 * Scale the queue depth down. If 'hard_throttle' is set, do it quicker,
 * since we had a latency violation.
 */
void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
{
	/*
	 * Stop scaling down when we've hit the limit. This also prevents
	 * ->scale_step from going to crazy values, if the device can't
	 * keep up.
	 */
	if (rqd->max_depth == 1)
		return;

	if (rqd->scale_step < 0 && hard_throttle)
		rqd->scale_step = 0;
	else
		rqd->scale_step++;

	rqd->scaled_max = false;
	rq_depth_calc_max_depth(rqd);
}
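
/*
 * A minimal sketch of the feedback loop a policy runs around the two
 * helpers above (the condition names are hypothetical; blk-wbt, for
 * instance, drives this from its window timer based on completion
 * latencies):
 *
 *	if (latency_exceeded)
 *		rq_depth_scale_down(rqd, true);
 *	else if (queue_looks_idle_or_write_only)
 *		rq_depth_scale_up(rqd);
 *
 * The helpers themselves only maintain scale_step/max_depth; deciding
 * when to call them is the policy's job.
 */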

void rq_qos_exit(struct request_queue *q)
{
	while (q->rq_qos) {
		struct rq_qos *rqos = q->rq_qos;
		q->rq_qos = rqos->next;
		rqos->ops->exit(rqos);
	}
}
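
/*
 * rq_qos_exit() unlinks each policy before calling ->exit(), so the
 * exit hook owns the final reference. A typical hook frees the
 * embedding structure via container_of(); a hypothetical sketch,
 * matching the example_qos type used earlier:
 *
 *	static void example_exit(struct rq_qos *rqos)
 *	{
 *		struct example_qos *eq =
 *			container_of(rqos, struct example_qos, rqos);
 *
 *		kfree(eq);
 *	}
 */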