block/blk-rq-qos.c: lines matching "down" and "scaling"

// SPDX-License-Identifier: GPL-2.0

#include "blk-rq-qos.h"

/* rq_wait_inc_below() */
        return atomic_inc_below(&rq_wait->inflight, limit);

/* __rq_qos_cleanup() */
        if (rqos->ops->cleanup)
                rqos->ops->cleanup(rqos, bio);
        rqos = rqos->next;

/* __rq_qos_done() */
        if (rqos->ops->done)
                rqos->ops->done(rqos, rq);
        rqos = rqos->next;

/* __rq_qos_issue() */
        if (rqos->ops->issue)
                rqos->ops->issue(rqos, rq);
        rqos = rqos->next;

/* __rq_qos_requeue() */
        if (rqos->ops->requeue)
                rqos->ops->requeue(rqos, rq);
        rqos = rqos->next;

/* __rq_qos_throttle() */
        if (rqos->ops->throttle)
                rqos->ops->throttle(rqos, bio);
        rqos = rqos->next;

/* __rq_qos_track() */
        if (rqos->ops->track)
                rqos->ops->track(rqos, rq, bio);
        rqos = rqos->next;

/* __rq_qos_merge() */
        if (rqos->ops->merge)
                rqos->ops->merge(rqos, rq, bio);
        rqos = rqos->next;

/* __rq_qos_done_bio() */
        if (rqos->ops->done_bio)
                rqos->ops->done_bio(rqos, bio);
        rqos = rqos->next;

/* __rq_qos_queue_depth_changed() */
        if (rqos->ops->queue_depth_changed)
                rqos->ops->queue_depth_changed(rqos);
        rqos = rqos->next;
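
Every hook above follows the same dispatch pattern: walk the singly linked q->rq_qos chain and call the hook only on policies that implement it (in the real file each fragment sits inside a do { ... } while (rqos); loop). A minimal userspace sketch of that pattern follows; the names (demo_qos, demo_ops, wbt_like_issue) are invented for illustration and are not the kernel types.

#include <stdio.h>

/*
 * Hypothetical stand-ins for struct rq_qos / struct rq_qos_ops: each policy
 * on the chain may leave any hook NULL, and the dispatcher skips it.
 */
struct demo_qos;

struct demo_ops {
        void (*issue)(struct demo_qos *dq, const char *what);
};

struct demo_qos {
        const char *name;
        const struct demo_ops *ops;
        struct demo_qos *next;          /* singly linked, like rqos->next */
};

/* Same shape as __rq_qos_issue(): walk the chain, call the hook if set. */
static void demo_qos_issue(struct demo_qos *dq, const char *what)
{
        do {
                if (dq->ops->issue)
                        dq->ops->issue(dq, what);
                dq = dq->next;
        } while (dq);
}

static void wbt_like_issue(struct demo_qos *dq, const char *what)
{
        printf("%s: issue %s\n", dq->name, what);
}

static const struct demo_ops ops_with_issue = { .issue = wbt_like_issue };
static const struct demo_ops ops_without_issue = { 0 };

int main(void)
{
        struct demo_qos iolat = { "iolatency", &ops_without_issue, NULL };
        struct demo_qos wbt = { "wbt", &ops_with_issue, &iolat };

        demo_qos_issue(&wbt, "a write bio");    /* only "wbt" prints */
        return 0;
}

Keeping the hooks optional at the dispatch site is what lets each policy implement only the callbacks it cares about.
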
/*
 * Return true, if we can't increase the depth further by scaling
 */
/* rq_depth_calc_max_depth() */
        /* ... If we are scaling down, then keep a setting of 1/1/1. */
        if (rqd->queue_depth == 1) {
                if (rqd->scale_step > 0)
                        rqd->max_depth = 1;
                else
                        rqd->max_depth = 2;
        } else {
                depth = min_t(unsigned int, rqd->default_depth,
                              rqd->queue_depth);
                if (rqd->scale_step > 0)
                        depth = 1 + ((depth - 1) >> min(31, rqd->scale_step));
                else if (rqd->scale_step < 0) {
                        unsigned int maxd = 3 * rqd->queue_depth / 4;

                        depth = 1 + ((depth - 1) << -rqd->scale_step);
                        if (depth > maxd)
                                depth = maxd;
                }

                rqd->max_depth = depth;
        }
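
The calculation above shrinks or grows the allowed depth by shifting: each positive scale_step roughly halves the usable depth (down to 1), while each negative step doubles it, capped at 3/4 of the device queue depth. A standalone sketch of just that arithmetic, with the QD=1 special case omitted and the sample numbers (default_depth 16, queue_depth 64) chosen only for illustration:

#include <stdio.h>

/*
 * Standalone model of the max-depth arithmetic above (QD > 1 case only):
 * positive scale_step shrinks the allowed depth by right-shifting, negative
 * scale_step grows it by left-shifting, capped at 3/4 of the device depth.
 */
static unsigned int calc_max_depth(unsigned int default_depth,
                                   unsigned int queue_depth, int scale_step)
{
        unsigned int maxd = 3 * queue_depth / 4;
        unsigned int depth = default_depth < queue_depth ?
                             default_depth : queue_depth;

        if (scale_step > 0)
                depth = 1 + ((depth - 1) >> (scale_step > 31 ? 31 : scale_step));
        else if (scale_step < 0) {
                depth = 1 + ((depth - 1) << -scale_step);
                if (depth > maxd)
                        depth = maxd;
        }
        return depth;
}

int main(void)
{
        /* illustrative numbers only: default depth 16, device queue depth 64 */
        for (int step = -2; step <= 3; step++)
                printf("scale_step %2d -> max_depth %u\n",
                       step, calc_max_depth(16, 64, step));
        return 0;
}

With these numbers the table reads 48, 31, 16, 8, 4, 2 for scale_step -2 through 3, showing the cap at 3/4 of the queue depth on the way up and the geometric shrink on the way down.
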
/* Returns true on success and false if scaling up wasn't possible */
/* rq_depth_scale_up() */
        if (rqd->scaled_max)
                return false;
        rqd->scale_step--;
        rqd->scaled_max = rq_depth_calc_max_depth(rqd);

/*
 * Scale rwb down. If 'hard_throttle' is set, do it quicker, since we
 * had a latency violation. Returns true on success and false if
 * scaling down wasn't possible.
 */
/* rq_depth_scale_down() */
        /*
         * Stop scaling down when we've hit the limit. This also prevents
         * ->scale_step from going to crazy values, if the device can't keep up.
         */
        if (rqd->max_depth == 1)
                return false;

        if (rqd->scale_step < 0 && hard_throttle)
                rqd->scale_step = 0;
        else
                rqd->scale_step++;

        rqd->scaled_max = false;
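
scale_step and scaled_max form a small state machine: scaling up decrements scale_step and latches scaled_max once the cap is reached, while scaling down increments it (or snaps a negative step straight back to 0 on a hard throttle) and clears the latch. A userspace model of that state machine, assuming an invented demo_depth struct and a toy stand-in for rq_depth_calc_max_depth():

#include <stdbool.h>
#include <stdio.h>

struct demo_depth {
        int scale_step;
        bool scaled_max;
        unsigned int max_depth;
};

/*
 * Toy stand-in for rq_depth_calc_max_depth(): report "can't grow further"
 * once scale_step drops below -2.
 */
static bool demo_calc(struct demo_depth *d)
{
        if (d->scale_step >= 0)
                d->max_depth = 1 + (15u >> d->scale_step);
        else
                d->max_depth = 16u << -d->scale_step;
        return d->scale_step < -2;
}

static bool demo_scale_up(struct demo_depth *d)
{
        if (d->scaled_max)              /* hit the cap in a previous round */
                return false;
        d->scale_step--;
        d->scaled_max = demo_calc(d);
        return true;
}

static bool demo_scale_down(struct demo_depth *d, bool hard_throttle)
{
        if (d->max_depth == 1)          /* already at the floor, stop */
                return false;
        if (d->scale_step < 0 && hard_throttle)
                d->scale_step = 0;      /* latency violation: drop extra depth at once */
        else
                d->scale_step++;
        d->scaled_max = false;
        demo_calc(d);
        return true;
}

int main(void)
{
        struct demo_depth d = { .scale_step = 0, .scaled_max = false, .max_depth = 16 };

        demo_scale_up(&d);              /* scale_step -1, max_depth 32 */
        demo_scale_down(&d, true);      /* hard throttle: back to 0 / 16 */
        printf("scale_step=%d max_depth=%u\n", d.scale_step, d.max_depth);
        return 0;
}
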
/* rq_qos_wake_function() */
        /*
         * If we fail to get a budget, return -1 to interrupt the wake up loop
         * in __wake_up_common.
         */
        if (!data->cb(data->rqw, data->private_data))
                return -1;

        data->got_token = true;
        /* ... also make sure the waiter will see the latest @data->got_token. */
        list_del_init_careful(&curr->entry);
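
The wake callback takes the budget on behalf of the waiter it is about to wake; if that fails it returns -1 so the wake-up loop stops instead of pointlessly waking more sleepers. A userspace model of that contract follows, with invented names (demo_waiter, demo_acquire, demo_wake_one) and a plain loop standing in for __wake_up_common():

#include <stdbool.h>
#include <stdio.h>

struct demo_waiter {
        bool got_token;
        struct demo_waiter *next;
};

static unsigned int inflight, limit = 2;

/* Plays the role of acquire_inflight_cb: take a slot if one is free. */
static bool demo_acquire(void)
{
        if (inflight >= limit)
                return false;
        inflight++;
        return true;
}

/* Plays the role of rq_qos_wake_function() for a single waiter. */
static int demo_wake_one(struct demo_waiter *w)
{
        if (!demo_acquire())
                return -1;      /* no budget left: stop waking further waiters */
        w->got_token = true;    /* the waiter owns a slot when it runs */
        return 1;
}

int main(void)
{
        struct demo_waiter c = { false, NULL };
        struct demo_waiter b = { false, &c };
        struct demo_waiter a = { false, &b };

        inflight = 1;           /* one request still in flight, limit of 2 */
        for (struct demo_waiter *w = &a; w; w = w->next)
                if (demo_wake_one(w) < 0)
                        break;

        printf("a=%d b=%d c=%d inflight=%u\n",
               a.got_token, b.got_token, c.got_token, inflight);
        /* prints a=1 b=0 c=0 inflight=2: only the first waiter got a token */
        return 0;
}
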
/**
 * rq_qos_wait - throttle on a rqw if we need to
 * @acquire_inflight_cb: inc the rqw->inflight counter if we can
 *
 * ... The acquire_inflight_cb should
 * inc the rqw->inflight if we have the ability to do so, or return false if not
 * and then we will sleep until the room becomes available.
 */
        if (!waitqueue_active(&rqw->wait) && acquire_inflight_cb(rqw, private_data))
                return;

        first_waiter = prepare_to_wait_exclusive(&rqw->wait, &data.wq,
                                                 TASK_UNINTERRUPTIBLE);
        /* ... will re-check the waiting condition before going to sleep, thus ... */
                finish_wait(&rqw->wait, &data.wq);
                /* ... the latest @data->got_token. */
        finish_wait(&rqw->wait, &data.wq);
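
The kerneldoc above defines the acquire_inflight_cb contract: atomically take an inflight slot if one is free, otherwise return false so rq_qos_wait() puts the caller to sleep. A userspace sketch of such a callback, built on the same compare-and-swap loop as atomic_inc_below()/rq_wait_inc_below(); the demo_* names are invented:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_rqw {
        atomic_uint inflight;
        unsigned int limit;
};

/* Bump *v only while it stays below 'below'; false means "no room". */
static bool demo_inc_below(atomic_uint *v, unsigned int below)
{
        unsigned int cur = atomic_load(v);

        do {
                if (cur >= below)
                        return false;
        } while (!atomic_compare_exchange_weak(v, &cur, cur + 1));
        return true;
}

/* What an rq_qos user would hand to rq_qos_wait() as acquire_inflight_cb. */
static bool demo_acquire_inflight(struct demo_rqw *rqw)
{
        return demo_inc_below(&rqw->inflight, rqw->limit);
}

int main(void)
{
        struct demo_rqw rqw = { .inflight = 2, .limit = 3 };

        printf("%d\n", demo_acquire_inflight(&rqw));    /* 1: 2 -> 3 */
        printf("%d\n", demo_acquire_inflight(&rqw));    /* 0: already at the limit */
        return 0;
}

Returning false from the callback is what hands the caller over to the waitqueue path above, where rq_qos_wake_function() later grabs the slot on its behalf.
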
void rq_qos_exit(struct request_queue *q)
{
        mutex_lock(&q->rq_qos_mutex);
        while (q->rq_qos) {
                struct rq_qos *rqos = q->rq_qos;

                q->rq_qos = rqos->next;
                rqos->ops->exit(rqos);
        }
        mutex_unlock(&q->rq_qos_mutex);
}

/* rq_qos_add() */
        struct request_queue *q = disk->queue;

        lockdep_assert_held(&q->rq_qos_mutex);

        rqos->disk = disk;
        rqos->id = id;
        rqos->ops = ops;

        /*
         * No IO can be in-flight when adding rqos, so freeze queue, which
         * is fine since we only support rq_qos for blk-mq queue.
         */
        if (rq_qos_id(q, rqos->id))
                goto ebusy;
        rqos->next = q->rq_qos;
        q->rq_qos = rqos;

        if (rqos->ops->debugfs_attrs) {
                mutex_lock(&q->debugfs_mutex);
                blk_mq_debugfs_register_rqos(rqos);
                mutex_unlock(&q->debugfs_mutex);
        }

        return 0;
ebusy:
        return -EBUSY;
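
Stripped of queue freezing and debugfs, rq_qos_add()/rq_qos_del() are list bookkeeping: reject a duplicate id with -EBUSY, push new policies at the head, and unlink through a pointer-to-pointer walk. A userspace model of just that bookkeeping, with invented demo_* names:

#include <errno.h>
#include <stdio.h>

enum demo_id { DEMO_WBT, DEMO_LATENCY, DEMO_COST };

struct demo_qos {
        enum demo_id id;
        struct demo_qos *next;
};

static struct demo_qos *chain;  /* plays the role of q->rq_qos */

static int demo_add(struct demo_qos *q, enum demo_id id)
{
        for (struct demo_qos *cur = chain; cur; cur = cur->next)
                if (cur->id == id)
                        return -EBUSY;  /* this id is already active */
        q->id = id;
        q->next = chain;        /* push at the head, like rqos->next = q->rq_qos */
        chain = q;
        return 0;
}

static void demo_del(struct demo_qos *q)
{
        for (struct demo_qos **cur = &chain; *cur; cur = &(*cur)->next) {
                if (*cur == q) {
                        *cur = q->next; /* unlink, like *cur = rqos->next */
                        break;
                }
        }
}

int main(void)
{
        struct demo_qos wbt, wbt_again;

        printf("%d\n", demo_add(&wbt, DEMO_WBT));       /* 0 */
        printf("%d\n", demo_add(&wbt_again, DEMO_WBT)); /* -EBUSY */
        demo_del(&wbt);
        printf("%s\n", chain ? "chain non-empty" : "chain empty");
        return 0;
}
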
/* rq_qos_del() */
        struct request_queue *q = rqos->disk->queue;
        lockdep_assert_held(&q->rq_qos_mutex);
        for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
                if (*cur == rqos) {
                        *cur = rqos->next;
                        break;
                }
        }
        if (!q->rq_qos)
                /* ... */;
        mutex_lock(&q->debugfs_mutex);
        blk_mq_debugfs_unregister_rqos(rqos);
        mutex_unlock(&q->debugfs_mutex);