#include "blk-rq-qos.h"

/*
 * Increment 'v', if 'v' is below 'below'. Returns true if we succeeded,
 * false if 'v' + 1 would be bigger than 'below'.
 */
static bool atomic_inc_below(atomic_t *v, unsigned int below)
{
	unsigned int cur = atomic_read(v);

	for (;;) {
		unsigned int old;

		if (cur >= below)
			return false;
		old = atomic_cmpxchg(v, cur, cur + 1);
		if (old == cur)
			break;
		cur = old;
	}

	return true;
}

/* Account one more inflight request, unless that would exceed 'limit'. */
bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit)
{
	return atomic_inc_below(&rq_wait->inflight, limit);
}
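/*
 * Illustrative sketch, not part of the original file: a policy that wants
 * to cap its inflight requests can gate submissions on rq_wait_inc_below()
 * and sleep until a completion makes room. The completion side would
 * decrement rqw->inflight and wake rqw->wait. 'rqw' and 'limit' here are
 * hypothetical locals; wbt implements a more elaborate version of this
 * pattern.
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait_exclusive(&rqw->wait, &wait,
 *					  TASK_UNINTERRUPTIBLE);
 *		if (rq_wait_inc_below(rqw, limit))
 *			break;
 *		io_schedule();
 *	}
 *	finish_wait(&rqw->wait, &wait);
 */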
/*
 * Each __rq_qos_*() helper below walks the entire rq_qos chain hanging
 * off the queue and invokes the matching hook on every policy that
 * implements it.
 */
void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio)
{
	do {
		if (rqos->ops->cleanup)
			rqos->ops->cleanup(rqos, bio);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_done(struct rq_qos *rqos, struct request *rq)
{
	do {
		if (rqos->ops->done)
			rqos->ops->done(rqos, rq);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_issue(struct rq_qos *rqos, struct request *rq)
{
	do {
		if (rqos->ops->issue)
			rqos->ops->issue(rqos, rq);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq)
{
	do {
		if (rqos->ops->requeue)
			rqos->ops->requeue(rqos, rq);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio)
{
	do {
		if (rqos->ops->throttle)
			rqos->ops->throttle(rqos, bio);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
{
	do {
		if (rqos->ops->track)
			rqos->ops->track(rqos, rq, bio);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio)
{
	do {
		if (rqos->ops->done_bio)
			rqos->ops->done_bio(rqos, bio);
		rqos = rqos->next;
	} while (rqos);
}

/*
 * Return true, if we can't increase the depth further by scaling.
 */
bool rq_depth_calc_max_depth(struct rq_depth *rqd)
{
	unsigned int depth;
	bool ret = false;

	/*
	 * For QD=1 devices, this is a special case. It's important for those
	 * to have one request ready when one completes, so force a depth of
	 * 2 for those devices. On the backend, it'll be a depth of 1 anyway,
	 * since the device can't have more than that in flight. If we're
	 * scaling down, then keep a setting of 1/1/1.
	 */
	if (rqd->queue_depth == 1) {
		if (rqd->scale_step > 0)
			rqd->max_depth = 1;
		else {
			rqd->max_depth = 2;
			ret = true;
		}
	} else {
		/*
		 * scale_step == 0 is our default state. If we have suffered
		 * latency spikes, step will be > 0, and we shrink the
		 * allowed write depths. If step is < 0, we're only doing
		 * writes, and we allow a temporarily higher depth to
		 * increase performance.
		 */
		depth = min_t(unsigned int, rqd->default_depth,
			      rqd->queue_depth);
		if (rqd->scale_step > 0)
			depth = 1 + ((depth - 1) >> min(31, rqd->scale_step));
		else if (rqd->scale_step < 0) {
			unsigned int maxd = 3 * rqd->queue_depth / 4;

			depth = 1 + ((depth - 1) << -rqd->scale_step);
			if (depth > maxd) {
				depth = maxd;
				ret = true;
			}
		}

		rqd->max_depth = depth;
	}

	return ret;
}
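/*
 * Worked example of the scaling math above, with illustrative numbers:
 * default_depth = 64 and queue_depth = 128, so the base depth is 64.
 *
 *	scale_step  2: depth = 1 + ((64 - 1) >> 2) = 16
 *	scale_step  1: depth = 1 + ((64 - 1) >> 1) = 32
 *	scale_step  0: depth = 64
 *	scale_step -1: depth = 1 + ((64 - 1) << 1) = 127, clamped to
 *	               3 * 128 / 4 = 96, so the function returns true and
 *	               further scaling up stops.
 */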
void rq_depth_scale_up(struct rq_depth *rqd)
{
	/*
	 * Hit max in previous round, stop here
	 */
	if (rqd->scaled_max)
		return;

	rqd->scale_step--;

	rqd->scaled_max = rq_depth_calc_max_depth(rqd);
}

/*
 * Scale rwb down. If 'hard_throttle' is set, do it quicker, since we
 * had a latency violation.
 */
void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
{
	/*
	 * Stop scaling down when we've hit the limit. This also prevents
	 * ->scale_step from going to runaway values, if the device can't
	 * keep up.
	 */
	if (rqd->max_depth == 1)
		return;

	if (rqd->scale_step < 0 && hard_throttle)
		rqd->scale_step = 0;
	else
		rqd->scale_step++;

	rqd->scaled_max = false;
	rq_depth_calc_max_depth(rqd);
}

/*
 * Tear down all rq_qos policies on the queue, popping each one off the
 * head of the chain and calling its exit hook.
 */
void rq_qos_exit(struct request_queue *q)
{
	while (q->rq_qos) {
		struct rq_qos *rqos = q->rq_qos;
		q->rq_qos = rqos->next;
		rqos->ops->exit(rqos);
	}
}
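/*
 * Illustrative sketch, not part of the original file: a policy attaches by
 * pushing itself onto the head of q->rq_qos, the mirror image of the pop
 * loop in rq_qos_exit() above. 'example_add' and 'example_ops' are
 * hypothetical names for whatever registration helper a policy uses.
 *
 *	static void example_add(struct request_queue *q, struct rq_qos *rqos)
 *	{
 *		rqos->ops = &example_ops;
 *		rqos->next = q->rq_qos;
 *		q->rq_qos = rqos;
 *	}
 */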