/* SPDX-License-Identifier: GPL-2.0 */
#ifndef RQ_QOS_H
#define RQ_QOS_H

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk_types.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/blk-mq.h>

#include "blk-mq-debugfs.h"

struct blk_mq_debugfs_attr;

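/*
 * Ids for the rq-qos policies that can be attached to a queue: RQ_QOS_WBT
 * is writeback throttling (blk-wbt), RQ_QOS_LATENCY is the io.latency
 * cgroup controller (blk-iolatency), and RQ_QOS_COST is the io.cost
 * controller (blk-iocost).
 */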
enum rq_qos_id {
	RQ_QOS_WBT,
	RQ_QOS_LATENCY,
	RQ_QOS_COST,
};

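/*
 * A counted wait point: submitters that cannot take one of the inflight
 * slots sleep on @wait until completions release slots (see rq_qos_wait()
 * and rq_wait_inc_below() below).
 */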
struct rq_wait {
	wait_queue_head_t wait;
	atomic_t inflight;
};

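/*
 * Per-policy instance, embedded in each policy's own state (e.g. struct
 * rq_wb for wbt). All instances attached to a disk are kept on a singly
 * linked list off q->rq_qos, which the helpers below walk.
 */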
struct rq_qos {
	const struct rq_qos_ops *ops;
	struct gendisk *disk;
	enum rq_qos_id id;
	struct rq_qos *next;
#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry *debugfs_dir;
#endif
};

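/*
 * Hooks a policy can implement; every one of them is optional. throttle()
 * runs in the submission path before a request is allocated and may block
 * the submitter; track() and merge() associate a bio with a new or an
 * existing request; issue(), requeue() and done() follow the request's
 * lifetime on the device; done_bio() runs at bio completion; cleanup() is
 * called for a throttled bio that never gets a request; exit() tears the
 * policy down.
 */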
struct rq_qos_ops {
	void (*throttle)(struct rq_qos *, struct bio *);
	void (*track)(struct rq_qos *, struct request *, struct bio *);
	void (*merge)(struct rq_qos *, struct request *, struct bio *);
	void (*issue)(struct rq_qos *, struct request *);
	void (*requeue)(struct rq_qos *, struct request *);
	void (*done)(struct rq_qos *, struct request *);
	void (*done_bio)(struct rq_qos *, struct bio *);
	void (*cleanup)(struct rq_qos *, struct bio *);
	void (*queue_depth_changed)(struct rq_qos *);
	void (*exit)(struct rq_qos *);
	const struct blk_mq_debugfs_attr *debugfs_attrs;
};

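/*
 * Queue-depth scaling state shared by wbt and blk-iolatency: the policy
 * steps scale_step up or down and recomputes max_depth from queue_depth
 * via rq_depth_calc_max_depth().
 */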
struct rq_depth {
	unsigned int max_depth;

	int scale_step;
	bool scaled_max;

	unsigned int queue_depth;
	unsigned int default_depth;
};

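/*
 * Find the rq_qos instance for a given policy id on @q, or NULL if that
 * policy is not attached; wbt_rq_qos() and iolat_rq_qos() are shorthands
 * for the two common lookups.
 */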
static inline struct rq_qos *rq_qos_id(struct request_queue *q,
				       enum rq_qos_id id)
{
	struct rq_qos *rqos;

	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->id == id)
			break;
	}
	return rqos;
}

static inline struct rq_qos *wbt_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_WBT);
}

static inline struct rq_qos *iolat_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_LATENCY);
}

static inline void rq_wait_init(struct rq_wait *rq_wait)
{
	atomic_set(&rq_wait->inflight, 0);
	init_waitqueue_head(&rq_wait->wait);
}

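/*
 * rq_qos_add() links @rqos into the disk's queue, failing with -EBUSY if a
 * policy with the same id is already attached; rq_qos_del() unlinks it. A
 * minimal sketch of a policy registering itself, loosely modeled on
 * blk-wbt; the my_* names are hypothetical, not kernel API:
 *
 *	struct my_qos {
 *		struct rq_qos rqos;
 *	};
 *
 *	static const struct rq_qos_ops my_qos_ops = {
 *		.throttle	= my_throttle,
 *		.done		= my_done,
 *		.exit		= my_exit,
 *	};
 *
 *	static int my_init(struct gendisk *disk)
 *	{
 *		struct my_qos *mq = kzalloc(sizeof(*mq), GFP_KERNEL);
 *		int ret;
 *
 *		if (!mq)
 *			return -ENOMEM;
 *		ret = rq_qos_add(&mq->rqos, disk, RQ_QOS_WBT, &my_qos_ops);
 *		if (ret)
 *			kfree(mq);
 *		return ret;
 *	}
 */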
int rq_qos_add(struct rq_qos *rqos, struct gendisk *disk, enum rq_qos_id id,
		const struct rq_qos_ops *ops);
void rq_qos_del(struct rq_qos *rqos);

typedef bool (acquire_inflight_cb_t)(struct rq_wait *rqw, void *private_data);
typedef void (cleanup_cb_t)(struct rq_wait *rqw, void *private_data);

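/*
 * rq_qos_wait() blocks on @rqw until @acquire_inflight_cb succeeds in
 * taking an inflight slot; @cleanup_cb releases a slot the waker handed
 * over but the sleeper no longer needs. A sketch of an acquire callback,
 * modeled on blk-wbt (my_limit() is a hypothetical per-policy depth
 * calculation):
 *
 *	static bool my_acquire(struct rq_wait *rqw, void *private_data)
 *	{
 *		return rq_wait_inc_below(rqw, my_limit(private_data));
 *	}
 */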
void rq_qos_wait(struct rq_wait *rqw, void *private_data,
		 acquire_inflight_cb_t *acquire_inflight_cb,
		 cleanup_cb_t *cleanup_cb);
bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit);
bool rq_depth_scale_up(struct rq_depth *rqd);
bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
bool rq_depth_calc_max_depth(struct rq_depth *rqd);

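/*
 * The out-of-line __rq_qos_*() helpers in blk-rq-qos.c walk the whole
 * q->rq_qos list and call each policy's hook if implemented. The inline
 * wrappers below check QUEUE_FLAG_QOS_ENABLED first so queues with no
 * policy attached pay almost nothing on the fast path.
 */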
void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_done(struct rq_qos *rqos, struct request *rq);
void __rq_qos_issue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_queue_depth_changed(struct rq_qos *rqos);

static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
{
	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
			q->rq_qos)
		__rq_qos_cleanup(q->rq_qos, bio);
}

static inline void rq_qos_done(struct request_queue *q, struct request *rq)
{
	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
			q->rq_qos && !blk_rq_is_passthrough(rq))
		__rq_qos_done(q->rq_qos, rq);
}

static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
{
	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
			q->rq_qos)
		__rq_qos_issue(q->rq_qos, rq);
}

static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
{
	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
			q->rq_qos)
		__rq_qos_requeue(q->rq_qos, rq);
}

static inline void rq_qos_done_bio(struct bio *bio)
{
	struct request_queue *q;

	if (!bio->bi_bdev || (!bio_flagged(bio, BIO_QOS_THROTTLED) &&
			     !bio_flagged(bio, BIO_QOS_MERGED)))
		return;

	q = bdev_get_queue(bio->bi_bdev);

	/*
	 * A BIO may carry BIO_QOS_* flags even if the associated request_queue
	 * does not have rq_qos enabled. This can happen with stacked block
	 * devices, for example NVMe multipath, where it's possible that the
	 * bottom device has QoS enabled but the top device does not. Therefore,
	 * always verify that q->rq_qos is present and QoS is enabled before
	 * calling __rq_qos_done_bio().
	 */
	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
		__rq_qos_done_bio(q->rq_qos, bio);
}

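/*
 * Called in the submission path before a request has been allocated for
 * @bio. BIO_QOS_THROTTLED is set before the hooks run so that
 * rq_qos_done_bio() can tell which bios went through the throttle path.
 */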
static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
{
	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
			q->rq_qos) {
		bio_set_flag(bio, BIO_QOS_THROTTLED);
		__rq_qos_throttle(q->rq_qos, bio);
	}
}

static inline void rq_qos_track(struct request_queue *q, struct request *rq,
				struct bio *bio)
{
	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
			q->rq_qos)
		__rq_qos_track(q->rq_qos, rq, bio);
}

static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
				struct bio *bio)
{
	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
			q->rq_qos) {
		bio_set_flag(bio, BIO_QOS_MERGED);
		__rq_qos_merge(q->rq_qos, rq, bio);
	}
}

static inline void rq_qos_queue_depth_changed(struct request_queue *q)
{
	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
			q->rq_qos)
		__rq_qos_queue_depth_changed(q->rq_qos);
}

void rq_qos_exit(struct request_queue *);

#endif