#ifndef RQ_QOS_H
#define RQ_QOS_H

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk_types.h>
#include <linux/atomic.h>
#include <linux/wait.h>

#include "blk-mq-debugfs.h"

struct blk_mq_debugfs_attr;

enum rq_qos_id {
	RQ_QOS_WBT,
	RQ_QOS_CGROUP,
};

struct rq_wait {
	wait_queue_head_t wait;
	atomic_t inflight;
};

struct rq_qos {
	struct rq_qos_ops *ops;
	struct request_queue *q;
	enum rq_qos_id id;
	struct rq_qos *next;
#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry *debugfs_dir;
#endif
};

struct rq_qos_ops {
	void (*throttle)(struct rq_qos *, struct bio *);
	void (*track)(struct rq_qos *, struct request *, struct bio *);
	void (*issue)(struct rq_qos *, struct request *);
	void (*requeue)(struct rq_qos *, struct request *);
	void (*done)(struct rq_qos *, struct request *);
	void (*done_bio)(struct rq_qos *, struct bio *);
	void (*cleanup)(struct rq_qos *, struct bio *);
	void (*exit)(struct rq_qos *);
	const struct blk_mq_debugfs_attr *debugfs_attrs;
};

struct rq_depth {
	unsigned int max_depth;

	int scale_step;
	bool scaled_max;

	unsigned int queue_depth;
	unsigned int default_depth;
};

/* Find the rq_qos instance with the given id, or NULL if none is attached. */
static inline struct rq_qos *rq_qos_id(struct request_queue *q,
				       enum rq_qos_id id)
{
	struct rq_qos *rqos;
	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->id == id)
			break;
	}
	return rqos;
}

static inline struct rq_qos *wbt_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_WBT);
}

static inline struct rq_qos *blkcg_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_CGROUP);
}

static inline const char *rq_qos_id_to_name(enum rq_qos_id id)
{
	switch (id) {
	case RQ_QOS_WBT:
		return "wbt";
	case RQ_QOS_CGROUP:
		return "cgroup";
	}
	return "unknown";
}

static inline void rq_wait_init(struct rq_wait *rq_wait)
{
	atomic_set(&rq_wait->inflight, 0);
	init_waitqueue_head(&rq_wait->wait);
}

static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
{
	/* New policies are pushed onto the head of the singly linked list. */
	rqos->next = q->rq_qos;
	q->rq_qos = rqos;

	if (rqos->ops->debugfs_attrs)
		blk_mq_debugfs_register_rqos(rqos);
}

static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
{
	struct rq_qos *cur, *prev = NULL;
	for (cur = q->rq_qos; cur; cur = cur->next) {
		if (cur == rqos) {
			if (prev)
				prev->next = rqos->next;
			else
				/* rqos is the head; advance the list past it */
				q->rq_qos = cur->next;
			break;
		}
		prev = cur;
	}

	blk_mq_debugfs_unregister_rqos(rqos);
}

typedef bool (acquire_inflight_cb_t)(struct rq_wait *rqw, void *private_data);
typedef void (cleanup_cb_t)(struct rq_wait *rqw, void *private_data);

void rq_qos_wait(struct rq_wait *rqw, void *private_data,
		 acquire_inflight_cb_t *acquire_inflight_cb,
		 cleanup_cb_t *cleanup_cb);
bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit);
void rq_depth_scale_up(struct rq_depth *rqd);
void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
bool rq_depth_calc_max_depth(struct rq_depth *rqd);

void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_done(struct rq_qos *rqos, struct request *rq);
void __rq_qos_issue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio);

static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_cleanup(q->rq_qos, bio);
}

static inline void rq_qos_done(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_done(q->rq_qos, rq);
}

static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_issue(q->rq_qos, rq);
}

static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_requeue(q->rq_qos, rq);
}

static inline void rq_qos_done_bio(struct request_queue *q, struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_done_bio(q->rq_qos, bio);
}

static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
{
	/*
	 * BIO_TRACKED lets controllers know that a bio went through the
	 * normal rq_qos path.
	 */
	bio_set_flag(bio, BIO_TRACKED);
	if (q->rq_qos)
		__rq_qos_throttle(q->rq_qos, bio);
}

static inline void rq_qos_track(struct request_queue *q, struct request *rq,
				struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_track(q->rq_qos, rq, bio);
}

void rq_qos_exit(struct request_queue *);

#endif
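
/*
 * Usage sketch (illustrative only, not part of this interface): one way a
 * QoS policy could block in its ->throttle() hook via rq_qos_wait(), loosely
 * modeled on how blk-wbt pairs an acquire callback with a cleanup callback.
 * The acquire callback atomically claims an inflight slot; the cleanup
 * callback releases a claim if a waiter is aborted before issue.  The names
 * example_data, example_acquire and example_cleanup are hypothetical.
 *
 *	struct example_data {
 *		struct rq_depth rqd;
 *	};
 *
 *	static bool example_acquire(struct rq_wait *rqw, void *private_data)
 *	{
 *		struct example_data *ed = private_data;
 *
 *		return rq_wait_inc_below(rqw, ed->rqd.max_depth);
 *	}
 *
 *	static void example_cleanup(struct rq_wait *rqw, void *private_data)
 *	{
 *		atomic_dec(&rqw->inflight);
 *		wake_up_all(&rqw->wait);
 *	}
 *
 * The policy's ->throttle() implementation would then sleep until a slot
 * becomes available:
 *
 *	rq_qos_wait(rqw, ed, example_acquire, example_cleanup);
 */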