#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>
#include <linux/blk-mq.h>
#include "blk-mq.h"

/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME	(HZ/50UL)

/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ	32

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

#ifdef CONFIG_DEBUG_FS
extern struct dentry *blk_debugfs_root;
#endif

struct blk_flush_queue {
	unsigned int		flush_queue_delayed:1;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	/*
	 * flush_rq shares its tag with orig_rq; the two can't be active
	 * at the same time.
	 */
	struct request		*orig_rq;
	spinlock_t		mq_flush_lock;
};

extern struct kmem_cache *blk_requestq_cachep;
extern struct kmem_cache *request_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

static inline struct blk_flush_queue *blk_get_flush_queue(
		struct request_queue *q, struct blk_mq_ctx *ctx)
{
	if (q->mq_ops)
		return blk_mq_map_queue(q, ctx->cpu)->fq;
	return q->fq;
}

static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
		int node, int cmd_size);
void blk_free_flush_queue(struct blk_flush_queue *q);

int blk_init_rl(struct request_list *rl, struct request_queue *q,
		gfp_t gfp_mask);
void blk_exit_rl(struct request_queue *q, struct request_list *rl);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
			struct bio *bio);
void blk_queue_bypass_start(struct request_queue *q);
void blk_queue_bypass_end(struct request_queue *q);
void __blk_queue_free_tags(struct request_queue *q);
void blk_freeze_queue(struct request_queue *q);

static inline void blk_queue_enter_live(struct request_queue *q)
{
	/*
	 * Given that running in generic_make_request() context
	 * guarantees that a live reference against q_usage_counter has
	 * been established, further references under that same context
	 * need not check that the queue has been frozen (marked dead).
	 */
	percpu_ref_get(&q->q_usage_counter);
}

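/*
 * Integrity payload completion: bios that carry an integrity payload are
 * handed to __bio_integrity_endio(), which returns false when the bio has
 * to wait for integrity verification before it can be completed.
 */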
#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}
#else
static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
#endif

void blk_timeout_work(struct work_struct *work);
unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);
void blk_delete_timer(struct request *);

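/*
 * Bio merging: the bio_attempt_*_merge() helpers try to fold @bio into an
 * existing request and return true on success.  blk_attempt_plug_merge()
 * attempts the same against the requests on the current task's plug list,
 * without taking the queue lock.
 */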
bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
			     struct bio *bio);
bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
			    struct bio *bio);
bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
			       struct bio *bio);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
			    unsigned int *request_count,
			    struct request **same_queue_rq);
unsigned int blk_plug_queued_count(struct request_queue *q);

void blk_account_io_start(struct request *req, bool new_io);
void blk_account_io_completion(struct request *req, unsigned int bytes);
void blk_account_io_done(struct request *req);

/*
 * Internal atomic flags for request handling
 */
enum rq_atomic_flags {
	REQ_ATOM_COMPLETE = 0,
	REQ_ATOM_STARTED,
	REQ_ATOM_POLL_SLEPT,
};

/*
 * The EH timer and IO completion will both attempt to 'grab' the request;
 * make sure that only one of them succeeds.
 */
static inline int blk_mark_rq_complete(struct request *rq)
{
	return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

static inline void blk_clear_rq_complete(struct request *rq)
{
	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

void blk_insert_flush(struct request *rq);

static inline struct request *__elv_next_request(struct request_queue *q)
{
	struct request *rq;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);

	WARN_ON_ONCE(q->mq_ops);

	while (1) {
		if (!list_empty(&q->queue_head)) {
			rq = list_entry_rq(q->queue_head.next);
			return rq;
		}

		/*
		 * A flush request is in flight and the drive can't queue
		 * requests behind a flush, so hold the queue until the
		 * flush finishes.  Even if we didn't, the driver couldn't
		 * dispatch the following requests and would requeue them;
		 * holding the queue can also improve throughput.  For
		 * example, with requests flush1, write1, flush2: flush1 is
		 * dispatched and the queue is held, so write1 isn't
		 * inserted.  Once flush1 finishes, flush2 is dispatched;
		 * since the disk cache is already clean, flush2 completes
		 * almost immediately, so it effectively gets folded into
		 * flush1.
		 * Because the queue is held, a flag is set to indicate that
		 * it should be restarted later.  See flush_end_io() for
		 * details.
		 */
		if (fq->flush_pending_idx != fq->flush_running_idx &&
				!queue_flush_queueable(q)) {
			fq->flush_queue_delayed = 1;
			return NULL;
		}
		if (unlikely(blk_queue_bypass(q)) ||
		    !q->elevator->type->ops.sq.elevator_dispatch_fn(q, 0))
			return NULL;
	}
}

static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.sq.elevator_activate_req_fn)
		e->type->ops.sq.elevator_activate_req_fn(q, rq);
}

static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.sq.elevator_deactivate_req_fn)
		e->type->ops.sq.elevator_deactivate_req_fn(q, rq);
}

struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);

#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
	return 0;
}
#endif

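/*
 * Request merge plumbing implemented in blk-merge.c: ll_{back,front}_merge_fn()
 * check the segment and size limits for merging @bio into @req,
 * attempt_{back,front}_merge() and blk_attempt_req_merge() try to coalesce
 * adjacent requests, and blk_try_merge() reports which kind of merge (if any)
 * is possible between @rq and @bio.
 */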
int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio);
struct request *attempt_back_merge(struct request_queue *q, struct request *rq);
struct request *attempt_front_merge(struct request_queue *q, struct request *rq);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
void blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

void blk_queue_congestion_threshold(struct request_queue *q);

int blk_dev_init(void);

/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested.  It includes a little hysteresis to keep the
 * context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
	return q->nr_congestion_on;
}

/*
 * The threshold at which a queue is considered to be uncongested
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
	return q->nr_congestion_off;
}

extern int blk_update_nr_requests(struct request_queue *, unsigned int);

/*
 * Contribute to IO statistics IFF:
 *
 * a) it's attached to a gendisk, and
 * b) the queue had IO stats enabled when this request was started, and
 * c) it's a file system request
 */
static inline int blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk &&
	       (rq->rq_flags & RQF_IO_STAT) &&
		!blk_rq_is_passthrough(rq);
}

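/*
 * Flag @req as not mergeable and drop it as the queue's cached last_merge
 * hint so later bios are no longer tested against it.
 */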
static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

/**
 * rq_ioc - determine io_context for request allocation
 * @bio: request being allocated is for this bio (can be %NULL)
 *
 * Determine the io_context to use for request allocation for @bio.  May
 * return %NULL if %current->io_context doesn't exist.
 */
static inline struct io_context *rq_ioc(struct bio *bio)
{
#ifdef CONFIG_BLK_CGROUP
	if (bio && bio->bi_ioc)
		return bio->bi_ioc;
#endif
	return current->io_context;
}

/**
 * create_io_context - try to create task->io_context
 * @gfp_mask: allocation mask
 * @node: allocation node
 *
 * If %current->io_context is %NULL, allocate a new io_context and install
 * it.  Returns the current %current->io_context, which may be %NULL if
 * allocation failed.
 *
 * Note that this function can't be called with IRQs disabled because
 * task_lock, which protects %current->io_context, is IRQ-unsafe.
 */
static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
{
	WARN_ON_ONCE(irqs_disabled());
	if (unlikely(!current->io_context))
		create_task_io_context(current, gfp_mask, node);
	return current->io_context;
}

/*
 * Internal throttling interface
 */
#ifdef CONFIG_BLK_DEV_THROTTLING
extern void blk_throtl_drain(struct request_queue *q);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
extern void blk_throtl_register_queue(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline void blk_throtl_drain(struct request_queue *q) { }
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
static inline void blk_throtl_register_queue(struct request_queue *q) { }
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

#endif /* BLK_INTERNAL_H */