/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>
#include <linux/blk-mq.h>
#include "blk-mq.h"

/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME	(HZ/50UL)

/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ	32

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

#ifdef CONFIG_DEBUG_FS
extern struct dentry *blk_debugfs_root;
#endif

struct blk_flush_queue {
	unsigned int		flush_queue_delayed:1;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	/*
	 * flush_rq shares its tag with orig_rq, so the two can't be
	 * active at the same time.
	 */
	struct request		*orig_rq;
	spinlock_t		mq_flush_lock;
};

extern struct kmem_cache *blk_requestq_cachep;
extern struct kmem_cache *request_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

/*
 * @q->queue_lock is set while a queue is being initialized. Since we know
 * that no other threads access the queue object before @q->queue_lock has
 * been set, it is safe to manipulate queue flags without holding the
 * queue_lock if @q->queue_lock == NULL. See also blk_alloc_queue_node() and
 * blk_init_allocated_queue().
 */
static inline void queue_lockdep_assert_held(struct request_queue *q)
{
	if (q->queue_lock)
		lockdep_assert_held(q->queue_lock);
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) &&
	    kref_read(&q->kobj.kref))
		lockdep_assert_held(q->queue_lock);
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) &&
	    kref_read(&q->kobj.kref))
		lockdep_assert_held(q->queue_lock);
	__clear_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__clear_bit(flag, &q->queue_flags);
}

static inline struct blk_flush_queue *blk_get_flush_queue(
		struct request_queue *q, struct blk_mq_ctx *ctx)
{
	if (q->mq_ops)
		return blk_mq_map_queue(q, ctx->cpu)->fq;
	return q->fq;
}

static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
		int node, int cmd_size);
void blk_free_flush_queue(struct blk_flush_queue *q);
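
/*
 * Illustrative sketch, not part of this interface: queue setup and teardown
 * code is expected to pair the two calls above roughly as follows (error
 * handling trimmed; the surrounding init context is assumed):
 *
 *	q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, 0);
 *	if (!q->fq)
 *		return -ENOMEM;
 *	...
 *	blk_free_flush_queue(q->fq);
 */
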
int blk_init_rl(struct request_list *rl, struct request_queue *q,
		gfp_t gfp_mask);
void blk_exit_rl(struct request_queue *q, struct request_list *rl);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
		struct bio *bio);
void blk_queue_bypass_start(struct request_queue *q);
void blk_queue_bypass_end(struct request_queue *q);
void __blk_queue_free_tags(struct request_queue *q);
void blk_freeze_queue(struct request_queue *q);

static inline void blk_queue_enter_live(struct request_queue *q)
{
	/*
	 * Given that running in generic_make_request() context
	 * guarantees that a live reference against q_usage_counter has
	 * been established, further references under that same context
	 * need not check that the queue has been frozen (marked dead).
	 */
	percpu_ref_get(&q->q_usage_counter);
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}
#else
static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
#endif

void blk_timeout_work(struct work_struct *work);
unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);
void blk_delete_timer(struct request *);

bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
			     struct bio *bio);
bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
			    struct bio *bio);
bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
			       struct bio *bio);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
			    unsigned int *request_count,
			    struct request **same_queue_rq);
unsigned int blk_plug_queued_count(struct request_queue *q);

void blk_account_io_start(struct request *req, bool new_io);
void blk_account_io_completion(struct request *req, unsigned int bytes);
void blk_account_io_done(struct request *req, u64 now);

/*
 * EH timer and IO completion will both attempt to 'grab' the request, make
 * sure that only one of them succeeds. Steal the bottom bit of the
 * __deadline field for this.
 */
static inline int blk_mark_rq_complete(struct request *rq)
{
	return test_and_set_bit(0, &rq->__deadline);
}

static inline void blk_clear_rq_complete(struct request *rq)
{
	clear_bit(0, &rq->__deadline);
}

static inline bool blk_rq_is_complete(struct request *rq)
{
	return test_bit(0, &rq->__deadline);
}
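
/*
 * Illustrative sketch of the scheme above, not code from this file: since
 * deadlines have jiffies granularity, bit 0 of ->__deadline carries no
 * timing information and is free to act as the completion marker. A
 * completion path races safely against the EH timer like so, with
 * __blk_complete_request() standing in for the actual completion work:
 *
 *	if (!blk_mark_rq_complete(rq))
 *		__blk_complete_request(rq);
 */
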
/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

void blk_insert_flush(struct request *rq);

static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.sq.elevator_activate_req_fn)
		e->type->ops.sq.elevator_activate_req_fn(q, rq);
}

static inline void elv_deactivate_rq(struct request_queue *q,
				     struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.sq.elevator_deactivate_req_fn)
		e->type->ops.sq.elevator_deactivate_req_fn(q, rq);
}

int elevator_init(struct request_queue *);
int elevator_init_mq(struct request_queue *q);
void elevator_exit(struct request_queue *, struct elevator_queue *);
int elv_register_queue(struct request_queue *q);
void elv_unregister_queue(struct request_queue *q);

struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);

#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
	return 0;
}
#endif

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio);
struct request *attempt_back_merge(struct request_queue *q, struct request *rq);
struct request *attempt_front_merge(struct request_queue *q,
				    struct request *rq);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next);
void blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);
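
/*
 * Illustrative sketch (assumed caller, condensed from how scheduler glue
 * consumes the merge helpers above): once blk_rq_merge_ok() has accepted
 * the pair, blk_try_merge() classifies the candidate and the matching
 * bio_attempt_*_merge() helper performs the merge:
 *
 *	switch (blk_try_merge(rq, bio)) {
 *	case ELEVATOR_BACK_MERGE:
 *		return bio_attempt_back_merge(q, rq, bio);
 *	case ELEVATOR_FRONT_MERGE:
 *		return bio_attempt_front_merge(q, rq, bio);
 *	case ELEVATOR_DISCARD_MERGE:
 *		return bio_attempt_discard_merge(q, rq, bio);
 *	default:
 *		return false;
 *	}
 */
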
void blk_queue_congestion_threshold(struct request_queue *q);

int blk_dev_init(void);

/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested. It includes a little hysteresis to keep the
 * context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
	return q->nr_congestion_on;
}

/*
 * The threshold at which a queue is considered to be uncongested
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
	return q->nr_congestion_off;
}

extern int blk_update_nr_requests(struct request_queue *, unsigned int);

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started, and
 *	c) it's a file system request
 */
static inline int blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk &&
	       (rq->rq_flags & RQF_IO_STAT) &&
	       !blk_rq_is_passthrough(rq);
}

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * Steal a bit from this field for legacy IO path atomic IO marking. Note that
 * setting the deadline clears the bottom bit, potentially clearing the
 * completed bit. The user has to be OK with this (current ones are fine).
 */
static inline void blk_rq_set_deadline(struct request *rq, unsigned long time)
{
	rq->__deadline = time & ~0x1UL;
}

static inline unsigned long blk_rq_deadline(struct request *rq)
{
	return rq->__deadline & ~0x1UL;
}

/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

/**
 * rq_ioc - determine io_context for request allocation
 * @bio: request being allocated is for this bio (can be %NULL)
 *
 * Determine io_context to use for request allocation for @bio. May return
 * %NULL if %current->io_context doesn't exist.
 */
static inline struct io_context *rq_ioc(struct bio *bio)
{
#ifdef CONFIG_BLK_CGROUP
	if (bio && bio->bi_ioc)
		return bio->bi_ioc;
#endif
	return current->io_context;
}
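
/*
 * Illustrative sketch (assumed caller, loosely modeled on the legacy
 * request allocation slow path): ensure %current has an io_context, then
 * resolve the one the request should be charged to and pin it:
 *
 *	create_io_context(GFP_NOIO, q->node);
 *	ioc = rq_ioc(bio);
 *	if (ioc)
 *		get_io_context(ioc);
 */
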
/**
 * create_io_context - try to create task->io_context
 * @gfp_mask: allocation mask
 * @node: allocation node
 *
 * If %current->io_context is %NULL, allocate a new io_context and install
 * it. Returns the current %current->io_context which may be %NULL if
 * allocation failed.
 *
 * Note that this function can't be called with IRQ disabled because
 * task_lock which protects %current->io_context is IRQ-unsafe.
 */
static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
{
	WARN_ON_ONCE(irqs_disabled());
	if (unlikely(!current->io_context))
		create_task_io_context(current, gfp_mask, node);
	return current->io_context;
}

/*
 * Internal throttling interface
 */
#ifdef CONFIG_BLK_DEV_THROTTLING
extern void blk_throtl_drain(struct request_queue *q);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
extern void blk_throtl_register_queue(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline void blk_throtl_drain(struct request_queue *q) { }
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
static inline void blk_throtl_register_queue(struct request_queue *q) { }
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

extern void blk_drain_queue(struct request_queue *q);

#endif /* BLK_INTERNAL_H */