/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <xen/xen.h>
#include "blk-mq.h"
#include "blk-mq-sched.h"

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

#ifdef CONFIG_DEBUG_FS
extern struct dentry *blk_debugfs_root;
#endif

struct blk_flush_queue {
	unsigned int		flush_queue_delayed:1;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	/*
	 * flush_rq shares tag with this rq, both can't be active
	 * at the same time
	 */
	struct request		*orig_rq;
	spinlock_t		mq_flush_lock;
};

extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

static inline struct blk_flush_queue *
blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
{
	return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
}

static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

static inline bool
is_flush_rq(struct request *req, struct blk_mq_hw_ctx *hctx)
{
	return hctx->fq->flush_rq == req;
}

struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
		int node, int cmd_size, gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

void blk_freeze_queue(struct request_queue *q);

static inline void blk_queue_enter_live(struct request_queue *q)
{
	/*
	 * Given that running in generic_make_request() context
	 * guarantees that a live reference against q_usage_counter has
	 * been established, further references under that same context
	 * need not check that the queue has been frozen (marked dead).
	 */
	percpu_ref_get(&q->q_usage_counter);
}

static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
	phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}

static inline bool __bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & queue_virt_boundary(q)) ||
		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!queue_virt_boundary(q))
		return false;
	return __bvec_gap_to_prev(q, bprv, offset);
}
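
/*
 * Illustrative arithmetic only, not kernel code: assume a queue whose
 * queue_virt_boundary(q) mask is 0xfff (a 4K virt boundary).  Appending
 * a bio_vec at offset 0x0 after a previous bio_vec with bv_offset 0x0
 * and bv_len 0x1000 reports no gap, since both values masked with 0xfff
 * are zero.  If the new bio_vec instead started at offset 0x200, or the
 * previous one ended mid-boundary (say bv_len 0xe00), the corresponding
 * term in __bvec_gap_to_prev() would be non-zero and a gap would be
 * reported.
 */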
static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
		unsigned int nr_segs)
{
	rq->nr_phys_segments = nr_segs;
	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;
	rq->ioprio = bio_prio(bio);

	if (bio->bi_disk)
		rq->rq_disk = bio->bi_disk;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
void bio_integrity_free(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);

bool bio_attempt_front_merge(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool bio_attempt_back_merge(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct bio *bio);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **same_queue_rq);

void blk_account_io_start(struct request *req, bool new_io);
void blk_account_io_completion(struct request *req, unsigned int bytes);
void blk_account_io_done(struct request *req, u64 now);

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

void blk_insert_flush(struct request *rq);

void elevator_init_mq(struct request_queue *q);
int elevator_switch_mq(struct request_queue *q,
		struct elevator_type *new_e);
void __elevator_exit(struct request_queue *, struct elevator_queue *);
int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q);

static inline void elevator_exit(struct request_queue *q,
		struct elevator_queue *e)
{
	lockdep_assert_held(&q->sysfs_lock);

	blk_mq_sched_free_requests(q);
	__elevator_exit(q, e);
}
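
/*
 * Illustrative sketch, not an actual caller: elevator teardown is
 * expected to run under the queue's sysfs_lock, roughly along the lines
 * of
 *
 *	mutex_lock(&q->sysfs_lock);
 *	elevator_exit(q, q->elevator);
 *	q->elevator = NULL;
 *	mutex_unlock(&q->sysfs_lock);
 *
 * so that the lockdep_assert_held() above is satisfied and freeing the
 * scheduler requests cannot race with a concurrent elevator switch.
 */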
struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);

#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
	return 0;
}
#endif

void __blk_queue_split(struct request_queue *q, struct bio **bio,
		unsigned int *nr_segs);
int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
int ll_front_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
struct request *attempt_back_merge(struct request_queue *q, struct request *rq);
struct request *attempt_front_merge(struct request_queue *q, struct request *rq);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_dev_init(void);

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started
 */
static inline bool blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk && (rq->rq_flags & RQF_IO_STAT);
}

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
 * is defined as 'unsigned int', and it also has to be aligned to the
 * logical block size, which is the minimum unit accepted by the hardware.
 */
static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
{
	return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
}

/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

/**
 * create_io_context - try to create task->io_context
 * @gfp_mask: allocation mask
 * @node: allocation node
 *
 * If %current->io_context is %NULL, allocate a new io_context and install
 * it.  Returns the current %current->io_context, which may be %NULL if
 * allocation failed.
 *
 * Note that this function can't be called with IRQs disabled because
 * task_lock, which protects %current->io_context, is IRQ-unsafe.
 */
static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
{
	WARN_ON_ONCE(irqs_disabled());
	if (unlikely(!current->io_context))
		create_task_io_context(current, gfp_mask, node);
	return current->io_context;
}
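
/*
 * Usage sketch under assumptions (hypothetical caller, not taken from
 * this tree): a submission path that wants an io_context set up before
 * doing further work against a request_queue *q might do
 *
 *	struct io_context *ioc;
 *
 *	ioc = create_io_context(GFP_NOIO, q->node);
 *	if (!ioc)
 *		// allocation failed, continue without an io_context
 *
 * As the kernel-doc above notes, the return value is simply the
 * (possibly still NULL) current->io_context, and the call must not be
 * made with IRQs disabled.
 */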
/*
 * Internal throttling interface
 */
#ifdef CONFIG_BLK_DEV_THROTTLING
extern void blk_throtl_drain(struct request_queue *q);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
extern void blk_throtl_register_queue(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline void blk_throtl_drain(struct request_queue *q) { }
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
static inline void blk_throtl_register_queue(struct request_queue *q) { }
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

#ifdef CONFIG_BLK_CGROUP_IOLATENCY
extern int blk_iolatency_init(struct request_queue *q);
#else
static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
#endif

struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);

#ifdef CONFIG_BLK_DEV_ZONED
void blk_queue_free_zone_bitmaps(struct request_queue *q);
#else
static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {}
#endif

#endif /* BLK_INTERNAL_H */