/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/part_stat.h>
#include <linux/blk-crypto.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <xen/xen.h>
#include "blk-crypto-internal.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern struct dentry *blk_debugfs_root;

struct blk_flush_queue {
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	spinlock_t		mq_flush_lock;
};

extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

static inline struct blk_flush_queue *
blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
{
	return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
}

static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

bool is_flush_rq(struct request *req);

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

void blk_freeze_queue(struct request_queue *q);

#define BIO_INLINE_VECS 4
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask);
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);

static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
	phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}

static inline bool __bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & queue_virt_boundary(q)) ||
		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!queue_virt_boundary(q))
		return false;
	return __bvec_gap_to_prev(q, bprv, offset);
}
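
/*
 * Illustrative sketch (hypothetical helper, not part of the kernel API):
 * demonstrates the gap rule above. A previous vector that ends mid-window
 * gaps against any follower; e.g. with a virt_boundary_mask of 4K - 1,
 * (0 + 512) & 0xfff is non-zero, so a gap is reported regardless of the
 * next vector's offset.
 */
static inline bool blk_example_virt_gap(struct request_queue *q)
{
	struct bio_vec prev = {
		.bv_page   = NULL,	/* the gap check never touches the page */
		.bv_offset = 0,
		.bv_len    = 512,	/* ends at byte 512 inside its window */
	};

	/* true for any queue whose virt_boundary_mask includes bit 9 */
	return bvec_gap_to_prev(q, &prev, 0);
}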

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
void bio_integrity_free(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}

bool blk_integrity_merge_rq(struct request_queue *, struct request *,
		struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
		struct bio *);

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

void blk_integrity_add(struct gendisk *);
void blk_integrity_del(struct gendisk *);
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
		struct request *r1, struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
		struct request *r, struct bio *b)
{
	return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
static inline void blk_integrity_add(struct gendisk *disk)
{
}
static inline void blk_integrity_del(struct gendisk *disk)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);

bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **same_queue_rq);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
			struct bio *bio, unsigned int nr_segs);

void blk_account_io_start(struct request *req);
void blk_account_io_done(struct request *req, u64 now);

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

void blk_insert_flush(struct request *rq);

int elevator_switch_mq(struct request_queue *q,
		       struct elevator_type *new_e);
void __elevator_exit(struct request_queue *, struct elevator_queue *);
int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q);
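
/*
 * Illustrative sketch (hypothetical helper, not kernel code): how a
 * back-merge path combines the data-payload and integrity-payload gap
 * checks above before accepting a bio onto the tail of a request. The
 * real logic lives in blk-merge.c; this only shows the shape of it.
 */
static inline bool blk_example_back_merge_ok(struct request *req,
					     struct bio *next)
{
	struct bio *prev = req->biotail;

	/* the last data bvec of the request must not gap against @next */
	if (bvec_gap_to_prev(req->q, &prev->bi_io_vec[prev->bi_vcnt - 1],
			     next->bi_io_vec[0].bv_offset))
		return false;

	/* if the request carries integrity data, neither may its bvecs */
	if (blk_integrity_rq(req) && integrity_req_gap_back_merge(req, next))
		return false;

	return true;
}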

static inline void elevator_exit(struct request_queue *q,
		struct elevator_queue *e)
{
	lockdep_assert_held(&q->sysfs_lock);

	blk_mq_sched_free_requests(q);
	__elevator_exit(q, e);
}

ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);

void __blk_queue_split(struct bio **bio, unsigned int *nr_segs);
int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_dev_init(void);

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started
 */
static inline bool blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk && (rq->rq_flags & RQF_IO_STAT);
}

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * The max size one bio can handle is UINT_MAX, because bvec_iter.bi_size
 * is defined as 'unsigned int'. The size also has to be aligned to the
 * logical block size, which is the minimum unit accepted by the hardware.
 */
static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
{
	return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
}
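
/*
 * Worked example for the helper above (illustrative only): with 512-byte
 * logical blocks, round_down(UINT_MAX, 512) = 4294966784 bytes, so the
 * cap is 4294966784 >> 9 = 8388607 sectors (just under 2 GiB). With
 * 4096-byte logical blocks it drops slightly, to 8388600 sectors.
 */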

/*
 * The max bio size that is aligned to q->limits.discard_granularity. This
 * is a hint for splitting large discard bios in the generic block layer;
 * if the device driver then needs to split the discard bio further, the
 * resulting bi_size can easily remain aligned to the discard_granularity
 * of the device's queue.
 */
static inline unsigned int bio_aligned_discard_max_sectors(
					struct request_queue *q)
{
	return round_down(UINT_MAX, q->limits.discard_granularity) >>
			SECTOR_SHIFT;
}

/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

/*
 * Internal throttling interface
 */
#ifdef CONFIG_BLK_DEV_THROTTLING
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
extern void blk_throtl_register_queue(struct request_queue *q);
bool blk_throtl_bio(struct bio *bio);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
static inline void blk_throtl_register_queue(struct request_queue *q) { }
static inline bool blk_throtl_bio(struct bio *bio) { return false; }
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

void __blk_queue_bounce(struct request_queue *q, struct bio **bio);

/*
 * Bouncing is only ever needed when highmem pages can exist, i.e. when
 * max_pfn exceeds max_low_pfn; the original text's inverted test would
 * have skipped bouncing on exactly the systems that require it.
 */
static inline bool blk_queue_may_bounce(struct request_queue *q)
{
	return IS_ENABLED(CONFIG_BOUNCE) &&
		q->limits.bounce == BLK_BOUNCE_HIGH &&
		max_low_pfn < max_pfn;
}

static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(*bio)))
		__blk_queue_bounce(q, bio);
}

#ifdef CONFIG_BLK_CGROUP_IOLATENCY
extern int blk_iolatency_init(struct request_queue *q);
#else
static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
#endif

struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);

#ifdef CONFIG_BLK_DEV_ZONED
void blk_queue_free_zone_bitmaps(struct request_queue *q);
void blk_queue_clear_zone_settings(struct request_queue *q);
#else
static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {}
static inline void blk_queue_clear_zone_settings(struct request_queue *q) {}
#endif

int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
char *disk_name(struct gendisk *hd, int partno, char *buf);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
int bdev_add_partition(struct block_device *bdev, int partno,
		sector_t start, sector_t length);
int bdev_del_partition(struct block_device *bdev, int partno);
int bdev_resize_partition(struct block_device *bdev, int partno,
		sector_t start, sector_t length);
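
/*
 * Illustrative sketch (hypothetical helper, not kernel code): the intended
 * blk_next_bio() usage pattern, as in the blkdev_issue_* helpers. Each call
 * allocates a fresh bio and, if one was passed in, chains the old bio to it
 * and submits the old one, so a loop builds an arbitrarily long chain. The
 * 1 MiB per-bio cap is made up for the example; error handling is omitted.
 */
static inline struct bio *blk_example_zero_chain(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp)
{
	struct bio *bio = NULL;

	while (nr_sects) {
		/* hypothetical per-bio cap of 2048 sectors (1 MiB) */
		sector_t len = min_t(sector_t, nr_sects, 1 << 11);

		bio = blk_next_bio(bio, 0, gfp);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE_ZEROES;
		bio->bi_iter.bi_size = len << 9;

		sector += len;
		nr_sects -= len;
	}
	return bio;	/* caller submits the tail and waits for the chain */
}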

int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page);

struct request_queue *blk_alloc_queue(int node_id);

void disk_alloc_events(struct gendisk *disk);
void disk_add_events(struct gendisk *disk);
void disk_del_events(struct gendisk *disk);
void disk_release_events(struct gendisk *disk);
extern struct device_attribute dev_attr_events;
extern struct device_attribute dev_attr_events_async;
extern struct device_attribute dev_attr_events_poll_msecs;

#endif /* BLK_INTERNAL_H */