/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/bio-integrity.h>
#include <linux/blk-crypto.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <linux/sched/sysctl.h>
#include <linux/timekeeping.h>
#include <xen/xen.h>
#include "blk-crypto-internal.h"

struct elv_change_ctx;

/*
 * Default upper limit for the software max_sectors limit used for regular
 * I/Os. This can be increased through sysfs.
 *
 * This should not be confused with the max_hw_sectors limit that is entirely
 * controlled by the block device driver, usually based on hardware limits.
 */
#define BLK_DEF_MAX_SECTORS_CAP	(SZ_4M >> SECTOR_SHIFT)

#define BLK_DEV_MAX_SECTORS	(LLONG_MAX >> 9)
#define BLK_MIN_SEGMENT_SIZE	4096

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern const struct kobj_type blk_queue_ktype;
extern struct dentry *blk_debugfs_root;

struct blk_flush_queue {
	spinlock_t		mq_flush_lock;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	unsigned long		flush_data_in_flight;
	struct request		*flush_rq;
	struct rcu_head		rcu_head;
};

bool is_flush_rq(struct request *req);

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
bool blk_queue_start_drain(struct request_queue *q);
bool __blk_freeze_queue_start(struct request_queue *q,
			      struct task_struct *owner);
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
void submit_bio_noacct_nocheck(struct bio *bio, bool split);
int bio_submit_or_kill(struct bio *bio, unsigned int flags);

static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
{
	rcu_read_lock();
	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
		goto fail;

	/*
	 * The code that increments the pm_only counter must ensure that the
	 * counter is globally visible before the queue is unfrozen.
	 */
	if (blk_queue_pm_only(q) &&
	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
		goto fail_put;

	rcu_read_unlock();
	return true;

fail_put:
	blk_queue_exit(q);
fail:
	rcu_read_unlock();
	return false;
}

static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);

	if (blk_try_enter_queue(q, false)) {
		rwsem_acquire_read(&q->io_lockdep_map, 0, 0, _RET_IP_);
		rwsem_release(&q->io_lockdep_map, _RET_IP_);
		return 0;
	}
	return __bio_queue_enter(q, bio);
}
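/*
 * Example (illustrative only, not a block layer API; the function name is
 * hypothetical): the pattern callers of blk_try_enter_queue() follow.  A
 * successful call takes a reference on q->q_usage_counter that must be
 * dropped with blk_queue_exit(); while it is held the queue cannot be
 * frozen.
 */
static inline bool blk_example_enter_then_exit(struct request_queue *q)
{
	if (!blk_try_enter_queue(q, false))
		return false;	/* queue is frozen, draining, or pm-only */

	/* ... issue I/O against q here ... */

	blk_queue_exit(q);	/* drop the q_usage_counter reference */
	return true;
}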
static inline void blk_wait_io(struct completion *done)
{
	/* Prevent hang_check timer from firing at us during very long I/O */
	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

	if (timeout)
		while (!wait_for_completion_io_timeout(done, timeout))
			;
	else
		wait_for_completion_io(done);
}

struct block_device *blkdev_get_no_open(dev_t dev, bool autoload);
void blkdev_put_no_open(struct block_device *bdev);

bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
		struct page *page, unsigned len, unsigned offset);

static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = bvec_phys(vec1);
	phys_addr_t addr2 = bvec_phys(vec2);

	/*
	 * Merging adjacent physical pages may not work correctly under KMSAN
	 * if their metadata pages aren't adjacent. Just disable merging.
	 */
	if (IS_ENABLED(CONFIG_KMSAN))
		return false;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}
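/*
 * Worked example for the segment boundary check above (illustrative numbers
 * only): with queue_segment_boundary() == 0xffff (a 64K boundary), a 4K vec
 * at physical address 0xf000 followed by a 4K vec at 0x10000 is physically
 * contiguous, but (0xf000 | 0xffff) == 0xffff while
 * ((0x10000 + 0x1000 - 1) | 0xffff) == 0x1ffff.  The merged segment would
 * straddle the 64K boundary, so biovec_phys_mergeable() rejects the merge.
 */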
static inline bool __bvec_gap_to_prev(const struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & lim->virt_boundary_mask) ||
		((bprv->bv_offset + bprv->bv_len) & lim->virt_boundary_mask);
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(const struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!lim->virt_boundary_mask)
		return false;
	return __bvec_gap_to_prev(lim, bprv, offset);
}

static inline bool rq_mergeable(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (req_op(rq) == REQ_OP_ZONE_APPEND)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}

/*
 * There are two different ways to handle DISCARD merges:
 * 1) If max_discard_segments > 1, the driver treats every bio as a range and
 *    sends the bios to the controller together. The ranges don't need to be
 *    contiguous.
 * 2) Otherwise, the bios are merged into normal read/write requests, so the
 *    ranges need to be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}

static inline unsigned int blk_rq_get_max_segments(struct request *rq)
{
	if (req_op(rq) == REQ_OP_DISCARD)
		return queue_max_discard_segments(rq->q);
	return queue_max_segments(rq->q);
}

static inline unsigned int blk_queue_get_max_sectors(struct request *rq)
{
	struct request_queue *q = rq->q;
	enum req_op op = req_op(rq);

	if (unlikely(op == REQ_OP_DISCARD))
		return min(q->limits.max_discard_sectors,
			   UINT_MAX >> SECTOR_SHIFT);

	if (unlikely(op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_secure_erase_sectors,
			   UINT_MAX >> SECTOR_SHIFT);

	if (unlikely(op == REQ_OP_WRITE_ZEROES))
		return q->limits.max_write_zeroes_sectors;

	if (rq->cmd_flags & REQ_ATOMIC)
		return q->limits.atomic_write_max_sectors;

	return q->limits.max_sectors;
}
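/*
 * Illustrative helper (hypothetical, not a block layer API): the per-request
 * byte limit implied by blk_queue_get_max_sectors().  The clamp to
 * UINT_MAX >> SECTOR_SHIFT above guarantees that this shift cannot overflow
 * an unsigned int for discard and secure erase.
 */
static inline unsigned int blk_example_max_bytes(struct request *rq)
{
	return blk_queue_get_max_sectors(rq) << SECTOR_SHIFT;
}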
#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
void bio_integrity_free(struct bio *bio);

/*
 * Integrity payloads can either be owned by the submitter, in which case
 * bio_uninit will free them, or owned and generated by the block layer,
 * in which case we'll verify them here (for reads) and free them before
 * the bio is handed back to the submitter.
 */
bool __bio_integrity_endio(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip && (bip->bip_flags & BIP_BLOCK_INTEGRITY))
		return __bio_integrity_endio(bio);
	return true;
}

bool blk_integrity_merge_rq(struct request_queue *, struct request *,
		struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
		struct bio *);

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

extern const struct attribute_group blk_integrity_attr_group;
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
		struct request *r1, struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
		struct request *r, struct bio *b)
{
	return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);

enum bio_merge_status {
	BIO_MERGE_OK,
	BIO_MERGE_NONE,
	BIO_MERGE_FAILED,
};

enum bio_merge_status bio_attempt_back_merge(struct request *req,
		struct bio *bio, unsigned int nr_segs);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
		struct bio *bio, unsigned int nr_segs);

/*
 * Plug flush limits
 */
#define BLK_MAX_REQUEST_COUNT	32
#define BLK_PLUG_FLUSH_SIZE	(128 * 1024)

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq)	((rq)->rq_flags & RQF_HASHED)

bool blk_insert_flush(struct request *rq);

void elv_update_nr_hw_queues(struct request_queue *q,
		struct elv_change_ctx *ctx);
void elevator_set_default(struct request_queue *q);
void elevator_set_none(struct request_queue *q);

ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
		const char *, size_t);

struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim,
		unsigned *nsegs);
struct bio *bio_split_write_zeroes(struct bio *bio,
		const struct queue_limits *lim, unsigned *nsegs);
struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
		unsigned *nr_segs);
struct bio *bio_split_zone_append(struct bio *bio,
		const struct queue_limits *lim, unsigned *nr_segs);

/*
 * All drivers must accept single-segment bios that are smaller than PAGE_SIZE.
 *
 * This is a quick and dirty check that relies on the fact that bi_io_vec[0] is
 * always valid if a bio has data. The check might lead to occasional false
 * positives when bios are cloned, but compared to the performance impact of
 * cloned bios themselves the check below doesn't matter anyway.
 */
static inline bool bio_may_need_split(struct bio *bio,
		const struct queue_limits *lim)
{
	const struct bio_vec *bv;

	if (lim->chunk_sectors)
		return true;

	if (!bio->bi_io_vec)
		return true;

	bv = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
	if (bio->bi_iter.bi_size > bv->bv_len - bio->bi_iter.bi_bvec_done)
		return true;
	return bv->bv_len + bv->bv_offset > lim->max_fast_segment_size;
}

/**
 * __bio_split_to_limits - split a bio to fit the queue limits
 * @bio:     bio to be split
 * @lim:     queue limits to split based on
 * @nr_segs: returns the number of segments in the returned bio
 *
 * Check if @bio needs splitting based on the queue limits, and if so split off
 * a bio fitting the limits from the beginning of @bio and return it. @bio is
 * shortened to the remainder and re-submitted.
 *
 * The split bio is allocated from the queue's bio_split bioset, which is
 * provided by the block layer.
 */
static inline struct bio *__bio_split_to_limits(struct bio *bio,
		const struct queue_limits *lim, unsigned int *nr_segs)
{
	switch (bio_op(bio)) {
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		if (bio_may_need_split(bio, lim))
			return bio_split_rw(bio, lim, nr_segs);
		*nr_segs = 1;
		return bio;
	case REQ_OP_ZONE_APPEND:
		return bio_split_zone_append(bio, lim, nr_segs);
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		return bio_split_discard(bio, lim, nr_segs);
	case REQ_OP_WRITE_ZEROES:
		return bio_split_write_zeroes(bio, lim, nr_segs);
	default:
		/* other operations can't be split */
		*nr_segs = 0;
		return bio;
	}
}
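/*
 * Illustrative sketch (simplified; the function name is hypothetical): how a
 * caller might derive the limits and invoke the split helper.  The returned
 * bio fits the queue limits; any remainder of the original bio is resubmitted
 * by the split helpers themselves.
 */
static inline struct bio *blk_example_split_to_limits(struct bio *bio)
{
	unsigned int nr_segs;

	return __bio_split_to_limits(bio,
			&bdev_get_queue(bio->bi_bdev)->limits, &nr_segs);
}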
/**
 * get_max_segment_size() - maximum number of bytes to add as a single segment
 * @lim:   Request queue limits.
 * @paddr: address of the range to add
 * @len:   maximum length available to add at @paddr
 *
 * Returns the maximum number of bytes of the range starting at @paddr that can
 * be added to a single segment.
 */
static inline unsigned get_max_segment_size(const struct queue_limits *lim,
		phys_addr_t paddr, unsigned int len)
{
	/*
	 * Prevent an overflow if mask = ULONG_MAX and offset = 0 by adding 1
	 * after having calculated the minimum.
	 */
	return min_t(unsigned long, len,
		min(lim->seg_boundary_mask - (lim->seg_boundary_mask & paddr),
		    (unsigned long)lim->max_segment_size - 1) + 1);
}
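/*
 * Worked example for the overflow avoidance above (illustrative numbers):
 * with seg_boundary_mask == ULONG_MAX and paddr == 0 the boundary term is
 * ULONG_MAX, which would wrap to 0 if 1 were added before taking the
 * minimum.  Taking min(ULONG_MAX, max_segment_size - 1) first and adding 1
 * afterwards yields max_segment_size, as intended.
 */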
int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
		struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_set_default_limits(struct queue_limits *lim);
void blk_apply_bdi_limits(struct backing_dev_info *bdi,
		struct queue_limits *lim);
int blk_dev_init(void);

void update_io_ticks(struct block_device *part, unsigned long now, bool end);

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * Internal io_context interface
 */
struct io_cq *ioc_find_get_icq(struct request_queue *q);
struct io_cq *ioc_lookup_icq(struct request_queue *q);
#ifdef CONFIG_BLK_ICQ
void ioc_clear_queue(struct request_queue *q);
#else
static inline void ioc_clear_queue(struct request_queue *q)
{
}
#endif /* CONFIG_BLK_ICQ */

#ifdef CONFIG_BLK_DEV_ZONED
void disk_init_zone_resources(struct gendisk *disk);
void disk_free_zone_resources(struct gendisk *disk);
static inline bool bio_zone_write_plugging(struct bio *bio)
{
	return bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING);
}
static inline bool blk_req_bio_is_zone_append(struct request *rq,
					      struct bio *bio)
{
	return req_op(rq) == REQ_OP_ZONE_APPEND ||
	       bio_flagged(bio, BIO_EMULATES_ZONE_APPEND);
}
void blk_zone_write_plug_bio_merged(struct bio *bio);
void blk_zone_write_plug_init_request(struct request *rq);
void blk_zone_append_update_request_bio(struct request *rq, struct bio *bio);
void blk_zone_mgmt_bio_endio(struct bio *bio);
void blk_zone_write_plug_bio_endio(struct bio *bio);
static inline void blk_zone_bio_endio(struct bio *bio)
{
	/*
	 * Zone management BIOs may impact zone write plugs (e.g. a zone reset
	 * changes the write pointer offset tracked by a zone write plug), but
	 * these operations do not go through zone write plugging as they may
	 * operate on zones that do not have a zone write plug.
	 * blk_zone_mgmt_bio_endio() handles the potential changes to any zone
	 * write plugs that are present.
	 */
	if (op_is_zone_mgmt(bio_op(bio))) {
		blk_zone_mgmt_bio_endio(bio);
		return;
	}

	/*
	 * For write BIOs to zoned devices, signal the completion of the BIO so
	 * that the next write BIO can be submitted by zone write plugging.
	 */
	if (bio_zone_write_plugging(bio))
		blk_zone_write_plug_bio_endio(bio);
}

void blk_zone_write_plug_finish_request(struct request *rq);
static inline void blk_zone_finish_request(struct request *rq)
{
	if (rq->rq_flags & RQF_ZONE_WRITE_PLUGGING)
		blk_zone_write_plug_finish_request(rq);
}
int blkdev_report_zones_ioctl(struct block_device *bdev, unsigned int cmd,
		unsigned long arg);
int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned int cmd, unsigned long arg);
#else /* CONFIG_BLK_DEV_ZONED */
static inline void disk_init_zone_resources(struct gendisk *disk)
{
}
static inline void disk_free_zone_resources(struct gendisk *disk)
{
}
static inline bool bio_zone_write_plugging(struct bio *bio)
{
	return false;
}
static inline bool blk_req_bio_is_zone_append(struct request *req,
					      struct bio *bio)
{
	return false;
}
static inline void blk_zone_write_plug_bio_merged(struct bio *bio)
{
}
static inline void blk_zone_write_plug_init_request(struct request *rq)
{
}
static inline void blk_zone_append_update_request_bio(struct request *rq,
						      struct bio *bio)
{
}
static inline void blk_zone_bio_endio(struct bio *bio)
{
}
static inline void blk_zone_finish_request(struct request *rq)
{
}
static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
		unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}
static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
		blk_mode_t mode, unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}
#endif /* CONFIG_BLK_DEV_ZONED */

struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
void bdev_add(struct block_device *bdev, dev_t dev);
void bdev_unhash(struct block_device *bdev);
void bdev_drop(struct block_device *bdev);

int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
#define ADDPART_FLAG_READONLY	4
int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
int bdev_del_partition(struct gendisk *disk, int partno);
int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
void drop_partition(struct block_device *part);

void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors);

struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
		struct lock_class_key *lkclass);
struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id);

int disk_scan_partitions(struct gendisk *disk, blk_mode_t mode);

int disk_alloc_events(struct gendisk *disk);
void disk_add_events(struct gendisk *disk);
void disk_del_events(struct gendisk *disk);
void disk_release_events(struct gendisk *disk);
void disk_block_events(struct gendisk *disk);
void disk_unblock_events(struct gendisk *disk);
void disk_flush_events(struct gendisk *disk, unsigned int mask);
extern struct device_attribute dev_attr_events;
extern struct device_attribute dev_attr_events_async;
extern struct device_attribute dev_attr_events_poll_msecs;

extern struct attribute_group blk_trace_attr_group;

blk_mode_t file_to_blk_mode(struct file *file);
int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode,
		loff_t lstart, loff_t lend);
long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
int blkdev_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags);
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);

extern const struct address_space_operations def_blk_aops;

int disk_register_independent_access_ranges(struct gendisk *disk);
void disk_unregister_independent_access_ranges(struct gendisk *disk);

int should_fail_bio(struct bio *bio);
#ifdef CONFIG_FAIL_MAKE_REQUEST
bool should_fail_request(struct block_device *part, unsigned int bytes);
#else /* CONFIG_FAIL_MAKE_REQUEST */
static inline bool should_fail_request(struct block_device *part,
		unsigned int bytes)
{
	return false;
}
#endif /* CONFIG_FAIL_MAKE_REQUEST */

/*
 * Optimized request reference counting. Ideally we'd make timeouts be more
 * clever, as that's the only reason we need references at all... But until
 * this happens, this is faster than using refcount_t. Also see:
 *
 * abc54d634334 ("io_uring: switch to atomic_t for io_kiocb reference count")
 */
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req->ref)) + 127u <= 127u)

static inline bool req_ref_inc_not_zero(struct request *req)
{
	return atomic_inc_not_zero(&req->ref);
}

static inline bool req_ref_put_and_test(struct request *req)
{
	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	return atomic_dec_and_test(&req->ref);
}

static inline void req_ref_set(struct request *req, int value)
{
	atomic_set(&req->ref, value);
}

static inline int req_ref_read(struct request *req)
{
	return atomic_read(&req->ref);
}
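/*
 * Worked example for req_ref_zero_or_close_to_overflow() (illustrative
 * values): the unsigned cast plus 127u wraps refcounts in [-127, 0] into
 * [0, 127], so the WARN in req_ref_put_and_test() fires both when a
 * reference is dropped on a request whose count already reached zero and
 * when the count has underflowed to a small negative value, while any
 * healthy count >= 1 maps to >= 128 and passes.
 */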
static inline u64 blk_time_get_ns(void)
{
	struct blk_plug *plug = current->plug;

	if (!plug || !in_task())
		return ktime_get_ns();

	/*
	 * 0 could very well be a valid time, but rather than flag "this is
	 * a valid timestamp" separately, just accept that we'll do an extra
	 * ktime_get_ns() if we just happen to get 0 as the current time.
	 */
	if (!plug->cur_ktime) {
		plug->cur_ktime = ktime_get_ns();
		current->flags |= PF_BLOCK_TS;
	}
	return plug->cur_ktime;
}

static inline ktime_t blk_time_get(void)
{
	return ns_to_ktime(blk_time_get_ns());
}
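/*
 * Illustrative only (the function name is hypothetical): while a task has an
 * active plug, repeated calls are served from the cached plug->cur_ktime, so
 * both reads below are expected to return the same value and only the first
 * pays for ktime_get_ns().  PF_BLOCK_TS marks the task so the cached
 * timestamp can be invalidated later (e.g. when the plug is flushed).
 */
static inline void blk_example_plugged_time(void)
{
	u64 t1 = blk_time_get_ns();	/* may sample the clock */
	u64 t2 = blk_time_get_ns();	/* cache hit while plugged */

	WARN_ON_ONCE(current->plug && in_task() && t1 != t2);
}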
void bdev_release(struct file *bdev_file);
int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
	      const struct blk_holder_ops *hops, struct file *bdev_file);
int bdev_permission(dev_t dev, blk_mode_t mode, void *holder);

void bio_integrity_generate(struct bio *bio);
blk_status_t bio_integrity_verify(struct bio *bio,
		struct bvec_iter *saved_iter);

void blk_integrity_prepare(struct request *rq);
void blk_integrity_complete(struct request *rq, unsigned int nr_bytes);

#ifdef CONFIG_LOCKDEP
static inline void blk_freeze_acquire_lock(struct request_queue *q)
{
	if (!q->mq_freeze_disk_dead)
		rwsem_acquire(&q->io_lockdep_map, 0, 1, _RET_IP_);
	if (!q->mq_freeze_queue_dying)
		rwsem_acquire(&q->q_lockdep_map, 0, 1, _RET_IP_);
}

static inline void blk_unfreeze_release_lock(struct request_queue *q)
{
	if (!q->mq_freeze_queue_dying)
		rwsem_release(&q->q_lockdep_map, _RET_IP_);
	if (!q->mq_freeze_disk_dead)
		rwsem_release(&q->io_lockdep_map, _RET_IP_);
}
#else
static inline void blk_freeze_acquire_lock(struct request_queue *q)
{
}
static inline void blk_unfreeze_release_lock(struct request_queue *q)
{
}
#endif

/*
 * debugfs directory and file creation can trigger fs reclaim, which can enter
 * back into the block layer request_queue. This can cause deadlock if the
 * queue is frozen. Use NOIO context together with debugfs_mutex to prevent fs
 * reclaim from triggering block I/O.
 */
static inline void blk_debugfs_lock_nomemsave(struct request_queue *q)
{
	mutex_lock(&q->debugfs_mutex);
}

static inline void blk_debugfs_unlock_nomemrestore(struct request_queue *q)
{
	mutex_unlock(&q->debugfs_mutex);
}

static inline unsigned int __must_check blk_debugfs_lock(struct request_queue *q)
{
	unsigned int memflags = memalloc_noio_save();

	blk_debugfs_lock_nomemsave(q);
	return memflags;
}

static inline void blk_debugfs_unlock(struct request_queue *q,
		unsigned int memflags)
{
	blk_debugfs_unlock_nomemrestore(q);
	memalloc_noio_restore(memflags);
}

#endif /* BLK_INTERNAL_H */