/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/bio-integrity.h>
#include <linux/blk-crypto.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <linux/sched/sysctl.h>
#include <linux/timekeeping.h>
#include <xen/xen.h>
#include "blk-crypto-internal.h"

struct elevator_type;

#define BLK_DEV_MAX_SECTORS	(LLONG_MAX >> 9)

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern struct dentry *blk_debugfs_root;

struct blk_flush_queue {
	spinlock_t		mq_flush_lock;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	unsigned long		flush_data_in_flight;
	struct request		*flush_rq;
};

bool is_flush_rq(struct request *req);

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
		gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
bool blk_queue_start_drain(struct request_queue *q);
bool __blk_freeze_queue_start(struct request_queue *q,
		struct task_struct *owner);
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
void submit_bio_noacct_nocheck(struct bio *bio);
void bio_await_chain(struct bio *bio);

static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
{
	rcu_read_lock();
	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
		goto fail;

	/*
	 * The code that increments the pm_only counter must ensure that the
	 * counter is globally visible before the queue is unfrozen.
	 */
	if (blk_queue_pm_only(q) &&
	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
		goto fail_put;

	rcu_read_unlock();
	return true;

fail_put:
	blk_queue_exit(q);
fail:
	rcu_read_unlock();
	return false;
}

static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);

	if (blk_try_enter_queue(q, false)) {
		rwsem_acquire_read(&q->io_lockdep_map, 0, 0, _RET_IP_);
		rwsem_release(&q->io_lockdep_map, _RET_IP_);
		return 0;
	}
	return __bio_queue_enter(q, bio);
}

static inline void blk_wait_io(struct completion *done)
{
	/* Prevent hang_check timer from firing at us during very long I/O */
	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

	if (timeout)
		while (!wait_for_completion_io_timeout(done, timeout))
			;
	else
		wait_for_completion_io(done);
}
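
/*
 * Illustrative sketch only (hypothetical helper, not used anywhere in the
 * block layer): how callers are expected to bracket work between
 * bio_queue_enter() and blk_queue_exit().  On failure the bio may already
 * have been completed by __bio_queue_enter(), so it must not be touched
 * again.
 */
static inline int blk_example_with_queue_ref(struct bio *bio,
		void (*work)(struct bio *bio))
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	int ret;

	ret = bio_queue_enter(bio);	/* takes a q_usage_counter reference */
	if (ret)
		return ret;		/* queue frozen/dying; bio already handled */
	work(bio);			/* the reference is held across this call */
	blk_queue_exit(q);		/* drop the reference taken above */
	return 0;
}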

#define BIO_INLINE_VECS 4
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask);
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);

bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
		struct page *page, unsigned len, unsigned offset,
		bool *same_page);

static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = bvec_phys(vec1);
	phys_addr_t addr2 = bvec_phys(vec2);

	/*
	 * Merging adjacent physical pages may not work correctly under KMSAN
	 * if their metadata pages aren't adjacent. Just disable merging.
	 */
	if (IS_ENABLED(CONFIG_KMSAN))
		return false;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}

static inline bool __bvec_gap_to_prev(const struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & lim->virt_boundary_mask) ||
		((bprv->bv_offset + bprv->bv_len) & lim->virt_boundary_mask);
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(const struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!lim->virt_boundary_mask)
		return false;
	return __bvec_gap_to_prev(lim, bprv, offset);
}

static inline bool rq_mergeable(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (req_op(rq) == REQ_OP_ZONE_APPEND)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}

/*
 * There are two different ways to handle DISCARD merges:
 *  1) If max_discard_segments > 1, the driver treats every bio as a range and
 *     sends the bios to the controller together. The ranges don't need to be
 *     contiguous.
 *  2) Otherwise, the request is handled as a normal read/write request. The
 *     ranges need to be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}

static inline unsigned int blk_rq_get_max_segments(struct request *rq)
{
	if (req_op(rq) == REQ_OP_DISCARD)
		return queue_max_discard_segments(rq->q);
	return queue_max_segments(rq->q);
}

static inline unsigned int blk_queue_get_max_sectors(struct request *rq)
{
	struct request_queue *q = rq->q;
	enum req_op op = req_op(rq);

	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_discard_sectors,
			   UINT_MAX >> SECTOR_SHIFT);

	if (unlikely(op == REQ_OP_WRITE_ZEROES))
		return q->limits.max_write_zeroes_sectors;

	if (rq->cmd_flags & REQ_ATOMIC)
		return q->limits.atomic_write_max_sectors;

	return q->limits.max_sectors;
}
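
/*
 * Illustrative sketch (hypothetical helper, not part of the block layer):
 * how the per-operation limits above combine.  A request may only grow while
 * it stays below both the sector limit for its operation and the segment
 * limit; the real checks live in the ll_*_merge_fn() helpers in blk-merge.c
 * and also account for chunk boundaries.
 */
static inline bool blk_example_within_rq_limits(struct request *rq,
		unsigned int total_sectors, unsigned int total_segs)
{
	return total_sectors <= blk_queue_get_max_sectors(rq) &&
	       total_segs <= blk_rq_get_max_segments(rq);
}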

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
void bio_integrity_free(struct bio *bio);

/*
 * Integrity payloads can either be owned by the submitter, in which case
 * bio_uninit will free them, or owned and generated by the block layer,
 * in which case we'll verify them here (for reads) and free them before
 * the bio is handed back to the submitter.
 */
bool __bio_integrity_endio(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip && (bip->bip_flags & BIP_BLOCK_INTEGRITY))
		return __bio_integrity_endio(bio);
	return true;
}

bool blk_integrity_merge_rq(struct request_queue *, struct request *,
		struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
		struct bio *);

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

extern const struct attribute_group blk_integrity_attr_group;
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
		struct request *r1, struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
		struct request *r, struct bio *b)
{
	return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);

enum bio_merge_status {
	BIO_MERGE_OK,
	BIO_MERGE_NONE,
	BIO_MERGE_FAILED,
};

enum bio_merge_status bio_attempt_back_merge(struct request *req,
		struct bio *bio, unsigned int nr_segs);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
		struct bio *bio, unsigned int nr_segs);

/*
 * Plug flush limits
 */
#define BLK_MAX_REQUEST_COUNT	32
#define BLK_PLUG_FLUSH_SIZE	(128 * 1024)
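
/*
 * Illustrative sketch (hypothetical helper): the plug limits above mean a
 * plugged request list is flushed early once it holds BLK_MAX_REQUEST_COUNT
 * requests or the last queued request reaches BLK_PLUG_FLUSH_SIZE bytes,
 * roughly as the plugging code in blk-mq.c does.
 */
static inline bool blk_example_plug_full(unsigned short rq_count,
		unsigned int last_rq_bytes)
{
	return rq_count >= BLK_MAX_REQUEST_COUNT ||
	       last_rq_bytes >= BLK_PLUG_FLUSH_SIZE;
}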

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

bool blk_insert_flush(struct request *rq);

int elevator_switch(struct request_queue *q, struct elevator_type *new_e);
void elevator_disable(struct request_queue *q);
void elevator_exit(struct request_queue *q);
int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q);

ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
		const char *, size_t);

struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim,
		unsigned *nsegs);
struct bio *bio_split_write_zeroes(struct bio *bio,
		const struct queue_limits *lim, unsigned *nsegs);
struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
		unsigned *nr_segs);
struct bio *bio_split_zone_append(struct bio *bio,
		const struct queue_limits *lim, unsigned *nr_segs);

/*
 * All drivers must accept single-segment bios that are smaller than PAGE_SIZE.
 *
 * This is a quick and dirty check that relies on the fact that bi_io_vec[0] is
 * always valid if a bio has data. The check might lead to occasional false
 * positives when bios are cloned, but compared to the performance impact of
 * cloned bios themselves the loop below doesn't matter anyway.
 */
static inline bool bio_may_need_split(struct bio *bio,
		const struct queue_limits *lim)
{
	return lim->chunk_sectors || bio->bi_vcnt != 1 ||
		bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
}

/**
 * __bio_split_to_limits - split a bio to fit the queue limits
 * @bio: bio to be split
 * @lim: queue limits to split based on
 * @nr_segs: returns the number of segments in the returned bio
 *
 * Check if @bio needs splitting based on the queue limits, and if so split off
 * a bio fitting the limits from the beginning of @bio and return it. @bio is
 * shortened to the remainder and re-submitted.
 *
 * The split bio is allocated from @q->bio_split, which is provided by the
 * block layer.
 */
static inline struct bio *__bio_split_to_limits(struct bio *bio,
		const struct queue_limits *lim, unsigned int *nr_segs)
{
	switch (bio_op(bio)) {
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		if (bio_may_need_split(bio, lim))
			return bio_split_rw(bio, lim, nr_segs);
		*nr_segs = 1;
		return bio;
	case REQ_OP_ZONE_APPEND:
		return bio_split_zone_append(bio, lim, nr_segs);
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		return bio_split_discard(bio, lim, nr_segs);
	case REQ_OP_WRITE_ZEROES:
		return bio_split_write_zeroes(bio, lim, nr_segs);
	default:
		/* other operations can't be split */
		*nr_segs = 0;
		return bio;
	}
}
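
/*
 * Illustrative sketch (hypothetical caller, roughly mirroring
 * bio_split_to_limits()): the bio returned by __bio_split_to_limits() obeys
 * the queue limits; if a split happened the remainder was already
 * re-submitted, and a NULL return means the bio failed and was completed.
 */
static inline struct bio *blk_example_split_bio(struct bio *bio,
		unsigned int *nr_segs)
{
	const struct queue_limits *lim = &bdev_get_queue(bio->bi_bdev)->limits;

	return __bio_split_to_limits(bio, lim, nr_segs);
}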

int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
		struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_set_default_limits(struct queue_limits *lim);
void blk_apply_bdi_limits(struct backing_dev_info *bdi,
		struct queue_limits *lim);
int blk_dev_init(void);

void update_io_ticks(struct block_device *part, unsigned long now, bool end);
unsigned int part_in_flight(struct block_device *part);

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}
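
/*
 * Illustrative sketch only (hypothetical, heavily simplified): the pattern
 * behind req_set_nomerge() — once a request cannot accept further bios it is
 * marked REQ_NOMERGE so later merge attempts skip it quickly.  The real
 * back-merge path in blk-merge.c also updates request size, accounting,
 * integrity and crypto state, and marks the request in some failure cases
 * itself.
 */
static inline enum bio_merge_status blk_example_try_back_merge(
		struct request_queue *q, struct request *req,
		struct bio *bio, unsigned int nr_segs)
{
	if (!blk_rq_merge_ok(req, bio))
		return BIO_MERGE_NONE;
	if (!ll_back_merge_fn(req, bio, nr_segs)) {
		req_set_nomerge(q, req);
		return BIO_MERGE_FAILED;
	}
	/* the real code would now append @bio to @req and update accounting */
	return BIO_MERGE_OK;
}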

/*
 * Internal io_context interface
 */
struct io_cq *ioc_find_get_icq(struct request_queue *q);
struct io_cq *ioc_lookup_icq(struct request_queue *q);
#ifdef CONFIG_BLK_ICQ
void ioc_clear_queue(struct request_queue *q);
#else
static inline void ioc_clear_queue(struct request_queue *q)
{
}
#endif /* CONFIG_BLK_ICQ */

struct bio *__blk_queue_bounce(struct bio *bio, struct request_queue *q);

static inline bool blk_queue_may_bounce(struct request_queue *q)
{
	return IS_ENABLED(CONFIG_BOUNCE) &&
		(q->limits.features & BLK_FEAT_BOUNCE_HIGH) &&
		max_low_pfn >= max_pfn;
}

static inline struct bio *blk_queue_bounce(struct bio *bio,
		struct request_queue *q)
{
	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(bio)))
		return __blk_queue_bounce(bio, q);
	return bio;
}

#ifdef CONFIG_BLK_DEV_ZONED
void disk_init_zone_resources(struct gendisk *disk);
void disk_free_zone_resources(struct gendisk *disk);
static inline bool bio_zone_write_plugging(struct bio *bio)
{
	return bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING);
}
void blk_zone_write_plug_bio_merged(struct bio *bio);
void blk_zone_write_plug_init_request(struct request *rq);
static inline void blk_zone_update_request_bio(struct request *rq,
		struct bio *bio)
{
	/*
	 * For zone append requests, the request sector indicates the location
	 * at which the BIO data was written. Return this value to the BIO
	 * issuer through the BIO iter sector.
	 * For plugged zone writes, which include emulated zone append, we need
	 * the original BIO sector so that blk_zone_write_plug_bio_endio() can
	 * lookup the zone write plug.
	 */
	if (req_op(rq) == REQ_OP_ZONE_APPEND || bio_zone_write_plugging(bio))
		bio->bi_iter.bi_sector = rq->__sector;
}
void blk_zone_write_plug_bio_endio(struct bio *bio);
static inline void blk_zone_bio_endio(struct bio *bio)
{
	/*
	 * For write BIOs to zoned devices, signal the completion of the BIO so
	 * that the next write BIO can be submitted by zone write plugging.
	 */
	if (bio_zone_write_plugging(bio))
		blk_zone_write_plug_bio_endio(bio);
}

void blk_zone_write_plug_finish_request(struct request *rq);
static inline void blk_zone_finish_request(struct request *rq)
{
	if (rq->rq_flags & RQF_ZONE_WRITE_PLUGGING)
		blk_zone_write_plug_finish_request(rq);
}
int blkdev_report_zones_ioctl(struct block_device *bdev, unsigned int cmd,
		unsigned long arg);
int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned int cmd, unsigned long arg);
#else /* CONFIG_BLK_DEV_ZONED */
static inline void disk_init_zone_resources(struct gendisk *disk)
{
}
static inline void disk_free_zone_resources(struct gendisk *disk)
{
}
static inline bool bio_zone_write_plugging(struct bio *bio)
{
	return false;
}
static inline void blk_zone_write_plug_bio_merged(struct bio *bio)
{
}
static inline void blk_zone_write_plug_init_request(struct request *rq)
{
}
static inline void blk_zone_update_request_bio(struct request *rq,
		struct bio *bio)
{
}
static inline void blk_zone_bio_endio(struct bio *bio)
{
}
static inline void blk_zone_finish_request(struct request *rq)
{
}
static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
		unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}
static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
		blk_mode_t mode, unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}
#endif /* CONFIG_BLK_DEV_ZONED */
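
/*
 * Illustrative sketch (hypothetical, simplified): roughly the order in which
 * the zone write plugging hooks above run on the completion path.  In the
 * real code blk_zone_bio_endio() is invoked from bio_endio() and
 * blk_zone_finish_request() from the request freeing path.
 */
static inline void blk_example_zone_complete_order(struct request *rq,
		struct bio *bio)
{
	blk_zone_update_request_bio(rq, bio);	/* report the written sector */
	blk_zone_bio_endio(bio);		/* let the next plugged zone write go */
	blk_zone_finish_request(rq);		/* release the zone write plug */
}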

struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
void bdev_add(struct block_device *bdev, dev_t dev);
void bdev_unhash(struct block_device *bdev);
void bdev_drop(struct block_device *bdev);

int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
#define ADDPART_FLAG_READONLY	4
int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
int bdev_del_partition(struct gendisk *disk, int partno);
int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
void drop_partition(struct block_device *part);

void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors);

struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
		struct lock_class_key *lkclass);

/*
 * Clean up a page appropriately, where the page may be pinned, may have a
 * ref taken on it or neither.
 */
static inline void bio_release_page(struct bio *bio, struct page *page)
{
	if (bio_flagged(bio, BIO_PAGE_PINNED))
		unpin_user_page(page);
}

struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id);

int disk_scan_partitions(struct gendisk *disk, blk_mode_t mode);

int disk_alloc_events(struct gendisk *disk);
void disk_add_events(struct gendisk *disk);
void disk_del_events(struct gendisk *disk);
void disk_release_events(struct gendisk *disk);
void disk_block_events(struct gendisk *disk);
void disk_unblock_events(struct gendisk *disk);
void disk_flush_events(struct gendisk *disk, unsigned int mask);
extern struct device_attribute dev_attr_events;
extern struct device_attribute dev_attr_events_async;
extern struct device_attribute dev_attr_events_poll_msecs;

extern struct attribute_group blk_trace_attr_group;

blk_mode_t file_to_blk_mode(struct file *file);
int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode,
		loff_t lstart, loff_t lend);
long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
int blkdev_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags);
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);

extern const struct address_space_operations def_blk_aops;

int disk_register_independent_access_ranges(struct gendisk *disk);
void disk_unregister_independent_access_ranges(struct gendisk *disk);

#ifdef CONFIG_FAIL_MAKE_REQUEST
bool should_fail_request(struct block_device *part, unsigned int bytes);
#else /* CONFIG_FAIL_MAKE_REQUEST */
static inline bool should_fail_request(struct block_device *part,
		unsigned int bytes)
{
	return false;
}
#endif /* CONFIG_FAIL_MAKE_REQUEST */

/*
 * Optimized request reference counting. Ideally we'd make timeouts be more
 * clever, as that's the only reason we need references at all... But until
 * this happens, this is faster than using refcount_t. Also see:
 *
 * abc54d634334 ("io_uring: switch to atomic_t for io_kiocb reference count")
 */
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req->ref)) + 127u <= 127u)

static inline bool req_ref_inc_not_zero(struct request *req)
{
	return atomic_inc_not_zero(&req->ref);
}

static inline bool req_ref_put_and_test(struct request *req)
{
	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	return atomic_dec_and_test(&req->ref);
}

static inline void req_ref_set(struct request *req, int value)
{
	atomic_set(&req->ref, value);
}

static inline int req_ref_read(struct request *req)
{
	return atomic_read(&req->ref);
}
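
/*
 * Illustrative sketch (hypothetical): the pattern the reference helpers above
 * support — a timeout handler may only look at a request after successfully
 * taking a reference, and whoever drops the last reference frees it.
 */
static inline void blk_example_req_ref_usage(struct request *rq,
		void (*free_rq)(struct request *rq))
{
	if (!req_ref_inc_not_zero(rq))
		return;			/* request already completed and freed */
	/* ... safely inspect @rq here ... */
	if (req_ref_put_and_test(rq))
		free_rq(rq);		/* we dropped the last reference */
}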

static inline u64 blk_time_get_ns(void)
{
	struct blk_plug *plug = current->plug;

	if (!plug || !in_task())
		return ktime_get_ns();

	/*
	 * 0 could very well be a valid time, but rather than flag "this is
	 * a valid timestamp" separately, just accept that we'll do an extra
	 * ktime_get_ns() if we just happen to get 0 as the current time.
	 */
	if (!plug->cur_ktime) {
		plug->cur_ktime = ktime_get_ns();
		current->flags |= PF_BLOCK_TS;
	}
	return plug->cur_ktime;
}

static inline ktime_t blk_time_get(void)
{
	return ns_to_ktime(blk_time_get_ns());
}

/*
 * From most significant bit:
 * 1 bit: reserved for other usage, see below
 * 12 bits: original size of bio
 * 51 bits: issue time of bio
 */
#define BIO_ISSUE_RES_BITS	1
#define BIO_ISSUE_SIZE_BITS	12
#define BIO_ISSUE_RES_SHIFT	(64 - BIO_ISSUE_RES_BITS)
#define BIO_ISSUE_SIZE_SHIFT	(BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
#define BIO_ISSUE_TIME_MASK	((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
#define BIO_ISSUE_SIZE_MASK	\
	(((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
#define BIO_ISSUE_RES_MASK	(~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))

/* Reserved bit for blk-throtl */
#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)

static inline u64 __bio_issue_time(u64 time)
{
	return time & BIO_ISSUE_TIME_MASK;
}

static inline u64 bio_issue_time(struct bio_issue *issue)
{
	return __bio_issue_time(issue->value);
}

static inline sector_t bio_issue_size(struct bio_issue *issue)
{
	return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
}

static inline void bio_issue_init(struct bio_issue *issue,
		sector_t size)
{
	size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
	issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
			(blk_time_get_ns() & BIO_ISSUE_TIME_MASK) |
			((u64)size << BIO_ISSUE_SIZE_SHIFT));
}

void bdev_release(struct file *bdev_file);
int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
		const struct blk_holder_ops *hops, struct file *bdev_file);
int bdev_permission(dev_t dev, blk_mode_t mode, void *holder);

void blk_integrity_generate(struct bio *bio);
void blk_integrity_verify(struct bio *bio);
void blk_integrity_prepare(struct request *rq);
void blk_integrity_complete(struct request *rq, unsigned int nr_bytes);

#ifdef CONFIG_LOCKDEP
static inline void blk_freeze_acquire_lock(struct request_queue *q)
{
	if (!q->mq_freeze_disk_dead)
		rwsem_acquire(&q->io_lockdep_map, 0, 1, _RET_IP_);
	if (!q->mq_freeze_queue_dying)
		rwsem_acquire(&q->q_lockdep_map, 0, 1, _RET_IP_);
}

static inline void blk_unfreeze_release_lock(struct request_queue *q)
{
	if (!q->mq_freeze_queue_dying)
		rwsem_release(&q->q_lockdep_map, _RET_IP_);
	if (!q->mq_freeze_disk_dead)
		rwsem_release(&q->io_lockdep_map, _RET_IP_);
}
#else
static inline void blk_freeze_acquire_lock(struct request_queue *q)
{
}
static inline void blk_unfreeze_release_lock(struct request_queue *q)
{
}
#endif

#endif /* BLK_INTERNAL_H */