/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/bio-integrity.h>
#include <linux/blk-crypto.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <linux/sched/sysctl.h>
#include <linux/timekeeping.h>
#include <xen/xen.h>
#include "blk-crypto-internal.h"

struct elevator_type;

/*
 * Default upper limit for the software max_sectors limit used for regular I/Os.
 * This can be increased through sysfs.
 *
 * This should not be confused with the max_hw_sectors limit that is entirely
 * controlled by the block device driver, usually based on hardware limits.
 */
#define BLK_DEF_MAX_SECTORS_CAP	(SZ_4M >> SECTOR_SHIFT)
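
/*
 * For reference: with SECTOR_SHIFT == 9, BLK_DEF_MAX_SECTORS_CAP above works
 * out to SZ_4M >> 9 == 8192 sectors, i.e. a 4 MiB default cap.
 */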

#define	BLK_DEV_MAX_SECTORS	(LLONG_MAX >> 9)
#define	BLK_MIN_SEGMENT_SIZE	4096

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern struct dentry *blk_debugfs_root;

struct blk_flush_queue {
	spinlock_t		mq_flush_lock;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	unsigned long		flush_data_in_flight;
	struct request		*flush_rq;
};

bool is_flush_rq(struct request *req);

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
bool blk_queue_start_drain(struct request_queue *q);
bool __blk_freeze_queue_start(struct request_queue *q,
			      struct task_struct *owner);
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
void submit_bio_noacct_nocheck(struct bio *bio);
void bio_await_chain(struct bio *bio);

static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
{
	rcu_read_lock();
	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
		goto fail;

	/*
	 * The code that increments the pm_only counter must ensure that the
	 * counter is globally visible before the queue is unfrozen.
	 */
	if (blk_queue_pm_only(q) &&
	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
		goto fail_put;

	rcu_read_unlock();
	return true;

fail_put:
	blk_queue_exit(q);
fail:
	rcu_read_unlock();
	return false;
}
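
/*
 * Illustrative sketch only (not a call site in this file): a successful
 * blk_try_enter_queue() must be balanced by blk_queue_exit() once the caller
 * is done touching the queue, e.g.
 *
 *	if (blk_try_enter_queue(q, false)) {
 *		...use q...
 *		blk_queue_exit(q);
 *	}
 *
 * Bios that fail this fast path fall back to __bio_queue_enter().
 */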

static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);

	if (blk_try_enter_queue(q, false)) {
		rwsem_acquire_read(&q->io_lockdep_map, 0, 0, _RET_IP_);
		rwsem_release(&q->io_lockdep_map, _RET_IP_);
		return 0;
	}
	return __bio_queue_enter(q, bio);
}

static inline void blk_wait_io(struct completion *done)
{
	/* Prevent hang_check timer from firing at us during very long I/O */
	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

	if (timeout)
		while (!wait_for_completion_io_timeout(done, timeout))
			;
	else
		wait_for_completion_io(done);
}
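
/*
 * Example (for illustration): with sysctl_hung_task_timeout_secs == 120 the
 * wait above is chopped into 60 s slices, so the hung task detector never
 * observes a single uninterrupted sleep as long as its own timeout while the
 * I/O is still outstanding.
 */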

struct block_device *blkdev_get_no_open(dev_t dev, bool autoload);
void blkdev_put_no_open(struct block_device *bdev);

#define BIO_INLINE_VECS 4
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask);
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);

bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
		struct page *page, unsigned len, unsigned offset);

static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = bvec_phys(vec1);
	phys_addr_t addr2 = bvec_phys(vec2);

	/*
	 * Merging adjacent physical pages may not work correctly under KMSAN
	 * if their metadata pages aren't adjacent. Just disable merging.
	 */
	if (IS_ENABLED(CONFIG_KMSAN))
		return false;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}
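
/*
 * Worked example (illustrative numbers): with a 64K segment boundary the
 * mask is 0xffff.  Two physically contiguous vecs, vec1 at 0x1f000 with
 * bv_len 0x1000 and vec2 at 0x20000 with bv_len 0x1000, pass the adjacency
 * check but fail the boundary check: 0x1f000 | 0xffff == 0x1ffff while
 * 0x20fff | 0xffff == 0x2ffff, i.e. the merged segment would straddle the
 * 64K boundary, so the vecs are not merged.
 */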

static inline bool __bvec_gap_to_prev(const struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & lim->virt_boundary_mask) ||
		((bprv->bv_offset + bprv->bv_len) & lim->virt_boundary_mask);
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(const struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!lim->virt_boundary_mask)
		return false;
	return __bvec_gap_to_prev(lim, bprv, offset);
}
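
/*
 * Example (illustrative): with a 4K virt_boundary_mask (0xfff), a previous
 * vec ending at bv_offset + bv_len == 0x1200 followed by a new vec starting
 * at offset 0 reports a gap, because such hardware requires every element in
 * the middle of an SG list to start and end on the virtual boundary.
 */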

static inline bool rq_mergeable(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (req_op(rq) == REQ_OP_ZONE_APPEND)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}

/*
 * There are two different ways to handle DISCARD merges:
 *  1) If max_discard_segments > 1, the driver treats every bio as a range
 *     and sends the bios to the controller together. The ranges don't need
 *     to be contiguous.
 *  2) Otherwise, the bios are merged into normal read/write style requests,
 *     so the ranges need to be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}
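
/*
 * Example (illustrative, not from this file): a controller advertising
 * max_discard_segments > 1 (NVMe-style multi-range deallocate, for instance)
 * can accept one merged DISCARD covering sectors 0-7 and 1024-1031 as two
 * ranges in a single command; with max_discard_segments == 1, only
 * contiguous discard ranges are merged.
 */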

static inline unsigned int blk_rq_get_max_segments(struct request *rq)
{
	if (req_op(rq) == REQ_OP_DISCARD)
		return queue_max_discard_segments(rq->q);
	return queue_max_segments(rq->q);
}

static inline unsigned int blk_queue_get_max_sectors(struct request *rq)
{
	struct request_queue *q = rq->q;
	enum req_op op = req_op(rq);

	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_discard_sectors,
			   UINT_MAX >> SECTOR_SHIFT);

	if (unlikely(op == REQ_OP_WRITE_ZEROES))
		return q->limits.max_write_zeroes_sectors;

	if (rq->cmd_flags & REQ_ATOMIC)
		return q->limits.atomic_write_max_sectors;

	return q->limits.max_sectors;
}
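
/*
 * For reference: UINT_MAX >> SECTOR_SHIFT caps discard/secure-erase requests
 * at 0x7fffff sectors (just under 4 GiB), which keeps the request size in
 * bytes representable in an unsigned int.
 */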

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
void bio_integrity_free(struct bio *bio);

/*
 * Integrity payloads can either be owned by the submitter, in which case
 * bio_uninit will free them, or owned and generated by the block layer,
 * in which case we'll verify them here (for reads) and free them before
 * the bio is handed back to the submitter.
 */
bool __bio_integrity_endio(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip && (bip->bip_flags & BIP_BLOCK_INTEGRITY))
		return __bio_integrity_endio(bio);
	return true;
}

bool blk_integrity_merge_rq(struct request_queue *, struct request *,
		struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
		struct bio *);

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

extern const struct attribute_group blk_integrity_attr_group;
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
		struct request *r1, struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
		struct request *r, struct bio *b)
{
	return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);

enum bio_merge_status {
	BIO_MERGE_OK,
	BIO_MERGE_NONE,
	BIO_MERGE_FAILED,
};

enum bio_merge_status bio_attempt_back_merge(struct request *req,
		struct bio *bio, unsigned int nr_segs);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
			struct bio *bio, unsigned int nr_segs);

/*
 * Plug flush limits
 */
#define BLK_MAX_REQUEST_COUNT	32
#define BLK_PLUG_FLUSH_SIZE	(128 * 1024)

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

bool blk_insert_flush(struct request *rq);

void elv_update_nr_hw_queues(struct request_queue *q, struct elevator_type *e);
void elevator_set_default(struct request_queue *q);
void elevator_set_none(struct request_queue *q);

ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);

struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim,
		unsigned *nsegs);
struct bio *bio_split_write_zeroes(struct bio *bio,
		const struct queue_limits *lim, unsigned *nsegs);
struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
		unsigned *nr_segs);
struct bio *bio_split_zone_append(struct bio *bio,
		const struct queue_limits *lim, unsigned *nr_segs);

/*
 * All drivers must accept single-segment bios that are smaller than PAGE_SIZE.
 *
 * This is a quick and dirty check that relies on the fact that bi_io_vec[0] is
 * always valid if a bio has data.  The check might lead to occasional false
 * positives when bios are cloned, but compared to the performance impact of
 * cloned bios themselves the loop below doesn't matter anyway.
 */
static inline bool bio_may_need_split(struct bio *bio,
		const struct queue_limits *lim)
{
	if (lim->chunk_sectors)
		return true;
	if (bio->bi_vcnt != 1)
		return true;
	return bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset >
		lim->min_segment_size;
}

/**
 * __bio_split_to_limits - split a bio to fit the queue limits
 * @bio:     bio to be split
 * @lim:     queue limits to split based on
 * @nr_segs: returns the number of segments in the returned bio
 *
 * Check if @bio needs splitting based on the queue limits, and if so split off
 * a bio fitting the limits from the beginning of @bio and return it.  @bio is
 * shortened to the remainder and re-submitted.
 *
 * The split bio is allocated from @q->bio_split, which is provided by the
 * block layer.
 */
static inline struct bio *__bio_split_to_limits(struct bio *bio,
		const struct queue_limits *lim, unsigned int *nr_segs)
{
	switch (bio_op(bio)) {
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		if (bio_may_need_split(bio, lim))
			return bio_split_rw(bio, lim, nr_segs);
		*nr_segs = 1;
		return bio;
	case REQ_OP_ZONE_APPEND:
		return bio_split_zone_append(bio, lim, nr_segs);
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		return bio_split_discard(bio, lim, nr_segs);
	case REQ_OP_WRITE_ZEROES:
		return bio_split_write_zeroes(bio, lim, nr_segs);
	default:
		/* other operations can't be split */
		*nr_segs = 0;
		return bio;
	}
}
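
/*
 * Illustrative caller sketch (assumption: the real submission-path callers
 * live elsewhere in the block layer, and the split helpers complete the bio
 * and return NULL on failure):
 *
 *	unsigned int nr_segs;
 *
 *	bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
 *	if (!bio)
 *		return;		// bio was already completed with an error
 *	// continue submitting the (possibly shortened) bio using nr_segs
 */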

/**
 * get_max_segment_size() - maximum number of bytes to add as a single segment
 * @lim: Request queue limits.
 * @paddr: address of the range to add
 * @len: maximum length available to add at @paddr
 *
 * Returns the maximum number of bytes of the range starting at @paddr that can
 * be added to a single segment.
 */
static inline unsigned get_max_segment_size(const struct queue_limits *lim,
		phys_addr_t paddr, unsigned int len)
{
	/*
	 * Prevent an overflow if mask = ULONG_MAX and offset = 0 by adding 1
	 * after having calculated the minimum.
	 */
	return min_t(unsigned long, len,
		min(lim->seg_boundary_mask - (lim->seg_boundary_mask & paddr),
		    (unsigned long)lim->max_segment_size - 1) + 1);
}
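
/*
 * Worked example (illustrative numbers): seg_boundary_mask == 0xffff (64K
 * boundary), max_segment_size == 0x10000, paddr == 0x12340, len == 0x20000.
 * mask & paddr == 0x2340, so the boundary term is 0xffff - 0x2340 == 0xdcbf;
 * min(0xdcbf, 0xffff) + 1 == 0xdcc0, exactly the number of bytes up to the
 * next 64K boundary, and min(len, 0xdcc0) == 0xdcc0 is returned.
 */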

int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_set_default_limits(struct queue_limits *lim);
void blk_apply_bdi_limits(struct backing_dev_info *bdi,
		struct queue_limits *lim);
int blk_dev_init(void);

void update_io_ticks(struct block_device *part, unsigned long now, bool end);

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * Internal io_context interface
 */
struct io_cq *ioc_find_get_icq(struct request_queue *q);
struct io_cq *ioc_lookup_icq(struct request_queue *q);
#ifdef CONFIG_BLK_ICQ
void ioc_clear_queue(struct request_queue *q);
#else
static inline void ioc_clear_queue(struct request_queue *q)
{
}
#endif /* CONFIG_BLK_ICQ */

#ifdef CONFIG_BLK_DEV_ZONED
void disk_init_zone_resources(struct gendisk *disk);
void disk_free_zone_resources(struct gendisk *disk);
static inline bool bio_zone_write_plugging(struct bio *bio)
{
	return bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING);
}
static inline bool blk_req_bio_is_zone_append(struct request *rq,
					      struct bio *bio)
{
	return req_op(rq) == REQ_OP_ZONE_APPEND ||
	       bio_flagged(bio, BIO_EMULATES_ZONE_APPEND);
}
void blk_zone_write_plug_bio_merged(struct bio *bio);
void blk_zone_write_plug_init_request(struct request *rq);
void blk_zone_append_update_request_bio(struct request *rq, struct bio *bio);
void blk_zone_write_plug_bio_endio(struct bio *bio);
static inline void blk_zone_bio_endio(struct bio *bio)
{
	/*
	 * For write BIOs to zoned devices, signal the completion of the BIO so
	 * that the next write BIO can be submitted by zone write plugging.
	 */
	if (bio_zone_write_plugging(bio))
		blk_zone_write_plug_bio_endio(bio);
}

void blk_zone_write_plug_finish_request(struct request *rq);
static inline void blk_zone_finish_request(struct request *rq)
{
	if (rq->rq_flags & RQF_ZONE_WRITE_PLUGGING)
		blk_zone_write_plug_finish_request(rq);
}
int blkdev_report_zones_ioctl(struct block_device *bdev, unsigned int cmd,
		unsigned long arg);
int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned int cmd, unsigned long arg);
#else /* CONFIG_BLK_DEV_ZONED */
static inline void disk_init_zone_resources(struct gendisk *disk)
{
}
static inline void disk_free_zone_resources(struct gendisk *disk)
{
}
static inline bool bio_zone_write_plugging(struct bio *bio)
{
	return false;
}
static inline bool blk_req_bio_is_zone_append(struct request *req,
					      struct bio *bio)
{
	return false;
}
static inline void blk_zone_write_plug_bio_merged(struct bio *bio)
{
}
static inline void blk_zone_write_plug_init_request(struct request *rq)
{
}
static inline void blk_zone_append_update_request_bio(struct request *rq,
						      struct bio *bio)
{
}
static inline void blk_zone_bio_endio(struct bio *bio)
{
}
static inline void blk_zone_finish_request(struct request *rq)
{
}
static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
		unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}
static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
		blk_mode_t mode, unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}
#endif /* CONFIG_BLK_DEV_ZONED */

struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
void bdev_add(struct block_device *bdev, dev_t dev);
void bdev_unhash(struct block_device *bdev);
void bdev_drop(struct block_device *bdev);

int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
#define ADDPART_FLAG_READONLY	4
int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
int bdev_del_partition(struct gendisk *disk, int partno);
int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
void drop_partition(struct block_device *part);

void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors);

struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
		struct lock_class_key *lkclass);

/*
 * Clean up a page appropriately, where the page may be pinned, may have a
 * ref taken on it or neither.
 */
static inline void bio_release_page(struct bio *bio, struct page *page)
{
	if (bio_flagged(bio, BIO_PAGE_PINNED))
		unpin_user_page(page);
}

struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id);

int disk_scan_partitions(struct gendisk *disk, blk_mode_t mode);

int disk_alloc_events(struct gendisk *disk);
void disk_add_events(struct gendisk *disk);
void disk_del_events(struct gendisk *disk);
void disk_release_events(struct gendisk *disk);
void disk_block_events(struct gendisk *disk);
void disk_unblock_events(struct gendisk *disk);
void disk_flush_events(struct gendisk *disk, unsigned int mask);
extern struct device_attribute dev_attr_events;
extern struct device_attribute dev_attr_events_async;
extern struct device_attribute dev_attr_events_poll_msecs;

extern struct attribute_group blk_trace_attr_group;

blk_mode_t file_to_blk_mode(struct file *file);
int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode,
		loff_t lstart, loff_t lend);
long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
int blkdev_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags);
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);

extern const struct address_space_operations def_blk_aops;

int disk_register_independent_access_ranges(struct gendisk *disk);
void disk_unregister_independent_access_ranges(struct gendisk *disk);

#ifdef CONFIG_FAIL_MAKE_REQUEST
bool should_fail_request(struct block_device *part, unsigned int bytes);
#else /* CONFIG_FAIL_MAKE_REQUEST */
static inline bool should_fail_request(struct block_device *part,
					unsigned int bytes)
{
	return false;
}
#endif /* CONFIG_FAIL_MAKE_REQUEST */

/*
 * Optimized request reference counting. Ideally we'd make timeouts be more
 * clever, as that's the only reason we need references at all... But until
 * this happens, this is faster than using refcount_t. Also see:
 *
 * abc54d634334 ("io_uring: switch to atomic_t for io_kiocb reference count")
 */
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req->ref)) + 127u <= 127u)

static inline bool req_ref_inc_not_zero(struct request *req)
{
	return atomic_inc_not_zero(&req->ref);
}

static inline bool req_ref_put_and_test(struct request *req)
{
	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	return atomic_dec_and_test(&req->ref);
}

static inline void req_ref_set(struct request *req, int value)
{
	atomic_set(&req->ref, value);
}

static inline int req_ref_read(struct request *req)
{
	return atomic_read(&req->ref);
}
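
/*
 * Illustrative pairing (sketch only, not a call site in this header): the
 * timeout path takes a reference so the request cannot be freed underneath
 * it, then drops it again:
 *
 *	if (req_ref_inc_not_zero(rq)) {
 *		...inspect rq safely...
 *		if (req_ref_put_and_test(rq))
 *			...release the request (last reference dropped)...
 *	}
 */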

static inline u64 blk_time_get_ns(void)
{
	struct blk_plug *plug = current->plug;

	if (!plug || !in_task())
		return ktime_get_ns();

	/*
	 * 0 could very well be a valid time, but rather than flag "this is
	 * a valid timestamp" separately, just accept that we'll do an extra
	 * ktime_get_ns() if we just happen to get 0 as the current time.
	 */
	if (!plug->cur_ktime) {
		plug->cur_ktime = ktime_get_ns();
		current->flags |= PF_BLOCK_TS;
	}
	return plug->cur_ktime;
}

static inline ktime_t blk_time_get(void)
{
	return ns_to_ktime(blk_time_get_ns());
}
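
/*
 * Illustrative behaviour (sketch, assuming a plugged task context):
 *
 *	blk_start_plug(&plug);
 *	t1 = blk_time_get_ns();
 *	t2 = blk_time_get_ns();	// t2 == t1, served from plug->cur_ktime
 *	blk_finish_plug(&plug);
 *
 * The cached value is invalidated again when the plug is flushed (or the
 * task schedules) and PF_BLOCK_TS is cleared, so a later call re-reads
 * ktime_get_ns().
 */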

/*
 * From most significant bit:
 * 1 bit: reserved for other usage, see below
 * 12 bits: original size of bio
 * 51 bits: issue time of bio
 */
#define BIO_ISSUE_RES_BITS      1
#define BIO_ISSUE_SIZE_BITS     12
#define BIO_ISSUE_RES_SHIFT     (64 - BIO_ISSUE_RES_BITS)
#define BIO_ISSUE_SIZE_SHIFT    (BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
#define BIO_ISSUE_TIME_MASK     ((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
#define BIO_ISSUE_SIZE_MASK     \
	(((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
#define BIO_ISSUE_RES_MASK      (~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))

/* Reserved bit for blk-throtl */
#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)

static inline u64 __bio_issue_time(u64 time)
{
	return time & BIO_ISSUE_TIME_MASK;
}

static inline u64 bio_issue_time(struct bio_issue *issue)
{
	return __bio_issue_time(issue->value);
}

static inline sector_t bio_issue_size(struct bio_issue *issue)
{
	return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
}

static inline void bio_issue_init(struct bio_issue *issue,
				       sector_t size)
{
	size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
	issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
			(blk_time_get_ns() & BIO_ISSUE_TIME_MASK) |
			((u64)size << BIO_ISSUE_SIZE_SHIFT));
}
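
/*
 * Layout check (for reference): 1 reserved bit + 12 size bits + 51 time bits
 * == 64, with BIO_ISSUE_SIZE_SHIFT == 51.  A size of 0x1234 sectors is
 * stored as (0x1234 & 0xfff) << 51, so bio_issue_size() later reports 0x234;
 * the size is deliberately truncated to 12 bits.
 */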

void bdev_release(struct file *bdev_file);
int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
	      const struct blk_holder_ops *hops, struct file *bdev_file);
int bdev_permission(dev_t dev, blk_mode_t mode, void *holder);

void blk_integrity_generate(struct bio *bio);
void blk_integrity_verify_iter(struct bio *bio, struct bvec_iter *saved_iter);
void blk_integrity_prepare(struct request *rq);
void blk_integrity_complete(struct request *rq, unsigned int nr_bytes);

#ifdef CONFIG_LOCKDEP
static inline void blk_freeze_acquire_lock(struct request_queue *q)
{
	if (!q->mq_freeze_disk_dead)
		rwsem_acquire(&q->io_lockdep_map, 0, 1, _RET_IP_);
	if (!q->mq_freeze_queue_dying)
		rwsem_acquire(&q->q_lockdep_map, 0, 1, _RET_IP_);
}

static inline void blk_unfreeze_release_lock(struct request_queue *q)
{
	if (!q->mq_freeze_queue_dying)
		rwsem_release(&q->q_lockdep_map, _RET_IP_);
	if (!q->mq_freeze_disk_dead)
		rwsem_release(&q->io_lockdep_map, _RET_IP_);
}
#else
static inline void blk_freeze_acquire_lock(struct request_queue *q)
{
}
static inline void blk_unfreeze_release_lock(struct request_queue *q)
{
}
#endif

#endif /* BLK_INTERNAL_H */