/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/bio-integrity.h>
#include <linux/blk-crypto.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <linux/sched/sysctl.h>
#include <linux/timekeeping.h>
#include <xen/xen.h>
#include "blk-crypto-internal.h"

struct elevator_type;

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern struct dentry *blk_debugfs_root;

struct blk_flush_queue {
	spinlock_t		mq_flush_lock;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	unsigned long		flush_data_in_flight;
	struct request		*flush_rq;
};

bool is_flush_rq(struct request *req);

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
bool blk_queue_start_drain(struct request_queue *q);
bool __blk_freeze_queue_start(struct request_queue *q,
			      struct task_struct *owner);
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
void submit_bio_noacct_nocheck(struct bio *bio);
void bio_await_chain(struct bio *bio);

static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
{
	rcu_read_lock();
	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
		goto fail;

	/*
	 * The code that increments the pm_only counter must ensure that the
	 * counter is globally visible before the queue is unfrozen.
	 */
	if (blk_queue_pm_only(q) &&
	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
		goto fail_put;

	rcu_read_unlock();
	return true;

fail_put:
	blk_queue_exit(q);
fail:
	rcu_read_unlock();
	return false;
}
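
/*
 * Illustrative sketch only, not part of this header: a successful
 * blk_try_enter_queue() must be balanced by blk_queue_exit(), as the failure
 * path above does.  The helper name example_enter_then_exit() is
 * hypothetical.
 */
static inline bool example_enter_then_exit(struct request_queue *q)
{
	if (!blk_try_enter_queue(q, false))
		return false;	/* queue frozen, dying or runtime suspended */
	/* ... it is now safe to issue work against @q ... */
	blk_queue_exit(q);
	return true;
}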

static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);

	if (blk_try_enter_queue(q, false)) {
		rwsem_acquire_read(&q->io_lockdep_map, 0, 0, _RET_IP_);
		rwsem_release(&q->io_lockdep_map, _RET_IP_);
		return 0;
	}
	return __bio_queue_enter(q, bio);
}

static inline void blk_wait_io(struct completion *done)
{
	/* Prevent hang_check timer from firing at us during very long I/O */
	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

	if (timeout)
		while (!wait_for_completion_io_timeout(done, timeout))
			;
	else
		wait_for_completion_io(done);
}

#define BIO_INLINE_VECS 4
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask);
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);

bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
		struct page *page, unsigned len, unsigned offset,
		bool *same_page);

static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = bvec_phys(vec1);
	phys_addr_t addr2 = bvec_phys(vec2);

	/*
	 * Merging adjacent physical pages may not work correctly under KMSAN
	 * if their metadata pages aren't adjacent. Just disable merging.
	 */
	if (IS_ENABLED(CONFIG_KMSAN))
		return false;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}

static inline bool __bvec_gap_to_prev(const struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & lim->virt_boundary_mask) ||
		((bprv->bv_offset + bprv->bv_len) & lim->virt_boundary_mask);
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(const struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!lim->virt_boundary_mask)
		return false;
	return __bvec_gap_to_prev(lim, bprv, offset);
}

static inline bool rq_mergeable(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (req_op(rq) == REQ_OP_ZONE_APPEND)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}

/*
 * There are two different ways to handle DISCARD merges:
 *  1) If max_discard_segments > 1, the driver treats every bio as a range and
 *     sends the bios to the controller together. The ranges don't need to be
 *     contiguous.
 *  2) Otherwise, the request is treated like a normal read/write request, so
 *     the ranges need to be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}

static inline unsigned int blk_rq_get_max_segments(struct request *rq)
{
	if (req_op(rq) == REQ_OP_DISCARD)
		return queue_max_discard_segments(rq->q);
	return queue_max_segments(rq->q);
}

static inline unsigned int blk_queue_get_max_sectors(struct request *rq)
{
	struct request_queue *q = rq->q;
	enum req_op op = req_op(rq);

	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_discard_sectors,
			   UINT_MAX >> SECTOR_SHIFT);

	if (unlikely(op == REQ_OP_WRITE_ZEROES))
		return q->limits.max_write_zeroes_sectors;

	if (rq->cmd_flags & REQ_ATOMIC)
		return q->limits.atomic_write_max_sectors;

	return q->limits.max_sectors;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
void bio_integrity_free(struct bio *bio);

/*
 * Integrity payloads can either be owned by the submitter, in which case
 * bio_uninit will free them, or owned and generated by the block layer,
 * in which case we'll verify them here (for reads) and free them before
 * the bio is handed back to the submitter.
 */
bool __bio_integrity_endio(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip && (bip->bip_flags & BIP_BLOCK_INTEGRITY))
		return __bio_integrity_endio(bio);
	return true;
}

bool blk_integrity_merge_rq(struct request_queue *, struct request *,
		struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
		struct bio *);

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

extern const struct attribute_group blk_integrity_attr_group;
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
		struct request *r1, struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
		struct request *r, struct bio *b)
{
	return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);

enum bio_merge_status {
	BIO_MERGE_OK,
	BIO_MERGE_NONE,
	BIO_MERGE_FAILED,
};

enum bio_merge_status bio_attempt_back_merge(struct request *req,
		struct bio *bio, unsigned int nr_segs);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
			struct bio *bio, unsigned int nr_segs);

/*
 * Plug flush limits
 */
#define BLK_MAX_REQUEST_COUNT	32
#define BLK_PLUG_FLUSH_SIZE	(128 * 1024)

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

bool blk_insert_flush(struct request *rq);

int elevator_switch(struct request_queue *q, struct elevator_type *new_e);
void elevator_disable(struct request_queue *q);
void elevator_exit(struct request_queue *q);
int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q);

ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);

struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim,
		unsigned *nsegs);
struct bio *bio_split_write_zeroes(struct bio *bio,
		const struct queue_limits *lim, unsigned *nsegs);
struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
		unsigned *nr_segs);
struct bio *bio_split_zone_append(struct bio *bio,
		const struct queue_limits *lim, unsigned *nr_segs);

/*
 * All drivers must accept single-segment bios that are smaller than PAGE_SIZE.
 *
 * This is a quick and dirty check that relies on the fact that bi_io_vec[0] is
 * always valid if a bio has data.  The check might lead to occasional false
 * positives when bios are cloned, but compared to the performance impact of
 * cloned bios themselves the check below doesn't matter anyway.
 */
static inline bool bio_may_need_split(struct bio *bio,
		const struct queue_limits *lim)
{
	return lim->chunk_sectors || bio->bi_vcnt != 1 ||
		bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
}

/**
 * __bio_split_to_limits - split a bio to fit the queue limits
 * @bio:     bio to be split
 * @lim:     queue limits to split based on
 * @nr_segs: returns the number of segments in the returned bio
 *
 * Check if @bio needs splitting based on the queue limits, and if so split off
 * a bio fitting the limits from the beginning of @bio and return it.  @bio is
 * shortened to the remainder and re-submitted.
 *
 * The split bio is allocated from @q->bio_split, which is provided by the
 * block layer.
 */
static inline struct bio *__bio_split_to_limits(struct bio *bio,
		const struct queue_limits *lim, unsigned int *nr_segs)
{
	switch (bio_op(bio)) {
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		if (bio_may_need_split(bio, lim))
			return bio_split_rw(bio, lim, nr_segs);
		*nr_segs = 1;
		return bio;
	case REQ_OP_ZONE_APPEND:
		return bio_split_zone_append(bio, lim, nr_segs);
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		return bio_split_discard(bio, lim, nr_segs);
	case REQ_OP_WRITE_ZEROES:
		return bio_split_write_zeroes(bio, lim, nr_segs);
	default:
		/* other operations can't be split */
		*nr_segs = 0;
		return bio;
	}
}
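
/*
 * Illustrative sketch only, not part of this header: a submission-path caller
 * (the real ones live in blk-merge.c and blk-mq.c) splits a bio against its
 * queue's limits before building a request from it.  The helper name
 * example_split_bio() is hypothetical.
 */
static inline struct bio *example_split_bio(struct request_queue *q,
		struct bio *bio, unsigned int *nr_segs)
{
	/* May return a shorter bio split off the front; @bio keeps the rest. */
	return __bio_split_to_limits(bio, &q->limits, nr_segs);
}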

int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_set_default_limits(struct queue_limits *lim);
void blk_apply_bdi_limits(struct backing_dev_info *bdi,
		struct queue_limits *lim);
int blk_dev_init(void);

void update_io_ticks(struct block_device *part, unsigned long now, bool end);
unsigned int part_in_flight(struct block_device *part);

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * Internal io_context interface
 */
struct io_cq *ioc_find_get_icq(struct request_queue *q);
struct io_cq *ioc_lookup_icq(struct request_queue *q);
#ifdef CONFIG_BLK_ICQ
void ioc_clear_queue(struct request_queue *q);
#else
static inline void ioc_clear_queue(struct request_queue *q)
{
}
#endif /* CONFIG_BLK_ICQ */

struct bio *__blk_queue_bounce(struct bio *bio, struct request_queue *q);

static inline bool blk_queue_may_bounce(struct request_queue *q)
{
	return IS_ENABLED(CONFIG_BOUNCE) &&
		(q->limits.features & BLK_FEAT_BOUNCE_HIGH) &&
		max_low_pfn >= max_pfn;
}

static inline struct bio *blk_queue_bounce(struct bio *bio,
		struct request_queue *q)
{
	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(bio)))
		return __blk_queue_bounce(bio, q);
	return bio;
}

#ifdef CONFIG_BLK_DEV_ZONED
void disk_init_zone_resources(struct gendisk *disk);
void disk_free_zone_resources(struct gendisk *disk);
static inline bool bio_zone_write_plugging(struct bio *bio)
{
	return bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING);
}
void blk_zone_write_plug_bio_merged(struct bio *bio);
void blk_zone_write_plug_init_request(struct request *rq);
static inline void blk_zone_update_request_bio(struct request *rq,
					       struct bio *bio)
{
	/*
	 * For zone append requests, the request sector indicates the location
	 * at which the BIO data was written. Return this value to the BIO
	 * issuer through the BIO iter sector.
	 * For plugged zone writes, which include emulated zone append, we need
	 * the original BIO sector so that blk_zone_write_plug_bio_endio() can
	 * lookup the zone write plug.
	 */
	if (req_op(rq) == REQ_OP_ZONE_APPEND || bio_zone_write_plugging(bio))
		bio->bi_iter.bi_sector = rq->__sector;
}
void blk_zone_write_plug_bio_endio(struct bio *bio);
static inline void blk_zone_bio_endio(struct bio *bio)
{
	/*
	 * For write BIOs to zoned devices, signal the completion of the BIO so
	 * that the next write BIO can be submitted by zone write plugging.
	 */
	if (bio_zone_write_plugging(bio))
		blk_zone_write_plug_bio_endio(bio);
}

void blk_zone_write_plug_finish_request(struct request *rq);
static inline void blk_zone_finish_request(struct request *rq)
{
	if (rq->rq_flags & RQF_ZONE_WRITE_PLUGGING)
		blk_zone_write_plug_finish_request(rq);
}
int blkdev_report_zones_ioctl(struct block_device *bdev, unsigned int cmd,
		unsigned long arg);
int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned int cmd, unsigned long arg);
#else /* CONFIG_BLK_DEV_ZONED */
static inline void disk_init_zone_resources(struct gendisk *disk)
{
}
static inline void disk_free_zone_resources(struct gendisk *disk)
{
}
static inline bool bio_zone_write_plugging(struct bio *bio)
{
	return false;
}
static inline void blk_zone_write_plug_bio_merged(struct bio *bio)
{
}
static inline void blk_zone_write_plug_init_request(struct request *rq)
{
}
static inline void blk_zone_update_request_bio(struct request *rq,
					       struct bio *bio)
{
}
static inline void blk_zone_bio_endio(struct bio *bio)
{
}
static inline void blk_zone_finish_request(struct request *rq)
{
}
static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
		unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}
static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
		blk_mode_t mode, unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}
#endif /* CONFIG_BLK_DEV_ZONED */

struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
void bdev_add(struct block_device *bdev, dev_t dev);
void bdev_unhash(struct block_device *bdev);
void bdev_drop(struct block_device *bdev);

int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
#define ADDPART_FLAG_READONLY	4
int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
int bdev_del_partition(struct gendisk *disk, int partno);
int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
void drop_partition(struct block_device *part);

void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors);

struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
		struct lock_class_key *lkclass);

int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page);

int bio_add_hw_folio(struct request_queue *q, struct bio *bio,
		struct folio *folio, size_t len, size_t offset,
		unsigned int max_sectors, bool *same_page);

/*
 * Clean up a page appropriately, where the page may be pinned, may have a
 * ref taken on it or neither.
 */
static inline void bio_release_page(struct bio *bio, struct page *page)
{
	if (bio_flagged(bio, BIO_PAGE_PINNED))
		unpin_user_page(page);
}

struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id);

int disk_scan_partitions(struct gendisk *disk, blk_mode_t mode);

int disk_alloc_events(struct gendisk *disk);
void disk_add_events(struct gendisk *disk);
void disk_del_events(struct gendisk *disk);
void disk_release_events(struct gendisk *disk);
void disk_block_events(struct gendisk *disk);
void disk_unblock_events(struct gendisk *disk);
void disk_flush_events(struct gendisk *disk, unsigned int mask);
extern struct device_attribute dev_attr_events;
extern struct device_attribute dev_attr_events_async;
extern struct device_attribute dev_attr_events_poll_msecs;

extern struct attribute_group blk_trace_attr_group;

blk_mode_t file_to_blk_mode(struct file *file);
int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode,
		loff_t lstart, loff_t lend);
long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
int blkdev_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags);
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);

extern const struct address_space_operations def_blk_aops;

int disk_register_independent_access_ranges(struct gendisk *disk);
void disk_unregister_independent_access_ranges(struct gendisk *disk);

#ifdef CONFIG_FAIL_MAKE_REQUEST
bool should_fail_request(struct block_device *part, unsigned int bytes);
#else /* CONFIG_FAIL_MAKE_REQUEST */
static inline bool should_fail_request(struct block_device *part,
					unsigned int bytes)
{
	return false;
}
#endif /* CONFIG_FAIL_MAKE_REQUEST */

/*
 * Optimized request reference counting. Ideally we'd make timeouts be more
 * clever, as that's the only reason we need references at all... But until
 * this happens, this is faster than using refcount_t. Also see:
 *
 * abc54d634334 ("io_uring: switch to atomic_t for io_kiocb reference count")
 */
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req->ref)) + 127u <= 127u)

static inline bool req_ref_inc_not_zero(struct request *req)
{
	return atomic_inc_not_zero(&req->ref);
}

static inline bool req_ref_put_and_test(struct request *req)
{
	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	return atomic_dec_and_test(&req->ref);
}

static inline void req_ref_set(struct request *req, int value)
{
	atomic_set(&req->ref, value);
}

static inline int req_ref_read(struct request *req)
{
	return atomic_read(&req->ref);
}
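
/*
 * Illustrative sketch only, not part of this header: the get/put pattern the
 * helpers above support, e.g. taking a temporary reference so a request
 * cannot be freed while it is being inspected.  The helper name
 * example_req_ref_pattern() is hypothetical.
 */
static inline bool example_req_ref_pattern(struct request *rq)
{
	if (!req_ref_inc_not_zero(rq))
		return false;	/* request already released, don't touch it */
	/* ... @rq cannot go away here ... */
	return req_ref_put_and_test(rq);	/* true: we dropped the last ref */
}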

static inline u64 blk_time_get_ns(void)
{
	struct blk_plug *plug = current->plug;

	if (!plug || !in_task())
		return ktime_get_ns();

	/*
	 * 0 could very well be a valid time, but rather than flag "this is
	 * a valid timestamp" separately, just accept that we'll do an extra
	 * ktime_get_ns() if we just happen to get 0 as the current time.
	 */
	if (!plug->cur_ktime) {
		plug->cur_ktime = ktime_get_ns();
		current->flags |= PF_BLOCK_TS;
	}
	return plug->cur_ktime;
}

static inline ktime_t blk_time_get(void)
{
	return ns_to_ktime(blk_time_get_ns());
}
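
/*
 * Illustrative sketch only, not part of this header: while a task has an
 * active plug, repeated blk_time_get_ns() calls return the cached plug
 * timestamp, so coarse elapsed-time accounting avoids extra ktime_get_ns()
 * calls.  The helper name example_elapsed_ns() is hypothetical.
 */
static inline u64 example_elapsed_ns(u64 start_ns)
{
	/*
	 * If @start_ns was read under the same active plug, this returns 0
	 * because the cached timestamp has not advanced.
	 */
	return blk_time_get_ns() - start_ns;
}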

/*
 * From most significant bit:
 * 1 bit: reserved for other usage, see below
 * 12 bits: original size of bio
 * 51 bits: issue time of bio
 */
#define BIO_ISSUE_RES_BITS      1
#define BIO_ISSUE_SIZE_BITS     12
#define BIO_ISSUE_RES_SHIFT     (64 - BIO_ISSUE_RES_BITS)
#define BIO_ISSUE_SIZE_SHIFT    (BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
#define BIO_ISSUE_TIME_MASK     ((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
#define BIO_ISSUE_SIZE_MASK     \
	(((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
#define BIO_ISSUE_RES_MASK      (~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))

/* Reserved bit for blk-throtl */
#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)

static inline u64 __bio_issue_time(u64 time)
{
	return time & BIO_ISSUE_TIME_MASK;
}

static inline u64 bio_issue_time(struct bio_issue *issue)
{
	return __bio_issue_time(issue->value);
}

static inline sector_t bio_issue_size(struct bio_issue *issue)
{
	return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
}

static inline void bio_issue_init(struct bio_issue *issue,
				       sector_t size)
{
	size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
	issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
			(blk_time_get_ns() & BIO_ISSUE_TIME_MASK) |
			((u64)size << BIO_ISSUE_SIZE_SHIFT));
}
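
/*
 * Illustrative worked example only, not part of this header: after
 * bio_issue_init(&issue, 8), bits 0..50 hold the truncated issue time,
 * bits 51..62 hold the size (8), and bit 63 stays reserved for flags such as
 * BIO_ISSUE_THROTL_SKIP_LATENCY.  The helper name
 * example_bio_issue_roundtrip() is hypothetical.
 */
static inline bool example_bio_issue_roundtrip(struct bio_issue *issue)
{
	bio_issue_init(issue, 8);
	/* The size round-trips; the time is truncated to its low 51 bits. */
	return bio_issue_size(issue) == 8;
}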

void bdev_release(struct file *bdev_file);
int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
	      const struct blk_holder_ops *hops, struct file *bdev_file);
int bdev_permission(dev_t dev, blk_mode_t mode, void *holder);

void blk_integrity_generate(struct bio *bio);
void blk_integrity_verify(struct bio *bio);
void blk_integrity_prepare(struct request *rq);
void blk_integrity_complete(struct request *rq, unsigned int nr_bytes);

static inline void blk_freeze_acquire_lock(struct request_queue *q,
		bool disk_dead, bool queue_dying)
{
	if (!disk_dead)
		rwsem_acquire(&q->io_lockdep_map, 0, 1, _RET_IP_);
	if (!queue_dying)
		rwsem_acquire(&q->q_lockdep_map, 0, 1, _RET_IP_);
}

static inline void blk_unfreeze_release_lock(struct request_queue *q,
		bool disk_dead, bool queue_dying)
{
	if (!queue_dying)
		rwsem_release(&q->q_lockdep_map, _RET_IP_);
	if (!disk_dead)
		rwsem_release(&q->io_lockdep_map, _RET_IP_);
}

#endif /* BLK_INTERNAL_H */