/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/bio-integrity.h>
#include <linux/blk-crypto.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <linux/sched/sysctl.h>
#include <linux/timekeeping.h>
#include <xen/xen.h>
#include "blk-crypto-internal.h"

struct elevator_type;

#define	BLK_DEV_MAX_SECTORS	(LLONG_MAX >> 9)
#define	BLK_MIN_SEGMENT_SIZE	4096

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern struct dentry *blk_debugfs_root;

struct blk_flush_queue {
	spinlock_t		mq_flush_lock;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	unsigned long		flush_data_in_flight;
	struct request		*flush_rq;
};

bool is_flush_rq(struct request *req);

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
bool blk_queue_start_drain(struct request_queue *q);
bool __blk_freeze_queue_start(struct request_queue *q,
			      struct task_struct *owner);
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
void submit_bio_noacct_nocheck(struct bio *bio);
void bio_await_chain(struct bio *bio);

static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
{
	rcu_read_lock();
	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
		goto fail;

	/*
	 * The code that increments the pm_only counter must ensure that the
	 * counter is globally visible before the queue is unfrozen.
	 */
	if (blk_queue_pm_only(q) &&
	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
		goto fail_put;

	rcu_read_unlock();
	return true;

fail_put:
	blk_queue_exit(q);
fail:
	rcu_read_unlock();
	return false;
}

static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);

	if (blk_try_enter_queue(q, false)) {
		rwsem_acquire_read(&q->io_lockdep_map, 0, 0, _RET_IP_);
		rwsem_release(&q->io_lockdep_map, _RET_IP_);
		return 0;
	}
	return __bio_queue_enter(q, bio);
}
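
/*
 * Example (illustrative sketch, not a helper defined here): a caller that
 * enters the queue directly must balance a successful bio_queue_enter()
 * with blk_queue_exit() on the same queue once it is done with it:
 *
 *	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 *
 *	if (bio_queue_enter(bio))
 *		return;			(bio already failed or was punted)
 *	... issue bio against q ...
 *	blk_queue_exit(q);
 */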

static inline void blk_wait_io(struct completion *done)
{
	/* Prevent hang_check timer from firing at us during very long I/O */
	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

	if (timeout)
		while (!wait_for_completion_io_timeout(done, timeout))
			;
	else
		wait_for_completion_io(done);
}
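
/*
 * Example (sketch only; bio_end_io_done is a hypothetical completion
 * handler that calls complete() on bi_private): synchronous submitters
 * use blk_wait_io() instead of a plain wait_for_completion_io() so that
 * very long waits keep feeding the hung task watchdog:
 *
 *	DECLARE_COMPLETION_ONSTACK(done);
 *
 *	bio->bi_private = &done;
 *	bio->bi_end_io = bio_end_io_done;
 *	submit_bio(bio);
 *	blk_wait_io(&done);
 */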

struct block_device *blkdev_get_no_open(dev_t dev, bool autoload);
void blkdev_put_no_open(struct block_device *bdev);

#define BIO_INLINE_VECS 4
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask);
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);

bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
		struct page *page, unsigned len, unsigned offset);

static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = bvec_phys(vec1);
	phys_addr_t addr2 = bvec_phys(vec2);

	/*
	 * Merging adjacent physical pages may not work correctly under KMSAN
	 * if their metadata pages aren't adjacent. Just disable merging.
	 */
	if (IS_ENABLED(CONFIG_KMSAN))
		return false;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}
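
/*
 * Worked example (assuming queue_segment_boundary(q) == 0xffff, i.e. 64K):
 * vec1 covering phys 0x10000..0x10fff and vec2 covering 0x11000..0x11fff
 * are contiguous and share one boundary window ((0x10000 | 0xffff) ==
 * (0x11fff | 0xffff)), so they may merge.  vec1 at 0xf000..0xffff and
 * vec2 at 0x10000..0x10fff are also contiguous, but the merged segment
 * would straddle the 64K boundary, so the last check rejects it.
 */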

static inline bool __bvec_gap_to_prev(const struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & lim->virt_boundary_mask) ||
		((bprv->bv_offset + bprv->bv_len) & lim->virt_boundary_mask);
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(const struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!lim->virt_boundary_mask)
		return false;
	return __bvec_gap_to_prev(lim, bprv, offset);
}
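
/*
 * Worked example (assuming an NVMe-style virt_boundary_mask of 0xfff):
 * a previous bvec ending at bv_offset + bv_len == 0x1000 followed by a
 * bvec at a 4K-aligned offset creates no gap.  A previous bvec with
 * bv_offset == 0 and bv_len == 0x800 makes __bvec_gap_to_prev() return
 * true for any following offset, since 0x800 & 0xfff != 0.
 */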

static inline bool rq_mergeable(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (req_op(rq) == REQ_OP_ZONE_APPEND)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}

/*
 * There are two different ways to handle DISCARD merges:
 *  1) If max_discard_segments > 1, the driver treats every bio as a range
 *     and sends the bios to the controller together. The ranges don't need
 *     to be contiguous.
 *  2) Otherwise, the request will be a normal read/write request.  The
 *     ranges need to be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}
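
/*
 * Example: with max_discard_segments == 64, two DISCARD bios covering
 * sectors 0..7 and 1024..1031 can merge into a single request carrying
 * two ranges.  With max_discard_segments == 1 the same bios only merge
 * if they are contiguous, exactly like reads and writes.
 */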

static inline unsigned int blk_rq_get_max_segments(struct request *rq)
{
	if (req_op(rq) == REQ_OP_DISCARD)
		return queue_max_discard_segments(rq->q);
	return queue_max_segments(rq->q);
}

static inline unsigned int blk_queue_get_max_sectors(struct request *rq)
{
	struct request_queue *q = rq->q;
	enum req_op op = req_op(rq);

	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_discard_sectors,
			   UINT_MAX >> SECTOR_SHIFT);

	if (unlikely(op == REQ_OP_WRITE_ZEROES))
		return q->limits.max_write_zeroes_sectors;

	if (rq->cmd_flags & REQ_ATOMIC)
		return q->limits.atomic_write_max_sectors;

	return q->limits.max_sectors;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
void bio_integrity_free(struct bio *bio);

/*
 * Integrity payloads can either be owned by the submitter, in which case
 * bio_uninit will free them, or owned and generated by the block layer,
 * in which case we'll verify them here (for reads) and free them before
 * the bio is handed back to the submitter.
 */
bool __bio_integrity_endio(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip && (bip->bip_flags & BIP_BLOCK_INTEGRITY))
		return __bio_integrity_endio(bio);
	return true;
}

bool blk_integrity_merge_rq(struct request_queue *, struct request *,
		struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
		struct bio *);

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

extern const struct attribute_group blk_integrity_attr_group;
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
		struct request *r1, struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
		struct request *r, struct bio *b)
{
	return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);

enum bio_merge_status {
	BIO_MERGE_OK,
	BIO_MERGE_NONE,
	BIO_MERGE_FAILED,
};

enum bio_merge_status bio_attempt_back_merge(struct request *req,
		struct bio *bio, unsigned int nr_segs);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
			struct bio *bio, unsigned int nr_segs);

/*
 * Plug flush limits
 */
#define BLK_MAX_REQUEST_COUNT	32
#define BLK_PLUG_FLUSH_SIZE	(128 * 1024)

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

bool blk_insert_flush(struct request *rq);

void elv_update_nr_hw_queues(struct request_queue *q);
void elevator_set_default(struct request_queue *q);
void elevator_set_none(struct request_queue *q);

ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);

struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim,
		unsigned *nsegs);
struct bio *bio_split_write_zeroes(struct bio *bio,
		const struct queue_limits *lim, unsigned *nsegs);
struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
		unsigned *nr_segs);
struct bio *bio_split_zone_append(struct bio *bio,
		const struct queue_limits *lim, unsigned *nr_segs);

/*
 * All drivers must accept single-segment bios that are smaller than
 * PAGE_SIZE.
 *
 * This is a quick and dirty check that relies on the fact that bi_io_vec[0]
 * is always valid if a bio has data.  The check might lead to occasional
 * false positives when bios are cloned, but compared to the performance
 * impact of cloned bios themselves the loop below doesn't matter anyway.
 */
static inline bool bio_may_need_split(struct bio *bio,
		const struct queue_limits *lim)
{
	if (lim->chunk_sectors)
		return true;
	if (bio->bi_vcnt != 1)
		return true;
	return bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset >
		lim->min_segment_size;
}
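
/*
 * Worked example (assuming min_segment_size == BLK_MIN_SEGMENT_SIZE):
 * a single-vector bio with bv_offset == 0 and bv_len == 512 skips the
 * split path entirely, while bv_offset == 3584 and bv_len == 1024
 * (3584 + 1024 == 4608 > 4096) still takes the full bio_split_rw() walk
 * even though bv_len itself is small.
 */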

/**
 * __bio_split_to_limits - split a bio to fit the queue limits
 * @bio:     bio to be split
 * @lim:     queue limits to split based on
 * @nr_segs: returns the number of segments in the returned bio
 *
 * Check if @bio needs splitting based on the queue limits, and if so split off
 * a bio fitting the limits from the beginning of @bio and return it.  @bio is
 * shortened to the remainder and re-submitted.
 *
 * The split bio is allocated from @q->bio_split, which is provided by the
 * block layer.
 */
static inline struct bio *__bio_split_to_limits(struct bio *bio,
		const struct queue_limits *lim, unsigned int *nr_segs)
{
	switch (bio_op(bio)) {
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		if (bio_may_need_split(bio, lim))
			return bio_split_rw(bio, lim, nr_segs);
		*nr_segs = 1;
		return bio;
	case REQ_OP_ZONE_APPEND:
		return bio_split_zone_append(bio, lim, nr_segs);
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		return bio_split_discard(bio, lim, nr_segs);
	case REQ_OP_WRITE_ZEROES:
		return bio_split_write_zeroes(bio, lim, nr_segs);
	default:
		/* other operations can't be split */
		*nr_segs = 0;
		return bio;
	}
}
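
/*
 * Example (illustrative sketch of the submission-path caller pattern):
 *
 *	unsigned int nr_segs;
 *
 *	bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
 *	if (!bio)
 *		return;		(split failed; the bio was already ended)
 *	... set up a request for bio using nr_segs ...
 *
 * The remainder, if any, has already been re-submitted by the split
 * helpers, so the caller only ever sees a bio that fits the limits.
 */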

/**
 * get_max_segment_size() - maximum number of bytes to add as a single segment
 * @lim: Request queue limits.
 * @paddr: address of the range to add
 * @len: maximum length available to add at @paddr
 *
 * Returns the maximum number of bytes of the range starting at @paddr that can
 * be added to a single segment.
 */
static inline unsigned get_max_segment_size(const struct queue_limits *lim,
		phys_addr_t paddr, unsigned int len)
{
	/*
	 * Prevent an overflow if mask = ULONG_MAX and offset = 0 by adding 1
	 * after having calculated the minimum.
	 */
	return min_t(unsigned long, len,
		min(lim->seg_boundary_mask - (lim->seg_boundary_mask & paddr),
		    (unsigned long)lim->max_segment_size - 1) + 1);
}
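
/*
 * Worked example: with seg_boundary_mask == 0xffff (64K) and
 * max_segment_size == 0x10000, a range at paddr == 0x1f000 yields
 * min(len, min(0xffff - 0xf000, 0xffff) + 1) == min(len, 0x1000): the
 * segment must stop at the 64K boundary at 0x20000.  Doing "- 1" before
 * the min() and "+ 1" after it is what keeps the arithmetic from
 * overflowing when seg_boundary_mask is ULONG_MAX and paddr is aligned.
 */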

int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_set_default_limits(struct queue_limits *lim);
void blk_apply_bdi_limits(struct backing_dev_info *bdi,
		struct queue_limits *lim);
int blk_dev_init(void);

void update_io_ticks(struct block_device *part, unsigned long now, bool end);

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * Internal io_context interface
 */
struct io_cq *ioc_find_get_icq(struct request_queue *q);
struct io_cq *ioc_lookup_icq(struct request_queue *q);
#ifdef CONFIG_BLK_ICQ
void ioc_clear_queue(struct request_queue *q);
#else
static inline void ioc_clear_queue(struct request_queue *q)
{
}
#endif /* CONFIG_BLK_ICQ */

#ifdef CONFIG_BLK_DEV_ZONED
void disk_init_zone_resources(struct gendisk *disk);
void disk_free_zone_resources(struct gendisk *disk);
static inline bool bio_zone_write_plugging(struct bio *bio)
{
	return bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING);
}
void blk_zone_write_plug_bio_merged(struct bio *bio);
void blk_zone_write_plug_init_request(struct request *rq);
static inline void blk_zone_update_request_bio(struct request *rq,
					       struct bio *bio)
{
	/*
	 * For zone append requests, the request sector indicates the location
	 * at which the BIO data was written. Return this value to the BIO
	 * issuer through the BIO iter sector.
	 * For plugged zone writes, which include emulated zone append, we need
	 * the original BIO sector so that blk_zone_write_plug_bio_endio() can
	 * look up the zone write plug.
	 */
	if (req_op(rq) == REQ_OP_ZONE_APPEND ||
	    bio_flagged(bio, BIO_EMULATES_ZONE_APPEND))
		bio->bi_iter.bi_sector = rq->__sector;
}
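
/*
 * Example: a REQ_OP_ZONE_APPEND bio submitted with bi_sector pointing at
 * the start of a zone (say sector 524288) may be written by the device at
 * sector 524320.  The request then completes with rq->__sector == 524320,
 * and blk_zone_update_request_bio() copies that value back into
 * bio->bi_iter.bi_sector so the issuer learns the actual write location.
 */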

void blk_zone_write_plug_bio_endio(struct bio *bio);
static inline void blk_zone_bio_endio(struct bio *bio)
{
	/*
	 * For write BIOs to zoned devices, signal the completion of the BIO so
	 * that the next write BIO can be submitted by zone write plugging.
	 */
	if (bio_zone_write_plugging(bio))
		blk_zone_write_plug_bio_endio(bio);
}

void blk_zone_write_plug_finish_request(struct request *rq);
static inline void blk_zone_finish_request(struct request *rq)
{
	if (rq->rq_flags & RQF_ZONE_WRITE_PLUGGING)
		blk_zone_write_plug_finish_request(rq);
}
int blkdev_report_zones_ioctl(struct block_device *bdev, unsigned int cmd,
		unsigned long arg);
int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned int cmd, unsigned long arg);
#else /* CONFIG_BLK_DEV_ZONED */
static inline void disk_init_zone_resources(struct gendisk *disk)
{
}
static inline void disk_free_zone_resources(struct gendisk *disk)
{
}
static inline bool bio_zone_write_plugging(struct bio *bio)
{
	return false;
}
static inline void blk_zone_write_plug_bio_merged(struct bio *bio)
{
}
static inline void blk_zone_write_plug_init_request(struct request *rq)
{
}
static inline void blk_zone_update_request_bio(struct request *rq,
					       struct bio *bio)
{
}
static inline void blk_zone_bio_endio(struct bio *bio)
{
}
static inline void blk_zone_finish_request(struct request *rq)
{
}
static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
		unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}
static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
		blk_mode_t mode, unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}
#endif /* CONFIG_BLK_DEV_ZONED */

struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
void bdev_add(struct block_device *bdev, dev_t dev);
void bdev_unhash(struct block_device *bdev);
void bdev_drop(struct block_device *bdev);

int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
#define ADDPART_FLAG_READONLY	4
int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
int bdev_del_partition(struct gendisk *disk, int partno);
int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
void drop_partition(struct block_device *part);

void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors);

struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
		struct lock_class_key *lkclass);

/*
 * Clean up a page appropriately, where the page may be pinned, may have a
 * ref taken on it, or neither.
 */
static inline void bio_release_page(struct bio *bio, struct page *page)
{
	if (bio_flagged(bio, BIO_PAGE_PINNED))
		unpin_user_page(page);
}

struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id);

int disk_scan_partitions(struct gendisk *disk, blk_mode_t mode);

int disk_alloc_events(struct gendisk *disk);
void disk_add_events(struct gendisk *disk);
void disk_del_events(struct gendisk *disk);
void disk_release_events(struct gendisk *disk);
void disk_block_events(struct gendisk *disk);
void disk_unblock_events(struct gendisk *disk);
void disk_flush_events(struct gendisk *disk, unsigned int mask);
extern struct device_attribute dev_attr_events;
extern struct device_attribute dev_attr_events_async;
extern struct device_attribute dev_attr_events_poll_msecs;

extern struct attribute_group blk_trace_attr_group;

blk_mode_t file_to_blk_mode(struct file *file);
int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode,
		loff_t lstart, loff_t lend);
long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
int blkdev_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags);
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);

extern const struct address_space_operations def_blk_aops;

int disk_register_independent_access_ranges(struct gendisk *disk);
void disk_unregister_independent_access_ranges(struct gendisk *disk);

#ifdef CONFIG_FAIL_MAKE_REQUEST
bool should_fail_request(struct block_device *part, unsigned int bytes);
#else /* CONFIG_FAIL_MAKE_REQUEST */
static inline bool should_fail_request(struct block_device *part,
					unsigned int bytes)
{
	return false;
}
#endif /* CONFIG_FAIL_MAKE_REQUEST */

/*
 * Optimized request reference counting. Ideally we'd make timeouts be more
 * clever, as that's the only reason we need references at all... But until
 * this happens, this is faster than using refcount_t. Also see:
 *
 * abc54d634334 ("io_uring: switch to atomic_t for io_kiocb reference count")
 */
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req->ref)) + 127u <= 127u)
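
/*
 * Worked example of the check above: for any ref value in [-127, 0] the
 * unsigned "+ 127u" wraps the sum into [0, 127], so ref == 0 gives
 * 127u <= 127u (flagged) and ref == -100 gives 27u <= 127u (flagged),
 * while a healthy count such as ref == 1 gives 128u and passes.
 */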

static inline bool req_ref_inc_not_zero(struct request *req)
{
	return atomic_inc_not_zero(&req->ref);
}

static inline bool req_ref_put_and_test(struct request *req)
{
	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	return atomic_dec_and_test(&req->ref);
}

static inline void req_ref_set(struct request *req, int value)
{
	atomic_set(&req->ref, value);
}

static inline int req_ref_read(struct request *req)
{
	return atomic_read(&req->ref);
}

static inline u64 blk_time_get_ns(void)
{
	struct blk_plug *plug = current->plug;

	if (!plug || !in_task())
		return ktime_get_ns();

	/*
	 * 0 could very well be a valid time, but rather than flag "this is
	 * a valid timestamp" separately, just accept that we'll do an extra
	 * ktime_get_ns() if we just happen to get 0 as the current time.
	 */
	if (!plug->cur_ktime) {
		plug->cur_ktime = ktime_get_ns();
		current->flags |= PF_BLOCK_TS;
	}
	return plug->cur_ktime;
}
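
/*
 * Example (sketch): inside a plugged section, every timestamp after the
 * first one is served from the plug's cache, so a batch submission pays
 * for a single ktime_get_ns():
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	... record each request's start time via blk_time_get_ns() ...
 *	blk_finish_plug(&plug);	(drops cur_ktime and PF_BLOCK_TS)
 */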

static inline ktime_t blk_time_get(void)
{
	return ns_to_ktime(blk_time_get_ns());
}

/*
 * From most significant bit:
 * 1 bit: reserved for other usage, see below
 * 12 bits: original size of bio
 * 51 bits: issue time of bio
 */
#define BIO_ISSUE_RES_BITS      1
#define BIO_ISSUE_SIZE_BITS     12
#define BIO_ISSUE_RES_SHIFT     (64 - BIO_ISSUE_RES_BITS)
#define BIO_ISSUE_SIZE_SHIFT    (BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
#define BIO_ISSUE_TIME_MASK     ((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
#define BIO_ISSUE_SIZE_MASK     \
	(((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
#define BIO_ISSUE_RES_MASK      (~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))

/* Reserved bit for blk-throtl */
#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)

static inline u64 __bio_issue_time(u64 time)
{
	return time & BIO_ISSUE_TIME_MASK;
}

static inline u64 bio_issue_time(struct bio_issue *issue)
{
	return __bio_issue_time(issue->value);
}

static inline sector_t bio_issue_size(struct bio_issue *issue)
{
	return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
}

static inline void bio_issue_init(struct bio_issue *issue,
				       sector_t size)
{
	size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
	issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
			(blk_time_get_ns() & BIO_ISSUE_TIME_MASK) |
			((u64)size << BIO_ISSUE_SIZE_SHIFT));
}
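
/*
 * Worked example of the packing above: bio_issue_init(issue, 8) at time t
 * stores t & ((1ULL << 51) - 1) in the low 51 bits and 8 in bits 51..62,
 * leaving bit 63 (BIO_ISSUE_THROTL_SKIP_LATENCY) untouched.
 * bio_issue_time() masks the size and reserved bits back off, and
 * bio_issue_size() recovers the 8 sectors.  Sizes above 4095 sectors and
 * the top bits of t are silently truncated by the masks.
 */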

void bdev_release(struct file *bdev_file);
int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
	      const struct blk_holder_ops *hops, struct file *bdev_file);
int bdev_permission(dev_t dev, blk_mode_t mode, void *holder);

void blk_integrity_generate(struct bio *bio);
void blk_integrity_verify_iter(struct bio *bio, struct bvec_iter *saved_iter);
void blk_integrity_prepare(struct request *rq);
void blk_integrity_complete(struct request *rq, unsigned int nr_bytes);

#ifdef CONFIG_LOCKDEP
static inline void blk_freeze_acquire_lock(struct request_queue *q)
{
	if (!q->mq_freeze_disk_dead)
		rwsem_acquire(&q->io_lockdep_map, 0, 1, _RET_IP_);
	if (!q->mq_freeze_queue_dying)
		rwsem_acquire(&q->q_lockdep_map, 0, 1, _RET_IP_);
}

static inline void blk_unfreeze_release_lock(struct request_queue *q)
{
	if (!q->mq_freeze_queue_dying)
		rwsem_release(&q->q_lockdep_map, _RET_IP_);
	if (!q->mq_freeze_disk_dead)
		rwsem_release(&q->io_lockdep_map, _RET_IP_);
}
#else
static inline void blk_freeze_acquire_lock(struct request_queue *q)
{
}
static inline void blk_unfreeze_release_lock(struct request_queue *q)
{
}
#endif

#endif /* BLK_INTERNAL_H */