/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/bio-integrity.h>
#include <linux/blk-crypto.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <linux/sched/sysctl.h>
#include <linux/timekeeping.h>
#include <xen/xen.h>
#include "blk-crypto-internal.h"

struct elv_change_ctx;

/*
 * Default upper limit for the software max_sectors limit used for regular I/Os.
 * This can be increased through sysfs.
 *
 * This should not be confused with the max_hw_sectors limit that is entirely
 * controlled by the block device driver, usually based on hardware limits.
 */
#define BLK_DEF_MAX_SECTORS_CAP	(SZ_4M >> SECTOR_SHIFT)
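
/*
 * With the kernel's fixed SECTOR_SHIFT of 9 this works out to
 * SZ_4M >> 9 == 8192 sectors, i.e. a 4 MiB default cap on max_sectors.
 */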

#define	BLK_DEV_MAX_SECTORS	(LLONG_MAX >> 9)
#define	BLK_MIN_SEGMENT_SIZE	4096

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern const struct kobj_type blk_queue_ktype;
extern struct dentry *blk_debugfs_root;

struct blk_flush_queue {
	spinlock_t		mq_flush_lock;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	unsigned long		flush_data_in_flight;
	struct request		*flush_rq;
	struct rcu_head		rcu_head;
};

bool is_flush_rq(struct request *req);

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
bool blk_queue_start_drain(struct request_queue *q);
bool __blk_freeze_queue_start(struct request_queue *q,
			      struct task_struct *owner);
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
void submit_bio_noacct_nocheck(struct bio *bio, bool split);
int bio_submit_or_kill(struct bio *bio, unsigned int flags);

static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
{
	rcu_read_lock();
	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
		goto fail;

	/*
	 * The code that increments the pm_only counter must ensure that the
	 * counter is globally visible before the queue is unfrozen.
	 */
	if (blk_queue_pm_only(q) &&
	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
		goto fail_put;

	rcu_read_unlock();
	return true;

fail_put:
	blk_queue_exit(q);
fail:
	rcu_read_unlock();
	return false;
}

static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);

	if (blk_try_enter_queue(q, false)) {
		rwsem_acquire_read(&q->io_lockdep_map, 0, 0, _RET_IP_);
		rwsem_release(&q->io_lockdep_map, _RET_IP_);
		return 0;
	}
	return __bio_queue_enter(q, bio);
}

static inline void blk_wait_io(struct completion *done)
{
	/* Prevent hang_check timer from firing at us during very long I/O */
	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

	if (timeout)
		while (!wait_for_completion_io_timeout(done, timeout))
			;
	else
		wait_for_completion_io(done);
}
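
/*
 * For example, with sysctl_hung_task_timeout_secs at a typical 120 seconds,
 * blk_wait_io() waits in 60-second slices, so a multi-minute I/O never looks
 * like a single uninterrupted sleep to the hung task detector.
 */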

struct block_device *blkdev_get_no_open(dev_t dev, bool autoload);
void blkdev_put_no_open(struct block_device *bdev);

bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
		struct page *page, unsigned len, unsigned offset);

static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = bvec_phys(vec1);
	phys_addr_t addr2 = bvec_phys(vec2);

	/*
	 * Merging adjacent physical pages may not work correctly under KMSAN
	 * if their metadata pages aren't adjacent. Just disable merging.
	 */
	if (IS_ENABLED(CONFIG_KMSAN))
		return false;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (!zone_device_pages_have_same_pgmap(vec1->bv_page, vec2->bv_page))
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}
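
/*
 * For instance, with a 64K segment boundary (mask == 0xffff), a 4K vector at
 * physical address 0xf000 followed by a 4K vector at 0x10000 is physically
 * contiguous but still not mergeable: the combined range would straddle the
 * 64K boundary, which the final check above rejects.
 */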

/*
 * Check if two pages from potentially different zone device pgmaps can
 * coexist as separate bvec entries in the same bio.
 *
 * The block DMA iterator (blk_dma_map_iter_start) caches the P2PDMA mapping
 * state from the first segment and applies it to all subsequent segments, so
 * P2PDMA pages from different pgmaps must not be mixed in the same bio.
 *
 * Other zone device types (FS_DAX, GENERIC) use the same dma_map_phys() path
 * as normal RAM.  PRIVATE and COHERENT pages never appear in bios.
 */
static inline bool zone_device_pages_compatible(const struct page *a,
						const struct page *b)
{
	if (is_pci_p2pdma_page(a) || is_pci_p2pdma_page(b))
		return zone_device_pages_have_same_pgmap(a, b);
	return true;
}

static inline bool __bvec_gap_to_prev(const struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & lim->virt_boundary_mask) ||
		((bprv->bv_offset + bprv->bv_len) & lim->virt_boundary_mask);
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(const struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!lim->virt_boundary_mask)
		return false;
	return __bvec_gap_to_prev(lim, bprv, offset);
}
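
/*
 * Example: with virt_boundary_mask == 0xfff (a device whose SG entries must
 * not contain gaps at non-4K-aligned points, as with NVMe PRP lists), a
 * previous vector ending at bv_offset + bv_len == 0x1200 creates a gap for
 * whatever follows, and so does a new vector whose offset is not 4K aligned.
 */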

static inline bool rq_mergeable(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (req_op(rq) == REQ_OP_ZONE_APPEND)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}

/*
 * There are two different ways to handle DISCARD merges:
 *  1) If max_discard_segments > 1, the driver treats every bio as a range
 *     and sends the bios to the controller together.  The ranges don't need
 *     to be contiguous.
 *  2) Otherwise, the request will be a normal read/write request.  The
 *     ranges need to be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}
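
/*
 * For example, a driver such as nvme advertises max_discard_segments greater
 * than one (one DSM range per segment), so several discontiguous discard bios
 * can be merged into a single request; a driver that leaves the limit at 1
 * only sees discards merged when the sector ranges line up back to back.
 */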

static inline unsigned int blk_rq_get_max_segments(struct request *rq)
{
	if (req_op(rq) == REQ_OP_DISCARD)
		return queue_max_discard_segments(rq->q);
	return queue_max_segments(rq->q);
}

static inline unsigned int blk_queue_get_max_sectors(struct request *rq)
{
	struct request_queue *q = rq->q;
	enum req_op op = req_op(rq);

	if (unlikely(op == REQ_OP_DISCARD))
		return min(q->limits.max_discard_sectors,
			   UINT_MAX >> SECTOR_SHIFT);

	if (unlikely(op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_secure_erase_sectors,
			   UINT_MAX >> SECTOR_SHIFT);

	if (unlikely(op == REQ_OP_WRITE_ZEROES))
		return q->limits.max_write_zeroes_sectors;

	if (rq->cmd_flags & REQ_ATOMIC)
		return q->limits.atomic_write_max_sectors;

	return q->limits.max_sectors;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
void bio_integrity_free(struct bio *bio);

/*
 * Integrity payloads can either be owned by the submitter, in which case
 * bio_uninit will free them, or owned and generated by the block layer,
 * in which case we'll verify them here (for reads) and free them before
 * the bio is handed back to the submitter.
 */
bool __bio_integrity_endio(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip && (bip->bip_flags & BIP_BLOCK_INTEGRITY))
		return __bio_integrity_endio(bio);
	return true;
}

bool blk_integrity_merge_rq(struct request_queue *, struct request *,
		struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
		struct bio *);

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

extern const struct attribute_group blk_integrity_attr_group;
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
		struct request *r1, struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
		struct request *r, struct bio *b)
{
	return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);

enum bio_merge_status {
	BIO_MERGE_OK,
	BIO_MERGE_NONE,
	BIO_MERGE_FAILED,
};

enum bio_merge_status bio_attempt_back_merge(struct request *req,
		struct bio *bio, unsigned int nr_segs);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
			struct bio *bio, unsigned int nr_segs);

/*
 * Plug flush limits
 */
#define BLK_MAX_REQUEST_COUNT	32
#define BLK_PLUG_FLUSH_SIZE	(128 * 1024)

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

bool blk_insert_flush(struct request *rq);

void elv_update_nr_hw_queues(struct request_queue *q,
		struct elv_change_ctx *ctx);
void elevator_set_default(struct request_queue *q);
void elevator_set_none(struct request_queue *q);

ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);

struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim,
		unsigned *nsegs);
struct bio *bio_split_write_zeroes(struct bio *bio,
		const struct queue_limits *lim, unsigned *nsegs);
struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
		unsigned *nr_segs);
struct bio *bio_split_zone_append(struct bio *bio,
		const struct queue_limits *lim, unsigned *nr_segs);

/*
 * All drivers must accept single-segment bios that are smaller than PAGE_SIZE.
 *
 * This is a quick and dirty check that relies on the fact that bi_io_vec[0] is
 * always valid if a bio has data.  The check might lead to occasional false
 * positives when bios are cloned, but compared to the performance impact of
 * cloned bios themselves the loop below doesn't matter anyway.
 */
static inline bool bio_may_need_split(struct bio *bio,
		const struct queue_limits *lim)
{
	const struct bio_vec *bv;

	if (lim->chunk_sectors)
		return true;

	if (!bio->bi_io_vec)
		return true;

	bv = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
	if (bio->bi_iter.bi_size > bv->bv_len - bio->bi_iter.bi_bvec_done)
		return true;
	return bv->bv_len + bv->bv_offset > lim->max_fast_segment_size;
}

/**
 * __bio_split_to_limits - split a bio to fit the queue limits
 * @bio:     bio to be split
 * @lim:     queue limits to split based on
 * @nr_segs: returns the number of segments in the returned bio
 *
 * Check if @bio needs splitting based on the queue limits, and if so split off
 * a bio fitting the limits from the beginning of @bio and return it.  @bio is
 * shortened to the remainder and re-submitted.
 *
 * The split bio is allocated from @q->bio_split, which is provided by the
 * block layer.
 */
static inline struct bio *__bio_split_to_limits(struct bio *bio,
		const struct queue_limits *lim, unsigned int *nr_segs)
{
	switch (bio_op(bio)) {
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		if (bio_may_need_split(bio, lim))
			return bio_split_rw(bio, lim, nr_segs);
		*nr_segs = 1;
		return bio;
	case REQ_OP_ZONE_APPEND:
		return bio_split_zone_append(bio, lim, nr_segs);
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		return bio_split_discard(bio, lim, nr_segs);
	case REQ_OP_WRITE_ZEROES:
		return bio_split_write_zeroes(bio, lim, nr_segs);
	default:
		/* other operations can't be split */
		*nr_segs = 0;
		return bio;
	}
}
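
/*
 * A typical caller hands in an arbitrarily sized bio and then works only on
 * the bio it gets back; any remainder has already been re-submitted on the
 * caller's behalf.  Roughly:
 *
 *	unsigned int nr_segs;
 *
 *	bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
 *	if (!bio)
 *		return;
 *
 * A NULL return means the split failed and the original bio has already been
 * completed with an error; otherwise the returned bio fits the limits and
 * spans @nr_segs segments.
 */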

/**
 * get_max_segment_size() - maximum number of bytes to add as a single segment
 * @lim: Request queue limits.
 * @paddr: address of the range to add
 * @len: maximum length available to add at @paddr
 *
 * Returns the maximum number of bytes of the range starting at @paddr that can
 * be added to a single segment.
 */
static inline unsigned get_max_segment_size(const struct queue_limits *lim,
		phys_addr_t paddr, unsigned int len)
{
	/*
	 * Prevent an overflow if mask = ULONG_MAX and offset = 0 by adding 1
	 * after having calculated the minimum.
	 */
	return min_t(unsigned long, len,
		min(lim->seg_boundary_mask - (lim->seg_boundary_mask & paddr),
		    (unsigned long)lim->max_segment_size - 1) + 1);
}
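
/*
 * Worked example: with max_segment_size == 64K, seg_boundary_mask == 0xffff
 * and paddr == 0x1f000, the boundary term is 0xffff - 0xf000 == 0x0fff and
 * the size term is 0xffff, so the result is min(len, 0x0fff + 1), i.e. at
 * most 4K before the next 64K boundary.  Adding the 1 only after the min()
 * is what keeps the seg_boundary_mask == ULONG_MAX, offset == 0 case from
 * overflowing.
 */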

int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_set_default_limits(struct queue_limits *lim);
void blk_apply_bdi_limits(struct backing_dev_info *bdi,
		struct queue_limits *lim);
int blk_dev_init(void);

void update_io_ticks(struct block_device *part, unsigned long now, bool end);

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * Internal io_context interface
 */
struct io_cq *ioc_find_get_icq(struct request_queue *q);
struct io_cq *ioc_lookup_icq(struct request_queue *q);
#ifdef CONFIG_BLK_ICQ
void ioc_clear_queue(struct request_queue *q);
#else
static inline void ioc_clear_queue(struct request_queue *q)
{
}
#endif /* CONFIG_BLK_ICQ */

#ifdef CONFIG_BLK_DEV_ZONED
void disk_init_zone_resources(struct gendisk *disk);
void disk_free_zone_resources(struct gendisk *disk);
static inline bool bio_zone_write_plugging(struct bio *bio)
{
	return bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING);
}
static inline bool blk_req_bio_is_zone_append(struct request *rq,
					      struct bio *bio)
{
	return req_op(rq) == REQ_OP_ZONE_APPEND ||
	       bio_flagged(bio, BIO_EMULATES_ZONE_APPEND);
}
void blk_zone_write_plug_bio_merged(struct bio *bio);
void blk_zone_write_plug_init_request(struct request *rq);
void blk_zone_append_update_request_bio(struct request *rq, struct bio *bio);
void blk_zone_mgmt_bio_endio(struct bio *bio);
void blk_zone_write_plug_bio_endio(struct bio *bio);
static inline void blk_zone_bio_endio(struct bio *bio)
{
	/*
	 * Zone management BIOs may impact zone write plugs (e.g. a zone reset
	 * changes a zone write plug's write pointer offset), but these
	 * operations do not go through zone write plugging as they may operate
	 * on zones that do not have a zone write plug.
	 * blk_zone_mgmt_bio_endio() handles the potential changes to zone
	 * write plugs that are present.
	 */
	if (op_is_zone_mgmt(bio_op(bio))) {
		blk_zone_mgmt_bio_endio(bio);
		return;
	}

	/*
	 * For write BIOs to zoned devices, signal the completion of the BIO so
	 * that the next write BIO can be submitted by zone write plugging.
	 */
	if (bio_zone_write_plugging(bio))
		blk_zone_write_plug_bio_endio(bio);
}

void blk_zone_write_plug_finish_request(struct request *rq);
static inline void blk_zone_finish_request(struct request *rq)
{
	if (rq->rq_flags & RQF_ZONE_WRITE_PLUGGING)
		blk_zone_write_plug_finish_request(rq);
}
int blkdev_report_zones_ioctl(struct block_device *bdev, unsigned int cmd,
		unsigned long arg);
int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned int cmd, unsigned long arg);
#else /* CONFIG_BLK_DEV_ZONED */
static inline void disk_init_zone_resources(struct gendisk *disk)
{
}
static inline void disk_free_zone_resources(struct gendisk *disk)
{
}
static inline bool bio_zone_write_plugging(struct bio *bio)
{
	return false;
}
static inline bool blk_req_bio_is_zone_append(struct request *req,
					      struct bio *bio)
{
	return false;
}
static inline void blk_zone_write_plug_bio_merged(struct bio *bio)
{
}
static inline void blk_zone_write_plug_init_request(struct request *rq)
{
}
static inline void blk_zone_append_update_request_bio(struct request *rq,
						      struct bio *bio)
{
}
static inline void blk_zone_bio_endio(struct bio *bio)
{
}
static inline void blk_zone_finish_request(struct request *rq)
{
}
static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
		unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}
static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
		blk_mode_t mode, unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}
#endif /* CONFIG_BLK_DEV_ZONED */

struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
void bdev_add(struct block_device *bdev, dev_t dev);
void bdev_unhash(struct block_device *bdev);
void bdev_drop(struct block_device *bdev);

int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
#define ADDPART_FLAG_READONLY	4
int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
int bdev_del_partition(struct gendisk *disk, int partno);
int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
void drop_partition(struct block_device *part);

void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors);

struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
		struct lock_class_key *lkclass);
struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id);

int disk_scan_partitions(struct gendisk *disk, blk_mode_t mode);

int disk_alloc_events(struct gendisk *disk);
void disk_add_events(struct gendisk *disk);
void disk_del_events(struct gendisk *disk);
void disk_release_events(struct gendisk *disk);
void disk_block_events(struct gendisk *disk);
void disk_unblock_events(struct gendisk *disk);
void disk_flush_events(struct gendisk *disk, unsigned int mask);
extern struct device_attribute dev_attr_events;
extern struct device_attribute dev_attr_events_async;
extern struct device_attribute dev_attr_events_poll_msecs;

extern struct attribute_group blk_trace_attr_group;

blk_mode_t file_to_blk_mode(struct file *file);
int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode,
		loff_t lstart, loff_t lend);
long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
int blkdev_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags);
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);

extern const struct address_space_operations def_blk_aops;

int disk_register_independent_access_ranges(struct gendisk *disk);
void disk_unregister_independent_access_ranges(struct gendisk *disk);

int should_fail_bio(struct bio *bio);
#ifdef CONFIG_FAIL_MAKE_REQUEST
bool should_fail_request(struct block_device *part, unsigned int bytes);
#else /* CONFIG_FAIL_MAKE_REQUEST */
static inline bool should_fail_request(struct block_device *part,
					unsigned int bytes)
{
	return false;
}
#endif /* CONFIG_FAIL_MAKE_REQUEST */

/*
 * Optimized request reference counting. Ideally we'd make timeouts be more
 * clever, as that's the only reason we need references at all... But until
 * this happens, this is faster than using refcount_t. Also see:
 *
 * abc54d634334 ("io_uring: switch to atomic_t for io_kiocb reference count")
 */
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req->ref)) + 127u <= 127u)
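
/*
 * Example: ref == 0 evaluates to 0 + 127 == 127 <= 127 and is caught; an
 * underflowed ref of -1 wraps to 0xffffffff, the addition wraps to 126, and
 * it is caught as well.  A sane live count such as 1 evaluates to 128 and
 * passes, while a count within 127 of UINT_MAX wraps and is flagged as
 * close to overflow.
 */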

static inline bool req_ref_inc_not_zero(struct request *req)
{
	return atomic_inc_not_zero(&req->ref);
}

static inline bool req_ref_put_and_test(struct request *req)
{
	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	return atomic_dec_and_test(&req->ref);
}

static inline void req_ref_set(struct request *req, int value)
{
	atomic_set(&req->ref, value);
}

static inline int req_ref_read(struct request *req)
{
	return atomic_read(&req->ref);
}

static inline u64 blk_time_get_ns(void)
{
	struct blk_plug *plug = current->plug;

	if (!plug || !in_task())
		return ktime_get_ns();

	/*
	 * 0 could very well be a valid time, but rather than flag "this is
	 * a valid timestamp" separately, just accept that we'll do an extra
	 * ktime_get_ns() if we just happen to get 0 as the current time.
	 */
	if (!plug->cur_ktime) {
		plug->cur_ktime = ktime_get_ns();
		current->flags |= PF_BLOCK_TS;
	}
	return plug->cur_ktime;
}
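
/*
 * The cached value means a plugged batch pays for a single ktime_get_ns():
 * every later blk_time_get_ns() call issued while the plug is active reuses
 * plug->cur_ktime, and PF_BLOCK_TS marks the task so the cached timestamp
 * can be invalidated once it might have gone stale.
 */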

static inline ktime_t blk_time_get(void)
{
	return ns_to_ktime(blk_time_get_ns());
}

void bdev_release(struct file *bdev_file);
int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
	      const struct blk_holder_ops *hops, struct file *bdev_file);
int bdev_permission(dev_t dev, blk_mode_t mode, void *holder);

void bio_integrity_generate(struct bio *bio);
blk_status_t bio_integrity_verify(struct bio *bio,
		struct bvec_iter *saved_iter);

void blk_integrity_prepare(struct request *rq);
void blk_integrity_complete(struct request *rq, unsigned int nr_bytes);

#ifdef CONFIG_LOCKDEP
static inline void blk_freeze_acquire_lock(struct request_queue *q)
{
	if (!q->mq_freeze_disk_dead)
		rwsem_acquire(&q->io_lockdep_map, 0, 1, _RET_IP_);
	if (!q->mq_freeze_queue_dying)
		rwsem_acquire(&q->q_lockdep_map, 0, 1, _RET_IP_);
}

static inline void blk_unfreeze_release_lock(struct request_queue *q)
{
	if (!q->mq_freeze_queue_dying)
		rwsem_release(&q->q_lockdep_map, _RET_IP_);
	if (!q->mq_freeze_disk_dead)
		rwsem_release(&q->io_lockdep_map, _RET_IP_);
}
#else
static inline void blk_freeze_acquire_lock(struct request_queue *q)
{
}
static inline void blk_unfreeze_release_lock(struct request_queue *q)
{
}
#endif

/*
 * debugfs directory and file creation can trigger fs reclaim, which can enter
 * back into the block layer request_queue. This can cause deadlock if the
 * queue is frozen. Use NOIO context together with debugfs_mutex to prevent fs
 * reclaim from triggering block I/O.
 */
static inline void blk_debugfs_lock_nomemsave(struct request_queue *q)
{
	mutex_lock(&q->debugfs_mutex);
}

static inline void blk_debugfs_unlock_nomemrestore(struct request_queue *q)
{
	mutex_unlock(&q->debugfs_mutex);
}

static inline unsigned int __must_check blk_debugfs_lock(struct request_queue *q)
{
	unsigned int memflags = memalloc_noio_save();

	blk_debugfs_lock_nomemsave(q);
	return memflags;
}

static inline void blk_debugfs_unlock(struct request_queue *q,
				      unsigned int memflags)
{
	blk_debugfs_unlock_nomemrestore(q);
	memalloc_noio_restore(memflags);
}
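
/*
 * Typical pairing, as used around debugfs registration and teardown:
 *
 *	unsigned int memflags = blk_debugfs_lock(q);
 *
 *	... create or remove entries under q->debugfs_dir ...
 *
 *	blk_debugfs_unlock(q, memflags);
 */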

#endif /* BLK_INTERNAL_H */