xref: /linux/include/linux/blkdev.h (revision cc25df3e2e22a956d3a0d427369367b4a901d203)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Portions Copyright (C) 1992 Drew Eckhardt
 */
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/types.h>
#include <linux/blk_types.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/minmax.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/bio.h>
#include <linux/gfp.h>
#include <linux/kdev_t.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/blkzoned.h>
#include <linux/sched.h>
#include <linux/sbitmap.h>
#include <linux/uuid.h>
#include <linux/xarray.h>
#include <linux/file.h>
#include <linux/lockdep.h>

struct module;
struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct blkcg_gq;
struct blk_flush_queue;
struct kiocb;
struct pr_ops;
struct rq_qos;
struct blk_report_zones_args;
struct blk_queue_stats;
struct blk_stat_callback;
struct blk_crypto_profile;

extern const struct device_type disk_type;
extern const struct device_type part_type;
extern const struct class block_class;

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS		6

#define DISK_MAX_PARTS			256
#define DISK_NAME_LEN			32

#define PARTITION_META_INFO_VOLNAMELTH	64
/*
 * Enough for the string representation of any kind of UUID plus NULL.
 * EFI UUID is 36 characters. MSDOS UUID is 11 characters.
 */
#define PARTITION_META_INFO_UUIDLTH	(UUID_STRING_LEN + 1)

struct partition_meta_info {
	char uuid[PARTITION_META_INFO_UUIDLTH];
	u8 volname[PARTITION_META_INFO_VOLNAMELTH];
};

/**
 * DOC: genhd capability flags
 *
 * ``GENHD_FL_REMOVABLE``: indicates that the block device gives access to
 * removable media.  When set, the device remains present even when media is not
 * inserted.  Shall not be set for devices which are removed entirely when the
 * media is removed.
 *
 * ``GENHD_FL_HIDDEN``: the block device is hidden; it doesn't produce events,
 * doesn't appear in sysfs, and can't be opened from userspace or using
 * blkdev_get*. Used for the underlying components of multipath devices.
 *
 * ``GENHD_FL_NO_PART``: partition support is disabled.  The kernel will not
 * scan for partitions from add_disk, and users can't add partitions manually.
 *
 */
enum {
	GENHD_FL_REMOVABLE			= 1 << 0,
	GENHD_FL_HIDDEN				= 1 << 1,
	GENHD_FL_NO_PART			= 1 << 2,
};

enum {
	DISK_EVENT_MEDIA_CHANGE			= 1 << 0, /* media changed */
	DISK_EVENT_EJECT_REQUEST		= 1 << 1, /* eject requested */
};

enum {
	/* Poll even if events_poll_msecs is unset */
	DISK_EVENT_FLAG_POLL			= 1 << 0,
	/* Forward events to udev */
	DISK_EVENT_FLAG_UEVENT			= 1 << 1,
	/* Block event polling when open for exclusive write */
	DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE	= 1 << 2,
};

struct disk_events;
struct badblocks;

enum blk_integrity_checksum {
	BLK_INTEGRITY_CSUM_NONE		= 0,
	BLK_INTEGRITY_CSUM_IP		= 1,
	BLK_INTEGRITY_CSUM_CRC		= 2,
	BLK_INTEGRITY_CSUM_CRC64	= 3,
} __packed;

struct blk_integrity {
	unsigned char				flags;
	enum blk_integrity_checksum		csum_type;
	unsigned char				metadata_size;
	unsigned char				pi_offset;
	unsigned char				interval_exp;
	unsigned char				tag_size;
	unsigned char				pi_tuple_size;
};

typedef unsigned int __bitwise blk_mode_t;

/* open for reading */
#define BLK_OPEN_READ		((__force blk_mode_t)(1 << 0))
/* open for writing */
#define BLK_OPEN_WRITE		((__force blk_mode_t)(1 << 1))
/* open exclusively (vs other exclusive openers) */
#define BLK_OPEN_EXCL		((__force blk_mode_t)(1 << 2))
/* opened with O_NDELAY */
#define BLK_OPEN_NDELAY		((__force blk_mode_t)(1 << 3))
/* open for "writes" only for ioctls (special hack for floppy.c) */
#define BLK_OPEN_WRITE_IOCTL	((__force blk_mode_t)(1 << 4))
/* open is exclusive wrt all other BLK_OPEN_WRITE opens to the device */
#define BLK_OPEN_RESTRICT_WRITES	((__force blk_mode_t)(1 << 5))
/* return partition scanning errors */
#define BLK_OPEN_STRICT_SCAN	((__force blk_mode_t)(1 << 6))

struct gendisk {
	/*
	 * major/first_minor/minors should not be set by any new driver; the
	 * block core will take care of allocating them automatically.
	 */
	int major;
	int first_minor;
	int minors;

	char disk_name[DISK_NAME_LEN];	/* name of major driver */

	unsigned short events;		/* supported events */
	unsigned short event_flags;	/* flags related to event processing */

	struct xarray part_tbl;
	struct block_device *part0;

	const struct block_device_operations *fops;
	struct request_queue *queue;
	void *private_data;

	struct bio_set bio_split;

	int flags;
	unsigned long state;
#define GD_NEED_PART_SCAN		0
#define GD_READ_ONLY			1
#define GD_DEAD				2
#define GD_NATIVE_CAPACITY		3
#define GD_ADDED			4
#define GD_SUPPRESS_PART_SCAN		5
#define GD_OWNS_QUEUE			6
#define GD_ZONE_APPEND_USED		7

	struct mutex open_mutex;	/* open/close mutex */
	unsigned open_partitions;	/* number of open partitions */

	struct backing_dev_info	*bdi;
	struct kobject queue_kobj;	/* the queue/ directory */
	struct kobject *slave_dir;
#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
	struct list_head slave_bdevs;
#endif
	struct timer_rand_state *random;
	struct disk_events *ev;

#ifdef CONFIG_BLK_DEV_ZONED
	/*
	 * Zoned block device information. Reads of this information must be
	 * protected with blk_queue_enter() / blk_queue_exit(). Modifying this
	 * information is only allowed while no requests are being processed.
	 * See also blk_mq_freeze_queue() and blk_mq_unfreeze_queue().
	 */
	unsigned int		nr_zones;
	unsigned int		zone_capacity;
	unsigned int		last_zone_capacity;
	u8 __rcu		*zones_cond;
	unsigned int		zone_wplugs_hash_bits;
	atomic_t		nr_zone_wplugs;
	spinlock_t		zone_wplugs_lock;
	struct mempool		*zone_wplugs_pool;
	struct hlist_head	*zone_wplugs_hash;
	struct workqueue_struct *zone_wplugs_wq;
#endif /* CONFIG_BLK_DEV_ZONED */

#if IS_ENABLED(CONFIG_CDROM)
	struct cdrom_device_info *cdi;
#endif
	int node_id;
	struct badblocks *bb;
	struct lockdep_map lockdep_map;
	u64 diskseq;
	blk_mode_t open_mode;

	/*
	 * Independent sector access ranges. This is always NULL for
	 * devices that do not have multiple independent access ranges.
	 */
	struct blk_independent_access_ranges *ia_ranges;

	struct mutex rqos_state_mutex;	/* rqos state change mutex */
};

/**
 * disk_openers - returns how many openers there are for a disk
 * @disk: disk to check
 *
 * This returns the number of openers for a disk.  Note that this value is only
 * stable if disk->open_mutex is held.
 *
 * Note: Due to a quirk in the block layer open code, each open partition is
 * only counted once even if there are multiple openers.
 */
static inline unsigned int disk_openers(struct gendisk *disk)
{
	return atomic_read(&disk->part0->bd_openers);
}

/**
 * disk_has_partscan - return %true if partition scanning is enabled on a disk
 * @disk: disk to check
 *
 * Returns %true if partition scanning is enabled for @disk, or %false if
 * partition scanning is disabled either permanently or temporarily.
 */
static inline bool disk_has_partscan(struct gendisk *disk)
{
	return !(disk->flags & (GENHD_FL_NO_PART | GENHD_FL_HIDDEN)) &&
		!test_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
}

/*
 * The gendisk is refcounted by the part0 block_device, and the bd_device
 * therein is also used for device model presentation in sysfs.
 */
#define dev_to_disk(device) \
	(dev_to_bdev(device)->bd_disk)
#define disk_to_dev(disk) \
	(&((disk)->part0->bd_device))

#if IS_REACHABLE(CONFIG_CDROM)
#define disk_to_cdi(disk)	((disk)->cdi)
#else
#define disk_to_cdi(disk)	NULL
#endif

static inline dev_t disk_devt(struct gendisk *disk)
{
	return MKDEV(disk->major, disk->first_minor);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * We should strive for 1 << (PAGE_SHIFT + MAX_PAGECACHE_ORDER);
 * however, we constrain this to what we can validate and test.
 */
#define BLK_MAX_BLOCK_SIZE      SZ_64K
#else
#define BLK_MAX_BLOCK_SIZE      PAGE_SIZE
#endif

/*
 * blk_validate_limits() validates bsize, so drivers usually don't need to
 * call this directly.
 */
static inline int blk_validate_block_size(unsigned long bsize)
{
	if (bsize < 512 || bsize > BLK_MAX_BLOCK_SIZE || !is_power_of_2(bsize))
		return -EINVAL;

	return 0;
}

static inline bool blk_op_is_passthrough(blk_opf_t op)
{
	op &= REQ_OP_MASK;
	return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
}

/* flags set by the driver in queue_limits.features */
typedef unsigned int __bitwise blk_features_t;

/* supports a volatile write cache */
#define BLK_FEAT_WRITE_CACHE		((__force blk_features_t)(1u << 0))

/* supports passing on the FUA bit */
#define BLK_FEAT_FUA			((__force blk_features_t)(1u << 1))

/* rotational device (hard drive or floppy) */
#define BLK_FEAT_ROTATIONAL		((__force blk_features_t)(1u << 2))

/* contributes to the random number pool */
#define BLK_FEAT_ADD_RANDOM		((__force blk_features_t)(1u << 3))

/* do disk/partitions IO accounting */
#define BLK_FEAT_IO_STAT		((__force blk_features_t)(1u << 4))

/* don't modify data until writeback is done */
#define BLK_FEAT_STABLE_WRITES		((__force blk_features_t)(1u << 5))

/* always completes in submit context */
#define BLK_FEAT_SYNCHRONOUS		((__force blk_features_t)(1u << 6))

/* supports REQ_NOWAIT */
#define BLK_FEAT_NOWAIT			((__force blk_features_t)(1u << 7))

/* supports DAX */
#define BLK_FEAT_DAX			((__force blk_features_t)(1u << 8))

/* supports I/O polling */
#define BLK_FEAT_POLL			((__force blk_features_t)(1u << 9))

/* is a zoned device */
#define BLK_FEAT_ZONED			((__force blk_features_t)(1u << 10))

/* supports PCI(e) p2p requests */
#define BLK_FEAT_PCI_P2PDMA		((__force blk_features_t)(1u << 12))

/* skip this queue in blk_mq_(un)quiesce_tagset */
#define BLK_FEAT_SKIP_TAGSET_QUIESCE	((__force blk_features_t)(1u << 13))

/* undocumented magic for bcache */
#define BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE \
	((__force blk_features_t)(1u << 15))

/* atomic writes enabled */
#define BLK_FEAT_ATOMIC_WRITES \
	((__force blk_features_t)(1u << 16))

/*
 * Flags automatically inherited when stacking limits.
 */
#define BLK_FEAT_INHERIT_MASK \
	(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA | BLK_FEAT_ROTATIONAL | \
	 BLK_FEAT_STABLE_WRITES | BLK_FEAT_ZONED | \
	 BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE)

/* internal flags in queue_limits.flags */
typedef unsigned int __bitwise blk_flags_t;

/* do not send FLUSH/FUA commands despite advertising a write cache */
#define BLK_FLAG_WRITE_CACHE_DISABLED	((__force blk_flags_t)(1u << 0))

/* I/O topology is misaligned */
#define BLK_FLAG_MISALIGNED		((__force blk_flags_t)(1u << 1))

/* passthrough command IO accounting */
#define BLK_FLAG_IOSTATS_PASSTHROUGH	((__force blk_flags_t)(1u << 2))

struct queue_limits {
	blk_features_t		features;
	blk_flags_t		flags;
	unsigned long		seg_boundary_mask;
	unsigned long		virt_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_dev_sectors;
	unsigned int		chunk_sectors;
	unsigned int		max_sectors;
	unsigned int		max_user_sectors;
	unsigned int		max_segment_size;
	unsigned int		max_fast_segment_size;
	unsigned int		physical_block_size;
	unsigned int		logical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		max_hw_discard_sectors;
	unsigned int		max_user_discard_sectors;
	unsigned int		max_secure_erase_sectors;
	unsigned int		max_write_zeroes_sectors;
	unsigned int		max_wzeroes_unmap_sectors;
	unsigned int		max_hw_wzeroes_unmap_sectors;
	unsigned int		max_user_wzeroes_unmap_sectors;
	unsigned int		max_hw_zone_append_sectors;
	unsigned int		max_zone_append_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;
	unsigned int		zone_write_granularity;

	/* atomic write limits */
	unsigned int		atomic_write_hw_max;
	unsigned int		atomic_write_max_sectors;
	unsigned int		atomic_write_hw_boundary;
	unsigned int		atomic_write_boundary_sectors;
	unsigned int		atomic_write_hw_unit_min;
	unsigned int		atomic_write_unit_min;
	unsigned int		atomic_write_hw_unit_max;
	unsigned int		atomic_write_unit_max;

	unsigned short		max_segments;
	unsigned short		max_integrity_segments;
	unsigned short		max_discard_segments;

	unsigned short		max_write_streams;
	unsigned int		write_stream_granularity;

	unsigned int		max_open_zones;
	unsigned int		max_active_zones;

	/*
	 * Drivers that set dma_alignment to less than 511 must be prepared to
	 * handle individual bvecs that are not a multiple of SECTOR_SIZE in
	 * size, due to possible offsets.
	 */
	unsigned int		dma_alignment;
	unsigned int		dma_pad_mask;

	struct blk_integrity	integrity;
};

typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
			       void *data);

int disk_report_zone(struct gendisk *disk, struct blk_zone *zone,
		     unsigned int idx, struct blk_report_zones_args *args);

int blkdev_get_zone_info(struct block_device *bdev, sector_t sector,
			 struct blk_zone *zone);

#define BLK_ALL_ZONES  ((unsigned int)-1)
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data);
int blkdev_report_zones_cached(struct block_device *bdev, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data);
int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
		sector_t sectors, sector_t nr_sectors);
int blk_revalidate_disk_zones(struct gendisk *disk);
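
/*
 * Illustrative sketch (not part of this header): reporting all zones of a
 * zoned block device through a report_zones_cb callback.  "my_zone_cb" and
 * "my_ctx" are hypothetical names.
 *
 *	static int my_zone_cb(struct blk_zone *zone, unsigned int idx,
 *			      void *data)
 *	{
 *		struct my_ctx *ctx = data;
 *
 *		// e.g. record zone->start, zone->wp and zone->cond in ctx
 *		return 0;	// a non-zero return stops the iteration
 *	}
 *
 *	ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES, my_zone_cb, &ctx);
 *	// ret is the number of zones reported, or a negative errno
 */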

/*
 * Independent access ranges: struct blk_independent_access_range describes
 * a range of contiguous sectors that can be accessed using device command
 * execution resources that are independent from the resources used for
 * other access ranges. This is typically found with single-LUN multi-actuator
 * HDDs where each access range is served by a different set of heads.
 * The set of independent ranges supported by the device is defined using
 * struct blk_independent_access_ranges. The independent ranges must not overlap
 * and must include all sectors within the disk capacity (no sector holes
 * allowed).
 * For a device with multiple ranges, requests targeting sectors in different
 * ranges can be executed in parallel. A request can straddle an access range
 * boundary.
 */
struct blk_independent_access_range {
	struct kobject		kobj;
	sector_t		sector;
	sector_t		nr_sectors;
};

struct blk_independent_access_ranges {
	struct kobject				kobj;
	bool					sysfs_registered;
	unsigned int				nr_ia_ranges;
	struct blk_independent_access_range	ia_range[];
};

struct request_queue {
	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	struct elevator_queue	*elevator;

	const struct blk_mq_ops	*mq_ops;

	/* sw queues */
	struct blk_mq_ctx __percpu	*queue_ctx;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	unsigned int __data_racy rq_timeout;

	unsigned int		queue_depth;

	refcount_t		refs;

	/* hw dispatch queues */
	unsigned int		nr_hw_queues;
	struct blk_mq_hw_ctx * __rcu *queue_hw_ctx;

	struct percpu_ref	q_usage_counter;
	struct lock_class_key	io_lock_cls_key;
	struct lockdep_map	io_lockdep_map;

	struct lock_class_key	q_lock_cls_key;
	struct lockdep_map	q_lockdep_map;

	struct request		*last_merge;

	spinlock_t		queue_lock;

	int			quiesce_depth;

	struct gendisk		*disk;

	/*
	 * mq queue kobject
	 */
	struct kobject *mq_kobj;

	struct queue_limits	limits;

#ifdef CONFIG_PM
	struct device		*dev;
	enum rpm_status		rpm_status;
#endif

	/*
	 * Number of contexts that have called blk_set_pm_only(). If this
	 * counter is above zero then only RQF_PM requests are processed.
	 */
	atomic_t		pm_only;

	struct blk_queue_stats	*stats;
	struct rq_qos		*rq_qos;
	struct mutex		rq_qos_mutex;

	/*
	 * ida allocated id for this queue.  Used to index queues from
	 * ioctx.
	 */
	int			id;

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct blk_crypto_profile *crypto_profile;
	struct kobject *crypto_kobject;
#endif

	struct timer_list	timeout;
	struct work_struct	timeout_work;

	atomic_t		nr_active_requests_shared_tags;

	struct blk_mq_tags	*sched_shared_tags;

	struct list_head	icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq		*root_blkg;
	struct list_head	blkg_list;
	struct mutex		blkcg_mutex;
#endif

	int			node;

	spinlock_t		requeue_lock;
	struct list_head	requeue_list;
	struct delayed_work	requeue_work;

#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace __rcu	*blk_trace;
#endif
	/*
	 * for flush operations
	 */
	struct blk_flush_queue	*fq;
	struct list_head	flush_list;

	/*
	 * Protects against I/O scheduler switching, particularly when updating
	 * q->elevator. Since the elevator update code path may also modify
	 * q->nr_requests and wbt latency, this lock also protects the sysfs
	 * attrs nr_requests and wbt_lat_usec. Additionally the nr_hw_queues
	 * update may modify hctx tags, reserved-tags and cpumask, so this lock
	 * also helps protect the hctx sysfs/debugfs attrs. To ensure proper
	 * locking order during an elevator or nr_hw_queue update, first freeze
	 * the queue, then acquire ->elevator_lock.
	 */
	struct mutex		elevator_lock;

	struct mutex		sysfs_lock;
	/*
	 * Protects queue limits and also sysfs attribute read_ahead_kb.
	 */
	struct mutex		limits_lock;

	/*
	 * for reusing dead hctx instance in case of updating
	 * nr_hw_queues
	 */
	struct list_head	unused_hctx_list;
	spinlock_t		unused_hctx_lock;

	int			mq_freeze_depth;

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data *td;
#endif
	struct rcu_head		rcu_head;
#ifdef CONFIG_LOCKDEP
	struct task_struct	*mq_freeze_owner;
	int			mq_freeze_owner_depth;
	/*
	 * Records disk & queue state in current context, used in unfreeze
	 * queue
	 */
	bool			mq_freeze_disk_dead;
	bool			mq_freeze_queue_dying;
#endif
	wait_queue_head_t	mq_freeze_wq;
	/*
	 * Protect concurrent access to q_usage_counter by
	 * percpu_ref_kill() and percpu_ref_reinit().
	 */
	struct mutex		mq_freeze_lock;

	struct blk_mq_tag_set	*tag_set;
	struct list_head	tag_set_list;

	struct dentry		*debugfs_dir;
	struct dentry		*sched_debugfs_dir;
	struct dentry		*rqos_debugfs_dir;
	/*
	 * Serializes all debugfs metadata operations using the above dentries.
	 */
	struct mutex		debugfs_mutex;
};

/* Keep blk_queue_flag_name[] in sync with the definitions below */
enum {
	QUEUE_FLAG_DYING,		/* queue being torn down */
	QUEUE_FLAG_NOMERGES,		/* disable merge attempts */
	QUEUE_FLAG_SAME_COMP,		/* complete on same CPU-group */
	QUEUE_FLAG_FAIL_IO,		/* fake timeout */
	QUEUE_FLAG_NOXMERGES,		/* No extended merges */
	QUEUE_FLAG_SAME_FORCE,		/* force complete on same CPU */
	QUEUE_FLAG_INIT_DONE,		/* queue is initialized */
	QUEUE_FLAG_STATS,		/* track IO start and completion times */
	QUEUE_FLAG_REGISTERED,		/* queue has been registered to a disk */
	QUEUE_FLAG_QUIESCED,		/* queue has been quiesced */
	QUEUE_FLAG_RQ_ALLOC_TIME,	/* record rq->alloc_time_ns */
	QUEUE_FLAG_HCTX_ACTIVE,		/* at least one blk-mq hctx is active */
	QUEUE_FLAG_SQ_SCHED,		/* single queue style io dispatch */
	QUEUE_FLAG_DISABLE_WBT_DEF,	/* for sched to disable/enable wbt */
	QUEUE_FLAG_NO_ELV_SWITCH,	/* can't switch elevator any more */
	QUEUE_FLAG_QOS_ENABLED,		/* qos is enabled */
	QUEUE_FLAG_BIO_ISSUE_TIME,	/* record bio->issue_time_ns */
	QUEUE_FLAG_MAX
};

#define QUEUE_FLAG_MQ_DEFAULT	(1UL << QUEUE_FLAG_SAME_COMP)

void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);

#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	(!((q)->limits.features & BLK_FEAT_ROTATIONAL))
#define blk_queue_io_stat(q)	((q)->limits.features & BLK_FEAT_IO_STAT)
#define blk_queue_passthrough_stat(q)	\
	((q)->limits.flags & BLK_FLAG_IOSTATS_PASSTHROUGH)
#define blk_queue_dax(q)	((q)->limits.features & BLK_FEAT_DAX)
#define blk_queue_pci_p2pdma(q)	((q)->limits.features & BLK_FEAT_PCI_P2PDMA)
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
#define blk_queue_rq_alloc_time(q)	\
	test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags)
#else
#define blk_queue_rq_alloc_time(q)	false
#endif

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q)	test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
#define blk_queue_pm_only(q)	atomic_read(&(q)->pm_only)
#define blk_queue_registered(q)	test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
#define blk_queue_sq_sched(q)	test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
#define blk_queue_skip_tagset_quiesce(q) \
	((q)->limits.features & BLK_FEAT_SKIP_TAGSET_QUIESCE)
#define blk_queue_disable_wbt(q)	\
	test_bit(QUEUE_FLAG_DISABLE_WBT_DEF, &(q)->queue_flags)
#define blk_queue_no_elv_switch(q)	\
	test_bit(QUEUE_FLAG_NO_ELV_SWITCH, &(q)->queue_flags)

extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define dma_map_bvec(dev, bv, dir, attrs) \
	dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
	(dir), (attrs))

static inline bool queue_is_mq(struct request_queue *q)
{
	return q->mq_ops;
}

#ifdef CONFIG_PM
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
	return q->rpm_status;
}
#else
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
	return RPM_ACTIVE;
}
#endif

static inline bool blk_queue_is_zoned(struct request_queue *q)
{
	return IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
		(q->limits.features & BLK_FEAT_ZONED);
}

static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{
	if (!blk_queue_is_zoned(disk->queue))
		return 0;
	return sector >> ilog2(disk->queue->limits.chunk_sectors);
}

static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
	return bdev->bd_disk->queue->limits.max_open_zones;
}

static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
{
	return bdev->bd_disk->queue->limits.max_active_zones;
}

static inline unsigned int blk_queue_depth(struct request_queue *q)
{
	if (q->queue_depth)
		return q->queue_depth;

	return q->nr_requests;
}

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)

int __must_check add_disk_fwnode(struct device *parent, struct gendisk *disk,
				 const struct attribute_group **groups,
				 struct fwnode_handle *fwnode);
int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
				 const struct attribute_group **groups);
static inline int __must_check add_disk(struct gendisk *disk)
{
	return device_add_disk(NULL, disk, NULL);
}
void del_gendisk(struct gendisk *gp);
void invalidate_disk(struct gendisk *disk);
void set_disk_ro(struct gendisk *disk, bool read_only);
void disk_uevent(struct gendisk *disk, enum kobject_action action);

static inline u8 bdev_partno(const struct block_device *bdev)
{
	return atomic_read(&bdev->__bd_flags) & BD_PARTNO;
}

static inline bool bdev_test_flag(const struct block_device *bdev, unsigned flag)
{
	return atomic_read(&bdev->__bd_flags) & flag;
}

static inline void bdev_set_flag(struct block_device *bdev, unsigned flag)
{
	atomic_or(flag, &bdev->__bd_flags);
}

static inline void bdev_clear_flag(struct block_device *bdev, unsigned flag)
{
	atomic_andnot(flag, &bdev->__bd_flags);
}

static inline bool get_disk_ro(struct gendisk *disk)
{
	return bdev_test_flag(disk->part0, BD_READ_ONLY) ||
		test_bit(GD_READ_ONLY, &disk->state);
}

static inline bool bdev_read_only(struct block_device *bdev)
{
	return bdev_test_flag(bdev, BD_READ_ONLY) || get_disk_ro(bdev->bd_disk);
}

bool set_capacity_and_notify(struct gendisk *disk, sector_t size);
void disk_force_media_change(struct gendisk *disk);
void bdev_mark_dead(struct block_device *bdev, bool surprise);

void add_disk_randomness(struct gendisk *disk) __latent_entropy;
void rand_initialize_disk(struct gendisk *disk);

static inline sector_t get_start_sect(struct block_device *bdev)
{
	return bdev->bd_start_sect;
}

static inline sector_t bdev_nr_sectors(struct block_device *bdev)
{
	return bdev->bd_nr_sectors;
}

static inline loff_t bdev_nr_bytes(struct block_device *bdev)
{
	return (loff_t)bdev_nr_sectors(bdev) << SECTOR_SHIFT;
}

static inline sector_t get_capacity(struct gendisk *disk)
{
	return bdev_nr_sectors(disk->part0);
}

static inline u64 sb_bdev_nr_blocks(struct super_block *sb)
{
	return bdev_nr_sectors(sb->s_bdev) >>
		(sb->s_blocksize_bits - SECTOR_SHIFT);
}

#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
	return disk->nr_zones;
}

/**
 * bio_needs_zone_write_plugging - Check if a BIO needs to be handled with zone
 *				   write plugging
 * @bio: The BIO being submitted
 *
 * Return true whenever @bio execution needs to be handled through zone
 * write plugging (using blk_zone_plug_bio()). Return false otherwise.
 */
static inline bool bio_needs_zone_write_plugging(struct bio *bio)
{
	enum req_op op = bio_op(bio);

	/*
	 * Only zoned block devices have a zone write plug hash table, but not
	 * all of them have one (e.g. DM devices may not need one).
	 */
	if (!bio->bi_bdev->bd_disk->zone_wplugs_hash)
		return false;

	/* Only write operations need zone write plugging. */
	if (!op_is_write(op))
		return false;

	/* Ignore empty flush */
	if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
		return false;

	/* Ignore BIOs that have already been handled by zone write plugging. */
	if (bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING))
		return false;

	/*
	 * All zone write operations must be handled through zone write plugging
	 * using blk_zone_plug_bio().
	 */
	switch (op) {
	case REQ_OP_ZONE_APPEND:
	case REQ_OP_WRITE:
	case REQ_OP_WRITE_ZEROES:
	case REQ_OP_ZONE_FINISH:
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_RESET_ALL:
		return true;
	default:
		return false;
	}
}

bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs);
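
/*
 * Illustrative sketch (not from this file): how a bio-based driver's
 * submission path might use the two helpers above.  Whether a driver needs
 * this at all depends on how it handles zoned devices.
 *
 *	if (bio_needs_zone_write_plugging(bio) &&
 *	    blk_zone_plug_bio(bio, 0))
 *		return;	// the BIO is now owned by a zone write plug and
 *			// will be (re)submitted later by the block layer
 */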

/**
 * disk_zone_capacity - returns the zone capacity of the zone containing @sector
 * @disk:	disk to work with
 * @sector:	sector number within the zone to query
 *
 * Returns the zone capacity of the zone containing @sector. @sector can be any
 * sector in the zone.
 */
static inline unsigned int disk_zone_capacity(struct gendisk *disk,
					      sector_t sector)
{
	sector_t zone_sectors = disk->queue->limits.chunk_sectors;

	if (sector + zone_sectors >= get_capacity(disk))
		return disk->last_zone_capacity;
	return disk->zone_capacity;
}

static inline unsigned int bdev_zone_capacity(struct block_device *bdev,
					      sector_t pos)
{
	return disk_zone_capacity(bdev->bd_disk, pos);
}

bool bdev_zone_is_seq(struct block_device *bdev, sector_t sector);

#else /* CONFIG_BLK_DEV_ZONED */
static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
	return 0;
}

static inline bool bdev_zone_is_seq(struct block_device *bdev, sector_t sector)
{
	return false;
}

static inline bool bio_needs_zone_write_plugging(struct bio *bio)
{
	return false;
}

static inline bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
{
	return false;
}
#endif /* CONFIG_BLK_DEV_ZONED */

static inline unsigned int bdev_nr_zones(struct block_device *bdev)
{
	return disk_nr_zones(bdev->bd_disk);
}

int bdev_disk_changed(struct gendisk *disk, bool invalidate);

void put_disk(struct gendisk *disk);
struct gendisk *__blk_alloc_disk(struct queue_limits *lim, int node,
		struct lock_class_key *lkclass);

/**
 * blk_alloc_disk - allocate a gendisk structure
 * @lim: queue limits to be used for this disk.
 * @node_id: numa node to allocate on
 *
 * Allocate and pre-initialize a gendisk structure for use with BIO based
 * drivers.
 *
 * Returns an ERR_PTR on error, else the allocated disk.
 *
 * Context: can sleep
 */
#define blk_alloc_disk(lim, node_id)					\
({									\
	static struct lock_class_key __key;				\
									\
	__blk_alloc_disk(lim, node_id, &__key);				\
})
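
/*
 * Illustrative sketch (not from this file): minimal setup of a bio-based
 * driver.  "my_fops", "my_dev" and "my_nr_sectors" are hypothetical, and
 * error unwinding (put_disk() on failure) is trimmed.
 *
 *	struct queue_limits lim = {
 *		.logical_block_size	= 4096,
 *	};
 *	struct gendisk *disk;
 *
 *	disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
 *	if (IS_ERR(disk))
 *		return PTR_ERR(disk);
 *	disk->fops = &my_fops;
 *	disk->private_data = my_dev;
 *	set_capacity(disk, my_nr_sectors);
 *	return add_disk(disk);
 */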

int __register_blkdev(unsigned int major, const char *name,
		void (*probe)(dev_t devt));
#define register_blkdev(major, name) \
	__register_blkdev(major, name, NULL)
void unregister_blkdev(unsigned int major, const char *name);
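
/*
 * Illustrative sketch (not from this file): passing 0 as the major number
 * asks for a dynamically allocated major, which is returned on success.
 * "myblk" is a hypothetical driver name.
 *
 *	major = register_blkdev(0, "myblk");
 *	if (major < 0)
 *		return major;
 *	...
 *	unregister_blkdev(major, "myblk");
 */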

bool disk_check_media_change(struct gendisk *disk);
void set_capacity(struct gendisk *disk, sector_t size);

#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk);
#else
static inline int bd_link_disk_holder(struct block_device *bdev,
				      struct gendisk *disk)
{
	return 0;
}
static inline void bd_unlink_disk_holder(struct block_device *bdev,
					 struct gendisk *disk)
{
}
#endif /* CONFIG_BLOCK_HOLDER_DEPRECATED */

dev_t part_devt(struct gendisk *disk, u8 partno);
void inc_diskseq(struct gendisk *disk);
void blk_request_module(dev_t devt);

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
void submit_bio_noacct(struct bio *bio);
struct bio *bio_split_to_limits(struct bio *bio);
struct bio *bio_submit_split_bioset(struct bio *bio, unsigned int split_sectors,
				    struct bio_set *bs);

extern int blk_lld_busy(struct request_queue *q);
extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);

/* Helper to convert REQ_OP_XXX to its string format XXX */
extern const char *blk_op_str(enum req_op op);

int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);
const char *blk_status_to_str(blk_status_t status);

/* only poll the hardware once, don't continue until a completion was found */
#define BLK_POLL_ONESHOT		(1 << 0)
int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags);
int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
			unsigned int flags);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_queue;	/* this is never NULL */
}

/* Helper to convert BLK_ZONE_COND_XXX to its string format XXX */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);

static inline unsigned int bio_zone_no(struct bio *bio)
{
	return disk_zone_no(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
}

static inline bool bio_straddles_zones(struct bio *bio)
{
	return bio_sectors(bio) &&
		bio_zone_no(bio) !=
		disk_zone_no(bio->bi_bdev->bd_disk, bio_end_sector(bio) - 1);
}

/*
 * Return how much within the boundary is left to be used for I/O at a given
 * offset.
 */
static inline unsigned int blk_boundary_sectors_left(sector_t offset,
		unsigned int boundary_sectors)
{
	if (unlikely(!is_power_of_2(boundary_sectors)))
		return boundary_sectors - sector_div(offset, boundary_sectors);
	return boundary_sectors - (offset & (boundary_sectors - 1));
}

/**
 * queue_limits_start_update - start an atomic update of queue limits
 * @q:		queue to update
 *
 * This function starts an atomic update of the queue limits.  It takes a lock
 * to prevent other updates and returns a snapshot of the current limits that
 * the caller can modify.  The caller must call queue_limits_commit_update()
 * to finish the update.
 *
 * Context: process context.
 */
static inline struct queue_limits
queue_limits_start_update(struct request_queue *q)
{
	mutex_lock(&q->limits_lock);
	return q->limits;
}
int queue_limits_commit_update_frozen(struct request_queue *q,
		struct queue_limits *lim);
int queue_limits_commit_update(struct request_queue *q,
		struct queue_limits *lim);
int queue_limits_set(struct request_queue *q, struct queue_limits *lim);
int blk_validate_limits(struct queue_limits *lim);

/**
 * queue_limits_cancel_update - cancel an atomic update of queue limits
 * @q:		queue to update
 *
 * This function cancels an atomic update of the queue limits started by
 * queue_limits_start_update() and should be used when an error occurs after
 * starting the update.
 */
static inline void queue_limits_cancel_update(struct request_queue *q)
{
	mutex_unlock(&q->limits_lock);
}
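
/*
 * Illustrative sketch (not from this file) of the update pattern described
 * above: snapshot the limits under the lock, modify the copy, then commit,
 * which also drops the lock.  "new_limit" is a hypothetical value.
 *
 *	struct queue_limits lim;
 *
 *	lim = queue_limits_start_update(q);
 *	lim.max_hw_discard_sectors = new_limit;
 *	return queue_limits_commit_update(q, &lim);
 *
 * If an error is detected before committing, call
 * queue_limits_cancel_update(q) instead to drop the lock.
 */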

/*
 * These helpers are for drivers that have sloppy feature negotiation and might
 * have to disable DISCARD, WRITE_ZEROES or SECURE_DISCARD from the I/O
 * completion handler when the device returned an indicator that the respective
 * feature is not actually supported.  They are racy and the driver needs to
 * cope with that.  Try to avoid this scheme if you can.
 */
static inline void blk_queue_disable_discard(struct request_queue *q)
{
	q->limits.max_discard_sectors = 0;
}

static inline void blk_queue_disable_secure_erase(struct request_queue *q)
{
	q->limits.max_secure_erase_sectors = 0;
}

static inline void blk_queue_disable_write_zeroes(struct request_queue *q)
{
	q->limits.max_write_zeroes_sectors = 0;
	q->limits.max_wzeroes_unmap_sectors = 0;
}

/*
 * Access functions for manipulating queue properties
 */
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
		sector_t offset, const char *pfx);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);

struct blk_independent_access_ranges *
disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges);
void disk_set_independent_access_ranges(struct gendisk *disk,
				struct blk_independent_access_ranges *iars);

bool __must_check blk_get_queue(struct request_queue *);
extern void blk_put_queue(struct request_queue *);

void blk_mark_disk_dead(struct gendisk *disk);

struct rq_list {
	struct request *head;
	struct request *tail;
};

#ifdef CONFIG_BLOCK
/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period. This allows merging of sequential requests
 * into a single larger request. As the requests are moved from a per-task
 * list to the device's request_queue in a batch, this improves scalability
 * by reducing contention on the request_queue lock (see the usage sketch
 * below the plug helpers).
 *
 * It is ok not to disable preemption when adding the request to the plug list
 * or when attempting a merge. For details, please see schedule() where
 * blk_flush_plug() is called.
 */
struct blk_plug {
	struct rq_list mq_list; /* blk-mq requests */

	/* if ios_left is > 1, we can batch tag/rq allocations */
	struct rq_list cached_rqs;
	u64 cur_ktime;
	unsigned short nr_ios;

	unsigned short rq_count;

	bool multiple_queues;
	bool has_elevator;

	struct list_head cb_list; /* md requires an unplug callback */
};

struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
struct blk_plug_cb {
	struct list_head list;
	blk_plug_cb_fn callback;
	void *data;
};
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
					     void *data, int size);
extern void blk_start_plug(struct blk_plug *);
extern void blk_start_plug_nr_ios(struct blk_plug *, unsigned short);
extern void blk_finish_plug(struct blk_plug *);
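
/*
 * Illustrative usage sketch (not from this file): batching a burst of
 * submissions behind a plug so the block layer can merge and dispatch them
 * together.  "submit_my_bios" is a hypothetical helper.
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	submit_my_bios(...);	// e.g. submit_bio() in a loop
 *	blk_finish_plug(&plug);	// flush the plugged requests to the device
 */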

void __blk_flush_plug(struct blk_plug *plug, bool from_schedule);
static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
	if (plug)
		__blk_flush_plug(plug, async);
}

/*
 * tsk == current here
 */
static inline void blk_plug_invalidate_ts(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		plug->cur_ktime = 0;
	current->flags &= ~PF_BLOCK_TS;
}

int blkdev_issue_flush(struct block_device *bdev);
long nr_blockdev_pages(void);
#else /* CONFIG_BLOCK */
struct blk_plug {
};

static inline void blk_start_plug_nr_ios(struct blk_plug *plug,
					 unsigned short nr_ios)
{
}

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
}

static inline void blk_plug_invalidate_ts(struct task_struct *tsk)
{
}

static inline int blkdev_issue_flush(struct block_device *bdev)
{
	return 0;
}

static inline long nr_blockdev_pages(void)
{
	return 0;
}
#endif /* CONFIG_BLOCK */

extern void blk_io_schedule(void);

int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask);
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop);
int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp);

#define BLKDEV_ZERO_NOUNMAP	(1 << 0)  /* do not free blocks */
#define BLKDEV_ZERO_NOFALLBACK	(1 << 1)  /* don't write explicit zeroes */
#define BLKDEV_ZERO_KILLABLE	(1 << 2)  /* interruptible by fatal signals */

extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags);
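
/*
 * Illustrative sketch (not from this file): zeroing a range while refusing
 * the explicit-zeroes fallback.  With BLKDEV_ZERO_NOFALLBACK the call fails
 * (typically with -EOPNOTSUPP) instead of writing zeroes by hand when the
 * device has no offload for it.
 *
 *	err = blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
 *				   BLKDEV_ZERO_NOFALLBACK);
 */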

static inline int sb_issue_discard(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
	return blkdev_issue_discard(sb->s_bdev,
				    block << (sb->s_blocksize_bits -
					      SECTOR_SHIFT),
				    nr_blocks << (sb->s_blocksize_bits -
						  SECTOR_SHIFT),
				    gfp_mask);
}

static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask)
{
	return blkdev_issue_zeroout(sb->s_bdev,
				    block << (sb->s_blocksize_bits -
					      SECTOR_SHIFT),
				    nr_blocks << (sb->s_blocksize_bits -
						  SECTOR_SHIFT),
				    gfp_mask, 0);
}

static inline bool bdev_is_partition(struct block_device *bdev)
{
	return bdev_partno(bdev) != 0;
}

enum blk_default_limits {
	BLK_MAX_SEGMENTS	= 128,
	BLK_SAFE_MAX_SECTORS	= 255,
	BLK_MAX_SEGMENT_SIZE	= 65536,
	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
};

static inline struct queue_limits *bdev_limits(struct block_device *bdev)
{
	return &bdev_get_queue(bdev)->limits;
}

static inline unsigned long queue_segment_boundary(const struct request_queue *q)
{
	return q->limits.seg_boundary_mask;
}

static inline unsigned long queue_virt_boundary(const struct request_queue *q)
{
	return q->limits.virt_boundary_mask;
}

static inline unsigned int queue_max_sectors(const struct request_queue *q)
{
	return q->limits.max_sectors;
}

static inline unsigned int queue_max_bytes(struct request_queue *q)
{
	return min_t(unsigned int, queue_max_sectors(q), INT_MAX >> 9) << 9;
}

static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
{
	return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(const struct request_queue *q)
{
	return q->limits.max_segments;
}

static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
{
	return q->limits.max_discard_segments;
}

static inline unsigned int queue_max_segment_size(const struct request_queue *q)
{
	return q->limits.max_segment_size;
}

static inline bool queue_emulates_zone_append(struct request_queue *q)
{
	return blk_queue_is_zoned(q) && !q->limits.max_hw_zone_append_sectors;
}

static inline bool bdev_emulates_zone_append(struct block_device *bdev)
{
	return queue_emulates_zone_append(bdev_get_queue(bdev));
}

static inline unsigned int
bdev_max_zone_append_sectors(struct block_device *bdev)
{
	return bdev_limits(bdev)->max_zone_append_sectors;
}

static inline unsigned int bdev_max_segments(struct block_device *bdev)
{
	return queue_max_segments(bdev_get_queue(bdev));
}

static inline unsigned short bdev_max_write_streams(struct block_device *bdev)
{
	if (bdev_is_partition(bdev))
		return 0;
	return bdev_limits(bdev)->max_write_streams;
}

static inline unsigned queue_logical_block_size(const struct request_queue *q)
{
	return q->limits.logical_block_size;
}

static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
{
	return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(const struct request_queue *q)
{
	return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
	return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(const struct request_queue *q)
{
	return q->limits.io_min;
}

static inline unsigned int bdev_io_min(struct block_device *bdev)
{
	return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(const struct request_queue *q)
{
	return q->limits.io_opt;
}

static inline unsigned int bdev_io_opt(struct block_device *bdev)
{
	return queue_io_opt(bdev_get_queue(bdev));
}

static inline unsigned int
queue_zone_write_granularity(const struct request_queue *q)
{
	return q->limits.zone_write_granularity;
}

static inline unsigned int
bdev_zone_write_granularity(struct block_device *bdev)
{
	return queue_zone_write_granularity(bdev_get_queue(bdev));
}

int bdev_alignment_offset(struct block_device *bdev);
unsigned int bdev_discard_alignment(struct block_device *bdev);

static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev)
{
	return bdev_limits(bdev)->max_discard_sectors;
}

static inline unsigned int bdev_discard_granularity(struct block_device *bdev)
{
	return bdev_limits(bdev)->discard_granularity;
}

static inline unsigned int
bdev_max_secure_erase_sectors(struct block_device *bdev)
{
	return bdev_limits(bdev)->max_secure_erase_sectors;
}

static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
{
	return bdev_limits(bdev)->max_write_zeroes_sectors;
}

static inline unsigned int
bdev_write_zeroes_unmap_sectors(struct block_device *bdev)
{
	return bdev_limits(bdev)->max_wzeroes_unmap_sectors;
}

static inline bool bdev_nonrot(struct block_device *bdev)
{
	return blk_queue_nonrot(bdev_get_queue(bdev));
}

static inline bool bdev_synchronous(struct block_device *bdev)
{
	return bdev->bd_disk->queue->limits.features & BLK_FEAT_SYNCHRONOUS;
}

static inline bool bdev_stable_writes(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
	    q->limits.integrity.csum_type != BLK_INTEGRITY_CSUM_NONE)
		return true;
	return q->limits.features & BLK_FEAT_STABLE_WRITES;
}

static inline bool blk_queue_write_cache(struct request_queue *q)
{
	return (q->limits.features & BLK_FEAT_WRITE_CACHE) &&
		!(q->limits.flags & BLK_FLAG_WRITE_CACHE_DISABLED);
}

static inline bool bdev_write_cache(struct block_device *bdev)
{
	return blk_queue_write_cache(bdev_get_queue(bdev));
}

static inline bool bdev_fua(struct block_device *bdev)
{
	return bdev_limits(bdev)->features & BLK_FEAT_FUA;
}

static inline bool bdev_nowait(struct block_device *bdev)
{
	return bdev->bd_disk->queue->limits.features & BLK_FEAT_NOWAIT;
}

static inline bool bdev_is_zoned(struct block_device *bdev)
{
	return blk_queue_is_zoned(bdev_get_queue(bdev));
}

static inline unsigned int bdev_zone_no(struct block_device *bdev, sector_t sec)
{
	return disk_zone_no(bdev->bd_disk, sec);
}

static inline sector_t bdev_zone_sectors(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (!blk_queue_is_zoned(q))
		return 0;
	return q->limits.chunk_sectors;
}

static inline sector_t bdev_zone_start(struct block_device *bdev,
				       sector_t sector)
{
	return sector & ~(bdev_zone_sectors(bdev) - 1);
}

static inline sector_t bdev_offset_from_zone_start(struct block_device *bdev,
						   sector_t sector)
{
	return sector & (bdev_zone_sectors(bdev) - 1);
}

static inline sector_t bio_offset_from_zone_start(struct bio *bio)
{
	return bdev_offset_from_zone_start(bio->bi_bdev,
					   bio->bi_iter.bi_sector);
}

static inline bool bdev_is_zone_start(struct block_device *bdev,
				      sector_t sector)
{
	return bdev_offset_from_zone_start(bdev, sector) == 0;
}

/* Check whether @sector is a multiple of the zone size. */
static inline bool bdev_is_zone_aligned(struct block_device *bdev,
					sector_t sector)
{
	return bdev_is_zone_start(bdev, sector);
}

int blk_zone_issue_zeroout(struct block_device *bdev, sector_t sector,
			   sector_t nr_sects, gfp_t gfp_mask);

static inline unsigned int queue_dma_alignment(const struct request_queue *q)
{
	return q->limits.dma_alignment;
}

static inline unsigned int
queue_atomic_write_unit_max_bytes(const struct request_queue *q)
{
	return q->limits.atomic_write_unit_max;
}

static inline unsigned int
queue_atomic_write_unit_min_bytes(const struct request_queue *q)
{
	return q->limits.atomic_write_unit_min;
}

static inline unsigned int
queue_atomic_write_boundary_bytes(const struct request_queue *q)
{
	return q->limits.atomic_write_boundary_sectors << SECTOR_SHIFT;
}

static inline unsigned int
queue_atomic_write_max_bytes(const struct request_queue *q)
{
	return q->limits.atomic_write_max_sectors << SECTOR_SHIFT;
}

static inline unsigned int bdev_dma_alignment(struct block_device *bdev)
{
	return queue_dma_alignment(bdev_get_queue(bdev));
}

static inline unsigned int
blk_lim_dma_alignment_and_pad(struct queue_limits *lim)
{
	return lim->dma_alignment | lim->dma_pad_mask;
}

static inline bool blk_rq_aligned(struct request_queue *q, unsigned long addr,
				 unsigned int len)
{
	unsigned int alignment = blk_lim_dma_alignment_and_pad(&q->limits);

	return !(addr & alignment) && !(len & alignment);
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	return order_base_2(size >> SECTOR_SHIFT) + SECTOR_SHIFT;
}

int kblockd_schedule_work(struct work_struct *work);
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")

#ifdef CONFIG_BLK_INLINE_ENCRYPTION

bool blk_crypto_register(struct blk_crypto_profile *profile,
			 struct request_queue *q);

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static inline bool blk_crypto_register(struct blk_crypto_profile *profile,
				       struct request_queue *q)
{
	return true;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION */

enum blk_unique_id {
	/* these match the Designator Types specified in SPC */
	BLK_UID_T10	= 1,
	BLK_UID_EUI64	= 2,
	BLK_UID_NAA	= 3,
};

struct block_device_operations {
	void (*submit_bio)(struct bio *bio);
	int (*poll_bio)(struct bio *bio, struct io_comp_batch *iob,
			unsigned int flags);
	int (*open)(struct gendisk *disk, blk_mode_t mode);
	void (*release)(struct gendisk *disk);
	int (*ioctl)(struct block_device *bdev, blk_mode_t mode,
			unsigned cmd, unsigned long arg);
	int (*compat_ioctl)(struct block_device *bdev, blk_mode_t mode,
			unsigned cmd, unsigned long arg);
	unsigned int (*check_events) (struct gendisk *disk,
				      unsigned int clearing);
	void (*unlock_native_capacity) (struct gendisk *);
	int (*getgeo)(struct gendisk *, struct hd_geometry *);
	int (*set_read_only)(struct block_device *bdev, bool ro);
	void (*free_disk)(struct gendisk *disk);
	/*
	 * This callback is called with swap_lock and sometimes the page table
	 * lock held.
	 */
	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
	int (*report_zones)(struct gendisk *, sector_t sector,
			    unsigned int nr_zones,
			    struct blk_report_zones_args *args);
	char *(*devnode)(struct gendisk *disk, umode_t *mode);
	/* returns the length of the identifier or a negative errno: */
	int (*get_unique_id)(struct gendisk *disk, u8 id[16],
			enum blk_unique_id id_type);
	struct module *owner;
	const struct pr_ops *pr_ops;

	/*
	 * Special callback for probing GPT entry at a given sector.
	 * Needed by Android devices, used by GPT scanner and MMC blk
	 * driver.
	 */
	int (*alternative_gpt_sector)(struct gendisk *disk, sector_t *sector);
};

#ifdef CONFIG_COMPAT
extern int blkdev_compat_ptr_ioctl(struct block_device *, blk_mode_t,
				      unsigned int, unsigned long);
#else
#define blkdev_compat_ptr_ioctl NULL
#endif

static inline void blk_wake_io_task(struct task_struct *waiter)
{
	/*
	 * If we're polling, the task itself is doing the completions. For
	 * that case, we don't need to signal a wakeup, it's enough to just
	 * mark us as RUNNING.
	 */
	if (waiter == current)
		__set_current_state(TASK_RUNNING);
	else
		wake_up_process(waiter);
}

unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,
				 unsigned long start_time);
void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
		      unsigned int sectors, unsigned long start_time);

unsigned long bio_start_io_acct(struct bio *bio);
void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
		struct block_device *orig_bdev);

/**
 * bio_end_io_acct - end I/O accounting for bio based drivers
 * @bio:	bio to end account for
 * @start_time:	start time returned by bio_start_io_acct()
 */
static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
{
	return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev);
}

int bdev_validate_blocksize(struct block_device *bdev, int block_size);
int set_blocksize(struct file *file, int size);

int lookup_bdev(const char *pathname, dev_t *dev);

void blkdev_show(struct seq_file *seqf, off_t offset);

#define BDEVNAME_SIZE	32	/* Largest string for a blockdev identifier */
#define BDEVT_SIZE	10	/* Largest string for MAJ:MIN for blkdev */
#ifdef CONFIG_BLOCK
#define BLKDEV_MAJOR_MAX	512
#else
#define BLKDEV_MAJOR_MAX	0
#endif

struct blk_holder_ops {
	void (*mark_dead)(struct block_device *bdev, bool surprise);

	/*
	 * Sync the file system mounted on the block device.
	 */
	void (*sync)(struct block_device *bdev);

	/*
	 * Freeze the file system mounted on the block device.
	 */
	int (*freeze)(struct block_device *bdev);

	/*
	 * Thaw the file system mounted on the block device.
	 */
	int (*thaw)(struct block_device *bdev);
};

/*
 * For filesystems using @fs_holder_ops, the @holder argument passed to
 * helpers used to open and claim block devices via
 * bd_prepare_to_claim() must point to a superblock.
 */
extern const struct blk_holder_ops fs_holder_ops;

/*
 * Return the correct open flags for blkdev_get_by_* for super block flags
 * as stored in sb->s_flags.
 */
#define sb_open_mode(flags) \
	(BLK_OPEN_READ | BLK_OPEN_RESTRICT_WRITES | \
	 (((flags) & SB_RDONLY) ? 0 : BLK_OPEN_WRITE))

struct file *bdev_file_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
		const struct blk_holder_ops *hops);
struct file *bdev_file_open_by_path(const char *path, blk_mode_t mode,
		void *holder, const struct blk_holder_ops *hops);
int bd_prepare_to_claim(struct block_device *bdev, void *holder,
		const struct blk_holder_ops *hops);
void bd_abort_claiming(struct block_device *bdev, void *holder);
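
/*
 * Illustrative sketch (not from this file): a filesystem opening its backing
 * device with a superblock as the holder, as required by fs_holder_ops.
 * "path" and "sb" are hypothetical locals.
 *
 *	struct file *bdev_file;
 *
 *	bdev_file = bdev_file_open_by_path(path, sb_open_mode(sb->s_flags),
 *					   sb, &fs_holder_ops);
 *	if (IS_ERR(bdev_file))
 *		return PTR_ERR(bdev_file);
 *	...
 *	bdev_fput(bdev_file);	// drop the reference when done
 */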

struct block_device *I_BDEV(struct inode *inode);
struct block_device *file_bdev(struct file *bdev_file);
bool disk_live(struct gendisk *disk);
unsigned int block_size(struct block_device *bdev);

#ifdef CONFIG_BLOCK
void invalidate_bdev(struct block_device *bdev);
int sync_blockdev(struct block_device *bdev);
int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend);
int sync_blockdev_nowait(struct block_device *bdev);
void sync_bdevs(bool wait);
void bdev_statx(const struct path *path, struct kstat *stat, u32 request_mask);
void printk_all_partitions(void);
int __init early_lookup_bdev(const char *pathname, dev_t *dev);
#else
static inline void invalidate_bdev(struct block_device *bdev)
{
}
static inline int sync_blockdev(struct block_device *bdev)
{
	return 0;
}
static inline int sync_blockdev_nowait(struct block_device *bdev)
{
	return 0;
}
static inline void sync_bdevs(bool wait)
{
}
static inline void bdev_statx(const struct path *path, struct kstat *stat,
		u32 request_mask)
{
}
static inline void printk_all_partitions(void)
{
}
static inline int early_lookup_bdev(const char *pathname, dev_t *dev)
{
	return -EINVAL;
}
#endif /* CONFIG_BLOCK */

int bdev_freeze(struct block_device *bdev);
int bdev_thaw(struct block_device *bdev);
void bdev_fput(struct file *bdev_file);

struct io_comp_batch {
	struct rq_list req_list;
	bool need_ts;
	void (*complete)(struct io_comp_batch *);
};

static inline bool blk_atomic_write_start_sect_aligned(sector_t sector,
						struct queue_limits *limits)
{
	unsigned int alignment = max(limits->atomic_write_hw_unit_min,
				limits->atomic_write_hw_boundary);

	return IS_ALIGNED(sector, alignment >> SECTOR_SHIFT);
}

static inline bool bdev_can_atomic_write(struct block_device *bdev)
{
	struct request_queue *bd_queue = bdev->bd_queue;
	struct queue_limits *limits = &bd_queue->limits;

	if (!limits->atomic_write_unit_min)
		return false;

	if (bdev_is_partition(bdev))
		return blk_atomic_write_start_sect_aligned(bdev->bd_start_sect,
							limits);

	return true;
}

static inline unsigned int
bdev_atomic_write_unit_min_bytes(struct block_device *bdev)
{
	if (!bdev_can_atomic_write(bdev))
		return 0;
	return queue_atomic_write_unit_min_bytes(bdev_get_queue(bdev));
}

static inline unsigned int
bdev_atomic_write_unit_max_bytes(struct block_device *bdev)
{
	if (!bdev_can_atomic_write(bdev))
		return 0;
	return queue_atomic_write_unit_max_bytes(bdev_get_queue(bdev));
}

static inline int bio_split_rw_at(struct bio *bio,
		const struct queue_limits *lim,
		unsigned *segs, unsigned max_bytes)
{
	return bio_split_io_at(bio, lim, segs, max_bytes, lim->dma_alignment);
}

#define DEFINE_IO_COMP_BATCH(name)	struct io_comp_batch name = { }

#endif /* _LINUX_BLKDEV_H */