/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Portions Copyright (C) 1992 Drew Eckhardt
 */
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/types.h>
#include <linux/blk_types.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/minmax.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/bio.h>
#include <linux/gfp.h>
#include <linux/kdev_t.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/blkzoned.h>
#include <linux/sched.h>
#include <linux/sbitmap.h>
#include <linux/uuid.h>
#include <linux/xarray.h>
#include <linux/file.h>
#include <linux/lockdep.h>

struct module;
struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct blkcg_gq;
struct blk_flush_queue;
struct kiocb;
struct pr_ops;
struct rq_qos;
struct blk_queue_stats;
struct blk_stat_callback;
struct blk_crypto_profile;

extern const struct device_type disk_type;
extern const struct device_type part_type;
extern const struct class block_class;

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS		6

#define DISK_MAX_PARTS		256
#define DISK_NAME_LEN		32

#define PARTITION_META_INFO_VOLNAMELTH	64
/*
 * Enough for the string representation of any kind of UUID plus the
 * terminating NUL. EFI UUID is 36 characters. MSDOS UUID is 11 characters.
 */
#define PARTITION_META_INFO_UUIDLTH	(UUID_STRING_LEN + 1)

struct partition_meta_info {
        char uuid[PARTITION_META_INFO_UUIDLTH];
        u8 volname[PARTITION_META_INFO_VOLNAMELTH];
};

/**
 * DOC: genhd capability flags
 *
 * ``GENHD_FL_REMOVABLE``: indicates that the block device gives access to
 * removable media. When set, the device remains present even when media is not
 * inserted. Shall not be set for devices which are removed entirely when the
 * media is removed.
 *
 * ``GENHD_FL_HIDDEN``: the block device is hidden; it doesn't produce events,
 * doesn't appear in sysfs, and can't be opened from userspace or using
 * blkdev_get*. Used for the underlying components of multipath devices.
 *
 * ``GENHD_FL_NO_PART``: partition support is disabled. The kernel will not
 * scan for partitions from add_disk, and users can't add partitions manually.
 */
enum {
        GENHD_FL_REMOVABLE		= 1 << 0,
        GENHD_FL_HIDDEN			= 1 << 1,
        GENHD_FL_NO_PART		= 1 << 2,
};

enum {
        DISK_EVENT_MEDIA_CHANGE		= 1 << 0, /* media changed */
        DISK_EVENT_EJECT_REQUEST	= 1 << 1, /* eject requested */
};

enum {
        /* Poll even if events_poll_msecs is unset */
        DISK_EVENT_FLAG_POLL			= 1 << 0,
        /* Forward events to udev */
        DISK_EVENT_FLAG_UEVENT			= 1 << 1,
        /* Block event polling when open for exclusive write */
        DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE	= 1 << 2,
};

struct disk_events;
struct badblocks;

enum blk_integrity_checksum {
        BLK_INTEGRITY_CSUM_NONE		= 0,
        BLK_INTEGRITY_CSUM_IP		= 1,
        BLK_INTEGRITY_CSUM_CRC		= 2,
        BLK_INTEGRITY_CSUM_CRC64	= 3,
} __packed;

struct blk_integrity {
        unsigned char flags;
        enum blk_integrity_checksum csum_type;
        unsigned char metadata_size;
        unsigned char pi_offset;
        unsigned char interval_exp;
        unsigned char tag_size;
        unsigned char pi_tuple_size;
};

typedef unsigned int __bitwise blk_mode_t;

/* open for reading */
#define BLK_OPEN_READ			((__force blk_mode_t)(1 << 0))
/* open for writing */
#define BLK_OPEN_WRITE			((__force blk_mode_t)(1 << 1))
/* open exclusively (vs other exclusive openers) */
#define BLK_OPEN_EXCL			((__force blk_mode_t)(1 << 2))
/* opened with O_NDELAY */
#define BLK_OPEN_NDELAY			((__force blk_mode_t)(1 << 3))
/* open for "writes" only for ioctls (special hack for floppy.c) */
#define BLK_OPEN_WRITE_IOCTL		((__force blk_mode_t)(1 << 4))
/* open is exclusive wrt all other BLK_OPEN_WRITE opens to the device */
#define BLK_OPEN_RESTRICT_WRITES	((__force blk_mode_t)(1 << 5))
/* return partition scanning errors */
#define BLK_OPEN_STRICT_SCAN		((__force blk_mode_t)(1 << 6))

struct gendisk {
        /*
         * major/first_minor/minors should not be set by any new driver, the
         * block core will take care of allocating them automatically.
         */
        int major;
        int first_minor;
        int minors;

        char disk_name[DISK_NAME_LEN];	/* name of major driver */

        unsigned short events;		/* supported events */
        unsigned short event_flags;	/* flags related to event processing */

        struct xarray part_tbl;
        struct block_device *part0;

        const struct block_device_operations *fops;
        struct request_queue *queue;
        void *private_data;

        struct bio_set bio_split;

        int flags;
        unsigned long state;
#define GD_NEED_PART_SCAN		0
#define GD_READ_ONLY			1
#define GD_DEAD				2
#define GD_NATIVE_CAPACITY		3
#define GD_ADDED			4
#define GD_SUPPRESS_PART_SCAN		5
#define GD_OWNS_QUEUE			6

        struct mutex open_mutex;	/* open/close mutex */
        unsigned open_partitions;	/* number of open partitions */

        struct backing_dev_info *bdi;
        struct kobject queue_kobj;	/* the queue/ directory */
        struct kobject *slave_dir;
#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
        struct list_head slave_bdevs;
#endif
        struct timer_rand_state *random;
        struct disk_events *ev;

#ifdef CONFIG_BLK_DEV_ZONED
        /*
         * Zoned block device information. Reads of this information must be
         * protected with blk_queue_enter() / blk_queue_exit(). Modifying this
         * information is only allowed while no requests are being processed.
         * See also blk_mq_freeze_queue() and blk_mq_unfreeze_queue().
         */
        unsigned int nr_zones;
        unsigned int zone_capacity;
        unsigned int last_zone_capacity;
        unsigned long __rcu *conv_zones_bitmap;
        unsigned int zone_wplugs_hash_bits;
        atomic_t nr_zone_wplugs;
        spinlock_t zone_wplugs_lock;
        struct mempool *zone_wplugs_pool;
        struct hlist_head *zone_wplugs_hash;
        struct workqueue_struct *zone_wplugs_wq;
#endif /* CONFIG_BLK_DEV_ZONED */

#if IS_ENABLED(CONFIG_CDROM)
        struct cdrom_device_info *cdi;
#endif
        int node_id;
        struct badblocks *bb;
        struct lockdep_map lockdep_map;
        u64 diskseq;
        blk_mode_t open_mode;

        /*
         * Independent sector access ranges. This is always NULL for
         * devices that do not have multiple independent access ranges.
         */
        struct blk_independent_access_ranges *ia_ranges;

        struct mutex rqos_state_mutex;	/* rqos state change mutex */
};

/**
 * disk_openers - return how many openers there are for a disk
 * @disk: disk to check
 *
 * This returns the number of openers for a disk. Note that this value is only
 * stable if disk->open_mutex is held.
 *
 * Note: Due to a quirk in the block layer open code, each open partition is
 * only counted once even if there are multiple openers.
 */
static inline unsigned int disk_openers(struct gendisk *disk)
{
        return atomic_read(&disk->part0->bd_openers);
}

/**
 * disk_has_partscan - return %true if partition scanning is enabled on a disk
 * @disk: disk to check
 *
 * Returns %true if partition scanning is enabled for @disk, or %false if
 * partition scanning is disabled either permanently or temporarily.
 */
static inline bool disk_has_partscan(struct gendisk *disk)
{
        return !(disk->flags & (GENHD_FL_NO_PART | GENHD_FL_HIDDEN)) &&
                !test_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
}

/*
 * The gendisk is refcounted by the part0 block_device, and the bd_device
 * therein is also used for device model presentation in sysfs.
 */
#define dev_to_disk(device) \
        (dev_to_bdev(device)->bd_disk)
#define disk_to_dev(disk) \
        (&((disk)->part0->bd_device))

#if IS_REACHABLE(CONFIG_CDROM)
#define disk_to_cdi(disk)	((disk)->cdi)
#else
#define disk_to_cdi(disk)	NULL
#endif

static inline dev_t disk_devt(struct gendisk *disk)
{
        return MKDEV(disk->major, disk->first_minor);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * We should strive for 1 << (PAGE_SHIFT + MAX_PAGECACHE_ORDER),
 * however we constrain this to what we can validate and test.
 */
#define BLK_MAX_BLOCK_SIZE	SZ_64K
#else
#define BLK_MAX_BLOCK_SIZE	PAGE_SIZE
#endif

/* blk_validate_limits() validates bsize, so drivers don't usually need to */
static inline int blk_validate_block_size(unsigned long bsize)
{
        if (bsize < 512 || bsize > BLK_MAX_BLOCK_SIZE || !is_power_of_2(bsize))
                return -EINVAL;

        return 0;
}

static inline bool blk_op_is_passthrough(blk_opf_t op)
{
        op &= REQ_OP_MASK;
        return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
}

/* flags set by the driver in queue_limits.features */
typedef unsigned int __bitwise blk_features_t;

/* supports a volatile write cache */
#define BLK_FEAT_WRITE_CACHE		((__force blk_features_t)(1u << 0))

/* supports passing on the FUA bit */
#define BLK_FEAT_FUA			((__force blk_features_t)(1u << 1))

/* rotational device (hard drive or floppy) */
#define BLK_FEAT_ROTATIONAL		((__force blk_features_t)(1u << 2))

/* contributes to the random number pool */
#define BLK_FEAT_ADD_RANDOM		((__force blk_features_t)(1u << 3))

/* do disk/partitions IO accounting */
#define BLK_FEAT_IO_STAT		((__force blk_features_t)(1u << 4))

/* don't modify data until writeback is done */
#define BLK_FEAT_STABLE_WRITES		((__force blk_features_t)(1u << 5))

/* always completes in submit context */
#define BLK_FEAT_SYNCHRONOUS		((__force blk_features_t)(1u << 6))

/* supports REQ_NOWAIT */
#define BLK_FEAT_NOWAIT			((__force blk_features_t)(1u << 7))

/* supports DAX */
#define BLK_FEAT_DAX			((__force blk_features_t)(1u << 8))

/* supports I/O polling */
#define BLK_FEAT_POLL			((__force blk_features_t)(1u << 9))

/* is a zoned device */
#define BLK_FEAT_ZONED			((__force blk_features_t)(1u << 10))

/* supports PCI(e) p2p requests */
#define BLK_FEAT_PCI_P2PDMA		((__force blk_features_t)(1u << 12))

/* skip this queue in blk_mq_(un)quiesce_tagset */
#define BLK_FEAT_SKIP_TAGSET_QUIESCE	((__force blk_features_t)(1u << 13))

/* undocumented magic for bcache */
#define BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE \
        ((__force blk_features_t)(1u << 15))

/* atomic writes enabled */
#define BLK_FEAT_ATOMIC_WRITES \
        ((__force blk_features_t)(1u << 16))

/*
 * Flags automatically inherited when stacking limits.
 */
#define BLK_FEAT_INHERIT_MASK \
        (BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA | BLK_FEAT_ROTATIONAL | \
         BLK_FEAT_STABLE_WRITES | BLK_FEAT_ZONED | \
         BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE)

/* internal flags in queue_limits.flags */
typedef unsigned int __bitwise blk_flags_t;

/* do not send FLUSH/FUA commands despite advertising a write cache */
#define BLK_FLAG_WRITE_CACHE_DISABLED	((__force blk_flags_t)(1u << 0))

/* I/O topology is misaligned */
#define BLK_FLAG_MISALIGNED		((__force blk_flags_t)(1u << 1))

/* passthrough command IO accounting */
#define BLK_FLAG_IOSTATS_PASSTHROUGH	((__force blk_flags_t)(1u << 2))

struct queue_limits {
        blk_features_t features;
        blk_flags_t flags;
        unsigned long seg_boundary_mask;
        unsigned long virt_boundary_mask;

        unsigned int max_hw_sectors;
        unsigned int max_dev_sectors;
        unsigned int chunk_sectors;
        unsigned int max_sectors;
        unsigned int max_user_sectors;
        unsigned int max_segment_size;
        unsigned int min_segment_size;
        unsigned int physical_block_size;
        unsigned int logical_block_size;
        unsigned int alignment_offset;
        unsigned int io_min;
        unsigned int io_opt;
        unsigned int max_discard_sectors;
        unsigned int max_hw_discard_sectors;
        unsigned int max_user_discard_sectors;
        unsigned int max_secure_erase_sectors;
        unsigned int max_write_zeroes_sectors;
        unsigned int max_wzeroes_unmap_sectors;
        unsigned int max_hw_wzeroes_unmap_sectors;
        unsigned int max_user_wzeroes_unmap_sectors;
        unsigned int max_hw_zone_append_sectors;
        unsigned int max_zone_append_sectors;
        unsigned int discard_granularity;
        unsigned int discard_alignment;
        unsigned int zone_write_granularity;

        /* atomic write limits */
        unsigned int atomic_write_hw_max;
        unsigned int atomic_write_max_sectors;
        unsigned int atomic_write_hw_boundary;
        unsigned int atomic_write_boundary_sectors;
        unsigned int atomic_write_hw_unit_min;
        unsigned int atomic_write_unit_min;
        unsigned int atomic_write_hw_unit_max;
        unsigned int atomic_write_unit_max;

        unsigned short max_segments;
        unsigned short max_integrity_segments;
        unsigned short max_discard_segments;

        unsigned short max_write_streams;
        unsigned int write_stream_granularity;

        unsigned int max_open_zones;
        unsigned int max_active_zones;

        /*
         * Drivers that set dma_alignment to less than 511 must be prepared to
         * handle individual bvecs that are not a multiple of SECTOR_SIZE
         * due to possible offsets.
         */
        unsigned int dma_alignment;
        unsigned int dma_pad_mask;

        struct blk_integrity integrity;
};

typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
                               void *data);

#define BLK_ALL_ZONES  ((unsigned int)-1)
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
                        unsigned int nr_zones, report_zones_cb cb, void *data);
int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
                     sector_t sectors, sector_t nr_sectors);
int blk_revalidate_disk_zones(struct gendisk *disk);

/*
 * Independent access ranges: struct blk_independent_access_range describes
 * a range of contiguous sectors that can be accessed using device command
 * execution resources that are independent from the resources used for
 * other access ranges. This is typically found with single-LUN multi-actuator
 * HDDs where each access range is served by a different set of heads.
 * The set of independent ranges supported by the device is defined using
 * struct blk_independent_access_ranges. The independent ranges must not overlap
 * and must include all sectors within the disk capacity (no sector holes
 * allowed).
 * For a device with multiple ranges, requests targeting sectors in different
 * ranges can be executed in parallel. A request can straddle an access range
 * boundary.
 */
struct blk_independent_access_range {
        struct kobject kobj;
        sector_t sector;
        sector_t nr_sectors;
};

struct blk_independent_access_ranges {
        struct kobject kobj;
        bool sysfs_registered;
        unsigned int nr_ia_ranges;
        struct blk_independent_access_range ia_range[];
};

struct request_queue {
        /*
         * The queue owner gets to use this for whatever they like.
         * ll_rw_blk doesn't touch it.
         */
        void *queuedata;

        struct elevator_queue *elevator;

        const struct blk_mq_ops *mq_ops;

        /* sw queues */
        struct blk_mq_ctx __percpu *queue_ctx;

        /*
         * various queue flags, see QUEUE_* below
         */
        unsigned long queue_flags;

        unsigned int rq_timeout;

        unsigned int queue_depth;

        refcount_t refs;

        /* hw dispatch queues */
        unsigned int nr_hw_queues;
        struct xarray hctx_table;

        struct percpu_ref q_usage_counter;
        struct lock_class_key io_lock_cls_key;
        struct lockdep_map io_lockdep_map;

        struct lock_class_key q_lock_cls_key;
        struct lockdep_map q_lockdep_map;

        struct request *last_merge;

        spinlock_t queue_lock;

        int quiesce_depth;

        struct gendisk *disk;

        /*
         * mq queue kobject
         */
        struct kobject *mq_kobj;

        struct queue_limits limits;

#ifdef CONFIG_PM
        struct device *dev;
        enum rpm_status rpm_status;
#endif

        /*
         * Number of contexts that have called blk_set_pm_only(). If this
         * counter is above zero then only RQF_PM requests are processed.
         */
        atomic_t pm_only;

        struct blk_queue_stats *stats;
        struct rq_qos *rq_qos;
        struct mutex rq_qos_mutex;

        /*
         * ida allocated id for this queue. Used to index queues from
         * ioctx.
         */
        int id;

        /*
         * queue settings
         */
        unsigned long nr_requests;	/* Max # of requests */

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
        struct blk_crypto_profile *crypto_profile;
        struct kobject *crypto_kobject;
#endif

        struct timer_list timeout;
        struct work_struct timeout_work;

        atomic_t nr_active_requests_shared_tags;

        struct blk_mq_tags *sched_shared_tags;

        struct list_head icq_list;
#ifdef CONFIG_BLK_CGROUP
        DECLARE_BITMAP(blkcg_pols, BLKCG_MAX_POLS);
        struct blkcg_gq *root_blkg;
        struct list_head blkg_list;
        struct mutex blkcg_mutex;
#endif

        int node;

        spinlock_t requeue_lock;
        struct list_head requeue_list;
        struct delayed_work requeue_work;

#ifdef CONFIG_BLK_DEV_IO_TRACE
        struct blk_trace __rcu *blk_trace;
#endif
        /*
         * for flush operations
         */
        struct blk_flush_queue *fq;
        struct list_head flush_list;

        /*
         * Protects against I/O scheduler switching, particularly when updating
         * q->elevator. Since the elevator update code path may also modify q->
         * nr_requests and wbt latency, this lock also protects the sysfs attrs
         * nr_requests and wbt_lat_usec. Additionally the nr_hw_queues update
         * may modify hctx tags, reserved-tags and cpumask, so this lock also
         * helps protect the hctx sysfs/debugfs attrs. To ensure proper locking
         * order during an elevator or nr_hw_queue update, first freeze the
         * queue, then acquire ->elevator_lock.
         */
        struct mutex elevator_lock;

        struct mutex sysfs_lock;
        /*
         * Protects queue limits and also sysfs attribute read_ahead_kb.
         */
        struct mutex limits_lock;

        /*
         * for reusing dead hctx instance in case of updating
         * nr_hw_queues
         */
        struct list_head unused_hctx_list;
        spinlock_t unused_hctx_lock;

        int mq_freeze_depth;

#ifdef CONFIG_BLK_DEV_THROTTLING
        /* Throttle data */
        struct throtl_data *td;
#endif
        struct rcu_head rcu_head;
#ifdef CONFIG_LOCKDEP
        struct task_struct *mq_freeze_owner;
        int mq_freeze_owner_depth;
        /*
         * Records disk & queue state in current context, used in unfreeze
         * queue
         */
        bool mq_freeze_disk_dead;
        bool mq_freeze_queue_dying;
#endif
        wait_queue_head_t mq_freeze_wq;
        /*
         * Protect concurrent access to q_usage_counter by
         * percpu_ref_kill() and percpu_ref_reinit().
         */
        struct mutex mq_freeze_lock;

        struct blk_mq_tag_set *tag_set;
        struct list_head tag_set_list;

        struct dentry *debugfs_dir;
        struct dentry *sched_debugfs_dir;
        struct dentry *rqos_debugfs_dir;
        /*
         * Serializes all debugfs metadata operations using the above dentries.
         */
        struct mutex debugfs_mutex;
};

/* Keep blk_queue_flag_name[] in sync with the definitions below */
enum {
        QUEUE_FLAG_DYING,		/* queue being torn down */
        QUEUE_FLAG_NOMERGES,		/* disable merge attempts */
        QUEUE_FLAG_SAME_COMP,		/* complete on same CPU-group */
        QUEUE_FLAG_FAIL_IO,		/* fake timeout */
        QUEUE_FLAG_NOXMERGES,		/* No extended merges */
        QUEUE_FLAG_SAME_FORCE,		/* force complete on same CPU */
        QUEUE_FLAG_INIT_DONE,		/* queue is initialized */
        QUEUE_FLAG_STATS,		/* track IO start and completion times */
        QUEUE_FLAG_REGISTERED,		/* queue has been registered to a disk */
        QUEUE_FLAG_QUIESCED,		/* queue has been quiesced */
        QUEUE_FLAG_RQ_ALLOC_TIME,	/* record rq->alloc_time_ns */
        QUEUE_FLAG_HCTX_ACTIVE,		/* at least one blk-mq hctx is active */
        QUEUE_FLAG_SQ_SCHED,		/* single queue style io dispatch */
        QUEUE_FLAG_DISABLE_WBT_DEF,	/* for sched to disable/enable wbt */
        QUEUE_FLAG_NO_ELV_SWITCH,	/* can't switch elevator any more */
        QUEUE_FLAG_QOS_ENABLED,		/* qos is enabled */
        QUEUE_FLAG_BIO_ISSUE_TIME,	/* record bio->issue_time_ns */
        QUEUE_FLAG_MAX
};

#define QUEUE_FLAG_MQ_DEFAULT	(1UL << QUEUE_FLAG_SAME_COMP)

void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);

#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
        test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	(!((q)->limits.features & BLK_FEAT_ROTATIONAL))
#define blk_queue_io_stat(q)	((q)->limits.features & BLK_FEAT_IO_STAT)
#define blk_queue_passthrough_stat(q)	\
        ((q)->limits.flags & BLK_FLAG_IOSTATS_PASSTHROUGH)
#define blk_queue_dax(q)	((q)->limits.features & BLK_FEAT_DAX)
#define blk_queue_pci_p2pdma(q)	((q)->limits.features & BLK_FEAT_PCI_P2PDMA)
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
#define blk_queue_rq_alloc_time(q)	\
        test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags)
#else
#define blk_queue_rq_alloc_time(q)	false
#endif

#define blk_noretry_request(rq) \
        ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
                            REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q)	test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
#define blk_queue_pm_only(q)	atomic_read(&(q)->pm_only)
#define blk_queue_registered(q)	test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
#define blk_queue_sq_sched(q)	test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
#define blk_queue_skip_tagset_quiesce(q) \
        ((q)->limits.features & BLK_FEAT_SKIP_TAGSET_QUIESCE)
#define blk_queue_disable_wbt(q)	\
        test_bit(QUEUE_FLAG_DISABLE_WBT_DEF, &(q)->queue_flags)
#define blk_queue_no_elv_switch(q)	\
        test_bit(QUEUE_FLAG_NO_ELV_SWITCH, &(q)->queue_flags)

extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define dma_map_bvec(dev, bv, dir, attrs) \
        dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
                           (dir), (attrs))
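
/*
 * Usage sketch for dma_map_bvec() (illustrative only; "dev" and "bv" stand
 * for a driver's DMA-capable device and a bio_vec it is processing):
 *
 *	dma_addr_t addr = dma_map_bvec(dev, bv, DMA_TO_DEVICE, 0);
 *
 *	if (dma_mapping_error(dev, addr))
 *		return -EIO;
 */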

static inline bool queue_is_mq(struct request_queue *q)
{
        return q->mq_ops;
}

#ifdef CONFIG_PM
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
        return q->rpm_status;
}
#else
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
        return RPM_ACTIVE;
}
#endif

static inline bool blk_queue_is_zoned(struct request_queue *q)
{
        return IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
                (q->limits.features & BLK_FEAT_ZONED);
}

static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{
        if (!blk_queue_is_zoned(disk->queue))
                return 0;
        return sector >> ilog2(disk->queue->limits.chunk_sectors);
}

static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
        return bdev->bd_disk->queue->limits.max_open_zones;
}

static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
{
        return bdev->bd_disk->queue->limits.max_active_zones;
}

static inline unsigned int blk_queue_depth(struct request_queue *q)
{
        if (q->queue_depth)
                return q->queue_depth;

        return q->nr_requests;
}

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio) \
        for (; _bio; _bio = _bio->bi_next)

int __must_check add_disk_fwnode(struct device *parent, struct gendisk *disk,
                                 const struct attribute_group **groups,
                                 struct fwnode_handle *fwnode);
int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
                                 const struct attribute_group **groups);
static inline int __must_check add_disk(struct gendisk *disk)
{
        return device_add_disk(NULL, disk, NULL);
}
void del_gendisk(struct gendisk *gp);
void invalidate_disk(struct gendisk *disk);
void set_disk_ro(struct gendisk *disk, bool read_only);
void disk_uevent(struct gendisk *disk, enum kobject_action action);

static inline u8 bdev_partno(const struct block_device *bdev)
{
        return atomic_read(&bdev->__bd_flags) & BD_PARTNO;
}

static inline bool bdev_test_flag(const struct block_device *bdev, unsigned flag)
{
        return atomic_read(&bdev->__bd_flags) & flag;
}

static inline void bdev_set_flag(struct block_device *bdev, unsigned flag)
{
        atomic_or(flag, &bdev->__bd_flags);
}

static inline void bdev_clear_flag(struct block_device *bdev, unsigned flag)
{
        atomic_andnot(flag, &bdev->__bd_flags);
}

static inline bool get_disk_ro(struct gendisk *disk)
{
        return bdev_test_flag(disk->part0, BD_READ_ONLY) ||
                test_bit(GD_READ_ONLY, &disk->state);
}

static inline bool bdev_read_only(struct block_device *bdev)
{
        return bdev_test_flag(bdev, BD_READ_ONLY) || get_disk_ro(bdev->bd_disk);
}

bool set_capacity_and_notify(struct gendisk *disk, sector_t size);
void disk_force_media_change(struct gendisk *disk);
void bdev_mark_dead(struct block_device *bdev, bool surprise);

void add_disk_randomness(struct gendisk *disk) __latent_entropy;
void rand_initialize_disk(struct gendisk *disk);

static inline sector_t get_start_sect(struct block_device *bdev)
{
        return bdev->bd_start_sect;
}

static inline sector_t bdev_nr_sectors(struct block_device *bdev)
{
        return bdev->bd_nr_sectors;
}

static inline loff_t bdev_nr_bytes(struct block_device *bdev)
{
        return (loff_t)bdev_nr_sectors(bdev) << SECTOR_SHIFT;
}

static inline sector_t get_capacity(struct gendisk *disk)
{
        return bdev_nr_sectors(disk->part0);
}

static inline u64 sb_bdev_nr_blocks(struct super_block *sb)
{
        return bdev_nr_sectors(sb->s_bdev) >>
                (sb->s_blocksize_bits - SECTOR_SHIFT);
}

#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
        return disk->nr_zones;
}

/**
 * bio_needs_zone_write_plugging - Check if a BIO needs to be handled with zone
 * write plugging
 * @bio: The BIO being submitted
 *
 * Return true whenever @bio execution needs to be handled through zone
 * write plugging (using blk_zone_plug_bio()). Return false otherwise.
 */
static inline bool bio_needs_zone_write_plugging(struct bio *bio)
{
        enum req_op op = bio_op(bio);

        /*
         * Only zoned block devices have a zone write plug hash table. But not
         * all of them have one (e.g. DM devices may not need one).
         */
        if (!bio->bi_bdev->bd_disk->zone_wplugs_hash)
                return false;

        /* Only write operations need zone write plugging. */
        if (!op_is_write(op))
                return false;

        /* Ignore empty flush */
        if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
                return false;

        /* Ignore BIOs that already have been handled by zone write plugging. */
        if (bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING))
                return false;

        /*
         * All zone write operations must be handled through zone write plugging
         * using blk_zone_plug_bio().
         */
        switch (op) {
        case REQ_OP_ZONE_APPEND:
        case REQ_OP_WRITE:
        case REQ_OP_WRITE_ZEROES:
        case REQ_OP_ZONE_FINISH:
        case REQ_OP_ZONE_RESET:
        case REQ_OP_ZONE_RESET_ALL:
                return true;
        default:
                return false;
        }
}

bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs);
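
/*
 * Sketch of the expected use from a submission path (illustrative; assumes
 * that a true return from blk_zone_plug_bio() means the BIO is now owned by
 * a zone write plug and must not be processed further by the caller):
 *
 *	if (bio_needs_zone_write_plugging(bio) &&
 *	    blk_zone_plug_bio(bio, nr_segs))
 *		return;
 */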

/**
 * disk_zone_capacity - returns the zone capacity of zone containing @sector
 * @disk: disk to work with
 * @sector: sector number within the queried zone
 *
 * Returns the zone capacity of a zone containing @sector. @sector can be any
 * sector in the zone.
 */
static inline unsigned int disk_zone_capacity(struct gendisk *disk,
                                              sector_t sector)
{
        sector_t zone_sectors = disk->queue->limits.chunk_sectors;

        if (sector + zone_sectors >= get_capacity(disk))
                return disk->last_zone_capacity;
        return disk->zone_capacity;
}

static inline unsigned int bdev_zone_capacity(struct block_device *bdev,
                                              sector_t pos)
{
        return disk_zone_capacity(bdev->bd_disk, pos);
}
#else /* CONFIG_BLK_DEV_ZONED */
static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
        return 0;
}

static inline bool bio_needs_zone_write_plugging(struct bio *bio)
{
        return false;
}

static inline bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
{
        return false;
}
#endif /* CONFIG_BLK_DEV_ZONED */

static inline unsigned int bdev_nr_zones(struct block_device *bdev)
{
        return disk_nr_zones(bdev->bd_disk);
}

int bdev_disk_changed(struct gendisk *disk, bool invalidate);

void put_disk(struct gendisk *disk);
struct gendisk *__blk_alloc_disk(struct queue_limits *lim, int node,
                                 struct lock_class_key *lkclass);

/**
 * blk_alloc_disk - allocate a gendisk structure
 * @lim: queue limits to be used for this disk.
 * @node_id: numa node to allocate on
 *
 * Allocate and pre-initialize a gendisk structure for use with BIO based
 * drivers.
 *
 * Returns an ERR_PTR on error, else the allocated disk.
 *
 * Context: can sleep
 */
#define blk_alloc_disk(lim, node_id)					\
({									\
        static struct lock_class_key __key;				\
									\
        __blk_alloc_disk(lim, node_id, &__key);				\
})
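
/*
 * Allocation sketch for a BIO-based driver (illustrative; my_fops and
 * my_data are hypothetical driver symbols, and error handling is
 * abbreviated):
 *
 *	struct queue_limits lim = {
 *		.logical_block_size = 4096,
 *	};
 *	struct gendisk *disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
 *
 *	if (IS_ERR(disk))
 *		return PTR_ERR(disk);
 *	disk->fops = &my_fops;
 *	disk->private_data = my_data;
 *	if (add_disk(disk))
 *		put_disk(disk);
 */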

int __register_blkdev(unsigned int major, const char *name,
                      void (*probe)(dev_t devt));
#define register_blkdev(major, name) \
        __register_blkdev(major, name, NULL)
void unregister_blkdev(unsigned int major, const char *name);
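
/*
 * Passing 0 as the major number asks the core for a dynamically allocated
 * major, which is returned on success (sketch; "mydrv" is a made-up name):
 *
 *	int major = register_blkdev(0, "mydrv");
 *
 *	if (major < 0)
 *		return major;
 */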

bool disk_check_media_change(struct gendisk *disk);
void set_capacity(struct gendisk *disk, sector_t size);

#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk);
#else
static inline int bd_link_disk_holder(struct block_device *bdev,
                                      struct gendisk *disk)
{
        return 0;
}
static inline void bd_unlink_disk_holder(struct block_device *bdev,
                                         struct gendisk *disk)
{
}
#endif /* CONFIG_BLOCK_HOLDER_DEPRECATED */

dev_t part_devt(struct gendisk *disk, u8 partno);
void inc_diskseq(struct gendisk *disk);
void blk_request_module(dev_t devt);

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
void submit_bio_noacct(struct bio *bio);
struct bio *bio_split_to_limits(struct bio *bio);
struct bio *bio_submit_split_bioset(struct bio *bio, unsigned int split_sectors,
                                    struct bio_set *bs);

extern int blk_lld_busy(struct request_queue *q);
extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);

/* Helper to convert REQ_OP_XXX to its string format XXX */
extern const char *blk_op_str(enum req_op op);

int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);
const char *blk_status_to_str(blk_status_t status);

/* only poll the hardware once, don't continue until a completion was found */
#define BLK_POLL_ONESHOT	(1 << 0)
int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags);
int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
                    unsigned int flags);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
        return bdev->bd_queue;	/* this is never NULL */
}

/* Helper to convert BLK_ZONE_COND_XXX to its string format XXX */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);

static inline unsigned int bio_zone_no(struct bio *bio)
{
        return disk_zone_no(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
}

static inline bool bio_straddles_zones(struct bio *bio)
{
        return bio_sectors(bio) &&
                bio_zone_no(bio) !=
                disk_zone_no(bio->bi_bdev->bd_disk, bio_end_sector(bio) - 1);
}

/*
 * Return how much within the boundary is left to be used for I/O at a given
 * offset.
 */
static inline unsigned int blk_boundary_sectors_left(sector_t offset,
                                                     unsigned int boundary_sectors)
{
        if (unlikely(!is_power_of_2(boundary_sectors)))
                return boundary_sectors - sector_div(offset, boundary_sectors);
        return boundary_sectors - (offset & (boundary_sectors - 1));
}
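
/*
 * Example: with a power-of-2 boundary of 64 sectors, an I/O starting at
 * offset 100 has 64 - (100 & 63) = 28 sectors left before the boundary.
 */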

/**
 * queue_limits_start_update - start an atomic update of queue limits
 * @q: queue to update
 *
 * This function starts an atomic update of the queue limits. It takes a lock
 * to prevent other updates and returns a snapshot of the current limits that
 * the caller can modify. The caller must call queue_limits_commit_update()
 * to finish the update.
 *
 * Context: process context.
 */
static inline struct queue_limits
queue_limits_start_update(struct request_queue *q)
{
        mutex_lock(&q->limits_lock);
        return q->limits;
}
int queue_limits_commit_update_frozen(struct request_queue *q,
                                      struct queue_limits *lim);
int queue_limits_commit_update(struct request_queue *q,
                               struct queue_limits *lim);
int queue_limits_set(struct request_queue *q, struct queue_limits *lim);
int blk_validate_limits(struct queue_limits *lim);

/**
 * queue_limits_cancel_update - cancel an atomic update of queue limits
 * @q: queue to update
 *
 * This function cancels an atomic update of the queue limits started by
 * queue_limits_start_update() and should be used when an error occurs after
 * starting update.
 */
static inline void queue_limits_cancel_update(struct request_queue *q)
{
        mutex_unlock(&q->limits_lock);
}
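
/*
 * Canonical update pattern (sketch; the max_hw_sectors value is arbitrary):
 *
 *	struct queue_limits lim = queue_limits_start_update(q);
 *	int err;
 *
 *	lim.max_hw_sectors = 1024;
 *	err = queue_limits_commit_update(q, &lim);
 *
 * If an error forces bailing out before committing, call
 * queue_limits_cancel_update(q) instead so that limits_lock is released.
 */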

/*
 * These helpers are for drivers that have sloppy feature negotiation and might
 * have to disable DISCARD, WRITE_ZEROES or SECURE_DISCARD from the I/O
 * completion handler when the device returned an indicator that the respective
 * feature is not actually supported. They are racy and the driver needs to
 * cope with that. Try to avoid this scheme if you can.
 */
static inline void blk_queue_disable_discard(struct request_queue *q)
{
        q->limits.max_discard_sectors = 0;
}

static inline void blk_queue_disable_secure_erase(struct request_queue *q)
{
        q->limits.max_secure_erase_sectors = 0;
}

static inline void blk_queue_disable_write_zeroes(struct request_queue *q)
{
        q->limits.max_write_zeroes_sectors = 0;
        q->limits.max_wzeroes_unmap_sectors = 0;
}

/*
 * Access functions for manipulating queue properties
 */
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                            sector_t offset);
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
                             sector_t offset, const char *pfx);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);

struct blk_independent_access_ranges *
disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges);
void disk_set_independent_access_ranges(struct gendisk *disk,
                                struct blk_independent_access_ranges *iars);

bool __must_check blk_get_queue(struct request_queue *);
extern void blk_put_queue(struct request_queue *);

void blk_mark_disk_dead(struct gendisk *disk);

struct rq_list {
        struct request *head;
        struct request *tail;
};

#ifdef CONFIG_BLOCK
/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period. This allows merging of sequential requests
 * into a single larger request. As the requests are moved from a per-task
 * list to the device's request_queue in a batch, this results in improved
 * scalability as the contention on the request_queue lock is reduced.
 *
 * It is ok not to disable preemption when adding the request to the plug list
 * or when attempting a merge. For details, please see schedule() where
 * blk_flush_plug() is called.
 */
struct blk_plug {
        struct rq_list mq_list;		/* blk-mq requests */

        /* if ios_left is > 1, we can batch tag/rq allocations */
        struct rq_list cached_rqs;
        u64 cur_ktime;
        unsigned short nr_ios;

        unsigned short rq_count;

        bool multiple_queues;
        bool has_elevator;

        struct list_head cb_list;	/* md requires an unplug callback */
};

struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
struct blk_plug_cb {
        struct list_head list;
        blk_plug_cb_fn callback;
        void *data;
};
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
                                             void *data, int size);
extern void blk_start_plug(struct blk_plug *);
extern void blk_start_plug_nr_ios(struct blk_plug *, unsigned short);
extern void blk_finish_plug(struct blk_plug *);

void __blk_flush_plug(struct blk_plug *plug, bool from_schedule);
static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
        if (plug)
                __blk_flush_plug(plug, async);
}
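
/*
 * Typical plugging pattern around a batch of submissions (sketch; the loop
 * body is illustrative):
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	while (has_more_bios())
 *		submit_bio(next_bio());
 *	blk_finish_plug(&plug);
 */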

/*
 * tsk == current here
 */
static inline void blk_plug_invalidate_ts(struct task_struct *tsk)
{
        struct blk_plug *plug = tsk->plug;

        if (plug)
                plug->cur_ktime = 0;
        current->flags &= ~PF_BLOCK_TS;
}

int blkdev_issue_flush(struct block_device *bdev);
long nr_blockdev_pages(void);
#else /* CONFIG_BLOCK */
struct blk_plug {
};

static inline void blk_start_plug_nr_ios(struct blk_plug *plug,
                                         unsigned short nr_ios)
{
}

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
}

static inline void blk_plug_invalidate_ts(struct task_struct *tsk)
{
}

static inline int blkdev_issue_flush(struct block_device *bdev)
{
        return 0;
}

static inline long nr_blockdev_pages(void)
{
        return 0;
}
#endif /* CONFIG_BLOCK */

extern void blk_io_schedule(void);

int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                         sector_t nr_sects, gfp_t gfp_mask);
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                           sector_t nr_sects, gfp_t gfp_mask, struct bio **biop);
int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
                              sector_t nr_sects, gfp_t gfp);

#define BLKDEV_ZERO_NOUNMAP	(1 << 0)  /* do not free blocks */
#define BLKDEV_ZERO_NOFALLBACK	(1 << 1)  /* don't write explicit zeroes */
#define BLKDEV_ZERO_KILLABLE	(1 << 2)  /* interruptible by fatal signals */

extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
                unsigned flags);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, unsigned flags);
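
/*
 * Example (sketch): zero a range but fail with an error instead of falling
 * back to writing zeroes when the device lacks efficient zeroing support:
 *
 *	err = blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
 *				   BLKDEV_ZERO_NOFALLBACK);
 */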

static inline int sb_issue_discard(struct super_block *sb, sector_t block,
                sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
        return blkdev_issue_discard(sb->s_bdev,
                                    block << (sb->s_blocksize_bits -
                                              SECTOR_SHIFT),
                                    nr_blocks << (sb->s_blocksize_bits -
                                                  SECTOR_SHIFT),
                                    gfp_mask);
}

static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
                sector_t nr_blocks, gfp_t gfp_mask)
{
        return blkdev_issue_zeroout(sb->s_bdev,
                                    block << (sb->s_blocksize_bits -
                                              SECTOR_SHIFT),
                                    nr_blocks << (sb->s_blocksize_bits -
                                                  SECTOR_SHIFT),
                                    gfp_mask, 0);
}

static inline bool bdev_is_partition(struct block_device *bdev)
{
        return bdev_partno(bdev) != 0;
}

enum blk_default_limits {
        BLK_MAX_SEGMENTS	= 128,
        BLK_SAFE_MAX_SECTORS	= 255,
        BLK_MAX_SEGMENT_SIZE	= 65536,
        BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
};

static inline struct queue_limits *bdev_limits(struct block_device *bdev)
{
        return &bdev_get_queue(bdev)->limits;
}

static inline unsigned long queue_segment_boundary(const struct request_queue *q)
{
        return q->limits.seg_boundary_mask;
}

static inline unsigned long queue_virt_boundary(const struct request_queue *q)
{
        return q->limits.virt_boundary_mask;
}

static inline unsigned int queue_max_sectors(const struct request_queue *q)
{
        return q->limits.max_sectors;
}

static inline unsigned int queue_max_bytes(struct request_queue *q)
{
        return min_t(unsigned int, queue_max_sectors(q), INT_MAX >> 9) << 9;
}

static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
{
        return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(const struct request_queue *q)
{
        return q->limits.max_segments;
}

static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
{
        return q->limits.max_discard_segments;
}

static inline unsigned int queue_max_segment_size(const struct request_queue *q)
{
        return q->limits.max_segment_size;
}

static inline bool queue_emulates_zone_append(struct request_queue *q)
{
        return blk_queue_is_zoned(q) && !q->limits.max_hw_zone_append_sectors;
}

static inline bool bdev_emulates_zone_append(struct block_device *bdev)
{
        return queue_emulates_zone_append(bdev_get_queue(bdev));
}

static inline unsigned int
bdev_max_zone_append_sectors(struct block_device *bdev)
{
        return bdev_limits(bdev)->max_zone_append_sectors;
}

static inline unsigned int bdev_max_segments(struct block_device *bdev)
{
        return queue_max_segments(bdev_get_queue(bdev));
}

static inline unsigned short bdev_max_write_streams(struct block_device *bdev)
{
        if (bdev_is_partition(bdev))
                return 0;
        return bdev_limits(bdev)->max_write_streams;
}

static inline unsigned queue_logical_block_size(const struct request_queue *q)
{
        return q->limits.logical_block_size;
}

static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
{
        return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(const struct request_queue *q)
{
        return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
        return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(const struct request_queue *q)
{
        return q->limits.io_min;
}

static inline unsigned int bdev_io_min(struct block_device *bdev)
{
        return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(const struct request_queue *q)
{
        return q->limits.io_opt;
}

static inline unsigned int bdev_io_opt(struct block_device *bdev)
{
        return queue_io_opt(bdev_get_queue(bdev));
}

static inline unsigned int
queue_zone_write_granularity(const struct request_queue *q)
{
        return q->limits.zone_write_granularity;
}

static inline unsigned int
bdev_zone_write_granularity(struct block_device *bdev)
{
        return queue_zone_write_granularity(bdev_get_queue(bdev));
}

int bdev_alignment_offset(struct block_device *bdev);
unsigned int bdev_discard_alignment(struct block_device *bdev);

static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev)
{
        return bdev_limits(bdev)->max_discard_sectors;
}

static inline unsigned int bdev_discard_granularity(struct block_device *bdev)
{
        return bdev_limits(bdev)->discard_granularity;
}

static inline unsigned int
bdev_max_secure_erase_sectors(struct block_device *bdev)
{
        return bdev_limits(bdev)->max_secure_erase_sectors;
}

static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
{
        return bdev_limits(bdev)->max_write_zeroes_sectors;
}

static inline unsigned int
bdev_write_zeroes_unmap_sectors(struct block_device *bdev)
{
        return bdev_limits(bdev)->max_wzeroes_unmap_sectors;
}

static inline bool bdev_nonrot(struct block_device *bdev)
{
        return blk_queue_nonrot(bdev_get_queue(bdev));
}

static inline bool bdev_synchronous(struct block_device *bdev)
{
        return bdev->bd_disk->queue->limits.features & BLK_FEAT_SYNCHRONOUS;
}

static inline bool bdev_stable_writes(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
            q->limits.integrity.csum_type != BLK_INTEGRITY_CSUM_NONE)
                return true;
        return q->limits.features & BLK_FEAT_STABLE_WRITES;
}

static inline bool blk_queue_write_cache(struct request_queue *q)
{
        return (q->limits.features & BLK_FEAT_WRITE_CACHE) &&
                !(q->limits.flags & BLK_FLAG_WRITE_CACHE_DISABLED);
}

static inline bool bdev_write_cache(struct block_device *bdev)
{
        return blk_queue_write_cache(bdev_get_queue(bdev));
}

static inline bool bdev_fua(struct block_device *bdev)
{
        return bdev_limits(bdev)->features & BLK_FEAT_FUA;
}

static inline bool bdev_nowait(struct block_device *bdev)
{
        return bdev->bd_disk->queue->limits.features & BLK_FEAT_NOWAIT;
}

static inline bool bdev_is_zoned(struct block_device *bdev)
{
        return blk_queue_is_zoned(bdev_get_queue(bdev));
}

static inline unsigned int bdev_zone_no(struct block_device *bdev, sector_t sec)
{
        return disk_zone_no(bdev->bd_disk, sec);
}

static inline sector_t bdev_zone_sectors(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (!blk_queue_is_zoned(q))
                return 0;
        return q->limits.chunk_sectors;
}

static inline sector_t bdev_offset_from_zone_start(struct block_device *bdev,
                                                   sector_t sector)
{
        return sector & (bdev_zone_sectors(bdev) - 1);
}

static inline sector_t bio_offset_from_zone_start(struct bio *bio)
{
        return bdev_offset_from_zone_start(bio->bi_bdev,
                                           bio->bi_iter.bi_sector);
}

static inline bool bdev_is_zone_start(struct block_device *bdev,
                                      sector_t sector)
{
        return bdev_offset_from_zone_start(bdev, sector) == 0;
}

/* Check whether @sector is a multiple of the zone size. */
static inline bool bdev_is_zone_aligned(struct block_device *bdev,
                                        sector_t sector)
{
        return bdev_is_zone_start(bdev, sector);
}

/**
 * bdev_zone_is_seq - check if a sector belongs to a sequential write zone
 * @bdev: block device to check
 * @sector: sector number
 *
 * Check if @sector on @bdev is contained in a sequential write required zone.
 */
static inline bool bdev_zone_is_seq(struct block_device *bdev, sector_t sector)
{
        bool is_seq = false;

#if IS_ENABLED(CONFIG_BLK_DEV_ZONED)
        if (bdev_is_zoned(bdev)) {
                struct gendisk *disk = bdev->bd_disk;
                unsigned long *bitmap;

                rcu_read_lock();
                bitmap = rcu_dereference(disk->conv_zones_bitmap);
                is_seq = !bitmap ||
                        !test_bit(disk_zone_no(disk, sector), bitmap);
                rcu_read_unlock();
        }
#endif

        return is_seq;
}

int blk_zone_issue_zeroout(struct block_device *bdev, sector_t sector,
                           sector_t nr_sects, gfp_t gfp_mask);

static inline unsigned int queue_dma_alignment(const struct request_queue *q)
{
        return q->limits.dma_alignment;
}

static inline unsigned int
queue_atomic_write_unit_max_bytes(const struct request_queue *q)
{
        return q->limits.atomic_write_unit_max;
}

static inline unsigned int
queue_atomic_write_unit_min_bytes(const struct request_queue *q)
{
        return q->limits.atomic_write_unit_min;
}

static inline unsigned int
queue_atomic_write_boundary_bytes(const struct request_queue *q)
{
        return q->limits.atomic_write_boundary_sectors << SECTOR_SHIFT;
}

static inline unsigned int
queue_atomic_write_max_bytes(const struct request_queue *q)
{
        return q->limits.atomic_write_max_sectors << SECTOR_SHIFT;
}

static inline unsigned int bdev_dma_alignment(struct block_device *bdev)
{
        return queue_dma_alignment(bdev_get_queue(bdev));
}

static inline unsigned int
blk_lim_dma_alignment_and_pad(struct queue_limits *lim)
{
        return lim->dma_alignment | lim->dma_pad_mask;
}

static inline bool blk_rq_aligned(struct request_queue *q, unsigned long addr,
                                  unsigned int len)
{
        unsigned int alignment = blk_lim_dma_alignment_and_pad(&q->limits);

        return !(addr & alignment) && !(len & alignment);
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
        return order_base_2(size >> SECTOR_SHIFT) + SECTOR_SHIFT;
}
1615
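/*
 * Illustrative note (not part of this header): blksize_bits() returns
 * the log2 of a power-of-two block size, e.g.:
 *
 *	blksize_bits(512)  == 9
 *	blksize_bits(1024) == 10
 *	blksize_bits(4096) == 12
 */
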
int kblockd_schedule_work(struct work_struct *work);
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")

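/*
 * Illustrative sketch (not part of this header): a block driver declares
 * an alias so that opening a device node with its major number can
 * trigger module auto-loading.  For example, with FLOPPY_MAJOR being 2:
 *
 *	MODULE_ALIAS_BLOCKDEV_MAJOR(FLOPPY_MAJOR);
 *
 * expands to MODULE_ALIAS("block-major-2-*").
 */
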
#ifdef CONFIG_BLK_INLINE_ENCRYPTION

bool blk_crypto_register(struct blk_crypto_profile *profile,
			 struct request_queue *q);

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static inline bool blk_crypto_register(struct blk_crypto_profile *profile,
				       struct request_queue *q)
{
	return true;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION */

enum blk_unique_id {
	/* these match the Designator Types specified in SPC */
	BLK_UID_T10	= 1,
	BLK_UID_EUI64	= 2,
	BLK_UID_NAA	= 3,
};

struct block_device_operations {
	void (*submit_bio)(struct bio *bio);
	int (*poll_bio)(struct bio *bio, struct io_comp_batch *iob,
			unsigned int flags);
	int (*open)(struct gendisk *disk, blk_mode_t mode);
	void (*release)(struct gendisk *disk);
	int (*ioctl)(struct block_device *bdev, blk_mode_t mode,
			unsigned cmd, unsigned long arg);
	int (*compat_ioctl)(struct block_device *bdev, blk_mode_t mode,
			unsigned cmd, unsigned long arg);
	unsigned int (*check_events) (struct gendisk *disk,
				      unsigned int clearing);
	void (*unlock_native_capacity) (struct gendisk *);
	int (*getgeo)(struct gendisk *, struct hd_geometry *);
	int (*set_read_only)(struct block_device *bdev, bool ro);
	void (*free_disk)(struct gendisk *disk);
	/* this callback is with swap_lock and sometimes page table lock held */
	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
	int (*report_zones)(struct gendisk *, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data);
	char *(*devnode)(struct gendisk *disk, umode_t *mode);
	/* returns the length of the identifier or a negative errno: */
	int (*get_unique_id)(struct gendisk *disk, u8 id[16],
			enum blk_unique_id id_type);
	struct module *owner;
	const struct pr_ops *pr_ops;

	/*
	 * Special callback for probing GPT entry at a given sector.
	 * Needed by Android devices, used by GPT scanner and MMC blk
	 * driver.
	 */
	int (*alternative_gpt_sector)(struct gendisk *disk, sector_t *sector);
};

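/*
 * Illustrative sketch (not part of this header): a minimal bio-based
 * driver fills in only the callbacks it needs and leaves the rest NULL;
 * unset callbacks are treated as not implemented.  The names my_fops,
 * my_submit_bio, my_open and my_release are hypothetical.
 *
 *	static const struct block_device_operations my_fops = {
 *		.owner		= THIS_MODULE,
 *		.submit_bio	= my_submit_bio,
 *		.open		= my_open,
 *		.release	= my_release,
 *	};
 */
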
#ifdef CONFIG_COMPAT
extern int blkdev_compat_ptr_ioctl(struct block_device *, blk_mode_t,
				   unsigned int, unsigned long);
#else
#define blkdev_compat_ptr_ioctl NULL
#endif

static inline void blk_wake_io_task(struct task_struct *waiter)
{
	/*
	 * If we're polling, the task itself is doing the completions. For
	 * that case, we don't need to signal a wakeup, it's enough to just
	 * mark us as RUNNING.
	 */
	if (waiter == current)
		__set_current_state(TASK_RUNNING);
	else
		wake_up_process(waiter);
}

unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,
				 unsigned long start_time);
void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
		      unsigned int sectors, unsigned long start_time);

unsigned long bio_start_io_acct(struct bio *bio);
void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
		struct block_device *orig_bdev);

/**
 * bio_end_io_acct - end I/O accounting for bio based drivers
 * @bio: bio to end account for
 * @start_time: start time returned by bio_start_io_acct()
 */
static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
{
	return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev);
}

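/*
 * Illustrative sketch (not part of this header): bio-based drivers pair
 * bio_start_io_acct() with bio_end_io_acct() around the lifetime of a
 * bio so that it shows up in disk statistics.  my_submit_bio and where
 * start_time is stored are hypothetical; real drivers usually stash it
 * in a per-I/O structure until completion.
 *
 *	static void my_submit_bio(struct bio *bio)
 *	{
 *		unsigned long start_time = bio_start_io_acct(bio);
 *
 *		... process the bio ...
 *
 *		bio_end_io_acct(bio, start_time);
 *		bio_endio(bio);
 *	}
 *
 * Drivers that remap a bio to another device before completion use
 * bio_end_io_acct_remapped() with the original block device instead.
 */
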
int bdev_validate_blocksize(struct block_device *bdev, int block_size);
int set_blocksize(struct file *file, int size);

int lookup_bdev(const char *pathname, dev_t *dev);

void blkdev_show(struct seq_file *seqf, off_t offset);

#define BDEVNAME_SIZE	32	/* Largest string for a blockdev identifier */
#define BDEVT_SIZE	10	/* Largest string for MAJ:MIN for blkdev */
#ifdef CONFIG_BLOCK
#define BLKDEV_MAJOR_MAX	512
#else
#define BLKDEV_MAJOR_MAX	0
#endif

struct blk_holder_ops {
	void (*mark_dead)(struct block_device *bdev, bool surprise);

	/*
	 * Sync the file system mounted on the block device.
	 */
	void (*sync)(struct block_device *bdev);

	/*
	 * Freeze the file system mounted on the block device.
	 */
	int (*freeze)(struct block_device *bdev);

	/*
	 * Thaw the file system mounted on the block device.
	 */
	int (*thaw)(struct block_device *bdev);
};

/*
 * For filesystems using @fs_holder_ops, the @holder argument passed to
 * helpers used to open and claim block devices via
 * bd_prepare_to_claim() must point to a superblock.
 */
extern const struct blk_holder_ops fs_holder_ops;

/*
 * Return the correct open flags for blkdev_get_by_* for super block flags
 * as stored in sb->s_flags.
 */
#define sb_open_mode(flags) \
	(BLK_OPEN_READ | BLK_OPEN_RESTRICT_WRITES | \
	 (((flags) & SB_RDONLY) ? 0 : BLK_OPEN_WRITE))

struct file *bdev_file_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
		const struct blk_holder_ops *hops);
struct file *bdev_file_open_by_path(const char *path, blk_mode_t mode,
		void *holder, const struct blk_holder_ops *hops);
int bd_prepare_to_claim(struct block_device *bdev, void *holder,
		const struct blk_holder_ops *hops);
void bd_abort_claiming(struct block_device *bdev, void *holder);

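/*
 * Illustrative sketch (not part of this header): a file system opening
 * its backing device typically combines sb_open_mode() with
 * bdev_file_open_by_path(), passing the superblock as the holder along
 * with fs_holder_ops.  "path" and "sb" are assumed to be in scope;
 * further error handling is elided.
 *
 *	struct file *bdev_file;
 *
 *	bdev_file = bdev_file_open_by_path(path, sb_open_mode(sb->s_flags),
 *					   sb, &fs_holder_ops);
 *	if (IS_ERR(bdev_file))
 *		return PTR_ERR(bdev_file);
 */
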
struct block_device *I_BDEV(struct inode *inode);
struct block_device *file_bdev(struct file *bdev_file);
bool disk_live(struct gendisk *disk);
unsigned int block_size(struct block_device *bdev);

#ifdef CONFIG_BLOCK
void invalidate_bdev(struct block_device *bdev);
int sync_blockdev(struct block_device *bdev);
int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend);
int sync_blockdev_nowait(struct block_device *bdev);
void sync_bdevs(bool wait);
void bdev_statx(const struct path *path, struct kstat *stat, u32 request_mask);
void printk_all_partitions(void);
int __init early_lookup_bdev(const char *pathname, dev_t *dev);
#else
static inline void invalidate_bdev(struct block_device *bdev)
{
}
static inline int sync_blockdev(struct block_device *bdev)
{
	return 0;
}
static inline int sync_blockdev_nowait(struct block_device *bdev)
{
	return 0;
}
static inline void sync_bdevs(bool wait)
{
}
static inline void bdev_statx(const struct path *path, struct kstat *stat,
		u32 request_mask)
{
}
static inline void printk_all_partitions(void)
{
}
static inline int early_lookup_bdev(const char *pathname, dev_t *dev)
{
	return -EINVAL;
}
#endif /* CONFIG_BLOCK */

int bdev_freeze(struct block_device *bdev);
int bdev_thaw(struct block_device *bdev);
void bdev_fput(struct file *bdev_file);

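/*
 * Illustrative note (not part of this header): bdev_freeze() and
 * bdev_thaw() must be paired.  Freezing quiesces the file system mounted
 * on the device (through the holder's freeze/thaw callbacks when set),
 * which allows block-level work that needs a quiescent device:
 *
 *	error = bdev_freeze(bdev);
 *	if (error)
 *		return error;
 *
 *	... take a consistent snapshot of the device (hypothetical) ...
 *
 *	bdev_thaw(bdev);
 */
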
struct io_comp_batch {
	struct rq_list req_list;
	bool need_ts;
	void (*complete)(struct io_comp_batch *);
};

static inline bool blk_atomic_write_start_sect_aligned(sector_t sector,
						struct queue_limits *limits)
{
	unsigned int alignment = max(limits->atomic_write_hw_unit_min,
				limits->atomic_write_hw_boundary);

	return IS_ALIGNED(sector, alignment >> SECTOR_SHIFT);
}

static inline bool bdev_can_atomic_write(struct block_device *bdev)
{
	struct request_queue *bd_queue = bdev->bd_queue;
	struct queue_limits *limits = &bd_queue->limits;

	if (!limits->atomic_write_unit_min)
		return false;

	if (bdev_is_partition(bdev))
		return blk_atomic_write_start_sect_aligned(bdev->bd_start_sect,
							limits);

	return true;
}

static inline unsigned int
bdev_atomic_write_unit_min_bytes(struct block_device *bdev)
{
	if (!bdev_can_atomic_write(bdev))
		return 0;
	return queue_atomic_write_unit_min_bytes(bdev_get_queue(bdev));
}

static inline unsigned int
bdev_atomic_write_unit_max_bytes(struct block_device *bdev)
{
	if (!bdev_can_atomic_write(bdev))
		return 0;
	return queue_atomic_write_unit_max_bytes(bdev_get_queue(bdev));
}

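/*
 * Illustrative sketch (not part of this header): before issuing an atomic
 * write, a caller checks both the capability and that the I/O size falls
 * within the advertised unit bounds.  "len" and the error policy are
 * hypothetical.
 *
 *	if (!bdev_can_atomic_write(bdev))
 *		return -EOPNOTSUPP;
 *	if (len < bdev_atomic_write_unit_min_bytes(bdev) ||
 *	    len > bdev_atomic_write_unit_max_bytes(bdev))
 *		return -EINVAL;
 */
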
static inline int bio_split_rw_at(struct bio *bio,
		const struct queue_limits *lim,
		unsigned *segs, unsigned max_bytes)
{
	return bio_split_io_at(bio, lim, segs, max_bytes, lim->dma_alignment);
}

static inline int bio_iov_iter_get_bdev_pages(struct bio *bio,
		struct iov_iter *iter, struct block_device *bdev)
{
	return bio_iov_iter_get_pages_aligned(bio, iter,
			bdev_logical_block_size(bdev) - 1);
}

#define DEFINE_IO_COMP_BATCH(name)	struct io_comp_batch name = { }

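/*
 * Illustrative sketch (not part of this header): polled completion paths
 * stack-allocate a batch, let the driver add completed requests to it,
 * and then run the batch's completion handler once.  The polling step is
 * hypothetical; see the poll_bio() callback in struct
 * block_device_operations above for where a batch is passed to a driver.
 *
 *	DEFINE_IO_COMP_BATCH(iob);
 *
 *	... poll the queue, filling iob.req_list ...
 *
 *	if (iob.complete)
 *		iob.complete(&iob);
 */
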
#endif /* _LINUX_BLKDEV_H */