/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Portions Copyright (C) 1992 Drew Eckhardt
 */
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/types.h>
#include <linux/blk_types.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/minmax.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/bio.h>
#include <linux/gfp.h>
#include <linux/kdev_t.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/blkzoned.h>
#include <linux/sched.h>
#include <linux/sbitmap.h>
#include <linux/uuid.h>
#include <linux/xarray.h>
#include <linux/file.h>

struct module;
struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct blkcg_gq;
struct blk_flush_queue;
struct kiocb;
struct pr_ops;
struct rq_qos;
struct blk_queue_stats;
struct blk_stat_callback;
struct blk_crypto_profile;

extern const struct device_type disk_type;
extern const struct device_type part_type;
extern const struct class block_class;

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS		6

#define DISK_MAX_PARTS		256
#define DISK_NAME_LEN		32

#define PARTITION_META_INFO_VOLNAMELTH	64
/*
 * Enough for the string representation of any kind of UUID plus NULL.
 * EFI UUID is 36 characters. MSDOS UUID is 11 characters.
 */
#define PARTITION_META_INFO_UUIDLTH	(UUID_STRING_LEN + 1)

struct partition_meta_info {
	char uuid[PARTITION_META_INFO_UUIDLTH];
	u8 volname[PARTITION_META_INFO_VOLNAMELTH];
};

/**
 * DOC: genhd capability flags
 *
 * ``GENHD_FL_REMOVABLE``: indicates that the block device gives access to
 * removable media. When set, the device remains present even when media is not
 * inserted. Shall not be set for devices which are removed entirely when the
 * media is removed.
 *
 * ``GENHD_FL_HIDDEN``: the block device is hidden; it doesn't produce events,
 * doesn't appear in sysfs, and can't be opened from userspace or using
 * blkdev_get*. Used for the underlying components of multipath devices.
 *
 * ``GENHD_FL_NO_PART``: partition support is disabled. The kernel will not
 * scan for partitions from add_disk, and users can't add partitions manually.
 *
 */
enum {
	GENHD_FL_REMOVABLE		= 1 << 0,
	GENHD_FL_HIDDEN			= 1 << 1,
	GENHD_FL_NO_PART		= 1 << 2,
};
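/*
 * Illustrative sketch (not part of the upstream header): a driver that never
 * wants partitions on its disk would typically set GENHD_FL_NO_PART on the
 * allocated gendisk before registering it. The error label shown is a
 * hypothetical example, not taken from any particular driver.
 *
 *	disk->flags |= GENHD_FL_NO_PART;
 *	err = add_disk(disk);
 *	if (err)
 *		goto out_cleanup_disk;
 */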

enum {
	DISK_EVENT_MEDIA_CHANGE		= 1 << 0, /* media changed */
	DISK_EVENT_EJECT_REQUEST	= 1 << 1, /* eject requested */
};

enum {
	/* Poll even if events_poll_msecs is unset */
	DISK_EVENT_FLAG_POLL			= 1 << 0,
	/* Forward events to udev */
	DISK_EVENT_FLAG_UEVENT			= 1 << 1,
	/* Block event polling when open for exclusive write */
	DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE	= 1 << 2,
};

struct disk_events;
struct badblocks;

struct blk_integrity {
	const struct blk_integrity_profile *profile;
	unsigned char flags;
	unsigned char tuple_size;
	unsigned char pi_offset;
	unsigned char interval_exp;
	unsigned char tag_size;
};

typedef unsigned int __bitwise blk_mode_t;

/* open for reading */
#define BLK_OPEN_READ		((__force blk_mode_t)(1 << 0))
/* open for writing */
#define BLK_OPEN_WRITE		((__force blk_mode_t)(1 << 1))
/* open exclusively (vs other exclusive openers) */
#define BLK_OPEN_EXCL		((__force blk_mode_t)(1 << 2))
/* opened with O_NDELAY */
#define BLK_OPEN_NDELAY		((__force blk_mode_t)(1 << 3))
/* open for "writes" only for ioctls (special hack for floppy.c) */
#define BLK_OPEN_WRITE_IOCTL	((__force blk_mode_t)(1 << 4))
/* open is exclusive wrt all other BLK_OPEN_WRITE opens to the device */
#define BLK_OPEN_RESTRICT_WRITES	((__force blk_mode_t)(1 << 5))
/* return partition scanning errors */
#define BLK_OPEN_STRICT_SCAN	((__force blk_mode_t)(1 << 6))

struct gendisk {
	/*
	 * major/first_minor/minors should not be set by any new driver, the
	 * block core will take care of allocating them automatically.
	 */
	int major;
	int first_minor;
	int minors;

	char disk_name[DISK_NAME_LEN];	/* name of major driver */

	unsigned short events;		/* supported events */
	unsigned short event_flags;	/* flags related to event processing */

	struct xarray part_tbl;
	struct block_device *part0;

	const struct block_device_operations *fops;
	struct request_queue *queue;
	void *private_data;

	struct bio_set bio_split;

	int flags;
	unsigned long state;
#define GD_NEED_PART_SCAN		0
#define GD_READ_ONLY			1
#define GD_DEAD				2
#define GD_NATIVE_CAPACITY		3
#define GD_ADDED			4
#define GD_SUPPRESS_PART_SCAN		5
#define GD_OWNS_QUEUE			6

	struct mutex open_mutex;	/* open/close mutex */
	unsigned open_partitions;	/* number of open partitions */

	struct backing_dev_info *bdi;
	struct kobject queue_kobj;	/* the queue/ directory */
	struct kobject *slave_dir;
#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
	struct list_head slave_bdevs;
#endif
	struct timer_rand_state *random;
	atomic_t sync_io;		/* RAID */
	struct disk_events *ev;

#ifdef CONFIG_BLK_DEV_ZONED
	/*
	 * Zoned block device information. Reads of this information must be
	 * protected with blk_queue_enter() / blk_queue_exit(). Modifying this
	 * information is only allowed while no requests are being processed.
	 * See also blk_mq_freeze_queue() and blk_mq_unfreeze_queue().
	 */
	unsigned int nr_zones;
	unsigned int zone_capacity;
	unsigned int last_zone_capacity;
	unsigned long *conv_zones_bitmap;
	unsigned int zone_wplugs_hash_bits;
	spinlock_t zone_wplugs_lock;
	struct mempool_s *zone_wplugs_pool;
	struct hlist_head *zone_wplugs_hash;
	struct list_head zone_wplugs_err_list;
	struct work_struct zone_wplugs_work;
	struct workqueue_struct *zone_wplugs_wq;
#endif /* CONFIG_BLK_DEV_ZONED */

#if IS_ENABLED(CONFIG_CDROM)
	struct cdrom_device_info *cdi;
#endif
	int node_id;
	struct badblocks *bb;
	struct lockdep_map lockdep_map;
	u64 diskseq;
	blk_mode_t open_mode;

	/*
	 * Independent sector access ranges. This is always NULL for
	 * devices that do not have multiple independent access ranges.
	 */
	struct blk_independent_access_ranges *ia_ranges;
};

/**
 * disk_openers - returns how many openers there are for a disk
 * @disk: disk to check
 *
 * This returns the number of openers for a disk. Note that this value is only
 * stable if disk->open_mutex is held.
 *
 * Note: Due to a quirk in the block layer open code, each open partition is
 * only counted once even if there are multiple openers.
 */
static inline unsigned int disk_openers(struct gendisk *disk)
{
	return atomic_read(&disk->part0->bd_openers);
}

/**
 * disk_has_partscan - return %true if partition scanning is enabled on a disk
 * @disk: disk to check
 *
 * Returns %true if partition scanning is enabled for @disk, or %false if
 * partition scanning is disabled either permanently or temporarily.
 */
static inline bool disk_has_partscan(struct gendisk *disk)
{
	return !(disk->flags & (GENHD_FL_NO_PART | GENHD_FL_HIDDEN)) &&
		!test_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
}
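/*
 * Illustrative sketch (not part of the upstream header): disk_openers() is
 * only stable under disk->open_mutex, so a caller that wants to act on the
 * open count would typically hold the mutex around the check:
 *
 *	mutex_lock(&disk->open_mutex);
 *	if (disk_openers(disk) == 0)
 *		...	// e.g. safe to tear down driver-private state
 *	mutex_unlock(&disk->open_mutex);
 */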

/*
 * The gendisk is refcounted by the part0 block_device, and the bd_device
 * therein is also used for device model presentation in sysfs.
 */
#define dev_to_disk(device) \
	(dev_to_bdev(device)->bd_disk)
#define disk_to_dev(disk) \
	(&((disk)->part0->bd_device))

#if IS_REACHABLE(CONFIG_CDROM)
#define disk_to_cdi(disk)	((disk)->cdi)
#else
#define disk_to_cdi(disk)	NULL
#endif

static inline dev_t disk_devt(struct gendisk *disk)
{
	return MKDEV(disk->major, disk->first_minor);
}

static inline int blk_validate_block_size(unsigned long bsize)
{
	if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
		return -EINVAL;

	return 0;
}

static inline bool blk_op_is_passthrough(blk_opf_t op)
{
	op &= REQ_OP_MASK;
	return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
}

/*
 * BLK_BOUNCE_NONE:	never bounce (default)
 * BLK_BOUNCE_HIGH:	bounce all highmem pages
 */
enum blk_bounce {
	BLK_BOUNCE_NONE,
	BLK_BOUNCE_HIGH,
};

struct queue_limits {
	enum blk_bounce bounce;
	unsigned long seg_boundary_mask;
	unsigned long virt_boundary_mask;

	unsigned int max_hw_sectors;
	unsigned int max_dev_sectors;
	unsigned int chunk_sectors;
	unsigned int max_sectors;
	unsigned int max_user_sectors;
	unsigned int max_segment_size;
	unsigned int physical_block_size;
	unsigned int logical_block_size;
	unsigned int alignment_offset;
	unsigned int io_min;
	unsigned int io_opt;
	unsigned int max_discard_sectors;
	unsigned int max_hw_discard_sectors;
	unsigned int max_user_discard_sectors;
	unsigned int max_secure_erase_sectors;
	unsigned int max_write_zeroes_sectors;
	unsigned int max_zone_append_sectors;
	unsigned int discard_granularity;
	unsigned int discard_alignment;
	unsigned int zone_write_granularity;

	unsigned short max_segments;
	unsigned short max_integrity_segments;
	unsigned short max_discard_segments;

	unsigned char misaligned;
	unsigned char discard_misaligned;
	unsigned char raid_partial_stripes_expensive;
	bool zoned;
	unsigned int max_open_zones;
	unsigned int max_active_zones;

	/*
	 * Drivers that set dma_alignment to less than 511 must be prepared to
	 * handle individual bvec's that are not a multiple of a SECTOR_SIZE
	 * due to possible offsets.
	 */
	unsigned int dma_alignment;
};

typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
			       void *data);

void disk_set_zoned(struct gendisk *disk);

#define BLK_ALL_ZONES	((unsigned int)-1)
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data);
int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
		sector_t sectors, sector_t nr_sectors);
int blk_revalidate_disk_zones(struct gendisk *disk);
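/*
 * Illustrative sketch (not part of the upstream header): reporting zones means
 * providing a report_zones_cb callback that is invoked once per zone. The
 * callback and the counter below are hypothetical examples.
 *
 *	static int count_seq_zone(struct blk_zone *zone, unsigned int idx,
 *				  void *data)
 *	{
 *		unsigned int *nr_seq = data;
 *
 *		if (zone->type == BLK_ZONE_TYPE_SEQWRITE_REQ)
 *			(*nr_seq)++;
 *		return 0;
 *	}
 *
 *	unsigned int nr_seq = 0;
 *	ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES, count_seq_zone,
 *				  &nr_seq);
 */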

/*
 * Independent access ranges: struct blk_independent_access_range describes
 * a range of contiguous sectors that can be accessed using device command
 * execution resources that are independent from the resources used for
 * other access ranges. This is typically found with single-LUN multi-actuator
 * HDDs where each access range is served by a different set of heads.
 * The set of independent ranges supported by the device is defined using
 * struct blk_independent_access_ranges. The independent ranges must not overlap
 * and must include all sectors within the disk capacity (no sector holes
 * allowed).
 * For a device with multiple ranges, requests targeting sectors in different
 * ranges can be executed in parallel. A request can straddle an access range
 * boundary.
 */
struct blk_independent_access_range {
	struct kobject kobj;
	sector_t sector;
	sector_t nr_sectors;
};

struct blk_independent_access_ranges {
	struct kobject kobj;
	bool sysfs_registered;
	unsigned int nr_ia_ranges;
	struct blk_independent_access_range ia_range[];
};

struct request_queue {
	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void *queuedata;

	struct elevator_queue *elevator;

	const struct blk_mq_ops *mq_ops;

	/* sw queues */
	struct blk_mq_ctx __percpu *queue_ctx;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long queue_flags;

	unsigned int rq_timeout;

	unsigned int queue_depth;

	refcount_t refs;

	/* hw dispatch queues */
	unsigned int nr_hw_queues;
	struct xarray hctx_table;

	struct percpu_ref q_usage_counter;

	struct request *last_merge;

	spinlock_t queue_lock;

	int quiesce_depth;

	struct gendisk *disk;

	/*
	 * mq queue kobject
	 */
	struct kobject *mq_kobj;

	struct queue_limits limits;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	struct blk_integrity integrity;
#endif	/* CONFIG_BLK_DEV_INTEGRITY */

#ifdef CONFIG_PM
	struct device *dev;
	enum rpm_status rpm_status;
#endif

	/*
	 * Number of contexts that have called blk_set_pm_only(). If this
	 * counter is above zero then only RQF_PM requests are processed.
	 */
	atomic_t pm_only;

	struct blk_queue_stats *stats;
	struct rq_qos *rq_qos;
	struct mutex rq_qos_mutex;

	/*
	 * ida allocated id for this queue. Used to index queues from
	 * ioctx.
	 */
	int id;

	unsigned int dma_pad_mask;

	/*
	 * queue settings
	 */
	unsigned long nr_requests;	/* Max # of requests */

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct blk_crypto_profile *crypto_profile;
	struct kobject *crypto_kobject;
#endif

	struct timer_list timeout;
	struct work_struct timeout_work;

	atomic_t nr_active_requests_shared_tags;

	struct blk_mq_tags *sched_shared_tags;

	struct list_head icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP(blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq *root_blkg;
	struct list_head blkg_list;
	struct mutex blkcg_mutex;
#endif

	int node;

	spinlock_t requeue_lock;
	struct list_head requeue_list;
	struct delayed_work requeue_work;

#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace __rcu *blk_trace;
#endif
	/*
	 * for flush operations
	 */
	struct blk_flush_queue *fq;
	struct list_head flush_list;

	struct mutex sysfs_lock;
	struct mutex sysfs_dir_lock;
	struct mutex limits_lock;

	/*
	 * for reusing dead hctx instance in case of updating
	 * nr_hw_queues
	 */
	struct list_head unused_hctx_list;
	spinlock_t unused_hctx_lock;

	int mq_freeze_depth;

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data *td;
#endif
	struct rcu_head rcu_head;
	wait_queue_head_t mq_freeze_wq;
	/*
	 * Protect concurrent access to q_usage_counter by
	 * percpu_ref_kill() and percpu_ref_reinit().
	 */
	struct mutex mq_freeze_lock;

	struct blk_mq_tag_set *tag_set;
	struct list_head tag_set_list;

	struct dentry *debugfs_dir;
	struct dentry *sched_debugfs_dir;
	struct dentry *rqos_debugfs_dir;
	/*
	 * Serializes all debugfs metadata operations using the above dentries.
	 */
	struct mutex debugfs_mutex;

	bool mq_sysfs_init_done;
};

/* Keep blk_queue_flag_name[] in sync with the definitions below */
#define QUEUE_FLAG_STOPPED	0	/* queue is stopped */
#define QUEUE_FLAG_DYING	1	/* queue being torn down */
#define QUEUE_FLAG_NOMERGES	3	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	4	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO	5	/* fake timeout */
#define QUEUE_FLAG_NONROT	6	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	7	/* do disk/partitions IO accounting */
#define QUEUE_FLAG_NOXMERGES	9	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM	10	/* Contributes to random pool */
#define QUEUE_FLAG_SYNCHRONOUS	11	/* always completes in submit context */
#define QUEUE_FLAG_SAME_FORCE	12	/* force complete on same CPU */
#define QUEUE_FLAG_HW_WC	13	/* Write back caching supported */
#define QUEUE_FLAG_INIT_DONE	14	/* queue is initialized */
#define QUEUE_FLAG_STABLE_WRITES 15	/* don't modify blks until WB is done */
#define QUEUE_FLAG_POLL		16	/* IO polling enabled if set */
#define QUEUE_FLAG_WC		17	/* Write back caching */
#define QUEUE_FLAG_FUA		18	/* device supports FUA writes */
#define QUEUE_FLAG_DAX		19	/* device supports DAX */
#define QUEUE_FLAG_STATS	20	/* track IO start and completion times */
#define QUEUE_FLAG_REGISTERED	22	/* queue has been registered to a disk */
#define QUEUE_FLAG_QUIESCED	24	/* queue has been quiesced */
#define QUEUE_FLAG_PCI_P2PDMA	25	/* device supports PCI p2p requests */
#define QUEUE_FLAG_ZONE_RESETALL 26	/* supports Zone Reset All */
#define QUEUE_FLAG_RQ_ALLOC_TIME 27	/* record rq->alloc_time_ns */
#define QUEUE_FLAG_HCTX_ACTIVE	28	/* at least one blk-mq hctx is active */
#define QUEUE_FLAG_NOWAIT	29	/* device supports NOWAIT */
#define QUEUE_FLAG_SQ_SCHED	30	/* single queue style io dispatch */
#define QUEUE_FLAG_SKIP_TAGSET_QUIESCE	31 /* quiesce_tagset skips the queue */

#define QUEUE_FLAG_MQ_DEFAULT	((1UL << QUEUE_FLAG_IO_STAT) |		\
				 (1UL << QUEUE_FLAG_SAME_COMP) |	\
				 (1UL << QUEUE_FLAG_NOWAIT))

void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);

#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_stable_writes(q) \
	test_bit(QUEUE_FLAG_STABLE_WRITES, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_zone_resetall(q)	\
	test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags)
#define blk_queue_dax(q)	test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
#define blk_queue_pci_p2pdma(q)	\
	test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
#define blk_queue_rq_alloc_time(q)	\
	test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags)
#else
#define blk_queue_rq_alloc_time(q)	false
#endif

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q)	test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
#define blk_queue_pm_only(q)	atomic_read(&(q)->pm_only)
#define blk_queue_registered(q)	test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
#define blk_queue_sq_sched(q)	test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
#define blk_queue_skip_tagset_quiesce(q) \
	test_bit(QUEUE_FLAG_SKIP_TAGSET_QUIESCE, &(q)->queue_flags)

extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define dma_map_bvec(dev, bv, dir, attrs) \
	dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
	(dir), (attrs))

static inline bool queue_is_mq(struct request_queue *q)
{
	return q->mq_ops;
}

#ifdef CONFIG_PM
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
	return q->rpm_status;
}
#else
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
	return RPM_ACTIVE;
}
#endif

static inline bool blk_queue_is_zoned(struct request_queue *q)
{
	return IS_ENABLED(CONFIG_BLK_DEV_ZONED) && q->limits.zoned;
}

#ifdef CONFIG_BLK_DEV_ZONED
unsigned int bdev_nr_zones(struct block_device *bdev);

static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
	return blk_queue_is_zoned(disk->queue) ? disk->nr_zones : 0;
}

static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{
	if (!blk_queue_is_zoned(disk->queue))
		return 0;
	return sector >> ilog2(disk->queue->limits.chunk_sectors);
}

static inline void disk_set_max_open_zones(struct gendisk *disk,
		unsigned int max_open_zones)
{
	disk->queue->limits.max_open_zones = max_open_zones;
}

static inline void disk_set_max_active_zones(struct gendisk *disk,
		unsigned int max_active_zones)
{
	disk->queue->limits.max_active_zones = max_active_zones;
}

static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
	return bdev->bd_disk->queue->limits.max_open_zones;
}

static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
{
	return bdev->bd_disk->queue->limits.max_active_zones;
}

bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs);
#else /* CONFIG_BLK_DEV_ZONED */
static inline unsigned int bdev_nr_zones(struct block_device *bdev)
{
	return 0;
}

static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
	return 0;
}
static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{
	return 0;
}
static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
	return 0;
}

static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
{
	return 0;
}
static inline bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
{
	return false;
}
#endif /* CONFIG_BLK_DEV_ZONED */

static inline unsigned int blk_queue_depth(struct request_queue *q)
{
	if (q->queue_depth)
		return q->queue_depth;

	return q->nr_requests;
}

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)

int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
				 const struct attribute_group **groups);
static inline int __must_check add_disk(struct gendisk *disk)
{
	return device_add_disk(NULL, disk, NULL);
}
void del_gendisk(struct gendisk *gp);
void invalidate_disk(struct gendisk *disk);
void set_disk_ro(struct gendisk *disk, bool read_only);
void disk_uevent(struct gendisk *disk, enum kobject_action action);

static inline u8 bdev_partno(const struct block_device *bdev)
{
	return atomic_read(&bdev->__bd_flags) & BD_PARTNO;
}

static inline bool bdev_test_flag(const struct block_device *bdev, unsigned flag)
{
	return atomic_read(&bdev->__bd_flags) & flag;
}

static inline void bdev_set_flag(struct block_device *bdev, unsigned flag)
{
	atomic_or(flag, &bdev->__bd_flags);
}

static inline void bdev_clear_flag(struct block_device *bdev, unsigned flag)
{
	atomic_andnot(flag, &bdev->__bd_flags);
}

static inline int get_disk_ro(struct gendisk *disk)
{
	return bdev_test_flag(disk->part0, BD_READ_ONLY) ||
		test_bit(GD_READ_ONLY, &disk->state);
}

static inline int bdev_read_only(struct block_device *bdev)
{
	return bdev_test_flag(bdev, BD_READ_ONLY) || get_disk_ro(bdev->bd_disk);
}

bool set_capacity_and_notify(struct gendisk *disk, sector_t size);
void disk_force_media_change(struct gendisk *disk);
void bdev_mark_dead(struct block_device *bdev, bool surprise);

void add_disk_randomness(struct gendisk *disk) __latent_entropy;
void rand_initialize_disk(struct gendisk *disk);

static inline sector_t get_start_sect(struct block_device *bdev)
{
	return bdev->bd_start_sect;
}

static inline sector_t bdev_nr_sectors(struct block_device *bdev)
{
	return bdev->bd_nr_sectors;
}

static inline loff_t bdev_nr_bytes(struct block_device *bdev)
{
	return (loff_t)bdev_nr_sectors(bdev) << SECTOR_SHIFT;
}

static inline sector_t get_capacity(struct gendisk *disk)
{
	return bdev_nr_sectors(disk->part0);
}

static inline u64 sb_bdev_nr_blocks(struct super_block *sb)
{
	return bdev_nr_sectors(sb->s_bdev) >>
		(sb->s_blocksize_bits - SECTOR_SHIFT);
}

int bdev_disk_changed(struct gendisk *disk, bool invalidate);

void put_disk(struct gendisk *disk);
struct gendisk *__blk_alloc_disk(struct queue_limits *lim, int node,
		struct lock_class_key *lkclass);

/**
 * blk_alloc_disk - allocate a gendisk structure
 * @lim: queue limits to be used for this disk.
 * @node_id: numa node to allocate on
 *
 * Allocate and pre-initialize a gendisk structure for use with BIO based
 * drivers.
 *
 * Returns an ERR_PTR on error, else the allocated disk.
 *
 * Context: can sleep
 */
#define blk_alloc_disk(lim, node_id)					\
({									\
	static struct lock_class_key __key;				\
									\
	__blk_alloc_disk(lim, node_id, &__key);				\
})
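/*
 * Illustrative sketch (not part of the upstream header): a minimal BIO based
 * driver would typically allocate its disk like this; the limits shown, the
 * error handling and my_block_ops are examples only.
 *
 *	struct queue_limits lim = {
 *		.logical_block_size	= 4096,
 *	};
 *	struct gendisk *disk;
 *
 *	disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
 *	if (IS_ERR(disk))
 *		return PTR_ERR(disk);
 *	disk->fops = &my_block_ops;	// hypothetical block_device_operations
 */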

int __register_blkdev(unsigned int major, const char *name,
		void (*probe)(dev_t devt));
#define register_blkdev(major, name) \
	__register_blkdev(major, name, NULL)
void unregister_blkdev(unsigned int major, const char *name);

bool disk_check_media_change(struct gendisk *disk);
void set_capacity(struct gendisk *disk, sector_t size);

#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk);
#else
static inline int bd_link_disk_holder(struct block_device *bdev,
				      struct gendisk *disk)
{
	return 0;
}
static inline void bd_unlink_disk_holder(struct block_device *bdev,
					 struct gendisk *disk)
{
}
#endif /* CONFIG_BLOCK_HOLDER_DEPRECATED */

dev_t part_devt(struct gendisk *disk, u8 partno);
void inc_diskseq(struct gendisk *disk);
void blk_request_module(dev_t devt);

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
void submit_bio_noacct(struct bio *bio);
struct bio *bio_split_to_limits(struct bio *bio);

extern int blk_lld_busy(struct request_queue *q);
extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);

/* Helper to convert REQ_OP_XXX to its string format XXX */
extern const char *blk_op_str(enum req_op op);

int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);
const char *blk_status_to_str(blk_status_t status);

/* only poll the hardware once, don't continue until a completion was found */
#define BLK_POLL_ONESHOT	(1 << 0)
int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags);
int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
			unsigned int flags);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_queue;	/* this is never NULL */
}

/* Helper to convert BLK_ZONE_ZONE_XXX to its string format XXX */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);

static inline unsigned int bio_zone_no(struct bio *bio)
{
	return disk_zone_no(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
}

static inline bool bio_straddles_zones(struct bio *bio)
{
	return bio_sectors(bio) &&
		bio_zone_no(bio) !=
		disk_zone_no(bio->bi_bdev->bd_disk, bio_end_sector(bio) - 1);
}

/*
 * Return how much of the chunk is left to be used for I/O at a given offset.
 */
static inline unsigned int blk_chunk_sectors_left(sector_t offset,
		unsigned int chunk_sectors)
{
	if (unlikely(!is_power_of_2(chunk_sectors)))
		return chunk_sectors - sector_div(offset, chunk_sectors);
	return chunk_sectors - (offset & (chunk_sectors - 1));
}

/**
 * queue_limits_start_update - start an atomic update of queue limits
 * @q:	queue to update
 *
 * This function starts an atomic update of the queue limits. It takes a lock
 * to prevent other updates and returns a snapshot of the current limits that
 * the caller can modify. The caller must call queue_limits_commit_update()
 * to finish the update.
 *
 * Context: process context. The caller must have frozen the queue or ensured
 * that there is no outstanding I/O by other means.
 */
static inline struct queue_limits
queue_limits_start_update(struct request_queue *q)
	__acquires(q->limits_lock)
{
	mutex_lock(&q->limits_lock);
	return q->limits;
}
int queue_limits_commit_update(struct request_queue *q,
		struct queue_limits *lim);
int queue_limits_set(struct request_queue *q, struct queue_limits *lim);
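/*
 * Illustrative sketch (not part of the upstream header): the usual pattern is
 * to take a snapshot, modify it, and either commit or cancel it. The limit
 * being changed here is just an example value.
 *
 *	struct queue_limits lim;
 *	int err;
 *
 *	lim = queue_limits_start_update(q);
 *	lim.max_user_discard_sectors = new_value;	// hypothetical value
 *	err = queue_limits_commit_update(q, &lim);
 *	if (err)
 *		return err;
 */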

/**
 * queue_limits_cancel_update - cancel an atomic update of queue limits
 * @q:	queue to update
 *
 * This function cancels an atomic update of the queue limits started by
 * queue_limits_start_update() and should be used when an error occurs after
 * starting the update.
 */
static inline void queue_limits_cancel_update(struct request_queue *q)
{
	mutex_unlock(&q->limits_lock);
}

/*
 * Access functions for manipulating queue properties
 */
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
void blk_queue_max_secure_erase_sectors(struct request_queue *q,
		unsigned int max_sectors);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors);
extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
		unsigned int max_write_same_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_max_zone_append_sectors(struct request_queue *q,
		unsigned int max_zone_append_sectors);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
void blk_queue_zone_write_granularity(struct request_queue *q,
		unsigned int size);
extern void blk_queue_alignment_offset(struct request_queue *q,
		unsigned int alignment);
void disk_update_readahead(struct gendisk *disk);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		sector_t offset);
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
		sector_t offset, const char *pfx);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);

struct blk_independent_access_ranges *
disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges);
void disk_set_independent_access_ranges(struct gendisk *disk,
				struct blk_independent_access_ranges *iars);

bool __must_check blk_get_queue(struct request_queue *);
extern void blk_put_queue(struct request_queue *);

void blk_mark_disk_dead(struct gendisk *disk);

#ifdef CONFIG_BLOCK
/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period. This allows merging of sequential requests
 * into a single larger request. As the requests are moved from a per-task
 * list to the device's request_queue in a batch, this results in improved
 * scalability as the lock contention for the request_queue lock is reduced.
 *
 * It is ok not to disable preemption when adding the request to the plug list
 * or when attempting a merge. For details, please see schedule() where
 * blk_flush_plug() is called.
 */
struct blk_plug {
	struct request *mq_list;	/* blk-mq requests */

	/* if ios_left is > 1, we can batch tag/rq allocations */
	struct request *cached_rq;
	u64 cur_ktime;
	unsigned short nr_ios;

	unsigned short rq_count;

	bool multiple_queues;
	bool has_elevator;

	struct list_head cb_list;	/* md requires an unplug callback */
};
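/*
 * Illustrative sketch (not part of the upstream header): a submitter that
 * issues several bios back to back would typically wrap the loop in a plug so
 * the bios can be merged and dispatched as a batch. prepare_next_bio() is a
 * hypothetical helper.
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	while ((bio = prepare_next_bio()) != NULL)
 *		submit_bio(bio);
 *	blk_finish_plug(&plug);
 */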

struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
struct blk_plug_cb {
	struct list_head list;
	blk_plug_cb_fn callback;
	void *data;
};
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
					     void *data, int size);
extern void blk_start_plug(struct blk_plug *);
extern void blk_start_plug_nr_ios(struct blk_plug *, unsigned short);
extern void blk_finish_plug(struct blk_plug *);

void __blk_flush_plug(struct blk_plug *plug, bool from_schedule);
static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
	if (plug)
		__blk_flush_plug(plug, async);
}

/*
 * tsk == current here
 */
static inline void blk_plug_invalidate_ts(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		plug->cur_ktime = 0;
	current->flags &= ~PF_BLOCK_TS;
}

int blkdev_issue_flush(struct block_device *bdev);
long nr_blockdev_pages(void);
#else /* CONFIG_BLOCK */
struct blk_plug {
};

static inline void blk_start_plug_nr_ios(struct blk_plug *plug,
					 unsigned short nr_ios)
{
}

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
}

static inline void blk_plug_invalidate_ts(struct task_struct *tsk)
{
}

static inline int blkdev_issue_flush(struct block_device *bdev)
{
	return 0;
}

static inline long nr_blockdev_pages(void)
{
	return 0;
}
#endif /* CONFIG_BLOCK */

extern void blk_io_schedule(void);

int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask);
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop);
int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp);

#define BLKDEV_ZERO_NOUNMAP	(1 << 0)  /* do not free blocks */
#define BLKDEV_ZERO_NOFALLBACK	(1 << 1)  /* don't write explicit zeroes */

extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags);

static inline int sb_issue_discard(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
	return blkdev_issue_discard(sb->s_bdev,
				    block << (sb->s_blocksize_bits -
					      SECTOR_SHIFT),
				    nr_blocks << (sb->s_blocksize_bits -
						  SECTOR_SHIFT),
				    gfp_mask);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask)
{
	return blkdev_issue_zeroout(sb->s_bdev,
				    block << (sb->s_blocksize_bits -
					      SECTOR_SHIFT),
				    nr_blocks << (sb->s_blocksize_bits -
						  SECTOR_SHIFT),
				    gfp_mask, 0);
}
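/*
 * Illustrative sketch (not part of the upstream header): a caller that only
 * wants offloaded zeroing can pass BLKDEV_ZERO_NOFALLBACK and handle the case
 * where the device cannot offload; the range below is an example.
 *
 *	ret = blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
 *				   BLKDEV_ZERO_NOFALLBACK);
 *	if (ret == -EOPNOTSUPP)
 *		...	// no offload available, write zeroes manually
 */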

static inline bool bdev_is_partition(struct block_device *bdev)
{
	return bdev_partno(bdev) != 0;
}

enum blk_default_limits {
	BLK_MAX_SEGMENTS	= 128,
	BLK_SAFE_MAX_SECTORS	= 255,
	BLK_MAX_SEGMENT_SIZE	= 65536,
	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
};

/*
 * Default upper limit for the software max_sectors limit used for
 * regular file system I/O. This can be increased through sysfs.
 *
 * Not to be confused with the max_hw_sector limit that is entirely
 * controlled by the driver, usually based on hardware limits.
 */
#define BLK_DEF_MAX_SECTORS_CAP	2560u

static inline unsigned long queue_segment_boundary(const struct request_queue *q)
{
	return q->limits.seg_boundary_mask;
}

static inline unsigned long queue_virt_boundary(const struct request_queue *q)
{
	return q->limits.virt_boundary_mask;
}

static inline unsigned int queue_max_sectors(const struct request_queue *q)
{
	return q->limits.max_sectors;
}

static inline unsigned int queue_max_bytes(struct request_queue *q)
{
	return min_t(unsigned int, queue_max_sectors(q), INT_MAX >> 9) << 9;
}

static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
{
	return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(const struct request_queue *q)
{
	return q->limits.max_segments;
}

static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
{
	return q->limits.max_discard_segments;
}

static inline unsigned int queue_max_segment_size(const struct request_queue *q)
{
	return q->limits.max_segment_size;
}

static inline unsigned int queue_limits_max_zone_append_sectors(struct queue_limits *l)
{
	unsigned int max_sectors = min(l->chunk_sectors, l->max_hw_sectors);

	return min_not_zero(l->max_zone_append_sectors, max_sectors);
}

static inline unsigned int queue_max_zone_append_sectors(struct request_queue *q)
{
	if (!blk_queue_is_zoned(q))
		return 0;

	return queue_limits_max_zone_append_sectors(&q->limits);
}

static inline bool queue_emulates_zone_append(struct request_queue *q)
{
	return blk_queue_is_zoned(q) && !q->limits.max_zone_append_sectors;
}

static inline bool bdev_emulates_zone_append(struct block_device *bdev)
{
	return queue_emulates_zone_append(bdev_get_queue(bdev));
}

static inline unsigned int
bdev_max_zone_append_sectors(struct block_device *bdev)
{
	return queue_max_zone_append_sectors(bdev_get_queue(bdev));
}

static inline unsigned int bdev_max_segments(struct block_device *bdev)
{
	return queue_max_segments(bdev_get_queue(bdev));
}

static inline unsigned queue_logical_block_size(const struct request_queue *q)
{
	int retval = 512;

	if (q && q->limits.logical_block_size)
		retval = q->limits.logical_block_size;

	return retval;
}

static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
{
	return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(const struct request_queue *q)
{
	return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
	return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(const struct request_queue *q)
{
	return q->limits.io_min;
}

static inline int bdev_io_min(struct block_device *bdev)
{
	return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(const struct request_queue *q)
{
	return q->limits.io_opt;
}

static inline int bdev_io_opt(struct block_device *bdev)
{
	return queue_io_opt(bdev_get_queue(bdev));
}

static inline unsigned int
queue_zone_write_granularity(const struct request_queue *q)
{
	return q->limits.zone_write_granularity;
}

static inline unsigned int
bdev_zone_write_granularity(struct block_device *bdev)
{
	return queue_zone_write_granularity(bdev_get_queue(bdev));
}

int bdev_alignment_offset(struct block_device *bdev);
unsigned int bdev_discard_alignment(struct block_device *bdev);

static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev)
{
	return bdev_get_queue(bdev)->limits.max_discard_sectors;
}

static inline unsigned int bdev_discard_granularity(struct block_device *bdev)
{
	return bdev_get_queue(bdev)->limits.discard_granularity;
}

static inline unsigned int
bdev_max_secure_erase_sectors(struct block_device *bdev)
{
	return bdev_get_queue(bdev)->limits.max_secure_erase_sectors;
}

static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return q->limits.max_write_zeroes_sectors;

	return 0;
}

static inline bool bdev_nonrot(struct block_device *bdev)
{
	return blk_queue_nonrot(bdev_get_queue(bdev));
}

static inline bool bdev_synchronous(struct block_device *bdev)
{
	return test_bit(QUEUE_FLAG_SYNCHRONOUS,
			&bdev_get_queue(bdev)->queue_flags);
}

static inline bool bdev_stable_writes(struct block_device *bdev)
{
	return test_bit(QUEUE_FLAG_STABLE_WRITES,
			&bdev_get_queue(bdev)->queue_flags);
}

static inline bool bdev_write_cache(struct block_device *bdev)
{
	return test_bit(QUEUE_FLAG_WC, &bdev_get_queue(bdev)->queue_flags);
}

static inline bool bdev_fua(struct block_device *bdev)
{
	return test_bit(QUEUE_FLAG_FUA, &bdev_get_queue(bdev)->queue_flags);
}

static inline bool bdev_nowait(struct block_device *bdev)
{
	return test_bit(QUEUE_FLAG_NOWAIT, &bdev_get_queue(bdev)->queue_flags);
}

static inline bool bdev_is_zoned(struct block_device *bdev)
{
	return blk_queue_is_zoned(bdev_get_queue(bdev));
}

static inline unsigned int bdev_zone_no(struct block_device *bdev, sector_t sec)
{
	return disk_zone_no(bdev->bd_disk, sec);
}

static inline sector_t bdev_zone_sectors(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (!blk_queue_is_zoned(q))
		return 0;
	return q->limits.chunk_sectors;
}

static inline sector_t bdev_offset_from_zone_start(struct block_device *bdev,
						   sector_t sector)
{
	return sector & (bdev_zone_sectors(bdev) - 1);
}

static inline sector_t bio_offset_from_zone_start(struct bio *bio)
{
	return bdev_offset_from_zone_start(bio->bi_bdev,
					   bio->bi_iter.bi_sector);
}

static inline bool bdev_is_zone_start(struct block_device *bdev,
				      sector_t sector)
{
	return bdev_offset_from_zone_start(bdev, sector) == 0;
}

static inline int queue_dma_alignment(const struct request_queue *q)
{
	return q ? q->limits.dma_alignment : 511;
}

static inline unsigned int bdev_dma_alignment(struct block_device *bdev)
{
	return queue_dma_alignment(bdev_get_queue(bdev));
}

static inline bool bdev_iter_is_aligned(struct block_device *bdev,
					struct iov_iter *iter)
{
	return iov_iter_is_aligned(iter, bdev_dma_alignment(bdev),
				   bdev_logical_block_size(bdev) - 1);
}

static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
				 unsigned int len)
{
	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	return !(addr & alignment) && !(len & alignment);
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	return order_base_2(size >> SECTOR_SHIFT) + SECTOR_SHIFT;
}

int kblockd_schedule_work(struct work_struct *work);
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")

#ifdef CONFIG_BLK_INLINE_ENCRYPTION

bool blk_crypto_register(struct blk_crypto_profile *profile,
			 struct request_queue *q);

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static inline bool blk_crypto_register(struct blk_crypto_profile *profile,
				       struct request_queue *q)
{
	return true;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION */

enum blk_unique_id {
	/* these match the Designator Types specified in SPC */
	BLK_UID_T10	= 1,
	BLK_UID_EUI64	= 2,
	BLK_UID_NAA	= 3,
};

struct block_device_operations {
	void (*submit_bio)(struct bio *bio);
	int (*poll_bio)(struct bio *bio, struct io_comp_batch *iob,
			unsigned int flags);
	int (*open)(struct gendisk *disk, blk_mode_t mode);
	void (*release)(struct gendisk *disk);
	int (*ioctl)(struct block_device *bdev, blk_mode_t mode,
			unsigned cmd, unsigned long arg);
	int (*compat_ioctl)(struct block_device *bdev, blk_mode_t mode,
			unsigned cmd, unsigned long arg);
	unsigned int (*check_events) (struct gendisk *disk,
				      unsigned int clearing);
	void (*unlock_native_capacity) (struct gendisk *);
	int (*getgeo)(struct block_device *, struct hd_geometry *);
	int (*set_read_only)(struct block_device *bdev, bool ro);
	void (*free_disk)(struct gendisk *disk);
	/* this callback is with swap_lock and sometimes page table lock held */
	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
	int (*report_zones)(struct gendisk *, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data);
	char *(*devnode)(struct gendisk *disk, umode_t *mode);
	/* returns the length of the identifier or a negative errno: */
	int (*get_unique_id)(struct gendisk *disk, u8 id[16],
			enum blk_unique_id id_type);
	struct module *owner;
	const struct pr_ops *pr_ops;

	/*
	 * Special callback for probing GPT entry at a given sector.
	 * Needed by Android devices, used by GPT scanner and MMC blk
	 * driver.
	 */
	int (*alternative_gpt_sector)(struct gendisk *disk, sector_t *sector);
};

#ifdef CONFIG_COMPAT
extern int blkdev_compat_ptr_ioctl(struct block_device *, blk_mode_t,
				   unsigned int, unsigned long);
#else
#define blkdev_compat_ptr_ioctl NULL
#endif

static inline void blk_wake_io_task(struct task_struct *waiter)
{
	/*
	 * If we're polling, the task itself is doing the completions. For
	 * that case, we don't need to signal a wakeup, it's enough to just
	 * mark us as RUNNING.
	 */
	if (waiter == current)
		__set_current_state(TASK_RUNNING);
	else
		wake_up_process(waiter);
}

unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,
				 unsigned long start_time);
void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
		unsigned int sectors, unsigned long start_time);

unsigned long bio_start_io_acct(struct bio *bio);
void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
		struct block_device *orig_bdev);

/**
 * bio_end_io_acct - end I/O accounting for bio based drivers
 * @bio:	bio to end account for
 * @start_time:	start time returned by bio_start_io_acct()
 */
static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
{
	return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev);
}

int bdev_read_only(struct block_device *bdev);
int set_blocksize(struct file *file, int size);

int lookup_bdev(const char *pathname, dev_t *dev);

void blkdev_show(struct seq_file *seqf, off_t offset);

#define BDEVNAME_SIZE	32	/* Largest string for a blockdev identifier */
#define BDEVT_SIZE	10	/* Largest string for MAJ:MIN for blkdev */
#ifdef CONFIG_BLOCK
#define BLKDEV_MAJOR_MAX	512
#else
#define BLKDEV_MAJOR_MAX	0
#endif

struct blk_holder_ops {
	void (*mark_dead)(struct block_device *bdev, bool surprise);

	/*
	 * Sync the file system mounted on the block device.
	 */
	void (*sync)(struct block_device *bdev);

	/*
	 * Freeze the file system mounted on the block device.
	 */
	int (*freeze)(struct block_device *bdev);

	/*
	 * Thaw the file system mounted on the block device.
	 */
	int (*thaw)(struct block_device *bdev);
};

/*
 * For filesystems using @fs_holder_ops, the @holder argument passed to
 * helpers used to open and claim block devices via
 * bd_prepare_to_claim() must point to a superblock.
 */
extern const struct blk_holder_ops fs_holder_ops;

/*
 * Return the correct open flags for blkdev_get_by_* for super block flags
 * as stored in sb->s_flags.
 */
#define sb_open_mode(flags) \
	(BLK_OPEN_READ | BLK_OPEN_RESTRICT_WRITES | \
	 (((flags) & SB_RDONLY) ? 0 : BLK_OPEN_WRITE))
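/*
 * Illustrative sketch (not part of the upstream header): a filesystem opening
 * its backing device would typically pass its super_block as the holder
 * together with fs_holder_ops; the variables and error handling are examples.
 *
 *	struct file *bdev_file;
 *
 *	bdev_file = bdev_file_open_by_path(path, sb_open_mode(sb->s_flags),
 *					   sb, &fs_holder_ops);
 *	if (IS_ERR(bdev_file))
 *		return PTR_ERR(bdev_file);
 *	bdev = file_bdev(bdev_file);
 */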

struct file *bdev_file_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
		const struct blk_holder_ops *hops);
struct file *bdev_file_open_by_path(const char *path, blk_mode_t mode,
		void *holder, const struct blk_holder_ops *hops);
int bd_prepare_to_claim(struct block_device *bdev, void *holder,
		const struct blk_holder_ops *hops);
void bd_abort_claiming(struct block_device *bdev, void *holder);

/* just for blk-cgroup, don't use elsewhere */
struct block_device *blkdev_get_no_open(dev_t dev);
void blkdev_put_no_open(struct block_device *bdev);

struct block_device *I_BDEV(struct inode *inode);
struct block_device *file_bdev(struct file *bdev_file);
bool disk_live(struct gendisk *disk);
unsigned int block_size(struct block_device *bdev);

#ifdef CONFIG_BLOCK
void invalidate_bdev(struct block_device *bdev);
int sync_blockdev(struct block_device *bdev);
int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend);
int sync_blockdev_nowait(struct block_device *bdev);
void sync_bdevs(bool wait);
void bdev_statx_dioalign(struct inode *inode, struct kstat *stat);
void printk_all_partitions(void);
int __init early_lookup_bdev(const char *pathname, dev_t *dev);
#else
static inline void invalidate_bdev(struct block_device *bdev)
{
}
static inline int sync_blockdev(struct block_device *bdev)
{
	return 0;
}
static inline int sync_blockdev_nowait(struct block_device *bdev)
{
	return 0;
}
static inline void sync_bdevs(bool wait)
{
}
static inline void bdev_statx_dioalign(struct inode *inode, struct kstat *stat)
{
}
static inline void printk_all_partitions(void)
{
}
static inline int early_lookup_bdev(const char *pathname, dev_t *dev)
{
	return -EINVAL;
}
#endif /* CONFIG_BLOCK */

int bdev_freeze(struct block_device *bdev);
int bdev_thaw(struct block_device *bdev);
void bdev_fput(struct file *bdev_file);

struct io_comp_batch {
	struct request *req_list;
	bool need_ts;
	void (*complete)(struct io_comp_batch *);
};

#define DEFINE_IO_COMP_BATCH(name)	struct io_comp_batch name = { }

#endif /* _LINUX_BLKDEV_H */