// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>

#include "blk.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim: the queue_limits structure to reset
 *
 * Prepare queue limits for applying limits from underlying devices using
 * blk_stack_limits().
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
	memset(lim, 0, sizeof(*lim));
	lim->logical_block_size = SECTOR_SIZE;
	lim->physical_block_size = SECTOR_SIZE;
	lim->io_min = SECTOR_SIZE;
	lim->discard_granularity = SECTOR_SIZE;
	lim->dma_alignment = SECTOR_SIZE - 1;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;

	/* Inherit limits from component devices */
	lim->max_segments = USHRT_MAX;
	lim->max_discard_segments = USHRT_MAX;
	lim->max_hw_sectors = UINT_MAX;
	lim->max_segment_size = UINT_MAX;
	lim->max_sectors = UINT_MAX;
	lim->max_dev_sectors = UINT_MAX;
	lim->max_write_zeroes_sectors = UINT_MAX;
	lim->max_zone_append_sectors = UINT_MAX;
	lim->max_user_discard_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);

static void blk_apply_bdi_limits(struct backing_dev_info *bdi,
		struct queue_limits *lim)
{
	/*
	 * For read-ahead of large files to be effective, we need to read ahead
	 * at least twice the optimal I/O size.
	 */
	bdi->ra_pages = max(lim->io_opt * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
	bdi->io_pages = lim->max_sectors >> PAGE_SECTORS_SHIFT;
}

static int blk_validate_zoned_limits(struct queue_limits *lim)
{
	if (!lim->zoned) {
		if (WARN_ON_ONCE(lim->max_open_zones) ||
		    WARN_ON_ONCE(lim->max_active_zones) ||
		    WARN_ON_ONCE(lim->zone_write_granularity) ||
		    WARN_ON_ONCE(lim->max_zone_append_sectors))
			return -EINVAL;
		return 0;
	}

	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED)))
		return -EINVAL;

	if (lim->zone_write_granularity < lim->logical_block_size)
		lim->zone_write_granularity = lim->logical_block_size;

	if (lim->max_zone_append_sectors) {
		/*
		 * The Zone Append size is limited by the maximum I/O size
		 * and the zone size given that it can't span zones.
		 */
		lim->max_zone_append_sectors =
			min3(lim->max_hw_sectors,
			     lim->max_zone_append_sectors,
			     lim->chunk_sectors);
	}

	return 0;
}
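
/*
 * Illustrative sketch (not part of this file): how the Zone Append capping
 * in blk_validate_zoned_limits() plays out for a hypothetical zoned driver.
 * All names and numbers below are made up for illustration only.
 */
#if 0
static void example_zoned_limits(struct queue_limits *lim)
{
	lim->zoned = true;
	lim->chunk_sectors = 524288;		/* 256 MiB zones */
	lim->max_hw_sectors = 2048;		/* 1 MiB per command */
	lim->max_zone_append_sectors = 4096;	/* driver asks for 2 MiB */

	/*
	 * blk_validate_zoned_limits() clamps max_zone_append_sectors to
	 * min3(2048, 4096, 524288) == 2048, i.e. the per-command limit wins.
	 */
}
#endif
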
/*
 * Check that the limits in lim are valid, initialize defaults for unset
 * values, and cap values based on others where needed.
 */
static int blk_validate_limits(struct queue_limits *lim)
{
	unsigned int max_hw_sectors;

	/*
	 * Unless otherwise specified, default to 512 byte logical blocks and a
	 * physical block size equal to the logical block size.
	 */
	if (!lim->logical_block_size)
		lim->logical_block_size = SECTOR_SIZE;
	if (lim->physical_block_size < lim->logical_block_size)
		lim->physical_block_size = lim->logical_block_size;

	/*
	 * The minimum I/O size defaults to the physical block size unless
	 * explicitly overridden.
	 */
	if (lim->io_min < lim->physical_block_size)
		lim->io_min = lim->physical_block_size;

	/*
	 * max_hw_sectors has a somewhat weird default for historical reasons,
	 * but drivers really should set their own value instead of relying
	 * on this one.
	 *
	 * The block layer relies on the fact that every driver can
	 * handle at least a page worth of data per I/O, and needs the value
	 * aligned to the logical block size.
	 */
	if (!lim->max_hw_sectors)
		lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
	if (WARN_ON_ONCE(lim->max_hw_sectors < PAGE_SECTORS))
		return -EINVAL;
	lim->max_hw_sectors = round_down(lim->max_hw_sectors,
			lim->logical_block_size >> SECTOR_SHIFT);

	/*
	 * The actual max_sectors value is a complex beast and also takes the
	 * max_dev_sectors value (set by SCSI ULPs) and a user configurable
	 * value into account.  The ->max_sectors value is always calculated
	 * from these, so directly setting it won't have any effect.
	 */
	max_hw_sectors = min_not_zero(lim->max_hw_sectors,
				lim->max_dev_sectors);
	if (lim->max_user_sectors) {
		if (lim->max_user_sectors < PAGE_SIZE / SECTOR_SIZE)
			return -EINVAL;
		lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
	} else {
		lim->max_sectors = min(max_hw_sectors, BLK_DEF_MAX_SECTORS_CAP);
	}
	lim->max_sectors = round_down(lim->max_sectors,
			lim->logical_block_size >> SECTOR_SHIFT);

	/*
	 * Random default for the maximum number of segments.  Drivers should
	 * not rely on this and should set their own value.
	 */
	if (!lim->max_segments)
		lim->max_segments = BLK_MAX_SEGMENTS;

	lim->max_discard_sectors =
		min(lim->max_hw_discard_sectors, lim->max_user_discard_sectors);

	if (!lim->max_discard_segments)
		lim->max_discard_segments = 1;

	if (lim->discard_granularity < lim->physical_block_size)
		lim->discard_granularity = lim->physical_block_size;

	/*
	 * By default there is no limit on the segment boundary alignment,
	 * but if there is one it can't be smaller than the page size as
	 * that would break all the normal I/O patterns.
	 */
	if (!lim->seg_boundary_mask)
		lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	if (WARN_ON_ONCE(lim->seg_boundary_mask < PAGE_SIZE - 1))
		return -EINVAL;

	/*
	 * Stacking devices may have both a virtual boundary and a max segment
	 * size limit, so allow this setting now, and long-term the two
	 * might need to move out of stacking limits since we have immutable
	 * bvecs and lower layer bio splitting is supposed to handle the two
	 * correctly.
	 */
	if (lim->virt_boundary_mask) {
		if (!lim->max_segment_size)
			lim->max_segment_size = UINT_MAX;
	} else {
		/*
		 * The maximum segment size has an odd historic 64k default that
		 * drivers probably should override.  Just like the I/O size we
		 * require drivers to at least handle a full page per segment.
		 */
		if (!lim->max_segment_size)
			lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
		if (WARN_ON_ONCE(lim->max_segment_size < PAGE_SIZE))
			return -EINVAL;
	}

	/*
	 * We require drivers to at least do logical block aligned I/O, but
	 * historically could not check for that due to the separate calls
	 * to set the limits.  Once the transition is finished the check
	 * below should be narrowed down to check the logical block size.
	 */
	if (!lim->dma_alignment)
		lim->dma_alignment = SECTOR_SIZE - 1;
	if (WARN_ON_ONCE(lim->dma_alignment > PAGE_SIZE))
		return -EINVAL;

	if (lim->alignment_offset) {
		lim->alignment_offset &= (lim->physical_block_size - 1);
		lim->misaligned = 0;
	}

	return blk_validate_zoned_limits(lim);
}
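
/*
 * Illustrative sketch (not part of this file): what blk_validate_limits()
 * derives from a minimally filled-in queue_limits.  The values below are
 * hypothetical and only meant to show the defaulting and capping rules above.
 */
#if 0
static void example_validate_defaults(void)
{
	struct queue_limits lim = {
		.logical_block_size	= 4096,
		.max_hw_sectors		= 65535,
	};

	/*
	 * After validation (e.g. via queue_limits_commit_update()):
	 *   physical_block_size = 4096	(raised to the logical block size)
	 *   io_min              = 4096	(raised to the physical block size)
	 *   max_hw_sectors      = 65528	(rounded down to 4k alignment)
	 *   max_sectors         = min(65528, BLK_DEF_MAX_SECTORS_CAP),
	 *                         rounded down to 4k alignment
	 *   max_segments        = BLK_MAX_SEGMENTS
	 *   max_segment_size    = BLK_MAX_SEGMENT_SIZE
	 *   dma_alignment       = SECTOR_SIZE - 1
	 */
}
#endif
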
/*
 * Set the default limits for a newly allocated queue.  @lim contains the
 * initial limits set by the driver, which could be no limits at all, in
 * which case all fields are cleared to zero.
 */
int blk_set_default_limits(struct queue_limits *lim)
{
	/*
	 * Most defaults are set by capping the bounds in blk_validate_limits,
	 * but max_user_discard_sectors is special and needs an explicit
	 * initialization to the max value here.
	 */
	lim->max_user_discard_sectors = UINT_MAX;
	return blk_validate_limits(lim);
}

/**
 * queue_limits_commit_update - commit an atomic update of queue limits
 * @q:	queue to update
 * @lim:	limits to apply
 *
 * Apply the limits in @lim that were obtained from queue_limits_start_update()
 * and updated by the caller to @q.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_commit_update(struct request_queue *q,
		struct queue_limits *lim)
	__releases(q->limits_lock)
{
	int error = blk_validate_limits(lim);

	if (!error) {
		q->limits = *lim;
		if (q->disk)
			blk_apply_bdi_limits(q->disk->bdi, lim);
	}
	mutex_unlock(&q->limits_lock);
	return error;
}
EXPORT_SYMBOL_GPL(queue_limits_commit_update);

/**
 * queue_limits_set - apply queue limits to queue
 * @q:	queue to update
 * @lim:	limits to apply
 *
 * Apply the limits in @lim that were freshly initialized to @q.
 * To update existing limits use queue_limits_start_update() and
 * queue_limits_commit_update() instead.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
{
	mutex_lock(&q->limits_lock);
	return queue_limits_commit_update(q, lim);
}
EXPORT_SYMBOL_GPL(queue_limits_set);
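
/*
 * Illustrative sketch (not part of this file): the atomic-update pattern the
 * kernel-doc above refers to.  A driver snapshots the limits with
 * queue_limits_start_update() (declared in blkdev.h), modifies the copy, and
 * publishes it with queue_limits_commit_update().  The function name and the
 * values used here are hypothetical.
 */
#if 0
static int example_shrink_max_sectors(struct gendisk *disk)
{
	struct queue_limits lim;

	lim = queue_limits_start_update(disk->queue);	/* takes limits_lock */
	lim.max_hw_sectors = 256;			/* 128k per request */
	return queue_limits_commit_update(disk->queue, &lim); /* validate + unlock */
}
#endif
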
/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @bounce: bounce limit to enforce
 *
 * Description:
 *    Force bouncing for ISA DMA ranges or highmem.
 *
 *    DEPRECATED, don't use in new code.
 **/
void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce)
{
	q->limits.bounce = bounce;
}
EXPORT_SYMBOL(blk_queue_bounce_limit);

/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set a hard upper limit,
 *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
 *    the device driver based upon the capabilities of the I/O
 *    controller.
 *
 *    max_dev_sectors is a hard limit imposed by the storage device for
 *    READ/WRITE requests.  It is set by the disk driver.
 *
 *    max_sectors is a soft limit imposed by the block layer for
 *    filesystem type requests.  This value can be overridden on a
 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *    The soft limit cannot exceed max_hw_sectors.
 **/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
	struct queue_limits *limits = &q->limits;
	unsigned int max_sectors;

	if ((max_hw_sectors << 9) < PAGE_SIZE) {
		max_hw_sectors = 1 << (PAGE_SHIFT - 9);
		pr_info("%s: set to minimum %u\n", __func__, max_hw_sectors);
	}

	max_hw_sectors = round_down(max_hw_sectors,
			limits->logical_block_size >> SECTOR_SHIFT);
	limits->max_hw_sectors = max_hw_sectors;

	max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);

	if (limits->max_user_sectors)
		max_sectors = min(max_sectors, limits->max_user_sectors);
	else
		max_sectors = min(max_sectors, BLK_DEF_MAX_SECTORS_CAP);

	max_sectors = round_down(max_sectors,
			limits->logical_block_size >> SECTOR_SHIFT);
	limits->max_sectors = max_sectors;

	if (!q->disk)
		return;
	q->disk->bdi->io_pages = max_sectors >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
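
/*
 * Illustrative sketch (not part of this file): a driver whose controller can
 * transfer at most 1 MiB per request.  Per the code above, the soft
 * max_sectors limit still ends up capped at BLK_DEF_MAX_SECTORS_CAP unless
 * the user raises /sys/block/<device>/queue/max_sectors_kb.  The function
 * name is made up.
 */
#if 0
static void example_set_transfer_limit(struct request_queue *q)
{
	blk_queue_max_hw_sectors(q, 2048);	/* 2048 * 512 bytes == 1 MiB */
}
#endif
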
/**
 * blk_queue_chunk_sectors - set size of the chunk for this queue
 * @q:  the request queue for the device
 * @chunk_sectors:  chunk sectors in the usual 512b unit
 *
 * Description:
 *    If a driver doesn't want IOs to cross a given chunk size, it can set
 *    this limit and prevent merging across chunks.  Note that the block layer
 *    must accept a page worth of data at any offset.  So if the crossing of
 *    chunks is a hard limitation in the driver, it must still be prepared
 *    to split single page bios.
 **/
void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
{
	q->limits.chunk_sectors = chunk_sectors;
}
EXPORT_SYMBOL(blk_queue_chunk_sectors);

/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q:  the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors)
{
	struct queue_limits *lim = &q->limits;

	lim->max_hw_discard_sectors = max_discard_sectors;
	lim->max_discard_sectors =
		min(max_discard_sectors, lim->max_user_discard_sectors);
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_secure_erase_sectors - set max sectors for a secure erase
 * @q:  the request queue for the device
 * @max_sectors: maximum number of sectors to secure_erase
 **/
void blk_queue_max_secure_erase_sectors(struct request_queue *q,
		unsigned int max_sectors)
{
	q->limits.max_secure_erase_sectors = max_sectors;
}
EXPORT_SYMBOL(blk_queue_max_secure_erase_sectors);

/**
 * blk_queue_max_write_zeroes_sectors - set max sectors for a single
 *                                      write zeroes
 * @q:  the request queue for the device
 * @max_write_zeroes_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
		unsigned int max_write_zeroes_sectors)
{
	q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);

/**
 * blk_queue_max_zone_append_sectors - set max sectors for a single zone append
 * @q:  the request queue for the device
 * @max_zone_append_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_zone_append_sectors(struct request_queue *q,
		unsigned int max_zone_append_sectors)
{
	unsigned int max_sectors;

	if (WARN_ON(!blk_queue_is_zoned(q)))
		return;

	max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors);
	max_sectors = min(q->limits.chunk_sectors, max_sectors);

	/*
	 * Warn about likely driver bugs that would result in a zero
	 * max_zone_append_sectors limit: a zero argument, an unset
	 * chunk_sectors limit (zone size), or an unset max_hw_sectors limit.
	 */
	WARN_ON(!max_sectors);

	q->limits.max_zone_append_sectors = max_sectors;
}
EXPORT_SYMBOL_GPL(blk_queue_max_zone_append_sectors);

/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.
 **/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		pr_info("%s: set to minimum %u\n", __func__, max_segments);
	}

	q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);

/**
 * blk_queue_max_discard_segments - set max segments for discard requests
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    segments in a discard request.
 **/
void blk_queue_max_discard_segments(struct request_queue *q,
		unsigned short max_segments)
{
	q->limits.max_discard_segments = max_segments;
}
EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment.
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
	if (max_size < PAGE_SIZE) {
		max_size = PAGE_SIZE;
		pr_info("%s: set to minimum %u\n", __func__, max_size);
	}

	/* see blk_queue_virt_boundary() for the explanation */
	WARN_ON_ONCE(q->limits.virt_boundary_mask);

	q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q:  the request queue for the device
 * @size:  the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
{
	struct queue_limits *limits = &q->limits;

	limits->logical_block_size = size;

	if (limits->discard_granularity < limits->logical_block_size)
		limits->discard_granularity = limits->logical_block_size;

	if (limits->physical_block_size < size)
		limits->physical_block_size = size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;

	limits->max_hw_sectors =
		round_down(limits->max_hw_sectors, size >> SECTOR_SHIFT);
	limits->max_sectors =
		round_down(limits->max_sectors, size >> SECTOR_SHIFT);
}
EXPORT_SYMBOL(blk_queue_logical_block_size);

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q:  the request queue for the device
 * @size:  the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
	q->limits.physical_block_size = size;

	if (q->limits.physical_block_size < q->limits.logical_block_size)
		q->limits.physical_block_size = q->limits.logical_block_size;

	if (q->limits.discard_granularity < q->limits.physical_block_size)
		q->limits.discard_granularity = q->limits.physical_block_size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);

/**
 * blk_queue_zone_write_granularity - set zone write granularity for the queue
 * @q:  the request queue for the zoned device
 * @size:  the zone write granularity size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible size that allows writing in
 *   sequential zones of a zoned block device.
 */
void blk_queue_zone_write_granularity(struct request_queue *q,
				      unsigned int size)
{
	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
		return;

	q->limits.zone_write_granularity = size;

	if (q->limits.zone_write_granularity < q->limits.logical_block_size)
		q->limits.zone_write_granularity = q->limits.logical_block_size;
}
EXPORT_SYMBOL_GPL(blk_queue_zone_write_granularity);

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q:  the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
	q->limits.alignment_offset =
		offset & (q->limits.physical_block_size - 1);
	q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);

void disk_update_readahead(struct gendisk *disk)
{
	blk_apply_bdi_limits(disk->bdi, &disk->queue->limits);
}
EXPORT_SYMBOL_GPL(disk_update_readahead);

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
	limits->io_min = min;

	if (limits->io_min < limits->logical_block_size)
		limits->io_min = limits->logical_block_size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q:  the request queue for the device
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty.  For disk drives this is often the
 *   physical block size.  For RAID arrays it is often the stripe chunk
 *   size.  A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
	blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);
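
/*
 * Illustrative sketch (not part of this file): probe-time configuration for a
 * hypothetical 512e drive (512-byte logical, 4096-byte physical sectors)
 * behind a RAID controller with 64 KiB stripe chunks, using the per-field
 * helpers above and blk_queue_io_opt() below.  All values and the function
 * name are made up for illustration.
 */
#if 0
static void example_probe_limits(struct request_queue *q)
{
	blk_queue_logical_block_size(q, 512);
	blk_queue_physical_block_size(q, 4096);
	blk_queue_io_min(q, 4096);		/* avoid read-modify-write */
	blk_queue_io_opt(q, 64 * 1024);		/* preferred sustained I/O unit */
}
#endif
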
/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
	limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q:  the request queue for the device
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
	blk_limits_io_opt(&q->limits, opt);
	if (!q->disk)
		return;
	q->disk->bdi->ra_pages =
		max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
}
EXPORT_SYMBOL(blk_queue_io_opt);

static int queue_limit_alignment_offset(const struct queue_limits *lim,
		sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
		<< SECTOR_SHIFT;

	return (granularity + lim->alignment_offset - alignment) % granularity;
}

static unsigned int queue_limit_discard_alignment(
		const struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment, granularity, offset;

	if (!lim->max_discard_sectors)
		return 0;

	/* Why are these in bytes, not sectors? */
	alignment = lim->discard_alignment >> SECTOR_SHIFT;
	granularity = lim->discard_granularity >> SECTOR_SHIFT;
	if (!granularity)
		return 0;

	/* Offset of the partition start in 'granularity' sectors */
	offset = sector_div(sector, granularity);

	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
	offset = (granularity + alignment - offset) % granularity;

	/* Turn it back into bytes, gaah */
	return offset << SECTOR_SHIFT;
}

static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
{
	sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
	if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
		sectors = PAGE_SIZE >> SECTOR_SHIFT;
	return sectors;
}

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device.  If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		sector_t start)
{
	unsigned int top, bottom, alignment, ret = 0;

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
					b->max_write_zeroes_sectors);
	t->max_zone_append_sectors = min(t->max_zone_append_sectors,
					b->max_zone_append_sectors);
	t->bounce = max(t->bounce, b->bounce);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);
	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
					     b->virt_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
	t->max_discard_segments = min_not_zero(t->max_discard_segments,
					       b->max_discard_segments);
	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
						 b->max_integrity_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	t->misaligned |= b->misaligned;

	alignment = queue_limit_alignment_offset(b, start);

	/* Bottom device has different alignment.  Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) % min(top, bottom)) {
			t->misaligned = 1;
			ret = -1;
		}
	}

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
	t->dma_alignment = max(t->dma_alignment, b->dma_alignment);

	/* Set non-power-of-2 compatible chunk_sectors boundary */
	if (b->chunk_sectors)
		t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->misaligned = 1;
		ret = -1;
	}

	/* chunk_sectors a multiple of the physical block size? */
	if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
		t->chunk_sectors = 0;
		t->misaligned = 1;
		ret = -1;
	}

	t->raid_partial_stripes_expensive =
		max(t->raid_partial_stripes_expensive,
		    b->raid_partial_stripes_expensive);

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
		% max(t->physical_block_size, t->io_min);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->misaligned = 1;
		ret = -1;
	}

	t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
	t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
	t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		if (t->discard_granularity != 0 &&
		    t->discard_alignment != alignment) {
			top = t->discard_granularity + t->discard_alignment;
			bottom = b->discard_granularity + alignment;

			/* Verify that top and bottom intervals line up */
			if ((max(top, bottom) % min(top, bottom)) != 0)
				t->discard_misaligned = 1;
		}

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
							 b->max_hw_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
			t->discard_granularity;
	}
	t->max_secure_erase_sectors = min_not_zero(t->max_secure_erase_sectors,
						   b->max_secure_erase_sectors);
	t->zone_write_granularity = max(t->zone_write_granularity,
					b->zone_write_granularity);
	t->zoned = max(t->zoned, b->zoned);
	if (!t->zoned) {
		t->zone_write_granularity = 0;
		t->max_zone_append_sectors = 0;
	}
	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);

/**
 * queue_limits_stack_bdev - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 * @pfx: prefix to use for warnings logged
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 */
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
		sector_t offset, const char *pfx)
{
	if (blk_stack_limits(t, &bdev_get_queue(bdev)->limits,
			get_start_sect(bdev) + offset))
		pr_notice("%s: Warning: Device %pg is misaligned\n",
			pfx, bdev);
}
EXPORT_SYMBOL_GPL(queue_limits_stack_bdev);
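
/*
 * Illustrative sketch (not part of this file): how a stacking driver might
 * combine the limits of its component devices and apply the result, roughly
 * following the MD/DM pattern described in the kernel-doc above.  The
 * function name, the device array and "ndevs" are hypothetical.
 */
#if 0
static int example_stack_components(struct gendisk *disk,
		struct block_device **bdevs, int ndevs)
{
	struct queue_limits lim;
	int i;

	/* Start from permissive defaults meant to be tightened by stacking */
	blk_set_stacking_limits(&lim);

	for (i = 0; i < ndevs; i++)
		queue_limits_stack_bdev(&lim, bdevs[i], 0, disk->disk_name);

	/* Validate and publish the combined limits */
	return queue_limits_set(disk->queue, &lim);
}
#endif
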
/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
	if (mask > q->dma_pad_mask)
		q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
	if (mask < PAGE_SIZE - 1) {
		mask = PAGE_SIZE - 1;
		pr_info("%s: set to minimum %lx\n", __func__, mask);
	}

	q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_virt_boundary - set boundary rules for bio merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
{
	q->limits.virt_boundary_mask = mask;

	/*
	 * Devices that require a virtual boundary do not support scatter/gather
	 * I/O natively, but instead require a descriptor list entry for each
	 * page (which might not be identical to the Linux PAGE_SIZE).  Because
	 * of that they are not limited by our notion of "segment size".
	 */
	if (mask)
		q->limits.max_segment_size = UINT_MAX;
}
EXPORT_SYMBOL(blk_queue_virt_boundary);

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Set required memory and length alignment for direct dma transactions.
 *    This is used when building direct io requests for the queue.
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
	q->limits.dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Update required memory and length alignment for direct dma transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
	BUG_ON(mask > PAGE_SIZE);

	if (mask > q->limits.dma_alignment)
		q->limits.dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);

/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q:      the request queue for the device
 * @depth:  queue depth
 */
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
	q->queue_depth = depth;
	rq_qos_queue_depth_changed(q);
}
EXPORT_SYMBOL(blk_set_queue_depth);
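
/*
 * Illustrative sketch (not part of this file): a controller that, like many
 * page-list based designs, cannot handle a data buffer crossing an arbitrary
 * controller-page boundary mid-segment.  Such a driver would set a virtual
 * boundary, and per the comment in blk_queue_virt_boundary() above, the
 * segment size limit then no longer applies.  The names are hypothetical.
 */
#if 0
static void example_page_list_controller(struct request_queue *q,
		unsigned int ctrl_page_size)
{
	/* disallow segments that straddle a controller page boundary */
	blk_queue_virt_boundary(q, ctrl_page_size - 1);
}
#endif
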
/**
 * blk_queue_write_cache - configure queue's write cache
 * @q:		the request queue for the device
 * @wc:		write back cache on or off
 * @fua:	device supports FUA writes, if true
 *
 * Tell the block layer about the write cache of @q.
 */
void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
{
	if (wc) {
		blk_queue_flag_set(QUEUE_FLAG_HW_WC, q);
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	} else {
		blk_queue_flag_clear(QUEUE_FLAG_HW_WC, q);
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
	}
	if (fua)
		blk_queue_flag_set(QUEUE_FLAG_FUA, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
}
EXPORT_SYMBOL_GPL(blk_queue_write_cache);

/**
 * blk_queue_required_elevator_features - set the required elevator features
 * @q:		the request queue for the target device
 * @features:	required elevator features OR'ed together
 *
 * Tell the block layer that for the device controlled through @q, only
 * elevators that implement at least the set of features specified by
 * @features can be used.
 */
void blk_queue_required_elevator_features(struct request_queue *q,
					  unsigned int features)
{
	q->required_elevator_features = features;
}
EXPORT_SYMBOL_GPL(blk_queue_required_elevator_features);

/**
 * blk_queue_can_use_dma_map_merging - configure queue for merging segments.
 * @q:		the request queue for the device
 * @dev:	the device pointer for dma
 *
 * Tell the block layer about merging the segments by dma map of @q.
 */
bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
				       struct device *dev)
{
	unsigned long boundary = dma_get_merge_boundary(dev);

	if (!boundary)
		return false;

	/* No need to update max_segment_size.  see blk_queue_virt_boundary() */
	blk_queue_virt_boundary(q, boundary);

	return true;
}
EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);

/**
 * disk_set_zoned - indicate a zoned device
 * @disk:	gendisk to configure
 */
void disk_set_zoned(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED));

	/*
	 * Set the zone write granularity to the device logical block
	 * size by default.  The driver can change this value if needed.
	 */
	q->limits.zoned = true;
	blk_queue_zone_write_granularity(q, queue_logical_block_size(q));
}
EXPORT_SYMBOL_GPL(disk_set_zoned);

int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.misaligned)
		return -1;
	if (bdev_is_partition(bdev))
		return queue_limit_alignment_offset(&q->limits,
				bdev->bd_start_sect);
	return q->limits.alignment_offset;
}
EXPORT_SYMBOL_GPL(bdev_alignment_offset);

unsigned int bdev_discard_alignment(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (bdev_is_partition(bdev))
		return queue_limit_discard_alignment(&q->limits,
				bdev->bd_start_sect);
	return q->limits.discard_alignment;
}
EXPORT_SYMBOL_GPL(bdev_discard_alignment);
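
/*
 * Illustrative sketch (not part of this file): reporting cache capabilities
 * at probe time.  A hypothetical driver whose device has a volatile write
 * back cache and honours FUA writes would report both via
 * blk_queue_write_cache(), so the block layer issues flushes and FUA
 * requests as needed.
 */
#if 0
static void example_report_cache(struct request_queue *q)
{
	blk_queue_write_cache(q, true, true);	/* volatile cache + FUA support */
}
#endif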