// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>

#include "blk.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim: the queue_limits structure to reset
 *
 * Prepare queue limits for applying limits from underlying devices using
 * blk_stack_limits().
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
	memset(lim, 0, sizeof(*lim));
	lim->logical_block_size = SECTOR_SIZE;
	lim->physical_block_size = SECTOR_SIZE;
	lim->io_min = SECTOR_SIZE;
	lim->discard_granularity = SECTOR_SIZE;
	lim->dma_alignment = SECTOR_SIZE - 1;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;

	/* Inherit limits from component devices */
	lim->max_segments = USHRT_MAX;
	lim->max_discard_segments = USHRT_MAX;
	lim->max_hw_sectors = UINT_MAX;
	lim->max_segment_size = UINT_MAX;
	lim->max_sectors = UINT_MAX;
	lim->max_dev_sectors = UINT_MAX;
	lim->max_write_zeroes_sectors = UINT_MAX;
	lim->max_zone_append_sectors = UINT_MAX;
	lim->max_user_discard_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);

static void blk_apply_bdi_limits(struct backing_dev_info *bdi,
		struct queue_limits *lim)
{
	/*
	 * For read-ahead of large files to be effective, we need to read ahead
	 * at least twice the optimal I/O size.
	 */
	bdi->ra_pages = max(lim->io_opt * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
	bdi->io_pages = lim->max_sectors >> PAGE_SECTORS_SHIFT;
}

static int blk_validate_zoned_limits(struct queue_limits *lim)
{
	if (!lim->zoned) {
		if (WARN_ON_ONCE(lim->max_open_zones) ||
		    WARN_ON_ONCE(lim->max_active_zones) ||
		    WARN_ON_ONCE(lim->zone_write_granularity) ||
		    WARN_ON_ONCE(lim->max_zone_append_sectors))
			return -EINVAL;
		return 0;
	}

	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED)))
		return -EINVAL;

	if (lim->zone_write_granularity < lim->logical_block_size)
		lim->zone_write_granularity = lim->logical_block_size;

	if (lim->max_zone_append_sectors) {
		/*
		 * The Zone Append size is limited by the maximum I/O size
		 * and the zone size given that it can't span zones.
		 */
		lim->max_zone_append_sectors =
			min3(lim->max_hw_sectors,
			     lim->max_zone_append_sectors,
			     lim->chunk_sectors);
	}

	return 0;
}

/*
 * Check that the limits in lim are valid, initialize defaults for unset
 * values, and cap values based on others where needed.
 */
static int blk_validate_limits(struct queue_limits *lim)
{
	unsigned int max_hw_sectors;

	/*
	 * Unless otherwise specified, default to 512 byte logical blocks and a
	 * physical block size equal to the logical block size.
	 */
	if (!lim->logical_block_size)
		lim->logical_block_size = SECTOR_SIZE;
	if (lim->physical_block_size < lim->logical_block_size)
		lim->physical_block_size = lim->logical_block_size;

	/*
	 * The minimum I/O size defaults to the physical block size unless
	 * explicitly overridden.
	 */
	if (lim->io_min < lim->physical_block_size)
		lim->io_min = lim->physical_block_size;

	/*
	 * max_hw_sectors has a somewhat weird default for historical reasons,
	 * but drivers really should set their own value instead of relying on
	 * this one.
	 *
	 * The block layer relies on the fact that every driver can
	 * handle at least a page worth of data per I/O, and needs the value
	 * aligned to the logical block size.
	 */
	if (!lim->max_hw_sectors)
		lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
	if (WARN_ON_ONCE(lim->max_hw_sectors < PAGE_SECTORS))
		return -EINVAL;
	lim->max_hw_sectors = round_down(lim->max_hw_sectors,
			lim->logical_block_size >> SECTOR_SHIFT);

	/*
	 * The actual max_sectors value is a complex beast and also takes the
	 * max_dev_sectors value (set by SCSI ULPs) and a user configurable
	 * value into account. The ->max_sectors value is always calculated
	 * from these, so directly setting it won't have any effect.
	 */
	max_hw_sectors = min_not_zero(lim->max_hw_sectors,
				lim->max_dev_sectors);
	if (lim->max_user_sectors) {
		if (lim->max_user_sectors > max_hw_sectors ||
		    lim->max_user_sectors < PAGE_SIZE / SECTOR_SIZE)
			return -EINVAL;
		lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
	} else {
		lim->max_sectors = min(max_hw_sectors, BLK_DEF_MAX_SECTORS_CAP);
	}
	lim->max_sectors = round_down(lim->max_sectors,
			lim->logical_block_size >> SECTOR_SHIFT);

	/*
	 * Random default for the maximum number of segments. Drivers should
	 * not rely on this and should set their own value.
	 */
	if (!lim->max_segments)
		lim->max_segments = BLK_MAX_SEGMENTS;

	lim->max_discard_sectors =
		min(lim->max_hw_discard_sectors, lim->max_user_discard_sectors);

	if (!lim->max_discard_segments)
		lim->max_discard_segments = 1;

	if (lim->discard_granularity < lim->physical_block_size)
		lim->discard_granularity = lim->physical_block_size;

	/*
	 * By default there is no limit on the segment boundary alignment,
	 * but if there is one it can't be smaller than the page size as
	 * that would break all the normal I/O patterns.
	 */
	if (!lim->seg_boundary_mask)
		lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	if (WARN_ON_ONCE(lim->seg_boundary_mask < PAGE_SIZE - 1))
		return -EINVAL;

	/*
	 * Devices that require a virtual boundary do not support scatter/gather
	 * I/O natively, but instead require a descriptor list entry for each
	 * page (which might not be identical to the Linux PAGE_SIZE). Because
	 * of that they are not limited by our notion of "segment size".
	 */
	if (lim->virt_boundary_mask) {
		if (WARN_ON_ONCE(lim->max_segment_size &&
				 lim->max_segment_size != UINT_MAX))
			return -EINVAL;
		lim->max_segment_size = UINT_MAX;
	} else {
		/*
		 * The maximum segment size has an odd historic 64k default that
		 * drivers probably should override. Just like the I/O size we
		 * require drivers to at least handle a full page per segment.
		 */
		if (!lim->max_segment_size)
			lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
		if (WARN_ON_ONCE(lim->max_segment_size < PAGE_SIZE))
			return -EINVAL;
	}

	/*
	 * We require drivers to at least do logical block aligned I/O, but
	 * historically could not check for that due to the separate calls
	 * to set the limits. Once the transition is finished the check
	 * below should be narrowed down to check the logical block size.
	 */
	if (!lim->dma_alignment)
		lim->dma_alignment = SECTOR_SIZE - 1;
	if (WARN_ON_ONCE(lim->dma_alignment > PAGE_SIZE))
		return -EINVAL;

	if (lim->alignment_offset) {
		lim->alignment_offset &= (lim->physical_block_size - 1);
		lim->misaligned = 0;
	}

	return blk_validate_zoned_limits(lim);
}

/*
 * Set the default limits for a newly allocated queue. @lim contains the
 * initial limits set by the driver, which could be no limits at all, in
 * which case all fields are cleared to zero.
 */
int blk_set_default_limits(struct queue_limits *lim)
{
	/*
	 * Most defaults are set by capping the bounds in blk_validate_limits,
	 * but max_user_discard_sectors is special and needs an explicit
	 * initialization to the max value here.
	 */
	lim->max_user_discard_sectors = UINT_MAX;
	return blk_validate_limits(lim);
}
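
/*
 * Worked example (illustrative only, not used by the code): a driver that
 * only fills in lim->logical_block_size = 4096 and leaves everything else
 * zero gets physical_block_size, io_min and discard_granularity raised to
 * 4096 by blk_validate_limits(), max_hw_sectors defaulted to
 * BLK_SAFE_MAX_SECTORS rounded down to a 4096-byte multiple, and max_sectors
 * set to min(max_hw_sectors, BLK_DEF_MAX_SECTORS_CAP) with the same rounding.
 */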

/**
 * queue_limits_commit_update - commit an atomic update of queue limits
 * @q: queue to update
 * @lim: limits to apply
 *
 * Apply the limits in @lim that were obtained from queue_limits_start_update()
 * and updated by the caller to @q.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_commit_update(struct request_queue *q,
		struct queue_limits *lim)
	__releases(q->limits_lock)
{
	int error = blk_validate_limits(lim);

	if (!error) {
		q->limits = *lim;
		if (q->disk)
			blk_apply_bdi_limits(q->disk->bdi, lim);
	}
	mutex_unlock(&q->limits_lock);
	return error;
}
EXPORT_SYMBOL_GPL(queue_limits_commit_update);

/**
 * queue_limits_set - apply queue limits to queue
 * @q: queue to update
 * @lim: limits to apply
 *
 * Apply the limits in @lim that were freshly initialized to @q.
 * To update existing limits use queue_limits_start_update() and
 * queue_limits_commit_update() instead.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
{
	mutex_lock(&q->limits_lock);
	return queue_limits_commit_update(q, lim);
}
EXPORT_SYMBOL_GPL(queue_limits_set);
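
/*
 * Example of the atomic-update pattern referenced above (an illustrative
 * sketch; the surrounding driver context and the queue pointer "q" are
 * hypothetical):
 *
 *	struct queue_limits lim;
 *	int error;
 *
 *	lim = queue_limits_start_update(q);
 *	lim.logical_block_size = 4096;
 *	error = queue_limits_commit_update(q, &lim);
 *
 * queue_limits_start_update() hands back a snapshot of the current limits
 * with q->limits_lock held; queue_limits_commit_update() validates the
 * result, applies it and drops the lock. For limits that are freshly
 * initialized rather than derived from the current ones, queue_limits_set()
 * takes the lock itself.
 */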

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @bounce: bounce limit to enforce
 *
 * Description:
 *    Force bouncing for ISA DMA ranges or highmem.
 *
 *    DEPRECATED, don't use in new code.
 **/
void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce)
{
	q->limits.bounce = bounce;
}
EXPORT_SYMBOL(blk_queue_bounce_limit);

/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q: the request queue for the device
 * @max_hw_sectors: max hardware sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set a hard upper limit,
 *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
 *    the device driver based upon the capabilities of the I/O
 *    controller.
 *
 *    max_dev_sectors is a hard limit imposed by the storage device for
 *    READ/WRITE requests. It is set by the disk driver.
 *
 *    max_sectors is a soft limit imposed by the block layer for
 *    filesystem type requests.  This value can be overridden on a
 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *    The soft limit can not exceed max_hw_sectors.
 **/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
	struct queue_limits *limits = &q->limits;
	unsigned int max_sectors;

	if ((max_hw_sectors << 9) < PAGE_SIZE) {
		max_hw_sectors = 1 << (PAGE_SHIFT - 9);
		pr_info("%s: set to minimum %u\n", __func__, max_hw_sectors);
	}

	max_hw_sectors = round_down(max_hw_sectors,
				    limits->logical_block_size >> SECTOR_SHIFT);
	limits->max_hw_sectors = max_hw_sectors;

	max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);

	if (limits->max_user_sectors)
		max_sectors = min(max_sectors, limits->max_user_sectors);
	else
		max_sectors = min(max_sectors, BLK_DEF_MAX_SECTORS_CAP);

	max_sectors = round_down(max_sectors,
				 limits->logical_block_size >> SECTOR_SHIFT);
	limits->max_sectors = max_sectors;

	if (!q->disk)
		return;
	q->disk->bdi->io_pages = max_sectors >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
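
/*
 * Worked example of the derivation above (the values are hypothetical): a
 * driver calling blk_queue_max_hw_sectors(q, 8192) on a queue with 512-byte
 * logical blocks, no max_dev_sectors and no user override ends up with
 * max_hw_sectors = 8192 and max_sectors = min(8192, BLK_DEF_MAX_SECTORS_CAP),
 * so the soft limit may still be capped by the block layer default until the
 * user raises max_sectors_kb.
 */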

/**
 * blk_queue_chunk_sectors - set size of the chunk for this queue
 * @q: the request queue for the device
 * @chunk_sectors: chunk sectors in the usual 512b unit
 *
 * Description:
 *    If a driver doesn't want IOs to cross a given chunk size, it can set
 *    this limit and prevent merging across chunks. Note that the block layer
 *    must accept a page worth of data at any offset. So if the crossing of
 *    chunks is a hard limitation in the driver, it must still be prepared
 *    to split single page bios.
 **/
void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
{
	q->limits.chunk_sectors = chunk_sectors;
}
EXPORT_SYMBOL(blk_queue_chunk_sectors);

/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q: the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors)
{
	struct queue_limits *lim = &q->limits;

	lim->max_hw_discard_sectors = max_discard_sectors;
	lim->max_discard_sectors =
		min(max_discard_sectors, lim->max_user_discard_sectors);
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_secure_erase_sectors - set max sectors for a secure erase
 * @q: the request queue for the device
 * @max_sectors: maximum number of sectors to secure_erase
 **/
void blk_queue_max_secure_erase_sectors(struct request_queue *q,
		unsigned int max_sectors)
{
	q->limits.max_secure_erase_sectors = max_sectors;
}
EXPORT_SYMBOL(blk_queue_max_secure_erase_sectors);

/**
 * blk_queue_max_write_zeroes_sectors - set max sectors for a single
 *                                      write zeroes
 * @q: the request queue for the device
 * @max_write_zeroes_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
		unsigned int max_write_zeroes_sectors)
{
	q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);

/**
 * blk_queue_max_zone_append_sectors - set max sectors for a single zone append
 * @q: the request queue for the device
 * @max_zone_append_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_zone_append_sectors(struct request_queue *q,
		unsigned int max_zone_append_sectors)
{
	unsigned int max_sectors;

	if (WARN_ON(!blk_queue_is_zoned(q)))
		return;

	max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors);
	max_sectors = min(q->limits.chunk_sectors, max_sectors);

	/*
	 * Signal eventual driver bugs resulting in the max_zone_append
	 * sectors limit being 0 due to a 0 argument, the chunk_sectors limit
	 * (zone size) not set, or the max_hw_sectors limit not set.
	 */
	WARN_ON(!max_sectors);

	q->limits.max_zone_append_sectors = max_sectors;
}
EXPORT_SYMBOL_GPL(blk_queue_max_zone_append_sectors);
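
/*
 * Worked example (hypothetical values): on a zoned device with 256 MiB zones
 * (chunk_sectors = 524288) and max_hw_sectors = 2048, a call to
 * blk_queue_max_zone_append_sectors(q, 65536) results in
 * min(524288, min(2048, 65536)) = 2048, i.e. zone append is capped by the
 * maximum I/O size even though the zone is much larger.
 */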

/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q: the request queue for the device
 * @max_segments: max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.
 **/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		pr_info("%s: set to minimum %u\n", __func__, max_segments);
	}

	q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);

/**
 * blk_queue_max_discard_segments - set max segments for discard requests
 * @q: the request queue for the device
 * @max_segments: max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    segments in a discard request.
 **/
void blk_queue_max_discard_segments(struct request_queue *q,
		unsigned short max_segments)
{
	q->limits.max_discard_segments = max_segments;
}
EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q: the request queue for the device
 * @max_size: max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
	if (max_size < PAGE_SIZE) {
		max_size = PAGE_SIZE;
		pr_info("%s: set to minimum %u\n", __func__, max_size);
	}

	/* see blk_queue_virt_boundary() for the explanation */
	WARN_ON_ONCE(q->limits.virt_boundary_mask);

	q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q: the request queue for the device
 * @size: the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
{
	struct queue_limits *limits = &q->limits;

	limits->logical_block_size = size;

	if (limits->discard_granularity < limits->logical_block_size)
		limits->discard_granularity = limits->logical_block_size;

	if (limits->physical_block_size < size)
		limits->physical_block_size = size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;

	limits->max_hw_sectors =
		round_down(limits->max_hw_sectors, size >> SECTOR_SHIFT);
	limits->max_sectors =
		round_down(limits->max_sectors, size >> SECTOR_SHIFT);
}
EXPORT_SYMBOL(blk_queue_logical_block_size);

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q: the request queue for the device
 * @size: the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
	q->limits.physical_block_size = size;

	if (q->limits.physical_block_size < q->limits.logical_block_size)
		q->limits.physical_block_size = q->limits.logical_block_size;

	if (q->limits.discard_granularity < q->limits.physical_block_size)
		q->limits.discard_granularity = q->limits.physical_block_size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);
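
/*
 * Example for a native 4K device (an illustrative sketch; the driver context
 * is hypothetical):
 *
 *	blk_queue_logical_block_size(q, 4096);
 *	blk_queue_physical_block_size(q, 4096);
 *
 * The first call alone already raises physical_block_size, io_min and
 * discard_granularity to 4096 and rounds max_hw_sectors/max_sectors down to
 * 4 KiB multiples; the second call is then effectively a no-op for such a
 * device.
 */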

/**
 * blk_queue_zone_write_granularity - set zone write granularity for the queue
 * @q: the request queue for the zoned device
 * @size: the zone write granularity size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible size allowing to write in
 *   sequential zones of a zoned block device.
 */
void blk_queue_zone_write_granularity(struct request_queue *q,
				      unsigned int size)
{
	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
		return;

	q->limits.zone_write_granularity = size;

	if (q->limits.zone_write_granularity < q->limits.logical_block_size)
		q->limits.zone_write_granularity = q->limits.logical_block_size;
}
EXPORT_SYMBOL_GPL(blk_queue_zone_write_granularity);

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q: the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
	q->limits.alignment_offset =
		offset & (q->limits.physical_block_size - 1);
	q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);

void disk_update_readahead(struct gendisk *disk)
{
	blk_apply_bdi_limits(disk->bdi, &disk->queue->limits);
}
EXPORT_SYMBOL_GPL(disk_update_readahead);

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min: smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
	limits->io_min = min;

	if (limits->io_min < limits->logical_block_size)
		limits->io_min = limits->logical_block_size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q: the request queue for the device
 * @min: smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty.  For disk drives this is often the
 *   physical block size.  For RAID arrays it is often the stripe chunk
 *   size.  A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
	blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt: optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
	limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q: the request queue for the device
 * @opt: optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
	blk_limits_io_opt(&q->limits, opt);
	if (!q->disk)
		return;
	q->disk->bdi->ra_pages =
		max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
}
EXPORT_SYMBOL(blk_queue_io_opt);
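
/*
 * Example following the descriptions above (hypothetical RAID geometry): a
 * striped array with a 64 KiB chunk per disk and four data disks would
 * typically report
 *
 *	blk_queue_io_min(q, 64 * 1024);
 *	blk_queue_io_opt(q, 4 * 64 * 1024);
 *
 * so that the minimum I/O matches the stripe chunk and the optimal I/O
 * matches the full stripe width.
 */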

static int queue_limit_alignment_offset(const struct queue_limits *lim,
		sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
		<< SECTOR_SHIFT;

	return (granularity + lim->alignment_offset - alignment) % granularity;
}

static unsigned int queue_limit_discard_alignment(
		const struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment, granularity, offset;

	if (!lim->max_discard_sectors)
		return 0;

	/* Why are these in bytes, not sectors? */
	alignment = lim->discard_alignment >> SECTOR_SHIFT;
	granularity = lim->discard_granularity >> SECTOR_SHIFT;
	if (!granularity)
		return 0;

	/* Offset of the partition start in 'granularity' sectors */
	offset = sector_div(sector, granularity);

	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
	offset = (granularity + alignment - offset) % granularity;

	/* Turn it back into bytes, gaah */
	return offset << SECTOR_SHIFT;
}

static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
{
	sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
	if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
		sectors = PAGE_SIZE >> SECTOR_SHIFT;
	return sectors;
}
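
/*
 * Worked example for queue_limit_alignment_offset() (hypothetical values):
 * with physical_block_size = io_min = 4096, alignment_offset = 0 and a
 * partition starting at sector 63, the granularity is 4096 bytes, the
 * partition start sits 7 sectors (3584 bytes) past the previous 4 KiB
 * boundary, and the helper returns (4096 + 0 - 3584) % 4096 = 512.
 */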

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		     sector_t start)
{
	unsigned int top, bottom, alignment, ret = 0;

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
					  b->max_write_zeroes_sectors);
	t->max_zone_append_sectors = min(t->max_zone_append_sectors,
					 b->max_zone_append_sectors);
	t->bounce = max(t->bounce, b->bounce);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);
	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
					     b->virt_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
	t->max_discard_segments = min_not_zero(t->max_discard_segments,
					       b->max_discard_segments);
	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
						 b->max_integrity_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	t->misaligned |= b->misaligned;

	alignment = queue_limit_alignment_offset(b, start);

	/* Bottom device has different alignment.  Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) % min(top, bottom)) {
			t->misaligned = 1;
			ret = -1;
		}
	}

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
	t->dma_alignment = max(t->dma_alignment, b->dma_alignment);

	/* Set non-power-of-2 compatible chunk_sectors boundary */
	if (b->chunk_sectors)
		t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->misaligned = 1;
		ret = -1;
	}

	/* chunk_sectors a multiple of the physical block size? */
	if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
		t->chunk_sectors = 0;
		t->misaligned = 1;
		ret = -1;
	}

	t->raid_partial_stripes_expensive =
		max(t->raid_partial_stripes_expensive,
		    b->raid_partial_stripes_expensive);

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
		% max(t->physical_block_size, t->io_min);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->misaligned = 1;
		ret = -1;
	}

	t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
	t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
	t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		if (t->discard_granularity != 0 &&
		    t->discard_alignment != alignment) {
			top = t->discard_granularity + t->discard_alignment;
			bottom = b->discard_granularity + alignment;

			/* Verify that top and bottom intervals line up */
			if ((max(top, bottom) % min(top, bottom)) != 0)
				t->discard_misaligned = 1;
		}

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
							 b->max_hw_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
			t->discard_granularity;
	}
	t->max_secure_erase_sectors = min_not_zero(t->max_secure_erase_sectors,
						   b->max_secure_erase_sectors);
	t->zone_write_granularity = max(t->zone_write_granularity,
					b->zone_write_granularity);
	t->zoned = max(t->zoned, b->zoned);
	if (!t->zoned) {
		t->zone_write_granularity = 0;
		t->max_zone_append_sectors = 0;
	}
	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);
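
/*
 * Worked example of the interval check above (hypothetical values): stacking
 * a top with io_min = 64 KiB and alignment_offset 0 (interval 65536) on a
 * bottom with io_min = 16 KiB whose data start is misaligned by 512 bytes
 * (interval 16896) gives 65536 % 16896 != 0, so the top is flagged as
 * misaligned and blk_stack_limits() returns -1.
 */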

/**
 * queue_limits_stack_bdev - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 * @pfx: prefix to use for warnings logged
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 */
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
		sector_t offset, const char *pfx)
{
	if (blk_stack_limits(t, &bdev_get_queue(bdev)->limits,
			get_start_sect(bdev) + offset))
		pr_notice("%s: Warning: Device %pg is misaligned\n",
			pfx, bdev);
}
EXPORT_SYMBOL_GPL(queue_limits_stack_bdev);
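
/*
 * Typical stacking sequence (an illustrative sketch; "nr_devs", "bdevs" and
 * "disk" are hypothetical driver state, not part of this API):
 *
 *	struct queue_limits lim;
 *	int i;
 *
 *	blk_set_stacking_limits(&lim);
 *	for (i = 0; i < nr_devs; i++)
 *		queue_limits_stack_bdev(&lim, bdevs[i], 0, disk->disk_name);
 *	queue_limits_set(disk->queue, &lim);
 */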

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q: the request queue for the device
 * @mask: pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
	if (mask > q->dma_pad_mask)
		q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q: the request queue for the device
 * @mask: the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
	if (mask < PAGE_SIZE - 1) {
		mask = PAGE_SIZE - 1;
		pr_info("%s: set to minimum %lx\n", __func__, mask);
	}

	q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_virt_boundary - set boundary rules for bio merging
 * @q: the request queue for the device
 * @mask: the memory boundary mask
 **/
void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
{
	q->limits.virt_boundary_mask = mask;

	/*
	 * Devices that require a virtual boundary do not support scatter/gather
	 * I/O natively, but instead require a descriptor list entry for each
	 * page (which might not be identical to the Linux PAGE_SIZE). Because
	 * of that they are not limited by our notion of "segment size".
	 */
	if (mask)
		q->limits.max_segment_size = UINT_MAX;
}
EXPORT_SYMBOL(blk_queue_virt_boundary);
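
/*
 * Example (hypothetical controller): a device whose DMA descriptors can each
 * address only one 4 KiB page, such as a PRP-style list, might set
 *
 *	blk_queue_virt_boundary(q, 4096 - 1);
 *
 * so that bios whose payload would leave a gap within a 4 KiB virtual page
 * are split rather than merged.
 */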

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q: the request queue for the device
 * @mask: alignment mask
 *
 * description:
 *    set required memory and length alignment for direct dma transactions.
 *    this is used when building direct io requests for the queue.
 *
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
	q->limits.dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q: the request queue for the device
 * @mask: alignment mask
 *
 * description:
 *    update required memory and length alignment for direct dma transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 *
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
	BUG_ON(mask > PAGE_SIZE);

	if (mask > q->limits.dma_alignment)
		q->limits.dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);

/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q: the request queue for the device
 * @depth: queue depth
 *
 */
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
	q->queue_depth = depth;
	rq_qos_queue_depth_changed(q);
}
EXPORT_SYMBOL(blk_set_queue_depth);

/**
 * blk_queue_write_cache - configure queue's write cache
 * @q: the request queue for the device
 * @wc: write back cache on or off
 * @fua: device supports FUA writes, if true
 *
 * Tell the block layer about the write cache of @q.
 */
void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
{
	if (wc) {
		blk_queue_flag_set(QUEUE_FLAG_HW_WC, q);
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	} else {
		blk_queue_flag_clear(QUEUE_FLAG_HW_WC, q);
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
	}
	if (fua)
		blk_queue_flag_set(QUEUE_FLAG_FUA, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
}
EXPORT_SYMBOL_GPL(blk_queue_write_cache);

/**
 * blk_queue_required_elevator_features - Set a queue required elevator features
 * @q: the request queue for the target device
 * @features: Required elevator features OR'ed together
 *
 * Tell the block layer that for the device controlled through @q, the only
 * elevators that can be used are those that implement at least the set of
 * features specified by @features.
 */
void blk_queue_required_elevator_features(struct request_queue *q,
					  unsigned int features)
{
	q->required_elevator_features = features;
}
EXPORT_SYMBOL_GPL(blk_queue_required_elevator_features);

/**
 * blk_queue_can_use_dma_map_merging - configure queue for merging segments.
 * @q: the request queue for the device
 * @dev: the device pointer for dma
 *
 * Tell the block layer about merging the segments by dma map of @q.
 */
bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
				       struct device *dev)
{
	unsigned long boundary = dma_get_merge_boundary(dev);

	if (!boundary)
		return false;

	/* No need to update max_segment_size. see blk_queue_virt_boundary() */
	blk_queue_virt_boundary(q, boundary);

	return true;
}
EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);

/**
 * disk_set_zoned - indicate a zoned device
 * @disk: gendisk to configure
 */
void disk_set_zoned(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED));

	/*
	 * Set the zone write granularity to the device logical block
	 * size by default. The driver can change this value if needed.
	 */
	q->limits.zoned = true;
	blk_queue_zone_write_granularity(q, queue_logical_block_size(q));
}
EXPORT_SYMBOL_GPL(disk_set_zoned);

int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.misaligned)
		return -1;
	if (bdev_is_partition(bdev))
		return queue_limit_alignment_offset(&q->limits,
				bdev->bd_start_sect);
	return q->limits.alignment_offset;
}
EXPORT_SYMBOL_GPL(bdev_alignment_offset);

unsigned int bdev_discard_alignment(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (bdev_is_partition(bdev))
		return queue_limit_discard_alignment(&q->limits,
				bdev->bd_start_sect);
	return q->limits.discard_alignment;
}
EXPORT_SYMBOL_GPL(bdev_discard_alignment);