// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/pagemap.h>
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>

#include "blk.h"
#include "blk-wbt.h"

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
        q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

/**
 * blk_set_default_limits - reset limits to default values
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limits struct to its default state.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
        lim->bio_max_bytes = UINT_MAX;
        lim->max_segments = BLK_MAX_SEGMENTS;
        lim->max_discard_segments = 1;
        lim->max_integrity_segments = 0;
        lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
        lim->virt_boundary_mask = 0;
        lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
        lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
        lim->max_dev_sectors = 0;
        lim->chunk_sectors = 0;
        lim->max_write_same_sectors = 0;
        lim->max_write_zeroes_sectors = 0;
        lim->max_zone_append_sectors = 0;
        lim->max_discard_sectors = 0;
        lim->max_hw_discard_sectors = 0;
        lim->discard_granularity = 0;
        lim->discard_alignment = 0;
        lim->discard_misaligned = 0;
        lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
        lim->bounce = BLK_BOUNCE_NONE;
        lim->alignment_offset = 0;
        lim->io_opt = 0;
        lim->misaligned = 0;
        lim->zoned = BLK_ZONED_NONE;
        lim->zone_write_granularity = 0;
}
EXPORT_SYMBOL(blk_set_default_limits);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limits struct to its default state. Should be used
 *   by stacking drivers like DM that have no internal limits.
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
        blk_set_default_limits(lim);

        /* Inherit limits from component devices */
        lim->max_segments = USHRT_MAX;
        lim->max_discard_segments = USHRT_MAX;
        lim->max_hw_sectors = UINT_MAX;
        lim->max_segment_size = UINT_MAX;
        lim->max_sectors = UINT_MAX;
        lim->max_dev_sectors = UINT_MAX;
        lim->max_write_same_sectors = UINT_MAX;
        lim->max_write_zeroes_sectors = UINT_MAX;
        lim->max_zone_append_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);
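
/*
 * Example (illustrative sketch, not kernel code): a hypothetical
 * bio-based stacking driver would typically start from the permissive
 * stacking defaults and then narrow them by stacking each component
 * device:
 *
 *	struct queue_limits lim;
 *
 *	blk_set_stacking_limits(&lim);
 *	// ... then call blk_stack_limits(&lim, <component limits>, start)
 *	//     once per component device (see blk_stack_limits() below).
 *
 * blk_set_default_limits() instead leaves the conservative
 * BLK_SAFE_MAX_SECTORS / BLK_MAX_SEGMENTS defaults in place, which is
 * the usual starting point for non-stacking drivers.
 */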

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @bounce: bounce limit to enforce
 *
 * Description:
 *    Force bouncing for ISA DMA ranges or highmem.
 *
 *    DEPRECATED, don't use in new code.
 **/
void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce)
{
        q->limits.bounce = bounce;
}
EXPORT_SYMBOL(blk_queue_bounce_limit);

/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set a hard upper limit,
 *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
 *    the device driver based upon the capabilities of the I/O
 *    controller.
 *
 *    max_dev_sectors is a hard limit imposed by the storage device for
 *    READ/WRITE requests. It is set by the disk driver.
 *
 *    max_sectors is a soft limit imposed by the block layer for
 *    filesystem type requests. This value can be overridden on a
 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *    The soft limit can not exceed max_hw_sectors.
 **/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
        struct queue_limits *limits = &q->limits;
        unsigned int max_sectors;

        if ((max_hw_sectors << 9) < PAGE_SIZE) {
                max_hw_sectors = 1 << (PAGE_SHIFT - 9);
                printk(KERN_INFO "%s: set to minimum %u\n",
                       __func__, max_hw_sectors);
        }

        max_hw_sectors = round_down(max_hw_sectors,
                                    limits->logical_block_size >> SECTOR_SHIFT);
        limits->max_hw_sectors = max_hw_sectors;

        max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
        max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
        max_sectors = round_down(max_sectors,
                                 limits->logical_block_size >> SECTOR_SHIFT);
        limits->max_sectors = max_sectors;

        if (check_shl_overflow(max_sectors, SECTOR_SHIFT,
                               &limits->bio_max_bytes))
                limits->bio_max_bytes = UINT_MAX;

        q->backing_dev_info->io_pages = max_sectors >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
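
/*
 * Example (illustrative sketch, not kernel code): a hypothetical driver
 * whose controller can transfer at most 1 MiB per request might call:
 *
 *	blk_queue_max_hw_sectors(q, (1024 * 1024) >> SECTOR_SHIFT);
 *
 * The hard limit (max_hw_sectors) then becomes 2048 sectors, while the
 * derived soft limit (max_sectors) is additionally capped at
 * BLK_DEF_MAX_SECTORS and may later be lowered by the administrator
 * through /sys/block/<device>/queue/max_sectors_kb.
 */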

/**
 * blk_queue_chunk_sectors - set size of the chunk for this queue
 * @q:  the request queue for the device
 * @chunk_sectors:  chunk sectors in the usual 512b unit
 *
 * Description:
 *    If a driver doesn't want IOs to cross a given chunk size, it can set
 *    this limit and prevent merging across chunks. Note that the block layer
 *    must accept a page worth of data at any offset. So if the crossing of
 *    chunks is a hard limitation in the driver, it must still be prepared
 *    to split single page bios.
 **/
void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
{
        q->limits.chunk_sectors = chunk_sectors;
}
EXPORT_SYMBOL(blk_queue_chunk_sectors);
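
/*
 * Example (illustrative sketch, not kernel code): a hypothetical zoned
 * device with 256 MiB zones, or a device with an internal boundary that
 * requests must not straddle, could set:
 *
 *	blk_queue_chunk_sectors(q, (256 * 1024 * 1024) >> SECTOR_SHIFT);
 *
 * The block layer then splits and merges bios so that no request crosses
 * a 256 MiB boundary, while, as noted above, the driver must still be
 * prepared to split a single-page bio that straddles a chunk.
 */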

/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q:  the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
                unsigned int max_discard_sectors)
{
        q->limits.max_hw_discard_sectors = max_discard_sectors;
        q->limits.max_discard_sectors = max_discard_sectors;
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_write_same_sectors - set max sectors for a single write same
 * @q:  the request queue for the device
 * @max_write_same_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_same_sectors(struct request_queue *q,
                unsigned int max_write_same_sectors)
{
        q->limits.max_write_same_sectors = max_write_same_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_same_sectors);

/**
 * blk_queue_max_write_zeroes_sectors - set max sectors for a single
 *                                      write zeroes
 * @q:  the request queue for the device
 * @max_write_zeroes_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
                unsigned int max_write_zeroes_sectors)
{
        q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);

/**
 * blk_queue_max_zone_append_sectors - set max sectors for a single zone append
 * @q:  the request queue for the device
 * @max_zone_append_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_zone_append_sectors(struct request_queue *q,
                unsigned int max_zone_append_sectors)
{
        unsigned int max_sectors;

        if (WARN_ON(!blk_queue_is_zoned(q)))
                return;

        max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors);
        max_sectors = min(q->limits.chunk_sectors, max_sectors);

        /*
         * Signal possible driver bugs that result in the max_zone_append
         * sectors limit being 0 due to a 0 argument, the chunk_sectors limit
         * (zone size) not being set, or the max_hw_sectors limit not being
         * set.
         */
        WARN_ON(!max_sectors);

        q->limits.max_zone_append_sectors = max_sectors;
}
EXPORT_SYMBOL_GPL(blk_queue_max_zone_append_sectors);
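
/*
 * Example (illustrative sketch, not kernel code): a hypothetical zoned
 * driver that has already set its zone size via blk_queue_chunk_sectors()
 * and its transfer limit via blk_queue_max_hw_sectors() could expose a
 * 64 MiB zone-append limit (an assumed hardware value) with:
 *
 *	blk_queue_max_zone_append_sectors(q, (64 * 1024 * 1024) >> SECTOR_SHIFT);
 *
 * The stored value is clamped to both max_hw_sectors and chunk_sectors,
 * since a zone append must never cross a zone boundary.
 */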

/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.
 **/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
        if (!max_segments) {
                max_segments = 1;
                printk(KERN_INFO "%s: set to minimum %u\n",
                       __func__, max_segments);
        }

        q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);

/**
 * blk_queue_max_discard_segments - set max segments for discard requests
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    segments in a discard request.
 **/
void blk_queue_max_discard_segments(struct request_queue *q,
                unsigned short max_segments)
{
        q->limits.max_discard_segments = max_segments;
}
EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment.
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
        if (max_size < PAGE_SIZE) {
                max_size = PAGE_SIZE;
                printk(KERN_INFO "%s: set to minimum %u\n",
                       __func__, max_size);
        }

        /* see blk_queue_virt_boundary() for the explanation */
        WARN_ON_ONCE(q->limits.virt_boundary_mask);

        q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q:  the request queue for the device
 * @size:  the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
{
        struct queue_limits *limits = &q->limits;

        limits->logical_block_size = size;

        if (limits->physical_block_size < size)
                limits->physical_block_size = size;

        if (limits->io_min < limits->physical_block_size)
                limits->io_min = limits->physical_block_size;

        limits->max_hw_sectors =
                round_down(limits->max_hw_sectors, size >> SECTOR_SHIFT);
        limits->max_sectors =
                round_down(limits->max_sectors, size >> SECTOR_SHIFT);
}
EXPORT_SYMBOL(blk_queue_logical_block_size);

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q:  the request queue for the device
 * @size:  the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
        q->limits.physical_block_size = size;

        if (q->limits.physical_block_size < q->limits.logical_block_size)
                q->limits.physical_block_size = q->limits.logical_block_size;

        if (q->limits.io_min < q->limits.physical_block_size)
                q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);
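
/*
 * Example (illustrative sketch, not kernel code): a hypothetical driver
 * for a 512e drive (512-byte logical, 4096-byte physical sectors) would
 * report:
 *
 *	blk_queue_logical_block_size(q, 512);
 *	blk_queue_physical_block_size(q, 4096);
 *
 * The helpers above maintain the relationship
 * logical_block_size <= physical_block_size <= io_min, so io_min is
 * bumped to 4096 here even if the driver never calls blk_queue_io_min().
 */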

/**
 * blk_queue_zone_write_granularity - set zone write granularity for the queue
 * @q:  the request queue for the zoned device
 * @size:  the zone write granularity size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible size at which writes can be
 *   issued to the sequential zones of a zoned block device.
 */
void blk_queue_zone_write_granularity(struct request_queue *q,
                                      unsigned int size)
{
        if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
                return;

        q->limits.zone_write_granularity = size;

        if (q->limits.zone_write_granularity < q->limits.logical_block_size)
                q->limits.zone_write_granularity = q->limits.logical_block_size;
}
EXPORT_SYMBOL_GPL(blk_queue_zone_write_granularity);

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q:  the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
        q->limits.alignment_offset =
                offset & (q->limits.physical_block_size - 1);
        q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);

void blk_queue_update_readahead(struct request_queue *q)
{
        /*
         * For read-ahead of large files to be effective, we need to read
         * ahead at least twice the optimal I/O size.
         */
        q->backing_dev_info->ra_pages =
                max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
        q->backing_dev_info->io_pages =
                queue_max_sectors(q) >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL_GPL(blk_queue_update_readahead);

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
        limits->io_min = min;

        if (limits->io_min < limits->logical_block_size)
                limits->io_min = limits->logical_block_size;

        if (limits->io_min < limits->physical_block_size)
                limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q:  the request queue for the device
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty.  For disk drives this is often the
 *   physical block size.  For RAID arrays it is often the stripe chunk
 *   size.  A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
        blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt:  optimal I/O size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
        limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q:  the request queue for the device
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
        blk_limits_io_opt(&q->limits, opt);
        q->backing_dev_info->ra_pages =
                max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
}
EXPORT_SYMBOL(blk_queue_io_opt);
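
/*
 * Example (illustrative sketch, not kernel code): a hypothetical RAID-5
 * driver with a 64 KiB chunk size striped over four data disks might
 * report:
 *
 *	blk_queue_io_min(q, 64 * 1024);		// stripe chunk
 *	blk_queue_io_opt(q, 4 * 64 * 1024);	// full stripe width
 *
 * Filesystems and userspace tools can then align and size I/O to a
 * multiple of io_opt for sustained throughput, while io_min documents
 * the smallest request the array can service without a performance
 * penalty.
 */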

static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
{
        sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
        if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
                sectors = PAGE_SIZE >> SECTOR_SHIFT;
        return sectors;
}

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:  the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device.  If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                     sector_t start)
{
        unsigned int top, bottom, alignment, ret = 0;

        t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
        t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
        t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
        t->max_write_same_sectors = min(t->max_write_same_sectors,
                                        b->max_write_same_sectors);
        t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
                                          b->max_write_zeroes_sectors);
        t->max_zone_append_sectors = min(t->max_zone_append_sectors,
                                         b->max_zone_append_sectors);
        t->bounce = max(t->bounce, b->bounce);

        t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
                                            b->seg_boundary_mask);
        t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
                                             b->virt_boundary_mask);

        t->max_segments = min_not_zero(t->max_segments, b->max_segments);
        t->max_discard_segments = min_not_zero(t->max_discard_segments,
                                               b->max_discard_segments);
        t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
                                                 b->max_integrity_segments);

        t->max_segment_size = min_not_zero(t->max_segment_size,
                                           b->max_segment_size);

        t->misaligned |= b->misaligned;

        alignment = queue_limit_alignment_offset(b, start);

        /* Bottom device has different alignment.  Check that it is
         * compatible with the current top alignment.
         */
        if (t->alignment_offset != alignment) {

                top = max(t->physical_block_size, t->io_min)
                        + t->alignment_offset;
                bottom = max(b->physical_block_size, b->io_min) + alignment;

                /* Verify that top and bottom intervals line up */
                if (max(top, bottom) % min(top, bottom)) {
                        t->misaligned = 1;
                        ret = -1;
                }
        }

        t->logical_block_size = max(t->logical_block_size,
                                    b->logical_block_size);

        t->physical_block_size = max(t->physical_block_size,
                                     b->physical_block_size);

        t->io_min = max(t->io_min, b->io_min);
        t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);

        /* Set non-power-of-2 compatible chunk_sectors boundary */
        if (b->chunk_sectors)
                t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);

        /* Physical block size a multiple of the logical block size? */
        if (t->physical_block_size & (t->logical_block_size - 1)) {
                t->physical_block_size = t->logical_block_size;
                t->misaligned = 1;
                ret = -1;
        }

        /* Minimum I/O a multiple of the physical block size? */
        if (t->io_min & (t->physical_block_size - 1)) {
                t->io_min = t->physical_block_size;
                t->misaligned = 1;
                ret = -1;
        }

        /* Optimal I/O a multiple of the physical block size? */
        if (t->io_opt & (t->physical_block_size - 1)) {
                t->io_opt = 0;
                t->misaligned = 1;
                ret = -1;
        }

        /* chunk_sectors a multiple of the physical block size? */
        if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
                t->chunk_sectors = 0;
                t->misaligned = 1;
                ret = -1;
        }

        t->raid_partial_stripes_expensive =
                max(t->raid_partial_stripes_expensive,
                    b->raid_partial_stripes_expensive);

        /* Find lowest common alignment_offset */
        t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
                % max(t->physical_block_size, t->io_min);

        /* Verify that new alignment_offset is on a logical block boundary */
        if (t->alignment_offset & (t->logical_block_size - 1)) {
                t->misaligned = 1;
                ret = -1;
        }

        t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
        t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
        t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);

        /* Discard alignment and granularity */
        if (b->discard_granularity) {
                alignment = queue_limit_discard_alignment(b, start);

                if (t->discard_granularity != 0 &&
                    t->discard_alignment != alignment) {
                        top = t->discard_granularity + t->discard_alignment;
                        bottom = b->discard_granularity + alignment;

                        /* Verify that top and bottom intervals line up */
                        if ((max(top, bottom) % min(top, bottom)) != 0)
                                t->discard_misaligned = 1;
                }

                t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
                                                      b->max_discard_sectors);
                t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
                                                         b->max_hw_discard_sectors);
                t->discard_granularity = max(t->discard_granularity,
                                             b->discard_granularity);
                t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
                        t->discard_granularity;
        }

        t->zone_write_granularity = max(t->zone_write_granularity,
                                        b->zone_write_granularity);
        t->zoned = max(t->zoned, b->zoned);
        return ret;
}
EXPORT_SYMBOL(blk_stack_limits);
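
/*
 * Example (illustrative sketch, not kernel code): a hypothetical
 * stacking driver combining two component devices, one with 512-byte
 * and one with 4096-byte logical blocks, might do:
 *
 *	struct queue_limits lim;
 *
 *	blk_set_stacking_limits(&lim);
 *	blk_stack_limits(&lim, &bdev_get_queue(bdev0)->limits,
 *			 get_start_sect(bdev0));
 *	blk_stack_limits(&lim, &bdev_get_queue(bdev1)->limits,
 *			 get_start_sect(bdev1));
 *
 * The resulting limits use the larger logical/physical block sizes, the
 * smaller transfer limits, and io_opt set to the least common multiple
 * of the components' io_opt values; a -1 return would indicate that no
 * compatible alignment exists and that the misaligned flag was set.
 */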

/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk:  MD/DM gendisk (top)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 *
 * Description:
 *    Merges the limits for a top level gendisk and a bottom level
 *    block_device.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
                       sector_t offset)
{
        struct request_queue *t = disk->queue;

        if (blk_stack_limits(&t->limits, &bdev_get_queue(bdev)->limits,
                        get_start_sect(bdev) + (offset >> 9)) < 0) {
                char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

                disk_name(disk, 0, top);
                bdevname(bdev, bottom);

                printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
                       top, bottom);
        }

        blk_queue_update_readahead(disk->queue);
}
EXPORT_SYMBOL(disk_stack_limits);
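
/*
 * Example (illustrative sketch, not kernel code): a hypothetical MD/DM
 * style driver whose data starts at byte offset "offset" within a
 * component block device could call, once per component:
 *
 *	disk_stack_limits(top_disk, component_bdev, offset);
 *
 * (top_disk, component_bdev and offset are placeholder names for this
 * example.)  This stacks the component's limits into the top gendisk's
 * queue, updates read-ahead, and logs a notice if the devices turn out
 * to be misaligned.
 */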

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
        if (mask > q->dma_pad_mask)
                q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
        if (mask < PAGE_SIZE - 1) {
                mask = PAGE_SIZE - 1;
                printk(KERN_INFO "%s: set to minimum %lx\n",
                       __func__, mask);
        }

        q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_virt_boundary - set boundary rules for bio merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
{
        q->limits.virt_boundary_mask = mask;

        /*
         * Devices that require a virtual boundary do not support scatter/gather
         * I/O natively, but instead require a descriptor list entry for each
         * page (which might not be identical to the Linux PAGE_SIZE).  Because
         * of that they are not limited by our notion of "segment size".
         */
        if (mask)
                q->limits.max_segment_size = UINT_MAX;
}
EXPORT_SYMBOL(blk_queue_virt_boundary);
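
/*
 * Example (illustrative sketch, not kernel code): a hypothetical
 * controller with an NVMe-PRP-style descriptor format, where each data
 * descriptor covers one 4 KiB page, would set:
 *
 *	blk_queue_virt_boundary(q, 4096 - 1);
 *
 * With the boundary in place the hardware consumes one descriptor per
 * 4 KiB chunk regardless of segment length, which is why
 * max_segment_size is raised to UINT_MAX above rather than being
 * enforced separately.
 */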

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Set required memory and length alignment for direct dma transactions.
 *    This is used when building direct io requests for the queue.
 *
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
        q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Update required memory and length alignment for direct dma transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 *
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
        BUG_ON(mask > PAGE_SIZE);

        if (mask > q->dma_alignment)
                q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);

/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q:  the request queue for the device
 * @depth:  queue depth
 *
 */
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
        q->queue_depth = depth;
        rq_qos_queue_depth_changed(q);
}
EXPORT_SYMBOL(blk_set_queue_depth);

/**
 * blk_queue_write_cache - configure queue's write cache
 * @q:  the request queue for the device
 * @wc:  write back cache on or off
 * @fua:  device supports FUA writes, if true
 *
 * Tell the block layer about the write cache of @q.
 */
void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
{
        if (wc)
                blk_queue_flag_set(QUEUE_FLAG_WC, q);
        else
                blk_queue_flag_clear(QUEUE_FLAG_WC, q);
        if (fua)
                blk_queue_flag_set(QUEUE_FLAG_FUA, q);
        else
                blk_queue_flag_clear(QUEUE_FLAG_FUA, q);

        wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
}
EXPORT_SYMBOL_GPL(blk_queue_write_cache);
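
/*
 * Example (illustrative sketch, not kernel code): a hypothetical driver
 * that learns from its device's identify data that a volatile write-back
 * cache is present and that FUA writes are supported would call:
 *
 *	blk_queue_write_cache(q, true, true);
 *
 * The block layer then issues flush/FUA requests as needed to persist
 * data, and writeback throttling (wbt) is told whether a write cache is
 * present.  Passing (false, false) would mark the device write-through.
 */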

/**
 * blk_queue_required_elevator_features - set the queue's required elevator features
 * @q:  the request queue for the target device
 * @features:  required elevator features OR'ed together
 *
 * Tell the block layer that for the device controlled through @q, only the
 * elevators that can be used are those that implement at least the set of
 * features specified by @features.
 */
void blk_queue_required_elevator_features(struct request_queue *q,
                                          unsigned int features)
{
        q->required_elevator_features = features;
}
EXPORT_SYMBOL_GPL(blk_queue_required_elevator_features);

/**
 * blk_queue_can_use_dma_map_merging - configure queue for merging segments.
 * @q:  the request queue for the device
 * @dev:  the device pointer for dma
 *
 * Tell the block layer about merging the segments by dma map of @q.
 */
bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
                                       struct device *dev)
{
        unsigned long boundary = dma_get_merge_boundary(dev);

        if (!boundary)
                return false;

        /* No need to update max_segment_size. see blk_queue_virt_boundary() */
        blk_queue_virt_boundary(q, boundary);

        return true;
}
EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);

/**
 * blk_queue_set_zoned - configure a disk queue zoned model.
 * @disk:  the gendisk of the queue to configure
 * @model:  the zoned model to set
 *
 * Set the zoned model of the request queue of @disk according to @model.
 * When @model is BLK_ZONED_HM (host managed), this should be called only
 * if zoned block device support is enabled (CONFIG_BLK_DEV_ZONED option).
 * If @model specifies BLK_ZONED_HA (host aware), the effective model used
 * depends on CONFIG_BLK_DEV_ZONED settings and on the existence of partitions
 * on the disk.
 */
void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
{
        struct request_queue *q = disk->queue;

        switch (model) {
        case BLK_ZONED_HM:
                /*
                 * Host managed devices are supported only if
                 * CONFIG_BLK_DEV_ZONED is enabled.
                 */
                WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED));
                break;
        case BLK_ZONED_HA:
                /*
                 * Host aware devices can be treated either as regular block
                 * devices (similar to drive managed devices) or as zoned block
                 * devices to take advantage of the zone command set, similarly
                 * to host managed devices.  We try the latter if there are no
                 * partitions and zoned block device support is enabled, else
                 * we do nothing special as far as the block layer is concerned.
                 */
                if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) ||
                    !xa_empty(&disk->part_tbl))
                        model = BLK_ZONED_NONE;
                break;
        case BLK_ZONED_NONE:
        default:
                if (WARN_ON_ONCE(model != BLK_ZONED_NONE))
                        model = BLK_ZONED_NONE;
                break;
        }

        q->limits.zoned = model;
        if (model != BLK_ZONED_NONE) {
                /*
                 * Set the zone write granularity to the device logical block
                 * size by default. The driver can change this value if needed.
                 */
                blk_queue_zone_write_granularity(q,
                                                 queue_logical_block_size(q));
        } else {
                blk_queue_clear_zone_settings(q);
        }
}
EXPORT_SYMBOL_GPL(blk_queue_set_zoned);
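
/*
 * Example (illustrative sketch, not kernel code): a hypothetical driver
 * that detects a host-managed device during probe might configure its
 * gendisk with:
 *
 *	blk_queue_set_zoned(disk, BLK_ZONED_HM);
 *	blk_queue_chunk_sectors(q, zone_size_sectors);
 *	blk_queue_max_zone_append_sectors(q, zone_append_limit);
 *
 * (zone_size_sectors and zone_append_limit are placeholders for values
 * read from the hardware.)  A host-aware device would pass BLK_ZONED_HA
 * instead and, as described above, may end up being treated as a regular
 * device if it is partitioned or if CONFIG_BLK_DEV_ZONED is disabled.
 */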