// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>

#include "blk.h"
#include "blk-wbt.h"

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

/**
 * blk_set_default_limits - reset limits to default values
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
	lim->max_segments = BLK_MAX_SEGMENTS;
	lim->max_discard_segments = 1;
	lim->max_integrity_segments = 0;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	lim->virt_boundary_mask = 0;
	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
	lim->max_dev_sectors = 0;
	lim->chunk_sectors = 0;
	lim->max_write_same_sectors = 0;
	lim->max_write_zeroes_sectors = 0;
	lim->max_zone_append_sectors = 0;
	lim->max_discard_sectors = 0;
	lim->max_hw_discard_sectors = 0;
	lim->discard_granularity = 0;
	lim->discard_alignment = 0;
	lim->discard_misaligned = 0;
	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
	lim->bounce = BLK_BOUNCE_NONE;
	lim->alignment_offset = 0;
	lim->io_opt = 0;
	lim->misaligned = 0;
	lim->zoned = BLK_ZONED_NONE;
	lim->zone_write_granularity = 0;
}
EXPORT_SYMBOL(blk_set_default_limits);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state. Should be used
 *   by stacking drivers like DM that have no internal limits.
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
	blk_set_default_limits(lim);

	/* Inherit limits from component devices */
	lim->max_segments = USHRT_MAX;
	lim->max_discard_segments = USHRT_MAX;
	lim->max_hw_sectors = UINT_MAX;
	lim->max_segment_size = UINT_MAX;
	lim->max_sectors = UINT_MAX;
	lim->max_dev_sectors = UINT_MAX;
	lim->max_write_same_sectors = UINT_MAX;
	lim->max_write_zeroes_sectors = UINT_MAX;
	lim->max_zone_append_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @bounce: bounce limit to enforce
 *
 * Description:
 *    Force bouncing for ISA DMA ranges or highmem.
 *
 *    DEPRECATED, don't use in new code.
 **/
void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce)
{
	q->limits.bounce = bounce;
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
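/*
 * Usage sketch (illustrative, not part of this file): a stacking driver
 * such as DM would typically start from the wide-open stacking defaults
 * and then narrow them against each component device with
 * blk_stack_limits() (defined further down). The example_stacked_dev
 * structure and its fields are hypothetical.
 */
#if 0
static void example_compute_stacked_limits(struct example_stacked_dev *sd)
{
	struct queue_limits lim;
	int i;

	/* Start permissive so component devices can only narrow the limits. */
	blk_set_stacking_limits(&lim);

	for (i = 0; i < sd->nr_members; i++) {
		struct request_queue *bq = bdev_get_queue(sd->member[i]);

		/* Fold each member's limits in; < 0 means misalignment. */
		if (blk_stack_limits(&lim, &bq->limits,
				     sd->member_start[i]) < 0)
			pr_warn("example: member %d is misaligned\n", i);
	}

	sd->queue->limits = lim;
}
#endif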
/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set a hard upper limit,
 *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
 *    the device driver based upon the capabilities of the I/O
 *    controller.
 *
 *    max_dev_sectors is a hard limit imposed by the storage device for
 *    READ/WRITE requests. It is set by the disk driver.
 *
 *    max_sectors is a soft limit imposed by the block layer for
 *    filesystem type requests. This value can be overridden on a
 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *    The soft limit can not exceed max_hw_sectors.
 **/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
	struct queue_limits *limits = &q->limits;
	unsigned int max_sectors;

	if ((max_hw_sectors << 9) < PAGE_SIZE) {
		max_hw_sectors = 1 << (PAGE_SHIFT - 9);
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_hw_sectors);
	}

	max_hw_sectors = round_down(max_hw_sectors,
				    limits->logical_block_size >> SECTOR_SHIFT);
	limits->max_hw_sectors = max_hw_sectors;

	max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
	max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
	max_sectors = round_down(max_sectors,
				 limits->logical_block_size >> SECTOR_SHIFT);
	limits->max_sectors = max_sectors;

	q->backing_dev_info->io_pages = max_sectors >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
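/*
 * Usage sketch (illustrative, not part of this file): a low level driver
 * would typically derive max_hw_sectors from its controller's maximum
 * transfer size during probe. The example_hba structure and its fields
 * are hypothetical.
 */
#if 0
static void example_set_transfer_limits(struct example_hba *hba)
{
	/*
	 * Controller reports its max transfer in bytes; convert to 512b
	 * sectors. blk_queue_max_hw_sectors() also derives the max_sectors
	 * soft limit from this value.
	 */
	blk_queue_max_hw_sectors(hba->queue,
				 hba->max_xfer_bytes >> SECTOR_SHIFT);
	blk_queue_max_segments(hba->queue, hba->max_sg_entries);
}
#endif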
/**
 * blk_queue_chunk_sectors - set size of the chunk for this queue
 * @q:  the request queue for the device
 * @chunk_sectors:  chunk sectors in the usual 512b unit
 *
 * Description:
 *    If a driver doesn't want IOs to cross a given chunk size, it can set
 *    this limit and prevent merging across chunks. Note that the block layer
 *    must accept a page worth of data at any offset. So if the crossing of
 *    chunks is a hard limitation in the driver, it must still be prepared
 *    to split single page bios.
 **/
void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
{
	q->limits.chunk_sectors = chunk_sectors;
}
EXPORT_SYMBOL(blk_queue_chunk_sectors);

/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q:  the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors)
{
	q->limits.max_hw_discard_sectors = max_discard_sectors;
	q->limits.max_discard_sectors = max_discard_sectors;
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_write_same_sectors - set max sectors for a single write same
 * @q:  the request queue for the device
 * @max_write_same_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_same_sectors(struct request_queue *q,
				      unsigned int max_write_same_sectors)
{
	q->limits.max_write_same_sectors = max_write_same_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_same_sectors);

/**
 * blk_queue_max_write_zeroes_sectors - set max sectors for a single
 *                                      write zeroes
 * @q:  the request queue for the device
 * @max_write_zeroes_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
		unsigned int max_write_zeroes_sectors)
{
	q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);

/**
 * blk_queue_max_zone_append_sectors - set max sectors for a single zone append
 * @q:  the request queue for the device
 * @max_zone_append_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_zone_append_sectors(struct request_queue *q,
		unsigned int max_zone_append_sectors)
{
	unsigned int max_sectors;

	if (WARN_ON(!blk_queue_is_zoned(q)))
		return;

	max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors);
	max_sectors = min(q->limits.chunk_sectors, max_sectors);

	/*
	 * Flag potential driver bugs that leave the max_zone_append_sectors
	 * limit at 0: a zero argument, an unset chunk_sectors limit (zone
	 * size), or an unset max_hw_sectors limit.
	 */
	WARN_ON(!max_sectors);

	q->limits.max_zone_append_sectors = max_sectors;
}
EXPORT_SYMBOL_GPL(blk_queue_max_zone_append_sectors);

/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.
 **/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_segments);
	}

	q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);
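/*
 * Usage sketch (illustrative, not part of this file): a driver for a
 * device with TRIM/UNMAP support would typically advertise its discard
 * and write-zeroes capabilities as below. The example_dev structure and
 * its fields are hypothetical.
 */
#if 0
static void example_setup_discard(struct example_dev *dev)
{
	struct request_queue *q = dev->queue;

	/* Internal erase unit of the device, in bytes. */
	q->limits.discard_granularity = dev->erase_block_bytes;
	blk_queue_max_discard_sectors(q, dev->max_trim_sectors);
	blk_queue_max_write_zeroes_sectors(q, dev->max_wz_sectors);
}
#endif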
/**
 * blk_queue_max_discard_segments - set max segments for discard requests
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    segments in a discard request.
 **/
void blk_queue_max_discard_segments(struct request_queue *q,
		unsigned short max_segments)
{
	q->limits.max_discard_segments = max_segments;
}
EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
	if (max_size < PAGE_SIZE) {
		max_size = PAGE_SIZE;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_size);
	}

	/* see blk_queue_virt_boundary() for the explanation */
	WARN_ON_ONCE(q->limits.virt_boundary_mask);

	q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q:  the request queue for the device
 * @size:  the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
{
	struct queue_limits *limits = &q->limits;

	limits->logical_block_size = size;

	if (limits->physical_block_size < size)
		limits->physical_block_size = size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;

	limits->max_hw_sectors =
		round_down(limits->max_hw_sectors, size >> SECTOR_SHIFT);
	limits->max_sectors =
		round_down(limits->max_sectors, size >> SECTOR_SHIFT);
}
EXPORT_SYMBOL(blk_queue_logical_block_size);

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q:  the request queue for the device
 * @size:  the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
	q->limits.physical_block_size = size;

	if (q->limits.physical_block_size < q->limits.logical_block_size)
		q->limits.physical_block_size = q->limits.logical_block_size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);
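/*
 * Usage sketch (illustrative, not part of this file): the two block size
 * setters are normally called together while probing the device. A 512e
 * drive is used as the example here.
 */
#if 0
static void example_set_block_sizes(struct request_queue *q)
{
	/* 512e drive: 512b logical sectors on 4K physical sectors. */
	blk_queue_logical_block_size(q, 512);
	blk_queue_physical_block_size(q, 4096);
	/* io_min is raised to 4096 as a side effect of the call above. */
}
#endif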
/**
 * blk_queue_zone_write_granularity - set zone write granularity for the queue
 * @q:  the request queue for the zoned device
 * @size:  the zone write granularity size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible size allowing writes in
 *   the sequential zones of a zoned block device.
 */
void blk_queue_zone_write_granularity(struct request_queue *q,
				      unsigned int size)
{
	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
		return;

	q->limits.zone_write_granularity = size;

	if (q->limits.zone_write_granularity < q->limits.logical_block_size)
		q->limits.zone_write_granularity = q->limits.logical_block_size;
}
EXPORT_SYMBOL_GPL(blk_queue_zone_write_granularity);

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q:  the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
	q->limits.alignment_offset =
		offset & (q->limits.physical_block_size - 1);
	q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);

void blk_queue_update_readahead(struct request_queue *q)
{
	/*
	 * For read-ahead of large files to be effective, we need to read
	 * ahead at least twice the optimal I/O size.
	 */
	q->backing_dev_info->ra_pages =
		max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
	q->backing_dev_info->io_pages =
		queue_max_sectors(q) >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL_GPL(blk_queue_update_readahead);

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
	limits->io_min = min;

	if (limits->io_min < limits->logical_block_size)
		limits->io_min = limits->logical_block_size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q:  the request queue for the device
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty.  For disk drives this is often the
 *   physical block size.  For RAID arrays it is often the stripe chunk
 *   size.  A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
	blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);
/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
	limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q:  the request queue for the device
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
	blk_limits_io_opt(&q->limits, opt);
	q->backing_dev_info->ra_pages =
		max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
}
EXPORT_SYMBOL(blk_queue_io_opt);

static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
{
	sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
	if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
		sectors = PAGE_SIZE >> SECTOR_SHIFT;
	return sectors;
}
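/*
 * Usage sketch (illustrative, not part of this file): a RAID driver
 * would typically report its per-disk chunk size as io_min and the full
 * stripe width as io_opt; the values below are made up.
 */
#if 0
static void example_set_raid_io_hints(struct request_queue *q)
{
	blk_queue_io_min(q, 64 * 1024);		/* per-disk chunk */
	blk_queue_io_opt(q, 4 * 64 * 1024);	/* stripe across 4 data disks */
}
#endif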
/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:  the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		     sector_t start)
{
	unsigned int top, bottom, alignment, ret = 0;

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
	t->max_write_same_sectors = min(t->max_write_same_sectors,
					b->max_write_same_sectors);
	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
					  b->max_write_zeroes_sectors);
	t->max_zone_append_sectors = min(t->max_zone_append_sectors,
					 b->max_zone_append_sectors);
	t->bounce = max(t->bounce, b->bounce);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);
	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
					     b->virt_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
	t->max_discard_segments = min_not_zero(t->max_discard_segments,
					       b->max_discard_segments);
	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
						 b->max_integrity_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	t->misaligned |= b->misaligned;

	alignment = queue_limit_alignment_offset(b, start);

	/* Bottom device has different alignment.  Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) % min(top, bottom)) {
			t->misaligned = 1;
			ret = -1;
		}
	}

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);

	/* Set non-power-of-2 compatible chunk_sectors boundary */
	if (b->chunk_sectors)
		t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->misaligned = 1;
		ret = -1;
	}
	/* chunk_sectors a multiple of the physical block size? */
	if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
		t->chunk_sectors = 0;
		t->misaligned = 1;
		ret = -1;
	}

	t->raid_partial_stripes_expensive =
		max(t->raid_partial_stripes_expensive,
		    b->raid_partial_stripes_expensive);

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
		% max(t->physical_block_size, t->io_min);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->misaligned = 1;
		ret = -1;
	}

	t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
	t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
	t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		if (t->discard_granularity != 0 &&
		    t->discard_alignment != alignment) {
			top = t->discard_granularity + t->discard_alignment;
			bottom = b->discard_granularity + alignment;

			/* Verify that top and bottom intervals line up */
			if ((max(top, bottom) % min(top, bottom)) != 0)
				t->discard_misaligned = 1;
		}

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
							 b->max_hw_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
			t->discard_granularity;
	}

	t->zone_write_granularity = max(t->zone_write_granularity,
					b->zone_write_granularity);
	t->zoned = max(t->zoned, b->zoned);
	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);

/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk:  MD/DM gendisk (top)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 *
 * Description:
 *    Merges the limits for a top level gendisk and a bottom level
 *    block_device.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
		       sector_t offset)
{
	struct request_queue *t = disk->queue;

	if (blk_stack_limits(&t->limits, &bdev_get_queue(bdev)->limits,
			get_start_sect(bdev) + (offset >> 9)) < 0) {
		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

		disk_name(disk, 0, top);
		bdevname(bdev, bottom);

		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
		       top, bottom);
	}

	blk_queue_update_readahead(disk->queue);
}
EXPORT_SYMBOL(disk_stack_limits);
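/*
 * Usage sketch (illustrative, not part of this file): stacking drivers
 * that deal in whole block devices usually go through disk_stack_limits()
 * rather than calling blk_stack_limits() by hand; it also logs misaligned
 * members and refreshes the readahead settings. Names are hypothetical.
 */
#if 0
static void example_add_member(struct gendisk *top,
			       struct block_device *member,
			       sector_t data_offset)
{
	disk_stack_limits(top, member, data_offset);
}
#endif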
/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:  the request queue for the device
 * @mask:  pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
	if (mask > q->dma_pad_mask)
		q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
	if (mask < PAGE_SIZE - 1) {
		mask = PAGE_SIZE - 1;
		printk(KERN_INFO "%s: set to minimum %lx\n",
		       __func__, mask);
	}

	q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_virt_boundary - set boundary rules for bio merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
{
	q->limits.virt_boundary_mask = mask;

	/*
	 * Devices that require a virtual boundary do not support scatter/gather
	 * I/O natively, but instead require a descriptor list entry for each
	 * page (which might not be identical to the Linux PAGE_SIZE).  Because
	 * of that they are not limited by our notion of "segment size".
	 */
	if (mask)
		q->limits.max_segment_size = UINT_MAX;
}
EXPORT_SYMBOL(blk_queue_virt_boundary);

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:  the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *    set required memory and length alignment for direct dma transactions.
 *    this is used when building direct io requests for the queue.
 *
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
	q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:  the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *    update required memory and length alignment for direct dma transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 *
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
	BUG_ON(mask > PAGE_SIZE);

	if (mask > q->dma_alignment)
		q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);

/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q:  the request queue for the device
 * @depth:  queue depth
 *
 */
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
	q->queue_depth = depth;
	rq_qos_queue_depth_changed(q);
}
EXPORT_SYMBOL(blk_set_queue_depth);
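/*
 * Usage sketch (illustrative, not part of this file): controllers with a
 * page-list style DMA scheme (e.g. NVMe PRPs) express their per-page
 * descriptor requirement through the virt boundary; the 4K page size and
 * 4-byte buffer alignment below are example values.
 */
#if 0
static void example_set_dma_rules(struct request_queue *q)
{
	/* No segment may straddle a 4K controller page. */
	blk_queue_virt_boundary(q, 4096 - 1);
	/* Data buffers must be 4-byte aligned. */
	blk_queue_dma_alignment(q, 4 - 1);
}
#endif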
/**
 * blk_queue_write_cache - configure queue's write cache
 * @q:  the request queue for the device
 * @wc:  write back cache on or off
 * @fua:  device supports FUA writes, if true
 *
 * Tell the block layer about the write cache of @q.
 */
void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
{
	if (wc)
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
	if (fua)
		blk_queue_flag_set(QUEUE_FLAG_FUA, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_FUA, q);

	wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
}
EXPORT_SYMBOL_GPL(blk_queue_write_cache);

/**
 * blk_queue_required_elevator_features - Set a queue required elevator features
 * @q:  the request queue for the target device
 * @features:  Required elevator features OR'ed together
 *
 * Tell the block layer that for the device controlled through @q, the only
 * elevators that can be used are those that implement at least the set of
 * features specified by @features.
 */
void blk_queue_required_elevator_features(struct request_queue *q,
					  unsigned int features)
{
	q->required_elevator_features = features;
}
EXPORT_SYMBOL_GPL(blk_queue_required_elevator_features);

/**
 * blk_queue_can_use_dma_map_merging - configure queue for merging segments.
 * @q:  the request queue for the device
 * @dev:  the device pointer for dma
 *
 * Tell the block layer that segments of @q may be merged according to the
 * DMA merge boundary of @dev.
 */
bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
				       struct device *dev)
{
	unsigned long boundary = dma_get_merge_boundary(dev);

	if (!boundary)
		return false;

	/* No need to update max_segment_size. see blk_queue_virt_boundary() */
	blk_queue_virt_boundary(q, boundary);

	return true;
}
EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);
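/*
 * Usage sketch (illustrative, not part of this file): a driver reports
 * the device's cache type once it has been probed. The example_dev
 * structure and its feature flags are hypothetical.
 */
#if 0
static void example_set_cache(struct example_dev *dev)
{
	bool wc = dev->features & EXAMPLE_FEAT_VOLATILE_CACHE;
	bool fua = dev->features & EXAMPLE_FEAT_FUA;

	blk_queue_write_cache(dev->queue, wc, fua);
}
#endif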
/**
 * blk_queue_set_zoned - configure a disk queue zoned model.
 * @disk:  the gendisk of the queue to configure
 * @model:  the zoned model to set
 *
 * Set the zoned model of the request queue of @disk according to @model.
 * When @model is BLK_ZONED_HM (host managed), this should be called only
 * if zoned block device support is enabled (CONFIG_BLK_DEV_ZONED option).
 * If @model specifies BLK_ZONED_HA (host aware), the effective model used
 * depends on CONFIG_BLK_DEV_ZONED settings and on the existence of partitions
 * on the disk.
 */
void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
{
	struct request_queue *q = disk->queue;

	switch (model) {
	case BLK_ZONED_HM:
		/*
		 * Host managed devices are supported only if
		 * CONFIG_BLK_DEV_ZONED is enabled.
		 */
		WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED));
		break;
	case BLK_ZONED_HA:
		/*
		 * Host aware devices can be treated either as regular block
		 * devices (similar to drive managed devices) or as zoned block
		 * devices to take advantage of the zone command set, similarly
		 * to host managed devices. We try the latter if there are no
		 * partitions and zoned block device support is enabled, else
		 * we do nothing special as far as the block layer is concerned.
		 */
		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) ||
		    !xa_empty(&disk->part_tbl))
			model = BLK_ZONED_NONE;
		break;
	case BLK_ZONED_NONE:
	default:
		if (WARN_ON_ONCE(model != BLK_ZONED_NONE))
			model = BLK_ZONED_NONE;
		break;
	}

	q->limits.zoned = model;
	if (model != BLK_ZONED_NONE) {
		/*
		 * Set the zone write granularity to the device logical block
		 * size by default. The driver can change this value if needed.
		 */
		blk_queue_zone_write_granularity(q,
						queue_logical_block_size(q));
	} else {
		blk_queue_clear_zone_settings(q);
	}
}
EXPORT_SYMBOL_GPL(blk_queue_set_zoned);
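/*
 * Usage sketch (illustrative, not part of this file): a host-managed
 * zoned driver would typically combine blk_queue_set_zoned() with the
 * chunk and zone-append limits set earlier in this file. The example_dev
 * fields are hypothetical.
 */
#if 0
static void example_setup_zoned(struct example_dev *dev)
{
	struct request_queue *q = dev->queue;

	/* Must come first: the zone-append setter checks the zoned model. */
	blk_queue_set_zoned(dev->disk, BLK_ZONED_HM);
	blk_queue_chunk_sectors(q, dev->zone_size_sectors);
	blk_queue_max_zone_append_sectors(q, dev->max_append_sectors);
}
#endif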