// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>

#include "blk.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim:  the queue_limits structure to reset
 *
 * Prepare queue limits for applying limits from underlying devices using
 * blk_stack_limits().
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
	memset(lim, 0, sizeof(*lim));
	lim->logical_block_size = SECTOR_SIZE;
	lim->physical_block_size = SECTOR_SIZE;
	lim->io_min = SECTOR_SIZE;
	lim->discard_granularity = SECTOR_SIZE;
	lim->dma_alignment = SECTOR_SIZE - 1;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;

	/* Inherit limits from component devices */
	lim->max_segments = USHRT_MAX;
	lim->max_discard_segments = USHRT_MAX;
	lim->max_hw_sectors = UINT_MAX;
	lim->max_segment_size = UINT_MAX;
	lim->max_sectors = UINT_MAX;
	lim->max_dev_sectors = UINT_MAX;
	lim->max_write_zeroes_sectors = UINT_MAX;
	lim->max_zone_append_sectors = UINT_MAX;
	lim->max_user_discard_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);
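
/*
 * Example (sketch only, hypothetical stacking driver): a stacking driver
 * such as MD or DM would typically reset its limits to the permissive
 * stacking defaults above and then narrow them per component device:
 *
 *	struct queue_limits lim;
 *	int i;
 *
 *	blk_set_stacking_limits(&lim);
 *	for (i = 0; i < nr_components; i++)
 *		blk_stack_limits(&lim, &component_queue(i)->limits,
 *				 component_start_sector(i));
 *
 * nr_components, component_queue() and component_start_sector() are made
 * up for illustration; see blk_stack_limits() below for the real rules.
 */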

static void blk_apply_bdi_limits(struct backing_dev_info *bdi,
		struct queue_limits *lim)
{
	/*
	 * For read-ahead of large files to be effective, we need to read ahead
	 * at least twice the optimal I/O size.
	 */
	bdi->ra_pages = max(lim->io_opt * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
	bdi->io_pages = lim->max_sectors >> PAGE_SECTORS_SHIFT;
}

static int blk_validate_zoned_limits(struct queue_limits *lim)
{
	if (!lim->zoned) {
		if (WARN_ON_ONCE(lim->max_open_zones) ||
		    WARN_ON_ONCE(lim->max_active_zones) ||
		    WARN_ON_ONCE(lim->zone_write_granularity) ||
		    WARN_ON_ONCE(lim->max_zone_append_sectors))
			return -EINVAL;
		return 0;
	}

	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED)))
		return -EINVAL;

	if (lim->zone_write_granularity < lim->logical_block_size)
		lim->zone_write_granularity = lim->logical_block_size;

	if (lim->max_zone_append_sectors) {
		/*
		 * The Zone Append size is limited by the maximum I/O size
		 * and the zone size given that it can't span zones.
		 */
		lim->max_zone_append_sectors =
			min3(lim->max_hw_sectors,
			     lim->max_zone_append_sectors,
			     lim->chunk_sectors);
	}

	return 0;
}

/*
 * Check that the limits in lim are valid, initialize defaults for unset
 * values, and cap values based on others where needed.
 */
static int blk_validate_limits(struct queue_limits *lim)
{
	unsigned int max_hw_sectors;

	/*
	 * Unless otherwise specified, default to 512 byte logical blocks and a
	 * physical block size equal to the logical block size.
	 */
	if (!lim->logical_block_size)
		lim->logical_block_size = SECTOR_SIZE;
	if (lim->physical_block_size < lim->logical_block_size)
		lim->physical_block_size = lim->logical_block_size;

	/*
	 * The minimum I/O size defaults to the physical block size unless
	 * explicitly overridden.
	 */
	if (lim->io_min < lim->physical_block_size)
		lim->io_min = lim->physical_block_size;

	/*
	 * max_hw_sectors has a somewhat weird default for historical reasons,
	 * but drivers really should set their own instead of relying on this
	 * value.
	 *
	 * The block layer relies on the fact that every driver can
	 * handle at least a page worth of data per I/O, and needs the value
	 * aligned to the logical block size.
	 */
	if (!lim->max_hw_sectors)
		lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
	if (WARN_ON_ONCE(lim->max_hw_sectors < PAGE_SECTORS))
		return -EINVAL;
	lim->max_hw_sectors = round_down(lim->max_hw_sectors,
			lim->logical_block_size >> SECTOR_SHIFT);

	/*
	 * The actual max_sectors value is a complex beast and also takes the
	 * max_dev_sectors value (set by SCSI ULPs) and a user configurable
	 * value into account.  The ->max_sectors value is always calculated
	 * from these, so directly setting it won't have any effect.
	 */
	max_hw_sectors = min_not_zero(lim->max_hw_sectors,
				lim->max_dev_sectors);
	if (lim->max_user_sectors) {
		if (lim->max_user_sectors < PAGE_SIZE / SECTOR_SIZE)
			return -EINVAL;
		lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
	} else {
		lim->max_sectors = min(max_hw_sectors, BLK_DEF_MAX_SECTORS_CAP);
	}
	lim->max_sectors = round_down(lim->max_sectors,
			lim->logical_block_size >> SECTOR_SHIFT);

	/*
	 * Random default for the maximum number of segments.  Drivers should
	 * not rely on this and should set their own.
	 */
	if (!lim->max_segments)
		lim->max_segments = BLK_MAX_SEGMENTS;

	lim->max_discard_sectors =
		min(lim->max_hw_discard_sectors, lim->max_user_discard_sectors);

	if (!lim->max_discard_segments)
		lim->max_discard_segments = 1;

	if (lim->discard_granularity < lim->physical_block_size)
		lim->discard_granularity = lim->physical_block_size;

	/*
	 * By default there is no limit on the segment boundary alignment,
	 * but if there is one it can't be smaller than the page size as
	 * that would break all the normal I/O patterns.
	 */
	if (!lim->seg_boundary_mask)
		lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	if (WARN_ON_ONCE(lim->seg_boundary_mask < PAGE_SIZE - 1))
		return -EINVAL;

	/*
	 * Devices that require a virtual boundary do not support scatter/gather
	 * I/O natively, but instead require a descriptor list entry for each
	 * page (which might not be identical to the Linux PAGE_SIZE).  Because
	 * of that they are not limited by our notion of "segment size".
	 */
	if (lim->virt_boundary_mask) {
		if (WARN_ON_ONCE(lim->max_segment_size &&
				 lim->max_segment_size != UINT_MAX))
			return -EINVAL;
		lim->max_segment_size = UINT_MAX;
	} else {
		/*
		 * The maximum segment size has an odd historic 64k default that
		 * drivers probably should override.  Just like the I/O size we
		 * require drivers to at least handle a full page per segment.
		 */
		if (!lim->max_segment_size)
			lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
		if (WARN_ON_ONCE(lim->max_segment_size < PAGE_SIZE))
			return -EINVAL;
	}

	/*
	 * We require drivers to at least do logical block aligned I/O, but
	 * historically could not check for that due to the separate calls
	 * to set the limits.  Once the transition is finished the check
	 * below should be narrowed down to check the logical block size.
	 */
	if (!lim->dma_alignment)
		lim->dma_alignment = SECTOR_SIZE - 1;
	if (WARN_ON_ONCE(lim->dma_alignment > PAGE_SIZE))
		return -EINVAL;

	if (lim->alignment_offset) {
		lim->alignment_offset &= (lim->physical_block_size - 1);
		lim->misaligned = 0;
	}

	return blk_validate_zoned_limits(lim);
}

/*
 * Set the default limits for a newly allocated queue.  @lim contains the
 * initial limits set by the driver, which may be no limits at all, in
 * which case all fields are cleared to zero.
 */
int blk_set_default_limits(struct queue_limits *lim)
{
	/*
	 * Most defaults are set by capping the bounds in blk_validate_limits,
	 * but max_user_discard_sectors is special and needs an explicit
	 * initialization to the max value here.
	 */
	lim->max_user_discard_sectors = UINT_MAX;
	return blk_validate_limits(lim);
}

/**
 * queue_limits_commit_update - commit an atomic update of queue limits
 * @q:		queue to update
 * @lim:	limits to apply
 *
 * Apply the limits in @lim that were obtained from queue_limits_start_update()
 * and updated by the caller to @q.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_commit_update(struct request_queue *q,
		struct queue_limits *lim)
	__releases(q->limits_lock)
{
	int error = blk_validate_limits(lim);

	if (!error) {
		q->limits = *lim;
		if (q->disk)
			blk_apply_bdi_limits(q->disk->bdi, lim);
	}
	mutex_unlock(&q->limits_lock);
	return error;
}
EXPORT_SYMBOL_GPL(queue_limits_commit_update);
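
/*
 * Example (sketch, hypothetical driver code): the usual pattern is to
 * snapshot the limits with queue_limits_start_update(), which takes
 * q->limits_lock, modify the copy, and commit it back:
 *
 *	struct queue_limits lim = queue_limits_start_update(q);
 *	int error;
 *
 *	lim.max_hw_sectors = new_max_hw_sectors;
 *	error = queue_limits_commit_update(q, &lim);
 *	if (error)
 *		return error;
 *
 * new_max_hw_sectors is a placeholder for whatever the driver learned
 * from its hardware.
 */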

/**
 * queue_limits_set - apply queue limits to queue
 * @q:		queue to update
 * @lim:	limits to apply
 *
 * Apply the limits in @lim that were freshly initialized to @q.
 * To update existing limits use queue_limits_start_update() and
 * queue_limits_commit_update() instead.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
{
	mutex_lock(&q->limits_lock);
	return queue_limits_commit_update(q, lim);
}
EXPORT_SYMBOL_GPL(queue_limits_set);
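
/*
 * Example (sketch, illustrative values only): a driver probing a new
 * device might fill in a freshly zeroed queue_limits and apply it
 * wholesale:
 *
 *	struct queue_limits lim = { };
 *
 *	lim.logical_block_size = 4096;
 *	lim.max_hw_sectors = 2048;
 *	if (queue_limits_set(q, &lim))
 *		goto fail;
 *
 * blk_validate_limits() fills in defaults for anything left at zero.
 */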

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @bounce: bounce limit to enforce
 *
 * Description:
 *    Force bouncing for ISA DMA ranges or highmem.
 *
 *    DEPRECATED, don't use in new code.
 **/
void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce)
{
	q->limits.bounce = bounce;
}
EXPORT_SYMBOL(blk_queue_bounce_limit);

/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set a hard upper limit,
 *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
 *    the device driver based upon the capabilities of the I/O
 *    controller.
 *
 *    max_dev_sectors is a hard limit imposed by the storage device for
 *    READ/WRITE requests. It is set by the disk driver.
 *
 *    max_sectors is a soft limit imposed by the block layer for
 *    filesystem type requests.  This value can be overridden on a
 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *    The soft limit can not exceed max_hw_sectors.
 **/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
	struct queue_limits *limits = &q->limits;
	unsigned int max_sectors;

	if ((max_hw_sectors << 9) < PAGE_SIZE) {
		max_hw_sectors = 1 << (PAGE_SHIFT - 9);
		pr_info("%s: set to minimum %u\n", __func__, max_hw_sectors);
	}

	max_hw_sectors = round_down(max_hw_sectors,
				    limits->logical_block_size >> SECTOR_SHIFT);
	limits->max_hw_sectors = max_hw_sectors;

	max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);

	if (limits->max_user_sectors)
		max_sectors = min(max_sectors, limits->max_user_sectors);
	else
		max_sectors = min(max_sectors, BLK_DEF_MAX_SECTORS_CAP);

	max_sectors = round_down(max_sectors,
				 limits->logical_block_size >> SECTOR_SHIFT);
	limits->max_sectors = max_sectors;

	if (!q->disk)
		return;
	q->disk->bdi->io_pages = max_sectors >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
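
/*
 * Worked example (illustrative numbers): with max_hw_sectors = 2048
 * (1 MiB) from the controller, max_dev_sectors = 0 (unset), and no
 * max_sectors_kb override from the user, the soft limit becomes
 *
 *	max_sectors = min(2048, BLK_DEF_MAX_SECTORS_CAP);
 *
 * i.e. requests are capped at BLK_DEF_MAX_SECTORS_CAP (2560 sectors,
 * 1280 KiB, in this tree) or the hardware limit, whichever is smaller,
 * until the user raises max_sectors_kb.
 */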

/**
 * blk_queue_chunk_sectors - set size of the chunk for this queue
 * @q:  the request queue for the device
 * @chunk_sectors:  chunk sectors in the usual 512b unit
 *
 * Description:
 *    If a driver doesn't want IOs to cross a given chunk size, it can set
 *    this limit and prevent merging across chunks. Note that the block layer
 *    must accept a page worth of data at any offset. So if the crossing of
 *    chunks is a hard limitation in the driver, it must still be prepared
 *    to split single page bios.
 **/
void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
{
	q->limits.chunk_sectors = chunk_sectors;
}
EXPORT_SYMBOL(blk_queue_chunk_sectors);

/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q:  the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors)
{
	struct queue_limits *lim = &q->limits;

	lim->max_hw_discard_sectors = max_discard_sectors;
	lim->max_discard_sectors =
		min(max_discard_sectors, lim->max_user_discard_sectors);
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_secure_erase_sectors - set max sectors for a secure erase
 * @q:  the request queue for the device
 * @max_sectors: maximum number of sectors to secure_erase
 **/
void blk_queue_max_secure_erase_sectors(struct request_queue *q,
		unsigned int max_sectors)
{
	q->limits.max_secure_erase_sectors = max_sectors;
}
EXPORT_SYMBOL(blk_queue_max_secure_erase_sectors);

/**
 * blk_queue_max_write_zeroes_sectors - set max sectors for a single
 *                                      write zeroes
 * @q:  the request queue for the device
 * @max_write_zeroes_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
		unsigned int max_write_zeroes_sectors)
{
	q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);

/**
 * blk_queue_max_zone_append_sectors - set max sectors for a single zone append
 * @q:  the request queue for the device
 * @max_zone_append_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_zone_append_sectors(struct request_queue *q,
		unsigned int max_zone_append_sectors)
{
	unsigned int max_sectors;

	if (WARN_ON(!blk_queue_is_zoned(q)))
		return;

	max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors);
	max_sectors = min(q->limits.chunk_sectors, max_sectors);

	/*
	 * Signal potential driver bugs that would result in the
	 * max_zone_append_sectors limit being 0 due to a 0 argument, the
	 * chunk_sectors limit (zone size) not being set, or the
	 * max_hw_sectors limit not being set.
	 */
	WARN_ON(!max_sectors);

	q->limits.max_zone_append_sectors = max_sectors;
}
EXPORT_SYMBOL_GPL(blk_queue_max_zone_append_sectors);

/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.
 **/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		pr_info("%s: set to minimum %u\n", __func__, max_segments);
	}

	q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);

/**
 * blk_queue_max_discard_segments - set max segments for discard requests
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    segments in a discard request.
 **/
void blk_queue_max_discard_segments(struct request_queue *q,
		unsigned short max_segments)
{
	q->limits.max_discard_segments = max_segments;
}
EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment.
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
	if (max_size < PAGE_SIZE) {
		max_size = PAGE_SIZE;
		pr_info("%s: set to minimum %u\n", __func__, max_size);
	}

	/* see blk_queue_virt_boundary() for the explanation */
	WARN_ON_ONCE(q->limits.virt_boundary_mask);

	q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q:  the request queue for the device
 * @size:  the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
{
	struct queue_limits *limits = &q->limits;

	limits->logical_block_size = size;

	if (limits->discard_granularity < limits->logical_block_size)
		limits->discard_granularity = limits->logical_block_size;

	if (limits->physical_block_size < size)
		limits->physical_block_size = size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;

	limits->max_hw_sectors =
		round_down(limits->max_hw_sectors, size >> SECTOR_SHIFT);
	limits->max_sectors =
		round_down(limits->max_sectors, size >> SECTOR_SHIFT);
}
EXPORT_SYMBOL(blk_queue_logical_block_size);
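
/*
 * Example (sketch): a driver for a 4Kn device would report its logical
 * block size before the disk is added:
 *
 *	blk_queue_logical_block_size(q, 4096);
 *
 * As the code above shows, this also pulls physical_block_size, io_min
 * and discard_granularity up to at least 4096 and rounds the max_sectors
 * limits down to a multiple of the new block size.
 */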

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q:  the request queue for the device
 * @size:  the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
	q->limits.physical_block_size = size;

	if (q->limits.physical_block_size < q->limits.logical_block_size)
		q->limits.physical_block_size = q->limits.logical_block_size;

	if (q->limits.discard_granularity < q->limits.physical_block_size)
		q->limits.discard_granularity = q->limits.physical_block_size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);

/**
 * blk_queue_zone_write_granularity - set zone write granularity for the queue
 * @q:  the request queue for the zoned device
 * @size:  the zone write granularity size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible size allowing writes in
 *   the sequential zones of a zoned block device.
 */
void blk_queue_zone_write_granularity(struct request_queue *q,
				      unsigned int size)
{
	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
		return;

	q->limits.zone_write_granularity = size;

	if (q->limits.zone_write_granularity < q->limits.logical_block_size)
		q->limits.zone_write_granularity = q->limits.logical_block_size;
}
EXPORT_SYMBOL_GPL(blk_queue_zone_write_granularity);

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q:	the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
	q->limits.alignment_offset =
		offset & (q->limits.physical_block_size - 1);
	q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);

void disk_update_readahead(struct gendisk *disk)
{
	blk_apply_bdi_limits(disk->bdi, &disk->queue->limits);
}
EXPORT_SYMBOL_GPL(disk_update_readahead);

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
	limits->io_min = min;

	if (limits->io_min < limits->logical_block_size)
		limits->io_min = limits->logical_block_size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q:	the request queue for the device
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty.  For disk drives this is often the
 *   physical block size.  For RAID arrays it is often the stripe chunk
 *   size.  A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
	blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
	limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q:	the request queue for the device
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
	blk_limits_io_opt(&q->limits, opt);
	if (!q->disk)
		return;
	q->disk->bdi->ra_pages =
		max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
}
EXPORT_SYMBOL(blk_queue_io_opt);

static int queue_limit_alignment_offset(const struct queue_limits *lim,
		sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
		<< SECTOR_SHIFT;

	return (granularity + lim->alignment_offset - alignment) % granularity;
}
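
/*
 * Worked example (illustrative numbers): granularity = 4096 bytes
 * (8 sectors).  For a partition starting at sector 63 on a device with
 * alignment_offset = 3584 (the classic 7-sector compensation for
 * DOS-style partitioning), alignment = (63 % 8) << 9 = 3584, and
 *
 *	(4096 + 3584 - 3584) % 4096 == 0
 *
 * so the partition reports an alignment offset of 0, i.e. it is
 * properly aligned.
 */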

static unsigned int queue_limit_discard_alignment(
		const struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment, granularity, offset;

	if (!lim->max_discard_sectors)
		return 0;

	/* Why are these in bytes, not sectors? */
	alignment = lim->discard_alignment >> SECTOR_SHIFT;
	granularity = lim->discard_granularity >> SECTOR_SHIFT;
	if (!granularity)
		return 0;

	/* Offset of the partition start in 'granularity' sectors */
	offset = sector_div(sector, granularity);

	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
	offset = (granularity + alignment - offset) % granularity;

	/* Turn it back into bytes, gaah */
	return offset << SECTOR_SHIFT;
}
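
/*
 * Worked example (illustrative numbers): discard_granularity = 1 MiB
 * (granularity = 2048 sectors), discard_alignment = 0, partition start
 * sector = 34.  Then offset = 34 % 2048 = 34 and
 *
 *	(2048 + 0 - 34) % 2048 = 2014 sectors,
 *
 * i.e. the first discard-aligned boundary sits 2014 sectors (returned
 * as 2014 << 9 bytes) into the partition.
 */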

static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
{
	sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
	if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
		sectors = PAGE_SIZE >> SECTOR_SHIFT;
	return sectors;
}

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		     sector_t start)
{
	unsigned int top, bottom, alignment, ret = 0;

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
					b->max_write_zeroes_sectors);
	t->max_zone_append_sectors = min(t->max_zone_append_sectors,
					b->max_zone_append_sectors);
	t->bounce = max(t->bounce, b->bounce);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);
	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
					    b->virt_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
	t->max_discard_segments = min_not_zero(t->max_discard_segments,
					       b->max_discard_segments);
	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
						 b->max_integrity_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	t->misaligned |= b->misaligned;

	alignment = queue_limit_alignment_offset(b, start);

	/*
	 * Bottom device has different alignment.  Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) % min(top, bottom)) {
			t->misaligned = 1;
			ret = -1;
		}
	}

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
	t->dma_alignment = max(t->dma_alignment, b->dma_alignment);

	/* Set non-power-of-2 compatible chunk_sectors boundary */
	if (b->chunk_sectors)
		t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->misaligned = 1;
		ret = -1;
	}

	/* chunk_sectors a multiple of the physical block size? */
	if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
		t->chunk_sectors = 0;
		t->misaligned = 1;
		ret = -1;
	}

	t->raid_partial_stripes_expensive =
		max(t->raid_partial_stripes_expensive,
		    b->raid_partial_stripes_expensive);

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
		% max(t->physical_block_size, t->io_min);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->misaligned = 1;
		ret = -1;
	}

	t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
	t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
	t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		if (t->discard_granularity != 0 &&
		    t->discard_alignment != alignment) {
			top = t->discard_granularity + t->discard_alignment;
			bottom = b->discard_granularity + alignment;

			/* Verify that top and bottom intervals line up */
			if ((max(top, bottom) % min(top, bottom)) != 0)
				t->discard_misaligned = 1;
		}

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
							 b->max_hw_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
			t->discard_granularity;
	}
	t->max_secure_erase_sectors = min_not_zero(t->max_secure_erase_sectors,
						   b->max_secure_erase_sectors);
	t->zone_write_granularity = max(t->zone_write_granularity,
					b->zone_write_granularity);
	t->zoned = max(t->zoned, b->zoned);
	if (!t->zoned) {
		t->zone_write_granularity = 0;
		t->max_zone_append_sectors = 0;
	}
	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);
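
/*
 * Worked example for the interval check above (illustrative numbers):
 * a top device with io_min = 65536 (a 64 KiB RAID chunk) and
 * alignment_offset = 0 gives top = 65536; a bottom device with
 * physical_block_size = 4096 and a stacking alignment of 512 gives
 * bottom = 4608.  Since 65536 % 4608 != 0 the intervals never line up,
 * so the misaligned flag is set and -1 is returned.
 */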

/**
 * queue_limits_stack_bdev - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 * @pfx: prefix to use for warnings logged
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 */
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
		sector_t offset, const char *pfx)
{
	if (blk_stack_limits(t, &bdev_get_queue(bdev)->limits,
			get_start_sect(bdev) + offset))
		pr_notice("%s: Warning: Device %pg is misaligned\n",
			pfx, bdev);
}
EXPORT_SYMBOL_GPL(queue_limits_stack_bdev);
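
/*
 * Example (sketch, modeled loosely on how MD uses this): a stacking
 * driver adds each component device's limits on top of its own:
 *
 *	struct queue_limits lim;
 *
 *	blk_set_stacking_limits(&lim);
 *	queue_limits_stack_bdev(&lim, component_bdev, data_offset,
 *				disk->disk_name);
 *
 * component_bdev and data_offset stand in for the driver's per-member
 * state; disk->disk_name is only used as the warning prefix.
 */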

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update dma pad mask.
 *
 * Appending a pad buffer to a request modifies the last entry of the
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
	if (mask > q->dma_pad_mask)
		q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
	if (mask < PAGE_SIZE - 1) {
		mask = PAGE_SIZE - 1;
		pr_info("%s: set to minimum %lx\n", __func__, mask);
	}

	q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_virt_boundary - set boundary rules for bio merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
{
	q->limits.virt_boundary_mask = mask;

	/*
	 * Devices that require a virtual boundary do not support scatter/gather
	 * I/O natively, but instead require a descriptor list entry for each
	 * page (which might not be identical to the Linux PAGE_SIZE).  Because
	 * of that they are not limited by our notion of "segment size".
	 */
	if (mask)
		q->limits.max_segment_size = UINT_MAX;
}
EXPORT_SYMBOL(blk_queue_virt_boundary);

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Set required memory and length alignment for direct DMA transactions.
 *    This is used when building direct I/O requests for the queue.
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
	q->limits.dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Update required memory and length alignment for direct DMA transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
	BUG_ON(mask > PAGE_SIZE);

	if (mask > q->limits.dma_alignment)
		q->limits.dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);

/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q:		the request queue for the device
 * @depth:	queue depth
 */
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
	q->queue_depth = depth;
	rq_qos_queue_depth_changed(q);
}
EXPORT_SYMBOL(blk_set_queue_depth);

/**
 * blk_queue_write_cache - configure queue's write cache
 * @q:		the request queue for the device
 * @wc:		write-back cache on or off
 * @fua:	device supports FUA writes, if true
 *
 * Tell the block layer about the write cache of @q.
 */
void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
{
	if (wc) {
		blk_queue_flag_set(QUEUE_FLAG_HW_WC, q);
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	} else {
		blk_queue_flag_clear(QUEUE_FLAG_HW_WC, q);
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
	}
	if (fua)
		blk_queue_flag_set(QUEUE_FLAG_FUA, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
}
EXPORT_SYMBOL_GPL(blk_queue_write_cache);
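
/*
 * Example (sketch): a driver whose device has a volatile write cache and
 * honors FUA would advertise both:
 *
 *	blk_queue_write_cache(q, true, true);
 *
 * With both flags set the flush machinery can send the device real
 * flushes for REQ_PREFLUSH and pass REQ_FUA writes through rather than
 * emulating FUA with a post-flush.
 */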

/**
 * blk_queue_required_elevator_features - Set a queue's required elevator features
 * @q:		the request queue for the target device
 * @features:	Required elevator features OR'ed together
 *
 * Tell the block layer that for the device controlled through @q, the only
 * elevators that can be used are those that implement at least the set of
 * features specified by @features.
 */
void blk_queue_required_elevator_features(struct request_queue *q,
					  unsigned int features)
{
	q->required_elevator_features = features;
}
EXPORT_SYMBOL_GPL(blk_queue_required_elevator_features);

/**
 * blk_queue_can_use_dma_map_merging - configure queue for merging segments.
 * @q:		the request queue for the device
 * @dev:	the device pointer for dma
 *
 * Tell the block layer that segments of @q may be merged according to the
 * DMA map merge boundary of @dev.
 */
bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
				       struct device *dev)
{
	unsigned long boundary = dma_get_merge_boundary(dev);

	if (!boundary)
		return false;

	/* No need to update max_segment_size. see blk_queue_virt_boundary() */
	blk_queue_virt_boundary(q, boundary);

	return true;
}
EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);
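
/*
 * Example (sketch): a driver for hardware behind an IOMMU that can merge
 * mappings would call this during queue setup:
 *
 *	if (blk_queue_can_use_dma_map_merging(q, dev))
 *		dev_dbg(dev, "using DMA map merge boundary\n");
 *
 * When it returns false the device's DMA layer exposes no merge boundary
 * and the queue limits are left untouched.
 */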

/**
 * disk_set_zoned - indicate a zoned device
 * @disk:	gendisk to configure
 */
void disk_set_zoned(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED));

	/*
	 * Set the zone write granularity to the device logical block
	 * size by default. The driver can change this value if needed.
	 */
	q->limits.zoned = true;
	blk_queue_zone_write_granularity(q, queue_logical_block_size(q));
}
EXPORT_SYMBOL_GPL(disk_set_zoned);
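
/*
 * Example (sketch): a zoned driver would typically mark the disk zoned
 * and then refine the zone limits, e.g.:
 *
 *	disk_set_zoned(disk);
 *	blk_queue_chunk_sectors(q, zone_sectors);
 *	blk_queue_max_zone_append_sectors(q, zone_append_sectors);
 *
 * zone_sectors and zone_append_sectors are placeholders for values the
 * driver reads from the device.
 */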

int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.misaligned)
		return -1;
	if (bdev_is_partition(bdev))
		return queue_limit_alignment_offset(&q->limits,
				bdev->bd_start_sect);
	return q->limits.alignment_offset;
}
EXPORT_SYMBOL_GPL(bdev_alignment_offset);

unsigned int bdev_discard_alignment(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (bdev_is_partition(bdev))
		return queue_limit_discard_alignment(&q->limits,
				bdev->bd_start_sect);
	return q->limits.discard_alignment;
}
EXPORT_SYMBOL_GPL(bdev_discard_alignment);
1127