xref: /linux/block/blk-settings.c (revision 5ddb88f22eb97218d9295e69c39e0ff7cc64e09c)
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blk-integrity.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>

#include "blk.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"

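/**
 * blk_queue_rq_timeout - set the request timeout for a queue
 * @q:		the request queue for the device
 * @timeout:	timeout in jiffies
 *
 * Set the time after which the block layer considers an in-flight request
 * expired and starts the driver's timeout handling.
 */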
void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim:  the queue_limits structure to reset
 *
 * Prepare queue limits for applying limits from underlying devices using
 * blk_stack_limits().
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
	memset(lim, 0, sizeof(*lim));
	lim->logical_block_size = SECTOR_SIZE;
	lim->physical_block_size = SECTOR_SIZE;
	lim->io_min = SECTOR_SIZE;
	lim->discard_granularity = SECTOR_SIZE;
	lim->dma_alignment = SECTOR_SIZE - 1;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;

	/* Inherit limits from component devices */
	lim->max_segments = USHRT_MAX;
	lim->max_discard_segments = USHRT_MAX;
	lim->max_hw_sectors = UINT_MAX;
	lim->max_segment_size = UINT_MAX;
	lim->max_sectors = UINT_MAX;
	lim->max_dev_sectors = UINT_MAX;
	lim->max_write_zeroes_sectors = UINT_MAX;
	lim->max_zone_append_sectors = UINT_MAX;
	lim->max_user_discard_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);
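
/*
 * Illustrative sketch (not part of this file): a stacking driver would
 * typically start from the stacking defaults, fold in each component
 * device, and apply the result.  for_each_component_bdev() stands in for
 * the driver's own iteration and is hypothetical:
 *
 *	struct queue_limits lim;
 *	struct block_device *bdev;
 *
 *	blk_set_stacking_limits(&lim);
 *	for_each_component_bdev(conf, bdev)
 *		queue_limits_stack_bdev(&lim, bdev, 0, disk->disk_name);
 *	err = queue_limits_set(disk->queue, &lim);
 */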

static void blk_apply_bdi_limits(struct backing_dev_info *bdi,
		struct queue_limits *lim)
{
	/*
	 * For read-ahead of large files to be effective, we need to read ahead
	 * at least twice the optimal I/O size.
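	 * For example, io_opt = 1 MiB results in 2 MiB of read-ahead, i.e.
	 * 512 pages with 4 KiB pages; with io_opt == 0 the global
	 * VM_READAHEAD_PAGES default is used instead.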
	 */
	bdi->ra_pages = max(lim->io_opt * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
	bdi->io_pages = lim->max_sectors >> PAGE_SECTORS_SHIFT;
}

static int blk_validate_zoned_limits(struct queue_limits *lim)
{
	if (!(lim->features & BLK_FEAT_ZONED)) {
		if (WARN_ON_ONCE(lim->max_open_zones) ||
		    WARN_ON_ONCE(lim->max_active_zones) ||
		    WARN_ON_ONCE(lim->zone_write_granularity) ||
		    WARN_ON_ONCE(lim->max_zone_append_sectors))
			return -EINVAL;
		return 0;
	}

	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED)))
		return -EINVAL;

	/*
	 * Given that active zones include open zones, the maximum number of
	 * open zones cannot be larger than the maximum number of active zones.
	 */
	if (lim->max_active_zones &&
	    lim->max_open_zones > lim->max_active_zones)
		return -EINVAL;

	if (lim->zone_write_granularity < lim->logical_block_size)
		lim->zone_write_granularity = lim->logical_block_size;

	if (lim->max_zone_append_sectors) {
		/*
		 * The Zone Append size is limited by the maximum I/O size
		 * and the zone size given that it can't span zones.
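		 * E.g. with max_hw_sectors == 1024 and chunk_sectors == 256,
		 * the resulting limit is at most 256 sectors.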
		 */
		lim->max_zone_append_sectors =
			min3(lim->max_hw_sectors,
			     lim->max_zone_append_sectors,
			     lim->chunk_sectors);
	}

	return 0;
}

static int blk_validate_integrity_limits(struct queue_limits *lim)
{
	struct blk_integrity *bi = &lim->integrity;

	if (!bi->tuple_size) {
		if (bi->csum_type != BLK_INTEGRITY_CSUM_NONE ||
		    bi->tag_size || (bi->flags & BLK_INTEGRITY_REF_TAG)) {
			pr_warn("invalid PI settings.\n");
			return -EINVAL;
		}
		return 0;
	}

	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) {
		pr_warn("integrity support disabled.\n");
		return -EINVAL;
	}

	if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE &&
	    (bi->flags & BLK_INTEGRITY_REF_TAG)) {
		pr_warn("ref tag not supported without checksum.\n");
		return -EINVAL;
	}

	if (!bi->interval_exp)
		bi->interval_exp = ilog2(lim->logical_block_size);

	return 0;
}

/*
 * Check that the limits in lim are valid, initialize defaults for unset
 * values, and cap values based on others where needed.
 */
static int blk_validate_limits(struct queue_limits *lim)
{
	unsigned int max_hw_sectors;
	unsigned int logical_block_sectors;
	int err;

	/*
	 * Unless otherwise specified, default to 512 byte logical blocks and a
	 * physical block size equal to the logical block size.
	 */
	if (!lim->logical_block_size)
		lim->logical_block_size = SECTOR_SIZE;
	if (lim->physical_block_size < lim->logical_block_size)
		lim->physical_block_size = lim->logical_block_size;

	/*
	 * The minimum I/O size defaults to the physical block size unless
	 * explicitly overridden.
	 */
	if (lim->io_min < lim->physical_block_size)
		lim->io_min = lim->physical_block_size;

	/*
	 * max_hw_sectors has a somewhat weird default for historical reasons,
	 * but drivers really should set their own instead of relying on this
	 * value.
	 *
	 * The block layer relies on the fact that every driver can
	 * handle at least a page worth of data per I/O, and needs the value
	 * aligned to the logical block size.
	 */
	if (!lim->max_hw_sectors)
		lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
	if (WARN_ON_ONCE(lim->max_hw_sectors < PAGE_SECTORS))
		return -EINVAL;
	logical_block_sectors = lim->logical_block_size >> SECTOR_SHIFT;
	if (WARN_ON_ONCE(logical_block_sectors > lim->max_hw_sectors))
		return -EINVAL;
	lim->max_hw_sectors = round_down(lim->max_hw_sectors,
			logical_block_sectors);

	/*
	 * The actual max_sectors value is a complex beast and also takes the
	 * max_dev_sectors value (set by SCSI ULPs) and a user configurable
	 * value into account.  The ->max_sectors value is always calculated
	 * from these, so directly setting it won't have any effect.
	 */
	max_hw_sectors = min_not_zero(lim->max_hw_sectors,
				lim->max_dev_sectors);
	if (lim->max_user_sectors) {
		if (lim->max_user_sectors < PAGE_SIZE / SECTOR_SIZE)
			return -EINVAL;
		lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
	} else if (lim->io_opt) {
		lim->max_sectors =
			min(max_hw_sectors, lim->io_opt >> SECTOR_SHIFT);
	} else if (lim->io_min &&
		   lim->io_min > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
		lim->max_sectors =
			min(max_hw_sectors, lim->io_min >> SECTOR_SHIFT);
	} else {
		lim->max_sectors = min(max_hw_sectors, BLK_DEF_MAX_SECTORS_CAP);
	}
	lim->max_sectors = round_down(lim->max_sectors,
			logical_block_sectors);

	/*
	 * Arbitrary default for the maximum number of segments.  Drivers
	 * should not rely on this and should set their own.
	 */
	if (!lim->max_segments)
		lim->max_segments = BLK_MAX_SEGMENTS;

	lim->max_discard_sectors =
		min(lim->max_hw_discard_sectors, lim->max_user_discard_sectors);

	if (!lim->max_discard_segments)
		lim->max_discard_segments = 1;

	if (lim->discard_granularity < lim->physical_block_size)
		lim->discard_granularity = lim->physical_block_size;

	/*
	 * By default there is no limit on the segment boundary alignment,
	 * but if there is one it can't be smaller than the page size as
	 * that would break all the normal I/O patterns.
	 */
	if (!lim->seg_boundary_mask)
		lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	if (WARN_ON_ONCE(lim->seg_boundary_mask < PAGE_SIZE - 1))
		return -EINVAL;

	/*
	 * A stacking device may have both a virtual boundary and a max
	 * segment size limit, so allow this setting for now.  Long-term the
	 * two might need to move out of the stacking limits, since we have
	 * immutable bvecs and the lower-layer bio splitting is supposed to
	 * handle the two correctly.
	 */
	if (lim->virt_boundary_mask) {
		if (!lim->max_segment_size)
			lim->max_segment_size = UINT_MAX;
	} else {
		/*
		 * The maximum segment size has an odd historical 64k default
		 * that drivers probably should override.  Just like the I/O
		 * size we require drivers to at least handle a full page per
		 * segment.
		 */
		if (!lim->max_segment_size)
			lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
		if (WARN_ON_ONCE(lim->max_segment_size < PAGE_SIZE))
			return -EINVAL;
	}

	/*
	 * We require drivers to at least do logical block aligned I/O, but
	 * historically could not check for that due to the separate calls
	 * to set the limits.  Once the transition is finished the check
	 * below should be narrowed down to check the logical block size.
	 */
	if (!lim->dma_alignment)
		lim->dma_alignment = SECTOR_SIZE - 1;
	if (WARN_ON_ONCE(lim->dma_alignment > PAGE_SIZE))
		return -EINVAL;

	if (lim->alignment_offset) {
		lim->alignment_offset &= (lim->physical_block_size - 1);
		lim->misaligned = 0;
	}

	if (!(lim->features & BLK_FEAT_WRITE_CACHE))
		lim->features &= ~BLK_FEAT_FUA;

	err = blk_validate_integrity_limits(lim);
	if (err)
		return err;
	return blk_validate_zoned_limits(lim);
}

/*
 * Set the default limits for a newly allocated queue.  @lim contains the
 * initial limits set by the driver, which may impose no limits at all, in
 * which case all fields are cleared to zero.
 */
int blk_set_default_limits(struct queue_limits *lim)
{
	/*
	 * Most defaults are set by capping the bounds in blk_validate_limits,
	 * but max_user_discard_sectors is special and needs an explicit
	 * initialization to the max value here.
	 */
	lim->max_user_discard_sectors = UINT_MAX;
	return blk_validate_limits(lim);
}

/**
 * queue_limits_commit_update - commit an atomic update of queue limits
 * @q:		queue to update
 * @lim:	limits to apply
 *
 * Apply the limits in @lim that were obtained from queue_limits_start_update()
 * and updated by the caller to @q.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_commit_update(struct request_queue *q,
		struct queue_limits *lim)
{
	int error;

	error = blk_validate_limits(lim);
	if (error)
		goto out_unlock;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	if (q->crypto_profile && lim->integrity.tag_size) {
		pr_warn("blk-integrity: Integrity and hardware inline encryption are not supported together.\n");
		error = -EINVAL;
		goto out_unlock;
	}
#endif

	q->limits = *lim;
	if (q->disk)
		blk_apply_bdi_limits(q->disk->bdi, lim);
out_unlock:
	mutex_unlock(&q->limits_lock);
	return error;
}
EXPORT_SYMBOL_GPL(queue_limits_commit_update);
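
/*
 * Illustrative sketch: a driver updating a single limit atomically with
 * the queue_limits_start_update() helper, which takes q->limits_lock and
 * returns a snapshot of the current limits (the lock is dropped by
 * queue_limits_commit_update() on both success and failure);
 * new_max_hw_sectors is a placeholder:
 *
 *	struct queue_limits lim = queue_limits_start_update(q);
 *	int err;
 *
 *	lim.max_hw_sectors = new_max_hw_sectors;
 *	err = queue_limits_commit_update(q, &lim);
 */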

/**
 * queue_limits_set - apply queue limits to queue
 * @q:		queue to update
 * @lim:	limits to apply
 *
 * Apply the limits in @lim that were freshly initialized to @q.
 * To update existing limits use queue_limits_start_update() and
 * queue_limits_commit_update() instead.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
{
	mutex_lock(&q->limits_lock);
	return queue_limits_commit_update(q, lim);
}
EXPORT_SYMBOL_GPL(queue_limits_set);

void disk_update_readahead(struct gendisk *disk)
{
	blk_apply_bdi_limits(disk->bdi, &disk->queue->limits);
}
EXPORT_SYMBOL_GPL(disk_update_readahead);

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
	limits->io_min = min;

	if (limits->io_min < limits->logical_block_size)
		limits->io_min = limits->logical_block_size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);
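
/*
 * For example (illustrative numbers): a device with 512 byte logical blocks
 * and 4096 byte physical blocks that calls blk_limits_io_min(limits, 512)
 * ends up with io_min == 4096, because the value is clamped to the
 * physical block size above.
 */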

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
	limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);
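
/*
 * For example (illustrative numbers): a RAID5 array with a 64 KiB chunk
 * across four data disks might set io_min to the 64 KiB chunk size and
 * io_opt to the 256 KiB full stripe width.
 */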

static int queue_limit_alignment_offset(const struct queue_limits *lim,
		sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
		<< SECTOR_SHIFT;

	return (granularity + lim->alignment_offset - alignment) % granularity;
}
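
/*
 * Worked example for the calculation above (illustrative numbers): with a
 * 4096 byte physical block size, alignment_offset == 0 and a partition
 * starting at sector 1, granularity is 4096 bytes, the partition start
 * falls 512 bytes into a granule, and the reported offset is
 * (4096 + 0 - 512) % 4096 == 3584 bytes.
 */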

static unsigned int queue_limit_discard_alignment(
		const struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment, granularity, offset;

	if (!lim->max_discard_sectors)
		return 0;

	/* Why are these in bytes, not sectors? */
	alignment = lim->discard_alignment >> SECTOR_SHIFT;
	granularity = lim->discard_granularity >> SECTOR_SHIFT;
	if (!granularity)
		return 0;

	/* Offset of the partition start in 'granularity' sectors */
	offset = sector_div(sector, granularity);

	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
	offset = (granularity + alignment - offset) % granularity;

	/* Turn it back into bytes, gaah */
	return offset << SECTOR_SHIFT;
}

static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
{
	sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
	if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
		sectors = PAGE_SIZE >> SECTOR_SHIFT;
	return sectors;
}

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		     sector_t start)
{
	unsigned int top, bottom, alignment, ret = 0;

	t->features |= (b->features & BLK_FEAT_INHERIT_MASK);

	/*
	 * BLK_FEAT_NOWAIT and BLK_FEAT_POLL need to be supported both by the
	 * stacking driver and all underlying devices.  The stacking driver sets
	 * the flags before stacking the limits, and this will clear the flags
	 * if any of the underlying devices does not support it.
	 */
	if (!(b->features & BLK_FEAT_NOWAIT))
		t->features &= ~BLK_FEAT_NOWAIT;
	if (!(b->features & BLK_FEAT_POLL))
		t->features &= ~BLK_FEAT_POLL;

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_user_sectors = min_not_zero(t->max_user_sectors,
			b->max_user_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
					b->max_write_zeroes_sectors);
	t->max_zone_append_sectors = min(queue_limits_max_zone_append_sectors(t),
					 queue_limits_max_zone_append_sectors(b));

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);
	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
					    b->virt_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
	t->max_discard_segments = min_not_zero(t->max_discard_segments,
					       b->max_discard_segments);
	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
						 b->max_integrity_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	t->misaligned |= b->misaligned;

	alignment = queue_limit_alignment_offset(b, start);

	/*
	 * The bottom device has a different alignment.  Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) % min(top, bottom)) {
			t->misaligned = 1;
			ret = -1;
		}
	}

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
	t->dma_alignment = max(t->dma_alignment, b->dma_alignment);

	/* Set non-power-of-2 compatible chunk_sectors boundary */
	if (b->chunk_sectors)
		t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->misaligned = 1;
		ret = -1;
	}

	/* chunk_sectors a multiple of the physical block size? */
	if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
		t->chunk_sectors = 0;
		t->misaligned = 1;
		ret = -1;
	}

	t->raid_partial_stripes_expensive =
		max(t->raid_partial_stripes_expensive,
		    b->raid_partial_stripes_expensive);

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
		% max(t->physical_block_size, t->io_min);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->misaligned = 1;
		ret = -1;
	}

	t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
	t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
	t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		if (t->discard_granularity != 0 &&
		    t->discard_alignment != alignment) {
			top = t->discard_granularity + t->discard_alignment;
			bottom = b->discard_granularity + alignment;

			/* Verify that top and bottom intervals line up */
			if ((max(top, bottom) % min(top, bottom)) != 0)
				t->discard_misaligned = 1;
		}

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
							 b->max_hw_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
			t->discard_granularity;
	}
	t->max_secure_erase_sectors = min_not_zero(t->max_secure_erase_sectors,
						   b->max_secure_erase_sectors);
	t->zone_write_granularity = max(t->zone_write_granularity,
					b->zone_write_granularity);
	if (!(t->features & BLK_FEAT_ZONED)) {
		t->zone_write_granularity = 0;
		t->max_zone_append_sectors = 0;
	}
	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);
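
/*
 * Illustrative example of the combination rules above: stacking a bottom
 * device with a 512 byte logical block size and io_opt = 1 MiB into top
 * limits that already contain a 4096 byte logical block size and
 * io_opt = 1.5 MiB yields logical_block_size = 4096 (max) and
 * io_opt = 3 MiB (lcm_not_zero).
 */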

/**
 * queue_limits_stack_bdev - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 * @pfx: prefix to use for warnings logged
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 */
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
		sector_t offset, const char *pfx)
{
	if (blk_stack_limits(t, &bdev_get_queue(bdev)->limits,
			get_start_sect(bdev) + offset))
		pr_notice("%s: Warning: Device %pg is misaligned\n",
			pfx, bdev);
}
EXPORT_SYMBOL_GPL(queue_limits_stack_bdev);

/**
 * queue_limits_stack_integrity - stack integrity profile
 * @t: target queue limits
 * @b: base queue limits
 *
 * Check if the integrity profile in @b can be stacked into the target @t.
 * Stacking is possible if either:
 *
 *   a) @t does not have any integrity information stacked into it yet, or
 *   b) the integrity profile in @b is identical to the one in @t
 *
 * If @b can be stacked into @t, return %true.  Else return %false and clear the
 * integrity information in @t.
 */
bool queue_limits_stack_integrity(struct queue_limits *t,
		struct queue_limits *b)
{
	struct blk_integrity *ti = &t->integrity;
	struct blk_integrity *bi = &b->integrity;

	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return true;

	if (!ti->tuple_size) {
		/* inherit the settings from the first underlying device */
		if (!(ti->flags & BLK_INTEGRITY_STACKED)) {
			ti->flags = BLK_INTEGRITY_DEVICE_CAPABLE |
				(bi->flags & BLK_INTEGRITY_REF_TAG);
			ti->csum_type = bi->csum_type;
			ti->tuple_size = bi->tuple_size;
			ti->pi_offset = bi->pi_offset;
			ti->interval_exp = bi->interval_exp;
			ti->tag_size = bi->tag_size;
			goto done;
		}
		if (!bi->tuple_size)
			goto done;
	}

	if (ti->tuple_size != bi->tuple_size)
		goto incompatible;
	if (ti->interval_exp != bi->interval_exp)
		goto incompatible;
	if (ti->tag_size != bi->tag_size)
		goto incompatible;
	if (ti->csum_type != bi->csum_type)
		goto incompatible;
	if ((ti->flags & BLK_INTEGRITY_REF_TAG) !=
	    (bi->flags & BLK_INTEGRITY_REF_TAG))
		goto incompatible;

done:
	ti->flags |= BLK_INTEGRITY_STACKED;
	return true;

incompatible:
	memset(ti, 0, sizeof(*ti));
	return false;
}
EXPORT_SYMBOL_GPL(queue_limits_stack_integrity);
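
/*
 * For example (illustrative): two bottom devices that both use
 * BLK_INTEGRITY_CSUM_CRC with identical tuple_size, interval_exp, tag_size
 * and reference tag usage stack fine, while a second device whose profile
 * differs in any of these fields clears the integrity information in @t
 * and makes this function return false.
 */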

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update the DMA pad mask.
 *
 * Appending a pad buffer to a request modifies the last entry of the
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
	if (mask > q->dma_pad_mask)
		q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);
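
/*
 * For example (illustrative, assuming the padding applied when the request
 * is mapped): a mask of 3 extends the last scatterlist entry so the total
 * transfer length becomes a multiple of 4 bytes, as some ATAPI devices
 * require.
 */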

/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q:		the request queue for the device
 * @depth:	queue depth
 */
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
	q->queue_depth = depth;
	rq_qos_queue_depth_changed(q);
}
EXPORT_SYMBOL(blk_set_queue_depth);

int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.misaligned)
		return -1;
	if (bdev_is_partition(bdev))
		return queue_limit_alignment_offset(&q->limits,
				bdev->bd_start_sect);
	return q->limits.alignment_offset;
}
EXPORT_SYMBOL_GPL(bdev_alignment_offset);

unsigned int bdev_discard_alignment(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (bdev_is_partition(bdev))
		return queue_limit_discard_alignment(&q->limits,
				bdev->bd_start_sect);
	return q->limits.discard_alignment;
}
EXPORT_SYMBOL_GPL(bdev_discard_alignment);