xref: /linux/block/blk-settings.c (revision 5e3b7009f116f684ac6b93d8924506154f3b1f6d)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Functions related to setting various queue properties from drivers
4  */
5 #include <linux/kernel.h>
6 #include <linux/module.h>
7 #include <linux/init.h>
8 #include <linux/bio.h>
9 #include <linux/blk-integrity.h>
10 #include <linux/pagemap.h>
11 #include <linux/backing-dev-defs.h>
12 #include <linux/gcd.h>
13 #include <linux/lcm.h>
14 #include <linux/jiffies.h>
15 #include <linux/gfp.h>
16 #include <linux/dma-mapping.h>
17 
18 #include "blk.h"
19 #include "blk-rq-qos.h"
20 #include "blk-wbt.h"
21 
22 void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
23 {
24 	q->rq_timeout = timeout;
25 }
26 EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
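/*
 * Illustrative sketch (not part of this file): the timeout is expressed in
 * jiffies, so a hypothetical driver wanting a 30 second request timeout
 * would convert from seconds first:
 *
 *	blk_queue_rq_timeout(q, 30 * HZ);
 *
 * The value is an assumption made purely for illustration.
 */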
27 
28 /**
29  * blk_set_stacking_limits - set default limits for stacking devices
30  * @lim:  the queue_limits structure to reset
31  *
32  * Prepare queue limits for applying limits from underlying devices using
33  * blk_stack_limits().
34  */
35 void blk_set_stacking_limits(struct queue_limits *lim)
36 {
37 	memset(lim, 0, sizeof(*lim));
38 	lim->logical_block_size = SECTOR_SIZE;
39 	lim->physical_block_size = SECTOR_SIZE;
40 	lim->io_min = SECTOR_SIZE;
41 	lim->discard_granularity = SECTOR_SIZE;
42 	lim->dma_alignment = SECTOR_SIZE - 1;
43 	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
44 
45 	/* Inherit limits from component devices */
46 	lim->max_segments = USHRT_MAX;
47 	lim->max_discard_segments = USHRT_MAX;
48 	lim->max_hw_sectors = UINT_MAX;
49 	lim->max_segment_size = UINT_MAX;
50 	lim->max_sectors = UINT_MAX;
51 	lim->max_dev_sectors = UINT_MAX;
52 	lim->max_write_zeroes_sectors = UINT_MAX;
53 	lim->max_zone_append_sectors = UINT_MAX;
54 	lim->max_user_discard_sectors = UINT_MAX;
55 }
56 EXPORT_SYMBOL(blk_set_stacking_limits);
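/*
 * Illustrative sketch (not part of this file): a hypothetical stacking
 * driver first resets its limits to these permissive defaults and only
 * then narrows them per component device, e.g.:
 *
 *	struct queue_limits lim;
 *
 *	blk_set_stacking_limits(&lim);
 *
 * followed by one blk_stack_limits() or queue_limits_stack_bdev() call per
 * component.  The surrounding driver context is an assumption made for
 * illustration only.
 */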
57 
58 static void blk_apply_bdi_limits(struct backing_dev_info *bdi,
59 		struct queue_limits *lim)
60 {
61 	/*
62 	 * For read-ahead of large files to be effective, we need to read ahead
63 	 * at least twice the optimal I/O size.
64 	 */
65 	bdi->ra_pages = max(lim->io_opt * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
66 	bdi->io_pages = lim->max_sectors >> PAGE_SECTORS_SHIFT;
67 }
68 
69 static int blk_validate_zoned_limits(struct queue_limits *lim)
70 {
71 	if (!lim->zoned) {
72 		if (WARN_ON_ONCE(lim->max_open_zones) ||
73 		    WARN_ON_ONCE(lim->max_active_zones) ||
74 		    WARN_ON_ONCE(lim->zone_write_granularity) ||
75 		    WARN_ON_ONCE(lim->max_zone_append_sectors))
76 			return -EINVAL;
77 		return 0;
78 	}
79 
80 	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED)))
81 		return -EINVAL;
82 
83 	if (lim->zone_write_granularity < lim->logical_block_size)
84 		lim->zone_write_granularity = lim->logical_block_size;
85 
86 	if (lim->max_zone_append_sectors) {
87 		/*
88 		 * The Zone Append size is limited by the maximum I/O size
89 		 * and the zone size given that it can't span zones.
90 		 */
91 		lim->max_zone_append_sectors =
92 			min3(lim->max_hw_sectors,
93 			     lim->max_zone_append_sectors,
94 			     lim->chunk_sectors);
95 	}
96 
97 	return 0;
98 }
99 
100 static int blk_validate_integrity_limits(struct queue_limits *lim)
101 {
102 	struct blk_integrity *bi = &lim->integrity;
103 
104 	if (!bi->tuple_size) {
105 		if (bi->csum_type != BLK_INTEGRITY_CSUM_NONE ||
106 		    bi->tag_size || (bi->flags & BLK_INTEGRITY_REF_TAG)) {
107 			pr_warn("invalid PI settings.\n");
108 			return -EINVAL;
109 		}
110 		return 0;
111 	}
112 
113 	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) {
114 		pr_warn("integrity support disabled.\n");
115 		return -EINVAL;
116 	}
117 
118 	if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE &&
119 	    (bi->flags & BLK_INTEGRITY_REF_TAG)) {
120 		pr_warn("ref tag not supported without checksum.\n");
121 		return -EINVAL;
122 	}
123 
124 	if (!bi->interval_exp)
125 		bi->interval_exp = ilog2(lim->logical_block_size);
126 
127 	return 0;
128 }
129 
130 /*
131  * Check that the limits in lim are valid, initialize defaults for unset
132  * values, and cap values based on others where needed.
133  */
134 static int blk_validate_limits(struct queue_limits *lim)
135 {
136 	unsigned int max_hw_sectors;
137 	unsigned int logical_block_sectors;
138 	int err;
139 
140 	/*
141 	 * Unless otherwise specified, default to 512 byte logical blocks and a
142 	 * physical block size equal to the logical block size.
143 	 */
144 	if (!lim->logical_block_size)
145 		lim->logical_block_size = SECTOR_SIZE;
146 	if (lim->physical_block_size < lim->logical_block_size)
147 		lim->physical_block_size = lim->logical_block_size;
148 
149 	/*
150 	 * The minimum I/O size defaults to the physical block size unless
151 	 * explicitly overridden.
152 	 */
153 	if (lim->io_min < lim->physical_block_size)
154 		lim->io_min = lim->physical_block_size;
155 
156 	/*
157 	 * max_hw_sectors has a somewhat weird default for historical reasons,
158 	 * but drivers really should set their own instead of relying on this
159 	 * value.
160 	 *
161 	 * The block layer relies on the fact that every driver can
162 	 * handle at least a page worth of data per I/O, and needs the value
163 	 * aligned to the logical block size.
164 	 */
165 	if (!lim->max_hw_sectors)
166 		lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
167 	if (WARN_ON_ONCE(lim->max_hw_sectors < PAGE_SECTORS))
168 		return -EINVAL;
169 	logical_block_sectors = lim->logical_block_size >> SECTOR_SHIFT;
170 	if (WARN_ON_ONCE(logical_block_sectors > lim->max_hw_sectors))
171 		return -EINVAL;
172 	lim->max_hw_sectors = round_down(lim->max_hw_sectors,
173 			logical_block_sectors);
174 
175 	/*
176 	 * The actual max_sectors value is a complex beast and also takes the
177 	 * max_dev_sectors value (set by SCSI ULPs) and a user configurable
178 	 * value into account.  The ->max_sectors value is always calculated
179 	 * from these, so directly setting it won't have any effect.
180 	 */
181 	max_hw_sectors = min_not_zero(lim->max_hw_sectors,
182 				lim->max_dev_sectors);
183 	if (lim->max_user_sectors) {
184 		if (lim->max_user_sectors < PAGE_SIZE / SECTOR_SIZE)
185 			return -EINVAL;
186 		lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
187 	} else if (lim->io_opt) {
188 		lim->max_sectors =
189 			min(max_hw_sectors, lim->io_opt >> SECTOR_SHIFT);
190 	} else if (lim->io_min &&
191 		   lim->io_min > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
192 		lim->max_sectors =
193 			min(max_hw_sectors, lim->io_min >> SECTOR_SHIFT);
194 	} else {
195 		lim->max_sectors = min(max_hw_sectors, BLK_DEF_MAX_SECTORS_CAP);
196 	}
197 	lim->max_sectors = round_down(lim->max_sectors,
198 			logical_block_sectors);
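	/*
	 * Worked example (illustrative): with max_hw_sectors = 65535 and no
	 * max_user_sectors, io_opt or large io_min set, the final branch
	 * applies and max_sectors becomes min(65535, BLK_DEF_MAX_SECTORS_CAP),
	 * i.e. the default cap rather than the hardware limit, rounded down
	 * to a logical block size multiple.
	 */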
199 
200 	/*
201 	 * Random default for the maximum number of segments.  Drivers should
202 	 * not rely on this and should set their own.
203 	 */
204 	if (!lim->max_segments)
205 		lim->max_segments = BLK_MAX_SEGMENTS;
206 
207 	lim->max_discard_sectors =
208 		min(lim->max_hw_discard_sectors, lim->max_user_discard_sectors);
209 
210 	if (!lim->max_discard_segments)
211 		lim->max_discard_segments = 1;
212 
213 	if (lim->discard_granularity < lim->physical_block_size)
214 		lim->discard_granularity = lim->physical_block_size;
215 
216 	/*
217 	 * By default there is no limit on the segment boundary alignment,
218 	 * but if there is one it can't be smaller than the page size as
219 	 * that would break all the normal I/O patterns.
220 	 */
221 	if (!lim->seg_boundary_mask)
222 		lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
223 	if (WARN_ON_ONCE(lim->seg_boundary_mask < PAGE_SIZE - 1))
224 		return -EINVAL;
225 
226 	/*
227 	 * Stacking devices may have both a virtual boundary and a max segment
228 	 * size limit, so allow this setting for now.  Long-term the two might
229 	 * need to move out of the stacking limits, since we have immutable
230 	 * bvecs and lower-layer bio splitting is supposed to handle the two
231 	 * correctly.
232 	 */
233 	if (lim->virt_boundary_mask) {
234 		if (!lim->max_segment_size)
235 			lim->max_segment_size = UINT_MAX;
236 	} else {
237 		/*
238 		 * The maximum segment size has an odd historic 64k default that
239 		 * drivers probably should override.  Just like the I/O size we
240 		 * require drivers to at least handle a full page per segment.
241 		 */
242 		if (!lim->max_segment_size)
243 			lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
244 		if (WARN_ON_ONCE(lim->max_segment_size < PAGE_SIZE))
245 			return -EINVAL;
246 	}
247 
248 	/*
249 	 * We require drivers to at least do logical block aligned I/O, but
250 	 * historically we could not check for that because the limits were
251 	 * set through separate calls.  Once the transition is finished the
252 	 * check below should be narrowed down to the logical block size.
253 	 */
254 	if (!lim->dma_alignment)
255 		lim->dma_alignment = SECTOR_SIZE - 1;
256 	if (WARN_ON_ONCE(lim->dma_alignment > PAGE_SIZE))
257 		return -EINVAL;
258 
259 	if (lim->alignment_offset) {
260 		lim->alignment_offset &= (lim->physical_block_size - 1);
261 		lim->misaligned = 0;
262 	}
263 
264 	err = blk_validate_integrity_limits(lim);
265 	if (err)
266 		return err;
267 	return blk_validate_zoned_limits(lim);
268 }
269 
270 /*
271  * Set the default limits for a newly allocated queue.  @lim contains the
272  * initial limits set by the driver, which could be no limits at all, in
273  * which case all fields are cleared to zero.
274  */
275 int blk_set_default_limits(struct queue_limits *lim)
276 {
277 	/*
278 	 * Most defaults are set by capping the bounds in blk_validate_limits,
279 	 * but max_user_discard_sectors is special and needs an explicit
280 	 * initialization to the max value here.
281 	 */
282 	lim->max_user_discard_sectors = UINT_MAX;
283 	return blk_validate_limits(lim);
284 }
285 
286 /**
287  * queue_limits_commit_update - commit an atomic update of queue limits
288  * @q:		queue to update
289  * @lim:	limits to apply
290  *
291  * Apply the limits in @lim that were obtained from queue_limits_start_update()
292  * and updated by the caller to @q.
293  *
294  * Returns 0 if successful, else a negative error code.
295  */
296 int queue_limits_commit_update(struct request_queue *q,
297 		struct queue_limits *lim)
298 	__releases(q->limits_lock)
299 {
300 	int error;
301 
302 	error = blk_validate_limits(lim);
303 	if (error)
304 		goto out_unlock;
305 
306 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
307 	if (q->crypto_profile && lim->integrity.tag_size) {
308 		pr_warn("blk-integrity: Integrity and hardware inline encryption are not supported together.\n");
309 		error = -EINVAL;
310 		goto out_unlock;
311 	}
312 #endif
313 
314 	q->limits = *lim;
315 	if (q->disk)
316 		blk_apply_bdi_limits(q->disk->bdi, lim);
317 out_unlock:
318 	mutex_unlock(&q->limits_lock);
319 	return error;
320 }
321 EXPORT_SYMBOL_GPL(queue_limits_commit_update);
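/*
 * Illustrative sketch (not part of this file): the usual update pattern in
 * a hypothetical driver is to snapshot the limits with
 * queue_limits_start_update(), modify the copy, and commit it back:
 *
 *	struct queue_limits lim = queue_limits_start_update(q);
 *	int err;
 *
 *	lim.max_hw_sectors = new_max;
 *	err = queue_limits_commit_update(q, &lim);
 *
 * new_max is a placeholder.  The commit drops q->limits_lock even on
 * failure, so the caller must not unlock it again.
 */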
322 
323 /**
324  * queue_limits_set - apply queue limits to queue
325  * @q:		queue to update
326  * @lim:	limits to apply
327  *
328  * Apply the limits in @lim that were freshly initialized to @q.
329  * To update existing limits use queue_limits_start_update() and
330  * queue_limits_commit_update() instead.
331  *
332  * Returns 0 if successful, else a negative error code.
333  */
334 int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
335 {
336 	mutex_lock(&q->limits_lock);
337 	return queue_limits_commit_update(q, lim);
338 }
339 EXPORT_SYMBOL_GPL(queue_limits_set);
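/*
 * Illustrative sketch (not part of this file): queue_limits_set() is meant
 * for limits built from scratch, for instance during probe in a
 * hypothetical driver:
 *
 *	struct queue_limits lim = { };
 *	int err;
 *
 *	lim.logical_block_size = 4096;
 *	lim.max_hw_sectors = dev_max_sectors;
 *	err = queue_limits_set(q, &lim);
 *
 * dev_max_sectors is a placeholder for a device-reported value; unset
 * fields get defaults from blk_validate_limits().
 */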
340 
341 void disk_update_readahead(struct gendisk *disk)
342 {
343 	blk_apply_bdi_limits(disk->bdi, &disk->queue->limits);
344 }
345 EXPORT_SYMBOL_GPL(disk_update_readahead);
346 
347 /**
348  * blk_limits_io_min - set minimum request size for a device
349  * @limits: the queue limits
350  * @min:  smallest I/O size in bytes
351  *
352  * Description:
353  *   Some devices have an internal block size bigger than the reported
354  *   hardware sector size.  This function can be used to signal the
355  *   smallest I/O the device can perform without incurring a performance
356  *   penalty.
357  */
358 void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
359 {
360 	limits->io_min = min;
361 
362 	if (limits->io_min < limits->logical_block_size)
363 		limits->io_min = limits->logical_block_size;
364 
365 	if (limits->io_min < limits->physical_block_size)
366 		limits->io_min = limits->physical_block_size;
367 }
368 EXPORT_SYMBOL(blk_limits_io_min);
369 
370 /**
371  * blk_limits_io_opt - set optimal request size for a device
372  * @limits: the queue limits
373  * @opt:  optimal I/O size in bytes
374  *
375  * Description:
376  *   Storage devices may report an optimal I/O size, which is the
377  *   device's preferred unit for sustained I/O.  This is rarely reported
378  *   for disk drives.  For RAID arrays it is usually the stripe width or
379  *   the internal track size.  A properly aligned multiple of
380  *   optimal_io_size is the preferred request size for workloads where
381  *   sustained throughput is desired.
382  */
383 void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
384 {
385 	limits->io_opt = opt;
386 }
387 EXPORT_SYMBOL(blk_limits_io_opt);
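/*
 * Illustrative sketch (not part of this file): a hypothetical RAID-style
 * driver might advertise its chunk size as the minimum I/O and the full
 * stripe width as the optimal I/O while building its limits:
 *
 *	blk_limits_io_min(&lim, chunk_size_bytes);
 *	blk_limits_io_opt(&lim, chunk_size_bytes * nr_data_disks);
 *
 * lim, chunk_size_bytes and nr_data_disks are placeholders.
 */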
388 
389 static int queue_limit_alignment_offset(const struct queue_limits *lim,
390 		sector_t sector)
391 {
392 	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
393 	unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
394 		<< SECTOR_SHIFT;
395 
396 	return (granularity + lim->alignment_offset - alignment) % granularity;
397 }
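/*
 * Worked example (illustrative): with physical_block_size = io_min = 4096,
 * alignment_offset = 0 and a partition starting at sector 63, the
 * granularity is 4096 bytes (8 sectors), the partition start sits 7 sectors
 * (3584 bytes) into its granule, and the function returns
 * (4096 + 0 - 3584) % 4096 = 512.
 */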
398 
399 static unsigned int queue_limit_discard_alignment(
400 		const struct queue_limits *lim, sector_t sector)
401 {
402 	unsigned int alignment, granularity, offset;
403 
404 	if (!lim->max_discard_sectors)
405 		return 0;
406 
407 	/* Why are these in bytes, not sectors? */
408 	alignment = lim->discard_alignment >> SECTOR_SHIFT;
409 	granularity = lim->discard_granularity >> SECTOR_SHIFT;
410 	if (!granularity)
411 		return 0;
412 
413 	/* Offset of the partition start in 'granularity' sectors */
414 	offset = sector_div(sector, granularity);
415 
416 	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
417 	offset = (granularity + alignment - offset) % granularity;
418 
419 	/* Turn it back into bytes, gaah */
420 	return offset << SECTOR_SHIFT;
421 }
422 
423 static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
424 {
425 	sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
426 	if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
427 		sectors = PAGE_SIZE >> SECTOR_SHIFT;
428 	return sectors;
429 }
430 
431 /**
432  * blk_stack_limits - adjust queue_limits for stacked devices
433  * @t:	the stacking driver limits (top device)
434  * @b:  the underlying queue limits (bottom, component device)
435  * @start:  first data sector within component device
436  *
437  * Description:
438  *    This function is used by stacking drivers like MD and DM to ensure
439  *    that all component devices have compatible block sizes and
440  *    alignments.  The stacking driver must provide a queue_limits
441  *    struct (top) and then iteratively call the stacking function for
442  *    all component (bottom) devices.  The stacking function will
443  *    attempt to combine the values and ensure proper alignment.
444  *
445  *    Returns 0 if the top and bottom queue_limits are compatible.  The
446  *    top device's block sizes and alignment offsets may be adjusted to
447  *    ensure alignment with the bottom device. If no compatible sizes
448  *    and alignments exist, -1 is returned and the resulting top
449  *    queue_limits will have the misaligned flag set to indicate that
450  *    the alignment_offset is undefined.
451  */
452 int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
453 		     sector_t start)
454 {
455 	unsigned int top, bottom, alignment, ret = 0;
456 
457 	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
458 	t->max_user_sectors = min_not_zero(t->max_user_sectors,
459 			b->max_user_sectors);
460 	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
461 	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
462 	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
463 					b->max_write_zeroes_sectors);
464 	t->max_zone_append_sectors = min(queue_limits_max_zone_append_sectors(t),
465 					 queue_limits_max_zone_append_sectors(b));
466 	t->bounce = max(t->bounce, b->bounce);
467 
468 	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
469 					    b->seg_boundary_mask);
470 	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
471 					    b->virt_boundary_mask);
472 
473 	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
474 	t->max_discard_segments = min_not_zero(t->max_discard_segments,
475 					       b->max_discard_segments);
476 	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
477 						 b->max_integrity_segments);
478 
479 	t->max_segment_size = min_not_zero(t->max_segment_size,
480 					   b->max_segment_size);
481 
482 	t->misaligned |= b->misaligned;
483 
484 	alignment = queue_limit_alignment_offset(b, start);
485 
486 	/* Bottom device has different alignment.  Check that it is
487 	 * compatible with the current top alignment.
488 	 */
489 	if (t->alignment_offset != alignment) {
490 
491 		top = max(t->physical_block_size, t->io_min)
492 			+ t->alignment_offset;
493 		bottom = max(b->physical_block_size, b->io_min) + alignment;
494 
495 		/* Verify that top and bottom intervals line up */
496 		if (max(top, bottom) % min(top, bottom)) {
497 			t->misaligned = 1;
498 			ret = -1;
499 		}
500 	}
501 
502 	t->logical_block_size = max(t->logical_block_size,
503 				    b->logical_block_size);
504 
505 	t->physical_block_size = max(t->physical_block_size,
506 				     b->physical_block_size);
507 
508 	t->io_min = max(t->io_min, b->io_min);
509 	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
510 	t->dma_alignment = max(t->dma_alignment, b->dma_alignment);
511 
512 	/* Set non-power-of-2 compatible chunk_sectors boundary */
513 	if (b->chunk_sectors)
514 		t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);
515 
516 	/* Physical block size a multiple of the logical block size? */
517 	if (t->physical_block_size & (t->logical_block_size - 1)) {
518 		t->physical_block_size = t->logical_block_size;
519 		t->misaligned = 1;
520 		ret = -1;
521 	}
522 
523 	/* Minimum I/O a multiple of the physical block size? */
524 	if (t->io_min & (t->physical_block_size - 1)) {
525 		t->io_min = t->physical_block_size;
526 		t->misaligned = 1;
527 		ret = -1;
528 	}
529 
530 	/* Optimal I/O a multiple of the physical block size? */
531 	if (t->io_opt & (t->physical_block_size - 1)) {
532 		t->io_opt = 0;
533 		t->misaligned = 1;
534 		ret = -1;
535 	}
536 
537 	/* chunk_sectors a multiple of the physical block size? */
538 	if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
539 		t->chunk_sectors = 0;
540 		t->misaligned = 1;
541 		ret = -1;
542 	}
543 
544 	t->raid_partial_stripes_expensive =
545 		max(t->raid_partial_stripes_expensive,
546 		    b->raid_partial_stripes_expensive);
547 
548 	/* Find lowest common alignment_offset */
549 	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
550 		% max(t->physical_block_size, t->io_min);
551 
552 	/* Verify that new alignment_offset is on a logical block boundary */
553 	if (t->alignment_offset & (t->logical_block_size - 1)) {
554 		t->misaligned = 1;
555 		ret = -1;
556 	}
557 
558 	t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
559 	t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
560 	t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);
561 
562 	/* Discard alignment and granularity */
563 	if (b->discard_granularity) {
564 		alignment = queue_limit_discard_alignment(b, start);
565 
566 		if (t->discard_granularity != 0 &&
567 		    t->discard_alignment != alignment) {
568 			top = t->discard_granularity + t->discard_alignment;
569 			bottom = b->discard_granularity + alignment;
570 
571 			/* Verify that top and bottom intervals line up */
572 			if ((max(top, bottom) % min(top, bottom)) != 0)
573 				t->discard_misaligned = 1;
574 		}
575 
576 		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
577 						      b->max_discard_sectors);
578 		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
579 							 b->max_hw_discard_sectors);
580 		t->discard_granularity = max(t->discard_granularity,
581 					     b->discard_granularity);
582 		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
583 			t->discard_granularity;
584 	}
585 	t->max_secure_erase_sectors = min_not_zero(t->max_secure_erase_sectors,
586 						   b->max_secure_erase_sectors);
587 	t->zone_write_granularity = max(t->zone_write_granularity,
588 					b->zone_write_granularity);
589 	t->zoned = max(t->zoned, b->zoned);
590 	if (!t->zoned) {
591 		t->zone_write_granularity = 0;
592 		t->max_zone_append_sectors = 0;
593 	}
594 	return ret;
595 }
596 EXPORT_SYMBOL(blk_stack_limits);
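/*
 * Illustrative sketch (not part of this file): a hypothetical stacking
 * driver folding in a single component device would pass the offset of the
 * component's data area from the start of the underlying disk and warn on
 * misalignment:
 *
 *	if (blk_stack_limits(&t, &bdev_get_queue(bdev)->limits,
 *			     get_start_sect(bdev) + data_offset))
 *		pr_warn("%s: device %pg is misaligned\n", name, bdev);
 *
 * data_offset and name are placeholders; queue_limits_stack_bdev() below
 * wraps this pattern.
 */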
597 
598 /**
599  * queue_limits_stack_bdev - adjust queue_limits for stacked devices
600  * @t:	the stacking driver limits (top device)
601  * @bdev:  the underlying block device (bottom)
602  * @offset:  offset to beginning of data within component device
603  * @pfx: prefix to use for warnings logged
604  *
605  * Description:
606  *    This function is used by stacking drivers like MD and DM to ensure
607  *    that all component devices have compatible block sizes and
608  *    alignments.  The stacking driver must provide a queue_limits
609  *    struct (top) and then iteratively call the stacking function for
610  *    all component (bottom) devices.  The stacking function will
611  *    attempt to combine the values and ensure proper alignment.
612  */
613 void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
614 		sector_t offset, const char *pfx)
615 {
616 	if (blk_stack_limits(t, &bdev_get_queue(bdev)->limits,
617 			get_start_sect(bdev) + offset))
618 		pr_notice("%s: Warning: Device %pg is misaligned\n",
619 			pfx, bdev);
620 }
621 EXPORT_SYMBOL_GPL(queue_limits_stack_bdev);
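/*
 * Illustrative sketch (not part of this file): a hypothetical stacking
 * driver combining several component block devices might do, roughly:
 *
 *	blk_set_stacking_limits(&lim);
 *	for (i = 0; i < nr_components; i++)
 *		queue_limits_stack_bdev(&lim, component_bdev[i], 0,
 *					disk->disk_name);
 *
 * lim, nr_components, component_bdev[] and disk are placeholders; @offset
 * is normally the offset of the data area within the component device.
 */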
622 
623 /**
624  * queue_limits_stack_integrity - stack integrity profile
625  * @t: target queue limits
626  * @b: base queue limits
627  *
628  * Check if the integrity profile in @b can be stacked into the
629  * target @t.  Stacking is possible if either:
630  *
631  *   a) @t does not have any integrity information stacked into it yet, or
632  *   b) the integrity profile in @b is identical to the one in @t
633  *
634  * If @b can be stacked into @t, return %true.  Else return %false and clear the
635  * integrity information in @t.
636  */
637 bool queue_limits_stack_integrity(struct queue_limits *t,
638 		struct queue_limits *b)
639 {
640 	struct blk_integrity *ti = &t->integrity;
641 	struct blk_integrity *bi = &b->integrity;
642 
643 	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
644 		return true;
645 
646 	if (!ti->tuple_size) {
647 		/* inherit the settings from the first underlying device */
648 		if (!(ti->flags & BLK_INTEGRITY_STACKED)) {
649 			ti->flags = BLK_INTEGRITY_DEVICE_CAPABLE |
650 				(bi->flags & BLK_INTEGRITY_REF_TAG);
651 			ti->csum_type = bi->csum_type;
652 			ti->tuple_size = bi->tuple_size;
653 			ti->pi_offset = bi->pi_offset;
654 			ti->interval_exp = bi->interval_exp;
655 			ti->tag_size = bi->tag_size;
656 			goto done;
657 		}
658 		if (!bi->tuple_size)
659 			goto done;
660 	}
661 
662 	if (ti->tuple_size != bi->tuple_size)
663 		goto incompatible;
664 	if (ti->interval_exp != bi->interval_exp)
665 		goto incompatible;
666 	if (ti->tag_size != bi->tag_size)
667 		goto incompatible;
668 	if (ti->csum_type != bi->csum_type)
669 		goto incompatible;
670 	if ((ti->flags & BLK_INTEGRITY_REF_TAG) !=
671 	    (bi->flags & BLK_INTEGRITY_REF_TAG))
672 		goto incompatible;
673 
674 done:
675 	ti->flags |= BLK_INTEGRITY_STACKED;
676 	return true;
677 
678 incompatible:
679 	memset(ti, 0, sizeof(*ti));
680 	return false;
681 }
682 EXPORT_SYMBOL_GPL(queue_limits_stack_integrity);
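/*
 * Illustrative sketch (not part of this file): a hypothetical stacking
 * driver stacks the integrity profile alongside the data limits and falls
 * back to "no integrity" when the components disagree:
 *
 *	if (!queue_limits_stack_integrity(&t, &bdev_get_queue(bdev)->limits))
 *		pr_warn("%s: mismatched integrity profiles, PI disabled\n",
 *			name);
 *
 * name is a placeholder.  On failure the integrity information in @t has
 * already been cleared, so the caller only needs to report it.
 */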
683 
684 /**
685  * blk_queue_update_dma_pad - update pad mask
686  * @q:     the request queue for the device
687  * @mask:  pad mask
688  *
689  * Update dma pad mask.
690  *
691  * Appending a pad buffer to a request modifies the last entry of the
692  * scatter list such that it includes the pad buffer.
693  **/
694 void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
695 {
696 	if (mask > q->dma_pad_mask)
697 		q->dma_pad_mask = mask;
698 }
699 EXPORT_SYMBOL(blk_queue_update_dma_pad);
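/*
 * Illustrative sketch (not part of this file): a hypothetical driver whose
 * DMA engine requires transfer lengths padded to a 4-byte multiple would
 * set a pad mask of 3:
 *
 *	blk_queue_update_dma_pad(q, 3);
 *
 * The helper only ever grows the existing mask, so stacked callers cannot
 * accidentally relax a stricter requirement.
 */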
700 
701 /**
702  * blk_set_queue_depth - tell the block layer about the device queue depth
703  * @q:		the request queue for the device
704  * @depth:		queue depth
705  *
706  */
707 void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
708 {
709 	q->queue_depth = depth;
710 	rq_qos_queue_depth_changed(q);
711 }
712 EXPORT_SYMBOL(blk_set_queue_depth);
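/*
 * Illustrative sketch (not part of this file): a hypothetical driver whose
 * hardware supports a fixed number of outstanding commands reports it once
 * the value is known:
 *
 *	blk_set_queue_depth(q, hw_queue_depth);
 *
 * hw_queue_depth is a placeholder.  The call also notifies rq-qos policies
 * such as writeback throttling that the depth changed.
 */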
713 
714 /**
715  * blk_queue_write_cache - configure queue's write cache
716  * @q:		the request queue for the device
717  * @wc:		write-back cache on or off
718  * @fua:	device supports FUA writes, if true
719  *
720  * Tell the block layer about the write cache of @q.
721  */
722 void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
723 {
724 	if (wc) {
725 		blk_queue_flag_set(QUEUE_FLAG_HW_WC, q);
726 		blk_queue_flag_set(QUEUE_FLAG_WC, q);
727 	} else {
728 		blk_queue_flag_clear(QUEUE_FLAG_HW_WC, q);
729 		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
730 	}
731 	if (fua)
732 		blk_queue_flag_set(QUEUE_FLAG_FUA, q);
733 	else
734 		blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
735 }
736 EXPORT_SYMBOL_GPL(blk_queue_write_cache);
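/*
 * Illustrative sketch (not part of this file): a hypothetical driver would
 * derive both flags from what the device reports:
 *
 *	blk_queue_write_cache(q, dev_has_volatile_cache, dev_supports_fua);
 *
 * Both arguments are placeholders for device-reported capabilities.  With
 * @wc false the block layer treats the device as having no volatile write
 * cache and does not send it cache flushes.
 */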
737 
738 int bdev_alignment_offset(struct block_device *bdev)
739 {
740 	struct request_queue *q = bdev_get_queue(bdev);
741 
742 	if (q->limits.misaligned)
743 		return -1;
744 	if (bdev_is_partition(bdev))
745 		return queue_limit_alignment_offset(&q->limits,
746 				bdev->bd_start_sect);
747 	return q->limits.alignment_offset;
748 }
749 EXPORT_SYMBOL_GPL(bdev_alignment_offset);
750 
751 unsigned int bdev_discard_alignment(struct block_device *bdev)
752 {
753 	struct request_queue *q = bdev_get_queue(bdev);
754 
755 	if (bdev_is_partition(bdev))
756 		return queue_limit_discard_alignment(&q->limits,
757 				bdev->bd_start_sect);
758 	return q->limits.discard_alignment;
759 }
760 EXPORT_SYMBOL_GPL(bdev_discard_alignment);
761