xref: /linux/block/blk-settings.c (revision b615879dbfea6cf1236acbc3f2fb25ae84e07071)
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blk-integrity.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>
#include <linux/t10-pi.h>
#include <linux/crc64.h>

#include "blk.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	WRITE_ONCE(q->rq_timeout, timeout);
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim:  the queue_limits structure to reset
 *
 * Prepare queue limits for applying limits from underlying devices using
 * blk_stack_limits().
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
	memset(lim, 0, sizeof(*lim));
	lim->logical_block_size = SECTOR_SIZE;
	lim->physical_block_size = SECTOR_SIZE;
	lim->io_min = SECTOR_SIZE;
	lim->discard_granularity = SECTOR_SIZE;
	lim->dma_alignment = SECTOR_SIZE - 1;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;

	/* Inherit limits from component devices */
	lim->max_segments = USHRT_MAX;
	lim->max_discard_segments = USHRT_MAX;
	lim->max_hw_sectors = UINT_MAX;
	lim->max_segment_size = UINT_MAX;
	lim->max_sectors = UINT_MAX;
	lim->max_dev_sectors = UINT_MAX;
	lim->max_write_zeroes_sectors = UINT_MAX;
	lim->max_hw_wzeroes_unmap_sectors = UINT_MAX;
	lim->max_user_wzeroes_unmap_sectors = UINT_MAX;
	lim->max_hw_zone_append_sectors = UINT_MAX;
	lim->max_user_discard_sectors = UINT_MAX;
	lim->atomic_write_hw_max = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);
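
/*
 * Example (illustrative sketch, not code from this file): a stacking driver
 * prepares its limits with blk_set_stacking_limits() and then narrows them
 * with the limits of each bottom device via blk_stack_limits();
 * "bottom_lim" and "start_sector" below are hypothetical:
 *
 *	struct queue_limits lim;
 *
 *	blk_set_stacking_limits(&lim);
 *	if (blk_stack_limits(&lim, &bottom_lim, start_sector) < 0)
 *		pr_warn("device is misaligned\n");
 */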

void blk_apply_bdi_limits(struct backing_dev_info *bdi,
		struct queue_limits *lim)
{
	u64 io_opt = lim->io_opt;

	/*
	 * For read-ahead of large files to be effective, we need to read ahead
	 * at least twice the optimal I/O size. For rotational devices that do
	 * not report an optimal I/O size (e.g. ATA HDDs), use the maximum I/O
	 * size to avoid falling back to the (rather inefficient) small default
	 * read-ahead size.
	 *
	 * There is no hardware limitation for the read-ahead size and the user
	 * might have increased the read-ahead size through sysfs, so don't ever
	 * decrease it.
	 */
	if (!io_opt && (lim->features & BLK_FEAT_ROTATIONAL))
		io_opt = (u64)lim->max_sectors << SECTOR_SHIFT;

	bdi->ra_pages = max3(bdi->ra_pages,
				io_opt * 2 >> PAGE_SHIFT,
				VM_READAHEAD_PAGES);
	bdi->io_pages = lim->max_sectors >> PAGE_SECTORS_SHIFT;
}
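
/*
 * Worked example (numbers illustrative, assuming 4 KiB pages): a rotational
 * disk reporting io_opt = 0 and max_sectors = 1280 gets io_opt = 1280 << 9
 * = 640 KiB above, so ra_pages is raised to at least (2 * 640 KiB) / 4 KiB
 * = 320 pages instead of the small VM_READAHEAD_PAGES default.
 */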

static int blk_validate_zoned_limits(struct queue_limits *lim)
{
	if (!(lim->features & BLK_FEAT_ZONED)) {
		if (WARN_ON_ONCE(lim->max_open_zones) ||
		    WARN_ON_ONCE(lim->max_active_zones) ||
		    WARN_ON_ONCE(lim->zone_write_granularity) ||
		    WARN_ON_ONCE(lim->max_zone_append_sectors))
			return -EINVAL;
		return 0;
	}

	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED)))
		return -EINVAL;

	/*
	 * Given that active zones include open zones, the maximum number of
	 * open zones cannot be larger than the maximum number of active zones.
	 */
	if (lim->max_active_zones &&
	    lim->max_open_zones > lim->max_active_zones)
		return -EINVAL;

	if (lim->zone_write_granularity < lim->logical_block_size)
		lim->zone_write_granularity = lim->logical_block_size;

	/*
	 * The Zone Append size is limited by the maximum I/O size and the zone
	 * size, given that it can't span zones.
	 *
	 * If no max_hw_zone_append_sectors limit is provided, the block layer
	 * will emulate it, otherwise we're also bound by the hardware limit.
	 */
	lim->max_zone_append_sectors =
		min_not_zero(lim->max_hw_zone_append_sectors,
			min(lim->chunk_sectors, lim->max_hw_sectors));
	return 0;
}
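
/*
 * Worked example of the cap above (numbers illustrative): with
 * chunk_sectors = 524288 (a 256 MiB zone), max_hw_sectors = 2048 and no
 * hardware Zone Append support (max_hw_zone_append_sectors == 0),
 * min_not_zero() falls back to min(524288, 2048), so
 * max_zone_append_sectors = 2048 and all Zone Append I/O is emulated.
 */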

static int blk_validate_integrity_limits(struct queue_limits *lim)
{
	struct blk_integrity *bi = &lim->integrity;

	if (!bi->metadata_size) {
		if (bi->csum_type != BLK_INTEGRITY_CSUM_NONE ||
		    bi->tag_size || (bi->flags & BLK_INTEGRITY_REF_TAG)) {
			pr_warn("invalid PI settings.\n");
			return -EINVAL;
		}
		bi->flags |= BLK_INTEGRITY_NOGENERATE | BLK_INTEGRITY_NOVERIFY;
		return 0;
	}

	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) {
		pr_warn("integrity support disabled.\n");
		return -EINVAL;
	}

	if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE &&
	    (bi->flags & BLK_INTEGRITY_REF_TAG)) {
		pr_warn("ref tag not supported without checksum.\n");
		return -EINVAL;
	}

	if (bi->pi_tuple_size > bi->metadata_size) {
		pr_warn("pi_tuple_size (%u) exceeds metadata_size (%u)\n",
			 bi->pi_tuple_size,
			 bi->metadata_size);
		return -EINVAL;
	}

	switch (bi->csum_type) {
	case BLK_INTEGRITY_CSUM_NONE:
		if (bi->pi_tuple_size) {
			pr_warn("pi_tuple_size must be 0 when checksum type is none\n");
			return -EINVAL;
		}
		break;
	case BLK_INTEGRITY_CSUM_CRC:
	case BLK_INTEGRITY_CSUM_IP:
		if (bi->pi_tuple_size != sizeof(struct t10_pi_tuple)) {
			pr_warn("pi_tuple_size mismatch for T10 PI: expected %zu, got %u\n",
				 sizeof(struct t10_pi_tuple),
				 bi->pi_tuple_size);
			return -EINVAL;
		}
		break;
	case BLK_INTEGRITY_CSUM_CRC64:
		if (bi->pi_tuple_size != sizeof(struct crc64_pi_tuple)) {
			pr_warn("pi_tuple_size mismatch for CRC64 PI: expected %zu, got %u\n",
				 sizeof(struct crc64_pi_tuple),
				 bi->pi_tuple_size);
			return -EINVAL;
		}
		break;
	}

	if (!bi->interval_exp)
		bi->interval_exp = ilog2(lim->logical_block_size);

	return 0;
}

/*
 * Returns the maximum guaranteed number of bytes which we can fit in a bio.
 *
 * An atomic write is required to be submitted as a single-vector ITER_UBUF
 * iov_iter, so we may assume that every segment apart from the first and
 * last can fit at least PAGE_SIZE.
 */
static unsigned int blk_queue_max_guaranteed_bio(struct queue_limits *lim)
{
	unsigned int max_segments = min(BIO_MAX_VECS, lim->max_segments);
	unsigned int length;

	length = min(max_segments, 2) * lim->logical_block_size;
	if (max_segments > 2)
		length += (max_segments - 2) * PAGE_SIZE;

	return length;
}
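
/*
 * Worked example (illustrative): with max_segments = 4, a 512 byte logical
 * block size and 4 KiB pages, the guaranteed fit is 2 * 512 bytes for the
 * first and last segments plus 2 * 4 KiB for the two middle ones,
 * i.e. 9 KiB in total.
 */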

static void blk_atomic_writes_update_limits(struct queue_limits *lim)
{
	unsigned int unit_limit = min(lim->max_hw_sectors << SECTOR_SHIFT,
					blk_queue_max_guaranteed_bio(lim));

	unit_limit = rounddown_pow_of_two(unit_limit);

	lim->atomic_write_max_sectors =
		min(lim->atomic_write_hw_max >> SECTOR_SHIFT,
			lim->max_hw_sectors);
	lim->atomic_write_unit_min =
		min(lim->atomic_write_hw_unit_min, unit_limit);
	lim->atomic_write_unit_max =
		min(lim->atomic_write_hw_unit_max, unit_limit);
	lim->atomic_write_boundary_sectors =
		lim->atomic_write_hw_boundary >> SECTOR_SHIFT;
}

/*
 * Test whether the atomic write boundary is aligned with the chunk size.
 * Stacked devices store any stripe size in t->chunk_sectors.
 */
static bool blk_valid_atomic_writes_boundary(unsigned int chunk_sectors,
					unsigned int boundary_sectors)
{
	if (!chunk_sectors || !boundary_sectors)
		return true;

	if (boundary_sectors > chunk_sectors &&
	    boundary_sectors % chunk_sectors)
		return false;

	if (chunk_sectors > boundary_sectors &&
	    chunk_sectors % boundary_sectors)
		return false;

	return true;
}
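
/*
 * Examples (illustrative): chunk_sectors = 128 with boundary_sectors = 32
 * passes (128 % 32 == 0), and so does the reverse ratio; chunk_sectors = 96
 * with boundary_sectors = 64 is rejected, since neither value divides the
 * other.
 */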

static void blk_validate_atomic_write_limits(struct queue_limits *lim)
{
	unsigned int boundary_sectors;
	unsigned int atomic_write_hw_max_sectors =
			lim->atomic_write_hw_max >> SECTOR_SHIFT;

	if (!(lim->features & BLK_FEAT_ATOMIC_WRITES))
		goto unsupported;

	/* UINT_MAX indicates stacked limits in initial state */
	if (lim->atomic_write_hw_max == UINT_MAX)
		goto unsupported;

	if (!lim->atomic_write_hw_max)
		goto unsupported;

	if (WARN_ON_ONCE(!is_power_of_2(lim->atomic_write_hw_unit_min)))
		goto unsupported;

	if (WARN_ON_ONCE(!is_power_of_2(lim->atomic_write_hw_unit_max)))
		goto unsupported;

	if (WARN_ON_ONCE(lim->atomic_write_hw_unit_min >
			 lim->atomic_write_hw_unit_max))
		goto unsupported;

	if (WARN_ON_ONCE(lim->atomic_write_hw_unit_max >
			 lim->atomic_write_hw_max))
		goto unsupported;

	if (WARN_ON_ONCE(lim->chunk_sectors &&
			atomic_write_hw_max_sectors > lim->chunk_sectors))
		goto unsupported;

	boundary_sectors = lim->atomic_write_hw_boundary >> SECTOR_SHIFT;

	if (boundary_sectors) {
		if (WARN_ON_ONCE(lim->atomic_write_hw_max >
				 lim->atomic_write_hw_boundary))
			goto unsupported;

		if (WARN_ON_ONCE(!blk_valid_atomic_writes_boundary(
			lim->chunk_sectors, boundary_sectors)))
			goto unsupported;

		/*
		 * The boundary size just needs to be a multiple of unit_max
		 * (and not necessarily a power-of-2), so the following check
		 * could be relaxed in the future.
		 * Furthermore, if needed, unit_max could even be reduced so
		 * that it is compliant with a !power-of-2 boundary.
		 */
		if (!is_power_of_2(boundary_sectors))
			goto unsupported;
	}

	blk_atomic_writes_update_limits(lim);
	return;

unsupported:
	lim->atomic_write_max_sectors = 0;
	lim->atomic_write_boundary_sectors = 0;
	lim->atomic_write_unit_min = 0;
	lim->atomic_write_unit_max = 0;
}

/*
 * Check that the limits in lim are valid, initialize defaults for unset
 * values, and cap values based on others where needed.
 */
int blk_validate_limits(struct queue_limits *lim)
{
	unsigned int max_hw_sectors;
	unsigned int logical_block_sectors;
	unsigned long seg_size;
	int err;

	/*
	 * Unless otherwise specified, default to 512 byte logical blocks and a
	 * physical block size equal to the logical block size.
	 */
	if (!lim->logical_block_size)
		lim->logical_block_size = SECTOR_SIZE;
	else if (blk_validate_block_size(lim->logical_block_size)) {
		pr_warn("Invalid logical block size (%d)\n", lim->logical_block_size);
		return -EINVAL;
	}
	if (lim->physical_block_size < lim->logical_block_size) {
		lim->physical_block_size = lim->logical_block_size;
	} else if (!is_power_of_2(lim->physical_block_size)) {
		pr_warn("Invalid physical block size (%d)\n", lim->physical_block_size);
		return -EINVAL;
	}

	/*
	 * The minimum I/O size defaults to the physical block size unless
	 * explicitly overridden.
	 */
	if (lim->io_min < lim->physical_block_size)
		lim->io_min = lim->physical_block_size;

	/*
	 * The optimal I/O size may not be aligned to the physical block size
	 * (because it may be limited by DMA engines which have no clue about
	 * the block size of the disks attached to them), so round it down here.
	 */
	lim->io_opt = round_down(lim->io_opt, lim->physical_block_size);

	/*
	 * max_hw_sectors has a somewhat weird default for historical reasons,
	 * but drivers really should set their own instead of relying on this
	 * value.
	 *
	 * The block layer relies on the fact that every driver can
	 * handle at least a page worth of data per I/O, and needs the value
	 * aligned to the logical block size.
	 */
	if (!lim->max_hw_sectors)
		lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
	if (WARN_ON_ONCE(lim->max_hw_sectors < PAGE_SECTORS))
		return -EINVAL;
	logical_block_sectors = lim->logical_block_size >> SECTOR_SHIFT;
	if (WARN_ON_ONCE(logical_block_sectors > lim->max_hw_sectors))
		return -EINVAL;
	lim->max_hw_sectors = round_down(lim->max_hw_sectors,
			logical_block_sectors);

	/*
	 * The actual max_sectors value is a complex beast and also takes the
	 * max_dev_sectors value (set by SCSI ULPs) and a user configurable
	 * value into account.  The ->max_sectors value is always calculated
	 * from these, so directly setting it won't have any effect.
	 */
	max_hw_sectors = min_not_zero(lim->max_hw_sectors,
				lim->max_dev_sectors);
	if (lim->max_user_sectors) {
		if (lim->max_user_sectors < BLK_MIN_SEGMENT_SIZE / SECTOR_SIZE)
			return -EINVAL;
		lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
	} else if (lim->io_opt > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
		lim->max_sectors =
			min(max_hw_sectors, lim->io_opt >> SECTOR_SHIFT);
	} else if (lim->io_min > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
		lim->max_sectors =
			min(max_hw_sectors, lim->io_min >> SECTOR_SHIFT);
	} else {
		lim->max_sectors = min(max_hw_sectors, BLK_DEF_MAX_SECTORS_CAP);
	}
	lim->max_sectors = round_down(lim->max_sectors,
			logical_block_sectors);

	/*
	 * Arbitrary default for the maximum number of segments.  Drivers
	 * should not rely on this and should set their own.
	 */
	if (!lim->max_segments)
		lim->max_segments = BLK_MAX_SEGMENTS;

	if (lim->max_hw_wzeroes_unmap_sectors &&
	    lim->max_hw_wzeroes_unmap_sectors != lim->max_write_zeroes_sectors)
		return -EINVAL;
	lim->max_wzeroes_unmap_sectors = min(lim->max_hw_wzeroes_unmap_sectors,
			lim->max_user_wzeroes_unmap_sectors);

	lim->max_discard_sectors =
		min(lim->max_hw_discard_sectors, lim->max_user_discard_sectors);

	/*
	 * When discard is not supported, discard_granularity should be
	 * reported as 0 to userspace.
	 */
	if (lim->max_discard_sectors)
		lim->discard_granularity =
			max(lim->discard_granularity, lim->physical_block_size);
	else
		lim->discard_granularity = 0;

	if (!lim->max_discard_segments)
		lim->max_discard_segments = 1;

	/*
	 * By default there is no limit on the segment boundary alignment,
	 * but if there is one it can't be smaller than the page size as
	 * that would break all the normal I/O patterns.
	 */
	if (!lim->seg_boundary_mask)
		lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	if (WARN_ON_ONCE(lim->seg_boundary_mask < BLK_MIN_SEGMENT_SIZE - 1))
		return -EINVAL;

	/*
	 * A stacking device may have both a virtual boundary and a max
	 * segment size limit, so allow this setting for now.  Long-term the
	 * two might need to move out of the stacking limits, since we have
	 * immutable bvecs and lower layer bio splitting is supposed to handle
	 * both correctly.
	 */
	if (lim->virt_boundary_mask) {
		if (!lim->max_segment_size)
			lim->max_segment_size = UINT_MAX;
	} else {
		/*
		 * The maximum segment size has an odd historic 64k default that
		 * drivers probably should override.  Just like the I/O size we
		 * require drivers to at least handle a full page per segment.
		 */
		if (!lim->max_segment_size)
			lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
		if (WARN_ON_ONCE(lim->max_segment_size < BLK_MIN_SEGMENT_SIZE))
			return -EINVAL;
	}

	/* setup min segment size for building new segment in fast path */
	if (lim->seg_boundary_mask > lim->max_segment_size - 1)
		seg_size = lim->max_segment_size;
	else
		seg_size = lim->seg_boundary_mask + 1;
	lim->min_segment_size = min_t(unsigned int, seg_size, PAGE_SIZE);

	/*
	 * We require drivers to at least do logical block aligned I/O, but
	 * historically could not check for that due to the separate calls
	 * to set the limits.  Once the transition is finished the check
	 * below should be narrowed down to check the logical block size.
	 */
	if (!lim->dma_alignment)
		lim->dma_alignment = SECTOR_SIZE - 1;
	if (WARN_ON_ONCE(lim->dma_alignment > PAGE_SIZE))
		return -EINVAL;

	if (lim->alignment_offset) {
		lim->alignment_offset &= (lim->physical_block_size - 1);
		lim->flags &= ~BLK_FLAG_MISALIGNED;
	}

	if (!(lim->features & BLK_FEAT_WRITE_CACHE))
		lim->features &= ~BLK_FEAT_FUA;

	blk_validate_atomic_write_limits(lim);

	err = blk_validate_integrity_limits(lim);
	if (err)
		return err;
	return blk_validate_zoned_limits(lim);
}
EXPORT_SYMBOL_GPL(blk_validate_limits);
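
/*
 * Drivers normally don't call blk_validate_limits() directly: limits passed
 * to blk_mq_alloc_disk() or committed via queue_limits_commit_update() are
 * validated on those paths.  A minimal sketch of a driver supplying limits
 * at queue creation (the values and the tag_set are illustrative only):
 *
 *	struct queue_limits lim = {
 *		.logical_block_size	= 4096,
 *		.max_hw_sectors		= 256,
 *	};
 *	struct gendisk *disk;
 *
 *	disk = blk_mq_alloc_disk(&set, &lim, NULL);
 */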

/*
 * Set the default limits for a newly allocated queue.  @lim contains the
 * initial limits set by the driver, which may be empty, in which case all
 * fields are cleared to zero.
 */
int blk_set_default_limits(struct queue_limits *lim)
{
	/*
	 * Most defaults are set by capping the bounds in blk_validate_limits,
	 * but these limits are special and need an explicit initialization to
	 * the max value here.
	 */
	lim->max_user_discard_sectors = UINT_MAX;
	lim->max_user_wzeroes_unmap_sectors = UINT_MAX;
	return blk_validate_limits(lim);
}

/**
 * queue_limits_commit_update - commit an atomic update of queue limits
 * @q:		queue to update
 * @lim:	limits to apply
 *
 * Apply to @q the limits in @lim that were obtained from
 * queue_limits_start_update() and updated by the caller.  The caller must
 * have frozen the queue or ensured that there are no outstanding I/Os by
 * other means.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_commit_update(struct request_queue *q,
		struct queue_limits *lim)
{
	int error;

	error = blk_validate_limits(lim);
	if (error)
		goto out_unlock;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	if (q->crypto_profile && lim->integrity.tag_size) {
		pr_warn("blk-integrity: Integrity and hardware inline encryption are not supported together.\n");
		error = -EINVAL;
		goto out_unlock;
	}
#endif

	q->limits = *lim;
	if (q->disk)
		blk_apply_bdi_limits(q->disk->bdi, lim);
out_unlock:
	mutex_unlock(&q->limits_lock);
	return error;
}
EXPORT_SYMBOL_GPL(queue_limits_commit_update);

/**
 * queue_limits_commit_update_frozen - commit an atomic update of queue limits
 * @q:		queue to update
 * @lim:	limits to apply
 *
 * Apply to @q the limits in @lim that were obtained from
 * queue_limits_start_update() and updated with the new values by the caller.
 * Freezes the queue before the update and unfreezes it after.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_commit_update_frozen(struct request_queue *q,
		struct queue_limits *lim)
{
	unsigned int memflags;
	int ret;

	memflags = blk_mq_freeze_queue(q);
	ret = queue_limits_commit_update(q, lim);
	blk_mq_unfreeze_queue(q, memflags);

	return ret;
}
EXPORT_SYMBOL_GPL(queue_limits_commit_update_frozen);
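
/*
 * Typical update pattern (sketch; the field changed and "new_max" are just
 * examples):
 *
 *	struct queue_limits lim;
 *	int err;
 *
 *	lim = queue_limits_start_update(q);
 *	lim.max_user_sectors = new_max;
 *	err = queue_limits_commit_update_frozen(q, &lim);
 *	if (err)
 *		return err;
 *
 * queue_limits_start_update() takes q->limits_lock, which the commit
 * helpers drop again, so the two calls must always be paired.
 */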

/**
 * queue_limits_set - apply queue limits to queue
 * @q:		queue to update
 * @lim:	limits to apply
 *
 * Apply the limits in @lim that were freshly initialized to @q.
 * To update existing limits use queue_limits_start_update() and
 * queue_limits_commit_update() instead.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
{
	mutex_lock(&q->limits_lock);
	return queue_limits_commit_update(q, lim);
}
EXPORT_SYMBOL_GPL(queue_limits_set);

static int queue_limit_alignment_offset(const struct queue_limits *lim,
		sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
		<< SECTOR_SHIFT;

	return (granularity + lim->alignment_offset - alignment) % granularity;
}

static unsigned int queue_limit_discard_alignment(
		const struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment, granularity, offset;

	if (!lim->max_discard_sectors)
		return 0;

	/* Why are these in bytes, not sectors? */
	alignment = lim->discard_alignment >> SECTOR_SHIFT;
	granularity = lim->discard_granularity >> SECTOR_SHIFT;

	/* Offset of the partition start in 'granularity' sectors */
	offset = sector_div(sector, granularity);

	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
	offset = (granularity + alignment - offset) % granularity;

	/* Turn it back into bytes, gaah */
	return offset << SECTOR_SHIFT;
}

static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
{
	sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
	if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
		sectors = PAGE_SIZE >> SECTOR_SHIFT;
	return sectors;
}

/* Check if second and later bottom devices are compliant */
static bool blk_stack_atomic_writes_tail(struct queue_limits *t,
				struct queue_limits *b)
{
	/* We're not going to support different boundary sizes... yet */
	if (t->atomic_write_hw_boundary != b->atomic_write_hw_boundary)
		return false;

	/* Can't support this */
	if (t->atomic_write_hw_unit_min > b->atomic_write_hw_unit_max)
		return false;

	/* Or this */
	if (t->atomic_write_hw_unit_max < b->atomic_write_hw_unit_min)
		return false;

	t->atomic_write_hw_max = min(t->atomic_write_hw_max,
				b->atomic_write_hw_max);
	t->atomic_write_hw_unit_min = max(t->atomic_write_hw_unit_min,
				b->atomic_write_hw_unit_min);
	t->atomic_write_hw_unit_max = min(t->atomic_write_hw_unit_max,
				b->atomic_write_hw_unit_max);
	return true;
}

static void blk_stack_atomic_writes_chunk_sectors(struct queue_limits *t)
{
	unsigned int chunk_bytes;

	if (!t->chunk_sectors)
		return;

	/*
	 * If chunk sectors is so large that its value in bytes overflows
	 * UINT_MAX, then just shift it down so it definitely will fit.
	 * We don't support atomic writes of such a large size anyway.
	 */
	if (check_shl_overflow(t->chunk_sectors, SECTOR_SHIFT, &chunk_bytes))
		chunk_bytes = t->chunk_sectors;

	/*
	 * Find values for the limits which work for the chunk size.
	 * b->atomic_write_hw_unit_{min, max} may not be aligned with the
	 * chunk size, as the chunk size is not restricted to a power-of-2.
	 * So we need to find the highest power-of-2 which works for the
	 * chunk size.
	 * As an example scenario, we could have t->unit_max = 16K and
	 * chunk_bytes = 24K. In this case, reduce t->unit_max to a value
	 * aligned with both limits, i.e. 8K in this example.
	 */
	t->atomic_write_hw_unit_max = min(t->atomic_write_hw_unit_max,
					max_pow_of_two_factor(chunk_bytes));

	t->atomic_write_hw_unit_min = min(t->atomic_write_hw_unit_min,
					  t->atomic_write_hw_unit_max);
	t->atomic_write_hw_max = min(t->atomic_write_hw_max, chunk_bytes);
}

/* Check stacking of first bottom device */
static bool blk_stack_atomic_writes_head(struct queue_limits *t,
				struct queue_limits *b)
{
	if (!blk_valid_atomic_writes_boundary(t->chunk_sectors,
			b->atomic_write_hw_boundary >> SECTOR_SHIFT))
		return false;

	t->atomic_write_hw_unit_max = b->atomic_write_hw_unit_max;
	t->atomic_write_hw_unit_min = b->atomic_write_hw_unit_min;
	t->atomic_write_hw_max = b->atomic_write_hw_max;
	t->atomic_write_hw_boundary = b->atomic_write_hw_boundary;
	return true;
}

static void blk_stack_atomic_writes_limits(struct queue_limits *t,
				struct queue_limits *b, sector_t start)
{
	if (!(b->features & BLK_FEAT_ATOMIC_WRITES))
		goto unsupported;

	if (!b->atomic_write_hw_unit_min)
		goto unsupported;

	if (!blk_atomic_write_start_sect_aligned(start, b))
		goto unsupported;

	/* UINT_MAX indicates no stacking of bottom devices yet */
	if (t->atomic_write_hw_max == UINT_MAX) {
		if (!blk_stack_atomic_writes_head(t, b))
			goto unsupported;
	} else {
		if (!blk_stack_atomic_writes_tail(t, b))
			goto unsupported;
	}
	blk_stack_atomic_writes_chunk_sectors(t);
	return;

unsupported:
	t->atomic_write_hw_max = 0;
	t->atomic_write_hw_unit_max = 0;
	t->atomic_write_hw_unit_min = 0;
	t->atomic_write_hw_boundary = 0;
}

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		     sector_t start)
{
	unsigned int top, bottom, alignment;
	int ret = 0;

	t->features |= (b->features & BLK_FEAT_INHERIT_MASK);

	/*
	 * Some features need to be supported both by the stacking driver and
	 * all underlying devices.  The stacking driver sets these flags before
	 * stacking the limits, and this will clear the flags if any of the
	 * underlying devices does not support them.
	 */
	if (!(b->features & BLK_FEAT_NOWAIT))
		t->features &= ~BLK_FEAT_NOWAIT;
	if (!(b->features & BLK_FEAT_POLL))
		t->features &= ~BLK_FEAT_POLL;

	t->flags |= (b->flags & BLK_FLAG_MISALIGNED);

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_user_sectors = min_not_zero(t->max_user_sectors,
			b->max_user_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
					b->max_write_zeroes_sectors);
	t->max_user_wzeroes_unmap_sectors =
			min(t->max_user_wzeroes_unmap_sectors,
			    b->max_user_wzeroes_unmap_sectors);
	t->max_hw_wzeroes_unmap_sectors =
			min(t->max_hw_wzeroes_unmap_sectors,
			    b->max_hw_wzeroes_unmap_sectors);

	t->max_hw_zone_append_sectors = min(t->max_hw_zone_append_sectors,
					b->max_hw_zone_append_sectors);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);
	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
					    b->virt_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
	t->max_discard_segments = min_not_zero(t->max_discard_segments,
					       b->max_discard_segments);
	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
						 b->max_integrity_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	alignment = queue_limit_alignment_offset(b, start);

	/*
	 * The bottom device has a different alignment.  Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) % min(top, bottom)) {
			t->flags |= BLK_FLAG_MISALIGNED;
			ret = -1;
		}
	}

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
	t->dma_alignment = max(t->dma_alignment, b->dma_alignment);

	/* Set non-power-of-2 compatible chunk_sectors boundary */
	if (b->chunk_sectors)
		t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* chunk_sectors a multiple of the physical block size? */
	if (t->chunk_sectors % (t->physical_block_size >> SECTOR_SHIFT)) {
		t->chunk_sectors = 0;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
		% max(t->physical_block_size, t->io_min);

	/* Verify that the new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
	t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
	t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
							 b->max_hw_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
			t->discard_granularity;
	}
	t->max_secure_erase_sectors = min_not_zero(t->max_secure_erase_sectors,
						   b->max_secure_erase_sectors);
	t->zone_write_granularity = max(t->zone_write_granularity,
					b->zone_write_granularity);
	if (!(t->features & BLK_FEAT_ZONED)) {
		t->zone_write_granularity = 0;
		t->max_zone_append_sectors = 0;
	}
	blk_stack_atomic_writes_limits(t, b, start);

	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);

/**
 * queue_limits_stack_bdev - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 * @pfx: prefix to use for warnings logged
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 */
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
		sector_t offset, const char *pfx)
{
	if (blk_stack_limits(t, bdev_limits(bdev),
			get_start_sect(bdev) + offset))
		pr_notice("%s: Warning: Device %pg is misaligned\n",
			pfx, bdev);
}
EXPORT_SYMBOL_GPL(queue_limits_stack_bdev);
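
/*
 * Sketch of the stacking loop described above ("table" and "ti" are
 * hypothetical stand-ins for a DM-style iteration over component devices):
 *
 *	blk_set_stacking_limits(&lim);
 *	for (i = 0; i < table->num_targets; i++) {
 *		ti = &table->targets[i];
 *		queue_limits_stack_bdev(&lim, ti->bdev, ti->offset,
 *					disk->disk_name);
 *	}
 */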

/**
 * queue_limits_stack_integrity - stack integrity profile
 * @t: target queue limits
 * @b: base queue limits
 *
 * Check if the integrity profile in @b can be stacked into the
 * target @t.  Stacking is possible if either:
 *
 *   a) @t does not have any integrity information stacked into it yet, or
 *   b) the integrity profile in @b is identical to the one in @t
 *
 * If @b can be stacked into @t, return %true.  Else return %false and clear the
 * integrity information in @t.
 */
bool queue_limits_stack_integrity(struct queue_limits *t,
		struct queue_limits *b)
{
	struct blk_integrity *ti = &t->integrity;
	struct blk_integrity *bi = &b->integrity;

	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return true;

	if (ti->flags & BLK_INTEGRITY_STACKED) {
		if (ti->metadata_size != bi->metadata_size)
			goto incompatible;
		if (ti->interval_exp != bi->interval_exp)
			goto incompatible;
		if (ti->tag_size != bi->tag_size)
			goto incompatible;
		if (ti->csum_type != bi->csum_type)
			goto incompatible;
		if (ti->pi_tuple_size != bi->pi_tuple_size)
			goto incompatible;
		if ((ti->flags & BLK_INTEGRITY_REF_TAG) !=
		    (bi->flags & BLK_INTEGRITY_REF_TAG))
			goto incompatible;
	} else {
		ti->flags = BLK_INTEGRITY_STACKED;
		ti->flags |= (bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE) |
			     (bi->flags & BLK_INTEGRITY_REF_TAG);
		ti->csum_type = bi->csum_type;
		ti->pi_tuple_size = bi->pi_tuple_size;
		ti->metadata_size = bi->metadata_size;
		ti->pi_offset = bi->pi_offset;
		ti->interval_exp = bi->interval_exp;
		ti->tag_size = bi->tag_size;
	}
	return true;

incompatible:
	memset(ti, 0, sizeof(*ti));
	return false;
}
EXPORT_SYMBOL_GPL(queue_limits_stack_integrity);

/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q:		the request queue for the device
 * @depth:	queue depth
 */
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
	q->queue_depth = depth;
	rq_qos_queue_depth_changed(q);
}
EXPORT_SYMBOL(blk_set_queue_depth);

int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.flags & BLK_FLAG_MISALIGNED)
		return -1;
	if (bdev_is_partition(bdev))
		return queue_limit_alignment_offset(&q->limits,
				bdev->bd_start_sect);
	return q->limits.alignment_offset;
}
EXPORT_SYMBOL_GPL(bdev_alignment_offset);

unsigned int bdev_discard_alignment(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (bdev_is_partition(bdev))
		return queue_limit_discard_alignment(&q->limits,
				bdev->bd_start_sect);
	return q->limits.discard_alignment;
}
EXPORT_SYMBOL_GPL(bdev_discard_alignment);
1026