xref: /linux/block/blk-settings.c (revision 6e9a12f85a7567bb9a41d5230468886bd6a27b20)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Functions related to setting various queue properties from drivers
4  */
5 #include <linux/kernel.h>
6 #include <linux/module.h>
7 #include <linux/init.h>
8 #include <linux/bio.h>
9 #include <linux/blk-integrity.h>
10 #include <linux/pagemap.h>
11 #include <linux/backing-dev-defs.h>
12 #include <linux/gcd.h>
13 #include <linux/lcm.h>
14 #include <linux/jiffies.h>
15 #include <linux/gfp.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/t10-pi.h>
18 #include <linux/crc64.h>
19 
20 #include "blk.h"
21 #include "blk-rq-qos.h"
22 #include "blk-wbt.h"
23 
24 void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
25 {
26 	WRITE_ONCE(q->rq_timeout, timeout);
27 }
28 EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
29 
30 /**
31  * blk_set_stacking_limits - set default limits for stacking devices
32  * @lim:  the queue_limits structure to reset
33  *
34  * Prepare queue limits for applying limits from underlying devices using
35  * blk_stack_limits().
36  */
37 void blk_set_stacking_limits(struct queue_limits *lim)
38 {
39 	memset(lim, 0, sizeof(*lim));
40 	lim->logical_block_size = SECTOR_SIZE;
41 	lim->physical_block_size = SECTOR_SIZE;
42 	lim->io_min = SECTOR_SIZE;
43 	lim->discard_granularity = SECTOR_SIZE;
44 	lim->dma_alignment = SECTOR_SIZE - 1;
45 	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
46 
47 	/* Inherit limits from component devices */
48 	lim->max_segments = USHRT_MAX;
49 	lim->max_discard_segments = USHRT_MAX;
50 	lim->max_hw_sectors = UINT_MAX;
51 	lim->max_segment_size = UINT_MAX;
52 	lim->max_sectors = UINT_MAX;
53 	lim->max_dev_sectors = UINT_MAX;
54 	lim->max_write_zeroes_sectors = UINT_MAX;
55 	lim->max_hw_wzeroes_unmap_sectors = UINT_MAX;
56 	lim->max_user_wzeroes_unmap_sectors = UINT_MAX;
57 	lim->max_hw_zone_append_sectors = UINT_MAX;
58 	lim->max_user_discard_sectors = UINT_MAX;
59 }
60 EXPORT_SYMBOL(blk_set_stacking_limits);
61 
62 void blk_apply_bdi_limits(struct backing_dev_info *bdi,
63 		struct queue_limits *lim)
64 {
65 	/*
66 	 * For read-ahead of large files to be effective, we need to read ahead
67 	 * at least twice the optimal I/O size.
68 	 *
69 	 * There is no hardware limitation for the read-ahead size and the user
70 	 * might have increased the read-ahead size through sysfs, so don't ever
71 	 * decrease it.
72 	 */
73 	bdi->ra_pages = max3(bdi->ra_pages,
74 				lim->io_opt * 2 / PAGE_SIZE,
75 				VM_READAHEAD_PAGES);
76 	bdi->io_pages = lim->max_sectors >> PAGE_SECTORS_SHIFT;
77 }
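
/*
 * Worked example for the read-ahead sizing above, assuming 4 KiB pages (so
 * VM_READAHEAD_PAGES = 128 KiB / 4 KiB = 32 pages):
 *
 *	lim->io_opt = 512 KiB  ->  io_opt * 2 / PAGE_SIZE = 256 pages
 *	bdi->ra_pages = max3(ra_pages, 256, 32) >= 256 (1 MiB read-ahead)
 *
 * A device without an optimal I/O size (io_opt == 0) simply keeps the larger
 * of the existing ra_pages and VM_READAHEAD_PAGES.
 */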
78 
79 static int blk_validate_zoned_limits(struct queue_limits *lim)
80 {
81 	if (!(lim->features & BLK_FEAT_ZONED)) {
82 		if (WARN_ON_ONCE(lim->max_open_zones) ||
83 		    WARN_ON_ONCE(lim->max_active_zones) ||
84 		    WARN_ON_ONCE(lim->zone_write_granularity) ||
85 		    WARN_ON_ONCE(lim->max_zone_append_sectors))
86 			return -EINVAL;
87 		return 0;
88 	}
89 
90 	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED)))
91 		return -EINVAL;
92 
93 	/*
94 	 * Given that active zones include open zones, the maximum number of
95 	 * open zones cannot be larger than the maximum number of active zones.
96 	 */
97 	if (lim->max_active_zones &&
98 	    lim->max_open_zones > lim->max_active_zones)
99 		return -EINVAL;
100 
101 	if (lim->zone_write_granularity < lim->logical_block_size)
102 		lim->zone_write_granularity = lim->logical_block_size;
103 
104 	/*
105 	 * The Zone Append size is limited by the maximum I/O size and the zone
106 	 * size given that it can't span zones.
107 	 *
108 	 * If no max_hw_zone_append_sectors limit is provided, the block layer
109 	 * will emulate it; otherwise we're also bound by the hardware limit.
110 	 */
111 	lim->max_zone_append_sectors =
112 		min_not_zero(lim->max_hw_zone_append_sectors,
113 			min(lim->chunk_sectors, lim->max_hw_sectors));
114 	return 0;
115 }
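
/*
 * Example of the Zone Append sizing above, for a zoned device with 256 MiB
 * zones (chunk_sectors = 524288), max_hw_sectors = 2048 (1 MiB) and no
 * hardware Zone Append support (max_hw_zone_append_sectors = 0):
 *
 *	max_zone_append_sectors = min_not_zero(0, min(524288, 2048)) = 2048
 *
 * i.e. Zone Append is emulated by the block layer and capped at the maximum
 * I/O size.  If the hardware reported a smaller Zone Append limit, that limit
 * would apply instead.
 */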
116 
117 static int blk_validate_integrity_limits(struct queue_limits *lim)
118 {
119 	struct blk_integrity *bi = &lim->integrity;
120 
121 	if (!bi->metadata_size) {
122 		if (bi->csum_type != BLK_INTEGRITY_CSUM_NONE ||
123 		    bi->tag_size || (bi->flags & BLK_INTEGRITY_REF_TAG)) {
124 			pr_warn("invalid PI settings.\n");
125 			return -EINVAL;
126 		}
127 		bi->flags |= BLK_INTEGRITY_NOGENERATE | BLK_INTEGRITY_NOVERIFY;
128 		return 0;
129 	}
130 
131 	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) {
132 		pr_warn("integrity support disabled.\n");
133 		return -EINVAL;
134 	}
135 
136 	if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE &&
137 	    (bi->flags & BLK_INTEGRITY_REF_TAG)) {
138 		pr_warn("ref tag not supported without checksum.\n");
139 		return -EINVAL;
140 	}
141 
142 	if (bi->pi_tuple_size > bi->metadata_size) {
143 		pr_warn("pi_tuple_size (%u) exceeds metadata_size (%u)\n",
144 			 bi->pi_tuple_size,
145 			 bi->metadata_size);
146 		return -EINVAL;
147 	}
148 
149 	switch (bi->csum_type) {
150 	case BLK_INTEGRITY_CSUM_NONE:
151 		if (bi->pi_tuple_size) {
152 			pr_warn("pi_tuple_size must be 0 when checksum type "
153 				"is none\n");
154 			return -EINVAL;
155 		}
156 		break;
157 	case BLK_INTEGRITY_CSUM_CRC:
158 	case BLK_INTEGRITY_CSUM_IP:
159 		if (bi->pi_tuple_size != sizeof(struct t10_pi_tuple)) {
160 			pr_warn("pi_tuple_size mismatch for T10 PI: expected "
161 				"%zu, got %u\n",
162 				 sizeof(struct t10_pi_tuple),
163 				 bi->pi_tuple_size);
164 			return -EINVAL;
165 		}
166 		break;
167 	case BLK_INTEGRITY_CSUM_CRC64:
168 		if (bi->pi_tuple_size != sizeof(struct crc64_pi_tuple)) {
169 			pr_warn("pi_tuple_size mismatch for CRC64 PI: "
170 				"expected %zu, got %u\n",
171 				 sizeof(struct crc64_pi_tuple),
172 				 bi->pi_tuple_size);
173 			return -EINVAL;
174 		}
175 		break;
176 	}
177 
178 	if (!bi->interval_exp)
179 		bi->interval_exp = ilog2(lim->logical_block_size);
180 
181 	return 0;
182 }
183 
184 /*
185  * Returns max guaranteed bytes which we can fit in a bio.
186  *
187  * We request that an atomic_write is an ITER_UBUF iov_iter (so a single
188  * vector), so we assume that we can fit at least PAGE_SIZE in a segment,
189  * apart from the first and last segments.
190  */
191 static unsigned int blk_queue_max_guaranteed_bio(struct queue_limits *lim)
192 {
193 	unsigned int max_segments = min(BIO_MAX_VECS, lim->max_segments);
194 	unsigned int length;
195 
196 	length = min(max_segments, 2) * lim->logical_block_size;
197 	if (max_segments > 2)
198 		length += (max_segments - 2) * PAGE_SIZE;
199 
200 	return length;
201 }
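
/*
 * Worked example for the guaranteed bio size above, assuming 4 KiB pages, a
 * 512 byte logical block size and lim->max_segments = 4:
 *
 *	length = min(4, 2) * 512 + (4 - 2) * PAGE_SIZE = 1024 + 8192 = 9216
 *
 * The first and last segments are only guaranteed one logical block each,
 * every other segment is assumed to hold at least a full page.
 */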
202 
203 static void blk_atomic_writes_update_limits(struct queue_limits *lim)
204 {
205 	unsigned int unit_limit = min(lim->max_hw_sectors << SECTOR_SHIFT,
206 					blk_queue_max_guaranteed_bio(lim));
207 
208 	unit_limit = rounddown_pow_of_two(unit_limit);
209 
210 	lim->atomic_write_max_sectors =
211 		min(lim->atomic_write_hw_max >> SECTOR_SHIFT,
212 			lim->max_hw_sectors);
213 	lim->atomic_write_unit_min =
214 		min(lim->atomic_write_hw_unit_min, unit_limit);
215 	lim->atomic_write_unit_max =
216 		min(lim->atomic_write_hw_unit_max, unit_limit);
217 	lim->atomic_write_boundary_sectors =
218 		lim->atomic_write_hw_boundary >> SECTOR_SHIFT;
219 }
220 
221 static void blk_validate_atomic_write_limits(struct queue_limits *lim)
222 {
223 	unsigned int boundary_sectors;
224 	unsigned int atomic_write_hw_max_sectors =
225 			lim->atomic_write_hw_max >> SECTOR_SHIFT;
226 
227 	if (!(lim->features & BLK_FEAT_ATOMIC_WRITES))
228 		goto unsupported;
229 
230 	if (!lim->atomic_write_hw_max)
231 		goto unsupported;
232 
233 	if (WARN_ON_ONCE(!is_power_of_2(lim->atomic_write_hw_unit_min)))
234 		goto unsupported;
235 
236 	if (WARN_ON_ONCE(!is_power_of_2(lim->atomic_write_hw_unit_max)))
237 		goto unsupported;
238 
239 	if (WARN_ON_ONCE(lim->atomic_write_hw_unit_min >
240 			 lim->atomic_write_hw_unit_max))
241 		goto unsupported;
242 
243 	if (WARN_ON_ONCE(lim->atomic_write_hw_unit_max >
244 			 lim->atomic_write_hw_max))
245 		goto unsupported;
246 
247 	if (WARN_ON_ONCE(lim->chunk_sectors &&
248 			atomic_write_hw_max_sectors > lim->chunk_sectors))
249 		goto unsupported;
250 
251 	boundary_sectors = lim->atomic_write_hw_boundary >> SECTOR_SHIFT;
252 
253 	if (boundary_sectors) {
254 		if (WARN_ON_ONCE(lim->atomic_write_hw_max >
255 				 lim->atomic_write_hw_boundary))
256 			goto unsupported;
257 		/*
258 		 * A feature of boundary support is that it disallows merging
259 		 * bios when the resulting request would cross either a chunk
260 		 * sector or atomic write HW boundary, even though chunk
261 		 * sectors may just be set for performance reasons.
262 		 * For simplicity, disallow atomic writes when the chunk sector
263 		 * size is non-zero and smaller than the atomic write HW
264 		 * boundary.  Furthermore, chunk sectors must be a multiple of
265 		 * the atomic write HW boundary; otherwise boundary support
266 		 * becomes complicated.
267 		 * Devices which do not conform to these rules can be dealt
268 		 * with if and when they show up.
269 		 */
270 		if (WARN_ON_ONCE(lim->chunk_sectors % boundary_sectors))
271 			goto unsupported;
272 
273 		/*
274 		 * The boundary size just needs to be a multiple of unit_max
275 		 * (and not necessarily a power-of-2), so this following check
276 		 * could be relaxed in future.
277 		 * Furthermore, if needed, unit_max could even be reduced so
278 		 * that it is compliant with a !power-of-2 boundary.
279 		 */
280 		if (!is_power_of_2(boundary_sectors))
281 			goto unsupported;
282 	}
283 
284 	blk_atomic_writes_update_limits(lim);
285 	return;
286 
287 unsupported:
288 	lim->atomic_write_max_sectors = 0;
289 	lim->atomic_write_boundary_sectors = 0;
290 	lim->atomic_write_unit_min = 0;
291 	lim->atomic_write_unit_max = 0;
292 }
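
/*
 * Example of the atomic write validation above, for a device reporting
 * atomic_write_hw_unit_min = 4 KiB, atomic_write_hw_unit_max = 64 KiB,
 * atomic_write_hw_max = 64 KiB and neither a boundary nor chunk_sectors:
 * all checks pass and blk_atomic_writes_update_limits() derives
 *
 *	atomic_write_max_sectors  = min(64 KiB >> SECTOR_SHIFT, max_hw_sectors)
 *	atomic_write_unit_min/max = min(hw unit min/max, unit_limit)
 *
 * where unit_limit is the guaranteed bio size rounded down to a power of two.
 * A device whose boundary is smaller than atomic_write_hw_max, or whose units
 * are not powers of two, is treated as not supporting atomic writes at all.
 */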
293 
294 /*
295  * Check that the limits in lim are valid, initialize defaults for unset
296  * values, and cap values based on others where needed.
297  */
298 int blk_validate_limits(struct queue_limits *lim)
299 {
300 	unsigned int max_hw_sectors;
301 	unsigned int logical_block_sectors;
302 	unsigned long seg_size;
303 	int err;
304 
305 	/*
306 	 * Unless otherwise specified, default to 512 byte logical blocks and a
307 	 * physical block size equal to the logical block size.
308 	 */
309 	if (!lim->logical_block_size)
310 		lim->logical_block_size = SECTOR_SIZE;
311 	else if (blk_validate_block_size(lim->logical_block_size)) {
312 		pr_warn("Invalid logical block size (%d)\n", lim->logical_block_size);
313 		return -EINVAL;
314 	}
315 	if (lim->physical_block_size < lim->logical_block_size)
316 		lim->physical_block_size = lim->logical_block_size;
317 
318 	/*
319 	 * The minimum I/O size defaults to the physical block size unless
320 	 * explicitly overridden.
321 	 */
322 	if (lim->io_min < lim->physical_block_size)
323 		lim->io_min = lim->physical_block_size;
324 
325 	/*
326 	 * The optimal I/O size may not be aligned to physical block size
327 	 * (because it may be limited by dma engines which have no clue about
328 	 * block size of the disks attached to them), so we round it down here.
329 	 */
330 	lim->io_opt = round_down(lim->io_opt, lim->physical_block_size);
331 
332 	/*
333 	 * max_hw_sectors has a somewhat weird default for historical reasons,
334 	 * but drivers really should set their own instead of relying on this
335 	 * value.
336 	 *
337 	 * The block layer relies on the fact that every driver can
338 	 * handle at least a page worth of data per I/O, and needs the value
339 	 * aligned to the logical block size.
340 	 */
341 	if (!lim->max_hw_sectors)
342 		lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
343 	if (WARN_ON_ONCE(lim->max_hw_sectors < PAGE_SECTORS))
344 		return -EINVAL;
345 	logical_block_sectors = lim->logical_block_size >> SECTOR_SHIFT;
346 	if (WARN_ON_ONCE(logical_block_sectors > lim->max_hw_sectors))
347 		return -EINVAL;
348 	lim->max_hw_sectors = round_down(lim->max_hw_sectors,
349 			logical_block_sectors);
350 
351 	/*
352 	 * The actual max_sectors value is a complex beast and also takes the
353 	 * max_dev_sectors value (set by SCSI ULPs) and a user configurable
354 	 * value into account.  The ->max_sectors value is always calculated
355 	 * from these, so directly setting it won't have any effect.
356 	 */
357 	max_hw_sectors = min_not_zero(lim->max_hw_sectors,
358 				lim->max_dev_sectors);
359 	if (lim->max_user_sectors) {
360 		if (lim->max_user_sectors < BLK_MIN_SEGMENT_SIZE / SECTOR_SIZE)
361 			return -EINVAL;
362 		lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
363 	} else if (lim->io_opt > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
364 		lim->max_sectors =
365 			min(max_hw_sectors, lim->io_opt >> SECTOR_SHIFT);
366 	} else if (lim->io_min > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
367 		lim->max_sectors =
368 			min(max_hw_sectors, lim->io_min >> SECTOR_SHIFT);
369 	} else {
370 		lim->max_sectors = min(max_hw_sectors, BLK_DEF_MAX_SECTORS_CAP);
371 	}
372 	lim->max_sectors = round_down(lim->max_sectors,
373 			logical_block_sectors);
374 
375 	/*
376 	 * Random default for the maximum number of segments.  Drivers should
377 	 * not rely on this and should set their own.
378 	 */
379 	if (!lim->max_segments)
380 		lim->max_segments = BLK_MAX_SEGMENTS;
381 
382 	if (lim->max_hw_wzeroes_unmap_sectors &&
383 	    lim->max_hw_wzeroes_unmap_sectors != lim->max_write_zeroes_sectors)
384 		return -EINVAL;
385 	lim->max_wzeroes_unmap_sectors = min(lim->max_hw_wzeroes_unmap_sectors,
386 			lim->max_user_wzeroes_unmap_sectors);
387 
388 	lim->max_discard_sectors =
389 		min(lim->max_hw_discard_sectors, lim->max_user_discard_sectors);
390 
391 	if (!lim->max_discard_segments)
392 		lim->max_discard_segments = 1;
393 
394 	if (lim->discard_granularity < lim->physical_block_size)
395 		lim->discard_granularity = lim->physical_block_size;
396 
397 	/*
398 	 * By default there is no limit on the segment boundary alignment,
399 	 * but if there is one it can't be smaller than the page size as
400 	 * that would break all the normal I/O patterns.
401 	 */
402 	if (!lim->seg_boundary_mask)
403 		lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
404 	if (WARN_ON_ONCE(lim->seg_boundary_mask < BLK_MIN_SEGMENT_SIZE - 1))
405 		return -EINVAL;
406 
407 	/*
408 	 * A stacking device may have both a virtual boundary and a max segment
409 	 * size limit, so allow this setting for now.  Long-term the two might
410 	 * need to move out of the stacking limits, since we have immutable
411 	 * bvecs and lower layer bio splitting is supposed to handle the two
412 	 * correctly.
413 	 */
414 	if (lim->virt_boundary_mask) {
415 		if (!lim->max_segment_size)
416 			lim->max_segment_size = UINT_MAX;
417 	} else {
418 		/*
419 		 * The maximum segment size has an odd historic 64k default that
420 		 * drivers probably should override.  Just like the I/O size we
421 		 * require drivers to at least handle a full page per segment.
422 		 */
423 		if (!lim->max_segment_size)
424 			lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
425 		if (WARN_ON_ONCE(lim->max_segment_size < BLK_MIN_SEGMENT_SIZE))
426 			return -EINVAL;
427 	}
428 
429 	/* setup min segment size for building new segment in fast path */
430 	if (lim->seg_boundary_mask > lim->max_segment_size - 1)
431 		seg_size = lim->max_segment_size;
432 	else
433 		seg_size = lim->seg_boundary_mask + 1;
434 	lim->min_segment_size = min_t(unsigned int, seg_size, PAGE_SIZE);
435 
436 	/*
437 	 * We require drivers to at least do logical block aligned I/O, but
438 	 * historically could not check for that due to the separate calls
439 	 * to set the limits.  Once the transition is finished the check
440 	 * below should be narrowed down to check the logical block size.
441 	 */
442 	if (!lim->dma_alignment)
443 		lim->dma_alignment = SECTOR_SIZE - 1;
444 	if (WARN_ON_ONCE(lim->dma_alignment > PAGE_SIZE))
445 		return -EINVAL;
446 
447 	if (lim->alignment_offset) {
448 		lim->alignment_offset &= (lim->physical_block_size - 1);
449 		lim->flags &= ~BLK_FLAG_MISALIGNED;
450 	}
451 
452 	if (!(lim->features & BLK_FEAT_WRITE_CACHE))
453 		lim->features &= ~BLK_FEAT_FUA;
454 
455 	blk_validate_atomic_write_limits(lim);
456 
457 	err = blk_validate_integrity_limits(lim);
458 	if (err)
459 		return err;
460 	return blk_validate_zoned_limits(lim);
461 }
462 EXPORT_SYMBOL_GPL(blk_validate_limits);
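
/*
 * Example of the max_sectors derivation in blk_validate_limits() for a driver
 * that sets max_hw_sectors = 65535 and a 4096 byte logical block size, with
 * everything else left at zero (assuming BLK_DEF_MAX_SECTORS_CAP is 2560
 * sectors, i.e. 1280 KiB):
 *
 *	logical_block_sectors = 8, max_hw_sectors rounded down to 65528
 *	no max_user_sectors, io_opt or io_min override applies
 *	max_sectors = min(65528, BLK_DEF_MAX_SECTORS_CAP) = 2560
 *
 * Writing to the max_sectors_kb sysfs attribute sets max_user_sectors, which
 * then takes precedence over the default cap.
 */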
463 
464 /*
465  * Set the default limits for a newly allocated queue.  @lim contains the
466  * initial limits set by the driver, which could be no limit in which case
467  * all fields are cleared to zero.
468  */
469 int blk_set_default_limits(struct queue_limits *lim)
470 {
471 	/*
472 	 * Most defaults are set by capping the bounds in blk_validate_limits,
473 	 * but these limits are special and need an explicit initialization to
474 	 * the max value here.
475 	 */
476 	lim->max_user_discard_sectors = UINT_MAX;
477 	lim->max_user_wzeroes_unmap_sectors = UINT_MAX;
478 	return blk_validate_limits(lim);
479 }
480 
481 /**
482  * queue_limits_commit_update - commit an atomic update of queue limits
483  * @q:		queue to update
484  * @lim:	limits to apply
485  *
486  * Apply the limits in @lim that were obtained from queue_limits_start_update()
487  * and updated by the caller to @q.  The caller must have frozen the queue or
488  * ensure that there are no outstanding I/Os by other means.
489  *
490  * Returns 0 if successful, else a negative error code.
491  */
492 int queue_limits_commit_update(struct request_queue *q,
493 		struct queue_limits *lim)
494 {
495 	int error;
496 
497 	error = blk_validate_limits(lim);
498 	if (error)
499 		goto out_unlock;
500 
501 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
502 	if (q->crypto_profile && lim->integrity.tag_size) {
503 		pr_warn("blk-integrity: Integrity and hardware inline encryption are not supported together.\n");
504 		error = -EINVAL;
505 		goto out_unlock;
506 	}
507 #endif
508 
509 	q->limits = *lim;
510 	if (q->disk)
511 		blk_apply_bdi_limits(q->disk->bdi, lim);
512 out_unlock:
513 	mutex_unlock(&q->limits_lock);
514 	return error;
515 }
516 EXPORT_SYMBOL_GPL(queue_limits_commit_update);
517 
518 /**
519  * queue_limits_commit_update_frozen - commit an atomic update of queue limits
520  * @q:		queue to update
521  * @lim:	limits to apply
522  *
523  * Apply the limits in @lim that were obtained from queue_limits_start_update()
524  * and updated with the new values by the caller to @q.  Freezes the queue
525  * before the update and unfreezes it after.
526  *
527  * Returns 0 if successful, else a negative error code.
528  */
529 int queue_limits_commit_update_frozen(struct request_queue *q,
530 		struct queue_limits *lim)
531 {
532 	unsigned int memflags;
533 	int ret;
534 
535 	memflags = blk_mq_freeze_queue(q);
536 	ret = queue_limits_commit_update(q, lim);
537 	blk_mq_unfreeze_queue(q, memflags);
538 
539 	return ret;
540 }
541 EXPORT_SYMBOL_GPL(queue_limits_commit_update_frozen);
542 
543 /**
544  * queue_limits_set - apply queue limits to queue
545  * @q:		queue to update
546  * @lim:	limits to apply
547  *
548  * Apply the limits in @lim that were freshly initialized to @q.
549  * To update existing limits use queue_limits_start_update() and
550  * queue_limits_commit_update() instead.
551  *
552  * Returns 0 if successful, else a negative error code.
553  */
554 int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
555 {
556 	mutex_lock(&q->limits_lock);
557 	return queue_limits_commit_update(q, lim);
558 }
559 EXPORT_SYMBOL_GPL(queue_limits_set);
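
/*
 * A sketch of the common update pattern using the helpers above.
 * queue_limits_start_update() is declared in <linux/blkdev.h> and acquires
 * q->limits_lock, which the commit helpers release:
 *
 *	struct queue_limits lim = queue_limits_start_update(q);
 *	int error;
 *
 *	lim.max_hw_sectors = new_hw_limit;	/* driver-specific values */
 *	error = queue_limits_commit_update_frozen(q, &lim);
 *	if (error)
 *		return error;
 *
 * queue_limits_set() is only meant for freshly initialized limits, e.g. right
 * after the queue has been allocated, per the kernel-doc above.
 */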
560 
561 static int queue_limit_alignment_offset(const struct queue_limits *lim,
562 		sector_t sector)
563 {
564 	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
565 	unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
566 		<< SECTOR_SHIFT;
567 
568 	return (granularity + lim->alignment_offset - alignment) % granularity;
569 }
570 
571 static unsigned int queue_limit_discard_alignment(
572 		const struct queue_limits *lim, sector_t sector)
573 {
574 	unsigned int alignment, granularity, offset;
575 
576 	if (!lim->max_discard_sectors)
577 		return 0;
578 
579 	/* Why are these in bytes, not sectors? */
580 	alignment = lim->discard_alignment >> SECTOR_SHIFT;
581 	granularity = lim->discard_granularity >> SECTOR_SHIFT;
582 
583 	/* Offset of the partition start in 'granularity' sectors */
584 	offset = sector_div(sector, granularity);
585 
586 	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
587 	offset = (granularity + alignment - offset) % granularity;
588 
589 	/* Turn it back into bytes, gaah */
590 	return offset << SECTOR_SHIFT;
591 }
592 
593 static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
594 {
595 	sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
596 	if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
597 		sectors = PAGE_SIZE >> SECTOR_SHIFT;
598 	return sectors;
599 }
600 
601 /* Check if second and later bottom devices are compliant */
602 static bool blk_stack_atomic_writes_tail(struct queue_limits *t,
603 				struct queue_limits *b)
604 {
605 	/* We're not going to support different boundary sizes... yet */
606 	if (t->atomic_write_hw_boundary != b->atomic_write_hw_boundary)
607 		return false;
608 
609 	/* Can't support this */
610 	if (t->atomic_write_hw_unit_min > b->atomic_write_hw_unit_max)
611 		return false;
612 
613 	/* Or this */
614 	if (t->atomic_write_hw_unit_max < b->atomic_write_hw_unit_min)
615 		return false;
616 
617 	t->atomic_write_hw_max = min(t->atomic_write_hw_max,
618 				b->atomic_write_hw_max);
619 	t->atomic_write_hw_unit_min = max(t->atomic_write_hw_unit_min,
620 				b->atomic_write_hw_unit_min);
621 	t->atomic_write_hw_unit_max = min(t->atomic_write_hw_unit_max,
622 				b->atomic_write_hw_unit_max);
623 	return true;
624 }
625 
626 /* Check for valid boundary of first bottom device */
627 static bool blk_stack_atomic_writes_boundary_head(struct queue_limits *t,
628 				struct queue_limits *b)
629 {
630 	/*
631 	 * Ensure atomic write boundary is aligned with chunk sectors. Stacked
632 	 * devices store chunk sectors in t->io_min.
633 	 */
634 	if (b->atomic_write_hw_boundary > t->io_min &&
635 	    b->atomic_write_hw_boundary % t->io_min)
636 		return false;
637 	if (t->io_min > b->atomic_write_hw_boundary &&
638 	    t->io_min % b->atomic_write_hw_boundary)
639 		return false;
640 
641 	t->atomic_write_hw_boundary = b->atomic_write_hw_boundary;
642 	return true;
643 }
644 
645 static void blk_stack_atomic_writes_chunk_sectors(struct queue_limits *t)
646 {
647 	unsigned int chunk_bytes;
648 
649 	if (!t->chunk_sectors)
650 		return;
651 
652 	/*
653 	 * If chunk sectors is so large that its value in bytes overflows
654 	 * UINT_MAX, then just shift it down so it definitely will fit.
655 	 * We don't support atomic writes of such a large size anyway.
656 	 */
657 	if (check_shl_overflow(t->chunk_sectors, SECTOR_SHIFT, &chunk_bytes))
658 		chunk_bytes = t->chunk_sectors;
659 
660 	/*
661 	 * Find values for limits which work for chunk size.
662 	 * b->atomic_write_hw_unit_{min, max} may not be aligned with chunk
663 	 * size, as the chunk size is not restricted to a power-of-2.
664 	 * So we need to find highest power-of-2 which works for the chunk
665 	 * size.
666 	 * As an example scenario, we could have t->unit_max = 16K and
667 	 * t->chunk_sectors = 24KB. For this case, reduce t->unit_max to a
668 	 * value aligned with both limits, i.e. 8K in this example.
669 	 */
670 	t->atomic_write_hw_unit_max = min(t->atomic_write_hw_unit_max,
671 					max_pow_of_two_factor(chunk_bytes));
672 
673 	t->atomic_write_hw_unit_min = min(t->atomic_write_hw_unit_min,
674 					  t->atomic_write_hw_unit_max);
675 	t->atomic_write_hw_max = min(t->atomic_write_hw_max, chunk_bytes);
676 }
677 
678 /* Check stacking of first bottom device */
679 static bool blk_stack_atomic_writes_head(struct queue_limits *t,
680 				struct queue_limits *b)
681 {
682 	if (b->atomic_write_hw_boundary &&
683 	    !blk_stack_atomic_writes_boundary_head(t, b))
684 		return false;
685 
686 	t->atomic_write_hw_unit_max = b->atomic_write_hw_unit_max;
687 	t->atomic_write_hw_unit_min = b->atomic_write_hw_unit_min;
688 	t->atomic_write_hw_max = b->atomic_write_hw_max;
689 	return true;
690 }
691 
692 static void blk_stack_atomic_writes_limits(struct queue_limits *t,
693 				struct queue_limits *b, sector_t start)
694 {
695 	if (!(b->features & BLK_FEAT_ATOMIC_WRITES))
696 		goto unsupported;
697 
698 	if (!b->atomic_write_hw_unit_min)
699 		goto unsupported;
700 
701 	if (!blk_atomic_write_start_sect_aligned(start, b))
702 		goto unsupported;
703 
704 	/*
705 	 * If atomic_write_hw_max is set, we have already stacked 1x bottom
706 	 * device, so check for compliance.
707 	 */
708 	if (t->atomic_write_hw_max) {
709 		if (!blk_stack_atomic_writes_tail(t, b))
710 			goto unsupported;
711 		return;
712 	}
713 
714 	if (!blk_stack_atomic_writes_head(t, b))
715 		goto unsupported;
716 	blk_stack_atomic_writes_chunk_sectors(t);
717 	return;
718 
719 unsupported:
720 	t->atomic_write_hw_max = 0;
721 	t->atomic_write_hw_unit_max = 0;
722 	t->atomic_write_hw_unit_min = 0;
723 	t->atomic_write_hw_boundary = 0;
724 }
725 
726 /**
727  * blk_stack_limits - adjust queue_limits for stacked devices
728  * @t:	the stacking driver limits (top device)
729  * @b:  the underlying queue limits (bottom, component device)
730  * @start:  first data sector within component device
731  *
732  * Description:
733  *    This function is used by stacking drivers like MD and DM to ensure
734  *    that all component devices have compatible block sizes and
735  *    alignments.  The stacking driver must provide a queue_limits
736  *    struct (top) and then iteratively call the stacking function for
737  *    all component (bottom) devices.  The stacking function will
738  *    attempt to combine the values and ensure proper alignment.
739  *
740  *    Returns 0 if the top and bottom queue_limits are compatible.  The
741  *    top device's block sizes and alignment offsets may be adjusted to
742  *    ensure alignment with the bottom device. If no compatible sizes
743  *    and alignments exist, -1 is returned and the resulting top
744  *    queue_limits will have the misaligned flag set to indicate that
745  *    the alignment_offset is undefined.
746  */
747 int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
748 		     sector_t start)
749 {
750 	unsigned int top, bottom, alignment, ret = 0;
751 
752 	t->features |= (b->features & BLK_FEAT_INHERIT_MASK);
753 
754 	/*
755 	 * Some features need to be supported both by the stacking driver and all
756 	 * underlying devices.  The stacking driver sets these flags before
757 	 * stacking the limits, and this will clear the flags if any of the
758 	 * underlying devices does not support it.
759 	 */
760 	if (!(b->features & BLK_FEAT_NOWAIT))
761 		t->features &= ~BLK_FEAT_NOWAIT;
762 	if (!(b->features & BLK_FEAT_POLL))
763 		t->features &= ~BLK_FEAT_POLL;
764 
765 	t->flags |= (b->flags & BLK_FLAG_MISALIGNED);
766 
767 	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
768 	t->max_user_sectors = min_not_zero(t->max_user_sectors,
769 			b->max_user_sectors);
770 	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
771 	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
772 	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
773 					b->max_write_zeroes_sectors);
774 	t->max_user_wzeroes_unmap_sectors =
775 			min(t->max_user_wzeroes_unmap_sectors,
776 			    b->max_user_wzeroes_unmap_sectors);
777 	t->max_hw_wzeroes_unmap_sectors =
778 			min(t->max_hw_wzeroes_unmap_sectors,
779 			    b->max_hw_wzeroes_unmap_sectors);
780 
781 	t->max_hw_zone_append_sectors = min(t->max_hw_zone_append_sectors,
782 					b->max_hw_zone_append_sectors);
783 
784 	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
785 					    b->seg_boundary_mask);
786 	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
787 					    b->virt_boundary_mask);
788 
789 	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
790 	t->max_discard_segments = min_not_zero(t->max_discard_segments,
791 					       b->max_discard_segments);
792 	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
793 						 b->max_integrity_segments);
794 
795 	t->max_segment_size = min_not_zero(t->max_segment_size,
796 					   b->max_segment_size);
797 
798 	alignment = queue_limit_alignment_offset(b, start);
799 
800 	/* Bottom device has different alignment.  Check that it is
801 	 * compatible with the current top alignment.
802 	 */
803 	if (t->alignment_offset != alignment) {
804 
805 		top = max(t->physical_block_size, t->io_min)
806 			+ t->alignment_offset;
807 		bottom = max(b->physical_block_size, b->io_min) + alignment;
808 
809 		/* Verify that top and bottom intervals line up */
810 		if (max(top, bottom) % min(top, bottom)) {
811 			t->flags |= BLK_FLAG_MISALIGNED;
812 			ret = -1;
813 		}
814 	}
815 
816 	t->logical_block_size = max(t->logical_block_size,
817 				    b->logical_block_size);
818 
819 	t->physical_block_size = max(t->physical_block_size,
820 				     b->physical_block_size);
821 
822 	t->io_min = max(t->io_min, b->io_min);
823 	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
824 	t->dma_alignment = max(t->dma_alignment, b->dma_alignment);
825 
826 	/* Set non-power-of-2 compatible chunk_sectors boundary */
827 	if (b->chunk_sectors)
828 		t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);
829 
830 	/* Physical block size a multiple of the logical block size? */
831 	if (t->physical_block_size & (t->logical_block_size - 1)) {
832 		t->physical_block_size = t->logical_block_size;
833 		t->flags |= BLK_FLAG_MISALIGNED;
834 		ret = -1;
835 	}
836 
837 	/* Minimum I/O a multiple of the physical block size? */
838 	if (t->io_min & (t->physical_block_size - 1)) {
839 		t->io_min = t->physical_block_size;
840 		t->flags |= BLK_FLAG_MISALIGNED;
841 		ret = -1;
842 	}
843 
844 	/* Optimal I/O a multiple of the physical block size? */
845 	if (t->io_opt & (t->physical_block_size - 1)) {
846 		t->io_opt = 0;
847 		t->flags |= BLK_FLAG_MISALIGNED;
848 		ret = -1;
849 	}
850 
851 	/* chunk_sectors a multiple of the physical block size? */
852 	if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
853 		t->chunk_sectors = 0;
854 		t->flags |= BLK_FLAG_MISALIGNED;
855 		ret = -1;
856 	}
857 
858 	/* Find lowest common alignment_offset */
859 	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
860 		% max(t->physical_block_size, t->io_min);
861 
862 	/* Verify that new alignment_offset is on a logical block boundary */
863 	if (t->alignment_offset & (t->logical_block_size - 1)) {
864 		t->flags |= BLK_FLAG_MISALIGNED;
865 		ret = -1;
866 	}
867 
868 	t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
869 	t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
870 	t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);
871 
872 	/* Discard alignment and granularity */
873 	if (b->discard_granularity) {
874 		alignment = queue_limit_discard_alignment(b, start);
875 
876 		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
877 						      b->max_discard_sectors);
878 		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
879 							 b->max_hw_discard_sectors);
880 		t->discard_granularity = max(t->discard_granularity,
881 					     b->discard_granularity);
882 		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
883 			t->discard_granularity;
884 	}
885 	t->max_secure_erase_sectors = min_not_zero(t->max_secure_erase_sectors,
886 						   b->max_secure_erase_sectors);
887 	t->zone_write_granularity = max(t->zone_write_granularity,
888 					b->zone_write_granularity);
889 	if (!(t->features & BLK_FEAT_ZONED)) {
890 		t->zone_write_granularity = 0;
891 		t->max_zone_append_sectors = 0;
892 	}
893 	blk_stack_atomic_writes_limits(t, b, start);
894 
895 	return ret;
896 }
897 EXPORT_SYMBOL(blk_stack_limits);
898 
899 /**
900  * queue_limits_stack_bdev - adjust queue_limits for stacked devices
901  * @t:	the stacking driver limits (top device)
902  * @bdev:  the underlying block device (bottom)
903  * @offset:  offset to beginning of data within component device
904  * @pfx: prefix to use for warnings logged
905  *
906  * Description:
907  *    This function is used by stacking drivers like MD and DM to ensure
908  *    that all component devices have compatible block sizes and
909  *    alignments.  The stacking driver must provide a queue_limits
910  *    struct (top) and then iteratively call the stacking function for
911  *    all component (bottom) devices.  The stacking function will
912  *    attempt to combine the values and ensure proper alignment.
913  */
914 void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
915 		sector_t offset, const char *pfx)
916 {
917 	if (blk_stack_limits(t, bdev_limits(bdev),
918 			get_start_sect(bdev) + offset))
919 		pr_notice("%s: Warning: Device %pg is misaligned\n",
920 			pfx, bdev);
921 }
922 EXPORT_SYMBOL_GPL(queue_limits_stack_bdev);
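
/*
 * A sketch of how a stacking driver is expected to combine the helpers above;
 * the table/device iteration below is made up for illustration:
 *
 *	struct queue_limits lim;
 *
 *	blk_set_stacking_limits(&lim);
 *	for_each_component_bdev(table, bdev)
 *		queue_limits_stack_bdev(&lim, bdev, 0, disk->disk_name);
 *	return queue_limits_set(disk->queue, &lim);
 *
 * Each call narrows the stacked limits to what every bottom device can
 * handle; a misaligned component device only results in the warning above,
 * the combined limits are still applied.
 */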
923 
924 /**
925  * queue_limits_stack_integrity - stack integrity profile
926  * @t: target queue limits
927  * @b: base queue limits
928  *
929  * Check if the integrity profile in @b can be stacked into the
930  * target @t.  Stacking is possible if either:
931  *
932  *   a) @t does not have any integrity information stacked into it yet
933  *   b) the integrity profile in @b is identical to the one in @t
934  *
935  * If @b can be stacked into @t, return %true.  Else return %false and clear the
936  * integrity information in @t.
937  */
938 bool queue_limits_stack_integrity(struct queue_limits *t,
939 		struct queue_limits *b)
940 {
941 	struct blk_integrity *ti = &t->integrity;
942 	struct blk_integrity *bi = &b->integrity;
943 
944 	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
945 		return true;
946 
947 	if (ti->flags & BLK_INTEGRITY_STACKED) {
948 		if (ti->metadata_size != bi->metadata_size)
949 			goto incompatible;
950 		if (ti->interval_exp != bi->interval_exp)
951 			goto incompatible;
952 		if (ti->tag_size != bi->tag_size)
953 			goto incompatible;
954 		if (ti->csum_type != bi->csum_type)
955 			goto incompatible;
956 		if ((ti->flags & BLK_INTEGRITY_REF_TAG) !=
957 		    (bi->flags & BLK_INTEGRITY_REF_TAG))
958 			goto incompatible;
959 	} else {
960 		ti->flags = BLK_INTEGRITY_STACKED;
961 		ti->flags |= (bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE) |
962 			     (bi->flags & BLK_INTEGRITY_REF_TAG);
963 		ti->csum_type = bi->csum_type;
964 		ti->metadata_size = bi->metadata_size;
965 		ti->pi_offset = bi->pi_offset;
966 		ti->interval_exp = bi->interval_exp;
967 		ti->tag_size = bi->tag_size;
968 	}
969 	return true;
970 
971 incompatible:
972 	memset(ti, 0, sizeof(*ti));
973 	return false;
974 }
975 EXPORT_SYMBOL_GPL(queue_limits_stack_integrity);
976 
977 /**
978  * blk_set_queue_depth - tell the block layer about the device queue depth
979  * @q:		the request queue for the device
980  * @depth:		queue depth
981  *
982  */
983 void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
984 {
985 	q->queue_depth = depth;
986 	rq_qos_queue_depth_changed(q);
987 }
988 EXPORT_SYMBOL(blk_set_queue_depth);
989 
990 int bdev_alignment_offset(struct block_device *bdev)
991 {
992 	struct request_queue *q = bdev_get_queue(bdev);
993 
994 	if (q->limits.flags & BLK_FLAG_MISALIGNED)
995 		return -1;
996 	if (bdev_is_partition(bdev))
997 		return queue_limit_alignment_offset(&q->limits,
998 				bdev->bd_start_sect);
999 	return q->limits.alignment_offset;
1000 }
1001 EXPORT_SYMBOL_GPL(bdev_alignment_offset);
1002 
1003 unsigned int bdev_discard_alignment(struct block_device *bdev)
1004 {
1005 	struct request_queue *q = bdev_get_queue(bdev);
1006 
1007 	if (bdev_is_partition(bdev))
1008 		return queue_limit_discard_alignment(&q->limits,
1009 				bdev->bd_start_sect);
1010 	return q->limits.discard_alignment;
1011 }
1012 EXPORT_SYMBOL_GPL(bdev_discard_alignment);
1013