// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blk-integrity.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>

#include "blk.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	WRITE_ONCE(q->rq_timeout, timeout);
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim:  the queue_limits structure to reset
 *
 * Prepare queue limits for applying limits from underlying devices using
 * blk_stack_limits().
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
	memset(lim, 0, sizeof(*lim));
	lim->logical_block_size = SECTOR_SIZE;
	lim->physical_block_size = SECTOR_SIZE;
	lim->io_min = SECTOR_SIZE;
	lim->discard_granularity = SECTOR_SIZE;
	lim->dma_alignment = SECTOR_SIZE - 1;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;

	/* Inherit limits from component devices */
	lim->max_segments = USHRT_MAX;
	lim->max_discard_segments = USHRT_MAX;
	lim->max_hw_sectors = UINT_MAX;
	lim->max_segment_size = UINT_MAX;
	lim->max_sectors = UINT_MAX;
	lim->max_dev_sectors = UINT_MAX;
	lim->max_write_zeroes_sectors = UINT_MAX;
	lim->max_hw_zone_append_sectors = UINT_MAX;
	lim->max_user_discard_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);

void blk_apply_bdi_limits(struct backing_dev_info *bdi,
		struct queue_limits *lim)
{
	/*
	 * For read-ahead of large files to be effective, we need to read ahead
	 * at least twice the optimal I/O size.
	 *
	 * There is no hardware limitation for the read-ahead size and the user
	 * might have increased the read-ahead size through sysfs, so don't ever
	 * decrease it.
	 */
	bdi->ra_pages = max3(bdi->ra_pages,
				lim->io_opt * 2 / PAGE_SIZE,
				VM_READAHEAD_PAGES);
	bdi->io_pages = lim->max_sectors >> PAGE_SECTORS_SHIFT;
}

static int blk_validate_zoned_limits(struct queue_limits *lim)
{
	if (!(lim->features & BLK_FEAT_ZONED)) {
		if (WARN_ON_ONCE(lim->max_open_zones) ||
		    WARN_ON_ONCE(lim->max_active_zones) ||
		    WARN_ON_ONCE(lim->zone_write_granularity) ||
		    WARN_ON_ONCE(lim->max_zone_append_sectors))
			return -EINVAL;
		return 0;
	}

	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED)))
		return -EINVAL;

	/*
	 * Given that active zones include open zones, the maximum number of
	 * open zones cannot be larger than the maximum number of active zones.
	 */
	if (lim->max_active_zones &&
	    lim->max_open_zones > lim->max_active_zones)
		return -EINVAL;

	if (lim->zone_write_granularity < lim->logical_block_size)
		lim->zone_write_granularity = lim->logical_block_size;

	/*
	 * The Zone Append size is limited by the maximum I/O size and the zone
	 * size given that it can't span zones.
	 *
	 * If no max_hw_zone_append_sectors limit is provided, the block layer
	 * will emulate it, else we're also bound by the hardware limit.
	 */
	lim->max_zone_append_sectors =
		min_not_zero(lim->max_hw_zone_append_sectors,
			min(lim->chunk_sectors, lim->max_hw_sectors));
	return 0;
}

static int blk_validate_integrity_limits(struct queue_limits *lim)
{
	struct blk_integrity *bi = &lim->integrity;

	if (!bi->tuple_size) {
		if (bi->csum_type != BLK_INTEGRITY_CSUM_NONE ||
		    bi->tag_size || ((bi->flags & BLK_INTEGRITY_REF_TAG))) {
			pr_warn("invalid PI settings.\n");
			return -EINVAL;
		}
		bi->flags |= BLK_INTEGRITY_NOGENERATE | BLK_INTEGRITY_NOVERIFY;
		return 0;
	}

	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) {
		pr_warn("integrity support disabled.\n");
		return -EINVAL;
	}

	if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE &&
	    (bi->flags & BLK_INTEGRITY_REF_TAG)) {
		pr_warn("ref tag not supported without checksum.\n");
		return -EINVAL;
	}

	if (!bi->interval_exp)
		bi->interval_exp = ilog2(lim->logical_block_size);

	return 0;
}

/*
 * Returns the max guaranteed number of bytes which we can fit in a bio.
 *
 * We request that an atomic_write is an ITER_UBUF iov_iter (so a single
 * vector), so we assume that we can fit at least PAGE_SIZE in a segment,
 * apart from the first and last segments.
 */
static unsigned int blk_queue_max_guaranteed_bio(struct queue_limits *lim)
{
	unsigned int max_segments = min(BIO_MAX_VECS, lim->max_segments);
	unsigned int length;

	length = min(max_segments, 2) * lim->logical_block_size;
	if (max_segments > 2)
		length += (max_segments - 2) * PAGE_SIZE;

	return length;
}
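
/*
 * Worked example (editor's note, not part of the original source): assuming
 * 4K pages, BIO_MAX_VECS = 256, lim->max_segments = 128 and a 512 byte
 * logical block size, the guarantee works out to
 *
 *	min(128, 2) * 512 + (128 - 2) * 4096 = 1024 + 516096 = 517120 bytes
 *
 * i.e. 505 KiB that an atomic write bio can always hold.
 */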

static void blk_atomic_writes_update_limits(struct queue_limits *lim)
{
	unsigned int unit_limit = min(lim->max_hw_sectors << SECTOR_SHIFT,
					blk_queue_max_guaranteed_bio(lim));

	unit_limit = rounddown_pow_of_two(unit_limit);

	lim->atomic_write_max_sectors =
		min(lim->atomic_write_hw_max >> SECTOR_SHIFT,
			lim->max_hw_sectors);
	lim->atomic_write_unit_min =
		min(lim->atomic_write_hw_unit_min, unit_limit);
	lim->atomic_write_unit_max =
		min(lim->atomic_write_hw_unit_max, unit_limit);
	lim->atomic_write_boundary_sectors =
		lim->atomic_write_hw_boundary >> SECTOR_SHIFT;
}

static void blk_validate_atomic_write_limits(struct queue_limits *lim)
{
	unsigned int boundary_sectors;

	if (!(lim->features & BLK_FEAT_ATOMIC_WRITES))
		goto unsupported;

	if (!lim->atomic_write_hw_max)
		goto unsupported;

	if (WARN_ON_ONCE(!is_power_of_2(lim->atomic_write_hw_unit_min)))
		goto unsupported;

	if (WARN_ON_ONCE(!is_power_of_2(lim->atomic_write_hw_unit_max)))
		goto unsupported;

	if (WARN_ON_ONCE(lim->atomic_write_hw_unit_min >
			 lim->atomic_write_hw_unit_max))
		goto unsupported;

	if (WARN_ON_ONCE(lim->atomic_write_hw_unit_max >
			 lim->atomic_write_hw_max))
		goto unsupported;

	boundary_sectors = lim->atomic_write_hw_boundary >> SECTOR_SHIFT;

	if (boundary_sectors) {
		if (WARN_ON_ONCE(lim->atomic_write_hw_max >
				 lim->atomic_write_hw_boundary))
			goto unsupported;
		/*
		 * A feature of boundary support is that it disallows merging
		 * bios which would result in a merged request crossing either
		 * a chunk sector or atomic write HW boundary, even though
		 * chunk sectors may just be set for performance.
		 * For simplicity, disallow atomic writes for a chunk sector
		 * which is non-zero and smaller than the atomic write HW
		 * boundary.
		 * Furthermore, chunk sectors must be a multiple of the atomic
		 * write HW boundary. Otherwise boundary support becomes
		 * complicated.
		 * Devices which do not conform to these rules can be dealt
		 * with if and when they show up.
		 */
		if (WARN_ON_ONCE(lim->chunk_sectors % boundary_sectors))
			goto unsupported;

		/*
		 * The boundary size just needs to be a multiple of unit_max
		 * (and not necessarily a power-of-2), so this following check
		 * could be relaxed in future.
		 * Furthermore, if needed, unit_max could even be reduced so
		 * that it is compliant with a !power-of-2 boundary.
		 */
		if (!is_power_of_2(boundary_sectors))
			goto unsupported;
	}

	blk_atomic_writes_update_limits(lim);
	return;

unsupported:
	lim->atomic_write_max_sectors = 0;
	lim->atomic_write_boundary_sectors = 0;
	lim->atomic_write_unit_min = 0;
	lim->atomic_write_unit_max = 0;
}

/*
 * Check that the limits in lim are valid, initialize defaults for unset
 * values, and cap values based on others where needed.
 */
int blk_validate_limits(struct queue_limits *lim)
{
	unsigned int max_hw_sectors;
	unsigned int logical_block_sectors;
	unsigned long seg_size;
	int err;

	/*
	 * Unless otherwise specified, default to 512 byte logical blocks and a
	 * physical block size equal to the logical block size.
	 */
	if (!lim->logical_block_size)
		lim->logical_block_size = SECTOR_SIZE;
	else if (blk_validate_block_size(lim->logical_block_size)) {
		pr_warn("Invalid logical block size (%d)\n", lim->logical_block_size);
		return -EINVAL;
	}
	if (lim->physical_block_size < lim->logical_block_size)
		lim->physical_block_size = lim->logical_block_size;

	/*
	 * The minimum I/O size defaults to the physical block size unless
	 * explicitly overridden.
	 */
	if (lim->io_min < lim->physical_block_size)
		lim->io_min = lim->physical_block_size;

	/*
	 * The optimal I/O size may not be aligned to physical block size
	 * (because it may be limited by dma engines which have no clue about
	 * block size of the disks attached to them), so we round it down here.
	 */
	lim->io_opt = round_down(lim->io_opt, lim->physical_block_size);

	/*
	 * max_hw_sectors has a somewhat weird default for historical reasons,
	 * but drivers really should set their own instead of relying on this
	 * value.
	 *
	 * The block layer relies on the fact that every driver can
	 * handle at least a page worth of data per I/O, and needs the value
	 * aligned to the logical block size.
	 */
	if (!lim->max_hw_sectors)
		lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
	if (WARN_ON_ONCE(lim->max_hw_sectors < PAGE_SECTORS))
		return -EINVAL;
	logical_block_sectors = lim->logical_block_size >> SECTOR_SHIFT;
	if (WARN_ON_ONCE(logical_block_sectors > lim->max_hw_sectors))
		return -EINVAL;
	lim->max_hw_sectors = round_down(lim->max_hw_sectors,
			logical_block_sectors);

	/*
	 * The actual max_sectors value is a complex beast and also takes the
	 * max_dev_sectors value (set by SCSI ULPs) and a user configurable
	 * value into account.  The ->max_sectors value is always calculated
	 * from these, so directly setting it won't have any effect.
	 */
	max_hw_sectors = min_not_zero(lim->max_hw_sectors,
				lim->max_dev_sectors);
	if (lim->max_user_sectors) {
		if (lim->max_user_sectors < BLK_MIN_SEGMENT_SIZE / SECTOR_SIZE)
			return -EINVAL;
		lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
	} else if (lim->io_opt > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
		lim->max_sectors =
			min(max_hw_sectors, lim->io_opt >> SECTOR_SHIFT);
	} else if (lim->io_min > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
		lim->max_sectors =
			min(max_hw_sectors, lim->io_min >> SECTOR_SHIFT);
	} else {
		lim->max_sectors = min(max_hw_sectors, BLK_DEF_MAX_SECTORS_CAP);
	}
	lim->max_sectors = round_down(lim->max_sectors,
			logical_block_sectors);

	/*
	 * Random default for the maximum number of segments.  Drivers should
	 * not rely on this and should set their own.
	 */
	if (!lim->max_segments)
		lim->max_segments = BLK_MAX_SEGMENTS;

	lim->max_discard_sectors =
		min(lim->max_hw_discard_sectors, lim->max_user_discard_sectors);

	if (!lim->max_discard_segments)
		lim->max_discard_segments = 1;

	if (lim->discard_granularity < lim->physical_block_size)
		lim->discard_granularity = lim->physical_block_size;

	/*
	 * By default there is no limit on the segment boundary alignment,
	 * but if there is one it can't be smaller than the page size as
	 * that would break all the normal I/O patterns.
	 */
	if (!lim->seg_boundary_mask)
		lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	if (WARN_ON_ONCE(lim->seg_boundary_mask < BLK_MIN_SEGMENT_SIZE - 1))
		return -EINVAL;

	/*
	 * A stacking device may have both a virtual boundary and a max segment
	 * size limit, so allow this setting now, and long-term the two
	 * might need to move out of stacking limits since we have immutable
	 * bvecs and lower layer bio splitting is supposed to handle the two
	 * correctly.
	 */
	if (lim->virt_boundary_mask) {
		if (!lim->max_segment_size)
			lim->max_segment_size = UINT_MAX;
	} else {
		/*
		 * The maximum segment size has an odd historic 64k default that
		 * drivers probably should override.  Just like the I/O size we
		 * require drivers to at least handle a full page per segment.
		 */
		if (!lim->max_segment_size)
			lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
		if (WARN_ON_ONCE(lim->max_segment_size < BLK_MIN_SEGMENT_SIZE))
			return -EINVAL;
	}

	/* Set up the min segment size for building a new segment in the fast path */
	if (lim->seg_boundary_mask > lim->max_segment_size - 1)
		seg_size = lim->max_segment_size;
	else
		seg_size = lim->seg_boundary_mask + 1;
	lim->min_segment_size = min_t(unsigned int, seg_size, PAGE_SIZE);

	/*
	 * We require drivers to at least do logical block aligned I/O, but
	 * historically could not check for that due to the separate calls
	 * to set the limits.  Once the transition is finished the check
	 * below should be narrowed down to check the logical block size.
	 */
	if (!lim->dma_alignment)
		lim->dma_alignment = SECTOR_SIZE - 1;
	if (WARN_ON_ONCE(lim->dma_alignment > PAGE_SIZE))
		return -EINVAL;

	if (lim->alignment_offset) {
		lim->alignment_offset &= (lim->physical_block_size - 1);
		lim->flags &= ~BLK_FLAG_MISALIGNED;
	}

	if (!(lim->features & BLK_FEAT_WRITE_CACHE))
		lim->features &= ~BLK_FEAT_FUA;

	blk_validate_atomic_write_limits(lim);

	err = blk_validate_integrity_limits(lim);
	if (err)
		return err;
	return blk_validate_zoned_limits(lim);
}
EXPORT_SYMBOL_GPL(blk_validate_limits);

/*
 * Set the default limits for a newly allocated queue.  @lim contains the
 * initial limits set by the driver, which could be no limits at all, in which
 * case all fields are cleared to zero.
 */
int blk_set_default_limits(struct queue_limits *lim)
{
	/*
	 * Most defaults are set by capping the bounds in blk_validate_limits,
	 * but max_user_discard_sectors is special and needs an explicit
	 * initialization to the max value here.
	 */
	lim->max_user_discard_sectors = UINT_MAX;
	return blk_validate_limits(lim);
}

/**
 * queue_limits_commit_update - commit an atomic update of queue limits
 * @q:		queue to update
 * @lim:	limits to apply
 *
 * Apply the limits in @lim that were obtained from queue_limits_start_update()
 * and updated by the caller to @q.  The caller must have frozen the queue or
 * ensure that there are no outstanding I/Os by other means.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_commit_update(struct request_queue *q,
		struct queue_limits *lim)
{
	int error;

	error = blk_validate_limits(lim);
	if (error)
		goto out_unlock;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	if (q->crypto_profile && lim->integrity.tag_size) {
		pr_warn("blk-integrity: Integrity and hardware inline encryption are not supported together.\n");
		error = -EINVAL;
		goto out_unlock;
	}
#endif

	q->limits = *lim;
	if (q->disk)
		blk_apply_bdi_limits(q->disk->bdi, lim);
out_unlock:
	mutex_unlock(&q->limits_lock);
	return error;
}
EXPORT_SYMBOL_GPL(queue_limits_commit_update);

/**
 * queue_limits_commit_update_frozen - commit an atomic update of queue limits
 * @q:		queue to update
 * @lim:	limits to apply
 *
 * Apply the limits in @lim that were obtained from queue_limits_start_update()
 * and updated with the new values by the caller to @q.  Freezes the queue
 * before the update and unfreezes it after.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_commit_update_frozen(struct request_queue *q,
		struct queue_limits *lim)
{
	unsigned int memflags;
	int ret;

	memflags = blk_mq_freeze_queue(q);
	ret = queue_limits_commit_update(q, lim);
	blk_mq_unfreeze_queue(q, memflags);

	return ret;
}
EXPORT_SYMBOL_GPL(queue_limits_commit_update_frozen);
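
/*
 * Illustrative usage sketch (editor's note, not part of the original source):
 * a driver that wants to change a limit on a live queue typically pairs
 * queue_limits_start_update() with one of the commit helpers above, e.g.:
 *
 *	struct queue_limits lim;
 *	int error;
 *
 *	lim = queue_limits_start_update(q);
 *	lim.max_hw_sectors = 2048;	(hypothetical new 1 MiB cap)
 *	error = queue_limits_commit_update_frozen(q, &lim);
 *	if (error)
 *		return error;
 *
 * The _frozen variant freezes the queue around the commit; plain
 * queue_limits_commit_update() assumes the caller already guarantees that no
 * I/O is outstanding.
 */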

/**
 * queue_limits_set - apply queue limits to queue
 * @q:		queue to update
 * @lim:	limits to apply
 *
 * Apply the limits in @lim that were freshly initialized to @q.
 * To update existing limits use queue_limits_start_update() and
 * queue_limits_commit_update() instead.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
{
	mutex_lock(&q->limits_lock);
	return queue_limits_commit_update(q, lim);
}
EXPORT_SYMBOL_GPL(queue_limits_set);

static int queue_limit_alignment_offset(const struct queue_limits *lim,
		sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
		<< SECTOR_SHIFT;

	return (granularity + lim->alignment_offset - alignment) % granularity;
}
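
/*
 * Worked example (editor's note, not part of the original source): with a
 * 4096 byte physical block size and io_min, the granularity is 4096 bytes
 * (8 sectors).  With alignment_offset of 0 and sector 7, the remainder is
 * 7 % 8 = 7, i.e. 3584 bytes, so the function returns
 * (4096 + 0 - 3584) % 4096 = 512: the given sector sits 512 bytes before the
 * next naturally aligned boundary of the device.
 */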

static unsigned int queue_limit_discard_alignment(
		const struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment, granularity, offset;

	if (!lim->max_discard_sectors)
		return 0;

	/* Why are these in bytes, not sectors? */
	alignment = lim->discard_alignment >> SECTOR_SHIFT;
	granularity = lim->discard_granularity >> SECTOR_SHIFT;

	/* Offset of the partition start in 'granularity' sectors */
	offset = sector_div(sector, granularity);

	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
	offset = (granularity + alignment - offset) % granularity;

	/* Turn it back into bytes, gaah */
	return offset << SECTOR_SHIFT;
}

static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
{
	sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
	if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
		sectors = PAGE_SIZE >> SECTOR_SHIFT;
	return sectors;
}

/* Check if second and later bottom devices are compliant */
static bool blk_stack_atomic_writes_tail(struct queue_limits *t,
				struct queue_limits *b)
{
	/* We're not going to support different boundary sizes... yet */
	if (t->atomic_write_hw_boundary != b->atomic_write_hw_boundary)
		return false;

	/* Can't support this */
	if (t->atomic_write_hw_unit_min > b->atomic_write_hw_unit_max)
		return false;

	/* Or this */
	if (t->atomic_write_hw_unit_max < b->atomic_write_hw_unit_min)
		return false;

	t->atomic_write_hw_max = min(t->atomic_write_hw_max,
				b->atomic_write_hw_max);
	t->atomic_write_hw_unit_min = max(t->atomic_write_hw_unit_min,
				b->atomic_write_hw_unit_min);
	t->atomic_write_hw_unit_max = min(t->atomic_write_hw_unit_max,
				b->atomic_write_hw_unit_max);
	return true;
}

/* Check for valid boundary of first bottom device */
static bool blk_stack_atomic_writes_boundary_head(struct queue_limits *t,
				struct queue_limits *b)
{
	/*
	 * Ensure atomic write boundary is aligned with chunk sectors. Stacked
	 * devices store chunk sectors in t->io_min.
	 */
	if (b->atomic_write_hw_boundary > t->io_min &&
	    b->atomic_write_hw_boundary % t->io_min)
		return false;
	if (t->io_min > b->atomic_write_hw_boundary &&
	    t->io_min % b->atomic_write_hw_boundary)
		return false;

	t->atomic_write_hw_boundary = b->atomic_write_hw_boundary;
	return true;
}


/* Check stacking of first bottom device */
static bool blk_stack_atomic_writes_head(struct queue_limits *t,
				struct queue_limits *b)
{
	if (b->atomic_write_hw_boundary &&
	    !blk_stack_atomic_writes_boundary_head(t, b))
		return false;

	if (t->io_min <= SECTOR_SIZE) {
		/* No chunk sectors, so use bottom device values directly */
		t->atomic_write_hw_unit_max = b->atomic_write_hw_unit_max;
		t->atomic_write_hw_unit_min = b->atomic_write_hw_unit_min;
		t->atomic_write_hw_max = b->atomic_write_hw_max;
		return true;
	}

	/*
	 * Find values for limits which work for the chunk size.
	 * b->atomic_write_hw_unit_{min, max} may not be aligned with the chunk
	 * size (t->io_min), as the chunk size is not restricted to a power-of-2.
	 * So we need to find the highest power-of-2 which works for the chunk
	 * size.
	 * As an example scenario, we could have b->unit_max = 16K and
	 * t->io_min = 24K. For this case, reduce t->unit_max to a value
	 * aligned with both limits, i.e. 8K in this example.
	 */
	t->atomic_write_hw_unit_max = b->atomic_write_hw_unit_max;
	while (t->io_min % t->atomic_write_hw_unit_max)
		t->atomic_write_hw_unit_max /= 2;

	t->atomic_write_hw_unit_min = min(b->atomic_write_hw_unit_min,
					  t->atomic_write_hw_unit_max);
	t->atomic_write_hw_max = min(b->atomic_write_hw_max, t->io_min);

	return true;
}

static void blk_stack_atomic_writes_limits(struct queue_limits *t,
				struct queue_limits *b, sector_t start)
{
	if (!(b->features & BLK_FEAT_ATOMIC_WRITES))
		goto unsupported;

	if (!b->atomic_write_hw_unit_min)
		goto unsupported;

	if (!blk_atomic_write_start_sect_aligned(start, b))
		goto unsupported;

	/*
	 * If atomic_write_hw_max is set, we have already stacked 1x bottom
	 * device, so check for compliance.
	 */
	if (t->atomic_write_hw_max) {
		if (!blk_stack_atomic_writes_tail(t, b))
			goto unsupported;
		return;
	}

	if (!blk_stack_atomic_writes_head(t, b))
		goto unsupported;
	return;

unsupported:
	t->atomic_write_hw_max = 0;
	t->atomic_write_hw_unit_max = 0;
	t->atomic_write_hw_unit_min = 0;
	t->atomic_write_hw_boundary = 0;
}

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		     sector_t start)
{
	unsigned int top, bottom, alignment, ret = 0;

	t->features |= (b->features & BLK_FEAT_INHERIT_MASK);

	/*
	 * Some features need to be supported both by the stacking driver and
	 * all underlying devices.  The stacking driver sets these flags before
	 * stacking the limits, and this will clear the flags if any of the
	 * underlying devices does not support them.
	 */
	if (!(b->features & BLK_FEAT_NOWAIT))
		t->features &= ~BLK_FEAT_NOWAIT;
	if (!(b->features & BLK_FEAT_POLL))
		t->features &= ~BLK_FEAT_POLL;

	t->flags |= (b->flags & BLK_FLAG_MISALIGNED);

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_user_sectors = min_not_zero(t->max_user_sectors,
			b->max_user_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
					b->max_write_zeroes_sectors);
	t->max_hw_zone_append_sectors = min(t->max_hw_zone_append_sectors,
					b->max_hw_zone_append_sectors);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);
	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
					    b->virt_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
	t->max_discard_segments = min_not_zero(t->max_discard_segments,
					       b->max_discard_segments);
	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
						 b->max_integrity_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	alignment = queue_limit_alignment_offset(b, start);

	/*
	 * Bottom device has different alignment.  Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) % min(top, bottom)) {
			t->flags |= BLK_FLAG_MISALIGNED;
			ret = -1;
		}
	}

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
	t->dma_alignment = max(t->dma_alignment, b->dma_alignment);

	/* Set non-power-of-2 compatible chunk_sectors boundary */
	if (b->chunk_sectors)
		t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* chunk_sectors a multiple of the physical block size? */
	if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
		t->chunk_sectors = 0;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
		% max(t->physical_block_size, t->io_min);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
	t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
	t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
							 b->max_hw_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
			t->discard_granularity;
	}
	t->max_secure_erase_sectors = min_not_zero(t->max_secure_erase_sectors,
						   b->max_secure_erase_sectors);
	t->zone_write_granularity = max(t->zone_write_granularity,
					b->zone_write_granularity);
	if (!(t->features & BLK_FEAT_ZONED)) {
		t->zone_write_granularity = 0;
		t->max_zone_append_sectors = 0;
	}
	blk_stack_atomic_writes_limits(t, b, start);

	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);

/**
 * queue_limits_stack_bdev - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 * @pfx: prefix to use for warnings logged
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 */
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
		sector_t offset, const char *pfx)
{
	if (blk_stack_limits(t, bdev_limits(bdev),
			get_start_sect(bdev) + offset))
		pr_notice("%s: Warning: Device %pg is misaligned\n",
			pfx, bdev);
}
EXPORT_SYMBOL_GPL(queue_limits_stack_bdev);
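
/*
 * Illustrative usage sketch (editor's note, not part of the original source):
 * a stacking driver such as MD or DM typically starts from the permissive
 * stacking defaults and folds in every component device, e.g.:
 *
 *	struct queue_limits lim;
 *
 *	blk_set_stacking_limits(&lim);
 *	list_for_each_entry(dev, &devices, list)	(hypothetical device list)
 *		queue_limits_stack_bdev(&lim, dev->bdev, 0, "myraid");
 *
 * Once all components have been stacked, the combined limits are applied to
 * the top-level queue, e.g. with queue_limits_set().
 */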

/**
 * queue_limits_stack_integrity - stack integrity profile
 * @t: target queue limits
 * @b: base queue limits
 *
 * Check if the integrity profile in @b can be stacked into the
 * target @t.  Stacking is possible if either:
 *
 *   a) @t does not have any integrity information stacked into it yet
 *   b) the integrity profile in @b is identical to the one in @t
 *
 * If @b can be stacked into @t, return %true.  Else return %false and clear the
 * integrity information in @t.
 */
bool queue_limits_stack_integrity(struct queue_limits *t,
		struct queue_limits *b)
{
	struct blk_integrity *ti = &t->integrity;
	struct blk_integrity *bi = &b->integrity;

	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return true;

	if (ti->flags & BLK_INTEGRITY_STACKED) {
		if (ti->tuple_size != bi->tuple_size)
			goto incompatible;
		if (ti->interval_exp != bi->interval_exp)
			goto incompatible;
		if (ti->tag_size != bi->tag_size)
			goto incompatible;
		if (ti->csum_type != bi->csum_type)
			goto incompatible;
		if ((ti->flags & BLK_INTEGRITY_REF_TAG) !=
		    (bi->flags & BLK_INTEGRITY_REF_TAG))
			goto incompatible;
	} else {
		ti->flags = BLK_INTEGRITY_STACKED;
		ti->flags |= (bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE) |
			     (bi->flags & BLK_INTEGRITY_REF_TAG);
		ti->csum_type = bi->csum_type;
		ti->tuple_size = bi->tuple_size;
		ti->pi_offset = bi->pi_offset;
		ti->interval_exp = bi->interval_exp;
		ti->tag_size = bi->tag_size;
	}
	return true;

incompatible:
	memset(ti, 0, sizeof(*ti));
	return false;
}
EXPORT_SYMBOL_GPL(queue_limits_stack_integrity);

/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q:		the request queue for the device
 * @depth:	queue depth
 */
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
	q->queue_depth = depth;
	rq_qos_queue_depth_changed(q);
}
EXPORT_SYMBOL(blk_set_queue_depth);

int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.flags & BLK_FLAG_MISALIGNED)
		return -1;
	if (bdev_is_partition(bdev))
		return queue_limit_alignment_offset(&q->limits,
				bdev->bd_start_sect);
	return q->limits.alignment_offset;
}
EXPORT_SYMBOL_GPL(bdev_alignment_offset);

unsigned int bdev_discard_alignment(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (bdev_is_partition(bdev))
		return queue_limit_discard_alignment(&q->limits,
				bdev->bd_start_sect);
	return q->limits.discard_alignment;
}
EXPORT_SYMBOL_GPL(bdev_discard_alignment);
943