// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blk-integrity.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>

#include "blk.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
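
/*
 * Example (illustrative sketch, not part of this file): a driver would
 * normally call this once while setting up its queue, e.g. to give every
 * request a 30 second timeout:
 *
 *	blk_queue_rq_timeout(q, 30 * HZ);
 *
 * where "q" is the driver's request_queue.
 */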

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim:  the queue_limits structure to reset
 *
 * Prepare queue limits for applying limits from underlying devices using
 * blk_stack_limits().
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
	memset(lim, 0, sizeof(*lim));
	lim->logical_block_size = SECTOR_SIZE;
	lim->physical_block_size = SECTOR_SIZE;
	lim->io_min = SECTOR_SIZE;
	lim->discard_granularity = SECTOR_SIZE;
	lim->dma_alignment = SECTOR_SIZE - 1;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;

	/* Inherit limits from component devices */
	lim->max_segments = USHRT_MAX;
	lim->max_discard_segments = USHRT_MAX;
	lim->max_hw_sectors = UINT_MAX;
	lim->max_segment_size = UINT_MAX;
	lim->max_sectors = UINT_MAX;
	lim->max_dev_sectors = UINT_MAX;
	lim->max_write_zeroes_sectors = UINT_MAX;
	lim->max_hw_zone_append_sectors = UINT_MAX;
	lim->max_user_discard_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);
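
/*
 * Illustrative sketch of the intended calling pattern for a stacking driver
 * (the "rdev" iteration and names are assumed, loosely modelled on MD):
 * reset the limits, fold in each component device, then apply the result to
 * the stacked queue:
 *
 *	struct queue_limits lim;
 *
 *	blk_set_stacking_limits(&lim);
 *	list_for_each_entry(rdev, &mddev->disks, same_set)
 *		queue_limits_stack_bdev(&lim, rdev->bdev, rdev->data_offset,
 *					mddev->gendisk->disk_name);
 *	err = queue_limits_set(mddev->gendisk->queue, &lim);
 */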

void blk_apply_bdi_limits(struct backing_dev_info *bdi,
		struct queue_limits *lim)
{
	/*
	 * For read-ahead of large files to be effective, we need to read ahead
	 * at least twice the optimal I/O size.
	 */
	bdi->ra_pages = max(lim->io_opt * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
	bdi->io_pages = lim->max_sectors >> PAGE_SECTORS_SHIFT;
}
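
/*
 * Worked example (assuming PAGE_SIZE == 4096): a device reporting an optimal
 * I/O size of 1 MiB gets ra_pages = max(2 MiB / 4 KiB, VM_READAHEAD_PAGES) =
 * 512 pages, i.e. a 2 MiB read-ahead window.
 */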

static int blk_validate_zoned_limits(struct queue_limits *lim)
{
	if (!(lim->features & BLK_FEAT_ZONED)) {
		if (WARN_ON_ONCE(lim->max_open_zones) ||
		    WARN_ON_ONCE(lim->max_active_zones) ||
		    WARN_ON_ONCE(lim->zone_write_granularity) ||
		    WARN_ON_ONCE(lim->max_zone_append_sectors))
			return -EINVAL;
		return 0;
	}

	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED)))
		return -EINVAL;

	/*
	 * Given that active zones include open zones, the maximum number of
	 * open zones cannot be larger than the maximum number of active zones.
	 */
	if (lim->max_active_zones &&
	    lim->max_open_zones > lim->max_active_zones)
		return -EINVAL;

	if (lim->zone_write_granularity < lim->logical_block_size)
		lim->zone_write_granularity = lim->logical_block_size;

	/*
	 * The Zone Append size is limited by the maximum I/O size and the zone
	 * size given that it can't span zones.
	 *
	 * If no max_hw_zone_append_sectors limit is provided, the block layer
	 * will emulate it; otherwise we're also bound by the hardware limit.
	 */
	lim->max_zone_append_sectors =
		min_not_zero(lim->max_hw_zone_append_sectors,
			min(lim->chunk_sectors, lim->max_hw_sectors));
	return 0;
}
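
/*
 * Worked example: for a drive with 128 MiB zones (chunk_sectors == 262144),
 * max_hw_sectors == 2048 and no hardware Zone Append support
 * (max_hw_zone_append_sectors == 0), the emulated limit above becomes
 * min_not_zero(0, min(262144, 2048)) == 2048 sectors, i.e. 1 MiB.
 */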

static int blk_validate_integrity_limits(struct queue_limits *lim)
{
	struct blk_integrity *bi = &lim->integrity;

	if (!bi->tuple_size) {
		if (bi->csum_type != BLK_INTEGRITY_CSUM_NONE ||
		    bi->tag_size || ((bi->flags & BLK_INTEGRITY_REF_TAG))) {
			pr_warn("invalid PI settings.\n");
			return -EINVAL;
		}
		return 0;
	}

	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) {
		pr_warn("integrity support disabled.\n");
		return -EINVAL;
	}

	if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE &&
	    (bi->flags & BLK_INTEGRITY_REF_TAG)) {
		pr_warn("ref tag not supported without checksum.\n");
		return -EINVAL;
	}

	if (!bi->interval_exp)
		bi->interval_exp = ilog2(lim->logical_block_size);

	return 0;
}
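
/*
 * Illustrative sketch (values assumed, loosely modelled on T10 PI): a driver
 * advertising 8 bytes of CRC protection information per logical block with
 * reference tags could fill in the limits like this before validation:
 *
 *	lim.integrity.csum_type = BLK_INTEGRITY_CSUM_CRC;
 *	lim.integrity.tuple_size = 8;
 *	lim.integrity.flags = BLK_INTEGRITY_REF_TAG;
 *
 * interval_exp may be left zero, in which case it defaults to
 * ilog2(logical_block_size) above.
 */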
136 
137 /*
138  * Returns max guaranteed bytes which we can fit in a bio.
139  *
140  * We request that an atomic_write is ITER_UBUF iov_iter (so a single vector),
141  * so we assume that we can fit in at least PAGE_SIZE in a segment, apart from
142  * the first and last segments.
143  */
blk_queue_max_guaranteed_bio(struct queue_limits * lim)144 static unsigned int blk_queue_max_guaranteed_bio(struct queue_limits *lim)
145 {
146 	unsigned int max_segments = min(BIO_MAX_VECS, lim->max_segments);
147 	unsigned int length;
148 
149 	length = min(max_segments, 2) * lim->logical_block_size;
150 	if (max_segments > 2)
151 		length += (max_segments - 2) * PAGE_SIZE;
152 
153 	return length;
154 }
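
/*
 * Worked example (assuming 4 KiB pages, 512 byte logical blocks and
 * max_segments >= BIO_MAX_VECS == 256): length = 2 * 512 + 254 * 4096 =
 * 1041408 bytes are guaranteed to fit in a single bio.
 */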

static void blk_atomic_writes_update_limits(struct queue_limits *lim)
{
	unsigned int unit_limit = min(lim->max_hw_sectors << SECTOR_SHIFT,
					blk_queue_max_guaranteed_bio(lim));

	unit_limit = rounddown_pow_of_two(unit_limit);

	lim->atomic_write_max_sectors =
		min(lim->atomic_write_hw_max >> SECTOR_SHIFT,
			lim->max_hw_sectors);
	lim->atomic_write_unit_min =
		min(lim->atomic_write_hw_unit_min, unit_limit);
	lim->atomic_write_unit_max =
		min(lim->atomic_write_hw_unit_max, unit_limit);
	lim->atomic_write_boundary_sectors =
		lim->atomic_write_hw_boundary >> SECTOR_SHIFT;
}
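
/*
 * Worked example (values assumed): with max_hw_sectors == 2048 (1 MiB) and a
 * guaranteed bio of 1041408 bytes, unit_limit =
 * rounddown_pow_of_two(min(1048576, 1041408)) = 524288.  A device reporting
 * atomic_write_hw_unit_max == 64K is therefore not clamped:
 * atomic_write_unit_max = min(65536, 524288) = 65536 bytes.
 */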

static void blk_validate_atomic_write_limits(struct queue_limits *lim)
{
	unsigned int boundary_sectors;

	if (!(lim->features & BLK_FEAT_ATOMIC_WRITES))
		goto unsupported;

	if (!lim->atomic_write_hw_max)
		goto unsupported;

	if (WARN_ON_ONCE(!is_power_of_2(lim->atomic_write_hw_unit_min)))
		goto unsupported;

	if (WARN_ON_ONCE(!is_power_of_2(lim->atomic_write_hw_unit_max)))
		goto unsupported;

	if (WARN_ON_ONCE(lim->atomic_write_hw_unit_min >
			 lim->atomic_write_hw_unit_max))
		goto unsupported;

	if (WARN_ON_ONCE(lim->atomic_write_hw_unit_max >
			 lim->atomic_write_hw_max))
		goto unsupported;

	boundary_sectors = lim->atomic_write_hw_boundary >> SECTOR_SHIFT;

	if (boundary_sectors) {
		if (WARN_ON_ONCE(lim->atomic_write_hw_max >
				 lim->atomic_write_hw_boundary))
			goto unsupported;
		/*
		 * A feature of boundary support is that it disallows merging
		 * bios which would result in a request that crosses either a
		 * chunk sector or atomic write HW boundary, even though chunk
		 * sectors may be set just for performance.
		 * For simplicity, disallow atomic writes for a chunk sector
		 * which is non-zero and smaller than the atomic write HW
		 * boundary.
		 * Furthermore, chunk sectors must be a multiple of the atomic
		 * write HW boundary.  Otherwise boundary support becomes
		 * complicated.
		 * Devices which do not conform to these rules can be dealt
		 * with if and when they show up.
		 */
		if (WARN_ON_ONCE(lim->chunk_sectors % boundary_sectors))
			goto unsupported;

		/*
		 * The boundary size just needs to be a multiple of unit_max
		 * (and not necessarily a power-of-2), so the following check
		 * could be relaxed in future.
		 * Furthermore, if needed, unit_max could even be reduced so
		 * that it is compliant with a !power-of-2 boundary.
		 */
		if (!is_power_of_2(boundary_sectors))
			goto unsupported;
	}

	blk_atomic_writes_update_limits(lim);
	return;

unsupported:
	lim->atomic_write_max_sectors = 0;
	lim->atomic_write_boundary_sectors = 0;
	lim->atomic_write_unit_min = 0;
	lim->atomic_write_unit_max = 0;
}

/*
 * Check that the limits in lim are valid, initialize defaults for unset
 * values, and cap values based on others where needed.
 */
int blk_validate_limits(struct queue_limits *lim)
{
	unsigned int max_hw_sectors;
	unsigned int logical_block_sectors;
	unsigned long seg_size;
	int err;

	/*
	 * Unless otherwise specified, default to 512 byte logical blocks and a
	 * physical block size equal to the logical block size.
	 */
	if (!lim->logical_block_size)
		lim->logical_block_size = SECTOR_SIZE;
	else if (blk_validate_block_size(lim->logical_block_size)) {
		pr_warn("Invalid logical block size (%d)\n", lim->logical_block_size);
		return -EINVAL;
	}
	if (lim->physical_block_size < lim->logical_block_size)
		lim->physical_block_size = lim->logical_block_size;

	/*
	 * The minimum I/O size defaults to the physical block size unless
	 * explicitly overridden.
	 */
	if (lim->io_min < lim->physical_block_size)
		lim->io_min = lim->physical_block_size;

	/*
	 * The optimal I/O size may not be aligned to the physical block size
	 * (because it may be limited by DMA engines which have no clue about
	 * the block size of the disks attached to them), so we round it down
	 * here.
	 */
	lim->io_opt = round_down(lim->io_opt, lim->physical_block_size);

	/*
	 * max_hw_sectors has a somewhat weird default for historical reasons,
	 * but drivers really should set their own instead of relying on this
	 * value.
	 *
	 * The block layer relies on the fact that every driver can
	 * handle at least a page worth of data per I/O, and needs the value
	 * aligned to the logical block size.
	 */
	if (!lim->max_hw_sectors)
		lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
	if (WARN_ON_ONCE(lim->max_hw_sectors < PAGE_SECTORS))
		return -EINVAL;
	logical_block_sectors = lim->logical_block_size >> SECTOR_SHIFT;
	if (WARN_ON_ONCE(logical_block_sectors > lim->max_hw_sectors))
		return -EINVAL;
	lim->max_hw_sectors = round_down(lim->max_hw_sectors,
			logical_block_sectors);

	/*
	 * The actual max_sectors value is a complex beast and also takes the
	 * max_dev_sectors value (set by SCSI ULPs) and a user configurable
	 * value into account.  The ->max_sectors value is always calculated
	 * from these, so directly setting it won't have any effect.
	 */
	max_hw_sectors = min_not_zero(lim->max_hw_sectors,
				lim->max_dev_sectors);
	if (lim->max_user_sectors) {
		if (lim->max_user_sectors < BLK_MIN_SEGMENT_SIZE / SECTOR_SIZE)
			return -EINVAL;
		lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
	} else if (lim->io_opt > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
		lim->max_sectors =
			min(max_hw_sectors, lim->io_opt >> SECTOR_SHIFT);
	} else if (lim->io_min > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
		lim->max_sectors =
			min(max_hw_sectors, lim->io_min >> SECTOR_SHIFT);
	} else {
		lim->max_sectors = min(max_hw_sectors, BLK_DEF_MAX_SECTORS_CAP);
	}
	lim->max_sectors = round_down(lim->max_sectors,
			logical_block_sectors);

	/*
	 * Random default for the maximum number of segments.  Drivers should
	 * not rely on this and should set their own.
	 */
	if (!lim->max_segments)
		lim->max_segments = BLK_MAX_SEGMENTS;

	lim->max_discard_sectors =
		min(lim->max_hw_discard_sectors, lim->max_user_discard_sectors);

	if (!lim->max_discard_segments)
		lim->max_discard_segments = 1;

	if (lim->discard_granularity < lim->physical_block_size)
		lim->discard_granularity = lim->physical_block_size;

	/*
	 * By default there is no limit on the segment boundary alignment,
	 * but if there is one it can't be smaller than the page size as
	 * that would break all the normal I/O patterns.
	 */
	if (!lim->seg_boundary_mask)
		lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	if (WARN_ON_ONCE(lim->seg_boundary_mask < BLK_MIN_SEGMENT_SIZE - 1))
		return -EINVAL;

	/*
	 * Stacking devices may have both a virtual boundary and a max segment
	 * size limit, so allow this setting now, and long-term the two
	 * might need to move out of stacking limits since we have immutable
	 * bvecs and lower layer bio splitting is supposed to handle the two
	 * correctly.
	 */
	if (lim->virt_boundary_mask) {
		if (!lim->max_segment_size)
			lim->max_segment_size = UINT_MAX;
	} else {
		/*
		 * The maximum segment size has an odd historic 64k default that
		 * drivers probably should override.  Just like the I/O size we
		 * require drivers to at least handle a full page per segment.
		 */
		if (!lim->max_segment_size)
			lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
		if (WARN_ON_ONCE(lim->max_segment_size < BLK_MIN_SEGMENT_SIZE))
			return -EINVAL;
	}

	/* setup min segment size for building new segment in fast path */
	if (lim->seg_boundary_mask > lim->max_segment_size - 1)
		seg_size = lim->max_segment_size;
	else
		seg_size = lim->seg_boundary_mask + 1;
	lim->min_segment_size = min_t(unsigned int, seg_size, PAGE_SIZE);

	/*
	 * We require drivers to at least do logical block aligned I/O, but
	 * historically could not check for that due to the separate calls
	 * to set the limits.  Once the transition is finished the check
	 * below should be narrowed down to check the logical block size.
	 */
	if (!lim->dma_alignment)
		lim->dma_alignment = SECTOR_SIZE - 1;
	if (WARN_ON_ONCE(lim->dma_alignment > PAGE_SIZE))
		return -EINVAL;

	if (lim->alignment_offset) {
		lim->alignment_offset &= (lim->physical_block_size - 1);
		lim->flags &= ~BLK_FLAG_MISALIGNED;
	}

	if (!(lim->features & BLK_FEAT_WRITE_CACHE))
		lim->features &= ~BLK_FEAT_FUA;

	blk_validate_atomic_write_limits(lim);

	err = blk_validate_integrity_limits(lim);
	if (err)
		return err;
	return blk_validate_zoned_limits(lim);
}
EXPORT_SYMBOL_GPL(blk_validate_limits);
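
/*
 * Illustrative sketch (driver names assumed): a blk-mq driver typically
 * builds its limits up front and passes them to the disk allocation helper,
 * which validates them via this function:
 *
 *	struct queue_limits lim = {
 *		.logical_block_size	= 4096,
 *		.max_hw_sectors		= 2048,
 *	};
 *	struct gendisk *disk;
 *
 *	disk = blk_mq_alloc_disk(&dev->tag_set, &lim, dev);
 *	if (IS_ERR(disk))
 *		return PTR_ERR(disk);
 */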

/*
 * Set the default limits for a newly allocated queue.  @lim contains the
 * initial limits set by the driver, which could be no limits at all, in which
 * case all fields are cleared to zero.
 */
int blk_set_default_limits(struct queue_limits *lim)
{
	/*
	 * Most defaults are set by capping the bounds in blk_validate_limits,
	 * but max_user_discard_sectors is special and needs an explicit
	 * initialization to the max value here.
	 */
	lim->max_user_discard_sectors = UINT_MAX;
	return blk_validate_limits(lim);
}

/**
 * queue_limits_commit_update - commit an atomic update of queue limits
 * @q:		queue to update
 * @lim:	limits to apply
 *
 * Apply the limits in @lim that were obtained from queue_limits_start_update()
 * and updated by the caller to @q.  The caller must have frozen the queue or
 * ensure that there are no outstanding I/Os by other means.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_commit_update(struct request_queue *q,
		struct queue_limits *lim)
{
	int error;

	error = blk_validate_limits(lim);
	if (error)
		goto out_unlock;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	if (q->crypto_profile && lim->integrity.tag_size) {
		pr_warn("blk-integrity: Integrity and hardware inline encryption are not supported together.\n");
		error = -EINVAL;
		goto out_unlock;
	}
#endif

	q->limits = *lim;
	if (q->disk)
		blk_apply_bdi_limits(q->disk->bdi, lim);
out_unlock:
	mutex_unlock(&q->limits_lock);
	return error;
}
EXPORT_SYMBOL_GPL(queue_limits_commit_update);
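
/*
 * Illustrative sketch of the intended calling pattern ("new_value" is an
 * assumed, already validated sector count): take a local snapshot with
 * queue_limits_start_update(), which acquires q->limits_lock, modify it,
 * then commit.  The commit helpers drop the lock on return:
 *
 *	struct queue_limits lim = queue_limits_start_update(q);
 *
 *	lim.max_user_sectors = new_value;
 *	err = queue_limits_commit_update_frozen(q, &lim);
 */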

/**
 * queue_limits_commit_update_frozen - commit an atomic update of queue limits
 * @q:		queue to update
 * @lim:	limits to apply
 *
 * Apply the limits in @lim that were obtained from queue_limits_start_update()
 * and updated with the new values by the caller to @q.  Freezes the queue
 * before the update and unfreezes it after.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_commit_update_frozen(struct request_queue *q,
		struct queue_limits *lim)
{
	unsigned int memflags;
	int ret;

	memflags = blk_mq_freeze_queue(q);
	ret = queue_limits_commit_update(q, lim);
	blk_mq_unfreeze_queue(q, memflags);

	return ret;
}
EXPORT_SYMBOL_GPL(queue_limits_commit_update_frozen);

/**
 * queue_limits_set - apply queue limits to queue
 * @q:		queue to update
 * @lim:	limits to apply
 *
 * Apply the limits in @lim that were freshly initialized to @q.
 * To update existing limits use queue_limits_start_update() and
 * queue_limits_commit_update() instead.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
{
	mutex_lock(&q->limits_lock);
	return queue_limits_commit_update(q, lim);
}
EXPORT_SYMBOL_GPL(queue_limits_set);

static int queue_limit_alignment_offset(const struct queue_limits *lim,
		sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
		<< SECTOR_SHIFT;

	return (granularity + lim->alignment_offset - alignment) % granularity;
}
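
/*
 * Worked example: with a 4096 byte physical block size and io_min == 4096
 * (granularity == 8 sectors), alignment_offset == 0 and a partition starting
 * at sector 63: alignment = (63 % 8) << 9 = 3584, so the result is
 * (4096 + 0 - 3584) % 4096 = 512 bytes of misalignment.
 */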

static unsigned int queue_limit_discard_alignment(
		const struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment, granularity, offset;

	if (!lim->max_discard_sectors)
		return 0;

	/* Why are these in bytes, not sectors? */
	alignment = lim->discard_alignment >> SECTOR_SHIFT;
	granularity = lim->discard_granularity >> SECTOR_SHIFT;

	/* Offset of the partition start in 'granularity' sectors */
	offset = sector_div(sector, granularity);

	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
	offset = (granularity + alignment - offset) % granularity;

	/* Turn it back into bytes, gaah */
	return offset << SECTOR_SHIFT;
}
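
/*
 * Worked example: with discard_granularity == 1 MiB (2048 sectors),
 * discard_alignment == 0 and a partition starting at sector 34:
 * offset = 34, then (2048 + 0 - 34) % 2048 = 2014 sectors, returned as
 * 2014 << 9 = 1031168 bytes.
 */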

static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
{
	sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
	if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
		sectors = PAGE_SIZE >> SECTOR_SHIFT;
	return sectors;
}

/* Check if second and later bottom devices are compliant */
static bool blk_stack_atomic_writes_tail(struct queue_limits *t,
				struct queue_limits *b)
{
	/* We're not going to support different boundary sizes.. yet */
	if (t->atomic_write_hw_boundary != b->atomic_write_hw_boundary)
		return false;

	/* Can't support this */
	if (t->atomic_write_hw_unit_min > b->atomic_write_hw_unit_max)
		return false;

	/* Or this */
	if (t->atomic_write_hw_unit_max < b->atomic_write_hw_unit_min)
		return false;

	t->atomic_write_hw_max = min(t->atomic_write_hw_max,
				b->atomic_write_hw_max);
	t->atomic_write_hw_unit_min = max(t->atomic_write_hw_unit_min,
				b->atomic_write_hw_unit_min);
	t->atomic_write_hw_unit_max = min(t->atomic_write_hw_unit_max,
				b->atomic_write_hw_unit_max);
	return true;
}

/* Check for valid boundary of first bottom device */
static bool blk_stack_atomic_writes_boundary_head(struct queue_limits *t,
				struct queue_limits *b)
{
	/*
	 * Ensure atomic write boundary is aligned with chunk sectors. Stacked
	 * devices store chunk sectors in t->io_min.
	 */
	if (b->atomic_write_hw_boundary > t->io_min &&
	    b->atomic_write_hw_boundary % t->io_min)
		return false;
	if (t->io_min > b->atomic_write_hw_boundary &&
	    t->io_min % b->atomic_write_hw_boundary)
		return false;

	t->atomic_write_hw_boundary = b->atomic_write_hw_boundary;
	return true;
}

/* Check stacking of first bottom device */
static bool blk_stack_atomic_writes_head(struct queue_limits *t,
				struct queue_limits *b)
{
	if (b->atomic_write_hw_boundary &&
	    !blk_stack_atomic_writes_boundary_head(t, b))
		return false;

	if (t->io_min <= SECTOR_SIZE) {
		/* No chunk sectors, so use bottom device values directly */
		t->atomic_write_hw_unit_max = b->atomic_write_hw_unit_max;
		t->atomic_write_hw_unit_min = b->atomic_write_hw_unit_min;
		t->atomic_write_hw_max = b->atomic_write_hw_max;
		return true;
	}

	/*
	 * Find values for limits which work for chunk size.
	 * b->atomic_write_hw_unit_{min, max} may not be aligned with chunk
	 * size (t->io_min), as chunk size is not restricted to a power-of-2.
	 * So we need to find the highest power-of-2 which works for the chunk
	 * size.
	 * As an example scenario, we could have b->unit_max = 16K and
	 * t->io_min = 24K. For this case, reduce t->unit_max to a value
	 * aligned with both limits, i.e. 8K in this example.
	 */
	t->atomic_write_hw_unit_max = b->atomic_write_hw_unit_max;
	while (t->io_min % t->atomic_write_hw_unit_max)
		t->atomic_write_hw_unit_max /= 2;

	t->atomic_write_hw_unit_min = min(b->atomic_write_hw_unit_min,
					  t->atomic_write_hw_unit_max);
	t->atomic_write_hw_max = min(b->atomic_write_hw_max, t->io_min);

	return true;
}

static void blk_stack_atomic_writes_limits(struct queue_limits *t,
				struct queue_limits *b, sector_t start)
{
	if (!(b->features & BLK_FEAT_ATOMIC_WRITES))
		goto unsupported;

	if (!b->atomic_write_hw_unit_min)
		goto unsupported;

	if (!blk_atomic_write_start_sect_aligned(start, b))
		goto unsupported;

	/*
	 * If atomic_write_hw_max is set, we have already stacked 1x bottom
	 * device, so check for compliance.
	 */
	if (t->atomic_write_hw_max) {
		if (!blk_stack_atomic_writes_tail(t, b))
			goto unsupported;
		return;
	}

	if (!blk_stack_atomic_writes_head(t, b))
		goto unsupported;
	return;

unsupported:
	t->atomic_write_hw_max = 0;
	t->atomic_write_hw_unit_max = 0;
	t->atomic_write_hw_unit_min = 0;
	t->atomic_write_hw_boundary = 0;
}

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		     sector_t start)
{
	unsigned int top, bottom, alignment, ret = 0;

	t->features |= (b->features & BLK_FEAT_INHERIT_MASK);

	/*
	 * Some features need to be supported both by the stacking driver and
	 * all underlying devices.  The stacking driver sets these flags before
	 * stacking the limits, and this will clear the flags if any of the
	 * underlying devices does not support it.
	 */
	if (!(b->features & BLK_FEAT_NOWAIT))
		t->features &= ~BLK_FEAT_NOWAIT;
	if (!(b->features & BLK_FEAT_POLL))
		t->features &= ~BLK_FEAT_POLL;

	t->flags |= (b->flags & BLK_FLAG_MISALIGNED);

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_user_sectors = min_not_zero(t->max_user_sectors,
			b->max_user_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
					b->max_write_zeroes_sectors);
	t->max_hw_zone_append_sectors = min(t->max_hw_zone_append_sectors,
					b->max_hw_zone_append_sectors);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);
	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
					    b->virt_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
	t->max_discard_segments = min_not_zero(t->max_discard_segments,
					       b->max_discard_segments);
	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
						 b->max_integrity_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	alignment = queue_limit_alignment_offset(b, start);

	/*
	 * The bottom device has a different alignment.  Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) % min(top, bottom)) {
			t->flags |= BLK_FLAG_MISALIGNED;
			ret = -1;
		}
	}

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
	t->dma_alignment = max(t->dma_alignment, b->dma_alignment);

	/* Set non-power-of-2 compatible chunk_sectors boundary */
	if (b->chunk_sectors)
		t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* chunk_sectors a multiple of the physical block size? */
	if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
		t->chunk_sectors = 0;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
		% max(t->physical_block_size, t->io_min);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
	t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
	t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
							 b->max_hw_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
			t->discard_granularity;
	}
	t->max_secure_erase_sectors = min_not_zero(t->max_secure_erase_sectors,
						   b->max_secure_erase_sectors);
	t->zone_write_granularity = max(t->zone_write_granularity,
					b->zone_write_granularity);
	if (!(t->features & BLK_FEAT_ZONED)) {
		t->zone_write_granularity = 0;
		t->max_zone_append_sectors = 0;
	}
	blk_stack_atomic_writes_limits(t, b, start);

	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);

/**
 * queue_limits_stack_bdev - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 * @pfx: prefix to use for warnings logged
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 */
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
		sector_t offset, const char *pfx)
{
	if (blk_stack_limits(t, bdev_limits(bdev),
			get_start_sect(bdev) + offset))
		pr_notice("%s: Warning: Device %pg is misaligned\n",
			pfx, bdev);
}
EXPORT_SYMBOL_GPL(queue_limits_stack_bdev);

/**
 * queue_limits_stack_integrity - stack integrity profile
 * @t: target queue limits
 * @b: base queue limits
 *
 * Check if the integrity profile in @b can be stacked into the
 * target @t.  Stacking is possible if either:
 *
 *   a) @t does not have any integrity information stacked into it yet
 *   b) the integrity profile in @b is identical to the one in @t
 *
 * If @b can be stacked into @t, return %true.  Else return %false and clear the
 * integrity information in @t.
 */
bool queue_limits_stack_integrity(struct queue_limits *t,
		struct queue_limits *b)
{
	struct blk_integrity *ti = &t->integrity;
	struct blk_integrity *bi = &b->integrity;

	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return true;

	if (!ti->tuple_size) {
		/* inherit the settings from the first underlying device */
		if (!(ti->flags & BLK_INTEGRITY_STACKED)) {
			ti->flags = BLK_INTEGRITY_DEVICE_CAPABLE |
				(bi->flags & BLK_INTEGRITY_REF_TAG);
			ti->csum_type = bi->csum_type;
			ti->tuple_size = bi->tuple_size;
			ti->pi_offset = bi->pi_offset;
			ti->interval_exp = bi->interval_exp;
			ti->tag_size = bi->tag_size;
			goto done;
		}
		if (!bi->tuple_size)
			goto done;
	}

	if (ti->tuple_size != bi->tuple_size)
		goto incompatible;
	if (ti->interval_exp != bi->interval_exp)
		goto incompatible;
	if (ti->tag_size != bi->tag_size)
		goto incompatible;
	if (ti->csum_type != bi->csum_type)
		goto incompatible;
	if ((ti->flags & BLK_INTEGRITY_REF_TAG) !=
	    (bi->flags & BLK_INTEGRITY_REF_TAG))
		goto incompatible;

done:
	ti->flags |= BLK_INTEGRITY_STACKED;
	return true;

incompatible:
	memset(ti, 0, sizeof(*ti));
	return false;
}
EXPORT_SYMBOL_GPL(queue_limits_stack_integrity);

/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q:		the request queue for the device
 * @depth:	queue depth
 */
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
	q->queue_depth = depth;
	rq_qos_queue_depth_changed(q);
}
EXPORT_SYMBOL(blk_set_queue_depth);
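
/*
 * Example (illustrative, names assumed): a SCSI LLD reacting to a device
 * reporting a new queue depth might call:
 *
 *	blk_set_queue_depth(sdev->request_queue, depth);
 *
 * so that rq-qos throttlers can rescale to the new depth.
 */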

int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.flags & BLK_FLAG_MISALIGNED)
		return -1;
	if (bdev_is_partition(bdev))
		return queue_limit_alignment_offset(&q->limits,
				bdev->bd_start_sect);
	return q->limits.alignment_offset;
}
EXPORT_SYMBOL_GPL(bdev_alignment_offset);

unsigned int bdev_discard_alignment(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (bdev_is_partition(bdev))
		return queue_limit_discard_alignment(&q->limits,
				bdev->bd_start_sect);
	return q->limits.discard_alignment;
}
EXPORT_SYMBOL_GPL(bdev_discard_alignment);