xref: /linux/block/blk-settings.c (revision 3f1c07fc21c68bd3bd2df9d2c9441f6485e934d9)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Functions related to setting various queue properties from drivers
4  */
5 #include <linux/kernel.h>
6 #include <linux/module.h>
7 #include <linux/init.h>
8 #include <linux/bio.h>
9 #include <linux/blk-integrity.h>
10 #include <linux/pagemap.h>
11 #include <linux/backing-dev-defs.h>
12 #include <linux/gcd.h>
13 #include <linux/lcm.h>
14 #include <linux/jiffies.h>
15 #include <linux/gfp.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/t10-pi.h>
18 #include <linux/crc64.h>
19 
20 #include "blk.h"
21 #include "blk-rq-qos.h"
22 #include "blk-wbt.h"
23 
24 void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
25 {
26 	WRITE_ONCE(q->rq_timeout, timeout);
27 }
28 EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
29 
30 /**
31  * blk_set_stacking_limits - set default limits for stacking devices
32  * @lim:  the queue_limits structure to reset
33  *
34  * Prepare queue limits for applying limits from underlying devices using
35  * blk_stack_limits().
36  */
37 void blk_set_stacking_limits(struct queue_limits *lim)
38 {
39 	memset(lim, 0, sizeof(*lim));
40 	lim->logical_block_size = SECTOR_SIZE;
41 	lim->physical_block_size = SECTOR_SIZE;
42 	lim->io_min = SECTOR_SIZE;
43 	lim->discard_granularity = SECTOR_SIZE;
44 	lim->dma_alignment = SECTOR_SIZE - 1;
45 	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
46 
47 	/* Inherit limits from component devices */
48 	lim->max_segments = USHRT_MAX;
49 	lim->max_discard_segments = USHRT_MAX;
50 	lim->max_hw_sectors = UINT_MAX;
51 	lim->max_segment_size = UINT_MAX;
52 	lim->max_sectors = UINT_MAX;
53 	lim->max_dev_sectors = UINT_MAX;
54 	lim->max_write_zeroes_sectors = UINT_MAX;
55 	lim->max_hw_wzeroes_unmap_sectors = UINT_MAX;
56 	lim->max_user_wzeroes_unmap_sectors = UINT_MAX;
57 	lim->max_hw_zone_append_sectors = UINT_MAX;
58 	lim->max_user_discard_sectors = UINT_MAX;
59 	lim->atomic_write_hw_max = UINT_MAX;
60 }
61 EXPORT_SYMBOL(blk_set_stacking_limits);
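
/*
 * Illustrative sketch only (not part of the kernel source): a stacking driver
 * such as MD or DM would typically start from these defaults and then fold in
 * each component device, e.g.:
 *
 *	struct queue_limits lim;
 *
 *	blk_set_stacking_limits(&lim);
 *	list_for_each_entry(dev, &devices, list)	// hypothetical per-driver list
 *		queue_limits_stack_bdev(&lim, dev->bdev, dev->data_offset, "mydrv");
 *
 * 'dev', 'devices', 'data_offset' and "mydrv" are placeholder names; see
 * queue_limits_stack_bdev() below for the real stacking helper.
 */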
62 
63 void blk_apply_bdi_limits(struct backing_dev_info *bdi,
64 		struct queue_limits *lim)
65 {
66 	u64 io_opt = lim->io_opt;
67 
68 	/*
69 	 * For read-ahead of large files to be effective, we need to read ahead
70 	 * at least twice the optimal I/O size. For rotational devices that do
71 	 * not report an optimal I/O size (e.g. ATA HDDs), use the maximum I/O
72 	 * size to avoid falling back to the (rather inefficient) small default
73 	 * read-ahead size.
74 	 *
75 	 * There is no hardware limitation for the read-ahead size and the user
76 	 * might have increased the read-ahead size through sysfs, so don't ever
77 	 * decrease it.
78 	 */
79 	if (!io_opt && (lim->features & BLK_FEAT_ROTATIONAL))
80 		io_opt = (u64)lim->max_sectors << SECTOR_SHIFT;
81 
82 	bdi->ra_pages = max3(bdi->ra_pages,
83 				io_opt * 2 >> PAGE_SHIFT,
84 				VM_READAHEAD_PAGES);
85 	bdi->io_pages = lim->max_sectors >> PAGE_SECTORS_SHIFT;
86 }
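
/*
 * Worked example of the read-ahead sizing above (illustrative numbers only):
 * with lim->io_opt = 1 MiB and 4 KiB pages, io_opt * 2 >> PAGE_SHIFT is
 * 2 MiB / 4 KiB = 512 pages, so ra_pages becomes at least 512 unless the user
 * already configured something larger via sysfs.
 */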
87 
88 static int blk_validate_zoned_limits(struct queue_limits *lim)
89 {
90 	if (!(lim->features & BLK_FEAT_ZONED)) {
91 		if (WARN_ON_ONCE(lim->max_open_zones) ||
92 		    WARN_ON_ONCE(lim->max_active_zones) ||
93 		    WARN_ON_ONCE(lim->zone_write_granularity) ||
94 		    WARN_ON_ONCE(lim->max_zone_append_sectors))
95 			return -EINVAL;
96 		return 0;
97 	}
98 
99 	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED)))
100 		return -EINVAL;
101 
102 	/*
103 	 * Given that active zones include open zones, the maximum number of
104 	 * open zones cannot be larger than the maximum number of active zones.
105 	 */
106 	if (lim->max_active_zones &&
107 	    lim->max_open_zones > lim->max_active_zones)
108 		return -EINVAL;
109 
110 	if (lim->zone_write_granularity < lim->logical_block_size)
111 		lim->zone_write_granularity = lim->logical_block_size;
112 
113 	/*
114 	 * The Zone Append size is limited by the maximum I/O size and the zone
115 	 * size given that it can't span zones.
116 	 *
117 	 * If no max_hw_zone_append_sectors limit is provided, the block layer
118 	 * will emulate it; otherwise we're also bound by the hardware limit.
119 	 */
120 	lim->max_zone_append_sectors =
121 		min_not_zero(lim->max_hw_zone_append_sectors,
122 			min(lim->chunk_sectors, lim->max_hw_sectors));
123 	return 0;
124 }
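
/*
 * Worked example of the zone append clamping above (illustrative numbers
 * only): with a 256 MiB zone size (chunk_sectors = 524288), max_hw_sectors =
 * 2048 and no hardware zone append limit, max_zone_append_sectors ends up as
 * min(524288, 2048) = 2048 sectors, i.e. zone append is emulated and bounded
 * by the regular maximum I/O size.
 */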
125 
126 /*
127  * Maximum size of I/O that needs a block layer integrity buffer.  Limited
128  * by the number of intervals whose metadata fits into the maximum integrity
129  * buffer size.  Because the buffer is a single segment it is also limited
130  * by the maximum segment size.
131  */
132 static inline unsigned int max_integrity_io_size(struct queue_limits *lim)
133 {
134 	return min_t(unsigned int, lim->max_segment_size,
135 		(BLK_INTEGRITY_MAX_SIZE / lim->integrity.metadata_size) <<
136 			lim->integrity.interval_exp);
137 }
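
/*
 * Worked example for max_integrity_io_size() (illustrative, and assuming
 * BLK_INTEGRITY_MAX_SIZE is one 4 KiB page): with 8 bytes of metadata per
 * 4 KiB interval (interval_exp = 12), the buffer covers 4096 / 8 = 512
 * intervals, i.e. 512 << 12 = 2 MiB of data, further capped by
 * lim->max_segment_size.
 */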
138 
139 static int blk_validate_integrity_limits(struct queue_limits *lim)
140 {
141 	struct blk_integrity *bi = &lim->integrity;
142 
143 	if (!bi->metadata_size) {
144 		if (bi->csum_type != BLK_INTEGRITY_CSUM_NONE ||
145 		    bi->tag_size || ((bi->flags & BLK_INTEGRITY_REF_TAG))) {
146 			pr_warn("invalid PI settings.\n");
147 			return -EINVAL;
148 		}
149 		bi->flags |= BLK_INTEGRITY_NOGENERATE | BLK_INTEGRITY_NOVERIFY;
150 		return 0;
151 	}
152 
153 	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) {
154 		pr_warn("integrity support disabled.\n");
155 		return -EINVAL;
156 	}
157 
158 	if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE &&
159 	    (bi->flags & BLK_INTEGRITY_REF_TAG)) {
160 		pr_warn("ref tag not supported without checksum.\n");
161 		return -EINVAL;
162 	}
163 
164 	if (bi->pi_tuple_size > bi->metadata_size) {
165 		pr_warn("pi_tuple_size (%u) exceeds metadata_size (%u)\n",
166 			 bi->pi_tuple_size,
167 			 bi->metadata_size);
168 		return -EINVAL;
169 	}
170 
171 	switch (bi->csum_type) {
172 	case BLK_INTEGRITY_CSUM_NONE:
173 		if (bi->pi_tuple_size) {
174 			pr_warn("pi_tuple_size must be 0 when checksum type is none\n");
175 			return -EINVAL;
176 		}
177 		break;
178 	case BLK_INTEGRITY_CSUM_CRC:
179 	case BLK_INTEGRITY_CSUM_IP:
180 		if (bi->pi_tuple_size != sizeof(struct t10_pi_tuple)) {
181 			pr_warn("pi_tuple_size mismatch for T10 PI: expected %zu, got %u\n",
182 				 sizeof(struct t10_pi_tuple),
183 				 bi->pi_tuple_size);
184 			return -EINVAL;
185 		}
186 		break;
187 	case BLK_INTEGRITY_CSUM_CRC64:
188 		if (bi->pi_tuple_size != sizeof(struct crc64_pi_tuple)) {
189 			pr_warn("pi_tuple_size mismatch for CRC64 PI: expected %zu, got %u\n",
190 				 sizeof(struct crc64_pi_tuple),
191 				 bi->pi_tuple_size);
192 			return -EINVAL;
193 		}
194 		break;
195 	}
196 
197 	if (!bi->interval_exp)
198 		bi->interval_exp = ilog2(lim->logical_block_size);
199 
200 	/*
201 	 * The PI generation / validation helpers do not expect intervals to
202 	 * straddle multiple bio_vecs.  Enforce alignment so that those are
203 	 * never generated, and that each buffer is aligned as expected.
204 	 */
205 	if (bi->csum_type) {
206 		lim->dma_alignment = max(lim->dma_alignment,
207 					(1U << bi->interval_exp) - 1);
208 	}
209 
210 	/*
211 	 * The block layer automatically adds integrity data for bios that don't
212 	 * already have it.  Limit the I/O size so that a single maximum size
213 	 * metadata segment can cover the integrity data for the entire I/O.
214 	 */
215 	lim->max_sectors = min(lim->max_sectors,
216 		max_integrity_io_size(lim) >> SECTOR_SHIFT);
217 
218 	return 0;
219 }
220 
221 /*
222  * Returns max guaranteed bytes which we can fit in a bio.
223  *
224  * Atomic writes must be submitted as an ITER_UBUF iov_iter (i.e. a single
225  * vector), so we assume that every segment apart from the first and last
226  * can hold at least PAGE_SIZE.
227  */
228 static unsigned int blk_queue_max_guaranteed_bio(struct queue_limits *lim)
229 {
230 	unsigned int max_segments = min(BIO_MAX_VECS, lim->max_segments);
231 	unsigned int length;
232 
233 	length = min(max_segments, 2) * lim->logical_block_size;
234 	if (max_segments > 2)
235 		length += (max_segments - 2) * PAGE_SIZE;
236 
237 	return length;
238 }
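
/*
 * Worked example for blk_queue_max_guaranteed_bio() (illustrative numbers
 * only): with BIO_MAX_VECS = 256 available segments, 4 KiB pages and a 512
 * byte logical block size, the guaranteed length is 2 * 512 + 254 * 4096 =
 * 1041408 bytes, which (absent a smaller max_hw_sectors cap)
 * blk_atomic_writes_update_limits() below rounds down to a 512 KiB
 * power-of-two unit limit.
 */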
239 
240 static void blk_atomic_writes_update_limits(struct queue_limits *lim)
241 {
242 	unsigned int unit_limit = min(lim->max_hw_sectors << SECTOR_SHIFT,
243 					blk_queue_max_guaranteed_bio(lim));
244 
245 	unit_limit = rounddown_pow_of_two(unit_limit);
246 
247 	lim->atomic_write_max_sectors =
248 		min(lim->atomic_write_hw_max >> SECTOR_SHIFT,
249 			lim->max_hw_sectors);
250 	lim->atomic_write_unit_min =
251 		min(lim->atomic_write_hw_unit_min, unit_limit);
252 	lim->atomic_write_unit_max =
253 		min(lim->atomic_write_hw_unit_max, unit_limit);
254 	lim->atomic_write_boundary_sectors =
255 		lim->atomic_write_hw_boundary >> SECTOR_SHIFT;
256 }
257 
258 /*
259  * Test whether any boundary is aligned with any chunk size. Stacked
260  * devices store any stripe size in t->chunk_sectors.
261  */
262 static bool blk_valid_atomic_writes_boundary(unsigned int chunk_sectors,
263 					unsigned int boundary_sectors)
264 {
265 	if (!chunk_sectors || !boundary_sectors)
266 		return true;
267 
268 	if (boundary_sectors > chunk_sectors &&
269 	    boundary_sectors % chunk_sectors)
270 		return false;
271 
272 	if (chunk_sectors > boundary_sectors &&
273 	    chunk_sectors % boundary_sectors)
274 		return false;
275 
276 	return true;
277 }
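
/*
 * Illustrative examples for the boundary/chunk check above: chunk_sectors =
 * 128 with boundary_sectors = 32 is valid (128 is a multiple of 32), while
 * chunk_sectors = 96 with boundary_sectors = 64 is rejected because the
 * larger value is not a multiple of the smaller one.
 */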
278 
279 static void blk_validate_atomic_write_limits(struct queue_limits *lim)
280 {
281 	unsigned int boundary_sectors;
282 	unsigned int atomic_write_hw_max_sectors =
283 			lim->atomic_write_hw_max >> SECTOR_SHIFT;
284 
285 	if (!(lim->features & BLK_FEAT_ATOMIC_WRITES))
286 		goto unsupported;
287 
288 	/* UINT_MAX indicates stacked limits in initial state */
289 	if (lim->atomic_write_hw_max == UINT_MAX)
290 		goto unsupported;
291 
292 	if (!lim->atomic_write_hw_max)
293 		goto unsupported;
294 
295 	if (WARN_ON_ONCE(!is_power_of_2(lim->atomic_write_hw_unit_min)))
296 		goto unsupported;
297 
298 	if (WARN_ON_ONCE(!is_power_of_2(lim->atomic_write_hw_unit_max)))
299 		goto unsupported;
300 
301 	if (WARN_ON_ONCE(lim->atomic_write_hw_unit_min >
302 			 lim->atomic_write_hw_unit_max))
303 		goto unsupported;
304 
305 	if (WARN_ON_ONCE(lim->atomic_write_hw_unit_max >
306 			 lim->atomic_write_hw_max))
307 		goto unsupported;
308 
309 	if (WARN_ON_ONCE(lim->chunk_sectors &&
310 			atomic_write_hw_max_sectors > lim->chunk_sectors))
311 		goto unsupported;
312 
313 	boundary_sectors = lim->atomic_write_hw_boundary >> SECTOR_SHIFT;
314 
315 	if (boundary_sectors) {
316 		if (WARN_ON_ONCE(lim->atomic_write_hw_max >
317 				 lim->atomic_write_hw_boundary))
318 			goto unsupported;
319 
320 		if (WARN_ON_ONCE(!blk_valid_atomic_writes_boundary(
321 			lim->chunk_sectors, boundary_sectors)))
322 			goto unsupported;
323 
324 		/*
325 		 * The boundary size just needs to be a multiple of unit_max
326 		 * (and not necessarily a power-of-2), so the following check
327 		 * could be relaxed in the future.
328 		 * Furthermore, if needed, unit_max could even be reduced so
329 		 * that it is compliant with a !power-of-2 boundary.
330 		 */
331 		if (!is_power_of_2(boundary_sectors))
332 			goto unsupported;
333 	}
334 
335 	blk_atomic_writes_update_limits(lim);
336 	return;
337 
338 unsupported:
339 	lim->atomic_write_max_sectors = 0;
340 	lim->atomic_write_boundary_sectors = 0;
341 	lim->atomic_write_unit_min = 0;
342 	lim->atomic_write_unit_max = 0;
343 }
344 
345 /*
346  * Check that the limits in lim are valid, initialize defaults for unset
347  * values, and cap values based on others where needed.
348  */
349 int blk_validate_limits(struct queue_limits *lim)
350 {
351 	unsigned int max_hw_sectors;
352 	unsigned int logical_block_sectors;
353 	unsigned long seg_size;
354 	int err;
355 
356 	/*
357 	 * Unless otherwise specified, default to 512 byte logical blocks and a
358 	 * physical block size equal to the logical block size.
359 	 */
360 	if (!lim->logical_block_size)
361 		lim->logical_block_size = SECTOR_SIZE;
362 	else if (blk_validate_block_size(lim->logical_block_size)) {
363 		pr_warn("Invalid logical block size (%d)\n", lim->logical_block_size);
364 		return -EINVAL;
365 	}
366 	if (lim->physical_block_size < lim->logical_block_size) {
367 		lim->physical_block_size = lim->logical_block_size;
368 	} else if (!is_power_of_2(lim->physical_block_size)) {
369 		pr_warn("Invalid physical block size (%d)\n", lim->physical_block_size);
370 		return -EINVAL;
371 	}
372 
373 	/*
374 	 * The minimum I/O size defaults to the physical block size unless
375 	 * explicitly overridden.
376 	 */
377 	if (lim->io_min < lim->physical_block_size)
378 		lim->io_min = lim->physical_block_size;
379 
380 	/*
381 	 * The optimal I/O size may not be aligned to physical block size
382 	 * (because it may be limited by dma engines which have no clue about
383 	 * block size of the disks attached to them), so we round it down here.
384 	 */
385 	lim->io_opt = round_down(lim->io_opt, lim->physical_block_size);
386 
387 	/*
388 	 * max_hw_sectors has a somewhat weird default for historical reasons,
389 	 * but drivers really should set their own value instead of relying on
390 	 * it.
391 	 *
392 	 * The block layer relies on the fact that every driver can
393 	 * handle at least a page worth of data per I/O, and needs the value
394 	 * aligned to the logical block size.
395 	 */
396 	if (!lim->max_hw_sectors)
397 		lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
398 	if (WARN_ON_ONCE(lim->max_hw_sectors < PAGE_SECTORS))
399 		return -EINVAL;
400 	logical_block_sectors = lim->logical_block_size >> SECTOR_SHIFT;
401 	if (WARN_ON_ONCE(logical_block_sectors > lim->max_hw_sectors))
402 		return -EINVAL;
403 	lim->max_hw_sectors = round_down(lim->max_hw_sectors,
404 			logical_block_sectors);
405 
406 	/*
407 	 * The actual max_sectors value is a complex beast and also takes the
408 	 * max_dev_sectors value (set by SCSI ULPs) and a user configurable
409 	 * value into account.  The ->max_sectors value is always calculated
410 	 * from these, so directly setting it won't have any effect.
411 	 */
412 	max_hw_sectors = min_not_zero(lim->max_hw_sectors,
413 				lim->max_dev_sectors);
414 	if (lim->max_user_sectors) {
415 		if (lim->max_user_sectors < BLK_MIN_SEGMENT_SIZE / SECTOR_SIZE)
416 			return -EINVAL;
417 		lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
418 	} else if (lim->io_opt > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
419 		lim->max_sectors =
420 			min(max_hw_sectors, lim->io_opt >> SECTOR_SHIFT);
421 	} else if (lim->io_min > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
422 		lim->max_sectors =
423 			min(max_hw_sectors, lim->io_min >> SECTOR_SHIFT);
424 	} else {
425 		lim->max_sectors = min(max_hw_sectors, BLK_DEF_MAX_SECTORS_CAP);
426 	}
427 	lim->max_sectors = round_down(lim->max_sectors,
428 			logical_block_sectors);
429 
430 	/*
431 	 * Arbitrary default for the maximum number of segments.  Drivers should
432 	 * not rely on this and should set their own value.
433 	 */
434 	if (!lim->max_segments)
435 		lim->max_segments = BLK_MAX_SEGMENTS;
436 
437 	if (lim->max_hw_wzeroes_unmap_sectors &&
438 	    lim->max_hw_wzeroes_unmap_sectors != lim->max_write_zeroes_sectors)
439 		return -EINVAL;
440 	lim->max_wzeroes_unmap_sectors = min(lim->max_hw_wzeroes_unmap_sectors,
441 			lim->max_user_wzeroes_unmap_sectors);
442 
443 	lim->max_discard_sectors =
444 		min(lim->max_hw_discard_sectors, lim->max_user_discard_sectors);
445 
446 	/*
447 	 * When discard is not supported, discard_granularity should be reported
448 	 * as 0 to userspace.
449 	 */
450 	if (lim->max_discard_sectors)
451 		lim->discard_granularity =
452 			max(lim->discard_granularity, lim->physical_block_size);
453 	else
454 		lim->discard_granularity = 0;
455 
456 	if (!lim->max_discard_segments)
457 		lim->max_discard_segments = 1;
458 
459 	/*
460 	 * By default there is no limit on the segment boundary alignment,
461 	 * but if there is one it can't be smaller than the page size as
462 	 * that would break all the normal I/O patterns.
463 	 */
464 	if (!lim->seg_boundary_mask)
465 		lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
466 	if (WARN_ON_ONCE(lim->seg_boundary_mask < BLK_MIN_SEGMENT_SIZE - 1))
467 		return -EINVAL;
468 
469 	/*
470 	 * A stacking device may have both a virtual boundary and a max segment
471 	 * size limit, so allow this combination for now.  Long-term the two
472 	 * might need to move out of the stacking limits, since we have immutable
473 	 * bvecs and lower layer bio splitting is supposed to handle the two
474 	 * correctly.
475 	 */
476 	if (lim->virt_boundary_mask) {
477 		if (!lim->max_segment_size)
478 			lim->max_segment_size = UINT_MAX;
479 	} else {
480 		/*
481 		 * The maximum segment size has an odd historic 64k default that
482 		 * drivers probably should override.  Just like the I/O size we
483 		 * require drivers to at least handle a full page per segment.
484 		 */
485 		if (!lim->max_segment_size)
486 			lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
487 		if (WARN_ON_ONCE(lim->max_segment_size < BLK_MIN_SEGMENT_SIZE))
488 			return -EINVAL;
489 	}
490 
491 	/* setup max segment size for building new segment in fast path */
492 	if (lim->seg_boundary_mask > lim->max_segment_size - 1)
493 		seg_size = lim->max_segment_size;
494 	else
495 		seg_size = lim->seg_boundary_mask + 1;
496 	lim->max_fast_segment_size = min_t(unsigned int, seg_size, PAGE_SIZE);
497 
498 	/*
499 	 * We require drivers to at least do logical block aligned I/O, but
500 	 * historically we could not check for that due to the separate calls
501 	 * to set the limits.  Once the transition is finished the check
502 	 * below should be narrowed down to check the logical block size.
503 	 */
504 	if (!lim->dma_alignment)
505 		lim->dma_alignment = SECTOR_SIZE - 1;
506 	if (WARN_ON_ONCE(lim->dma_alignment > PAGE_SIZE))
507 		return -EINVAL;
508 
509 	if (lim->alignment_offset) {
510 		lim->alignment_offset &= (lim->physical_block_size - 1);
511 		lim->flags &= ~BLK_FLAG_MISALIGNED;
512 	}
513 
514 	if (!(lim->features & BLK_FEAT_WRITE_CACHE))
515 		lim->features &= ~BLK_FEAT_FUA;
516 
517 	blk_validate_atomic_write_limits(lim);
518 
519 	err = blk_validate_integrity_limits(lim);
520 	if (err)
521 		return err;
522 	return blk_validate_zoned_limits(lim);
523 }
524 EXPORT_SYMBOL_GPL(blk_validate_limits);
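
/*
 * Worked example of the max_sectors derivation above (illustrative numbers,
 * and assuming BLK_DEF_MAX_SECTORS_CAP is 2560 sectors): a driver that sets
 * max_hw_sectors = 65535 with a 4 KiB logical block size gets max_hw_sectors
 * rounded down to 65528 (a multiple of 8 sectors), and, with no user,
 * io_opt or io_min override, max_sectors = min(65528, 2560) = 2560 sectors,
 * i.e. 1280 KiB per I/O.
 */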
525 
526 /*
527  * Set the default limits for a newly allocated queue.  @lim contains the
528  * initial limits set by the driver, which may be empty, in which case
529  * all fields are cleared to zero.
530  */
531 int blk_set_default_limits(struct queue_limits *lim)
532 {
533 	/*
534 	 * Most defaults are set by capping the bounds in blk_validate_limits,
535 	 * but these limits are special and need an explicit initialization to
536 	 * the max value here.
537 	 */
538 	lim->max_user_discard_sectors = UINT_MAX;
539 	lim->max_user_wzeroes_unmap_sectors = UINT_MAX;
540 	return blk_validate_limits(lim);
541 }
542 
543 /**
544  * queue_limits_commit_update - commit an atomic update of queue limits
545  * @q:		queue to update
546  * @lim:	limits to apply
547  *
548  * Apply the limits in @lim that were obtained from queue_limits_start_update()
549  * and updated by the caller to @q.  The caller must have frozen the queue or
550  * ensure that there are no outstanding I/Os by other means.
551  *
552  * Returns 0 if successful, else a negative error code.
553  */
554 int queue_limits_commit_update(struct request_queue *q,
555 		struct queue_limits *lim)
556 {
557 	int error;
558 
559 	lockdep_assert_held(&q->limits_lock);
560 
561 	error = blk_validate_limits(lim);
562 	if (error)
563 		goto out_unlock;
564 
565 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
566 	if (q->crypto_profile && lim->integrity.tag_size) {
567 		pr_warn("blk-integrity: Integrity and hardware inline encryption are not supported together.\n");
568 		error = -EINVAL;
569 		goto out_unlock;
570 	}
571 #endif
572 
573 	q->limits = *lim;
574 	if (q->disk)
575 		blk_apply_bdi_limits(q->disk->bdi, lim);
576 out_unlock:
577 	mutex_unlock(&q->limits_lock);
578 	return error;
579 }
580 EXPORT_SYMBOL_GPL(queue_limits_commit_update);
581 
582 /**
583  * queue_limits_commit_update_frozen - commit an atomic update of queue limits
584  * @q:		queue to update
585  * @lim:	limits to apply
586  *
587  * Apply the limits in @lim that were obtained from queue_limits_start_update()
588  * and updated with the new values by the caller to @q.  Freezes the queue
589  * before the update and unfreezes it after.
590  *
591  * Returns 0 if successful, else a negative error code.
592  */
593 int queue_limits_commit_update_frozen(struct request_queue *q,
594 		struct queue_limits *lim)
595 {
596 	unsigned int memflags;
597 	int ret;
598 
599 	memflags = blk_mq_freeze_queue(q);
600 	ret = queue_limits_commit_update(q, lim);
601 	blk_mq_unfreeze_queue(q, memflags);
602 
603 	return ret;
604 }
605 EXPORT_SYMBOL_GPL(queue_limits_commit_update_frozen);
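
/*
 * Illustrative usage sketch (hypothetical driver code, not part of this
 * file): a driver that needs to change limits after probe would typically do
 * something like:
 *
 *	struct queue_limits lim = queue_limits_start_update(q);
 *	int err;
 *
 *	lim.max_hw_sectors = 2048;	// example value from new hardware state
 *	err = queue_limits_commit_update_frozen(q, &lim);
 *
 * queue_limits_start_update() acquires q->limits_lock, which the commit
 * helpers drop again; the _frozen variant also freezes the queue around the
 * update so no I/O observes half-updated limits.
 */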
606 
607 /**
608  * queue_limits_set - apply queue limits to queue
609  * @q:		queue to update
610  * @lim:	limits to apply
611  *
612  * Apply the limits in @lim that were freshly initialized to @q.
613  * To update existing limits use queue_limits_start_update() and
614  * queue_limits_commit_update() instead.
615  *
616  * Returns 0 if successful, else a negative error code.
617  */
618 int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
619 {
620 	mutex_lock(&q->limits_lock);
621 	return queue_limits_commit_update(q, lim);
622 }
623 EXPORT_SYMBOL_GPL(queue_limits_set);
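
/*
 * Illustrative sketch (hypothetical values): queue_limits_set() is meant for
 * freshly initialized limits, e.g. at probe time:
 *
 *	struct queue_limits lim = {
 *		.logical_block_size	= 4096,
 *		.max_hw_sectors		= 1024,
 *	};
 *
 *	int err = queue_limits_set(q, &lim);
 *
 * Unlike the commit_update helpers it replaces the limits wholesale rather
 * than modifying a snapshot obtained from queue_limits_start_update().
 */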
624 
625 static int queue_limit_alignment_offset(const struct queue_limits *lim,
626 		sector_t sector)
627 {
628 	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
629 	unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
630 		<< SECTOR_SHIFT;
631 
632 	return (granularity + lim->alignment_offset - alignment) % granularity;
633 }
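
/*
 * Worked example for queue_limit_alignment_offset() (illustrative numbers
 * only): with a 4 KiB physical block size, alignment_offset = 0 and a
 * partition starting at sector 63, the partition start falls 63 % 8 = 7
 * sectors (3584 bytes) into a physical block, so the function returns
 * (4096 + 0 - 3584) % 4096 = 512 bytes.
 */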
634 
635 static unsigned int queue_limit_discard_alignment(
636 		const struct queue_limits *lim, sector_t sector)
637 {
638 	unsigned int alignment, granularity, offset;
639 
640 	if (!lim->max_discard_sectors)
641 		return 0;
642 
643 	/* Why are these in bytes, not sectors? */
644 	alignment = lim->discard_alignment >> SECTOR_SHIFT;
645 	granularity = lim->discard_granularity >> SECTOR_SHIFT;
646 
647 	/* Offset of the partition start in 'granularity' sectors */
648 	offset = sector_div(sector, granularity);
649 
650 	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
651 	offset = (granularity + alignment - offset) % granularity;
652 
653 	/* Turn it back into bytes, gaah */
654 	return offset << SECTOR_SHIFT;
655 }
656 
657 static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
658 {
659 	sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
660 	if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
661 		sectors = PAGE_SIZE >> SECTOR_SHIFT;
662 	return sectors;
663 }
664 
665 /* Check if second and later bottom devices are compliant */
666 static bool blk_stack_atomic_writes_tail(struct queue_limits *t,
667 				struct queue_limits *b)
668 {
669 	/* We're not going to support different boundary sizes.. yet */
670 	if (t->atomic_write_hw_boundary != b->atomic_write_hw_boundary)
671 		return false;
672 
673 	/* Can't support this */
674 	if (t->atomic_write_hw_unit_min > b->atomic_write_hw_unit_max)
675 		return false;
676 
677 	/* Or this */
678 	if (t->atomic_write_hw_unit_max < b->atomic_write_hw_unit_min)
679 		return false;
680 
681 	t->atomic_write_hw_max = min(t->atomic_write_hw_max,
682 				b->atomic_write_hw_max);
683 	t->atomic_write_hw_unit_min = max(t->atomic_write_hw_unit_min,
684 				b->atomic_write_hw_unit_min);
685 	t->atomic_write_hw_unit_max = min(t->atomic_write_hw_unit_max,
686 				b->atomic_write_hw_unit_max);
687 	return true;
688 }
689 
690 static void blk_stack_atomic_writes_chunk_sectors(struct queue_limits *t)
691 {
692 	unsigned int chunk_bytes;
693 
694 	if (!t->chunk_sectors)
695 		return;
696 
697 	/*
698 	 * If chunk sectors is so large that its value in bytes overflows
699 	 * UINT_MAX, then just shift it down so it definitely will fit.
700 	 * We don't support atomic writes of such a large size anyway.
701 	 */
702 	if (check_shl_overflow(t->chunk_sectors, SECTOR_SHIFT, &chunk_bytes))
703 		chunk_bytes = t->chunk_sectors;
704 
705 	/*
706 	 * Find values for limits which work for chunk size.
707 	 * b->atomic_write_hw_unit_{min, max} may not be aligned with chunk
708 	 * size, as the chunk size is not restricted to a power-of-2.
709 	 * So we need to find highest power-of-2 which works for the chunk
710 	 * size.
711 	 * As an example scenario, we could have t->unit_max = 16K and a chunk
712 	 * size of 24K. For this case, reduce t->unit_max to a value aligned
713 	 * with both limits, i.e. 8K in this example.
714 	 */
715 	t->atomic_write_hw_unit_max = min(t->atomic_write_hw_unit_max,
716 					max_pow_of_two_factor(chunk_bytes));
717 
718 	t->atomic_write_hw_unit_min = min(t->atomic_write_hw_unit_min,
719 					  t->atomic_write_hw_unit_max);
720 	t->atomic_write_hw_max = min(t->atomic_write_hw_max, chunk_bytes);
721 }
722 
723 /* Check stacking of first bottom device */
724 static bool blk_stack_atomic_writes_head(struct queue_limits *t,
725 				struct queue_limits *b)
726 {
727 	if (!blk_valid_atomic_writes_boundary(t->chunk_sectors,
728 			b->atomic_write_hw_boundary >> SECTOR_SHIFT))
729 		return false;
730 
731 	t->atomic_write_hw_unit_max = b->atomic_write_hw_unit_max;
732 	t->atomic_write_hw_unit_min = b->atomic_write_hw_unit_min;
733 	t->atomic_write_hw_max = b->atomic_write_hw_max;
734 	t->atomic_write_hw_boundary = b->atomic_write_hw_boundary;
735 	return true;
736 }
737 
738 static void blk_stack_atomic_writes_limits(struct queue_limits *t,
739 				struct queue_limits *b, sector_t start)
740 {
741 	if (!(b->features & BLK_FEAT_ATOMIC_WRITES))
742 		goto unsupported;
743 
744 	if (!b->atomic_write_hw_unit_min)
745 		goto unsupported;
746 
747 	if (!blk_atomic_write_start_sect_aligned(start, b))
748 		goto unsupported;
749 
750 	/* UINT_MAX indicates no stacking of bottom devices yet */
751 	if (t->atomic_write_hw_max == UINT_MAX) {
752 		if (!blk_stack_atomic_writes_head(t, b))
753 			goto unsupported;
754 	} else {
755 		if (!blk_stack_atomic_writes_tail(t, b))
756 			goto unsupported;
757 	}
758 	blk_stack_atomic_writes_chunk_sectors(t);
759 	return;
760 
761 unsupported:
762 	t->atomic_write_hw_max = 0;
763 	t->atomic_write_hw_unit_max = 0;
764 	t->atomic_write_hw_unit_min = 0;
765 	t->atomic_write_hw_boundary = 0;
766 }
767 
768 /**
769  * blk_stack_limits - adjust queue_limits for stacked devices
770  * @t:	the stacking driver limits (top device)
771  * @b:  the underlying queue limits (bottom, component device)
772  * @start:  first data sector within component device
773  *
774  * Description:
775  *    This function is used by stacking drivers like MD and DM to ensure
776  *    that all component devices have compatible block sizes and
777  *    alignments.  The stacking driver must provide a queue_limits
778  *    struct (top) and then iteratively call the stacking function for
779  *    all component (bottom) devices.  The stacking function will
780  *    attempt to combine the values and ensure proper alignment.
781  *
782  *    Returns 0 if the top and bottom queue_limits are compatible.  The
783  *    top device's block sizes and alignment offsets may be adjusted to
784  *    ensure alignment with the bottom device. If no compatible sizes
785  *    and alignments exist, -1 is returned and the resulting top
786  *    queue_limits will have the misaligned flag set to indicate that
787  *    the alignment_offset is undefined.
788  */
789 int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
790 		     sector_t start)
791 {
792 	unsigned int top, bottom, alignment;
793 	int ret = 0;
794 
795 	t->features |= (b->features & BLK_FEAT_INHERIT_MASK);
796 
797 	/*
798 	 * Some features need to be supported both by the stacking driver and all
799 	 * underlying devices.  The stacking driver sets these flags before
800 	 * stacking the limits, and this will clear the flags if any of the
801 	 * underlying devices does not support them.
802 	 */
803 	if (!(b->features & BLK_FEAT_NOWAIT))
804 		t->features &= ~BLK_FEAT_NOWAIT;
805 	if (!(b->features & BLK_FEAT_POLL))
806 		t->features &= ~BLK_FEAT_POLL;
807 
808 	t->flags |= (b->flags & BLK_FLAG_MISALIGNED);
809 
810 	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
811 	t->max_user_sectors = min_not_zero(t->max_user_sectors,
812 			b->max_user_sectors);
813 	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
814 	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
815 	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
816 					b->max_write_zeroes_sectors);
817 	t->max_user_wzeroes_unmap_sectors =
818 			min(t->max_user_wzeroes_unmap_sectors,
819 			    b->max_user_wzeroes_unmap_sectors);
820 	t->max_hw_wzeroes_unmap_sectors =
821 			min(t->max_hw_wzeroes_unmap_sectors,
822 			    b->max_hw_wzeroes_unmap_sectors);
823 
824 	t->max_hw_zone_append_sectors = min(t->max_hw_zone_append_sectors,
825 					b->max_hw_zone_append_sectors);
826 
827 	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
828 					    b->seg_boundary_mask);
829 	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
830 					    b->virt_boundary_mask);
831 
832 	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
833 	t->max_discard_segments = min_not_zero(t->max_discard_segments,
834 					       b->max_discard_segments);
835 	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
836 						 b->max_integrity_segments);
837 
838 	t->max_segment_size = min_not_zero(t->max_segment_size,
839 					   b->max_segment_size);
840 
841 	alignment = queue_limit_alignment_offset(b, start);
842 
843 	/* Bottom device has different alignment.  Check that it is
844 	 * compatible with the current top alignment.
845 	 */
846 	if (t->alignment_offset != alignment) {
847 
848 		top = max(t->physical_block_size, t->io_min)
849 			+ t->alignment_offset;
850 		bottom = max(b->physical_block_size, b->io_min) + alignment;
851 
852 		/* Verify that top and bottom intervals line up */
853 		if (max(top, bottom) % min(top, bottom)) {
854 			t->flags |= BLK_FLAG_MISALIGNED;
855 			ret = -1;
856 		}
857 	}
858 
859 	t->logical_block_size = max(t->logical_block_size,
860 				    b->logical_block_size);
861 
862 	t->physical_block_size = max(t->physical_block_size,
863 				     b->physical_block_size);
864 
865 	t->io_min = max(t->io_min, b->io_min);
866 	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
867 	t->dma_alignment = max(t->dma_alignment, b->dma_alignment);
868 
869 	/* Set non-power-of-2 compatible chunk_sectors boundary */
870 	if (b->chunk_sectors)
871 		t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);
872 
873 	/* Physical block size a multiple of the logical block size? */
874 	if (t->physical_block_size & (t->logical_block_size - 1)) {
875 		t->physical_block_size = t->logical_block_size;
876 		t->flags |= BLK_FLAG_MISALIGNED;
877 		ret = -1;
878 	}
879 
880 	/* Minimum I/O a multiple of the physical block size? */
881 	if (t->io_min & (t->physical_block_size - 1)) {
882 		t->io_min = t->physical_block_size;
883 		t->flags |= BLK_FLAG_MISALIGNED;
884 		ret = -1;
885 	}
886 
887 	/* Optimal I/O a multiple of the physical block size? */
888 	if (t->io_opt & (t->physical_block_size - 1)) {
889 		t->io_opt = 0;
890 		t->flags |= BLK_FLAG_MISALIGNED;
891 		ret = -1;
892 	}
893 
894 	/* chunk_sectors a multiple of the physical block size? */
895 	if (t->chunk_sectors % (t->physical_block_size >> SECTOR_SHIFT)) {
896 		t->chunk_sectors = 0;
897 		t->flags |= BLK_FLAG_MISALIGNED;
898 		ret = -1;
899 	}
900 
901 	/* Find lowest common alignment_offset */
902 	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
903 		% max(t->physical_block_size, t->io_min);
904 
905 	/* Verify that new alignment_offset is on a logical block boundary */
906 	if (t->alignment_offset & (t->logical_block_size - 1)) {
907 		t->flags |= BLK_FLAG_MISALIGNED;
908 		ret = -1;
909 	}
910 
911 	t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
912 	t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
913 	t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);
914 
915 	/* Discard alignment and granularity */
916 	if (b->discard_granularity) {
917 		alignment = queue_limit_discard_alignment(b, start);
918 
919 		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
920 						      b->max_discard_sectors);
921 		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
922 							 b->max_hw_discard_sectors);
923 		t->discard_granularity = max(t->discard_granularity,
924 					     b->discard_granularity);
925 		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
926 			t->discard_granularity;
927 	}
928 	t->max_secure_erase_sectors = min_not_zero(t->max_secure_erase_sectors,
929 						   b->max_secure_erase_sectors);
930 	t->zone_write_granularity = max(t->zone_write_granularity,
931 					b->zone_write_granularity);
932 	if (!(t->features & BLK_FEAT_ZONED)) {
933 		t->zone_write_granularity = 0;
934 		t->max_zone_append_sectors = 0;
935 	}
936 	blk_stack_atomic_writes_limits(t, b, start);
937 
938 	return ret;
939 }
940 EXPORT_SYMBOL(blk_stack_limits);
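
/*
 * Worked example of the alignment compatibility check in blk_stack_limits()
 * (illustrative numbers only): if the top device so far has io_min = 4096 and
 * alignment_offset = 0 (top interval 4096), and a newly stacked bottom device
 * reports io_min = 4096 with an effective alignment of 3584 bytes (bottom
 * interval 7680), then 7680 % 4096 != 0, so the top limits are marked
 * misaligned and -1 is returned.
 */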
941 
942 /**
943  * queue_limits_stack_bdev - adjust queue_limits for stacked devices
944  * @t:	the stacking driver limits (top device)
945  * @bdev:  the underlying block device (bottom)
946  * @offset:  offset to beginning of data within component device
947  * @pfx: prefix to use for warnings logged
948  *
949  * Description:
950  *    This function is used by stacking drivers like MD and DM to ensure
951  *    that all component devices have compatible block sizes and
952  *    alignments.  The stacking driver must provide a queue_limits
953  *    struct (top) and then iteratively call the stacking function for
954  *    all component (bottom) devices.  The stacking function will
955  *    attempt to combine the values and ensure proper alignment.
956  */
957 void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
958 		sector_t offset, const char *pfx)
959 {
960 	if (blk_stack_limits(t, bdev_limits(bdev),
961 			get_start_sect(bdev) + offset))
962 		pr_notice("%s: Warning: Device %pg is misaligned\n",
963 			pfx, bdev);
964 }
965 EXPORT_SYMBOL_GPL(queue_limits_stack_bdev);
966 
967 /**
968  * queue_limits_stack_integrity - stack integrity profile
969  * @t: target queue limits
970  * @b: base queue limits
971  *
972  * Check if the integrity profile in @b can be stacked into the
973  * target @t.  Stacking is possible if either:
974  *
975  *   a) @t does not have any integrity information stacked into it yet, or
976  *   b) the integrity profile in @b is identical to the one in @t
977  *
978  * If @b can be stacked into @t, return %true.  Else return %false and clear the
979  * integrity information in @t.
980  */
981 bool queue_limits_stack_integrity(struct queue_limits *t,
982 		struct queue_limits *b)
983 {
984 	struct blk_integrity *ti = &t->integrity;
985 	struct blk_integrity *bi = &b->integrity;
986 
987 	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
988 		return true;
989 
990 	if (ti->flags & BLK_INTEGRITY_STACKED) {
991 		if (ti->metadata_size != bi->metadata_size)
992 			goto incompatible;
993 		if (ti->interval_exp != bi->interval_exp)
994 			goto incompatible;
995 		if (ti->tag_size != bi->tag_size)
996 			goto incompatible;
997 		if (ti->csum_type != bi->csum_type)
998 			goto incompatible;
999 		if (ti->pi_tuple_size != bi->pi_tuple_size)
1000 			goto incompatible;
1001 		if ((ti->flags & BLK_INTEGRITY_REF_TAG) !=
1002 		    (bi->flags & BLK_INTEGRITY_REF_TAG))
1003 			goto incompatible;
1004 	} else {
1005 		ti->flags = BLK_INTEGRITY_STACKED;
1006 		ti->flags |= (bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE) |
1007 			     (bi->flags & BLK_INTEGRITY_REF_TAG);
1008 		ti->csum_type = bi->csum_type;
1009 		ti->pi_tuple_size = bi->pi_tuple_size;
1010 		ti->metadata_size = bi->metadata_size;
1011 		ti->pi_offset = bi->pi_offset;
1012 		ti->interval_exp = bi->interval_exp;
1013 		ti->tag_size = bi->tag_size;
1014 	}
1015 	return true;
1016 
1017 incompatible:
1018 	memset(ti, 0, sizeof(*ti));
1019 	return false;
1020 }
1021 EXPORT_SYMBOL_GPL(queue_limits_stack_integrity);
1022 
1023 /**
1024  * blk_set_queue_depth - tell the block layer about the device queue depth
1025  * @q:		the request queue for the device
1026  * @depth:		queue depth
1027  *
1028  */
1029 void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
1030 {
1031 	q->queue_depth = depth;
1032 	rq_qos_queue_depth_changed(q);
1033 }
1034 EXPORT_SYMBOL(blk_set_queue_depth);
1035 
1036 int bdev_alignment_offset(struct block_device *bdev)
1037 {
1038 	struct request_queue *q = bdev_get_queue(bdev);
1039 
1040 	if (q->limits.flags & BLK_FLAG_MISALIGNED)
1041 		return -1;
1042 	if (bdev_is_partition(bdev))
1043 		return queue_limit_alignment_offset(&q->limits,
1044 				bdev->bd_start_sect);
1045 	return q->limits.alignment_offset;
1046 }
1047 EXPORT_SYMBOL_GPL(bdev_alignment_offset);
1048 
1049 unsigned int bdev_discard_alignment(struct block_device *bdev)
1050 {
1051 	struct request_queue *q = bdev_get_queue(bdev);
1052 
1053 	if (bdev_is_partition(bdev))
1054 		return queue_limit_discard_alignment(&q->limits,
1055 				bdev->bd_start_sect);
1056 	return q->limits.discard_alignment;
1057 }
1058 EXPORT_SYMBOL_GPL(bdev_discard_alignment);
1059