// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blk-integrity.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>
#include <linux/t10-pi.h>
#include <linux/crc64.h>

#include "blk.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	WRITE_ONCE(q->rq_timeout, timeout);
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
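
/*
 * Illustrative usage (assumed driver-side code, not part of this file): a
 * driver that wants a 30 second request timeout instead of the block layer
 * default would call, during queue setup:
 *
 *	blk_queue_rq_timeout(q, 30 * HZ);
 */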

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim: the queue_limits structure to reset
 *
 * Prepare queue limits for applying limits from underlying devices using
 * blk_stack_limits().
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
	memset(lim, 0, sizeof(*lim));
	lim->logical_block_size = SECTOR_SIZE;
	lim->physical_block_size = SECTOR_SIZE;
	lim->io_min = SECTOR_SIZE;
	lim->discard_granularity = SECTOR_SIZE;
	lim->dma_alignment = SECTOR_SIZE - 1;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;

	/* Inherit limits from component devices */
	lim->max_segments = USHRT_MAX;
	lim->max_discard_segments = USHRT_MAX;
	lim->max_hw_sectors = UINT_MAX;
	lim->max_segment_size = UINT_MAX;
	lim->max_sectors = UINT_MAX;
	lim->max_dev_sectors = UINT_MAX;
	lim->max_write_zeroes_sectors = UINT_MAX;
	lim->max_hw_wzeroes_unmap_sectors = UINT_MAX;
	lim->max_user_wzeroes_unmap_sectors = UINT_MAX;
	lim->max_hw_zone_append_sectors = UINT_MAX;
	lim->max_user_discard_sectors = UINT_MAX;
	lim->atomic_write_hw_max = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);
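
/*
 * Illustrative sketch of the stacking pattern prepared above (assumed
 * driver-side code, not part of this file; bdev, data_offset and disk are
 * assumed driver-local names): a stacking driver such as MD or DM starts from
 * the permissive stacking defaults and then folds in each component device:
 *
 *	struct queue_limits lim;
 *
 *	blk_set_stacking_limits(&lim);
 *	queue_limits_stack_bdev(&lim, bdev, data_offset, disk->disk_name);
 *
 * with the queue_limits_stack_bdev() call repeated for every component bdev.
 */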

void blk_apply_bdi_limits(struct backing_dev_info *bdi,
		struct queue_limits *lim)
{
	u64 io_opt = lim->io_opt;

	/*
	 * For read-ahead of large files to be effective, we need to read ahead
	 * at least twice the optimal I/O size. For rotational devices that do
	 * not report an optimal I/O size (e.g. ATA HDDs), use the maximum I/O
	 * size to avoid falling back to the (rather inefficient) small default
	 * read-ahead size.
	 *
	 * There is no hardware limitation for the read-ahead size and the user
	 * might have increased the read-ahead size through sysfs, so don't ever
	 * decrease it.
	 */
	if (!io_opt && (lim->features & BLK_FEAT_ROTATIONAL))
		io_opt = (u64)lim->max_sectors << SECTOR_SHIFT;

	bdi->ra_pages = max3(bdi->ra_pages,
				io_opt * 2 >> PAGE_SHIFT,
				VM_READAHEAD_PAGES);
	bdi->io_pages = lim->max_sectors >> PAGE_SECTORS_SHIFT;
}
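
/*
 * Worked example of the read-ahead sizing above (illustrative numbers,
 * assuming 4 KiB pages): a rotational disk that reports no io_opt and has
 * max_sectors = 2560 (1280 KiB) gets io_opt = 2560 << 9 = 1310720 bytes, so
 * ra_pages is raised to at least (2 * 1310720) >> 12 = 640 pages, unless the
 * user already configured something larger through sysfs.
 */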

static int blk_validate_zoned_limits(struct queue_limits *lim)
{
	if (!(lim->features & BLK_FEAT_ZONED)) {
		if (WARN_ON_ONCE(lim->max_open_zones) ||
		    WARN_ON_ONCE(lim->max_active_zones) ||
		    WARN_ON_ONCE(lim->zone_write_granularity) ||
		    WARN_ON_ONCE(lim->max_zone_append_sectors))
			return -EINVAL;
		return 0;
	}

	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED)))
		return -EINVAL;

	/*
	 * Given that active zones include open zones, the maximum number of
	 * open zones cannot be larger than the maximum number of active zones.
	 */
	if (lim->max_active_zones &&
	    lim->max_open_zones > lim->max_active_zones)
		return -EINVAL;

	if (lim->zone_write_granularity < lim->logical_block_size)
		lim->zone_write_granularity = lim->logical_block_size;

	/*
	 * The Zone Append size is limited by the maximum I/O size and the zone
	 * size given that it can't span zones.
	 *
	 * If no max_hw_zone_append_sectors limit is provided, the block layer
	 * will emulate it; otherwise we're also bound by the hardware limit.
	 */
	lim->max_zone_append_sectors =
		min_not_zero(lim->max_hw_zone_append_sectors,
			min(lim->chunk_sectors, lim->max_hw_sectors));
	return 0;
}
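
/*
 * Worked example of the Zone Append clamp above (illustrative numbers): a
 * zoned device with 256 MiB zones (chunk_sectors = 524288), max_hw_sectors =
 * 2048 and no hardware Zone Append limit ends up with
 * max_zone_append_sectors = min(524288, 2048) = 2048, i.e. 1 MiB appends
 * that the block layer emulates on top of regular writes.
 */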

static int blk_validate_integrity_limits(struct queue_limits *lim)
{
	struct blk_integrity *bi = &lim->integrity;

	if (!bi->metadata_size) {
		if (bi->csum_type != BLK_INTEGRITY_CSUM_NONE ||
		    bi->tag_size || ((bi->flags & BLK_INTEGRITY_REF_TAG))) {
			pr_warn("invalid PI settings.\n");
			return -EINVAL;
		}
		bi->flags |= BLK_INTEGRITY_NOGENERATE | BLK_INTEGRITY_NOVERIFY;
		return 0;
	}

	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) {
		pr_warn("integrity support disabled.\n");
		return -EINVAL;
	}

	if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE &&
	    (bi->flags & BLK_INTEGRITY_REF_TAG)) {
		pr_warn("ref tag not supported without checksum.\n");
		return -EINVAL;
	}

	if (bi->pi_tuple_size > bi->metadata_size) {
		pr_warn("pi_tuple_size (%u) exceeds metadata_size (%u)\n",
			bi->pi_tuple_size,
			bi->metadata_size);
		return -EINVAL;
	}

	switch (bi->csum_type) {
	case BLK_INTEGRITY_CSUM_NONE:
		if (bi->pi_tuple_size) {
			pr_warn("pi_tuple_size must be 0 when checksum type is none\n");
			return -EINVAL;
		}
		break;
	case BLK_INTEGRITY_CSUM_CRC:
	case BLK_INTEGRITY_CSUM_IP:
		if (bi->pi_tuple_size != sizeof(struct t10_pi_tuple)) {
			pr_warn("pi_tuple_size mismatch for T10 PI: expected %zu, got %u\n",
				sizeof(struct t10_pi_tuple),
				bi->pi_tuple_size);
			return -EINVAL;
		}
		break;
	case BLK_INTEGRITY_CSUM_CRC64:
		if (bi->pi_tuple_size != sizeof(struct crc64_pi_tuple)) {
			pr_warn("pi_tuple_size mismatch for CRC64 PI: expected %zu, got %u\n",
				sizeof(struct crc64_pi_tuple),
				bi->pi_tuple_size);
			return -EINVAL;
		}
		break;
	}

	if (!bi->interval_exp)
		bi->interval_exp = ilog2(lim->logical_block_size);

	/*
	 * The PI generation / validation helpers do not expect intervals to
	 * straddle multiple bio_vecs. Enforce alignment so that those are
	 * never generated, and that each buffer is aligned as expected.
	 */
	if (bi->csum_type) {
		lim->dma_alignment = max(lim->dma_alignment,
					 (1U << bi->interval_exp) - 1);
	}

	return 0;
}

/*
 * Returns max guaranteed bytes which we can fit in a bio.
 *
 * We require that an atomic write is submitted as an ITER_UBUF iov_iter (so a
 * single vector), so we assume that we can fit at least PAGE_SIZE in a
 * segment, apart from the first and last segments.
 */
static unsigned int blk_queue_max_guaranteed_bio(struct queue_limits *lim)
{
	unsigned int max_segments = min(BIO_MAX_VECS, lim->max_segments);
	unsigned int length;

	length = min(max_segments, 2) * lim->logical_block_size;
	if (max_segments > 2)
		length += (max_segments - 2) * PAGE_SIZE;

	return length;
}
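
/*
 * Worked example for the helper above (illustrative numbers, assuming 4 KiB
 * pages and a 512 byte logical block size): with max_segments capped at
 * BIO_MAX_VECS = 256, the guaranteed length is 2 * 512 bytes for the first
 * and last segments plus 254 * 4096 bytes for the rest, i.e. 1041408 bytes,
 * which the caller below then rounds down to a power-of-two unit limit.
 */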

static void blk_atomic_writes_update_limits(struct queue_limits *lim)
{
	unsigned int unit_limit = min(lim->max_hw_sectors << SECTOR_SHIFT,
					blk_queue_max_guaranteed_bio(lim));

	unit_limit = rounddown_pow_of_two(unit_limit);

	lim->atomic_write_max_sectors =
		min(lim->atomic_write_hw_max >> SECTOR_SHIFT,
			lim->max_hw_sectors);
	lim->atomic_write_unit_min =
		min(lim->atomic_write_hw_unit_min, unit_limit);
	lim->atomic_write_unit_max =
		min(lim->atomic_write_hw_unit_max, unit_limit);
	lim->atomic_write_boundary_sectors =
		lim->atomic_write_hw_boundary >> SECTOR_SHIFT;
}

/*
 * Test whether any boundary is aligned with any chunk size. Stacked
 * devices store any stripe size in t->chunk_sectors.
 */
static bool blk_valid_atomic_writes_boundary(unsigned int chunk_sectors,
					     unsigned int boundary_sectors)
{
	if (!chunk_sectors || !boundary_sectors)
		return true;

	if (boundary_sectors > chunk_sectors &&
	    boundary_sectors % chunk_sectors)
		return false;

	if (chunk_sectors > boundary_sectors &&
	    chunk_sectors % boundary_sectors)
		return false;

	return true;
}

static void blk_validate_atomic_write_limits(struct queue_limits *lim)
{
	unsigned int boundary_sectors;
	unsigned int atomic_write_hw_max_sectors =
		lim->atomic_write_hw_max >> SECTOR_SHIFT;

	if (!(lim->features & BLK_FEAT_ATOMIC_WRITES))
		goto unsupported;

	/* UINT_MAX indicates stacked limits in initial state */
	if (lim->atomic_write_hw_max == UINT_MAX)
		goto unsupported;

	if (!lim->atomic_write_hw_max)
		goto unsupported;

	if (WARN_ON_ONCE(!is_power_of_2(lim->atomic_write_hw_unit_min)))
		goto unsupported;

	if (WARN_ON_ONCE(!is_power_of_2(lim->atomic_write_hw_unit_max)))
		goto unsupported;

	if (WARN_ON_ONCE(lim->atomic_write_hw_unit_min >
			 lim->atomic_write_hw_unit_max))
		goto unsupported;

	if (WARN_ON_ONCE(lim->atomic_write_hw_unit_max >
			 lim->atomic_write_hw_max))
		goto unsupported;

	if (WARN_ON_ONCE(lim->chunk_sectors &&
			 atomic_write_hw_max_sectors > lim->chunk_sectors))
		goto unsupported;

	boundary_sectors = lim->atomic_write_hw_boundary >> SECTOR_SHIFT;

	if (boundary_sectors) {
		if (WARN_ON_ONCE(lim->atomic_write_hw_max >
				 lim->atomic_write_hw_boundary))
			goto unsupported;

		if (WARN_ON_ONCE(!blk_valid_atomic_writes_boundary(
				lim->chunk_sectors, boundary_sectors)))
			goto unsupported;

		/*
		 * The boundary size just needs to be a multiple of unit_max
		 * (and not necessarily a power-of-2), so the following check
		 * could be relaxed in the future.
		 * Furthermore, if needed, unit_max could even be reduced so
		 * that it is compliant with a !power-of-2 boundary.
		 */
		if (!is_power_of_2(boundary_sectors))
			goto unsupported;
	}

	blk_atomic_writes_update_limits(lim);
	return;

unsupported:
	lim->atomic_write_max_sectors = 0;
	lim->atomic_write_boundary_sectors = 0;
	lim->atomic_write_unit_min = 0;
	lim->atomic_write_unit_max = 0;
}

/*
 * Check that the limits in lim are valid, initialize defaults for unset
 * values, and cap values based on others where needed.
 */
int blk_validate_limits(struct queue_limits *lim)
{
	unsigned int max_hw_sectors;
	unsigned int logical_block_sectors;
	unsigned long seg_size;
	int err;

	/*
	 * Unless otherwise specified, default to 512 byte logical blocks and a
	 * physical block size equal to the logical block size.
	 */
	if (!lim->logical_block_size)
		lim->logical_block_size = SECTOR_SIZE;
	else if (blk_validate_block_size(lim->logical_block_size)) {
		pr_warn("Invalid logical block size (%d)\n", lim->logical_block_size);
		return -EINVAL;
	}
	if (lim->physical_block_size < lim->logical_block_size) {
		lim->physical_block_size = lim->logical_block_size;
	} else if (!is_power_of_2(lim->physical_block_size)) {
		pr_warn("Invalid physical block size (%d)\n", lim->physical_block_size);
		return -EINVAL;
	}

	/*
	 * The minimum I/O size defaults to the physical block size unless
	 * explicitly overridden.
	 */
	if (lim->io_min < lim->physical_block_size)
		lim->io_min = lim->physical_block_size;

	/*
	 * The optimal I/O size may not be aligned to the physical block size
	 * (because it may be limited by dma engines which have no clue about
	 * the block size of the disks attached to them), so we round it down
	 * here.
	 */
	lim->io_opt = round_down(lim->io_opt, lim->physical_block_size);

	/*
	 * max_hw_sectors has a somewhat weird default for historical reasons,
	 * but drivers really should set their own instead of relying on this
	 * value.
	 *
	 * The block layer relies on the fact that every driver can
	 * handle at least a page worth of data per I/O, and needs the value
	 * aligned to the logical block size.
	 */
	if (!lim->max_hw_sectors)
		lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
	if (WARN_ON_ONCE(lim->max_hw_sectors < PAGE_SECTORS))
		return -EINVAL;
	logical_block_sectors = lim->logical_block_size >> SECTOR_SHIFT;
	if (WARN_ON_ONCE(logical_block_sectors > lim->max_hw_sectors))
		return -EINVAL;
	lim->max_hw_sectors = round_down(lim->max_hw_sectors,
			logical_block_sectors);

	/*
	 * The actual max_sectors value is a complex beast and also takes the
	 * max_dev_sectors value (set by SCSI ULPs) and a user configurable
	 * value into account. The ->max_sectors value is always calculated
	 * from these, so directly setting it won't have any effect.
	 */
	max_hw_sectors = min_not_zero(lim->max_hw_sectors,
				lim->max_dev_sectors);
	if (lim->max_user_sectors) {
		if (lim->max_user_sectors < BLK_MIN_SEGMENT_SIZE / SECTOR_SIZE)
			return -EINVAL;
		lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
	} else if (lim->io_opt > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
		lim->max_sectors =
			min(max_hw_sectors, lim->io_opt >> SECTOR_SHIFT);
	} else if (lim->io_min > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
		lim->max_sectors =
			min(max_hw_sectors, lim->io_min >> SECTOR_SHIFT);
	} else {
		lim->max_sectors = min(max_hw_sectors, BLK_DEF_MAX_SECTORS_CAP);
	}
	lim->max_sectors = round_down(lim->max_sectors,
			logical_block_sectors);

	/*
	 * Random default for the maximum number of segments. Drivers should
	 * not rely on this and should set their own.
	 */
	if (!lim->max_segments)
		lim->max_segments = BLK_MAX_SEGMENTS;

	if (lim->max_hw_wzeroes_unmap_sectors &&
	    lim->max_hw_wzeroes_unmap_sectors != lim->max_write_zeroes_sectors)
		return -EINVAL;
	lim->max_wzeroes_unmap_sectors = min(lim->max_hw_wzeroes_unmap_sectors,
			lim->max_user_wzeroes_unmap_sectors);

	lim->max_discard_sectors =
		min(lim->max_hw_discard_sectors, lim->max_user_discard_sectors);

	/*
	 * When discard is not supported, discard_granularity should be
	 * reported as 0 to userspace.
	 */
	if (lim->max_discard_sectors)
		lim->discard_granularity =
			max(lim->discard_granularity, lim->physical_block_size);
	else
		lim->discard_granularity = 0;

	if (!lim->max_discard_segments)
		lim->max_discard_segments = 1;

	/*
	 * By default there is no limit on the segment boundary alignment,
	 * but if there is one it can't be smaller than the page size as
	 * that would break all the normal I/O patterns.
	 */
	if (!lim->seg_boundary_mask)
		lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	if (WARN_ON_ONCE(lim->seg_boundary_mask < BLK_MIN_SEGMENT_SIZE - 1))
		return -EINVAL;

	/*
	 * Stacking devices may have both a virtual boundary and a max segment
	 * size limit, so allow this setting now. Long-term the two might need
	 * to move out of the stacking limits, since we have immutable bvecs
	 * and lower layer bio splitting is supposed to handle the two
	 * correctly.
	 */
	if (lim->virt_boundary_mask) {
		if (!lim->max_segment_size)
			lim->max_segment_size = UINT_MAX;
	} else {
		/*
		 * The maximum segment size has an odd historic 64k default that
		 * drivers probably should override. Just like the I/O size we
		 * require drivers to at least handle a full page per segment.
		 */
		if (!lim->max_segment_size)
			lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
		if (WARN_ON_ONCE(lim->max_segment_size < BLK_MIN_SEGMENT_SIZE))
			return -EINVAL;
	}

	/* setup min segment size for building new segment in fast path */
	if (lim->seg_boundary_mask > lim->max_segment_size - 1)
		seg_size = lim->max_segment_size;
	else
		seg_size = lim->seg_boundary_mask + 1;
	lim->min_segment_size = min_t(unsigned int, seg_size, PAGE_SIZE);

	/*
	 * We require drivers to at least do logical block aligned I/O, but
	 * historically could not check for that due to the separate calls
	 * to set the limits. Once the transition is finished the check
	 * below should be narrowed down to check the logical block size.
	 */
	if (!lim->dma_alignment)
		lim->dma_alignment = SECTOR_SIZE - 1;
	if (WARN_ON_ONCE(lim->dma_alignment > PAGE_SIZE))
		return -EINVAL;

	if (lim->alignment_offset) {
		lim->alignment_offset &= (lim->physical_block_size - 1);
		lim->flags &= ~BLK_FLAG_MISALIGNED;
	}

	if (!(lim->features & BLK_FEAT_WRITE_CACHE))
		lim->features &= ~BLK_FEAT_FUA;

	blk_validate_atomic_write_limits(lim);

	err = blk_validate_integrity_limits(lim);
	if (err)
		return err;
	return blk_validate_zoned_limits(lim);
}
EXPORT_SYMBOL_GPL(blk_validate_limits);

/*
 * Set the default limits for a newly allocated queue. @lim contains the
 * initial limits set by the driver, which may specify no limits at all, in
 * which case all fields are cleared to zero.
 */
int blk_set_default_limits(struct queue_limits *lim)
{
	/*
	 * Most defaults are set by capping the bounds in blk_validate_limits,
	 * but these limits are special and need an explicit initialization to
	 * the max value here.
	 */
	lim->max_user_discard_sectors = UINT_MAX;
	lim->max_user_wzeroes_unmap_sectors = UINT_MAX;
	return blk_validate_limits(lim);
}

/**
 * queue_limits_commit_update - commit an atomic update of queue limits
 * @q: queue to update
 * @lim: limits to apply
 *
 * Apply the limits in @lim that were obtained from queue_limits_start_update()
 * and updated by the caller to @q. The caller must have frozen the queue or
 * ensure that there are no outstanding I/Os by other means.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_commit_update(struct request_queue *q,
		struct queue_limits *lim)
{
	int error;

	error = blk_validate_limits(lim);
	if (error)
		goto out_unlock;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	if (q->crypto_profile && lim->integrity.tag_size) {
		pr_warn("blk-integrity: Integrity and hardware inline encryption are not supported together.\n");
		error = -EINVAL;
		goto out_unlock;
	}
#endif

	q->limits = *lim;
	if (q->disk)
		blk_apply_bdi_limits(q->disk->bdi, lim);
out_unlock:
	mutex_unlock(&q->limits_lock);
	return error;
}
EXPORT_SYMBOL_GPL(queue_limits_commit_update);

/**
 * queue_limits_commit_update_frozen - commit an atomic update of queue limits
 * @q: queue to update
 * @lim: limits to apply
 *
 * Apply the limits in @lim that were obtained from queue_limits_start_update()
 * and updated with the new values by the caller to @q. Freezes the queue
 * before the update and unfreezes it after.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_commit_update_frozen(struct request_queue *q,
		struct queue_limits *lim)
{
	unsigned int memflags;
	int ret;

	memflags = blk_mq_freeze_queue(q);
	ret = queue_limits_commit_update(q, lim);
	blk_mq_unfreeze_queue(q, memflags);

	return ret;
}
EXPORT_SYMBOL_GPL(queue_limits_commit_update_frozen);
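
/*
 * Illustrative sketch of the update pattern used with the commit helpers
 * above (assumed driver-side code, not part of this file; new_max_hw_sectors
 * is an assumed value): take a snapshot of the limits under the limits lock,
 * modify it, and commit it back with the queue frozen:
 *
 *	struct queue_limits lim = queue_limits_start_update(q);
 *	int err;
 *
 *	lim.max_hw_sectors = new_max_hw_sectors;
 *	err = queue_limits_commit_update_frozen(q, &lim);
 *	if (err)
 *		return err;
 */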

/**
 * queue_limits_set - apply queue limits to queue
 * @q: queue to update
 * @lim: limits to apply
 *
 * Apply the limits in @lim that were freshly initialized to @q.
 * To update existing limits use queue_limits_start_update() and
 * queue_limits_commit_update() instead.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
{
	mutex_lock(&q->limits_lock);
	return queue_limits_commit_update(q, lim);
}
EXPORT_SYMBOL_GPL(queue_limits_set);

static int queue_limit_alignment_offset(const struct queue_limits *lim,
		sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
		<< SECTOR_SHIFT;

	return (granularity + lim->alignment_offset - alignment) % granularity;
}

static unsigned int queue_limit_discard_alignment(
		const struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment, granularity, offset;

	if (!lim->max_discard_sectors)
		return 0;

	/* Why are these in bytes, not sectors? */
	alignment = lim->discard_alignment >> SECTOR_SHIFT;
	granularity = lim->discard_granularity >> SECTOR_SHIFT;

	/* Offset of the partition start in 'granularity' sectors */
	offset = sector_div(sector, granularity);

	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
	offset = (granularity + alignment - offset) % granularity;

	/* Turn it back into bytes, gaah */
	return offset << SECTOR_SHIFT;
}

static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
{
	sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
	if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
		sectors = PAGE_SIZE >> SECTOR_SHIFT;
	return sectors;
}

/* Check if second and later bottom devices are compliant */
static bool blk_stack_atomic_writes_tail(struct queue_limits *t,
				struct queue_limits *b)
{
	/* We're not going to support different boundary sizes.. yet */
	if (t->atomic_write_hw_boundary != b->atomic_write_hw_boundary)
		return false;

	/* Can't support this */
	if (t->atomic_write_hw_unit_min > b->atomic_write_hw_unit_max)
		return false;

	/* Or this */
	if (t->atomic_write_hw_unit_max < b->atomic_write_hw_unit_min)
		return false;

	t->atomic_write_hw_max = min(t->atomic_write_hw_max,
				b->atomic_write_hw_max);
	t->atomic_write_hw_unit_min = max(t->atomic_write_hw_unit_min,
				b->atomic_write_hw_unit_min);
	t->atomic_write_hw_unit_max = min(t->atomic_write_hw_unit_max,
				b->atomic_write_hw_unit_max);
	return true;
}

static void blk_stack_atomic_writes_chunk_sectors(struct queue_limits *t)
{
	unsigned int chunk_bytes;

	if (!t->chunk_sectors)
		return;

	/*
	 * If chunk sectors is so large that its value in bytes overflows
	 * UINT_MAX, then just shift it down so it definitely will fit.
	 * We don't support atomic writes of such a large size anyway.
	 */
	if (check_shl_overflow(t->chunk_sectors, SECTOR_SHIFT, &chunk_bytes))
		chunk_bytes = t->chunk_sectors;

	/*
	 * Find values for the limits which work for the chunk size.
	 * b->atomic_write_hw_unit_{min, max} may not be aligned with the
	 * chunk size, as the chunk size is not restricted to a power-of-2.
	 * So we need to find the highest power-of-2 which works for the
	 * chunk size.
	 * As an example scenario, we could have a unit_max of 16 KiB and a
	 * chunk size of 24 KiB. For this case, reduce t->unit_max to a value
	 * aligned with both limits, i.e. 8 KiB in this example.
	 */
	t->atomic_write_hw_unit_max = min(t->atomic_write_hw_unit_max,
					max_pow_of_two_factor(chunk_bytes));

	t->atomic_write_hw_unit_min = min(t->atomic_write_hw_unit_min,
					t->atomic_write_hw_unit_max);
	t->atomic_write_hw_max = min(t->atomic_write_hw_max, chunk_bytes);
}

/* Check stacking of first bottom device */
static bool blk_stack_atomic_writes_head(struct queue_limits *t,
				struct queue_limits *b)
{
	if (!blk_valid_atomic_writes_boundary(t->chunk_sectors,
			b->atomic_write_hw_boundary >> SECTOR_SHIFT))
		return false;

	t->atomic_write_hw_unit_max = b->atomic_write_hw_unit_max;
	t->atomic_write_hw_unit_min = b->atomic_write_hw_unit_min;
	t->atomic_write_hw_max = b->atomic_write_hw_max;
	t->atomic_write_hw_boundary = b->atomic_write_hw_boundary;
	return true;
}

static void blk_stack_atomic_writes_limits(struct queue_limits *t,
				struct queue_limits *b, sector_t start)
{
	if (!(b->features & BLK_FEAT_ATOMIC_WRITES))
		goto unsupported;

	if (!b->atomic_write_hw_unit_min)
		goto unsupported;

	if (!blk_atomic_write_start_sect_aligned(start, b))
		goto unsupported;

	/* UINT_MAX indicates no stacking of bottom devices yet */
	if (t->atomic_write_hw_max == UINT_MAX) {
		if (!blk_stack_atomic_writes_head(t, b))
			goto unsupported;
	} else {
		if (!blk_stack_atomic_writes_tail(t, b))
			goto unsupported;
	}
	blk_stack_atomic_writes_chunk_sectors(t);
	return;

unsupported:
	t->atomic_write_hw_max = 0;
	t->atomic_write_hw_unit_max = 0;
	t->atomic_write_hw_unit_min = 0;
	t->atomic_write_hw_boundary = 0;
}

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t: the stacking driver limits (top device)
 * @b: the underlying queue limits (bottom, component device)
 * @start: first data sector within component device
 *
 * Description:
 * This function is used by stacking drivers like MD and DM to ensure
 * that all component devices have compatible block sizes and
 * alignments. The stacking driver must provide a queue_limits
 * struct (top) and then iteratively call the stacking function for
 * all component (bottom) devices. The stacking function will
 * attempt to combine the values and ensure proper alignment.
 *
 * Returns 0 if the top and bottom queue_limits are compatible. The
 * top device's block sizes and alignment offsets may be adjusted to
 * ensure alignment with the bottom device. If no compatible sizes
 * and alignments exist, -1 is returned and the resulting top
 * queue_limits will have the misaligned flag set to indicate that
 * the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		sector_t start)
{
	unsigned int top, bottom, alignment;
	int ret = 0;

	t->features |= (b->features & BLK_FEAT_INHERIT_MASK);

	/*
	 * Some features need to be supported both by the stacking driver and
	 * all underlying devices. The stacking driver sets these flags before
	 * stacking the limits, and this will clear the flags if any of the
	 * underlying devices does not support them.
	 */
	if (!(b->features & BLK_FEAT_NOWAIT))
		t->features &= ~BLK_FEAT_NOWAIT;
	if (!(b->features & BLK_FEAT_POLL))
		t->features &= ~BLK_FEAT_POLL;

	t->flags |= (b->flags & BLK_FLAG_MISALIGNED);

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_user_sectors = min_not_zero(t->max_user_sectors,
			b->max_user_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
			b->max_write_zeroes_sectors);
	t->max_user_wzeroes_unmap_sectors =
			min(t->max_user_wzeroes_unmap_sectors,
			    b->max_user_wzeroes_unmap_sectors);
	t->max_hw_wzeroes_unmap_sectors =
			min(t->max_hw_wzeroes_unmap_sectors,
			    b->max_hw_wzeroes_unmap_sectors);

	t->max_hw_zone_append_sectors = min(t->max_hw_zone_append_sectors,
			b->max_hw_zone_append_sectors);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
			b->seg_boundary_mask);
	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
			b->virt_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
	t->max_discard_segments = min_not_zero(t->max_discard_segments,
			b->max_discard_segments);
	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
			b->max_integrity_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
			b->max_segment_size);

	alignment = queue_limit_alignment_offset(b, start);

	/*
	 * The bottom device has a different alignment. Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) % min(top, bottom)) {
			t->flags |= BLK_FLAG_MISALIGNED;
			ret = -1;
		}
	}

	t->logical_block_size = max(t->logical_block_size,
			b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
			b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
	t->dma_alignment = max(t->dma_alignment, b->dma_alignment);

	/* Set non-power-of-2 compatible chunk_sectors boundary */
	if (b->chunk_sectors)
		t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* chunk_sectors a multiple of the physical block size? */
	if (t->chunk_sectors % (t->physical_block_size >> SECTOR_SHIFT)) {
		t->chunk_sectors = 0;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
		% max(t->physical_block_size, t->io_min);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
	t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
	t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
				b->max_discard_sectors);
		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
				b->max_hw_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
				b->discard_granularity);
		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
			t->discard_granularity;
	}
	t->max_secure_erase_sectors = min_not_zero(t->max_secure_erase_sectors,
			b->max_secure_erase_sectors);
	t->zone_write_granularity = max(t->zone_write_granularity,
			b->zone_write_granularity);
	if (!(t->features & BLK_FEAT_ZONED)) {
		t->zone_write_granularity = 0;
		t->max_zone_append_sectors = 0;
	}
	blk_stack_atomic_writes_limits(t, b, start);

	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);

/**
 * queue_limits_stack_bdev - adjust queue_limits for stacked devices
 * @t: the stacking driver limits (top device)
 * @bdev: the underlying block device (bottom)
 * @offset: offset to beginning of data within component device
 * @pfx: prefix to use for warnings logged
 *
 * Description:
 * This function is used by stacking drivers like MD and DM to ensure
 * that all component devices have compatible block sizes and
 * alignments. The stacking driver must provide a queue_limits
 * struct (top) and then iteratively call the stacking function for
 * all component (bottom) devices. The stacking function will
 * attempt to combine the values and ensure proper alignment.
 */
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
		sector_t offset, const char *pfx)
{
	if (blk_stack_limits(t, bdev_limits(bdev),
			get_start_sect(bdev) + offset))
		pr_notice("%s: Warning: Device %pg is misaligned\n",
			pfx, bdev);
}
EXPORT_SYMBOL_GPL(queue_limits_stack_bdev);

/**
 * queue_limits_stack_integrity - stack integrity profile
 * @t: target queue limits
 * @b: base queue limits
 *
 * Check if the integrity profile in @b can be stacked into the
 * target @t. Stacking is possible if either:
 *
 * a) @t does not have any integrity information stacked into it yet
 * b) the integrity profile in @b is identical to the one in @t
 *
 * If @b can be stacked into @t, return %true. Else return %false and clear the
 * integrity information in @t.
 */
bool queue_limits_stack_integrity(struct queue_limits *t,
		struct queue_limits *b)
{
	struct blk_integrity *ti = &t->integrity;
	struct blk_integrity *bi = &b->integrity;

	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return true;

	if (ti->flags & BLK_INTEGRITY_STACKED) {
		if (ti->metadata_size != bi->metadata_size)
			goto incompatible;
		if (ti->interval_exp != bi->interval_exp)
			goto incompatible;
		if (ti->tag_size != bi->tag_size)
			goto incompatible;
		if (ti->csum_type != bi->csum_type)
			goto incompatible;
		if (ti->pi_tuple_size != bi->pi_tuple_size)
			goto incompatible;
		if ((ti->flags & BLK_INTEGRITY_REF_TAG) !=
		    (bi->flags & BLK_INTEGRITY_REF_TAG))
			goto incompatible;
	} else {
		ti->flags = BLK_INTEGRITY_STACKED;
		ti->flags |= (bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE) |
			     (bi->flags & BLK_INTEGRITY_REF_TAG);
		ti->csum_type = bi->csum_type;
		ti->pi_tuple_size = bi->pi_tuple_size;
		ti->metadata_size = bi->metadata_size;
		ti->pi_offset = bi->pi_offset;
		ti->interval_exp = bi->interval_exp;
		ti->tag_size = bi->tag_size;
	}
	return true;

incompatible:
	memset(ti, 0, sizeof(*ti));
	return false;
}
EXPORT_SYMBOL_GPL(queue_limits_stack_integrity);

/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q: the request queue for the device
 * @depth: queue depth
 *
 */
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
	q->queue_depth = depth;
	rq_qos_queue_depth_changed(q);
}
EXPORT_SYMBOL(blk_set_queue_depth);

int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.flags & BLK_FLAG_MISALIGNED)
		return -1;
	if (bdev_is_partition(bdev))
		return queue_limit_alignment_offset(&q->limits,
				bdev->bd_start_sect);
	return q->limits.alignment_offset;
}
EXPORT_SYMBOL_GPL(bdev_alignment_offset);

unsigned int bdev_discard_alignment(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (bdev_is_partition(bdev))
		return queue_limit_discard_alignment(&q->limits,
				bdev->bd_start_sect);
	return q->limits.discard_alignment;
}
EXPORT_SYMBOL_GPL(bdev_discard_alignment);