// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blk-integrity.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>
#include <linux/t10-pi.h>
#include <linux/crc64.h>

#include "blk.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	WRITE_ONCE(q->rq_timeout, timeout);
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim: the queue_limits structure to reset
 *
 * Prepare queue limits for applying limits from underlying devices using
 * blk_stack_limits().
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
	memset(lim, 0, sizeof(*lim));
	lim->logical_block_size = SECTOR_SIZE;
	lim->physical_block_size = SECTOR_SIZE;
	lim->io_min = SECTOR_SIZE;
	lim->discard_granularity = SECTOR_SIZE;
	lim->dma_alignment = SECTOR_SIZE - 1;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;

	/* Inherit limits from component devices */
	lim->max_segments = USHRT_MAX;
	lim->max_discard_segments = USHRT_MAX;
	lim->max_hw_sectors = UINT_MAX;
	lim->max_segment_size = UINT_MAX;
	lim->max_sectors = UINT_MAX;
	lim->max_dev_sectors = UINT_MAX;
	lim->max_write_zeroes_sectors = UINT_MAX;
	lim->max_hw_wzeroes_unmap_sectors = UINT_MAX;
	lim->max_user_wzeroes_unmap_sectors = UINT_MAX;
	lim->max_hw_zone_append_sectors = UINT_MAX;
	lim->max_user_discard_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);
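
/*
 * Illustrative sketch (not taken from an in-tree driver): a stacking
 * driver such as MD or DM typically resets its limits with
 * blk_set_stacking_limits() and then folds in every component device,
 * e.g. via queue_limits_stack_bdev() further down in this file. The
 * "component_bdevs" array, "nr_components" count and "md0" prefix are
 * hypothetical placeholders.
 *
 *	struct queue_limits lim;
 *	int i;
 *
 *	blk_set_stacking_limits(&lim);
 *	for (i = 0; i < nr_components; i++)
 *		queue_limits_stack_bdev(&lim, component_bdevs[i], 0, "md0");
 *	err = queue_limits_set(q, &lim);
 */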

void blk_apply_bdi_limits(struct backing_dev_info *bdi,
		struct queue_limits *lim)
{
	u64 io_opt = lim->io_opt;

	/*
	 * For read-ahead of large files to be effective, we need to read ahead
	 * at least twice the optimal I/O size. For rotational devices that do
	 * not report an optimal I/O size (e.g. ATA HDDs), use the maximum I/O
	 * size to avoid falling back to the (rather inefficient) small default
	 * read-ahead size.
	 *
	 * There is no hardware limitation for the read-ahead size and the user
	 * might have increased the read-ahead size through sysfs, so don't ever
	 * decrease it.
	 */
	if (!io_opt && (lim->features & BLK_FEAT_ROTATIONAL))
		io_opt = (u64)lim->max_sectors << SECTOR_SHIFT;

	bdi->ra_pages = max3(bdi->ra_pages,
			io_opt * 2 >> PAGE_SHIFT,
			VM_READAHEAD_PAGES);
	bdi->io_pages = lim->max_sectors >> PAGE_SECTORS_SHIFT;
}

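/*
 * Worked example for the read-ahead sizing above (assuming 4K pages, so
 * PAGE_SHIFT == 12): a device reporting io_opt = 1 MiB gets ra_pages
 * raised to at least (2 MiB >> 12) == 512 pages, while a rotational
 * device with io_opt == 0 and max_sectors = 2560 uses
 * 2 * (2560 << 9) >> 12 == 640 pages instead of the small
 * VM_READAHEAD_PAGES default.
 */
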
static int blk_validate_zoned_limits(struct queue_limits *lim)
{
	if (!(lim->features & BLK_FEAT_ZONED)) {
		if (WARN_ON_ONCE(lim->max_open_zones) ||
		    WARN_ON_ONCE(lim->max_active_zones) ||
		    WARN_ON_ONCE(lim->zone_write_granularity) ||
		    WARN_ON_ONCE(lim->max_zone_append_sectors))
			return -EINVAL;
		return 0;
	}

	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED)))
		return -EINVAL;

	/*
	 * Given that active zones include open zones, the maximum number of
	 * open zones cannot be larger than the maximum number of active zones.
	 */
	if (lim->max_active_zones &&
	    lim->max_open_zones > lim->max_active_zones)
		return -EINVAL;

	if (lim->zone_write_granularity < lim->logical_block_size)
		lim->zone_write_granularity = lim->logical_block_size;

	/*
	 * The Zone Append size is limited by the maximum I/O size and the zone
	 * size given that it can't span zones.
	 *
	 * If no max_hw_zone_append_sectors limit is provided, the block layer
	 * will emulate it, else we're also bound by the hardware limit.
	 */
	lim->max_zone_append_sectors =
		min_not_zero(lim->max_hw_zone_append_sectors,
			min(lim->chunk_sectors, lim->max_hw_sectors));
	return 0;
}

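/*
 * Example of the Zone Append cap above (hypothetical numbers): with
 * chunk_sectors = 524288 (a 256 MiB zone), max_hw_sectors = 2048 and no
 * hardware Zone Append limit, max_zone_append_sectors becomes
 * min_not_zero(0, min(524288, 2048)) == 2048 and the block layer emulates
 * Zone Append; if the hardware instead reports
 * max_hw_zone_append_sectors = 1024, the result is capped at 1024.
 */
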
static int blk_validate_integrity_limits(struct queue_limits *lim)
{
	struct blk_integrity *bi = &lim->integrity;

	if (!bi->metadata_size) {
		if (bi->csum_type != BLK_INTEGRITY_CSUM_NONE ||
		    bi->tag_size || ((bi->flags & BLK_INTEGRITY_REF_TAG))) {
			pr_warn("invalid PI settings.\n");
			return -EINVAL;
		}
		bi->flags |= BLK_INTEGRITY_NOGENERATE | BLK_INTEGRITY_NOVERIFY;
		return 0;
	}

	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) {
		pr_warn("integrity support disabled.\n");
		return -EINVAL;
	}

	if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE &&
	    (bi->flags & BLK_INTEGRITY_REF_TAG)) {
		pr_warn("ref tag not supported without checksum.\n");
		return -EINVAL;
	}

	if (bi->pi_tuple_size > bi->metadata_size) {
		pr_warn("pi_tuple_size (%u) exceeds metadata_size (%u)\n",
			bi->pi_tuple_size,
			bi->metadata_size);
		return -EINVAL;
	}

	switch (bi->csum_type) {
	case BLK_INTEGRITY_CSUM_NONE:
		if (bi->pi_tuple_size) {
			pr_warn("pi_tuple_size must be 0 when checksum type is none\n");
			return -EINVAL;
		}
		break;
	case BLK_INTEGRITY_CSUM_CRC:
	case BLK_INTEGRITY_CSUM_IP:
		if (bi->pi_tuple_size != sizeof(struct t10_pi_tuple)) {
			pr_warn("pi_tuple_size mismatch for T10 PI: expected %zu, got %u\n",
				sizeof(struct t10_pi_tuple),
				bi->pi_tuple_size);
			return -EINVAL;
		}
		break;
	case BLK_INTEGRITY_CSUM_CRC64:
		if (bi->pi_tuple_size != sizeof(struct crc64_pi_tuple)) {
			pr_warn("pi_tuple_size mismatch for CRC64 PI: expected %zu, got %u\n",
				sizeof(struct crc64_pi_tuple),
				bi->pi_tuple_size);
			return -EINVAL;
		}
		break;
	}

	if (!bi->interval_exp)
		bi->interval_exp = ilog2(lim->logical_block_size);

	return 0;
}

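/*
 * Illustrative sketch (hypothetical values, not a specific driver): a
 * device exposing 512 + 8 byte sectors with T10 PI Type 1 protection
 * would fill in the integrity limits roughly like this before they are
 * validated above:
 *
 *	lim.integrity.csum_type = BLK_INTEGRITY_CSUM_CRC;
 *	lim.integrity.metadata_size = 8;
 *	lim.integrity.pi_tuple_size = sizeof(struct t10_pi_tuple);
 *	lim.integrity.flags |= BLK_INTEGRITY_REF_TAG;
 *
 * interval_exp is left at zero so that it defaults to
 * ilog2(logical_block_size).
 */
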
/*
 * Returns max guaranteed bytes which we can fit in a bio.
 *
 * We request that an atomic_write is an ITER_UBUF iov_iter (so a single
 * vector), so we assume that we can fit in at least PAGE_SIZE in a segment,
 * apart from the first and last segments.
 */
static unsigned int blk_queue_max_guaranteed_bio(struct queue_limits *lim)
{
	unsigned int max_segments = min(BIO_MAX_VECS, lim->max_segments);
	unsigned int length;

	length = min(max_segments, 2) * lim->logical_block_size;
	if (max_segments > 2)
		length += (max_segments - 2) * PAGE_SIZE;

	return length;
}

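/*
 * Worked example (assuming 4K pages and a 512 byte logical block size):
 * with max_segments = 128 the guarantee is 2 * 512 bytes for the first and
 * last segments plus 126 * PAGE_SIZE for the rest, i.e. 517120 bytes;
 * blk_atomic_writes_update_limits() below then combines this with
 * max_hw_sectors and rounds the result down to a power of two.
 */
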
static void blk_atomic_writes_update_limits(struct queue_limits *lim)
{
	unsigned int unit_limit = min(lim->max_hw_sectors << SECTOR_SHIFT,
					blk_queue_max_guaranteed_bio(lim));

	unit_limit = rounddown_pow_of_two(unit_limit);

	lim->atomic_write_max_sectors =
		min(lim->atomic_write_hw_max >> SECTOR_SHIFT,
			lim->max_hw_sectors);
	lim->atomic_write_unit_min =
		min(lim->atomic_write_hw_unit_min, unit_limit);
	lim->atomic_write_unit_max =
		min(lim->atomic_write_hw_unit_max, unit_limit);
	lim->atomic_write_boundary_sectors =
		lim->atomic_write_hw_boundary >> SECTOR_SHIFT;
}

static void blk_validate_atomic_write_limits(struct queue_limits *lim)
{
	unsigned int boundary_sectors;
	unsigned int atomic_write_hw_max_sectors =
		lim->atomic_write_hw_max >> SECTOR_SHIFT;

	if (!(lim->features & BLK_FEAT_ATOMIC_WRITES))
		goto unsupported;

	if (!lim->atomic_write_hw_max)
		goto unsupported;

	if (WARN_ON_ONCE(!is_power_of_2(lim->atomic_write_hw_unit_min)))
		goto unsupported;

	if (WARN_ON_ONCE(!is_power_of_2(lim->atomic_write_hw_unit_max)))
		goto unsupported;

	if (WARN_ON_ONCE(lim->atomic_write_hw_unit_min >
			 lim->atomic_write_hw_unit_max))
		goto unsupported;

	if (WARN_ON_ONCE(lim->atomic_write_hw_unit_max >
			 lim->atomic_write_hw_max))
		goto unsupported;

	if (WARN_ON_ONCE(lim->chunk_sectors &&
			atomic_write_hw_max_sectors > lim->chunk_sectors))
		goto unsupported;

	boundary_sectors = lim->atomic_write_hw_boundary >> SECTOR_SHIFT;

	if (boundary_sectors) {
		if (WARN_ON_ONCE(lim->atomic_write_hw_max >
				 lim->atomic_write_hw_boundary))
			goto unsupported;
		/*
		 * A feature of boundary support is that it disallows merging
		 * bios if the resulting request would cross either a chunk
		 * sector or atomic write HW boundary, even though chunk
		 * sectors may be just set for performance.
		 * For simplicity, disallow atomic writes for a chunk sector
		 * which is non-zero and smaller than the atomic write HW
		 * boundary. Furthermore, chunk sectors must be a multiple of
		 * the atomic write HW boundary. Otherwise boundary support
		 * becomes complicated.
		 * Devices which do not conform to these rules can be dealt
		 * with if and when they show up.
		 */
		if (WARN_ON_ONCE(lim->chunk_sectors % boundary_sectors))
			goto unsupported;

		/*
		 * The boundary size just needs to be a multiple of unit_max
		 * (and not necessarily a power-of-2), so this following check
		 * could be relaxed in future.
		 * Furthermore, if needed, unit_max could even be reduced so
		 * that it is compliant with a !power-of-2 boundary.
		 */
		if (!is_power_of_2(boundary_sectors))
			goto unsupported;
	}

	blk_atomic_writes_update_limits(lim);
	return;

unsupported:
	lim->atomic_write_max_sectors = 0;
	lim->atomic_write_boundary_sectors = 0;
	lim->atomic_write_unit_min = 0;
	lim->atomic_write_unit_max = 0;
}

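/*
 * Illustrative sketch (hypothetical values): a driver advertising 64K
 * atomic writes with a 4K minimum unit and no boundary constraint would
 * feed something like the following into the validation above (all of the
 * "hw" fields here are byte values):
 *
 *	lim.features |= BLK_FEAT_ATOMIC_WRITES;
 *	lim.atomic_write_hw_unit_min = SZ_4K;
 *	lim.atomic_write_hw_unit_max = SZ_64K;
 *	lim.atomic_write_hw_max = SZ_64K;
 *	lim.atomic_write_hw_boundary = 0;
 */
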
/*
 * Check that the limits in lim are valid, initialize defaults for unset
 * values, and cap values based on others where needed.
 */
int blk_validate_limits(struct queue_limits *lim)
{
	unsigned int max_hw_sectors;
	unsigned int logical_block_sectors;
	unsigned long seg_size;
	int err;

	/*
	 * Unless otherwise specified, default to 512 byte logical blocks and a
	 * physical block size equal to the logical block size.
	 */
	if (!lim->logical_block_size)
		lim->logical_block_size = SECTOR_SIZE;
	else if (blk_validate_block_size(lim->logical_block_size)) {
		pr_warn("Invalid logical block size (%d)\n", lim->logical_block_size);
		return -EINVAL;
	}
	if (lim->physical_block_size < lim->logical_block_size) {
		lim->physical_block_size = lim->logical_block_size;
	} else if (!is_power_of_2(lim->physical_block_size)) {
		pr_warn("Invalid physical block size (%d)\n", lim->physical_block_size);
		return -EINVAL;
	}

	/*
	 * The minimum I/O size defaults to the physical block size unless
	 * explicitly overridden.
	 */
	if (lim->io_min < lim->physical_block_size)
		lim->io_min = lim->physical_block_size;

	/*
	 * The optimal I/O size may not be aligned to physical block size
	 * (because it may be limited by dma engines which have no clue about
	 * block size of the disks attached to them), so we round it down here.
	 */
	lim->io_opt = round_down(lim->io_opt, lim->physical_block_size);

	/*
	 * max_hw_sectors has a somewhat weird default for historical reasons,
	 * but drivers really should set their own instead of relying on this
	 * value.
	 *
	 * The block layer relies on the fact that every driver can
	 * handle at least a page worth of data per I/O, and needs the value
	 * aligned to the logical block size.
	 */
	if (!lim->max_hw_sectors)
		lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
	if (WARN_ON_ONCE(lim->max_hw_sectors < PAGE_SECTORS))
		return -EINVAL;
	logical_block_sectors = lim->logical_block_size >> SECTOR_SHIFT;
	if (WARN_ON_ONCE(logical_block_sectors > lim->max_hw_sectors))
		return -EINVAL;
	lim->max_hw_sectors = round_down(lim->max_hw_sectors,
			logical_block_sectors);

	/*
	 * The actual max_sectors value is a complex beast and also takes the
	 * max_dev_sectors value (set by SCSI ULPs) and a user configurable
	 * value into account. The ->max_sectors value is always calculated
	 * from these, so directly setting it won't have any effect.
	 */
	max_hw_sectors = min_not_zero(lim->max_hw_sectors,
			lim->max_dev_sectors);
	if (lim->max_user_sectors) {
		if (lim->max_user_sectors < BLK_MIN_SEGMENT_SIZE / SECTOR_SIZE)
			return -EINVAL;
		lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
	} else if (lim->io_opt > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
		lim->max_sectors =
			min(max_hw_sectors, lim->io_opt >> SECTOR_SHIFT);
	} else if (lim->io_min > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
		lim->max_sectors =
			min(max_hw_sectors, lim->io_min >> SECTOR_SHIFT);
	} else {
		lim->max_sectors = min(max_hw_sectors, BLK_DEF_MAX_SECTORS_CAP);
	}
	lim->max_sectors = round_down(lim->max_sectors,
			logical_block_sectors);

	/*
	 * Random default for the maximum number of segments. Drivers should
	 * not rely on this and should set their own.
	 */
	if (!lim->max_segments)
		lim->max_segments = BLK_MAX_SEGMENTS;

	if (lim->max_hw_wzeroes_unmap_sectors &&
	    lim->max_hw_wzeroes_unmap_sectors != lim->max_write_zeroes_sectors)
		return -EINVAL;
	lim->max_wzeroes_unmap_sectors = min(lim->max_hw_wzeroes_unmap_sectors,
			lim->max_user_wzeroes_unmap_sectors);

	lim->max_discard_sectors =
		min(lim->max_hw_discard_sectors, lim->max_user_discard_sectors);

	/*
	 * When discard is not supported, discard_granularity should be reported
	 * as 0 to userspace.
	 */
	if (lim->max_discard_sectors)
		lim->discard_granularity =
			max(lim->discard_granularity, lim->physical_block_size);
	else
		lim->discard_granularity = 0;

	if (!lim->max_discard_segments)
		lim->max_discard_segments = 1;

	/*
	 * By default there is no limit on the segment boundary alignment,
	 * but if there is one it can't be smaller than the page size as
	 * that would break all the normal I/O patterns.
	 */
	if (!lim->seg_boundary_mask)
		lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	if (WARN_ON_ONCE(lim->seg_boundary_mask < BLK_MIN_SEGMENT_SIZE - 1))
		return -EINVAL;

	/*
	 * Stacking device may have both virtual boundary and max segment
	 * size limit, so allow this setting now, and long-term the two
	 * might need to move out of stacking limits since we have immutable
	 * bvec and lower layer bio splitting is supposed to handle the two
	 * correctly.
	 */
	if (lim->virt_boundary_mask) {
		if (!lim->max_segment_size)
			lim->max_segment_size = UINT_MAX;
	} else {
		/*
		 * The maximum segment size has an odd historic 64k default that
		 * drivers probably should override. Just like the I/O size we
		 * require drivers to at least handle a full page per segment.
		 */
		if (!lim->max_segment_size)
			lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
		if (WARN_ON_ONCE(lim->max_segment_size < BLK_MIN_SEGMENT_SIZE))
			return -EINVAL;
	}

	/* setup min segment size for building new segment in fast path */
	if (lim->seg_boundary_mask > lim->max_segment_size - 1)
		seg_size = lim->max_segment_size;
	else
		seg_size = lim->seg_boundary_mask + 1;
	lim->min_segment_size = min_t(unsigned int, seg_size, PAGE_SIZE);

	/*
	 * We require drivers to at least do logical block aligned I/O, but
	 * historically could not check for that due to the separate calls
	 * to set the limits. Once the transition is finished the check
	 * below should be narrowed down to check the logical block size.
	 */
	if (!lim->dma_alignment)
		lim->dma_alignment = SECTOR_SIZE - 1;
	if (WARN_ON_ONCE(lim->dma_alignment > PAGE_SIZE))
		return -EINVAL;

	if (lim->alignment_offset) {
		lim->alignment_offset &= (lim->physical_block_size - 1);
		lim->flags &= ~BLK_FLAG_MISALIGNED;
	}

	if (!(lim->features & BLK_FEAT_WRITE_CACHE))
		lim->features &= ~BLK_FEAT_FUA;

	blk_validate_atomic_write_limits(lim);

	err = blk_validate_integrity_limits(lim);
	if (err)
		return err;
	return blk_validate_zoned_limits(lim);
}
EXPORT_SYMBOL_GPL(blk_validate_limits);

/*
 * Set the default limits for a newly allocated queue. @lim contains the
 * initial limits set by the driver, which may be empty, in which case all
 * fields are cleared to zero.
 */
int blk_set_default_limits(struct queue_limits *lim)
{
	/*
	 * Most defaults are set by capping the bounds in blk_validate_limits,
	 * but these limits are special and need an explicit initialization to
	 * the max value here.
	 */
	lim->max_user_discard_sectors = UINT_MAX;
	lim->max_user_wzeroes_unmap_sectors = UINT_MAX;
	return blk_validate_limits(lim);
}

/**
 * queue_limits_commit_update - commit an atomic update of queue limits
 * @q: queue to update
 * @lim: limits to apply
 *
 * Apply the limits in @lim that were obtained from queue_limits_start_update()
 * and updated by the caller to @q. The caller must have frozen the queue or
 * ensure that there are no outstanding I/Os by other means.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_commit_update(struct request_queue *q,
		struct queue_limits *lim)
{
	int error;

	error = blk_validate_limits(lim);
	if (error)
		goto out_unlock;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	if (q->crypto_profile && lim->integrity.tag_size) {
		pr_warn("blk-integrity: Integrity and hardware inline encryption are not supported together.\n");
		error = -EINVAL;
		goto out_unlock;
	}
#endif

	q->limits = *lim;
	if (q->disk)
		blk_apply_bdi_limits(q->disk->bdi, lim);
out_unlock:
	mutex_unlock(&q->limits_lock);
	return error;
}
EXPORT_SYMBOL_GPL(queue_limits_commit_update);

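/*
 * Illustrative sketch (error handling elided): the usual update pattern
 * pairs the commit helpers with queue_limits_start_update(), which takes
 * q->limits_lock and returns a snapshot of the current limits.
 * "new_max_hw_sectors" stands in for whatever the driver just learned from
 * the hardware.
 *
 *	struct queue_limits lim = queue_limits_start_update(q);
 *
 *	lim.max_hw_sectors = new_max_hw_sectors;
 *	err = queue_limits_commit_update_frozen(q, &lim);
 */
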
/**
 * queue_limits_commit_update_frozen - commit an atomic update of queue limits
 * @q: queue to update
 * @lim: limits to apply
 *
 * Apply the limits in @lim that were obtained from queue_limits_start_update()
 * and updated with the new values by the caller to @q. Freezes the queue
 * before the update and unfreezes it after.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_commit_update_frozen(struct request_queue *q,
		struct queue_limits *lim)
{
	unsigned int memflags;
	int ret;

	memflags = blk_mq_freeze_queue(q);
	ret = queue_limits_commit_update(q, lim);
	blk_mq_unfreeze_queue(q, memflags);

	return ret;
}
EXPORT_SYMBOL_GPL(queue_limits_commit_update_frozen);

/**
 * queue_limits_set - apply queue limits to queue
 * @q: queue to update
 * @lim: limits to apply
 *
 * Apply the limits in @lim that were freshly initialized to @q.
 * To update existing limits use queue_limits_start_update() and
 * queue_limits_commit_update() instead.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
{
	mutex_lock(&q->limits_lock);
	return queue_limits_commit_update(q, lim);
}
EXPORT_SYMBOL_GPL(queue_limits_set);

static int queue_limit_alignment_offset(const struct queue_limits *lim,
		sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
		<< SECTOR_SHIFT;

	return (granularity + lim->alignment_offset - alignment) % granularity;
}

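/*
 * Worked example (hypothetical partition layout): with io_min = 4K
 * (a granularity of 8 sectors) and alignment_offset = 3584 bytes, a
 * partition starting at the legacy sector 63 gives
 * alignment = (63 % 8) << 9 == 3584, so the function returns
 * (4096 + 3584 - 3584) % 4096 == 0 and the partition is considered
 * aligned.
 */
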
static unsigned int queue_limit_discard_alignment(
		const struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment, granularity, offset;

	if (!lim->max_discard_sectors)
		return 0;

	/* Why are these in bytes, not sectors? */
	alignment = lim->discard_alignment >> SECTOR_SHIFT;
	granularity = lim->discard_granularity >> SECTOR_SHIFT;

	/* Offset of the partition start in 'granularity' sectors */
	offset = sector_div(sector, granularity);

	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
	offset = (granularity + alignment - offset) % granularity;

	/* Turn it back into bytes, gaah */
	return offset << SECTOR_SHIFT;
}

static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
{
	sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
	if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
		sectors = PAGE_SIZE >> SECTOR_SHIFT;
	return sectors;
}

/* Check if second and later bottom devices are compliant */
static bool blk_stack_atomic_writes_tail(struct queue_limits *t,
				struct queue_limits *b)
{
	/* We're not going to support different boundary sizes.. yet */
	if (t->atomic_write_hw_boundary != b->atomic_write_hw_boundary)
		return false;

	/* Can't support this */
	if (t->atomic_write_hw_unit_min > b->atomic_write_hw_unit_max)
		return false;

	/* Or this */
	if (t->atomic_write_hw_unit_max < b->atomic_write_hw_unit_min)
		return false;

	t->atomic_write_hw_max = min(t->atomic_write_hw_max,
				b->atomic_write_hw_max);
	t->atomic_write_hw_unit_min = max(t->atomic_write_hw_unit_min,
				b->atomic_write_hw_unit_min);
	t->atomic_write_hw_unit_max = min(t->atomic_write_hw_unit_max,
				b->atomic_write_hw_unit_max);
	return true;
}

/* Check for valid boundary of first bottom device */
static bool blk_stack_atomic_writes_boundary_head(struct queue_limits *t,
				struct queue_limits *b)
{
	/*
	 * Ensure atomic write boundary is aligned with chunk sectors. Stacked
	 * devices store chunk sectors in t->io_min.
	 */
	if (b->atomic_write_hw_boundary > t->io_min &&
	    b->atomic_write_hw_boundary % t->io_min)
		return false;
	if (t->io_min > b->atomic_write_hw_boundary &&
	    t->io_min % b->atomic_write_hw_boundary)
		return false;

	t->atomic_write_hw_boundary = b->atomic_write_hw_boundary;
	return true;
}

static void blk_stack_atomic_writes_chunk_sectors(struct queue_limits *t)
{
	unsigned int chunk_bytes;

	if (!t->chunk_sectors)
		return;

	/*
	 * If chunk sectors is so large that its value in bytes overflows
	 * UINT_MAX, then just shift it down so it definitely will fit.
	 * We don't support atomic writes of such a large size anyway.
	 */
	if (check_shl_overflow(t->chunk_sectors, SECTOR_SHIFT, &chunk_bytes))
		chunk_bytes = t->chunk_sectors;

	/*
	 * Find values for limits which work for chunk size.
	 * b->atomic_write_hw_unit_{min, max} may not be aligned with chunk
	 * size, as the chunk size is not restricted to a power-of-2.
	 * So we need to find the highest power-of-2 which works for the chunk
	 * size.
	 * As an example scenario, we could have t->unit_max = 16K and a chunk
	 * size of 24K. For this case, reduce t->unit_max to a value aligned
	 * with both limits, i.e. 8K in this example.
	 */
	t->atomic_write_hw_unit_max = min(t->atomic_write_hw_unit_max,
					max_pow_of_two_factor(chunk_bytes));

	t->atomic_write_hw_unit_min = min(t->atomic_write_hw_unit_min,
					  t->atomic_write_hw_unit_max);
	t->atomic_write_hw_max = min(t->atomic_write_hw_max, chunk_bytes);
}

/* Check stacking of first bottom device */
static bool blk_stack_atomic_writes_head(struct queue_limits *t,
				struct queue_limits *b)
{
	if (b->atomic_write_hw_boundary &&
	    !blk_stack_atomic_writes_boundary_head(t, b))
		return false;

	t->atomic_write_hw_unit_max = b->atomic_write_hw_unit_max;
	t->atomic_write_hw_unit_min = b->atomic_write_hw_unit_min;
	t->atomic_write_hw_max = b->atomic_write_hw_max;
	return true;
}

static void blk_stack_atomic_writes_limits(struct queue_limits *t,
				struct queue_limits *b, sector_t start)
{
	if (!(b->features & BLK_FEAT_ATOMIC_WRITES))
		goto unsupported;

	if (!b->atomic_write_hw_unit_min)
		goto unsupported;

	if (!blk_atomic_write_start_sect_aligned(start, b))
		goto unsupported;

	/*
	 * If atomic_write_hw_max is set, we have already stacked 1x bottom
	 * device, so check for compliance.
	 */
	if (t->atomic_write_hw_max) {
		if (!blk_stack_atomic_writes_tail(t, b))
			goto unsupported;
		return;
	}

	if (!blk_stack_atomic_writes_head(t, b))
		goto unsupported;
	blk_stack_atomic_writes_chunk_sectors(t);
	return;

unsupported:
	t->atomic_write_hw_max = 0;
	t->atomic_write_hw_unit_max = 0;
	t->atomic_write_hw_unit_min = 0;
	t->atomic_write_hw_boundary = 0;
}

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t: the stacking driver limits (top device)
 * @b: the underlying queue limits (bottom, component device)
 * @start: first data sector within component device
 *
 * Description:
 * This function is used by stacking drivers like MD and DM to ensure
 * that all component devices have compatible block sizes and
 * alignments. The stacking driver must provide a queue_limits
 * struct (top) and then iteratively call the stacking function for
 * all component (bottom) devices. The stacking function will
 * attempt to combine the values and ensure proper alignment.
 *
 * Returns 0 if the top and bottom queue_limits are compatible. The
 * top device's block sizes and alignment offsets may be adjusted to
 * ensure alignment with the bottom device. If no compatible sizes
 * and alignments exist, -1 is returned and the resulting top
 * queue_limits will have the misaligned flag set to indicate that
 * the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		sector_t start)
{
	unsigned int top, bottom, alignment, ret = 0;

	t->features |= (b->features & BLK_FEAT_INHERIT_MASK);

	/*
	 * Some features need to be supported both by the stacking driver and
	 * all underlying devices. The stacking driver sets these flags before
	 * stacking the limits, and this will clear the flags if any of the
	 * underlying devices does not support it.
	 */
	if (!(b->features & BLK_FEAT_NOWAIT))
		t->features &= ~BLK_FEAT_NOWAIT;
	if (!(b->features & BLK_FEAT_POLL))
		t->features &= ~BLK_FEAT_POLL;

	t->flags |= (b->flags & BLK_FLAG_MISALIGNED);

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_user_sectors = min_not_zero(t->max_user_sectors,
			b->max_user_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
					b->max_write_zeroes_sectors);
	t->max_user_wzeroes_unmap_sectors =
			min(t->max_user_wzeroes_unmap_sectors,
			    b->max_user_wzeroes_unmap_sectors);
	t->max_hw_wzeroes_unmap_sectors =
			min(t->max_hw_wzeroes_unmap_sectors,
			    b->max_hw_wzeroes_unmap_sectors);

	t->max_hw_zone_append_sectors = min(t->max_hw_zone_append_sectors,
					b->max_hw_zone_append_sectors);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					b->seg_boundary_mask);
	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
					b->virt_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
	t->max_discard_segments = min_not_zero(t->max_discard_segments,
					b->max_discard_segments);
	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
					b->max_integrity_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					b->max_segment_size);

	alignment = queue_limit_alignment_offset(b, start);

	/* Bottom device has different alignment. Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) % min(top, bottom)) {
			t->flags |= BLK_FLAG_MISALIGNED;
			ret = -1;
		}
	}

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
	t->dma_alignment = max(t->dma_alignment, b->dma_alignment);

	/* Set non-power-of-2 compatible chunk_sectors boundary */
	if (b->chunk_sectors)
		t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* chunk_sectors a multiple of the physical block size? */
	if (t->chunk_sectors % (t->physical_block_size >> SECTOR_SHIFT)) {
		t->chunk_sectors = 0;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
		% max(t->physical_block_size, t->io_min);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
	t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
	t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
							 b->max_hw_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
			t->discard_granularity;
	}
	t->max_secure_erase_sectors = min_not_zero(t->max_secure_erase_sectors,
						   b->max_secure_erase_sectors);
	t->zone_write_granularity = max(t->zone_write_granularity,
					b->zone_write_granularity);
	if (!(t->features & BLK_FEAT_ZONED)) {
		t->zone_write_granularity = 0;
		t->max_zone_append_sectors = 0;
	}
	blk_stack_atomic_writes_limits(t, b, start);

	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);
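
/*
 * Worked example (hypothetical component devices): stacking a bottom
 * device with physical_block_size = io_min = 4K and io_opt = 1 MiB into a
 * top that already saw a component with io_opt = 1.5 MiB leaves the top
 * with physical_block_size = io_min = 4K and
 * io_opt = lcm(1 MiB, 1.5 MiB) == 3 MiB; the checks above only set
 * BLK_FLAG_MISALIGNED if the combined values stop being multiples of one
 * another.
 */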

/**
 * queue_limits_stack_bdev - adjust queue_limits for stacked devices
 * @t: the stacking driver limits (top device)
 * @bdev: the underlying block device (bottom)
 * @offset: offset to beginning of data within component device
 * @pfx: prefix to use for warnings logged
 *
 * Description:
 * This function is used by stacking drivers like MD and DM to ensure
 * that all component devices have compatible block sizes and
 * alignments. The stacking driver must provide a queue_limits
 * struct (top) and then iteratively call the stacking function for
 * all component (bottom) devices. The stacking function will
 * attempt to combine the values and ensure proper alignment.
 */
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
		sector_t offset, const char *pfx)
{
	if (blk_stack_limits(t, bdev_limits(bdev),
			get_start_sect(bdev) + offset))
		pr_notice("%s: Warning: Device %pg is misaligned\n",
			pfx, bdev);
}
EXPORT_SYMBOL_GPL(queue_limits_stack_bdev);

/**
 * queue_limits_stack_integrity - stack integrity profile
 * @t: target queue limits
 * @b: base queue limits
 *
 * Check if the integrity profile in @b can be stacked into the target @t.
 * Stacking is possible if either:
 *
 * a) @t does not have any integrity information stacked into it yet
 * b) the integrity profile in @b is identical to the one in @t
 *
 * If @b can be stacked into @t, return %true. Else return %false and clear the
 * integrity information in @t.
 */
bool queue_limits_stack_integrity(struct queue_limits *t,
		struct queue_limits *b)
{
	struct blk_integrity *ti = &t->integrity;
	struct blk_integrity *bi = &b->integrity;

	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return true;

	if (ti->flags & BLK_INTEGRITY_STACKED) {
		if (ti->metadata_size != bi->metadata_size)
			goto incompatible;
		if (ti->interval_exp != bi->interval_exp)
			goto incompatible;
		if (ti->tag_size != bi->tag_size)
			goto incompatible;
		if (ti->csum_type != bi->csum_type)
			goto incompatible;
		if (ti->pi_tuple_size != bi->pi_tuple_size)
			goto incompatible;
		if ((ti->flags & BLK_INTEGRITY_REF_TAG) !=
		    (bi->flags & BLK_INTEGRITY_REF_TAG))
			goto incompatible;
	} else {
		ti->flags = BLK_INTEGRITY_STACKED;
		ti->flags |= (bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE) |
			     (bi->flags & BLK_INTEGRITY_REF_TAG);
		ti->csum_type = bi->csum_type;
		ti->pi_tuple_size = bi->pi_tuple_size;
		ti->metadata_size = bi->metadata_size;
		ti->pi_offset = bi->pi_offset;
		ti->interval_exp = bi->interval_exp;
		ti->tag_size = bi->tag_size;
	}
	return true;

incompatible:
	memset(ti, 0, sizeof(*ti));
	return false;
}
EXPORT_SYMBOL_GPL(queue_limits_stack_integrity);

/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q: the request queue for the device
 * @depth: queue depth
 */
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
	q->queue_depth = depth;
	rq_qos_queue_depth_changed(q);
}
EXPORT_SYMBOL(blk_set_queue_depth);

int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.flags & BLK_FLAG_MISALIGNED)
		return -1;
	if (bdev_is_partition(bdev))
		return queue_limit_alignment_offset(&q->limits,
				bdev->bd_start_sect);
	return q->limits.alignment_offset;
}
EXPORT_SYMBOL_GPL(bdev_alignment_offset);

unsigned int bdev_discard_alignment(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (bdev_is_partition(bdev))
		return queue_limit_discard_alignment(&q->limits,
				bdev->bd_start_sect);
	return q->limits.discard_alignment;
}
EXPORT_SYMBOL_GPL(bdev_discard_alignment);