xref: /linux/block/blk-settings.c (revision d39d0ed196aa1685bb24771e92f78633c66ac9cb)
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>

#include "blk.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;
/**
 * blk_queue_prep_rq - set a prepare_request function for queue
 * @q:		queue
 * @pfn:	prepare_request function
 *
 * It's possible for a queue to register a prepare_request callback which
 * is invoked before the request is handed to the request_fn. The goal of
 * the function is to prepare a request for I/O; it can be used to build a
 * CDB from the request data, for instance.
 *
 */
void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
{
	q->prep_rq_fn = pfn;
}
EXPORT_SYMBOL(blk_queue_prep_rq);

/**
 * blk_queue_unprep_rq - set an unprepare_request function for queue
 * @q:		queue
 * @ufn:	unprepare_request function
 *
 * It's possible for a queue to register an unprepare_request callback
 * which is invoked before the request is finally completed. The goal
 * of the function is to deallocate any data that was allocated in the
 * prepare_request callback.
 *
 */
void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn)
{
	q->unprep_rq_fn = ufn;
}
EXPORT_SYMBOL(blk_queue_unprep_rq);

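/*
 * Example (illustrative sketch, not part of this file): a hypothetical
 * driver "mydrv" pairing the two callbacks above to allocate a per-request
 * command buffer at prepare time and free it at completion.  The mydrv_*
 * names are made up; BLKPREP_OK/BLKPREP_DEFER and rq->special are the
 * real block-layer facilities involved.  GFP_ATOMIC is used because
 * prep_rq_fn is called with the queue lock held.
 *
 *	static int mydrv_prep_rq_fn(struct request_queue *q,
 *				    struct request *rq)
 *	{
 *		struct mydrv_cmd *cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
 *
 *		if (!cmd)
 *			return BLKPREP_DEFER;
 *		rq->special = cmd;
 *		return BLKPREP_OK;
 *	}
 *
 *	static void mydrv_unprep_rq_fn(struct request_queue *q,
 *				       struct request *rq)
 *	{
 *		kfree(rq->special);
 *		rq->special = NULL;
 *	}
 *
 *	blk_queue_prep_rq(q, mydrv_prep_rq_fn);
 *	blk_queue_unprep_rq(q, mydrv_unprep_rq_fn);
 */
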
/**
 * blk_queue_merge_bvec - set a merge_bvec function for queue
 * @q:		queue
 * @mbfn:	merge_bvec_fn
 *
 * Usually queues have static limitations on the max sectors or segments that
 * we can put in a request. Stacking drivers may have some settings that
 * are dynamic, and thus we have to query the queue whether it is ok to
 * add a new bio_vec to a bio at a given offset or not. If the block device
 * has such limitations, it needs to register a merge_bvec_fn to control
 * the size of bios sent to it. Note that a block device *must* allow a
 * single page to be added to an empty bio. The block device driver may want
 * to use the bio_split() function to deal with these bios. By default
 * no merge_bvec_fn is defined for a queue, and only the fixed limits are
 * honored.
 */
void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
{
	q->merge_bvec_fn = mbfn;
}
EXPORT_SYMBOL(blk_queue_merge_bvec);

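/*
 * Example (illustrative sketch, not part of this file): a chunked stacking
 * driver refusing bio_vecs that would cross a chunk boundary, in the style
 * of md/raid0.  MYDRV_CHUNK_SECTORS is a hypothetical power-of-two chunk
 * size in sectors; the return value is how many bytes may be added at this
 * offset.  Note the bio_sectors == 0 case, which honors the rule above
 * that a single page must always fit into an empty bio.
 *
 *	static int mydrv_merge_bvec(struct request_queue *q,
 *				    struct bvec_merge_data *bvm,
 *				    struct bio_vec *biovec)
 *	{
 *		sector_t sector = bvm->bi_sector;
 *		unsigned int bio_sectors = bvm->bi_size >> 9;
 *		int max;
 *
 *		max = (MYDRV_CHUNK_SECTORS - ((sector &
 *			(MYDRV_CHUNK_SECTORS - 1)) + bio_sectors)) << 9;
 *		if (max < 0)
 *			max = 0;
 *		if (max <= biovec->bv_len && bio_sectors == 0)
 *			return biovec->bv_len;
 *		return max;
 *	}
 *
 *	blk_queue_merge_bvec(q, mydrv_merge_bvec);
 */
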
void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
{
	q->softirq_done_fn = fn;
}
EXPORT_SYMBOL(blk_queue_softirq_done);

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
{
	q->rq_timed_out_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);

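/*
 * Example (illustrative sketch, not part of this file): arming a 30 second
 * request timeout with a handler that either extends the deadline or lets
 * the block layer take over.  mydrv_hw_busy() is a hypothetical hardware
 * query; the enum blk_eh_timer_return values are real.
 *
 *	static enum blk_eh_timer_return mydrv_timed_out(struct request *rq)
 *	{
 *		if (mydrv_hw_busy(rq))
 *			return BLK_EH_RESET_TIMER;
 *		return BLK_EH_NOT_HANDLED;
 *	}
 *
 *	blk_queue_rq_timeout(q, 30 * HZ);
 *	blk_queue_rq_timed_out(q, mydrv_timed_out);
 */
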
void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
{
	q->lld_busy_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_lld_busy);

/**
 * blk_set_default_limits - reset limits to default values
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limits struct to its default state.  Can be used by
 *   stacking drivers like DM that stage table swaps and reuse an
 *   existing device queue.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
	lim->max_segments = BLK_MAX_SEGMENTS;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
	lim->max_sectors = BLK_DEF_MAX_SECTORS;
	lim->max_hw_sectors = INT_MAX;
	lim->max_discard_sectors = 0;
	lim->discard_granularity = 0;
	lim->discard_alignment = 0;
	lim->discard_misaligned = 0;
	lim->discard_zeroes_data = -1;
	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
	lim->alignment_offset = 0;
	lim->io_opt = 0;
	lim->misaligned = 0;
	lim->no_cluster = 0;
}
EXPORT_SYMBOL(blk_set_default_limits);

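/*
 * Example (illustrative sketch, not part of this file): the DM-style table
 * swap the comment above refers to.  A stacking driver builds the combined
 * limits in a scratch structure, starting from the defaults, then publishes
 * them on its live queue.  The component iteration shown is hypothetical.
 *
 *	struct queue_limits limits;
 *	int i;
 *
 *	blk_set_default_limits(&limits);
 *	for (i = 0; i < nr_targets; i++)
 *		bdev_stack_limits(&limits, target_bdev[i], target_start[i]);
 *	q->limits = limits;
 */
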
/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q:  the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Description:
 *    The normal way for &struct bios to be passed to a device
 *    driver is for them to be collected into requests on a request
 *    queue, and then to allow the device driver to select requests
 *    off that queue when it is ready.  This works well for many block
 *    devices. However, some block devices (typically virtual devices
 *    such as md or lvm) do not benefit from the processing on the
 *    request queue, and are served best by having the requests passed
 *    directly to them.  This can be achieved by providing a function
 *    to blk_queue_make_request().
 *
 * Caveat:
 *    The driver that does this *must* be able to deal appropriately
 *    with buffers in "high memory". This can be accomplished by either calling
 *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
 *    blk_queue_bounce() to create a buffer in normal memory.
 **/
void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
	/*
	 * set defaults
	 */
	q->nr_requests = BLKDEV_MAX_RQ;

	q->make_request_fn = mfn;
	blk_queue_dma_alignment(q, 511);
	blk_queue_congestion_threshold(q);
	q->nr_batching = BLK_BATCH_REQ;

	q->unplug_thresh = 4;		/* hmm */
	q->unplug_delay = msecs_to_jiffies(3);	/* 3 milliseconds */
	if (q->unplug_delay == 0)
		q->unplug_delay = 1;

	q->unplug_timer.function = blk_unplug_timeout;
	q->unplug_timer.data = (unsigned long)q;

	blk_set_default_limits(&q->limits);
	blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);

	/*
	 * If the caller didn't supply a lock, fall back to our embedded
	 * per-queue locks
	 */
	if (!q->queue_lock)
		q->queue_lock = &q->__queue_lock;

	/*
	 * by default assume old behaviour and bounce for any highmem page
	 */
	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}
EXPORT_SYMBOL(blk_queue_make_request);

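/*
 * Example (illustrative sketch, not part of this file): a bio-based virtual
 * driver that remaps bios straight to a lower device instead of queueing
 * requests.  mydrv_remap() is a hypothetical helper; blk_alloc_queue(),
 * generic_make_request() and bio_endio() are the real interfaces.
 *
 *	static int mydrv_make_request(struct request_queue *q, struct bio *bio)
 *	{
 *		if (!mydrv_remap(bio)) {
 *			bio_endio(bio, -EIO);
 *			return 0;
 *		}
 *		generic_make_request(bio);
 *		return 0;
 *	}
 *
 *	q = blk_alloc_queue(GFP_KERNEL);
 *	blk_queue_make_request(q, mydrv_make_request);
 */
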
/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @dma_mask: the maximum address the device can handle
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to. A low level driver can call
 *    blk_queue_bounce_limit() to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @dma_mask.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
{
	unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
	int dma = 0;

	q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
	/*
	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
	 * some IOMMUs can handle everything, but I don't know of a
	 * way to test this here.
	 */
	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
		dma = 1;
	q->limits.bounce_pfn = max_low_pfn;
#else
	if (b_pfn < blk_max_low_pfn)
		dma = 1;
	q->limits.bounce_pfn = b_pfn;
#endif
	if (dma) {
		init_emergency_isa_pool();
		q->bounce_gfp = GFP_NOIO | GFP_DMA;
		q->limits.bounce_pfn = b_pfn;
	}
}
EXPORT_SYMBOL(blk_queue_bounce_limit);

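/*
 * Example (illustrative sketch, not part of this file): typical settings
 * derived from a device's DMA addressing capability.
 *
 *	A 24-bit ISA-style device:
 *		blk_queue_bounce_limit(q, DMA_BIT_MASK(24));
 *	A fully 64-bit capable controller (no bouncing needed):
 *		blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
 */
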
/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set a hard upper limit,
 *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
 *    the device driver based upon the combined capabilities of the I/O
 *    controller and the storage device.
 *
 *    max_sectors is a soft limit imposed by the block layer for
 *    filesystem type requests.  This value can be overridden on a
 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *    The soft limit cannot exceed max_hw_sectors.
 **/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
	if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
		max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_hw_sectors);
	}

	q->limits.max_hw_sectors = max_hw_sectors;
	q->limits.max_sectors = min_t(unsigned int, max_hw_sectors,
				      BLK_DEF_MAX_SECTORS);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);

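/*
 * Example (illustrative sketch, not part of this file): a controller whose
 * scatter/gather hardware tops out at 1 MiB per request:
 *
 *	blk_queue_max_hw_sectors(q, (1024 * 1024) >> 9);
 *
 * which sets max_hw_sectors to 2048 512-byte sectors.  The soft
 * max_sectors limit is still clamped to BLK_DEF_MAX_SECTORS.
 */
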
/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q:  the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors)
{
	q->limits.max_discard_sectors = max_discard_sectors;
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.
 **/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_segments);
	}

	q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment.
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
	if (max_size < PAGE_CACHE_SIZE) {
		max_size = PAGE_CACHE_SIZE;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_size);
	}

	q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q:  the request queue for the device
 * @size:  the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
{
	q->limits.logical_block_size = size;

	if (q->limits.physical_block_size < size)
		q->limits.physical_block_size = size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_logical_block_size);

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q:  the request queue for the device
 * @size:  the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned short size)
{
	q->limits.physical_block_size = size;

	if (q->limits.physical_block_size < q->limits.logical_block_size)
		q->limits.physical_block_size = q->limits.logical_block_size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);

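/*
 * Example (illustrative sketch, not part of this file): a 512-byte
 * emulation ("512e") drive with 4 KiB physical sectors.  The two helpers
 * above keep physical_block_size and io_min consistent automatically.
 *
 *	blk_queue_logical_block_size(q, 512);
 *	blk_queue_physical_block_size(q, 4096);
 */
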
/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q:	the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
	q->limits.alignment_offset =
		offset & (q->limits.physical_block_size - 1);
	q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
	limits->io_min = min;

	if (limits->io_min < limits->logical_block_size)
		limits->io_min = limits->logical_block_size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q:	the request queue for the device
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty.  For disk drives this is often the
 *   physical block size.  For RAID arrays it is often the stripe chunk
 *   size.  A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
	blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
	limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q:	the request queue for the device
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
	blk_limits_io_opt(&q->limits, opt);
}
EXPORT_SYMBOL(blk_queue_io_opt);

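/*
 * Example (illustrative sketch, not part of this file): a RAID5-style
 * array with 64 KiB chunks across four data disks, advertising the chunk
 * as the minimum and the full stripe as the optimal I/O size.
 *
 *	blk_queue_io_min(q, 64 * 1024);
 *	blk_queue_io_opt(q, 4 * 64 * 1024);
 */
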
/*
 * Returns the minimum that is _not_ zero, unless both are zero.
 */
#define min_not_zero(l, r) ((l) == 0 ? (r) : (((r) == 0) ? (l) : min(l, r)))

/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t:	the stacking driver (top)
 * @b:  the underlying device (bottom)
 **/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
	blk_stack_limits(&t->limits, &b->limits, 0);

	if (!t->queue_lock)
		WARN_ON_ONCE(1);
	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
		unsigned long flags;
		spin_lock_irqsave(t->queue_lock, flags);
		queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
		spin_unlock_irqrestore(t->queue_lock, flags);
	}
}
EXPORT_SYMBOL(blk_queue_stack_limits);

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		     sector_t start)
{
	unsigned int top, bottom, alignment;
	int ret = 0;

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	t->misaligned |= b->misaligned;

	alignment = queue_limit_alignment_offset(b, start);

	/* Bottom device has different alignment.  Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) & (min(top, bottom) - 1)) {
			t->misaligned = 1;
			ret = -1;
		}
	}

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm(t->io_opt, b->io_opt);

	t->no_cluster |= b->no_cluster;
	t->discard_zeroes_data &= b->discard_zeroes_data;

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->misaligned = 1;
		ret = -1;
	}

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm(t->alignment_offset, alignment)
		& (max(t->physical_block_size, t->io_min) - 1);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->misaligned = 1;
		ret = -1;
	}

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		if (t->discard_granularity != 0 &&
		    t->discard_alignment != alignment) {
			top = t->discard_granularity + t->discard_alignment;
			bottom = b->discard_granularity + alignment;

			/* Verify that top and bottom intervals line up */
			if (max(top, bottom) & (min(top, bottom) - 1))
				t->discard_misaligned = 1;
		}

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm(t->discard_alignment, alignment) &
			(t->discard_granularity - 1);
	}

	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);

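/*
 * Example (illustrative sketch, not part of this file): how a stacking
 * driver might combine limits across its component queues and warn on
 * misalignment.  The component array and start offsets are hypothetical.
 *
 *	struct queue_limits lim;
 *	int i;
 *
 *	blk_set_default_limits(&lim);
 *	for (i = 0; i < nr_components; i++)
 *		if (blk_stack_limits(&lim, &component_q[i]->limits,
 *				     component_start[i]) < 0)
 *			printk(KERN_NOTICE "component %d is misaligned\n", i);
 */
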
/**
 * bdev_stack_limits - adjust queue limits for stacked drivers
 * @t:	the stacking driver limits (top device)
 * @bdev:  the component block_device (bottom)
 * @start:  first data sector within component device
 *
 * Description:
 *    Merges queue limits for a top device and a block_device.  Returns
 *    0 if alignment didn't change.  Returns -1 if adding the bottom
 *    device caused misalignment.
 */
int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
		      sector_t start)
{
	struct request_queue *bq = bdev_get_queue(bdev);

	start += get_start_sect(bdev);

	return blk_stack_limits(t, &bq->limits, start);
}
EXPORT_SYMBOL(bdev_stack_limits);

/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk:  MD/DM gendisk (top)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 *
 * Description:
 *    Merges the limits for a top level gendisk and a bottom level
 *    block_device.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
		       sector_t offset)
{
	struct request_queue *t = disk->queue;
	struct request_queue *b = bdev_get_queue(bdev);

	if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

		disk_name(disk, 0, top);
		bdevname(bdev, bottom);

		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
		       top, bottom);
	}

	if (!t->queue_lock)
		WARN_ON_ONCE(1);
	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
		unsigned long flags;

		spin_lock_irqsave(t->queue_lock, flags);
		if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
			queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
		spin_unlock_irqrestore(t->queue_lock, flags);
	}
}
EXPORT_SYMBOL(disk_stack_limits);

/**
 * blk_queue_dma_pad - set pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Set dma pad mask.
 *
 * Appending a pad buffer to a request modifies the last entry of the
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
{
	q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_dma_pad);

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update dma pad mask.
 *
 * Appending a pad buffer to a request modifies the last entry of the
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
	if (mask > q->dma_pad_mask)
		q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
 * @q:  the request queue for the device
 * @dma_drain_needed: fn which returns non-zero if drain is necessary
 * @buf:	physically contiguous buffer
 * @size:	size of the buffer in bytes
 *
 * Some devices have excess DMA problems and can't simply discard (or
 * zero fill) the unwanted piece of the transfer.  They have to have a
 * real area of memory to transfer it into.  The use case for this is
 * ATAPI devices in DMA mode.  If the packet command causes a transfer
 * bigger than the transfer size, some HBAs will lock up if there
 * aren't DMA elements to contain the excess transfer.  What this API
 * does is adjust the queue so that the buf is always appended
 * silently to the scatterlist.
 *
 * Note: This routine adjusts max_segments to make room for appending
 * the drain buffer.  If you call blk_queue_max_segments() after calling
 * this routine, you must set the limit to one fewer than your device
 * can support, otherwise there won't be room for the drain buffer.
 */
int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size)
{
	if (queue_max_segments(q) < 2)
		return -EINVAL;
	/* make room for appending the drain */
	blk_queue_max_segments(q, queue_max_segments(q) - 1);
	q->dma_drain_needed = dma_drain_needed;
	q->dma_drain_buffer = buf;
	q->dma_drain_size = size;

	return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_dma_drain);

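/*
 * Example (illustrative sketch, not part of this file): ATAPI-style setup
 * in the spirit of libata.  The drain size and the predicate are
 * simplified; only raw packet commands that actually transfer data can
 * over-transfer, so only those need the drain buffer appended.
 *
 *	static int mydrv_drain_needed(struct request *rq)
 *	{
 *		return rq->cmd_type == REQ_TYPE_BLOCK_PC && blk_rq_bytes(rq);
 *	}
 *
 *	buf = kmalloc(MYDRV_DRAIN_SIZE, q->bounce_gfp | GFP_KERNEL);
 *	if (!buf || blk_queue_dma_drain(q, mydrv_drain_needed, buf,
 *					MYDRV_DRAIN_SIZE))
 *		goto fail;
 */
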
/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
	if (mask < PAGE_CACHE_SIZE - 1) {
		mask = PAGE_CACHE_SIZE - 1;
		printk(KERN_INFO "%s: set to minimum %lx\n",
		       __func__, mask);
	}

	q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Set required memory and length alignment for direct dma transactions.
 *    This is used when building direct io requests for the queue.
 *
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
	q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Update required memory and length alignment for direct dma transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 *
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
	BUG_ON(mask > PAGE_SIZE);

	if (mask > q->dma_alignment)
		q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);

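/*
 * Example (illustrative sketch, not part of this file): several layers
 * raising the alignment independently.  The strictest mask wins, and a
 * later, weaker request leaves it untouched.
 *
 *	transport wants 4-byte alignment:
 *		blk_queue_update_dma_alignment(q, 3);
 *	device wants 512-byte alignment, this one wins:
 *		blk_queue_update_dma_alignment(q, 511);
 *	a later 8-byte request has no effect, 511 stays:
 *		blk_queue_update_dma_alignment(q, 7);
 */
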
static int __init blk_settings_init(void)
{
	blk_max_low_pfn = max_low_pfn - 1;
	blk_max_pfn = max_pfn - 1;
	return 0;
}
subsys_initcall(blk_settings_init);