xref: /linux/block/blk-settings.c (revision 5bdef865eb358b6f3760e25e591ae115e9eeddef)
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */

#include "blk.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;

/**
 * blk_queue_prep_rq - set a prepare_request function for queue
 * @q:		queue
 * @pfn:	prepare_request function
 *
 * It's possible for a queue to register a prepare_request callback,
 * which is invoked before the request is handed to the request_fn.
 * The goal of the function is to prepare a request for I/O; it can be
 * used, for instance, to build a cdb from the request data.
 */
void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
{
	q->prep_rq_fn = pfn;
}
EXPORT_SYMBOL(blk_queue_prep_rq);
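
/*
 * Usage sketch (mydev_* names are hypothetical): build the command
 * block up front and report the outcome with the BLKPREP_* codes;
 * BLKPREP_KILL fails the request and BLKPREP_DEFER retries it later.
 *
 *	static int mydev_prep_rq(struct request_queue *q, struct request *rq)
 *	{
 *		if (!mydev_build_cdb(rq))
 *			return BLKPREP_KILL;
 *		return BLKPREP_OK;
 *	}
 *
 *	blk_queue_prep_rq(q, mydev_prep_rq);
 */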

/**
 * blk_queue_set_discard - set a prepare_discard function for queue
 * @q:		queue
 * @dfn:	prepare_discard function
 *
 * It's possible for a queue to register a discard callback, which is
 * used to transform a discard request into the appropriate type for
 * the hardware. If none is registered, discard requests are failed
 * with %EOPNOTSUPP.
 */
void blk_queue_set_discard(struct request_queue *q, prepare_discard_fn *dfn)
{
	q->prepare_discard_fn = dfn;
}
EXPORT_SYMBOL(blk_queue_set_discard);
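
/*
 * Usage sketch (mydev_* names are hypothetical): the registered hook
 * rewrites a discard request into whatever TRIM/erase command the
 * hardware understands.
 *
 *	static int mydev_prepare_discard(struct request_queue *q,
 *					 struct request *rq)
 *	{
 *		return mydev_build_trim_cmd(rq);
 *	}
 *
 *	blk_queue_set_discard(q, mydev_prepare_discard);
 */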

/**
 * blk_queue_merge_bvec - set a merge_bvec function for queue
 * @q:		queue
 * @mbfn:	merge_bvec_fn
 *
 * Usually queues have static limits on the max sectors or segments that
 * can be put in a request. Stacking drivers may have some settings that
 * are dynamic, so the queue must be asked whether it is ok to add a new
 * bio_vec to a bio at a given offset. If a block device has such
 * limitations, it needs to register a merge_bvec_fn to control the size
 * of bios sent to it. Note that a block device *must* allow a single
 * page to be added to an empty bio; the driver may want to use
 * bio_split() to deal with such bios. By default no merge_bvec_fn is
 * defined for a queue, and only the fixed limits are honored.
 */
void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
{
	q->merge_bvec_fn = mbfn;
}
EXPORT_SYMBOL(blk_queue_merge_bvec);
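
/*
 * Usage sketch (mydev_* names are hypothetical): accept only the bytes
 * that fit before a device-internal boundary, while always taking at
 * least one page into an empty bio as required above.
 *
 *	static int mydev_merge_bvec(struct request_queue *q,
 *				    struct bvec_merge_data *bvm,
 *				    struct bio_vec *biovec)
 *	{
 *		unsigned int room;
 *
 *		if (!bvm->bi_size)
 *			return biovec->bv_len;
 *		room = mydev_bytes_to_boundary(bvm->bi_sector) - bvm->bi_size;
 *		return min(room, biovec->bv_len);
 *	}
 *
 *	blk_queue_merge_bvec(q, mydev_merge_bvec);
 */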

/* Set the function used to complete requests from softirq context */
void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
{
	q->softirq_done_fn = fn;
}
EXPORT_SYMBOL(blk_queue_softirq_done);

/* Set the per-request timeout, in jiffies */
void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

/* Set the handler invoked when a request times out */
void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
{
	q->rq_timed_out_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);

/* Set the function used to check whether the underlying device is busy */
void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
{
	q->lld_busy_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_lld_busy);

/**
 * blk_set_default_limits - reset limits to default values
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limits struct to its default state.  Can be used
 *   by stacking drivers like DM that stage table swaps and reuse an
 *   existing device queue.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
	lim->max_phys_segments = MAX_PHYS_SEGMENTS;
	lim->max_hw_segments = MAX_HW_SEGMENTS;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	lim->max_segment_size = MAX_SEGMENT_SIZE;
	lim->max_sectors = lim->max_hw_sectors = SAFE_MAX_SECTORS;
	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
	lim->alignment_offset = 0;
	lim->io_opt = 0;
	lim->misaligned = 0;
	lim->no_cluster = 0;
}
EXPORT_SYMBOL(blk_set_default_limits);

/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q:  the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Description:
 *    The normal way for &struct bios to be passed to a device
 *    driver is for them to be collected into requests on a request
 *    queue, and then to allow the device driver to select requests
 *    off that queue when it is ready.  This works well for many block
 *    devices. However, some block devices (typically virtual devices
 *    such as md or lvm) do not benefit from the processing on the
 *    request queue, and are served best by having the requests passed
 *    directly to them.  This can be achieved by providing a function
 *    to blk_queue_make_request().
 *
 * Caveat:
 *    The driver that does this *must* be able to deal appropriately
 *    with buffers in "highmem". This can be accomplished by either calling
 *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
 *    blk_queue_bounce() to create a buffer in normal memory.
 **/
void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
	/*
	 * set defaults
	 */
	q->nr_requests = BLKDEV_MAX_RQ;

	q->make_request_fn = mfn;
	blk_queue_dma_alignment(q, 511);
	blk_queue_congestion_threshold(q);
	q->nr_batching = BLK_BATCH_REQ;

	q->unplug_thresh = 4;		/* hmm */
	q->unplug_delay = (3 * HZ) / 1000;	/* 3 milliseconds */
	if (q->unplug_delay == 0)
		q->unplug_delay = 1;

	q->unplug_timer.function = blk_unplug_timeout;
	q->unplug_timer.data = (unsigned long)q;

	blk_set_default_limits(&q->limits);

	/*
	 * by default assume old behaviour and bounce for any highmem page
	 */
	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}
EXPORT_SYMBOL(blk_queue_make_request);
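
/*
 * Usage sketch (mydev_* names are hypothetical): a virtual device that
 * bypasses the request queue, in the style of md or lvm.  At this
 * revision a make_request_fn returns int, with 0 meaning the bio has
 * been accepted.
 *
 *	static int mydev_make_request(struct request_queue *q, struct bio *bio)
 *	{
 *		mydev_handle_bio(bio);
 *		return 0;
 *	}
 *
 *	q = blk_alloc_queue(GFP_KERNEL);
 *	blk_queue_make_request(q, mydev_make_request);
 */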

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @dma_mask: the maximum address the device can handle
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to. A low level driver can call
 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @dma_mask.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
{
	unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
	int dma = 0;

	q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
	/*
	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
	 * some IOMMUs can handle everything, but I don't know of a
	 * way to test this here.
	 */
	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
		dma = 1;
	q->limits.bounce_pfn = max_low_pfn;
#else
	if (b_pfn < blk_max_low_pfn)
		dma = 1;
	q->limits.bounce_pfn = b_pfn;
#endif
	if (dma) {
		init_emergency_isa_pool();
		q->bounce_gfp = GFP_NOIO | GFP_DMA;
		q->limits.bounce_pfn = b_pfn;
	}
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
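
/*
 * Usage sketch: a controller that can only address memory below 4GB
 * would request bouncing for pages above that boundary (DMA_BIT_MASK()
 * comes from <linux/dma-mapping.h>):
 *
 *	blk_queue_bounce_limit(q, DMA_BIT_MASK(32));
 */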

/**
 * blk_queue_max_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_sectors:  max sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of
 *    received requests.
 **/
void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
{
	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_sectors);
	}

	if (BLK_DEF_MAX_SECTORS > max_sectors)
		q->limits.max_hw_sectors = q->limits.max_sectors = max_sectors;
	else {
		q->limits.max_sectors = BLK_DEF_MAX_SECTORS;
		q->limits.max_hw_sectors = max_sectors;
	}
}
EXPORT_SYMBOL(blk_queue_max_sectors);
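
/*
 * Usage sketch: cap requests at 128KB for hardware with small internal
 * buffers; the unit is 512-byte sectors:
 *
 *	blk_queue_max_sectors(q, 256);
 */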

/* Set the hardware limit only; it is never set below BLK_DEF_MAX_SECTORS */
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors)
{
	if (BLK_DEF_MAX_SECTORS > max_sectors)
		q->limits.max_hw_sectors = BLK_DEF_MAX_SECTORS;
	else
		q->limits.max_hw_sectors = max_sectors;
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);

/**
 * blk_queue_max_phys_segments - set max phys segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    physical data segments in a request.  This would be the largest sized
 *    scatter list the driver could handle.
 **/
void blk_queue_max_phys_segments(struct request_queue *q,
				 unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_segments);
	}

	q->limits.max_phys_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_phys_segments);

/**
 * blk_queue_max_hw_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.  This would be the largest number of
 *    address/length pairs the host adapter can actually give at once
 *    to the device.
 **/
void blk_queue_max_hw_segments(struct request_queue *q,
			       unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_segments);
	}

	q->limits.max_hw_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_hw_segments);
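
/*
 * Usage sketch (MYDEV_SG_ENTRIES is hypothetical): size both segment
 * limits from the scatter-gather table the hardware really supports,
 * and bound the size of each element:
 *
 *	blk_queue_max_phys_segments(q, MYDEV_SG_ENTRIES);
 *	blk_queue_max_hw_segments(q, MYDEV_SG_ENTRIES);
 *	blk_queue_max_segment_size(q, 65536);
 */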

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment.
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
	if (max_size < PAGE_CACHE_SIZE) {
		max_size = PAGE_CACHE_SIZE;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_size);
	}

	q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q:  the request queue for the device
 * @size:  the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
{
	q->limits.logical_block_size = size;

	if (q->limits.physical_block_size < size)
		q->limits.physical_block_size = size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_logical_block_size);

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q:  the request queue for the device
 * @size:  the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned short size)
{
	q->limits.physical_block_size = size;

	if (q->limits.physical_block_size < q->limits.logical_block_size)
		q->limits.physical_block_size = q->limits.logical_block_size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q:	the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
	q->limits.alignment_offset =
		offset & (q->limits.physical_block_size - 1);
	q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q:	the request queue for the device
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
	q->limits.io_min = min;

	if (q->limits.io_min < q->limits.logical_block_size)
		q->limits.io_min = q->limits.logical_block_size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q:	the request queue for the device
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Drivers can call this function to set the preferred I/O request
 *   size for devices that report such a value.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
	q->limits.io_opt = opt;
}
EXPORT_SYMBOL(blk_queue_io_opt);
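
/*
 * Usage sketch: a drive with 4096-byte physical sectors that emulates
 * 512-byte logical sectors and starts at the legacy 63-sector offset
 * (63 * 512 = 32256 bytes, i.e. 3584 bytes into a physical block); the
 * io_opt value is illustrative:
 *
 *	blk_queue_logical_block_size(q, 512);
 *	blk_queue_physical_block_size(q, 4096);
 *	blk_queue_alignment_offset(q, 3584);
 *	blk_queue_io_min(q, 4096);
 *	blk_queue_io_opt(q, 8 * 4096);
 */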

/*
 * Returns the minimum that is _not_ zero, unless both are zero.
 */
#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))

/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t:	the stacking driver (top)
 * @b:  the underlying device (bottom)
 **/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
	/* zero is "infinity" */
	t->limits.max_sectors = min_not_zero(queue_max_sectors(t),
					     queue_max_sectors(b));

	t->limits.max_hw_sectors = min_not_zero(queue_max_hw_sectors(t),
						queue_max_hw_sectors(b));

	t->limits.seg_boundary_mask = min_not_zero(queue_segment_boundary(t),
						   queue_segment_boundary(b));

	t->limits.max_phys_segments = min_not_zero(queue_max_phys_segments(t),
						   queue_max_phys_segments(b));

	t->limits.max_hw_segments = min_not_zero(queue_max_hw_segments(t),
						 queue_max_hw_segments(b));

	t->limits.max_segment_size = min_not_zero(queue_max_segment_size(t),
						  queue_max_segment_size(b));

	t->limits.logical_block_size = max(queue_logical_block_size(t),
					   queue_logical_block_size(b));

	if (!t->queue_lock)
		WARN_ON_ONCE(1);
	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
		unsigned long flags;
		spin_lock_irqsave(t->queue_lock, flags);
		queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
		spin_unlock_irqrestore(t->queue_lock, flags);
	}
}
EXPORT_SYMBOL(blk_queue_stack_limits);

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top)
 * @b:  the underlying queue limits (bottom)
 * @offset:  offset to beginning of data within component device, in bytes
 *
 * Description:
 *    Merges two queue_limits structs.  Returns 0 if alignment didn't
 *    change.  Returns -1 if adding the bottom device caused
 *    misalignment.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		     sector_t offset)
{
	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);

	t->max_phys_segments = min_not_zero(t->max_phys_segments,
					    b->max_phys_segments);

	t->max_hw_segments = min_not_zero(t->max_hw_segments,
					  b->max_hw_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->no_cluster |= b->no_cluster;

	/* Bottom device offset aligned? */
	if (offset &&
	    (offset & (b->physical_block_size - 1)) != b->alignment_offset) {
		t->misaligned = 1;
		return -1;
	}

	/* If top has no alignment offset, inherit from bottom */
	if (!t->alignment_offset)
		t->alignment_offset =
			b->alignment_offset & (b->physical_block_size - 1);

	/* Top device aligned on logical block boundary? */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->misaligned = 1;
		return -1;
	}

	return 0;
}
EXPORT_SYMBOL(blk_stack_limits);
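
/*
 * Usage sketch: a stacking driver folding a component device's limits
 * into a staged set before swapping it in; offset is the byte offset
 * of the data area within the component:
 *
 *	struct queue_limits lim;
 *
 *	blk_set_default_limits(&lim);
 *	if (blk_stack_limits(&lim, &bdev_get_queue(bdev)->limits, offset) < 0)
 *		printk(KERN_NOTICE "component device is misaligned\n");
 */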

/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk:  MD/DM gendisk (top)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device, in bytes
 *
 * Description:
 *    Merges the limits for two queues.  Prints a warning if the
 *    underlying device turns out to be misaligned.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
		       sector_t offset)
{
	struct request_queue *t = disk->queue;
	struct request_queue *b = bdev_get_queue(bdev);

	offset += get_start_sect(bdev) << 9;

	if (blk_stack_limits(&t->limits, &b->limits, offset) < 0) {
		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

		disk_name(disk, 0, top);
		bdevname(bdev, bottom);

		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
		       top, bottom);
	}

	if (!t->queue_lock)
		WARN_ON_ONCE(1);
	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
		unsigned long flags;

		spin_lock_irqsave(t->queue_lock, flags);
		if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
			queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
		spin_unlock_irqrestore(t->queue_lock, flags);
	}
}
EXPORT_SYMBOL(disk_stack_limits);
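
/*
 * Usage sketch (variable names are hypothetical): an MD/DM driver
 * calls this once per component while assembling the array, passing
 * any extra data offset in bytes:
 *
 *	disk_stack_limits(mddev_gendisk, component_bdev, 0);
 */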

/**
 * blk_queue_dma_pad - set pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Set the dma pad mask.
 *
 * Appending a pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
{
	q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_dma_pad);

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update the dma pad mask, but only if the new mask is bigger than the
 * current one.
 *
 * Appending a pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
	if (mask > q->dma_pad_mask)
		q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);
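
/*
 * Usage sketch: a controller that needs transfer lengths padded to
 * 4-byte units, as libata arranges for ATA devices, would set a pad
 * mask of 3:
 *
 *	blk_queue_update_dma_pad(q, 4 - 1);
 */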

/**
 * blk_queue_dma_drain - set up a drain buffer for excess dma
 * @q:  the request queue for the device
 * @dma_drain_needed: fn which returns non-zero if drain is necessary
 * @buf:	physically contiguous buffer
 * @size:	size of the buffer in bytes
 *
 * Some devices have excess DMA problems and can't simply discard (or
 * zero fill) the unwanted piece of the transfer.  They have to have a
 * real area of memory to transfer it into.  The use case for this is
 * ATAPI devices in DMA mode.  If the packet command causes a transfer
 * bigger than the transfer size, some HBAs will lock up if there
 * aren't DMA elements to contain the excess transfer.  What this API
 * does is adjust the queue so that the buf is always appended
 * silently to the scatterlist.
 *
 * Note: This routine adjusts max_hw_segments and max_phys_segments to
 * make room for appending the drain buffer.  If you call
 * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
 * calling this routine, you must set the limit to one fewer than your
 * device can support; otherwise there won't be room for the drain
 * buffer.
 */
int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size)
{
	if (queue_max_hw_segments(q) < 2 || queue_max_phys_segments(q) < 2)
		return -EINVAL;
	/* make room for appending the drain */
	blk_queue_max_hw_segments(q, queue_max_hw_segments(q) - 1);
	blk_queue_max_phys_segments(q, queue_max_phys_segments(q) - 1);
	q->dma_drain_needed = dma_drain_needed;
	q->dma_drain_buffer = buf;
	q->dma_drain_size = size;

	return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
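
/*
 * Usage sketch (mydev_* names and MYDEV_DRAIN_SIZE are hypothetical),
 * similar to what libata does for ATAPI: allocate the drain buffer
 * once and register it with a callback that says when draining is
 * needed, e.g. for packet commands:
 *
 *	static int mydev_drain_needed(struct request *rq)
 *	{
 *		return blk_pc_request(rq);
 *	}
 *
 *	void *buf = kmalloc(MYDEV_DRAIN_SIZE, q->bounce_gfp | GFP_KERNEL);
 *
 *	if (buf)
 *		blk_queue_dma_drain(q, mydev_drain_needed, buf,
 *				    MYDEV_DRAIN_SIZE);
 */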

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
	if (mask < PAGE_CACHE_SIZE - 1) {
		mask = PAGE_CACHE_SIZE - 1;
		printk(KERN_INFO "%s: set to minimum %lx\n",
		       __func__, mask);
	}

	q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);
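
/*
 * Usage sketch: hardware whose scatter-gather elements must not cross
 * a 64KB boundary would set:
 *
 *	blk_queue_segment_boundary(q, 0xffff);
 */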

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Set required memory and length alignment for direct dma transactions.
 *    This is used when building direct io requests for the queue.
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
	q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Update required memory and length alignment for direct dma transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value; otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
	BUG_ON(mask > PAGE_SIZE);

	if (mask > q->dma_alignment)
		q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);
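
/*
 * Usage sketch: hardware that needs buffers and lengths aligned to
 * 4 bytes can raise the queue's alignment without clobbering a
 * stricter value set elsewhere:
 *
 *	blk_queue_update_dma_alignment(q, 4 - 1);
 */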

static int __init blk_settings_init(void)
{
	blk_max_low_pfn = max_low_pfn - 1;
	blk_max_pfn = max_pfn - 1;
	return 0;
}
subsys_initcall(blk_settings_init);
701