/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */

#include "blk.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;

/**
 * blk_queue_prep_rq - set a prepare_request function for queue
 * @q:		queue
 * @pfn:	prepare_request function
 *
 * It's possible for a queue to register a prepare_request callback which
 * is invoked before the request is handed to the request_fn. The goal of
 * the function is to prepare a request for I/O; it can be used, for
 * instance, to build a cdb from the request data.
 */
void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
{
	q->prep_rq_fn = pfn;
}
EXPORT_SYMBOL(blk_queue_prep_rq);

/**
 * blk_queue_set_discard - set a discard_sectors function for queue
 * @q:		queue
 * @dfn:	prepare_discard function
 *
 * It's possible for a queue to register a discard callback which is used
 * to transform a discard request into the appropriate type for the
 * hardware. If none is registered, then discard requests are failed
 * with %EOPNOTSUPP.
 */
void blk_queue_set_discard(struct request_queue *q, prepare_discard_fn *dfn)
{
	q->prepare_discard_fn = dfn;
}
EXPORT_SYMBOL(blk_queue_set_discard);
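
/*
 * Usage sketch (not part of this file): a SCSI-like driver could build
 * its command block from a prepare_request callback before the request
 * reaches its request_fn.  The mydev_* names below are hypothetical;
 * BLKPREP_OK and BLKPREP_KILL are the standard prep_rq_fn return codes.
 *
 *	static int mydev_prep_rq(struct request_queue *q, struct request *rq)
 *	{
 *		if (!mydev_build_cdb(rq))
 *			return BLKPREP_KILL;
 *		return BLKPREP_OK;
 *	}
 *
 *	blk_queue_prep_rq(q, mydev_prep_rq);
 */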
/**
 * blk_queue_merge_bvec - set a merge_bvec function for queue
 * @q:		queue
 * @mbfn:	merge_bvec_fn
 *
 * Usually queues have static limitations on the max sectors or segments that
 * we can put in a request. Stacking drivers may have some settings that
 * are dynamic, and thus we have to query the queue whether it is ok to
 * add a new bio_vec to a bio at a given offset or not. If the block device
 * has such limitations, it needs to register a merge_bvec_fn to control
 * the size of bios sent to it. Note that a block device *must* allow a
 * single page to be added to an empty bio. The block device driver may want
 * to use the bio_split() function to deal with these bios. By default
 * no merge_bvec_fn is defined for a queue, and only the fixed limits are
 * honored.
 */
void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
{
	q->merge_bvec_fn = mbfn;
}
EXPORT_SYMBOL(blk_queue_merge_bvec);

void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
{
	q->softirq_done_fn = fn;
}
EXPORT_SYMBOL(blk_queue_softirq_done);

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
{
	q->rq_timed_out_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);

void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
{
	q->lld_busy_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_lld_busy);

/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q:  the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Description:
 *    The normal way for &struct bios to be passed to a device
 *    driver is for them to be collected into requests on a request
 *    queue, and then to allow the device driver to select requests
 *    off that queue when it is ready.  This works well for many block
 *    devices. However, some block devices (typically virtual devices
 *    such as md or lvm) do not benefit from the processing on the
 *    request queue, and are served best by having the requests passed
 *    directly to them.  This can be achieved by providing a function
 *    to blk_queue_make_request().
 *
 * Caveat:
 *    The driver that does this *must* be able to deal appropriately
 *    with buffers in "highmemory". This can be accomplished by either calling
 *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
 *    blk_queue_bounce() to create a buffer in normal memory.
 **/
void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
	/*
	 * set defaults
	 */
	q->nr_requests = BLKDEV_MAX_RQ;
	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
	blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
	blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);

	q->make_request_fn = mfn;
	q->backing_dev_info.ra_pages =
			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
	q->backing_dev_info.state = 0;
	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
	blk_queue_logical_block_size(q, 512);
	blk_queue_dma_alignment(q, 511);
	blk_queue_congestion_threshold(q);
	q->nr_batching = BLK_BATCH_REQ;

	q->unplug_thresh = 4;		/* hmm */
	q->unplug_delay = (3 * HZ) / 1000;	/* 3 milliseconds */
	if (q->unplug_delay == 0)
		q->unplug_delay = 1;

	q->unplug_timer.function = blk_unplug_timeout;
	q->unplug_timer.data = (unsigned long)q;

	/*
	 * by default assume old behaviour and bounce for any highmem page
	 */
	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}
EXPORT_SYMBOL(blk_queue_make_request);
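
/*
 * Usage sketch (hypothetical bio-based driver): a virtual device that
 * bypasses the request queue entirely.  mydev_handle_bio() is made up;
 * blk_alloc_queue(), bio_endio() and blk_queue_make_request() are real.
 *
 *	static int mydev_make_request(struct request_queue *q, struct bio *bio)
 *	{
 *		mydev_handle_bio(bio);
 *		bio_endio(bio, 0);
 *		return 0;
 *	}
 *
 *	q = blk_alloc_queue(GFP_KERNEL);
 *	blk_queue_make_request(q, mydev_make_request);
 */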
/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @dma_mask: the maximum address the device can handle
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to. A low level driver can call
 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @dma_mask.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
{
	unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
	int dma = 0;

	q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
	/*
	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
	 * some IOMMUs can handle everything, but I don't know of a
	 * way to test this here.
	 */
	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
		dma = 1;
	q->limits.bounce_pfn = max_low_pfn;
#else
	if (b_pfn < blk_max_low_pfn)
		dma = 1;
	q->limits.bounce_pfn = b_pfn;
#endif
	if (dma) {
		init_emergency_isa_pool();
		q->bounce_gfp = GFP_NOIO | GFP_DMA;
		q->limits.bounce_pfn = b_pfn;
	}
}
EXPORT_SYMBOL(blk_queue_bounce_limit);

/**
 * blk_queue_max_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_sectors:  max sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of
 *    received requests.
 **/
void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
{
	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_sectors);
	}

	if (BLK_DEF_MAX_SECTORS > max_sectors)
		q->limits.max_hw_sectors = q->limits.max_sectors = max_sectors;
	else {
		q->limits.max_sectors = BLK_DEF_MAX_SECTORS;
		q->limits.max_hw_sectors = max_sectors;
	}
}
EXPORT_SYMBOL(blk_queue_max_sectors);

void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors)
{
	if (BLK_DEF_MAX_SECTORS > max_sectors)
		q->limits.max_hw_sectors = BLK_DEF_MAX_SECTORS;
	else
		q->limits.max_hw_sectors = max_sectors;
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);

/**
 * blk_queue_max_phys_segments - set max phys segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    physical data segments in a request.  This would be the largest sized
 *    scatter list the driver could handle.
 **/
void blk_queue_max_phys_segments(struct request_queue *q,
				 unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_segments);
	}

	q->limits.max_phys_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_phys_segments);
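
/*
 * Usage sketch: a hypothetical HBA that can only DMA below 4GB and
 * accepts at most 128 sectors and 32 scatter/gather entries per command
 * might configure its queue as follows (the numbers are illustrative):
 *
 *	blk_queue_bounce_limit(q, DMA_BIT_MASK(32));
 *	blk_queue_max_sectors(q, 128);
 *	blk_queue_max_phys_segments(q, 32);
 */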
/**
 * blk_queue_max_hw_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.  This would be the largest number of
 *    address/length pairs the host adapter can actually give at once
 *    to the device.
 **/
void blk_queue_max_hw_segments(struct request_queue *q,
			       unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_segments);
	}

	q->limits.max_hw_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_hw_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
	if (max_size < PAGE_CACHE_SIZE) {
		max_size = PAGE_CACHE_SIZE;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_size);
	}

	q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q:  the request queue for the device
 * @size:  the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
{
	q->limits.logical_block_size = size;

	if (q->limits.physical_block_size < size)
		q->limits.physical_block_size = size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_logical_block_size);

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q:  the request queue for the device
 * @size:  the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned short size)
{
	q->limits.physical_block_size = size;

	if (q->limits.physical_block_size < q->limits.logical_block_size)
		q->limits.physical_block_size = q->limits.logical_block_size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q:	the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
	q->limits.alignment_offset =
		offset & (q->limits.physical_block_size - 1);
	q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);
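
/*
 * Usage sketch: a drive with 4096-byte physical sectors addressed as
 * 512-byte logical sectors, whose LBA 0 sits 3584 bytes into a physical
 * sector (compensating for the 63-sector DOS offset mentioned above),
 * might report:
 *
 *	blk_queue_logical_block_size(q, 512);
 *	blk_queue_physical_block_size(q, 4096);
 *	blk_queue_alignment_offset(q, 3584);
 *
 * (63 * 512) mod 4096 = 3584, so partitions created at the legacy
 * offset end up aligned to the physical sector size.
 */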
/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q:	the request queue for the device
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
	q->limits.io_min = min;

	if (q->limits.io_min < q->limits.logical_block_size)
		q->limits.io_min = q->limits.logical_block_size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q:	the request queue for the device
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Drivers can call this function to set the preferred I/O request
 *   size for devices that report such a value.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
	q->limits.io_opt = opt;
}
EXPORT_SYMBOL(blk_queue_io_opt);
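
/*
 * Usage sketch: a striped device (say, RAID5 over four data disks with
 * a 64KB chunk) would typically advertise the chunk size as its minimum
 * I/O and the full stripe width as its optimal I/O; values illustrative:
 *
 *	blk_queue_io_min(q, 64 * 1024);
 *	blk_queue_io_opt(q, 4 * 64 * 1024);
 */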
/*
 * Returns the minimum that is _not_ zero, unless both are zero.
 */
#define min_not_zero(l, r)	((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))

/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t:	the stacking driver (top)
 * @b:  the underlying device (bottom)
 **/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
	/* zero is "infinity" */
	t->limits.max_sectors = min_not_zero(queue_max_sectors(t),
					     queue_max_sectors(b));

	t->limits.max_hw_sectors = min_not_zero(queue_max_hw_sectors(t),
						queue_max_hw_sectors(b));

	t->limits.seg_boundary_mask = min_not_zero(queue_segment_boundary(t),
						   queue_segment_boundary(b));

	t->limits.max_phys_segments = min_not_zero(queue_max_phys_segments(t),
						   queue_max_phys_segments(b));

	t->limits.max_hw_segments = min_not_zero(queue_max_hw_segments(t),
						 queue_max_hw_segments(b));

	t->limits.max_segment_size = min_not_zero(queue_max_segment_size(t),
						  queue_max_segment_size(b));

	t->limits.logical_block_size = max(queue_logical_block_size(t),
					   queue_logical_block_size(b));

	if (!t->queue_lock)
		WARN_ON_ONCE(1);
	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
		unsigned long flags;

		spin_lock_irqsave(t->queue_lock, flags);
		queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
		spin_unlock_irqrestore(t->queue_lock, flags);
	}
}
EXPORT_SYMBOL(blk_queue_stack_limits);

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top)
 * @b:  the underlying queue limits (bottom)
 * @offset:  offset to beginning of data within component device
 *
 * Description:
 *    Merges two queue_limit structs.  Returns 0 if alignment didn't
 *    change.  Returns -1 if adding the bottom device caused
 *    misalignment.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		     sector_t offset)
{
	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);

	t->max_phys_segments = min_not_zero(t->max_phys_segments,
					    b->max_phys_segments);

	t->max_hw_segments = min_not_zero(t->max_hw_segments,
					  b->max_hw_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->no_cluster |= b->no_cluster;

	/* Bottom device offset aligned? */
	if (offset &&
	    (offset & (b->physical_block_size - 1)) != b->alignment_offset) {
		t->misaligned = 1;
		return -1;
	}

	/* If top has no alignment offset, inherit from bottom */
	if (!t->alignment_offset)
		t->alignment_offset =
			b->alignment_offset & (b->physical_block_size - 1);

	/* Top device aligned on logical block boundary? */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->misaligned = 1;
		return -1;
	}

	return 0;
}
EXPORT_SYMBOL(blk_stack_limits);

/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk:  MD/DM gendisk (top)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 *
 * Description:
 *    Merges the limits for two queues.  A warning is printed if the
 *    bottom device turns out to be misaligned with respect to the top.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
		       sector_t offset)
{
	struct request_queue *t = disk->queue;
	struct request_queue *b = bdev_get_queue(bdev);

	offset += get_start_sect(bdev) << 9;

	if (blk_stack_limits(&t->limits, &b->limits, offset) < 0) {
		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

		disk_name(disk, 0, top);
		bdevname(bdev, bottom);

		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
		       top, bottom);
	}

	if (!t->queue_lock)
		WARN_ON_ONCE(1);
	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
		unsigned long flags;

		spin_lock_irqsave(t->queue_lock, flags);
		if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
			queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
		spin_unlock_irqrestore(t->queue_lock, flags);
	}
}
EXPORT_SYMBOL(disk_stack_limits);
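
/*
 * Usage sketch (hypothetical MD/DM-style code): when a component device
 * joins a stacked device, its limits and partition offset are folded
 * into the top-level disk.  data_offset (in sectors, relative to the
 * start of the component) and the variable names are made up:
 *
 *	disk_stack_limits(mddev->gendisk, component_bdev, data_offset);
 */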
/**
 * blk_queue_dma_pad - set pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Set dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
{
	q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_dma_pad);

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
	if (mask > q->dma_pad_mask)
		q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
 * @q:  the request queue for the device
 * @dma_drain_needed: fn which returns non-zero if drain is necessary
 * @buf:	physically contiguous buffer
 * @size:	size of the buffer in bytes
 *
 * Some devices have excess DMA problems and can't simply discard (or
 * zero fill) the unwanted piece of the transfer.  They have to have a
 * real area of memory to transfer it into.  The use case for this is
 * ATAPI devices in DMA mode.  If the packet command causes a transfer
 * bigger than the transfer size, some HBAs will lock up if there
 * aren't DMA elements to contain the excess transfer.  What this API
 * does is adjust the queue so that the buf is always appended
 * silently to the scatterlist.
 *
 * Note: This routine adjusts max_hw_segments to make room for
 * appending the drain buffer.  If you call
 * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
 * calling this routine, you must set the limit to one fewer than your
 * device can support, otherwise there won't be room for the drain
 * buffer.
 */
int blk_queue_dma_drain(struct request_queue *q,
			dma_drain_needed_fn *dma_drain_needed,
			void *buf, unsigned int size)
{
	if (queue_max_hw_segments(q) < 2 || queue_max_phys_segments(q) < 2)
		return -EINVAL;
	/* make room for appending the drain */
	blk_queue_max_hw_segments(q, queue_max_hw_segments(q) - 1);
	blk_queue_max_phys_segments(q, queue_max_phys_segments(q) - 1);
	q->dma_drain_needed = dma_drain_needed;
	q->dma_drain_buffer = buf;
	q->dma_drain_size = size;

	return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
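
/*
 * Usage sketch (hypothetical ATAPI-style driver): reserve a small drain
 * area so a device that returns more data than requested cannot overrun
 * the scatterlist.  mydev_drain_needed() and the 256-byte size are
 * illustrative; kmalloc() and blk_queue_dma_drain() are real.
 *
 *	buf = kmalloc(256, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	ret = blk_queue_dma_drain(q, mydev_drain_needed, buf, 256);
 */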
/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
	if (mask < PAGE_CACHE_SIZE - 1) {
		mask = PAGE_CACHE_SIZE - 1;
		printk(KERN_INFO "%s: set to minimum %lx\n",
		       __func__, mask);
	}

	q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Set required memory and length alignment for direct dma transactions.
 *    This is used when building direct io requests for the queue.
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
	q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Update required memory and length alignment for direct dma transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport, etc.) to set their respective
 *    alignments without having them interfere.
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
	BUG_ON(mask > PAGE_SIZE);

	if (mask > q->dma_alignment)
		q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);

static int __init blk_settings_init(void)
{
	blk_max_low_pfn = max_low_pfn - 1;
	blk_max_pfn = max_pfn - 1;
	return 0;
}
subsys_initcall(blk_settings_init);
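
/*
 * Usage sketch: several layers may each require a DMA alignment; the
 * update helper keeps the strictest (largest) mask.  For example, after
 *
 *	blk_queue_dma_alignment(q, 3);		(driver: 4-byte)
 *	blk_queue_update_dma_alignment(q, 511);	(transport: 512-byte)
 *
 * q->dma_alignment is 511; a later update with a smaller mask would
 * leave it unchanged.
 */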