arch/arm/mm/dma-mapping.c, compared between two revisions: 1928e87bcf185f56008d0746f887b691c1cb8c4a (first) and 24056f525051a9e186af28904b396320e18bf9a0 (second). Unchanged context is shown once; lines prefixed with "-" appear only in the first revision, lines prefixed with "+" only in the second, and "--- N unchanged lines hidden ---" marks context omitted by the original viewer.
/*
 * linux/arch/arm/mm/dma-mapping.c
 *
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
-#include <linux/highmem.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

static u64 get_coherent_dma_mask(struct device *dev)
--- 165 unchanged lines hidden ---
        }

        /*
         * Align the virtual region allocation - maximum alignment is
         * a section size, minimum is a page size.  This helps reduce
         * fragmentation of the DMA space, and also prevents allocations
         * smaller than a section from crossing a section boundary.
         */
-        bit = fls(size - 1);
+        bit = fls(size - 1) + 1;
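The only difference above is how aggressively the remapped region is aligned: the first revision aligns to the smallest power of two that covers the requested size, while the second asks for twice that (both are capped to a section by the lines that follow). A stand-alone sketch of the arithmetic, with the kernel's fls() modelled by a loop and SECTION_SHIFT assumed to be 20 (1 MiB sections), illustrates the effect:

```c
#include <stdio.h>

#define SECTION_SHIFT 20                /* assumed: 1 MiB ARM sections */

/* Model of the kernel's fls(): 1-based index of the most significant
 * set bit, 0 when the argument is 0. */
static int fls_model(unsigned long x)
{
        int bit = 0;

        while (x) {
                bit++;
                x >>= 1;
        }
        return bit;
}

int main(void)
{
        unsigned long sizes[] = { 4096, 12288, 65536, 2097152 };
        unsigned int i;

        for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
                int bit_a = fls_model(sizes[i] - 1);            /* first revision */
                int bit_b = fls_model(sizes[i] - 1) + 1;        /* second revision */

                if (bit_a > SECTION_SHIFT)
                        bit_a = SECTION_SHIFT;
                if (bit_b > SECTION_SHIFT)
                        bit_b = SECTION_SHIFT;
                printf("size %8lu: align %8lu vs %8lu\n",
                       sizes[i], 1UL << bit_a, 1UL << bit_b);
        }
        return 0;
}
```

For a 4 KiB request this prints 4096 vs 8192; a 2 MiB request clamps to the 1 MiB section alignment in both variants.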
        if (bit > SECTION_SHIFT)
                bit = SECTION_SHIFT;
        align = 1 << bit;

        /*
         * Allocate a virtual address in the consistent mapping region.
         */
        c = arm_vmregion_alloc(&consistent_head, align, size,
--- 96 unchanged lines hidden ---
                return NULL;

        if (!arch_is_coherent())
                addr = __dma_alloc_remap(page, size, gfp, prot);
        else
                addr = page_address(page);

        if (addr)
-                *handle = page_to_dma(dev, page);
+                *handle = pfn_to_dma(dev, page_to_pfn(page));

        return addr;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
--- 78 unchanged lines hidden ---
        if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
                return;

        size = PAGE_ALIGN(size);

        if (!arch_is_coherent())
                __dma_free_remap(cpu_addr, size);

-        __dma_free_buffer(dma_to_page(dev, handle), size);
+        __dma_free_buffer(pfn_to_page(dma_to_pfn(dev, handle)), size);
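Both handle conversions above move from the page-based helpers (page_to_dma(), dma_to_page()) to the pfn-based ones (pfn_to_dma(), dma_to_pfn()) combined with page_to_pfn()/pfn_to_page(). On a platform with a plain linear mapping the result is the same bus address; the toy model below (illustrative only, assuming 4 KiB pages and no platform-specific bus offset) just shows the round trip through a page frame number:

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12                   /* assumed: 4 KiB pages */

/* Toy linear pfn <-> bus-address translation, standing in for what
 * pfn_to_dma()/dma_to_pfn() compute when no platform override exists. */
static uint32_t pfn_to_dma_model(uint32_t pfn) { return pfn << PAGE_SHIFT; }
static uint32_t dma_to_pfn_model(uint32_t dma) { return dma >> PAGE_SHIFT; }

int main(void)
{
        uint32_t pfn = 0x60123;         /* hypothetical page frame number */
        uint32_t handle = pfn_to_dma_model(pfn);

        printf("pfn 0x%x -> dma 0x%x -> pfn 0x%x\n",
               pfn, handle, dma_to_pfn_model(handle));
        return 0;
}
```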
}
EXPORT_SYMBOL(dma_free_coherent);

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
--- 57 unchanged lines hidden ---
                                len = PAGE_SIZE - offset;
                        }
                        vaddr = kmap_high_get(page);
                        if (vaddr) {
                                vaddr += offset;
                                op(vaddr, len, dir);
                                kunmap_high(page);
                        } else if (cache_is_vipt()) {
-                                /* unmapped pages might still be cached */
-                                vaddr = kmap_atomic(page);
+                                pte_t saved_pte;
+                                vaddr = kmap_high_l1_vipt(page, &saved_pte);
                                op(vaddr + offset, len, dir);
-                                kunmap_atomic(vaddr);
+                                kunmap_high_l1_vipt(page, saved_pte);
                        }
                } else {
                        vaddr = page_address(page) + offset;
                        op(vaddr, len, dir);
                }
                offset = 0;
                page++;
                left -= len;
--- 54 unchanged lines hidden ---
 * here.
 */
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction dir)
{
        struct scatterlist *s;
        int i, j;

+        BUG_ON(!valid_dma_direction(dir));
+
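The added BUG_ON() rejects scatter-gather mappings requested with a bogus transfer direction before any per-entry work is done. A stand-alone model of the check (the enum values and the predicate mirror the kernel's dma_data_direction definitions; the harness around them is only for illustration):

```c
#include <stdio.h>

/* Mirrors the kernel's enum dma_data_direction values. */
enum dma_data_direction {
        DMA_BIDIRECTIONAL = 0,
        DMA_TO_DEVICE = 1,
        DMA_FROM_DEVICE = 2,
        DMA_NONE = 3,
};

/* Same test as the kernel's valid_dma_direction(). */
static int valid_dma_direction(int dir)
{
        return dir == DMA_BIDIRECTIONAL || dir == DMA_TO_DEVICE ||
               dir == DMA_FROM_DEVICE;
}

int main(void)
{
        /* Prints "1 0": DMA_TO_DEVICE is accepted, DMA_NONE would trigger the BUG_ON. */
        printf("%d %d\n", valid_dma_direction(DMA_TO_DEVICE),
               valid_dma_direction(DMA_NONE));
        return 0;
}
```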
        for_each_sg(sg, s, nents, i) {
-                s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
+                s->dma_address = __dma_map_page(dev, sg_page(s), s->offset,
                                                s->length, dir);
                if (dma_mapping_error(dev, s->dma_address))
                        goto bad_mapping;
        }
+        debug_dma_map_sg(dev, sg, nents, nents, dir);
        return nents;

 bad_mapping:
        for_each_sg(sg, s, i, j)
-                dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+                __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
        return 0;
}
EXPORT_SYMBOL(dma_map_sg);

/**
 * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
--- 4 unchanged lines hidden ---
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction dir)
{
        struct scatterlist *s;
        int i;

+        debug_dma_unmap_sg(dev, sg, nents, dir);
+
        for_each_sg(sg, s, nents, i)
-                dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+                __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
}
EXPORT_SYMBOL(dma_unmap_sg);

/**
 * dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
--- 8 unchanged lines hidden ---
        for_each_sg(sg, s, nents, i) {
                if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
                                        sg_dma_len(s), dir))
                        continue;

                __dma_page_dev_to_cpu(sg_page(s), s->offset,
                                        s->length, dir);
        }
+
+        debug_dma_sync_sg_for_cpu(dev, sg, nents, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

/**
 * dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
--- 8 unchanged lines hidden ---
        for_each_sg(sg, s, nents, i) {
                if (!dmabounce_sync_for_device(dev, sg_dma_address(s), 0,
                                        sg_dma_len(s), dir))
                        continue;

                __dma_page_cpu_to_dev(sg_page(s), s->offset,
                                        s->length, dir);
        }
+
+        debug_dma_sync_sg_for_device(dev, sg, nents, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_device);
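The four entry points exported in this file (dma_map_sg(), dma_unmap_sg() and the two sync variants) make up the streaming scatter-gather side of the DMA API. A driver-side sketch of the usual pairing, with hypothetical names and the actual hardware programming elided:

```c
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/errno.h>

/* Hypothetical example, not from this file: map a scatterlist for a
 * device-to-memory transfer, look at the data, then tear it down. */
static int example_rx(struct device *dev, struct scatterlist *sg, int nents)
{
        int mapped;

        /* dma_map_sg() returns 0 on failure; on this implementation the
         * success value is simply nents, since entries are mapped one by one. */
        mapped = dma_map_sg(dev, sg, nents, DMA_FROM_DEVICE);
        if (!mapped)
                return -ENOMEM;

        /* ... program the device using sg_dma_address()/sg_dma_len(),
         * start the transfer and wait for it to complete ... */

        /* Hand ownership of the buffers back to the CPU before reading. */
        dma_sync_sg_for_cpu(dev, sg, nents, DMA_FROM_DEVICE);

        /* ... inspect the received data ... */

        dma_unmap_sg(dev, sg, nents, DMA_FROM_DEVICE);
        return 0;
}
```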
+
+#define PREALLOC_DMA_DEBUG_ENTRIES      4096
+
+static int __init dma_debug_do_init(void)
+{
+        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+        return 0;
+}
+fs_initcall(dma_debug_do_init);
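The block above wires this file into the generic DMA debugging facility: dma_debug_init() preallocates 4096 tracking entries at fs_initcall time, and the debug_dma_*() calls added earlier record each scatter-gather map, unmap and sync so that leaked or mismatched mappings can be reported. When CONFIG_DMA_API_DEBUG is not enabled, those hooks are empty inline stubs and the calls cost nothing; the shape of that pattern is roughly the following (a simplified assumption about include/linux/dma-debug.h, not a verbatim copy):

```c
#ifdef CONFIG_DMA_API_DEBUG
extern void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
                             int nents, int mapped_ents, int direction);
#else
/* Without the debug option the hook compiles to nothing. */
static inline void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
                                    int nents, int mapped_ents, int direction)
{
}
#endif
```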