linux/arch/arm/mm/dma-mapping.c: diff between a227fb92a0f5f0dd8282719386e9b3a29f0d16b2 (old) and 2dc6a016bbedf18f18ad73997e5338307d6dbde9 (new)
/*
 * linux/arch/arm/mm/dma-mapping.c
 *
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.

--- 15 unchanged lines hidden ---

#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>
#include <asm/mach/arch.h>

#include "mm.h"

--- only in the new version (2dc6a016) ---

/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed. The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static inline dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir,
                struct dma_attrs *attrs)
{
        return __dma_map_page(dev, page, offset, size, dir);
}

/**
 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation. The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir,
                struct dma_attrs *attrs)
{
        __dma_unmap_page(dev, handle, size, dir);
}

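/*
 * Illustrative sketch (not part of either version of the file): how a driver
 * would typically use the dma_map_page()/dma_unmap_page() pair documented
 * above, via <linux/dma-mapping.h>. The device, page and length arguments
 * and the example_ function name are hypothetical.
 */
static int example_send_page(struct device *dev, struct page *page,
                             unsigned long offset, size_t len)
{
        dma_addr_t handle;

        /* Hand the buffer over to the device; caches are maintained here. */
        handle = dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, handle))
                return -ENOMEM;

        /* ... program the device with "handle" and wait for completion ... */

        /* Give the buffer back to the CPU once the transfer is done. */
        dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);
        return 0;
}
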
static inline void arm_dma_sync_single_for_cpu(struct device *dev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        unsigned int offset = handle & (PAGE_SIZE - 1);
        struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
        if (!dmabounce_sync_for_cpu(dev, handle, size, dir))
                return;

        __dma_page_dev_to_cpu(page, offset, size, dir);
}

static inline void arm_dma_sync_single_for_device(struct device *dev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        unsigned int offset = handle & (PAGE_SIZE - 1);
        struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
        if (!dmabounce_sync_for_device(dev, handle, size, dir))
                return;

        __dma_page_cpu_to_dev(page, offset, size, dir);
}

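/*
 * Illustrative sketch (not part of either version of the file): partial CPU
 * access to a streaming buffer between device transfers, using the driver
 * API that the sync helpers above implement. All names are hypothetical.
 */
static void example_check_rx_status(struct device *dev, dma_addr_t handle,
                                    void *cpu_buf, size_t size)
{
        /* Reclaim the buffer for the CPU before reading what the device wrote. */
        dma_sync_single_for_cpu(dev, handle, size, DMA_FROM_DEVICE);

        /* ... inspect cpu_buf, e.g. poll a status word written by the device ... */

        /* Return ownership to the device before it writes again. */
        dma_sync_single_for_device(dev, handle, size, DMA_FROM_DEVICE);
}
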
static int arm_dma_set_mask(struct device *dev, u64 dma_mask);

struct dma_map_ops arm_dma_ops = {
        .map_page               = arm_dma_map_page,
        .unmap_page             = arm_dma_unmap_page,
        .map_sg                 = arm_dma_map_sg,
        .unmap_sg               = arm_dma_unmap_sg,
        .sync_single_for_cpu    = arm_dma_sync_single_for_cpu,
        .sync_single_for_device = arm_dma_sync_single_for_device,
        .sync_sg_for_cpu        = arm_dma_sync_sg_for_cpu,
        .sync_sg_for_device     = arm_dma_sync_sg_for_device,
        .set_dma_mask           = arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_dma_ops);

--- end of new-only block ---

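/*
 * Illustrative sketch (not part of either version of the file): roughly how
 * a generic dma_map_page() wrapper dispatches through a dma_map_ops table
 * such as arm_dma_ops. Simplified and assuming the get_dma_ops() helper that
 * accompanies this series; the real inline wrappers also handle DMA
 * debugging and attributes.
 */
static inline dma_addr_t example_dma_map_page(struct device *dev,
                struct page *page, unsigned long offset, size_t size,
                enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        /* NULL attrs: same as the plain dma_map_page() call. */
        return ops->map_page(dev, page, offset, size, dir, NULL);
}
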
static u64 get_coherent_dma_mask(struct device *dev)
{
        u64 mask = (u64)arm_dma_limit;

        if (dev) {
                mask = dev->coherent_dma_mask;

                /*

--- 416 unchanged lines hidden ---


        if (!arch_is_coherent())
                __dma_free_remap(cpu_addr, size);

        __dma_free_buffer(pfn_to_page(dma_to_pfn(dev, handle)), size);
}
EXPORT_SYMBOL(dma_free_coherent);

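/*
 * Illustrative sketch (not part of either version of the file): a driver
 * allocating and freeing a coherent descriptor ring through the
 * dma_alloc_coherent()/dma_free_coherent() pair whose tail is shown above.
 * Names and sizes are hypothetical.
 */
static void *example_alloc_ring(struct device *dev, size_t size,
                                dma_addr_t *dma_handle)
{
        /* Coherent memory: no explicit cache maintenance is needed later. */
        return dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
}

static void example_free_ring(struct device *dev, size_t size,
                              void *cpu_addr, dma_addr_t dma_handle)
{
        dma_free_coherent(dev, size, cpu_addr, dma_handle);
}
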
--- only in the old version (a227fb92) ---

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
        enum dma_data_direction dir)
{
        unsigned long paddr;

        BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));

        dmac_map_area(kaddr, size, dir);

        paddr = __pa(kaddr);
        if (dir == DMA_FROM_DEVICE) {
                outer_inv_range(paddr, paddr + size);
        } else {
                outer_clean_range(paddr, paddr + size);
        }
        /* FIXME: non-speculating: flush on bidirectional mappings? */
}
EXPORT_SYMBOL(___dma_single_cpu_to_dev);

void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
        enum dma_data_direction dir)
{
        BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));

        /* FIXME: non-speculating: not required */
        /* don't bother invalidating if DMA to device */
        if (dir != DMA_TO_DEVICE) {
                unsigned long paddr = __pa(kaddr);
                outer_inv_range(paddr, paddr + size);
        }

        dmac_unmap_area(kaddr, size, dir);
}
EXPORT_SYMBOL(___dma_single_dev_to_cpu);

--- end of old-only block ---

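/*
 * Illustrative sketch (not part of either version of the file): the
 * driver-facing interface the comment above points to. A kmalloc() buffer
 * is mapped with dma_map_single() rather than by calling the
 * ___dma_single_* helpers directly. All names are hypothetical.
 */
static int example_xmit_buf(struct device *dev, void *buf, size_t len)
{
        dma_addr_t handle;

        handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, handle))
                return -ENOMEM;

        /* ... start the transfer and wait for it to complete ... */

        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
        return 0;
}
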
static void dma_cache_maint_page(struct page *page, unsigned long offset,
        size_t size, enum dma_data_direction dir,
        void (*op)(const void *, size_t, int))
{
        /*
         * A single sg entry may refer to multiple physically contiguous
         * pages. But we still need to process highmem pages individually.
         * If highmem is not configured then the bulk of this loop gets

--- 81 unchanged lines hidden ---

 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length. They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
--- old version (a227fb92) ---

int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction dir)
{
        struct scatterlist *s;
        int i, j;

        BUG_ON(!valid_dma_direction(dir));

        for_each_sg(sg, s, nents, i) {
                s->dma_address = __dma_map_page(dev, sg_page(s), s->offset,
                                                s->length, dir);
                if (dma_mapping_error(dev, s->dma_address))
                        goto bad_mapping;
        }
        debug_dma_map_sg(dev, sg, nents, nents, dir);
        return nents;

 bad_mapping:
        for_each_sg(sg, s, i, j)
                __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
        return 0;
}
EXPORT_SYMBOL(dma_map_sg);

--- new version (2dc6a016) ---

int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction dir, struct dma_attrs *attrs)
{
        struct scatterlist *s;
        int i, j;

        for_each_sg(sg, s, nents, i) {
                s->dma_address = __dma_map_page(dev, sg_page(s), s->offset,
                                                s->length, dir);
                if (dma_mapping_error(dev, s->dma_address))
                        goto bad_mapping;
        }
        return nents;

 bad_mapping:
        for_each_sg(sg, s, i, j)
                __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
        return 0;
}

--- end of changed region ---

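/*
 * Illustrative sketch (not part of either version of the file): building a
 * two-entry scatterlist (helpers from <linux/scatterlist.h>) and mapping it
 * with dma_map_sg() as described above. The pages, device and example_
 * names are hypothetical.
 */
static int example_map_two_pages(struct device *dev, struct page *p0,
                                 struct page *p1, struct scatterlist sg[2])
{
        int count;

        sg_init_table(sg, 2);
        sg_set_page(&sg[0], p0, PAGE_SIZE, 0);
        sg_set_page(&sg[1], p1, PAGE_SIZE, 0);

        count = dma_map_sg(dev, sg, 2, DMA_TO_DEVICE);
        if (count == 0)
                return -ENOMEM;         /* nothing was mapped */

        /* Program the device from sg_dma_address()/sg_dma_len() of each entry. */
        return count;
}
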
/**
 * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations. Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
--- old version (a227fb92) ---

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction dir)
{
        struct scatterlist *s;
        int i;

        debug_dma_unmap_sg(dev, sg, nents, dir);

        for_each_sg(sg, s, nents, i)
                __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
}
EXPORT_SYMBOL(dma_unmap_sg);

--- new version (2dc6a016) ---

void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction dir, struct dma_attrs *attrs)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i)
                __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
}

--- end of changed region ---

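/*
 * Illustrative sketch (not part of either version of the file): the matching
 * tear-down for the mapping example earlier. Note that the entry count must
 * be the value originally passed to dma_map_sg(), not the (possibly smaller)
 * value it returned.
 */
static void example_unmap_two_pages(struct device *dev, struct scatterlist sg[2])
{
        dma_unmap_sg(dev, sg, 2, DMA_TO_DEVICE);
}
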
/**
 * dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
--- old version (a227fb92) ---

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                        int nents, enum dma_data_direction dir)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s),
                                            sg_dma_len(s), dir))
                        continue;

                __dma_page_dev_to_cpu(sg_page(s), s->offset,
                                      s->length, dir);
        }

        debug_dma_sync_sg_for_cpu(dev, sg, nents, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

--- new version (2dc6a016) ---

void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                        int nents, enum dma_data_direction dir)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s),
                                            sg_dma_len(s), dir))
                        continue;

                __dma_page_dev_to_cpu(sg_page(s), s->offset,
                                      s->length, dir);
        }
}

--- end of changed region ---

/**
 * dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
--- old version (a227fb92) ---

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                        int nents, enum dma_data_direction dir)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                if (!dmabounce_sync_for_device(dev, sg_dma_address(s),
                                               sg_dma_len(s), dir))
                        continue;

                __dma_page_cpu_to_dev(sg_page(s), s->offset,
                                      s->length, dir);
        }

        debug_dma_sync_sg_for_device(dev, sg, nents, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_device);

--- new version (2dc6a016) ---

void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                        int nents, enum dma_data_direction dir)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                if (!dmabounce_sync_for_device(dev, sg_dma_address(s),
                                               sg_dma_len(s), dir))
                        continue;

                __dma_page_cpu_to_dev(sg_page(s), s->offset,
                                      s->length, dir);
        }
}

--- end of changed region ---

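/*
 * Illustrative sketch (not part of either version of the file): reusing a
 * mapped scatterlist across several device transfers with the sync_sg calls
 * documented above. "nents" must be the value originally passed to
 * dma_map_sg(); all names are hypothetical.
 */
static void example_recycle_rx_sglist(struct device *dev,
                                      struct scatterlist *sg, int nents)
{
        /* Let the CPU look at what the device just wrote. */
        dma_sync_sg_for_cpu(dev, sg, nents, DMA_FROM_DEVICE);

        /* ... copy the received data out of the sg pages ... */

        /* Hand the same buffers back to the device for the next transfer. */
        dma_sync_sg_for_device(dev, sg, nents, DMA_FROM_DEVICE);
}
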
/*
 * Return whether the given device DMA address mask can be supported
 * properly. For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 */
int dma_supported(struct device *dev, u64 mask)
{
        if (mask < (u64)arm_dma_limit)
                return 0;
        return 1;
}
EXPORT_SYMBOL(dma_supported);

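/*
 * Illustrative sketch (not part of either version of the file): the 24-bit
 * case from the comment above, expressed with DMA_BIT_MASK(24), which is
 * 0x00ffffff. The example_ name is hypothetical; most drivers reach
 * dma_supported() indirectly through dma_set_mask() below.
 */
static int example_check_isa_style_dma(struct device *dev)
{
        if (!dma_supported(dev, DMA_BIT_MASK(24)))
                return -EIO;    /* usable memory lies above the 16MB limit */
        return 0;
}
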
--- old version (a227fb92) ---

int dma_set_mask(struct device *dev, u64 dma_mask)
{
        if (!dev->dma_mask || !dma_supported(dev, dma_mask))
                return -EIO;

#ifndef CONFIG_DMABOUNCE
        *dev->dma_mask = dma_mask;
#endif

        return 0;
}
EXPORT_SYMBOL(dma_set_mask);

--- new version (2dc6a016) ---

static int arm_dma_set_mask(struct device *dev, u64 dma_mask)
{
        if (!dev->dma_mask || !dma_supported(dev, dma_mask))
                return -EIO;

#ifndef CONFIG_DMABOUNCE
        *dev->dma_mask = dma_mask;
#endif

        return 0;
}

--- end of changed region ---

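/*
 * Illustrative sketch (not part of either version of the file): a driver
 * declaring its addressing capability at probe time, which ends up in the
 * dma_set_mask() path above. Names are hypothetical.
 */
static int example_probe(struct device *dev)
{
        /* The device can only generate 32-bit bus addresses. */
        if (dma_set_mask(dev, DMA_BIT_MASK(32)))
                return -EIO;

        if (dma_set_coherent_mask(dev, DMA_BIT_MASK(32)))
                return -EIO;

        return 0;
}
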
#define PREALLOC_DMA_DEBUG_ENTRIES      4096

static int __init dma_debug_do_init(void)
{
#ifdef CONFIG_MMU
        arm_vmregion_create_proc("dma-mappings", &consistent_head);
#endif
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
        return 0;
}
fs_initcall(dma_debug_do_init);