/*
 * Copyright (C) 2009-2010 PetaLogix
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
#include <asm/bug.h>
#include <asm/cacheflush.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */
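/*
 * Streaming-DMA cache maintenance for a non-coherent CPU: before the
 * device reads a buffer (DMA_TO_DEVICE) the range is flushed so main
 * memory holds the CPU's data; before the CPU reads data written by the
 * device (DMA_FROM_DEVICE) the range is invalidated so stale cached
 * lines are not used. DMA_BIDIRECTIONAL takes the flush path here.
 */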
static inline void __dma_sync_page(unsigned long paddr, unsigned long offset,
				size_t size, enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
	case DMA_BIDIRECTIONAL:
		flush_dcache_range(paddr + offset, paddr + offset + size);
		break;
	case DMA_FROM_DEVICE:
		invalidate_dcache_range(paddr + offset, paddr + offset + size);
		break;
	default:
		BUG();
	}
}

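/*
 * Look up the per-device bus offset; with no device to consult, fall
 * back to PCI_DRAM_OFFSET.
 */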
static unsigned long get_dma_direct_offset(struct device *dev)
{
	if (likely(dev))
		return (unsigned long)dev->archdata.dma_data;

	return PCI_DRAM_OFFSET; /* FIXME Not sure if this is correct */
}

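/*
 * MicroBlaze has no hardware DMA coherency, so force the non-coherent
 * (uncached consistent_alloc()/consistent_free()) paths below.
 */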
#define NOT_COHERENT_CACHE

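/*
 * Allocate a coherent buffer. On a non-coherent cache this comes from
 * consistent_alloc(), which returns an uncached mapping; otherwise
 * ordinary pages are allocated, zeroed, and translated to a bus address
 * with the per-device offset.
 */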
static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag)
{
#ifdef NOT_COHERENT_CACHE
	return consistent_alloc(flag, size, dma_handle);
#else
	void *ret;
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_phys(ret) + get_dma_direct_offset(dev);

	return ret;
#endif
}

static void dma_direct_free_coherent(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle)
{
#ifdef NOT_COHERENT_CACHE
	consistent_free(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

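/*
 * Map a scatter/gather list for streaming DMA: each segment's bus
 * address is its physical address plus the per-device offset, and its
 * cache lines are synced for the transfer direction.
 */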
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of the code is untested */
	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
		__dma_sync_page(page_to_phys(sg_page(sg)), sg->offset,
							sg->length, direction);
	}

	return nents;
}

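/*
 * Nothing to undo for a direct mapping: no bounce buffers or IOMMU
 * entries were set up. Note that, unlike dma_direct_unmap_page() below,
 * no cache sync is performed here.
 */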
static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
}

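/* No addressing restrictions are modelled, so accept any DMA mask. */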
static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
	return 1;
}

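/*
 * Map a single page for streaming DMA: sync the cache for the transfer
 * direction, then return the bus address (physical address plus the
 * per-device offset).
 */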
static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction direction,
					     struct dma_attrs *attrs)
{
	__dma_sync_page(page_to_phys(page), offset, size, direction);
	return page_to_phys(page) + offset + get_dma_direct_offset(dev);
}

static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
	/*
	 * No further cache cleanup is necessary here: dma_address is
	 * already a physical address, so it can be passed straight to
	 * __dma_sync_page() without any virtual-to-physical conversion.
	 */
	__dma_sync_page(dma_address, 0, size, direction);
}

struct dma_map_ops dma_direct_ops = {
	.alloc_coherent	= dma_direct_alloc_coherent,
	.free_coherent	= dma_direct_free_coherent,
	.map_sg		= dma_direct_map_sg,
	.unmap_sg	= dma_direct_unmap_sg,
	.dma_supported	= dma_direct_dma_supported,
	.map_page	= dma_direct_map_page,
	.unmap_page	= dma_direct_unmap_page,
};
EXPORT_SYMBOL(dma_direct_ops);
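
/*
 * Illustrative sketch only (not code from this file): platform code can
 * record a nonzero bus offset in archdata.dma_data before drivers use
 * the generic DMA API through these ops. 'dev', 'buf', 'len' and
 * MY_BUS_OFFSET below are hypothetical names.
 *
 *	dev->archdata.dma_data = (void *)MY_BUS_OFFSET;
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	... let the device perform the transfer ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */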

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

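/*
 * Preallocate tracking entries for the DMA-API debug core. Registering
 * via fs_initcall() makes this run before most drivers are probed.
 */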
static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);