xref: /linux/arch/mips/mm/dma-noncoherent.c (revision 9cdf083f981b8d37b3212400a359368661385099)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001  Ralf Baechle <ralf@gnu.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>

#include <asm/cache.h>
#include <asm/io.h>

/*
 * A warning on terminology: Linux calls an uncached area "coherent",
 * while MIPS terminology reserves "coherent" for memory areas whose
 * coherency is maintained by hardware.
 */

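/*
 * Allocate cached memory for DMA.  The caller is responsible for all
 * cache maintenance, e.g. via dma_cache_sync() or the dma_sync_*
 * interfaces, around device accesses.
 */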
void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;
	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_phys(ret);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);

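/*
 * As dma_alloc_noncoherent(), but the cached mapping is written back
 * and invalidated, and an uncached alias (UNCAC_ADDR) is returned, so
 * CPU and device see the same data with no further maintenance.
 * Sketch of typical use (hypothetical "dev" and "RING_BYTES"):
 *
 *	dma_addr_t bus;
 *	u32 *ring = dma_alloc_coherent(dev, RING_BYTES, &bus, GFP_KERNEL);
 *	...
 *	dma_free_coherent(dev, RING_BYTES, ring, bus);
 */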
void *dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;

	ret = dma_alloc_noncoherent(dev, size, dma_handle, gfp);
	if (ret) {
		dma_cache_wback_inv((unsigned long) ret, size);
		ret = UNCAC_ADDR(ret);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);

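/* Free memory obtained from dma_alloc_noncoherent(). */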
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

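/*
 * dma_alloc_coherent() handed out an uncached alias; CAC_ADDR converts
 * it back to the cached kernel address that __get_free_pages()
 * originally returned before the pages are freed.
 */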
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;

	addr = CAC_ADDR(addr);
	free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);

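/*
 * Perform the cache maintenance implied by a DMA direction: write back
 * dirty lines before the device reads (DMA_TO_DEVICE), invalidate stale
 * lines before the CPU reads what the device wrote (DMA_FROM_DEVICE),
 * or both (DMA_BIDIRECTIONAL).
 */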
static inline void __dma_sync(unsigned long addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback(addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(addr, size);
		break;

	default:
		BUG();
	}
}

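/*
 * There is no IOMMU here: the bus address of a buffer is simply its
 * physical address, so mapping reduces to a cache sync.  A minimal
 * usage sketch (hypothetical driver code, "dev" and "buf" assumed):
 *
 *	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	// ... hand "bus" to the device and run the transfer ...
 *	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 */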
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr = (unsigned long) ptr;

	__dma_sync(addr, size, direction);

	return virt_to_phys(ptr);
}

EXPORT_SYMBOL(dma_map_single);

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	enum dma_data_direction direction)
{
	/*
	 * Nothing to do: the cache maintenance was already done at map
	 * time, and a CPU that wants to look at DMA_FROM_DEVICE data
	 * should go through dma_sync_single_for_cpu() first.
	 */
}

EXPORT_SYMBOL(dma_unmap_single);

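/*
 * Map a scatterlist by syncing each segment and recording its bus
 * address.  Note that a highmem page without a kernel mapping
 * (page_address() == NULL) is skipped entirely here, leaving its
 * sg->dma_address unset.
 */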
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		unsigned long addr;

		addr = (unsigned long) page_address(sg->page);
		if (addr) {
			__dma_sync(addr + sg->offset, sg->length, direction);
			sg->dma_address = (dma_addr_t)page_to_phys(sg->page)
					  + sg->offset;
		}
	}

	return nents;
}

EXPORT_SYMBOL(dma_map_sg);

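/*
 * Unlike dma_map_single(), this writes back and invalidates regardless
 * of the direction; that is conservative but always safe.
 */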
dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = (unsigned long) page_address(page) + offset;
	dma_cache_wback_inv(addr, size);

	return page_to_phys(page) + offset;
}

EXPORT_SYMBOL(dma_map_page);

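/*
 * The bus address is converted back to a cached kernel virtual address
 * by adding PAGE_OFFSET, which is only valid for lowmem pages; unless
 * the transfer was DMA_TO_DEVICE, the lines covering it are written
 * back and invalidated.
 */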
void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (direction != DMA_TO_DEVICE) {
		unsigned long addr;

		addr = dma_address + PAGE_OFFSET;
		dma_cache_wback_inv(addr, size);
	}
}

EXPORT_SYMBOL(dma_unmap_page);

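/*
 * For DMA_TO_DEVICE the caches were cleaned at map time and the device
 * cannot have dirtied them, so there is nothing left to do on unmap.
 */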
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	enum dma_data_direction direction)
{
	unsigned long addr;
	int i;

	BUG_ON(direction == DMA_NONE);

	if (direction == DMA_TO_DEVICE)
		return;

	for (i = 0; i < nhwentries; i++, sg++) {
		addr = (unsigned long) page_address(sg->page);
		if (addr)
			__dma_sync(addr + sg->offset, sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_unmap_sg);

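/*
 * Hand ownership of a streaming mapping back to the CPU (for_cpu) or to
 * the device (for_device) by redoing the direction-specific sync.
 */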
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = dma_handle + PAGE_OFFSET;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = dma_handle + PAGE_OFFSET;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = dma_handle + offset + PAGE_OFFSET;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = dma_handle + offset + PAGE_OFFSET;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);

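/*
 * Note: unlike dma_map_sg(), the scatterlist sync variants below do not
 * add sg->offset, so a segment that does not start at the beginning of
 * its page is synced from the page start instead; presumably callers
 * here use page-aligned segments.
 */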
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nelems; i++, sg++)
		__dma_sync((unsigned long)page_address(sg->page),
		           sg->length, direction);
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nelems; i++, sg++)
		__dma_sync((unsigned long)page_address(sg->page),
		           sg->length, direction);
}

EXPORT_SYMBOL(dma_sync_sg_for_device);

int dma_mapping_error(dma_addr_t dma_addr)
{
	/* Mappings never fail on this platform. */
	return 0;
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
	/*
	 * We fall back to GFP_DMA when the mask isn't all 1s, so we
	 * can't guarantee allocations that must be within a tighter
	 * range than GFP_DMA can provide.
	 */
	if (mask < 0x00ffffff)
		return 0;

	return 1;
}

EXPORT_SYMBOL(dma_supported);

int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
	return 1;
}

EXPORT_SYMBOL(dma_is_consistent);

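/*
 * Explicit cache maintenance for buffers from dma_alloc_noncoherent().
 * Conservatively writes back and invalidates regardless of the
 * direction, as long as one is given.
 */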
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction direction)
{
	if (direction == DMA_NONE)
		return;

	dma_cache_wback_inv((unsigned long)vaddr, size);
}

EXPORT_SYMBOL(dma_cache_sync);

/* The DAC (Dual Address Cycle) routines are a PCIism for 64-bit addressing. */

#ifdef CONFIG_PCI

#include <linux/pci.h>

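/*
 * With no IOMMU, a DAC bus address is again just the physical address,
 * and the reverse translations below recover the page and offset from
 * it directly.
 */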
dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev,
	struct page *page, unsigned long offset, int direction)
{
	return (dma64_addr_t)page_to_phys(page) + offset;
}

EXPORT_SYMBOL(pci_dac_page_to_dma);

struct page *pci_dac_dma_to_page(struct pci_dev *pdev,
	dma64_addr_t dma_addr)
{
	return mem_map + (dma_addr >> PAGE_SHIFT);
}

EXPORT_SYMBOL(pci_dac_dma_to_page);

unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev,
	dma64_addr_t dma_addr)
{
	return dma_addr & ~PAGE_MASK;
}

EXPORT_SYMBOL(pci_dac_dma_to_offset);

void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev,
	dma64_addr_t dma_addr, size_t len, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);

	dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len);
}

EXPORT_SYMBOL(pci_dac_dma_sync_single_for_cpu);

void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev,
	dma64_addr_t dma_addr, size_t len, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);

	dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len);
}

EXPORT_SYMBOL(pci_dac_dma_sync_single_for_device);

#endif /* CONFIG_PCI */