/*
 * arch/sh/mm/consistent.c
 *
 * Copyright (C) 2004 - 2007  Paul Mundt
 *
 * Declared coherent memory functions based on arch/x86/kernel/pci-dma_32.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include <asm/addrspace.h>
#include <asm/io.h>
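
/*
 * Allocation strategy used below: grab normal cached pages, flush any
 * dirty lines out of the cache, and hand the caller an uncached alias
 * of the same physical memory obtained with ioremap_nocache(). The
 * DMA handle is the physical address of the underlying pages.
 */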
void *dma_alloc_coherent(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret, *ret_nocache;
	int order = get_order(size);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	ret = (void *)__get_free_pages(gfp, order);
	if (!ret)
		return NULL;

	memset(ret, 0, size);
	/*
	 * Pages from the page allocator may have data present in
	 * cache. So flush the cache before using uncached memory.
	 */
	dma_cache_sync(dev, ret, size, DMA_BIDIRECTIONAL);

	ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);
	if (!ret_nocache) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}

	*dma_handle = virt_to_phys(ret);
	return ret_nocache;
}
EXPORT_SYMBOL(dma_alloc_coherent);
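
/*
 * A minimal usage sketch (hypothetical driver code, not part of this
 * file): allocate a buffer the CPU and device can share without
 * explicit cache maintenance, then release it. The platform device
 * pointer and size are illustrative assumptions.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
 *					&ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *
 *	... program ring_dma into the device and perform I/O ...
 *
 *	dma_free_coherent(&pdev->dev, PAGE_SIZE, ring, ring_dma);
 */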

void dma_free_coherent(struct device *dev, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	int order = get_order(size);

	if (!dma_release_from_coherent(dev, order, vaddr)) {
		WARN_ON(irqs_disabled());	/* for portability */
		/*
		 * vaddr is the uncached ioremap alias; the backing pages
		 * are recovered from the physical DMA handle.
		 */
		free_pages((unsigned long)phys_to_virt(dma_handle), order);
		iounmap(vaddr);
	}
}
EXPORT_SYMBOL(dma_free_coherent);

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction direction)
{
#ifdef CONFIG_CPU_SH5
	void *p1addr = vaddr;
#else
	/* Cache operations must be done on the cached P1 alias of the buffer. */
	void *p1addr = (void *)P1SEGADDR((unsigned long)vaddr);
#endif

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		__flush_invalidate_region(p1addr, size);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		__flush_wback_region(p1addr, size);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		__flush_purge_region(p1addr, size);
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(dma_cache_sync);
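
/*
 * A hedged usage sketch (hypothetical driver code): explicit cache
 * maintenance around a device transfer through a cached buffer, with
 * buf and len as illustrative assumptions.
 *
 *	CPU filled buf, write dirty lines back before the device reads:
 *		dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);
 *
 *	... start the transfer, wait for completion ...
 *
 *	Device wrote buf, invalidate stale lines before the CPU reads:
 *		dma_cache_sync(dev, buf, len, DMA_FROM_DEVICE);
 */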

static int __init memchunk_setup(char *str)
{
	return 1; /* accept anything that begins with "memchunk." */
}
__setup("memchunk.", memchunk_setup);
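
/*
 * Example command line use (the device name "veu0" and the size are
 * illustrative): memchunk.veu0=2m forces the chunk size for "veu0" to
 * 2 MiB; memparse() accepts the usual k/m/g suffixes.
 */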

static void __init memchunk_cmdline_override(char *name, unsigned long *sizep)
{
	char *p = boot_command_line;
	int k = strlen(name);

	while ((p = strstr(p, "memchunk."))) {
		p += 9; /* strlen("memchunk.") */
		if (!strncmp(name, p, k) && p[k] == '=') {
			p += k + 1;
			*sizep = memparse(p, NULL);
			pr_info("%s: forcing memory chunk size to 0x%08lx\n",
				name, *sizep);
			break;
		}
	}
}

int __init platform_resource_setup_memory(struct platform_device *pdev,
					  char *name, unsigned long memsize)
{
	struct resource *r;
	dma_addr_t dma_handle;
	void *buf;

	/* The last resource slot is expected to be left empty for us. */
	r = pdev->resource + pdev->num_resources - 1;
	if (r->flags) {
		pr_warning("%s: unable to find empty space for resource\n",
			name);
		return -EINVAL;
	}

	memchunk_cmdline_override(name, &memsize);
	if (!memsize)
		return 0;

	buf = dma_alloc_coherent(NULL, memsize, &dma_handle, GFP_KERNEL);
	if (!buf) {
		pr_warning("%s: unable to allocate memory\n", name);
		return -ENOMEM;
	}

	memset(buf, 0, memsize);

	r->flags = IORESOURCE_MEM;
	r->start = dma_handle;
	r->end = r->start + memsize - 1;
	r->name = name;
	return 0;
}
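
/*
 * A minimal board-setup sketch (hypothetical code; veu0_device and the
 * "veu0" name are illustrative assumptions). The platform device must
 * leave its last resource slot empty for this helper to fill in:
 *
 *	static int __init board_setup_memory(void)
 *	{
 *		return platform_resource_setup_memory(&veu0_device, "veu0",
 *						      1 << 20);
 *	}
 *
 * The 1 MiB default can then be overridden at boot with memchunk.veu0=.
 */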