dma-mapping.c: diff 576d0d552be803b22867ed98a8619d68b1f78bbe -> f04b951f6c7eccd85ea7750a5fafa68fb98d6bfa
 // SPDX-License-Identifier: GPL-2.0
 // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
 
 #include <linux/cache.h>
 #include <linux/dma-mapping.h>
 #include <linux/dma-contiguous.h>
 #include <linux/dma-noncoherent.h>
 #include <linux/genalloc.h>
 #include <linux/highmem.h>
 #include <linux/io.h>
 #include <linux/mm.h>
 #include <linux/scatterlist.h>
 #include <linux/types.h>
 #include <linux/version.h>
 #include <asm/cache.h>
 
-static struct gen_pool *atomic_pool;
-static size_t atomic_pool_size __initdata = SZ_256K;
-
-static int __init early_coherent_pool(char *p)
-{
-	atomic_pool_size = memparse(p, &p);
-	return 0;
-}
-early_param("coherent_pool", early_coherent_pool);
-
 static int __init atomic_pool_init(void)
 {
-	struct page *page;
-	size_t size = atomic_pool_size;
-	void *ptr;
-	int ret;
-
-	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
-	if (!atomic_pool)
-		BUG();
-
-	page = alloc_pages(GFP_KERNEL, get_order(size));
-	if (!page)
-		BUG();
-
-	ptr = dma_common_contiguous_remap(page, size, VM_ALLOC,
-					  pgprot_noncached(PAGE_KERNEL),
-					  __builtin_return_address(0));
-	if (!ptr)
-		BUG();
-
-	ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
-				page_to_phys(page), atomic_pool_size, -1);
-	if (ret)
-		BUG();
-
-	gen_pool_set_algo(atomic_pool, gen_pool_first_fit_order_align, NULL);
-
-	pr_info("DMA: preallocated %zu KiB pool for atomic coherent pool\n",
-		atomic_pool_size / 1024);
-
-	pr_info("DMA: vaddr: 0x%x phy: 0x%lx,\n", (unsigned int)ptr,
-		page_to_phys(page));
-
-	return 0;
+	return dma_atomic_pool_init(GFP_KERNEL, pgprot_noncached(PAGE_KERNEL));
 }
 postcore_initcall(atomic_pool_init);
 
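Note: the hand-rolled gen_pool-backed atomic pool, including the csky-local coherent_pool= early parameter, is replaced by a single call into the generic dma_atomic_pool_init() helper; the generic code of this era keeps an equivalent coherent_pool= boot-time override, so no tunable is lost. For reference, the pool-backed non-blocking allocation path looks roughly like the removed csky_dma_alloc_atomic() above. A minimal sketch, modeled on that removed code (the function name here is illustrative, not the generic implementation's):

	#include <linux/genalloc.h>
	#include <linux/types.h>

	/* Illustrative only: mirrors the removed csky_dma_alloc_atomic().
	 * Serves an allocation from a pre-populated uncached pool, so it
	 * is safe to call from atomic context. */
	static void *atomic_pool_alloc_sketch(struct gen_pool *pool, size_t size,
					      dma_addr_t *dma_handle)
	{
		unsigned long vaddr = gen_pool_alloc(pool, size);

		if (vaddr)
			*dma_handle = gen_pool_virt_to_phys(pool, vaddr);
		return (void *)vaddr;
	}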
-static void *csky_dma_alloc_atomic(struct device *dev, size_t size,
-				   dma_addr_t *dma_handle)
+void arch_dma_prep_coherent(struct page *page, size_t size)
 {
-	unsigned long addr;
-
-	addr = gen_pool_alloc(atomic_pool, size);
-	if (addr)
-		*dma_handle = gen_pool_virt_to_phys(atomic_pool, addr);
-
-	return (void *)addr;
-}
-
-static void csky_dma_free_atomic(struct device *dev, size_t size, void *vaddr,
-				 dma_addr_t dma_handle, unsigned long attrs)
-{
-	gen_pool_free(atomic_pool, (unsigned long)vaddr, size);
-}
-
-static void __dma_clear_buffer(struct page *page, size_t size)
-{
 	if (PageHighMem(page)) {
 		unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 
 		do {
 			void *ptr = kmap_atomic(page);
 			size_t _size = (size < PAGE_SIZE) ? size : PAGE_SIZE;
 
 			memset(ptr, 0, _size);
 
--- 9 unchanged lines hidden ---
 
 	} else {
 		void *ptr = page_address(page);
 
 		memset(ptr, 0, size);
 		dma_wbinv_range((unsigned long)ptr, (unsigned long)ptr + size);
 	}
 }
 
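Note: the zero-and-flush logic formerly private to __dma_clear_buffer() is now exposed as arch_dma_prep_coherent(), the hook the generic remapping allocator calls so the architecture can clear the buffer and write back and invalidate its cache lines before the generic code installs an uncached mapping. The PageHighMem() branch walks the buffer one page at a time because highmem pages have no permanent kernel mapping for page_address() to return. A minimal self-contained sketch of that per-page pattern (function name is illustrative):

	#include <linux/highmem.h>
	#include <linux/mm.h>
	#include <linux/string.h>

	/* Illustrative only: clear "count" full pages of a possibly-highmem
	 * buffer through short-lived kmap_atomic() windows. */
	static void clear_pages_sketch(struct page *page, unsigned int count)
	{
		while (count--) {
			void *ptr = kmap_atomic(page);	/* transient mapping */

			memset(ptr, 0, PAGE_SIZE);
			kunmap_atomic(ptr);
			page++;
		}
	}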
-static void *csky_dma_alloc_nonatomic(struct device *dev, size_t size,
-				      dma_addr_t *dma_handle, gfp_t gfp,
-				      unsigned long attrs)
-{
-	void *vaddr;
-	struct page *page;
-	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
-	if (DMA_ATTR_NON_CONSISTENT & attrs) {
-		pr_err("csky %s can't support DMA_ATTR_NON_CONSISTENT.\n", __func__);
-		return NULL;
-	}
-
-	if (IS_ENABLED(CONFIG_DMA_CMA))
-		page = dma_alloc_from_contiguous(dev, count, get_order(size),
-						 gfp);
-	else
-		page = alloc_pages(gfp, get_order(size));
-
-	if (!page) {
-		pr_err("csky %s no more free pages.\n", __func__);
-		return NULL;
-	}
-
-	*dma_handle = page_to_phys(page);
-
-	__dma_clear_buffer(page, size);
-
-	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
-		return page;
-
-	vaddr = dma_common_contiguous_remap(page, PAGE_ALIGN(size), VM_USERMAP,
-		pgprot_noncached(PAGE_KERNEL), __builtin_return_address(0));
-	if (!vaddr)
-		BUG();
-
-	return vaddr;
-}
-
-static void csky_dma_free_nonatomic(
-	struct device *dev,
-	size_t size,
-	void *vaddr,
-	dma_addr_t dma_handle,
-	unsigned long attrs
-	)
-{
-	struct page *page = phys_to_page(dma_handle);
-	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
-	if ((unsigned int)vaddr >= VMALLOC_START)
-		dma_common_free_remap(vaddr, size, VM_USERMAP);
-
-	if (IS_ENABLED(CONFIG_DMA_CMA))
-		dma_release_from_contiguous(dev, page, count);
-	else
-		__free_pages(page, get_order(size));
-}
-
-void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		     gfp_t gfp, unsigned long attrs)
-{
-	if (gfpflags_allow_blocking(gfp))
-		return csky_dma_alloc_nonatomic(dev, size, dma_handle, gfp,
-						attrs);
-	else
-		return csky_dma_alloc_atomic(dev, size, dma_handle);
-}
-
-void arch_dma_free(struct device *dev, size_t size, void *vaddr,
-		   dma_addr_t dma_handle, unsigned long attrs)
-{
-	if (!addr_in_gen_pool(atomic_pool, (unsigned int) vaddr, size))
-		csky_dma_free_nonatomic(dev, size, vaddr, dma_handle, attrs);
-	else
-		csky_dma_free_atomic(dev, size, vaddr, dma_handle, attrs);
-}
-
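Note: with csky_dma_alloc_nonatomic(), csky_dma_free_nonatomic() and the arch_dma_alloc()/arch_dma_free() dispatchers deleted, those entry points are supplied by the generic remapping implementation instead, so the per-arch blocking vs. non-blocking split disappears from this file. Driver-facing behavior is unchanged: consumers keep calling the standard API, and a non-blocking GFP mask is what used to route a request into the atomic-pool path via gfpflags_allow_blocking(). A hypothetical driver snippet, not part of this patch:

	#include <linux/dma-mapping.h>

	/* Hypothetical consumer: allocate and free one coherent page from
	 * atomic context; GFP_ATOMIC forbids blocking, which steers the
	 * request to the preallocated pool. */
	static int sketch_selftest(struct device *dev)
	{
		dma_addr_t handle;
		void *buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle,
					       GFP_ATOMIC);

		if (!buf)
			return -ENOMEM;
		dma_free_coherent(dev, PAGE_SIZE, buf, handle);
		return 0;
	}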
 static inline void cache_op(phys_addr_t paddr, size_t size,
 			    void (*fn)(unsigned long start, unsigned long end))
 {
 	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
 	unsigned int offset = paddr & ~PAGE_MASK;
 	size_t left = size;
 	unsigned long start;
 
--- 59 unchanged lines hidden ---
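Note: cache_op() walks a physical range and applies a cache-maintenance callback over per-page virtual windows; its loop body and its callers are in the hidden remainder of the file. A hypothetical caller, using the dma_wbinv_range() primitive already visible above (illustration only, not taken from the hidden lines):

	/* Hypothetical: flush a buffer's cache lines before a device
	 * reads it, by applying writeback+invalidate across the range. */
	static void sync_for_device_sketch(phys_addr_t paddr, size_t size)
	{
		cache_op(paddr, size, dma_wbinv_range);
	}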