// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF CMA heap exporter
 *
 * Copyright (C) 2012, 2019, 2020 Linaro Ltd.
 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
 *
 * Also utilizing parts of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 *	Andrew F. Davis <afd@ti.com>
 */

#define pr_fmt(fmt) "cma_heap: " fmt

#include <linux/cma.h>
#include <linux/dma-buf.h>
#include <linux/dma-buf/heaps/cma.h>
#include <linux/dma-heap.h>
#include <linux/dma-map-ops.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_reserved_mem.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define DEFAULT_CMA_NAME "default_cma_region"

static struct cma *dma_areas[MAX_CMA_AREAS] __initdata;
static unsigned int dma_areas_num __initdata;

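/*
 * Record a CMA area set up during early boot so that a dma-heap can be
 * exported for it from add_cma_heaps(). Callers are __init code (e.g.
 * reserved-memory setup), matching the __initdata storage above.
 */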
int __init dma_heap_cma_register_heap(struct cma *cma)
{
	if (dma_areas_num >= ARRAY_SIZE(dma_areas))
		return -EINVAL;

	dma_areas[dma_areas_num++] = cma;

	return 0;
}

struct cma_heap {
	struct dma_heap *heap;
	struct cma *cma;
};

struct cma_heap_buffer {
	struct cma_heap *heap;
	struct list_head attachments;
	struct mutex lock;
	unsigned long len;
	struct page *cma_pages;
	struct page **pages;
	pgoff_t pagecount;
	int vmap_cnt;
	void *vaddr;
};

struct dma_heap_attachment {
	struct device *dev;
	struct sg_table table;
	struct list_head list;
	bool mapped;
};

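/*
 * Build a per-attachment scatterlist over the buffer's pages and track the
 * attachment so begin/end_cpu_access can sync every mapped device.
 */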
static int cma_heap_attach(struct dma_buf *dmabuf,
			   struct dma_buf_attachment *attachment)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;
	int ret;

	a = kzalloc_obj(*a);
	if (!a)
		return -ENOMEM;

	ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
					buffer->pagecount, 0,
					buffer->pagecount << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret) {
		kfree(a);
		return ret;
	}

	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);
	a->mapped = false;

	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

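/* Undo cma_heap_attach(): unlink the attachment and free its sg_table. */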
static void cma_heap_detach(struct dma_buf *dmabuf,
			    struct dma_buf_attachment *attachment)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a = attachment->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);

	sg_free_table(&a->table);
	kfree(a);
}

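/* DMA-map the attachment's sg_table and mark it live for CPU-access syncs. */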
static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	struct sg_table *table = &a->table;
	int ret;

	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
	if (ret)
		return ERR_PTR(-ENOMEM);
	a->mapped = true;
	return table;
}

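/* Tear down the DMA mapping created by cma_heap_map_dma_buf(). */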
static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *table,
				   enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;

	a->mapped = false;
	dma_unmap_sgtable(attachment->dev, table, direction, 0);
}

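/*
 * Prepare for CPU access: invalidate the kernel vmap alias (if any) and sync
 * every mapped attachment for the CPU.
 */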
static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					     enum dma_data_direction direction)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

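/*
 * Finish CPU access: flush the kernel vmap alias (if any) and sync every
 * mapped attachment back to the device.
 */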
static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					   enum dma_data_direction direction)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		flush_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_device(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

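/* mmap fault handler: insert the PFN backing the faulting page offset. */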
static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct cma_heap_buffer *buffer = vma->vm_private_data;

	if (vmf->pgoff >= buffer->pagecount)
		return VM_FAULT_SIGBUS;

	return vmf_insert_pfn(vma, vmf->address, page_to_pfn(buffer->pages[vmf->pgoff]));
}

static const struct vm_operations_struct dma_heap_vm_ops = {
	.fault = cma_heap_vm_fault,
};

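/*
 * Set up a userspace mapping. Only shared mappings are allowed; PTEs are
 * populated lazily by the fault handler above.
 */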
static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);

	vma->vm_ops = &dma_heap_vm_ops;
	vma->vm_private_data = buffer;

	return 0;
}

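/* Map the buffer's pages contiguously into kernel virtual address space. */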
static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer)
{
	void *vaddr;

	vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

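/*
 * Refcounted kernel mapping: reuse an existing vmap when present, otherwise
 * create one under the buffer lock.
 */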
static int cma_heap_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	void *vaddr;
	int ret = 0;

	mutex_lock(&buffer->lock);
	if (buffer->vmap_cnt) {
		buffer->vmap_cnt++;
		iosys_map_set_vaddr(map, buffer->vaddr);
		goto out;
	}

	vaddr = cma_heap_do_vmap(buffer);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto out;
	}
	buffer->vaddr = vaddr;
	buffer->vmap_cnt++;
	iosys_map_set_vaddr(map, buffer->vaddr);
out:
	mutex_unlock(&buffer->lock);

	return ret;
}

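/* Drop one vmap reference and unmap the buffer when the count hits zero. */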
static void cma_heap_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	if (!--buffer->vmap_cnt) {
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
	iosys_map_clear(map);
}

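/*
 * dma-buf release: warn about (and drop) any leftover kernel mapping, then
 * free the page array and return the pages to the CMA area.
 */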
static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct cma_heap *cma_heap = buffer->heap;

	if (buffer->vmap_cnt > 0) {
		WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}

	/* free page list */
	kfree(buffer->pages);
	/* release memory */
	cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
	kfree(buffer);
}

static const struct dma_buf_ops cma_heap_buf_ops = {
	.attach = cma_heap_attach,
	.detach = cma_heap_detach,
	.map_dma_buf = cma_heap_map_dma_buf,
	.unmap_dma_buf = cma_heap_unmap_dma_buf,
	.begin_cpu_access = cma_heap_dma_buf_begin_cpu_access,
	.end_cpu_access = cma_heap_dma_buf_end_cpu_access,
	.mmap = cma_heap_mmap,
	.vmap = cma_heap_vmap,
	.vunmap = cma_heap_vunmap,
	.release = cma_heap_dma_buf_release,
};

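/*
 * Allocate a physically contiguous, zeroed buffer from the heap's CMA area,
 * build the page array used by the ops above and export it as a dma-buf.
 */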
static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
					 unsigned long len,
					 u32 fd_flags,
					 u64 heap_flags)
{
	struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
	struct cma_heap_buffer *buffer;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	size_t size = PAGE_ALIGN(len);
	pgoff_t pagecount = size >> PAGE_SHIFT;
	unsigned long align = get_order(size);
	struct page *cma_pages;
	struct dma_buf *dmabuf;
	int ret = -ENOMEM;
	pgoff_t pg;

	buffer = kzalloc_obj(*buffer);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	buffer->len = size;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	cma_pages = cma_alloc(cma_heap->cma, pagecount, align, false);
	if (!cma_pages)
		goto free_buffer;

	/* Clear the cma pages */
	if (PageHighMem(cma_pages)) {
		unsigned long nr_clear_pages = pagecount;
		struct page *page = cma_pages;

		while (nr_clear_pages > 0) {
			void *vaddr = kmap_local_page(page);

			clear_page(vaddr);
			kunmap_local(vaddr);
			/*
			 * Avoid wasting time zeroing memory if the process
			 * has been killed by SIGKILL.
			 */
			if (fatal_signal_pending(current))
				goto free_cma;
			page++;
			nr_clear_pages--;
		}
	} else {
		memset(page_address(cma_pages), 0, size);
	}

	buffer->pages = kmalloc_objs(*buffer->pages, pagecount);
	if (!buffer->pages) {
		ret = -ENOMEM;
		goto free_cma;
	}

	for (pg = 0; pg < pagecount; pg++)
		buffer->pages[pg] = &cma_pages[pg];

	buffer->cma_pages = cma_pages;
	buffer->heap = cma_heap;
	buffer->pagecount = pagecount;

	/* create the dmabuf */
	exp_info.exp_name = dma_heap_get_name(heap);
	exp_info.ops = &cma_heap_buf_ops;
	exp_info.size = buffer->len;
	exp_info.flags = fd_flags;
	exp_info.priv = buffer;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pages;
	}
	return dmabuf;

free_pages:
	kfree(buffer->pages);
free_cma:
	cma_release(cma_heap->cma, cma_pages, pagecount);
free_buffer:
	kfree(buffer);

	return ERR_PTR(ret);
}

static const struct dma_heap_ops cma_heap_ops = {
	.allocate = cma_heap_allocate,
};

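/* Wrap a CMA area in a cma_heap and register it with the dma-heap core. */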
static int __init __add_cma_heap(struct cma *cma, const char *name)
{
	struct dma_heap_export_info exp_info;
	struct cma_heap *cma_heap;

	cma_heap = kzalloc_obj(*cma_heap);
	if (!cma_heap)
		return -ENOMEM;
	cma_heap->cma = cma;

	exp_info.name = name;
	exp_info.ops = &cma_heap_ops;
	exp_info.priv = cma_heap;

	cma_heap->heap = dma_heap_add(&exp_info);
	if (IS_ERR(cma_heap->heap)) {
		int ret = PTR_ERR(cma_heap->heap);

		kfree(cma_heap);
		return ret;
	}

	return 0;
}

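/*
 * Export the default CMA area (if any) as "default_cma_region", then export
 * one heap per area handed in via dma_heap_cma_register_heap().
 */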
static int __init add_cma_heaps(void)
{
	struct cma *default_cma = dev_get_cma_area(NULL);
	unsigned int i;
	int ret;

	if (default_cma) {
		ret = __add_cma_heap(default_cma, DEFAULT_CMA_NAME);
		if (ret)
			return ret;
	}

	for (i = 0; i < dma_areas_num; i++) {
		struct cma *cma = dma_areas[i];

		ret = __add_cma_heap(cma, cma_get_name(cma));
		if (ret) {
			pr_warn("Failed to add CMA heap %s\n", cma_get_name(cma));
			continue;
		}
	}

	return 0;
}
module_init(add_cma_heaps);
MODULE_DESCRIPTION("DMA-BUF CMA Heap");