// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF CMA heap exporter
 *
 * Copyright (C) 2012, 2019, 2020 Linaro Ltd.
 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
 *
 * Also utilizing parts of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 *	Andrew F. Davis <afd@ti.com>
 */

#define pr_fmt(fmt) "cma_heap: " fmt

#include <linux/cma.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/dma-map-ops.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_reserved_mem.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define DEFAULT_CMA_NAME "default_cma_region"

struct cma_heap {
	struct dma_heap *heap;
	struct cma *cma;
};

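/*
 * Per-buffer state: the backing CMA allocation, a page array used for the
 * sg_table/mmap/vmap paths, the attachment list, and the kernel-mapping
 * refcount.
 */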
struct cma_heap_buffer {
	struct cma_heap *heap;
	struct list_head attachments;
	struct mutex lock;
	unsigned long len;
	struct page *cma_pages;
	struct page **pages;
	pgoff_t pagecount;
	int vmap_cnt;
	void *vaddr;
};

struct dma_heap_attachment {
	struct device *dev;
	struct sg_table table;
	struct list_head list;
	bool mapped;
};

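/*
 * Build a per-attachment sg_table over the buffer's pages so that each
 * attached device can be mapped and synced independently.
 */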
static int cma_heap_attach(struct dma_buf *dmabuf,
			   struct dma_buf_attachment *attachment)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;
	int ret;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
					buffer->pagecount, 0,
					buffer->pagecount << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret) {
		kfree(a);
		return ret;
	}

	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);
	a->mapped = false;

	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void cma_heap_detach(struct dma_buf *dmabuf,
			    struct dma_buf_attachment *attachment)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a = attachment->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);

	sg_free_table(&a->table);
	kfree(a);
}

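/* Map/unmap the attachment's sg_table for DMA by the attaching device. */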
static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	struct sg_table *table = &a->table;
	int ret;

	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
	if (ret)
		return ERR_PTR(-ENOMEM);
	a->mapped = true;
	return table;
}

static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *table,
				   enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;

	a->mapped = false;
	dma_unmap_sgtable(attachment->dev, table, direction, 0);
}

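/*
 * CPU access bracketing: sync all currently device-mapped attachments and
 * any kernel vmap of the buffer around CPU accesses.
 */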
static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					     enum dma_data_direction direction)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					   enum dma_data_direction direction)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		flush_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_device(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

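/*
 * Userspace mappings are PFN-based (VM_PFNMAP); the fault handler inserts
 * the PFN of the page backing the faulting offset.
 */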
static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct cma_heap_buffer *buffer = vma->vm_private_data;

	if (vmf->pgoff >= buffer->pagecount)
		return VM_FAULT_SIGBUS;

	return vmf_insert_pfn(vma, vmf->address, page_to_pfn(buffer->pages[vmf->pgoff]));
}

static const struct vm_operations_struct dma_heap_vm_ops = {
	.fault = cma_heap_vm_fault,
};

static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);

	vma->vm_ops = &dma_heap_vm_ops;
	vma->vm_private_data = buffer;

	return 0;
}

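/* Kernel mapping: vmap() the page array, refcounted via vmap_cnt. */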
static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer)
{
	void *vaddr;

	vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

static int cma_heap_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	void *vaddr;
	int ret = 0;

	mutex_lock(&buffer->lock);
	if (buffer->vmap_cnt) {
		buffer->vmap_cnt++;
		iosys_map_set_vaddr(map, buffer->vaddr);
		goto out;
	}

	vaddr = cma_heap_do_vmap(buffer);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto out;
	}
	buffer->vaddr = vaddr;
	buffer->vmap_cnt++;
	iosys_map_set_vaddr(map, buffer->vaddr);
out:
	mutex_unlock(&buffer->lock);

	return ret;
}

static void cma_heap_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	if (!--buffer->vmap_cnt) {
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
	iosys_map_clear(map);
}

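/* Drop any leftover kernel mapping and return the buffer's pages to CMA. */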
static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct cma_heap *cma_heap = buffer->heap;

	if (buffer->vmap_cnt > 0) {
		WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}

	/* free page list */
	kfree(buffer->pages);
	/* release memory */
	cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
	kfree(buffer);
}

static const struct dma_buf_ops cma_heap_buf_ops = {
	.attach = cma_heap_attach,
	.detach = cma_heap_detach,
	.map_dma_buf = cma_heap_map_dma_buf,
	.unmap_dma_buf = cma_heap_unmap_dma_buf,
	.begin_cpu_access = cma_heap_dma_buf_begin_cpu_access,
	.end_cpu_access = cma_heap_dma_buf_end_cpu_access,
	.mmap = cma_heap_mmap,
	.vmap = cma_heap_vmap,
	.vunmap = cma_heap_vunmap,
	.release = cma_heap_dma_buf_release,
};

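/*
 * Allocate a physically contiguous, zeroed buffer from the heap's CMA area,
 * build the page array used by the map/mmap/vmap paths, and export it as a
 * dma-buf.
 */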
static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
					 unsigned long len,
					 u32 fd_flags,
					 u64 heap_flags)
{
	struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
	struct cma_heap_buffer *buffer;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	size_t size = PAGE_ALIGN(len);
	pgoff_t pagecount = size >> PAGE_SHIFT;
	unsigned long align = get_order(size);
	struct page *cma_pages;
	struct dma_buf *dmabuf;
	int ret = -ENOMEM;
	pgoff_t pg;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	buffer->len = size;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	cma_pages = cma_alloc(cma_heap->cma, pagecount, align, false);
	if (!cma_pages)
		goto free_buffer;

	/* Clear the cma pages */
	if (PageHighMem(cma_pages)) {
		unsigned long nr_clear_pages = pagecount;
		struct page *page = cma_pages;

		while (nr_clear_pages > 0) {
			clear_highpage(page);
			/*
			 * Avoid wasting time zeroing memory if the process
			 * has been killed by SIGKILL.
			 */
			if (fatal_signal_pending(current))
				goto free_cma;
			page++;
			nr_clear_pages--;
		}
	} else {
		memset(page_address(cma_pages), 0, size);
	}

	buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages),
				      GFP_KERNEL);
	if (!buffer->pages) {
		ret = -ENOMEM;
		goto free_cma;
	}

	for (pg = 0; pg < pagecount; pg++)
		buffer->pages[pg] = &cma_pages[pg];

	buffer->cma_pages = cma_pages;
	buffer->heap = cma_heap;
	buffer->pagecount = pagecount;

	/* create the dmabuf */
	exp_info.exp_name = dma_heap_get_name(heap);
	exp_info.ops = &cma_heap_buf_ops;
	exp_info.size = buffer->len;
	exp_info.flags = fd_flags;
	exp_info.priv = buffer;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pages;
	}
	return dmabuf;

free_pages:
	kfree(buffer->pages);
free_cma:
	cma_release(cma_heap->cma, cma_pages, pagecount);
free_buffer:
	kfree(buffer);

	return ERR_PTR(ret);
}

static const struct dma_heap_ops cma_heap_ops = {
	.allocate = cma_heap_allocate,
};

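/* Register one dma-heap backed by the given CMA area under @name. */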
static int __init __add_cma_heap(struct cma *cma, const char *name)
{
	struct dma_heap_export_info exp_info;
	struct cma_heap *cma_heap;

	cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
	if (!cma_heap)
		return -ENOMEM;
	cma_heap->cma = cma;

	exp_info.name = name;
	exp_info.ops = &cma_heap_ops;
	exp_info.priv = cma_heap;

	cma_heap->heap = dma_heap_add(&exp_info);
	if (IS_ERR(cma_heap->heap)) {
		int ret = PTR_ERR(cma_heap->heap);

		kfree(cma_heap);
		return ret;
	}

	return 0;
}

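/*
 * Add a heap for the default CMA area (if any) under a fixed name, and one
 * for each CMA area enumerated by dma_contiguous_get_area_by_idx().
 */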
static int __init add_cma_heaps(void)
{
	struct cma *default_cma = dev_get_cma_area(NULL);
	struct cma *cma;
	unsigned int i;
	int ret;

	if (default_cma) {
		ret = __add_cma_heap(default_cma, DEFAULT_CMA_NAME);
		if (ret)
			return ret;
	}

	for (i = 0; (cma = dma_contiguous_get_area_by_idx(i)) != NULL; i++) {
		ret = __add_cma_heap(cma, cma_get_name(cma));
		if (ret) {
			pr_warn("Failed to add CMA heap %s\n", cma_get_name(cma));
			continue;
		}
	}

	return 0;
}
module_init(add_cma_heaps);
MODULE_DESCRIPTION("DMA-BUF CMA Heap");