xref: /linux/drivers/dma-buf/heaps/system_heap.c (revision 25489a4f556414445d342951615178368ee45cde)
// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF System heap exporter
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (C) 2019, 2020 Linaro Ltd.
 *
 * Portions based off of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 *	Andrew F. Davis <afd@ti.com>
 */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/dma-heap.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

struct system_heap_buffer {
	struct dma_heap *heap;
	struct list_head attachments;
	struct mutex lock;
	unsigned long len;
	struct sg_table sg_table;
	int vmap_cnt;
	void *vaddr;
};

struct dma_heap_attachment {
	struct device *dev;
	struct sg_table *table;
	struct list_head list;
	bool mapped;
};

#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO)
#define HIGH_ORDER_GFP  (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
				| __GFP_NORETRY) & ~__GFP_RECLAIM) \
				| __GFP_COMP)
static gfp_t order_flags[] = {HIGH_ORDER_GFP, HIGH_ORDER_GFP, LOW_ORDER_GFP};
/*
 * The selection of the orders used for allocation (1MB, 64K, 4K) is designed
 * to match with the sizes often found in IOMMUs. Using order 4 pages instead
 * of order 0 pages can significantly improve the performance of many IOMMUs
 * by reducing TLB pressure and time spent updating page tables.
 */
static const unsigned int orders[] = {8, 4, 0};
#define NUM_ORDERS ARRAY_SIZE(orders)
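
/*
 * Worked example (hypothetical numbers, assuming PAGE_SIZE == 4K and that
 * every high-order allocation succeeds): for a request of 2MB + 20KB,
 * alloc_largest_available() below hands back two order-8 blocks (2 x 1MB),
 * then skips orders 8 and 4 because only 20KB remain, and finishes with
 * five order-0 pages (5 x 4K). Under memory pressure the HIGH_ORDER_GFP
 * attempts may fail, in which case the remainder is simply filled with
 * more order-0 pages instead.
 */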

static struct sg_table *dup_sg_table(struct sg_table *table)
{
	struct sg_table *new_table;
	int ret, i;
	struct scatterlist *sg, *new_sg;

	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
	if (!new_table)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(new_table);
		return ERR_PTR(-ENOMEM);
	}

	new_sg = new_table->sgl;
	for_each_sgtable_sg(table, sg, i) {
		sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
		new_sg = sg_next(new_sg);
	}

	return new_table;
}

static int system_heap_attach(struct dma_buf *dmabuf,
			      struct dma_buf_attachment *attachment)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;
	struct sg_table *table;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	table = dup_sg_table(&buffer->sg_table);
	if (IS_ERR(table)) {
		kfree(a);
		return -ENOMEM;
	}

	a->table = table;
	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);
	a->mapped = false;

	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void system_heap_detach(struct dma_buf *dmabuf,
			       struct dma_buf_attachment *attachment)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a = attachment->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);

	sg_free_table(a->table);
	kfree(a->table);
	kfree(a);
}

static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attachment,
						enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	struct sg_table *table = a->table;
	int ret;

	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
	if (ret)
		return ERR_PTR(ret);

	a->mapped = true;
	return table;
}

static void system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
				      struct sg_table *table,
				      enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;

	a->mapped = false;
	dma_unmap_sgtable(attachment->dev, table, direction, 0);
}
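
/*
 * Sketch of how an importing driver typically reaches the attach/map
 * callbacks above (illustrative only; "importer_dev" and "dmabuf" are
 * placeholders from the importer, locking and error handling trimmed,
 * recent kernels also offer *_unlocked mapping variants):
 *
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *sgt;
 *
 *	attach = dma_buf_attach(dmabuf, importer_dev);	// -> system_heap_attach()
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *							// -> system_heap_map_dma_buf()
 *	// ... hand sgt to the device ...
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);
 */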

static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
						enum dma_data_direction direction)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					      enum dma_data_direction direction)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		flush_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_device(a->dev, a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}
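
/*
 * The two callbacks above are what userspace hits through the
 * DMA_BUF_IOCTL_SYNC ioctl when it accesses an mmap()ed buffer with the
 * CPU. A rough userspace sketch (illustrative only; "buf_fd" is assumed
 * to be a dma-buf fd exported by this heap, error handling trimmed):
 *
 *	struct dma_buf_sync sync = {
 *		.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW,
 *	};
 *	ioctl(buf_fd, DMA_BUF_IOCTL_SYNC, &sync);	// begin_cpu_access
 *	// ... CPU reads/writes the mapping ...
 *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
 *	ioctl(buf_fd, DMA_BUF_IOCTL_SYNC, &sync);	// end_cpu_access
 */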

static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct sg_table *table = &buffer->sg_table;
	unsigned long addr = vma->vm_start;
	struct sg_page_iter piter;
	int ret;

	for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
		struct page *page = sg_page_iter_page(&piter);

		ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE,
				      vma->vm_page_prot);
		if (ret)
			return ret;
		addr += PAGE_SIZE;
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}

static void *system_heap_do_vmap(struct system_heap_buffer *buffer)
{
	struct sg_table *table = &buffer->sg_table;
	int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;
	struct sg_page_iter piter;
	void *vaddr;

	if (!pages)
		return ERR_PTR(-ENOMEM);

	for_each_sgtable_page(table, &piter, 0) {
		WARN_ON(tmp - pages >= npages);
		*tmp++ = sg_page_iter_page(&piter);
	}

	vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
	vfree(pages);

	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

static int system_heap_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	void *vaddr;
	int ret = 0;

	mutex_lock(&buffer->lock);
	if (buffer->vmap_cnt) {
		buffer->vmap_cnt++;
		iosys_map_set_vaddr(map, buffer->vaddr);
		goto out;
	}

	vaddr = system_heap_do_vmap(buffer);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto out;
	}

	buffer->vaddr = vaddr;
	buffer->vmap_cnt++;
	iosys_map_set_vaddr(map, buffer->vaddr);
out:
	mutex_unlock(&buffer->lock);

	return ret;
}

static void system_heap_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct system_heap_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	if (!--buffer->vmap_cnt) {
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
	iosys_map_clear(map);
}
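
/*
 * Kernel importers reach the vmap/vunmap callbacks through the dma-buf
 * core, roughly as below (sketch only; "dmabuf" and "buffer_len" are
 * placeholders, locking is elided, and recent kernels also provide
 * *_unlocked variants):
 *
 *	struct iosys_map map;
 *
 *	if (!dma_buf_vmap(dmabuf, &map)) {
 *		memset(map.vaddr, 0, buffer_len);	// CPU access via the mapping
 *		dma_buf_vunmap(dmabuf, &map);
 *	}
 */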

static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct sg_table *table;
	struct scatterlist *sg;
	int i;

	table = &buffer->sg_table;
	for_each_sgtable_sg(table, sg, i) {
		struct page *page = sg_page(sg);

		__free_pages(page, compound_order(page));
	}
	sg_free_table(table);
	kfree(buffer);
}

static const struct dma_buf_ops system_heap_buf_ops = {
	.attach = system_heap_attach,
	.detach = system_heap_detach,
	.map_dma_buf = system_heap_map_dma_buf,
	.unmap_dma_buf = system_heap_unmap_dma_buf,
	.begin_cpu_access = system_heap_dma_buf_begin_cpu_access,
	.end_cpu_access = system_heap_dma_buf_end_cpu_access,
	.mmap = system_heap_mmap,
	.vmap = system_heap_vmap,
	.vunmap = system_heap_vunmap,
	.release = system_heap_dma_buf_release,
};

static struct page *alloc_largest_available(unsigned long size,
					    unsigned int max_order)
{
	struct page *page;
	int i;

	for (i = 0; i < NUM_ORDERS; i++) {
		if (size <  (PAGE_SIZE << orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		page = alloc_pages(order_flags[i], orders[i]);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}

static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
					    unsigned long len,
					    u32 fd_flags,
					    u64 heap_flags)
{
	struct system_heap_buffer *buffer;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	unsigned long size_remaining = len;
	unsigned int max_order = orders[0];
	struct dma_buf *dmabuf;
	struct sg_table *table;
	struct scatterlist *sg;
	struct list_head pages;
	struct page *page, *tmp_page;
	int i, ret = -ENOMEM;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	buffer->heap = heap;
	buffer->len = len;

	INIT_LIST_HEAD(&pages);
	i = 0;
	while (size_remaining > 0) {
		/*
		 * Avoid trying to allocate memory if the process
		 * has been killed by SIGKILL
		 */
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto free_buffer;
		}

		page = alloc_largest_available(size_remaining, max_order);
		if (!page)
			goto free_buffer;

		list_add_tail(&page->lru, &pages);
		size_remaining -= page_size(page);
		max_order = compound_order(page);
		i++;
	}

	table = &buffer->sg_table;
	if (sg_alloc_table(table, i, GFP_KERNEL))
		goto free_buffer;

	sg = table->sgl;
	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
		sg_set_page(sg, page, page_size(page), 0);
		sg = sg_next(sg);
		list_del(&page->lru);
	}

	/* create the dmabuf */
	exp_info.exp_name = dma_heap_get_name(heap);
	exp_info.ops = &system_heap_buf_ops;
	exp_info.size = buffer->len;
	exp_info.flags = fd_flags;
	exp_info.priv = buffer;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pages;
	}
	return dmabuf;

free_pages:
	for_each_sgtable_sg(table, sg, i) {
		struct page *p = sg_page(sg);

		__free_pages(p, compound_order(p));
	}
	sg_free_table(table);
free_buffer:
	list_for_each_entry_safe(page, tmp_page, &pages, lru)
		__free_pages(page, compound_order(page));
	kfree(buffer);

	return ERR_PTR(ret);
}
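
/*
 * Userspace reaches system_heap_allocate() through the heap's character
 * device. A rough usage sketch (illustrative only, error handling
 * trimmed):
 *
 *	struct dma_heap_allocation_data data = {
 *		.len = 4 * 1024 * 1024,
 *		.fd_flags = O_RDWR | O_CLOEXEC,
 *	};
 *	int heap_fd = open("/dev/dma_heap/system", O_RDONLY | O_CLOEXEC);
 *
 *	ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
 *	// data.fd now refers to a dma-buf backed by this heap
 */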

static const struct dma_heap_ops system_heap_ops = {
	.allocate = system_heap_allocate,
};

static int __init system_heap_create(void)
{
	struct dma_heap_export_info exp_info;
	struct dma_heap *sys_heap;

	exp_info.name = "system";
	exp_info.ops = &system_heap_ops;
	exp_info.priv = NULL;

	sys_heap = dma_heap_add(&exp_info);
	if (IS_ERR(sys_heap))
		return PTR_ERR(sys_heap);

	return 0;
}
module_init(system_heap_create);