// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF System heap exporter
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (C) 2019, 2020 Linaro Ltd.
 *
 * Portions based off of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 *	Andrew F. Davis <afd@ti.com>
 */

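/*
 * Userspace consumes this heap through the DMA-BUF heaps UAPI: open the
 * heap's chardev and issue DMA_HEAP_IOCTL_ALLOC to receive a dma-buf fd.
 * A minimal sketch (hypothetical helper, error paths trimmed, assuming the
 * standard uapi <linux/dma-heap.h> header and that this heap registered
 * under the name "system"):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/dma-heap.h>
 *
 *	int alloc_system_dmabuf(size_t len)
 *	{
 *		struct dma_heap_allocation_data data = {
 *			.len = len,
 *			.fd_flags = O_RDWR | O_CLOEXEC,
 *		};
 *		int heap_fd, ret;
 *
 *		heap_fd = open("/dev/dma_heap/system", O_RDONLY | O_CLOEXEC);
 *		if (heap_fd < 0)
 *			return -1;
 *		ret = ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
 *		close(heap_fd);
 *		return ret < 0 ? -1 : (int)data.fd;	// dma-buf fd
 *	}
 */
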
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/dma-heap.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

struct system_heap_buffer {
	struct dma_heap *heap;
	struct list_head attachments;
	struct mutex lock;
	unsigned long len;
	struct sg_table sg_table;
	int vmap_cnt;
	void *vaddr;
};

struct dma_heap_attachment {
	struct device *dev;
	struct sg_table table;
	struct list_head list;
	bool mapped;
};

#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO)
#define HIGH_ORDER_GFP  (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
				| __GFP_NORETRY) & ~__GFP_RECLAIM) \
				| __GFP_COMP)
static gfp_t order_flags[] = {HIGH_ORDER_GFP, HIGH_ORDER_GFP, LOW_ORDER_GFP};
/*
 * The selection of the orders used for allocation (1MB, 64K, 4K) is designed
 * to match with the sizes often found in IOMMUs. Using order 4 pages instead
 * of order 0 pages can significantly improve the performance of many IOMMUs
 * by reducing TLB pressure and time spent updating page tables.
 */
static const unsigned int orders[] = {8, 4, 0};
#define NUM_ORDERS ARRAY_SIZE(orders)
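/*
 * For reference (assuming a 4K PAGE_SIZE), the orders above work out as:
 *	order 8: PAGE_SIZE << 8 = 1M
 *	order 4: PAGE_SIZE << 4 = 64K
 *	order 0: PAGE_SIZE << 0 = 4K
 * order_flags[] pairs each order with a gfp mask: the two high orders are
 * opportunistic (no reclaim, no warning on failure), so a failed large
 * allocation falls through to a smaller order, and only the order-0
 * fallback is allowed to enter reclaim.
 */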
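
/*
 * Duplicate an sg_table's page entries (but not any DMA mapping state) so
 * that each attachment gets its own table to map independently.
 */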
static int dup_sg_table(struct sg_table *from, struct sg_table *to)
{
	struct scatterlist *sg, *new_sg;
	int ret, i;

	ret = sg_alloc_table(to, from->orig_nents, GFP_KERNEL);
	if (ret)
		return ret;

	new_sg = to->sgl;
	for_each_sgtable_sg(from, sg, i) {
		sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
		new_sg = sg_next(new_sg);
	}

	return 0;
}

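/*
 * On attach, give the new attachment a private copy of the buffer's
 * scatterlist and track it on the buffer's attachment list so the
 * CPU-access hooks below can sync every mapped device.
 */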
static int system_heap_attach(struct dma_buf *dmabuf,
			      struct dma_buf_attachment *attachment)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;
	int ret;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	ret = dup_sg_table(&buffer->sg_table, &a->table);
	if (ret) {
		kfree(a);
		return ret;
	}

	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);
	a->mapped = false;

	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

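/* Undo system_heap_attach(): unlink the attachment and free its table. */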
static void system_heap_detach(struct dma_buf *dmabuf,
			       struct dma_buf_attachment *attachment)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a = attachment->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);

	sg_free_table(&a->table);
	kfree(a);
}

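/*
 * Map/unmap the attachment's private scatterlist for DMA. The "mapped"
 * flag gates the cache-sync paths so only live mappings are synced.
 */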
static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attachment,
						enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	struct sg_table *table = &a->table;
	int ret;

	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
	if (ret)
		return ERR_PTR(ret);

	a->mapped = true;
	return table;
}

static void system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
				      struct sg_table *table,
				      enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;

	a->mapped = false;
	dma_unmap_sgtable(attachment->dev, table, direction, 0);
}

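/*
 * begin/end_cpu_access bracket CPU reads and writes of the buffer: sync
 * every mapped attachment for the CPU (and back for the device), and keep
 * any kernel vmap alias coherent on architectures where that matters.
 */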
static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
						enum dma_data_direction direction)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					      enum dma_data_direction direction)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		flush_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_device(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

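/*
 * Map the buffer into userspace. The first loop skips whole scatterlist
 * entries to honour vma->vm_pgoff; the second remaps each (possibly
 * partial) entry until the VMA is fully covered.
 */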
static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct sg_table *table = &buffer->sg_table;
	unsigned long addr = vma->vm_start;
	unsigned long pgoff = vma->vm_pgoff;
	struct scatterlist *sg;
	int i, ret;

	for_each_sgtable_sg(table, sg, i) {
		unsigned long n = sg->length >> PAGE_SHIFT;

		if (pgoff < n)
			break;
		pgoff -= n;
	}

	for (; sg && addr < vma->vm_end; sg = sg_next(sg)) {
		unsigned long n = (sg->length >> PAGE_SHIFT) - pgoff;
		struct page *page = sg_page(sg) + pgoff;
		unsigned long size = n << PAGE_SHIFT;

		if (addr + size > vma->vm_end)
			size = vma->vm_end - addr;

		ret = remap_pfn_range(vma, addr, page_to_pfn(page),
				      size, vma->vm_page_prot);
		if (ret)
			return ret;

		addr += size;
		pgoff = 0;
	}

	return 0;
}

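/*
 * Build a contiguous kernel mapping of the buffer. The page-pointer array
 * is only needed while vmap() runs, so it is freed immediately afterwards.
 * vmap_cnt refcounts the mapping so nested vmap/vunmap calls share it.
 */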
static void *system_heap_do_vmap(struct system_heap_buffer *buffer)
{
	struct sg_table *table = &buffer->sg_table;
	int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;
	struct sg_page_iter piter;
	void *vaddr;

	if (!pages)
		return ERR_PTR(-ENOMEM);

	for_each_sgtable_page(table, &piter, 0) {
		WARN_ON(tmp - pages >= npages);
		*tmp++ = sg_page_iter_page(&piter);
	}

	vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
	vfree(pages);

	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

static int system_heap_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	void *vaddr;
	int ret = 0;

	mutex_lock(&buffer->lock);
	if (buffer->vmap_cnt) {
		buffer->vmap_cnt++;
		iosys_map_set_vaddr(map, buffer->vaddr);
		goto out;
	}

	vaddr = system_heap_do_vmap(buffer);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto out;
	}

	buffer->vaddr = vaddr;
	buffer->vmap_cnt++;
	iosys_map_set_vaddr(map, buffer->vaddr);
out:
	mutex_unlock(&buffer->lock);

	return ret;
}

static void system_heap_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct system_heap_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	if (!--buffer->vmap_cnt) {
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
	iosys_map_clear(map);
}

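/*
 * Called when the last reference to the dma-buf is dropped: free every
 * page at the compound order it was allocated with, then the table and
 * the buffer itself.
 */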
static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct sg_table *table;
	struct scatterlist *sg;
	int i;

	table = &buffer->sg_table;
	for_each_sgtable_sg(table, sg, i) {
		struct page *page = sg_page(sg);

		__free_pages(page, compound_order(page));
	}
	sg_free_table(table);
	kfree(buffer);
}

static const struct dma_buf_ops system_heap_buf_ops = {
	.attach = system_heap_attach,
	.detach = system_heap_detach,
	.map_dma_buf = system_heap_map_dma_buf,
	.unmap_dma_buf = system_heap_unmap_dma_buf,
	.begin_cpu_access = system_heap_dma_buf_begin_cpu_access,
	.end_cpu_access = system_heap_dma_buf_end_cpu_access,
	.mmap = system_heap_mmap,
	.vmap = system_heap_vmap,
	.vunmap = system_heap_vunmap,
	.release = system_heap_dma_buf_release,
};

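/*
 * Try each allowed order from largest to smallest, skipping orders that
 * exceed the remaining size or the caller's max_order cap, and return the
 * first page the allocator can actually satisfy.
 */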
static struct page *alloc_largest_available(unsigned long size,
					    unsigned int max_order)
{
	struct page *page;
	int i;

	for (i = 0; i < NUM_ORDERS; i++) {
		if (size < (PAGE_SIZE << orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		page = alloc_pages(order_flags[i], orders[i]);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}

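/*
 * Heap allocation entry point: gather pages (largest orders first) until
 * len bytes are covered, build the buffer's sg_table from them, and wrap
 * the result in a dma-buf. max_order only ever decreases, so the page
 * list ends up ordered from largest to smallest allocation.
 */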
static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
					    unsigned long len,
					    u32 fd_flags,
					    u64 heap_flags)
{
	struct system_heap_buffer *buffer;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	unsigned long size_remaining = len;
	unsigned int max_order = orders[0];
	struct dma_buf *dmabuf;
	struct sg_table *table;
	struct scatterlist *sg;
	struct list_head pages;
	struct page *page, *tmp_page;
	int i, ret = -ENOMEM;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	buffer->heap = heap;
	buffer->len = len;

	INIT_LIST_HEAD(&pages);
	i = 0;
	while (size_remaining > 0) {
		/*
		 * Avoid trying to allocate memory if the process
		 * has been killed by SIGKILL
		 */
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto free_buffer;
		}

		page = alloc_largest_available(size_remaining, max_order);
		if (!page)
			goto free_buffer;

		list_add_tail(&page->lru, &pages);
		size_remaining -= page_size(page);
		max_order = compound_order(page);
		i++;
	}

	table = &buffer->sg_table;
	if (sg_alloc_table(table, i, GFP_KERNEL))
		goto free_buffer;

	sg = table->sgl;
	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
		sg_set_page(sg, page, page_size(page), 0);
		sg = sg_next(sg);
		list_del(&page->lru);
	}

	/* create the dmabuf */
	exp_info.exp_name = dma_heap_get_name(heap);
	exp_info.ops = &system_heap_buf_ops;
	exp_info.size = buffer->len;
	exp_info.flags = fd_flags;
	exp_info.priv = buffer;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pages;
	}
	return dmabuf;

free_pages:
	for_each_sgtable_sg(table, sg, i) {
		struct page *p = sg_page(sg);

		__free_pages(p, compound_order(p));
	}
	sg_free_table(table);
free_buffer:
	list_for_each_entry_safe(page, tmp_page, &pages, lru)
		__free_pages(page, compound_order(page));
	kfree(buffer);

	return ERR_PTR(ret);
}

static const struct dma_heap_ops system_heap_ops = {
	.allocate = system_heap_allocate,
};

static int __init system_heap_create(void)
{
	struct dma_heap_export_info exp_info;
	struct dma_heap *sys_heap;

	exp_info.name = "system";
	exp_info.ops = &system_heap_ops;
	exp_info.priv = NULL;

	sys_heap = dma_heap_add(&exp_info);
	if (IS_ERR(sys_heap))
		return PTR_ERR(sys_heap);

	return 0;
}
module_init(system_heap_create);