xref: /linux/drivers/dma-buf/heaps/system_heap.c (revision 5ea5880764cbb164afb17a62e76ca75dc371409d)
// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF System heap exporter
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (C) 2019, 2020 Linaro Ltd.
 *
 * Portions based off of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 *	Andrew F. Davis <afd@ti.com>
 */

#include <linux/cc_platform.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/dma-heap.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mem_encrypt.h>
#include <linux/mm.h>
#include <linux/set_memory.h>
#include <linux/module.h>
#include <linux/pgtable.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

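/*
 * Per-heap private data: cc_shared marks heaps whose buffers are shared
 * with (i.e. decrypted for) the host in a confidential-computing guest.
 */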
struct system_heap_priv {
	bool cc_shared;
};

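/*
 * Per-buffer state: the backing scatterlist, the list of device
 * attachments, and a refcounted kernel mapping (vmap_cnt/vaddr).
 * The attachment list and vmap refcount are protected by @lock.
 */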
struct system_heap_buffer {
	struct dma_heap *heap;
	struct list_head attachments;
	struct mutex lock;
	unsigned long len;
	struct sg_table sg_table;
	int vmap_cnt;
	void *vaddr;
	bool cc_shared;
};

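/*
 * One instance per device attachment; holds that device's private copy
 * of the buffer's scatterlist so per-device DMA addresses don't clash.
 */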
struct dma_heap_attachment {
	struct device *dev;
	struct sg_table table;
	struct list_head list;
	bool mapped;
	bool cc_shared;
};

#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO)
#define HIGH_ORDER_GFP  (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
				| __GFP_NORETRY) & ~__GFP_RECLAIM) \
				| __GFP_COMP)
static gfp_t order_flags[] = {HIGH_ORDER_GFP, HIGH_ORDER_GFP, LOW_ORDER_GFP};
/*
 * The selection of the orders used for allocation (1MB, 64K, 4K) is designed
 * to match with the sizes often found in IOMMUs. Using order 4 pages instead
 * of order 0 pages can significantly improve the performance of many IOMMUs
 * by reducing TLB pressure and time spent updating page tables.
 */
static const unsigned int orders[] = {8, 4, 0};
#define NUM_ORDERS ARRAY_SIZE(orders)

/*
 * When set, buffer pages are charged to the allocating process's memory
 * cgroup via __GFP_ACCOUNT in alloc_largest_available(). The flag was
 * undeclared here; a boolean module parameter is assumed for how it is set.
 */
static bool mem_accounting;
module_param(mem_accounting, bool, 0444);

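/*
 * Flip a (possibly compound) page between the guest-private (encrypted)
 * and shared (decrypted) states of a confidential-computing platform.
 * Both helpers rely on page_address(), i.e. on a linear mapping, which
 * is why the cc_shared heap is not registered on HIGHMEM configs.
 */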
static int system_heap_set_page_decrypted(struct page *page)
{
	unsigned long addr = (unsigned long)page_address(page);
	unsigned int nr_pages = 1 << compound_order(page);
	int ret;

	ret = set_memory_decrypted(addr, nr_pages);
	if (ret)
		pr_warn_ratelimited("dma-buf system heap: failed to decrypt page at %p\n",
				    page_address(page));

	return ret;
}

static int system_heap_set_page_encrypted(struct page *page)
{
	unsigned long addr = (unsigned long)page_address(page);
	unsigned int nr_pages = 1 << compound_order(page);
	int ret;

	ret = set_memory_encrypted(addr, nr_pages);
	if (ret)
		pr_warn_ratelimited("dma-buf system heap: failed to re-encrypt page at %p, leaking memory\n",
				    page_address(page));

	return ret;
}

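/*
 * Copy the pages of one scatterlist into a freshly allocated one. Only
 * page/length/offset are copied; DMA addresses are filled in later when
 * dma_map_sgtable() runs on the copy.
 */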
static int dup_sg_table(struct sg_table *from, struct sg_table *to)
{
	struct scatterlist *sg, *new_sg;
	int ret, i;

	ret = sg_alloc_table(to, from->orig_nents, GFP_KERNEL);
	if (ret)
		return ret;

	new_sg = to->sgl;
	for_each_sgtable_sg(from, sg, i) {
		sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
		new_sg = sg_next(new_sg);
	}

	return 0;
}

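/*
 * On attach, give the device its own copy of the buffer's scatterlist
 * and track it on the buffer's attachment list so CPU-access ops can
 * sync every mapped device; detach undoes both.
 */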
static int system_heap_attach(struct dma_buf *dmabuf,
			      struct dma_buf_attachment *attachment)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;
	int ret;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	ret = dup_sg_table(&buffer->sg_table, &a->table);
	if (ret) {
		kfree(a);
		return ret;
	}

	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);
	a->mapped = false;
	a->cc_shared = buffer->cc_shared;

	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void system_heap_detach(struct dma_buf *dmabuf,
			       struct dma_buf_attachment *attachment)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a = attachment->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);

	sg_free_table(&a->table);
	kfree(a);
}

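/*
 * Map/unmap the attachment's private scatterlist for DMA. For cc_shared
 * buffers the mapping is flagged DMA_ATTR_CC_SHARED, telling the DMA
 * layer the pages are in the shared (decrypted) state.
 */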
static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attachment,
						enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	struct sg_table *table = &a->table;
	unsigned long attrs;
	int ret;

	attrs = a->cc_shared ? DMA_ATTR_CC_SHARED : 0;
	ret = dma_map_sgtable(attachment->dev, table, direction, attrs);
	if (ret)
		return ERR_PTR(ret);

	a->mapped = true;
	return table;
}

static void system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
				      struct sg_table *table,
				      enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;

	a->mapped = false;
	dma_unmap_sgtable(attachment->dev, table, direction, 0);
}

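/*
 * Cache maintenance around CPU access: sync every currently mapped
 * attachment, and invalidate/flush any kernel vmap of the buffer.
 */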
static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
						enum dma_data_direction direction)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					      enum dma_data_direction direction)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		flush_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_device(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

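/*
 * Map the buffer into userspace: skip vm_pgoff pages into the
 * scatterlist, then remap each (possibly partial) segment in turn.
 * cc_shared buffers get a decrypted pgprot so userspace sees the
 * shared view of the pages.
 */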
static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct sg_table *table = &buffer->sg_table;
	unsigned long addr = vma->vm_start;
	unsigned long pgoff = vma->vm_pgoff;
	struct scatterlist *sg;
	pgprot_t prot;
	int i, ret;

	prot = vma->vm_page_prot;
	if (buffer->cc_shared)
		prot = pgprot_decrypted(prot);

	for_each_sgtable_sg(table, sg, i) {
		unsigned long n = sg->length >> PAGE_SHIFT;

		if (pgoff < n)
			break;
		pgoff -= n;
	}

	for (; sg && addr < vma->vm_end; sg = sg_next(sg)) {
		unsigned long n = (sg->length >> PAGE_SHIFT) - pgoff;
		struct page *page = sg_page(sg) + pgoff;
		unsigned long size = n << PAGE_SHIFT;

		if (addr + size > vma->vm_end)
			size = vma->vm_end - addr;

		ret = remap_pfn_range(vma, addr, page_to_pfn(page), size, prot);
		if (ret)
			return ret;

		addr += size;
		pgoff = 0;
	}

	return 0;
}

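/*
 * Build a contiguous kernel mapping of the buffer by collecting every
 * backing page and vmap()ing them, decrypted for cc_shared buffers.
 */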
static void *system_heap_do_vmap(struct system_heap_buffer *buffer)
{
	struct sg_table *table = &buffer->sg_table;
	int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;
	struct sg_page_iter piter;
	pgprot_t prot;
	void *vaddr;

	if (!pages)
		return ERR_PTR(-ENOMEM);

	for_each_sgtable_page(table, &piter, 0) {
		WARN_ON(tmp - pages >= npages);
		*tmp++ = sg_page_iter_page(&piter);
	}

	prot = PAGE_KERNEL;
	if (buffer->cc_shared)
		prot = pgprot_decrypted(prot);
	vaddr = vmap(pages, npages, VM_MAP, prot);
	vfree(pages);

	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

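/*
 * vmap/vunmap are refcounted under buffer->lock so concurrent importers
 * share a single kernel mapping.
 */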
static int system_heap_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	void *vaddr;
	int ret = 0;

	mutex_lock(&buffer->lock);
	if (buffer->vmap_cnt) {
		buffer->vmap_cnt++;
		iosys_map_set_vaddr(map, buffer->vaddr);
		goto out;
	}

	vaddr = system_heap_do_vmap(buffer);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto out;
	}

	buffer->vaddr = vaddr;
	buffer->vmap_cnt++;
	iosys_map_set_vaddr(map, buffer->vaddr);
out:
	mutex_unlock(&buffer->lock);

	return ret;
}

static void system_heap_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct system_heap_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	if (!--buffer->vmap_cnt) {
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
	iosys_map_clear(map);
}

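/*
 * Final teardown when the last dma-buf reference goes away: cc_shared
 * pages are returned to the encrypted state before being freed. Pages
 * that fail re-encryption are leaked on purpose, since freeing memory
 * the host can still access would be unsafe.
 */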
static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct sg_table *table;
	struct scatterlist *sg;
	int i;

	table = &buffer->sg_table;
	for_each_sgtable_sg(table, sg, i) {
		struct page *page = sg_page(sg);

		/*
		 * Intentionally leak pages that cannot be re-encrypted
		 * to prevent shared memory from being reused.
		 */
		if (buffer->cc_shared &&
		    system_heap_set_page_encrypted(page))
			continue;

		__free_pages(page, compound_order(page));
	}
	sg_free_table(table);
	kfree(buffer);
}

static const struct dma_buf_ops system_heap_buf_ops = {
	.attach = system_heap_attach,
	.detach = system_heap_detach,
	.map_dma_buf = system_heap_map_dma_buf,
	.unmap_dma_buf = system_heap_unmap_dma_buf,
	.begin_cpu_access = system_heap_dma_buf_begin_cpu_access,
	.end_cpu_access = system_heap_dma_buf_end_cpu_access,
	.mmap = system_heap_mmap,
	.vmap = system_heap_vmap,
	.vunmap = system_heap_vunmap,
	.release = system_heap_dma_buf_release,
};

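/*
 * Grab the largest chunk (up to max_order) that still fits entirely in
 * the remaining size. Callers pass the last successful order back in as
 * max_order, so an order that already failed is never retried.
 */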
static struct page *alloc_largest_available(unsigned long size,
					    unsigned int max_order)
{
	struct page *page;
	int i;
	gfp_t flags;

	for (i = 0; i < NUM_ORDERS; i++) {
		if (size < (PAGE_SIZE << orders[i]))
			continue;
		if (max_order < orders[i])
			continue;
		flags = order_flags[i];
		if (mem_accounting)
			flags |= __GFP_ACCOUNT;
		page = alloc_pages(flags, orders[i]);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}

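/*
 * Allocate a buffer as a list of largest-fit page chunks, build the
 * scatterlist, share (decrypt) the pages if this is the cc_shared heap,
 * and export the result as a dma-buf.
 */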
static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
					    unsigned long len,
					    u32 fd_flags,
					    u64 heap_flags)
{
	struct system_heap_buffer *buffer;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	unsigned long size_remaining = len;
	unsigned int max_order = orders[0];
	struct system_heap_priv *priv = dma_heap_get_drvdata(heap);
	bool cc_shared = priv->cc_shared;
	struct dma_buf *dmabuf;
	struct sg_table *table;
	struct scatterlist *sg;
	struct list_head pages;
	struct page *page, *tmp_page;
	int i, ret = -ENOMEM;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	buffer->heap = heap;
	buffer->len = len;
	buffer->cc_shared = cc_shared;

	INIT_LIST_HEAD(&pages);
	i = 0;
	while (size_remaining > 0) {
		/*
		 * Avoid trying to allocate memory if the process
		 * has been killed by SIGKILL
		 */
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto free_buffer;
		}

		page = alloc_largest_available(size_remaining, max_order);
		if (!page)
			goto free_buffer;

		list_add_tail(&page->lru, &pages);
		size_remaining -= page_size(page);
		max_order = compound_order(page);
		i++;
	}

	table = &buffer->sg_table;
	if (sg_alloc_table(table, i, GFP_KERNEL))
		goto free_buffer;

	sg = table->sgl;
	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
		sg_set_page(sg, page, page_size(page), 0);
		sg = sg_next(sg);
		list_del(&page->lru);
	}

	if (cc_shared) {
		for_each_sgtable_sg(table, sg, i) {
			ret = system_heap_set_page_decrypted(sg_page(sg));
			if (ret)
				goto free_pages;
		}
	}

	/* create the dmabuf */
	exp_info.exp_name = dma_heap_get_name(heap);
	exp_info.ops = &system_heap_buf_ops;
	exp_info.size = buffer->len;
	exp_info.flags = fd_flags;
	exp_info.priv = buffer;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pages;
	}
	return dmabuf;

free_pages:
	for_each_sgtable_sg(table, sg, i) {
		struct page *p = sg_page(sg);

		/*
		 * Intentionally leak pages that cannot be re-encrypted
		 * to prevent shared memory from being reused.
		 */
		if (buffer->cc_shared &&
		    system_heap_set_page_encrypted(p))
			continue;
		__free_pages(p, compound_order(p));
	}
	sg_free_table(table);
free_buffer:
	list_for_each_entry_safe(page, tmp_page, &pages, lru)
		__free_pages(page, compound_order(page));
	kfree(buffer);

	return ERR_PTR(ret);
}

static const struct dma_heap_ops system_heap_ops = {
	.allocate = system_heap_allocate,
};

static struct system_heap_priv system_heap_priv = {
	.cc_shared = false,
};

static struct system_heap_priv system_heap_cc_shared_priv = {
	.cc_shared = true,
};

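/*
 * Register the plain "system" heap unconditionally; additionally
 * register "system_cc_shared" when running on a memory-encrypted
 * platform, and never on HIGHMEM configs, where set_memory_*() cannot
 * operate on the heap's pages.
 */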
static int __init system_heap_create(void)
{
	struct dma_heap_export_info exp_info;
	struct dma_heap *sys_heap;

	exp_info.name = "system";
	exp_info.ops = &system_heap_ops;
	exp_info.priv = &system_heap_priv;

	sys_heap = dma_heap_add(&exp_info);
	if (IS_ERR(sys_heap))
		return PTR_ERR(sys_heap);

	if (IS_ENABLED(CONFIG_HIGHMEM) ||
	    !cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return 0;

	exp_info.name = "system_cc_shared";
	exp_info.priv = &system_heap_cc_shared_priv;
	sys_heap = dma_heap_add(&exp_info);
	if (IS_ERR(sys_heap))
		return PTR_ERR(sys_heap);

	return 0;
}
module_init(system_heap_create);
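
/*
 * Userspace usage sketch (not part of this module): buffers from either
 * heap are allocated through the dma-heap character device using
 * DMA_HEAP_IOCTL_ALLOC from <linux/dma-heap.h>. The device path below
 * is the conventional one but may differ per system.
 *
 *	int heap_fd = open("/dev/dma_heap/system", O_RDWR | O_CLOEXEC);
 *	struct dma_heap_allocation_data data = {
 *		.len = 1 << 20,
 *		.fd_flags = O_RDWR | O_CLOEXEC,
 *	};
 *
 *	if (heap_fd >= 0 && ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data) == 0) {
 *		// data.fd now holds the exported dma-buf: mmap() it or
 *		// pass it to a driver, then close() both fds when done.
 *	}
 */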