xref: /linux/drivers/tee/tee_heap.c (revision 55a42f78ffd386e01a5404419f8c5ded7db70a21)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2025, Linaro Limited
 */

#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/genalloc.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/tee_core.h>
#include <linux/xarray.h>

#include "tee_private.h"

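/*
 * One instance per exported heap id. The initial kref is taken when a
 * driver registers a pool for the id and dropped again by
 * tee_device_put_all_dma_heaps(); every live buffer allocated from the
 * heap holds a further reference, so the pool and the TEE device stay
 * usable until the last buffer is freed.
 */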
struct tee_dma_heap {
	struct dma_heap *heap;
	enum tee_dma_heap_id id;
	struct kref kref;
	struct tee_protmem_pool *pool;
	struct tee_device *teedev;
	bool shutting_down;
	/* Protects pool, teedev, and shutting_down above */
	struct mutex mu;
};

struct tee_heap_buffer {
	struct tee_dma_heap *heap;
	size_t size;
	size_t offs;
	struct sg_table table;
};

struct tee_heap_attachment {
	struct sg_table table;
	struct device *dev;
};

struct tee_protmem_static_pool {
	struct tee_protmem_pool pool;
	struct gen_pool *gen_pool;
	phys_addr_t pa_base;
};

#if IS_ENABLED(CONFIG_TEE_DMABUF_HEAPS)
static DEFINE_XARRAY_ALLOC(tee_dma_heap);

static void tee_heap_release(struct kref *kref)
{
	struct tee_dma_heap *h = container_of(kref, struct tee_dma_heap, kref);

	h->pool->ops->destroy_pool(h->pool);
	tee_device_put(h->teedev);
	h->pool = NULL;
	h->teedev = NULL;
}

static void put_tee_heap(struct tee_dma_heap *h)
{
	kref_put(&h->kref, tee_heap_release);
}

static void get_tee_heap(struct tee_dma_heap *h)
{
	kref_get(&h->kref);
}

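/*
 * Duplicate the pages of an sg_table into a freshly allocated table,
 * without mapping anything for DMA. Each attachment works on its own
 * copy so that per-device DMA mappings stay independent of each other.
 */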
static int copy_sg_table(struct sg_table *dst, struct sg_table *src)
{
	struct scatterlist *dst_sg;
	struct scatterlist *src_sg;
	int ret;
	int i;

	ret = sg_alloc_table(dst, src->orig_nents, GFP_KERNEL);
	if (ret)
		return ret;

	dst_sg = dst->sgl;
	for_each_sgtable_sg(src, src_sg, i) {
		sg_set_page(dst_sg, sg_page(src_sg), src_sg->length,
			    src_sg->offset);
		dst_sg = sg_next(dst_sg);
	}

	return 0;
}

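/*
 * DMA-buf callbacks: attach/detach manage the per-device sg_table copy
 * made above, map/unmap do the actual DMA mapping of that copy. The
 * mappings pass DMA_ATTR_SKIP_CPU_SYNC since the CPU isn't expected to
 * touch the protected pages, so no cache maintenance is needed.
 */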
static int tee_heap_attach(struct dma_buf *dmabuf,
			   struct dma_buf_attachment *attachment)
{
	struct tee_heap_buffer *buf = dmabuf->priv;
	struct tee_heap_attachment *a;
	int ret;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	ret = copy_sg_table(&a->table, &buf->table);
	if (ret) {
		kfree(a);
		return ret;
	}

	a->dev = attachment->dev;
	attachment->priv = a;

	return 0;
}

static void tee_heap_detach(struct dma_buf *dmabuf,
			    struct dma_buf_attachment *attachment)
{
	struct tee_heap_attachment *a = attachment->priv;

	sg_free_table(&a->table);
	kfree(a);
}

static struct sg_table *
tee_heap_map_dma_buf(struct dma_buf_attachment *attachment,
		     enum dma_data_direction direction)
{
	struct tee_heap_attachment *a = attachment->priv;
	int ret;

	ret = dma_map_sgtable(attachment->dev, &a->table, direction,
			      DMA_ATTR_SKIP_CPU_SYNC);
	if (ret)
		return ERR_PTR(ret);

	return &a->table;
}

static void tee_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *table,
				   enum dma_data_direction direction)
{
	struct tee_heap_attachment *a = attachment->priv;

	WARN_ON(&a->table != table);

	dma_unmap_sgtable(attachment->dev, table, direction,
			  DMA_ATTR_SKIP_CPU_SYNC);
}

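/*
 * Runs when the last reference to the DMA-buf is dropped: hand the
 * pages back to the protected-memory pool, then release the heap
 * reference taken in tee_dma_heap_alloc().
 */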
static void tee_heap_buf_free(struct dma_buf *dmabuf)
{
	struct tee_heap_buffer *buf = dmabuf->priv;

	buf->heap->pool->ops->free(buf->heap->pool, &buf->table);
	mutex_lock(&buf->heap->mu);
	put_tee_heap(buf->heap);
	mutex_unlock(&buf->heap->mu);
	kfree(buf);
}

static const struct dma_buf_ops tee_heap_buf_ops = {
	.attach = tee_heap_attach,
	.detach = tee_heap_detach,
	.map_dma_buf = tee_heap_map_dma_buf,
	.unmap_dma_buf = tee_heap_unmap_dma_buf,
	.release = tee_heap_buf_free,
};

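/*
 * Allocate a buffer for user space on behalf of the DMA-heap framework.
 * The heap reference is taken under the mutex so that a concurrent
 * tee_device_put_all_dma_heaps() cannot tear the pool down while the
 * allocation is in flight; a NULL h->teedev means no driver currently
 * backs this heap.
 */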
static struct dma_buf *tee_dma_heap_alloc(struct dma_heap *heap,
					  unsigned long len, u32 fd_flags,
					  u64 heap_flags)
{
	struct tee_dma_heap *h = dma_heap_get_drvdata(heap);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct tee_device *teedev = NULL;
	struct tee_heap_buffer *buf;
	struct tee_protmem_pool *pool;
	struct dma_buf *dmabuf;
	int rc;

	mutex_lock(&h->mu);
	if (h->teedev) {
		teedev = h->teedev;
		pool = h->pool;
		get_tee_heap(h);
	}
	mutex_unlock(&h->mu);

	if (!teedev)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf) {
		dmabuf = ERR_PTR(-ENOMEM);
		goto err;
	}
	buf->size = len;
	buf->heap = h;

	rc = pool->ops->alloc(pool, &buf->table, len, &buf->offs);
	if (rc) {
		dmabuf = ERR_PTR(rc);
		goto err_kfree;
	}

	exp_info.ops = &tee_heap_buf_ops;
	exp_info.size = len;
	exp_info.priv = buf;
	exp_info.flags = fd_flags;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf))
		goto err_protmem_free;

	return dmabuf;

err_protmem_free:
	pool->ops->free(pool, &buf->table);
err_kfree:
	kfree(buf);
err:
	mutex_lock(&h->mu);
	put_tee_heap(h);
	mutex_unlock(&h->mu);
	return dmabuf;
}

static const struct dma_heap_ops tee_dma_heap_ops = {
	.allocate = tee_dma_heap_alloc,
};

static const char *heap_id_2_name(enum tee_dma_heap_id id)
{
	switch (id) {
	case TEE_DMA_HEAP_SECURE_VIDEO_PLAY:
		return "protected,secure-video";
	case TEE_DMA_HEAP_TRUSTED_UI:
		return "protected,trusted-ui";
	case TEE_DMA_HEAP_SECURE_VIDEO_RECORD:
		return "protected,secure-video-record";
	default:
		return NULL;
	}
}

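/*
 * First-time setup of a heap id: the xarray slot is reserved up front
 * so that the final xa_store() cannot fail with -ENOMEM. A tee_dma_heap
 * entry is never removed once created; unregistering only detaches the
 * pool and TEE device so that a later driver can reuse the heap name
 * through tee_device_register_dma_heap().
 */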
static int alloc_dma_heap(struct tee_device *teedev, enum tee_dma_heap_id id,
			  struct tee_protmem_pool *pool)
{
	struct dma_heap_export_info exp_info = {
		.ops = &tee_dma_heap_ops,
		.name = heap_id_2_name(id),
	};
	struct tee_dma_heap *h;
	int rc;

	if (!exp_info.name)
		return -EINVAL;

	if (xa_reserve(&tee_dma_heap, id, GFP_KERNEL)) {
		/* Distinguish an already populated slot from memory pressure */
		if (xa_load(&tee_dma_heap, id))
			return -EEXIST;
		return -ENOMEM;
	}

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return -ENOMEM;
	h->id = id;
	kref_init(&h->kref);
	h->teedev = teedev;
	h->pool = pool;
	mutex_init(&h->mu);

	exp_info.priv = h;
	h->heap = dma_heap_add(&exp_info);
	if (IS_ERR(h->heap)) {
		rc = PTR_ERR(h->heap);
		kfree(h);

		return rc;
	}

	/* "can't fail" due to the call to xa_reserve() above */
	return WARN_ON(xa_is_err(xa_store(&tee_dma_heap, id, h, GFP_KERNEL)));
}

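/**
 * tee_device_register_dma_heap() - expose a protected memory pool as a
 * DMA heap
 * @teedev:	TEE device that supplies @pool
 * @id:		heap id, also selects the exported heap name
 * @pool:	protected memory pool to allocate from
 *
 * A reference is taken on @teedev and is held, together with @pool,
 * until tee_device_put_all_dma_heaps() has been called and the last
 * buffer from the heap has been freed. Each heap id can only be backed
 * by one device at a time; a second registration fails with -EBUSY.
 *
 * A minimal sketch of a driver probe path (error unwinding trimmed,
 * my_teedev/my_paddr/my_size are illustrative placeholders):
 *
 *	pool = tee_protmem_static_pool_alloc(my_paddr, my_size);
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 *	rc = tee_device_register_dma_heap(my_teedev,
 *					  TEE_DMA_HEAP_SECURE_VIDEO_PLAY,
 *					  pool);
 *	if (rc)
 *		pool->ops->destroy_pool(pool);
 */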
int tee_device_register_dma_heap(struct tee_device *teedev,
				 enum tee_dma_heap_id id,
				 struct tee_protmem_pool *pool)
{
	struct tee_dma_heap *h;
	int rc;

	if (!tee_device_get(teedev))
		return -EINVAL;

	h = xa_load(&tee_dma_heap, id);
	if (h) {
		mutex_lock(&h->mu);
		if (h->teedev) {
			rc = -EBUSY;
		} else {
			kref_init(&h->kref);
			h->shutting_down = false;
			h->teedev = teedev;
			h->pool = pool;
			rc = 0;
		}
		mutex_unlock(&h->mu);
	} else {
		rc = alloc_dma_heap(teedev, id, pool);
	}

	if (rc) {
		tee_device_put(teedev);
		dev_err(&teedev->dev, "can't register DMA heap id %d (%s)\n",
			id, heap_id_2_name(id));
	}

	return rc;
}
EXPORT_SYMBOL_GPL(tee_device_register_dma_heap);

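/*
 * Drop the registration reference of every heap backed by @teedev. The
 * heaps stay visible to user space, but once the last outstanding
 * buffer is freed, tee_heap_release() destroys the pool and clears
 * h->teedev, after which allocations fail until a new pool is
 * registered for the id.
 */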
void tee_device_put_all_dma_heaps(struct tee_device *teedev)
{
	struct tee_dma_heap *h;
	u_long i;

	xa_for_each(&tee_dma_heap, i, h) {
		if (h) {
			mutex_lock(&h->mu);
			if (h->teedev == teedev && !h->shutting_down) {
				h->shutting_down = true;
				put_tee_heap(h);
			}
			mutex_unlock(&h->mu);
		}
	}
}
EXPORT_SYMBOL_GPL(tee_device_put_all_dma_heaps);

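/*
 * Translate a DMA-buf received from user space into TEE shared-memory
 * terms: check that it was exported by this heap on the same teedev,
 * then let the pool fill in the physical address. Pools that carve
 * buffers out of a larger parent allocation also return that parent
 * shm, in which case the buffer's offset within it is reported too.
 */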
int tee_heap_update_from_dma_buf(struct tee_device *teedev,
				 struct dma_buf *dmabuf, size_t *offset,
				 struct tee_shm *shm,
				 struct tee_shm **parent_shm)
{
	struct tee_heap_buffer *buf;
	int rc;

	/* The DMA-buf must be from our heap */
	if (dmabuf->ops != &tee_heap_buf_ops)
		return -EINVAL;

	buf = dmabuf->priv;
	/* The buffer must be from the same teedev */
	if (buf->heap->teedev != teedev)
		return -EINVAL;

	shm->size = buf->size;

	rc = buf->heap->pool->ops->update_shm(buf->heap->pool, &buf->table,
					      buf->offs, shm, parent_shm);
	if (!rc && *parent_shm)
		*offset = buf->offs;

	return rc;
}
#else
int tee_device_register_dma_heap(struct tee_device *teedev __always_unused,
				 enum tee_dma_heap_id id __always_unused,
				 struct tee_protmem_pool *pool __always_unused)
{
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(tee_device_register_dma_heap);

void
tee_device_put_all_dma_heaps(struct tee_device *teedev __always_unused)
{
}
EXPORT_SYMBOL_GPL(tee_device_put_all_dma_heaps);

int tee_heap_update_from_dma_buf(struct tee_device *teedev __always_unused,
				 struct dma_buf *dmabuf __always_unused,
				 size_t *offset __always_unused,
				 struct tee_shm *shm __always_unused,
				 struct tee_shm **parent_shm __always_unused)
{
	return -EINVAL;
}
#endif

static struct tee_protmem_static_pool *
to_protmem_static_pool(struct tee_protmem_pool *pool)
{
	return container_of(pool, struct tee_protmem_static_pool, pool);
}

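/*
 * Static pool implementation: a fixed, physically contiguous carve-out
 * is managed with a genalloc allocator in PAGE_SIZE granules. Every
 * allocation is one contiguous chunk, hence the single-entry sg_table.
 */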
static int protmem_pool_op_static_alloc(struct tee_protmem_pool *pool,
					struct sg_table *sgt, size_t size,
					size_t *offs)
{
	struct tee_protmem_static_pool *stp = to_protmem_static_pool(pool);
	phys_addr_t pa;
	int ret;

	pa = gen_pool_alloc(stp->gen_pool, size);
	if (!pa)
		return -ENOMEM;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (ret) {
		gen_pool_free(stp->gen_pool, pa, size);
		return ret;
	}

	sg_set_page(sgt->sgl, phys_to_page(pa), size, 0);
	*offs = pa - stp->pa_base;

	return 0;
}

static void protmem_pool_op_static_free(struct tee_protmem_pool *pool,
					struct sg_table *sgt)
{
	struct tee_protmem_static_pool *stp = to_protmem_static_pool(pool);
	struct scatterlist *sg;
	int i;

	for_each_sgtable_sg(sgt, sg, i)
		gen_pool_free(stp->gen_pool, sg_phys(sg), sg->length);
	sg_free_table(sgt);
}

static int protmem_pool_op_static_update_shm(struct tee_protmem_pool *pool,
					     struct sg_table *sgt, size_t offs,
					     struct tee_shm *shm,
					     struct tee_shm **parent_shm)
{
	struct tee_protmem_static_pool *stp = to_protmem_static_pool(pool);

	shm->paddr = stp->pa_base + offs;
	*parent_shm = NULL;

	return 0;
}

static void protmem_pool_op_static_destroy_pool(struct tee_protmem_pool *pool)
{
	struct tee_protmem_static_pool *stp = to_protmem_static_pool(pool);

	gen_pool_destroy(stp->gen_pool);
	kfree(stp);
}

static struct tee_protmem_pool_ops protmem_pool_ops_static = {
	.alloc = protmem_pool_op_static_alloc,
	.free = protmem_pool_op_static_free,
	.update_shm = protmem_pool_op_static_update_shm,
	.destroy_pool = protmem_pool_op_static_destroy_pool,
};

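/**
 * tee_protmem_static_pool_alloc() - manage a static protected memory
 * carve-out as a pool
 * @paddr:	physical base address, must be page aligned
 * @size:	size of the carve-out in bytes, must be page aligned
 *
 * The region must be covered by the kernel's memory map (pfn_valid())
 * so the sg_table entries can reference struct pages. Returns a pool
 * suitable for tee_device_register_dma_heap(), or an ERR_PTR.
 *
 * A sketch of how a driver might feed this from a reserved-memory node
 * (the "memory-region" lookup is illustrative, not mandated here):
 *
 *	np = of_parse_phandle(dev->of_node, "memory-region", 0);
 *	if (!np)
 *		return -EINVAL;
 *	rc = of_address_to_resource(np, 0, &res);
 *	of_node_put(np);
 *	if (rc)
 *		return rc;
 *	pool = tee_protmem_static_pool_alloc(res.start, resource_size(&res));
 */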
struct tee_protmem_pool *tee_protmem_static_pool_alloc(phys_addr_t paddr,
						       size_t size)
{
	const size_t page_mask = PAGE_SIZE - 1;
	struct tee_protmem_static_pool *stp;
	int rc;

	/* Check it's page aligned */
	if ((paddr | size) & page_mask)
		return ERR_PTR(-EINVAL);

	if (!pfn_valid(PHYS_PFN(paddr)))
		return ERR_PTR(-EINVAL);

	stp = kzalloc(sizeof(*stp), GFP_KERNEL);
	if (!stp)
		return ERR_PTR(-ENOMEM);

	stp->gen_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!stp->gen_pool) {
		rc = -ENOMEM;
		goto err_free;
	}

	rc = gen_pool_add(stp->gen_pool, paddr, size, -1);
	if (rc)
		goto err_free_pool;

	stp->pool.ops = &protmem_pool_ops_static;
	stp->pa_base = paddr;
	return &stp->pool;

err_free_pool:
	gen_pool_destroy(stp->gen_pool);
err_free:
	kfree(stp);

	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(tee_protmem_static_pool_alloc);
501