xref: /linux/drivers/tee/tee_shm.c (revision d6e290837e50f73f88f31f19bd8a7213d92e6e46)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2015-2017, 2019-2021 Linaro Limited
4  */
5 #include <linux/anon_inodes.h>
6 #include <linux/device.h>
7 #include <linux/dma-buf.h>
8 #include <linux/dma-mapping.h>
9 #include <linux/highmem.h>
10 #include <linux/idr.h>
11 #include <linux/io.h>
12 #include <linux/mm.h>
13 #include <linux/sched.h>
14 #include <linux/slab.h>
15 #include <linux/tee_core.h>
16 #include <linux/uaccess.h>
17 #include <linux/uio.h>
18 #include "tee_private.h"
19 
20 struct tee_shm_dma_mem {
21 	struct tee_shm shm;
22 	dma_addr_t dma_addr;
23 	struct page *page;
24 };
25 
26 static void shm_put_kernel_pages(struct page **pages, size_t page_count)
27 {
28 	size_t n;
29 
30 	for (n = 0; n < page_count; n++)
31 		put_page(pages[n]);
32 }
33 
34 static void shm_get_kernel_pages(struct page **pages, size_t page_count)
35 {
36 	size_t n;
37 
38 	for (n = 0; n < page_count; n++)
39 		get_page(pages[n]);
40 }
41 
42 static void release_registered_pages(struct tee_shm *shm)
43 {
44 	if (shm->pages) {
45 		if (shm->flags & TEE_SHM_USER_MAPPED)
46 			unpin_user_pages(shm->pages, shm->num_pages);
47 		else
48 			shm_put_kernel_pages(shm->pages, shm->num_pages);
49 
50 		kfree(shm->pages);
51 	}
52 }
53 
54 static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
55 {
56 	void *p = shm;
57 
58 	if (shm->flags & TEE_SHM_DMA_MEM) {
59 #if IS_ENABLED(CONFIG_TEE_DMABUF_HEAPS)
60 		struct tee_shm_dma_mem *dma_mem;
61 
62 		dma_mem = container_of(shm, struct tee_shm_dma_mem, shm);
63 		p = dma_mem;
64 		dma_free_pages(&teedev->dev, shm->size, dma_mem->page,
65 			       dma_mem->dma_addr, DMA_BIDIRECTIONAL);
66 #endif
67 	} else if (shm->flags & TEE_SHM_DMA_BUF) {
68 		struct tee_shm_dmabuf_ref *ref;
69 
70 		ref = container_of(shm, struct tee_shm_dmabuf_ref, shm);
71 		p = ref;
72 		dma_buf_put(ref->dmabuf);
73 	} else if (shm->flags & TEE_SHM_POOL) {
74 		teedev->pool->ops->free(teedev->pool, shm);
75 	} else if (shm->flags & TEE_SHM_DYNAMIC) {
76 		int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);
77 
78 		if (rc)
79 			dev_err(teedev->dev.parent,
80 				"unregister shm %p failed: %d", shm, rc);
81 
82 		release_registered_pages(shm);
83 	}
84 
85 	teedev_ctx_put(shm->ctx);
86 
87 	kfree(p);
88 
89 	tee_device_put(teedev);
90 }
91 
92 static struct tee_shm *shm_alloc_helper(struct tee_context *ctx, size_t size,
93 					size_t align, u32 flags, int id)
94 {
95 	struct tee_device *teedev = ctx->teedev;
96 	struct tee_shm *shm;
97 	void *ret;
98 	int rc;
99 
100 	if (!tee_device_get(teedev))
101 		return ERR_PTR(-EINVAL);
102 
103 	if (!teedev->pool) {
104 		/* teedev has been detached from driver */
105 		ret = ERR_PTR(-EINVAL);
106 		goto err_dev_put;
107 	}
108 
109 	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
110 	if (!shm) {
111 		ret = ERR_PTR(-ENOMEM);
112 		goto err_dev_put;
113 	}
114 
115 	refcount_set(&shm->refcount, 1);
116 	shm->flags = flags;
117 	shm->id = id;
118 
119 	/*
120 	 * We're assigning this as it is needed if the shm is to be
121 	 * registered. If this function returns OK, the caller is expected
122 	 * to call teedev_ctx_get() or to clear shm->ctx if it's no longer
123 	 * needed.
124 	 */
125 	shm->ctx = ctx;
126 
127 	rc = teedev->pool->ops->alloc(teedev->pool, shm, size, align);
128 	if (rc) {
129 		ret = ERR_PTR(rc);
130 		goto err_kfree;
131 	}
132 
133 	teedev_ctx_get(ctx);
134 	return shm;
135 err_kfree:
136 	kfree(shm);
137 err_dev_put:
138 	tee_device_put(teedev);
139 	return ret;
140 }
141 
142 /**
143  * tee_shm_alloc_user_buf() - Allocate shared memory for user space
144  * @ctx:	Context that allocates the shared memory
145  * @size:	Requested size of shared memory
146  *
147  * Memory allocated as user space shared memory is automatically freed when
148  * the TEE file pointer is closed. This function is primarily used when
149  * the TEE driver doesn't support registering ordinary user space
150  * memory.
151  *
152  * @returns a pointer to 'struct tee_shm' on success, and ERR_PTR on failure
153  */
154 struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size)
155 {
156 	u32 flags = TEE_SHM_DYNAMIC | TEE_SHM_POOL;
157 	struct tee_device *teedev = ctx->teedev;
158 	struct tee_shm *shm;
159 	void *ret;
160 	int id;
161 
162 	mutex_lock(&teedev->mutex);
163 	id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL);
164 	mutex_unlock(&teedev->mutex);
165 	if (id < 0)
166 		return ERR_PTR(id);
167 
168 	shm = shm_alloc_helper(ctx, size, PAGE_SIZE, flags, id);
169 	if (IS_ERR(shm)) {
170 		mutex_lock(&teedev->mutex);
171 		idr_remove(&teedev->idr, id);
172 		mutex_unlock(&teedev->mutex);
173 		return shm;
174 	}
175 
176 	mutex_lock(&teedev->mutex);
177 	ret = idr_replace(&teedev->idr, shm, id);
178 	mutex_unlock(&teedev->mutex);
179 	if (IS_ERR(ret)) {
180 		tee_shm_free(shm);
181 		return ret;
182 	}
183 
184 	return shm;
185 }
186 
187 /**
188  * tee_shm_alloc_kernel_buf() - Allocate shared memory for kernel buffer
189  * @ctx:	Context that allocates the shared memory
190  * @size:	Requested size of shared memory
191  *
192  * The returned memory is registered in the secure world and is suitable
193  * to be passed as a memory buffer in a parameter argument to
194  * tee_client_invoke_func(). The allocated memory is later freed with a
195  * call to tee_shm_free().
196  *
197  * @returns a pointer to 'struct tee_shm' on success, and ERR_PTR on failure
198  */
199 struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size)
200 {
201 	u32 flags = TEE_SHM_DYNAMIC | TEE_SHM_POOL;
202 
203 	return shm_alloc_helper(ctx, size, PAGE_SIZE, flags, -1);
204 }
205 EXPORT_SYMBOL_GPL(tee_shm_alloc_kernel_buf);
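
/*
 * Minimal usage sketch of tee_shm_alloc_kernel_buf() from a kernel-side TEE
 * client. The session handle, TA function ID and payload below are
 * hypothetical; tee_client_invoke_func() and the parameter types come from
 * <linux/tee_drv.h> and <uapi/linux/tee.h>.
 */
static int example_invoke_with_kernel_buf(struct tee_context *ctx, u32 session,
					  const void *data, size_t len)
{
	struct tee_ioctl_invoke_arg arg = { };
	struct tee_param param[4] = { };
	struct tee_shm *shm;
	void *va;
	int rc;

	shm = tee_shm_alloc_kernel_buf(ctx, len);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	va = tee_shm_get_va(shm, 0);
	if (IS_ERR(va)) {
		rc = PTR_ERR(va);
		goto out_free;
	}
	memcpy(va, data, len);

	arg.func = 0;		/* hypothetical TA function ID */
	arg.session = session;
	arg.num_params = 4;
	param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT;
	param[0].u.memref.shm = shm;
	param[0].u.memref.size = len;

	rc = tee_client_invoke_func(ctx, &arg, param);
	if (!rc && arg.ret)
		rc = -EIO;	/* map a TEE-level error to an errno */
out_free:
	tee_shm_free(shm);
	return rc;
}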
206 
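/**
 * tee_shm_register_fd() - Register a dma-buf file descriptor as shared memory
 * @ctx:	Context that registers the shared memory
 * @fd:		File descriptor of the dma-buf to register
 *
 * @returns a pointer to 'struct tee_shm' on success, and ERR_PTR on failure
 */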
207 struct tee_shm *tee_shm_register_fd(struct tee_context *ctx, int fd)
208 {
209 	struct tee_shm_dmabuf_ref *ref;
210 	int rc;
211 
212 	if (!tee_device_get(ctx->teedev))
213 		return ERR_PTR(-EINVAL);
214 
215 	teedev_ctx_get(ctx);
216 
217 	ref = kzalloc(sizeof(*ref), GFP_KERNEL);
218 	if (!ref) {
219 		rc = -ENOMEM;
220 		goto err_put_tee;
221 	}
222 
223 	refcount_set(&ref->shm.refcount, 1);
224 	ref->shm.ctx = ctx;
225 	ref->shm.id = -1;
226 	ref->shm.flags = TEE_SHM_DMA_BUF;
227 
228 	ref->dmabuf = dma_buf_get(fd);
229 	if (IS_ERR(ref->dmabuf)) {
230 		rc = PTR_ERR(ref->dmabuf);
231 		goto err_kfree_ref;
232 	}
233 
234 	rc = tee_heap_update_from_dma_buf(ctx->teedev, ref->dmabuf,
235 					  &ref->offset, &ref->shm,
236 					  &ref->parent_shm);
237 	if (rc)
238 		goto err_put_dmabuf;
239 
240 	mutex_lock(&ref->shm.ctx->teedev->mutex);
241 	ref->shm.id = idr_alloc(&ref->shm.ctx->teedev->idr, &ref->shm,
242 				1, 0, GFP_KERNEL);
243 	mutex_unlock(&ref->shm.ctx->teedev->mutex);
244 	if (ref->shm.id < 0) {
245 		rc = ref->shm.id;
246 		goto err_put_dmabuf;
247 	}
248 
249 	return &ref->shm;
250 
251 err_put_dmabuf:
252 	dma_buf_put(ref->dmabuf);
253 err_kfree_ref:
254 	kfree(ref);
255 err_put_tee:
256 	teedev_ctx_put(ctx);
257 	tee_device_put(ctx->teedev);
258 
259 	return ERR_PTR(rc);
260 }
261 EXPORT_SYMBOL_GPL(tee_shm_register_fd);
262 
263 /**
264  * tee_shm_alloc_priv_buf() - Allocate shared memory for a privately shared
265  *			      kernel buffer
266  * @ctx:	Context that allocates the shared memory
267  * @size:	Requested size of shared memory
268  *
269  * This function returns shared memory similar to what
270  * tee_shm_alloc_kernel_buf() returns, with the difference that the
271  * memory might not be registered in the secure world if the driver
272  * supports passing memory that isn't registered in advance.
273  *
274  * This function should normally only be used internally in the TEE
275  * drivers.
276  *
277  * @returns a pointer to 'struct tee_shm' on success, and ERR_PTR on failure
278  */
279 struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size)
280 {
281 	u32 flags = TEE_SHM_PRIV | TEE_SHM_POOL;
282 
283 	return shm_alloc_helper(ctx, size, sizeof(long) * 2, flags, -1);
284 }
285 EXPORT_SYMBOL_GPL(tee_shm_alloc_priv_buf);
286 
287 #if IS_ENABLED(CONFIG_TEE_DMABUF_HEAPS)
288 /**
289  * tee_shm_alloc_dma_mem() - Allocate DMA memory as shared memory object
290  * @ctx:	Context that allocates the shared memory
291  * @page_count:	Number of pages
292  *
293  * The allocated memory is expected to be lent (made inaccessible to the
294  * kernel) to the TEE while it's used and returned (accessible to the
295  * kernel again) before it's freed.
296  *
297  * This function should normally only be used internally in the TEE
298  * drivers.
299  *
300  * @returns a pointer to 'struct tee_shm' on success, and ERR_PTR on failure
301  */
302 struct tee_shm *tee_shm_alloc_dma_mem(struct tee_context *ctx,
303 				      size_t page_count)
304 {
305 	struct tee_device *teedev = ctx->teedev;
306 	struct tee_shm_dma_mem *dma_mem;
307 	dma_addr_t dma_addr;
308 	struct page *page;
309 
310 	if (!tee_device_get(teedev))
311 		return ERR_PTR(-EINVAL);
312 
313 	page = dma_alloc_pages(&teedev->dev, page_count * PAGE_SIZE,
314 			       &dma_addr, DMA_BIDIRECTIONAL, GFP_KERNEL);
315 	if (!page)
316 		goto err_put_teedev;
317 
318 	dma_mem = kzalloc(sizeof(*dma_mem), GFP_KERNEL);
319 	if (!dma_mem)
320 		goto err_free_pages;
321 
322 	refcount_set(&dma_mem->shm.refcount, 1);
323 	dma_mem->shm.ctx = ctx;
324 	dma_mem->shm.paddr = page_to_phys(page);
325 	dma_mem->dma_addr = dma_addr;
326 	dma_mem->page = page;
327 	dma_mem->shm.size = page_count * PAGE_SIZE;
328 	dma_mem->shm.flags = TEE_SHM_DMA_MEM;
329 
330 	teedev_ctx_get(ctx);
331 
332 	return &dma_mem->shm;
333 
334 err_free_pages:
335 	dma_free_pages(&teedev->dev, page_count * PAGE_SIZE, page, dma_addr,
336 		       DMA_BIDIRECTIONAL);
337 err_put_teedev:
338 	tee_device_put(teedev);
339 
340 	return ERR_PTR(-ENOMEM);
341 }
342 EXPORT_SYMBOL_GPL(tee_shm_alloc_dma_mem);
343 #else
344 struct tee_shm *tee_shm_alloc_dma_mem(struct tee_context *ctx,
345 				      size_t page_count)
346 {
347 	return ERR_PTR(-EINVAL);
348 }
349 EXPORT_SYMBOL_GPL(tee_shm_alloc_dma_mem);
350 #endif
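
/*
 * Minimal usage sketch of tee_shm_alloc_dma_mem(): a TEE driver allocating
 * physically contiguous memory to lend to the secure world. The page count
 * is hypothetical and the lend/reclaim steps are driver specific, so they
 * are only indicated by comments here.
 */
static struct tee_shm *example_alloc_lendable_mem(struct tee_context *ctx)
{
	struct tee_shm *shm;

	shm = tee_shm_alloc_dma_mem(ctx, 4);	/* 4 pages, hypothetical */
	if (IS_ERR(shm))
		return shm;

	/*
	 * shm->paddr and shm->size describe the allocation. The driver is
	 * expected to lend it to the TEE here and to reclaim it again
	 * before releasing the buffer with tee_shm_free(shm).
	 */
	return shm;
}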
351 
352 int tee_dyn_shm_alloc_helper(struct tee_shm *shm, size_t size, size_t align,
353 			     int (*shm_register)(struct tee_context *ctx,
354 						 struct tee_shm *shm,
355 						 struct page **pages,
356 						 size_t num_pages,
357 						 unsigned long start))
358 {
359 	size_t nr_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
360 	struct page **pages;
361 	unsigned int i;
362 	int rc = 0;
363 
364 	/*
365 	 * Ignore alignment since this is already going to be page aligned
366 	 * and there's no need for any larger alignment.
367 	 */
368 	shm->kaddr = alloc_pages_exact(nr_pages * PAGE_SIZE,
369 				       GFP_KERNEL | __GFP_ZERO);
370 	if (!shm->kaddr)
371 		return -ENOMEM;
372 
373 	shm->paddr = virt_to_phys(shm->kaddr);
374 	shm->size = nr_pages * PAGE_SIZE;
375 
376 	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
377 	if (!pages) {
378 		rc = -ENOMEM;
379 		goto err;
380 	}
381 
382 	for (i = 0; i < nr_pages; i++)
383 		pages[i] = virt_to_page((u8 *)shm->kaddr + i * PAGE_SIZE);
384 
385 	shm->pages = pages;
386 	shm->num_pages = nr_pages;
387 
388 	if (shm_register) {
389 		rc = shm_register(shm->ctx, shm, pages, nr_pages,
390 				  (unsigned long)shm->kaddr);
391 		if (rc)
392 			goto err;
393 	}
394 
395 	return 0;
396 err:
397 	free_pages_exact(shm->kaddr, shm->size);
398 	shm->kaddr = NULL;
399 	return rc;
400 }
401 EXPORT_SYMBOL_GPL(tee_dyn_shm_alloc_helper);
402 
403 void tee_dyn_shm_free_helper(struct tee_shm *shm,
404 			     int (*shm_unregister)(struct tee_context *ctx,
405 						   struct tee_shm *shm))
406 {
407 	if (shm_unregister)
408 		shm_unregister(shm->ctx, shm);
409 	free_pages_exact(shm->kaddr, shm->size);
410 	shm->kaddr = NULL;
411 	kfree(shm->pages);
412 	shm->pages = NULL;
413 }
414 EXPORT_SYMBOL_GPL(tee_dyn_shm_free_helper);
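
/*
 * Minimal sketch of how a TEE driver's dynamic shared-memory pool ops can be
 * built on the two helpers above. my_shm_register() and my_shm_unregister()
 * are hypothetical driver callbacks (stubbed out here) that would announce
 * and retract the pages to/from the secure world; a complete pool
 * implementation would also provide a .destroy_pool op.
 */
static int my_shm_register(struct tee_context *ctx, struct tee_shm *shm,
			   struct page **pages, size_t num_pages,
			   unsigned long start)
{
	/* Hypothetical: tell the secure world about the pages. */
	return 0;
}

static int my_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
{
	/* Hypothetical: retract the registration from the secure world. */
	return 0;
}

static int example_pool_op_alloc(struct tee_shm_pool *pool,
				 struct tee_shm *shm, size_t size,
				 size_t align)
{
	return tee_dyn_shm_alloc_helper(shm, size, align, my_shm_register);
}

static void example_pool_op_free(struct tee_shm_pool *pool,
				 struct tee_shm *shm)
{
	tee_dyn_shm_free_helper(shm, my_shm_unregister);
}

static const struct tee_shm_pool_ops example_pool_ops = {
	.alloc = example_pool_op_alloc,
	.free = example_pool_op_free,
};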
415 
416 static struct tee_shm *
417 register_shm_helper(struct tee_context *ctx, struct iov_iter *iter, u32 flags,
418 		    int id)
419 {
420 	struct tee_device *teedev = ctx->teedev;
421 	struct tee_shm *shm;
422 	unsigned long start, addr;
423 	size_t num_pages, off;
424 	ssize_t len;
425 	void *ret;
426 	int rc;
427 
428 	if (!tee_device_get(teedev))
429 		return ERR_PTR(-EINVAL);
430 
431 	if (!teedev->desc->ops->shm_register ||
432 	    !teedev->desc->ops->shm_unregister) {
433 		ret = ERR_PTR(-ENOTSUPP);
434 		goto err_dev_put;
435 	}
436 
437 	teedev_ctx_get(ctx);
438 
439 	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
440 	if (!shm) {
441 		ret = ERR_PTR(-ENOMEM);
442 		goto err_ctx_put;
443 	}
444 
445 	refcount_set(&shm->refcount, 1);
446 	shm->flags = flags;
447 	shm->ctx = ctx;
448 	shm->id = id;
449 	addr = untagged_addr((unsigned long)iter_iov_addr(iter));
450 	start = rounddown(addr, PAGE_SIZE);
451 	num_pages = iov_iter_npages(iter, INT_MAX);
452 	if (!num_pages) {
453 		ret = ERR_PTR(-ENOMEM);
454 		goto err_ctx_put;
455 	}
456 
457 	shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
458 	if (!shm->pages) {
459 		ret = ERR_PTR(-ENOMEM);
460 		goto err_free_shm;
461 	}
462 
463 	len = iov_iter_extract_pages(iter, &shm->pages, LONG_MAX, num_pages, 0,
464 				     &off);
465 	if (unlikely(len <= 0)) {
466 		ret = len ? ERR_PTR(len) : ERR_PTR(-ENOMEM);
467 		goto err_free_shm_pages;
468 	}
469 
470 	/*
471 	 * iov_iter_extract_kvec_pages() does not take a reference on the
472 	 * pages, so take a reference on them here.
473 	 */
474 	if (iov_iter_is_kvec(iter))
475 		shm_get_kernel_pages(shm->pages, num_pages);
476 
477 	shm->offset = off;
478 	shm->size = len;
479 	shm->num_pages = num_pages;
480 
481 	rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
482 					     shm->num_pages, start);
483 	if (rc) {
484 		ret = ERR_PTR(rc);
485 		goto err_put_shm_pages;
486 	}
487 
488 	return shm;
489 err_put_shm_pages:
490 	if (!iov_iter_is_kvec(iter))
491 		unpin_user_pages(shm->pages, shm->num_pages);
492 	else
493 		shm_put_kernel_pages(shm->pages, shm->num_pages);
494 err_free_shm_pages:
495 	kfree(shm->pages);
496 err_free_shm:
497 	kfree(shm);
498 err_ctx_put:
499 	teedev_ctx_put(ctx);
500 err_dev_put:
501 	tee_device_put(teedev);
502 	return ret;
503 }
504 
505 /**
506  * tee_shm_register_user_buf() - Register a userspace shared memory buffer
507  * @ctx:	Context that registers the shared memory
508  * @addr:	The userspace address of the shared buffer
509  * @length:	Length of the shared buffer
510  *
511  * @returns a pointer to 'struct tee_shm' on success, and ERR_PTR on failure
512  */
513 struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx,
514 					  unsigned long addr, size_t length)
515 {
516 	u32 flags = TEE_SHM_USER_MAPPED | TEE_SHM_DYNAMIC;
517 	struct tee_device *teedev = ctx->teedev;
518 	struct tee_shm *shm;
519 	struct iov_iter iter;
520 	void *ret;
521 	int id;
522 
523 	if (!access_ok((void __user *)addr, length))
524 		return ERR_PTR(-EFAULT);
525 
526 	mutex_lock(&teedev->mutex);
527 	id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL);
528 	mutex_unlock(&teedev->mutex);
529 	if (id < 0)
530 		return ERR_PTR(id);
531 
532 	iov_iter_ubuf(&iter, ITER_DEST, (void __user *)addr, length);
533 	shm = register_shm_helper(ctx, &iter, flags, id);
534 	if (IS_ERR(shm)) {
535 		mutex_lock(&teedev->mutex);
536 		idr_remove(&teedev->idr, id);
537 		mutex_unlock(&teedev->mutex);
538 		return shm;
539 	}
540 
541 	mutex_lock(&teedev->mutex);
542 	ret = idr_replace(&teedev->idr, shm, id);
543 	mutex_unlock(&teedev->mutex);
544 	if (IS_ERR(ret)) {
545 		tee_shm_free(shm);
546 		return ret;
547 	}
548 
549 	return shm;
550 }
551 
552 /**
553  * tee_shm_register_kernel_buf() - Register kernel memory to be shared with
554  *				   secure world
555  * @ctx:	Context that registers the shared memory
556  * @addr:	The buffer
557  * @length:	Length of the buffer
558  *
559  * @returns a pointer to 'struct tee_shm' on success, and ERR_PTR on failure
560  */
561 
562 struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx,
563 					    void *addr, size_t length)
564 {
565 	u32 flags = TEE_SHM_DYNAMIC;
566 	struct kvec kvec;
567 	struct iov_iter iter;
568 
569 	kvec.iov_base = addr;
570 	kvec.iov_len = length;
571 	iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, length);
572 
573 	return register_shm_helper(ctx, &iter, flags, -1);
574 }
575 EXPORT_SYMBOL_GPL(tee_shm_register_kernel_buf);
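
/*
 * Minimal usage sketch of tee_shm_register_kernel_buf(): sharing an existing
 * kernel buffer with the secure world. The buffer size is hypothetical; the
 * buffer must stay allocated for as long as the registration exists.
 */
static int example_register_kernel_buf(struct tee_context *ctx)
{
	struct tee_shm *shm;
	void *buf;
	int rc = 0;

	buf = kzalloc(PAGE_SIZE, GFP_KERNEL);	/* hypothetical buffer */
	if (!buf)
		return -ENOMEM;

	shm = tee_shm_register_kernel_buf(ctx, buf, PAGE_SIZE);
	if (IS_ERR(shm)) {
		rc = PTR_ERR(shm);
		goto out;
	}

	/* ... pass shm as a memref parameter to tee_client_invoke_func() ... */

	tee_shm_free(shm);	/* unregisters and drops the page references */
out:
	kfree(buf);
	return rc;
}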
576 
577 static int tee_shm_fop_release(struct inode *inode, struct file *filp)
578 {
579 	tee_shm_put(filp->private_data);
580 	return 0;
581 }
582 
583 static int tee_shm_fop_mmap(struct file *filp, struct vm_area_struct *vma)
584 {
585 	struct tee_shm *shm = filp->private_data;
586 	size_t size = vma->vm_end - vma->vm_start;
587 
588 	/* Refuse sharing shared memory provided by the application */
589 	if (shm->flags & TEE_SHM_USER_MAPPED)
590 		return -EINVAL;
591 	/* Refuse sharing registered DMA-bufs with the application */
592 	if (shm->flags & TEE_SHM_DMA_BUF)
593 		return -EINVAL;
594 
595 	/* check for overflowing the buffer's size */
596 	if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT)
597 		return -EINVAL;
598 
599 	return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
600 			       size, vma->vm_page_prot);
601 }
602 
603 static const struct file_operations tee_shm_fops = {
604 	.owner = THIS_MODULE,
605 	.release = tee_shm_fop_release,
606 	.mmap = tee_shm_fop_mmap,
607 };
608 
609 /**
610  * tee_shm_get_fd() - Increase reference count and return file descriptor
611  * @shm:	Shared memory handle
612  * @returns user space file descriptor to shared memory
613  */
614 int tee_shm_get_fd(struct tee_shm *shm)
615 {
616 	int fd;
617 
618 	if (shm->id < 0)
619 		return -EINVAL;
620 
621 	/* matched by tee_shm_put() in tee_shm_op_release() */
622 	refcount_inc(&shm->refcount);
623 	fd = anon_inode_getfd("tee_shm", &tee_shm_fops, shm, O_RDWR);
624 	if (fd < 0)
625 		tee_shm_put(shm);
626 	return fd;
627 }
628 
629 /**
630  * tee_shm_free() - Free shared memory
631  * @shm:	Handle to shared memory to free
632  */
633 void tee_shm_free(struct tee_shm *shm)
634 {
635 	tee_shm_put(shm);
636 }
637 EXPORT_SYMBOL_GPL(tee_shm_free);
638 
639 /**
640  * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
641  * @shm:	Shared memory handle
642  * @offs:	Offset from start of this shared memory
643  * @returns virtual address of the shared memory + offs if offs is within
644  *	the bounds of this shared memory, else an ERR_PTR
645  */
646 void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
647 {
648 	if (!shm->kaddr)
649 		return ERR_PTR(-EINVAL);
650 	if (offs >= shm->size)
651 		return ERR_PTR(-EINVAL);
652 	return (char *)shm->kaddr + offs;
653 }
654 EXPORT_SYMBOL_GPL(tee_shm_get_va);
655 
656 /**
657  * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
658  * @shm:	Shared memory handle
659  * @offs:	Offset from start of this shared memory
660  * @pa:		Physical address to return
661  * @returns 0 if offs is within the bounds of this shared memory, else an
662  *	error code.
663  */
664 int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa)
665 {
666 	if (offs >= shm->size)
667 		return -EINVAL;
668 	if (pa)
669 		*pa = shm->paddr + offs;
670 	return 0;
671 }
672 EXPORT_SYMBOL_GPL(tee_shm_get_pa);
673 
674 /**
675  * tee_shm_get_from_id() - Find shared memory object and increase reference
676  * count
677  * @ctx:	Context owning the shared memory
678  * @id:		Id of shared memory object
679  * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
680  */
681 struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
682 {
683 	struct tee_device *teedev;
684 	struct tee_shm *shm;
685 
686 	if (!ctx)
687 		return ERR_PTR(-EINVAL);
688 
689 	teedev = ctx->teedev;
690 	mutex_lock(&teedev->mutex);
691 	shm = idr_find(&teedev->idr, id);
692 	/*
693 	 * If the tee_shm was found in the IDR it must have a refcount
694 	 * larger than 0 due to the guarantee in tee_shm_put() below. So
695 	 * it's safe to use refcount_inc().
696 	 */
697 	if (!shm || shm->ctx != ctx)
698 		shm = ERR_PTR(-EINVAL);
699 	else
700 		refcount_inc(&shm->refcount);
701 	mutex_unlock(&teedev->mutex);
702 	return shm;
703 }
704 EXPORT_SYMBOL_GPL(tee_shm_get_from_id);
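
/*
 * Minimal sketch of the lookup/put pairing: tee_shm_get_from_id() takes a
 * reference that must be balanced with tee_shm_put() (defined below) once
 * the caller is done with the object. The ID typically comes from user space.
 */
static int example_use_shm_by_id(struct tee_context *ctx, int id)
{
	struct tee_shm *shm;

	shm = tee_shm_get_from_id(ctx, id);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	/* ... use shm, e.g. tee_shm_get_va(shm, 0), while the reference is held ... */

	tee_shm_put(shm);
	return 0;
}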
705 
706 /**
707  * tee_shm_put() - Decrease reference count on a shared memory handle
708  * @shm:	Shared memory handle
709  */
710 void tee_shm_put(struct tee_shm *shm)
711 {
712 	struct tee_device *teedev = shm->ctx->teedev;
713 	bool do_release = false;
714 
715 	mutex_lock(&teedev->mutex);
716 	if (refcount_dec_and_test(&shm->refcount)) {
717 		/*
718 		 * refcount has reached 0, we must now remove it from the
719 		 * IDR before releasing the mutex. This will guarantee that
720 		 * the refcount_inc() in tee_shm_get_from_id() never starts
721 		 * from 0.
722 		 */
723 		if (shm->id >= 0)
724 			idr_remove(&teedev->idr, shm->id);
725 		do_release = true;
726 	}
727 	mutex_unlock(&teedev->mutex);
728 
729 	if (do_release)
730 		tee_shm_release(teedev, shm);
731 }
732 EXPORT_SYMBOL_GPL(tee_shm_put);
733