// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2017, 2019-2021 Linaro Limited
 */
#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_core.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
#include "tee_private.h"
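
/*
 * struct tee_shm_dma_mem - shared memory backed by DMA pages
 * @shm:	the embedded shared memory object
 * @dma_addr:	DMA address of the allocated pages
 * @page:	first page of the allocation made with dma_alloc_pages()
 */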
struct tee_shm_dma_mem {
	struct tee_shm shm;
	dma_addr_t dma_addr;
	struct page *page;
};

static void shm_put_kernel_pages(struct page **pages, size_t page_count)
{
	size_t n;

	for (n = 0; n < page_count; n++)
		put_page(pages[n]);
}

static void shm_get_kernel_pages(struct page **pages, size_t page_count)
{
	size_t n;

	for (n = 0; n < page_count; n++)
		get_page(pages[n]);
}
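
/*
 * Drop the page references taken when the memory was registered, using
 * the unpin/put variant that matches how the pages were obtained.
 */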
static void release_registered_pages(struct tee_shm *shm)
{
	if (shm->pages) {
		if (shm->flags & TEE_SHM_USER_MAPPED)
			unpin_user_pages(shm->pages, shm->num_pages);
		else
			shm_put_kernel_pages(shm->pages, shm->num_pages);

		kfree(shm->pages);
	}
}
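
/*
 * Called when the last reference to a shm is dropped: undo whatever the
 * allocation path did (free DMA pages, drop the dma-buf reference, return
 * pool memory or unregister dynamic memory) and release the context and
 * device references taken when the shm was created.
 */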
static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
{
	void *p = shm;

	if (shm->flags & TEE_SHM_DMA_MEM) {
#if IS_ENABLED(CONFIG_TEE_DMABUF_HEAPS)
		struct tee_shm_dma_mem *dma_mem;

		dma_mem = container_of(shm, struct tee_shm_dma_mem, shm);
		p = dma_mem;
		dma_free_pages(&teedev->dev, shm->size, dma_mem->page,
			       dma_mem->dma_addr, DMA_BIDIRECTIONAL);
#endif
	} else if (shm->flags & TEE_SHM_DMA_BUF) {
		struct tee_shm_dmabuf_ref *ref;

		ref = container_of(shm, struct tee_shm_dmabuf_ref, shm);
		p = ref;
		dma_buf_put(ref->dmabuf);
	} else if (shm->flags & TEE_SHM_POOL) {
		teedev->pool->ops->free(teedev->pool, shm);
	} else if (shm->flags & TEE_SHM_DYNAMIC) {
		int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);

		if (rc)
			dev_err(teedev->dev.parent,
				"unregister shm %p failed: %d", shm, rc);

		release_registered_pages(shm);
	}

	teedev_ctx_put(shm->ctx);

	kfree(p);

	tee_device_put(teedev);
}
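
/*
 * Common allocation path for pool backed shared memory: take a device
 * reference, allocate and initialize the shm object and ask the pool to
 * back it with memory. @id is a preallocated IDR id, or -1 when the
 * buffer is not exposed to user space.
 */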
static struct tee_shm *shm_alloc_helper(struct tee_context *ctx, size_t size,
					size_t align, u32 flags, int id)
{
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm *shm;
	void *ret;
	int rc;

	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	if (!teedev->pool) {
		/* teedev has been detached from driver */
		ret = ERR_PTR(-EINVAL);
		goto err_dev_put;
	}

	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
	if (!shm) {
		ret = ERR_PTR(-ENOMEM);
		goto err_dev_put;
	}

	refcount_set(&shm->refcount, 1);
	shm->flags = flags;
	shm->id = id;

	/*
	 * We're assigning this as it is needed if the shm is to be
	 * registered. If this function returns OK then the caller is
	 * expected to call teedev_ctx_get() or clear shm->ctx in case it's
	 * not needed any longer.
	 */
	shm->ctx = ctx;

	rc = teedev->pool->ops->alloc(teedev->pool, shm, size, align);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err_kfree;
	}

	teedev_ctx_get(ctx);
	return shm;
err_kfree:
	kfree(shm);
err_dev_put:
	tee_device_put(teedev);
	return ret;
}

/**
 * tee_shm_alloc_user_buf() - Allocate shared memory for user space
 * @ctx:	Context that allocates the shared memory
 * @size:	Requested size of shared memory
 *
 * Memory allocated as user space shared memory is automatically freed when
 * the TEE file pointer is closed. The primary usage of this function is
 * when the TEE driver doesn't support registering ordinary user space
 * memory.
 *
 * @returns a pointer to 'struct tee_shm' on success, and an ERR_PTR on failure
 */
struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size)
{
	u32 flags = TEE_SHM_DYNAMIC | TEE_SHM_POOL;
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm *shm;
	void *ret;
	int id;
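
	/*
	 * Reserve an id with a NULL placeholder first and only publish the
	 * shm with idr_replace() once the allocation has succeeded, so a
	 * partially initialized object is never visible through the IDR.
	 */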
	mutex_lock(&teedev->mutex);
	id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL);
	mutex_unlock(&teedev->mutex);
	if (id < 0)
		return ERR_PTR(id);

	shm = shm_alloc_helper(ctx, size, PAGE_SIZE, flags, id);
	if (IS_ERR(shm)) {
		mutex_lock(&teedev->mutex);
		idr_remove(&teedev->idr, id);
		mutex_unlock(&teedev->mutex);
		return shm;
	}

	mutex_lock(&teedev->mutex);
	ret = idr_replace(&teedev->idr, shm, id);
	mutex_unlock(&teedev->mutex);
	if (IS_ERR(ret)) {
		tee_shm_free(shm);
		return ret;
	}

	return shm;
}

/**
 * tee_shm_alloc_kernel_buf() - Allocate shared memory for a kernel buffer
 * @ctx:	Context that allocates the shared memory
 * @size:	Requested size of shared memory
 *
 * The returned memory is registered in secure world and is suitable to be
 * passed as a memory buffer in parameter argument to
 * tee_client_invoke_func(). The memory allocated is later freed with a
 * call to tee_shm_free().
 *
 * @returns a pointer to 'struct tee_shm' on success, and ERR_PTR on failure
 */
struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size)
{
	u32 flags = TEE_SHM_DYNAMIC | TEE_SHM_POOL;

	return shm_alloc_helper(ctx, size, PAGE_SIZE, flags, -1);
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_kernel_buf);
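
/*
 * Minimal usage sketch (not taken from an in-tree driver); "ctx" is assumed
 * to come from tee_client_open_context() and "data"/"data_len" are caller
 * provided. The buffer is typically passed to the TEE as a memref parameter
 * of tee_client_invoke_func():
 *
 *	struct tee_shm *shm = tee_shm_alloc_kernel_buf(ctx, data_len);
 *	void *va;
 *
 *	if (IS_ERR(shm))
 *		return PTR_ERR(shm);
 *	va = tee_shm_get_va(shm, 0);
 *	if (!IS_ERR(va))
 *		memcpy(va, data, data_len);
 *	(invoke the TEE function with the memref here)
 *	tee_shm_free(shm);
 */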
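
/**
 * tee_shm_register_fd() - Register shared memory from a dma-buf file
 *			   descriptor
 * @ctx:	Context that registers the shared memory
 * @fd:		File descriptor of the dma-buf to register
 *
 * @returns a pointer to 'struct tee_shm' on success, and an ERR_PTR on failure
 */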
struct tee_shm *tee_shm_register_fd(struct tee_context *ctx, int fd)
{
	struct tee_shm_dmabuf_ref *ref;
	int rc;

	if (!tee_device_get(ctx->teedev))
		return ERR_PTR(-EINVAL);

	teedev_ctx_get(ctx);

	ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (!ref) {
		rc = -ENOMEM;
		goto err_put_tee;
	}

	refcount_set(&ref->shm.refcount, 1);
	ref->shm.ctx = ctx;
	ref->shm.id = -1;
	ref->shm.flags = TEE_SHM_DMA_BUF;

	ref->dmabuf = dma_buf_get(fd);
	if (IS_ERR(ref->dmabuf)) {
		rc = PTR_ERR(ref->dmabuf);
		goto err_kfree_ref;
	}

	rc = tee_heap_update_from_dma_buf(ctx->teedev, ref->dmabuf,
					  &ref->offset, &ref->shm,
					  &ref->parent_shm);
	if (rc)
		goto err_put_dmabuf;

	mutex_lock(&ref->shm.ctx->teedev->mutex);
	ref->shm.id = idr_alloc(&ref->shm.ctx->teedev->idr, &ref->shm,
				1, 0, GFP_KERNEL);
	mutex_unlock(&ref->shm.ctx->teedev->mutex);
	if (ref->shm.id < 0) {
		rc = ref->shm.id;
		goto err_put_dmabuf;
	}

	return &ref->shm;

err_put_dmabuf:
	dma_buf_put(ref->dmabuf);
err_kfree_ref:
	kfree(ref);
err_put_tee:
	teedev_ctx_put(ctx);
	tee_device_put(ctx->teedev);

	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(tee_shm_register_fd);

/**
 * tee_shm_alloc_priv_buf() - Allocate shared memory for a privately shared
 *			      kernel buffer
 * @ctx:	Context that allocates the shared memory
 * @size:	Requested size of shared memory
 *
 * This function returns shared memory similar to what
 * tee_shm_alloc_kernel_buf() returns, with the difference that the memory
 * might not be registered in secure world if the driver supports passing
 * memory that isn't registered in advance.
 *
 * This function should normally only be used internally in the TEE
 * drivers.
 *
 * @returns a pointer to 'struct tee_shm' on success, and an ERR_PTR on failure
 */
struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size)
{
	u32 flags = TEE_SHM_PRIV | TEE_SHM_POOL;

	return shm_alloc_helper(ctx, size, sizeof(long) * 2, flags, -1);
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_priv_buf);

#if IS_ENABLED(CONFIG_TEE_DMABUF_HEAPS)
/**
 * tee_shm_alloc_dma_mem() - Allocate DMA memory as a shared memory object
 * @ctx:	Context that allocates the shared memory
 * @page_count:	Number of pages
 *
 * The allocated memory is expected to be lent (made inaccessible to the
 * kernel) to the TEE while it's used and returned (accessible to the
 * kernel again) before it's freed.
 *
 * This function should normally only be used internally in the TEE
 * drivers.
 *
 * @returns a pointer to 'struct tee_shm' on success, and an ERR_PTR on failure
 */
struct tee_shm *tee_shm_alloc_dma_mem(struct tee_context *ctx,
				      size_t page_count)
{
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm_dma_mem *dma_mem;
	dma_addr_t dma_addr;
	struct page *page;

	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	page = dma_alloc_pages(&teedev->dev, page_count * PAGE_SIZE,
			       &dma_addr, DMA_BIDIRECTIONAL, GFP_KERNEL);
	if (!page)
		goto err_put_teedev;

	dma_mem = kzalloc(sizeof(*dma_mem), GFP_KERNEL);
	if (!dma_mem)
		goto err_free_pages;

	refcount_set(&dma_mem->shm.refcount, 1);
	dma_mem->shm.ctx = ctx;
	dma_mem->shm.paddr = page_to_phys(page);
	dma_mem->dma_addr = dma_addr;
	dma_mem->page = page;
	dma_mem->shm.size = page_count * PAGE_SIZE;
	dma_mem->shm.flags = TEE_SHM_DMA_MEM;

	teedev_ctx_get(ctx);

	return &dma_mem->shm;

err_free_pages:
	dma_free_pages(&teedev->dev, page_count * PAGE_SIZE, page, dma_addr,
		       DMA_BIDIRECTIONAL);
err_put_teedev:
	tee_device_put(teedev);

	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_dma_mem);
#else
struct tee_shm *tee_shm_alloc_dma_mem(struct tee_context *ctx,
				      size_t page_count)
{
	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_dma_mem);
#endif
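
/**
 * tee_dyn_shm_alloc_helper() - Allocate and optionally register backing
 *				pages for a dynamic shared memory object
 * @shm:		Shared memory object to back with pages
 * @size:		Requested size in bytes, rounded up to whole pages
 * @align:		Requested alignment, ignored since page alignment
 *			is always sufficient here
 * @shm_register:	Driver callback registering the pages with the TEE,
 *			or NULL if no registration is needed
 *
 * Helper for drivers implementing the pool ops for dynamic shared memory.
 *
 * @returns 0 on success or a negative error code on failure
 */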
int tee_dyn_shm_alloc_helper(struct tee_shm *shm, size_t size, size_t align,
			     int (*shm_register)(struct tee_context *ctx,
						 struct tee_shm *shm,
						 struct page **pages,
						 size_t num_pages,
						 unsigned long start))
{
	size_t nr_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
	struct page **pages;
	unsigned int i;
	int rc = 0;

	/*
	 * Ignore alignment since this is already going to be page aligned
	 * and there's no need for any larger alignment.
	 */
	shm->kaddr = alloc_pages_exact(nr_pages * PAGE_SIZE,
				       GFP_KERNEL | __GFP_ZERO);
	if (!shm->kaddr)
		return -ENOMEM;

	shm->paddr = virt_to_phys(shm->kaddr);
	shm->size = nr_pages * PAGE_SIZE;

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages) {
		rc = -ENOMEM;
		goto err_pages;
	}

	for (i = 0; i < nr_pages; i++)
		pages[i] = virt_to_page((u8 *)shm->kaddr + i * PAGE_SIZE);

	shm->pages = pages;
	shm->num_pages = nr_pages;

	if (shm_register) {
		rc = shm_register(shm->ctx, shm, pages, nr_pages,
				  (unsigned long)shm->kaddr);
		if (rc)
			goto err_kfree;
	}

	return 0;
err_kfree:
	kfree(pages);
err_pages:
	free_pages_exact(shm->kaddr, shm->size);
	shm->kaddr = NULL;
	return rc;
}
EXPORT_SYMBOL_GPL(tee_dyn_shm_alloc_helper);
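
/**
 * tee_dyn_shm_free_helper() - Unregister and free the pages backing a
 *			       dynamic shared memory object
 * @shm:		Shared memory object previously set up with
 *			tee_dyn_shm_alloc_helper()
 * @shm_unregister:	Driver callback unregistering the pages with the TEE,
 *			or NULL if no registration was done
 */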
void tee_dyn_shm_free_helper(struct tee_shm *shm,
			     int (*shm_unregister)(struct tee_context *ctx,
						   struct tee_shm *shm))
{
	if (shm_unregister)
		shm_unregister(shm->ctx, shm);
	free_pages_exact(shm->kaddr, shm->size);
	shm->kaddr = NULL;
	kfree(shm->pages);
	shm->pages = NULL;
}
EXPORT_SYMBOL_GPL(tee_dyn_shm_free_helper);
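
/*
 * Pin (or reference) the pages described by @iter and register them with
 * the TEE driver as dynamic shared memory. Used both for user space
 * buffers (ITER_UBUF) and kernel buffers (ITER_KVEC).
 */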
static struct tee_shm *
register_shm_helper(struct tee_context *ctx, struct iov_iter *iter, u32 flags,
		    int id)
{
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm *shm;
	unsigned long start, addr;
	size_t num_pages, off;
	ssize_t len;
	void *ret;
	int rc;

	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	if (!teedev->desc->ops->shm_register ||
	    !teedev->desc->ops->shm_unregister) {
		ret = ERR_PTR(-ENOTSUPP);
		goto err_dev_put;
	}

	teedev_ctx_get(ctx);

	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
	if (!shm) {
		ret = ERR_PTR(-ENOMEM);
		goto err_ctx_put;
	}

	refcount_set(&shm->refcount, 1);
	shm->flags = flags;
	shm->ctx = ctx;
	shm->id = id;
	addr = untagged_addr((unsigned long)iter_iov_addr(iter));
	start = rounddown(addr, PAGE_SIZE);
	num_pages = iov_iter_npages(iter, INT_MAX);
	if (!num_pages) {
		ret = ERR_PTR(-ENOMEM);
		goto err_free_shm;
	}

	shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
	if (!shm->pages) {
		ret = ERR_PTR(-ENOMEM);
		goto err_free_shm;
	}

	len = iov_iter_extract_pages(iter, &shm->pages, LONG_MAX, num_pages, 0,
				     &off);
	if (unlikely(len <= 0)) {
		ret = len ? ERR_PTR(len) : ERR_PTR(-ENOMEM);
		goto err_free_shm_pages;
	} else if (DIV_ROUND_UP(len + off, PAGE_SIZE) != num_pages) {
		/*
		 * If we got fewer pages than requested, update num_pages
		 * so the error path below releases the right amount.
		 */
		shm->num_pages = len / PAGE_SIZE;
		ret = ERR_PTR(-ENOMEM);
		goto err_put_shm_pages;
	}

	/*
	 * iov_iter_extract_kvec_pages() doesn't take a reference on the
	 * pages, so take one on them here.
	 */
	if (iov_iter_is_kvec(iter))
		shm_get_kernel_pages(shm->pages, num_pages);

	shm->offset = off;
	shm->size = len;
	shm->num_pages = num_pages;

	rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
					     shm->num_pages, start);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err_put_shm_pages;
	}

	return shm;
err_put_shm_pages:
	if (!iov_iter_is_kvec(iter))
		unpin_user_pages(shm->pages, shm->num_pages);
	else
		shm_put_kernel_pages(shm->pages, shm->num_pages);
err_free_shm_pages:
	kfree(shm->pages);
err_free_shm:
	kfree(shm);
err_ctx_put:
	teedev_ctx_put(ctx);
err_dev_put:
	tee_device_put(teedev);
	return ret;
}

/**
 * tee_shm_register_user_buf() - Register a userspace shared memory buffer
 * @ctx:	Context that registers the shared memory
 * @addr:	The userspace address of the shared buffer
 * @length:	Length of the shared buffer
 *
 * @returns a pointer to 'struct tee_shm' on success, and an ERR_PTR on failure
 */
struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx,
					   unsigned long addr, size_t length)
{
	u32 flags = TEE_SHM_USER_MAPPED | TEE_SHM_DYNAMIC;
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm *shm;
	struct iov_iter iter;
	void *ret;
	int id;

	if (!access_ok((void __user *)addr, length))
		return ERR_PTR(-EFAULT);

	mutex_lock(&teedev->mutex);
	id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL);
	mutex_unlock(&teedev->mutex);
	if (id < 0)
		return ERR_PTR(id);

	iov_iter_ubuf(&iter, ITER_DEST, (void __user *)addr, length);
	shm = register_shm_helper(ctx, &iter, flags, id);
	if (IS_ERR(shm)) {
		mutex_lock(&teedev->mutex);
		idr_remove(&teedev->idr, id);
		mutex_unlock(&teedev->mutex);
		return shm;
	}

	mutex_lock(&teedev->mutex);
	ret = idr_replace(&teedev->idr, shm, id);
	mutex_unlock(&teedev->mutex);
	if (IS_ERR(ret)) {
		tee_shm_free(shm);
		return ret;
	}

	return shm;
}

/**
 * tee_shm_register_kernel_buf() - Register kernel memory to be shared with
 *				   secure world
 * @ctx:	Context that registers the shared memory
 * @addr:	The buffer
 * @length:	Length of the buffer
 *
 * @returns a pointer to 'struct tee_shm' on success, and an ERR_PTR on failure
 */
struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx,
					    void *addr, size_t length)
{
	u32 flags = TEE_SHM_DYNAMIC;
	struct kvec kvec;
	struct iov_iter iter;

	kvec.iov_base = addr;
	kvec.iov_len = length;
	iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, length);

	return register_shm_helper(ctx, &iter, flags, -1);
}
EXPORT_SYMBOL_GPL(tee_shm_register_kernel_buf);
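
/*
 * Minimal usage sketch (not taken from an in-tree driver); "ctx" is assumed
 * to come from tee_client_open_context(). An existing kernel buffer is
 * registered so it can be passed by reference instead of being copied:
 *
 *	struct tee_shm *shm = tee_shm_register_kernel_buf(ctx, buf, buf_len);
 *
 *	if (IS_ERR(shm))
 *		return PTR_ERR(shm);
 *	(pass shm as a memref parameter to tee_client_invoke_func() here)
 *	tee_shm_free(shm);
 */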

static int tee_shm_fop_release(struct inode *inode, struct file *filp)
{
	tee_shm_put(filp->private_data);
	return 0;
}

static int tee_shm_fop_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct tee_shm *shm = filp->private_data;
	size_t size = vma->vm_end - vma->vm_start;

	/* Refuse sharing shared memory provided by the application */
	if (shm->flags & TEE_SHM_USER_MAPPED)
		return -EINVAL;
	/* Refuse sharing registered dma-bufs with the application */
	if (shm->flags & TEE_SHM_DMA_BUF)
		return -EINVAL;

	/* Check for overflowing the buffer's size */
	if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT)
		return -EINVAL;

	return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}
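
/*
 * File operations for the anon inode file descriptor returned by
 * tee_shm_get_fd(). The fd holds a reference on the shm which is dropped
 * again in tee_shm_fop_release().
 */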
static const struct file_operations tee_shm_fops = {
	.owner = THIS_MODULE,
	.release = tee_shm_fop_release,
	.mmap = tee_shm_fop_mmap,
};

/**
 * tee_shm_get_fd() - Increase reference count and return file descriptor
 * @shm:	Shared memory handle
 * @returns user space file descriptor to shared memory
 */
int tee_shm_get_fd(struct tee_shm *shm)
{
	int fd;

	if (shm->id < 0)
		return -EINVAL;

	/* matched by tee_shm_put() in tee_shm_fop_release() */
	refcount_inc(&shm->refcount);
	fd = anon_inode_getfd("tee_shm", &tee_shm_fops, shm, O_RDWR);
	if (fd < 0)
		tee_shm_put(shm);
	return fd;
}

/**
 * tee_shm_free() - Free shared memory
 * @shm:	Handle to shared memory to free
 */
void tee_shm_free(struct tee_shm *shm)
{
	tee_shm_put(shm);
}
EXPORT_SYMBOL_GPL(tee_shm_free);

/**
 * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
 * @shm:	Shared memory handle
 * @offs:	Offset from start of this shared memory
 * @returns virtual address of the shared memory + offs if offs is within
 *	the bounds of this shared memory, else an ERR_PTR
 */
void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
{
	if (!shm->kaddr)
		return ERR_PTR(-EINVAL);
	if (offs >= shm->size)
		return ERR_PTR(-EINVAL);
	return (char *)shm->kaddr + offs;
}
EXPORT_SYMBOL_GPL(tee_shm_get_va);

/**
 * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
 * @shm:	Shared memory handle
 * @offs:	Offset from start of this shared memory
 * @pa:		Physical address to return
 * @returns 0 if offs is within the bounds of this shared memory, else an
 *	error code.
 */
int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa)
{
	if (offs >= shm->size)
		return -EINVAL;
	if (pa)
		*pa = shm->paddr + offs;
	return 0;
}
EXPORT_SYMBOL_GPL(tee_shm_get_pa);

/**
 * tee_shm_get_from_id() - Find shared memory object and increase reference
 *			   count
 * @ctx:	Context owning the shared memory
 * @id:		Id of shared memory object
 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
 */
struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
{
	struct tee_device *teedev;
	struct tee_shm *shm;

	if (!ctx)
		return ERR_PTR(-EINVAL);

	teedev = ctx->teedev;
	mutex_lock(&teedev->mutex);
	shm = idr_find(&teedev->idr, id);
	/*
	 * If the tee_shm was found in the IDR it must have a refcount
	 * larger than 0 due to the guarantee in tee_shm_put() below. So
	 * it's safe to use refcount_inc().
	 */
	if (!shm || shm->ctx != ctx)
		shm = ERR_PTR(-EINVAL);
	else
		refcount_inc(&shm->refcount);
	mutex_unlock(&teedev->mutex);
	return shm;
}
EXPORT_SYMBOL_GPL(tee_shm_get_from_id);
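
/*
 * A lookup made with tee_shm_get_from_id() must be balanced with a
 * matching tee_shm_put(), for example (sketch, error handling elided):
 *
 *	struct tee_shm *shm = tee_shm_get_from_id(ctx, id);
 *
 *	if (!IS_ERR(shm)) {
 *		(use the shm here)
 *		tee_shm_put(shm);
 *	}
 */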

/**
 * tee_shm_put() - Decrease reference count on a shared memory handle
 * @shm:	Shared memory handle
 */
void tee_shm_put(struct tee_shm *shm)
{
	struct tee_device *teedev;
	bool do_release = false;

	if (!shm || !shm->ctx || !shm->ctx->teedev)
		return;

	teedev = shm->ctx->teedev;
	mutex_lock(&teedev->mutex);
	if (refcount_dec_and_test(&shm->refcount)) {
		/*
		 * refcount has reached 0, we must now remove it from the
		 * IDR before releasing the mutex. This will guarantee that
		 * the refcount_inc() in tee_shm_get_from_id() never starts
		 * from 0.
		 */
		if (shm->id >= 0)
			idr_remove(&teedev->idr, shm->id);
		do_release = true;
	}
	mutex_unlock(&teedev->mutex);

	if (do_release)
		tee_shm_release(teedev, shm);
}
EXPORT_SYMBOL_GPL(tee_shm_put);