Lines Matching +full:shared +full:- +full:memory
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (c) 2015-2017, 2019-2021 Linaro Limited
36 if (shm->pages) { in release_registered_pages()
37 if (shm->flags & TEE_SHM_USER_MAPPED) in release_registered_pages()
38 unpin_user_pages(shm->pages, shm->num_pages); in release_registered_pages()
40 shm_put_kernel_pages(shm->pages, shm->num_pages); in release_registered_pages()
42 kfree(shm->pages); in release_registered_pages()
48 if (shm->flags & TEE_SHM_POOL) { in tee_shm_release()
49 teedev->pool->ops->free(teedev->pool, shm); in tee_shm_release()
50 } else if (shm->flags & TEE_SHM_DYNAMIC) { in tee_shm_release()
51 int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm); in tee_shm_release()
54 dev_err(teedev->dev.parent, in tee_shm_release()
60 teedev_ctx_put(shm->ctx); in tee_shm_release()
70 struct tee_device *teedev = ctx->teedev; in shm_alloc_helper()
76 return ERR_PTR(-EINVAL); in shm_alloc_helper()
78 if (!teedev->pool) { in shm_alloc_helper()
80 ret = ERR_PTR(-EINVAL); in shm_alloc_helper()
86 ret = ERR_PTR(-ENOMEM); in shm_alloc_helper()
90 refcount_set(&shm->refcount, 1); in shm_alloc_helper()
91 shm->flags = flags; in shm_alloc_helper()
92 shm->id = id; in shm_alloc_helper()
97 * to call teedev_ctx_get() or clear shm->ctx in case it's not in shm_alloc_helper()
100 shm->ctx = ctx; in shm_alloc_helper()
102 rc = teedev->pool->ops->alloc(teedev->pool, shm, size, align); in shm_alloc_helper()
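The pool consumed here by pool->ops->alloc() is supplied by the backend driver. A minimal sketch of how a backend might wrap a static carveout into such a pool, assuming the tee_shm_pool_alloc_res_mem() helper and the recent header layout; MY_SHM_PADDR, MY_SHM_SIZE and my_setup_static_pool() are made-up names:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/sizes.h>
#include <linux/tee_core.h>	/* tee_shm_pool_alloc_res_mem(); tee_drv.h on older kernels */

/* Hypothetical static carveout; a real driver takes this from DT or firmware. */
#define MY_SHM_PADDR	0x42000000UL
#define MY_SHM_SIZE	SZ_2M

static struct tee_shm_pool *my_setup_static_pool(void)
{
	struct tee_shm_pool *pool;
	void *vaddr;

	vaddr = memremap(MY_SHM_PADDR, MY_SHM_SIZE, MEMREMAP_WB);
	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	/* Wrap the carveout in a pool; smallest allocation is one page. */
	pool = tee_shm_pool_alloc_res_mem((unsigned long)vaddr, MY_SHM_PADDR,
					  MY_SHM_SIZE, PAGE_SHIFT);
	if (IS_ERR(pool))
		memunmap(vaddr);

	/* The pool is passed to tee_device_alloc(); shm_alloc_helper() then
	 * reaches it through teedev->pool->ops->alloc() as seen above. */
	return pool;
}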
118 * tee_shm_alloc_user_buf() - Allocate shared memory for user space
119 * @ctx: Context that allocates the shared memory
120 * @size: Requested size of shared memory
122 * Memory allocated as user space shared memory is automatically freed when
125 * memory.
132 struct tee_device *teedev = ctx->teedev; in tee_shm_alloc_user_buf()
137 mutex_lock(&teedev->mutex); in tee_shm_alloc_user_buf()
138 id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL); in tee_shm_alloc_user_buf()
139 mutex_unlock(&teedev->mutex); in tee_shm_alloc_user_buf()
145 mutex_lock(&teedev->mutex); in tee_shm_alloc_user_buf()
146 idr_remove(&teedev->idr, id); in tee_shm_alloc_user_buf()
147 mutex_unlock(&teedev->mutex); in tee_shm_alloc_user_buf()
151 mutex_lock(&teedev->mutex); in tee_shm_alloc_user_buf()
152 ret = idr_replace(&teedev->idr, shm, id); in tee_shm_alloc_user_buf()
153 mutex_unlock(&teedev->mutex); in tee_shm_alloc_user_buf()
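The id handling above is the usual two-step IDR pattern: reserve the id with a NULL pointer before the object is fully constructed, then publish the finished object with idr_replace(). A generic sketch of that pattern; my_obj, my_idr and my_mutex are illustrative names, not from this file:

#include <linux/err.h>
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/slab.h>

static DEFINE_IDR(my_idr);
static DEFINE_MUTEX(my_mutex);

struct my_obj {
	int id;
};

static struct my_obj *my_obj_create(void)
{
	struct my_obj *obj;
	int id;

	/* Step 1: reserve an id; the published value is still NULL. */
	mutex_lock(&my_mutex);
	id = idr_alloc(&my_idr, NULL, 1, 0, GFP_KERNEL);
	mutex_unlock(&my_mutex);
	if (id < 0)
		return ERR_PTR(id);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj) {
		mutex_lock(&my_mutex);
		idr_remove(&my_idr, id);	/* give the slot back on failure */
		mutex_unlock(&my_mutex);
		return ERR_PTR(-ENOMEM);
	}
	obj->id = id;

	/* Step 2: publish the initialized object under the reserved id;
	 * idr_replace() returns the old (NULL) entry or an ERR_PTR. */
	mutex_lock(&my_mutex);
	idr_replace(&my_idr, obj, id);
	mutex_unlock(&my_mutex);

	return obj;
}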
163 * tee_shm_alloc_kernel_buf() - Allocate shared memory for kernel buffer
164 * @ctx: Context that allocates the shared memory
165 * @size: Requested size of shared memory
167 * The returned memory is registered in secure world and is suitable to be
168 * passed as a memory buffer in a parameter argument to
169 * tee_client_invoke_func(). The allocated memory is later freed with a
178 return shm_alloc_helper(ctx, size, PAGE_SIZE, flags, -1); in tee_shm_alloc_kernel_buf()
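For context, a minimal sketch of how a kernel-side TEE client might use this together with tee_shm_get_va() and tee_client_invoke_func(); the session id, command id and the error mapping are placeholders, not taken from this file:

#include <linux/err.h>
#include <linux/string.h>
#include <linux/tee_drv.h>

static int my_send_buffer(struct tee_context *ctx, u32 session,
			  const void *data, size_t len)
{
	struct tee_ioctl_invoke_arg arg = { };
	struct tee_param param[4] = { };
	struct tee_shm *shm;
	void *va;
	int rc;

	shm = tee_shm_alloc_kernel_buf(ctx, len);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	va = tee_shm_get_va(shm, 0);
	if (IS_ERR(va)) {
		rc = PTR_ERR(va);
		goto out;
	}
	memcpy(va, data, len);

	arg.func = 0;		/* hypothetical TA command id */
	arg.session = session;
	arg.num_params = 4;
	param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
	param[0].u.memref.shm = shm;
	param[0].u.memref.size = len;

	rc = tee_client_invoke_func(ctx, &arg, param);
	if (!rc && arg.ret)
		rc = -EIO;	/* TA-level error, mapped to a generic errno */
out:
	tee_shm_free(shm);
	return rc;
}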
183 * tee_shm_alloc_priv_buf() - Allocate shared memory for a privately shared
185 * @ctx: Context that allocates the shared memory
186 * @size: Requested size of shared memory
188 * This function returns shared memory similar to
189 * tee_shm_alloc_kernel_buf(), but with the difference that the memory
190 * might not be registered in secure world in case the driver supports
191 * passing memory not registered in advance.
202 return shm_alloc_helper(ctx, size, sizeof(long) * 2, flags, -1); in tee_shm_alloc_priv_buf()
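Backend drivers typically use this for their own message buffers rather than for data handed to a client application. A minimal hedged sketch, where struct my_msg stands in for a driver-private message layout:

#include <linux/tee_core.h>	/* tee_shm_alloc_priv_buf(); tee_drv.h on older kernels */
#include <linux/types.h>

/* Hypothetical driver-private message format. */
struct my_msg {
	u32 cmd;
	u32 status;
};

static struct tee_shm *my_alloc_msg(struct tee_context *ctx)
{
	/* Not necessarily registered in secure world up front, see above. */
	return tee_shm_alloc_priv_buf(ctx, sizeof(struct my_msg));
}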
222 shm->kaddr = alloc_pages_exact(nr_pages * PAGE_SIZE, in tee_dyn_shm_alloc_helper()
224 if (!shm->kaddr) in tee_dyn_shm_alloc_helper()
225 return -ENOMEM; in tee_dyn_shm_alloc_helper()
227 shm->paddr = virt_to_phys(shm->kaddr); in tee_dyn_shm_alloc_helper()
228 shm->size = nr_pages * PAGE_SIZE; in tee_dyn_shm_alloc_helper()
232 rc = -ENOMEM; in tee_dyn_shm_alloc_helper()
237 pages[i] = virt_to_page((u8 *)shm->kaddr + i * PAGE_SIZE); in tee_dyn_shm_alloc_helper()
239 shm->pages = pages; in tee_dyn_shm_alloc_helper()
240 shm->num_pages = nr_pages; in tee_dyn_shm_alloc_helper()
243 rc = shm_register(shm->ctx, shm, pages, nr_pages, in tee_dyn_shm_alloc_helper()
244 (unsigned long)shm->kaddr); in tee_dyn_shm_alloc_helper()
251 free_pages_exact(shm->kaddr, shm->size); in tee_dyn_shm_alloc_helper()
252 shm->kaddr = NULL; in tee_dyn_shm_alloc_helper()
262 shm_unregister(shm->ctx, shm); in tee_dyn_shm_free_helper()
263 free_pages_exact(shm->kaddr, shm->size); in tee_dyn_shm_free_helper()
264 shm->kaddr = NULL; in tee_dyn_shm_free_helper()
265 kfree(shm->pages); in tee_dyn_shm_free_helper()
266 shm->pages = NULL; in tee_dyn_shm_free_helper()
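These two helpers are meant to be called from a backend driver's pool operations, passing in the driver's register/unregister calls into secure world. A hedged sketch, assuming the tee_shm_pool_ops layout of recent kernels; all my_* names are stand-ins:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/tee_core.h>

/* Stand-ins for the backend's calls into secure world; the prototypes mirror
 * the shm_register/shm_unregister callbacks used by the helpers above. */
static int my_shm_register(struct tee_context *ctx, struct tee_shm *shm,
			   struct page **pages, size_t num_pages,
			   unsigned long start)
{
	return 0;	/* a real backend passes the page list to the TEE here */
}

static int my_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
{
	return 0;	/* ...and revokes that registration here */
}

static int my_pool_op_alloc(struct tee_shm_pool *pool, struct tee_shm *shm,
			    size_t size, size_t align)
{
	return tee_dyn_shm_alloc_helper(shm, size, align, my_shm_register);
}

static void my_pool_op_free(struct tee_shm_pool *pool, struct tee_shm *shm)
{
	tee_dyn_shm_free_helper(shm, my_shm_unregister);
}

static void my_pool_op_destroy(struct tee_shm_pool *pool)
{
	kfree(pool);
}

static const struct tee_shm_pool_ops my_pool_ops = {
	.alloc = my_pool_op_alloc,
	.free = my_pool_op_free,
	.destroy_pool = my_pool_op_destroy,
};

static struct tee_shm_pool *my_dyn_pool_alloc(void)
{
	struct tee_shm_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);

	if (!pool)
		return ERR_PTR(-ENOMEM);
	pool->ops = &my_pool_ops;
	return pool;
}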
274 struct tee_device *teedev = ctx->teedev; in register_shm_helper()
283 return ERR_PTR(-EINVAL); in register_shm_helper()
285 if (!teedev->desc->ops->shm_register || in register_shm_helper()
286 !teedev->desc->ops->shm_unregister) { in register_shm_helper()
287 ret = ERR_PTR(-ENOTSUPP); in register_shm_helper()
295 ret = ERR_PTR(-ENOMEM); in register_shm_helper()
299 refcount_set(&shm->refcount, 1); in register_shm_helper()
300 shm->flags = flags; in register_shm_helper()
301 shm->ctx = ctx; in register_shm_helper()
302 shm->id = id; in register_shm_helper()
307 ret = ERR_PTR(-ENOMEM); in register_shm_helper()
311 shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL); in register_shm_helper()
312 if (!shm->pages) { in register_shm_helper()
313 ret = ERR_PTR(-ENOMEM); in register_shm_helper()
317 len = iov_iter_extract_pages(iter, &shm->pages, LONG_MAX, num_pages, 0, in register_shm_helper()
320 ret = len ? ERR_PTR(len) : ERR_PTR(-ENOMEM); in register_shm_helper()
329 shm_get_kernel_pages(shm->pages, num_pages); in register_shm_helper()
331 shm->offset = off; in register_shm_helper()
332 shm->size = len; in register_shm_helper()
333 shm->num_pages = num_pages; in register_shm_helper()
335 rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages, in register_shm_helper()
336 shm->num_pages, start); in register_shm_helper()
345 unpin_user_pages(shm->pages, shm->num_pages); in register_shm_helper()
347 shm_put_kernel_pages(shm->pages, shm->num_pages); in register_shm_helper()
349 kfree(shm->pages); in register_shm_helper()
360 * tee_shm_register_user_buf() - Register a userspace shared memory buffer
361 * @ctx: Context that registers the shared memory
362 * @addr: The userspace address of the shared buffer
363 * @length: Length of the shared buffer
371 struct tee_device *teedev = ctx->teedev; in tee_shm_register_user_buf()
378 return ERR_PTR(-EFAULT); in tee_shm_register_user_buf()
380 mutex_lock(&teedev->mutex); in tee_shm_register_user_buf()
381 id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL); in tee_shm_register_user_buf()
382 mutex_unlock(&teedev->mutex); in tee_shm_register_user_buf()
389 mutex_lock(&teedev->mutex); in tee_shm_register_user_buf()
390 idr_remove(&teedev->idr, id); in tee_shm_register_user_buf()
391 mutex_unlock(&teedev->mutex); in tee_shm_register_user_buf()
395 mutex_lock(&teedev->mutex); in tee_shm_register_user_buf()
396 ret = idr_replace(&teedev->idr, shm, id); in tee_shm_register_user_buf()
397 mutex_unlock(&teedev->mutex); in tee_shm_register_user_buf()
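tee_shm_register_user_buf() is reached from user space through the TEE_IOC_SHM_REGISTER ioctl. A hedged user-space sketch of that path, with error handling kept minimal:

#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/tee.h>

/* Returns an fd representing the registration (or -1 with errno set);
 * *id receives the shared memory id used in later invocations. */
static int register_user_buffer(int tee_fd, void *buf, size_t len, int *id)
{
	struct tee_ioctl_shm_register_data data = {
		.addr = (uintptr_t)buf,
		.length = len,
	};
	int shm_fd;

	shm_fd = ioctl(tee_fd, TEE_IOC_SHM_REGISTER, &data);
	if (shm_fd >= 0)
		*id = data.id;	/* close(shm_fd) later drops the registration */
	return shm_fd;
}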
407 * tee_shm_register_kernel_buf() - Register kernel memory to be shared with
409 * @ctx: Context that registers the shared memory
427 return register_shm_helper(ctx, &iter, flags, -1); in tee_shm_register_kernel_buf()
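A hedged sketch of registering an existing kernel buffer so it can later be passed by reference in an invocation; struct my_bounce is a made-up holder, and the buffer must stay allocated until the matching tee_shm_free():

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>

struct my_bounce {
	void *buf;
	struct tee_shm *shm;
};

static int my_bounce_setup(struct tee_context *ctx, struct my_bounce *b,
			   size_t len)
{
	b->buf = kzalloc(len, GFP_KERNEL);
	if (!b->buf)
		return -ENOMEM;

	/* Records the pages behind buf and registers them with the TEE. */
	b->shm = tee_shm_register_kernel_buf(ctx, b->buf, len);
	if (IS_ERR(b->shm)) {
		kfree(b->buf);
		return PTR_ERR(b->shm);
	}
	return 0;
}

static void my_bounce_teardown(struct my_bounce *b)
{
	tee_shm_free(b->shm);	/* unregisters from secure world */
	kfree(b->buf);		/* only now is it safe to free the buffer */
}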
433 tee_shm_put(filp->private_data); in tee_shm_fop_release()
439 struct tee_shm *shm = filp->private_data; in tee_shm_fop_mmap()
440 size_t size = vma->vm_end - vma->vm_start; in tee_shm_fop_mmap()
442 /* Refuse sharing of shared memory provided by the application */ in tee_shm_fop_mmap()
443 if (shm->flags & TEE_SHM_USER_MAPPED) in tee_shm_fop_mmap()
444 return -EINVAL; in tee_shm_fop_mmap()
447 if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT) in tee_shm_fop_mmap()
448 return -EINVAL; in tee_shm_fop_mmap()
450 return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT, in tee_shm_fop_mmap()
451 size, vma->vm_page_prot); in tee_shm_fop_mmap()
461 * tee_shm_get_fd() - Increase reference count and return file descriptor
462 * @shm: Shared memory handle
463 * @returns user space file descriptor to shared memory
469 if (shm->id < 0) in tee_shm_get_fd()
470 return -EINVAL; in tee_shm_get_fd()
473 refcount_inc(&shm->refcount); in tee_shm_get_fd()
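The fd produced here is what user space receives from, for example, TEE_IOC_SHM_ALLOC and then maps; tee_shm_fop_mmap() above backs that mapping. A hedged user-space sketch:

#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/tee.h>

static void *alloc_and_map_shm(int tee_fd, size_t len, int *shm_id)
{
	struct tee_ioctl_shm_alloc_data data = { .size = len };
	void *p;
	int fd;

	fd = ioctl(tee_fd, TEE_IOC_SHM_ALLOC, &data);	/* returns an fd */
	if (fd < 0)
		return MAP_FAILED;

	*shm_id = data.id;
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	/* The mapping holds its own reference; munmap() plus close() together
	 * release the shared memory. */
	close(fd);
	return p;
}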
481 * tee_shm_free() - Free shared memory
482 * @shm: Handle to shared memory to free
491 * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
492 * @shm: Shared memory handle
493 * @offs: Offset from start of this shared memory
494 * @returns virtual address of the shared memory + offs if offs is within
495 * the bounds of this shared memory, else an ERR_PTR
499 if (!shm->kaddr) in tee_shm_get_va()
500 return ERR_PTR(-EINVAL); in tee_shm_get_va()
501 if (offs >= shm->size) in tee_shm_get_va()
502 return ERR_PTR(-EINVAL); in tee_shm_get_va()
503 return (char *)shm->kaddr + offs; in tee_shm_get_va()
508 * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
509 * @shm: Shared memory handle
510 * @offs: Offset from start of this shared memory
512 * @returns 0 if offs is within the bounds of this shared memory, else an
517 if (offs >= shm->size) in tee_shm_get_pa()
518 return -EINVAL; in tee_shm_get_pa()
520 *pa = shm->paddr + offs; in tee_shm_get_pa()
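A short hedged sketch combining the two accessors; my_fill_at() is an illustrative name, and callers must keep offs + len within shm->size:

#include <linux/err.h>
#include <linux/string.h>
#include <linux/tee_drv.h>

/* Copy len bytes into the shared buffer at offs and report the physical
 * address of that offset, e.g. for a message header. */
static int my_fill_at(struct tee_shm *shm, size_t offs,
		      const void *src, size_t len, phys_addr_t *pa)
{
	void *va = tee_shm_get_va(shm, offs);

	if (IS_ERR(va))
		return PTR_ERR(va);
	memcpy(va, src, len);

	return tee_shm_get_pa(shm, offs, pa);
}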
526 * tee_shm_get_from_id() - Find shared memory object and increase reference
528 * @ctx: Context owning the shared memory
529 * @id: Id of shared memory object
538 return ERR_PTR(-EINVAL); in tee_shm_get_from_id()
540 teedev = ctx->teedev; in tee_shm_get_from_id()
541 mutex_lock(&teedev->mutex); in tee_shm_get_from_id()
542 shm = idr_find(&teedev->idr, id); in tee_shm_get_from_id()
548 if (!shm || shm->ctx != ctx) in tee_shm_get_from_id()
549 shm = ERR_PTR(-EINVAL); in tee_shm_get_from_id()
551 refcount_inc(&shm->refcount); in tee_shm_get_from_id()
552 mutex_unlock(&teedev->mutex); in tee_shm_get_from_id()
558 * tee_shm_put() - Decrease reference count on a shared memory handle
559 * @shm: Shared memory handle
563 struct tee_device *teedev = shm->ctx->teedev; in tee_shm_put()
566 mutex_lock(&teedev->mutex); in tee_shm_put()
567 if (refcount_dec_and_test(&shm->refcount)) { in tee_shm_put()
574 if (shm->id >= 0) in tee_shm_put()
575 idr_remove(&teedev->idr, shm->id); in tee_shm_put()
578 mutex_unlock(&teedev->mutex); in tee_shm_put()
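A hedged sketch of the get/put discipline around these two functions; the id would typically arrive from user space or the supplicant protocol, and my_peek_shm() is an illustrative name:

#include <linux/err.h>
#include <linux/tee_core.h>	/* tee_shm_get_from_id(); tee_drv.h on older kernels */

static int my_peek_shm(struct tee_context *ctx, int id)
{
	struct tee_shm *shm;
	void *va;
	int rc = 0;

	shm = tee_shm_get_from_id(ctx, id);	/* takes a reference */
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	va = tee_shm_get_va(shm, 0);
	if (IS_ERR(va))
		rc = PTR_ERR(va);
	/* ... use va only while the reference is held ... */

	tee_shm_put(shm);	/* drop the reference taken above */
	return rc;
}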