Lines matching "shm"

Identifier search results for "shm" across the Linux TEE subsystem's shared-memory helpers (tee_shm). Each entry shows the source line number, the matching line of code and, where applicable, the enclosing function plus the kind of hit (member, argument, local).

21 	struct tee_shm shm;  member
42 static void release_registered_pages(struct tee_shm *shm) in release_registered_pages() argument
44 if (shm->pages) { in release_registered_pages()
45 if (shm->flags & TEE_SHM_USER_MAPPED) in release_registered_pages()
46 unpin_user_pages(shm->pages, shm->num_pages); in release_registered_pages()
48 shm_put_kernel_pages(shm->pages, shm->num_pages); in release_registered_pages()
50 kfree(shm->pages); in release_registered_pages()
54 static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm) in tee_shm_release() argument
56 void *p = shm; in tee_shm_release()
58 if (shm->flags & TEE_SHM_DMA_MEM) { in tee_shm_release()
62 dma_mem = container_of(shm, struct tee_shm_dma_mem, shm); in tee_shm_release()
64 dma_free_pages(&teedev->dev, shm->size, dma_mem->page, in tee_shm_release()
67 } else if (shm->flags & TEE_SHM_DMA_BUF) { in tee_shm_release()
70 ref = container_of(shm, struct tee_shm_dmabuf_ref, shm); in tee_shm_release()
73 } else if (shm->flags & TEE_SHM_POOL) { in tee_shm_release()
74 teedev->pool->ops->free(teedev->pool, shm); in tee_shm_release()
75 } else if (shm->flags & TEE_SHM_DYNAMIC) { in tee_shm_release()
76 int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm); in tee_shm_release()
80 "unregister shm %p failed: %d", shm, rc); in tee_shm_release()
82 release_registered_pages(shm); in tee_shm_release()
85 teedev_ctx_put(shm->ctx); in tee_shm_release()
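
The release path above recovers its wrapper allocations through the embedded shm member (the "struct tee_shm shm;" hit at line 21): container_of() walks from the tee_shm back to the enclosing tee_shm_dma_mem or tee_shm_dmabuf_ref. A minimal, kernel-internal sketch of that pattern for the DMA-memory case; the dma_addr field name and the DMA direction are assumptions, and the real structs carry more state:

struct tee_shm_dma_mem {
        struct tee_shm shm;     /* must stay embedded: container_of() below relies on it */
        dma_addr_t dma_addr;    /* assumed field name */
        struct page *page;
};

static void dma_mem_release(struct tee_device *teedev, struct tee_shm *shm)
{
        /* Walk back from the embedded member to the wrapper allocation. */
        struct tee_shm_dma_mem *dma_mem =
                container_of(shm, struct tee_shm_dma_mem, shm);

        dma_free_pages(&teedev->dev, shm->size, dma_mem->page,
                       dma_mem->dma_addr, DMA_BIDIRECTIONAL);  /* direction assumed */
        kfree(dma_mem);
}

Because shm happens to be the first member the two pointers coincide, but container_of() keeps the code correct even if the member is ever moved.
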
96 struct tee_shm *shm; in shm_alloc_helper() local
109 shm = kzalloc(sizeof(*shm), GFP_KERNEL); in shm_alloc_helper()
110 if (!shm) { in shm_alloc_helper()
115 refcount_set(&shm->refcount, 1); in shm_alloc_helper()
116 shm->flags = flags; in shm_alloc_helper()
117 shm->id = id; in shm_alloc_helper()
120 * We're assigning this as it is needed if the shm is to be in shm_alloc_helper()
122 * to call teedev_ctx_get() or clear shm->ctx in case it's not in shm_alloc_helper()
125 shm->ctx = ctx; in shm_alloc_helper()
127 rc = teedev->pool->ops->alloc(teedev->pool, shm, size, align); in shm_alloc_helper()
134 return shm; in shm_alloc_helper()
136 kfree(shm); in shm_alloc_helper()
158 struct tee_shm *shm; in tee_shm_alloc_user_buf() local
168 shm = shm_alloc_helper(ctx, size, PAGE_SIZE, flags, id); in tee_shm_alloc_user_buf()
169 if (IS_ERR(shm)) { in tee_shm_alloc_user_buf()
173 return shm; in tee_shm_alloc_user_buf()
177 ret = idr_replace(&teedev->idr, shm, id); in tee_shm_alloc_user_buf()
180 tee_shm_free(shm); in tee_shm_alloc_user_buf()
184 return shm; in tee_shm_alloc_user_buf()
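
The idr_replace() hit at line 177 is the publish half of a reserve-then-publish sequence: an id is reserved with a NULL placeholder before the shm exists, so lookups cannot return a half-initialised object, and the real pointer is swapped in only once allocation succeeds. A plausible sketch of that sequence, not a verbatim copy; the alloc_and_publish() name is hypothetical:

static struct tee_shm *alloc_and_publish(struct tee_context *ctx, size_t size,
                                         u32 flags)
{
        struct tee_device *teedev = ctx->teedev;
        struct tee_shm *shm;
        void *ret;
        int id;

        /* Reserve an id with a NULL placeholder; nothing can be looked up yet. */
        mutex_lock(&teedev->mutex);
        id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL);
        mutex_unlock(&teedev->mutex);
        if (id < 0)
                return ERR_PTR(id);

        shm = shm_alloc_helper(ctx, size, PAGE_SIZE, flags, id);
        if (IS_ERR(shm)) {
                mutex_lock(&teedev->mutex);
                idr_remove(&teedev->idr, id);   /* hand the reserved slot back */
                mutex_unlock(&teedev->mutex);
                return shm;
        }

        /* Publish: swap the placeholder for the fully initialised shm. */
        mutex_lock(&teedev->mutex);
        ret = idr_replace(&teedev->idr, shm, id);
        mutex_unlock(&teedev->mutex);
        if (IS_ERR(ret)) {
                tee_shm_free(shm);
                return ret;
        }

        return shm;
}
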
223 refcount_set(&ref->shm.refcount, 1); in tee_shm_register_fd()
224 ref->shm.ctx = ctx; in tee_shm_register_fd()
225 ref->shm.id = -1; in tee_shm_register_fd()
226 ref->shm.flags = TEE_SHM_DMA_BUF; in tee_shm_register_fd()
235 &ref->offset, &ref->shm, in tee_shm_register_fd()
240 mutex_lock(&ref->shm.ctx->teedev->mutex); in tee_shm_register_fd()
241 ref->shm.id = idr_alloc(&ref->shm.ctx->teedev->idr, &ref->shm, in tee_shm_register_fd()
243 mutex_unlock(&ref->shm.ctx->teedev->mutex); in tee_shm_register_fd()
244 if (ref->shm.id < 0) { in tee_shm_register_fd()
245 rc = ref->shm.id; in tee_shm_register_fd()
249 return &ref->shm; in tee_shm_register_fd()
322 refcount_set(&dma_mem->shm.refcount, 1); in tee_shm_alloc_dma_mem()
323 dma_mem->shm.ctx = ctx; in tee_shm_alloc_dma_mem()
324 dma_mem->shm.paddr = page_to_phys(page); in tee_shm_alloc_dma_mem()
327 dma_mem->shm.size = page_count * PAGE_SIZE; in tee_shm_alloc_dma_mem()
328 dma_mem->shm.flags = TEE_SHM_DMA_MEM; in tee_shm_alloc_dma_mem()
332 return &dma_mem->shm; in tee_shm_alloc_dma_mem()
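
The tee_shm_alloc_dma_mem() hits at 322-332 fill in a tee_shm whose backing pages come from the DMA API; the matching teardown is the dma_free_pages() call at line 64. A hedged sketch of the allocation side, with the dma_addr field name, the DMA_BIDIRECTIONAL direction and the error handling assumed:

        struct tee_shm_dma_mem *dma_mem;
        dma_addr_t dma_addr;
        struct page *page;

        dma_mem = kzalloc(sizeof(*dma_mem), GFP_KERNEL);
        if (!dma_mem)
                return ERR_PTR(-ENOMEM);

        /* Pages the device can DMA to/from; dma_addr is the device-visible address. */
        page = dma_alloc_pages(&teedev->dev, page_count * PAGE_SIZE,
                               &dma_addr, DMA_BIDIRECTIONAL, GFP_KERNEL);
        if (!page) {
                kfree(dma_mem);
                return ERR_PTR(-ENOMEM);
        }

        dma_mem->page = page;
        dma_mem->dma_addr = dma_addr;                   /* assumed field name */
        refcount_set(&dma_mem->shm.refcount, 1);
        dma_mem->shm.ctx = ctx;
        dma_mem->shm.paddr = page_to_phys(page);        /* CPU physical address */
        dma_mem->shm.size = page_count * PAGE_SIZE;
        dma_mem->shm.flags = TEE_SHM_DMA_MEM;

        return &dma_mem->shm;
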
352 int tee_dyn_shm_alloc_helper(struct tee_shm *shm, size_t size, size_t align, in tee_dyn_shm_alloc_helper() argument
354 struct tee_shm *shm, in tee_dyn_shm_alloc_helper()
368 shm->kaddr = alloc_pages_exact(nr_pages * PAGE_SIZE, in tee_dyn_shm_alloc_helper()
370 if (!shm->kaddr) in tee_dyn_shm_alloc_helper()
373 shm->paddr = virt_to_phys(shm->kaddr); in tee_dyn_shm_alloc_helper()
374 shm->size = nr_pages * PAGE_SIZE; in tee_dyn_shm_alloc_helper()
383 pages[i] = virt_to_page((u8 *)shm->kaddr + i * PAGE_SIZE); in tee_dyn_shm_alloc_helper()
385 shm->pages = pages; in tee_dyn_shm_alloc_helper()
386 shm->num_pages = nr_pages; in tee_dyn_shm_alloc_helper()
389 rc = shm_register(shm->ctx, shm, pages, nr_pages, in tee_dyn_shm_alloc_helper()
390 (unsigned long)shm->kaddr); in tee_dyn_shm_alloc_helper()
399 free_pages_exact(shm->kaddr, shm->size); in tee_dyn_shm_alloc_helper()
400 shm->kaddr = NULL; in tee_dyn_shm_alloc_helper()
405 void tee_dyn_shm_free_helper(struct tee_shm *shm, in tee_dyn_shm_free_helper() argument
407 struct tee_shm *shm)) in tee_dyn_shm_free_helper()
410 shm_unregister(shm->ctx, shm); in tee_dyn_shm_free_helper()
411 free_pages_exact(shm->kaddr, shm->size); in tee_dyn_shm_free_helper()
412 shm->kaddr = NULL; in tee_dyn_shm_free_helper()
413 kfree(shm->pages); in tee_dyn_shm_free_helper()
414 shm->pages = NULL; in tee_dyn_shm_free_helper()
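
Both dyn-shm helpers take the driver's register/unregister callback as a function pointer; the second "struct tee_shm *shm" hits at lines 354 and 407 come from those callback prototypes, not from a duplicated parameter. A minimal sketch of how a driver-side pool could wire them up, matching the pool->ops->alloc/free calls seen at lines 127 and 74; all my_* names are hypothetical and the firmware call is elided:

static int my_shm_register(struct tee_context *ctx, struct tee_shm *shm,
                           struct page **pages, size_t num_pages,
                           unsigned long start)
{
        /* Hypothetical: tell the secure world about the pages backing @shm. */
        return 0;
}

static int my_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
{
        /* Hypothetical counterpart of my_shm_register(). */
        return 0;
}

static int my_pool_op_alloc(struct tee_shm_pool *pool, struct tee_shm *shm,
                            size_t size, size_t align)
{
        return tee_dyn_shm_alloc_helper(shm, size, align, my_shm_register);
}

static void my_pool_op_free(struct tee_shm_pool *pool, struct tee_shm *shm)
{
        tee_dyn_shm_free_helper(shm, my_shm_unregister);
}
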
423 struct tee_shm *shm; in register_shm_helper() local
441 shm = kzalloc(sizeof(*shm), GFP_KERNEL); in register_shm_helper()
442 if (!shm) { in register_shm_helper()
447 refcount_set(&shm->refcount, 1); in register_shm_helper()
448 shm->flags = flags; in register_shm_helper()
449 shm->ctx = ctx; in register_shm_helper()
450 shm->id = id; in register_shm_helper()
459 shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL); in register_shm_helper()
460 if (!shm->pages) { in register_shm_helper()
465 len = iov_iter_extract_pages(iter, &shm->pages, LONG_MAX, num_pages, 0, in register_shm_helper()
475 shm->num_pages = len / PAGE_SIZE; in register_shm_helper()
485 shm_get_kernel_pages(shm->pages, num_pages); in register_shm_helper()
487 shm->offset = off; in register_shm_helper()
488 shm->size = len; in register_shm_helper()
489 shm->num_pages = num_pages; in register_shm_helper()
491 rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages, in register_shm_helper()
492 shm->num_pages, start); in register_shm_helper()
498 return shm; in register_shm_helper()
501 unpin_user_pages(shm->pages, shm->num_pages); in register_shm_helper()
503 shm_put_kernel_pages(shm->pages, shm->num_pages); in register_shm_helper()
505 kfree(shm->pages); in register_shm_helper()
507 kfree(shm); in register_shm_helper()
528 struct tee_shm *shm; in tee_shm_register_user_buf() local
543 shm = register_shm_helper(ctx, &iter, flags, id); in tee_shm_register_user_buf()
544 if (IS_ERR(shm)) { in tee_shm_register_user_buf()
548 return shm; in tee_shm_register_user_buf()
552 ret = idr_replace(&teedev->idr, shm, id); in tee_shm_register_user_buf()
555 tee_shm_free(shm); in tee_shm_register_user_buf()
559 return shm; in tee_shm_register_user_buf()
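
register_shm_helper() consumes an iov_iter and extracts (and pins) the pages behind it, as the iov_iter_extract_pages() hit at line 465 shows. For the user-buffer case the iterator is plausibly built with import_ubuf() before the call; a short sketch, with the direction flag and error handling assumed:

        struct iov_iter iter;
        int ret;

        /* Describe the user buffer so the helper can extract its pages. */
        ret = import_ubuf(ITER_DEST, (void __user *)addr, length, &iter);
        if (ret)
                return ERR_PTR(ret);

        shm = register_shm_helper(ctx, &iter, flags, id);
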
595 struct tee_shm *shm = filp->private_data; in tee_shm_fop_mmap() local
599 if (shm->flags & TEE_SHM_USER_MAPPED) in tee_shm_fop_mmap()
602 if (shm->flags & TEE_SHM_DMA_BUF) in tee_shm_fop_mmap()
606 if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT) in tee_shm_fop_mmap()
609 return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT, in tee_shm_fop_mmap()
621 * @shm: Shared memory handle
624 int tee_shm_get_fd(struct tee_shm *shm) in tee_shm_get_fd() argument
628 if (shm->id < 0) in tee_shm_get_fd()
632 refcount_inc(&shm->refcount); in tee_shm_get_fd()
633 fd = anon_inode_getfd("tee_shm", &tee_shm_fops, shm, O_RDWR); in tee_shm_get_fd()
635 tee_shm_put(shm); in tee_shm_get_fd()
641 * @shm: Handle to shared memory to free
643 void tee_shm_free(struct tee_shm *shm) in tee_shm_free() argument
645 tee_shm_put(shm); in tee_shm_free()
651 * @shm: Shared memory handle
656 void *tee_shm_get_va(struct tee_shm *shm, size_t offs) in tee_shm_get_va() argument
658 if (!shm->kaddr) in tee_shm_get_va()
660 if (offs >= shm->size) in tee_shm_get_va()
662 return (char *)shm->kaddr + offs; in tee_shm_get_va()
668 * @shm: Shared memory handle
674 int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa) in tee_shm_get_pa() argument
676 if (offs >= shm->size) in tee_shm_get_pa()
679 *pa = shm->paddr + offs; in tee_shm_get_pa()
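
Both accessors reject offsets at or beyond shm->size, and tee_shm_get_va() additionally fails when the shm has no kernel mapping (kaddr is NULL). A short usage sketch; req, req_len and offs are hypothetical caller state:

        void *va;
        phys_addr_t pa;
        int rc;

        /* Copy a request into the shared buffer via its kernel mapping... */
        va = tee_shm_get_va(shm, offs);
        if (IS_ERR(va))
                return PTR_ERR(va);     /* no kernel mapping, or offs out of range */
        memcpy(va, req, req_len);

        /* ...and hand the corresponding physical address to the other side. */
        rc = tee_shm_get_pa(shm, offs, &pa);
        if (rc)
                return rc;
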
694 struct tee_shm *shm; in tee_shm_get_from_id() local
701 shm = idr_find(&teedev->idr, id); in tee_shm_get_from_id()
707 if (!shm || shm->ctx != ctx) in tee_shm_get_from_id()
708 shm = ERR_PTR(-EINVAL); in tee_shm_get_from_id()
710 refcount_inc(&shm->refcount); in tee_shm_get_from_id()
712 return shm; in tee_shm_get_from_id()
718 * @shm: Shared memory handle
720 void tee_shm_put(struct tee_shm *shm) in tee_shm_put() argument
725 if (!shm || !shm->ctx || !shm->ctx->teedev) in tee_shm_put()
728 teedev = shm->ctx->teedev; in tee_shm_put()
730 if (refcount_dec_and_test(&shm->refcount)) { in tee_shm_put()
737 if (shm->id >= 0) in tee_shm_put()
738 idr_remove(&teedev->idr, shm->id); in tee_shm_put()
744 tee_shm_release(teedev, shm); in tee_shm_put()
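
tee_shm_get_from_id() takes a reference on the shm before returning it, and tee_shm_put() drops that reference; the final put removes the id from the IDR and ends in tee_shm_release(), which is why callers must not touch the shm after putting it. A minimal usage sketch around a user-supplied id:

        struct tee_shm *shm;

        shm = tee_shm_get_from_id(ctx, id);
        if (IS_ERR(shm))
                return PTR_ERR(shm);    /* unknown id or wrong context */

        /*
         * The reference taken above keeps the buffer alive while it is used,
         * e.g. via tee_shm_get_va(shm, 0) or shm->paddr.
         */

        tee_shm_put(shm);               /* may be the last reference: the shm
                                         * can be released here, do not reuse it */
        return 0;
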