Lines Matching +full:resource +full:- +full:attachments
1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
7 #include <linux/dma-buf.h>
8 #include <linux/dma-mapping.h>
9 #include <linux/dma-resv.h>
199 /* Lock for dma buf attachments */
201 struct list_head attachments; member
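The two lines above come from the definition of struct fastrpc_buf. A sketch of the whole structure, reconstructed from the fields this listing shows being initialized in __fastrpc_buf_alloc() and used by the dma-buf callbacks further down (field order and comments are illustrative, not a verbatim quote):

struct fastrpc_buf {
        struct fastrpc_user *fl;        /* owning user context (buf->fl below) */
        struct dma_buf *dmabuf;         /* set when exported in fastrpc_dmabuf_alloc() */
        struct device *dev;
        void *virt;                     /* CPU address from dma_alloc_coherent() */
        u64 phys;                       /* device address; session SID lives in bits 63:32 */
        u64 size;
        /* Lock for dma buf attachments */
        struct mutex lock;
        struct list_head attachments;
        uintptr_t raddr;                /* address the DSP mapped the buffer at */
        struct list_head node;          /* entry in fl->mmaps */
};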
314 if (map->table) { in fastrpc_free_map()
315 if (map->attr & FASTRPC_ATTR_SECUREMAP) { in fastrpc_free_map()
317 int vmid = map->fl->cctx->vmperms[0].vmid; in fastrpc_free_map()
323 err = qcom_scm_assign_mem(map->phys, map->len, in fastrpc_free_map()
326 dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d\n", in fastrpc_free_map()
327 map->phys, map->len, err); in fastrpc_free_map()
331 dma_buf_unmap_attachment_unlocked(map->attach, map->table, in fastrpc_free_map()
333 dma_buf_detach(map->buf, map->attach); in fastrpc_free_map()
334 dma_buf_put(map->buf); in fastrpc_free_map()
337 if (map->fl) { in fastrpc_free_map()
338 spin_lock(&map->fl->lock); in fastrpc_free_map()
339 list_del(&map->node); in fastrpc_free_map()
340 spin_unlock(&map->fl->lock); in fastrpc_free_map()
341 map->fl = NULL; in fastrpc_free_map()
350 kref_put(&map->refcount, fastrpc_free_map); in fastrpc_map_put()
356 return -ENOENT; in fastrpc_map_get()
358 return kref_get_unless_zero(&map->refcount) ? 0 : -ENOENT; in fastrpc_map_get()
367 int ret = -ENOENT; in fastrpc_map_lookup()
373 spin_lock(&fl->lock); in fastrpc_map_lookup()
374 list_for_each_entry(map, &fl->maps, node) { in fastrpc_map_lookup()
375 if (map->fd != fd || map->buf != buf) in fastrpc_map_lookup()
382 spin_unlock(&fl->lock); in fastrpc_map_lookup()
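fastrpc_map_lookup() holds fl->lock while calling fastrpc_map_get() (line 358 above), and the get uses kref_get_unless_zero() so a map whose last reference has already been dropped is never handed out mid-teardown. A minimal userspace model of that "get unless zero" primitive with C11 atomics (the names here are illustrative, not the kernel's):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for struct kref: the get fails once the count has hit zero. */
struct ref { atomic_int count; };

static bool ref_get_unless_zero(struct ref *r)
{
        int old = atomic_load(&r->count);

        while (old != 0) {
                /* Only increment if nobody dropped the count to zero meanwhile. */
                if (atomic_compare_exchange_weak(&r->count, &old, old + 1))
                        return true;
        }
        return false;   /* object is being torn down; caller must not use it */
}

int main(void)
{
        struct ref r = { .count = 1 };

        printf("get on live object: %d\n", ref_get_unless_zero(&r));    /* 1 */
        atomic_store(&r.count, 0);              /* simulate the last put */
        printf("get on dying object: %d\n", ref_get_unless_zero(&r));   /* 0 */
        return 0;
}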
391 dma_free_coherent(buf->dev, buf->size, buf->virt, in fastrpc_buf_free()
392 FASTRPC_PHYS(buf->phys)); in fastrpc_buf_free()
403 return -ENOMEM; in __fastrpc_buf_alloc()
405 INIT_LIST_HEAD(&buf->attachments); in __fastrpc_buf_alloc()
406 INIT_LIST_HEAD(&buf->node); in __fastrpc_buf_alloc()
407 mutex_init(&buf->lock); in __fastrpc_buf_alloc()
409 buf->fl = fl; in __fastrpc_buf_alloc()
410 buf->virt = NULL; in __fastrpc_buf_alloc()
411 buf->phys = 0; in __fastrpc_buf_alloc()
412 buf->size = size; in __fastrpc_buf_alloc()
413 buf->dev = dev; in __fastrpc_buf_alloc()
414 buf->raddr = 0; in __fastrpc_buf_alloc()
416 buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys, in __fastrpc_buf_alloc()
418 if (!buf->virt) { in __fastrpc_buf_alloc()
419 mutex_destroy(&buf->lock); in __fastrpc_buf_alloc()
421 return -ENOMEM; in __fastrpc_buf_alloc()
441 if (fl->sctx && fl->sctx->sid) in fastrpc_buf_alloc()
442 buf->phys += ((u64)fl->sctx->sid << 32); in fastrpc_buf_alloc()
450 struct device *rdev = &fl->cctx->rpdev->dev; in fastrpc_remote_heap_alloc()
466 kref_get(&cctx->refcount); in fastrpc_channel_ctx_get()
471 kref_put(&cctx->refcount, fastrpc_channel_ctx_free); in fastrpc_channel_ctx_put()
482 cctx = ctx->cctx; in fastrpc_context_free()
484 for (i = 0; i < ctx->nbufs; i++) in fastrpc_context_free()
485 fastrpc_map_put(ctx->maps[i]); in fastrpc_context_free()
487 if (ctx->buf) in fastrpc_context_free()
488 fastrpc_buf_free(ctx->buf); in fastrpc_context_free()
490 spin_lock_irqsave(&cctx->lock, flags); in fastrpc_context_free()
491 idr_remove(&cctx->ctx_idr, ctx->ctxid >> 4); in fastrpc_context_free()
492 spin_unlock_irqrestore(&cctx->lock, flags); in fastrpc_context_free()
494 kfree(ctx->maps); in fastrpc_context_free()
495 kfree(ctx->olaps); in fastrpc_context_free()
503 kref_get(&ctx->refcount); in fastrpc_context_get()
508 kref_put(&ctx->refcount, fastrpc_context_free); in fastrpc_context_put()
519 #define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
525 int st = CMP(pa->start, pb->start); in olaps_cmp()
527 int ed = CMP(pb->end, pa->end); in olaps_cmp()
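CMP() (line 519) does an explicit three-way comparison instead of the classic (aa) - (bb) because start and end are 64-bit addresses: a subtraction truncated to the int that sort()'s comparator must return can misreport the order. A standalone illustration of the failure mode:

#include <stdint.h>
#include <stdio.h>

#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)

int main(void)
{
        uint64_t a = 1, b = 0x100000001ULL;     /* a < b */

        /* The difference is 0xFFFFFFFF00000000; its low 32 bits are zero,
         * so truncated to int the subtraction claims "equal". */
        printf("subtraction: %d\n", (int)(a - b));      /* 0  -- wrong */
        printf("CMP:         %d\n", CMP(a, b));         /* -1 -- right */
        return 0;
}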
537 for (i = 0; i < ctx->nbufs; ++i) { in fastrpc_get_buff_overlaps()
538 ctx->olaps[i].start = ctx->args[i].ptr; in fastrpc_get_buff_overlaps()
539 ctx->olaps[i].end = ctx->olaps[i].start + ctx->args[i].length; in fastrpc_get_buff_overlaps()
540 ctx->olaps[i].raix = i; in fastrpc_get_buff_overlaps()
543 sort(ctx->olaps, ctx->nbufs, sizeof(*ctx->olaps), olaps_cmp, NULL); in fastrpc_get_buff_overlaps()
545 for (i = 0; i < ctx->nbufs; ++i) { in fastrpc_get_buff_overlaps()
547 if (ctx->olaps[i].start < max_end) { in fastrpc_get_buff_overlaps()
548 ctx->olaps[i].mstart = max_end; in fastrpc_get_buff_overlaps()
549 ctx->olaps[i].mend = ctx->olaps[i].end; in fastrpc_get_buff_overlaps()
550 ctx->olaps[i].offset = max_end - ctx->olaps[i].start; in fastrpc_get_buff_overlaps()
552 if (ctx->olaps[i].end > max_end) { in fastrpc_get_buff_overlaps()
553 max_end = ctx->olaps[i].end; in fastrpc_get_buff_overlaps()
555 ctx->olaps[i].mend = 0; in fastrpc_get_buff_overlaps()
556 ctx->olaps[i].mstart = 0; in fastrpc_get_buff_overlaps()
560 ctx->olaps[i].mend = ctx->olaps[i].end; in fastrpc_get_buff_overlaps()
561 ctx->olaps[i].mstart = ctx->olaps[i].start; in fastrpc_get_buff_overlaps()
562 ctx->olaps[i].offset = 0; in fastrpc_get_buff_overlaps()
563 max_end = ctx->olaps[i].end; in fastrpc_get_buff_overlaps()
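Taken together, lines 537-563 deduplicate overlapping argument buffers: after sorting by start address, max_end tracks the furthest byte already covered, so a buffer starting below max_end only needs its uncovered tail ([mstart, mend), with offset recording how far into the buffer that tail begins), and a buffer ending at or below max_end is fully contained and gets mstart = mend = 0. A standalone walk-through of the same logic on three example ranges (simplified, illustrative types):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct olap { uint64_t start, end, mstart, mend, offset; };

static int olap_cmp(const void *a, const void *b)
{
        const struct olap *pa = a, *pb = b;

        if (pa->start != pb->start)
                return pa->start < pb->start ? -1 : 1;
        /* Same start: larger buffer first, mirroring the driver's tie-break. */
        return pb->end == pa->end ? 0 : pb->end < pa->end ? -1 : 1;
}

int main(void)
{
        /* Three args: [0,100), [50,80) nested, [90,120) partial overlap. */
        struct olap o[] = { {0, 100}, {50, 80}, {90, 120} };
        uint64_t max_end = 0;

        qsort(o, 3, sizeof(o[0]), olap_cmp);

        for (int i = 0; i < 3; i++) {
                if (o[i].start < max_end) {     /* overlaps an earlier buffer */
                        o[i].mstart = max_end;
                        o[i].mend = o[i].end;
                        o[i].offset = max_end - o[i].start;
                        if (o[i].end > max_end)
                                max_end = o[i].end;
                        else
                                o[i].mstart = o[i].mend = 0;    /* fully contained */
                } else {
                        o[i].mstart = o[i].start;
                        o[i].mend = o[i].end;
                        o[i].offset = 0;
                        max_end = o[i].end;
                }
                printf("[%llu,%llu) -> m[%llu,%llu) offset %llu\n",
                       (unsigned long long)o[i].start, (unsigned long long)o[i].end,
                       (unsigned long long)o[i].mstart, (unsigned long long)o[i].mend,
                       (unsigned long long)o[i].offset);
        }
        return 0;
}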
572 struct fastrpc_channel_ctx *cctx = user->cctx; in fastrpc_context_alloc()
579 return ERR_PTR(-ENOMEM); in fastrpc_context_alloc()
581 INIT_LIST_HEAD(&ctx->node); in fastrpc_context_alloc()
582 ctx->fl = user; in fastrpc_context_alloc()
583 ctx->nscalars = REMOTE_SCALARS_LENGTH(sc); in fastrpc_context_alloc()
584 ctx->nbufs = REMOTE_SCALARS_INBUFS(sc) + in fastrpc_context_alloc()
587 if (ctx->nscalars) { in fastrpc_context_alloc()
588 ctx->maps = kcalloc(ctx->nscalars, in fastrpc_context_alloc()
589 sizeof(*ctx->maps), GFP_KERNEL); in fastrpc_context_alloc()
590 if (!ctx->maps) { in fastrpc_context_alloc()
592 return ERR_PTR(-ENOMEM); in fastrpc_context_alloc()
594 ctx->olaps = kcalloc(ctx->nscalars, in fastrpc_context_alloc()
595 sizeof(*ctx->olaps), GFP_KERNEL); in fastrpc_context_alloc()
596 if (!ctx->olaps) { in fastrpc_context_alloc()
597 kfree(ctx->maps); in fastrpc_context_alloc()
599 return ERR_PTR(-ENOMEM); in fastrpc_context_alloc()
601 ctx->args = args; in fastrpc_context_alloc()
608 ctx->sc = sc; in fastrpc_context_alloc()
609 ctx->retval = -1; in fastrpc_context_alloc()
610 ctx->pid = current->pid; in fastrpc_context_alloc()
611 ctx->client_id = user->client_id; in fastrpc_context_alloc()
612 ctx->cctx = cctx; in fastrpc_context_alloc()
613 init_completion(&ctx->work); in fastrpc_context_alloc()
614 INIT_WORK(&ctx->put_work, fastrpc_context_put_wq); in fastrpc_context_alloc()
616 spin_lock(&user->lock); in fastrpc_context_alloc()
617 list_add_tail(&ctx->node, &user->pending); in fastrpc_context_alloc()
618 spin_unlock(&user->lock); in fastrpc_context_alloc()
620 spin_lock_irqsave(&cctx->lock, flags); in fastrpc_context_alloc()
621 ret = idr_alloc_cyclic(&cctx->ctx_idr, ctx, 1, in fastrpc_context_alloc()
624 spin_unlock_irqrestore(&cctx->lock, flags); in fastrpc_context_alloc()
627 ctx->ctxid = ret << 4; in fastrpc_context_alloc()
628 spin_unlock_irqrestore(&cctx->lock, flags); in fastrpc_context_alloc()
630 kref_init(&ctx->refcount); in fastrpc_context_alloc()
634 spin_lock(&user->lock); in fastrpc_context_alloc()
635 list_del(&ctx->node); in fastrpc_context_alloc()
636 spin_unlock(&user->lock); in fastrpc_context_alloc()
638 kfree(ctx->maps); in fastrpc_context_alloc()
639 kfree(ctx->olaps); in fastrpc_context_alloc()
649 struct fastrpc_dma_buf_attachment *a = attachment->priv; in fastrpc_map_dma_buf()
653 table = &a->sgt; in fastrpc_map_dma_buf()
655 ret = dma_map_sgtable(attachment->dev, table, dir, 0); in fastrpc_map_dma_buf()
665 dma_unmap_sgtable(attach->dev, table, dir, 0); in fastrpc_unmap_dma_buf()
670 struct fastrpc_buf *buffer = dmabuf->priv; in fastrpc_release()
679 struct fastrpc_buf *buffer = dmabuf->priv; in fastrpc_dma_buf_attach()
684 return -ENOMEM; in fastrpc_dma_buf_attach()
686 ret = dma_get_sgtable(buffer->dev, &a->sgt, buffer->virt, in fastrpc_dma_buf_attach()
687 FASTRPC_PHYS(buffer->phys), buffer->size); in fastrpc_dma_buf_attach()
689 dev_err(buffer->dev, "failed to get scatterlist from DMA API\n"); in fastrpc_dma_buf_attach()
691 return -EINVAL; in fastrpc_dma_buf_attach()
694 a->dev = attachment->dev; in fastrpc_dma_buf_attach()
695 INIT_LIST_HEAD(&a->node); in fastrpc_dma_buf_attach()
696 attachment->priv = a; in fastrpc_dma_buf_attach()
698 mutex_lock(&buffer->lock); in fastrpc_dma_buf_attach()
699 list_add(&a->node, &buffer->attachments); in fastrpc_dma_buf_attach()
700 mutex_unlock(&buffer->lock); in fastrpc_dma_buf_attach()
708 struct fastrpc_dma_buf_attachment *a = attachment->priv; in fastrpc_dma_buf_detatch()
709 struct fastrpc_buf *buffer = dmabuf->priv; in fastrpc_dma_buf_detatch()
711 mutex_lock(&buffer->lock); in fastrpc_dma_buf_detatch()
712 list_del(&a->node); in fastrpc_dma_buf_detatch()
713 mutex_unlock(&buffer->lock); in fastrpc_dma_buf_detatch()
714 sg_free_table(&a->sgt); in fastrpc_dma_buf_detatch()
720 struct fastrpc_buf *buf = dmabuf->priv; in fastrpc_vmap()
722 iosys_map_set_vaddr(map, buf->virt); in fastrpc_vmap()
730 struct fastrpc_buf *buf = dmabuf->priv; in fastrpc_mmap()
731 size_t size = vma->vm_end - vma->vm_start; in fastrpc_mmap()
733 dma_resv_assert_held(dmabuf->resv); in fastrpc_mmap()
735 return dma_mmap_coherent(buf->dev, vma, buf->virt, in fastrpc_mmap()
736 FASTRPC_PHYS(buf->phys), size); in fastrpc_mmap()
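The callbacks above (attach/detach, map/unmap, release, vmap, mmap) are the exporter side of the dma-buf contract. In the driver they are collected into a dma_buf_ops table that fastrpc_dmabuf_alloc() (line 1647 below) hands to dma_buf_export() through its exp_info. Reconstructed from the function names in this listing, the table looks roughly like this (a sketch, not a verbatim quote):

static const struct dma_buf_ops fastrpc_dma_buf_ops = {
        .attach = fastrpc_dma_buf_attach,       /* build per-importer sg_table */
        .detach = fastrpc_dma_buf_detatch,      /* spelling as in the driver */
        .map_dma_buf = fastrpc_map_dma_buf,     /* dma_map_sgtable() for the importer */
        .unmap_dma_buf = fastrpc_unmap_dma_buf,
        .mmap = fastrpc_mmap,                   /* dma_mmap_coherent() into userspace */
        .vmap = fastrpc_vmap,                   /* kernel vaddr via iosys_map */
        .release = fastrpc_release,
};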
752 struct fastrpc_session_ctx *sess = fl->sctx; in fastrpc_map_attach()
760 return -ENOMEM; in fastrpc_map_attach()
762 INIT_LIST_HEAD(&map->node); in fastrpc_map_attach()
763 kref_init(&map->refcount); in fastrpc_map_attach()
765 map->fl = fl; in fastrpc_map_attach()
766 map->fd = fd; in fastrpc_map_attach()
767 map->buf = dma_buf_get(fd); in fastrpc_map_attach()
768 if (IS_ERR(map->buf)) { in fastrpc_map_attach()
769 err = PTR_ERR(map->buf); in fastrpc_map_attach()
773 map->attach = dma_buf_attach(map->buf, sess->dev); in fastrpc_map_attach()
774 if (IS_ERR(map->attach)) { in fastrpc_map_attach()
775 dev_err(sess->dev, "Failed to attach dmabuf\n"); in fastrpc_map_attach()
776 err = PTR_ERR(map->attach); in fastrpc_map_attach()
780 table = dma_buf_map_attachment_unlocked(map->attach, DMA_BIDIRECTIONAL); in fastrpc_map_attach()
785 map->table = table; in fastrpc_map_attach()
788 map->phys = sg_phys(map->table->sgl); in fastrpc_map_attach()
790 map->phys = sg_dma_address(map->table->sgl); in fastrpc_map_attach()
791 map->phys += ((u64)fl->sctx->sid << 32); in fastrpc_map_attach()
793 for_each_sg(map->table->sgl, sgl, map->table->nents, in fastrpc_map_attach()
795 map->size += sg_dma_len(sgl); in fastrpc_map_attach()
796 if (len > map->size) { in fastrpc_map_attach()
797 dev_dbg(sess->dev, "Bad size passed len 0x%llx map size 0x%llx\n", in fastrpc_map_attach()
798 len, map->size); in fastrpc_map_attach()
799 err = -EINVAL; in fastrpc_map_attach()
802 map->va = sg_virt(map->table->sgl); in fastrpc_map_attach()
803 map->len = len; in fastrpc_map_attach()
815 dst_perms[1].vmid = fl->cctx->vmperms[0].vmid; in fastrpc_map_attach()
817 map->attr = attr; in fastrpc_map_attach()
818 err = qcom_scm_assign_mem(map->phys, (u64)map->len, &src_perms, dst_perms, 2); in fastrpc_map_attach()
820 dev_err(sess->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d\n", in fastrpc_map_attach()
821 map->phys, map->len, err); in fastrpc_map_attach()
825 spin_lock(&fl->lock); in fastrpc_map_attach()
826 list_add_tail(&map->node, &fl->maps); in fastrpc_map_attach()
827 spin_unlock(&fl->lock); in fastrpc_map_attach()
833 dma_buf_detach(map->buf, map->attach); in fastrpc_map_attach()
835 dma_buf_put(map->buf); in fastrpc_map_attach()
845 struct fastrpc_session_ctx *sess = fl->sctx; in fastrpc_map_create()
851 dev_dbg(sess->dev, "%s: Failed to get map fd=%d\n", in fastrpc_map_create()
 864  * +---------------------------------+
 865  * |           Arguments             |
 866  * | type:(union fastrpc_remote_arg)|
 867  * |             (0 - N)             |
 868  * +---------------------------------+
 869  * |         Invoke Buffer list      |
 870  * | type:(struct fastrpc_invoke_buf)|
 871  * |           (0 - N)               |
 872  * +---------------------------------+
 873  * |         Page info list          |
 874  * | type:(struct fastrpc_phy_page)  |
 875  * |             (0 - N)             |
 876  * +---------------------------------+
 877  * |         Optional info           |
 878  * |(can be specific to SoC/Firmware)|
 879  * +---------------------------------+
 880  * >>>>>>>>  END of METADATA <<<<<<<<<
 881  * +---------------------------------+
 882  * |         Arguments               |
 883  * |             (0-N)               |
 884  * +---------------------------------+
893 sizeof(struct fastrpc_phy_page)) * ctx->nscalars + in fastrpc_get_meta_size()
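This is the metadata-size arithmetic for the layout pictured above: one (remote arg, invoke-buf, phy-page) descriptor triple per scalar, plus the trailing fd and CRC lists that make up the "Optional info" box. A standalone back-of-the-envelope version (the struct sizes and list lengths are assumptions for illustration, not quoted ABI values):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative sizes only -- the real ones come from the fastrpc UAPI structs. */
#define SIZEOF_REMOTE_BUF       16      /* u64 pv + u64 len */
#define SIZEOF_INVOKE_BUF       8       /* s32 num + s32 pgidx */
#define SIZEOF_PHY_PAGE         16      /* u64 addr + u64 size */
#define MAX_FDLIST              16      /* assumed FASTRPC_MAX_FDLIST */
#define MAX_CRCLIST             64      /* assumed FASTRPC_MAX_CRCLIST */

static size_t meta_size(int nscalars)
{
        return (SIZEOF_REMOTE_BUF + SIZEOF_INVOKE_BUF + SIZEOF_PHY_PAGE) * nscalars
                + sizeof(uint64_t) * MAX_FDLIST         /* fd list */
                + sizeof(uint32_t) * MAX_CRCLIST;       /* CRC list */
}

int main(void)
{
        /* e.g. a method with 2 in-buffers and 1 out-buffer: 3 scalars */
        printf("metadata for 3 scalars: %zu bytes\n", meta_size(3));
        return 0;
}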
906 for (oix = 0; oix < ctx->nbufs; oix++) { in fastrpc_get_payload_size()
907 int i = ctx->olaps[oix].raix; in fastrpc_get_payload_size()
909 if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) { in fastrpc_get_payload_size()
911 if (ctx->olaps[oix].offset == 0) in fastrpc_get_payload_size()
914 size += (ctx->olaps[oix].mend - ctx->olaps[oix].mstart); in fastrpc_get_payload_size()
923 struct device *dev = ctx->fl->sctx->dev; in fastrpc_create_maps()
926 for (i = 0; i < ctx->nscalars; ++i) { in fastrpc_create_maps()
928 if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1 || in fastrpc_create_maps()
929 ctx->args[i].length == 0) in fastrpc_create_maps()
932 if (i < ctx->nbufs) in fastrpc_create_maps()
933 err = fastrpc_map_create(ctx->fl, ctx->args[i].fd, in fastrpc_create_maps()
934 ctx->args[i].length, ctx->args[i].attr, &ctx->maps[i]); in fastrpc_create_maps()
936 err = fastrpc_map_attach(ctx->fl, ctx->args[i].fd, in fastrpc_create_maps()
937 ctx->args[i].length, ctx->args[i].attr, &ctx->maps[i]); in fastrpc_create_maps()
940 return -EINVAL; in fastrpc_create_maps()
959 struct device *dev = ctx->fl->sctx->dev; in fastrpc_get_args()
969 inbufs = REMOTE_SCALARS_INBUFS(ctx->sc); in fastrpc_get_args()
977 ctx->msg_sz = pkt_size; in fastrpc_get_args()
979 if (ctx->fl->sctx->sid) in fastrpc_get_args()
980 err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf); in fastrpc_get_args()
982 err = fastrpc_remote_heap_alloc(ctx->fl, dev, pkt_size, &ctx->buf); in fastrpc_get_args()
986 memset(ctx->buf->virt, 0, pkt_size); in fastrpc_get_args()
987 rpra = ctx->buf->virt; in fastrpc_get_args()
988 list = fastrpc_invoke_buf_start(rpra, ctx->nscalars); in fastrpc_get_args()
989 pages = fastrpc_phy_page_start(list, ctx->nscalars); in fastrpc_get_args()
990 args = (uintptr_t)ctx->buf->virt + metalen; in fastrpc_get_args()
991 rlen = pkt_size - metalen; in fastrpc_get_args()
992 ctx->rpra = rpra; in fastrpc_get_args()
994 for (oix = 0; oix < ctx->nbufs; ++oix) { in fastrpc_get_args()
997 i = ctx->olaps[oix].raix; in fastrpc_get_args()
998 len = ctx->args[i].length; in fastrpc_get_args()
1008 if (ctx->maps[i]) { in fastrpc_get_args()
1011 rpra[i].buf.pv = (u64) ctx->args[i].ptr; in fastrpc_get_args()
1012 pages[i].addr = ctx->maps[i]->phys; in fastrpc_get_args()
1014 mmap_read_lock(current->mm); in fastrpc_get_args()
1015 vma = find_vma(current->mm, ctx->args[i].ptr); in fastrpc_get_args()
1017 pages[i].addr += (ctx->args[i].ptr & PAGE_MASK) - in fastrpc_get_args()
1018 vma->vm_start; in fastrpc_get_args()
1019 mmap_read_unlock(current->mm); in fastrpc_get_args()
1021 pg_start = (ctx->args[i].ptr & PAGE_MASK) >> PAGE_SHIFT; in fastrpc_get_args()
1022 pg_end = ((ctx->args[i].ptr + len - 1) & PAGE_MASK) >> in fastrpc_get_args()
1024 pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE; in fastrpc_get_args()
1028 if (ctx->olaps[oix].offset == 0) { in fastrpc_get_args()
1029 rlen -= ALIGN(args, FASTRPC_ALIGN) - args; in fastrpc_get_args()
1033 mlen = ctx->olaps[oix].mend - ctx->olaps[oix].mstart; in fastrpc_get_args()
1038 rpra[i].buf.pv = args - ctx->olaps[oix].offset; in fastrpc_get_args()
1039 pages[i].addr = ctx->buf->phys - in fastrpc_get_args()
1040 ctx->olaps[oix].offset + in fastrpc_get_args()
1041 (pkt_size - rlen); in fastrpc_get_args()
1045 pg_end = ((rpra[i].buf.pv + len - 1) & PAGE_MASK) >> PAGE_SHIFT; in fastrpc_get_args()
1046 pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE; in fastrpc_get_args()
1048 rlen -= mlen; in fastrpc_get_args()
1051 if (i < inbufs && !ctx->maps[i]) { in fastrpc_get_args()
1053 void *src = (void *)(uintptr_t)ctx->args[i].ptr; in fastrpc_get_args()
1058 err = -EFAULT; in fastrpc_get_args()
1067 for (i = ctx->nbufs; i < ctx->nscalars; ++i) { in fastrpc_get_args()
1068 list[i].num = ctx->args[i].length ? 1 : 0; in fastrpc_get_args()
1070 if (ctx->maps[i]) { in fastrpc_get_args()
1071 pages[i].addr = ctx->maps[i]->phys; in fastrpc_get_args()
1072 pages[i].size = ctx->maps[i]->size; in fastrpc_get_args()
1074 rpra[i].dma.fd = ctx->args[i].fd; in fastrpc_get_args()
1075 rpra[i].dma.len = ctx->args[i].length; in fastrpc_get_args()
1076 rpra[i].dma.offset = (u64) ctx->args[i].ptr; in fastrpc_get_args()
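The pages[i].size computations in fastrpc_get_args() (lines 1021-1024 and 1045-1046) convert a byte range into whole-page coverage: take the page indices of the first and last byte, then count (pg_end - pg_start + 1) pages. A standalone check of that arithmetic, assuming 4 KiB pages:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT      12                      /* assume 4 KiB pages */
#define PAGE_SIZE       (1ULL << PAGE_SHIFT)
#define PAGE_MASK       (~(PAGE_SIZE - 1))

/* Bytes of whole pages spanned by [ptr, ptr + len) -- the same arithmetic
 * as the driver's pages[i].size computation. */
static uint64_t page_span(uint64_t ptr, uint64_t len)
{
        uint64_t pg_start = (ptr & PAGE_MASK) >> PAGE_SHIFT;
        uint64_t pg_end = ((ptr + len - 1) & PAGE_MASK) >> PAGE_SHIFT;

        return (pg_end - pg_start + 1) * PAGE_SIZE;
}

int main(void)
{
        printf("%llu\n", (unsigned long long)page_span(0x1000, 0x1000)); /* 4096: one page */
        printf("%llu\n", (unsigned long long)page_span(0x1ff0, 0x20));   /* 8192: straddles two */
        return 0;
}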
1089 union fastrpc_remote_arg *rpra = ctx->rpra; in fastrpc_put_args()
1090 struct fastrpc_user *fl = ctx->fl; in fastrpc_put_args()
1098 inbufs = REMOTE_SCALARS_INBUFS(ctx->sc); in fastrpc_put_args()
1099 outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc); in fastrpc_put_args()
1100 handles = REMOTE_SCALARS_INHANDLES(ctx->sc) + REMOTE_SCALARS_OUTHANDLES(ctx->sc); in fastrpc_put_args()
1101 list = fastrpc_invoke_buf_start(rpra, ctx->nscalars); in fastrpc_put_args()
1102 pages = fastrpc_phy_page_start(list, ctx->nscalars); in fastrpc_put_args()
1105 for (i = inbufs; i < ctx->nbufs; ++i) { in fastrpc_put_args()
1106 if (!ctx->maps[i]) { in fastrpc_put_args()
1108 void *dst = (void *)(uintptr_t)ctx->args[i].ptr; in fastrpc_put_args()
1113 ret = -EFAULT; in fastrpc_put_args()
1139 struct fastrpc_user *fl = ctx->fl; in fastrpc_invoke_send()
1140 struct fastrpc_msg *msg = &ctx->msg; in fastrpc_invoke_send()
1143 cctx = fl->cctx; in fastrpc_invoke_send()
1144 msg->client_id = fl->client_id; in fastrpc_invoke_send()
1145 msg->tid = current->pid; in fastrpc_invoke_send()
1148 msg->client_id = 0; in fastrpc_invoke_send()
1150 msg->ctx = ctx->ctxid | fl->pd; in fastrpc_invoke_send()
1151 msg->handle = handle; in fastrpc_invoke_send()
1152 msg->sc = ctx->sc; in fastrpc_invoke_send()
1153 msg->addr = ctx->buf ? ctx->buf->phys : 0; in fastrpc_invoke_send()
1154 msg->size = roundup(ctx->msg_sz, PAGE_SIZE); in fastrpc_invoke_send()
1157 ret = rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg)); in fastrpc_invoke_send()
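The message's ctx field round-trips the context ID: idr_alloc_cyclic() hands back a small slot number (line 621), fastrpc_context_alloc() shifts it left by 4 (line 627), fastrpc_invoke_send() ORs the PD selector into the freed low bits (line 1150), and the rpmsg callback masks and shifts to recover the slot (line 2460). A standalone model of that packing (the 0xFF0 mask and PD value are assumptions for illustration):

#include <stdint.h>
#include <stdio.h>

#define CTXID_MASK      0xFF0   /* assumed FASTRPC_CTXID_MASK */
#define USER_PD         1       /* assumed PD selector carried in the low bits */

int main(void)
{
        uint64_t idr_id = 0x2a;                 /* what idr_alloc_cyclic() returned */
        uint64_t ctxid = idr_id << 4;           /* ctx->ctxid, low nibble left free */
        uint64_t wire = ctxid | USER_PD;        /* msg->ctx as sent to the DSP */

        /* The DSP echoes msg->ctx in its response; recover the idr slot: */
        uint64_t back = (wire & CTXID_MASK) >> 4;

        printf("idr %#llx -> wire %#llx -> idr %#llx\n",
               (unsigned long long)idr_id, (unsigned long long)wire,
               (unsigned long long)back);
        return 0;
}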
1175 if (!fl->sctx) in fastrpc_internal_invoke()
1176 return -EINVAL; in fastrpc_internal_invoke()
1178 if (!fl->cctx->rpdev) in fastrpc_internal_invoke()
1179 return -EPIPE; in fastrpc_internal_invoke()
1182 		dev_warn_ratelimited(fl->sctx->dev, "user app trying to send a kernel RPC message (%d)\n", handle); in fastrpc_internal_invoke()
1183 return -EPERM; in fastrpc_internal_invoke()
1197 err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle); in fastrpc_internal_invoke()
1202 if (!wait_for_completion_timeout(&ctx->work, 10 * HZ)) in fastrpc_internal_invoke()
1203 err = -ETIMEDOUT; in fastrpc_internal_invoke()
1205 err = wait_for_completion_interruptible(&ctx->work); in fastrpc_internal_invoke()
1219 err = ctx->retval; in fastrpc_internal_invoke()
1224 if (err != -ERESTARTSYS && err != -ETIMEDOUT) { in fastrpc_internal_invoke()
1226 spin_lock(&fl->lock); in fastrpc_internal_invoke()
1227 list_del(&ctx->node); in fastrpc_internal_invoke()
1228 spin_unlock(&fl->lock); in fastrpc_internal_invoke()
1232 if (err == -ERESTARTSYS) { in fastrpc_internal_invoke()
1233 list_for_each_entry_safe(buf, b, &fl->mmaps, node) { in fastrpc_internal_invoke()
1234 list_del(&buf->node); in fastrpc_internal_invoke()
1235 list_add_tail(&buf->node, &fl->cctx->invoke_interrupted_mmaps); in fastrpc_internal_invoke()
1240 dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err); in fastrpc_internal_invoke()
1247 	/* Check if the device node is non-secure and channel is secure */ in is_session_rejected()
1248 if (!fl->is_secure_dev && fl->cctx->secure) { in is_session_rejected()
1254 if (!fl->cctx->unsigned_support || !unsigned_pd_request) { in is_session_rejected()
1255 dev_err(&fl->cctx->rpdev->dev, "Error: Untrusted application trying to offload to signed PD\n"); in is_session_rejected()
1281 return -ENOMEM; in fastrpc_init_create_static_process()
1284 err = -EFAULT; in fastrpc_init_create_static_process()
1289 err = -EINVAL; in fastrpc_init_create_static_process()
1299 if (!fl->cctx->remote_heap) { in fastrpc_init_create_static_process()
1300 err = fastrpc_remote_heap_alloc(fl, fl->sctx->dev, init.memlen, in fastrpc_init_create_static_process()
1301 &fl->cctx->remote_heap); in fastrpc_init_create_static_process()
1306 if (fl->cctx->vmcount) { in fastrpc_init_create_static_process()
1309 err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys, in fastrpc_init_create_static_process()
1310 (u64)fl->cctx->remote_heap->size, in fastrpc_init_create_static_process()
1312 fl->cctx->vmperms, fl->cctx->vmcount); in fastrpc_init_create_static_process()
1314 dev_err(fl->sctx->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d\n", in fastrpc_init_create_static_process()
1315 fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err); in fastrpc_init_create_static_process()
1322 inbuf.client_id = fl->client_id; in fastrpc_init_create_static_process()
1325 fl->pd = USER_PD; in fastrpc_init_create_static_process()
1329 args[0].fd = -1; in fastrpc_init_create_static_process()
1333 args[1].fd = -1; in fastrpc_init_create_static_process()
1335 pages[0].addr = fl->cctx->remote_heap->phys; in fastrpc_init_create_static_process()
1336 pages[0].size = fl->cctx->remote_heap->size; in fastrpc_init_create_static_process()
1340 args[2].fd = -1; in fastrpc_init_create_static_process()
1354 if (fl->cctx->vmcount && scm_done) { in fastrpc_init_create_static_process()
1359 for (i = 0; i < fl->cctx->vmcount; i++) in fastrpc_init_create_static_process()
1360 src_perms |= BIT(fl->cctx->vmperms[i].vmid); in fastrpc_init_create_static_process()
1364 err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys, in fastrpc_init_create_static_process()
1365 (u64)fl->cctx->remote_heap->size, in fastrpc_init_create_static_process()
1368 dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d\n", in fastrpc_init_create_static_process()
1369 fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err); in fastrpc_init_create_static_process()
1372 fastrpc_buf_free(fl->cctx->remote_heap); in fastrpc_init_create_static_process()
1404 return -ENOMEM; in fastrpc_init_create_process()
1407 err = -EFAULT; in fastrpc_init_create_process()
1415 err = -ECONNREFUSED; in fastrpc_init_create_process()
1420 err = -EINVAL; in fastrpc_init_create_process()
1424 inbuf.client_id = fl->client_id; in fastrpc_init_create_process()
1425 inbuf.namelen = strlen(current->comm) + 1; in fastrpc_init_create_process()
1430 fl->pd = USER_PD; in fastrpc_init_create_process()
1440 err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen, in fastrpc_init_create_process()
1445 fl->init_mem = imem; in fastrpc_init_create_process()
1448 args[0].fd = -1; in fastrpc_init_create_process()
1450 args[1].ptr = (u64)(uintptr_t)current->comm; in fastrpc_init_create_process()
1452 args[1].fd = -1; in fastrpc_init_create_process()
1458 pages[0].addr = imem->phys; in fastrpc_init_create_process()
1459 pages[0].size = imem->size; in fastrpc_init_create_process()
1463 args[3].fd = -1; in fastrpc_init_create_process()
1467 args[4].fd = -1; in fastrpc_init_create_process()
1471 args[5].fd = -1; in fastrpc_init_create_process()
1487 fl->init_mem = NULL; in fastrpc_init_create_process()
1500 struct fastrpc_channel_ctx *cctx = fl->cctx; in fastrpc_session_alloc()
1505 spin_lock_irqsave(&cctx->lock, flags); in fastrpc_session_alloc()
1506 for (i = 0; i < cctx->sesscount; i++) { in fastrpc_session_alloc()
1507 if (!cctx->session[i].used && cctx->session[i].valid) { in fastrpc_session_alloc()
1508 cctx->session[i].used = true; in fastrpc_session_alloc()
1509 session = &cctx->session[i]; in fastrpc_session_alloc()
1510 /* any non-zero ID will work, session_idx + 1 is the simplest one */ in fastrpc_session_alloc()
1511 fl->client_id = i + 1; in fastrpc_session_alloc()
1515 spin_unlock_irqrestore(&cctx->lock, flags); in fastrpc_session_alloc()
1525 spin_lock_irqsave(&cctx->lock, flags); in fastrpc_session_free()
1526 session->used = false; in fastrpc_session_free()
1527 spin_unlock_irqrestore(&cctx->lock, flags); in fastrpc_session_free()
1536 client_id = fl->client_id; in fastrpc_release_current_dsp_process()
1539 args[0].fd = -1; in fastrpc_release_current_dsp_process()
1548 struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data; in fastrpc_device_release()
1549 struct fastrpc_channel_ctx *cctx = fl->cctx; in fastrpc_device_release()
1557 spin_lock_irqsave(&cctx->lock, flags); in fastrpc_device_release()
1558 list_del(&fl->user); in fastrpc_device_release()
1559 spin_unlock_irqrestore(&cctx->lock, flags); in fastrpc_device_release()
1561 if (fl->init_mem) in fastrpc_device_release()
1562 fastrpc_buf_free(fl->init_mem); in fastrpc_device_release()
1564 list_for_each_entry_safe(ctx, n, &fl->pending, node) { in fastrpc_device_release()
1565 list_del(&ctx->node); in fastrpc_device_release()
1569 list_for_each_entry_safe(map, m, &fl->maps, node) in fastrpc_device_release()
1572 list_for_each_entry_safe(buf, b, &fl->mmaps, node) { in fastrpc_device_release()
1573 list_del(&buf->node); in fastrpc_device_release()
1577 fastrpc_session_free(cctx, fl->sctx); in fastrpc_device_release()
1580 mutex_destroy(&fl->mutex); in fastrpc_device_release()
1582 file->private_data = NULL; in fastrpc_device_release()
1594 fdevice = miscdev_to_fdevice(filp->private_data); in fastrpc_device_open()
1595 cctx = fdevice->cctx; in fastrpc_device_open()
1599 return -ENOMEM; in fastrpc_device_open()
1604 filp->private_data = fl; in fastrpc_device_open()
1605 spin_lock_init(&fl->lock); in fastrpc_device_open()
1606 mutex_init(&fl->mutex); in fastrpc_device_open()
1607 INIT_LIST_HEAD(&fl->pending); in fastrpc_device_open()
1608 INIT_LIST_HEAD(&fl->maps); in fastrpc_device_open()
1609 INIT_LIST_HEAD(&fl->mmaps); in fastrpc_device_open()
1610 INIT_LIST_HEAD(&fl->user); in fastrpc_device_open()
1611 fl->cctx = cctx; in fastrpc_device_open()
1612 fl->is_secure_dev = fdevice->secure; in fastrpc_device_open()
1614 fl->sctx = fastrpc_session_alloc(fl); in fastrpc_device_open()
1615 if (!fl->sctx) { in fastrpc_device_open()
1616 dev_err(&cctx->rpdev->dev, "No session available\n"); in fastrpc_device_open()
1617 mutex_destroy(&fl->mutex); in fastrpc_device_open()
1620 return -EBUSY; in fastrpc_device_open()
1623 spin_lock_irqsave(&cctx->lock, flags); in fastrpc_device_open()
1624 list_add_tail(&fl->user, &cctx->users); in fastrpc_device_open()
1625 spin_unlock_irqrestore(&cctx->lock, flags); in fastrpc_device_open()
1638 return -EFAULT; in fastrpc_dmabuf_alloc()
1640 err = fastrpc_buf_alloc(fl, fl->sctx->dev, bp.size, &buf); in fastrpc_dmabuf_alloc()
1647 buf->dmabuf = dma_buf_export(&exp_info); in fastrpc_dmabuf_alloc()
1648 if (IS_ERR(buf->dmabuf)) { in fastrpc_dmabuf_alloc()
1649 err = PTR_ERR(buf->dmabuf); in fastrpc_dmabuf_alloc()
1654 bp.fd = dma_buf_fd(buf->dmabuf, O_ACCMODE); in fastrpc_dmabuf_alloc()
1656 dma_buf_put(buf->dmabuf); in fastrpc_dmabuf_alloc()
1657 return -EINVAL; in fastrpc_dmabuf_alloc()
1669 return -EFAULT; in fastrpc_dmabuf_alloc()
1678 int client_id = fl->client_id; in fastrpc_init_attach()
1683 args[0].fd = -1; in fastrpc_init_attach()
1685 fl->pd = pd; in fastrpc_init_attach()
1699 return -EFAULT; in fastrpc_invoke()
1706 return -ENOMEM; in fastrpc_invoke()
1711 return -EFAULT; in fastrpc_invoke()
1732 dsp_attr_buf_len -= 1; in fastrpc_get_info_from_dsp()
1736 args[0].fd = -1; in fastrpc_get_info_from_dsp()
1739 args[1].fd = -1; in fastrpc_get_info_from_dsp()
1748 struct fastrpc_channel_ctx *cctx = fl->cctx; in fastrpc_get_info_from_kernel()
1749 uint32_t attribute_id = cap->attribute_id; in fastrpc_get_info_from_kernel()
1754 spin_lock_irqsave(&cctx->lock, flags); in fastrpc_get_info_from_kernel()
1756 if (cctx->valid_attributes) { in fastrpc_get_info_from_kernel()
1757 spin_unlock_irqrestore(&cctx->lock, flags); in fastrpc_get_info_from_kernel()
1760 spin_unlock_irqrestore(&cctx->lock, flags); in fastrpc_get_info_from_kernel()
1764 return -ENOMEM; in fastrpc_get_info_from_kernel()
1768 dev_info(&cctx->rpdev->dev, in fastrpc_get_info_from_kernel()
1771 return -EOPNOTSUPP; in fastrpc_get_info_from_kernel()
1773 dev_err(&cctx->rpdev->dev, "Error: dsp information is incorrect err: %d\n", err); in fastrpc_get_info_from_kernel()
1778 spin_lock_irqsave(&cctx->lock, flags); in fastrpc_get_info_from_kernel()
1779 memcpy(cctx->dsp_attributes, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES_LEN); in fastrpc_get_info_from_kernel()
1780 cctx->valid_attributes = true; in fastrpc_get_info_from_kernel()
1781 spin_unlock_irqrestore(&cctx->lock, flags); in fastrpc_get_info_from_kernel()
1784 cap->capability = cctx->dsp_attributes[attribute_id]; in fastrpc_get_info_from_kernel()
1794 return -EFAULT; in fastrpc_get_dsp_info()
1799 dev_err(&fl->cctx->rpdev->dev, "Error: invalid attribute: %d, err: %d\n", in fastrpc_get_dsp_info()
1801 return -EOVERFLOW; in fastrpc_get_dsp_info()
1809 return -EFAULT; in fastrpc_get_dsp_info()
1818 struct device *dev = fl->sctx->dev; in fastrpc_req_munmap_impl()
1822 req_msg.client_id = fl->client_id; in fastrpc_req_munmap_impl()
1823 req_msg.size = buf->size; in fastrpc_req_munmap_impl()
1824 req_msg.vaddr = buf->raddr; in fastrpc_req_munmap_impl()
1833 dev_dbg(dev, "unmmap\tpt 0x%09lx OK\n", buf->raddr); in fastrpc_req_munmap_impl()
1834 spin_lock(&fl->lock); in fastrpc_req_munmap_impl()
1835 list_del(&buf->node); in fastrpc_req_munmap_impl()
1836 spin_unlock(&fl->lock); in fastrpc_req_munmap_impl()
1839 dev_err(dev, "unmmap\tpt 0x%09lx ERROR\n", buf->raddr); in fastrpc_req_munmap_impl()
1849 struct device *dev = fl->sctx->dev; in fastrpc_req_munmap()
1852 return -EFAULT; in fastrpc_req_munmap()
1854 spin_lock(&fl->lock); in fastrpc_req_munmap()
1855 list_for_each_entry_safe(iter, b, &fl->mmaps, node) { in fastrpc_req_munmap()
1856 if ((iter->raddr == req.vaddrout) && (iter->size == req.size)) { in fastrpc_req_munmap()
1861 spin_unlock(&fl->lock); in fastrpc_req_munmap()
1866 return -EINVAL; in fastrpc_req_munmap()
1880 struct device *dev = fl->sctx->dev; in fastrpc_req_mmap()
1885 return -EFAULT; in fastrpc_req_mmap()
1890 return -EINVAL; in fastrpc_req_mmap()
1895 return -EINVAL; in fastrpc_req_mmap()
1908 req_msg.client_id = fl->client_id; in fastrpc_req_mmap()
1916 pages.addr = buf->phys; in fastrpc_req_mmap()
1917 pages.size = buf->size; in fastrpc_req_mmap()
1929 dev_err(dev, "mmap error (len 0x%08llx)\n", buf->size); in fastrpc_req_mmap()
1935 buf->raddr = (uintptr_t) rsp_msg.vaddr; in fastrpc_req_mmap()
1941 if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) { in fastrpc_req_mmap()
1944 err = qcom_scm_assign_mem(buf->phys, (u64)buf->size, in fastrpc_req_mmap()
1945 &src_perms, fl->cctx->vmperms, fl->cctx->vmcount); in fastrpc_req_mmap()
1947 dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d", in fastrpc_req_mmap()
1948 buf->phys, buf->size, err); in fastrpc_req_mmap()
1953 spin_lock(&fl->lock); in fastrpc_req_mmap()
1954 list_add_tail(&buf->node, &fl->mmaps); in fastrpc_req_mmap()
1955 spin_unlock(&fl->lock); in fastrpc_req_mmap()
1958 err = -EFAULT; in fastrpc_req_mmap()
1963 buf->raddr, buf->size); in fastrpc_req_mmap()
1980 struct device *dev = fl->sctx->dev; in fastrpc_req_mem_unmap_impl()
1982 spin_lock(&fl->lock); in fastrpc_req_mem_unmap_impl()
1983 list_for_each_entry_safe(iter, m, &fl->maps, node) { in fastrpc_req_mem_unmap_impl()
1984 if ((req->fd < 0 || iter->fd == req->fd) && (iter->raddr == req->vaddr)) { in fastrpc_req_mem_unmap_impl()
1990 spin_unlock(&fl->lock); in fastrpc_req_mem_unmap_impl()
1994 return -EINVAL; in fastrpc_req_mem_unmap_impl()
1997 req_msg.client_id = fl->client_id; in fastrpc_req_mem_unmap_impl()
1998 req_msg.len = map->len; in fastrpc_req_mem_unmap_impl()
1999 req_msg.vaddrin = map->raddr; in fastrpc_req_mem_unmap_impl()
2000 req_msg.fd = map->fd; in fastrpc_req_mem_unmap_impl()
2009 dev_err(dev, "unmmap\tpt fd = %d, 0x%09llx error\n", map->fd, map->raddr); in fastrpc_req_mem_unmap_impl()
2022 return -EFAULT; in fastrpc_req_mem_unmap()
2035 struct device *dev = fl->sctx->dev; in fastrpc_req_mem_map()
2041 return -EFAULT; in fastrpc_req_mem_map()
2050 req_msg.client_id = fl->client_id; in fastrpc_req_mem_map()
2054 map->va = (void *) (uintptr_t) req.vaddrin; in fastrpc_req_mem_map()
2062 pages.addr = map->phys; in fastrpc_req_mem_map()
2063 pages.size = map->len; in fastrpc_req_mem_map()
2078 req.fd, req.vaddrin, map->len); in fastrpc_req_mem_map()
2083 map->raddr = rsp_msg.vaddr; in fastrpc_req_mem_map()
2091 req_unmap.length = map->len; in fastrpc_req_mem_map()
2093 return -EFAULT; in fastrpc_req_mem_map()
2107 struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data; in fastrpc_device_ioctl()
2146 err = -ENOTTY; in fastrpc_device_ioctl()
2164 struct device *dev = &pdev->dev; in fastrpc_cb_probe()
2169 cctx = dev_get_drvdata(dev->parent); in fastrpc_cb_probe()
2171 return -EINVAL; in fastrpc_cb_probe()
2173 of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions); in fastrpc_cb_probe()
2175 spin_lock_irqsave(&cctx->lock, flags); in fastrpc_cb_probe()
2176 if (cctx->sesscount >= FASTRPC_MAX_SESSIONS) { in fastrpc_cb_probe()
2177 dev_err(&pdev->dev, "too many sessions\n"); in fastrpc_cb_probe()
2178 spin_unlock_irqrestore(&cctx->lock, flags); in fastrpc_cb_probe()
2179 return -ENOSPC; in fastrpc_cb_probe()
2181 sess = &cctx->session[cctx->sesscount++]; in fastrpc_cb_probe()
2182 sess->used = false; in fastrpc_cb_probe()
2183 sess->valid = true; in fastrpc_cb_probe()
2184 sess->dev = dev; in fastrpc_cb_probe()
2187 if (of_property_read_u32(dev->of_node, "reg", &sess->sid)) in fastrpc_cb_probe()
2194 if (cctx->sesscount >= FASTRPC_MAX_SESSIONS) in fastrpc_cb_probe()
2196 dup_sess = &cctx->session[cctx->sesscount++]; in fastrpc_cb_probe()
2200 spin_unlock_irqrestore(&cctx->lock, flags); in fastrpc_cb_probe()
2203 dev_err(dev, "32-bit DMA enable failed\n"); in fastrpc_cb_probe()
2212 struct fastrpc_channel_ctx *cctx = dev_get_drvdata(pdev->dev.parent); in fastrpc_cb_remove()
2213 struct fastrpc_session_ctx *sess = dev_get_drvdata(&pdev->dev); in fastrpc_cb_remove()
2217 spin_lock_irqsave(&cctx->lock, flags); in fastrpc_cb_remove()
2219 if (cctx->session[i].sid == sess->sid) { in fastrpc_cb_remove()
2220 cctx->session[i].valid = false; in fastrpc_cb_remove()
2221 cctx->sesscount--; in fastrpc_cb_remove()
2224 spin_unlock_irqrestore(&cctx->lock, flags); in fastrpc_cb_remove()
2228 { .compatible = "qcom,fastrpc-compute-cb", },
2236 .name = "qcom,fastrpc-cb",
2250 return -ENOMEM; in fastrpc_device_register()
2252 fdev->secure = is_secured; in fastrpc_device_register()
2253 fdev->cctx = cctx; in fastrpc_device_register()
2254 fdev->miscdev.minor = MISC_DYNAMIC_MINOR; in fastrpc_device_register()
2255 fdev->miscdev.fops = &fastrpc_fops; in fastrpc_device_register()
2256 fdev->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "fastrpc-%s%s", in fastrpc_device_register()
2257 domain, is_secured ? "-secure" : ""); in fastrpc_device_register()
2258 if (!fdev->miscdev.name) in fastrpc_device_register()
2259 return -ENOMEM; in fastrpc_device_register()
2261 err = misc_register(&fdev->miscdev); in fastrpc_device_register()
2264 cctx->secure_fdevice = fdev; in fastrpc_device_register()
2266 cctx->fdevice = fdev; in fastrpc_device_register()
2285 return -EINVAL; in fastrpc_get_domain_id()
2290 struct device *rdev = &rpdev->dev; in fastrpc_rpmsg_probe()
2292 int i, err, domain_id = -1, vmcount; in fastrpc_rpmsg_probe()
2297 err = of_property_read_string(rdev->of_node, "label", &domain); in fastrpc_rpmsg_probe()
2307 return -EINVAL; in fastrpc_rpmsg_probe()
2310 if (of_reserved_mem_device_init_by_idx(rdev, rdev->of_node, 0)) in fastrpc_rpmsg_probe()
2313 vmcount = of_property_read_variable_u32_array(rdev->of_node, in fastrpc_rpmsg_probe()
2318 return -EPROBE_DEFER; in fastrpc_rpmsg_probe()
2322 return -ENOMEM; in fastrpc_rpmsg_probe()
2325 data->vmcount = vmcount; in fastrpc_rpmsg_probe()
2326 for (i = 0; i < data->vmcount; i++) { in fastrpc_rpmsg_probe()
2327 data->vmperms[i].vmid = vmids[i]; in fastrpc_rpmsg_probe()
2328 data->vmperms[i].perm = QCOM_SCM_PERM_RWX; in fastrpc_rpmsg_probe()
2333 struct resource res; in fastrpc_rpmsg_probe()
2336 err = of_reserved_mem_region_to_resource(rdev->of_node, 0, &res); in fastrpc_rpmsg_probe()
2341 data->vmperms, data->vmcount); in fastrpc_rpmsg_probe()
2346 secure_dsp = !(of_property_read_bool(rdev->of_node, "qcom,non-secure-domain")); in fastrpc_rpmsg_probe()
2347 data->secure = secure_dsp; in fastrpc_rpmsg_probe()
2354 data->unsigned_support = false; in fastrpc_rpmsg_probe()
2361 data->unsigned_support = true; in fastrpc_rpmsg_probe()
2372 err = -EINVAL; in fastrpc_rpmsg_probe()
2376 kref_init(&data->refcount); in fastrpc_rpmsg_probe()
2378 dev_set_drvdata(&rpdev->dev, data); in fastrpc_rpmsg_probe()
2379 rdev->dma_mask = &data->dma_mask; in fastrpc_rpmsg_probe()
2381 INIT_LIST_HEAD(&data->users); in fastrpc_rpmsg_probe()
2382 INIT_LIST_HEAD(&data->invoke_interrupted_mmaps); in fastrpc_rpmsg_probe()
2383 spin_lock_init(&data->lock); in fastrpc_rpmsg_probe()
2384 idr_init(&data->ctx_idr); in fastrpc_rpmsg_probe()
2385 data->domain_id = domain_id; in fastrpc_rpmsg_probe()
2386 data->rpdev = rpdev; in fastrpc_rpmsg_probe()
2388 err = of_platform_populate(rdev->of_node, NULL, NULL, rdev); in fastrpc_rpmsg_probe()
2395 if (data->fdevice) in fastrpc_rpmsg_probe()
2396 misc_deregister(&data->fdevice->miscdev); in fastrpc_rpmsg_probe()
2397 if (data->secure_fdevice) in fastrpc_rpmsg_probe()
2398 misc_deregister(&data->secure_fdevice->miscdev); in fastrpc_rpmsg_probe()
2409 spin_lock(&user->lock); in fastrpc_notify_users()
2410 list_for_each_entry(ctx, &user->pending, node) { in fastrpc_notify_users()
2411 ctx->retval = -EPIPE; in fastrpc_notify_users()
2412 complete(&ctx->work); in fastrpc_notify_users()
2414 spin_unlock(&user->lock); in fastrpc_notify_users()
2419 struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev); in fastrpc_rpmsg_remove()
2425 spin_lock_irqsave(&cctx->lock, flags); in fastrpc_rpmsg_remove()
2426 cctx->rpdev = NULL; in fastrpc_rpmsg_remove()
2427 list_for_each_entry(user, &cctx->users, user) in fastrpc_rpmsg_remove()
2429 spin_unlock_irqrestore(&cctx->lock, flags); in fastrpc_rpmsg_remove()
2431 if (cctx->fdevice) in fastrpc_rpmsg_remove()
2432 misc_deregister(&cctx->fdevice->miscdev); in fastrpc_rpmsg_remove()
2434 if (cctx->secure_fdevice) in fastrpc_rpmsg_remove()
2435 misc_deregister(&cctx->secure_fdevice->miscdev); in fastrpc_rpmsg_remove()
2437 list_for_each_entry_safe(buf, b, &cctx->invoke_interrupted_mmaps, node) in fastrpc_rpmsg_remove()
2438 list_del(&buf->node); in fastrpc_rpmsg_remove()
2440 if (cctx->remote_heap) in fastrpc_rpmsg_remove()
2441 fastrpc_buf_free(cctx->remote_heap); in fastrpc_rpmsg_remove()
2443 of_platform_depopulate(&rpdev->dev); in fastrpc_rpmsg_remove()
2451 struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev); in fastrpc_rpmsg_callback()
2458 return -EINVAL; in fastrpc_rpmsg_callback()
2460 ctxid = ((rsp->ctx & FASTRPC_CTXID_MASK) >> 4); in fastrpc_rpmsg_callback()
2462 spin_lock_irqsave(&cctx->lock, flags); in fastrpc_rpmsg_callback()
2463 ctx = idr_find(&cctx->ctx_idr, ctxid); in fastrpc_rpmsg_callback()
2464 spin_unlock_irqrestore(&cctx->lock, flags); in fastrpc_rpmsg_callback()
2467 dev_err(&rpdev->dev, "No context ID matches response\n"); in fastrpc_rpmsg_callback()
2468 return -ENOENT; in fastrpc_rpmsg_callback()
2471 ctx->retval = rsp->retval; in fastrpc_rpmsg_callback()
2472 complete(&ctx->work); in fastrpc_rpmsg_callback()
2479 schedule_work(&ctx->put_work); in fastrpc_rpmsg_callback()