xref: /linux/drivers/misc/fastrpc.c (revision 23b0f90ba871f096474e1c27c3d14f455189d2d9)
1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
3 // Copyright (c) 2018, Linaro Limited
4 
5 #include <linux/completion.h>
6 #include <linux/device.h>
7 #include <linux/dma-buf.h>
8 #include <linux/dma-mapping.h>
9 #include <linux/dma-resv.h>
10 #include <linux/idr.h>
11 #include <linux/list.h>
12 #include <linux/miscdevice.h>
13 #include <linux/module.h>
14 #include <linux/of_address.h>
15 #include <linux/of.h>
16 #include <linux/platform_device.h>
17 #include <linux/sort.h>
18 #include <linux/of_platform.h>
19 #include <linux/rpmsg.h>
20 #include <linux/scatterlist.h>
21 #include <linux/slab.h>
22 #include <linux/firmware/qcom/qcom_scm.h>
23 #include <uapi/misc/fastrpc.h>
24 #include <linux/of_reserved_mem.h>
25 #include <linux/bits.h>
26 
27 #define ADSP_DOMAIN_ID (0)
28 #define MDSP_DOMAIN_ID (1)
29 #define SDSP_DOMAIN_ID (2)
30 #define CDSP_DOMAIN_ID (3)
31 #define GDSP_DOMAIN_ID (4)
32 #define FASTRPC_MAX_SESSIONS	14
33 #define FASTRPC_MAX_VMIDS	16
34 #define FASTRPC_ALIGN		128
35 #define FASTRPC_MAX_FDLIST	16
36 #define FASTRPC_MAX_CRCLIST	64
37 #define FASTRPC_CTX_MAX (256)
38 #define FASTRPC_INIT_HANDLE	1
39 #define FASTRPC_DSP_UTILITIES_HANDLE	2
40 #define FASTRPC_CTXID_MASK (0xFF0)
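/*
 * Editorial note (not part of the original source): the context id is the
 * IDR slot shifted left by four bits, i.e. bits [11:4], which is what the
 * 0xFF0 mask above selects. The low four bits are left free for the PD id
 * that fastrpc_invoke_send() ORs in ("msg->ctx = ctx->ctxid | fl->pd"),
 * so IDR slot 5 on USER_PD gives msg->ctx = (5 << 4) | 1 = 0x51.
 */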
41 #define INIT_FILELEN_MAX (2 * 1024 * 1024)
42 #define INIT_FILE_NAMELEN_MAX (128)
43 #define FASTRPC_DEVICE_NAME	"fastrpc"
44 
45 /* Add memory to static PD pool, protection through XPU */
46 #define ADSP_MMAP_HEAP_ADDR  4
47 /* Map static DMA buffer on DSP User PD */
48 #define ADSP_MMAP_DMA_BUFFER  6
49 /* Add memory to static PD pool, protection through hypervisor */
50 #define ADSP_MMAP_REMOTE_HEAP_ADDR  8
51 /* Add memory to userPD pool, for user heap */
52 #define ADSP_MMAP_ADD_PAGES 0x1000
53 /* Add memory to userPD pool, for LLC heap */
54 #define ADSP_MMAP_ADD_PAGES_LLC 0x3000
55 
56 #define DSP_UNSUPPORTED_API (0x80000414)
57 /* Maximum number of DSP attributes supported */
58 #define FASTRPC_MAX_DSP_ATTRIBUTES (256)
59 #define FASTRPC_MAX_DSP_ATTRIBUTES_LEN (sizeof(u32) * FASTRPC_MAX_DSP_ATTRIBUTES)
60 
61 /* Retrieves number of input buffers from the scalars parameter */
62 #define REMOTE_SCALARS_INBUFS(sc)	(((sc) >> 16) & 0x0ff)
63 
64 /* Retrieves number of output buffers from the scalars parameter */
65 #define REMOTE_SCALARS_OUTBUFS(sc)	(((sc) >> 8) & 0x0ff)
66 
67 /* Retrieves number of input handles from the scalars parameter */
68 #define REMOTE_SCALARS_INHANDLES(sc)	(((sc) >> 4) & 0x0f)
69 
70 /* Retrieves number of output handles from the scalars parameter */
71 #define REMOTE_SCALARS_OUTHANDLES(sc)	((sc) & 0x0f)
72 
73 #define REMOTE_SCALARS_LENGTH(sc)	(REMOTE_SCALARS_INBUFS(sc) +   \
74 					 REMOTE_SCALARS_OUTBUFS(sc) +  \
75 					 REMOTE_SCALARS_INHANDLES(sc)+ \
76 					 REMOTE_SCALARS_OUTHANDLES(sc))
77 #define FASTRPC_BUILD_SCALARS(attr, method, in, out, oin, oout)  \
78 				(((attr & 0x07) << 29) |		\
79 				((method & 0x1f) << 24) |	\
80 				((in & 0xff) << 16) |		\
81 				((out & 0xff) <<  8) |		\
82 				((oin & 0x0f) <<  4) |		\
83 				(oout & 0x0f))
84 
85 #define FASTRPC_SCALARS(method, in, out) \
86 		FASTRPC_BUILD_SCALARS(0, method, in, out, 0, 0)
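
/*
 * Worked example (editorial note): FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE,
 * 4, 0) packs method 6 with four input buffers and no output buffers or
 * handles:
 *   (6 << 24) | (4 << 16) = 0x06040000
 * so REMOTE_SCALARS_INBUFS(0x06040000) == 4, REMOTE_SCALARS_OUTBUFS() == 0
 * and REMOTE_SCALARS_LENGTH() == 4.
 */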
87 
88 #define FASTRPC_CREATE_PROCESS_NARGS	6
89 #define FASTRPC_CREATE_STATIC_PROCESS_NARGS	3
90 /* Remote Method id table */
91 #define FASTRPC_RMID_INIT_ATTACH	0
92 #define FASTRPC_RMID_INIT_RELEASE	1
93 #define FASTRPC_RMID_INIT_MMAP		4
94 #define FASTRPC_RMID_INIT_MUNMAP	5
95 #define FASTRPC_RMID_INIT_CREATE	6
96 #define FASTRPC_RMID_INIT_CREATE_ATTR	7
97 #define FASTRPC_RMID_INIT_CREATE_STATIC	8
98 #define FASTRPC_RMID_INIT_MEM_MAP      10
99 #define FASTRPC_RMID_INIT_MEM_UNMAP    11
100 
101 /* Protection Domain(PD) ids */
102 #define ROOT_PD		(0)
103 #define USER_PD		(1)
104 #define SENSORS_PD	(2)
105 
106 #define miscdev_to_fdevice(d) container_of(d, struct fastrpc_device, miscdev)
107 
108 struct fastrpc_phy_page {
109 	dma_addr_t addr;	/* dma address */
110 	u64 size;		/* size of contiguous region */
111 };
112 
113 struct fastrpc_invoke_buf {
114 	u32 num;		/* number of contiguous regions */
115 	u32 pgidx;		/* index to start of contiguous region */
116 };
117 
118 struct fastrpc_remote_dmahandle {
119 	s32 fd;		/* dma handle fd */
120 	u32 offset;	/* dma handle offset */
121 	u32 len;	/* dma handle length */
122 };
123 
124 struct fastrpc_remote_buf {
125 	u64 pv;		/* buffer pointer */
126 	u64 len;	/* length of buffer */
127 };
128 
129 union fastrpc_remote_arg {
130 	struct fastrpc_remote_buf buf;
131 	struct fastrpc_remote_dmahandle dma;
132 };
133 
134 struct fastrpc_mmap_rsp_msg {
135 	u64 vaddr;
136 };
137 
138 struct fastrpc_mmap_req_msg {
139 	s32 client_id;
140 	u32 flags;
141 	u64 vaddr;
142 	s32 num;
143 };
144 
145 struct fastrpc_mem_map_req_msg {
146 	s32 client_id;
147 	s32 fd;
148 	s32 offset;
149 	u32 flags;
150 	u64 vaddrin;
151 	s32 num;
152 	s32 data_len;
153 };
154 
155 struct fastrpc_munmap_req_msg {
156 	s32 client_id;
157 	u64 vaddr;
158 	u64 size;
159 };
160 
161 struct fastrpc_mem_unmap_req_msg {
162 	s32 client_id;
163 	s32 fd;
164 	u64 vaddrin;
165 	u64 len;
166 };
167 
168 struct fastrpc_msg {
169 	int client_id;		/* process client id */
170 	int tid;		/* thread id */
171 	u64 ctx;		/* invoke caller context */
172 	u32 handle;	/* handle to invoke */
173 	u32 sc;		/* scalars structure describing the data */
174 	dma_addr_t addr;	/* dma address */
175 	u64 size;		/* size of contiguous region */
176 };
177 
178 struct fastrpc_invoke_rsp {
179 	u64 ctx;		/* invoke caller context */
180 	int retval;		/* invoke return value */
181 };
182 
183 struct fastrpc_buf_overlap {
184 	u64 start;
185 	u64 end;
186 	int raix;
187 	u64 mstart;
188 	u64 mend;
189 	u64 offset;
190 };
191 
192 struct fastrpc_buf {
193 	struct fastrpc_user *fl;
194 	struct dma_buf *dmabuf;
195 	struct device *dev;
196 	void *virt;
197 	dma_addr_t dma_addr;
198 	u64 size;
199 	/* Lock for dma buf attachments */
200 	struct mutex lock;
201 	struct list_head attachments;
202 	/* mmap support */
203 	struct list_head node; /* list of user requested mmaps */
204 	uintptr_t raddr;
205 };
206 
207 struct fastrpc_dma_buf_attachment {
208 	struct device *dev;
209 	struct sg_table sgt;
210 	struct list_head node;
211 };
212 
213 struct fastrpc_map {
214 	struct list_head node;
215 	struct fastrpc_user *fl;
216 	int fd;
217 	struct dma_buf *buf;
218 	struct sg_table *table;
219 	struct dma_buf_attachment *attach;
220 	dma_addr_t dma_addr;
221 	u64 size;
222 	void *va;
223 	u64 len;
224 	u64 raddr;
225 	u32 attr;
226 	struct kref refcount;
227 };
228 
229 struct fastrpc_invoke_ctx {
230 	int nscalars;
231 	int nbufs;
232 	int retval;
233 	int pid;
234 	int client_id;
235 	u32 sc;
236 	u32 *crc;
237 	u64 ctxid;
238 	u64 msg_sz;
239 	struct kref refcount;
240 	struct list_head node; /* list of ctxs */
241 	struct completion work;
242 	struct work_struct put_work;
243 	struct fastrpc_msg msg;
244 	struct fastrpc_user *fl;
245 	union fastrpc_remote_arg *rpra;
246 	struct fastrpc_map **maps;
247 	struct fastrpc_buf *buf;
248 	struct fastrpc_invoke_args *args;
249 	struct fastrpc_buf_overlap *olaps;
250 	struct fastrpc_channel_ctx *cctx;
251 };
252 
253 struct fastrpc_session_ctx {
254 	struct device *dev;
255 	int sid;
256 	bool used;
257 	bool valid;
258 };
259 
260 struct fastrpc_soc_data {
261 	u32 sid_pos;
262 	u32 dma_addr_bits_cdsp;
263 	u32 dma_addr_bits_default;
264 };
265 
266 struct fastrpc_channel_ctx {
267 	int domain_id;
268 	int sesscount;
269 	int vmcount;
270 	struct qcom_scm_vmperm vmperms[FASTRPC_MAX_VMIDS];
271 	struct rpmsg_device *rpdev;
272 	struct fastrpc_session_ctx session[FASTRPC_MAX_SESSIONS];
273 	spinlock_t lock;
274 	struct idr ctx_idr;
275 	struct list_head users;
276 	struct kref refcount;
277 	/* Flag if dsp attributes are cached */
278 	bool valid_attributes;
279 	u32 dsp_attributes[FASTRPC_MAX_DSP_ATTRIBUTES];
280 	struct fastrpc_device *secure_fdevice;
281 	struct fastrpc_device *fdevice;
282 	struct fastrpc_buf *remote_heap;
283 	struct list_head invoke_interrupted_mmaps;
284 	bool secure;
285 	bool unsigned_support;
286 	u64 dma_mask;
287 	const struct fastrpc_soc_data *soc_data;
288 };
289 
290 struct fastrpc_device {
291 	struct fastrpc_channel_ctx *cctx;
292 	struct miscdevice miscdev;
293 	bool secure;
294 };
295 
296 struct fastrpc_user {
297 	struct list_head user;
298 	struct list_head maps;
299 	struct list_head pending;
300 	struct list_head mmaps;
301 
302 	struct fastrpc_channel_ctx *cctx;
303 	struct fastrpc_session_ctx *sctx;
304 	struct fastrpc_buf *init_mem;
305 
306 	int client_id;
307 	int pd;
308 	bool is_secure_dev;
309 	/* Lock for lists */
310 	spinlock_t lock;
311 	/* lock for allocations */
312 	struct mutex mutex;
313 };
314 
315 /* Extract SMMU PA from consolidated IOVA */
316 static inline dma_addr_t fastrpc_ipa_to_dma_addr(struct fastrpc_channel_ctx *cctx, dma_addr_t iova)
317 {
318 	if (!cctx->soc_data->sid_pos)
319 		return 0;
320 	return iova & GENMASK_ULL(cctx->soc_data->sid_pos - 1, 0);
321 }
322 
323 /*
324  * Prepare the consolidated IOVA to send to the DSP by prepending the SID
325  * to the SMMU PA at the appropriate position.
326  */
327 static inline u64 fastrpc_sid_offset(struct fastrpc_channel_ctx *cctx,
328 				     struct fastrpc_session_ctx *sctx)
329 {
330 	return (u64)sctx->sid << cctx->soc_data->sid_pos;
331 }
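
/*
 * Worked example (editorial note, using a hypothetical sid_pos of 32): for
 * an SMMU PA of 0x80400000 and SID 3, fastrpc_buf_alloc() adds
 * fastrpc_sid_offset() = 3ULL << 32 to build the consolidated IOVA
 * 0x380400000, and fastrpc_ipa_to_dma_addr() recovers the PA by masking
 * with GENMASK_ULL(31, 0).
 */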
332 
333 static void fastrpc_free_map(struct kref *ref)
334 {
335 	struct fastrpc_map *map;
336 
337 	map = container_of(ref, struct fastrpc_map, refcount);
338 
339 	if (map->table) {
340 		if (map->attr & FASTRPC_ATTR_SECUREMAP) {
341 			struct qcom_scm_vmperm perm;
342 			int vmid = map->fl->cctx->vmperms[0].vmid;
343 			u64 src_perms = BIT(QCOM_SCM_VMID_HLOS) | BIT(vmid);
344 			int err = 0;
345 
346 			perm.vmid = QCOM_SCM_VMID_HLOS;
347 			perm.perm = QCOM_SCM_PERM_RWX;
348 			err = qcom_scm_assign_mem(map->dma_addr, map->len,
349 				&src_perms, &perm, 1);
350 			if (err) {
351 				dev_err(map->fl->sctx->dev,
352 					"Failed to assign memory dma_addr %pad size 0x%llx err %d\n",
353 					&map->dma_addr, map->len, err);
354 				return;
355 			}
356 		}
357 		dma_buf_unmap_attachment_unlocked(map->attach, map->table,
358 						  DMA_BIDIRECTIONAL);
359 		dma_buf_detach(map->buf, map->attach);
360 		dma_buf_put(map->buf);
361 	}
362 
363 	if (map->fl) {
364 		spin_lock(&map->fl->lock);
365 		list_del(&map->node);
366 		spin_unlock(&map->fl->lock);
367 		map->fl = NULL;
368 	}
369 
370 	kfree(map);
371 }
372 
373 static void fastrpc_map_put(struct fastrpc_map *map)
374 {
375 	if (map)
376 		kref_put(&map->refcount, fastrpc_free_map);
377 }
378 
379 static int fastrpc_map_get(struct fastrpc_map *map)
380 {
381 	if (!map)
382 		return -ENOENT;
383 
384 	return kref_get_unless_zero(&map->refcount) ? 0 : -ENOENT;
385 }
386 
387 
388 static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd,
389 			    struct fastrpc_map **ppmap)
390 {
391 	struct fastrpc_map *map = NULL;
392 	struct dma_buf *buf;
393 	int ret = -ENOENT;
394 
395 	buf = dma_buf_get(fd);
396 	if (IS_ERR(buf))
397 		return PTR_ERR(buf);
398 
399 	spin_lock(&fl->lock);
400 	list_for_each_entry(map, &fl->maps, node) {
401 		if (map->fd != fd || map->buf != buf)
402 			continue;
403 
404 		*ppmap = map;
405 		ret = 0;
406 		break;
407 	}
408 	spin_unlock(&fl->lock);
409 
410 	dma_buf_put(buf);
411 
412 	return ret;
413 }
414 
415 static void fastrpc_buf_free(struct fastrpc_buf *buf)
416 {
417 	dma_free_coherent(buf->dev, buf->size, buf->virt,
418 			  fastrpc_ipa_to_dma_addr(buf->fl->cctx, buf->dma_addr));
419 	kfree(buf);
420 }
421 
422 static int __fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
423 			     u64 size, struct fastrpc_buf **obuf)
424 {
425 	struct fastrpc_buf *buf;
426 
427 	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
428 	if (!buf)
429 		return -ENOMEM;
430 
431 	INIT_LIST_HEAD(&buf->attachments);
432 	INIT_LIST_HEAD(&buf->node);
433 	mutex_init(&buf->lock);
434 
435 	buf->fl = fl;
436 	buf->virt = NULL;
437 	buf->dma_addr = 0;
438 	buf->size = size;
439 	buf->dev = dev;
440 	buf->raddr = 0;
441 
442 	buf->virt = dma_alloc_coherent(dev, buf->size, &buf->dma_addr,
443 				       GFP_KERNEL);
444 	if (!buf->virt) {
445 		mutex_destroy(&buf->lock);
446 		kfree(buf);
447 		return -ENOMEM;
448 	}
449 
450 	*obuf = buf;
451 
452 	return 0;
453 }
454 
455 static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
456 			     u64 size, struct fastrpc_buf **obuf)
457 {
458 	int ret;
459 	struct fastrpc_buf *buf;
460 
461 	ret = __fastrpc_buf_alloc(fl, dev, size, obuf);
462 	if (ret)
463 		return ret;
464 
465 	buf = *obuf;
466 
467 	if (fl->sctx && fl->sctx->sid)
468 		buf->dma_addr += fastrpc_sid_offset(fl->cctx, fl->sctx);
469 
470 	return 0;
471 }
472 
473 static int fastrpc_remote_heap_alloc(struct fastrpc_user *fl, struct device *dev,
474 				     u64 size, struct fastrpc_buf **obuf)
475 {
476 	struct device *rdev = &fl->cctx->rpdev->dev;
477 
478 	return __fastrpc_buf_alloc(fl, rdev, size, obuf);
479 }
480 
481 static void fastrpc_channel_ctx_free(struct kref *ref)
482 {
483 	struct fastrpc_channel_ctx *cctx;
484 
485 	cctx = container_of(ref, struct fastrpc_channel_ctx, refcount);
486 
487 	kfree(cctx);
488 }
489 
490 static void fastrpc_channel_ctx_get(struct fastrpc_channel_ctx *cctx)
491 {
492 	kref_get(&cctx->refcount);
493 }
494 
495 static void fastrpc_channel_ctx_put(struct fastrpc_channel_ctx *cctx)
496 {
497 	kref_put(&cctx->refcount, fastrpc_channel_ctx_free);
498 }
499 
500 static void fastrpc_context_free(struct kref *ref)
501 {
502 	struct fastrpc_invoke_ctx *ctx;
503 	struct fastrpc_channel_ctx *cctx;
504 	unsigned long flags;
505 	int i;
506 
507 	ctx = container_of(ref, struct fastrpc_invoke_ctx, refcount);
508 	cctx = ctx->cctx;
509 
510 	for (i = 0; i < ctx->nbufs; i++)
511 		fastrpc_map_put(ctx->maps[i]);
512 
513 	if (ctx->buf)
514 		fastrpc_buf_free(ctx->buf);
515 
516 	spin_lock_irqsave(&cctx->lock, flags);
517 	idr_remove(&cctx->ctx_idr, ctx->ctxid >> 4);
518 	spin_unlock_irqrestore(&cctx->lock, flags);
519 
520 	kfree(ctx->maps);
521 	kfree(ctx->olaps);
522 	kfree(ctx);
523 
524 	fastrpc_channel_ctx_put(cctx);
525 }
526 
527 static void fastrpc_context_get(struct fastrpc_invoke_ctx *ctx)
528 {
529 	kref_get(&ctx->refcount);
530 }
531 
532 static void fastrpc_context_put(struct fastrpc_invoke_ctx *ctx)
533 {
534 	kref_put(&ctx->refcount, fastrpc_context_free);
535 }
536 
537 static void fastrpc_context_put_wq(struct work_struct *work)
538 {
539 	struct fastrpc_invoke_ctx *ctx =
540 			container_of(work, struct fastrpc_invoke_ctx, put_work);
541 
542 	fastrpc_context_put(ctx);
543 }
544 
545 #define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
546 static int olaps_cmp(const void *a, const void *b)
547 {
548 	struct fastrpc_buf_overlap *pa = (struct fastrpc_buf_overlap *)a;
549 	struct fastrpc_buf_overlap *pb = (struct fastrpc_buf_overlap *)b;
550 	/* sort with lowest starting buffer first */
551 	int st = CMP(pa->start, pb->start);
552 	/* sort with highest ending buffer first */
553 	int ed = CMP(pb->end, pa->end);
554 
555 	return st == 0 ? ed : st;
556 }
557 
558 static void fastrpc_get_buff_overlaps(struct fastrpc_invoke_ctx *ctx)
559 {
560 	u64 max_end = 0;
561 	int i;
562 
563 	for (i = 0; i < ctx->nbufs; ++i) {
564 		ctx->olaps[i].start = ctx->args[i].ptr;
565 		ctx->olaps[i].end = ctx->olaps[i].start + ctx->args[i].length;
566 		ctx->olaps[i].raix = i;
567 	}
568 
569 	sort(ctx->olaps, ctx->nbufs, sizeof(*ctx->olaps), olaps_cmp, NULL);
570 
571 	for (i = 0; i < ctx->nbufs; ++i) {
572 		/* Falling inside previous range */
573 		if (ctx->olaps[i].start < max_end) {
574 			ctx->olaps[i].mstart = max_end;
575 			ctx->olaps[i].mend = ctx->olaps[i].end;
576 			ctx->olaps[i].offset = max_end - ctx->olaps[i].start;
577 
578 			if (ctx->olaps[i].end > max_end) {
579 				max_end = ctx->olaps[i].end;
580 			} else {
581 				ctx->olaps[i].mend = 0;
582 				ctx->olaps[i].mstart = 0;
583 			}
584 
585 		} else  {
586 			ctx->olaps[i].mend = ctx->olaps[i].end;
587 			ctx->olaps[i].mstart = ctx->olaps[i].start;
588 			ctx->olaps[i].offset = 0;
589 			max_end = ctx->olaps[i].end;
590 		}
591 	}
592 }
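
/*
 * Worked example (editorial note): take two buffers A = [0x1000, 0x1800)
 * and B = [0x1400, 0x1C00). After sorting, A keeps its own range
 * (mstart = 0x1000, mend = 0x1800, offset = 0) and raises max_end to
 * 0x1800; B starts below max_end, so it gets mstart = 0x1800,
 * mend = 0x1C00 and offset = 0x400, meaning only its non-overlapping tail
 * needs fresh payload space. A buffer wholly contained in an earlier one
 * ends up with mstart = mend = 0 and contributes nothing.
 */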
593 
594 static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
595 			struct fastrpc_user *user, u32 kernel, u32 sc,
596 			struct fastrpc_invoke_args *args)
597 {
598 	struct fastrpc_channel_ctx *cctx = user->cctx;
599 	struct fastrpc_invoke_ctx *ctx = NULL;
600 	unsigned long flags;
601 	int ret;
602 
603 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
604 	if (!ctx)
605 		return ERR_PTR(-ENOMEM);
606 
607 	INIT_LIST_HEAD(&ctx->node);
608 	ctx->fl = user;
609 	ctx->nscalars = REMOTE_SCALARS_LENGTH(sc);
610 	ctx->nbufs = REMOTE_SCALARS_INBUFS(sc) +
611 		     REMOTE_SCALARS_OUTBUFS(sc);
612 
613 	if (ctx->nscalars) {
614 		ctx->maps = kcalloc(ctx->nscalars,
615 				    sizeof(*ctx->maps), GFP_KERNEL);
616 		if (!ctx->maps) {
617 			kfree(ctx);
618 			return ERR_PTR(-ENOMEM);
619 		}
620 		ctx->olaps = kcalloc(ctx->nscalars,
621 				    sizeof(*ctx->olaps), GFP_KERNEL);
622 		if (!ctx->olaps) {
623 			kfree(ctx->maps);
624 			kfree(ctx);
625 			return ERR_PTR(-ENOMEM);
626 		}
627 		ctx->args = args;
628 		fastrpc_get_buff_overlaps(ctx);
629 	}
630 
631 	/* Released in fastrpc_context_put() */
632 	fastrpc_channel_ctx_get(cctx);
633 
634 	ctx->sc = sc;
635 	ctx->retval = -1;
636 	ctx->pid = current->pid;
637 	ctx->client_id = user->client_id;
638 	ctx->cctx = cctx;
639 	init_completion(&ctx->work);
640 	INIT_WORK(&ctx->put_work, fastrpc_context_put_wq);
641 
642 	spin_lock(&user->lock);
643 	list_add_tail(&ctx->node, &user->pending);
644 	spin_unlock(&user->lock);
645 
646 	spin_lock_irqsave(&cctx->lock, flags);
647 	ret = idr_alloc_cyclic(&cctx->ctx_idr, ctx, 1,
648 			       FASTRPC_CTX_MAX, GFP_ATOMIC);
649 	if (ret < 0) {
650 		spin_unlock_irqrestore(&cctx->lock, flags);
651 		goto err_idr;
652 	}
653 	ctx->ctxid = ret << 4;
654 	spin_unlock_irqrestore(&cctx->lock, flags);
655 
656 	kref_init(&ctx->refcount);
657 
658 	return ctx;
659 err_idr:
660 	spin_lock(&user->lock);
661 	list_del(&ctx->node);
662 	spin_unlock(&user->lock);
663 	fastrpc_channel_ctx_put(cctx);
664 	kfree(ctx->maps);
665 	kfree(ctx->olaps);
666 	kfree(ctx);
667 
668 	return ERR_PTR(ret);
669 }
670 
671 static struct sg_table *
672 fastrpc_map_dma_buf(struct dma_buf_attachment *attachment,
673 		    enum dma_data_direction dir)
674 {
675 	struct fastrpc_dma_buf_attachment *a = attachment->priv;
676 	struct sg_table *table;
677 	int ret;
678 
679 	table = &a->sgt;
680 
681 	ret = dma_map_sgtable(attachment->dev, table, dir, 0);
682 	if (ret)
683 		table = ERR_PTR(ret);
684 	return table;
685 }
686 
687 static void fastrpc_unmap_dma_buf(struct dma_buf_attachment *attach,
688 				  struct sg_table *table,
689 				  enum dma_data_direction dir)
690 {
691 	dma_unmap_sgtable(attach->dev, table, dir, 0);
692 }
693 
694 static void fastrpc_release(struct dma_buf *dmabuf)
695 {
696 	struct fastrpc_buf *buffer = dmabuf->priv;
697 
698 	fastrpc_buf_free(buffer);
699 }
700 
701 static int fastrpc_dma_buf_attach(struct dma_buf *dmabuf,
702 				  struct dma_buf_attachment *attachment)
703 {
704 	struct fastrpc_dma_buf_attachment *a;
705 	struct fastrpc_buf *buffer = dmabuf->priv;
706 	int ret;
707 
708 	a = kzalloc(sizeof(*a), GFP_KERNEL);
709 	if (!a)
710 		return -ENOMEM;
711 
712 	ret = dma_get_sgtable(buffer->dev, &a->sgt, buffer->virt,
713 			      fastrpc_ipa_to_dma_addr(buffer->fl->cctx, buffer->dma_addr),
714 			      buffer->size);
715 	if (ret < 0) {
716 		dev_err(buffer->dev, "failed to get scatterlist from DMA API\n");
717 		kfree(a);
718 		return -EINVAL;
719 	}
720 
721 	a->dev = attachment->dev;
722 	INIT_LIST_HEAD(&a->node);
723 	attachment->priv = a;
724 
725 	mutex_lock(&buffer->lock);
726 	list_add(&a->node, &buffer->attachments);
727 	mutex_unlock(&buffer->lock);
728 
729 	return 0;
730 }
731 
732 static void fastrpc_dma_buf_detatch(struct dma_buf *dmabuf,
733 				    struct dma_buf_attachment *attachment)
734 {
735 	struct fastrpc_dma_buf_attachment *a = attachment->priv;
736 	struct fastrpc_buf *buffer = dmabuf->priv;
737 
738 	mutex_lock(&buffer->lock);
739 	list_del(&a->node);
740 	mutex_unlock(&buffer->lock);
741 	sg_free_table(&a->sgt);
742 	kfree(a);
743 }
744 
745 static int fastrpc_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
746 {
747 	struct fastrpc_buf *buf = dmabuf->priv;
748 
749 	iosys_map_set_vaddr(map, buf->virt);
750 
751 	return 0;
752 }
753 
754 static int fastrpc_mmap(struct dma_buf *dmabuf,
755 			struct vm_area_struct *vma)
756 {
757 	struct fastrpc_buf *buf = dmabuf->priv;
758 	size_t size = vma->vm_end - vma->vm_start;
759 
760 	dma_resv_assert_held(dmabuf->resv);
761 
762 	return dma_mmap_coherent(buf->dev, vma, buf->virt,
763 				 fastrpc_ipa_to_dma_addr(buf->fl->cctx, buf->dma_addr), size);
764 }
765 
766 static const struct dma_buf_ops fastrpc_dma_buf_ops = {
767 	.attach = fastrpc_dma_buf_attach,
768 	.detach = fastrpc_dma_buf_detatch,
769 	.map_dma_buf = fastrpc_map_dma_buf,
770 	.unmap_dma_buf = fastrpc_unmap_dma_buf,
771 	.mmap = fastrpc_mmap,
772 	.vmap = fastrpc_vmap,
773 	.release = fastrpc_release,
774 };
775 
776 static dma_addr_t fastrpc_compute_dma_addr(struct fastrpc_user *fl, dma_addr_t sg_dma_addr)
777 {
778 	return sg_dma_addr + fastrpc_sid_offset(fl->cctx, fl->sctx);
779 }
780 
781 static int fastrpc_map_attach(struct fastrpc_user *fl, int fd,
782 			      u64 len, u32 attr, struct fastrpc_map **ppmap)
783 {
784 	struct fastrpc_session_ctx *sess = fl->sctx;
785 	struct fastrpc_map *map = NULL;
786 	struct sg_table *table;
787 	struct scatterlist *sgl = NULL;
788 	int err = 0, sgl_index = 0;
789 
790 	map = kzalloc(sizeof(*map), GFP_KERNEL);
791 	if (!map)
792 		return -ENOMEM;
793 
794 	INIT_LIST_HEAD(&map->node);
795 	kref_init(&map->refcount);
796 
797 	map->fl = fl;
798 	map->fd = fd;
799 	map->buf = dma_buf_get(fd);
800 	if (IS_ERR(map->buf)) {
801 		err = PTR_ERR(map->buf);
802 		goto get_err;
803 	}
804 
805 	map->attach = dma_buf_attach(map->buf, sess->dev);
806 	if (IS_ERR(map->attach)) {
807 		dev_err(sess->dev, "Failed to attach dmabuf\n");
808 		err = PTR_ERR(map->attach);
809 		goto attach_err;
810 	}
811 
812 	table = dma_buf_map_attachment_unlocked(map->attach, DMA_BIDIRECTIONAL);
813 	if (IS_ERR(table)) {
814 		err = PTR_ERR(table);
815 		goto map_err;
816 	}
817 	map->table = table;
818 
819 	if (attr & FASTRPC_ATTR_SECUREMAP)
820 		map->dma_addr = sg_phys(map->table->sgl);
821 	else
822 		map->dma_addr = fastrpc_compute_dma_addr(fl, sg_dma_address(map->table->sgl));
823 	for_each_sg(map->table->sgl, sgl, map->table->nents,
824 		sgl_index)
825 		map->size += sg_dma_len(sgl);
826 	if (len > map->size) {
827 		dev_dbg(sess->dev, "Bad size passed len 0x%llx map size 0x%llx\n",
828 				len, map->size);
829 		err = -EINVAL;
830 		goto map_err;
831 	}
832 	map->va = sg_virt(map->table->sgl);
833 	map->len = len;
834 
835 	if (attr & FASTRPC_ATTR_SECUREMAP) {
836 		/*
837 		 * If subsystem VMIDs are defined in DTSI, then do
838 		 * hyp_assign from HLOS to those VM(s)
839 		 */
840 		u64 src_perms = BIT(QCOM_SCM_VMID_HLOS);
841 		struct qcom_scm_vmperm dst_perms[2] = {0};
842 
843 		dst_perms[0].vmid = QCOM_SCM_VMID_HLOS;
844 		dst_perms[0].perm = QCOM_SCM_PERM_RW;
845 		dst_perms[1].vmid = fl->cctx->vmperms[0].vmid;
846 		dst_perms[1].perm = QCOM_SCM_PERM_RWX;
847 		map->attr = attr;
848 		err = qcom_scm_assign_mem(map->dma_addr, (u64)map->len, &src_perms, dst_perms, 2);
849 		if (err) {
850 			dev_err(sess->dev,
851 				"Failed to assign memory with dma_addr %pad size 0x%llx err %d\n",
852 				&map->dma_addr, map->len, err);
853 			goto map_err;
854 		}
855 	}
856 	spin_lock(&fl->lock);
857 	list_add_tail(&map->node, &fl->maps);
858 	spin_unlock(&fl->lock);
859 	*ppmap = map;
860 
861 	return 0;
862 
863 map_err:
864 	dma_buf_detach(map->buf, map->attach);
865 attach_err:
866 	dma_buf_put(map->buf);
867 get_err:
868 	fastrpc_map_put(map);
869 
870 	return err;
871 }
872 
873 static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
874 			      u64 len, u32 attr, struct fastrpc_map **ppmap)
875 {
876 	struct fastrpc_session_ctx *sess = fl->sctx;
877 	int err = 0;
878 
879 	if (!fastrpc_map_lookup(fl, fd, ppmap)) {
880 		if (!fastrpc_map_get(*ppmap))
881 			return 0;
882 		dev_dbg(sess->dev, "%s: Failed to get map fd=%d\n",
883 			__func__, fd);
884 	}
885 
886 	err = fastrpc_map_attach(fl, fd, len, attr, ppmap);
887 
888 	return err;
889 }
890 
891 /*
892  * Fastrpc payload buffer with metadata looks like:
893  *
894  * >>>>>>  START of METADATA <<<<<<<<<
895  * +---------------------------------+
896  * |           Arguments             |
897  * | type:(union fastrpc_remote_arg) |
898  * |             (0 - N)             |
899  * +---------------------------------+
900  * |         Invoke Buffer list      |
901  * | type:(struct fastrpc_invoke_buf)|
902  * |           (0 - N)               |
903  * +---------------------------------+
904  * |         Page info list          |
905  * | type:(struct fastrpc_phy_page)  |
906  * |             (0 - N)             |
907  * +---------------------------------+
908  * |         Optional info           |
909  * |(can be specific to SoC/Firmware)|
910  * +---------------------------------+
911  * >>>>>>>>  END of METADATA <<<<<<<<<
912  * +---------------------------------+
913  * |         Inline ARGS             |
914  * |            (0-N)                |
915  * +---------------------------------+
916  */
917 
918 static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx)
919 {
920 	int size = 0;
921 
922 	size = (sizeof(struct fastrpc_remote_buf) +
923 		sizeof(struct fastrpc_invoke_buf) +
924 		sizeof(struct fastrpc_phy_page)) * ctx->nscalars +
925 		sizeof(u64) * FASTRPC_MAX_FDLIST +
926 		sizeof(u32) * FASTRPC_MAX_CRCLIST;
927 
928 	return size;
929 }
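
/*
 * Worked example (editorial note, assuming a 64-bit dma_addr_t): each
 * scalar costs 16 (remote buf) + 8 (invoke buf) + 16 (phy page) = 40
 * bytes, and the fixed tail is 16 * 8 (fdlist) + 64 * 4 (crclist) = 384
 * bytes, so a call with two scalars has a 464-byte metadata region.
 */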
930 
931 static u64 fastrpc_get_payload_size(struct fastrpc_invoke_ctx *ctx, int metalen)
932 {
933 	u64 size = 0;
934 	int oix;
935 
936 	size = ALIGN(metalen, FASTRPC_ALIGN);
937 	for (oix = 0; oix < ctx->nbufs; oix++) {
938 		int i = ctx->olaps[oix].raix;
939 
940 		if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) {
941 
942 			if (ctx->olaps[oix].offset == 0)
943 				size = ALIGN(size, FASTRPC_ALIGN);
944 
945 			size += (ctx->olaps[oix].mend - ctx->olaps[oix].mstart);
946 		}
947 	}
948 
949 	return size;
950 }
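
/*
 * Continuing the editorial examples above: with 464 bytes of metadata and
 * the A/B buffers from fastrpc_get_buff_overlaps() (both passed without
 * an fd), the payload is ALIGN(464, 128) = 512 bytes of metadata plus
 * 0x800 bytes for A (offset 0, re-aligned to 128) and 0x400 bytes for
 * B's tail, 3584 bytes in total.
 */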
951 
952 static int fastrpc_create_maps(struct fastrpc_invoke_ctx *ctx)
953 {
954 	struct device *dev = ctx->fl->sctx->dev;
955 	int i, err;
956 
957 	for (i = 0; i < ctx->nscalars; ++i) {
958 
959 		if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1 ||
960 		    ctx->args[i].length == 0)
961 			continue;
962 
963 		if (i < ctx->nbufs)
964 			err = fastrpc_map_create(ctx->fl, ctx->args[i].fd,
965 				 ctx->args[i].length, ctx->args[i].attr, &ctx->maps[i]);
966 		else
967 			err = fastrpc_map_attach(ctx->fl, ctx->args[i].fd,
968 				 ctx->args[i].length, ctx->args[i].attr, &ctx->maps[i]);
969 		if (err) {
970 			dev_err(dev, "Error Creating map %d\n", err);
971 			return -EINVAL;
972 		}
973 
974 	}
975 	return 0;
976 }
977 
978 static struct fastrpc_invoke_buf *fastrpc_invoke_buf_start(union fastrpc_remote_arg *pra, int len)
979 {
980 	return (struct fastrpc_invoke_buf *)(&pra[len]);
981 }
982 
983 static struct fastrpc_phy_page *fastrpc_phy_page_start(struct fastrpc_invoke_buf *buf, int len)
984 {
985 	return (struct fastrpc_phy_page *)(&buf[len]);
986 }
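
/*
 * Illustrative layout (editorial note): for nscalars = 2 the metadata
 * regions are carved from a single allocation:
 *   rpra  = ctx->buf->virt                     (bytes  0..31, two 16-byte args)
 *   list  = fastrpc_invoke_buf_start(rpra, 2)  (bytes 32..47)
 *   pages = fastrpc_phy_page_start(list, 2)    (bytes 48..79)
 * matching the payload diagram above; fastrpc_get_args() does this with
 * ctx->nscalars.
 */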
987 
988 static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
989 {
990 	struct device *dev = ctx->fl->sctx->dev;
991 	union fastrpc_remote_arg *rpra;
992 	struct fastrpc_invoke_buf *list;
993 	struct fastrpc_phy_page *pages;
994 	int inbufs, i, oix, err = 0;
995 	u64 len, rlen, pkt_size;
996 	u64 pg_start, pg_end;
997 	uintptr_t args;
998 	int metalen;
999 
1000 	inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
1001 	metalen = fastrpc_get_meta_size(ctx);
1002 	pkt_size = fastrpc_get_payload_size(ctx, metalen);
1003 
1004 	err = fastrpc_create_maps(ctx);
1005 	if (err)
1006 		return err;
1007 
1008 	ctx->msg_sz = pkt_size;
1009 
1010 	if (ctx->fl->sctx->sid)
1011 		err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf);
1012 	else
1013 		err = fastrpc_remote_heap_alloc(ctx->fl, dev, pkt_size, &ctx->buf);
1014 	if (err)
1015 		return err;
1016 
1017 	memset(ctx->buf->virt, 0, pkt_size);
1018 	rpra = ctx->buf->virt;
1019 	list = fastrpc_invoke_buf_start(rpra, ctx->nscalars);
1020 	pages = fastrpc_phy_page_start(list, ctx->nscalars);
1021 	args = (uintptr_t)ctx->buf->virt + metalen;
1022 	rlen = pkt_size - metalen;
1023 	ctx->rpra = rpra;
1024 
1025 	for (oix = 0; oix < ctx->nbufs; ++oix) {
1026 		int mlen;
1027 
1028 		i = ctx->olaps[oix].raix;
1029 		len = ctx->args[i].length;
1030 
1031 		rpra[i].buf.pv = 0;
1032 		rpra[i].buf.len = len;
1033 		list[i].num = len ? 1 : 0;
1034 		list[i].pgidx = i;
1035 
1036 		if (!len)
1037 			continue;
1038 
1039 		if (ctx->maps[i]) {
1040 			struct vm_area_struct *vma = NULL;
1041 
1042 			rpra[i].buf.pv = (u64) ctx->args[i].ptr;
1043 			pages[i].addr = ctx->maps[i]->dma_addr;
1044 
1045 			mmap_read_lock(current->mm);
1046 			vma = find_vma(current->mm, ctx->args[i].ptr);
1047 			if (vma)
1048 				pages[i].addr += (ctx->args[i].ptr & PAGE_MASK) -
1049 						 vma->vm_start;
1050 			mmap_read_unlock(current->mm);
1051 
1052 			pg_start = (ctx->args[i].ptr & PAGE_MASK) >> PAGE_SHIFT;
1053 			pg_end = ((ctx->args[i].ptr + len - 1) & PAGE_MASK) >>
1054 				  PAGE_SHIFT;
1055 			pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
1056 
1057 		} else {
1058 
1059 			if (ctx->olaps[oix].offset == 0) {
1060 				rlen -= ALIGN(args, FASTRPC_ALIGN) - args;
1061 				args = ALIGN(args, FASTRPC_ALIGN);
1062 			}
1063 
1064 			mlen = ctx->olaps[oix].mend - ctx->olaps[oix].mstart;
1065 
1066 			if (rlen < mlen)
1067 				goto bail;
1068 
1069 			rpra[i].buf.pv = args - ctx->olaps[oix].offset;
1070 			pages[i].addr = ctx->buf->dma_addr -
1071 					ctx->olaps[oix].offset +
1072 					(pkt_size - rlen);
1073 			pages[i].addr = pages[i].addr &	PAGE_MASK;
1074 
1075 			pg_start = (rpra[i].buf.pv & PAGE_MASK) >> PAGE_SHIFT;
1076 			pg_end = ((rpra[i].buf.pv + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
1077 			pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
1078 			args = args + mlen;
1079 			rlen -= mlen;
1080 		}
1081 
1082 		if (i < inbufs && !ctx->maps[i]) {
1083 			void *dst = (void *)(uintptr_t)rpra[i].buf.pv;
1084 			void *src = (void *)(uintptr_t)ctx->args[i].ptr;
1085 
1086 			if (!kernel) {
1087 				if (copy_from_user(dst, (void __user *)src,
1088 						   len)) {
1089 					err = -EFAULT;
1090 					goto bail;
1091 				}
1092 			} else {
1093 				memcpy(dst, src, len);
1094 			}
1095 		}
1096 	}
1097 
1098 	for (i = ctx->nbufs; i < ctx->nscalars; ++i) {
1099 		list[i].num = ctx->args[i].length ? 1 : 0;
1100 		list[i].pgidx = i;
1101 		if (ctx->maps[i]) {
1102 			pages[i].addr = ctx->maps[i]->dma_addr;
1103 			pages[i].size = ctx->maps[i]->size;
1104 		}
1105 		rpra[i].dma.fd = ctx->args[i].fd;
1106 		rpra[i].dma.len = ctx->args[i].length;
1107 		rpra[i].dma.offset = (u64) ctx->args[i].ptr;
1108 	}
1109 
1110 bail:
1111 	if (err)
1112 		dev_err(dev, "Error: get invoke args failed:%d\n", err);
1113 
1114 	return err;
1115 }
1116 
1117 static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
1118 			    u32 kernel)
1119 {
1120 	union fastrpc_remote_arg *rpra = ctx->rpra;
1121 	struct fastrpc_user *fl = ctx->fl;
1122 	struct fastrpc_map *mmap = NULL;
1123 	struct fastrpc_invoke_buf *list;
1124 	struct fastrpc_phy_page *pages;
1125 	u64 *fdlist;
1126 	int i, inbufs, outbufs, handles;
1127 	int ret = 0;
1128 
1129 	inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
1130 	outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
1131 	handles = REMOTE_SCALARS_INHANDLES(ctx->sc) + REMOTE_SCALARS_OUTHANDLES(ctx->sc);
1132 	list = fastrpc_invoke_buf_start(rpra, ctx->nscalars);
1133 	pages = fastrpc_phy_page_start(list, ctx->nscalars);
1134 	fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
1135 
1136 	for (i = inbufs; i < ctx->nbufs; ++i) {
1137 		if (!ctx->maps[i]) {
1138 			void *src = (void *)(uintptr_t)rpra[i].buf.pv;
1139 			void *dst = (void *)(uintptr_t)ctx->args[i].ptr;
1140 			u64 len = rpra[i].buf.len;
1141 
1142 			if (!kernel) {
1143 				if (copy_to_user((void __user *)dst, src, len)) {
1144 					ret = -EFAULT;
1145 					goto cleanup_fdlist;
1146 				}
1147 			} else {
1148 				memcpy(dst, src, len);
1149 			}
1150 		}
1151 	}
1152 
1153 cleanup_fdlist:
1154 	/* Clean up the fdlist, which is updated by the DSP */
1155 	for (i = 0; i < FASTRPC_MAX_FDLIST; i++) {
1156 		if (!fdlist[i])
1157 			break;
1158 		if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap))
1159 			fastrpc_map_put(mmap);
1160 	}
1161 
1162 	return ret;
1163 }
1164 
1165 static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
1166 			       struct fastrpc_invoke_ctx *ctx,
1167 			       u32 kernel, uint32_t handle)
1168 {
1169 	struct fastrpc_channel_ctx *cctx;
1170 	struct fastrpc_user *fl = ctx->fl;
1171 	struct fastrpc_msg *msg = &ctx->msg;
1172 	int ret;
1173 
1174 	cctx = fl->cctx;
1175 	msg->client_id = fl->client_id;
1176 	msg->tid = current->pid;
1177 
1178 	if (kernel)
1179 		msg->client_id = 0;
1180 
1181 	msg->ctx = ctx->ctxid | fl->pd;
1182 	msg->handle = handle;
1183 	msg->sc = ctx->sc;
1184 	msg->addr = ctx->buf ? ctx->buf->dma_addr : 0;
1185 	msg->size = roundup(ctx->msg_sz, PAGE_SIZE);
1186 	fastrpc_context_get(ctx);
1187 
1188 	ret = rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg));
1189 
1190 	if (ret)
1191 		fastrpc_context_put(ctx);
1192 
1193 	return ret;
1194 
1195 }
1196 
1197 static int fastrpc_internal_invoke(struct fastrpc_user *fl,  u32 kernel,
1198 				   u32 handle, u32 sc,
1199 				   struct fastrpc_invoke_args *args)
1200 {
1201 	struct fastrpc_invoke_ctx *ctx = NULL;
1202 	struct fastrpc_buf *buf, *b;
1203 
1204 	int err = 0;
1205 
1206 	if (!fl->sctx)
1207 		return -EINVAL;
1208 
1209 	if (!fl->cctx->rpdev)
1210 		return -EPIPE;
1211 
1212 	if (handle == FASTRPC_INIT_HANDLE && !kernel) {
1213 		dev_warn_ratelimited(fl->sctx->dev, "user app trying to send a kernel RPC message (%d)\n",  handle);
1214 		return -EPERM;
1215 	}
1216 
1217 	ctx = fastrpc_context_alloc(fl, kernel, sc, args);
1218 	if (IS_ERR(ctx))
1219 		return PTR_ERR(ctx);
1220 
1221 	err = fastrpc_get_args(kernel, ctx);
1222 	if (err)
1223 		goto bail;
1224 
1225 	/* make sure that all CPU memory writes are seen by DSP */
1226 	dma_wmb();
1227 	/* Send invoke buffer to remote dsp */
1228 	err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle);
1229 	if (err)
1230 		goto bail;
1231 
1232 	if (kernel) {
1233 		if (!wait_for_completion_timeout(&ctx->work, 10 * HZ))
1234 			err = -ETIMEDOUT;
1235 	} else {
1236 		err = wait_for_completion_interruptible(&ctx->work);
1237 	}
1238 
1239 	if (err)
1240 		goto bail;
1241 
1242 	/* make sure that all memory writes by DSP are seen by CPU */
1243 	dma_rmb();
1244 	/* populate all the output buffers with results */
1245 	err = fastrpc_put_args(ctx, kernel);
1246 	if (err)
1247 		goto bail;
1248 
1249 	/* Check the response from remote dsp */
1250 	err = ctx->retval;
1251 	if (err)
1252 		goto bail;
1253 
1254 bail:
1255 	if (err != -ERESTARTSYS && err != -ETIMEDOUT) {
1256 		/* We are done with this compute context */
1257 		spin_lock(&fl->lock);
1258 		list_del(&ctx->node);
1259 		spin_unlock(&fl->lock);
1260 		fastrpc_context_put(ctx);
1261 	}
1262 
1263 	if (err == -ERESTARTSYS) {
1264 		list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
1265 			list_del(&buf->node);
1266 			list_add_tail(&buf->node, &fl->cctx->invoke_interrupted_mmaps);
1267 		}
1268 	}
1269 
1270 	if (err)
1271 		dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err);
1272 
1273 	return err;
1274 }
1275 
1276 static bool is_session_rejected(struct fastrpc_user *fl, bool unsigned_pd_request)
1277 {
1278 	/* Check if the device node is non-secure and the channel is secure */
1279 	if (!fl->is_secure_dev && fl->cctx->secure) {
1280 		/*
1281 		 * Allow untrusted applications to offload only to the unsigned PD when
1282 		 * the channel is configured as secure, and block untrusted apps on
1283 		 * channels that do not support unsigned PD offload.
1284 		 */
1285 		if (!fl->cctx->unsigned_support || !unsigned_pd_request) {
1286 			dev_err(&fl->cctx->rpdev->dev, "Error: Untrusted application trying to offload to signed PD\n");
1287 			return true;
1288 		}
1289 	}
1290 
1291 	return false;
1292 }
1293 
1294 static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
1295 					      char __user *argp)
1296 {
1297 	struct fastrpc_init_create_static init;
1298 	struct fastrpc_invoke_args *args;
1299 	struct fastrpc_phy_page pages[1];
1300 	char *name;
1301 	int err;
1302 	bool scm_done = false;
1303 	struct {
1304 		int client_id;
1305 		u32 namelen;
1306 		u32 pageslen;
1307 	} inbuf;
1308 	u32 sc;
1309 
1310 	args = kcalloc(FASTRPC_CREATE_STATIC_PROCESS_NARGS, sizeof(*args), GFP_KERNEL);
1311 	if (!args)
1312 		return -ENOMEM;
1313 
1314 	if (copy_from_user(&init, argp, sizeof(init))) {
1315 		err = -EFAULT;
1316 		goto err;
1317 	}
1318 
1319 	if (init.namelen > INIT_FILE_NAMELEN_MAX) {
1320 		err = -EINVAL;
1321 		goto err;
1322 	}
1323 
1324 	name = memdup_user(u64_to_user_ptr(init.name), init.namelen);
1325 	if (IS_ERR(name)) {
1326 		err = PTR_ERR(name);
1327 		goto err;
1328 	}
1329 
1330 	if (!fl->cctx->remote_heap) {
1331 		err = fastrpc_remote_heap_alloc(fl, fl->sctx->dev, init.memlen,
1332 						&fl->cctx->remote_heap);
1333 		if (err)
1334 			goto err_name;
1335 
1336 		/* Map if we have any heap VMIDs associated with this ADSP Static Process. */
1337 		if (fl->cctx->vmcount) {
1338 			u64 src_perms = BIT(QCOM_SCM_VMID_HLOS);
1339 
1340 			err = qcom_scm_assign_mem(fl->cctx->remote_heap->dma_addr,
1341 							(u64)fl->cctx->remote_heap->size,
1342 							&src_perms,
1343 							fl->cctx->vmperms, fl->cctx->vmcount);
1344 			if (err) {
1345 				dev_err(fl->sctx->dev,
1346 					"Failed to assign memory with dma_addr %pad size 0x%llx err %d\n",
1347 					&fl->cctx->remote_heap->dma_addr,
1348 					fl->cctx->remote_heap->size, err);
1349 				goto err_map;
1350 			}
1351 			scm_done = true;
1352 		}
1353 	}
1354 
1355 	inbuf.client_id = fl->client_id;
1356 	inbuf.namelen = init.namelen;
1357 	inbuf.pageslen = 0;
1358 	fl->pd = USER_PD;
1359 
1360 	args[0].ptr = (u64)(uintptr_t)&inbuf;
1361 	args[0].length = sizeof(inbuf);
1362 	args[0].fd = -1;
1363 
1364 	args[1].ptr = (u64)(uintptr_t)name;
1365 	args[1].length = inbuf.namelen;
1366 	args[1].fd = -1;
1367 
1368 	pages[0].addr = fl->cctx->remote_heap->dma_addr;
1369 	pages[0].size = fl->cctx->remote_heap->size;
1370 
1371 	args[2].ptr = (u64)(uintptr_t) pages;
1372 	args[2].length = sizeof(*pages);
1373 	args[2].fd = -1;
1374 
1375 	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_STATIC, 3, 0);
1376 
1377 	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
1378 				      sc, args);
1379 	if (err)
1380 		goto err_invoke;
1381 
1382 	kfree(args);
1383 	kfree(name);
1384 
1385 	return 0;
1386 err_invoke:
1387 	if (fl->cctx->vmcount && scm_done) {
1388 		u64 src_perms = 0;
1389 		struct qcom_scm_vmperm dst_perms;
1390 		u32 i;
1391 
1392 		for (i = 0; i < fl->cctx->vmcount; i++)
1393 			src_perms |= BIT(fl->cctx->vmperms[i].vmid);
1394 
1395 		dst_perms.vmid = QCOM_SCM_VMID_HLOS;
1396 		dst_perms.perm = QCOM_SCM_PERM_RWX;
1397 		err = qcom_scm_assign_mem(fl->cctx->remote_heap->dma_addr,
1398 						(u64)fl->cctx->remote_heap->size,
1399 						&src_perms, &dst_perms, 1);
1400 		if (err)
1401 			dev_err(fl->sctx->dev, "Failed to assign memory dma_addr %pad size 0x%llx err %d\n",
1402 				&fl->cctx->remote_heap->dma_addr, fl->cctx->remote_heap->size, err);
1403 	}
1404 err_map:
1405 	fastrpc_buf_free(fl->cctx->remote_heap);
1406 err_name:
1407 	kfree(name);
1408 err:
1409 	kfree(args);
1410 
1411 	return err;
1412 }
1413 
1414 static int fastrpc_init_create_process(struct fastrpc_user *fl,
1415 					char __user *argp)
1416 {
1417 	struct fastrpc_init_create init;
1418 	struct fastrpc_invoke_args *args;
1419 	struct fastrpc_phy_page pages[1];
1420 	struct fastrpc_map *map = NULL;
1421 	struct fastrpc_buf *imem = NULL;
1422 	int memlen;
1423 	int err;
1424 	struct {
1425 		int client_id;
1426 		u32 namelen;
1427 		u32 filelen;
1428 		u32 pageslen;
1429 		u32 attrs;
1430 		u32 siglen;
1431 	} inbuf;
1432 	u32 sc;
1433 	bool unsigned_module = false;
1434 
1435 	args = kcalloc(FASTRPC_CREATE_PROCESS_NARGS, sizeof(*args), GFP_KERNEL);
1436 	if (!args)
1437 		return -ENOMEM;
1438 
1439 	if (copy_from_user(&init, argp, sizeof(init))) {
1440 		err = -EFAULT;
1441 		goto err;
1442 	}
1443 
1444 	if (init.attrs & FASTRPC_MODE_UNSIGNED_MODULE)
1445 		unsigned_module = true;
1446 
1447 	if (is_session_rejected(fl, unsigned_module)) {
1448 		err = -ECONNREFUSED;
1449 		goto err;
1450 	}
1451 
1452 	if (init.filelen > INIT_FILELEN_MAX) {
1453 		err = -EINVAL;
1454 		goto err;
1455 	}
1456 
1457 	inbuf.client_id = fl->client_id;
1458 	inbuf.namelen = strlen(current->comm) + 1;
1459 	inbuf.filelen = init.filelen;
1460 	inbuf.pageslen = 1;
1461 	inbuf.attrs = init.attrs;
1462 	inbuf.siglen = init.siglen;
1463 	fl->pd = USER_PD;
1464 
1465 	if (init.filelen && init.filefd) {
1466 		err = fastrpc_map_create(fl, init.filefd, init.filelen, 0, &map);
1467 		if (err)
1468 			goto err;
1469 	}
1470 
1471 	memlen = ALIGN(max(INIT_FILELEN_MAX, (int)init.filelen * 4),
1472 		       1024 * 1024);
1473 	err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen,
1474 				&imem);
1475 	if (err)
1476 		goto err_alloc;
1477 
1478 	fl->init_mem = imem;
1479 	args[0].ptr = (u64)(uintptr_t)&inbuf;
1480 	args[0].length = sizeof(inbuf);
1481 	args[0].fd = -1;
1482 
1483 	args[1].ptr = (u64)(uintptr_t)current->comm;
1484 	args[1].length = inbuf.namelen;
1485 	args[1].fd = -1;
1486 
1487 	args[2].ptr = (u64) init.file;
1488 	args[2].length = inbuf.filelen;
1489 	args[2].fd = init.filefd;
1490 
1491 	pages[0].addr = imem->dma_addr;
1492 	pages[0].size = imem->size;
1493 
1494 	args[3].ptr = (u64)(uintptr_t) pages;
1495 	args[3].length = 1 * sizeof(*pages);
1496 	args[3].fd = -1;
1497 
1498 	args[4].ptr = (u64)(uintptr_t)&inbuf.attrs;
1499 	args[4].length = sizeof(inbuf.attrs);
1500 	args[4].fd = -1;
1501 
1502 	args[5].ptr = (u64)(uintptr_t) &inbuf.siglen;
1503 	args[5].length = sizeof(inbuf.siglen);
1504 	args[5].fd = -1;
1505 
1506 	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0);
1507 	if (init.attrs)
1508 		sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 4, 0);
1509 
1510 	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
1511 				      sc, args);
1512 	if (err)
1513 		goto err_invoke;
1514 
1515 	kfree(args);
1516 
1517 	return 0;
1518 
1519 err_invoke:
1520 	fl->init_mem = NULL;
1521 	fastrpc_buf_free(imem);
1522 err_alloc:
1523 	fastrpc_map_put(map);
1524 err:
1525 	kfree(args);
1526 
1527 	return err;
1528 }
1529 
1530 static struct fastrpc_session_ctx *fastrpc_session_alloc(
1531 					struct fastrpc_user *fl)
1532 {
1533 	struct fastrpc_channel_ctx *cctx = fl->cctx;
1534 	struct fastrpc_session_ctx *session = NULL;
1535 	unsigned long flags;
1536 	int i;
1537 
1538 	spin_lock_irqsave(&cctx->lock, flags);
1539 	for (i = 0; i < cctx->sesscount; i++) {
1540 		if (!cctx->session[i].used && cctx->session[i].valid) {
1541 			cctx->session[i].used = true;
1542 			session = &cctx->session[i];
1543 			/* any non-zero ID will work, session_idx + 1 is the simplest one */
1544 			fl->client_id = i + 1;
1545 			break;
1546 		}
1547 	}
1548 	spin_unlock_irqrestore(&cctx->lock, flags);
1549 
1550 	return session;
1551 }
1552 
1553 static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx,
1554 				 struct fastrpc_session_ctx *session)
1555 {
1556 	unsigned long flags;
1557 
1558 	spin_lock_irqsave(&cctx->lock, flags);
1559 	session->used = false;
1560 	spin_unlock_irqrestore(&cctx->lock, flags);
1561 }
1562 
1563 static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
1564 {
1565 	struct fastrpc_invoke_args args[1];
1566 	int client_id = 0;
1567 	u32 sc;
1568 
1569 	client_id = fl->client_id;
1570 	args[0].ptr = (u64)(uintptr_t) &client_id;
1571 	args[0].length = sizeof(client_id);
1572 	args[0].fd = -1;
1573 	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_RELEASE, 1, 0);
1574 
1575 	return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
1576 				       sc, &args[0]);
1577 }
1578 
1579 static int fastrpc_device_release(struct inode *inode, struct file *file)
1580 {
1581 	struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
1582 	struct fastrpc_channel_ctx *cctx = fl->cctx;
1583 	struct fastrpc_invoke_ctx *ctx, *n;
1584 	struct fastrpc_map *map, *m;
1585 	struct fastrpc_buf *buf, *b;
1586 	unsigned long flags;
1587 
1588 	fastrpc_release_current_dsp_process(fl);
1589 
1590 	spin_lock_irqsave(&cctx->lock, flags);
1591 	list_del(&fl->user);
1592 	spin_unlock_irqrestore(&cctx->lock, flags);
1593 
1594 	if (fl->init_mem)
1595 		fastrpc_buf_free(fl->init_mem);
1596 
1597 	list_for_each_entry_safe(ctx, n, &fl->pending, node) {
1598 		list_del(&ctx->node);
1599 		fastrpc_context_put(ctx);
1600 	}
1601 
1602 	list_for_each_entry_safe(map, m, &fl->maps, node)
1603 		fastrpc_map_put(map);
1604 
1605 	list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
1606 		list_del(&buf->node);
1607 		fastrpc_buf_free(buf);
1608 	}
1609 
1610 	fastrpc_session_free(cctx, fl->sctx);
1611 	fastrpc_channel_ctx_put(cctx);
1612 
1613 	mutex_destroy(&fl->mutex);
1614 	kfree(fl);
1615 	file->private_data = NULL;
1616 
1617 	return 0;
1618 }
1619 
1620 static int fastrpc_device_open(struct inode *inode, struct file *filp)
1621 {
1622 	struct fastrpc_channel_ctx *cctx;
1623 	struct fastrpc_device *fdevice;
1624 	struct fastrpc_user *fl = NULL;
1625 	unsigned long flags;
1626 
1627 	fdevice = miscdev_to_fdevice(filp->private_data);
1628 	cctx = fdevice->cctx;
1629 
1630 	fl = kzalloc(sizeof(*fl), GFP_KERNEL);
1631 	if (!fl)
1632 		return -ENOMEM;
1633 
1634 	/* Released in fastrpc_device_release() */
1635 	fastrpc_channel_ctx_get(cctx);
1636 
1637 	filp->private_data = fl;
1638 	spin_lock_init(&fl->lock);
1639 	mutex_init(&fl->mutex);
1640 	INIT_LIST_HEAD(&fl->pending);
1641 	INIT_LIST_HEAD(&fl->maps);
1642 	INIT_LIST_HEAD(&fl->mmaps);
1643 	INIT_LIST_HEAD(&fl->user);
1644 	fl->cctx = cctx;
1645 	fl->is_secure_dev = fdevice->secure;
1646 
1647 	fl->sctx = fastrpc_session_alloc(fl);
1648 	if (!fl->sctx) {
1649 		dev_err(&cctx->rpdev->dev, "No session available\n");
1650 		mutex_destroy(&fl->mutex);
1651 		kfree(fl);
1652 
1653 		return -EBUSY;
1654 	}
1655 
1656 	spin_lock_irqsave(&cctx->lock, flags);
1657 	list_add_tail(&fl->user, &cctx->users);
1658 	spin_unlock_irqrestore(&cctx->lock, flags);
1659 
1660 	return 0;
1661 }
1662 
1663 static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
1664 {
1665 	struct fastrpc_alloc_dma_buf bp;
1666 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
1667 	struct fastrpc_buf *buf = NULL;
1668 	int err;
1669 
1670 	if (copy_from_user(&bp, argp, sizeof(bp)))
1671 		return -EFAULT;
1672 
1673 	err = fastrpc_buf_alloc(fl, fl->sctx->dev, bp.size, &buf);
1674 	if (err)
1675 		return err;
1676 	exp_info.ops = &fastrpc_dma_buf_ops;
1677 	exp_info.size = bp.size;
1678 	exp_info.flags = O_RDWR;
1679 	exp_info.priv = buf;
1680 	buf->dmabuf = dma_buf_export(&exp_info);
1681 	if (IS_ERR(buf->dmabuf)) {
1682 		err = PTR_ERR(buf->dmabuf);
1683 		fastrpc_buf_free(buf);
1684 		return err;
1685 	}
1686 
1687 	bp.fd = dma_buf_fd(buf->dmabuf, O_ACCMODE);
1688 	if (bp.fd < 0) {
1689 		dma_buf_put(buf->dmabuf);
1690 		return -EINVAL;
1691 	}
1692 
1693 	if (copy_to_user(argp, &bp, sizeof(bp))) {
1694 		/*
1695 		 * The usercopy failed, but we can't do much about it, as
1696 		 * dma_buf_fd() already called fd_install() and made the
1697 		 * file descriptor accessible for the current process. It
1698 		 * might already be closed and dmabuf no longer valid when
1699 		 * we reach this point. Therefore "leak" the fd and rely on
1700 		 * the process exit path to do any required cleanup.
1701 		 */
1702 		return -EFAULT;
1703 	}
1704 
1705 	return 0;
1706 }
1707 
1708 static int fastrpc_init_attach(struct fastrpc_user *fl, int pd)
1709 {
1710 	struct fastrpc_invoke_args args[1];
1711 	int client_id = fl->client_id;
1712 	u32 sc;
1713 
1714 	args[0].ptr = (u64)(uintptr_t) &client_id;
1715 	args[0].length = sizeof(client_id);
1716 	args[0].fd = -1;
1717 	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_ATTACH, 1, 0);
1718 	fl->pd = pd;
1719 
1720 	return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
1721 				       sc, &args[0]);
1722 }
1723 
1724 static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp)
1725 {
1726 	struct fastrpc_invoke_args *args = NULL;
1727 	struct fastrpc_invoke inv;
1728 	u32 nscalars;
1729 	int err;
1730 
1731 	if (copy_from_user(&inv, argp, sizeof(inv)))
1732 		return -EFAULT;
1733 
1734 	/* nscalars is truncated here to max supported value */
1735 	nscalars = REMOTE_SCALARS_LENGTH(inv.sc);
1736 	if (nscalars) {
1737 		args = kcalloc(nscalars, sizeof(*args), GFP_KERNEL);
1738 		if (!args)
1739 			return -ENOMEM;
1740 
1741 		if (copy_from_user(args, (void __user *)(uintptr_t)inv.args,
1742 				   nscalars * sizeof(*args))) {
1743 			kfree(args);
1744 			return -EFAULT;
1745 		}
1746 	}
1747 
1748 	err = fastrpc_internal_invoke(fl, false, inv.handle, inv.sc, args);
1749 	kfree(args);
1750 
1751 	return err;
1752 }
1753 
1754 static int fastrpc_get_info_from_dsp(struct fastrpc_user *fl, uint32_t *dsp_attr_buf,
1755 				     uint32_t dsp_attr_buf_len)
1756 {
1757 	struct fastrpc_invoke_args args[2] = { 0 };
1758 
1759 	/*
1760 	 * Capability filled in userspace. This carries the information
1761 	 * about the remoteproc support which is fetched from the remoteproc
1762 	 * sysfs node by userspace.
1763 	 */
1764 	dsp_attr_buf[0] = 0;
1765 	dsp_attr_buf_len -= 1;
1766 
1767 	args[0].ptr = (u64)(uintptr_t)&dsp_attr_buf_len;
1768 	args[0].length = sizeof(dsp_attr_buf_len);
1769 	args[0].fd = -1;
1770 	args[1].ptr = (u64)(uintptr_t)&dsp_attr_buf[1];
1771 	args[1].length = dsp_attr_buf_len * sizeof(u32);
1772 	args[1].fd = -1;
1773 
1774 	return fastrpc_internal_invoke(fl, true, FASTRPC_DSP_UTILITIES_HANDLE,
1775 				       FASTRPC_SCALARS(0, 1, 1), args);
1776 }
1777 
1778 static int fastrpc_get_info_from_kernel(struct fastrpc_ioctl_capability *cap,
1779 					struct fastrpc_user *fl)
1780 {
1781 	struct fastrpc_channel_ctx *cctx = fl->cctx;
1782 	uint32_t attribute_id = cap->attribute_id;
1783 	uint32_t *dsp_attributes;
1784 	unsigned long flags;
1785 	int err;
1786 
1787 	spin_lock_irqsave(&cctx->lock, flags);
1788 	/* Check if we have already queried the DSP for attributes */
1789 	if (cctx->valid_attributes) {
1790 		spin_unlock_irqrestore(&cctx->lock, flags);
1791 		goto done;
1792 	}
1793 	spin_unlock_irqrestore(&cctx->lock, flags);
1794 
1795 	dsp_attributes = kzalloc(FASTRPC_MAX_DSP_ATTRIBUTES_LEN, GFP_KERNEL);
1796 	if (!dsp_attributes)
1797 		return -ENOMEM;
1798 
1799 	err = fastrpc_get_info_from_dsp(fl, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES);
1800 	if (err == DSP_UNSUPPORTED_API) {
1801 		dev_info(&cctx->rpdev->dev,
1802 			 "Warning: DSP capabilities not supported\n");
1803 		kfree(dsp_attributes);
1804 		return -EOPNOTSUPP;
1805 	} else if (err) {
1806 		dev_err(&cctx->rpdev->dev, "Error: dsp information is incorrect err: %d\n", err);
1807 		kfree(dsp_attributes);
1808 		return err;
1809 	}
1810 
1811 	spin_lock_irqsave(&cctx->lock, flags);
1812 	memcpy(cctx->dsp_attributes, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES_LEN);
1813 	cctx->valid_attributes = true;
1814 	spin_unlock_irqrestore(&cctx->lock, flags);
1815 	kfree(dsp_attributes);
1816 done:
1817 	cap->capability = cctx->dsp_attributes[attribute_id];
1818 	return 0;
1819 }
1820 
1821 static int fastrpc_get_dsp_info(struct fastrpc_user *fl, char __user *argp)
1822 {
1823 	struct fastrpc_ioctl_capability cap = {0};
1824 	int err = 0;
1825 
1826 	if (copy_from_user(&cap, argp, sizeof(cap)))
1827 		return -EFAULT;
1828 
1829 	cap.capability = 0;
1830 
1831 	if (cap.attribute_id >= FASTRPC_MAX_DSP_ATTRIBUTES) {
1832 		dev_err(&fl->cctx->rpdev->dev, "Error: invalid attribute: %d, err: %d\n",
1833 			cap.attribute_id, err);
1834 		return -EOVERFLOW;
1835 	}
1836 
1837 	err = fastrpc_get_info_from_kernel(&cap, fl);
1838 	if (err)
1839 		return err;
1840 
1841 	if (copy_to_user(argp, &cap, sizeof(cap)))
1842 		return -EFAULT;
1843 
1844 	return 0;
1845 }
1846 
1847 static int fastrpc_req_munmap_impl(struct fastrpc_user *fl, struct fastrpc_buf *buf)
1848 {
1849 	struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
1850 	struct fastrpc_munmap_req_msg req_msg;
1851 	struct device *dev = fl->sctx->dev;
1852 	int err;
1853 	u32 sc;
1854 
1855 	req_msg.client_id = fl->client_id;
1856 	req_msg.size = buf->size;
1857 	req_msg.vaddr = buf->raddr;
1858 
1859 	args[0].ptr = (u64) (uintptr_t) &req_msg;
1860 	args[0].length = sizeof(req_msg);
1861 
1862 	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MUNMAP, 1, 0);
1863 	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
1864 				      &args[0]);
1865 	if (!err) {
1866 		dev_dbg(dev, "unmmap\tpt 0x%09lx OK\n", buf->raddr);
1867 		spin_lock(&fl->lock);
1868 		list_del(&buf->node);
1869 		spin_unlock(&fl->lock);
1870 		fastrpc_buf_free(buf);
1871 	} else {
1872 		dev_err(dev, "unmmap\tpt 0x%09lx ERROR\n", buf->raddr);
1873 	}
1874 
1875 	return err;
1876 }
1877 
1878 static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
1879 {
1880 	struct fastrpc_buf *buf = NULL, *iter, *b;
1881 	struct fastrpc_req_munmap req;
1882 	struct device *dev = fl->sctx->dev;
1883 
1884 	if (copy_from_user(&req, argp, sizeof(req)))
1885 		return -EFAULT;
1886 
1887 	spin_lock(&fl->lock);
1888 	list_for_each_entry_safe(iter, b, &fl->mmaps, node) {
1889 		if ((iter->raddr == req.vaddrout) && (iter->size == req.size)) {
1890 			buf = iter;
1891 			break;
1892 		}
1893 	}
1894 	spin_unlock(&fl->lock);
1895 
1896 	if (!buf) {
1897 		dev_err(dev, "mmap\t\tpt 0x%09llx [len 0x%08llx] not in list\n",
1898 			req.vaddrout, req.size);
1899 		return -EINVAL;
1900 	}
1901 
1902 	return fastrpc_req_munmap_impl(fl, buf);
1903 }
1904 
1905 static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
1906 {
1907 	struct fastrpc_invoke_args args[3] = { [0 ... 2] = { 0 } };
1908 	struct fastrpc_buf *buf = NULL;
1909 	struct fastrpc_mmap_req_msg req_msg;
1910 	struct fastrpc_mmap_rsp_msg rsp_msg;
1911 	struct fastrpc_phy_page pages;
1912 	struct fastrpc_req_mmap req;
1913 	struct device *dev = fl->sctx->dev;
1914 	int err;
1915 	u32 sc;
1916 
1917 	if (copy_from_user(&req, argp, sizeof(req)))
1918 		return -EFAULT;
1919 
1920 	if (req.flags != ADSP_MMAP_ADD_PAGES && req.flags != ADSP_MMAP_REMOTE_HEAP_ADDR) {
1921 		dev_err(dev, "flag not supported 0x%x\n", req.flags);
1922 
1923 		return -EINVAL;
1924 	}
1925 
1926 	if (req.vaddrin) {
1927 		dev_err(dev, "adding user allocated pages is not supported\n");
1928 		return -EINVAL;
1929 	}
1930 
1931 	if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
1932 		err = fastrpc_remote_heap_alloc(fl, dev, req.size, &buf);
1933 	else
1934 		err = fastrpc_buf_alloc(fl, dev, req.size, &buf);
1935 
1936 	if (err) {
1937 		dev_err(dev, "failed to allocate buffer\n");
1938 		return err;
1939 	}
1940 
1941 	req_msg.client_id = fl->client_id;
1942 	req_msg.flags = req.flags;
1943 	req_msg.vaddr = req.vaddrin;
1944 	req_msg.num = sizeof(pages);
1945 
1946 	args[0].ptr = (u64) (uintptr_t) &req_msg;
1947 	args[0].length = sizeof(req_msg);
1948 
1949 	pages.addr = buf->dma_addr;
1950 	pages.size = buf->size;
1951 
1952 	args[1].ptr = (u64) (uintptr_t) &pages;
1953 	args[1].length = sizeof(pages);
1954 
1955 	args[2].ptr = (u64) (uintptr_t) &rsp_msg;
1956 	args[2].length = sizeof(rsp_msg);
1957 
1958 	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MMAP, 2, 1);
1959 	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
1960 				      &args[0]);
1961 	if (err) {
1962 		dev_err(dev, "mmap error (len 0x%08llx)\n", buf->size);
1963 		fastrpc_buf_free(buf);
1964 		return err;
1965 	}
1966 
1967 	/* update the buffer to be able to deallocate the memory on the DSP */
1968 	buf->raddr = (uintptr_t) rsp_msg.vaddr;
1969 
1970 	/* let the client know the address to use */
1971 	req.vaddrout = rsp_msg.vaddr;
1972 
1973 	/* Add memory to static PD pool, protection through hypervisor */
1974 	if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) {
1975 		u64 src_perms = BIT(QCOM_SCM_VMID_HLOS);
1976 
1977 		err = qcom_scm_assign_mem(buf->dma_addr, (u64)buf->size,
1978 			&src_perms, fl->cctx->vmperms, fl->cctx->vmcount);
1979 		if (err) {
1980 			dev_err(fl->sctx->dev,
1981 				"Failed to assign memory dma_addr %pad size 0x%llx err %d",
1982 				&buf->dma_addr, buf->size, err);
1983 			goto err_assign;
1984 		}
1985 	}
1986 
1987 	spin_lock(&fl->lock);
1988 	list_add_tail(&buf->node, &fl->mmaps);
1989 	spin_unlock(&fl->lock);
1990 
1991 	if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
1992 		err = -EFAULT;
1993 		goto err_assign;
1994 	}
1995 
1996 	dev_dbg(dev, "mmap\t\tpt 0x%09lx OK [len 0x%08llx]\n",
1997 		buf->raddr, buf->size);
1998 
1999 	return 0;
2000 
2001 err_assign:
2002 	fastrpc_req_munmap_impl(fl, buf);
2003 
2004 	return err;
2005 }
2006 
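/*
 * Tear down a mapping created by fastrpc_req_mem_map(): find the map by
 * remote address (and by fd too, unless req->fd is negative, which acts
 * as a wildcard), ask the DSP to unmap it, and only then drop the local
 * reference so the SMMU mapping outlives the DSP-side one.
 */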
2007 static int fastrpc_req_mem_unmap_impl(struct fastrpc_user *fl, struct fastrpc_mem_unmap *req)
2008 {
2009 	struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
2010 	struct fastrpc_map *map = NULL, *iter, *m;
2011 	struct fastrpc_mem_unmap_req_msg req_msg = { 0 };
2012 	int err = 0;
2013 	u32 sc;
2014 	struct device *dev = fl->sctx->dev;
2015 
2016 	spin_lock(&fl->lock);
2017 	list_for_each_entry_safe(iter, m, &fl->maps, node) {
2018 		if ((req->fd < 0 || iter->fd == req->fd) && (iter->raddr == req->vaddr)) {
2019 			map = iter;
2020 			break;
2021 		}
2022 	}
2023 
2024 	spin_unlock(&fl->lock);
2025 
2026 	if (!map) {
2027 		dev_err(dev, "map not in list\n");
2028 		return -EINVAL;
2029 	}
2030 
2031 	req_msg.client_id = fl->client_id;
2032 	req_msg.len = map->len;
2033 	req_msg.vaddrin = map->raddr;
2034 	req_msg.fd = map->fd;
2035 
2036 	args[0].ptr = (u64) (uintptr_t) &req_msg;
2037 	args[0].length = sizeof(req_msg);
2038 
2039 	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MEM_UNMAP, 1, 0);
2040 	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
2041 				      &args[0]);
2042 	if (err) {
2043 		dev_err(dev, "unmap\tpt fd = %d, 0x%09llx error\n", map->fd, map->raddr);
2044 		return err;
2045 	}
2046 	fastrpc_map_put(map);
2047 
2048 	return 0;
2049 }
2050 
2051 static int fastrpc_req_mem_unmap(struct fastrpc_user *fl, char __user *argp)
2052 {
2053 	struct fastrpc_mem_unmap req;
2054 
2055 	if (copy_from_user(&req, argp, sizeof(req)))
2056 		return -EFAULT;
2057 
2058 	return fastrpc_req_mem_unmap_impl(fl, &req);
2059 }
2060 
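/*
 * FASTRPC_IOCTL_MEM_MAP handler: unlike FASTRPC_IOCTL_MMAP above, this
 * maps an existing dma-buf (req.fd) for the DSP. The SMMU mapping is
 * created first, then FASTRPC_RMID_INIT_MEM_MAP publishes it to the DSP;
 * args[2] is a zero-length placeholder, so the scalars describe three
 * input buffers and one output.
 */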
2061 static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp)
2062 {
2063 	struct fastrpc_invoke_args args[4] = { [0 ... 3] = { 0 } };
2064 	struct fastrpc_mem_map_req_msg req_msg = { 0 };
2065 	struct fastrpc_mmap_rsp_msg rsp_msg = { 0 };
2066 	struct fastrpc_mem_unmap req_unmap = { 0 };
2067 	struct fastrpc_phy_page pages = { 0 };
2068 	struct fastrpc_mem_map req;
2069 	struct device *dev = fl->sctx->dev;
2070 	struct fastrpc_map *map = NULL;
2071 	int err;
2072 	u32 sc;
2073 
2074 	if (copy_from_user(&req, argp, sizeof(req)))
2075 		return -EFAULT;
2076 
2077 	/* create SMMU mapping */
2078 	err = fastrpc_map_create(fl, req.fd, req.length, 0, &map);
2079 	if (err) {
2080 		dev_err(dev, "failed to map buffer, fd = %d\n", req.fd);
2081 		return err;
2082 	}
2083 
2084 	req_msg.client_id = fl->client_id;
2085 	req_msg.fd = req.fd;
2086 	req_msg.offset = req.offset;
2087 	req_msg.vaddrin = req.vaddrin;
2088 	map->va = (void *) (uintptr_t) req.vaddrin;
2089 	req_msg.flags = req.flags;
2090 	req_msg.num = sizeof(pages);
2091 	req_msg.data_len = 0;
2092 
2093 	args[0].ptr = (u64) (uintptr_t) &req_msg;
2094 	args[0].length = sizeof(req_msg);
2095 
2096 	pages.addr = map->dma_addr;
2097 	pages.size = map->len;
2098 
2099 	args[1].ptr = (u64) (uintptr_t) &pages;
2100 	args[1].length = sizeof(pages);
2101 
2102 	args[2].ptr = (u64) (uintptr_t) &pages;
2103 	args[2].length = 0;
2104 
2105 	args[3].ptr = (u64) (uintptr_t) &rsp_msg;
2106 	args[3].length = sizeof(rsp_msg);
2107 
2108 	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MEM_MAP, 3, 1);
2109 	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, &args[0]);
2110 	if (err) {
2111 		dev_err(dev, "mem mmap error, fd %d, vaddr %llx, size %lld\n",
2112 			req.fd, req.vaddrin, map->len);
2113 		goto err_invoke;
2114 	}
2115 
2116 	/* record the remote address so the mapping can later be released on the DSP */
2117 	map->raddr = rsp_msg.vaddr;
2118 
2119 	/* let the client know the address to use */
2120 	req.vaddrout = rsp_msg.vaddr;
2121 
2122 	if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
2123 		/* unmap the memory and release the buffer */
		req_unmap.fd = req.fd;	/* a zeroed fd would only match maps on fd 0 */
2124 		req_unmap.vaddr = (uintptr_t) rsp_msg.vaddr;
2125 		req_unmap.length = map->len;
2126 		fastrpc_req_mem_unmap_impl(fl, &req_unmap);
2127 		return -EFAULT;
2128 	}
2129 
2130 	return 0;
2131 
2132 err_invoke:
2133 	fastrpc_map_put(map);
2134 
2135 	return err;
2136 }
2137 
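/*
 * ioctl dispatcher for the /dev/fastrpc-* nodes. The UAPI structs use only
 * fixed-size fields, so the same handler also serves compat_ioctl below.
 * A minimal userspace sketch (hypothetical and untested; assumes the
 * installed uapi header and an already-opened device fd):
 *
 *	#include <sys/ioctl.h>
 *	#include <misc/fastrpc.h>
 *
 *	struct fastrpc_req_mmap req = {
 *		.size  = 0x10000,
 *		.flags = ADSP_MMAP_ADD_PAGES,
 *	};
 *	if (ioctl(fd, FASTRPC_IOCTL_MMAP, &req) == 0)
 *		printf("DSP address: 0x%llx\n", (unsigned long long)req.vaddrout);
 */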
2138 static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
2139 				 unsigned long arg)
2140 {
2141 	struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
2142 	char __user *argp = (char __user *)arg;
2143 	int err;
2144 
2145 	switch (cmd) {
2146 	case FASTRPC_IOCTL_INVOKE:
2147 		err = fastrpc_invoke(fl, argp);
2148 		break;
2149 	case FASTRPC_IOCTL_INIT_ATTACH:
2150 		err = fastrpc_init_attach(fl, ROOT_PD);
2151 		break;
2152 	case FASTRPC_IOCTL_INIT_ATTACH_SNS:
2153 		err = fastrpc_init_attach(fl, SENSORS_PD);
2154 		break;
2155 	case FASTRPC_IOCTL_INIT_CREATE_STATIC:
2156 		err = fastrpc_init_create_static_process(fl, argp);
2157 		break;
2158 	case FASTRPC_IOCTL_INIT_CREATE:
2159 		err = fastrpc_init_create_process(fl, argp);
2160 		break;
2161 	case FASTRPC_IOCTL_ALLOC_DMA_BUFF:
2162 		err = fastrpc_dmabuf_alloc(fl, argp);
2163 		break;
2164 	case FASTRPC_IOCTL_MMAP:
2165 		err = fastrpc_req_mmap(fl, argp);
2166 		break;
2167 	case FASTRPC_IOCTL_MUNMAP:
2168 		err = fastrpc_req_munmap(fl, argp);
2169 		break;
2170 	case FASTRPC_IOCTL_MEM_MAP:
2171 		err = fastrpc_req_mem_map(fl, argp);
2172 		break;
2173 	case FASTRPC_IOCTL_MEM_UNMAP:
2174 		err = fastrpc_req_mem_unmap(fl, argp);
2175 		break;
2176 	case FASTRPC_IOCTL_GET_DSP_INFO:
2177 		err = fastrpc_get_dsp_info(fl, argp);
2178 		break;
2179 	default:
2180 		err = -ENOTTY;
2181 		break;
2182 	}
2183 
2184 	return err;
2185 }
2186 
2187 static const struct file_operations fastrpc_fops = {
2188 	.open = fastrpc_device_open,
2189 	.release = fastrpc_device_release,
2190 	.unlocked_ioctl = fastrpc_device_ioctl,
2191 	.compat_ioctl = fastrpc_device_ioctl,
2192 };
2193 
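/*
 * Probe one compute-bank ("qcom,fastrpc-compute-cb") child node: claim a
 * session slot in the parent channel context, read the session id from
 * "reg", duplicate the session until "qcom,nsessions" slots exist, and set
 * the DMA mask from the SoC data (CDSP may address more than 32 bits).
 */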
2194 static int fastrpc_cb_probe(struct platform_device *pdev)
2195 {
2196 	struct fastrpc_channel_ctx *cctx;
2197 	struct fastrpc_session_ctx *sess;
2198 	struct device *dev = &pdev->dev;
2199 	int i;
	u32 sessions = 0;	/* of_property_read_u32() expects a u32 */
2200 	unsigned long flags;
2201 	int rc;
2202 	u32 dma_bits;
2203 
2204 	cctx = dev_get_drvdata(dev->parent);
2205 	if (!cctx)
2206 		return -EINVAL;
2207 
2208 	of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions);
2209 
2210 	spin_lock_irqsave(&cctx->lock, flags);
2211 	if (cctx->sesscount >= FASTRPC_MAX_SESSIONS) {
2212 		dev_err(&pdev->dev, "too many sessions\n");
2213 		spin_unlock_irqrestore(&cctx->lock, flags);
2214 		return -ENOSPC;
2215 	}
2216 	dma_bits = cctx->soc_data->dma_addr_bits_default;
2217 	sess = &cctx->session[cctx->sesscount++];
2218 	sess->used = false;
2219 	sess->valid = true;
2220 	sess->dev = dev;
2221 	dev_set_drvdata(dev, sess);
2222 
2223 	if (cctx->domain_id == CDSP_DOMAIN_ID)
2224 		dma_bits = cctx->soc_data->dma_addr_bits_cdsp;
2225 
2226 	if (of_property_read_u32(dev->of_node, "reg", &sess->sid))
2227 		dev_info(dev, "FastRPC Session ID not specified in DT\n");
2228 
2229 	if (sessions > 0) {
2230 		struct fastrpc_session_ctx *dup_sess;
2231 
2232 		for (i = 1; i < sessions; i++) {
2233 			if (cctx->sesscount >= FASTRPC_MAX_SESSIONS)
2234 				break;
2235 			dup_sess = &cctx->session[cctx->sesscount++];
2236 			memcpy(dup_sess, sess, sizeof(*dup_sess));
2237 		}
2238 	}
2239 	spin_unlock_irqrestore(&cctx->lock, flags);
2240 	rc = dma_set_mask(dev, DMA_BIT_MASK(dma_bits));
2241 	if (rc) {
2242 		dev_err(dev, "%u-bit DMA enable failed\n", dma_bits);
2243 		return rc;
2244 	}
2245 
2246 	return 0;
2247 }
2248 
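/*
 * On removal, invalidate every session slot carrying this context bank's
 * sid; duplicated sessions created in fastrpc_cb_probe() share the sid,
 * so they are all torn down here.
 */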
2249 static void fastrpc_cb_remove(struct platform_device *pdev)
2250 {
2251 	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(pdev->dev.parent);
2252 	struct fastrpc_session_ctx *sess = dev_get_drvdata(&pdev->dev);
2253 	unsigned long flags;
2254 	int i;
2255 
2256 	spin_lock_irqsave(&cctx->lock, flags);
2257 	for (i = 0; i < FASTRPC_MAX_SESSIONS; i++) {
2258 		if (cctx->session[i].sid == sess->sid) {
2259 			cctx->session[i].valid = false;
2260 			cctx->sesscount--;
2261 		}
2262 	}
2263 	spin_unlock_irqrestore(&cctx->lock, flags);
2264 }
2265 
2266 static const struct of_device_id fastrpc_match_table[] = {
2267 	{ .compatible = "qcom,fastrpc-compute-cb", },
2268 	{}
2269 };
2270 
2271 static struct platform_driver fastrpc_cb_driver = {
2272 	.probe = fastrpc_cb_probe,
2273 	.remove = fastrpc_cb_remove,
2274 	.driver = {
2275 		.name = "qcom,fastrpc-cb",
2276 		.of_match_table = fastrpc_match_table,
2277 		.suppress_bind_attrs = true,
2278 	},
2279 };
2280 
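/* Create the /dev/fastrpc-<domain>[-secure] misc node for one channel. */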
2281 static int fastrpc_device_register(struct device *dev, struct fastrpc_channel_ctx *cctx,
2282 				   bool is_secured, const char *domain)
2283 {
2284 	struct fastrpc_device *fdev;
2285 	int err;
2286 
2287 	fdev = devm_kzalloc(dev, sizeof(*fdev), GFP_KERNEL);
2288 	if (!fdev)
2289 		return -ENOMEM;
2290 
2291 	fdev->secure = is_secured;
2292 	fdev->cctx = cctx;
2293 	fdev->miscdev.minor = MISC_DYNAMIC_MINOR;
2294 	fdev->miscdev.fops = &fastrpc_fops;
2295 	fdev->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "fastrpc-%s%s",
2296 					    domain, is_secured ? "-secure" : "");
2297 	if (!fdev->miscdev.name)
2298 		return -ENOMEM;
2299 
2300 	err = misc_register(&fdev->miscdev);
2301 	if (!err) {
2302 		if (is_secured)
2303 			cctx->secure_fdevice = fdev;
2304 		else
2305 			cctx->fdevice = fdev;
2306 	}
2307 
2308 	return err;
2309 }
2310 
2311 static int fastrpc_get_domain_id(const char *domain)
2312 {
2313 	if (!strncmp(domain, "adsp", 4))
2314 		return ADSP_DOMAIN_ID;
2315 	else if (!strncmp(domain, "cdsp", 4))
2316 		return CDSP_DOMAIN_ID;
2317 	else if (!strncmp(domain, "mdsp", 4))
2318 		return MDSP_DOMAIN_ID;
2319 	else if (!strncmp(domain, "sdsp", 4))
2320 		return SDSP_DOMAIN_ID;
2321 	else if (!strncmp(domain, "gdsp", 4))
2322 		return GDSP_DOMAIN_ID;
2323 
2324 	return -EINVAL;
2325 }
2326 
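/*
 * Per-SoC parameters: the dma_addr_bits_* fields select the DMA mask in
 * fastrpc_cb_probe(); sid_pos is presumably the bit position at which the
 * session id is folded into device addresses elsewhere in the driver.
 */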
2327 static const struct fastrpc_soc_data kaanapali_soc_data = {
2328 	.sid_pos = 56,
2329 	.dma_addr_bits_cdsp = 34,
2330 	.dma_addr_bits_default = 32,
2331 };
2332 
2333 static const struct fastrpc_soc_data default_soc_data = {
2334 	.sid_pos = 32,
2335 	.dma_addr_bits_cdsp = 32,
2336 	.dma_addr_bits_default = 32,
2337 };
2338 
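/*
 * Channel bring-up: map the DT "label" to a domain id, set up optional
 * reserved memory and "qcom,vmids" permissions for secure heaps, then
 * register the misc device node(s). CDSP and GDSP get both a secure and a
 * non-secure node so signed and unsigned PDs can coexist.
 */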
2339 static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
2340 {
2341 	struct device *rdev = &rpdev->dev;
2342 	struct fastrpc_channel_ctx *data;
2343 	int i, err, domain_id = -1, vmcount;
2344 	const char *domain;
2345 	bool secure_dsp;
2346 	unsigned int vmids[FASTRPC_MAX_VMIDS];
2347 	const struct fastrpc_soc_data *soc_data;
2348 
2349 	soc_data = device_get_match_data(rdev);
2350 
2351 	err = of_property_read_string(rdev->of_node, "label", &domain);
2352 	if (err) {
2353 		dev_info(rdev, "FastRPC Domain not specified in DT\n");
2354 		return err;
2355 	}
2356 
2357 	domain_id = fastrpc_get_domain_id(domain);
2358 
2359 	if (domain_id < 0) {
2360 		dev_info(rdev, "FastRPC Domain %s not supported\n", domain);
2361 		return -EINVAL;
2362 	}
2363 
2364 	if (of_reserved_mem_device_init_by_idx(rdev, rdev->of_node, 0))
2365 		dev_info(rdev, "no reserved DMA memory for FASTRPC\n");
2366 
2367 	vmcount = of_property_read_variable_u32_array(rdev->of_node,
2368 				"qcom,vmids", &vmids[0], 0, FASTRPC_MAX_VMIDS);
2369 	if (vmcount < 0)
2370 		vmcount = 0;
2371 	else if (!qcom_scm_is_available())
2372 		return -EPROBE_DEFER;
2373 
2374 	data = kzalloc(sizeof(*data), GFP_KERNEL);
2375 	if (!data)
2376 		return -ENOMEM;
2377 
2378 	if (vmcount) {
2379 		data->vmcount = vmcount;
2380 		for (i = 0; i < data->vmcount; i++) {
2381 			data->vmperms[i].vmid = vmids[i];
2382 			data->vmperms[i].perm = QCOM_SCM_PERM_RWX;
2383 		}
2384 	}
2385 
2386 	if (domain_id == SDSP_DOMAIN_ID) {
2387 		struct resource res;
2388 		u64 src_perms;
2389 
2390 		err = of_reserved_mem_region_to_resource(rdev->of_node, 0, &res);
2391 		if (!err) {
2392 			src_perms = BIT(QCOM_SCM_VMID_HLOS);
2393 
2394 			qcom_scm_assign_mem(res.start, resource_size(&res), &src_perms,
2395 				    data->vmperms, data->vmcount);
2396 		}
2397 
2398 	}
2399 
2400 	secure_dsp = !(of_property_read_bool(rdev->of_node, "qcom,non-secure-domain"));
2401 	data->secure = secure_dsp;
2402 	data->soc_data = soc_data;
2403 
2404 	switch (domain_id) {
2405 	case ADSP_DOMAIN_ID:
2406 	case MDSP_DOMAIN_ID:
2407 	case SDSP_DOMAIN_ID:
2408 		/* Unsigned PD offloading is only supported on CDSP and GDSP */
2409 		data->unsigned_support = false;
2410 		err = fastrpc_device_register(rdev, data, secure_dsp, domain);
2411 		if (err)
2412 			goto err_free_data;
2413 		break;
2414 	case CDSP_DOMAIN_ID:
2415 	case GDSP_DOMAIN_ID:
2416 		data->unsigned_support = true;
2417 		/* Create both device nodes so that both signed and unsigned PDs can be used */
2418 		err = fastrpc_device_register(rdev, data, true, domain);
2419 		if (err)
2420 			goto err_free_data;
2421 
2422 		err = fastrpc_device_register(rdev, data, false, domain);
2423 		if (err)
2424 			goto err_deregister_fdev;
2425 		break;
2426 	default:
2427 		err = -EINVAL;
2428 		goto err_free_data;
2429 	}
2430 
2431 	kref_init(&data->refcount);
2432 
2433 	dev_set_drvdata(&rpdev->dev, data);
2434 	rdev->dma_mask = &data->dma_mask;
2435 	dma_set_mask_and_coherent(rdev, DMA_BIT_MASK(32));
2436 	INIT_LIST_HEAD(&data->users);
2437 	INIT_LIST_HEAD(&data->invoke_interrupted_mmaps);
2438 	spin_lock_init(&data->lock);
2439 	idr_init(&data->ctx_idr);
2440 	data->domain_id = domain_id;
2441 	data->rpdev = rpdev;
2442 
2443 	err = of_platform_populate(rdev->of_node, NULL, NULL, rdev);
2444 	if (err)
2445 		goto err_deregister_fdev;
2446 
2447 	return 0;
2448 
2449 err_deregister_fdev:
2450 	if (data->fdevice)
2451 		misc_deregister(&data->fdevice->miscdev);
2452 	if (data->secure_fdevice)
2453 		misc_deregister(&data->secure_fdevice->miscdev);
2454 
2455 err_free_data:
2456 	kfree(data);
2457 	return err;
2458 }
2459 
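/* Fail every pending invocation with -EPIPE once the channel is gone. */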
2460 static void fastrpc_notify_users(struct fastrpc_user *user)
2461 {
2462 	struct fastrpc_invoke_ctx *ctx;
2463 
2464 	spin_lock(&user->lock);
2465 	list_for_each_entry(ctx, &user->pending, node) {
2466 		ctx->retval = -EPIPE;
2467 		complete(&ctx->work);
2468 	}
2469 	spin_unlock(&user->lock);
2470 }
2471 
2472 static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
2473 {
2474 	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
2475 	struct fastrpc_buf *buf, *b;
2476 	struct fastrpc_user *user;
2477 	unsigned long flags;
2478 
2479 	/* No invocations past this point */
2480 	spin_lock_irqsave(&cctx->lock, flags);
2481 	cctx->rpdev = NULL;
2482 	list_for_each_entry(user, &cctx->users, user)
2483 		fastrpc_notify_users(user);
2484 	spin_unlock_irqrestore(&cctx->lock, flags);
2485 
2486 	if (cctx->fdevice)
2487 		misc_deregister(&cctx->fdevice->miscdev);
2488 
2489 	if (cctx->secure_fdevice)
2490 		misc_deregister(&cctx->secure_fdevice->miscdev);
2491 
2492 	list_for_each_entry_safe(buf, b, &cctx->invoke_interrupted_mmaps, node)
2493 		list_del(&buf->node);
2494 
2495 	if (cctx->remote_heap)
2496 		fastrpc_buf_free(cctx->remote_heap);
2497 
2498 	of_platform_depopulate(&rpdev->dev);
2499 
2500 	fastrpc_channel_ctx_put(cctx);
2501 }
2502 
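/*
 * rpmsg receive path: the DSP echoes back the packed context value, so
 * recover the IDR index via FASTRPC_CTXID_MASK and complete the waiting
 * invocation. Runs in atomic context, hence the deferred put_work below.
 */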
2503 static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
2504 				  int len, void *priv, u32 addr)
2505 {
2506 	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
2507 	struct fastrpc_invoke_rsp *rsp = data;
2508 	struct fastrpc_invoke_ctx *ctx;
2509 	unsigned long flags;
2510 	unsigned long ctxid;
2511 
2512 	if (len < sizeof(*rsp))
2513 		return -EINVAL;
2514 
2515 	ctxid = ((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);
2516 
2517 	spin_lock_irqsave(&cctx->lock, flags);
2518 	ctx = idr_find(&cctx->ctx_idr, ctxid);
2519 	spin_unlock_irqrestore(&cctx->lock, flags);
2520 
2521 	if (!ctx) {
2522 		dev_err(&rpdev->dev, "No context ID matches response\n");
2523 		return -ENOENT;
2524 	}
2525 
2526 	ctx->retval = rsp->retval;
2527 	complete(&ctx->work);
2528 
2529 	/*
2530 	 * The DMA buffer associated with the context cannot be freed in
2531 	 * interrupt context so schedule it through a worker thread to
2532 	 * avoid a kernel BUG.
2533 	 */
2534 	schedule_work(&ctx->put_work);
2535 
2536 	return 0;
2537 }
2538 
2539 static const struct of_device_id fastrpc_rpmsg_of_match[] = {
2540 	{ .compatible = "qcom,kaanapali-fastrpc", .data = &kaanapali_soc_data },
2541 	{ .compatible = "qcom,fastrpc", .data = &default_soc_data },
2542 	{ },
2543 };
2544 MODULE_DEVICE_TABLE(of, fastrpc_rpmsg_of_match);
2545 
2546 static struct rpmsg_driver fastrpc_driver = {
2547 	.probe = fastrpc_rpmsg_probe,
2548 	.remove = fastrpc_rpmsg_remove,
2549 	.callback = fastrpc_rpmsg_callback,
2550 	.drv = {
2551 		.name = "qcom,fastrpc",
2552 		.of_match_table = fastrpc_rpmsg_of_match,
2553 	},
2554 };
2555 
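/*
 * The context-bank platform driver is registered before the rpmsg driver
 * so the compute-bank children populated by fastrpc_rpmsg_probe() can
 * bind immediately.
 */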
2556 static int fastrpc_init(void)
2557 {
2558 	int ret;
2559 
2560 	ret = platform_driver_register(&fastrpc_cb_driver);
2561 	if (ret < 0) {
2562 		pr_err("fastrpc: failed to register cb driver\n");
2563 		return ret;
2564 	}
2565 
2566 	ret = register_rpmsg_driver(&fastrpc_driver);
2567 	if (ret < 0) {
2568 		pr_err("fastrpc: failed to register rpmsg driver\n");
2569 		platform_driver_unregister(&fastrpc_cb_driver);
2570 		return ret;
2571 	}
2572 
2573 	return 0;
2574 }
2575 module_init(fastrpc_init);
2576 
2577 static void fastrpc_exit(void)
2578 {
2579 	platform_driver_unregister(&fastrpc_cb_driver);
2580 	unregister_rpmsg_driver(&fastrpc_driver);
2581 }
2582 module_exit(fastrpc_exit);
2583 
2584 MODULE_DESCRIPTION("Qualcomm FastRPC");
2585 MODULE_LICENSE("GPL v2");
2586 MODULE_IMPORT_NS("DMA_BUF");
2587