1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
3 // Copyright (c) 2018, Linaro Limited
4
5 #include <linux/completion.h>
6 #include <linux/device.h>
7 #include <linux/dma-buf.h>
8 #include <linux/dma-mapping.h>
9 #include <linux/dma-resv.h>
10 #include <linux/idr.h>
11 #include <linux/list.h>
12 #include <linux/miscdevice.h>
13 #include <linux/module.h>
14 #include <linux/of_address.h>
15 #include <linux/of.h>
16 #include <linux/platform_device.h>
17 #include <linux/sort.h>
18 #include <linux/of_platform.h>
19 #include <linux/rpmsg.h>
20 #include <linux/scatterlist.h>
21 #include <linux/slab.h>
22 #include <linux/firmware/qcom/qcom_scm.h>
23 #include <uapi/misc/fastrpc.h>
24 #include <linux/of_reserved_mem.h>
25
26 #define ADSP_DOMAIN_ID (0)
27 #define MDSP_DOMAIN_ID (1)
28 #define SDSP_DOMAIN_ID (2)
29 #define CDSP_DOMAIN_ID (3)
30 #define GDSP_DOMAIN_ID (4)
31 #define FASTRPC_MAX_SESSIONS 14
32 #define FASTRPC_MAX_VMIDS 16
33 #define FASTRPC_ALIGN 128
34 #define FASTRPC_MAX_FDLIST 16
35 #define FASTRPC_MAX_CRCLIST 64
36 #define FASTRPC_PHYS(p) ((p) & 0xffffffff)
37 #define FASTRPC_CTX_MAX (256)
38 #define FASTRPC_INIT_HANDLE 1
39 #define FASTRPC_DSP_UTILITIES_HANDLE 2
40 #define FASTRPC_CTXID_MASK (0xFF0)
41 #define INIT_FILELEN_MAX (2 * 1024 * 1024)
42 #define INIT_FILE_NAMELEN_MAX (128)
43 #define FASTRPC_DEVICE_NAME "fastrpc"
44
45 /* Add memory to static PD pool, protection through XPU */
46 #define ADSP_MMAP_HEAP_ADDR 4
47 /* MAP static DMA buffer on DSP User PD */
48 #define ADSP_MMAP_DMA_BUFFER 6
49 /* Add memory to static PD pool, protection through hypervisor */
50 #define ADSP_MMAP_REMOTE_HEAP_ADDR 8
51 /* Add memory to userPD pool, for user heap */
52 #define ADSP_MMAP_ADD_PAGES 0x1000
53 /* Add memory to userPD pool, for LLC heap */
54 #define ADSP_MMAP_ADD_PAGES_LLC 0x3000
55
56 #define DSP_UNSUPPORTED_API (0x80000414)
57 /* Maximum number of DSP attributes supported */
58 #define FASTRPC_MAX_DSP_ATTRIBUTES (256)
59 #define FASTRPC_MAX_DSP_ATTRIBUTES_LEN (sizeof(u32) * FASTRPC_MAX_DSP_ATTRIBUTES)
60
61 /* Retrieves number of input buffers from the scalars parameter */
62 #define REMOTE_SCALARS_INBUFS(sc) (((sc) >> 16) & 0x0ff)
63
64 /* Retrieves number of output buffers from the scalars parameter */
65 #define REMOTE_SCALARS_OUTBUFS(sc) (((sc) >> 8) & 0x0ff)
66
67 /* Retrieves number of input handles from the scalars parameter */
68 #define REMOTE_SCALARS_INHANDLES(sc) (((sc) >> 4) & 0x0f)
69
70 /* Retrieves number of output handles from the scalars parameter */
71 #define REMOTE_SCALARS_OUTHANDLES(sc) ((sc) & 0x0f)
72
73 #define REMOTE_SCALARS_LENGTH(sc) (REMOTE_SCALARS_INBUFS(sc) + \
74 REMOTE_SCALARS_OUTBUFS(sc) + \
75 REMOTE_SCALARS_INHANDLES(sc) + \
76 REMOTE_SCALARS_OUTHANDLES(sc))
77 #define FASTRPC_BUILD_SCALARS(attr, method, in, out, oin, oout) \
78 (((attr & 0x07) << 29) | \
79 ((method & 0x1f) << 24) | \
80 ((in & 0xff) << 16) | \
81 ((out & 0xff) << 8) | \
82 ((oin & 0x0f) << 4) | \
83 (oout & 0x0f))
84
85 #define FASTRPC_SCALARS(method, in, out) \
86 FASTRPC_BUILD_SCALARS(0, method, in, out, 0, 0)
87
88 #define FASTRPC_CREATE_PROCESS_NARGS 6
89 #define FASTRPC_CREATE_STATIC_PROCESS_NARGS 3
90 /* Remote Method id table */
91 #define FASTRPC_RMID_INIT_ATTACH 0
92 #define FASTRPC_RMID_INIT_RELEASE 1
93 #define FASTRPC_RMID_INIT_MMAP 4
94 #define FASTRPC_RMID_INIT_MUNMAP 5
95 #define FASTRPC_RMID_INIT_CREATE 6
96 #define FASTRPC_RMID_INIT_CREATE_ATTR 7
97 #define FASTRPC_RMID_INIT_CREATE_STATIC 8
98 #define FASTRPC_RMID_INIT_MEM_MAP 10
99 #define FASTRPC_RMID_INIT_MEM_UNMAP 11
100
101 /* Protection Domain(PD) ids */
102 #define ROOT_PD (0)
103 #define USER_PD (1)
104 #define SENSORS_PD (2)
105
106 #define miscdev_to_fdevice(d) container_of(d, struct fastrpc_device, miscdev)
107
108 struct fastrpc_phy_page {
109 u64 addr; /* physical address */
110 u64 size; /* size of contiguous region */
111 };
112
113 struct fastrpc_invoke_buf {
114 u32 num; /* number of contiguous regions */
115 u32 pgidx; /* index to start of contiguous region */
116 };
117
118 struct fastrpc_remote_dmahandle {
119 s32 fd; /* dma handle fd */
120 u32 offset; /* dma handle offset */
121 u32 len; /* dma handle length */
122 };
123
124 struct fastrpc_remote_buf {
125 u64 pv; /* buffer pointer */
126 u64 len; /* length of buffer */
127 };
128
129 union fastrpc_remote_arg {
130 struct fastrpc_remote_buf buf;
131 struct fastrpc_remote_dmahandle dma;
132 };
133
134 struct fastrpc_mmap_rsp_msg {
135 u64 vaddr;
136 };
137
138 struct fastrpc_mmap_req_msg {
139 s32 client_id;
140 u32 flags;
141 u64 vaddr;
142 s32 num;
143 };
144
145 struct fastrpc_mem_map_req_msg {
146 s32 client_id;
147 s32 fd;
148 s32 offset;
149 u32 flags;
150 u64 vaddrin;
151 s32 num;
152 s32 data_len;
153 };
154
155 struct fastrpc_munmap_req_msg {
156 s32 client_id;
157 u64 vaddr;
158 u64 size;
159 };
160
161 struct fastrpc_mem_unmap_req_msg {
162 s32 client_id;
163 s32 fd;
164 u64 vaddrin;
165 u64 len;
166 };
167
168 struct fastrpc_msg {
169 int client_id; /* process client id */
170 int tid; /* thread id */
171 u64 ctx; /* invoke caller context */
172 u32 handle; /* handle to invoke */
173 u32 sc; /* scalars structure describing the data */
174 u64 addr; /* physical address */
175 u64 size; /* size of contiguous region */
176 };
177
178 struct fastrpc_invoke_rsp {
179 u64 ctx; /* invoke caller context */
180 int retval; /* invoke return value */
181 };
182
183 struct fastrpc_buf_overlap {
184 u64 start;
185 u64 end;
186 int raix;
187 u64 mstart;
188 u64 mend;
189 u64 offset;
190 };
191
192 struct fastrpc_buf {
193 struct fastrpc_user *fl;
194 struct dma_buf *dmabuf;
195 struct device *dev;
196 void *virt;
197 u64 phys;
198 u64 size;
199 /* Lock for dma buf attachments */
200 struct mutex lock;
201 struct list_head attachments;
202 /* mmap support */
203 struct list_head node; /* list of user requested mmaps */
204 uintptr_t raddr;
205 };
206
207 struct fastrpc_dma_buf_attachment {
208 struct device *dev;
209 struct sg_table sgt;
210 struct list_head node;
211 };
212
213 struct fastrpc_map {
214 struct list_head node;
215 struct fastrpc_user *fl;
216 int fd;
217 struct dma_buf *buf;
218 struct sg_table *table;
219 struct dma_buf_attachment *attach;
220 u64 phys;
221 u64 size;
222 void *va;
223 u64 len;
224 u64 raddr;
225 u32 attr;
226 struct kref refcount;
227 };
228
229 struct fastrpc_invoke_ctx {
230 int nscalars;
231 int nbufs;
232 int retval;
233 int pid;
234 int client_id;
235 u32 sc;
236 u32 *crc;
237 u64 ctxid;
238 u64 msg_sz;
239 struct kref refcount;
240 struct list_head node; /* list of ctxs */
241 struct completion work;
242 struct work_struct put_work;
243 struct fastrpc_msg msg;
244 struct fastrpc_user *fl;
245 union fastrpc_remote_arg *rpra;
246 struct fastrpc_map **maps;
247 struct fastrpc_buf *buf;
248 struct fastrpc_invoke_args *args;
249 struct fastrpc_buf_overlap *olaps;
250 struct fastrpc_channel_ctx *cctx;
251 };
252
253 struct fastrpc_session_ctx {
254 struct device *dev;
255 int sid;
256 bool used;
257 bool valid;
258 };
259
260 struct fastrpc_channel_ctx {
261 int domain_id;
262 int sesscount;
263 int vmcount;
264 struct qcom_scm_vmperm vmperms[FASTRPC_MAX_VMIDS];
265 struct rpmsg_device *rpdev;
266 struct fastrpc_session_ctx session[FASTRPC_MAX_SESSIONS];
267 spinlock_t lock;
268 struct idr ctx_idr;
269 struct list_head users;
270 struct kref refcount;
271 /* Flag if dsp attributes are cached */
272 bool valid_attributes;
273 u32 dsp_attributes[FASTRPC_MAX_DSP_ATTRIBUTES];
274 struct fastrpc_device *secure_fdevice;
275 struct fastrpc_device *fdevice;
276 struct fastrpc_buf *remote_heap;
277 struct list_head invoke_interrupted_mmaps;
278 bool secure;
279 bool unsigned_support;
280 u64 dma_mask;
281 };
282
283 struct fastrpc_device {
284 struct fastrpc_channel_ctx *cctx;
285 struct miscdevice miscdev;
286 bool secure;
287 };
288
289 struct fastrpc_user {
290 struct list_head user;
291 struct list_head maps;
292 struct list_head pending;
293 struct list_head mmaps;
294
295 struct fastrpc_channel_ctx *cctx;
296 struct fastrpc_session_ctx *sctx;
297 struct fastrpc_buf *init_mem;
298
299 int client_id;
300 int pd;
301 bool is_secure_dev;
302 /* Lock for lists */
303 spinlock_t lock;
304 /* lock for allocations */
305 struct mutex mutex;
306 };
307
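/*
 * kref release callback for a fastrpc_map: return secure memory ownership to
 * HLOS if required, tear down the dma-buf attachment, unlink the map from its
 * owner's list and free it.
 */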
308 static void fastrpc_free_map(struct kref *ref)
309 {
310 struct fastrpc_map *map;
311
312 map = container_of(ref, struct fastrpc_map, refcount);
313
314 if (map->table) {
315 if (map->attr & FASTRPC_ATTR_SECUREMAP) {
316 struct qcom_scm_vmperm perm;
317 int vmid = map->fl->cctx->vmperms[0].vmid;
318 u64 src_perms = BIT(QCOM_SCM_VMID_HLOS) | BIT(vmid);
319 int err = 0;
320
321 perm.vmid = QCOM_SCM_VMID_HLOS;
322 perm.perm = QCOM_SCM_PERM_RWX;
323 err = qcom_scm_assign_mem(map->phys, map->len,
324 &src_perms, &perm, 1);
325 if (err) {
326 dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d\n",
327 map->phys, map->len, err);
328 return;
329 }
330 }
331 dma_buf_unmap_attachment_unlocked(map->attach, map->table,
332 DMA_BIDIRECTIONAL);
333 dma_buf_detach(map->buf, map->attach);
334 dma_buf_put(map->buf);
335 }
336
337 if (map->fl) {
338 spin_lock(&map->fl->lock);
339 list_del(&map->node);
340 spin_unlock(&map->fl->lock);
341 map->fl = NULL;
342 }
343
344 kfree(map);
345 }
346
347 static void fastrpc_map_put(struct fastrpc_map *map)
348 {
349 if (map)
350 kref_put(&map->refcount, fastrpc_free_map);
351 }
352
353 static int fastrpc_map_get(struct fastrpc_map *map)
354 {
355 if (!map)
356 return -ENOENT;
357
358 return kref_get_unless_zero(&map->refcount) ? 0 : -ENOENT;
359 }
360
361
362 static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd,
363 struct fastrpc_map **ppmap)
364 {
365 struct fastrpc_map *map = NULL;
366 struct dma_buf *buf;
367 int ret = -ENOENT;
368
369 buf = dma_buf_get(fd);
370 if (IS_ERR(buf))
371 return PTR_ERR(buf);
372
373 spin_lock(&fl->lock);
374 list_for_each_entry(map, &fl->maps, node) {
375 if (map->fd != fd || map->buf != buf)
376 continue;
377
378 *ppmap = map;
379 ret = 0;
380 break;
381 }
382 spin_unlock(&fl->lock);
383
384 return ret;
385 }
386
387 static void fastrpc_buf_free(struct fastrpc_buf *buf)
388 {
389 dma_free_coherent(buf->dev, buf->size, buf->virt,
390 FASTRPC_PHYS(buf->phys));
391 kfree(buf);
392 }
393
394 static int __fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
395 u64 size, struct fastrpc_buf **obuf)
396 {
397 struct fastrpc_buf *buf;
398
399 buf = kzalloc(sizeof(*buf), GFP_KERNEL);
400 if (!buf)
401 return -ENOMEM;
402
403 INIT_LIST_HEAD(&buf->attachments);
404 INIT_LIST_HEAD(&buf->node);
405 mutex_init(&buf->lock);
406
407 buf->fl = fl;
408 buf->virt = NULL;
409 buf->phys = 0;
410 buf->size = size;
411 buf->dev = dev;
412 buf->raddr = 0;
413
414 buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys,
415 GFP_KERNEL);
416 if (!buf->virt) {
417 mutex_destroy(&buf->lock);
418 kfree(buf);
419 return -ENOMEM;
420 }
421
422 *obuf = buf;
423
424 return 0;
425 }
426
427 static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
428 u64 size, struct fastrpc_buf **obuf)
429 {
430 int ret;
431 struct fastrpc_buf *buf;
432
433 ret = __fastrpc_buf_alloc(fl, dev, size, obuf);
434 if (ret)
435 return ret;
436
437 buf = *obuf;
438
439 if (fl->sctx && fl->sctx->sid)
440 buf->phys += ((u64)fl->sctx->sid << 32);
441
442 return 0;
443 }
444
445 static int fastrpc_remote_heap_alloc(struct fastrpc_user *fl, struct device *dev,
446 u64 size, struct fastrpc_buf **obuf)
447 {
448 struct device *rdev = &fl->cctx->rpdev->dev;
449
450 return __fastrpc_buf_alloc(fl, rdev, size, obuf);
451 }
452
453 static void fastrpc_channel_ctx_free(struct kref *ref)
454 {
455 struct fastrpc_channel_ctx *cctx;
456
457 cctx = container_of(ref, struct fastrpc_channel_ctx, refcount);
458
459 kfree(cctx);
460 }
461
462 static void fastrpc_channel_ctx_get(struct fastrpc_channel_ctx *cctx)
463 {
464 kref_get(&cctx->refcount);
465 }
466
467 static void fastrpc_channel_ctx_put(struct fastrpc_channel_ctx *cctx)
468 {
469 kref_put(&cctx->refcount, fastrpc_channel_ctx_free);
470 }
471
472 static void fastrpc_context_free(struct kref *ref)
473 {
474 struct fastrpc_invoke_ctx *ctx;
475 struct fastrpc_channel_ctx *cctx;
476 unsigned long flags;
477 int i;
478
479 ctx = container_of(ref, struct fastrpc_invoke_ctx, refcount);
480 cctx = ctx->cctx;
481
482 for (i = 0; i < ctx->nbufs; i++)
483 fastrpc_map_put(ctx->maps[i]);
484
485 if (ctx->buf)
486 fastrpc_buf_free(ctx->buf);
487
488 spin_lock_irqsave(&cctx->lock, flags);
489 idr_remove(&cctx->ctx_idr, ctx->ctxid >> 4);
490 spin_unlock_irqrestore(&cctx->lock, flags);
491
492 kfree(ctx->maps);
493 kfree(ctx->olaps);
494 kfree(ctx);
495
496 fastrpc_channel_ctx_put(cctx);
497 }
498
499 static void fastrpc_context_get(struct fastrpc_invoke_ctx *ctx)
500 {
501 kref_get(&ctx->refcount);
502 }
503
504 static void fastrpc_context_put(struct fastrpc_invoke_ctx *ctx)
505 {
506 kref_put(&ctx->refcount, fastrpc_context_free);
507 }
508
509 static void fastrpc_context_put_wq(struct work_struct *work)
510 {
511 struct fastrpc_invoke_ctx *ctx =
512 container_of(work, struct fastrpc_invoke_ctx, put_work);
513
514 fastrpc_context_put(ctx);
515 }
516
517 #define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
518 static int olaps_cmp(const void *a, const void *b)
519 {
520 struct fastrpc_buf_overlap *pa = (struct fastrpc_buf_overlap *)a;
521 struct fastrpc_buf_overlap *pb = (struct fastrpc_buf_overlap *)b;
522 /* sort with lowest starting buffer first */
523 int st = CMP(pa->start, pb->start);
524 /* sort with highest ending buffer first */
525 int ed = CMP(pb->end, pa->end);
526
527 return st == 0 ? ed : st;
528 }
529
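/*
 * Compute the de-overlapped copy ranges (mstart/mend/offset) for the inline
 * input/output buffers so that user regions shared by several arguments are
 * only allocated and copied once in the payload buffer.
 */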
530 static void fastrpc_get_buff_overlaps(struct fastrpc_invoke_ctx *ctx)
531 {
532 u64 max_end = 0;
533 int i;
534
535 for (i = 0; i < ctx->nbufs; ++i) {
536 ctx->olaps[i].start = ctx->args[i].ptr;
537 ctx->olaps[i].end = ctx->olaps[i].start + ctx->args[i].length;
538 ctx->olaps[i].raix = i;
539 }
540
541 sort(ctx->olaps, ctx->nbufs, sizeof(*ctx->olaps), olaps_cmp, NULL);
542
543 for (i = 0; i < ctx->nbufs; ++i) {
544 /* Falling inside previous range */
545 if (ctx->olaps[i].start < max_end) {
546 ctx->olaps[i].mstart = max_end;
547 ctx->olaps[i].mend = ctx->olaps[i].end;
548 ctx->olaps[i].offset = max_end - ctx->olaps[i].start;
549
550 if (ctx->olaps[i].end > max_end) {
551 max_end = ctx->olaps[i].end;
552 } else {
553 ctx->olaps[i].mend = 0;
554 ctx->olaps[i].mstart = 0;
555 }
556
557 } else {
558 ctx->olaps[i].mend = ctx->olaps[i].end;
559 ctx->olaps[i].mstart = ctx->olaps[i].start;
560 ctx->olaps[i].offset = 0;
561 max_end = ctx->olaps[i].end;
562 }
563 }
564 }
565
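/*
 * Allocate and initialise an invoke context: take a channel reference, queue
 * the context on the user's pending list and reserve a context id in the
 * channel IDR (stored shifted left by 4; the low bits of the message ctx
 * carry the PD id).
 */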
566 static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
567 struct fastrpc_user *user, u32 kernel, u32 sc,
568 struct fastrpc_invoke_args *args)
569 {
570 struct fastrpc_channel_ctx *cctx = user->cctx;
571 struct fastrpc_invoke_ctx *ctx = NULL;
572 unsigned long flags;
573 int ret;
574
575 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
576 if (!ctx)
577 return ERR_PTR(-ENOMEM);
578
579 INIT_LIST_HEAD(&ctx->node);
580 ctx->fl = user;
581 ctx->nscalars = REMOTE_SCALARS_LENGTH(sc);
582 ctx->nbufs = REMOTE_SCALARS_INBUFS(sc) +
583 REMOTE_SCALARS_OUTBUFS(sc);
584
585 if (ctx->nscalars) {
586 ctx->maps = kcalloc(ctx->nscalars,
587 sizeof(*ctx->maps), GFP_KERNEL);
588 if (!ctx->maps) {
589 kfree(ctx);
590 return ERR_PTR(-ENOMEM);
591 }
592 ctx->olaps = kcalloc(ctx->nscalars,
593 sizeof(*ctx->olaps), GFP_KERNEL);
594 if (!ctx->olaps) {
595 kfree(ctx->maps);
596 kfree(ctx);
597 return ERR_PTR(-ENOMEM);
598 }
599 ctx->args = args;
600 fastrpc_get_buff_overlaps(ctx);
601 }
602
603 /* Released in fastrpc_context_put() */
604 fastrpc_channel_ctx_get(cctx);
605
606 ctx->sc = sc;
607 ctx->retval = -1;
608 ctx->pid = current->pid;
609 ctx->client_id = user->client_id;
610 ctx->cctx = cctx;
611 init_completion(&ctx->work);
612 INIT_WORK(&ctx->put_work, fastrpc_context_put_wq);
613
614 spin_lock(&user->lock);
615 list_add_tail(&ctx->node, &user->pending);
616 spin_unlock(&user->lock);
617
618 spin_lock_irqsave(&cctx->lock, flags);
619 ret = idr_alloc_cyclic(&cctx->ctx_idr, ctx, 1,
620 FASTRPC_CTX_MAX, GFP_ATOMIC);
621 if (ret < 0) {
622 spin_unlock_irqrestore(&cctx->lock, flags);
623 goto err_idr;
624 }
625 ctx->ctxid = ret << 4;
626 spin_unlock_irqrestore(&cctx->lock, flags);
627
628 kref_init(&ctx->refcount);
629
630 return ctx;
631 err_idr:
632 spin_lock(&user->lock);
633 list_del(&ctx->node);
634 spin_unlock(&user->lock);
635 fastrpc_channel_ctx_put(cctx);
636 kfree(ctx->maps);
637 kfree(ctx->olaps);
638 kfree(ctx);
639
640 return ERR_PTR(ret);
641 }
642
643 static struct sg_table *
644 fastrpc_map_dma_buf(struct dma_buf_attachment *attachment,
645 enum dma_data_direction dir)
646 {
647 struct fastrpc_dma_buf_attachment *a = attachment->priv;
648 struct sg_table *table;
649 int ret;
650
651 table = &a->sgt;
652
653 ret = dma_map_sgtable(attachment->dev, table, dir, 0);
654 if (ret)
655 table = ERR_PTR(ret);
656 return table;
657 }
658
659 static void fastrpc_unmap_dma_buf(struct dma_buf_attachment *attach,
660 struct sg_table *table,
661 enum dma_data_direction dir)
662 {
663 dma_unmap_sgtable(attach->dev, table, dir, 0);
664 }
665
666 static void fastrpc_release(struct dma_buf *dmabuf)
667 {
668 struct fastrpc_buf *buffer = dmabuf->priv;
669
670 fastrpc_buf_free(buffer);
671 }
672
673 static int fastrpc_dma_buf_attach(struct dma_buf *dmabuf,
674 struct dma_buf_attachment *attachment)
675 {
676 struct fastrpc_dma_buf_attachment *a;
677 struct fastrpc_buf *buffer = dmabuf->priv;
678 int ret;
679
680 a = kzalloc(sizeof(*a), GFP_KERNEL);
681 if (!a)
682 return -ENOMEM;
683
684 ret = dma_get_sgtable(buffer->dev, &a->sgt, buffer->virt,
685 FASTRPC_PHYS(buffer->phys), buffer->size);
686 if (ret < 0) {
687 dev_err(buffer->dev, "failed to get scatterlist from DMA API\n");
688 kfree(a);
689 return -EINVAL;
690 }
691
692 a->dev = attachment->dev;
693 INIT_LIST_HEAD(&a->node);
694 attachment->priv = a;
695
696 mutex_lock(&buffer->lock);
697 list_add(&a->node, &buffer->attachments);
698 mutex_unlock(&buffer->lock);
699
700 return 0;
701 }
702
703 static void fastrpc_dma_buf_detatch(struct dma_buf *dmabuf,
704 struct dma_buf_attachment *attachment)
705 {
706 struct fastrpc_dma_buf_attachment *a = attachment->priv;
707 struct fastrpc_buf *buffer = dmabuf->priv;
708
709 mutex_lock(&buffer->lock);
710 list_del(&a->node);
711 mutex_unlock(&buffer->lock);
712 sg_free_table(&a->sgt);
713 kfree(a);
714 }
715
716 static int fastrpc_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
717 {
718 struct fastrpc_buf *buf = dmabuf->priv;
719
720 iosys_map_set_vaddr(map, buf->virt);
721
722 return 0;
723 }
724
725 static int fastrpc_mmap(struct dma_buf *dmabuf,
726 struct vm_area_struct *vma)
727 {
728 struct fastrpc_buf *buf = dmabuf->priv;
729 size_t size = vma->vm_end - vma->vm_start;
730
731 dma_resv_assert_held(dmabuf->resv);
732
733 return dma_mmap_coherent(buf->dev, vma, buf->virt,
734 FASTRPC_PHYS(buf->phys), size);
735 }
736
737 static const struct dma_buf_ops fastrpc_dma_buf_ops = {
738 .attach = fastrpc_dma_buf_attach,
739 .detach = fastrpc_dma_buf_detatch,
740 .map_dma_buf = fastrpc_map_dma_buf,
741 .unmap_dma_buf = fastrpc_unmap_dma_buf,
742 .mmap = fastrpc_mmap,
743 .vmap = fastrpc_vmap,
744 .release = fastrpc_release,
745 };
746
747 static int fastrpc_map_attach(struct fastrpc_user *fl, int fd,
748 u64 len, u32 attr, struct fastrpc_map **ppmap)
749 {
750 struct fastrpc_session_ctx *sess = fl->sctx;
751 struct fastrpc_map *map = NULL;
752 struct sg_table *table;
753 struct scatterlist *sgl = NULL;
754 int err = 0, sgl_index = 0;
755
756 map = kzalloc(sizeof(*map), GFP_KERNEL);
757 if (!map)
758 return -ENOMEM;
759
760 INIT_LIST_HEAD(&map->node);
761 kref_init(&map->refcount);
762
763 map->fl = fl;
764 map->fd = fd;
765 map->buf = dma_buf_get(fd);
766 if (IS_ERR(map->buf)) {
767 err = PTR_ERR(map->buf);
768 goto get_err;
769 }
770
771 map->attach = dma_buf_attach(map->buf, sess->dev);
772 if (IS_ERR(map->attach)) {
773 dev_err(sess->dev, "Failed to attach dmabuf\n");
774 err = PTR_ERR(map->attach);
775 goto attach_err;
776 }
777
778 table = dma_buf_map_attachment_unlocked(map->attach, DMA_BIDIRECTIONAL);
779 if (IS_ERR(table)) {
780 err = PTR_ERR(table);
781 goto map_err;
782 }
783 map->table = table;
784
785 if (attr & FASTRPC_ATTR_SECUREMAP) {
786 map->phys = sg_phys(map->table->sgl);
787 } else {
788 map->phys = sg_dma_address(map->table->sgl);
789 map->phys += ((u64)fl->sctx->sid << 32);
790 }
791 for_each_sg(map->table->sgl, sgl, map->table->nents,
792 sgl_index)
793 map->size += sg_dma_len(sgl);
794 if (len > map->size) {
795 dev_dbg(sess->dev, "Bad size passed len 0x%llx map size 0x%llx\n",
796 len, map->size);
797 err = -EINVAL;
798 goto map_err;
799 }
800 map->va = sg_virt(map->table->sgl);
801 map->len = len;
802
803 if (attr & FASTRPC_ATTR_SECUREMAP) {
804 /*
805 * If subsystem VMIDs are defined in DTSI, then do
806 * hyp_assign from HLOS to those VM(s)
807 */
808 u64 src_perms = BIT(QCOM_SCM_VMID_HLOS);
809 struct qcom_scm_vmperm dst_perms[2] = {0};
810
811 dst_perms[0].vmid = QCOM_SCM_VMID_HLOS;
812 dst_perms[0].perm = QCOM_SCM_PERM_RW;
813 dst_perms[1].vmid = fl->cctx->vmperms[0].vmid;
814 dst_perms[1].perm = QCOM_SCM_PERM_RWX;
815 map->attr = attr;
816 err = qcom_scm_assign_mem(map->phys, (u64)map->len, &src_perms, dst_perms, 2);
817 if (err) {
818 dev_err(sess->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d\n",
819 map->phys, map->len, err);
820 goto map_err;
821 }
822 }
823 spin_lock(&fl->lock);
824 list_add_tail(&map->node, &fl->maps);
825 spin_unlock(&fl->lock);
826 *ppmap = map;
827
828 return 0;
829
830 map_err:
831 dma_buf_detach(map->buf, map->attach);
832 attach_err:
833 dma_buf_put(map->buf);
834 get_err:
835 fastrpc_map_put(map);
836
837 return err;
838 }
839
840 static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
841 u64 len, u32 attr, struct fastrpc_map **ppmap)
842 {
843 struct fastrpc_session_ctx *sess = fl->sctx;
844 int err = 0;
845
846 if (!fastrpc_map_lookup(fl, fd, ppmap)) {
847 if (!fastrpc_map_get(*ppmap))
848 return 0;
849 dev_dbg(sess->dev, "%s: Failed to get map fd=%d\n",
850 __func__, fd);
851 }
852
853 err = fastrpc_map_attach(fl, fd, len, attr, ppmap);
854
855 return err;
856 }
857
858 /*
859 * Fastrpc payload buffer with metadata looks like:
860 *
861 * >>>>>> START of METADATA <<<<<<<<<
862 * +---------------------------------+
863 * | Arguments |
864 * | type:(union fastrpc_remote_arg)|
865 * | (0 - N) |
866 * +---------------------------------+
867 * | Invoke Buffer list |
868 * | type:(struct fastrpc_invoke_buf)|
869 * | (0 - N) |
870 * +---------------------------------+
871 * | Page info list |
872 * | type:(struct fastrpc_phy_page) |
873 * | (0 - N) |
874 * +---------------------------------+
875 * | Optional info |
876 * |(can be specific to SoC/Firmware)|
877 * +---------------------------------+
878 * >>>>>>>> END of METADATA <<<<<<<<<
879 * +---------------------------------+
880 * | Inline ARGS |
881 * | (0-N) |
882 * +---------------------------------+
883 */
884
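/*
 * Size of the metadata region: one remote arg, invoke buf and page entry per
 * scalar, plus the fd list and CRC list that the DSP may fill in.
 */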
885 static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx)
886 {
887 int size = 0;
888
889 size = (sizeof(struct fastrpc_remote_buf) +
890 sizeof(struct fastrpc_invoke_buf) +
891 sizeof(struct fastrpc_phy_page)) * ctx->nscalars +
892 sizeof(u64) * FASTRPC_MAX_FDLIST +
893 sizeof(u32) * FASTRPC_MAX_CRCLIST;
894
895 return size;
896 }
897
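/*
 * Total message size: aligned metadata followed by inline copies of all
 * buffers that are not backed by an fd, using the de-overlapped lengths.
 */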
898 static u64 fastrpc_get_payload_size(struct fastrpc_invoke_ctx *ctx, int metalen)
899 {
900 u64 size = 0;
901 int oix;
902
903 size = ALIGN(metalen, FASTRPC_ALIGN);
904 for (oix = 0; oix < ctx->nbufs; oix++) {
905 int i = ctx->olaps[oix].raix;
906
907 if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) {
908
909 if (ctx->olaps[oix].offset == 0)
910 size = ALIGN(size, FASTRPC_ALIGN);
911
912 size += (ctx->olaps[oix].mend - ctx->olaps[oix].mstart);
913 }
914 }
915
916 return size;
917 }
918
919 static int fastrpc_create_maps(struct fastrpc_invoke_ctx *ctx)
920 {
921 struct device *dev = ctx->fl->sctx->dev;
922 int i, err;
923
924 for (i = 0; i < ctx->nscalars; ++i) {
925
926 if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1 ||
927 ctx->args[i].length == 0)
928 continue;
929
930 if (i < ctx->nbufs)
931 err = fastrpc_map_create(ctx->fl, ctx->args[i].fd,
932 ctx->args[i].length, ctx->args[i].attr, &ctx->maps[i]);
933 else
934 err = fastrpc_map_attach(ctx->fl, ctx->args[i].fd,
935 ctx->args[i].length, ctx->args[i].attr, &ctx->maps[i]);
936 if (err) {
937 dev_err(dev, "Error Creating map %d\n", err);
938 return -EINVAL;
939 }
940
941 }
942 return 0;
943 }
944
945 static struct fastrpc_invoke_buf *fastrpc_invoke_buf_start(union fastrpc_remote_arg *pra, int len)
946 {
947 return (struct fastrpc_invoke_buf *)(&pra[len]);
948 }
949
950 static struct fastrpc_phy_page *fastrpc_phy_page_start(struct fastrpc_invoke_buf *buf, int len)
951 {
952 return (struct fastrpc_phy_page *)(&buf[len]);
953 }
954
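/*
 * Build the invoke payload: map fd-backed arguments, allocate the message
 * buffer, fill in the remote args, buffer list and page list, copy inline
 * input buffers from the caller and describe any dma handles.
 */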
955 static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
956 {
957 struct device *dev = ctx->fl->sctx->dev;
958 union fastrpc_remote_arg *rpra;
959 struct fastrpc_invoke_buf *list;
960 struct fastrpc_phy_page *pages;
961 int inbufs, i, oix, err = 0;
962 u64 len, rlen, pkt_size;
963 u64 pg_start, pg_end;
964 uintptr_t args;
965 int metalen;
966
967 inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
968 metalen = fastrpc_get_meta_size(ctx);
969 pkt_size = fastrpc_get_payload_size(ctx, metalen);
970
971 err = fastrpc_create_maps(ctx);
972 if (err)
973 return err;
974
975 ctx->msg_sz = pkt_size;
976
977 if (ctx->fl->sctx->sid)
978 err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf);
979 else
980 err = fastrpc_remote_heap_alloc(ctx->fl, dev, pkt_size, &ctx->buf);
981 if (err)
982 return err;
983
984 memset(ctx->buf->virt, 0, pkt_size);
985 rpra = ctx->buf->virt;
986 list = fastrpc_invoke_buf_start(rpra, ctx->nscalars);
987 pages = fastrpc_phy_page_start(list, ctx->nscalars);
988 args = (uintptr_t)ctx->buf->virt + metalen;
989 rlen = pkt_size - metalen;
990 ctx->rpra = rpra;
991
992 for (oix = 0; oix < ctx->nbufs; ++oix) {
993 int mlen;
994
995 i = ctx->olaps[oix].raix;
996 len = ctx->args[i].length;
997
998 rpra[i].buf.pv = 0;
999 rpra[i].buf.len = len;
1000 list[i].num = len ? 1 : 0;
1001 list[i].pgidx = i;
1002
1003 if (!len)
1004 continue;
1005
1006 if (ctx->maps[i]) {
1007 struct vm_area_struct *vma = NULL;
1008
1009 rpra[i].buf.pv = (u64) ctx->args[i].ptr;
1010 pages[i].addr = ctx->maps[i]->phys;
1011
1012 mmap_read_lock(current->mm);
1013 vma = find_vma(current->mm, ctx->args[i].ptr);
1014 if (vma)
1015 pages[i].addr += (ctx->args[i].ptr & PAGE_MASK) -
1016 vma->vm_start;
1017 mmap_read_unlock(current->mm);
1018
1019 pg_start = (ctx->args[i].ptr & PAGE_MASK) >> PAGE_SHIFT;
1020 pg_end = ((ctx->args[i].ptr + len - 1) & PAGE_MASK) >>
1021 PAGE_SHIFT;
1022 pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
1023
1024 } else {
1025
1026 if (ctx->olaps[oix].offset == 0) {
1027 rlen -= ALIGN(args, FASTRPC_ALIGN) - args;
1028 args = ALIGN(args, FASTRPC_ALIGN);
1029 }
1030
1031 mlen = ctx->olaps[oix].mend - ctx->olaps[oix].mstart;
1032
1033 if (rlen < mlen)
1034 goto bail;
1035
1036 rpra[i].buf.pv = args - ctx->olaps[oix].offset;
1037 pages[i].addr = ctx->buf->phys -
1038 ctx->olaps[oix].offset +
1039 (pkt_size - rlen);
1040 pages[i].addr = pages[i].addr & PAGE_MASK;
1041
1042 pg_start = (rpra[i].buf.pv & PAGE_MASK) >> PAGE_SHIFT;
1043 pg_end = ((rpra[i].buf.pv + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
1044 pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
1045 args = args + mlen;
1046 rlen -= mlen;
1047 }
1048
1049 if (i < inbufs && !ctx->maps[i]) {
1050 void *dst = (void *)(uintptr_t)rpra[i].buf.pv;
1051 void *src = (void *)(uintptr_t)ctx->args[i].ptr;
1052
1053 if (!kernel) {
1054 if (copy_from_user(dst, (void __user *)src,
1055 len)) {
1056 err = -EFAULT;
1057 goto bail;
1058 }
1059 } else {
1060 memcpy(dst, src, len);
1061 }
1062 }
1063 }
1064
1065 for (i = ctx->nbufs; i < ctx->nscalars; ++i) {
1066 list[i].num = ctx->args[i].length ? 1 : 0;
1067 list[i].pgidx = i;
1068 if (ctx->maps[i]) {
1069 pages[i].addr = ctx->maps[i]->phys;
1070 pages[i].size = ctx->maps[i]->size;
1071 }
1072 rpra[i].dma.fd = ctx->args[i].fd;
1073 rpra[i].dma.len = ctx->args[i].length;
1074 rpra[i].dma.offset = (u64) ctx->args[i].ptr;
1075 }
1076
1077 bail:
1078 if (err)
1079 dev_err(dev, "Error: get invoke args failed:%d\n", err);
1080
1081 return err;
1082 }
1083
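/*
 * Copy inline output buffers back to the caller and release any maps whose
 * fds the DSP returned in the fd list for cleanup.
 */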
1084 static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
1085 u32 kernel)
1086 {
1087 union fastrpc_remote_arg *rpra = ctx->rpra;
1088 struct fastrpc_user *fl = ctx->fl;
1089 struct fastrpc_map *mmap = NULL;
1090 struct fastrpc_invoke_buf *list;
1091 struct fastrpc_phy_page *pages;
1092 u64 *fdlist;
1093 int i, inbufs, outbufs, handles;
1094 int ret = 0;
1095
1096 inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
1097 outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
1098 handles = REMOTE_SCALARS_INHANDLES(ctx->sc) + REMOTE_SCALARS_OUTHANDLES(ctx->sc);
1099 list = fastrpc_invoke_buf_start(rpra, ctx->nscalars);
1100 pages = fastrpc_phy_page_start(list, ctx->nscalars);
1101 fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
1102
1103 for (i = inbufs; i < ctx->nbufs; ++i) {
1104 if (!ctx->maps[i]) {
1105 void *src = (void *)(uintptr_t)rpra[i].buf.pv;
1106 void *dst = (void *)(uintptr_t)ctx->args[i].ptr;
1107 u64 len = rpra[i].buf.len;
1108
1109 if (!kernel) {
1110 if (copy_to_user((void __user *)dst, src, len)) {
1111 ret = -EFAULT;
1112 goto cleanup_fdlist;
1113 }
1114 } else {
1115 memcpy(dst, src, len);
1116 }
1117 }
1118 }
1119
1120 cleanup_fdlist:
1121 /* Clean up fdlist which is updated by DSP */
1122 for (i = 0; i < FASTRPC_MAX_FDLIST; i++) {
1123 if (!fdlist[i])
1124 break;
1125 if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap))
1126 fastrpc_map_put(mmap);
1127 }
1128
1129 return ret;
1130 }
1131
1132 static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
1133 struct fastrpc_invoke_ctx *ctx,
1134 u32 kernel, uint32_t handle)
1135 {
1136 struct fastrpc_channel_ctx *cctx;
1137 struct fastrpc_user *fl = ctx->fl;
1138 struct fastrpc_msg *msg = &ctx->msg;
1139 int ret;
1140
1141 cctx = fl->cctx;
1142 msg->client_id = fl->client_id;
1143 msg->tid = current->pid;
1144
1145 if (kernel)
1146 msg->client_id = 0;
1147
1148 msg->ctx = ctx->ctxid | fl->pd;
1149 msg->handle = handle;
1150 msg->sc = ctx->sc;
1151 msg->addr = ctx->buf ? ctx->buf->phys : 0;
1152 msg->size = roundup(ctx->msg_sz, PAGE_SIZE);
1153 fastrpc_context_get(ctx);
1154
1155 ret = rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg));
1156
1157 if (ret)
1158 fastrpc_context_put(ctx);
1159
1160 return ret;
1161
1162 }
1163
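/*
 * Common invoke path for user and kernel initiated calls: build a context,
 * marshal the arguments, send the message over rpmsg and wait for the DSP
 * response (10 s timeout for kernel calls, interruptible wait for user
 * calls), then unmarshal the results and return the remote status.
 */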
1164 static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
1165 u32 handle, u32 sc,
1166 struct fastrpc_invoke_args *args)
1167 {
1168 struct fastrpc_invoke_ctx *ctx = NULL;
1169 struct fastrpc_buf *buf, *b;
1170
1171 int err = 0;
1172
1173 if (!fl->sctx)
1174 return -EINVAL;
1175
1176 if (!fl->cctx->rpdev)
1177 return -EPIPE;
1178
1179 if (handle == FASTRPC_INIT_HANDLE && !kernel) {
1180 dev_warn_ratelimited(fl->sctx->dev, "user app trying to send a kernel RPC message (%d)\n", handle);
1181 return -EPERM;
1182 }
1183
1184 ctx = fastrpc_context_alloc(fl, kernel, sc, args);
1185 if (IS_ERR(ctx))
1186 return PTR_ERR(ctx);
1187
1188 err = fastrpc_get_args(kernel, ctx);
1189 if (err)
1190 goto bail;
1191
1192 /* make sure that all CPU memory writes are seen by DSP */
1193 dma_wmb();
1194 /* Send invoke buffer to remote dsp */
1195 err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle);
1196 if (err)
1197 goto bail;
1198
1199 if (kernel) {
1200 if (!wait_for_completion_timeout(&ctx->work, 10 * HZ))
1201 err = -ETIMEDOUT;
1202 } else {
1203 err = wait_for_completion_interruptible(&ctx->work);
1204 }
1205
1206 if (err)
1207 goto bail;
1208
1209 /* make sure that all memory writes by DSP are seen by CPU */
1210 dma_rmb();
1211 /* populate all the output buffers with results */
1212 err = fastrpc_put_args(ctx, kernel);
1213 if (err)
1214 goto bail;
1215
1216 /* Check the response from remote dsp */
1217 err = ctx->retval;
1218 if (err)
1219 goto bail;
1220
1221 bail:
1222 if (err != -ERESTARTSYS && err != -ETIMEDOUT) {
1223 /* We are done with this compute context */
1224 spin_lock(&fl->lock);
1225 list_del(&ctx->node);
1226 spin_unlock(&fl->lock);
1227 fastrpc_context_put(ctx);
1228 }
1229
1230 if (err == -ERESTARTSYS) {
1231 list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
1232 list_del(&buf->node);
1233 list_add_tail(&buf->node, &fl->cctx->invoke_interrupted_mmaps);
1234 }
1235 }
1236
1237 if (err)
1238 dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err);
1239
1240 return err;
1241 }
1242
1243 static bool is_session_rejected(struct fastrpc_user *fl, bool unsigned_pd_request)
1244 {
1245 /* Check if the device node is non-secure and channel is secure */
1246 if (!fl->is_secure_dev && fl->cctx->secure) {
1247 /*
1248 * Allow untrusted applications to offload only to Unsigned PD when
1249 * channel is configured as secure and block untrusted apps on channel
1250 * that does not support unsigned PD offload
1251 */
1252 if (!fl->cctx->unsigned_support || !unsigned_pd_request) {
1253 dev_err(&fl->cctx->rpdev->dev, "Error: Untrusted application trying to offload to signed PD\n");
1254 return true;
1255 }
1256 }
1257
1258 return false;
1259 }
1260
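/*
 * Create a static (audio) process on the DSP: allocate the channel's remote
 * heap once, optionally assign it to the subsystem VMIDs, and invoke
 * CREATE_STATIC with the process name and heap page.
 */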
1261 static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
1262 char __user *argp)
1263 {
1264 struct fastrpc_init_create_static init;
1265 struct fastrpc_invoke_args *args;
1266 struct fastrpc_phy_page pages[1];
1267 char *name;
1268 int err;
1269 bool scm_done = false;
1270 struct {
1271 int client_id;
1272 u32 namelen;
1273 u32 pageslen;
1274 } inbuf;
1275 u32 sc;
1276
1277 args = kcalloc(FASTRPC_CREATE_STATIC_PROCESS_NARGS, sizeof(*args), GFP_KERNEL);
1278 if (!args)
1279 return -ENOMEM;
1280
1281 if (copy_from_user(&init, argp, sizeof(init))) {
1282 err = -EFAULT;
1283 goto err;
1284 }
1285
1286 if (init.namelen > INIT_FILE_NAMELEN_MAX) {
1287 err = -EINVAL;
1288 goto err;
1289 }
1290
1291 name = memdup_user(u64_to_user_ptr(init.name), init.namelen);
1292 if (IS_ERR(name)) {
1293 err = PTR_ERR(name);
1294 goto err;
1295 }
1296
1297 if (!fl->cctx->remote_heap) {
1298 err = fastrpc_remote_heap_alloc(fl, fl->sctx->dev, init.memlen,
1299 &fl->cctx->remote_heap);
1300 if (err)
1301 goto err_name;
1302
1303 /* Map if we have any heap VMIDs associated with this ADSP Static Process. */
1304 if (fl->cctx->vmcount) {
1305 u64 src_perms = BIT(QCOM_SCM_VMID_HLOS);
1306
1307 err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys,
1308 (u64)fl->cctx->remote_heap->size,
1309 &src_perms,
1310 fl->cctx->vmperms, fl->cctx->vmcount);
1311 if (err) {
1312 dev_err(fl->sctx->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d\n",
1313 fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err);
1314 goto err_map;
1315 }
1316 scm_done = true;
1317 }
1318 }
1319
1320 inbuf.client_id = fl->client_id;
1321 inbuf.namelen = init.namelen;
1322 inbuf.pageslen = 0;
1323 fl->pd = USER_PD;
1324
1325 args[0].ptr = (u64)(uintptr_t)&inbuf;
1326 args[0].length = sizeof(inbuf);
1327 args[0].fd = -1;
1328
1329 args[1].ptr = (u64)(uintptr_t)name;
1330 args[1].length = inbuf.namelen;
1331 args[1].fd = -1;
1332
1333 pages[0].addr = fl->cctx->remote_heap->phys;
1334 pages[0].size = fl->cctx->remote_heap->size;
1335
1336 args[2].ptr = (u64)(uintptr_t) pages;
1337 args[2].length = sizeof(*pages);
1338 args[2].fd = -1;
1339
1340 sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_STATIC, 3, 0);
1341
1342 err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
1343 sc, args);
1344 if (err)
1345 goto err_invoke;
1346
1347 kfree(args);
1348 kfree(name);
1349
1350 return 0;
1351 err_invoke:
1352 if (fl->cctx->vmcount && scm_done) {
1353 u64 src_perms = 0;
1354 struct qcom_scm_vmperm dst_perms;
1355 u32 i;
1356
1357 for (i = 0; i < fl->cctx->vmcount; i++)
1358 src_perms |= BIT(fl->cctx->vmperms[i].vmid);
1359
1360 dst_perms.vmid = QCOM_SCM_VMID_HLOS;
1361 dst_perms.perm = QCOM_SCM_PERM_RWX;
1362 err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys,
1363 (u64)fl->cctx->remote_heap->size,
1364 &src_perms, &dst_perms, 1);
1365 if (err)
1366 dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d\n",
1367 fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err);
1368 }
1369 err_map:
1370 fastrpc_buf_free(fl->cctx->remote_heap);
1371 err_name:
1372 kfree(name);
1373 err:
1374 kfree(args);
1375
1376 return err;
1377 }
1378
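/*
 * Create a dynamic user process (user PD) on the DSP: map the init file from
 * the supplied fd, allocate the process's initial memory and invoke
 * CREATE/CREATE_ATTR with the process attributes.
 */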
1379 static int fastrpc_init_create_process(struct fastrpc_user *fl,
1380 char __user *argp)
1381 {
1382 struct fastrpc_init_create init;
1383 struct fastrpc_invoke_args *args;
1384 struct fastrpc_phy_page pages[1];
1385 struct fastrpc_map *map = NULL;
1386 struct fastrpc_buf *imem = NULL;
1387 int memlen;
1388 int err;
1389 struct {
1390 int client_id;
1391 u32 namelen;
1392 u32 filelen;
1393 u32 pageslen;
1394 u32 attrs;
1395 u32 siglen;
1396 } inbuf;
1397 u32 sc;
1398 bool unsigned_module = false;
1399
1400 args = kcalloc(FASTRPC_CREATE_PROCESS_NARGS, sizeof(*args), GFP_KERNEL);
1401 if (!args)
1402 return -ENOMEM;
1403
1404 if (copy_from_user(&init, argp, sizeof(init))) {
1405 err = -EFAULT;
1406 goto err;
1407 }
1408
1409 if (init.attrs & FASTRPC_MODE_UNSIGNED_MODULE)
1410 unsigned_module = true;
1411
1412 if (is_session_rejected(fl, unsigned_module)) {
1413 err = -ECONNREFUSED;
1414 goto err;
1415 }
1416
1417 if (init.filelen > INIT_FILELEN_MAX) {
1418 err = -EINVAL;
1419 goto err;
1420 }
1421
1422 inbuf.client_id = fl->client_id;
1423 inbuf.namelen = strlen(current->comm) + 1;
1424 inbuf.filelen = init.filelen;
1425 inbuf.pageslen = 1;
1426 inbuf.attrs = init.attrs;
1427 inbuf.siglen = init.siglen;
1428 fl->pd = USER_PD;
1429
1430 if (init.filelen && init.filefd) {
1431 err = fastrpc_map_create(fl, init.filefd, init.filelen, 0, &map);
1432 if (err)
1433 goto err;
1434 }
1435
1436 memlen = ALIGN(max(INIT_FILELEN_MAX, (int)init.filelen * 4),
1437 1024 * 1024);
1438 err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen,
1439 &imem);
1440 if (err)
1441 goto err_alloc;
1442
1443 fl->init_mem = imem;
1444 args[0].ptr = (u64)(uintptr_t)&inbuf;
1445 args[0].length = sizeof(inbuf);
1446 args[0].fd = -1;
1447
1448 args[1].ptr = (u64)(uintptr_t)current->comm;
1449 args[1].length = inbuf.namelen;
1450 args[1].fd = -1;
1451
1452 args[2].ptr = (u64) init.file;
1453 args[2].length = inbuf.filelen;
1454 args[2].fd = init.filefd;
1455
1456 pages[0].addr = imem->phys;
1457 pages[0].size = imem->size;
1458
1459 args[3].ptr = (u64)(uintptr_t) pages;
1460 args[3].length = 1 * sizeof(*pages);
1461 args[3].fd = -1;
1462
1463 args[4].ptr = (u64)(uintptr_t)&inbuf.attrs;
1464 args[4].length = sizeof(inbuf.attrs);
1465 args[4].fd = -1;
1466
1467 args[5].ptr = (u64)(uintptr_t) &inbuf.siglen;
1468 args[5].length = sizeof(inbuf.siglen);
1469 args[5].fd = -1;
1470
1471 sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0);
1472 if (init.attrs)
1473 sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 4, 0);
1474
1475 err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
1476 sc, args);
1477 if (err)
1478 goto err_invoke;
1479
1480 kfree(args);
1481
1482 return 0;
1483
1484 err_invoke:
1485 fl->init_mem = NULL;
1486 fastrpc_buf_free(imem);
1487 err_alloc:
1488 fastrpc_map_put(map);
1489 err:
1490 kfree(args);
1491
1492 return err;
1493 }
1494
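/*
 * Reserve a free, valid SMMU session (context bank) on the channel and derive
 * a non-zero client id from its index.
 */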
1495 static struct fastrpc_session_ctx *fastrpc_session_alloc(
1496 struct fastrpc_user *fl)
1497 {
1498 struct fastrpc_channel_ctx *cctx = fl->cctx;
1499 struct fastrpc_session_ctx *session = NULL;
1500 unsigned long flags;
1501 int i;
1502
1503 spin_lock_irqsave(&cctx->lock, flags);
1504 for (i = 0; i < cctx->sesscount; i++) {
1505 if (!cctx->session[i].used && cctx->session[i].valid) {
1506 cctx->session[i].used = true;
1507 session = &cctx->session[i];
1508 /* any non-zero ID will work, session_idx + 1 is the simplest one */
1509 fl->client_id = i + 1;
1510 break;
1511 }
1512 }
1513 spin_unlock_irqrestore(&cctx->lock, flags);
1514
1515 return session;
1516 }
1517
1518 static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx,
1519 struct fastrpc_session_ctx *session)
1520 {
1521 unsigned long flags;
1522
1523 spin_lock_irqsave(&cctx->lock, flags);
1524 session->used = false;
1525 spin_unlock_irqrestore(&cctx->lock, flags);
1526 }
1527
1528 static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
1529 {
1530 struct fastrpc_invoke_args args[1];
1531 int client_id = 0;
1532 u32 sc;
1533
1534 client_id = fl->client_id;
1535 args[0].ptr = (u64)(uintptr_t) &client_id;
1536 args[0].length = sizeof(client_id);
1537 args[0].fd = -1;
1538 sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_RELEASE, 1, 0);
1539
1540 return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
1541 sc, &args[0]);
1542 }
1543
1544 static int fastrpc_device_release(struct inode *inode, struct file *file)
1545 {
1546 struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
1547 struct fastrpc_channel_ctx *cctx = fl->cctx;
1548 struct fastrpc_invoke_ctx *ctx, *n;
1549 struct fastrpc_map *map, *m;
1550 struct fastrpc_buf *buf, *b;
1551 unsigned long flags;
1552
1553 fastrpc_release_current_dsp_process(fl);
1554
1555 spin_lock_irqsave(&cctx->lock, flags);
1556 list_del(&fl->user);
1557 spin_unlock_irqrestore(&cctx->lock, flags);
1558
1559 if (fl->init_mem)
1560 fastrpc_buf_free(fl->init_mem);
1561
1562 list_for_each_entry_safe(ctx, n, &fl->pending, node) {
1563 list_del(&ctx->node);
1564 fastrpc_context_put(ctx);
1565 }
1566
1567 list_for_each_entry_safe(map, m, &fl->maps, node)
1568 fastrpc_map_put(map);
1569
1570 list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
1571 list_del(&buf->node);
1572 fastrpc_buf_free(buf);
1573 }
1574
1575 fastrpc_session_free(cctx, fl->sctx);
1576 fastrpc_channel_ctx_put(cctx);
1577
1578 mutex_destroy(&fl->mutex);
1579 kfree(fl);
1580 file->private_data = NULL;
1581
1582 return 0;
1583 }
1584
1585 static int fastrpc_device_open(struct inode *inode, struct file *filp)
1586 {
1587 struct fastrpc_channel_ctx *cctx;
1588 struct fastrpc_device *fdevice;
1589 struct fastrpc_user *fl = NULL;
1590 unsigned long flags;
1591
1592 fdevice = miscdev_to_fdevice(filp->private_data);
1593 cctx = fdevice->cctx;
1594
1595 fl = kzalloc(sizeof(*fl), GFP_KERNEL);
1596 if (!fl)
1597 return -ENOMEM;
1598
1599 /* Released in fastrpc_device_release() */
1600 fastrpc_channel_ctx_get(cctx);
1601
1602 filp->private_data = fl;
1603 spin_lock_init(&fl->lock);
1604 mutex_init(&fl->mutex);
1605 INIT_LIST_HEAD(&fl->pending);
1606 INIT_LIST_HEAD(&fl->maps);
1607 INIT_LIST_HEAD(&fl->mmaps);
1608 INIT_LIST_HEAD(&fl->user);
1609 fl->cctx = cctx;
1610 fl->is_secure_dev = fdevice->secure;
1611
1612 fl->sctx = fastrpc_session_alloc(fl);
1613 if (!fl->sctx) {
1614 dev_err(&cctx->rpdev->dev, "No session available\n");
1615 mutex_destroy(&fl->mutex);
1616 kfree(fl);
1617
1618 return -EBUSY;
1619 }
1620
1621 spin_lock_irqsave(&cctx->lock, flags);
1622 list_add_tail(&fl->user, &cctx->users);
1623 spin_unlock_irqrestore(&cctx->lock, flags);
1624
1625 return 0;
1626 }
1627
1628 static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
1629 {
1630 struct fastrpc_alloc_dma_buf bp;
1631 DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
1632 struct fastrpc_buf *buf = NULL;
1633 int err;
1634
1635 if (copy_from_user(&bp, argp, sizeof(bp)))
1636 return -EFAULT;
1637
1638 err = fastrpc_buf_alloc(fl, fl->sctx->dev, bp.size, &buf);
1639 if (err)
1640 return err;
1641 exp_info.ops = &fastrpc_dma_buf_ops;
1642 exp_info.size = bp.size;
1643 exp_info.flags = O_RDWR;
1644 exp_info.priv = buf;
1645 buf->dmabuf = dma_buf_export(&exp_info);
1646 if (IS_ERR(buf->dmabuf)) {
1647 err = PTR_ERR(buf->dmabuf);
1648 fastrpc_buf_free(buf);
1649 return err;
1650 }
1651
1652 bp.fd = dma_buf_fd(buf->dmabuf, O_ACCMODE);
1653 if (bp.fd < 0) {
1654 dma_buf_put(buf->dmabuf);
1655 return -EINVAL;
1656 }
1657
1658 if (copy_to_user(argp, &bp, sizeof(bp))) {
1659 /*
1660 * The usercopy failed, but we can't do much about it, as
1661 * dma_buf_fd() already called fd_install() and made the
1662 * file descriptor accessible for the current process. It
1663 * might already be closed and dmabuf no longer valid when
1664 * we reach this point. Therefore "leak" the fd and rely on
1665 * the process exit path to do any required cleanup.
1666 */
1667 return -EFAULT;
1668 }
1669
1670 return 0;
1671 }
1672
1673 static int fastrpc_init_attach(struct fastrpc_user *fl, int pd)
1674 {
1675 struct fastrpc_invoke_args args[1];
1676 int client_id = fl->client_id;
1677 u32 sc;
1678
1679 args[0].ptr = (u64)(uintptr_t) &client_id;
1680 args[0].length = sizeof(client_id);
1681 args[0].fd = -1;
1682 sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_ATTACH, 1, 0);
1683 fl->pd = pd;
1684
1685 return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
1686 sc, &args[0]);
1687 }
1688
1689 static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp)
1690 {
1691 struct fastrpc_invoke_args *args = NULL;
1692 struct fastrpc_invoke inv;
1693 u32 nscalars;
1694 int err;
1695
1696 if (copy_from_user(&inv, argp, sizeof(inv)))
1697 return -EFAULT;
1698
1699 /* nscalars is truncated here to max supported value */
1700 nscalars = REMOTE_SCALARS_LENGTH(inv.sc);
1701 if (nscalars) {
1702 args = kcalloc(nscalars, sizeof(*args), GFP_KERNEL);
1703 if (!args)
1704 return -ENOMEM;
1705
1706 if (copy_from_user(args, (void __user *)(uintptr_t)inv.args,
1707 nscalars * sizeof(*args))) {
1708 kfree(args);
1709 return -EFAULT;
1710 }
1711 }
1712
1713 err = fastrpc_internal_invoke(fl, false, inv.handle, inv.sc, args);
1714 kfree(args);
1715
1716 return err;
1717 }
1718
1719 static int fastrpc_get_info_from_dsp(struct fastrpc_user *fl, uint32_t *dsp_attr_buf,
1720 uint32_t dsp_attr_buf_len)
1721 {
1722 struct fastrpc_invoke_args args[2] = { 0 };
1723
1724 /*
1725 * Capability filled in userspace. This carries the information
1726 * about the remoteproc support which is fetched from the remoteproc
1727 * sysfs node by userspace.
1728 */
1729 dsp_attr_buf[0] = 0;
1730 dsp_attr_buf_len -= 1;
1731
1732 args[0].ptr = (u64)(uintptr_t)&dsp_attr_buf_len;
1733 args[0].length = sizeof(dsp_attr_buf_len);
1734 args[0].fd = -1;
1735 args[1].ptr = (u64)(uintptr_t)&dsp_attr_buf[1];
1736 args[1].length = dsp_attr_buf_len * sizeof(u32);
1737 args[1].fd = -1;
1738
1739 return fastrpc_internal_invoke(fl, true, FASTRPC_DSP_UTILITIES_HANDLE,
1740 FASTRPC_SCALARS(0, 1, 1), args);
1741 }
1742
1743 static int fastrpc_get_info_from_kernel(struct fastrpc_ioctl_capability *cap,
1744 struct fastrpc_user *fl)
1745 {
1746 struct fastrpc_channel_ctx *cctx = fl->cctx;
1747 uint32_t attribute_id = cap->attribute_id;
1748 uint32_t *dsp_attributes;
1749 unsigned long flags;
1750 int err;
1751
1752 spin_lock_irqsave(&cctx->lock, flags);
1753 /* check if we already have queried dsp for attributes */
1754 if (cctx->valid_attributes) {
1755 spin_unlock_irqrestore(&cctx->lock, flags);
1756 goto done;
1757 }
1758 spin_unlock_irqrestore(&cctx->lock, flags);
1759
1760 dsp_attributes = kzalloc(FASTRPC_MAX_DSP_ATTRIBUTES_LEN, GFP_KERNEL);
1761 if (!dsp_attributes)
1762 return -ENOMEM;
1763
1764 err = fastrpc_get_info_from_dsp(fl, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES);
1765 if (err == DSP_UNSUPPORTED_API) {
1766 dev_info(&cctx->rpdev->dev,
1767 "Warning: DSP capabilities not supported\n");
1768 kfree(dsp_attributes);
1769 return -EOPNOTSUPP;
1770 } else if (err) {
1771 dev_err(&cctx->rpdev->dev, "Error: dsp information is incorrect err: %d\n", err);
1772 kfree(dsp_attributes);
1773 return err;
1774 }
1775
1776 spin_lock_irqsave(&cctx->lock, flags);
1777 memcpy(cctx->dsp_attributes, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES_LEN);
1778 cctx->valid_attributes = true;
1779 spin_unlock_irqrestore(&cctx->lock, flags);
1780 kfree(dsp_attributes);
1781 done:
1782 cap->capability = cctx->dsp_attributes[attribute_id];
1783 return 0;
1784 }
1785
1786 static int fastrpc_get_dsp_info(struct fastrpc_user *fl, char __user *argp)
1787 {
1788 struct fastrpc_ioctl_capability cap = {0};
1789 int err = 0;
1790
1791 if (copy_from_user(&cap, argp, sizeof(cap)))
1792 return -EFAULT;
1793
1794 cap.capability = 0;
1795
1796 if (cap.attribute_id >= FASTRPC_MAX_DSP_ATTRIBUTES) {
1797 dev_err(&fl->cctx->rpdev->dev, "Error: invalid attribute: %d, err: %d\n",
1798 cap.attribute_id, err);
1799 return -EOVERFLOW;
1800 }
1801
1802 err = fastrpc_get_info_from_kernel(&cap, fl);
1803 if (err)
1804 return err;
1805
1806 if (copy_to_user(argp, &cap, sizeof(cap)))
1807 return -EFAULT;
1808
1809 return 0;
1810 }
1811
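/*
 * Ask the DSP to unmap a buffer that was previously mapped through
 * FASTRPC_RMID_INIT_MMAP and free the backing allocation on success.
 */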
1812 static int fastrpc_req_munmap_impl(struct fastrpc_user *fl, struct fastrpc_buf *buf)
1813 {
1814 struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
1815 struct fastrpc_munmap_req_msg req_msg;
1816 struct device *dev = fl->sctx->dev;
1817 int err;
1818 u32 sc;
1819
1820 req_msg.client_id = fl->client_id;
1821 req_msg.size = buf->size;
1822 req_msg.vaddr = buf->raddr;
1823
1824 args[0].ptr = (u64) (uintptr_t) &req_msg;
1825 args[0].length = sizeof(req_msg);
1826
1827 sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MUNMAP, 1, 0);
1828 err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
1829 &args[0]);
1830 if (!err) {
1831 dev_dbg(dev, "unmmap\tpt 0x%09lx OK\n", buf->raddr);
1832 spin_lock(&fl->lock);
1833 list_del(&buf->node);
1834 spin_unlock(&fl->lock);
1835 fastrpc_buf_free(buf);
1836 } else {
1837 dev_err(dev, "unmmap\tpt 0x%09lx ERROR\n", buf->raddr);
1838 }
1839
1840 return err;
1841 }
1842
1843 static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
1844 {
1845 struct fastrpc_buf *buf = NULL, *iter, *b;
1846 struct fastrpc_req_munmap req;
1847 struct device *dev = fl->sctx->dev;
1848
1849 if (copy_from_user(&req, argp, sizeof(req)))
1850 return -EFAULT;
1851
1852 spin_lock(&fl->lock);
1853 list_for_each_entry_safe(iter, b, &fl->mmaps, node) {
1854 if ((iter->raddr == req.vaddrout) && (iter->size == req.size)) {
1855 buf = iter;
1856 break;
1857 }
1858 }
1859 spin_unlock(&fl->lock);
1860
1861 if (!buf) {
1862 dev_err(dev, "mmap\t\tpt 0x%09llx [len 0x%08llx] not in list\n",
1863 req.vaddrout, req.size);
1864 return -EINVAL;
1865 }
1866
1867 return fastrpc_req_munmap_impl(fl, buf);
1868 }
1869
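/*
 * Handle the MMAP ioctl: allocate a kernel buffer (remote heap or session
 * memory), ask the DSP to map it, reassign remote-heap pages to the subsystem
 * VMIDs when configured, and report the DSP-side address back to userspace.
 */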
1870 static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
1871 {
1872 struct fastrpc_invoke_args args[3] = { [0 ... 2] = { 0 } };
1873 struct fastrpc_buf *buf = NULL;
1874 struct fastrpc_mmap_req_msg req_msg;
1875 struct fastrpc_mmap_rsp_msg rsp_msg;
1876 struct fastrpc_phy_page pages;
1877 struct fastrpc_req_mmap req;
1878 struct device *dev = fl->sctx->dev;
1879 int err;
1880 u32 sc;
1881
1882 if (copy_from_user(&req, argp, sizeof(req)))
1883 return -EFAULT;
1884
1885 if (req.flags != ADSP_MMAP_ADD_PAGES && req.flags != ADSP_MMAP_REMOTE_HEAP_ADDR) {
1886 dev_err(dev, "flag not supported 0x%x\n", req.flags);
1887
1888 return -EINVAL;
1889 }
1890
1891 if (req.vaddrin) {
1892 dev_err(dev, "adding user allocated pages is not supported\n");
1893 return -EINVAL;
1894 }
1895
1896 if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
1897 err = fastrpc_remote_heap_alloc(fl, dev, req.size, &buf);
1898 else
1899 err = fastrpc_buf_alloc(fl, dev, req.size, &buf);
1900
1901 if (err) {
1902 dev_err(dev, "failed to allocate buffer\n");
1903 return err;
1904 }
1905
1906 req_msg.client_id = fl->client_id;
1907 req_msg.flags = req.flags;
1908 req_msg.vaddr = req.vaddrin;
1909 req_msg.num = sizeof(pages);
1910
1911 args[0].ptr = (u64) (uintptr_t) &req_msg;
1912 args[0].length = sizeof(req_msg);
1913
1914 pages.addr = buf->phys;
1915 pages.size = buf->size;
1916
1917 args[1].ptr = (u64) (uintptr_t) &pages;
1918 args[1].length = sizeof(pages);
1919
1920 args[2].ptr = (u64) (uintptr_t) &rsp_msg;
1921 args[2].length = sizeof(rsp_msg);
1922
1923 sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MMAP, 2, 1);
1924 err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
1925 &args[0]);
1926 if (err) {
1927 dev_err(dev, "mmap error (len 0x%08llx)\n", buf->size);
1928 fastrpc_buf_free(buf);
1929 return err;
1930 }
1931
1932 /* update the buffer to be able to deallocate the memory on the DSP */
1933 buf->raddr = (uintptr_t) rsp_msg.vaddr;
1934
1935 /* let the client know the address to use */
1936 req.vaddrout = rsp_msg.vaddr;
1937
1938 /* Add memory to static PD pool, protection through hypervisor */
1939 if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) {
1940 u64 src_perms = BIT(QCOM_SCM_VMID_HLOS);
1941
1942 err = qcom_scm_assign_mem(buf->phys, (u64)buf->size,
1943 &src_perms, fl->cctx->vmperms, fl->cctx->vmcount);
1944 if (err) {
1945 dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
1946 buf->phys, buf->size, err);
1947 goto err_assign;
1948 }
1949 }
1950
1951 spin_lock(&fl->lock);
1952 list_add_tail(&buf->node, &fl->mmaps);
1953 spin_unlock(&fl->lock);
1954
1955 if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
1956 err = -EFAULT;
1957 goto err_assign;
1958 }
1959
1960 dev_dbg(dev, "mmap\t\tpt 0x%09lx OK [len 0x%08llx]\n",
1961 buf->raddr, buf->size);
1962
1963 return 0;
1964
1965 err_assign:
1966 fastrpc_req_munmap_impl(fl, buf);
1967
1968 return err;
1969 }
1970
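/*
 * Unmap a buffer previously mapped on the DSP: look up the map by fd and
 * remote address on fl->maps, issue FASTRPC_RMID_INIT_MEM_UNMAP and drop
 * the map reference on success.
 */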
static int fastrpc_req_mem_unmap_impl(struct fastrpc_user *fl, struct fastrpc_mem_unmap *req)
{
	struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
	struct fastrpc_map *map = NULL, *iter, *m;
	struct fastrpc_mem_unmap_req_msg req_msg = { 0 };
	int err = 0;
	u32 sc;
	struct device *dev = fl->sctx->dev;

	spin_lock(&fl->lock);
	list_for_each_entry_safe(iter, m, &fl->maps, node) {
		if ((req->fd < 0 || iter->fd == req->fd) && (iter->raddr == req->vaddr)) {
			map = iter;
			break;
		}
	}

	spin_unlock(&fl->lock);

	if (!map) {
		dev_err(dev, "map not in list\n");
		return -EINVAL;
	}

	req_msg.client_id = fl->client_id;
	req_msg.len = map->len;
	req_msg.vaddrin = map->raddr;
	req_msg.fd = map->fd;

	args[0].ptr = (u64) (uintptr_t) &req_msg;
	args[0].length = sizeof(req_msg);

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MEM_UNMAP, 1, 0);
	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
				      &args[0]);
	if (err) {
		dev_err(dev, "unmmap\tpt fd = %d, 0x%09llx error\n", map->fd, map->raddr);
		return err;
	}
	fastrpc_map_put(map);

	return 0;
}

static int fastrpc_req_mem_unmap(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_mem_unmap req;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	return fastrpc_req_mem_unmap_impl(fl, &req);
}

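/*
 * Handle FASTRPC_IOCTL_MEM_MAP: create an SMMU mapping for the caller's
 * buffer fd, ask the DSP to map it via FASTRPC_RMID_INIT_MEM_MAP and
 * return the remote address to userspace.
 */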
static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_invoke_args args[4] = { [0 ... 3] = { 0 } };
	struct fastrpc_mem_map_req_msg req_msg = { 0 };
	struct fastrpc_mmap_rsp_msg rsp_msg = { 0 };
	struct fastrpc_mem_unmap req_unmap = { 0 };
	struct fastrpc_phy_page pages = { 0 };
	struct fastrpc_mem_map req;
	struct device *dev = fl->sctx->dev;
	struct fastrpc_map *map = NULL;
	int err;
	u32 sc;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	/* create SMMU mapping */
	err = fastrpc_map_create(fl, req.fd, req.length, 0, &map);
	if (err) {
		dev_err(dev, "failed to map buffer, fd = %d\n", req.fd);
		return err;
	}

	req_msg.client_id = fl->client_id;
	req_msg.fd = req.fd;
	req_msg.offset = req.offset;
	req_msg.vaddrin = req.vaddrin;
	map->va = (void *) (uintptr_t) req.vaddrin;
	req_msg.flags = req.flags;
	req_msg.num = sizeof(pages);
	req_msg.data_len = 0;

	args[0].ptr = (u64) (uintptr_t) &req_msg;
	args[0].length = sizeof(req_msg);

	pages.addr = map->phys;
	pages.size = map->len;

	args[1].ptr = (u64) (uintptr_t) &pages;
	args[1].length = sizeof(pages);

	args[2].ptr = (u64) (uintptr_t) &pages;
	args[2].length = 0;

	args[3].ptr = (u64) (uintptr_t) &rsp_msg;
	args[3].length = sizeof(rsp_msg);

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MEM_MAP, 3, 1);
	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, &args[0]);
	if (err) {
		dev_err(dev, "mem mmap error, fd %d, vaddr %llx, size %lld\n",
			req.fd, req.vaddrin, map->len);
		goto err_invoke;
	}

	/* update the buffer to be able to deallocate the memory on the DSP */
	map->raddr = rsp_msg.vaddr;

	/* let the client know the address to use */
	req.vaddrout = rsp_msg.vaddr;

	if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
		/* unmap the memory and release the buffer */
		req_unmap.vaddr = (uintptr_t) rsp_msg.vaddr;
		req_unmap.length = map->len;
		fastrpc_req_mem_unmap_impl(fl, &req_unmap);
		return -EFAULT;
	}

	return 0;

err_invoke:
	fastrpc_map_put(map);

	return err;
}

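/* Dispatch the fastrpc character-device ioctls to their handlers. */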
static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
	char __user *argp = (char __user *)arg;
	int err;

	switch (cmd) {
	case FASTRPC_IOCTL_INVOKE:
		err = fastrpc_invoke(fl, argp);
		break;
	case FASTRPC_IOCTL_INIT_ATTACH:
		err = fastrpc_init_attach(fl, ROOT_PD);
		break;
	case FASTRPC_IOCTL_INIT_ATTACH_SNS:
		err = fastrpc_init_attach(fl, SENSORS_PD);
		break;
	case FASTRPC_IOCTL_INIT_CREATE_STATIC:
		err = fastrpc_init_create_static_process(fl, argp);
		break;
	case FASTRPC_IOCTL_INIT_CREATE:
		err = fastrpc_init_create_process(fl, argp);
		break;
	case FASTRPC_IOCTL_ALLOC_DMA_BUFF:
		err = fastrpc_dmabuf_alloc(fl, argp);
		break;
	case FASTRPC_IOCTL_MMAP:
		err = fastrpc_req_mmap(fl, argp);
		break;
	case FASTRPC_IOCTL_MUNMAP:
		err = fastrpc_req_munmap(fl, argp);
		break;
	case FASTRPC_IOCTL_MEM_MAP:
		err = fastrpc_req_mem_map(fl, argp);
		break;
	case FASTRPC_IOCTL_MEM_UNMAP:
		err = fastrpc_req_mem_unmap(fl, argp);
		break;
	case FASTRPC_IOCTL_GET_DSP_INFO:
		err = fastrpc_get_dsp_info(fl, argp);
		break;
	default:
		err = -ENOTTY;
		break;
	}

	return err;
}

static const struct file_operations fastrpc_fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
	.compat_ioctl = fastrpc_device_ioctl,
};

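/*
 * Probe a "qcom,fastrpc-compute-cb" sub-device: register one session with
 * the parent channel context (duplicated "qcom,nsessions" times) and
 * enable 32-bit DMA for the context bank.
 */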
static int fastrpc_cb_probe(struct platform_device *pdev)
{
	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_session_ctx *sess;
	struct device *dev = &pdev->dev;
	int i, sessions = 0;
	unsigned long flags;
	int rc;

	cctx = dev_get_drvdata(dev->parent);
	if (!cctx)
		return -EINVAL;

	of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions);

	spin_lock_irqsave(&cctx->lock, flags);
	if (cctx->sesscount >= FASTRPC_MAX_SESSIONS) {
		dev_err(&pdev->dev, "too many sessions\n");
		spin_unlock_irqrestore(&cctx->lock, flags);
		return -ENOSPC;
	}
	sess = &cctx->session[cctx->sesscount++];
	sess->used = false;
	sess->valid = true;
	sess->dev = dev;
	dev_set_drvdata(dev, sess);

	if (of_property_read_u32(dev->of_node, "reg", &sess->sid))
		dev_info(dev, "FastRPC Session ID not specified in DT\n");

	if (sessions > 0) {
		struct fastrpc_session_ctx *dup_sess;

		for (i = 1; i < sessions; i++) {
			if (cctx->sesscount >= FASTRPC_MAX_SESSIONS)
				break;
			dup_sess = &cctx->session[cctx->sesscount++];
			memcpy(dup_sess, sess, sizeof(*dup_sess));
		}
	}
	spin_unlock_irqrestore(&cctx->lock, flags);
	rc = dma_set_mask(dev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(dev, "32-bit DMA enable failed\n");
		return rc;
	}

	return 0;
}

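/* Invalidate every session that belongs to the removed context bank. */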
static void fastrpc_cb_remove(struct platform_device *pdev)
{
	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(pdev->dev.parent);
	struct fastrpc_session_ctx *sess = dev_get_drvdata(&pdev->dev);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&cctx->lock, flags);
	for (i = 0; i < FASTRPC_MAX_SESSIONS; i++) {
		if (cctx->session[i].sid == sess->sid) {
			cctx->session[i].valid = false;
			cctx->sesscount--;
		}
	}
	spin_unlock_irqrestore(&cctx->lock, flags);
}

static const struct of_device_id fastrpc_match_table[] = {
	{ .compatible = "qcom,fastrpc-compute-cb", },
	{}
};

static struct platform_driver fastrpc_cb_driver = {
	.probe = fastrpc_cb_probe,
	.remove = fastrpc_cb_remove,
	.driver = {
		.name = "qcom,fastrpc-cb",
		.of_match_table = fastrpc_match_table,
		.suppress_bind_attrs = true,
	},
};

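/*
 * Register the "fastrpc-<domain>[-secure]" misc device for a channel and
 * remember it in the channel context.
 */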
static int fastrpc_device_register(struct device *dev, struct fastrpc_channel_ctx *cctx,
				   bool is_secured, const char *domain)
{
	struct fastrpc_device *fdev;
	int err;

	fdev = devm_kzalloc(dev, sizeof(*fdev), GFP_KERNEL);
	if (!fdev)
		return -ENOMEM;

	fdev->secure = is_secured;
	fdev->cctx = cctx;
	fdev->miscdev.minor = MISC_DYNAMIC_MINOR;
	fdev->miscdev.fops = &fastrpc_fops;
	fdev->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "fastrpc-%s%s",
					    domain, is_secured ? "-secure" : "");
	if (!fdev->miscdev.name)
		return -ENOMEM;

	err = misc_register(&fdev->miscdev);
	if (!err) {
		if (is_secured)
			cctx->secure_fdevice = fdev;
		else
			cctx->fdevice = fdev;
	}

	return err;
}

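/* Translate the DT "label" string into a FastRPC domain ID. */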
static int fastrpc_get_domain_id(const char *domain)
{
	if (!strncmp(domain, "adsp", 4))
		return ADSP_DOMAIN_ID;
	else if (!strncmp(domain, "cdsp", 4))
		return CDSP_DOMAIN_ID;
	else if (!strncmp(domain, "mdsp", 4))
		return MDSP_DOMAIN_ID;
	else if (!strncmp(domain, "sdsp", 4))
		return SDSP_DOMAIN_ID;
	else if (!strncmp(domain, "gdsp", 4))
		return GDSP_DOMAIN_ID;

	return -EINVAL;
}

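/*
 * Probe a FastRPC channel: resolve the domain from the DT label, set up
 * optional VM permissions and reserved memory, register the misc
 * device(s) and populate the compute-bank child devices.
 */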
static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
{
	struct device *rdev = &rpdev->dev;
	struct fastrpc_channel_ctx *data;
	int i, err, domain_id = -1, vmcount;
	const char *domain;
	bool secure_dsp;
	unsigned int vmids[FASTRPC_MAX_VMIDS];

	err = of_property_read_string(rdev->of_node, "label", &domain);
	if (err) {
		dev_info(rdev, "FastRPC Domain not specified in DT\n");
		return err;
	}

	domain_id = fastrpc_get_domain_id(domain);

	if (domain_id < 0) {
		dev_info(rdev, "FastRPC Domain %s not supported\n", domain);
		return -EINVAL;
	}

	if (of_reserved_mem_device_init_by_idx(rdev, rdev->of_node, 0))
		dev_info(rdev, "no reserved DMA memory for FASTRPC\n");

	vmcount = of_property_read_variable_u32_array(rdev->of_node,
						      "qcom,vmids", &vmids[0], 0, FASTRPC_MAX_VMIDS);
	if (vmcount < 0)
		vmcount = 0;
	else if (!qcom_scm_is_available())
		return -EPROBE_DEFER;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	if (vmcount) {
		data->vmcount = vmcount;
		for (i = 0; i < data->vmcount; i++) {
			data->vmperms[i].vmid = vmids[i];
			data->vmperms[i].perm = QCOM_SCM_PERM_RWX;
		}
	}

	if (domain_id == SDSP_DOMAIN_ID) {
		struct resource res;
		u64 src_perms;

		err = of_reserved_mem_region_to_resource(rdev->of_node, 0, &res);
		if (!err) {
			src_perms = BIT(QCOM_SCM_VMID_HLOS);

			qcom_scm_assign_mem(res.start, resource_size(&res), &src_perms,
					    data->vmperms, data->vmcount);
		}
	}

	secure_dsp = !(of_property_read_bool(rdev->of_node, "qcom,non-secure-domain"));
	data->secure = secure_dsp;

	switch (domain_id) {
	case ADSP_DOMAIN_ID:
	case MDSP_DOMAIN_ID:
	case SDSP_DOMAIN_ID:
		/* Unsigned PD offloading is only supported on CDSP and GDSP */
		data->unsigned_support = false;
		err = fastrpc_device_register(rdev, data, secure_dsp, domain);
		if (err)
			goto err_free_data;
		break;
	case CDSP_DOMAIN_ID:
	case GDSP_DOMAIN_ID:
		data->unsigned_support = true;
		/* Create both device nodes so that we can allow both Signed and Unsigned PD */
		err = fastrpc_device_register(rdev, data, true, domain);
		if (err)
			goto err_free_data;

		err = fastrpc_device_register(rdev, data, false, domain);
		if (err)
			goto err_deregister_fdev;
		break;
	default:
		err = -EINVAL;
		goto err_free_data;
	}

	kref_init(&data->refcount);

	dev_set_drvdata(&rpdev->dev, data);
	rdev->dma_mask = &data->dma_mask;
	dma_set_mask_and_coherent(rdev, DMA_BIT_MASK(32));
	INIT_LIST_HEAD(&data->users);
	INIT_LIST_HEAD(&data->invoke_interrupted_mmaps);
	spin_lock_init(&data->lock);
	idr_init(&data->ctx_idr);
	data->domain_id = domain_id;
	data->rpdev = rpdev;

	err = of_platform_populate(rdev->of_node, NULL, NULL, rdev);
	if (err)
		goto err_deregister_fdev;

	return 0;

err_deregister_fdev:
	if (data->fdevice)
		misc_deregister(&data->fdevice->miscdev);
	if (data->secure_fdevice)
		misc_deregister(&data->secure_fdevice->miscdev);

err_free_data:
	kfree(data);
	return err;
}

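/* Complete all pending invocations of a user with -EPIPE on channel loss. */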
static void fastrpc_notify_users(struct fastrpc_user *user)
{
	struct fastrpc_invoke_ctx *ctx;

	spin_lock(&user->lock);
	list_for_each_entry(ctx, &user->pending, node) {
		ctx->retval = -EPIPE;
		complete(&ctx->work);
	}
	spin_unlock(&user->lock);
}

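/*
 * Tear down a FastRPC channel: stop new invocations, wake up pending
 * ones, unregister the misc devices and release channel resources.
 */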
static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
{
	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
	struct fastrpc_buf *buf, *b;
	struct fastrpc_user *user;
	unsigned long flags;

	/* No invocations past this point */
	spin_lock_irqsave(&cctx->lock, flags);
	cctx->rpdev = NULL;
	list_for_each_entry(user, &cctx->users, user)
		fastrpc_notify_users(user);
	spin_unlock_irqrestore(&cctx->lock, flags);

	if (cctx->fdevice)
		misc_deregister(&cctx->fdevice->miscdev);

	if (cctx->secure_fdevice)
		misc_deregister(&cctx->secure_fdevice->miscdev);

	list_for_each_entry_safe(buf, b, &cctx->invoke_interrupted_mmaps, node)
		list_del(&buf->node);

	if (cctx->remote_heap)
		fastrpc_buf_free(cctx->remote_heap);

	of_platform_depopulate(&rpdev->dev);

	fastrpc_channel_ctx_put(cctx);
}

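/*
 * rpmsg receive callback: match the response to a pending invoke context
 * by its ID, record the return value and wake the waiter.
 */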
static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
				  int len, void *priv, u32 addr)
{
	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
	struct fastrpc_invoke_rsp *rsp = data;
	struct fastrpc_invoke_ctx *ctx;
	unsigned long flags;
	unsigned long ctxid;

	if (len < sizeof(*rsp))
		return -EINVAL;

	ctxid = ((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);

	spin_lock_irqsave(&cctx->lock, flags);
	ctx = idr_find(&cctx->ctx_idr, ctxid);
	spin_unlock_irqrestore(&cctx->lock, flags);

	if (!ctx) {
		dev_err(&rpdev->dev, "No context ID matches response\n");
		return -ENOENT;
	}

	ctx->retval = rsp->retval;
	complete(&ctx->work);

	/*
	 * The DMA buffer associated with the context cannot be freed in
	 * interrupt context so schedule it through a worker thread to
	 * avoid a kernel BUG.
	 */
	schedule_work(&ctx->put_work);

	return 0;
}

static const struct of_device_id fastrpc_rpmsg_of_match[] = {
	{ .compatible = "qcom,fastrpc" },
	{ },
};
MODULE_DEVICE_TABLE(of, fastrpc_rpmsg_of_match);

static struct rpmsg_driver fastrpc_driver = {
	.probe = fastrpc_rpmsg_probe,
	.remove = fastrpc_rpmsg_remove,
	.callback = fastrpc_rpmsg_callback,
	.drv = {
		.name = "qcom,fastrpc",
		.of_match_table = fastrpc_rpmsg_of_match,
	},
};

static int fastrpc_init(void)
{
	int ret;

	ret = platform_driver_register(&fastrpc_cb_driver);
	if (ret < 0) {
		pr_err("fastrpc: failed to register cb driver\n");
		return ret;
	}

	ret = register_rpmsg_driver(&fastrpc_driver);
	if (ret < 0) {
		pr_err("fastrpc: failed to register rpmsg driver\n");
		platform_driver_unregister(&fastrpc_cb_driver);
		return ret;
	}

	return 0;
}
module_init(fastrpc_init);

static void fastrpc_exit(void)
{
	platform_driver_unregister(&fastrpc_cb_driver);
	unregister_rpmsg_driver(&fastrpc_driver);
}
module_exit(fastrpc_exit);

MODULE_DESCRIPTION("Qualcomm FastRPC");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS("DMA_BUF");