xref: /linux/drivers/tee/optee/smc_abi.c (revision a4eb44a6435d6d8f9e642407a4a06f65eb90ca04)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2015-2021, Linaro Limited
4  * Copyright (c) 2016, EPAM Systems
5  */
6 
7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8 
9 #include <linux/arm-smccc.h>
10 #include <linux/errno.h>
11 #include <linux/interrupt.h>
12 #include <linux/io.h>
13 #include <linux/irqdomain.h>
14 #include <linux/mm.h>
15 #include <linux/module.h>
16 #include <linux/of.h>
17 #include <linux/of_irq.h>
18 #include <linux/of_platform.h>
19 #include <linux/platform_device.h>
20 #include <linux/sched.h>
21 #include <linux/slab.h>
22 #include <linux/string.h>
23 #include <linux/tee_drv.h>
24 #include <linux/types.h>
25 #include <linux/workqueue.h>
26 #include "optee_private.h"
27 #include "optee_smc.h"
28 #include "optee_rpc_cmd.h"
29 #include <linux/kmemleak.h>
30 #define CREATE_TRACE_POINTS
31 #include "optee_trace.h"
32 
33 /*
 * This file implements the SMC ABI used when communicating with secure world
35  * OP-TEE OS via raw SMCs.
36  * This file is divided into the following sections:
37  * 1. Convert between struct tee_param and struct optee_msg_param
38  * 2. Low level support functions to register shared memory in secure world
39  * 3. Dynamic shared memory pool based on alloc_pages()
40  * 4. Do a normal scheduled call into secure world
41  * 5. Asynchronous notification
42  * 6. Driver initialization.
43  */
44 
45 #define OPTEE_SHM_NUM_PRIV_PAGES	CONFIG_OPTEE_SHM_NUM_PRIV_PAGES
46 
47 /*
48  * 1. Convert between struct tee_param and struct optee_msg_param
49  *
50  * optee_from_msg_param() and optee_to_msg_param() are the main
51  * functions.
52  */
53 
54 static int from_msg_param_tmp_mem(struct tee_param *p, u32 attr,
55 				  const struct optee_msg_param *mp)
56 {
57 	struct tee_shm *shm;
58 	phys_addr_t pa;
59 	int rc;
60 
61 	p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
62 		  attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
63 	p->u.memref.size = mp->u.tmem.size;
64 	shm = (struct tee_shm *)(unsigned long)mp->u.tmem.shm_ref;
65 	if (!shm) {
66 		p->u.memref.shm_offs = 0;
67 		p->u.memref.shm = NULL;
68 		return 0;
69 	}
70 
71 	rc = tee_shm_get_pa(shm, 0, &pa);
72 	if (rc)
73 		return rc;
74 
75 	p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
76 	p->u.memref.shm = shm;
77 
78 	return 0;
79 }
80 
81 static void from_msg_param_reg_mem(struct tee_param *p, u32 attr,
82 				   const struct optee_msg_param *mp)
83 {
84 	struct tee_shm *shm;
85 
86 	p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
87 		  attr - OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
88 	p->u.memref.size = mp->u.rmem.size;
89 	shm = (struct tee_shm *)(unsigned long)mp->u.rmem.shm_ref;
90 
91 	if (shm) {
92 		p->u.memref.shm_offs = mp->u.rmem.offs;
93 		p->u.memref.shm = shm;
94 	} else {
95 		p->u.memref.shm_offs = 0;
96 		p->u.memref.shm = NULL;
97 	}
98 }
99 
100 /**
101  * optee_from_msg_param() - convert from OPTEE_MSG parameters to
102  *			    struct tee_param
103  * @optee:	main service struct
104  * @params:	subsystem internal parameter representation
105  * @num_params:	number of elements in the parameter arrays
106  * @msg_params:	OPTEE_MSG parameters
107  * Returns 0 on success or <0 on failure
108  */
109 static int optee_from_msg_param(struct optee *optee, struct tee_param *params,
110 				size_t num_params,
111 				const struct optee_msg_param *msg_params)
112 {
113 	int rc;
114 	size_t n;
115 
116 	for (n = 0; n < num_params; n++) {
117 		struct tee_param *p = params + n;
118 		const struct optee_msg_param *mp = msg_params + n;
119 		u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK;
120 
121 		switch (attr) {
122 		case OPTEE_MSG_ATTR_TYPE_NONE:
123 			p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
124 			memset(&p->u, 0, sizeof(p->u));
125 			break;
126 		case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
127 		case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
128 		case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
129 			optee_from_msg_param_value(p, attr, mp);
130 			break;
131 		case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT:
132 		case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
133 		case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
134 			rc = from_msg_param_tmp_mem(p, attr, mp);
135 			if (rc)
136 				return rc;
137 			break;
138 		case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
139 		case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
140 		case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
141 			from_msg_param_reg_mem(p, attr, mp);
142 			break;
143 
144 		default:
145 			return -EINVAL;
146 		}
147 	}
148 	return 0;
149 }
150 
151 static int to_msg_param_tmp_mem(struct optee_msg_param *mp,
152 				const struct tee_param *p)
153 {
154 	int rc;
155 	phys_addr_t pa;
156 
157 	mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + p->attr -
158 		   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
159 
160 	mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
161 	mp->u.tmem.size = p->u.memref.size;
162 
163 	if (!p->u.memref.shm) {
164 		mp->u.tmem.buf_ptr = 0;
165 		return 0;
166 	}
167 
168 	rc = tee_shm_get_pa(p->u.memref.shm, p->u.memref.shm_offs, &pa);
169 	if (rc)
170 		return rc;
171 
172 	mp->u.tmem.buf_ptr = pa;
173 	mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
174 		    OPTEE_MSG_ATTR_CACHE_SHIFT;
175 
176 	return 0;
177 }
178 
179 static int to_msg_param_reg_mem(struct optee_msg_param *mp,
180 				const struct tee_param *p)
181 {
182 	mp->attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + p->attr -
183 		   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
184 
185 	mp->u.rmem.shm_ref = (unsigned long)p->u.memref.shm;
186 	mp->u.rmem.size = p->u.memref.size;
187 	mp->u.rmem.offs = p->u.memref.shm_offs;
188 	return 0;
189 }
190 
191 /**
192  * optee_to_msg_param() - convert from struct tee_params to OPTEE_MSG parameters
193  * @optee:	main service struct
194  * @msg_params:	OPTEE_MSG parameters
195  * @num_params:	number of elements in the parameter arrays
196  * @params:	subsystem itnernal parameter representation
197  * Returns 0 on success or <0 on failure
198  */
199 static int optee_to_msg_param(struct optee *optee,
200 			      struct optee_msg_param *msg_params,
201 			      size_t num_params, const struct tee_param *params)
202 {
203 	int rc;
204 	size_t n;
205 
206 	for (n = 0; n < num_params; n++) {
207 		const struct tee_param *p = params + n;
208 		struct optee_msg_param *mp = msg_params + n;
209 
210 		switch (p->attr) {
211 		case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
212 			mp->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
213 			memset(&mp->u, 0, sizeof(mp->u));
214 			break;
215 		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
216 		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
217 		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
218 			optee_to_msg_param_value(mp, p);
219 			break;
220 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
221 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
222 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
223 			if (tee_shm_is_registered(p->u.memref.shm))
224 				rc = to_msg_param_reg_mem(mp, p);
225 			else
226 				rc = to_msg_param_tmp_mem(mp, p);
227 			if (rc)
228 				return rc;
229 			break;
230 		default:
231 			return -EINVAL;
232 		}
233 	}
234 	return 0;
235 }
236 
237 /*
238  * 2. Low level support functions to register shared memory in secure world
239  *
240  * Functions to enable/disable shared memory caching in secure world, that
241  * is, lazy freeing of previously allocated shared memory. Freeing is
 * performed when a request has been completed.
243  *
244  * Functions to register and unregister shared memory both for normal
245  * clients and for tee-supplicant.
246  */
247 
248 /**
249  * optee_enable_shm_cache() - Enables caching of some shared memory allocation
250  *			      in OP-TEE
251  * @optee:	main service struct
252  */
253 static void optee_enable_shm_cache(struct optee *optee)
254 {
255 	struct optee_call_waiter w;
256 
257 	/* We need to retry until secure world isn't busy. */
258 	optee_cq_wait_init(&optee->call_queue, &w);
259 	while (true) {
260 		struct arm_smccc_res res;
261 
262 		optee->smc.invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE,
263 				     0, 0, 0, 0, 0, 0, 0, &res);
264 		if (res.a0 == OPTEE_SMC_RETURN_OK)
265 			break;
266 		optee_cq_wait_for_completion(&optee->call_queue, &w);
267 	}
268 	optee_cq_wait_final(&optee->call_queue, &w);
269 }
270 
271 /**
272  * __optee_disable_shm_cache() - Disables caching of some shared memory
273  *				 allocation in OP-TEE
274  * @optee:	main service struct
275  * @is_mapped:	true if the cached shared memory addresses were mapped by this
276  *		kernel, are safe to dereference, and should be freed
277  */
278 static void __optee_disable_shm_cache(struct optee *optee, bool is_mapped)
279 {
280 	struct optee_call_waiter w;
281 
282 	/* We need to retry until secure world isn't busy. */
283 	optee_cq_wait_init(&optee->call_queue, &w);
284 	while (true) {
285 		union {
286 			struct arm_smccc_res smccc;
287 			struct optee_smc_disable_shm_cache_result result;
288 		} res;
289 
290 		optee->smc.invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE,
291 				     0, 0, 0, 0, 0, 0, 0, &res.smccc);
292 		if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL)
293 			break; /* All shm's freed */
294 		if (res.result.status == OPTEE_SMC_RETURN_OK) {
295 			struct tee_shm *shm;
296 
297 			/*
298 			 * Shared memory references that were not mapped by
299 			 * this kernel must be ignored to prevent a crash.
300 			 */
301 			if (!is_mapped)
302 				continue;
303 
304 			shm = reg_pair_to_ptr(res.result.shm_upper32,
305 					      res.result.shm_lower32);
306 			tee_shm_free(shm);
307 		} else {
308 			optee_cq_wait_for_completion(&optee->call_queue, &w);
309 		}
310 	}
311 	optee_cq_wait_final(&optee->call_queue, &w);
312 }
313 
314 /**
315  * optee_disable_shm_cache() - Disables caching of mapped shared memory
316  *			       allocations in OP-TEE
317  * @optee:	main service struct
318  */
319 static void optee_disable_shm_cache(struct optee *optee)
320 {
321 	return __optee_disable_shm_cache(optee, true);
322 }
323 
324 /**
325  * optee_disable_unmapped_shm_cache() - Disables caching of shared memory
326  *					allocations in OP-TEE which are not
327  *					currently mapped
328  * @optee:	main service struct
329  */
330 static void optee_disable_unmapped_shm_cache(struct optee *optee)
331 {
332 	return __optee_disable_shm_cache(optee, false);
333 }
334 
/* Number of usable entries per 4k pagelist page, minus the link entry */
#define PAGELIST_ENTRIES_PER_PAGE				\
	((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)

/*
 * The final entry in each pagelist page is a pointer to the next
 * pagelist page.
 */
/* Return the byte size of a pagelist able to hold @num_entries entries */
static size_t get_pages_list_size(size_t num_entries)
{
	int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE);

	return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
}
348 
/*
 * Allocate a physically contiguous pagelist for @num_entries page
 * addresses; freed with optee_free_pages_list(). Returns NULL on failure.
 */
static u64 *optee_allocate_pages_list(size_t num_entries)
{
	return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL);
}
353 
/* Free a pagelist allocated with optee_allocate_pages_list() */
static void optee_free_pages_list(void *list, size_t num_entries)
{
	free_pages_exact(list, get_pages_list_size(num_entries));
}
358 
/**
 * optee_fill_pages_list() - write list of user pages to given shared
 * buffer.
 *
 * @dst: page-aligned buffer where list of pages will be stored
 * @pages: array of pages that represents shared buffer
 * @num_pages: number of entries in @pages
 * @page_offset: offset of user buffer from page start
 *
 * @dst should be big enough to hold list of user page addresses and
 *	links to the next pages of buffer
 */
static void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
				  size_t page_offset)
{
	int n = 0;
	phys_addr_t optee_page;
	/*
	 * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h
	 * for details.
	 */
	struct {
		u64 pages_list[PAGELIST_ENTRIES_PER_PAGE];
		u64 next_page_data;
	} *pages_data;

	/*
	 * Currently OP-TEE uses 4k page size and it does not look
	 * like this will change in the future.  On the other hand, there
	 * are no known ARM architectures with page size < 4k.
	 * Thus the next build assert looks redundant. But the following
	 * code heavily relies on this assumption, so it is better to be
	 * safe than sorry.
	 */
	BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE);

	pages_data = (void *)dst;
	/*
	 * If linux page is bigger than 4k, and user buffer offset is
	 * larger than 4k/8k/12k/etc this will skip first 4k pages,
	 * because they bear no value data for OP-TEE.
	 */
	optee_page = page_to_phys(*pages) +
		round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE);

	while (true) {
		pages_data->pages_list[n++] = optee_page;

		/* Pagelist page full: link to the next one and restart */
		if (n == PAGELIST_ENTRIES_PER_PAGE) {
			pages_data->next_page_data =
				virt_to_phys(pages_data + 1);
			pages_data++;
			n = 0;
		}

		/* Advance one 4k OP-TEE page within the current Linux page */
		optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE;
		if (!(optee_page & ~PAGE_MASK)) {
			/* Crossed a Linux page boundary: next struct page */
			if (!--num_pages)
				break;
			pages++;
			optee_page = page_to_phys(*pages);
		}
	}
}
423 
424 static int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
425 			      struct page **pages, size_t num_pages,
426 			      unsigned long start)
427 {
428 	struct optee *optee = tee_get_drvdata(ctx->teedev);
429 	struct optee_msg_arg *msg_arg;
430 	struct tee_shm *shm_arg;
431 	u64 *pages_list;
432 	int rc;
433 
434 	if (!num_pages)
435 		return -EINVAL;
436 
437 	rc = optee_check_mem_type(start, num_pages);
438 	if (rc)
439 		return rc;
440 
441 	pages_list = optee_allocate_pages_list(num_pages);
442 	if (!pages_list)
443 		return -ENOMEM;
444 
445 	shm_arg = optee_get_msg_arg(ctx, 1, &msg_arg);
446 	if (IS_ERR(shm_arg)) {
447 		rc = PTR_ERR(shm_arg);
448 		goto out;
449 	}
450 
451 	optee_fill_pages_list(pages_list, pages, num_pages,
452 			      tee_shm_get_page_offset(shm));
453 
454 	msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
455 	msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
456 				OPTEE_MSG_ATTR_NONCONTIG;
457 	msg_arg->params->u.tmem.shm_ref = (unsigned long)shm;
458 	msg_arg->params->u.tmem.size = tee_shm_get_size(shm);
459 	/*
460 	 * In the least bits of msg_arg->params->u.tmem.buf_ptr we
461 	 * store buffer offset from 4k page, as described in OP-TEE ABI.
462 	 */
463 	msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
464 	  (tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
465 
466 	if (optee->ops->do_call_with_arg(ctx, shm_arg) ||
467 	    msg_arg->ret != TEEC_SUCCESS)
468 		rc = -EINVAL;
469 
470 	tee_shm_free(shm_arg);
471 out:
472 	optee_free_pages_list(pages_list, num_pages);
473 	return rc;
474 }
475 
476 static int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
477 {
478 	struct optee *optee = tee_get_drvdata(ctx->teedev);
479 	struct optee_msg_arg *msg_arg;
480 	struct tee_shm *shm_arg;
481 	int rc = 0;
482 
483 	shm_arg = optee_get_msg_arg(ctx, 1, &msg_arg);
484 	if (IS_ERR(shm_arg))
485 		return PTR_ERR(shm_arg);
486 
487 	msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;
488 
489 	msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
490 	msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;
491 
492 	if (optee->ops->do_call_with_arg(ctx, shm_arg) ||
493 	    msg_arg->ret != TEEC_SUCCESS)
494 		rc = -EINVAL;
495 	tee_shm_free(shm_arg);
496 	return rc;
497 }
498 
/*
 * Supplicant variant of shm_register: only validates the memory type.
 *
 * We don't want to register supplicant memory in OP-TEE.
 * Instead information about it will be passed in RPC code.
 */
static int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
				   struct page **pages, size_t num_pages,
				   unsigned long start)
{
	return optee_check_mem_type(start, num_pages);
}
509 
/* Nothing was registered with secure world, so nothing to undo */
static int optee_shm_unregister_supp(struct tee_context *ctx,
				     struct tee_shm *shm)
{
	return 0;
}
515 
516 /*
517  * 3. Dynamic shared memory pool based on alloc_pages()
518  *
519  * Implements an OP-TEE specific shared memory pool which is used
520  * when dynamic shared memory is supported by secure world.
521  *
522  * The main function is optee_shm_pool_alloc_pages().
523  */
524 
525 static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
526 			 struct tee_shm *shm, size_t size)
527 {
528 	/*
529 	 * Shared memory private to the OP-TEE driver doesn't need
530 	 * to be registered with OP-TEE.
531 	 */
532 	if (shm->flags & TEE_SHM_PRIV)
533 		return optee_pool_op_alloc_helper(poolm, shm, size, NULL);
534 
535 	return optee_pool_op_alloc_helper(poolm, shm, size, optee_shm_register);
536 }
537 
538 static void pool_op_free(struct tee_shm_pool_mgr *poolm,
539 			 struct tee_shm *shm)
540 {
541 	if (!(shm->flags & TEE_SHM_PRIV))
542 		optee_shm_unregister(shm->ctx, shm);
543 
544 	free_pages((unsigned long)shm->kaddr, get_order(shm->size));
545 	shm->kaddr = NULL;
546 }
547 
/* Release the pool manager allocated by optee_shm_pool_alloc_pages() */
static void pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
{
	kfree(poolm);
}
552 
/* Callbacks for the page-based dynamic shared memory pool */
static const struct tee_shm_pool_mgr_ops pool_ops = {
	.alloc = pool_op_alloc,
	.free = pool_op_free,
	.destroy_poolmgr = pool_op_destroy_poolmgr,
};
558 
/**
 * optee_shm_pool_alloc_pages() - create page-based allocator pool
 *
 * This pool is used when OP-TEE supports dynamic SHM. In this case
 * command buffers and such are allocated from kernel's own memory.
 *
 * Returns a pool manager using pool_ops or ERR_PTR(-ENOMEM).
 */
static struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void)
{
	struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);

	if (!mgr)
		return ERR_PTR(-ENOMEM);

	mgr->ops = &pool_ops;

	return mgr;
}
576 
577 /*
578  * 4. Do a normal scheduled call into secure world
579  *
580  * The function optee_smc_do_call_with_arg() performs a normal scheduled
 * call into secure world. During this call the secure world may request
 * help from the normal world using RPCs, Remote Procedure Calls. This
 * includes delivery of non-secure interrupts to for instance allow
 * rescheduling of the current task.
585  */
586 
587 static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
588 					 struct optee_msg_arg *arg)
589 {
590 	struct tee_shm *shm;
591 
592 	arg->ret_origin = TEEC_ORIGIN_COMMS;
593 
594 	if (arg->num_params != 1 ||
595 	    arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
596 		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
597 		return;
598 	}
599 
600 	shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b;
601 	switch (arg->params[0].u.value.a) {
602 	case OPTEE_RPC_SHM_TYPE_APPL:
603 		optee_rpc_cmd_free_suppl(ctx, shm);
604 		break;
605 	case OPTEE_RPC_SHM_TYPE_KERNEL:
606 		tee_shm_free(shm);
607 		break;
608 	default:
609 		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
610 	}
611 	arg->ret = TEEC_SUCCESS;
612 }
613 
/*
 * Handle an OPTEE_RPC_CMD_SHM_ALLOC request from secure world: allocate
 * shared memory of the requested type and size and describe it back to
 * secure world in params[0], either as a noncontiguous pagelist (for
 * registered memory) or as a single physically contiguous buffer.
 * Ownership of a successfully built pagelist is recorded in @call_ctx
 * and released later by optee_rpc_finalize_call().
 */
static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
					  struct optee *optee,
					  struct optee_msg_arg *arg,
					  struct optee_call_ctx *call_ctx)
{
	phys_addr_t pa;
	struct tee_shm *shm;
	size_t sz;
	size_t n;

	arg->ret_origin = TEEC_ORIGIN_COMMS;

	/* First parameter must carry the requested type and size */
	if (!arg->num_params ||
	    arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	/* Any remaining parameters must be unused */
	for (n = 1; n < arg->num_params; n++) {
		if (arg->params[n].attr != OPTEE_MSG_ATTR_TYPE_NONE) {
			arg->ret = TEEC_ERROR_BAD_PARAMETERS;
			return;
		}
	}

	sz = arg->params[0].u.value.b;
	switch (arg->params[0].u.value.a) {
	case OPTEE_RPC_SHM_TYPE_APPL:
		/* Memory allocated by tee-supplicant in user space */
		shm = optee_rpc_cmd_alloc_suppl(ctx, sz);
		break;
	case OPTEE_RPC_SHM_TYPE_KERNEL:
		shm = tee_shm_alloc(optee->ctx, sz,
				    TEE_SHM_MAPPED | TEE_SHM_PRIV);
		break;
	default:
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	if (IS_ERR(shm)) {
		arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
		return;
	}

	if (tee_shm_get_pa(shm, 0, &pa)) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		goto bad;
	}

	sz = tee_shm_get_size(shm);

	if (tee_shm_is_registered(shm)) {
		/* Registered memory is described with a pagelist */
		struct page **pages;
		u64 *pages_list;
		size_t page_num;

		pages = tee_shm_get_pages(shm, &page_num);
		if (!pages || !page_num) {
			arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
			goto bad;
		}

		pages_list = optee_allocate_pages_list(page_num);
		if (!pages_list) {
			arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
			goto bad;
		}

		/* Freed by optee_rpc_finalize_call() or the next alloc */
		call_ctx->pages_list = pages_list;
		call_ctx->num_entries = page_num;

		arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
				      OPTEE_MSG_ATTR_NONCONTIG;
		/*
		 * In the least bits of u.tmem.buf_ptr we store buffer offset
		 * from 4k page, as described in OP-TEE ABI.
		 */
		arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) |
			(tee_shm_get_page_offset(shm) &
			 (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
		arg->params[0].u.tmem.size = tee_shm_get_size(shm);
		arg->params[0].u.tmem.shm_ref = (unsigned long)shm;

		optee_fill_pages_list(pages_list, pages, page_num,
				      tee_shm_get_page_offset(shm));
	} else {
		/* Contiguous memory is passed directly by physical address */
		arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
		arg->params[0].u.tmem.buf_ptr = pa;
		arg->params[0].u.tmem.size = sz;
		arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
	}

	arg->ret = TEEC_SUCCESS;
	return;
bad:
	tee_shm_free(shm);
}
711 
712 static void free_pages_list(struct optee_call_ctx *call_ctx)
713 {
714 	if (call_ctx->pages_list) {
715 		optee_free_pages_list(call_ctx->pages_list,
716 				      call_ctx->num_entries);
717 		call_ctx->pages_list = NULL;
718 		call_ctx->num_entries = 0;
719 	}
720 }
721 
/* Release per-call RPC resources when the secure world call is done */
static void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx)
{
	free_pages_list(call_ctx);
}
726 
/*
 * Dispatch an RPC command. The command and its parameters are carried in
 * the struct optee_msg_arg residing in @shm; SHM alloc/free are handled
 * here, everything else is forwarded to the common optee_rpc_cmd().
 */
static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
				struct tee_shm *shm,
				struct optee_call_ctx *call_ctx)
{
	struct optee_msg_arg *arg;

	arg = tee_shm_get_va(shm, 0);
	if (IS_ERR(arg)) {
		pr_err("%s: tee_shm_get_va %p failed\n", __func__, shm);
		return;
	}

	switch (arg->cmd) {
	case OPTEE_RPC_CMD_SHM_ALLOC:
		/* Drop any pagelist left over from a previous alloc */
		free_pages_list(call_ctx);
		handle_rpc_func_cmd_shm_alloc(ctx, optee, arg, call_ctx);
		break;
	case OPTEE_RPC_CMD_SHM_FREE:
		handle_rpc_func_cmd_shm_free(ctx, arg);
		break;
	default:
		optee_rpc_cmd(ctx, optee, arg);
	}
}
751 
/**
 * optee_handle_rpc() - handle RPC from secure world
 * @ctx:	context doing the RPC
 * @param:	value of registers for the RPC
 * @call_ctx:	call context. Preserved during one OP-TEE invocation
 *
 * Result of RPC is written back into @param.
 */
static void optee_handle_rpc(struct tee_context *ctx,
			     struct optee_rpc_param *param,
			     struct optee_call_ctx *call_ctx)
{
	struct tee_device *teedev = ctx->teedev;
	struct optee *optee = tee_get_drvdata(teedev);
	struct tee_shm *shm;
	phys_addr_t pa;

	switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
	case OPTEE_SMC_RPC_FUNC_ALLOC:
		/* Allocate driver-private shm and return its pa and handle */
		shm = tee_shm_alloc(optee->ctx, param->a1,
				    TEE_SHM_MAPPED | TEE_SHM_PRIV);
		if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
			reg_pair_from_64(&param->a1, &param->a2, pa);
			reg_pair_from_64(&param->a4, &param->a5,
					 (unsigned long)shm);
		} else {
			/* Zeroed registers tell secure world alloc failed */
			param->a1 = 0;
			param->a2 = 0;
			param->a4 = 0;
			param->a5 = 0;
		}
		/* The pointer escapes to secure world; not a leak */
		kmemleak_not_leak(shm);
		break;
	case OPTEE_SMC_RPC_FUNC_FREE:
		shm = reg_pair_to_ptr(param->a1, param->a2);
		tee_shm_free(shm);
		break;
	case OPTEE_SMC_RPC_FUNC_FOREIGN_INTR:
		/*
		 * A foreign interrupt was raised while secure world was
		 * executing, since they are handled in Linux a dummy RPC is
		 * performed to let Linux take the interrupt through the normal
		 * vector.
		 */
		break;
	case OPTEE_SMC_RPC_FUNC_CMD:
		shm = reg_pair_to_ptr(param->a1, param->a2);
		handle_rpc_func_cmd(ctx, optee, shm, call_ctx);
		break;
	default:
		pr_warn("Unknown RPC func 0x%x\n",
			(u32)OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0));
		break;
	}

	/* Resume the interrupted secure world call */
	param->a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC;
}
809 
/**
 * optee_smc_do_call_with_arg() - Do an SMC to OP-TEE in secure world
 * @ctx:	calling context
 * @arg:	shared memory holding the message to pass to secure world
 *
 * Does an SMC to OP-TEE in secure world and handles eventual resulting
 * Remote Procedure Calls (RPC) from OP-TEE.
 *
 * Returns return code from secure world, 0 is OK
 */
static int optee_smc_do_call_with_arg(struct tee_context *ctx,
				      struct tee_shm *arg)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_call_waiter w;
	struct optee_rpc_param param = { };
	struct optee_call_ctx call_ctx = { };
	phys_addr_t parg;
	int rc;

	/* Secure world addresses the message by physical address */
	rc = tee_shm_get_pa(arg, 0, &parg);
	if (rc)
		return rc;

	param.a0 = OPTEE_SMC_CALL_WITH_ARG;
	reg_pair_from_64(&param.a1, &param.a2, parg);
	/* Initialize waiter */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		struct arm_smccc_res res;

		trace_optee_invoke_fn_begin(&param);
		optee->smc.invoke_fn(param.a0, param.a1, param.a2, param.a3,
				     param.a4, param.a5, param.a6, param.a7,
				     &res);
		trace_optee_invoke_fn_end(&param, &res);

		if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
			/*
			 * Out of threads in secure world, wait for a thread
			 * become available.
			 */
			optee_cq_wait_for_completion(&optee->call_queue, &w);
		} else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
			/* Give other tasks a chance to run before re-entry */
			cond_resched();
			param.a0 = res.a0;
			param.a1 = res.a1;
			param.a2 = res.a2;
			param.a3 = res.a3;
			optee_handle_rpc(ctx, &param, &call_ctx);
		} else {
			/* Final return from secure world */
			rc = res.a0;
			break;
		}
	}

	optee_rpc_finalize_call(&call_ctx);
	/*
	 * We're done with our thread in secure world, if there's any
	 * thread waiters wake up one.
	 */
	optee_cq_wait_final(&optee->call_queue, &w);

	return rc;
}
875 
/*
 * Issue a parameter-less command @cmd to secure world.
 *
 * Returns <0 if the message argument couldn't be allocated, else 0.
 * NOTE(review): the return value of optee_smc_do_call_with_arg() is
 * deliberately(?) ignored here — callers get 0 even if the call itself
 * failed; confirm this best-effort behavior is intended.
 */
static int simple_call_with_arg(struct tee_context *ctx, u32 cmd)
{
	struct optee_msg_arg *msg_arg;
	struct tee_shm *shm;

	shm = optee_get_msg_arg(ctx, 0, &msg_arg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	msg_arg->cmd = cmd;
	optee_smc_do_call_with_arg(ctx, shm);

	tee_shm_free(shm);
	return 0;
}
891 
/* Let secure world deliver pending notification bottom halves */
static int optee_smc_do_bottom_half(struct tee_context *ctx)
{
	return simple_call_with_arg(ctx, OPTEE_MSG_CMD_DO_BOTTOM_HALF);
}
896 
/* Tell secure world to stop raising asynchronous notifications */
static int optee_smc_stop_async_notif(struct tee_context *ctx)
{
	return simple_call_with_arg(ctx, OPTEE_MSG_CMD_STOP_ASYNC_NOTIF);
}
901 
902 /*
903  * 5. Asynchronous notification
904  */
905 
906 static u32 get_async_notif_value(optee_invoke_fn *invoke_fn, bool *value_valid,
907 				 bool *value_pending)
908 {
909 	struct arm_smccc_res res;
910 
911 	invoke_fn(OPTEE_SMC_GET_ASYNC_NOTIF_VALUE, 0, 0, 0, 0, 0, 0, 0, &res);
912 
913 	if (res.a0)
914 		return 0;
915 	*value_valid = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_VALID);
916 	*value_pending = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_PENDING);
917 	return res.a1;
918 }
919 
920 static irqreturn_t notif_irq_handler(int irq, void *dev_id)
921 {
922 	struct optee *optee = dev_id;
923 	bool do_bottom_half = false;
924 	bool value_valid;
925 	bool value_pending;
926 	u32 value;
927 
928 	do {
929 		value = get_async_notif_value(optee->smc.invoke_fn,
930 					      &value_valid, &value_pending);
931 		if (!value_valid)
932 			break;
933 
934 		if (value == OPTEE_SMC_ASYNC_NOTIF_VALUE_DO_BOTTOM_HALF)
935 			do_bottom_half = true;
936 		else
937 			optee_notif_send(optee, value);
938 	} while (value_pending);
939 
940 	if (do_bottom_half)
941 		return IRQ_WAKE_THREAD;
942 	return IRQ_HANDLED;
943 }
944 
/* Threaded part of the notification interrupt: run the bottom half */
static irqreturn_t notif_irq_thread_fn(int irq, void *dev_id)
{
	struct optee *optee = dev_id;

	optee_smc_do_bottom_half(optee->ctx);

	return IRQ_HANDLED;
}
953 
954 static int optee_smc_notif_init_irq(struct optee *optee, u_int irq)
955 {
956 	int rc;
957 
958 	rc = request_threaded_irq(irq, notif_irq_handler,
959 				  notif_irq_thread_fn,
960 				  0, "optee_notification", optee);
961 	if (rc)
962 		return rc;
963 
964 	optee->smc.notif_irq = irq;
965 
966 	return 0;
967 }
968 
969 static void optee_smc_notif_uninit_irq(struct optee *optee)
970 {
971 	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) {
972 		optee_smc_stop_async_notif(optee->ctx);
973 		if (optee->smc.notif_irq) {
974 			free_irq(optee->smc.notif_irq, optee);
975 			irq_dispose_mapping(optee->smc.notif_irq);
976 		}
977 	}
978 }
979 
980 /*
981  * 6. Driver initialization
982  *
 * During driver initialization the secure world is probed to find out
 * which features it supports so the driver can be initialized with a
 * matching configuration. This involves for instance support for
 * dynamic shared memory instead of a static memory carveout.
987  */
988 
989 static void optee_get_version(struct tee_device *teedev,
990 			      struct tee_ioctl_version_data *vers)
991 {
992 	struct tee_ioctl_version_data v = {
993 		.impl_id = TEE_IMPL_ID_OPTEE,
994 		.impl_caps = TEE_OPTEE_CAP_TZ,
995 		.gen_caps = TEE_GEN_CAP_GP,
996 	};
997 	struct optee *optee = tee_get_drvdata(teedev);
998 
999 	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
1000 		v.gen_caps |= TEE_GEN_CAP_REG_MEM;
1001 	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL)
1002 		v.gen_caps |= TEE_GEN_CAP_MEMREF_NULL;
1003 	*vers = v;
1004 }
1005 
1006 static int optee_smc_open(struct tee_context *ctx)
1007 {
1008 	struct optee *optee = tee_get_drvdata(ctx->teedev);
1009 	u32 sec_caps = optee->smc.sec_caps;
1010 
1011 	return optee_open(ctx, sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL);
1012 }
1013 
/* Operations exposed to normal (unprivileged) client contexts */
static const struct tee_driver_ops optee_clnt_ops = {
	.get_version = optee_get_version,
	.open = optee_smc_open,
	.release = optee_release,
	.open_session = optee_open_session,
	.close_session = optee_close_session,
	.invoke_func = optee_invoke_func,
	.cancel_req = optee_cancel_req,
	.shm_register = optee_shm_register,
	.shm_unregister = optee_shm_unregister,
};

static const struct tee_desc optee_clnt_desc = {
	.name = DRIVER_NAME "-clnt",
	.ops = &optee_clnt_ops,
	.owner = THIS_MODULE,
};

/* Operations exposed to the privileged tee-supplicant context */
static const struct tee_driver_ops optee_supp_ops = {
	.get_version = optee_get_version,
	.open = optee_smc_open,
	.release = optee_release_supp,
	.supp_recv = optee_supp_recv,
	.supp_send = optee_supp_send,
	.shm_register = optee_shm_register_supp,
	.shm_unregister = optee_shm_unregister_supp,
};

static const struct tee_desc optee_supp_desc = {
	.name = DRIVER_NAME "-supp",
	.ops = &optee_supp_ops,
	.owner = THIS_MODULE,
	.flags = TEE_DESC_PRIVILEGED,
};

/* SMC-ABI-specific callbacks used by the common OP-TEE driver code */
static const struct optee_ops optee_ops = {
	.do_call_with_arg = optee_smc_do_call_with_arg,
	.to_msg_param = optee_to_msg_param,
	.from_msg_param = optee_from_msg_param,
};
1054 
1055 static int enable_async_notif(optee_invoke_fn *invoke_fn)
1056 {
1057 	struct arm_smccc_res res;
1058 
1059 	invoke_fn(OPTEE_SMC_ENABLE_ASYNC_NOTIF, 0, 0, 0, 0, 0, 0, 0, &res);
1060 
1061 	if (res.a0)
1062 		return -EINVAL;
1063 	return 0;
1064 }
1065 
1066 static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn)
1067 {
1068 	struct arm_smccc_res res;
1069 
1070 	invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res);
1071 
1072 	if (res.a0 == OPTEE_MSG_UID_0 && res.a1 == OPTEE_MSG_UID_1 &&
1073 	    res.a2 == OPTEE_MSG_UID_2 && res.a3 == OPTEE_MSG_UID_3)
1074 		return true;
1075 	return false;
1076 }
1077 
1078 static void optee_msg_get_os_revision(optee_invoke_fn *invoke_fn)
1079 {
1080 	union {
1081 		struct arm_smccc_res smccc;
1082 		struct optee_smc_call_get_os_revision_result result;
1083 	} res = {
1084 		.result = {
1085 			.build_id = 0
1086 		}
1087 	};
1088 
1089 	invoke_fn(OPTEE_SMC_CALL_GET_OS_REVISION, 0, 0, 0, 0, 0, 0, 0,
1090 		  &res.smccc);
1091 
1092 	if (res.result.build_id)
1093 		pr_info("revision %lu.%lu (%08lx)", res.result.major,
1094 			res.result.minor, res.result.build_id);
1095 	else
1096 		pr_info("revision %lu.%lu", res.result.major, res.result.minor);
1097 }
1098 
1099 static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn)
1100 {
1101 	union {
1102 		struct arm_smccc_res smccc;
1103 		struct optee_smc_calls_revision_result result;
1104 	} res;
1105 
1106 	invoke_fn(OPTEE_SMC_CALLS_REVISION, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
1107 
1108 	if (res.result.major == OPTEE_MSG_REVISION_MAJOR &&
1109 	    (int)res.result.minor >= OPTEE_MSG_REVISION_MINOR)
1110 		return true;
1111 	return false;
1112 }
1113 
1114 static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
1115 					    u32 *sec_caps, u32 *max_notif_value)
1116 {
1117 	union {
1118 		struct arm_smccc_res smccc;
1119 		struct optee_smc_exchange_capabilities_result result;
1120 	} res;
1121 	u32 a1 = 0;
1122 
1123 	/*
1124 	 * TODO This isn't enough to tell if it's UP system (from kernel
1125 	 * point of view) or not, is_smp() returns the information
1126 	 * needed, but can't be called directly from here.
1127 	 */
1128 	if (!IS_ENABLED(CONFIG_SMP) || nr_cpu_ids == 1)
1129 		a1 |= OPTEE_SMC_NSEC_CAP_UNIPROCESSOR;
1130 
1131 	invoke_fn(OPTEE_SMC_EXCHANGE_CAPABILITIES, a1, 0, 0, 0, 0, 0, 0,
1132 		  &res.smccc);
1133 
1134 	if (res.result.status != OPTEE_SMC_RETURN_OK)
1135 		return false;
1136 
1137 	*sec_caps = res.result.capabilities;
1138 	if (*sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF)
1139 		*max_notif_value = res.result.max_notif_value;
1140 	else
1141 		*max_notif_value = OPTEE_DEFAULT_MAX_NOTIF_VALUE;
1142 
1143 	return true;
1144 }
1145 
/*
 * Build a shared memory pool backed by alloc_pages(): one manager for
 * driver-private buffers and one for dma-buf style client allocations.
 * Returns the pool or an ERR_PTR(); partially created managers are
 * destroyed on failure.
 */
static struct tee_shm_pool *optee_config_dyn_shm(void)
{
	struct tee_shm_pool_mgr *priv_mgr;
	struct tee_shm_pool_mgr *dmabuf_mgr;
	void *res;

	res = optee_shm_pool_alloc_pages();
	if (IS_ERR(res))
		return res;
	priv_mgr = res;

	res = optee_shm_pool_alloc_pages();
	if (IS_ERR(res))
		goto err_free_priv;
	dmabuf_mgr = res;

	res = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
	if (!IS_ERR(res))
		return res;

	tee_shm_pool_mgr_destroy(dmabuf_mgr);
err_free_priv:
	tee_shm_pool_mgr_destroy(priv_mgr);
	return res;
}
1172 
/*
 * Configure the statically reserved shared memory region advertised by
 * secure world via OPTEE_SMC_GET_SHM_CONFIG.  The region is memremap()ed
 * and split into two pools: a small driver-private pool of
 * OPTEE_SHM_NUM_PRIV_PAGES pages and a page-aligned pool for client
 * (dma-buf) allocations covering the rest.
 *
 * On success *memremaped_shm is set to the mapping so the caller can
 * memunmap() it at remove time.  Returns the pool or an ERR_PTR().
 */
static struct tee_shm_pool *
optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
{
	union {
		struct arm_smccc_res smccc;
		struct optee_smc_get_shm_config_result result;
	} res;
	unsigned long vaddr;
	phys_addr_t paddr;
	size_t size;
	phys_addr_t begin;
	phys_addr_t end;
	void *va;
	struct tee_shm_pool_mgr *priv_mgr;
	struct tee_shm_pool_mgr *dmabuf_mgr;
	void *rc;
	/* Size of the driver-private part of the region */
	const int sz = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;

	invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
	if (res.result.status != OPTEE_SMC_RETURN_OK) {
		pr_err("static shm service not available\n");
		return ERR_PTR(-ENOENT);
	}

	if (res.result.settings != OPTEE_SMC_SHM_CACHED) {
		pr_err("only normal cached shared memory supported\n");
		return ERR_PTR(-EINVAL);
	}

	/* Shrink the region to whole pages */
	begin = roundup(res.result.start, PAGE_SIZE);
	end = rounddown(res.result.start + res.result.size, PAGE_SIZE);
	paddr = begin;
	size = end - begin;

	/* Need room for both the private pool and at least as much again */
	if (size < 2 * OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE) {
		pr_err("too small shared memory area\n");
		return ERR_PTR(-EINVAL);
	}

	va = memremap(paddr, size, MEMREMAP_WB);
	if (!va) {
		pr_err("shared memory ioremap failed\n");
		return ERR_PTR(-EINVAL);
	}
	vaddr = (unsigned long)va;

	/* First sz bytes: private pool for driver message buffers */
	rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, sz,
					    3 /* 8 bytes aligned */);
	if (IS_ERR(rc))
		goto err_memunmap;
	priv_mgr = rc;

	/* Remainder: page-aligned pool for client allocations */
	vaddr += sz;
	paddr += sz;
	size -= sz;

	rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, size, PAGE_SHIFT);
	if (IS_ERR(rc))
		goto err_free_priv_mgr;
	dmabuf_mgr = rc;

	rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
	if (IS_ERR(rc))
		goto err_free_dmabuf_mgr;

	*memremaped_shm = va;

	return rc;

err_free_dmabuf_mgr:
	tee_shm_pool_mgr_destroy(dmabuf_mgr);
err_free_priv_mgr:
	tee_shm_pool_mgr_destroy(priv_mgr);
err_memunmap:
	memunmap(va);
	return rc;
}
1250 
1251 /* Simple wrapper functions to be able to use a function pointer */
/* Invoke secure world using the SMC instruction conduit. */
static void optee_smccc_smc(unsigned long a0, unsigned long a1,
			    unsigned long a2, unsigned long a3,
			    unsigned long a4, unsigned long a5,
			    unsigned long a6, unsigned long a7,
			    struct arm_smccc_res *res)
{
	arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res);
}
1260 
/* Invoke secure world using the HVC instruction conduit (via hypervisor). */
static void optee_smccc_hvc(unsigned long a0, unsigned long a1,
			    unsigned long a2, unsigned long a3,
			    unsigned long a4, unsigned long a5,
			    unsigned long a6, unsigned long a7,
			    struct arm_smccc_res *res)
{
	arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
}
1269 
1270 static optee_invoke_fn *get_invoke_func(struct device *dev)
1271 {
1272 	const char *method;
1273 
1274 	pr_info("probing for conduit method.\n");
1275 
1276 	if (device_property_read_string(dev, "method", &method)) {
1277 		pr_warn("missing \"method\" property\n");
1278 		return ERR_PTR(-ENXIO);
1279 	}
1280 
1281 	if (!strcmp("hvc", method))
1282 		return optee_smccc_hvc;
1283 	else if (!strcmp("smc", method))
1284 		return optee_smccc_smc;
1285 
1286 	pr_warn("invalid \"method\" property: %s\n", method);
1287 	return ERR_PTR(-EINVAL);
1288 }
1289 
1290 /* optee_remove - Device Removal Routine
1291  * @pdev: platform device information struct
1292  *
1293  * optee_remove is called by platform subsystem to alert the driver
1294  * that it should release the device
1295  */
static int optee_smc_remove(struct platform_device *pdev)
{
	struct optee *optee = platform_get_drvdata(pdev);

	/*
	 * Ask OP-TEE to free all cached shared memory objects to decrease
	 * reference counters and also avoid wild pointers in secure world
	 * into the old shared memory range.
	 */
	optee_disable_shm_cache(optee);

	/* Stop async notification delivery before tearing down common state */
	optee_smc_notif_uninit_irq(optee);

	optee_remove_common(optee);

	/* Unmap the static shared memory region, if one was configured */
	if (optee->smc.memremaped_shm)
		memunmap(optee->smc.memremaped_shm);

	kfree(optee);

	return 0;
}
1318 
1319 /* optee_shutdown - Device Removal Routine
1320  * @pdev: platform device information struct
1321  *
1322  * platform_shutdown is called by the platform subsystem to alert
1323  * the driver that a shutdown, reboot, or kexec is happening and
1324  * device must be disabled.
1325  */
static void optee_shutdown(struct platform_device *pdev)
{
	struct optee *optee = platform_get_drvdata(pdev);

	/* Drop cached shm objects so secure world holds no stale pointers */
	optee_disable_shm_cache(optee);
}
1330 
/*
 * optee_probe - probe the OP-TEE driver against a matching DT/ACPI node
 * @pdev: platform device for the "linaro,optee-tz" node
 *
 * Verifies the firmware is a compatible OP-TEE, negotiates capabilities,
 * sets up a shared memory pool (dynamic if supported, otherwise the
 * static region), registers the client and supplicant tee devices,
 * initializes notifications (including the async notification IRQ when
 * advertised) and finally enumerates TEE-provided devices.
 *
 * Returns 0 on success or a negative errno; on failure everything set up
 * so far is unwound via the goto ladder at the end.
 */
static int optee_probe(struct platform_device *pdev)
{
	optee_invoke_fn *invoke_fn;
	struct tee_shm_pool *pool = ERR_PTR(-EINVAL);
	struct optee *optee = NULL;
	void *memremaped_shm = NULL;
	struct tee_device *teedev;
	struct tee_context *ctx;
	u32 max_notif_value;
	u32 sec_caps;
	int rc;

	invoke_fn = get_invoke_func(&pdev->dev);
	if (IS_ERR(invoke_fn))
		return PTR_ERR(invoke_fn);

	if (!optee_msg_api_uid_is_optee_api(invoke_fn)) {
		pr_warn("api uid mismatch\n");
		return -EINVAL;
	}

	optee_msg_get_os_revision(invoke_fn);

	if (!optee_msg_api_revision_is_compatible(invoke_fn)) {
		pr_warn("api revision mismatch\n");
		return -EINVAL;
	}

	if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps,
					     &max_notif_value)) {
		pr_warn("capabilities mismatch\n");
		return -EINVAL;
	}

	/*
	 * Try to use dynamic shared memory if possible
	 */
	if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
		pool = optee_config_dyn_shm();

	/*
	 * If dynamic shared memory is not available or failed - try static one
	 */
	if (IS_ERR(pool) && (sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
		pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);

	if (IS_ERR(pool))
		return PTR_ERR(pool);

	optee = kzalloc(sizeof(*optee), GFP_KERNEL);
	if (!optee) {
		rc = -ENOMEM;
		goto err_free_pool;
	}

	optee->ops = &optee_ops;
	optee->smc.invoke_fn = invoke_fn;
	optee->smc.sec_caps = sec_caps;

	/* Client device first, then the supplicant device */
	teedev = tee_device_alloc(&optee_clnt_desc, NULL, pool, optee);
	if (IS_ERR(teedev)) {
		rc = PTR_ERR(teedev);
		goto err_free_optee;
	}
	optee->teedev = teedev;

	teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee);
	if (IS_ERR(teedev)) {
		rc = PTR_ERR(teedev);
		goto err_unreg_teedev;
	}
	optee->supp_teedev = teedev;

	rc = tee_device_register(optee->teedev);
	if (rc)
		goto err_unreg_supp_teedev;

	rc = tee_device_register(optee->supp_teedev);
	if (rc)
		goto err_unreg_supp_teedev;

	mutex_init(&optee->call_queue.mutex);
	INIT_LIST_HEAD(&optee->call_queue.waiters);
	optee_supp_init(&optee->supp);
	optee->smc.memremaped_shm = memremaped_shm;
	optee->pool = pool;

	platform_set_drvdata(pdev, optee);
	/* Driver-internal context, used e.g. for notification RPC buffers */
	ctx = teedev_open(optee->teedev);
	if (IS_ERR(ctx)) {
		rc = PTR_ERR(ctx);
		goto err_supp_uninit;
	}
	optee->ctx = ctx;
	rc = optee_notif_init(optee, max_notif_value);
	if (rc)
		goto err_close_ctx;

	/* Wire up the async notification interrupt when secure world has it */
	if (sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) {
		unsigned int irq;

		rc = platform_get_irq(pdev, 0);
		if (rc < 0) {
			pr_err("platform_get_irq: ret %d\n", rc);
			goto err_notif_uninit;
		}
		irq = rc;

		rc = optee_smc_notif_init_irq(optee, irq);
		if (rc) {
			irq_dispose_mapping(irq);
			goto err_notif_uninit;
		}
		enable_async_notif(optee->smc.invoke_fn);
		pr_info("Asynchronous notifications enabled\n");
	}

	/*
	 * Ensure that there are no pre-existing shm objects before enabling
	 * the shm cache so that there's no chance of receiving an invalid
	 * address during shutdown. This could occur, for example, if we're
	 * kexec booting from an older kernel that did not properly cleanup the
	 * shm cache.
	 */
	optee_disable_unmapped_shm_cache(optee);

	optee_enable_shm_cache(optee);

	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
		pr_info("dynamic shared memory is enabled\n");

	rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
	if (rc)
		goto err_disable_shm_cache;

	pr_info("initialized driver\n");
	return 0;

err_disable_shm_cache:
	optee_disable_shm_cache(optee);
	optee_smc_notif_uninit_irq(optee);
	optee_unregister_devices();
err_notif_uninit:
	optee_notif_uninit(optee);
err_close_ctx:
	teedev_close_context(ctx);
err_supp_uninit:
	optee_supp_uninit(&optee->supp);
	mutex_destroy(&optee->call_queue.mutex);
err_unreg_supp_teedev:
	tee_device_unregister(optee->supp_teedev);
err_unreg_teedev:
	tee_device_unregister(optee->teedev);
err_free_optee:
	kfree(optee);
err_free_pool:
	tee_shm_pool_free(pool);
	if (memremaped_shm)
		memunmap(memremaped_shm);
	return rc;
}
1492 
/* Device tree match table: the firmware node advertising OP-TEE. */
static const struct of_device_id optee_dt_match[] = {
	{ .compatible = "linaro,optee-tz" },
	{},
};
MODULE_DEVICE_TABLE(of, optee_dt_match);
1498 
/* Platform driver for the SMC-based OP-TEE ABI. */
static struct platform_driver optee_driver = {
	.probe  = optee_probe,
	.remove = optee_smc_remove,
	.shutdown = optee_shutdown,
	.driver = {
		.name = "optee",
		.of_match_table = optee_dt_match,
	},
};
1508 
/* Register the SMC ABI platform driver; called from the core module init. */
int optee_smc_abi_register(void)
{
	return platform_driver_register(&optee_driver);
}
1513 
/* Unregister the SMC ABI platform driver; called from the core module exit. */
void optee_smc_abi_unregister(void)
{
	platform_driver_unregister(&optee_driver);
}
1518