xref: /linux/drivers/tee/tee_core.c (revision dcc7a571a3665a16581b5b18ca6b113f60a9a41a)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2015-2016, Linaro Limited
4  */
5 
6 #define pr_fmt(fmt) "%s: " fmt, __func__
7 
8 #include <linux/cdev.h>
9 #include <linux/cred.h>
10 #include <linux/fs.h>
11 #include <linux/idr.h>
12 #include <linux/module.h>
13 #include <linux/overflow.h>
14 #include <linux/slab.h>
15 #include <linux/tee_core.h>
16 #include <linux/uaccess.h>
17 #include <crypto/hash.h>
18 #include <crypto/sha1.h>
19 #include "tee_private.h"
20 
/* Max number of TEE character devices (unprivileged + privileged halves) */
#define TEE_NUM_DEVICES	32

/* Byte size of an array of (x) tee_param structs, saturating on overflow */
#define TEE_IOCTL_PARAM_SIZE(x) (size_mul(sizeof(struct tee_param), (x)))

/* Buffer size for the "uid=%x" / "gid=%x" UUIDv5 name strings */
#define TEE_UUID_NS_NAME_SIZE	128

/*
 * TEE Client UUID name space identifier (UUIDv4)
 *
 * Value here is random UUID that is allocated as name space identifier for
 * forming Client UUID's for TEE environment using UUIDv5 scheme.
 */
static const uuid_t tee_client_uuid_ns = UUID_INIT(0x58ac9ca0, 0x2086, 0x4683,
						   0xa1, 0xb8, 0xec, 0x4b,
						   0xc0, 0x8e, 0x01, 0xb6);

/*
 * Unprivileged devices in the lower half range and privileged devices in
 * the upper half range.
 */
static DECLARE_BITMAP(dev_mask, TEE_NUM_DEVICES);
static DEFINE_SPINLOCK(driver_lock);

static const struct class tee_class;
static dev_t tee_devt;
46 
47 struct tee_context *teedev_open(struct tee_device *teedev)
48 {
49 	int rc;
50 	struct tee_context *ctx;
51 
52 	if (!tee_device_get(teedev))
53 		return ERR_PTR(-EINVAL);
54 
55 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
56 	if (!ctx) {
57 		rc = -ENOMEM;
58 		goto err;
59 	}
60 
61 	kref_init(&ctx->refcount);
62 	ctx->teedev = teedev;
63 	rc = teedev->desc->ops->open(ctx);
64 	if (rc)
65 		goto err;
66 
67 	return ctx;
68 err:
69 	kfree(ctx);
70 	tee_device_put(teedev);
71 	return ERR_PTR(rc);
72 
73 }
74 EXPORT_SYMBOL_GPL(teedev_open);
75 
76 void teedev_ctx_get(struct tee_context *ctx)
77 {
78 	if (ctx->releasing)
79 		return;
80 
81 	kref_get(&ctx->refcount);
82 }
83 EXPORT_SYMBOL_GPL(teedev_ctx_get);
84 
/*
 * Final kref release: run the driver's per-context release callback and
 * free the context. ->releasing is set first so that teedev_ctx_get() and
 * teedev_ctx_put() become no-ops while the callback runs.
 */
static void teedev_ctx_release(struct kref *ref)
{
	struct tee_context *ctx = container_of(ref, struct tee_context,
					       refcount);
	ctx->releasing = true;
	ctx->teedev->desc->ops->release(ctx);
	kfree(ctx);
}
93 
94 void teedev_ctx_put(struct tee_context *ctx)
95 {
96 	if (ctx->releasing)
97 		return;
98 
99 	kref_put(&ctx->refcount, teedev_ctx_release);
100 }
101 EXPORT_SYMBOL_GPL(teedev_ctx_put);
102 
103 void teedev_close_context(struct tee_context *ctx)
104 {
105 	struct tee_device *teedev = ctx->teedev;
106 
107 	if (teedev->desc->ops->close_context)
108 		teedev->desc->ops->close_context(ctx);
109 
110 	teedev_ctx_put(ctx);
111 	tee_device_put(teedev);
112 }
113 EXPORT_SYMBOL_GPL(teedev_close_context);
114 
115 static int tee_open(struct inode *inode, struct file *filp)
116 {
117 	struct tee_context *ctx;
118 
119 	ctx = teedev_open(container_of(inode->i_cdev, struct tee_device, cdev));
120 	if (IS_ERR(ctx))
121 		return PTR_ERR(ctx);
122 
123 	/*
124 	 * Default user-space behaviour is to wait for tee-supplicant
125 	 * if not present for any requests in this context.
126 	 */
127 	ctx->supp_nowait = false;
128 	filp->private_data = ctx;
129 	return 0;
130 }
131 
132 static int tee_release(struct inode *inode, struct file *filp)
133 {
134 	teedev_close_context(filp->private_data);
135 	return 0;
136 }
137 
/**
 * uuid_v5() - Calculate UUIDv5
 * @uuid: Resulting UUID
 * @ns: Name space ID for UUIDv5 function
 * @name: Name for UUIDv5 function
 * @size: Size of name
 *
 * UUIDv5 is specified in RFC 4122.
 *
 * This implements section (for SHA-1):
 * 4.3.  Algorithm for Creating a Name-Based UUID
 */
static int uuid_v5(uuid_t *uuid, const uuid_t *ns, const void *name,
		   size_t size)
{
	unsigned char hash[SHA1_DIGEST_SIZE];
	struct crypto_shash *shash = NULL;
	struct shash_desc *desc = NULL;
	int rc;

	shash = crypto_alloc_shash("sha1", 0, 0);
	if (IS_ERR(shash)) {
		rc = PTR_ERR(shash);
		pr_err("shash(sha1) allocation failed\n");
		return rc;
	}

	/* The descriptor must carry the transform's private state after it */
	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(shash),
		       GFP_KERNEL);
	if (!desc) {
		rc = -ENOMEM;
		goto out_free_shash;
	}

	desc->tfm = shash;

	rc = crypto_shash_init(desc);
	if (rc < 0)
		goto out_free_desc;

	/* Per RFC 4122: hash the name space ID followed by the name */
	rc = crypto_shash_update(desc, (const u8 *)ns, sizeof(*ns));
	if (rc < 0)
		goto out_free_desc;

	rc = crypto_shash_update(desc, (const u8 *)name, size);
	if (rc < 0)
		goto out_free_desc;

	rc = crypto_shash_final(desc, hash);
	if (rc < 0)
		goto out_free_desc;

	/* The UUID is the first 16 bytes of the 20-byte SHA-1 digest */
	memcpy(uuid->b, hash, UUID_SIZE);

	/* Tag for version 5 */
	uuid->b[6] = (hash[6] & 0x0F) | 0x50;
	uuid->b[8] = (hash[8] & 0x3F) | 0x80;

out_free_desc:
	kfree(desc);

out_free_shash:
	crypto_free_shash(shash);
	return rc;
}
203 
/**
 * tee_session_calc_client_uuid() - Derive the client UUID for a session
 * @uuid:		Resulting client UUID
 * @connection_method:	TEE_IOCTL_LOGIN_* method requested by the client
 * @connection_data:	Method-specific data; for TEE_IOCTL_LOGIN_GROUP the
 *			first sizeof(gid_t) bytes hold the requested GID
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EPERM when the
 * caller is not a member of the requested group, -E2BIG on name overflow
 * or -EINVAL for an unsupported connection method.
 */
int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method,
				 const u8 connection_data[TEE_IOCTL_UUID_LEN])
{
	gid_t ns_grp = (gid_t)-1;
	kgid_t grp = INVALID_GID;
	char *name = NULL;
	int name_len;
	int rc;

	if (connection_method == TEE_IOCTL_LOGIN_PUBLIC ||
	    connection_method == TEE_IOCTL_LOGIN_REE_KERNEL) {
		/* Nil UUID to be passed to TEE environment */
		uuid_copy(uuid, &uuid_null);
		return 0;
	}

	/*
	 * In Linux environment client UUID is based on UUIDv5.
	 *
	 * Determine client UUID with following semantics for 'name':
	 *
	 * For TEEC_LOGIN_USER:
	 * uid=<uid>
	 *
	 * For TEEC_LOGIN_GROUP:
	 * gid=<gid>
	 *
	 */

	name = kzalloc(TEE_UUID_NS_NAME_SIZE, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	switch (connection_method) {
	case TEE_IOCTL_LOGIN_USER:
		/* Name is built from the caller's effective UID */
		name_len = snprintf(name, TEE_UUID_NS_NAME_SIZE, "uid=%x",
				    current_euid().val);
		if (name_len >= TEE_UUID_NS_NAME_SIZE) {
			rc = -E2BIG;
			goto out_free_name;
		}
		break;

	case TEE_IOCTL_LOGIN_GROUP:
		memcpy(&ns_grp, connection_data, sizeof(gid_t));
		/* Map into the caller's user namespace and check membership */
		grp = make_kgid(current_user_ns(), ns_grp);
		if (!gid_valid(grp) || !in_egroup_p(grp)) {
			rc = -EPERM;
			goto out_free_name;
		}

		name_len = snprintf(name, TEE_UUID_NS_NAME_SIZE, "gid=%x",
				    grp.val);
		if (name_len >= TEE_UUID_NS_NAME_SIZE) {
			rc = -E2BIG;
			goto out_free_name;
		}
		break;

	default:
		rc = -EINVAL;
		goto out_free_name;
	}

	rc = uuid_v5(uuid, &tee_client_uuid_ns, name, name_len);
out_free_name:
	kfree(name);

	return rc;
}
EXPORT_SYMBOL_GPL(tee_session_calc_client_uuid);
275 
276 static int tee_ioctl_version(struct tee_context *ctx,
277 			     struct tee_ioctl_version_data __user *uvers)
278 {
279 	struct tee_ioctl_version_data vers;
280 
281 	ctx->teedev->desc->ops->get_version(ctx->teedev, &vers);
282 
283 	if (ctx->teedev->desc->flags & TEE_DESC_PRIVILEGED)
284 		vers.gen_caps |= TEE_GEN_CAP_PRIVILEGED;
285 
286 	if (copy_to_user(uvers, &vers, sizeof(vers)))
287 		return -EFAULT;
288 
289 	return 0;
290 }
291 
292 static int tee_ioctl_shm_alloc(struct tee_context *ctx,
293 			       struct tee_ioctl_shm_alloc_data __user *udata)
294 {
295 	long ret;
296 	struct tee_ioctl_shm_alloc_data data;
297 	struct tee_shm *shm;
298 
299 	if (copy_from_user(&data, udata, sizeof(data)))
300 		return -EFAULT;
301 
302 	/* Currently no input flags are supported */
303 	if (data.flags)
304 		return -EINVAL;
305 
306 	shm = tee_shm_alloc_user_buf(ctx, data.size);
307 	if (IS_ERR(shm))
308 		return PTR_ERR(shm);
309 
310 	data.id = shm->id;
311 	data.size = shm->size;
312 
313 	if (copy_to_user(udata, &data, sizeof(data)))
314 		ret = -EFAULT;
315 	else
316 		ret = tee_shm_get_fd(shm);
317 
318 	/*
319 	 * When user space closes the file descriptor the shared memory
320 	 * should be freed or if tee_shm_get_fd() failed then it will
321 	 * be freed immediately.
322 	 */
323 	tee_shm_put(shm);
324 	return ret;
325 }
326 
327 static int
328 tee_ioctl_shm_register(struct tee_context *ctx,
329 		       struct tee_ioctl_shm_register_data __user *udata)
330 {
331 	long ret;
332 	struct tee_ioctl_shm_register_data data;
333 	struct tee_shm *shm;
334 
335 	if (copy_from_user(&data, udata, sizeof(data)))
336 		return -EFAULT;
337 
338 	/* Currently no input flags are supported */
339 	if (data.flags)
340 		return -EINVAL;
341 
342 	shm = tee_shm_register_user_buf(ctx, data.addr, data.length);
343 	if (IS_ERR(shm))
344 		return PTR_ERR(shm);
345 
346 	data.id = shm->id;
347 	data.length = shm->size;
348 
349 	if (copy_to_user(udata, &data, sizeof(data)))
350 		ret = -EFAULT;
351 	else
352 		ret = tee_shm_get_fd(shm);
353 	/*
354 	 * When user space closes the file descriptor the shared memory
355 	 * should be freed or if tee_shm_get_fd() failed then it will
356 	 * be freed immediately.
357 	 */
358 	tee_shm_put(shm);
359 	return ret;
360 }
361 
362 static int
363 tee_ioctl_shm_register_fd(struct tee_context *ctx,
364 			  struct tee_ioctl_shm_register_fd_data __user *udata)
365 {
366 	struct tee_ioctl_shm_register_fd_data data;
367 	struct tee_shm *shm;
368 	long ret;
369 
370 	if (copy_from_user(&data, udata, sizeof(data)))
371 		return -EFAULT;
372 
373 	/* Currently no input flags are supported */
374 	if (data.flags)
375 		return -EINVAL;
376 
377 	shm = tee_shm_register_fd(ctx, data.fd);
378 	if (IS_ERR(shm))
379 		return -EINVAL;
380 
381 	data.id = shm->id;
382 	data.flags = shm->flags;
383 	data.size = shm->size;
384 
385 	if (copy_to_user(udata, &data, sizeof(data)))
386 		ret = -EFAULT;
387 	else
388 		ret = tee_shm_get_fd(shm);
389 
390 	/*
391 	 * When user space closes the file descriptor the shared memory
392 	 * should be freed or if tee_shm_get_fd() failed then it will
393 	 * be freed immediately.
394 	 */
395 	tee_shm_put(shm);
396 	return ret;
397 }
398 
/*
 * param_from_user_memref() - Resolve one user-supplied memref parameter
 * @ctx:	Context the shared memory ID is looked up in
 * @memref:	Filled in with shm pointer, offset and size on success
 * @ip:		Ioctl parameter: @a is the offset, @b the size and @c the
 *		shm ID (or TEE_MEMREF_NULL)
 *
 * On success a non-NULL memref->shm carries an increased ref count which
 * the caller must drop with tee_shm_put().
 */
static int param_from_user_memref(struct tee_context *ctx,
				  struct tee_param_memref *memref,
				  struct tee_ioctl_param *ip)
{
	struct tee_shm *shm;
	size_t offs = 0;

	/*
	 * If a NULL pointer is passed to a TA in the TEE,
	 * the ip.c IOCTL parameters is set to TEE_MEMREF_NULL
	 * indicating a NULL memory reference.
	 */
	if (ip->c != TEE_MEMREF_NULL) {
		/*
		 * If we fail to get a pointer to a shared
		 * memory object (and increase the ref count)
		 * from an identifier we return an error. All
		 * pointers that has been added in params have
		 * an increased ref count. It's the callers
		 * responibility to do tee_shm_put() on all
		 * resolved pointers.
		 */
		shm = tee_shm_get_from_id(ctx, ip->c);
		if (IS_ERR(shm))
			return PTR_ERR(shm);

		/*
		 * Ensure offset + size does not overflow
		 * offset and does not overflow the size of
		 * the referred shared memory object.
		 */
		if ((ip->a + ip->b) < ip->a ||
		    (ip->a + ip->b) > shm->size) {
			tee_shm_put(shm);
			return -EINVAL;
		}

		if (shm->flags & TEE_SHM_DMA_BUF) {
			struct tee_shm_dmabuf_ref *ref;

			ref = container_of(shm, struct tee_shm_dmabuf_ref, shm);
			if (ref->parent_shm) {
				/*
				 * The shm already has one reference to
				 * ref->parent_shm so we are clear of 0.
				 * We're getting another reference since
				 * this shm will be used in the parameter
				 * list instead of the shm we got with
				 * tee_shm_get_from_id() above.
				 */
				refcount_inc(&ref->parent_shm->refcount);
				tee_shm_put(shm);
				shm = ref->parent_shm;
				offs = ref->offset;
			}
		}
	} else if (ctx->cap_memref_null) {
		/* Pass NULL pointer to OP-TEE */
		shm = NULL;
	} else {
		return -EINVAL;
	}

	/* Offset within the (possibly substituted parent) shm object */
	memref->shm_offs = ip->a + offs;
	memref->size = ip->b;
	memref->shm = shm;

	return 0;
}
468 
/*
 * params_from_user() - Convert ioctl parameters to driver parameters
 * @ctx:	Context used to resolve shared memory IDs
 * @params:	Output array of @num_params driver parameters
 * @num_params:	Number of elements in both arrays
 * @uparams:	User-space array of ioctl parameters
 *
 * On return (success or failure) every resolved memref param carries a
 * reference on its shm which the caller must drop with tee_shm_put().
 */
static int params_from_user(struct tee_context *ctx, struct tee_param *params,
			    size_t num_params,
			    struct tee_ioctl_param __user *uparams)
{
	size_t n;

	for (n = 0; n < num_params; n++) {
		struct tee_ioctl_param ip;
		int rc;

		if (copy_from_user(&ip, uparams + n, sizeof(ip)))
			return -EFAULT;

		/* All unused attribute bits has to be zero */
		if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_MASK)
			return -EINVAL;

		params[n].attr = ip.attr;
		switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT:
			/* Pure outputs carry no input data */
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			params[n].u.value.a = ip.a;
			params[n].u.value.b = ip.b;
			params[n].u.value.c = ip.c;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INOUT:
			params[n].u.ubuf.uaddr = u64_to_user_ptr(ip.a);
			params[n].u.ubuf.size = ip.b;

			/* Reject user buffers outside the user address range */
			if (!access_ok(params[n].u.ubuf.uaddr,
				       params[n].u.ubuf.size))
				return -EFAULT;

			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INOUT:
			params[n].u.objref.id = ip.a;
			params[n].u.objref.flags = ip.b;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			rc = param_from_user_memref(ctx, &params[n].u.memref,
						    &ip);
			if (rc)
				return rc;
			break;
		default:
			/* Unknown attribute */
			return -EINVAL;
		}
	}
	return 0;
}
529 
/*
 * params_to_user() - Copy output/inout parameter results back to user space
 * @uparams:	Destination user-space parameter array
 * @num_params:	Number of elements in both arrays
 * @params:	Driver parameters updated by the TEE
 *
 * Only the fields a TA may legitimately update are written back; input-only
 * and unknown attributes are silently skipped.
 */
static int params_to_user(struct tee_ioctl_param __user *uparams,
			  size_t num_params, struct tee_param *params)
{
	size_t n;

	for (n = 0; n < num_params; n++) {
		struct tee_ioctl_param __user *up = uparams + n;
		struct tee_param *p = params + n;

		/*
		 * NOTE(review): attr is matched without masking with
		 * TEE_IOCTL_PARAM_ATTR_TYPE_MASK (unlike params_from_user()),
		 * so params with extra attribute bits set fall through to
		 * default — presumably intentional for meta params; confirm.
		 */
		switch (p->attr) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			if (put_user(p->u.value.a, &up->a) ||
			    put_user(p->u.value.b, &up->b) ||
			    put_user(p->u.value.c, &up->c))
				return -EFAULT;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INOUT:
			/* Only the size may change for user buffers */
			if (put_user((u64)p->u.ubuf.size, &up->b))
				return -EFAULT;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INOUT:
			if (put_user(p->u.objref.id, &up->a) ||
			    put_user(p->u.objref.flags, &up->b))
				return -EFAULT;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			/* Only the size may change for memrefs */
			if (put_user((u64)p->u.memref.size, &up->b))
				return -EFAULT;
			break;
		default:
			break;
		}
	}
	return 0;
}
569 
/*
 * tee_ioctl_open_session() - TEE_IOC_OPEN_SESSION handler
 * @ctx:	Caller's context
 * @ubuf:	Describes a user buffer holding a tee_ioctl_open_session_arg
 *		directly followed by the parameter array
 *
 * Returns 0 when the arguments were exchanged successfully (the TEE's own
 * result is reported in uarg->ret), or a negative errno.
 */
static int tee_ioctl_open_session(struct tee_context *ctx,
				  struct tee_ioctl_buf_data __user *ubuf)
{
	int rc;
	size_t n;
	struct tee_ioctl_buf_data buf;
	struct tee_ioctl_open_session_arg __user *uarg;
	struct tee_ioctl_open_session_arg arg;
	struct tee_ioctl_param __user *uparams = NULL;
	struct tee_param *params = NULL;
	bool have_session = false;

	if (!ctx->teedev->desc->ops->open_session)
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, sizeof(buf)))
		return -EFAULT;

	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
	    buf.buf_len < sizeof(struct tee_ioctl_open_session_arg))
		return -EINVAL;

	uarg = u64_to_user_ptr(buf.buf_ptr);
	if (copy_from_user(&arg, uarg, sizeof(arg)))
		return -EFAULT;

	/* Buffer must hold the header plus exactly num_params parameters */
	if (size_add(sizeof(arg), TEE_IOCTL_PARAM_SIZE(arg.num_params)) != buf.buf_len)
		return -EINVAL;

	if (arg.num_params) {
		params = kcalloc(arg.num_params, sizeof(struct tee_param),
				 GFP_KERNEL);
		if (!params)
			return -ENOMEM;
		uparams = uarg->params;
		rc = params_from_user(ctx, params, arg.num_params, uparams);
		if (rc)
			goto out;
	}

	/* The REE-kernel login range is reserved for in-kernel clients */
	if (arg.clnt_login >= TEE_IOCTL_LOGIN_REE_KERNEL_MIN &&
	    arg.clnt_login <= TEE_IOCTL_LOGIN_REE_KERNEL_MAX) {
		pr_debug("login method not allowed for user-space client\n");
		rc = -EPERM;
		goto out;
	}

	rc = ctx->teedev->desc->ops->open_session(ctx, &arg, params);
	if (rc)
		goto out;
	have_session = true;

	if (put_user(arg.session, &uarg->session) ||
	    put_user(arg.ret, &uarg->ret) ||
	    put_user(arg.ret_origin, &uarg->ret_origin)) {
		rc = -EFAULT;
		goto out;
	}
	rc = params_to_user(uparams, arg.num_params, params);
out:
	/*
	 * If we've succeeded to open the session but failed to communicate
	 * it back to user space, close the session again to avoid leakage.
	 */
	if (rc && have_session && ctx->teedev->desc->ops->close_session)
		ctx->teedev->desc->ops->close_session(ctx, arg.session);

	if (params) {
		/* Decrease ref count for all valid shared memory pointers */
		for (n = 0; n < arg.num_params; n++)
			if (tee_param_is_memref(params + n) &&
			    params[n].u.memref.shm)
				tee_shm_put(params[n].u.memref.shm);
		kfree(params);
	}

	return rc;
}
648 
/*
 * tee_ioctl_invoke() - TEE_IOC_INVOKE handler
 * @ctx:	Caller's context
 * @ubuf:	Describes a user buffer holding a tee_ioctl_invoke_arg
 *		directly followed by the parameter array
 *
 * Returns 0 when the arguments were exchanged successfully (the TEE's own
 * result is reported in uarg->ret), or a negative errno.
 */
static int tee_ioctl_invoke(struct tee_context *ctx,
			    struct tee_ioctl_buf_data __user *ubuf)
{
	int rc;
	size_t n;
	struct tee_ioctl_buf_data buf;
	struct tee_ioctl_invoke_arg __user *uarg;
	struct tee_ioctl_invoke_arg arg;
	struct tee_ioctl_param __user *uparams = NULL;
	struct tee_param *params = NULL;

	if (!ctx->teedev->desc->ops->invoke_func)
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, sizeof(buf)))
		return -EFAULT;

	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
	    buf.buf_len < sizeof(struct tee_ioctl_invoke_arg))
		return -EINVAL;

	uarg = u64_to_user_ptr(buf.buf_ptr);
	if (copy_from_user(&arg, uarg, sizeof(arg)))
		return -EFAULT;

	/* Buffer must hold the header plus exactly num_params parameters */
	if (size_add(sizeof(arg), TEE_IOCTL_PARAM_SIZE(arg.num_params)) != buf.buf_len)
		return -EINVAL;

	if (arg.num_params) {
		params = kcalloc(arg.num_params, sizeof(struct tee_param),
				 GFP_KERNEL);
		if (!params)
			return -ENOMEM;
		uparams = uarg->params;
		rc = params_from_user(ctx, params, arg.num_params, uparams);
		if (rc)
			goto out;
	}

	rc = ctx->teedev->desc->ops->invoke_func(ctx, &arg, params);
	if (rc)
		goto out;

	if (put_user(arg.ret, &uarg->ret) ||
	    put_user(arg.ret_origin, &uarg->ret_origin)) {
		rc = -EFAULT;
		goto out;
	}
	rc = params_to_user(uparams, arg.num_params, params);
out:
	if (params) {
		/* Decrease ref count for all valid shared memory pointers */
		for (n = 0; n < arg.num_params; n++)
			if (tee_param_is_memref(params + n) &&
			    params[n].u.memref.shm)
				tee_shm_put(params[n].u.memref.shm);
		kfree(params);
	}
	return rc;
}
709 
710 static int tee_ioctl_object_invoke(struct tee_context *ctx,
711 				   struct tee_ioctl_buf_data __user *ubuf)
712 {
713 	int rc;
714 	size_t n;
715 	struct tee_ioctl_buf_data buf;
716 	struct tee_ioctl_object_invoke_arg __user *uarg;
717 	struct tee_ioctl_object_invoke_arg arg;
718 	struct tee_ioctl_param __user *uparams = NULL;
719 	struct tee_param *params = NULL;
720 
721 	if (!ctx->teedev->desc->ops->object_invoke_func)
722 		return -EINVAL;
723 
724 	if (copy_from_user(&buf, ubuf, sizeof(buf)))
725 		return -EFAULT;
726 
727 	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
728 	    buf.buf_len < sizeof(struct tee_ioctl_object_invoke_arg))
729 		return -EINVAL;
730 
731 	uarg = u64_to_user_ptr(buf.buf_ptr);
732 	if (copy_from_user(&arg, uarg, sizeof(arg)))
733 		return -EFAULT;
734 
735 	if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len)
736 		return -EINVAL;
737 
738 	if (arg.num_params) {
739 		params = kcalloc(arg.num_params, sizeof(struct tee_param),
740 				 GFP_KERNEL);
741 		if (!params)
742 			return -ENOMEM;
743 		uparams = uarg->params;
744 		rc = params_from_user(ctx, params, arg.num_params, uparams);
745 		if (rc)
746 			goto out;
747 	}
748 
749 	rc = ctx->teedev->desc->ops->object_invoke_func(ctx, &arg, params);
750 	if (rc)
751 		goto out;
752 
753 	if (put_user(arg.ret, &uarg->ret)) {
754 		rc = -EFAULT;
755 		goto out;
756 	}
757 	rc = params_to_user(uparams, arg.num_params, params);
758 out:
759 	if (params) {
760 		/* Decrease ref count for all valid shared memory pointers */
761 		for (n = 0; n < arg.num_params; n++)
762 			if (tee_param_is_memref(params + n) &&
763 			    params[n].u.memref.shm)
764 				tee_shm_put(params[n].u.memref.shm);
765 		kfree(params);
766 	}
767 	return rc;
768 }
769 
770 static int tee_ioctl_cancel(struct tee_context *ctx,
771 			    struct tee_ioctl_cancel_arg __user *uarg)
772 {
773 	struct tee_ioctl_cancel_arg arg;
774 
775 	if (!ctx->teedev->desc->ops->cancel_req)
776 		return -EINVAL;
777 
778 	if (copy_from_user(&arg, uarg, sizeof(arg)))
779 		return -EFAULT;
780 
781 	return ctx->teedev->desc->ops->cancel_req(ctx, arg.cancel_id,
782 						  arg.session);
783 }
784 
785 static int
786 tee_ioctl_close_session(struct tee_context *ctx,
787 			struct tee_ioctl_close_session_arg __user *uarg)
788 {
789 	struct tee_ioctl_close_session_arg arg;
790 
791 	if (!ctx->teedev->desc->ops->close_session)
792 		return -EINVAL;
793 
794 	if (copy_from_user(&arg, uarg, sizeof(arg)))
795 		return -EFAULT;
796 
797 	return ctx->teedev->desc->ops->close_session(ctx, arg.session);
798 }
799 
/*
 * params_to_supp() - Copy request parameters out to the supplicant
 * @ctx:	Caller's context (currently unused beyond the signature)
 * @uparams:	Destination user-space parameter array
 * @num_params:	Number of elements in both arrays
 * @params:	Driver parameters describing the request
 */
static int params_to_supp(struct tee_context *ctx,
			  struct tee_ioctl_param __user *uparams,
			  size_t num_params, struct tee_param *params)
{
	size_t n;

	for (n = 0; n < num_params; n++) {
		struct tee_ioctl_param ip;
		struct tee_param *p = params + n;

		ip.attr = p->attr;
		switch (p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			ip.a = p->u.value.a;
			ip.b = p->u.value.b;
			ip.c = p->u.value.c;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INOUT:
			ip.a = (__force unsigned long)p->u.ubuf.uaddr;
			ip.b = p->u.ubuf.size;
			ip.c = 0;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INOUT:
			ip.a = p->u.objref.id;
			ip.b = p->u.objref.flags;
			ip.c = 0;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			ip.b = p->u.memref.size;
			if (!p->u.memref.shm) {
				ip.a = 0;
				ip.c = (u64)-1; /* invalid shm id */
				break;
			}
			ip.a = p->u.memref.shm_offs;
			ip.c = p->u.memref.shm->id;
			break;
		default:
			/* Don't leak stack contents for unknown attrs */
			ip.a = 0;
			ip.b = 0;
			ip.c = 0;
			break;
		}

		if (copy_to_user(uparams + n, &ip, sizeof(ip)))
			return -EFAULT;
	}

	return 0;
}
856 
/*
 * tee_ioctl_supp_recv() - TEE_IOC_SUPPL_RECV handler
 * @ctx:	Supplicant's context
 * @ubuf:	Describes a user buffer holding a tee_iocl_supp_recv_arg
 *		directly followed by the parameter array
 *
 * Blocks in the driver until a request for the supplicant arrives.
 * uarg->num_params is both input (size of the supplied parameter array)
 * and output (number of parameters used by the received request).
 */
static int tee_ioctl_supp_recv(struct tee_context *ctx,
			       struct tee_ioctl_buf_data __user *ubuf)
{
	int rc;
	struct tee_ioctl_buf_data buf;
	struct tee_iocl_supp_recv_arg __user *uarg;
	struct tee_param *params;
	u32 num_params;
	u32 func;

	if (!ctx->teedev->desc->ops->supp_recv)
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, sizeof(buf)))
		return -EFAULT;

	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
	    buf.buf_len < sizeof(struct tee_iocl_supp_recv_arg))
		return -EINVAL;

	uarg = u64_to_user_ptr(buf.buf_ptr);
	if (get_user(num_params, &uarg->num_params))
		return -EFAULT;

	/* Buffer must hold the header plus exactly num_params parameters */
	if (size_add(sizeof(*uarg), TEE_IOCTL_PARAM_SIZE(num_params)) != buf.buf_len)
		return -EINVAL;

	params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	rc = params_from_user(ctx, params, num_params, uarg->params);
	if (rc)
		goto out;

	/* Driver may shrink num_params to what the request actually uses */
	rc = ctx->teedev->desc->ops->supp_recv(ctx, &func, &num_params, params);
	if (rc)
		goto out;

	if (put_user(func, &uarg->func) ||
	    put_user(num_params, &uarg->num_params)) {
		rc = -EFAULT;
		goto out;
	}

	rc = params_to_supp(ctx, uarg->params, num_params, params);
out:
	kfree(params);
	return rc;
}
907 
/*
 * params_from_supp() - Read the supplicant's response parameters
 * @params:	Output array of @num_params driver parameters
 * @num_params:	Number of elements in both arrays
 * @uparams:	User-space parameter array written by the supplicant
 *
 * Only output and in/out parameters can carry updated data; everything
 * else is zeroed. No shm references are taken here.
 */
static int params_from_supp(struct tee_param *params, size_t num_params,
			    struct tee_ioctl_param __user *uparams)
{
	size_t n;

	for (n = 0; n < num_params; n++) {
		struct tee_param *p = params + n;
		struct tee_ioctl_param ip;

		if (copy_from_user(&ip, uparams + n, sizeof(ip)))
			return -EFAULT;

		/* All unused attribute bits has to be zero */
		if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_MASK)
			return -EINVAL;

		p->attr = ip.attr;
		switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			/* Only out and in/out values can be updated */
			p->u.value.a = ip.a;
			p->u.value.b = ip.b;
			p->u.value.c = ip.c;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INOUT:
			p->u.ubuf.uaddr = u64_to_user_ptr(ip.a);
			p->u.ubuf.size = ip.b;

			/* Reject user buffers outside the user address range */
			if (!access_ok(params[n].u.ubuf.uaddr,
				       params[n].u.ubuf.size))
				return -EFAULT;

			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INOUT:
			p->u.objref.id = ip.a;
			p->u.objref.flags = ip.b;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			/*
			 * Only the size of the memref can be updated.
			 * Since we don't have access to the original
			 * parameters here, only store the supplied size.
			 * The driver will copy the updated size into the
			 * original parameters.
			 */
			p->u.memref.shm = NULL;
			p->u.memref.shm_offs = 0;
			p->u.memref.size = ip.b;
			break;
		default:
			memset(&p->u, 0, sizeof(p->u));
			break;
		}
	}
	return 0;
}
968 
/*
 * tee_ioctl_supp_send() - TEE_IOC_SUPPL_SEND handler
 * @ctx:	Supplicant's context
 * @ubuf:	Describes a user buffer holding a tee_iocl_supp_send_arg
 *		directly followed by the parameter array
 *
 * Hands the supplicant's response (return code plus parameters) back to
 * the driver to complete an earlier TEE_IOC_SUPPL_RECV request.
 */
static int tee_ioctl_supp_send(struct tee_context *ctx,
			       struct tee_ioctl_buf_data __user *ubuf)
{
	long rc;
	struct tee_ioctl_buf_data buf;
	struct tee_iocl_supp_send_arg __user *uarg;
	struct tee_param *params;
	u32 num_params;
	u32 ret;

	/* Not valid for this driver */
	if (!ctx->teedev->desc->ops->supp_send)
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, sizeof(buf)))
		return -EFAULT;

	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
	    buf.buf_len < sizeof(struct tee_iocl_supp_send_arg))
		return -EINVAL;

	uarg = u64_to_user_ptr(buf.buf_ptr);
	if (get_user(ret, &uarg->ret) ||
	    get_user(num_params, &uarg->num_params))
		return -EFAULT;

	/*
	 * NOTE(review): unlike the other handlers this uses '>' rather
	 * than '!=', i.e. the buffer may be larger than needed —
	 * presumably because num_params may have shrunk since the matching
	 * SUPPL_RECV; confirm.
	 */
	if (size_add(sizeof(*uarg), TEE_IOCTL_PARAM_SIZE(num_params)) > buf.buf_len)
		return -EINVAL;

	params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	rc = params_from_supp(params, num_params, uarg->params);
	if (rc)
		goto out;

	rc = ctx->teedev->desc->ops->supp_send(ctx, ret, num_params, params);
out:
	kfree(params);
	return rc;
}
1011 
1012 static long tee_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1013 {
1014 	struct tee_context *ctx = filp->private_data;
1015 	void __user *uarg = (void __user *)arg;
1016 
1017 	switch (cmd) {
1018 	case TEE_IOC_VERSION:
1019 		return tee_ioctl_version(ctx, uarg);
1020 	case TEE_IOC_SHM_ALLOC:
1021 		return tee_ioctl_shm_alloc(ctx, uarg);
1022 	case TEE_IOC_SHM_REGISTER:
1023 		return tee_ioctl_shm_register(ctx, uarg);
1024 	case TEE_IOC_SHM_REGISTER_FD:
1025 		return tee_ioctl_shm_register_fd(ctx, uarg);
1026 	case TEE_IOC_OPEN_SESSION:
1027 		return tee_ioctl_open_session(ctx, uarg);
1028 	case TEE_IOC_INVOKE:
1029 		return tee_ioctl_invoke(ctx, uarg);
1030 	case TEE_IOC_OBJECT_INVOKE:
1031 		return tee_ioctl_object_invoke(ctx, uarg);
1032 	case TEE_IOC_CANCEL:
1033 		return tee_ioctl_cancel(ctx, uarg);
1034 	case TEE_IOC_CLOSE_SESSION:
1035 		return tee_ioctl_close_session(ctx, uarg);
1036 	case TEE_IOC_SUPPL_RECV:
1037 		return tee_ioctl_supp_recv(ctx, uarg);
1038 	case TEE_IOC_SUPPL_SEND:
1039 		return tee_ioctl_supp_send(ctx, uarg);
1040 	default:
1041 		return -EINVAL;
1042 	}
1043 }
1044 
/* File operations for both /dev/tee<n> and /dev/teepriv<n> devices */
static const struct file_operations tee_fops = {
	.owner = THIS_MODULE,
	.open = tee_open,
	.release = tee_release,
	.unlocked_ioctl = tee_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
};
1052 
1053 static void tee_release_device(struct device *dev)
1054 {
1055 	struct tee_device *teedev = container_of(dev, struct tee_device, dev);
1056 
1057 	spin_lock(&driver_lock);
1058 	clear_bit(teedev->id, dev_mask);
1059 	spin_unlock(&driver_lock);
1060 	mutex_destroy(&teedev->mutex);
1061 	idr_destroy(&teedev->idr);
1062 	kfree(teedev);
1063 }
1064 
/**
 * tee_device_alloc() - Allocate a new struct tee_device instance
 * @teedesc:	Descriptor for this driver
 * @dev:	Parent device for this device
 * @pool:	Shared memory pool, NULL if not used
 * @driver_data: Private driver data for this device
 *
 * Allocates a new struct tee_device instance. The device is
 * removed by tee_device_unregister().
 *
 * @returns a pointer to a 'struct tee_device' or an ERR_PTR on failure
 */
struct tee_device *tee_device_alloc(const struct tee_desc *teedesc,
				    struct device *dev,
				    struct tee_shm_pool *pool,
				    void *driver_data)
{
	struct tee_device *teedev;
	void *ret;
	int rc, max_id;
	int offs = 0;

	if (!teedesc || !teedesc->name || !teedesc->ops ||
	    !teedesc->ops->get_version || !teedesc->ops->open ||
	    !teedesc->ops->release)
		return ERR_PTR(-EINVAL);

	teedev = kzalloc(sizeof(*teedev), GFP_KERNEL);
	if (!teedev) {
		ret = ERR_PTR(-ENOMEM);
		goto err;
	}

	/* Unprivileged IDs in the lower half, privileged in the upper half */
	max_id = TEE_NUM_DEVICES / 2;

	if (teedesc->flags & TEE_DESC_PRIVILEGED) {
		offs = TEE_NUM_DEVICES / 2;
		max_id = TEE_NUM_DEVICES;
	}

	spin_lock(&driver_lock);
	teedev->id = find_next_zero_bit(dev_mask, max_id, offs);
	if (teedev->id < max_id)
		set_bit(teedev->id, dev_mask);
	spin_unlock(&driver_lock);

	if (teedev->id >= max_id) {
		ret = ERR_PTR(-ENOMEM);
		goto err;
	}

	/* Device node name: tee<n> or teepriv<n>, numbered within its half */
	snprintf(teedev->name, sizeof(teedev->name), "tee%s%d",
		 teedesc->flags & TEE_DESC_PRIVILEGED ? "priv" : "",
		 teedev->id - offs);

	teedev->dev.class = &tee_class;
	teedev->dev.release = tee_release_device;
	teedev->dev.parent = dev;

	teedev->dev.devt = MKDEV(MAJOR(tee_devt), teedev->id);

	rc = dev_set_name(&teedev->dev, "%s", teedev->name);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err_devt;
	}

	cdev_init(&teedev->cdev, &tee_fops);
	teedev->cdev.owner = teedesc->owner;

	dev_set_drvdata(&teedev->dev, driver_data);
	device_initialize(&teedev->dev);

	/* 1 as tee_device_unregister() does one final tee_device_put() */
	teedev->num_users = 1;
	init_completion(&teedev->c_no_users);
	mutex_init(&teedev->mutex);
	idr_init(&teedev->idr);

	teedev->desc = teedesc;
	teedev->pool = pool;

	return teedev;
err_devt:
	/*
	 * NOTE(review): the chrdev region is not allocated per-device here,
	 * so unregistering one minor on this path looks suspicious —
	 * confirm against where tee_devt is registered.
	 */
	unregister_chrdev_region(teedev->dev.devt, 1);
err:
	pr_err("could not register %s driver\n",
	       teedesc->flags & TEE_DESC_PRIVILEGED ? "privileged" : "client");
	/* Only release an ID if one was actually claimed above */
	if (teedev && teedev->id < TEE_NUM_DEVICES) {
		spin_lock(&driver_lock);
		clear_bit(teedev->id, dev_mask);
		spin_unlock(&driver_lock);
	}
	kfree(teedev);
	return ret;
}
EXPORT_SYMBOL_GPL(tee_device_alloc);
1162 
/**
 * tee_device_set_dev_groups() - Set sysfs attribute groups for the device
 * @teedev:	Device the groups apply to
 * @dev_groups:	NULL-terminated array of attribute groups
 *
 * Must be called before tee_device_register(); the driver core creates
 * the attributes when the device is added.
 */
void tee_device_set_dev_groups(struct tee_device *teedev,
			       const struct attribute_group **dev_groups)
{
	teedev->dev.groups = dev_groups;
}
EXPORT_SYMBOL_GPL(tee_device_set_dev_groups);
1169 
1170 static ssize_t implementation_id_show(struct device *dev,
1171 				      struct device_attribute *attr, char *buf)
1172 {
1173 	struct tee_device *teedev = container_of(dev, struct tee_device, dev);
1174 	struct tee_ioctl_version_data vers;
1175 
1176 	teedev->desc->ops->get_version(teedev, &vers);
1177 	return scnprintf(buf, PAGE_SIZE, "%d\n", vers.impl_id);
1178 }
1179 static DEVICE_ATTR_RO(implementation_id);
1180 
/* Attributes common to every TEE class device, exposed via sysfs */
static struct attribute *tee_dev_attrs[] = {
	&dev_attr_implementation_id.attr,
	NULL
};

ATTRIBUTE_GROUPS(tee_dev);	/* defines tee_dev_groups used below */

/* The "tee" device class that all tee%d/teepriv%d devices belong to */
static const struct class tee_class = {
	.name = "tee",
	.dev_groups = tee_dev_groups,
};
1192 
1193 /**
1194  * tee_device_register() - Registers a TEE device
1195  * @teedev:	Device to register
1196  *
1197  * tee_device_unregister() need to be called to remove the @teedev if
1198  * this function fails.
1199  *
1200  * @returns < 0 on failure
1201  */
1202 int tee_device_register(struct tee_device *teedev)
1203 {
1204 	int rc;
1205 
1206 	if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED) {
1207 		dev_err(&teedev->dev, "attempt to register twice\n");
1208 		return -EINVAL;
1209 	}
1210 
1211 	rc = cdev_device_add(&teedev->cdev, &teedev->dev);
1212 	if (rc) {
1213 		dev_err(&teedev->dev,
1214 			"unable to cdev_device_add() %s, major %d, minor %d, err=%d\n",
1215 			teedev->name, MAJOR(teedev->dev.devt),
1216 			MINOR(teedev->dev.devt), rc);
1217 		return rc;
1218 	}
1219 
1220 	teedev->flags |= TEE_DEVICE_FLAG_REGISTERED;
1221 	return 0;
1222 }
1223 EXPORT_SYMBOL_GPL(tee_device_register);
1224 
1225 void tee_device_put(struct tee_device *teedev)
1226 {
1227 	mutex_lock(&teedev->mutex);
1228 	/* Shouldn't put in this state */
1229 	if (!WARN_ON(!teedev->desc)) {
1230 		teedev->num_users--;
1231 		if (!teedev->num_users) {
1232 			teedev->desc = NULL;
1233 			complete(&teedev->c_no_users);
1234 		}
1235 	}
1236 	mutex_unlock(&teedev->mutex);
1237 }
1238 EXPORT_SYMBOL_GPL(tee_device_put);
1239 
1240 bool tee_device_get(struct tee_device *teedev)
1241 {
1242 	mutex_lock(&teedev->mutex);
1243 	if (!teedev->desc) {
1244 		mutex_unlock(&teedev->mutex);
1245 		return false;
1246 	}
1247 	teedev->num_users++;
1248 	mutex_unlock(&teedev->mutex);
1249 	return true;
1250 }
1251 EXPORT_SYMBOL_GPL(tee_device_get);
1252 
1253 /**
1254  * tee_device_unregister() - Removes a TEE device
1255  * @teedev:	Device to unregister
1256  *
1257  * This function should be called to remove the @teedev even if
1258  * tee_device_register() hasn't been called yet. Does nothing if
1259  * @teedev is NULL.
1260  */
1261 void tee_device_unregister(struct tee_device *teedev)
1262 {
1263 	if (!teedev)
1264 		return;
1265 
1266 	tee_device_put_all_dma_heaps(teedev);
1267 
1268 	if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED)
1269 		cdev_device_del(&teedev->cdev, &teedev->dev);
1270 
1271 	tee_device_put(teedev);
1272 	wait_for_completion(&teedev->c_no_users);
1273 
1274 	/*
1275 	 * No need to take a mutex any longer now since teedev->desc was
1276 	 * set to NULL before teedev->c_no_users was completed.
1277 	 */
1278 
1279 	teedev->pool = NULL;
1280 
1281 	put_device(&teedev->dev);
1282 }
1283 EXPORT_SYMBOL_GPL(tee_device_unregister);
1284 
1285 /**
1286  * tee_get_drvdata() - Return driver_data pointer
1287  * @teedev:	Device containing the driver_data pointer
1288  * @returns the driver_data pointer supplied to tee_device_alloc().
1289  */
1290 void *tee_get_drvdata(struct tee_device *teedev)
1291 {
1292 	return dev_get_drvdata(&teedev->dev);
1293 }
1294 EXPORT_SYMBOL_GPL(tee_get_drvdata);
1295 
/* Bundles the match callback and its arguments for class_find_device() */
struct match_dev_data {
	/* Buffer filled with each candidate's version by match_dev() */
	struct tee_ioctl_version_data *vers;
	/* Opaque caller data forwarded to the match callback */
	const void *data;
	/* Returns non-zero when the candidate device is acceptable */
	int (*match)(struct tee_ioctl_version_data *, const void *);
};
1301 
1302 static int match_dev(struct device *dev, const void *data)
1303 {
1304 	const struct match_dev_data *match_data = data;
1305 	struct tee_device *teedev = container_of(dev, struct tee_device, dev);
1306 
1307 	teedev->desc->ops->get_version(teedev, match_data->vers);
1308 	return match_data->match(match_data->vers, match_data->data);
1309 }
1310 
/**
 * tee_client_open_context() - Open a TEE context for an in-kernel client
 * @start:	If not NULL, continue the device search after this context's
 *		device
 * @match:	Callback deciding whether a device's version data matches
 * @data:	Opaque data passed as second argument to @match
 * @vers:	If not NULL, receives the matched device's version data
 *
 * Walks the TEE class devices, opening a context on the first one
 * accepted by @match. Returns an ERR_PTR (-ENOENT if no device matched,
 * or the teedev_open() error) on failure.
 */
struct tee_context *
tee_client_open_context(struct tee_context *start,
			int (*match)(struct tee_ioctl_version_data *,
				     const void *),
			const void *data, struct tee_ioctl_version_data *vers)
{
	struct device *dev = NULL;
	struct device *put_dev = NULL;
	struct tee_context *ctx = NULL;
	struct tee_ioctl_version_data v;
	/* Use the caller's version buffer if provided, else local scratch */
	struct match_dev_data match_data = { vers ? vers : &v, data, match };

	if (start)
		dev = &start->teedev->dev;

	do {
		/* class_find_device() returns the device with a ref held */
		dev = class_find_device(&tee_class, dev, &match_data, match_dev);
		if (!dev) {
			ctx = ERR_PTR(-ENOENT);
			break;
		}

		/* Drop the previous candidate's ref, keep the current one */
		put_device(put_dev);
		put_dev = dev;

		/* Retry on failures other than -ENOMEM (e.g. device dying) */
		ctx = teedev_open(container_of(dev, struct tee_device, dev));
	} while (IS_ERR(ctx) && PTR_ERR(ctx) != -ENOMEM);

	put_device(put_dev);
	/*
	 * Default behaviour for in kernel client is to not wait for
	 * tee-supplicant if not present for any requests in this context.
	 * Also this flag could be configured again before call to
	 * tee_client_open_session() if any in kernel client requires
	 * different behaviour.
	 */
	if (!IS_ERR(ctx))
		ctx->supp_nowait = true;

	return ctx;
}
EXPORT_SYMBOL_GPL(tee_client_open_context);
1353 
/**
 * tee_client_close_context() - Close a TEE context
 * @ctx:	Context opened with tee_client_open_context()
 */
void tee_client_close_context(struct tee_context *ctx)
{
	teedev_close_context(ctx);
}
EXPORT_SYMBOL_GPL(tee_client_close_context);
1359 
1360 void tee_client_get_version(struct tee_context *ctx,
1361 			    struct tee_ioctl_version_data *vers)
1362 {
1363 	ctx->teedev->desc->ops->get_version(ctx->teedev, vers);
1364 }
1365 EXPORT_SYMBOL_GPL(tee_client_get_version);
1366 
1367 int tee_client_open_session(struct tee_context *ctx,
1368 			    struct tee_ioctl_open_session_arg *arg,
1369 			    struct tee_param *param)
1370 {
1371 	if (!ctx->teedev->desc->ops->open_session)
1372 		return -EINVAL;
1373 	return ctx->teedev->desc->ops->open_session(ctx, arg, param);
1374 }
1375 EXPORT_SYMBOL_GPL(tee_client_open_session);
1376 
1377 int tee_client_close_session(struct tee_context *ctx, u32 session)
1378 {
1379 	if (!ctx->teedev->desc->ops->close_session)
1380 		return -EINVAL;
1381 	return ctx->teedev->desc->ops->close_session(ctx, session);
1382 }
1383 EXPORT_SYMBOL_GPL(tee_client_close_session);
1384 
1385 int tee_client_system_session(struct tee_context *ctx, u32 session)
1386 {
1387 	if (!ctx->teedev->desc->ops->system_session)
1388 		return -EINVAL;
1389 	return ctx->teedev->desc->ops->system_session(ctx, session);
1390 }
1391 EXPORT_SYMBOL_GPL(tee_client_system_session);
1392 
1393 int tee_client_invoke_func(struct tee_context *ctx,
1394 			   struct tee_ioctl_invoke_arg *arg,
1395 			   struct tee_param *param)
1396 {
1397 	if (!ctx->teedev->desc->ops->invoke_func)
1398 		return -EINVAL;
1399 	return ctx->teedev->desc->ops->invoke_func(ctx, arg, param);
1400 }
1401 EXPORT_SYMBOL_GPL(tee_client_invoke_func);
1402 
1403 int tee_client_cancel_req(struct tee_context *ctx,
1404 			  struct tee_ioctl_cancel_arg *arg)
1405 {
1406 	if (!ctx->teedev->desc->ops->cancel_req)
1407 		return -EINVAL;
1408 	return ctx->teedev->desc->ops->cancel_req(ctx, arg->cancel_id,
1409 						  arg->session);
1410 }
1411 
1412 static int tee_client_device_match(struct device *dev,
1413 				   const struct device_driver *drv)
1414 {
1415 	const struct tee_client_device_id *id_table;
1416 	struct tee_client_device *tee_device;
1417 
1418 	id_table = to_tee_client_driver(drv)->id_table;
1419 	tee_device = to_tee_client_device(dev);
1420 
1421 	while (!uuid_is_null(&id_table->uuid)) {
1422 		if (uuid_equal(&tee_device->id.uuid, &id_table->uuid))
1423 			return 1;
1424 		id_table++;
1425 	}
1426 
1427 	return 0;
1428 }
1429 
1430 static int tee_client_device_uevent(const struct device *dev,
1431 				    struct kobj_uevent_env *env)
1432 {
1433 	uuid_t *dev_id = &to_tee_client_device(dev)->id.uuid;
1434 
1435 	return add_uevent_var(env, "MODALIAS=tee:%pUb", dev_id);
1436 }
1437 
/* Bus for TEE client devices; drivers are matched to devices by UUID */
const struct bus_type tee_bus_type = {
	.name		= "tee",
	.match		= tee_client_device_match,
	.uevent		= tee_client_device_uevent,
};
EXPORT_SYMBOL_GPL(tee_bus_type);
1444 
1445 static int __init tee_init(void)
1446 {
1447 	int rc;
1448 
1449 	rc = class_register(&tee_class);
1450 	if (rc) {
1451 		pr_err("couldn't create class\n");
1452 		return rc;
1453 	}
1454 
1455 	rc = alloc_chrdev_region(&tee_devt, 0, TEE_NUM_DEVICES, "tee");
1456 	if (rc) {
1457 		pr_err("failed to allocate char dev region\n");
1458 		goto out_unreg_class;
1459 	}
1460 
1461 	rc = bus_register(&tee_bus_type);
1462 	if (rc) {
1463 		pr_err("failed to register tee bus\n");
1464 		goto out_unreg_chrdev;
1465 	}
1466 
1467 	return 0;
1468 
1469 out_unreg_chrdev:
1470 	unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES);
1471 out_unreg_class:
1472 	class_unregister(&tee_class);
1473 
1474 	return rc;
1475 }
1476 
static void __exit tee_exit(void)
{
	/* Tear down in reverse order of tee_init() */
	bus_unregister(&tee_bus_type);
	unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES);
	class_unregister(&tee_class);
}
1483 
/* Core must be ready before TEE drivers (later initcall levels) probe */
subsys_initcall(tee_init);
module_exit(tee_exit);

MODULE_AUTHOR("Linaro");
MODULE_DESCRIPTION("TEE Driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL v2");
/* Symbol namespaces needed by the TEE shared-memory/dma-heap code */
MODULE_IMPORT_NS("DMA_BUF");
MODULE_IMPORT_NS("DMA_BUF_HEAP");
1493