1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2015-2016, Linaro Limited
4 */
5
6 #define pr_fmt(fmt) "%s: " fmt, __func__
7
8 #include <linux/cdev.h>
9 #include <linux/cred.h>
10 #include <linux/fs.h>
11 #include <linux/idr.h>
12 #include <linux/module.h>
13 #include <linux/overflow.h>
14 #include <linux/slab.h>
15 #include <linux/tee_core.h>
16 #include <linux/uaccess.h>
17 #include <crypto/sha1.h>
18 #include "tee_private.h"
19
/* Maximum number of TEE devices: lower half client, upper half privileged */
#define TEE_NUM_DEVICES 32

/* Bytes needed for an array of (x) struct tee_param; saturates on overflow */
#define TEE_IOCTL_PARAM_SIZE(x) (size_mul(sizeof(struct tee_param), (x)))

/* Buffer size for the "uid=..."/"gid=..." name hashed into a client UUIDv5 */
#define TEE_UUID_NS_NAME_SIZE 128

/*
 * TEE Client UUID name space identifier (UUIDv4)
 *
 * Value here is random UUID that is allocated as name space identifier for
 * forming Client UUID's for TEE environment using UUIDv5 scheme.
 */
static const uuid_t tee_client_uuid_ns = UUID_INIT(0x58ac9ca0, 0x2086, 0x4683,
						   0xa1, 0xb8, 0xec, 0x4b,
						   0xc0, 0x8e, 0x01, 0xb6);

/*
 * Unprivileged devices in the lower half range and privileged devices in
 * the upper half range.
 */
static DECLARE_BITMAP(dev_mask, TEE_NUM_DEVICES);
/* Serializes allocation/release of device ids in dev_mask */
static DEFINE_SPINLOCK(driver_lock);

static const struct class tee_class;
static dev_t tee_devt;
45
teedev_open(struct tee_device * teedev)46 struct tee_context *teedev_open(struct tee_device *teedev)
47 {
48 int rc;
49 struct tee_context *ctx;
50
51 if (!tee_device_get(teedev))
52 return ERR_PTR(-EINVAL);
53
54 ctx = kzalloc_obj(*ctx);
55 if (!ctx) {
56 rc = -ENOMEM;
57 goto err;
58 }
59
60 kref_init(&ctx->refcount);
61 ctx->teedev = teedev;
62 rc = teedev->desc->ops->open(ctx);
63 if (rc)
64 goto err;
65
66 return ctx;
67 err:
68 kfree(ctx);
69 tee_device_put(teedev);
70 return ERR_PTR(rc);
71
72 }
73 EXPORT_SYMBOL_GPL(teedev_open);
74
teedev_ctx_get(struct tee_context * ctx)75 void teedev_ctx_get(struct tee_context *ctx)
76 {
77 if (ctx->releasing)
78 return;
79
80 kref_get(&ctx->refcount);
81 }
82 EXPORT_SYMBOL_GPL(teedev_ctx_get);
83
teedev_ctx_release(struct kref * ref)84 static void teedev_ctx_release(struct kref *ref)
85 {
86 struct tee_context *ctx = container_of(ref, struct tee_context,
87 refcount);
88 ctx->releasing = true;
89 ctx->teedev->desc->ops->release(ctx);
90 kfree(ctx);
91 }
92
teedev_ctx_put(struct tee_context * ctx)93 void teedev_ctx_put(struct tee_context *ctx)
94 {
95 if (ctx->releasing)
96 return;
97
98 kref_put(&ctx->refcount, teedev_ctx_release);
99 }
100 EXPORT_SYMBOL_GPL(teedev_ctx_put);
101
teedev_close_context(struct tee_context * ctx)102 void teedev_close_context(struct tee_context *ctx)
103 {
104 struct tee_device *teedev = ctx->teedev;
105
106 if (teedev->desc->ops->close_context)
107 teedev->desc->ops->close_context(ctx);
108
109 teedev_ctx_put(ctx);
110 tee_device_put(teedev);
111 }
112 EXPORT_SYMBOL_GPL(teedev_close_context);
113
tee_open(struct inode * inode,struct file * filp)114 static int tee_open(struct inode *inode, struct file *filp)
115 {
116 struct tee_context *ctx;
117
118 ctx = teedev_open(container_of(inode->i_cdev, struct tee_device, cdev));
119 if (IS_ERR(ctx))
120 return PTR_ERR(ctx);
121
122 /*
123 * Default user-space behaviour is to wait for tee-supplicant
124 * if not present for any requests in this context.
125 */
126 ctx->supp_nowait = false;
127 filp->private_data = ctx;
128 return 0;
129 }
130
tee_release(struct inode * inode,struct file * filp)131 static int tee_release(struct inode *inode, struct file *filp)
132 {
133 teedev_close_context(filp->private_data);
134 return 0;
135 }
136
137 /**
138 * uuid_v5() - Calculate UUIDv5
139 * @uuid: Resulting UUID
140 * @ns: Name space ID for UUIDv5 function
141 * @name: Name for UUIDv5 function
142 * @size: Size of name
143 *
144 * UUIDv5 is specific in RFC 4122.
145 *
146 * This implements section (for SHA-1):
147 * 4.3. Algorithm for Creating a Name-Based UUID
148 */
uuid_v5(uuid_t * uuid,const uuid_t * ns,const void * name,size_t size)149 static void uuid_v5(uuid_t *uuid, const uuid_t *ns, const void *name,
150 size_t size)
151 {
152 unsigned char hash[SHA1_DIGEST_SIZE];
153 struct sha1_ctx ctx;
154
155 sha1_init(&ctx);
156 sha1_update(&ctx, (const u8 *)ns, sizeof(*ns));
157 sha1_update(&ctx, (const u8 *)name, size);
158 sha1_final(&ctx, hash);
159
160 memcpy(uuid->b, hash, UUID_SIZE);
161
162 /* Tag for version 5 */
163 uuid->b[6] = (hash[6] & 0x0F) | 0x50;
164 uuid->b[8] = (hash[8] & 0x3F) | 0x80;
165 }
166
/**
 * tee_session_calc_client_uuid() - Calculate client UUID for a session
 * @uuid:		Resulting UUID
 * @connection_method:	Connection method for session (TEE_IOCTL_LOGIN_*)
 * @connection_data:	Connection data for opening session
 *
 * Based on the connection method, calculates a UUIDv5-based client UUID
 * using @tee_client_uuid_ns as name space.
 *
 * For group-based login, verifies that the calling process is actually a
 * member of the requested group.
 *
 * Return: 0 on success, < 0 on failure
 */
int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method,
				 const u8 connection_data[TEE_IOCTL_UUID_LEN])
{
	gid_t ns_grp = (gid_t)-1;
	kgid_t grp = INVALID_GID;
	char *name = NULL;
	int name_len;
	int rc = 0;

	if (connection_method == TEE_IOCTL_LOGIN_PUBLIC ||
	    connection_method == TEE_IOCTL_LOGIN_REE_KERNEL) {
		/* Nil UUID to be passed to TEE environment */
		uuid_copy(uuid, &uuid_null);
		return 0;
	}

	/*
	 * In Linux environment client UUID is based on UUIDv5.
	 *
	 * Determine client UUID with following semantics for 'name':
	 *
	 * For TEEC_LOGIN_USER:
	 * uid=<uid>
	 *
	 * For TEEC_LOGIN_GROUP:
	 * gid=<gid>
	 *
	 */

	name = kzalloc(TEE_UUID_NS_NAME_SIZE, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	switch (connection_method) {
	case TEE_IOCTL_LOGIN_USER:
		/* Identify the client by the effective UID of the caller */
		name_len = snprintf(name, TEE_UUID_NS_NAME_SIZE, "uid=%x",
				    current_euid().val);
		if (name_len >= TEE_UUID_NS_NAME_SIZE) {
			rc = -E2BIG;
			goto out_free_name;
		}
		break;

	case TEE_IOCTL_LOGIN_GROUP:
		/* connection_data carries the requested GID from user space */
		memcpy(&ns_grp, connection_data, sizeof(gid_t));
		grp = make_kgid(current_user_ns(), ns_grp);
		/* Reject if the caller is not a member of the group */
		if (!gid_valid(grp) || !in_egroup_p(grp)) {
			rc = -EPERM;
			goto out_free_name;
		}

		name_len = snprintf(name, TEE_UUID_NS_NAME_SIZE, "gid=%x",
				    grp.val);
		if (name_len >= TEE_UUID_NS_NAME_SIZE) {
			rc = -E2BIG;
			goto out_free_name;
		}
		break;

	default:
		rc = -EINVAL;
		goto out_free_name;
	}

	uuid_v5(uuid, &tee_client_uuid_ns, name, name_len);
out_free_name:
	kfree(name);

	return rc;
}
EXPORT_SYMBOL_GPL(tee_session_calc_client_uuid);
237 EXPORT_SYMBOL_GPL(tee_session_calc_client_uuid);
238
/* TEE_IOC_VERSION: report driver version/capability data to user space. */
static int tee_ioctl_version(struct tee_context *ctx,
			     struct tee_ioctl_version_data __user *uvers)
{
	struct tee_ioctl_version_data vers;

	ctx->teedev->desc->ops->get_version(ctx->teedev, &vers);

	/* Only the privileged (supplicant) device advertises this cap */
	if (ctx->teedev->desc->flags & TEE_DESC_PRIVILEGED)
		vers.gen_caps |= TEE_GEN_CAP_PRIVILEGED;

	return copy_to_user(uvers, &vers, sizeof(vers)) ? -EFAULT : 0;
}
254
/*
 * TEE_IOC_SHM_ALLOC: allocate shared memory and return it to user space
 * as a file descriptor (to be mmap()ed by the caller).
 */
static int tee_ioctl_shm_alloc(struct tee_context *ctx,
			       struct tee_ioctl_shm_alloc_data __user *udata)
{
	struct tee_ioctl_shm_alloc_data data;
	struct tee_shm *shm;
	long rc;

	if (copy_from_user(&data, udata, sizeof(data)))
		return -EFAULT;

	/* Currently no input flags are supported */
	if (data.flags)
		return -EINVAL;

	shm = tee_shm_alloc_user_buf(ctx, data.size);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	/* Report back the id and the possibly rounded-up size */
	data.id = shm->id;
	data.size = shm->size;

	rc = copy_to_user(udata, &data, sizeof(data)) ?
		-EFAULT : tee_shm_get_fd(shm);

	/*
	 * When user space closes the file descriptor the shared memory
	 * should be freed or if tee_shm_get_fd() failed then it will
	 * be freed immediately.
	 */
	tee_shm_put(shm);
	return rc;
}
289
290 static int
tee_ioctl_shm_register(struct tee_context * ctx,struct tee_ioctl_shm_register_data __user * udata)291 tee_ioctl_shm_register(struct tee_context *ctx,
292 struct tee_ioctl_shm_register_data __user *udata)
293 {
294 long ret;
295 struct tee_ioctl_shm_register_data data;
296 struct tee_shm *shm;
297
298 if (copy_from_user(&data, udata, sizeof(data)))
299 return -EFAULT;
300
301 /* Currently no input flags are supported */
302 if (data.flags)
303 return -EINVAL;
304
305 shm = tee_shm_register_user_buf(ctx, data.addr, data.length);
306 if (IS_ERR(shm))
307 return PTR_ERR(shm);
308
309 data.id = shm->id;
310 data.length = shm->size;
311
312 if (copy_to_user(udata, &data, sizeof(data)))
313 ret = -EFAULT;
314 else
315 ret = tee_shm_get_fd(shm);
316 /*
317 * When user space closes the file descriptor the shared memory
318 * should be freed or if tee_shm_get_fd() failed then it will
319 * be freed immediately.
320 */
321 tee_shm_put(shm);
322 return ret;
323 }
324
325 static int
tee_ioctl_shm_register_fd(struct tee_context * ctx,struct tee_ioctl_shm_register_fd_data __user * udata)326 tee_ioctl_shm_register_fd(struct tee_context *ctx,
327 struct tee_ioctl_shm_register_fd_data __user *udata)
328 {
329 struct tee_ioctl_shm_register_fd_data data;
330 struct tee_shm *shm;
331 long ret;
332
333 if (copy_from_user(&data, udata, sizeof(data)))
334 return -EFAULT;
335
336 /* Currently no input flags are supported */
337 if (data.flags)
338 return -EINVAL;
339
340 shm = tee_shm_register_fd(ctx, data.fd);
341 if (IS_ERR(shm))
342 return -EINVAL;
343
344 data.id = shm->id;
345 data.flags = shm->flags;
346 data.size = shm->size;
347
348 if (copy_to_user(udata, &data, sizeof(data)))
349 ret = -EFAULT;
350 else
351 ret = tee_shm_get_fd(shm);
352
353 /*
354 * When user space closes the file descriptor the shared memory
355 * should be freed or if tee_shm_get_fd() failed then it will
356 * be freed immediately.
357 */
358 tee_shm_put(shm);
359 return ret;
360 }
361
/*
 * Resolve one user-supplied memref parameter (ip->a = offset, ip->b = size,
 * ip->c = shm id or TEE_MEMREF_NULL) into a kernel struct tee_param_memref.
 * On success the memref holds a reference on the shm (if non-NULL) that the
 * caller must eventually drop with tee_shm_put().
 */
static int param_from_user_memref(struct tee_context *ctx,
				  struct tee_param_memref *memref,
				  struct tee_ioctl_param *ip)
{
	struct tee_shm *shm;
	size_t offs = 0;

	/*
	 * If a NULL pointer is passed to a TA in the TEE,
	 * the ip.c IOCTL parameters is set to TEE_MEMREF_NULL
	 * indicating a NULL memory reference.
	 */
	if (ip->c != TEE_MEMREF_NULL) {
		/*
		 * If we fail to get a pointer to a shared
		 * memory object (and increase the ref count)
		 * from an identifier we return an error. All
		 * pointers that has been added in params have
		 * an increased ref count. It's the caller's
		 * responsibility to do tee_shm_put() on all
		 * resolved pointers.
		 */
		shm = tee_shm_get_from_id(ctx, ip->c);
		if (IS_ERR(shm))
			return PTR_ERR(shm);

		/*
		 * Ensure offset + size does not overflow
		 * offset and does not overflow the size of
		 * the referred shared memory object.
		 */
		if ((ip->a + ip->b) < ip->a ||
		    (ip->a + ip->b) > shm->size) {
			tee_shm_put(shm);
			return -EINVAL;
		}

		if (shm->flags & TEE_SHM_DMA_BUF) {
			struct tee_shm_dmabuf_ref *ref;

			ref = container_of(shm, struct tee_shm_dmabuf_ref, shm);
			if (ref->parent_shm) {
				/*
				 * The shm already has one reference to
				 * ref->parent_shm so we are clear of 0.
				 * We're getting another reference since
				 * this shm will be used in the parameter
				 * list instead of the shm we got with
				 * tee_shm_get_from_id() above.
				 */
				refcount_inc(&ref->parent_shm->refcount);
				tee_shm_put(shm);
				shm = ref->parent_shm;
				offs = ref->offset;
			}
		}
	} else if (ctx->cap_memref_null) {
		/* Pass NULL pointer to OP-TEE */
		shm = NULL;
	} else {
		/* NULL memrefs not supported by this driver/context */
		return -EINVAL;
	}

	/* For dma-buf children, fold the child's offset into the memref */
	memref->shm_offs = ip->a + offs;
	memref->size = ip->b;
	memref->shm = shm;

	return 0;
}
431
/*
 * Copy @num_params ioctl parameters from user space into kernel
 * struct tee_param entries. Memref parameters take references on their
 * shm objects; the caller must tee_shm_put() all resolved shm pointers,
 * also on error return.
 */
static int params_from_user(struct tee_context *ctx, struct tee_param *params,
			    size_t num_params,
			    struct tee_ioctl_param __user *uparams)
{
	size_t n;

	for (n = 0; n < num_params; n++) {
		struct tee_ioctl_param ip;
		int rc;

		if (copy_from_user(&ip, uparams + n, sizeof(ip)))
			return -EFAULT;

		/* All unused attribute bits have to be zero */
		if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_MASK)
			return -EINVAL;

		params[n].attr = ip.attr;
		switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT:
			/* Output-only: nothing to copy in */
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			params[n].u.value.a = ip.a;
			params[n].u.value.b = ip.b;
			params[n].u.value.c = ip.c;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INOUT:
			params[n].u.ubuf.uaddr = u64_to_user_ptr(ip.a);
			params[n].u.ubuf.size = ip.b;

			/* Early sanity check of the user buffer range */
			if (!access_ok(params[n].u.ubuf.uaddr,
				       params[n].u.ubuf.size))
				return -EFAULT;

			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INOUT:
			params[n].u.objref.id = ip.a;
			params[n].u.objref.flags = ip.b;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			/* Resolves the shm id and takes a shm reference */
			rc = param_from_user_memref(ctx, &params[n].u.memref,
						    &ip);
			if (rc)
				return rc;
			break;
		default:
			/* Unknown attribute */
			return -EINVAL;
		}
	}
	return 0;
}
492
/*
 * Copy the output-capable fields of @params back to the user-space
 * parameter array. Input-only and unknown types are left untouched.
 */
static int params_to_user(struct tee_ioctl_param __user *uparams,
			  size_t num_params, struct tee_param *params)
{
	size_t i;

	for (i = 0; i < num_params; i++) {
		struct tee_ioctl_param __user *uparam = uparams + i;
		struct tee_param *param = params + i;

		switch (param->attr) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			if (put_user(param->u.value.a, &uparam->a) ||
			    put_user(param->u.value.b, &uparam->b) ||
			    put_user(param->u.value.c, &uparam->c))
				return -EFAULT;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INOUT:
			/* Only the (possibly updated) size is reported back */
			if (put_user((u64)param->u.ubuf.size, &uparam->b))
				return -EFAULT;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INOUT:
			if (put_user(param->u.objref.id, &uparam->a) ||
			    put_user(param->u.objref.flags, &uparam->b))
				return -EFAULT;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			/* Only the (possibly updated) size is reported back */
			if (put_user((u64)param->u.memref.size, &uparam->b))
				return -EFAULT;
			break;
		default:
			break;
		}
	}
	return 0;
}
532
/*
 * TEE_IOC_OPEN_SESSION: open a session towards a Trusted Application.
 * The user buffer carries a struct tee_ioctl_open_session_arg directly
 * followed by arg.num_params parameters; the combined size must match
 * buf.buf_len exactly.
 */
static int tee_ioctl_open_session(struct tee_context *ctx,
				  struct tee_ioctl_buf_data __user *ubuf)
{
	int rc;
	size_t n;
	struct tee_ioctl_buf_data buf;
	struct tee_ioctl_open_session_arg __user *uarg;
	struct tee_ioctl_open_session_arg arg;
	struct tee_ioctl_param __user *uparams = NULL;
	struct tee_param *params = NULL;
	bool have_session = false;

	if (!ctx->teedev->desc->ops->open_session)
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, sizeof(buf)))
		return -EFAULT;

	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
	    buf.buf_len < sizeof(struct tee_ioctl_open_session_arg))
		return -EINVAL;

	uarg = u64_to_user_ptr(buf.buf_ptr);
	if (copy_from_user(&arg, uarg, sizeof(arg)))
		return -EFAULT;

	/* size_add()/size_mul() saturate, so a wrapped size can't pass */
	if (size_add(sizeof(arg), TEE_IOCTL_PARAM_SIZE(arg.num_params)) != buf.buf_len)
		return -EINVAL;

	if (arg.num_params) {
		params = kzalloc_objs(struct tee_param, arg.num_params);
		if (!params)
			return -ENOMEM;
		uparams = uarg->params;
		rc = params_from_user(ctx, params, arg.num_params, uparams);
		if (rc)
			goto out;
	}

	/* Kernel-internal login methods must not be reachable from user space */
	if (arg.clnt_login >= TEE_IOCTL_LOGIN_REE_KERNEL_MIN &&
	    arg.clnt_login <= TEE_IOCTL_LOGIN_REE_KERNEL_MAX) {
		pr_debug("login method not allowed for user-space client\n");
		rc = -EPERM;
		goto out;
	}

	rc = ctx->teedev->desc->ops->open_session(ctx, &arg, params);
	if (rc)
		goto out;
	have_session = true;

	if (put_user(arg.session, &uarg->session) ||
	    put_user(arg.ret, &uarg->ret) ||
	    put_user(arg.ret_origin, &uarg->ret_origin)) {
		rc = -EFAULT;
		goto out;
	}
	rc = params_to_user(uparams, arg.num_params, params);
out:
	/*
	 * If we've succeeded to open the session but failed to communicate
	 * it back to user space, close the session again to avoid leakage.
	 */
	if (rc && have_session && ctx->teedev->desc->ops->close_session)
		ctx->teedev->desc->ops->close_session(ctx, arg.session);

	if (params) {
		/* Decrease ref count for all valid shared memory pointers */
		for (n = 0; n < arg.num_params; n++)
			if (tee_param_is_memref(params + n) &&
			    params[n].u.memref.shm)
				tee_shm_put(params[n].u.memref.shm);
		kfree(params);
	}

	return rc;
}
610
/*
 * TEE_IOC_INVOKE: invoke a function in an open session. The user buffer
 * carries a struct tee_ioctl_invoke_arg directly followed by
 * arg.num_params parameters; the combined size must match buf.buf_len.
 */
static int tee_ioctl_invoke(struct tee_context *ctx,
			    struct tee_ioctl_buf_data __user *ubuf)
{
	int rc;
	size_t n;
	struct tee_ioctl_buf_data buf;
	struct tee_ioctl_invoke_arg __user *uarg;
	struct tee_ioctl_invoke_arg arg;
	struct tee_ioctl_param __user *uparams = NULL;
	struct tee_param *params = NULL;

	if (!ctx->teedev->desc->ops->invoke_func)
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, sizeof(buf)))
		return -EFAULT;

	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
	    buf.buf_len < sizeof(struct tee_ioctl_invoke_arg))
		return -EINVAL;

	uarg = u64_to_user_ptr(buf.buf_ptr);
	if (copy_from_user(&arg, uarg, sizeof(arg)))
		return -EFAULT;

	/* size_add()/size_mul() saturate, so a wrapped size can't pass */
	if (size_add(sizeof(arg), TEE_IOCTL_PARAM_SIZE(arg.num_params)) != buf.buf_len)
		return -EINVAL;

	if (arg.num_params) {
		params = kzalloc_objs(struct tee_param, arg.num_params);
		if (!params)
			return -ENOMEM;
		uparams = uarg->params;
		rc = params_from_user(ctx, params, arg.num_params, uparams);
		if (rc)
			goto out;
	}

	rc = ctx->teedev->desc->ops->invoke_func(ctx, &arg, params);
	if (rc)
		goto out;

	if (put_user(arg.ret, &uarg->ret) ||
	    put_user(arg.ret_origin, &uarg->ret_origin)) {
		rc = -EFAULT;
		goto out;
	}
	rc = params_to_user(uparams, arg.num_params, params);
out:
	if (params) {
		/* Decrease ref count for all valid shared memory pointers */
		for (n = 0; n < arg.num_params; n++)
			if (tee_param_is_memref(params + n) &&
			    params[n].u.memref.shm)
				tee_shm_put(params[n].u.memref.shm);
		kfree(params);
	}
	return rc;
}
670
/*
 * TEE_IOC_OBJECT_INVOKE: invoke an operation on a TEE object. The user
 * buffer carries a struct tee_ioctl_object_invoke_arg directly followed
 * by arg.num_params parameters; the combined size must match buf.buf_len.
 */
static int tee_ioctl_object_invoke(struct tee_context *ctx,
				   struct tee_ioctl_buf_data __user *ubuf)
{
	int rc;
	size_t n;
	struct tee_ioctl_buf_data buf;
	struct tee_ioctl_object_invoke_arg __user *uarg;
	struct tee_ioctl_object_invoke_arg arg;
	struct tee_ioctl_param __user *uparams = NULL;
	struct tee_param *params = NULL;

	if (!ctx->teedev->desc->ops->object_invoke_func)
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, sizeof(buf)))
		return -EFAULT;

	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
	    buf.buf_len < sizeof(struct tee_ioctl_object_invoke_arg))
		return -EINVAL;

	uarg = u64_to_user_ptr(buf.buf_ptr);
	if (copy_from_user(&arg, uarg, sizeof(arg)))
		return -EFAULT;

	/*
	 * Use size_add() like the other ioctls: TEE_IOCTL_PARAM_SIZE()
	 * saturates to SIZE_MAX on overflow and a plain addition of
	 * sizeof(arg) could then wrap to a small value and defeat this
	 * length check.
	 */
	if (size_add(sizeof(arg), TEE_IOCTL_PARAM_SIZE(arg.num_params)) !=
	    buf.buf_len)
		return -EINVAL;

	if (arg.num_params) {
		params = kzalloc_objs(struct tee_param, arg.num_params);
		if (!params)
			return -ENOMEM;
		uparams = uarg->params;
		rc = params_from_user(ctx, params, arg.num_params, uparams);
		if (rc)
			goto out;
	}

	rc = ctx->teedev->desc->ops->object_invoke_func(ctx, &arg, params);
	if (rc)
		goto out;

	if (put_user(arg.ret, &uarg->ret)) {
		rc = -EFAULT;
		goto out;
	}
	rc = params_to_user(uparams, arg.num_params, params);
out:
	if (params) {
		/* Decrease ref count for all valid shared memory pointers */
		for (n = 0; n < arg.num_params; n++)
			if (tee_param_is_memref(params + n) &&
			    params[n].u.memref.shm)
				tee_shm_put(params[n].u.memref.shm);
		kfree(params);
	}
	return rc;
}
729
/* TEE_IOC_CANCEL: request cancellation of an ongoing invoke/open. */
static int tee_ioctl_cancel(struct tee_context *ctx,
			    struct tee_ioctl_cancel_arg __user *uarg)
{
	struct tee_ioctl_cancel_arg arg;

	/* Optional driver callback */
	if (!ctx->teedev->desc->ops->cancel_req)
		return -EINVAL;

	if (copy_from_user(&arg, uarg, sizeof(arg)))
		return -EFAULT;

	return ctx->teedev->desc->ops->cancel_req(ctx, arg.cancel_id,
						  arg.session);
}
744
745 static int
tee_ioctl_close_session(struct tee_context * ctx,struct tee_ioctl_close_session_arg __user * uarg)746 tee_ioctl_close_session(struct tee_context *ctx,
747 struct tee_ioctl_close_session_arg __user *uarg)
748 {
749 struct tee_ioctl_close_session_arg arg;
750
751 if (!ctx->teedev->desc->ops->close_session)
752 return -EINVAL;
753
754 if (copy_from_user(&arg, uarg, sizeof(arg)))
755 return -EFAULT;
756
757 return ctx->teedev->desc->ops->close_session(ctx, arg.session);
758 }
759
/*
 * Marshal kernel struct tee_param entries into the user-space parameter
 * array consumed by tee-supplicant. Memrefs are passed as
 * (offset, size, shm id); a missing shm is flagged with an invalid id.
 */
static int params_to_supp(struct tee_context *ctx,
			  struct tee_ioctl_param __user *uparams,
			  size_t num_params, struct tee_param *params)
{
	size_t n;

	for (n = 0; n < num_params; n++) {
		struct tee_ioctl_param ip;
		struct tee_param *p = params + n;

		ip.attr = p->attr;
		switch (p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			ip.a = p->u.value.a;
			ip.b = p->u.value.b;
			ip.c = p->u.value.c;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INOUT:
			/* Pass the raw user address and size through */
			ip.a = (__force unsigned long)p->u.ubuf.uaddr;
			ip.b = p->u.ubuf.size;
			ip.c = 0;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INOUT:
			ip.a = p->u.objref.id;
			ip.b = p->u.objref.flags;
			ip.c = 0;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			ip.b = p->u.memref.size;
			if (!p->u.memref.shm) {
				ip.a = 0;
				ip.c = (u64)-1; /* invalid shm id */
				break;
			}
			ip.a = p->u.memref.shm_offs;
			ip.c = p->u.memref.shm->id;
			break;
		default:
			/* Unknown types are zeroed rather than leaked */
			ip.a = 0;
			ip.b = 0;
			ip.c = 0;
			break;
		}

		if (copy_to_user(uparams + n, &ip, sizeof(ip)))
			return -EFAULT;
	}

	return 0;
}
816
/*
 * TEE_IOC_SUPPL_RECV: used by tee-supplicant to receive the next request
 * from secure world. Blocks in the driver's supp_recv() until a request
 * arrives; func and num_params are updated to describe the request.
 */
static int tee_ioctl_supp_recv(struct tee_context *ctx,
			       struct tee_ioctl_buf_data __user *ubuf)
{
	int rc;
	struct tee_ioctl_buf_data buf;
	struct tee_iocl_supp_recv_arg __user *uarg;
	struct tee_param *params;
	u32 num_params;
	u32 func;

	if (!ctx->teedev->desc->ops->supp_recv)
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, sizeof(buf)))
		return -EFAULT;

	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
	    buf.buf_len < sizeof(struct tee_iocl_supp_recv_arg))
		return -EINVAL;

	uarg = u64_to_user_ptr(buf.buf_ptr);
	if (get_user(num_params, &uarg->num_params))
		return -EFAULT;

	/* size_add()/size_mul() saturate, so a wrapped size can't pass */
	if (size_add(sizeof(*uarg), TEE_IOCTL_PARAM_SIZE(num_params)) != buf.buf_len)
		return -EINVAL;

	params = kzalloc_objs(struct tee_param, num_params);
	if (!params)
		return -ENOMEM;

	rc = params_from_user(ctx, params, num_params, uarg->params);
	if (rc)
		goto out;

	/* supp_recv() may shrink num_params to what the request uses */
	rc = ctx->teedev->desc->ops->supp_recv(ctx, &func, &num_params, params);
	if (rc)
		goto out;

	if (put_user(func, &uarg->func) ||
	    put_user(num_params, &uarg->num_params)) {
		rc = -EFAULT;
		goto out;
	}

	rc = params_to_supp(ctx, uarg->params, num_params, params);
out:
	kfree(params);
	return rc;
}
867
/*
 * Unmarshal parameters supplied by tee-supplicant in its response.
 * Only output-capable parameter types are taken from user space; memrefs
 * carry just an updated size since the supplicant can't change the
 * underlying shared memory reference.
 */
static int params_from_supp(struct tee_param *params, size_t num_params,
			    struct tee_ioctl_param __user *uparams)
{
	size_t n;

	for (n = 0; n < num_params; n++) {
		struct tee_param *p = params + n;
		struct tee_ioctl_param ip;

		if (copy_from_user(&ip, uparams + n, sizeof(ip)))
			return -EFAULT;

		/* All unused attribute bits have to be zero */
		if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_MASK)
			return -EINVAL;

		p->attr = ip.attr;
		switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			/* Only out and in/out values can be updated */
			p->u.value.a = ip.a;
			p->u.value.b = ip.b;
			p->u.value.c = ip.c;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INOUT:
			p->u.ubuf.uaddr = u64_to_user_ptr(ip.a);
			p->u.ubuf.size = ip.b;

			/* Sanity check of the supplied buffer range */
			if (!access_ok(params[n].u.ubuf.uaddr,
				       params[n].u.ubuf.size))
				return -EFAULT;

			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INOUT:
			p->u.objref.id = ip.a;
			p->u.objref.flags = ip.b;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			/*
			 * Only the size of the memref can be updated.
			 * Since we don't have access to the original
			 * parameters here, only store the supplied size.
			 * The driver will copy the updated size into the
			 * original parameters.
			 */
			p->u.memref.shm = NULL;
			p->u.memref.shm_offs = 0;
			p->u.memref.size = ip.b;
			break;
		default:
			/* Input-only or unknown: clear the union */
			memset(&p->u, 0, sizeof(p->u));
			break;
		}
	}
	return 0;
}
928
/*
 * TEE_IOC_SUPPL_SEND: used by tee-supplicant to send its response to a
 * previously received request back to secure world.
 */
static int tee_ioctl_supp_send(struct tee_context *ctx,
			       struct tee_ioctl_buf_data __user *ubuf)
{
	long rc;
	struct tee_ioctl_buf_data buf;
	struct tee_iocl_supp_send_arg __user *uarg;
	struct tee_param *params;
	u32 num_params;
	u32 ret;

	/* Not valid for this driver */
	if (!ctx->teedev->desc->ops->supp_send)
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, sizeof(buf)))
		return -EFAULT;

	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
	    buf.buf_len < sizeof(struct tee_iocl_supp_send_arg))
		return -EINVAL;

	uarg = u64_to_user_ptr(buf.buf_ptr);
	if (get_user(ret, &uarg->ret) ||
	    get_user(num_params, &uarg->num_params))
		return -EFAULT;

	/*
	 * Unlike the receive path this allows a larger buffer ('>' rather
	 * than '!='): the supplicant may answer with fewer params than the
	 * buffer it originally received the request in.
	 */
	if (size_add(sizeof(*uarg), TEE_IOCTL_PARAM_SIZE(num_params)) > buf.buf_len)
		return -EINVAL;

	params = kzalloc_objs(struct tee_param, num_params);
	if (!params)
		return -ENOMEM;

	rc = params_from_supp(params, num_params, uarg->params);
	if (rc)
		goto out;

	rc = ctx->teedev->desc->ops->supp_send(ctx, ret, num_params, params);
out:
	kfree(params);
	return rc;
}
971
tee_ioctl(struct file * filp,unsigned int cmd,unsigned long arg)972 static long tee_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
973 {
974 struct tee_context *ctx = filp->private_data;
975 void __user *uarg = (void __user *)arg;
976
977 switch (cmd) {
978 case TEE_IOC_VERSION:
979 return tee_ioctl_version(ctx, uarg);
980 case TEE_IOC_SHM_ALLOC:
981 return tee_ioctl_shm_alloc(ctx, uarg);
982 case TEE_IOC_SHM_REGISTER:
983 return tee_ioctl_shm_register(ctx, uarg);
984 case TEE_IOC_SHM_REGISTER_FD:
985 return tee_ioctl_shm_register_fd(ctx, uarg);
986 case TEE_IOC_OPEN_SESSION:
987 return tee_ioctl_open_session(ctx, uarg);
988 case TEE_IOC_INVOKE:
989 return tee_ioctl_invoke(ctx, uarg);
990 case TEE_IOC_OBJECT_INVOKE:
991 return tee_ioctl_object_invoke(ctx, uarg);
992 case TEE_IOC_CANCEL:
993 return tee_ioctl_cancel(ctx, uarg);
994 case TEE_IOC_CLOSE_SESSION:
995 return tee_ioctl_close_session(ctx, uarg);
996 case TEE_IOC_SUPPL_RECV:
997 return tee_ioctl_supp_recv(ctx, uarg);
998 case TEE_IOC_SUPPL_SEND:
999 return tee_ioctl_supp_send(ctx, uarg);
1000 default:
1001 return -EINVAL;
1002 }
1003 }
1004
/* File operations for both /dev/tee* and /dev/teepriv* character devices */
static const struct file_operations tee_fops = {
	.owner = THIS_MODULE,
	.open = tee_open,
	.release = tee_release,
	.unlocked_ioctl = tee_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
};
1012
tee_release_device(struct device * dev)1013 static void tee_release_device(struct device *dev)
1014 {
1015 struct tee_device *teedev = container_of(dev, struct tee_device, dev);
1016
1017 spin_lock(&driver_lock);
1018 clear_bit(teedev->id, dev_mask);
1019 spin_unlock(&driver_lock);
1020 mutex_destroy(&teedev->mutex);
1021 idr_destroy(&teedev->idr);
1022 kfree(teedev);
1023 }
1024
1025 /**
1026 * tee_device_alloc() - Allocate a new struct tee_device instance
1027 * @teedesc: Descriptor for this driver
1028 * @dev: Parent device for this device
1029 * @pool: Shared memory pool, NULL if not used
1030 * @driver_data: Private driver data for this device
1031 *
1032 * Allocates a new struct tee_device instance. The device is
1033 * removed by tee_device_unregister().
1034 *
1035 * @returns a pointer to a 'struct tee_device' or an ERR_PTR on failure
1036 */
tee_device_alloc(const struct tee_desc * teedesc,struct device * dev,struct tee_shm_pool * pool,void * driver_data)1037 struct tee_device *tee_device_alloc(const struct tee_desc *teedesc,
1038 struct device *dev,
1039 struct tee_shm_pool *pool,
1040 void *driver_data)
1041 {
1042 struct tee_device *teedev;
1043 void *ret;
1044 int rc, max_id;
1045 int offs = 0;
1046
1047 if (!teedesc || !teedesc->name || !teedesc->ops ||
1048 !teedesc->ops->get_version || !teedesc->ops->open ||
1049 !teedesc->ops->release)
1050 return ERR_PTR(-EINVAL);
1051
1052 teedev = kzalloc_obj(*teedev);
1053 if (!teedev) {
1054 ret = ERR_PTR(-ENOMEM);
1055 goto err;
1056 }
1057
1058 max_id = TEE_NUM_DEVICES / 2;
1059
1060 if (teedesc->flags & TEE_DESC_PRIVILEGED) {
1061 offs = TEE_NUM_DEVICES / 2;
1062 max_id = TEE_NUM_DEVICES;
1063 }
1064
1065 spin_lock(&driver_lock);
1066 teedev->id = find_next_zero_bit(dev_mask, max_id, offs);
1067 if (teedev->id < max_id)
1068 set_bit(teedev->id, dev_mask);
1069 spin_unlock(&driver_lock);
1070
1071 if (teedev->id >= max_id) {
1072 ret = ERR_PTR(-ENOMEM);
1073 goto err;
1074 }
1075
1076 snprintf(teedev->name, sizeof(teedev->name), "tee%s%d",
1077 teedesc->flags & TEE_DESC_PRIVILEGED ? "priv" : "",
1078 teedev->id - offs);
1079
1080 teedev->dev.class = &tee_class;
1081 teedev->dev.release = tee_release_device;
1082 teedev->dev.parent = dev;
1083
1084 teedev->dev.devt = MKDEV(MAJOR(tee_devt), teedev->id);
1085
1086 rc = dev_set_name(&teedev->dev, "%s", teedev->name);
1087 if (rc) {
1088 ret = ERR_PTR(rc);
1089 goto err_devt;
1090 }
1091
1092 cdev_init(&teedev->cdev, &tee_fops);
1093 teedev->cdev.owner = teedesc->owner;
1094
1095 dev_set_drvdata(&teedev->dev, driver_data);
1096 device_initialize(&teedev->dev);
1097
1098 /* 1 as tee_device_unregister() does one final tee_device_put() */
1099 teedev->num_users = 1;
1100 init_completion(&teedev->c_no_users);
1101 mutex_init(&teedev->mutex);
1102 idr_init(&teedev->idr);
1103
1104 teedev->desc = teedesc;
1105 teedev->pool = pool;
1106
1107 return teedev;
1108 err_devt:
1109 unregister_chrdev_region(teedev->dev.devt, 1);
1110 err:
1111 pr_err("could not register %s driver\n",
1112 teedesc->flags & TEE_DESC_PRIVILEGED ? "privileged" : "client");
1113 if (teedev && teedev->id < TEE_NUM_DEVICES) {
1114 spin_lock(&driver_lock);
1115 clear_bit(teedev->id, dev_mask);
1116 spin_unlock(&driver_lock);
1117 }
1118 kfree(teedev);
1119 return ret;
1120 }
1121 EXPORT_SYMBOL_GPL(tee_device_alloc);
1122
tee_device_set_dev_groups(struct tee_device * teedev,const struct attribute_group ** dev_groups)1123 void tee_device_set_dev_groups(struct tee_device *teedev,
1124 const struct attribute_group **dev_groups)
1125 {
1126 teedev->dev.groups = dev_groups;
1127 }
1128 EXPORT_SYMBOL_GPL(tee_device_set_dev_groups);
1129
implementation_id_show(struct device * dev,struct device_attribute * attr,char * buf)1130 static ssize_t implementation_id_show(struct device *dev,
1131 struct device_attribute *attr, char *buf)
1132 {
1133 struct tee_device *teedev = container_of(dev, struct tee_device, dev);
1134 struct tee_ioctl_version_data vers;
1135
1136 teedev->desc->ops->get_version(teedev, &vers);
1137 return sysfs_emit(buf, "%d\n", vers.impl_id);
1138 }
1139 static DEVICE_ATTR_RO(implementation_id);
1140
/* Default attributes present on every TEE class device. */
static struct attribute *tee_dev_attrs[] = {
	&dev_attr_implementation_id.attr,
	NULL
};

static const struct attribute_group tee_dev_group = {
	.attrs = tee_dev_attrs,
};
1149
revision_show(struct device * dev,struct device_attribute * attr,char * buf)1150 static ssize_t revision_show(struct device *dev,
1151 struct device_attribute *attr, char *buf)
1152 {
1153 struct tee_device *teedev = container_of(dev, struct tee_device, dev);
1154 char version[TEE_REVISION_STR_SIZE];
1155 int ret;
1156
1157 if (!teedev->desc->ops->get_tee_revision)
1158 return -ENODEV;
1159
1160 ret = teedev->desc->ops->get_tee_revision(teedev, version,
1161 sizeof(version));
1162 if (ret)
1163 return ret;
1164
1165 return sysfs_emit(buf, "%s\n", version);
1166 }
1167 static DEVICE_ATTR_RO(revision);
1168
/* Optional "revision" attribute; hidden when the driver lacks the op. */
static struct attribute *tee_revision_attrs[] = {
	&dev_attr_revision.attr,
	NULL
};
1173
tee_revision_attr_is_visible(struct kobject * kobj,struct attribute * attr,int n)1174 static umode_t tee_revision_attr_is_visible(struct kobject *kobj,
1175 struct attribute *attr, int n)
1176 {
1177 struct device *dev = kobj_to_dev(kobj);
1178 struct tee_device *teedev = container_of(dev, struct tee_device, dev);
1179
1180 if (teedev->desc->ops->get_tee_revision)
1181 return attr->mode;
1182
1183 return 0;
1184 }
1185
/* Visibility-gated group for the optional revision attribute. */
static const struct attribute_group tee_revision_group = {
	.attrs = tee_revision_attrs,
	.is_visible = tee_revision_attr_is_visible,
};

/* Attribute groups attached to every device in the "tee" class. */
static const struct attribute_group *tee_dev_groups[] = {
	&tee_dev_group,
	&tee_revision_group,
	NULL
};

static const struct class tee_class = {
	.name = "tee",
	.dev_groups = tee_dev_groups,
};
1201
1202 /**
1203 * tee_device_register() - Registers a TEE device
1204 * @teedev: Device to register
1205 *
1206 * tee_device_unregister() need to be called to remove the @teedev if
1207 * this function fails.
1208 *
1209 * @returns < 0 on failure
1210 */
tee_device_register(struct tee_device * teedev)1211 int tee_device_register(struct tee_device *teedev)
1212 {
1213 int rc;
1214
1215 if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED) {
1216 dev_err(&teedev->dev, "attempt to register twice\n");
1217 return -EINVAL;
1218 }
1219
1220 rc = cdev_device_add(&teedev->cdev, &teedev->dev);
1221 if (rc) {
1222 dev_err(&teedev->dev,
1223 "unable to cdev_device_add() %s, major %d, minor %d, err=%d\n",
1224 teedev->name, MAJOR(teedev->dev.devt),
1225 MINOR(teedev->dev.devt), rc);
1226 return rc;
1227 }
1228
1229 teedev->flags |= TEE_DEVICE_FLAG_REGISTERED;
1230 return 0;
1231 }
1232 EXPORT_SYMBOL_GPL(tee_device_register);
1233
tee_device_put(struct tee_device * teedev)1234 void tee_device_put(struct tee_device *teedev)
1235 {
1236 mutex_lock(&teedev->mutex);
1237 /* Shouldn't put in this state */
1238 if (!WARN_ON(!teedev->desc)) {
1239 teedev->num_users--;
1240 if (!teedev->num_users) {
1241 teedev->desc = NULL;
1242 complete(&teedev->c_no_users);
1243 }
1244 }
1245 mutex_unlock(&teedev->mutex);
1246 }
1247 EXPORT_SYMBOL_GPL(tee_device_put);
1248
tee_device_get(struct tee_device * teedev)1249 bool tee_device_get(struct tee_device *teedev)
1250 {
1251 mutex_lock(&teedev->mutex);
1252 if (!teedev->desc) {
1253 mutex_unlock(&teedev->mutex);
1254 return false;
1255 }
1256 teedev->num_users++;
1257 mutex_unlock(&teedev->mutex);
1258 return true;
1259 }
1260 EXPORT_SYMBOL_GPL(tee_device_get);
1261
/**
 * tee_device_unregister() - Removes a TEE device
 * @teedev:	Device to unregister
 *
 * This function should be called to remove the @teedev even if
 * tee_device_register() hasn't been called yet. Does nothing if
 * @teedev is NULL.
 */
void tee_device_unregister(struct tee_device *teedev)
{
	if (!teedev)
		return;

	/* Release any DMA heaps associated with this device. */
	tee_device_put_all_dma_heaps(teedev);

	/* Tear down the char device only if registration succeeded. */
	if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED)
		cdev_device_del(&teedev->cdev, &teedev->dev);

	/*
	 * Drop the initial reference taken in tee_device_alloc(), then
	 * block until tee_device_put() signals that the last user is gone.
	 */
	tee_device_put(teedev);
	wait_for_completion(&teedev->c_no_users);

	/*
	 * No need to take a mutex any longer now since teedev->desc was
	 * set to NULL before teedev->c_no_users was completed.
	 */

	teedev->pool = NULL;

	put_device(&teedev->dev);
}
EXPORT_SYMBOL_GPL(tee_device_unregister);
1293
1294 /**
1295 * tee_get_drvdata() - Return driver_data pointer
1296 * @teedev: Device containing the driver_data pointer
1297 * @returns the driver_data pointer supplied to tee_device_alloc().
1298 */
tee_get_drvdata(struct tee_device * teedev)1299 void *tee_get_drvdata(struct tee_device *teedev)
1300 {
1301 return dev_get_drvdata(&teedev->dev);
1302 }
1303 EXPORT_SYMBOL_GPL(tee_get_drvdata);
1304
/*
 * Bundles the caller's match callback and its arguments so they can be
 * passed through class_find_device()'s single data pointer.
 */
struct match_dev_data {
	struct tee_ioctl_version_data *vers;	/* filled in by get_version() */
	const void *data;			/* opaque argument for match() */
	int (*match)(struct tee_ioctl_version_data *, const void *);
};
1310
match_dev(struct device * dev,const void * data)1311 static int match_dev(struct device *dev, const void *data)
1312 {
1313 const struct match_dev_data *match_data = data;
1314 struct tee_device *teedev = container_of(dev, struct tee_device, dev);
1315
1316 teedev->desc->ops->get_version(teedev, match_data->vers);
1317 return match_data->match(match_data->vers, match_data->data);
1318 }
1319
/*
 * Open a context against the first TEE device accepted by @match,
 * resuming the class search after @start when given. Moves on to the
 * next matching device on open failure unless the error was -ENOMEM.
 * Returns a context or an ERR_PTR (-ENOENT when nothing matches).
 */
struct tee_context *
tee_client_open_context(struct tee_context *start,
			int (*match)(struct tee_ioctl_version_data *,
				     const void *),
			const void *data, struct tee_ioctl_version_data *vers)
{
	struct device *dev = NULL;
	struct device *put_dev = NULL;
	struct tee_context *ctx = NULL;
	struct tee_ioctl_version_data v;
	struct match_dev_data match_data = { vers ? vers : &v, data, match };

	if (start)
		dev = &start->teedev->dev;

	do {
		/* Returns the next matching device with a reference held. */
		dev = class_find_device(&tee_class, dev, &match_data, match_dev);
		if (!dev) {
			ctx = ERR_PTR(-ENOENT);
			break;
		}

		/* Drop the previous candidate's ref, keep the current one. */
		put_device(put_dev);
		put_dev = dev;

		ctx = teedev_open(container_of(dev, struct tee_device, dev));
	} while (IS_ERR(ctx) && PTR_ERR(ctx) != -ENOMEM);

	/* Release the reference on the last candidate (free(NULL)-style no-op). */
	put_device(put_dev);
	/*
	 * Default behaviour for in kernel client is to not wait for
	 * tee-supplicant if not present for any requests in this context.
	 * Also this flag could be configured again before call to
	 * tee_client_open_session() if any in kernel client requires
	 * different behaviour.
	 */
	if (!IS_ERR(ctx))
		ctx->supp_nowait = true;

	return ctx;
}
EXPORT_SYMBOL_GPL(tee_client_open_context);
1362
/* Counterpart to tee_client_open_context(); releases the context. */
void tee_client_close_context(struct tee_context *ctx)
{
	teedev_close_context(ctx);
}
EXPORT_SYMBOL_GPL(tee_client_close_context);
1368
tee_client_get_version(struct tee_context * ctx,struct tee_ioctl_version_data * vers)1369 void tee_client_get_version(struct tee_context *ctx,
1370 struct tee_ioctl_version_data *vers)
1371 {
1372 ctx->teedev->desc->ops->get_version(ctx->teedev, vers);
1373 }
1374 EXPORT_SYMBOL_GPL(tee_client_get_version);
1375
tee_client_open_session(struct tee_context * ctx,struct tee_ioctl_open_session_arg * arg,struct tee_param * param)1376 int tee_client_open_session(struct tee_context *ctx,
1377 struct tee_ioctl_open_session_arg *arg,
1378 struct tee_param *param)
1379 {
1380 if (!ctx->teedev->desc->ops->open_session)
1381 return -EINVAL;
1382 return ctx->teedev->desc->ops->open_session(ctx, arg, param);
1383 }
1384 EXPORT_SYMBOL_GPL(tee_client_open_session);
1385
/* Forward to the driver's close_session op; -EINVAL if unimplemented. */
int tee_client_close_session(struct tee_context *ctx, u32 session)
{
	const struct tee_driver_ops *ops = ctx->teedev->desc->ops;

	if (!ops->close_session)
		return -EINVAL;
	return ops->close_session(ctx, session);
}
EXPORT_SYMBOL_GPL(tee_client_close_session);
1393
/* Forward to the driver's system_session op; -EINVAL if unimplemented. */
int tee_client_system_session(struct tee_context *ctx, u32 session)
{
	const struct tee_driver_ops *ops = ctx->teedev->desc->ops;

	if (!ops->system_session)
		return -EINVAL;
	return ops->system_session(ctx, session);
}
EXPORT_SYMBOL_GPL(tee_client_system_session);
1401
tee_client_invoke_func(struct tee_context * ctx,struct tee_ioctl_invoke_arg * arg,struct tee_param * param)1402 int tee_client_invoke_func(struct tee_context *ctx,
1403 struct tee_ioctl_invoke_arg *arg,
1404 struct tee_param *param)
1405 {
1406 if (!ctx->teedev->desc->ops->invoke_func)
1407 return -EINVAL;
1408 return ctx->teedev->desc->ops->invoke_func(ctx, arg, param);
1409 }
1410 EXPORT_SYMBOL_GPL(tee_client_invoke_func);
1411
tee_client_cancel_req(struct tee_context * ctx,struct tee_ioctl_cancel_arg * arg)1412 int tee_client_cancel_req(struct tee_context *ctx,
1413 struct tee_ioctl_cancel_arg *arg)
1414 {
1415 if (!ctx->teedev->desc->ops->cancel_req)
1416 return -EINVAL;
1417 return ctx->teedev->desc->ops->cancel_req(ctx, arg->cancel_id,
1418 arg->session);
1419 }
1420
tee_client_device_match(struct device * dev,const struct device_driver * drv)1421 static int tee_client_device_match(struct device *dev,
1422 const struct device_driver *drv)
1423 {
1424 const struct tee_client_device_id *id_table;
1425 struct tee_client_device *tee_device;
1426
1427 id_table = to_tee_client_driver(drv)->id_table;
1428 tee_device = to_tee_client_device(dev);
1429
1430 while (!uuid_is_null(&id_table->uuid)) {
1431 if (uuid_equal(&tee_device->id.uuid, &id_table->uuid))
1432 return 1;
1433 id_table++;
1434 }
1435
1436 return 0;
1437 }
1438
tee_client_device_uevent(const struct device * dev,struct kobj_uevent_env * env)1439 static int tee_client_device_uevent(const struct device *dev,
1440 struct kobj_uevent_env *env)
1441 {
1442 uuid_t *dev_id = &to_tee_client_device(dev)->id.uuid;
1443
1444 return add_uevent_var(env, "MODALIAS=tee:%pUb", dev_id);
1445 }
1446
tee_client_device_probe(struct device * dev)1447 static int tee_client_device_probe(struct device *dev)
1448 {
1449 struct tee_client_device *tcdev = to_tee_client_device(dev);
1450 struct tee_client_driver *drv = to_tee_client_driver(dev->driver);
1451
1452 if (drv->probe)
1453 return drv->probe(tcdev);
1454 else
1455 return 0;
1456 }
1457
tee_client_device_remove(struct device * dev)1458 static void tee_client_device_remove(struct device *dev)
1459 {
1460 struct tee_client_device *tcdev = to_tee_client_device(dev);
1461 struct tee_client_driver *drv = to_tee_client_driver(dev->driver);
1462
1463 if (drv->remove)
1464 drv->remove(tcdev);
1465 }
1466
tee_client_device_shutdown(struct device * dev)1467 static void tee_client_device_shutdown(struct device *dev)
1468 {
1469 struct tee_client_device *tcdev = to_tee_client_device(dev);
1470 struct tee_client_driver *drv = to_tee_client_driver(dev->driver);
1471
1472 if (dev->driver && drv->shutdown)
1473 drv->shutdown(tcdev);
1474 }
1475
/* Bus used to bind TEE-resident application devices to client drivers. */
const struct bus_type tee_bus_type = {
	.name = "tee",
	.match = tee_client_device_match,
	.uevent = tee_client_device_uevent,
	.probe = tee_client_device_probe,
	.remove = tee_client_device_remove,
	.shutdown = tee_client_device_shutdown,
};
EXPORT_SYMBOL_GPL(tee_bus_type);
1485
tee_client_device_probe_legacy(struct tee_client_device * tcdev)1486 static int tee_client_device_probe_legacy(struct tee_client_device *tcdev)
1487 {
1488 struct device *dev = &tcdev->dev;
1489 struct device_driver *driver = dev->driver;
1490
1491 return driver->probe(dev);
1492 }
1493
tee_client_device_remove_legacy(struct tee_client_device * tcdev)1494 static void tee_client_device_remove_legacy(struct tee_client_device *tcdev)
1495 {
1496 struct device *dev = &tcdev->dev;
1497 struct device_driver *driver = dev->driver;
1498
1499 driver->remove(dev);
1500 }
1501
tee_client_device_shutdown_legacy(struct tee_client_device * tcdev)1502 static void tee_client_device_shutdown_legacy(struct tee_client_device *tcdev)
1503 {
1504 struct device *dev = &tcdev->dev;
1505 struct device_driver *driver = dev->driver;
1506
1507 driver->shutdown(dev);
1508 }
1509
__tee_client_driver_register(struct tee_client_driver * tee_driver,struct module * owner)1510 int __tee_client_driver_register(struct tee_client_driver *tee_driver,
1511 struct module *owner)
1512 {
1513 tee_driver->driver.owner = owner;
1514 tee_driver->driver.bus = &tee_bus_type;
1515
1516 /*
1517 * Drivers that have callbacks set for tee_driver->driver need updating
1518 * to use the callbacks in tee_driver instead. driver_register() warns
1519 * about that, so no need to warn here, too.
1520 */
1521 if (!tee_driver->probe && tee_driver->driver.probe)
1522 tee_driver->probe = tee_client_device_probe_legacy;
1523 if (!tee_driver->remove && tee_driver->driver.remove)
1524 tee_driver->remove = tee_client_device_remove_legacy;
1525 if (!tee_driver->shutdown && tee_driver->driver.probe)
1526 tee_driver->shutdown = tee_client_device_shutdown_legacy;
1527
1528 return driver_register(&tee_driver->driver);
1529 }
1530 EXPORT_SYMBOL_GPL(__tee_client_driver_register);
1531
/**
 * tee_client_driver_unregister() - Undo __tee_client_driver_register()
 * @tee_driver:	driver to remove from the TEE bus
 */
void tee_client_driver_unregister(struct tee_client_driver *tee_driver)
{
	driver_unregister(&tee_driver->driver);
}
EXPORT_SYMBOL_GPL(tee_client_driver_unregister);
1537
/* Module init: register class, char dev region and bus, in that order. */
static int __init tee_init(void)
{
	int rc;

	rc = class_register(&tee_class);
	if (rc) {
		pr_err("couldn't create class\n");
		return rc;
	}

	/* One major with TEE_NUM_DEVICES minors shared by all TEE devices. */
	rc = alloc_chrdev_region(&tee_devt, 0, TEE_NUM_DEVICES, "tee");
	if (rc) {
		pr_err("failed to allocate char dev region\n");
		goto out_unreg_class;
	}

	rc = bus_register(&tee_bus_type);
	if (rc) {
		pr_err("failed to register tee bus\n");
		goto out_unreg_chrdev;
	}

	return 0;

	/* Unwind in reverse order of registration. */
out_unreg_chrdev:
	unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES);
out_unreg_class:
	class_unregister(&tee_class);

	return rc;
}
1569
/* Module exit: undo tee_init() in reverse order of registration. */
static void __exit tee_exit(void)
{
	bus_unregister(&tee_bus_type);
	unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES);
	class_unregister(&tee_class);
}
1576
/* Core must come up before TEE drivers probe, hence subsys_initcall. */
subsys_initcall(tee_init);
module_exit(tee_exit);

MODULE_AUTHOR("Linaro");
MODULE_DESCRIPTION("TEE Driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS("DMA_BUF");
MODULE_IMPORT_NS("DMA_BUF_HEAP");
1586