// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2016, Linaro Limited
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/cdev.h>
#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/uaccess.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include "tee_private.h"

#define TEE_NUM_DEVICES	32

#define TEE_IOCTL_PARAM_SIZE(x) (sizeof(struct tee_param) * (x))

#define TEE_UUID_NS_NAME_SIZE	128

/*
 * TEE Client UUID name space identifier (UUIDv4)
 *
 * The value is a random UUID allocated as the name space identifier used
 * for forming client UUIDs for the TEE environment with the UUIDv5 scheme.
 */
static const uuid_t tee_client_uuid_ns = UUID_INIT(0x58ac9ca0, 0x2086, 0x4683,
						   0xa1, 0xb8, 0xec, 0x4b,
						   0xc0, 0x8e, 0x01, 0xb6);

/*
 * Unprivileged devices in the lower half range and privileged devices in
 * the upper half range.
 */
static DECLARE_BITMAP(dev_mask, TEE_NUM_DEVICES);
static DEFINE_SPINLOCK(driver_lock);

static struct class *tee_class;
static dev_t tee_devt;

static struct tee_context *teedev_open(struct tee_device *teedev)
{
	int rc;
	struct tee_context *ctx;

	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		rc = -ENOMEM;
		goto err;
	}

	kref_init(&ctx->refcount);
	ctx->teedev = teedev;
	rc = teedev->desc->ops->open(ctx);
	if (rc)
		goto err;

	return ctx;
err:
	kfree(ctx);
	tee_device_put(teedev);
	return ERR_PTR(rc);
}

void teedev_ctx_get(struct tee_context *ctx)
{
	if (ctx->releasing)
		return;

	kref_get(&ctx->refcount);
}

static void teedev_ctx_release(struct kref *ref)
{
	struct tee_context *ctx = container_of(ref, struct tee_context,
					       refcount);
	ctx->releasing = true;
	ctx->teedev->desc->ops->release(ctx);
	kfree(ctx);
}

void teedev_ctx_put(struct tee_context *ctx)
{
	if (ctx->releasing)
		return;

	kref_put(&ctx->refcount, teedev_ctx_release);
}

static void teedev_close_context(struct tee_context *ctx)
{
	tee_device_put(ctx->teedev);
	teedev_ctx_put(ctx);
}

static int tee_open(struct inode *inode, struct file *filp)
{
	struct tee_context *ctx;

	ctx = teedev_open(container_of(inode->i_cdev, struct tee_device, cdev));
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/*
	 * Default user-space behaviour is to wait for tee-supplicant
	 * if it is not present for any requests in this context.
	 */
	ctx->supp_nowait = false;
	filp->private_data = ctx;
	return 0;
}

static int tee_release(struct inode *inode, struct file *filp)
{
	teedev_close_context(filp->private_data);
	return 0;
}

/**
 * uuid_v5() - Calculate UUIDv5
 * @uuid: Resulting UUID
 * @ns: Name space ID for UUIDv5 function
 * @name: Name for UUIDv5 function
 * @size: Size of name
 *
 * UUIDv5 is specified in RFC 4122.
 *
 * This implements section (for SHA-1):
 * 4.3.  Algorithm for Creating a Name-Based UUID
 */
static int uuid_v5(uuid_t *uuid, const uuid_t *ns, const void *name,
		   size_t size)
{
	unsigned char hash[SHA1_DIGEST_SIZE];
	struct crypto_shash *shash = NULL;
	struct shash_desc *desc = NULL;
	int rc;

	shash = crypto_alloc_shash("sha1", 0, 0);
	if (IS_ERR(shash)) {
		rc = PTR_ERR(shash);
		pr_err("shash(sha1) allocation failed\n");
		return rc;
	}

	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(shash),
		       GFP_KERNEL);
	if (!desc) {
		rc = -ENOMEM;
		goto out_free_shash;
	}

	desc->tfm = shash;

	rc = crypto_shash_init(desc);
	if (rc < 0)
		goto out_free_desc;

	rc = crypto_shash_update(desc, (const u8 *)ns, sizeof(*ns));
	if (rc < 0)
		goto out_free_desc;

	rc = crypto_shash_update(desc, (const u8 *)name, size);
	if (rc < 0)
		goto out_free_desc;

	rc = crypto_shash_final(desc, hash);
	if (rc < 0)
		goto out_free_desc;

	memcpy(uuid->b, hash, UUID_SIZE);

	/* Tag for version 5 */
	uuid->b[6] = (hash[6] & 0x0F) | 0x50;
	uuid->b[8] = (hash[8] & 0x3F) | 0x80;

out_free_desc:
	kfree(desc);

out_free_shash:
	crypto_free_shash(shash);
	return rc;
}

int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method,
				 const u8 connection_data[TEE_IOCTL_UUID_LEN])
{
	gid_t ns_grp = (gid_t)-1;
	kgid_t grp = INVALID_GID;
	char *name = NULL;
	int name_len;
	int rc;

	if (connection_method == TEE_IOCTL_LOGIN_PUBLIC) {
		/* Nil UUID to be passed to TEE environment */
		uuid_copy(uuid, &uuid_null);
		return 0;
	}

	/*
	 * In the Linux environment the client UUID is based on UUIDv5.
	 *
	 * Determine the client UUID with the following semantics for 'name':
	 *
	 * For TEEC_LOGIN_USER:
	 * uid=<uid>
	 *
	 * For TEEC_LOGIN_GROUP:
	 * gid=<gid>
	 *
	 */

	name = kzalloc(TEE_UUID_NS_NAME_SIZE, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	switch (connection_method) {
	case TEE_IOCTL_LOGIN_USER:
		name_len = snprintf(name, TEE_UUID_NS_NAME_SIZE, "uid=%x",
				    current_euid().val);
		if (name_len >= TEE_UUID_NS_NAME_SIZE) {
			rc = -E2BIG;
			goto out_free_name;
		}
		break;

	case TEE_IOCTL_LOGIN_GROUP:
		memcpy(&ns_grp, connection_data, sizeof(gid_t));
		grp = make_kgid(current_user_ns(), ns_grp);
		if (!gid_valid(grp) || !in_egroup_p(grp)) {
			rc = -EPERM;
			goto out_free_name;
		}

		name_len = snprintf(name, TEE_UUID_NS_NAME_SIZE, "gid=%x",
				    grp.val);
		if (name_len >= TEE_UUID_NS_NAME_SIZE) {
			rc = -E2BIG;
			goto out_free_name;
		}
		break;

	default:
		rc = -EINVAL;
		goto out_free_name;
	}

	rc = uuid_v5(uuid, &tee_client_uuid_ns, name, name_len);
out_free_name:
	kfree(name);

	return rc;
}
EXPORT_SYMBOL_GPL(tee_session_calc_client_uuid);
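
/*
 * Example: how a client UUID is derived (illustrative sketch, not part of
 * the driver logic). For a user-space client opening a session with
 * TEE_IOCTL_LOGIN_USER and an effective uid of 1000 (0x3e8), the name
 * string becomes "uid=3e8" and the resulting client UUID is
 *
 *	uuid_v5(uuid, &tee_client_uuid_ns, "uid=3e8", strlen("uid=3e8"));
 *
 * i.e. SHA-1 over the name-space UUID bytes followed by the name string,
 * truncated to 16 bytes with the version (5) and variant bits patched in.
 * A group login works the same way with "gid=<gid in hex>", after the
 * requested gid has been validated against the caller's groups.
 */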

static int tee_ioctl_version(struct tee_context *ctx,
			     struct tee_ioctl_version_data __user *uvers)
{
	struct tee_ioctl_version_data vers;

	ctx->teedev->desc->ops->get_version(ctx->teedev, &vers);

	if (ctx->teedev->desc->flags & TEE_DESC_PRIVILEGED)
		vers.gen_caps |= TEE_GEN_CAP_PRIVILEGED;

	if (copy_to_user(uvers, &vers, sizeof(vers)))
		return -EFAULT;

	return 0;
}
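
/*
 * Example: querying the driver from user space (illustrative sketch; the
 * ioctl number and struct come from the UAPI header <linux/tee.h>, the
 * device path is whatever udev created for this tee_device, error
 * handling omitted):
 *
 *	int fd = open("/dev/tee0", O_RDWR | O_CLOEXEC);
 *	struct tee_ioctl_version_data vers;
 *
 *	if (fd >= 0 && !ioctl(fd, TEE_IOC_VERSION, &vers))
 *		printf("impl_id %u, gen_caps %#x\n",
 *		       vers.impl_id, vers.gen_caps);
 */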

static int tee_ioctl_shm_alloc(struct tee_context *ctx,
			       struct tee_ioctl_shm_alloc_data __user *udata)
{
	long ret;
	struct tee_ioctl_shm_alloc_data data;
	struct tee_shm *shm;

	if (copy_from_user(&data, udata, sizeof(data)))
		return -EFAULT;

	/* Currently no input flags are supported */
	if (data.flags)
		return -EINVAL;

	shm = tee_shm_alloc(ctx, data.size, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	data.id = shm->id;
	data.flags = shm->flags;
	data.size = shm->size;

	if (copy_to_user(udata, &data, sizeof(data)))
		ret = -EFAULT;
	else
		ret = tee_shm_get_fd(shm);

	/*
	 * The shared memory is freed when user space closes the file
	 * descriptor, or immediately if tee_shm_get_fd() failed.
	 */
	tee_shm_put(shm);
	return ret;
}

static int
tee_ioctl_shm_register(struct tee_context *ctx,
		       struct tee_ioctl_shm_register_data __user *udata)
{
	long ret;
	struct tee_ioctl_shm_register_data data;
	struct tee_shm *shm;

	if (copy_from_user(&data, udata, sizeof(data)))
		return -EFAULT;

	/* Currently no input flags are supported */
	if (data.flags)
		return -EINVAL;

	shm = tee_shm_register(ctx, data.addr, data.length,
			       TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	data.id = shm->id;
	data.flags = shm->flags;
	data.length = shm->size;

	if (copy_to_user(udata, &data, sizeof(data)))
		ret = -EFAULT;
	else
		ret = tee_shm_get_fd(shm);
	/*
	 * The shared memory is freed when user space closes the file
	 * descriptor, or immediately if tee_shm_get_fd() failed.
	 */
	tee_shm_put(shm);
	return ret;
}
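
/*
 * Example: allocating and mapping shared memory from user space
 * (illustrative sketch based on the UAPI in <linux/tee.h>; error handling
 * omitted). TEE_IOC_SHM_ALLOC returns a dma-buf file descriptor, so the
 * buffer is reached with mmap() and released with munmap()/close():
 *
 *	struct tee_ioctl_shm_alloc_data data = { .size = 4096 };
 *	int shm_fd = ioctl(fd, TEE_IOC_SHM_ALLOC, &data);
 *	void *p = mmap(NULL, data.size, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, shm_fd, 0);
 *
 * data.id is the identifier later passed in the 'c' field of a memref
 * parameter. TEE_IOC_SHM_REGISTER works the same way but wraps an
 * existing user buffer given by data.addr and data.length instead of
 * allocating a new one.
 */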

static int params_from_user(struct tee_context *ctx, struct tee_param *params,
			    size_t num_params,
			    struct tee_ioctl_param __user *uparams)
{
	size_t n;

	for (n = 0; n < num_params; n++) {
		struct tee_shm *shm;
		struct tee_ioctl_param ip;

		if (copy_from_user(&ip, uparams + n, sizeof(ip)))
			return -EFAULT;

		/* All unused attribute bits have to be zero */
		if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_MASK)
			return -EINVAL;

		params[n].attr = ip.attr;
		switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			params[n].u.value.a = ip.a;
			params[n].u.value.b = ip.b;
			params[n].u.value.c = ip.c;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			/*
			 * If we fail to get a pointer to a shared memory
			 * object (and increase the ref count) from an
			 * identifier we return an error. All pointers that
			 * have been added in params have an increased ref
			 * count. It's the caller's responsibility to do
			 * tee_shm_put() on all resolved pointers.
			 */
			shm = tee_shm_get_from_id(ctx, ip.c);
			if (IS_ERR(shm))
				return PTR_ERR(shm);

			/*
			 * Ensure offset + size does not overflow offset
			 * and does not overflow the size of the referred
			 * shared memory object.
			 */
			if ((ip.a + ip.b) < ip.a ||
			    (ip.a + ip.b) > shm->size) {
				tee_shm_put(shm);
				return -EINVAL;
			}

			params[n].u.memref.shm_offs = ip.a;
			params[n].u.memref.size = ip.b;
			params[n].u.memref.shm = shm;
			break;
		default:
			/* Unknown attribute */
			return -EINVAL;
		}
	}
	return 0;
}

static int params_to_user(struct tee_ioctl_param __user *uparams,
			  size_t num_params, struct tee_param *params)
{
	size_t n;

	for (n = 0; n < num_params; n++) {
		struct tee_ioctl_param __user *up = uparams + n;
		struct tee_param *p = params + n;

		switch (p->attr) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			if (put_user(p->u.value.a, &up->a) ||
			    put_user(p->u.value.b, &up->b) ||
			    put_user(p->u.value.c, &up->c))
				return -EFAULT;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			if (put_user((u64)p->u.memref.size, &up->b))
				return -EFAULT;
			break;
		default:
			break;
		}
	}
	return 0;
}
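
/*
 * Example: how a memref parameter travels through the ioctl interface
 * (summary of the two helpers above, for reference). User space fills a
 * struct tee_ioctl_param with
 *
 *	attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT;
 *	a    = offset into the shared memory object;
 *	b    = size of the referenced region;
 *	c    = shm id returned by TEE_IOC_SHM_ALLOC/TEE_IOC_SHM_REGISTER;
 *
 * params_from_user() resolves 'c' into a struct tee_shm reference and
 * checks that a + b stays within the object; params_to_user() only
 * writes the possibly updated size back into 'b'.
 */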

static int tee_ioctl_open_session(struct tee_context *ctx,
				  struct tee_ioctl_buf_data __user *ubuf)
{
	int rc;
	size_t n;
	struct tee_ioctl_buf_data buf;
	struct tee_ioctl_open_session_arg __user *uarg;
	struct tee_ioctl_open_session_arg arg;
	struct tee_ioctl_param __user *uparams = NULL;
	struct tee_param *params = NULL;
	bool have_session = false;

	if (!ctx->teedev->desc->ops->open_session)
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, sizeof(buf)))
		return -EFAULT;

	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
	    buf.buf_len < sizeof(struct tee_ioctl_open_session_arg))
		return -EINVAL;

	uarg = u64_to_user_ptr(buf.buf_ptr);
	if (copy_from_user(&arg, uarg, sizeof(arg)))
		return -EFAULT;

	if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len)
		return -EINVAL;

	if (arg.num_params) {
		params = kcalloc(arg.num_params, sizeof(struct tee_param),
				 GFP_KERNEL);
		if (!params)
			return -ENOMEM;
		uparams = uarg->params;
		rc = params_from_user(ctx, params, arg.num_params, uparams);
		if (rc)
			goto out;
	}

	if (arg.clnt_login >= TEE_IOCTL_LOGIN_REE_KERNEL_MIN &&
	    arg.clnt_login <= TEE_IOCTL_LOGIN_REE_KERNEL_MAX) {
		pr_debug("login method not allowed for user-space client\n");
		rc = -EPERM;
		goto out;
	}

	rc = ctx->teedev->desc->ops->open_session(ctx, &arg, params);
	if (rc)
		goto out;
	have_session = true;

	if (put_user(arg.session, &uarg->session) ||
	    put_user(arg.ret, &uarg->ret) ||
	    put_user(arg.ret_origin, &uarg->ret_origin)) {
		rc = -EFAULT;
		goto out;
	}
	rc = params_to_user(uparams, arg.num_params, params);
out:
	/*
	 * If we've succeeded in opening the session but failed to
	 * communicate it back to user space, close the session again to
	 * avoid leakage.
	 */
	if (rc && have_session && ctx->teedev->desc->ops->close_session)
		ctx->teedev->desc->ops->close_session(ctx, arg.session);

	if (params) {
		/* Decrease ref count for all valid shared memory pointers */
		for (n = 0; n < arg.num_params; n++)
			if (tee_param_is_memref(params + n) &&
			    params[n].u.memref.shm)
				tee_shm_put(params[n].u.memref.shm);
		kfree(params);
	}

	return rc;
}
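
/*
 * Example: the argument buffer expected by TEE_IOC_OPEN_SESSION
 * (illustrative sketch; the structs and constants come from the UAPI
 * header <linux/tee.h>, ta_uuid is a placeholder for the trusted
 * application UUID bytes, error handling omitted). The session argument
 * and its parameters are packed into one buffer described by a
 * struct tee_ioctl_buf_data, so that buf_len equals
 * sizeof(*arg) + num_params * sizeof(struct tee_ioctl_param):
 *
 *	const size_t len = sizeof(struct tee_ioctl_open_session_arg) +
 *			   1 * sizeof(struct tee_ioctl_param);
 *	struct tee_ioctl_open_session_arg *arg = calloc(1, len);
 *	struct tee_ioctl_buf_data buf = {
 *		.buf_ptr = (uintptr_t)arg,
 *		.buf_len = len,
 *	};
 *
 *	memcpy(arg->uuid, ta_uuid, sizeof(arg->uuid));
 *	arg->clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
 *	arg->num_params = 1;
 *	arg->params[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
 *
 *	ioctl(fd, TEE_IOC_OPEN_SESSION, &buf);
 *
 * On success arg->session identifies the session and arg->ret holds the
 * return code reported by the TEE. TEE_IOC_INVOKE below uses the same
 * packing with struct tee_ioctl_invoke_arg and the session id obtained
 * here.
 */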

static int tee_ioctl_invoke(struct tee_context *ctx,
			    struct tee_ioctl_buf_data __user *ubuf)
{
	int rc;
	size_t n;
	struct tee_ioctl_buf_data buf;
	struct tee_ioctl_invoke_arg __user *uarg;
	struct tee_ioctl_invoke_arg arg;
	struct tee_ioctl_param __user *uparams = NULL;
	struct tee_param *params = NULL;

	if (!ctx->teedev->desc->ops->invoke_func)
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, sizeof(buf)))
		return -EFAULT;

	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
	    buf.buf_len < sizeof(struct tee_ioctl_invoke_arg))
		return -EINVAL;

	uarg = u64_to_user_ptr(buf.buf_ptr);
	if (copy_from_user(&arg, uarg, sizeof(arg)))
		return -EFAULT;

	if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len)
		return -EINVAL;

	if (arg.num_params) {
		params = kcalloc(arg.num_params, sizeof(struct tee_param),
				 GFP_KERNEL);
		if (!params)
			return -ENOMEM;
		uparams = uarg->params;
		rc = params_from_user(ctx, params, arg.num_params, uparams);
		if (rc)
			goto out;
	}

	rc = ctx->teedev->desc->ops->invoke_func(ctx, &arg, params);
	if (rc)
		goto out;

	if (put_user(arg.ret, &uarg->ret) ||
	    put_user(arg.ret_origin, &uarg->ret_origin)) {
		rc = -EFAULT;
		goto out;
	}
	rc = params_to_user(uparams, arg.num_params, params);
out:
	if (params) {
		/* Decrease ref count for all valid shared memory pointers */
		for (n = 0; n < arg.num_params; n++)
			if (tee_param_is_memref(params + n) &&
			    params[n].u.memref.shm)
				tee_shm_put(params[n].u.memref.shm);
		kfree(params);
	}
	return rc;
}

static int tee_ioctl_cancel(struct tee_context *ctx,
			    struct tee_ioctl_cancel_arg __user *uarg)
{
	struct tee_ioctl_cancel_arg arg;

	if (!ctx->teedev->desc->ops->cancel_req)
		return -EINVAL;

	if (copy_from_user(&arg, uarg, sizeof(arg)))
		return -EFAULT;

	return ctx->teedev->desc->ops->cancel_req(ctx, arg.cancel_id,
						  arg.session);
}

static int
tee_ioctl_close_session(struct tee_context *ctx,
			struct tee_ioctl_close_session_arg __user *uarg)
{
	struct tee_ioctl_close_session_arg arg;

	if (!ctx->teedev->desc->ops->close_session)
		return -EINVAL;

	if (copy_from_user(&arg, uarg, sizeof(arg)))
		return -EFAULT;

	return ctx->teedev->desc->ops->close_session(ctx, arg.session);
}

static int params_to_supp(struct tee_context *ctx,
			  struct tee_ioctl_param __user *uparams,
			  size_t num_params, struct tee_param *params)
{
	size_t n;

	for (n = 0; n < num_params; n++) {
		struct tee_ioctl_param ip;
		struct tee_param *p = params + n;

		ip.attr = p->attr;
		switch (p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			ip.a = p->u.value.a;
			ip.b = p->u.value.b;
			ip.c = p->u.value.c;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			ip.b = p->u.memref.size;
			if (!p->u.memref.shm) {
				ip.a = 0;
				ip.c = (u64)-1; /* invalid shm id */
				break;
			}
			ip.a = p->u.memref.shm_offs;
			ip.c = p->u.memref.shm->id;
			break;
		default:
			ip.a = 0;
			ip.b = 0;
			ip.c = 0;
			break;
		}

		if (copy_to_user(uparams + n, &ip, sizeof(ip)))
			return -EFAULT;
	}

	return 0;
}

static int tee_ioctl_supp_recv(struct tee_context *ctx,
			       struct tee_ioctl_buf_data __user *ubuf)
{
	int rc;
	struct tee_ioctl_buf_data buf;
	struct tee_iocl_supp_recv_arg __user *uarg;
	struct tee_param *params;
	u32 num_params;
	u32 func;

	if (!ctx->teedev->desc->ops->supp_recv)
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, sizeof(buf)))
		return -EFAULT;

	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
	    buf.buf_len < sizeof(struct tee_iocl_supp_recv_arg))
		return -EINVAL;

	uarg = u64_to_user_ptr(buf.buf_ptr);
	if (get_user(num_params, &uarg->num_params))
		return -EFAULT;

	if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) != buf.buf_len)
		return -EINVAL;

	params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	rc = params_from_user(ctx, params, num_params, uarg->params);
	if (rc)
		goto out;

	rc = ctx->teedev->desc->ops->supp_recv(ctx, &func, &num_params, params);
	if (rc)
		goto out;

	if (put_user(func, &uarg->func) ||
	    put_user(num_params, &uarg->num_params)) {
		rc = -EFAULT;
		goto out;
	}

	rc = params_to_supp(ctx, uarg->params, num_params, params);
out:
	kfree(params);
	return rc;
}

static int params_from_supp(struct tee_param *params, size_t num_params,
			    struct tee_ioctl_param __user *uparams)
{
	size_t n;

	for (n = 0; n < num_params; n++) {
		struct tee_param *p = params + n;
		struct tee_ioctl_param ip;

		if (copy_from_user(&ip, uparams + n, sizeof(ip)))
			return -EFAULT;

		/* All unused attribute bits have to be zero */
		if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_MASK)
			return -EINVAL;

		p->attr = ip.attr;
		switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			/* Only out and in/out values can be updated */
			p->u.value.a = ip.a;
			p->u.value.b = ip.b;
			p->u.value.c = ip.c;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			/*
			 * Only the size of the memref can be updated.
			 * Since we don't have access to the original
			 * parameters here, only store the supplied size.
			 * The driver will copy the updated size into the
			 * original parameters.
			 */
			p->u.memref.shm = NULL;
			p->u.memref.shm_offs = 0;
			p->u.memref.size = ip.b;
			break;
		default:
			memset(&p->u, 0, sizeof(p->u));
			break;
		}
	}
	return 0;
}

static int tee_ioctl_supp_send(struct tee_context *ctx,
			       struct tee_ioctl_buf_data __user *ubuf)
{
	long rc;
	struct tee_ioctl_buf_data buf;
	struct tee_iocl_supp_send_arg __user *uarg;
	struct tee_param *params;
	u32 num_params;
	u32 ret;

	/* Not valid for this driver */
	if (!ctx->teedev->desc->ops->supp_send)
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, sizeof(buf)))
		return -EFAULT;

	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
	    buf.buf_len < sizeof(struct tee_iocl_supp_send_arg))
		return -EINVAL;

	uarg = u64_to_user_ptr(buf.buf_ptr);
	if (get_user(ret, &uarg->ret) ||
	    get_user(num_params, &uarg->num_params))
		return -EFAULT;

	if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) > buf.buf_len)
		return -EINVAL;

	params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	rc = params_from_supp(params, num_params, uarg->params);
	if (rc)
		goto out;

	rc = ctx->teedev->desc->ops->supp_send(ctx, ret, num_params, params);
out:
	kfree(params);
	return rc;
}
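
/*
 * Example: the request loop run by tee-supplicant in user space
 * (illustrative sketch; recv_buf and send_buf are struct tee_ioctl_buf_data
 * wrapping a struct tee_iocl_supp_recv_arg and a struct
 * tee_iocl_supp_send_arg, set up like the other buf_data based ioctls;
 * handle_request() is a hypothetical helper, error handling omitted).
 * TEE_IOC_SUPPL_RECV blocks until the driver has a request for the
 * supplicant, TEE_IOC_SUPPL_SEND posts the answer:
 *
 *	for (;;) {
 *		ioctl(fd, TEE_IOC_SUPPL_RECV, &recv_buf);
 *		handle_request(recv_arg->func, recv_arg->num_params,
 *			       recv_arg->params);
 *		ioctl(fd, TEE_IOC_SUPPL_SEND, &send_buf);
 *	}
 */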

static long tee_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct tee_context *ctx = filp->private_data;
	void __user *uarg = (void __user *)arg;

	switch (cmd) {
	case TEE_IOC_VERSION:
		return tee_ioctl_version(ctx, uarg);
	case TEE_IOC_SHM_ALLOC:
		return tee_ioctl_shm_alloc(ctx, uarg);
	case TEE_IOC_SHM_REGISTER:
		return tee_ioctl_shm_register(ctx, uarg);
	case TEE_IOC_OPEN_SESSION:
		return tee_ioctl_open_session(ctx, uarg);
	case TEE_IOC_INVOKE:
		return tee_ioctl_invoke(ctx, uarg);
	case TEE_IOC_CANCEL:
		return tee_ioctl_cancel(ctx, uarg);
	case TEE_IOC_CLOSE_SESSION:
		return tee_ioctl_close_session(ctx, uarg);
	case TEE_IOC_SUPPL_RECV:
		return tee_ioctl_supp_recv(ctx, uarg);
	case TEE_IOC_SUPPL_SEND:
		return tee_ioctl_supp_send(ctx, uarg);
	default:
		return -EINVAL;
	}
}

static const struct file_operations tee_fops = {
	.owner = THIS_MODULE,
	.open = tee_open,
	.release = tee_release,
	.unlocked_ioctl = tee_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
};

static void tee_release_device(struct device *dev)
{
	struct tee_device *teedev = container_of(dev, struct tee_device, dev);

	spin_lock(&driver_lock);
	clear_bit(teedev->id, dev_mask);
	spin_unlock(&driver_lock);
	mutex_destroy(&teedev->mutex);
	idr_destroy(&teedev->idr);
	kfree(teedev);
}

/**
 * tee_device_alloc() - Allocate a new struct tee_device instance
 * @teedesc:	Descriptor for this driver
 * @dev:	Parent device for this device
 * @pool:	Shared memory pool, NULL if not used
 * @driver_data: Private driver data for this device
 *
 * Allocates a new struct tee_device instance. The device is
 * removed by tee_device_unregister().
 *
 * @returns a pointer to a 'struct tee_device' or an ERR_PTR on failure
 */
struct tee_device *tee_device_alloc(const struct tee_desc *teedesc,
				    struct device *dev,
				    struct tee_shm_pool *pool,
				    void *driver_data)
{
	struct tee_device *teedev;
	void *ret;
	int rc, max_id;
	int offs = 0;

	if (!teedesc || !teedesc->name || !teedesc->ops ||
	    !teedesc->ops->get_version || !teedesc->ops->open ||
	    !teedesc->ops->release || !pool)
		return ERR_PTR(-EINVAL);

	teedev = kzalloc(sizeof(*teedev), GFP_KERNEL);
	if (!teedev) {
		ret = ERR_PTR(-ENOMEM);
		goto err;
	}

	max_id = TEE_NUM_DEVICES / 2;

	if (teedesc->flags & TEE_DESC_PRIVILEGED) {
		offs = TEE_NUM_DEVICES / 2;
		max_id = TEE_NUM_DEVICES;
	}

	spin_lock(&driver_lock);
	teedev->id = find_next_zero_bit(dev_mask, max_id, offs);
	if (teedev->id < max_id)
		set_bit(teedev->id, dev_mask);
	spin_unlock(&driver_lock);

	if (teedev->id >= max_id) {
		ret = ERR_PTR(-ENOMEM);
		goto err;
	}

	snprintf(teedev->name, sizeof(teedev->name), "tee%s%d",
		 teedesc->flags & TEE_DESC_PRIVILEGED ? "priv" : "",
		 teedev->id - offs);

	teedev->dev.class = tee_class;
	teedev->dev.release = tee_release_device;
	teedev->dev.parent = dev;

	teedev->dev.devt = MKDEV(MAJOR(tee_devt), teedev->id);

	rc = dev_set_name(&teedev->dev, "%s", teedev->name);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err_devt;
	}

	cdev_init(&teedev->cdev, &tee_fops);
	teedev->cdev.owner = teedesc->owner;
	teedev->cdev.kobj.parent = &teedev->dev.kobj;

	dev_set_drvdata(&teedev->dev, driver_data);
	device_initialize(&teedev->dev);

	/* 1 as tee_device_unregister() does one final tee_device_put() */
	teedev->num_users = 1;
	init_completion(&teedev->c_no_users);
	mutex_init(&teedev->mutex);
	idr_init(&teedev->idr);

	teedev->desc = teedesc;
	teedev->pool = pool;

	return teedev;
err_devt:
	unregister_chrdev_region(teedev->dev.devt, 1);
err:
	pr_err("could not register %s driver\n",
	       teedesc->flags & TEE_DESC_PRIVILEGED ? "privileged" : "client");
	if (teedev && teedev->id < TEE_NUM_DEVICES) {
		spin_lock(&driver_lock);
		clear_bit(teedev->id, dev_mask);
		spin_unlock(&driver_lock);
	}
	kfree(teedev);
	return ret;
}
EXPORT_SYMBOL_GPL(tee_device_alloc);

static ssize_t implementation_id_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct tee_device *teedev = container_of(dev, struct tee_device, dev);
	struct tee_ioctl_version_data vers;

	teedev->desc->ops->get_version(teedev, &vers);
	return scnprintf(buf, PAGE_SIZE, "%d\n", vers.impl_id);
}
static DEVICE_ATTR_RO(implementation_id);

static struct attribute *tee_dev_attrs[] = {
	&dev_attr_implementation_id.attr,
	NULL
};

static const struct attribute_group tee_dev_group = {
	.attrs = tee_dev_attrs,
};

/**
 * tee_device_register() - Registers a TEE device
 * @teedev:	Device to register
 *
 * tee_device_unregister() needs to be called to remove the @teedev if
 * this function fails.
 *
 * @returns < 0 on failure
 */
int tee_device_register(struct tee_device *teedev)
{
	int rc;

	if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED) {
		dev_err(&teedev->dev, "attempt to register twice\n");
		return -EINVAL;
	}

	rc = cdev_add(&teedev->cdev, teedev->dev.devt, 1);
	if (rc) {
		dev_err(&teedev->dev,
			"unable to cdev_add() %s, major %d, minor %d, err=%d\n",
			teedev->name, MAJOR(teedev->dev.devt),
			MINOR(teedev->dev.devt), rc);
		return rc;
	}

	rc = device_add(&teedev->dev);
	if (rc) {
		dev_err(&teedev->dev,
			"unable to device_add() %s, major %d, minor %d, err=%d\n",
			teedev->name, MAJOR(teedev->dev.devt),
			MINOR(teedev->dev.devt), rc);
		goto err_device_add;
	}

	rc = sysfs_create_group(&teedev->dev.kobj, &tee_dev_group);
	if (rc) {
		dev_err(&teedev->dev,
			"failed to create sysfs attributes, err=%d\n", rc);
		goto err_sysfs_create_group;
	}

	teedev->flags |= TEE_DEVICE_FLAG_REGISTERED;
	return 0;

err_sysfs_create_group:
	device_del(&teedev->dev);
err_device_add:
	cdev_del(&teedev->cdev);
	return rc;
}
EXPORT_SYMBOL_GPL(tee_device_register);
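
/*
 * Example: how a TEE driver typically brings up its character device
 * (illustrative sketch; my_tee_desc, my_tee_ops, my_tee_pool and
 * my_tee_data are driver-specific placeholders, error handling
 * abbreviated):
 *
 *	static const struct tee_desc my_tee_desc = {
 *		.name = "my-tee-clnt",
 *		.ops = &my_tee_ops,
 *		.owner = THIS_MODULE,
 *	};
 *
 *	teedev = tee_device_alloc(&my_tee_desc, NULL, my_tee_pool,
 *				  my_tee_data);
 *	if (IS_ERR(teedev))
 *		return PTR_ERR(teedev);
 *	rc = tee_device_register(teedev);
 *	if (rc)
 *		tee_device_unregister(teedev);
 *
 * tee_device_unregister() must also be called on the normal teardown
 * path; see its kernel-doc below.
 */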

void tee_device_put(struct tee_device *teedev)
{
	mutex_lock(&teedev->mutex);
	/* Shouldn't put in this state */
	if (!WARN_ON(!teedev->desc)) {
		teedev->num_users--;
		if (!teedev->num_users) {
			teedev->desc = NULL;
			complete(&teedev->c_no_users);
		}
	}
	mutex_unlock(&teedev->mutex);
}

bool tee_device_get(struct tee_device *teedev)
{
	mutex_lock(&teedev->mutex);
	if (!teedev->desc) {
		mutex_unlock(&teedev->mutex);
		return false;
	}
	teedev->num_users++;
	mutex_unlock(&teedev->mutex);
	return true;
}

/**
 * tee_device_unregister() - Removes a TEE device
 * @teedev:	Device to unregister
 *
 * This function should be called to remove the @teedev even if
 * tee_device_register() hasn't been called yet. Does nothing if
 * @teedev is NULL.
 */
void tee_device_unregister(struct tee_device *teedev)
{
	if (!teedev)
		return;

	if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED) {
		sysfs_remove_group(&teedev->dev.kobj, &tee_dev_group);
		cdev_del(&teedev->cdev);
		device_del(&teedev->dev);
	}

	tee_device_put(teedev);
	wait_for_completion(&teedev->c_no_users);

	/*
	 * No need to take a mutex any longer now since teedev->desc was
	 * set to NULL before teedev->c_no_users was completed.
	 */

	teedev->pool = NULL;

	put_device(&teedev->dev);
}
EXPORT_SYMBOL_GPL(tee_device_unregister);

/**
 * tee_get_drvdata() - Return driver_data pointer
 * @teedev:	Device containing the driver_data pointer
 * @returns the driver_data pointer supplied to tee_device_alloc().
 */
void *tee_get_drvdata(struct tee_device *teedev)
{
	return dev_get_drvdata(&teedev->dev);
}
EXPORT_SYMBOL_GPL(tee_get_drvdata);

struct match_dev_data {
	struct tee_ioctl_version_data *vers;
	const void *data;
	int (*match)(struct tee_ioctl_version_data *, const void *);
};

static int match_dev(struct device *dev, const void *data)
{
	const struct match_dev_data *match_data = data;
	struct tee_device *teedev = container_of(dev, struct tee_device, dev);

	teedev->desc->ops->get_version(teedev, match_data->vers);
	return match_data->match(match_data->vers, match_data->data);
}

struct tee_context *
tee_client_open_context(struct tee_context *start,
			int (*match)(struct tee_ioctl_version_data *,
				     const void *),
			const void *data, struct tee_ioctl_version_data *vers)
{
	struct device *dev = NULL;
	struct device *put_dev = NULL;
	struct tee_context *ctx = NULL;
	struct tee_ioctl_version_data v;
	struct match_dev_data match_data = { vers ? vers : &v, data, match };

	if (start)
		dev = &start->teedev->dev;

	do {
		dev = class_find_device(tee_class, dev, &match_data, match_dev);
		if (!dev) {
			ctx = ERR_PTR(-ENOENT);
			break;
		}

		put_device(put_dev);
		put_dev = dev;

		ctx = teedev_open(container_of(dev, struct tee_device, dev));
	} while (IS_ERR(ctx) && PTR_ERR(ctx) != -ENOMEM);

	put_device(put_dev);
	/*
	 * Default behaviour for in-kernel clients is to not wait for
	 * tee-supplicant if it is not present for any requests in this
	 * context. This flag can be configured again before the call to
	 * tee_client_open_session() if an in-kernel client requires
	 * different behaviour.
	 */
	if (!IS_ERR(ctx))
		ctx->supp_nowait = true;

	return ctx;
}
EXPORT_SYMBOL_GPL(tee_client_open_context);

void tee_client_close_context(struct tee_context *ctx)
{
	teedev_close_context(ctx);
}
EXPORT_SYMBOL_GPL(tee_client_close_context);

void tee_client_get_version(struct tee_context *ctx,
			    struct tee_ioctl_version_data *vers)
{
	ctx->teedev->desc->ops->get_version(ctx->teedev, vers);
}
EXPORT_SYMBOL_GPL(tee_client_get_version);

int tee_client_open_session(struct tee_context *ctx,
			    struct tee_ioctl_open_session_arg *arg,
			    struct tee_param *param)
{
	if (!ctx->teedev->desc->ops->open_session)
		return -EINVAL;
	return ctx->teedev->desc->ops->open_session(ctx, arg, param);
}
EXPORT_SYMBOL_GPL(tee_client_open_session);

int tee_client_close_session(struct tee_context *ctx, u32 session)
{
	if (!ctx->teedev->desc->ops->close_session)
		return -EINVAL;
	return ctx->teedev->desc->ops->close_session(ctx, session);
}
EXPORT_SYMBOL_GPL(tee_client_close_session);

int tee_client_invoke_func(struct tee_context *ctx,
			   struct tee_ioctl_invoke_arg *arg,
			   struct tee_param *param)
{
	if (!ctx->teedev->desc->ops->invoke_func)
		return -EINVAL;
	return ctx->teedev->desc->ops->invoke_func(ctx, arg, param);
}
EXPORT_SYMBOL_GPL(tee_client_invoke_func);

int tee_client_cancel_req(struct tee_context *ctx,
			  struct tee_ioctl_cancel_arg *arg)
{
	if (!ctx->teedev->desc->ops->cancel_req)
		return -EINVAL;
	return ctx->teedev->desc->ops->cancel_req(ctx, arg->cancel_id,
						  arg->session);
}
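
/*
 * Example: how an in-kernel client typically obtains a context
 * (illustrative sketch modelled on existing users of this API; the match
 * callback only selects which TEE implementation the client is willing
 * to talk to):
 *
 *	static int optee_ctx_match(struct tee_ioctl_version_data *ver,
 *				   const void *data)
 *	{
 *		return ver->impl_id == TEE_IMPL_ID_OPTEE;
 *	}
 *
 *	ctx = tee_client_open_context(NULL, optee_ctx_match, NULL, NULL);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	... open a session and invoke functions on the trusted application ...
 *	tee_client_close_context(ctx);
 */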

static int tee_client_device_match(struct device *dev,
				   struct device_driver *drv)
{
	const struct tee_client_device_id *id_table;
	struct tee_client_device *tee_device;

	id_table = to_tee_client_driver(drv)->id_table;
	tee_device = to_tee_client_device(dev);

	while (!uuid_is_null(&id_table->uuid)) {
		if (uuid_equal(&tee_device->id.uuid, &id_table->uuid))
			return 1;
		id_table++;
	}

	return 0;
}

static int tee_client_device_uevent(struct device *dev,
				    struct kobj_uevent_env *env)
{
	uuid_t *dev_id = &to_tee_client_device(dev)->id.uuid;

	return add_uevent_var(env, "MODALIAS=tee:%pUb", dev_id);
}

struct bus_type tee_bus_type = {
	.name = "tee",
	.match = tee_client_device_match,
	.uevent = tee_client_device_uevent,
};
EXPORT_SYMBOL_GPL(tee_bus_type);

static int __init tee_init(void)
{
	int rc;

	tee_class = class_create(THIS_MODULE, "tee");
	if (IS_ERR(tee_class)) {
		pr_err("couldn't create class\n");
		return PTR_ERR(tee_class);
	}

	rc = alloc_chrdev_region(&tee_devt, 0, TEE_NUM_DEVICES, "tee");
	if (rc) {
		pr_err("failed to allocate char dev region\n");
		goto out_unreg_class;
	}

	rc = bus_register(&tee_bus_type);
	if (rc) {
		pr_err("failed to register tee bus\n");
		goto out_unreg_chrdev;
	}

	return 0;

out_unreg_chrdev:
	unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES);
out_unreg_class:
	class_destroy(tee_class);
	tee_class = NULL;

	return rc;
}

static void __exit tee_exit(void)
{
	bus_unregister(&tee_bus_type);
	unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES);
	class_destroy(tee_class);
	tee_class = NULL;
}

subsys_initcall(tee_init);
module_exit(tee_exit);

MODULE_AUTHOR("Linaro");
MODULE_DESCRIPTION("TEE Driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL v2");
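
/*
 * Example: how a client driver binds to a device on the tee bus above
 * (illustrative sketch modelled on existing tee_bus_type users; the UUID
 * value and driver name are placeholders). Devices on this bus are
 * matched against the driver's id_table by UUID, and the MODALIAS
 * emitted by tee_client_device_uevent() lets udev autoload the module:
 *
 *	static const struct tee_client_device_id my_ta_id_table[] = {
 *		{ UUID_INIT(0x11111111, 0x2222, 0x3333,
 *			    0x44, 0x55, 0x66, 0x77,
 *			    0x88, 0x99, 0xaa, 0xbb) },
 *		{}
 *	};
 *	MODULE_DEVICE_TABLE(tee, my_ta_id_table);
 *
 *	static struct tee_client_driver my_ta_driver = {
 *		.id_table = my_ta_id_table,
 *		.driver = {
 *			.name = "my_ta",
 *			.bus = &tee_bus_type,
 *			.probe = my_ta_probe,
 *			.remove = my_ta_remove,
 *		},
 *	};
 *	module_driver(my_ta_driver, driver_register, driver_unregister);
 */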