// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Linaro Limited
 */
#include <linux/arm-smccc.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include "optee_private.h"
#include "optee_smc.h"

struct optee_call_waiter {
	struct list_head list_node;
	struct completion c;
};

static void optee_cq_wait_init(struct optee_call_queue *cq,
			       struct optee_call_waiter *w)
{
	/*
	 * We're preparing to make a call to secure world. In case we can't
	 * allocate a thread in secure world we'll end up waiting in
	 * optee_cq_wait_for_completion().
	 *
	 * Normally if there's no contention in secure world the call will
	 * complete and we can clean up directly with optee_cq_wait_final().
	 */
	mutex_lock(&cq->mutex);

	/*
	 * We add ourselves to the queue, but we don't wait. This
	 * guarantees that we don't lose a completion if secure world
	 * returns busy and another thread just exited and tried to
	 * complete someone.
	 */
	init_completion(&w->c);
	list_add_tail(&w->list_node, &cq->waiters);

	mutex_unlock(&cq->mutex);
}

static void optee_cq_wait_for_completion(struct optee_call_queue *cq,
					 struct optee_call_waiter *w)
{
	wait_for_completion(&w->c);

	mutex_lock(&cq->mutex);

	/* Move to end of list to get out of the way for other waiters */
	list_del(&w->list_node);
	reinit_completion(&w->c);
	list_add_tail(&w->list_node, &cq->waiters);

	mutex_unlock(&cq->mutex);
}

static void optee_cq_complete_one(struct optee_call_queue *cq)
{
	struct optee_call_waiter *w;

	list_for_each_entry(w, &cq->waiters, list_node) {
		if (!completion_done(&w->c)) {
			complete(&w->c);
			break;
		}
	}
}

static void optee_cq_wait_final(struct optee_call_queue *cq,
				struct optee_call_waiter *w)
{
	/*
	 * We're done with the call to secure world. The thread in secure
	 * world that was used for this call is now available for some
	 * other task to use.
	 */
	mutex_lock(&cq->mutex);

	/* Get out of the list */
	list_del(&w->list_node);

	/* Wake up one eventual waiting task */
	optee_cq_complete_one(cq);

	/*
	 * If we're completed we've got a completion from another task that
	 * was just done with its call to secure world. Since yet another
	 * thread now is available in secure world wake up another eventual
	 * waiting task.
	 */
	if (completion_done(&w->c))
		optee_cq_complete_one(cq);

	mutex_unlock(&cq->mutex);
}
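
/*
 * Illustrative usage sketch (not part of the original driver flow): the
 * three call-queue helpers above are intended to be used in the pattern
 * below, which is how optee_do_call_with_arg() and the shm-cache helpers
 * further down use them.
 *
 *	struct optee_call_waiter w;
 *
 *	optee_cq_wait_init(&optee->call_queue, &w);
 *	while (true) {
 *		// issue the SMC to secure world here
 *		if (result == OPTEE_SMC_RETURN_ETHREAD_LIMIT)
 *			optee_cq_wait_for_completion(&optee->call_queue, &w);
 *		else
 *			break;
 *	}
 *	optee_cq_wait_final(&optee->call_queue, &w);
 */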

/* Requires the filpstate mutex to be held */
static struct optee_session *find_session(struct optee_context_data *ctxdata,
					  u32 session_id)
{
	struct optee_session *sess;

	list_for_each_entry(sess, &ctxdata->sess_list, list_node)
		if (sess->session_id == session_id)
			return sess;

	return NULL;
}

/**
 * optee_do_call_with_arg() - Do an SMC to OP-TEE in secure world
 * @ctx:	calling context
 * @parg:	physical address of message to pass to secure world
 *
 * Does an SMC to OP-TEE in secure world and handles eventual resulting
 * Remote Procedure Calls (RPC) from OP-TEE.
 *
 * Returns return code from secure world, 0 is OK
 */
u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_call_waiter w;
	struct optee_rpc_param param = { };
	struct optee_call_ctx call_ctx = { };
	u32 ret;

	param.a0 = OPTEE_SMC_CALL_WITH_ARG;
	reg_pair_from_64(&param.a1, &param.a2, parg);
	/* Initialize waiter */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		struct arm_smccc_res res;

		optee->invoke_fn(param.a0, param.a1, param.a2, param.a3,
				 param.a4, param.a5, param.a6, param.a7,
				 &res);

		if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
			/*
			 * Out of threads in secure world, wait for a
			 * thread to become available.
			 */
			optee_cq_wait_for_completion(&optee->call_queue, &w);
		} else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
			param.a0 = res.a0;
			param.a1 = res.a1;
			param.a2 = res.a2;
			param.a3 = res.a3;
			optee_handle_rpc(ctx, &param, &call_ctx);
		} else {
			ret = res.a0;
			break;
		}
	}

	optee_rpc_finalize_call(&call_ctx);
	/*
	 * We're done with our thread in secure world. If there are any
	 * thread waiters, wake up one.
	 */
	optee_cq_wait_final(&optee->call_queue, &w);

	return ret;
}

static struct tee_shm *get_msg_arg(struct tee_context *ctx, size_t num_params,
				   struct optee_msg_arg **msg_arg,
				   phys_addr_t *msg_parg)
{
	int rc;
	struct tee_shm *shm;
	struct optee_msg_arg *ma;

	shm = tee_shm_alloc(ctx, OPTEE_MSG_GET_ARG_SIZE(num_params),
			    TEE_SHM_MAPPED);
	if (IS_ERR(shm))
		return shm;

	ma = tee_shm_get_va(shm, 0);
	if (IS_ERR(ma)) {
		rc = PTR_ERR(ma);
		goto out;
	}

	rc = tee_shm_get_pa(shm, 0, msg_parg);
	if (rc)
		goto out;

	memset(ma, 0, OPTEE_MSG_GET_ARG_SIZE(num_params));
	ma->num_params = num_params;
	*msg_arg = ma;
out:
	if (rc) {
		tee_shm_free(shm);
		return ERR_PTR(rc);
	}

	return shm;
}
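
/*
 * Illustrative sketch (assumed, not taken from the original sources): the
 * session and command functions below all share the same shape when
 * talking to OP-TEE. Allocate a message argument, fill it in, do the call,
 * then free the shared memory again. OPTEE_MSG_CMD_... stands in for any
 * command; optee_close_session() and optee_cancel_req() below are concrete
 * instances of this pattern.
 *
 *	struct tee_shm *shm;
 *	struct optee_msg_arg *msg_arg;
 *	phys_addr_t msg_parg;
 *
 *	shm = get_msg_arg(ctx, num_params, &msg_arg, &msg_parg);
 *	if (IS_ERR(shm))
 *		return PTR_ERR(shm);
 *	msg_arg->cmd = OPTEE_MSG_CMD_...;
 *	// fill in msg_arg->params[] as needed
 *	optee_do_call_with_arg(ctx, msg_parg);
 *	// inspect msg_arg->ret and msg_arg->ret_origin
 *	tee_shm_free(shm);
 */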

int optee_open_session(struct tee_context *ctx,
		       struct tee_ioctl_open_session_arg *arg,
		       struct tee_param *param)
{
	struct optee_context_data *ctxdata = ctx->data;
	int rc;
	struct tee_shm *shm;
	struct optee_msg_arg *msg_arg;
	phys_addr_t msg_parg;
	struct optee_session *sess = NULL;

	/* +2 for the meta parameters added below */
	shm = get_msg_arg(ctx, arg->num_params + 2, &msg_arg, &msg_parg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	msg_arg->cmd = OPTEE_MSG_CMD_OPEN_SESSION;
	msg_arg->cancel_id = arg->cancel_id;

	/*
	 * Initialize and add the meta parameters needed when opening a
	 * session.
	 */
	msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
				  OPTEE_MSG_ATTR_META;
	msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
				  OPTEE_MSG_ATTR_META;
	memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid));
	memcpy(&msg_arg->params[1].u.value, arg->clnt_uuid,
	       sizeof(arg->clnt_uuid));
	msg_arg->params[1].u.value.c = arg->clnt_login;

	rc = optee_to_msg_param(msg_arg->params + 2, arg->num_params, param);
	if (rc)
		goto out;

	sess = kzalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		rc = -ENOMEM;
		goto out;
	}

	if (optee_do_call_with_arg(ctx, msg_parg)) {
		msg_arg->ret = TEEC_ERROR_COMMUNICATION;
		msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
	}

	if (msg_arg->ret == TEEC_SUCCESS) {
		/* A new session has been created, add it to the list. */
		sess->session_id = msg_arg->session;
		mutex_lock(&ctxdata->mutex);
		list_add(&sess->list_node, &ctxdata->sess_list);
		mutex_unlock(&ctxdata->mutex);
	} else {
		kfree(sess);
	}

	if (optee_from_msg_param(param, arg->num_params, msg_arg->params + 2)) {
		arg->ret = TEEC_ERROR_COMMUNICATION;
		arg->ret_origin = TEEC_ORIGIN_COMMS;
		/* Close session again to avoid leakage */
		optee_close_session(ctx, msg_arg->session);
	} else {
		arg->session = msg_arg->session;
		arg->ret = msg_arg->ret;
		arg->ret_origin = msg_arg->ret_origin;
	}
out:
	tee_shm_free(shm);

	return rc;
}

int optee_close_session(struct tee_context *ctx, u32 session)
{
	struct optee_context_data *ctxdata = ctx->data;
	struct tee_shm *shm;
	struct optee_msg_arg *msg_arg;
	phys_addr_t msg_parg;
	struct optee_session *sess;

	/* Check that the session is valid and remove it from the list */
	mutex_lock(&ctxdata->mutex);
	sess = find_session(ctxdata, session);
	if (sess)
		list_del(&sess->list_node);
	mutex_unlock(&ctxdata->mutex);
	if (!sess)
		return -EINVAL;
	kfree(sess);

	shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
	msg_arg->session = session;
	optee_do_call_with_arg(ctx, msg_parg);

	tee_shm_free(shm);
	return 0;
}

int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
		      struct tee_param *param)
{
	struct optee_context_data *ctxdata = ctx->data;
	struct tee_shm *shm;
	struct optee_msg_arg *msg_arg;
	phys_addr_t msg_parg;
	struct optee_session *sess;
	int rc;

	/* Check that the session is valid */
	mutex_lock(&ctxdata->mutex);
	sess = find_session(ctxdata, arg->session);
	mutex_unlock(&ctxdata->mutex);
	if (!sess)
		return -EINVAL;

	shm = get_msg_arg(ctx, arg->num_params, &msg_arg, &msg_parg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);
	msg_arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND;
	msg_arg->func = arg->func;
	msg_arg->session = arg->session;
	msg_arg->cancel_id = arg->cancel_id;

	rc = optee_to_msg_param(msg_arg->params, arg->num_params, param);
	if (rc)
		goto out;

	if (optee_do_call_with_arg(ctx, msg_parg)) {
		msg_arg->ret = TEEC_ERROR_COMMUNICATION;
		msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
	}

	if (optee_from_msg_param(param, arg->num_params, msg_arg->params)) {
		msg_arg->ret = TEEC_ERROR_COMMUNICATION;
		msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
	}

	arg->ret = msg_arg->ret;
	arg->ret_origin = msg_arg->ret_origin;
out:
	tee_shm_free(shm);
	return rc;
}

int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
{
	struct optee_context_data *ctxdata = ctx->data;
	struct tee_shm *shm;
	struct optee_msg_arg *msg_arg;
	phys_addr_t msg_parg;
	struct optee_session *sess;

	/* Check that the session is valid */
	mutex_lock(&ctxdata->mutex);
	sess = find_session(ctxdata, session);
	mutex_unlock(&ctxdata->mutex);
	if (!sess)
		return -EINVAL;

	shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	msg_arg->cmd = OPTEE_MSG_CMD_CANCEL;
	msg_arg->session = session;
	msg_arg->cancel_id = cancel_id;
	optee_do_call_with_arg(ctx, msg_parg);

	tee_shm_free(shm);
	return 0;
}

/**
 * optee_enable_shm_cache() - Enables caching of some shared memory
 *			      allocation in OP-TEE
 * @optee:	main service struct
 */
void optee_enable_shm_cache(struct optee *optee)
{
	struct optee_call_waiter w;

	/* We need to retry until secure world isn't busy. */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		struct arm_smccc_res res;

		optee->invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0,
				 0, &res);
		if (res.a0 == OPTEE_SMC_RETURN_OK)
			break;
		optee_cq_wait_for_completion(&optee->call_queue, &w);
	}
	optee_cq_wait_final(&optee->call_queue, &w);
}

/**
 * optee_disable_shm_cache() - Disables caching of some shared memory
 *			       allocation in OP-TEE
 * @optee:	main service struct
 */
void optee_disable_shm_cache(struct optee *optee)
{
	struct optee_call_waiter w;

	/* We need to retry until secure world isn't busy. */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		union {
			struct arm_smccc_res smccc;
			struct optee_smc_disable_shm_cache_result result;
		} res;

		optee->invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0,
				 0, &res.smccc);
		if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL)
			break; /* All shm's freed */
		if (res.result.status == OPTEE_SMC_RETURN_OK) {
			struct tee_shm *shm;

			shm = reg_pair_to_ptr(res.result.shm_upper32,
					      res.result.shm_lower32);
			tee_shm_free(shm);
		} else {
			optee_cq_wait_for_completion(&optee->call_queue, &w);
		}
	}
	optee_cq_wait_final(&optee->call_queue, &w);
}
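
/*
 * Worked example (illustrative, assuming OPTEE_MSG_NONCONTIG_PAGE_SIZE is
 * 4 KiB and sizeof(u64) is 8): each pagelist page holds 4096 / 8 - 1 = 511
 * physical-address entries plus one link entry. Registering a buffer that
 * spans 1000 small pages therefore needs DIV_ROUND_UP(1000, 511) = 2
 * pagelist pages, i.e. get_pages_list_size(1000) == 8192 below.
 */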

#define PAGELIST_ENTRIES_PER_PAGE				\
	((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)

/**
 * optee_fill_pages_list() - write list of user pages to given shared
 * buffer.
 *
 * @dst: page-aligned buffer where list of pages will be stored
 * @pages: array of pages that represents shared buffer
 * @num_pages: number of entries in @pages
 * @page_offset: offset of user buffer from page start
 *
 * @dst should be big enough to hold the list of user page addresses and
 * links to the next pages of the buffer
 */
void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
			   size_t page_offset)
{
	int n = 0;
	phys_addr_t optee_page;
	/*
	 * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h
	 * for details.
	 */
	struct {
		u64 pages_list[PAGELIST_ENTRIES_PER_PAGE];
		u64 next_page_data;
	} *pages_data;

	/*
	 * Currently OP-TEE uses 4k page size and it does not look like
	 * this will change in the future. On the other hand, there are no
	 * known ARM architectures with page size < 4k. Thus the following
	 * build assert looks redundant. But the code below heavily relies
	 * on this assumption, so it is better to be safe than sorry.
	 */
	BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE);

	pages_data = (void *)dst;
	/*
	 * If a Linux page is bigger than 4k, and the user buffer offset is
	 * larger than 4k/8k/12k/etc, this will skip the first 4k chunks,
	 * because they carry no data of value for OP-TEE.
	 */
	optee_page = page_to_phys(*pages) +
		round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE);

	while (true) {
		pages_data->pages_list[n++] = optee_page;

		if (n == PAGELIST_ENTRIES_PER_PAGE) {
			pages_data->next_page_data =
				virt_to_phys(pages_data + 1);
			pages_data++;
			n = 0;
		}

		optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE;
		if (!(optee_page & ~PAGE_MASK)) {
			if (!--num_pages)
				break;
			pages++;
			optee_page = page_to_phys(*pages);
		}
	}
}
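
/*
 * Illustrative layout (not from the original sources), assuming
 * PAGE_SIZE == OPTEE_MSG_NONCONTIG_PAGE_SIZE == 4 KiB: for a buffer
 * described by pages[0..2], the list built above looks like
 *
 *	dst[0]   = page_to_phys(pages[0])
 *	dst[1]   = page_to_phys(pages[1])
 *	dst[2]   = page_to_phys(pages[2])
 *	...
 *	dst[511] = physical address of the next pagelist page, only
 *		   written when more than 511 entries are needed
 */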

/*
 * The final entry in each pagelist page is a pointer to the next
 * pagelist page.
 */
static size_t get_pages_list_size(size_t num_entries)
{
	int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE);

	return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
}

u64 *optee_allocate_pages_list(size_t num_entries)
{
	return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL);
}

void optee_free_pages_list(void *list, size_t num_entries)
{
	free_pages_exact(list, get_pages_list_size(num_entries));
}

static bool is_normal_memory(pgprot_t p)
{
#if defined(CONFIG_ARM)
	return (pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEALLOC;
#elif defined(CONFIG_ARM64)
	return (pgprot_val(p) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL);
#else
#error "Unsupported architecture"
#endif
}

static int __check_mem_type(struct vm_area_struct *vma, unsigned long end)
{
	while (vma && is_normal_memory(vma->vm_page_prot)) {
		if (vma->vm_end >= end)
			return 0;
		vma = vma->vm_next;
	}

	return -EINVAL;
}

static int check_mem_type(unsigned long start, size_t num_pages)
{
	struct mm_struct *mm = current->mm;
	int rc;

	down_read(&mm->mmap_sem);
	rc = __check_mem_type(find_vma(mm, start),
			      start + num_pages * PAGE_SIZE);
	up_read(&mm->mmap_sem);

	return rc;
}
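
/*
 * Illustrative note on the buf_ptr encoding used below (see the
 * OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h): the pagelist is
 * page aligned, so its low bits are free to carry the buffer's offset
 * within its first 4 KiB page. With made-up numbers, a pagelist at
 * physical 0x42001000 and a buffer offset of 0x2c0 give
 *
 *	buf_ptr = 0x42001000 | (0x2c0 & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1))
 *		= 0x420012c0
 */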

int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
		       struct page **pages, size_t num_pages,
		       unsigned long start)
{
	struct tee_shm *shm_arg = NULL;
	struct optee_msg_arg *msg_arg;
	u64 *pages_list;
	phys_addr_t msg_parg;
	int rc;

	if (!num_pages)
		return -EINVAL;

	rc = check_mem_type(start, num_pages);
	if (rc)
		return rc;

	pages_list = optee_allocate_pages_list(num_pages);
	if (!pages_list)
		return -ENOMEM;

	shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
	if (IS_ERR(shm_arg)) {
		rc = PTR_ERR(shm_arg);
		goto out;
	}

	optee_fill_pages_list(pages_list, pages, num_pages,
			      tee_shm_get_page_offset(shm));

	msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
	msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
				OPTEE_MSG_ATTR_NONCONTIG;
	msg_arg->params->u.tmem.shm_ref = (unsigned long)shm;
	msg_arg->params->u.tmem.size = tee_shm_get_size(shm);
	/*
	 * In the least significant bits of msg_arg->params->u.tmem.buf_ptr
	 * we store the buffer offset from the 4k page, as described in the
	 * OP-TEE ABI.
	 */
	msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
		(tee_shm_get_page_offset(shm) &
		 (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));

	if (optee_do_call_with_arg(ctx, msg_parg) ||
	    msg_arg->ret != TEEC_SUCCESS)
		rc = -EINVAL;

	tee_shm_free(shm_arg);
out:
	optee_free_pages_list(pages_list, num_pages);
	return rc;
}

int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
{
	struct tee_shm *shm_arg;
	struct optee_msg_arg *msg_arg;
	phys_addr_t msg_parg;
	int rc = 0;

	shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
	if (IS_ERR(shm_arg))
		return PTR_ERR(shm_arg);

	msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;

	msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
	msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;

	if (optee_do_call_with_arg(ctx, msg_parg) ||
	    msg_arg->ret != TEEC_SUCCESS)
		rc = -EINVAL;
	tee_shm_free(shm_arg);
	return rc;
}

int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
			    struct page **pages, size_t num_pages,
			    unsigned long start)
{
	/*
	 * We don't want to register supplicant memory in OP-TEE.
	 * Instead information about it will be passed in RPC code.
	 */
	return check_mem_type(start, num_pages);
}

int optee_shm_unregister_supp(struct tee_context *ctx, struct tee_shm *shm)
{
	return 0;
}