// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2017, 2019-2021 Linaro Limited
 */
#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_core.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include "tee_private.h"

static void shm_put_kernel_pages(struct page **pages, size_t page_count)
{
        size_t n;

        for (n = 0; n < page_count; n++)
                put_page(pages[n]);
}

static void shm_get_kernel_pages(struct page **pages, size_t page_count)
{
        size_t n;

        for (n = 0; n < page_count; n++)
                get_page(pages[n]);
}

static void release_registered_pages(struct tee_shm *shm)
{
        if (shm->pages) {
                if (shm->flags & TEE_SHM_USER_MAPPED)
                        unpin_user_pages(shm->pages, shm->num_pages);
                else
                        shm_put_kernel_pages(shm->pages, shm->num_pages);

                kfree(shm->pages);
        }
}

static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
{
        if (shm->flags & TEE_SHM_POOL) {
                teedev->pool->ops->free(teedev->pool, shm);
        } else if (shm->flags & TEE_SHM_DYNAMIC) {
                int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);

                if (rc)
                        dev_err(teedev->dev.parent,
                                "unregister shm %p failed: %d", shm, rc);

                release_registered_pages(shm);
        }

        teedev_ctx_put(shm->ctx);

        kfree(shm);

        tee_device_put(teedev);
}

static struct tee_shm *shm_alloc_helper(struct tee_context *ctx, size_t size,
                                        size_t align, u32 flags, int id)
{
        struct tee_device *teedev = ctx->teedev;
        struct tee_shm *shm;
        void *ret;
        int rc;

        if (!tee_device_get(teedev))
                return ERR_PTR(-EINVAL);

        if (!teedev->pool) {
                /* teedev has been detached from driver */
                ret = ERR_PTR(-EINVAL);
                goto err_dev_put;
        }

        shm = kzalloc(sizeof(*shm), GFP_KERNEL);
        if (!shm) {
                ret = ERR_PTR(-ENOMEM);
                goto err_dev_put;
        }

        refcount_set(&shm->refcount, 1);
        shm->flags = flags;
        shm->id = id;

        /*
         * We're assigning this as it is needed if the shm is to be
         * registered. If this function returns OK then the caller is
         * expected to call teedev_ctx_get() or clear shm->ctx in case
         * it's not needed any longer.
         */
        shm->ctx = ctx;

        rc = teedev->pool->ops->alloc(teedev->pool, shm, size, align);
        if (rc) {
                ret = ERR_PTR(rc);
                goto err_kfree;
        }

        teedev_ctx_get(ctx);
        return shm;
err_kfree:
        kfree(shm);
err_dev_put:
        tee_device_put(teedev);
        return ret;
}
/**
 * tee_shm_alloc_user_buf() - Allocate shared memory for user space
 * @ctx:	Context that allocates the shared memory
 * @size:	Requested size of shared memory
 *
 * Memory allocated as user space shared memory is automatically freed when
 * the TEE file pointer is closed. The primary usage of this function is
 * when the TEE driver doesn't support registering ordinary user space
 * memory.
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size)
{
        u32 flags = TEE_SHM_DYNAMIC | TEE_SHM_POOL;
        struct tee_device *teedev = ctx->teedev;
        struct tee_shm *shm;
        void *ret;
        int id;

        mutex_lock(&teedev->mutex);
        id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL);
        mutex_unlock(&teedev->mutex);
        if (id < 0)
                return ERR_PTR(id);

        shm = shm_alloc_helper(ctx, size, PAGE_SIZE, flags, id);
        if (IS_ERR(shm)) {
                mutex_lock(&teedev->mutex);
                idr_remove(&teedev->idr, id);
                mutex_unlock(&teedev->mutex);
                return shm;
        }

        mutex_lock(&teedev->mutex);
        ret = idr_replace(&teedev->idr, shm, id);
        mutex_unlock(&teedev->mutex);
        if (IS_ERR(ret)) {
                tee_shm_free(shm);
                return ret;
        }

        return shm;
}

/**
 * tee_shm_alloc_kernel_buf() - Allocate shared memory for kernel buffer
 * @ctx:	Context that allocates the shared memory
 * @size:	Requested size of shared memory
 *
 * The returned memory is registered in secure world and is suitable to be
 * passed as a memory buffer in a parameter argument to
 * tee_client_invoke_func(). The memory allocated is later freed with a
 * call to tee_shm_free().
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size)
{
        u32 flags = TEE_SHM_DYNAMIC | TEE_SHM_POOL;

        return shm_alloc_helper(ctx, size, PAGE_SIZE, flags, -1);
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_kernel_buf);
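
/*
 * Illustrative sketch (comment only, not built): how a kernel TEE client
 * might pair tee_shm_alloc_kernel_buf() with tee_client_invoke_func().
 * MY_TA_CMD and session are placeholders for a real command ID and an
 * already opened session; error handling is trimmed to the essentials.
 *
 *	struct tee_ioctl_invoke_arg arg = { };
 *	struct tee_param param[1] = { };
 *	struct tee_shm *shm;
 *	int rc;
 *
 *	shm = tee_shm_alloc_kernel_buf(ctx, PAGE_SIZE);
 *	if (IS_ERR(shm))
 *		return PTR_ERR(shm);
 *
 *	arg.func = MY_TA_CMD;
 *	arg.session = session;
 *	arg.num_params = 1;
 *	param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT;
 *	param[0].u.memref.shm = shm;
 *	param[0].u.memref.size = PAGE_SIZE;
 *
 *	rc = tee_client_invoke_func(ctx, &arg, param);
 *	...
 *	tee_shm_free(shm);
 */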
/**
 * tee_shm_alloc_priv_buf() - Allocate shared memory for a privately shared
 *			      kernel buffer
 * @ctx:	Context that allocates the shared memory
 * @size:	Requested size of shared memory
 *
 * This function returns similar shared memory as
 * tee_shm_alloc_kernel_buf(), but with the difference that the memory
 * might not be registered in secure world in case the driver supports
 * passing memory not registered in advance.
 *
 * This function should normally only be used internally in the TEE
 * drivers.
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size)
{
        u32 flags = TEE_SHM_PRIV | TEE_SHM_POOL;

        return shm_alloc_helper(ctx, size, sizeof(long) * 2, flags, -1);
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_priv_buf);

int tee_dyn_shm_alloc_helper(struct tee_shm *shm, size_t size, size_t align,
                             int (*shm_register)(struct tee_context *ctx,
                                                 struct tee_shm *shm,
                                                 struct page **pages,
                                                 size_t num_pages,
                                                 unsigned long start))
{
        size_t nr_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
        struct page **pages;
        unsigned int i;
        int rc = 0;

        /*
         * Ignore alignment since this is already going to be page aligned
         * and there's no need for any larger alignment.
         */
        shm->kaddr = alloc_pages_exact(nr_pages * PAGE_SIZE,
                                       GFP_KERNEL | __GFP_ZERO);
        if (!shm->kaddr)
                return -ENOMEM;

        shm->paddr = virt_to_phys(shm->kaddr);
        shm->size = nr_pages * PAGE_SIZE;

        pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
        if (!pages) {
                rc = -ENOMEM;
                goto err_pages;
        }

        for (i = 0; i < nr_pages; i++)
                pages[i] = virt_to_page((u8 *)shm->kaddr + i * PAGE_SIZE);

        shm->pages = pages;
        shm->num_pages = nr_pages;

        if (shm_register) {
                rc = shm_register(shm->ctx, shm, pages, nr_pages,
                                  (unsigned long)shm->kaddr);
                if (rc)
                        goto err_kfree;
        }

        return 0;
err_kfree:
        kfree(pages);
err_pages:
        free_pages_exact(shm->kaddr, shm->size);
        shm->kaddr = NULL;
        return rc;
}
EXPORT_SYMBOL_GPL(tee_dyn_shm_alloc_helper);

void tee_dyn_shm_free_helper(struct tee_shm *shm,
                             int (*shm_unregister)(struct tee_context *ctx,
                                                   struct tee_shm *shm))
{
        if (shm_unregister)
                shm_unregister(shm->ctx, shm);
        free_pages_exact(shm->kaddr, shm->size);
        shm->kaddr = NULL;
        kfree(shm->pages);
        shm->pages = NULL;
}
EXPORT_SYMBOL_GPL(tee_dyn_shm_free_helper);
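
/*
 * Illustrative sketch (comment only, not built): a backend driver can build
 * its dynamic shared-memory pool ops on the two helpers above. The
 * my_shm_register() and my_shm_unregister() callbacks stand in for whatever
 * mechanism the backend uses to make the pages known to the secure world;
 * they are not functions defined in this file.
 *
 *	static int pool_op_alloc(struct tee_shm_pool *pool,
 *				 struct tee_shm *shm, size_t size,
 *				 size_t align)
 *	{
 *		return tee_dyn_shm_alloc_helper(shm, size, align,
 *						my_shm_register);
 *	}
 *
 *	static void pool_op_free(struct tee_shm_pool *pool,
 *				 struct tee_shm *shm)
 *	{
 *		tee_dyn_shm_free_helper(shm, my_shm_unregister);
 *	}
 */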
static struct tee_shm *
register_shm_helper(struct tee_context *ctx, struct iov_iter *iter, u32 flags,
                    int id)
{
        struct tee_device *teedev = ctx->teedev;
        struct tee_shm *shm;
        unsigned long start, addr;
        size_t num_pages, off;
        ssize_t len;
        void *ret;
        int rc;

        if (!tee_device_get(teedev))
                return ERR_PTR(-EINVAL);

        if (!teedev->desc->ops->shm_register ||
            !teedev->desc->ops->shm_unregister) {
                ret = ERR_PTR(-ENOTSUPP);
                goto err_dev_put;
        }

        teedev_ctx_get(ctx);

        shm = kzalloc(sizeof(*shm), GFP_KERNEL);
        if (!shm) {
                ret = ERR_PTR(-ENOMEM);
                goto err_ctx_put;
        }

        refcount_set(&shm->refcount, 1);
        shm->flags = flags;
        shm->ctx = ctx;
        shm->id = id;
        addr = untagged_addr((unsigned long)iter_iov_addr(iter));
        start = rounddown(addr, PAGE_SIZE);
        num_pages = iov_iter_npages(iter, INT_MAX);
        if (!num_pages) {
                ret = ERR_PTR(-ENOMEM);
                goto err_ctx_put;
        }

        shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
        if (!shm->pages) {
                ret = ERR_PTR(-ENOMEM);
                goto err_free_shm;
        }

        len = iov_iter_extract_pages(iter, &shm->pages, LONG_MAX, num_pages, 0,
                                     &off);
        if (unlikely(len <= 0)) {
                ret = len ? ERR_PTR(len) : ERR_PTR(-ENOMEM);
                goto err_free_shm_pages;
        }

        /*
         * iov_iter_extract_kvec_pages() does not get a reference on the
         * pages, so get a reference on them here.
         */
        if (iov_iter_is_kvec(iter))
                shm_get_kernel_pages(shm->pages, num_pages);

        shm->offset = off;
        shm->size = len;
        shm->num_pages = num_pages;

        rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
                                             shm->num_pages, start);
        if (rc) {
                ret = ERR_PTR(rc);
                goto err_put_shm_pages;
        }

        return shm;
err_put_shm_pages:
        if (!iov_iter_is_kvec(iter))
                unpin_user_pages(shm->pages, shm->num_pages);
        else
                shm_put_kernel_pages(shm->pages, shm->num_pages);
err_free_shm_pages:
        kfree(shm->pages);
err_free_shm:
        kfree(shm);
err_ctx_put:
        teedev_ctx_put(ctx);
err_dev_put:
        tee_device_put(teedev);
        return ret;
}

/**
 * tee_shm_register_user_buf() - Register a userspace shared memory buffer
 * @ctx:	Context that registers the shared memory
 * @addr:	The userspace address of the shared buffer
 * @length:	Length of the shared buffer
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx,
                                          unsigned long addr, size_t length)
{
        u32 flags = TEE_SHM_USER_MAPPED | TEE_SHM_DYNAMIC;
        struct tee_device *teedev = ctx->teedev;
        struct tee_shm *shm;
        struct iov_iter iter;
        void *ret;
        int id;

        if (!access_ok((void __user *)addr, length))
                return ERR_PTR(-EFAULT);

        mutex_lock(&teedev->mutex);
        id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL);
        mutex_unlock(&teedev->mutex);
        if (id < 0)
                return ERR_PTR(id);

        iov_iter_ubuf(&iter, ITER_DEST, (void __user *)addr, length);
        shm = register_shm_helper(ctx, &iter, flags, id);
        if (IS_ERR(shm)) {
                mutex_lock(&teedev->mutex);
                idr_remove(&teedev->idr, id);
                mutex_unlock(&teedev->mutex);
                return shm;
        }

        mutex_lock(&teedev->mutex);
        ret = idr_replace(&teedev->idr, shm, id);
        mutex_unlock(&teedev->mutex);
        if (IS_ERR(ret)) {
                tee_shm_free(shm);
                return ret;
        }

        return shm;
}

/**
 * tee_shm_register_kernel_buf() - Register kernel memory to be shared with
 *				   secure world
 * @ctx:	Context that registers the shared memory
 * @addr:	The buffer
 * @length:	Length of the buffer
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx,
                                            void *addr, size_t length)
{
        u32 flags = TEE_SHM_DYNAMIC;
        struct kvec kvec;
        struct iov_iter iter;

        kvec.iov_base = addr;
        kvec.iov_len = length;
        iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, length);

        return register_shm_helper(ctx, &iter, flags, -1);
}
EXPORT_SYMBOL_GPL(tee_shm_register_kernel_buf);
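
/*
 * Illustrative sketch (comment only, not built): registering an existing
 * kernel buffer instead of allocating a new one. The kmalloc() buffer and
 * its size are made up for the example; the shm must be freed before the
 * underlying buffer.
 *
 *	void *buf = kmalloc(SZ_4K, GFP_KERNEL);
 *	struct tee_shm *shm;
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	shm = tee_shm_register_kernel_buf(ctx, buf, SZ_4K);
 *	if (IS_ERR(shm)) {
 *		kfree(buf);
 *		return PTR_ERR(shm);
 *	}
 *	...	use shm as a memref parameter, then:
 *	tee_shm_free(shm);
 *	kfree(buf);
 */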
static int tee_shm_fop_release(struct inode *inode, struct file *filp)
{
        tee_shm_put(filp->private_data);
        return 0;
}

static int tee_shm_fop_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct tee_shm *shm = filp->private_data;
        size_t size = vma->vm_end - vma->vm_start;

        /* Refuse sharing shared memory provided by application */
        if (shm->flags & TEE_SHM_USER_MAPPED)
                return -EINVAL;

        /* check for overflowing the buffer's size */
        if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT)
                return -EINVAL;

        return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
                               size, vma->vm_page_prot);
}

static const struct file_operations tee_shm_fops = {
        .owner = THIS_MODULE,
        .release = tee_shm_fop_release,
        .mmap = tee_shm_fop_mmap,
};

/**
 * tee_shm_get_fd() - Increase reference count and return file descriptor
 * @shm:	Shared memory handle
 * @returns user space file descriptor to shared memory
 */
int tee_shm_get_fd(struct tee_shm *shm)
{
        int fd;

        if (shm->id < 0)
                return -EINVAL;

        /* matched by tee_shm_put() in tee_shm_fop_release() */
        refcount_inc(&shm->refcount);
        fd = anon_inode_getfd("tee_shm", &tee_shm_fops, shm, O_RDWR);
        if (fd < 0)
                tee_shm_put(shm);
        return fd;
}

/**
 * tee_shm_free() - Free shared memory
 * @shm:	Handle to shared memory to free
 */
void tee_shm_free(struct tee_shm *shm)
{
        tee_shm_put(shm);
}
EXPORT_SYMBOL_GPL(tee_shm_free);

/**
 * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
 * @shm:	Shared memory handle
 * @offs:	Offset from start of this shared memory
 * @returns virtual address of the shared memory + offs if offs is within
 *	the bounds of this shared memory, else an ERR_PTR
 */
void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
{
        if (!shm->kaddr)
                return ERR_PTR(-EINVAL);
        if (offs >= shm->size)
                return ERR_PTR(-EINVAL);
        return (char *)shm->kaddr + offs;
}
EXPORT_SYMBOL_GPL(tee_shm_get_va);

/**
 * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
 * @shm:	Shared memory handle
 * @offs:	Offset from start of this shared memory
 * @pa:		Physical address to return
 * @returns 0 if offs is within the bounds of this shared memory, else an
 *	error code.
 */
int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa)
{
        if (offs >= shm->size)
                return -EINVAL;
        if (pa)
                *pa = shm->paddr + offs;
        return 0;
}
EXPORT_SYMBOL_GPL(tee_shm_get_pa);
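
/*
 * Illustrative sketch (comment only, not built): filling a driver-allocated
 * shared buffer through its kernel mapping before handing it to the secure
 * world. The data/data_len variables are placeholders; the ERR_PTR check
 * matters because tee_shm_get_va() fails for buffers without a kernel
 * mapping or for an out-of-bounds offset.
 *
 *	void *va = tee_shm_get_va(shm, 0);
 *
 *	if (IS_ERR(va))
 *		return PTR_ERR(va);
 *	memcpy(va, data, data_len);
 */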
/**
 * tee_shm_get_from_id() - Find shared memory object and increase reference
 *			   count
 * @ctx:	Context owning the shared memory
 * @id:		Id of shared memory object
 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
 */
struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
{
        struct tee_device *teedev;
        struct tee_shm *shm;

        if (!ctx)
                return ERR_PTR(-EINVAL);

        teedev = ctx->teedev;
        mutex_lock(&teedev->mutex);
        shm = idr_find(&teedev->idr, id);
        /*
         * If the tee_shm was found in the IDR it must have a refcount
         * larger than 0 due to the guarantee in tee_shm_put() below. So
         * it's safe to use refcount_inc().
         */
        if (!shm || shm->ctx != ctx)
                shm = ERR_PTR(-EINVAL);
        else
                refcount_inc(&shm->refcount);
        mutex_unlock(&teedev->mutex);
        return shm;
}
EXPORT_SYMBOL_GPL(tee_shm_get_from_id);

/**
 * tee_shm_put() - Decrease reference count on a shared memory handle
 * @shm:	Shared memory handle
 */
void tee_shm_put(struct tee_shm *shm)
{
        struct tee_device *teedev;
        bool do_release = false;

        if (!shm || !shm->ctx || !shm->ctx->teedev)
                return;

        teedev = shm->ctx->teedev;
        mutex_lock(&teedev->mutex);
        if (refcount_dec_and_test(&shm->refcount)) {
                /*
                 * refcount has reached 0, we must now remove it from the
                 * IDR before releasing the mutex. This will guarantee that
                 * the refcount_inc() in tee_shm_get_from_id() never starts
                 * from 0.
                 */
                if (shm->id >= 0)
                        idr_remove(&teedev->idr, shm->id);
                do_release = true;
        }
        mutex_unlock(&teedev->mutex);

        if (do_release)
                tee_shm_release(teedev, shm);
}
EXPORT_SYMBOL_GPL(tee_shm_put);
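
/*
 * Illustrative sketch (comment only, not built): looking a shared memory
 * object up by the ID exposed to user space, for instance when a request
 * refers to a buffer by ID. Every successful tee_shm_get_from_id() must be
 * balanced by a tee_shm_put().
 *
 *	struct tee_shm *shm = tee_shm_get_from_id(ctx, id);
 *
 *	if (IS_ERR(shm))
 *		return PTR_ERR(shm);
 *	...	operate on the buffer
 *	tee_shm_put(shm);
 */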