// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2017, 2019-2021 Linaro Limited
 */
#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
#include "tee_private.h"

static void shm_put_kernel_pages(struct page **pages, size_t page_count)
{
	size_t n;

	for (n = 0; n < page_count; n++)
		put_page(pages[n]);
}

static int shm_get_kernel_pages(unsigned long start, size_t page_count,
				struct page **pages)
{
	size_t n;
	int rc;

	if (is_vmalloc_addr((void *)start)) {
		struct page *page;

		for (n = 0; n < page_count; n++) {
			page = vmalloc_to_page((void *)(start + PAGE_SIZE * n));
			if (!page)
				return -ENOMEM;

			get_page(page);
			pages[n] = page;
		}
		rc = page_count;
	} else {
		struct kvec *kiov;

		kiov = kcalloc(page_count, sizeof(*kiov), GFP_KERNEL);
		if (!kiov)
			return -ENOMEM;

		for (n = 0; n < page_count; n++) {
			kiov[n].iov_base = (void *)(start + n * PAGE_SIZE);
			kiov[n].iov_len = PAGE_SIZE;
		}

		rc = get_kernel_pages(kiov, page_count, 0, pages);
		kfree(kiov);
	}

	return rc;
}

static void release_registered_pages(struct tee_shm *shm)
{
	if (shm->pages) {
		if (shm->flags & TEE_SHM_USER_MAPPED)
			unpin_user_pages(shm->pages, shm->num_pages);
		else
			shm_put_kernel_pages(shm->pages, shm->num_pages);

		kfree(shm->pages);
	}
}

static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
{
	if (shm->flags & TEE_SHM_POOL) {
		teedev->pool->ops->free(teedev->pool, shm);
	} else if (shm->flags & TEE_SHM_DYNAMIC) {
		int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);

		if (rc)
			dev_err(teedev->dev.parent,
				"unregister shm %p failed: %d", shm, rc);

		release_registered_pages(shm);
	}

	teedev_ctx_put(shm->ctx);

	kfree(shm);

	tee_device_put(teedev);
}

static struct tee_shm *shm_alloc_helper(struct tee_context *ctx, size_t size,
					size_t align, u32 flags, int id)
{
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm *shm;
	void *ret;
	int rc;

	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	if (!teedev->pool) {
		/* teedev has been detached from driver */
		ret = ERR_PTR(-EINVAL);
		goto err_dev_put;
	}

	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
	if (!shm) {
		ret = ERR_PTR(-ENOMEM);
		goto err_dev_put;
	}

	refcount_set(&shm->refcount, 1);
	shm->flags = flags;
	shm->id = id;

	/*
	 * We're assigning this as it is needed if the shm is to be
	 * registered. If this function returns OK then the caller is
	 * expected to call teedev_ctx_get() or clear shm->ctx in case
	 * it's not needed any longer.
	 */
	shm->ctx = ctx;

	rc = teedev->pool->ops->alloc(teedev->pool, shm, size, align);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err_kfree;
	}

	teedev_ctx_get(ctx);
	return shm;
err_kfree:
	kfree(shm);
err_dev_put:
	tee_device_put(teedev);
	return ret;
}

/**
 * tee_shm_alloc_user_buf() - Allocate shared memory for user space
 * @ctx:	Context that allocates the shared memory
 * @size:	Requested size of shared memory
 *
 * Memory allocated as user space shared memory is automatically freed when
 * the TEE file pointer is closed. The primary usage of this function is
 * when the TEE driver doesn't support registering ordinary user space
 * memory.
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size)
{
	u32 flags = TEE_SHM_DYNAMIC | TEE_SHM_POOL;
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm *shm;
	void *ret;
	int id;

	mutex_lock(&teedev->mutex);
	id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL);
	mutex_unlock(&teedev->mutex);
	if (id < 0)
		return ERR_PTR(id);

	shm = shm_alloc_helper(ctx, size, PAGE_SIZE, flags, id);
	if (IS_ERR(shm)) {
		mutex_lock(&teedev->mutex);
		idr_remove(&teedev->idr, id);
		mutex_unlock(&teedev->mutex);
		return shm;
	}

	mutex_lock(&teedev->mutex);
	ret = idr_replace(&teedev->idr, shm, id);
	mutex_unlock(&teedev->mutex);
	if (IS_ERR(ret)) {
		tee_shm_free(shm);
		return ret;
	}

	return shm;
}

/**
 * tee_shm_alloc_kernel_buf() - Allocate shared memory for kernel buffer
 * @ctx:	Context that allocates the shared memory
 * @size:	Requested size of shared memory
 *
 * The returned memory is registered in secure world and is suitable to be
 * passed as a memory buffer in parameter argument to
 * tee_client_invoke_func(). The memory allocated is later freed with a
 * call to tee_shm_free().
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size)
{
	u32 flags = TEE_SHM_DYNAMIC | TEE_SHM_POOL;

	return shm_alloc_helper(ctx, size, PAGE_SIZE, flags, -1);
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_kernel_buf);
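
/*
 * Illustrative sketch (not compiled as part of this file): a kernel-side
 * client might pass a buffer from tee_shm_alloc_kernel_buf() as a memref
 * parameter to tee_client_invoke_func(). The ctx, session and func values
 * below are placeholders for whatever the client obtained elsewhere.
 *
 *	struct tee_ioctl_invoke_arg arg = { };
 *	struct tee_param param = { };
 *	struct tee_shm *shm;
 *	int rc;
 *
 *	shm = tee_shm_alloc_kernel_buf(ctx, PAGE_SIZE);
 *	if (IS_ERR(shm))
 *		return PTR_ERR(shm);
 *
 *	arg.func = func;
 *	arg.session = session;
 *	arg.num_params = 1;
 *	param.attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT;
 *	param.u.memref.shm = shm;
 *	param.u.memref.size = PAGE_SIZE;
 *
 *	rc = tee_client_invoke_func(ctx, &arg, &param);
 *	tee_shm_free(shm);
 */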

/**
 * tee_shm_alloc_priv_buf() - Allocate shared memory for a privately shared
 *			      kernel buffer
 * @ctx:	Context that allocates the shared memory
 * @size:	Requested size of shared memory
 *
 * This function returns similar shared memory as
 * tee_shm_alloc_kernel_buf(), but with the difference that the memory
 * might not be registered in secure world in case the driver supports
 * passing memory not registered in advance.
 *
 * This function should normally only be used internally in the TEE
 * drivers.
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size)
{
	u32 flags = TEE_SHM_PRIV | TEE_SHM_POOL;

	return shm_alloc_helper(ctx, size, sizeof(long) * 2, flags, -1);
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_priv_buf);
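
/*
 * Illustrative sketch: a TEE driver might use a private pool buffer for
 * its own driver-to-secure-world messages; msg_size and the fill step
 * are placeholders.
 *
 *	struct tee_shm *shm = tee_shm_alloc_priv_buf(ctx, msg_size);
 *
 *	if (IS_ERR(shm))
 *		return PTR_ERR(shm);
 *	... fill in the driver specific message and hand it over ...
 *	tee_shm_free(shm);
 */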

static struct tee_shm *
register_shm_helper(struct tee_context *ctx, unsigned long addr,
		    size_t length, u32 flags, int id)
{
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm *shm;
	unsigned long start;
	size_t num_pages;
	void *ret;
	int rc;

	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	if (!teedev->desc->ops->shm_register ||
	    !teedev->desc->ops->shm_unregister) {
		ret = ERR_PTR(-ENOTSUPP);
		goto err_dev_put;
	}

	teedev_ctx_get(ctx);

	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
	if (!shm) {
		ret = ERR_PTR(-ENOMEM);
		goto err_ctx_put;
	}

	refcount_set(&shm->refcount, 1);
	shm->flags = flags;
	shm->ctx = ctx;
	shm->id = id;
	addr = untagged_addr(addr);
	start = rounddown(addr, PAGE_SIZE);
	shm->offset = addr - start;
	shm->size = length;
	num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE;
	shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
	if (!shm->pages) {
		ret = ERR_PTR(-ENOMEM);
		goto err_free_shm;
	}

	if (flags & TEE_SHM_USER_MAPPED)
		rc = pin_user_pages_fast(start, num_pages, FOLL_WRITE,
					 shm->pages);
	else
		rc = shm_get_kernel_pages(start, num_pages, shm->pages);
	if (rc > 0)
		shm->num_pages = rc;
	if (rc != num_pages) {
		if (rc >= 0)
			rc = -ENOMEM;
		ret = ERR_PTR(rc);
		goto err_put_shm_pages;
	}

	rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
					     shm->num_pages, start);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err_put_shm_pages;
	}

	return shm;
err_put_shm_pages:
	if (flags & TEE_SHM_USER_MAPPED)
		unpin_user_pages(shm->pages, shm->num_pages);
	else
		shm_put_kernel_pages(shm->pages, shm->num_pages);
	kfree(shm->pages);
err_free_shm:
	kfree(shm);
err_ctx_put:
	teedev_ctx_put(ctx);
err_dev_put:
	tee_device_put(teedev);
	return ret;
}

/**
 * tee_shm_register_user_buf() - Register a userspace shared memory buffer
 * @ctx:	Context that registers the shared memory
 * @addr:	The userspace address of the shared buffer
 * @length:	Length of the shared buffer
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx,
					  unsigned long addr, size_t length)
{
	u32 flags = TEE_SHM_USER_MAPPED | TEE_SHM_DYNAMIC;
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm *shm;
	void *ret;
	int id;

	if (!access_ok((void __user *)addr, length))
		return ERR_PTR(-EFAULT);

	mutex_lock(&teedev->mutex);
	id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL);
	mutex_unlock(&teedev->mutex);
	if (id < 0)
		return ERR_PTR(id);

	shm = register_shm_helper(ctx, addr, length, flags, id);
	if (IS_ERR(shm)) {
		mutex_lock(&teedev->mutex);
		idr_remove(&teedev->idr, id);
		mutex_unlock(&teedev->mutex);
		return shm;
	}

	mutex_lock(&teedev->mutex);
	ret = idr_replace(&teedev->idr, shm, id);
	mutex_unlock(&teedev->mutex);
	if (IS_ERR(ret)) {
		tee_shm_free(shm);
		return ret;
	}

	return shm;
}

/**
 * tee_shm_register_kernel_buf() - Register kernel memory to be shared with
 *				   secure world
 * @ctx:	Context that registers the shared memory
 * @addr:	The buffer
 * @length:	Length of the buffer
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx,
					    void *addr, size_t length)
{
	u32 flags = TEE_SHM_DYNAMIC;

	return register_shm_helper(ctx, (unsigned long)addr, length, flags, -1);
}
EXPORT_SYMBOL_GPL(tee_shm_register_kernel_buf);
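
/*
 * Illustrative sketch: sharing an already allocated kernel buffer rather
 * than allocating from the pool. "buf" and "sz" are placeholders; the
 * caller must keep the buffer alive until it has been unregistered via
 * tee_shm_free().
 *
 *	void *buf = kzalloc(sz, GFP_KERNEL);
 *	struct tee_shm *shm;
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	shm = tee_shm_register_kernel_buf(ctx, buf, sz);
 *	if (IS_ERR(shm)) {
 *		kfree(buf);
 *		return PTR_ERR(shm);
 *	}
 *	... use shm as a memref parameter ...
 *	tee_shm_free(shm);
 *	kfree(buf);
 */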

static int tee_shm_fop_release(struct inode *inode, struct file *filp)
{
	tee_shm_put(filp->private_data);
	return 0;
}

static int tee_shm_fop_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct tee_shm *shm = filp->private_data;
	size_t size = vma->vm_end - vma->vm_start;

	/* Refuse sharing shared memory provided by application */
	if (shm->flags & TEE_SHM_USER_MAPPED)
		return -EINVAL;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT)
		return -EINVAL;

	return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}

static const struct file_operations tee_shm_fops = {
	.owner = THIS_MODULE,
	.release = tee_shm_fop_release,
	.mmap = tee_shm_fop_mmap,
};

/**
 * tee_shm_get_fd() - Increase reference count and return file descriptor
 * @shm:	Shared memory handle
 * @returns user space file descriptor to shared memory
 */
int tee_shm_get_fd(struct tee_shm *shm)
{
	int fd;

	if (shm->id < 0)
		return -EINVAL;

	/* matched by tee_shm_put() in tee_shm_fop_release() */
	refcount_inc(&shm->refcount);
	fd = anon_inode_getfd("tee_shm", &tee_shm_fops, shm, O_RDWR);
	if (fd < 0)
		tee_shm_put(shm);
	return fd;
}

/**
 * tee_shm_free() - Free shared memory
 * @shm:	Handle to shared memory to free
 */
void tee_shm_free(struct tee_shm *shm)
{
	tee_shm_put(shm);
}
EXPORT_SYMBOL_GPL(tee_shm_free);

/**
 * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
 * @shm:	Shared memory handle
 * @offs:	Offset from start of this shared memory
 * @returns virtual address of the shared memory + offs if offs is within
 *	the bounds of this shared memory, else an ERR_PTR
 */
void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
{
	if (!shm->kaddr)
		return ERR_PTR(-EINVAL);
	if (offs >= shm->size)
		return ERR_PTR(-EINVAL);
	return (char *)shm->kaddr + offs;
}
EXPORT_SYMBOL_GPL(tee_shm_get_va);
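
/*
 * Illustrative sketch: copying data into pool-backed shared memory via
 * its kernel mapping; "data" and "len" are placeholders and len must not
 * exceed the size of the allocation.
 *
 *	void *va = tee_shm_get_va(shm, 0);
 *
 *	if (IS_ERR(va))
 *		return PTR_ERR(va);
 *	memcpy(va, data, len);
 */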

/**
 * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
 * @shm:	Shared memory handle
 * @offs:	Offset from start of this shared memory
 * @pa:		Physical address to return
 * @returns 0 if offs is within the bounds of this shared memory, else an
 *	error code.
 */
int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa)
{
	if (offs >= shm->size)
		return -EINVAL;
	if (pa)
		*pa = shm->paddr + offs;
	return 0;
}
EXPORT_SYMBOL_GPL(tee_shm_get_pa);

/**
 * tee_shm_get_from_id() - Find shared memory object and increase reference
 * count
 * @ctx:	Context owning the shared memory
 * @id:		Id of shared memory object
 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
 */
struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
{
	struct tee_device *teedev;
	struct tee_shm *shm;

	if (!ctx)
		return ERR_PTR(-EINVAL);

	teedev = ctx->teedev;
	mutex_lock(&teedev->mutex);
	shm = idr_find(&teedev->idr, id);
	/*
	 * If the tee_shm was found in the IDR it must have a refcount
	 * larger than 0 due to the guarantee in tee_shm_put() below. So
	 * it's safe to use refcount_inc().
	 */
	if (!shm || shm->ctx != ctx)
		shm = ERR_PTR(-EINVAL);
	else
		refcount_inc(&shm->refcount);
	mutex_unlock(&teedev->mutex);
	return shm;
}
EXPORT_SYMBOL_GPL(tee_shm_get_from_id);

/**
 * tee_shm_put() - Decrease reference count on a shared memory handle
 * @shm:	Shared memory handle
 */
void tee_shm_put(struct tee_shm *shm)
{
	struct tee_device *teedev = shm->ctx->teedev;
	bool do_release = false;

	mutex_lock(&teedev->mutex);
	if (refcount_dec_and_test(&shm->refcount)) {
		/*
		 * refcount has reached 0, we must now remove it from the
		 * IDR before releasing the mutex. This will guarantee that
		 * the refcount_inc() in tee_shm_get_from_id() never starts
		 * from 0.
		 */
		if (shm->id >= 0)
			idr_remove(&teedev->idr, shm->id);
		do_release = true;
	}
	mutex_unlock(&teedev->mutex);

	if (do_release)
		tee_shm_release(teedev, shm);
}
EXPORT_SYMBOL_GPL(tee_shm_put);
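
/*
 * Illustrative sketch: looking up a shared memory object by its ID and
 * balancing the reference count afterwards.
 *
 *	struct tee_shm *shm = tee_shm_get_from_id(ctx, id);
 *
 *	if (IS_ERR(shm))
 *		return PTR_ERR(shm);
 *	... access the shared memory ...
 *	tee_shm_put(shm);
 */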