// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2017, 2019-2021 Linaro Limited
 */
#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include "tee_private.h"

static void shm_put_kernel_pages(struct page **pages, size_t page_count)
{
	size_t n;

	for (n = 0; n < page_count; n++)
		put_page(pages[n]);
}

static int shm_get_kernel_pages(unsigned long start, size_t page_count,
				struct page **pages)
{
	struct page *page;
	size_t n;

	if (WARN_ON_ONCE(is_vmalloc_addr((void *)start) ||
			 is_kmap_addr((void *)start)))
		return -EINVAL;

	page = virt_to_page((void *)start);
	for (n = 0; n < page_count; n++) {
		pages[n] = page + n;
		get_page(pages[n]);
	}

	return page_count;
}

static void release_registered_pages(struct tee_shm *shm)
{
	if (shm->pages) {
		if (shm->flags & TEE_SHM_USER_MAPPED)
			unpin_user_pages(shm->pages, shm->num_pages);
		else
			shm_put_kernel_pages(shm->pages, shm->num_pages);

		kfree(shm->pages);
	}
}

static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
{
	if (shm->flags & TEE_SHM_POOL) {
		teedev->pool->ops->free(teedev->pool, shm);
	} else if (shm->flags & TEE_SHM_DYNAMIC) {
		int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);

		if (rc)
			dev_err(teedev->dev.parent,
				"unregister shm %p failed: %d", shm, rc);

		release_registered_pages(shm);
	}

	teedev_ctx_put(shm->ctx);

	kfree(shm);

	tee_device_put(teedev);
}

static struct tee_shm *shm_alloc_helper(struct tee_context *ctx, size_t size,
					size_t align, u32 flags, int id)
{
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm *shm;
	void *ret;
	int rc;

	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	if (!teedev->pool) {
		/* teedev has been detached from driver */
		ret = ERR_PTR(-EINVAL);
		goto err_dev_put;
	}

	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
	if (!shm) {
		ret = ERR_PTR(-ENOMEM);
		goto err_dev_put;
	}

	refcount_set(&shm->refcount, 1);
	shm->flags = flags;
	shm->id = id;

	/*
	 * We're assigning this as it is needed if the shm is to be
	 * registered. If this function returns OK then the caller is
	 * expected to call teedev_ctx_get() or clear shm->ctx in case it's
	 * not needed any longer.
	 */
	shm->ctx = ctx;

	rc = teedev->pool->ops->alloc(teedev->pool, shm, size, align);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err_kfree;
	}

	teedev_ctx_get(ctx);
	return shm;
err_kfree:
	kfree(shm);
err_dev_put:
	tee_device_put(teedev);
	return ret;
}
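
/*
 * For reference, a minimal sketch of the pool callbacks that
 * shm_alloc_helper() drives through teedev->pool->ops->alloc()/free().
 * This is not part of this file: the function names are hypothetical,
 * virt_to_phys() additionally assumes <linux/io.h>, and a real backend
 * (for example the OP-TEE driver) typically allocates from a reserved
 * region or a gen_pool rather than from the page allocator.
 */
static int __maybe_unused example_pool_op_alloc(struct tee_shm_pool *pool,
						struct tee_shm *shm,
						size_t size, size_t align)
{
	size_t sz = roundup(size, PAGE_SIZE);
	void *va = alloc_pages_exact(sz, GFP_KERNEL | __GFP_ZERO);

	if (!va)
		return -ENOMEM;

	/* The alloc callback is expected to fill in kaddr, paddr and size */
	shm->kaddr = va;
	shm->paddr = virt_to_phys(va);
	shm->size = sz;
	return 0;
}

static void __maybe_unused example_pool_op_free(struct tee_shm_pool *pool,
						struct tee_shm *shm)
{
	free_pages_exact(shm->kaddr, shm->size);
	shm->kaddr = NULL;
}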

/**
 * tee_shm_alloc_user_buf() - Allocate shared memory for user space
 * @ctx:	Context that allocates the shared memory
 * @size:	Requested size of shared memory
 *
 * Memory allocated as user space shared memory is automatically freed when
 * the TEE file pointer is closed. The primary usage of this function is
 * when the TEE driver doesn't support registering ordinary user space
 * memory.
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size)
{
	u32 flags = TEE_SHM_DYNAMIC | TEE_SHM_POOL;
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm *shm;
	void *ret;
	int id;

	mutex_lock(&teedev->mutex);
	id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL);
	mutex_unlock(&teedev->mutex);
	if (id < 0)
		return ERR_PTR(id);

	shm = shm_alloc_helper(ctx, size, PAGE_SIZE, flags, id);
	if (IS_ERR(shm)) {
		mutex_lock(&teedev->mutex);
		idr_remove(&teedev->idr, id);
		mutex_unlock(&teedev->mutex);
		return shm;
	}

	mutex_lock(&teedev->mutex);
	ret = idr_replace(&teedev->idr, shm, id);
	mutex_unlock(&teedev->mutex);
	if (IS_ERR(ret)) {
		tee_shm_free(shm);
		return ret;
	}

	return shm;
}

/**
 * tee_shm_alloc_kernel_buf() - Allocate shared memory for kernel buffer
 * @ctx:	Context that allocates the shared memory
 * @size:	Requested size of shared memory
 *
 * The returned memory is registered in secure world and is suitable to be
 * passed as a memory buffer in a parameter argument to
 * tee_client_invoke_func(). The memory allocated is later freed with a
 * call to tee_shm_free().
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size)
{
	u32 flags = TEE_SHM_DYNAMIC | TEE_SHM_POOL;

	return shm_alloc_helper(ctx, size, PAGE_SIZE, flags, -1);
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_kernel_buf);
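
/*
 * Illustrative sketch (not part of this driver) of the usage pattern the
 * kernel-doc above describes: allocate a kernel shared buffer, fill it via
 * tee_shm_get_va(), pass it as a memref parameter to
 * tee_client_invoke_func() and release it with tee_shm_free(). The session
 * handle, function ID and parameter layout are assumptions that depend on
 * the Trusted Application being invoked.
 */
static int __maybe_unused example_invoke_with_kernel_buf(struct tee_context *ctx,
							  u32 session, u32 func,
							  const void *data,
							  size_t len)
{
	struct tee_ioctl_invoke_arg arg = { };
	struct tee_param param[4] = { };
	struct tee_shm *shm;
	void *va;
	int rc;

	shm = tee_shm_alloc_kernel_buf(ctx, len);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	va = tee_shm_get_va(shm, 0);
	if (IS_ERR(va)) {
		rc = PTR_ERR(va);
		goto out_free;
	}
	memcpy(va, data, len);

	arg.func = func;
	arg.session = session;
	arg.num_params = ARRAY_SIZE(param);

	param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
	param[0].u.memref.shm = shm;
	param[0].u.memref.shm_offs = 0;
	param[0].u.memref.size = len;

	rc = tee_client_invoke_func(ctx, &arg, param);
	if (!rc && arg.ret)
		rc = -EIO;
out_free:
	tee_shm_free(shm);
	return rc;
}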

/**
 * tee_shm_alloc_priv_buf() - Allocate shared memory for a privately shared
 *			      kernel buffer
 * @ctx:	Context that allocates the shared memory
 * @size:	Requested size of shared memory
 *
 * This function returns shared memory similar to that returned by
 * tee_shm_alloc_kernel_buf(), with the difference that the memory might
 * not be registered in secure world in case the driver supports passing
 * memory not registered in advance.
 *
 * This function should normally only be used internally in the TEE
 * drivers.
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size)
{
	u32 flags = TEE_SHM_PRIV | TEE_SHM_POOL;

	return shm_alloc_helper(ctx, size, sizeof(long) * 2, flags, -1);
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_priv_buf);

static struct tee_shm *
register_shm_helper(struct tee_context *ctx, unsigned long addr,
		    size_t length, u32 flags, int id)
{
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm *shm;
	unsigned long start;
	size_t num_pages;
	void *ret;
	int rc;

	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	if (!teedev->desc->ops->shm_register ||
	    !teedev->desc->ops->shm_unregister) {
		ret = ERR_PTR(-ENOTSUPP);
		goto err_dev_put;
	}

	teedev_ctx_get(ctx);

	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
	if (!shm) {
		ret = ERR_PTR(-ENOMEM);
		goto err_ctx_put;
	}

	refcount_set(&shm->refcount, 1);
	shm->flags = flags;
	shm->ctx = ctx;
	shm->id = id;
	addr = untagged_addr(addr);
	start = rounddown(addr, PAGE_SIZE);
	shm->offset = addr - start;
	shm->size = length;
	num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE;
	shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
	if (!shm->pages) {
		ret = ERR_PTR(-ENOMEM);
		goto err_free_shm;
	}

	if (flags & TEE_SHM_USER_MAPPED)
		rc = pin_user_pages_fast(start, num_pages, FOLL_WRITE,
					 shm->pages);
	else
		rc = shm_get_kernel_pages(start, num_pages, shm->pages);
	if (rc > 0)
		shm->num_pages = rc;
	if (rc != num_pages) {
		if (rc >= 0)
			rc = -ENOMEM;
		ret = ERR_PTR(rc);
		goto err_put_shm_pages;
	}

	rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
					     shm->num_pages, start);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err_put_shm_pages;
	}

	return shm;
err_put_shm_pages:
	if (flags & TEE_SHM_USER_MAPPED)
		unpin_user_pages(shm->pages, shm->num_pages);
	else
		shm_put_kernel_pages(shm->pages, shm->num_pages);
	kfree(shm->pages);
err_free_shm:
	kfree(shm);
err_ctx_put:
	teedev_ctx_put(ctx);
err_dev_put:
	tee_device_put(teedev);
	return ret;
}

/**
 * tee_shm_register_user_buf() - Register a userspace shared memory buffer
 * @ctx:	Context that registers the shared memory
 * @addr:	The userspace address of the shared buffer
 * @length:	Length of the shared buffer
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx,
					  unsigned long addr, size_t length)
{
	u32 flags = TEE_SHM_USER_MAPPED | TEE_SHM_DYNAMIC;
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm *shm;
	void *ret;
	int id;

	if (!access_ok((void __user *)addr, length))
		return ERR_PTR(-EFAULT);

	mutex_lock(&teedev->mutex);
	id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL);
	mutex_unlock(&teedev->mutex);
	if (id < 0)
		return ERR_PTR(id);

	shm = register_shm_helper(ctx, addr, length, flags, id);
	if (IS_ERR(shm)) {
		mutex_lock(&teedev->mutex);
		idr_remove(&teedev->idr, id);
		mutex_unlock(&teedev->mutex);
		return shm;
	}

	mutex_lock(&teedev->mutex);
	ret = idr_replace(&teedev->idr, shm, id);
	mutex_unlock(&teedev->mutex);
	if (IS_ERR(ret)) {
		tee_shm_free(shm);
		return ret;
	}

	return shm;
}

/**
 * tee_shm_register_kernel_buf() - Register kernel memory to be shared with
 *				   secure world
 * @ctx:	Context that registers the shared memory
 * @addr:	The buffer
 * @length:	Length of the buffer
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx,
					    void *addr, size_t length)
{
	u32 flags = TEE_SHM_DYNAMIC;

	return register_shm_helper(ctx, (unsigned long)addr, length, flags, -1);
}
EXPORT_SYMBOL_GPL(tee_shm_register_kernel_buf);
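
/*
 * Illustrative sketch (not part of this driver): registering an existing,
 * physically contiguous kernel buffer instead of allocating a new one. The
 * buffer must come from the linear map (for example kmalloc()); vmalloc()
 * and kmap() addresses are rejected by shm_get_kernel_pages(). The helper
 * name and the omitted invocation step are assumptions for the example.
 */
static int __maybe_unused example_register_kernel_buf(struct tee_context *ctx,
						       void *buf, size_t len)
{
	struct tee_shm *shm;

	shm = tee_shm_register_kernel_buf(ctx, buf, len);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	/*
	 * ... pass shm as a memref parameter to tee_client_invoke_func(),
	 * as in the tee_shm_alloc_kernel_buf() example above ...
	 */

	/* Unregister from secure world; the caller still owns buf. */
	tee_shm_free(shm);
	return 0;
}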

static int tee_shm_fop_release(struct inode *inode, struct file *filp)
{
	tee_shm_put(filp->private_data);
	return 0;
}

static int tee_shm_fop_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct tee_shm *shm = filp->private_data;
	size_t size = vma->vm_end - vma->vm_start;

	/* Refuse sharing shared memory provided by the application */
	if (shm->flags & TEE_SHM_USER_MAPPED)
		return -EINVAL;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT)
		return -EINVAL;

	return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}

static const struct file_operations tee_shm_fops = {
	.owner = THIS_MODULE,
	.release = tee_shm_fop_release,
	.mmap = tee_shm_fop_mmap,
};

/**
 * tee_shm_get_fd() - Increase reference count and return file descriptor
 * @shm:	Shared memory handle
 * @returns user space file descriptor to shared memory
 */
int tee_shm_get_fd(struct tee_shm *shm)
{
	int fd;

	if (shm->id < 0)
		return -EINVAL;

	/* matched by tee_shm_put() in tee_shm_fop_release() */
	refcount_inc(&shm->refcount);
	fd = anon_inode_getfd("tee_shm", &tee_shm_fops, shm, O_RDWR);
	if (fd < 0)
		tee_shm_put(shm);
	return fd;
}

/**
 * tee_shm_free() - Free shared memory
 * @shm:	Handle to shared memory to free
 */
void tee_shm_free(struct tee_shm *shm)
{
	tee_shm_put(shm);
}
EXPORT_SYMBOL_GPL(tee_shm_free);

/**
 * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
 * @shm:	Shared memory handle
 * @offs:	Offset from start of this shared memory
 * @returns virtual address of the shared memory + offs if offs is within
 *	the bounds of this shared memory, else an ERR_PTR
 */
void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
{
	if (!shm->kaddr)
		return ERR_PTR(-EINVAL);
	if (offs >= shm->size)
		return ERR_PTR(-EINVAL);
	return (char *)shm->kaddr + offs;
}
EXPORT_SYMBOL_GPL(tee_shm_get_va);

/**
 * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
 * @shm:	Shared memory handle
 * @offs:	Offset from start of this shared memory
 * @pa:		Physical address to return
 * @returns 0 if offs is within the bounds of this shared memory, else an
 *	error code.
 */
int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa)
{
	if (offs >= shm->size)
		return -EINVAL;
	if (pa)
		*pa = shm->paddr + offs;
	return 0;
}
EXPORT_SYMBOL_GPL(tee_shm_get_pa);
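
/*
 * Illustrative sketch (not part of this driver): reading back a result
 * that secure world wrote at a given offset of a shared buffer. Both
 * accessors above validate the offset against the buffer size, so the
 * caller only needs to check for errors. The function name, offset and
 * output length are assumptions for the example.
 */
static int __maybe_unused example_read_result(struct tee_shm *shm, size_t offs,
					      void *out, size_t out_len)
{
	phys_addr_t pa;
	void *va;
	int rc;

	va = tee_shm_get_va(shm, offs);
	if (IS_ERR(va))
		return PTR_ERR(va);

	/* The physical address is mainly useful for DMA-style consumers */
	rc = tee_shm_get_pa(shm, offs, &pa);
	if (rc)
		return rc;

	/* Make sure the copy stays within the shared buffer */
	if (out_len > shm->size - offs)
		return -EINVAL;

	memcpy(out, va, out_len);
	return 0;
}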

/**
 * tee_shm_get_from_id() - Find shared memory object and increase reference
 *			   count
 * @ctx:	Context owning the shared memory
 * @id:		Id of shared memory object
 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
 */
struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
{
	struct tee_device *teedev;
	struct tee_shm *shm;

	if (!ctx)
		return ERR_PTR(-EINVAL);

	teedev = ctx->teedev;
	mutex_lock(&teedev->mutex);
	shm = idr_find(&teedev->idr, id);
	/*
	 * If the tee_shm was found in the IDR it must have a refcount
	 * larger than 0 due to the guarantee in tee_shm_put() below. So
	 * it's safe to use refcount_inc().
	 */
	if (!shm || shm->ctx != ctx)
		shm = ERR_PTR(-EINVAL);
	else
		refcount_inc(&shm->refcount);
	mutex_unlock(&teedev->mutex);
	return shm;
}
EXPORT_SYMBOL_GPL(tee_shm_get_from_id);

/**
 * tee_shm_put() - Decrease reference count on a shared memory handle
 * @shm:	Shared memory handle
 */
void tee_shm_put(struct tee_shm *shm)
{
	struct tee_device *teedev = shm->ctx->teedev;
	bool do_release = false;

	mutex_lock(&teedev->mutex);
	if (refcount_dec_and_test(&shm->refcount)) {
		/*
		 * refcount has reached 0, we must now remove it from the
		 * IDR before releasing the mutex. This will guarantee that
		 * the refcount_inc() in tee_shm_get_from_id() never starts
		 * from 0.
		 */
		if (shm->id >= 0)
			idr_remove(&teedev->idr, shm->id);
		do_release = true;
	}
	mutex_unlock(&teedev->mutex);

	if (do_release)
		tee_shm_release(teedev, shm);
}
EXPORT_SYMBOL_GPL(tee_shm_put);
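
/*
 * Illustrative sketch (not part of this driver): resolving a shared memory
 * ID received from user space (for example in a custom ioctl argument) and
 * dropping the reference again once done. Every successful
 * tee_shm_get_from_id() must be balanced by a tee_shm_put(). The function
 * name is hypothetical.
 */
static int __maybe_unused example_use_shm_id(struct tee_context *ctx, int id)
{
	struct tee_shm *shm;
	void *va;
	int rc = 0;

	shm = tee_shm_get_from_id(ctx, id);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	/* kaddr may be unset for user-registered memory, so check the VA */
	va = tee_shm_get_va(shm, 0);
	if (IS_ERR(va))
		rc = PTR_ERR(va);
	/* ... otherwise use the buffer at va while the reference is held ... */

	tee_shm_put(shm);
	return rc;
}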