// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2016, Linaro Limited
 */
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/fdtable.h>
#include <linux/idr.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/uio.h>
#include <linux/module.h>
#include "tee_private.h"

MODULE_IMPORT_NS(DMA_BUF);

static void release_registered_pages(struct tee_shm *shm)
{
	if (shm->pages) {
		if (shm->flags & TEE_SHM_USER_MAPPED) {
			unpin_user_pages(shm->pages, shm->num_pages);
		} else {
			size_t n;

			for (n = 0; n < shm->num_pages; n++)
				put_page(shm->pages[n]);
		}

		kfree(shm->pages);
	}
}

static void tee_shm_release(struct tee_shm *shm)
{
	struct tee_device *teedev = shm->ctx->teedev;

	if (shm->flags & TEE_SHM_DMA_BUF) {
		mutex_lock(&teedev->mutex);
		idr_remove(&teedev->idr, shm->id);
		mutex_unlock(&teedev->mutex);
	}

	if (shm->flags & TEE_SHM_POOL) {
		struct tee_shm_pool_mgr *poolm;

		if (shm->flags & TEE_SHM_DMA_BUF)
			poolm = teedev->pool->dma_buf_mgr;
		else
			poolm = teedev->pool->private_mgr;

		poolm->ops->free(poolm, shm);
	} else if (shm->flags & TEE_SHM_REGISTER) {
		int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);

		if (rc)
			dev_err(teedev->dev.parent,
				"unregister shm %p failed: %d", shm, rc);

		release_registered_pages(shm);
	}

	teedev_ctx_put(shm->ctx);

	kfree(shm);

	tee_device_put(teedev);
}

static struct sg_table *tee_shm_op_map_dma_buf(struct dma_buf_attachment
			*attach, enum dma_data_direction dir)
{
	return NULL;
}

static void tee_shm_op_unmap_dma_buf(struct dma_buf_attachment *attach,
				     struct sg_table *table,
				     enum dma_data_direction dir)
{
}

static void tee_shm_op_release(struct dma_buf *dmabuf)
{
	struct tee_shm *shm = dmabuf->priv;

	tee_shm_release(shm);
}

static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct tee_shm *shm = dmabuf->priv;
	size_t size = vma->vm_end - vma->vm_start;

	/* Refuse sharing shared memory provided by application */
	if (shm->flags & TEE_SHM_USER_MAPPED)
		return -EINVAL;

	return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}

static const struct dma_buf_ops tee_shm_dma_buf_ops = {
	.map_dma_buf = tee_shm_op_map_dma_buf,
	.unmap_dma_buf = tee_shm_op_unmap_dma_buf,
	.release = tee_shm_op_release,
	.mmap = tee_shm_op_mmap,
};

struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
{
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm_pool_mgr *poolm = NULL;
	struct tee_shm *shm;
	void *ret;
	int rc;

	if (!(flags & TEE_SHM_MAPPED)) {
		dev_err(teedev->dev.parent,
			"only mapped allocations supported\n");
		return ERR_PTR(-EINVAL);
	}

	if (flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF | TEE_SHM_PRIV)) {
		dev_err(teedev->dev.parent, "invalid shm flags 0x%x", flags);
		return ERR_PTR(-EINVAL);
	}

	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	if (!teedev->pool) {
		/* teedev has been detached from driver */
		ret = ERR_PTR(-EINVAL);
		goto err_dev_put;
	}

	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
	if (!shm) {
		ret = ERR_PTR(-ENOMEM);
		goto err_dev_put;
	}

	shm->flags = flags | TEE_SHM_POOL;
	shm->ctx = ctx;
	if (flags & TEE_SHM_DMA_BUF)
		poolm = teedev->pool->dma_buf_mgr;
	else
		poolm = teedev->pool->private_mgr;

	rc = poolm->ops->alloc(poolm, shm, size);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err_kfree;
	}

	if (flags & TEE_SHM_DMA_BUF) {
		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

		mutex_lock(&teedev->mutex);
		shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
		mutex_unlock(&teedev->mutex);
		if (shm->id < 0) {
			ret = ERR_PTR(shm->id);
			goto err_pool_free;
		}

		exp_info.ops = &tee_shm_dma_buf_ops;
		exp_info.size = shm->size;
		exp_info.flags = O_RDWR;
		exp_info.priv = shm;

		shm->dmabuf = dma_buf_export(&exp_info);
		if (IS_ERR(shm->dmabuf)) {
			ret = ERR_CAST(shm->dmabuf);
			goto err_rem;
		}
	}

	teedev_ctx_get(ctx);

	return shm;
err_rem:
	if (flags & TEE_SHM_DMA_BUF) {
		mutex_lock(&teedev->mutex);
		idr_remove(&teedev->idr, shm->id);
		mutex_unlock(&teedev->mutex);
	}
err_pool_free:
	poolm->ops->free(poolm, shm);
err_kfree:
	kfree(shm);
err_dev_put:
	tee_device_put(teedev);
	return ret;
}
EXPORT_SYMBOL_GPL(tee_shm_alloc);

/**
 * tee_shm_alloc_kernel_buf() - Allocate shared memory for kernel buffer
 * @ctx:	Context that allocates the shared memory
 * @size:	Requested size of shared memory
 *
 * The returned memory is registered in secure world and is suitable to be
 * passed as a memory buffer in a parameter argument to
 * tee_client_invoke_func(). The memory allocated is later freed with a
 * call to tee_shm_free().
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size)
{
	return tee_shm_alloc(ctx, size, TEE_SHM_MAPPED);
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_kernel_buf);
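
/*
 * Illustrative sketch (not part of the driver): a TEE client driver would
 * typically pair tee_shm_alloc_kernel_buf() with tee_shm_free() around an
 * invocation. The context "ctx" below is assumed to come from
 * tee_client_open_context().
 *
 *	struct tee_shm *shm = tee_shm_alloc_kernel_buf(ctx, PAGE_SIZE);
 *
 *	if (IS_ERR(shm))
 *		return PTR_ERR(shm);
 *	memcpy(tee_shm_get_va(shm, 0), src, len);
 *	... pass shm via param[i].u.memref.shm to tee_client_invoke_func() ...
 *	tee_shm_free(shm);
 */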

struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
				 size_t length, u32 flags)
{
	struct tee_device *teedev = ctx->teedev;
	const u32 req_user_flags = TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED;
	const u32 req_kernel_flags = TEE_SHM_DMA_BUF | TEE_SHM_KERNEL_MAPPED;
	struct tee_shm *shm;
	void *ret;
	int rc;
	int num_pages;
	unsigned long start;

	if (flags != req_user_flags && flags != req_kernel_flags)
		return ERR_PTR(-ENOTSUPP);

	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	if (!teedev->desc->ops->shm_register ||
	    !teedev->desc->ops->shm_unregister) {
		tee_device_put(teedev);
		return ERR_PTR(-ENOTSUPP);
	}

	teedev_ctx_get(ctx);

	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
	if (!shm) {
		ret = ERR_PTR(-ENOMEM);
		goto err;
	}

	shm->flags = flags | TEE_SHM_REGISTER;
	shm->ctx = ctx;
	shm->id = -1;
	addr = untagged_addr(addr);
	start = rounddown(addr, PAGE_SIZE);
	shm->offset = addr - start;
	shm->size = length;
	num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE;
	shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
	if (!shm->pages) {
		ret = ERR_PTR(-ENOMEM);
		goto err;
	}

	if (flags & TEE_SHM_USER_MAPPED) {
		rc = pin_user_pages_fast(start, num_pages, FOLL_WRITE,
					 shm->pages);
	} else {
		struct kvec *kiov;
		int i;

		kiov = kcalloc(num_pages, sizeof(*kiov), GFP_KERNEL);
		if (!kiov) {
			ret = ERR_PTR(-ENOMEM);
			goto err;
		}

		for (i = 0; i < num_pages; i++) {
			kiov[i].iov_base = (void *)(start + i * PAGE_SIZE);
			kiov[i].iov_len = PAGE_SIZE;
		}

		rc = get_kernel_pages(kiov, num_pages, 0, shm->pages);
		kfree(kiov);
	}
	if (rc > 0)
		shm->num_pages = rc;
	if (rc != num_pages) {
		if (rc >= 0)
			rc = -ENOMEM;
		ret = ERR_PTR(rc);
		goto err;
	}

	mutex_lock(&teedev->mutex);
	shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
	mutex_unlock(&teedev->mutex);

	if (shm->id < 0) {
		ret = ERR_PTR(shm->id);
		goto err;
	}

	rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
					     shm->num_pages, start);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err;
	}

	if (flags & TEE_SHM_DMA_BUF) {
		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

		exp_info.ops = &tee_shm_dma_buf_ops;
		exp_info.size = shm->size;
		exp_info.flags = O_RDWR;
		exp_info.priv = shm;

		shm->dmabuf = dma_buf_export(&exp_info);
		if (IS_ERR(shm->dmabuf)) {
			ret = ERR_CAST(shm->dmabuf);
			teedev->desc->ops->shm_unregister(ctx, shm);
			goto err;
		}
	}

	return shm;
err:
	if (shm) {
		if (shm->id >= 0) {
			mutex_lock(&teedev->mutex);
			idr_remove(&teedev->idr, shm->id);
			mutex_unlock(&teedev->mutex);
		}
		release_registered_pages(shm);
	}
	kfree(shm);
	teedev_ctx_put(ctx);
	tee_device_put(teedev);
	return ret;
}
EXPORT_SYMBOL_GPL(tee_shm_register);
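
/*
 * Illustrative sketch (not part of the driver): registering an existing
 * kernel buffer instead of allocating one from the pool. "buf", "buf_len"
 * and "ctx" are assumed to already exist in the caller.
 *
 *	struct tee_shm *shm;
 *
 *	shm = tee_shm_register(ctx, (unsigned long)buf, buf_len,
 *			       TEE_SHM_DMA_BUF | TEE_SHM_KERNEL_MAPPED);
 *	if (IS_ERR(shm))
 *		return PTR_ERR(shm);
 *	...
 *	tee_shm_free(shm);
 */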

/**
 * tee_shm_get_fd() - Increase reference count and return file descriptor
 * @shm:	Shared memory handle
 * @returns user space file descriptor to shared memory
 */
int tee_shm_get_fd(struct tee_shm *shm)
{
	int fd;

	if (!(shm->flags & TEE_SHM_DMA_BUF))
		return -EINVAL;

	get_dma_buf(shm->dmabuf);
	fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(shm->dmabuf);
	return fd;
}

/**
 * tee_shm_free() - Free shared memory
 * @shm:	Handle to shared memory to free
 */
void tee_shm_free(struct tee_shm *shm)
{
	/*
	 * dma_buf_put() decreases the dmabuf reference counter and will
	 * call tee_shm_release() when the last reference is gone.
	 *
	 * In the case of driver private memory we call tee_shm_release
	 * directly instead as it doesn't have a reference counter.
	 */
	if (shm->flags & TEE_SHM_DMA_BUF)
		dma_buf_put(shm->dmabuf);
	else
		tee_shm_release(shm);
}
EXPORT_SYMBOL_GPL(tee_shm_free);

/**
 * tee_shm_va2pa() - Get physical address of a virtual address
 * @shm:	Shared memory handle
 * @va:		Virtual address to translate
 * @pa:		Returned physical address
 * @returns 0 on success and < 0 on failure
 */
int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa)
{
	if (!(shm->flags & TEE_SHM_MAPPED))
		return -EINVAL;
	/* Check that we're in the range of the shm */
	if ((char *)va < (char *)shm->kaddr)
		return -EINVAL;
	if ((char *)va >= ((char *)shm->kaddr + shm->size))
		return -EINVAL;

	return tee_shm_get_pa(
			shm, (unsigned long)va - (unsigned long)shm->kaddr, pa);
}
EXPORT_SYMBOL_GPL(tee_shm_va2pa);

/**
 * tee_shm_pa2va() - Get virtual address of a physical address
 * @shm:	Shared memory handle
 * @pa:		Physical address to translate
 * @va:		Returned virtual address
 * @returns 0 on success and < 0 on failure
 */
int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va)
{
	if (!(shm->flags & TEE_SHM_MAPPED))
		return -EINVAL;
	/* Check that we're in the range of the shm */
	if (pa < shm->paddr)
		return -EINVAL;
	if (pa >= (shm->paddr + shm->size))
		return -EINVAL;

	if (va) {
		void *v = tee_shm_get_va(shm, pa - shm->paddr);

		if (IS_ERR(v))
			return PTR_ERR(v);
		*va = v;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(tee_shm_pa2va);

/**
 * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
 * @shm:	Shared memory handle
 * @offs:	Offset from start of this shared memory
 * @returns virtual address of the shared memory + offs if offs is within
 *	the bounds of this shared memory, else an ERR_PTR
 */
void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
{
	if (!(shm->flags & TEE_SHM_MAPPED))
		return ERR_PTR(-EINVAL);
	if (offs >= shm->size)
		return ERR_PTR(-EINVAL);
	return (char *)shm->kaddr + offs;
}
EXPORT_SYMBOL_GPL(tee_shm_get_va);
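
/*
 * Illustrative sketch (not part of the driver): translating an offset in a
 * mapped shm both to a kernel virtual address and to a physical address,
 * e.g. when building a message for the secure world. "shm" and "offs" are
 * assumed to come from the caller.
 *
 *	phys_addr_t pa;
 *	void *va = tee_shm_get_va(shm, offs);
 *
 *	if (IS_ERR(va))
 *		return PTR_ERR(va);
 *	if (tee_shm_get_pa(shm, offs, &pa))
 *		return -EINVAL;
 */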

/**
 * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
 * @shm:	Shared memory handle
 * @offs:	Offset from start of this shared memory
 * @pa:		Physical address to return
 * @returns 0 if offs is within the bounds of this shared memory, else an
 *	error code.
 */
int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa)
{
	if (offs >= shm->size)
		return -EINVAL;
	if (pa)
		*pa = shm->paddr + offs;
	return 0;
}
EXPORT_SYMBOL_GPL(tee_shm_get_pa);

/**
 * tee_shm_get_from_id() - Find shared memory object and increase reference
 * count
 * @ctx:	Context owning the shared memory
 * @id:		Id of shared memory object
 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
 */
struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
{
	struct tee_device *teedev;
	struct tee_shm *shm;

	if (!ctx)
		return ERR_PTR(-EINVAL);

	teedev = ctx->teedev;
	mutex_lock(&teedev->mutex);
	shm = idr_find(&teedev->idr, id);
	if (!shm || shm->ctx != ctx)
		shm = ERR_PTR(-EINVAL);
	else if (shm->flags & TEE_SHM_DMA_BUF)
		get_dma_buf(shm->dmabuf);
	mutex_unlock(&teedev->mutex);
	return shm;
}
EXPORT_SYMBOL_GPL(tee_shm_get_from_id);

/**
 * tee_shm_put() - Decrease reference count on a shared memory handle
 * @shm:	Shared memory handle
 */
void tee_shm_put(struct tee_shm *shm)
{
	if (shm->flags & TEE_SHM_DMA_BUF)
		dma_buf_put(shm->dmabuf);
}
EXPORT_SYMBOL_GPL(tee_shm_put);
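
/*
 * Illustrative sketch (not part of the driver): looking up a shm by the id
 * passed from user space and dropping the reference again when done. "ctx"
 * and "id" are assumed to come from the caller.
 *
 *	struct tee_shm *shm = tee_shm_get_from_id(ctx, id);
 *
 *	if (IS_ERR(shm))
 *		return PTR_ERR(shm);
 *	...
 *	tee_shm_put(shm);
 */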