1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Framework for buffer objects that can be shared across devices/subsystems. 4 * 5 * Copyright(C) 2011 Linaro Limited. All rights reserved. 6 * Author: Sumit Semwal <sumit.semwal@ti.com> 7 * 8 * Many thanks to linaro-mm-sig list, and specially 9 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and 10 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and 11 * refining of this idea. 12 */ 13 14 #include <linux/fs.h> 15 #include <linux/slab.h> 16 #include <linux/dma-buf.h> 17 #include <linux/dma-fence.h> 18 #include <linux/dma-fence-unwrap.h> 19 #include <linux/anon_inodes.h> 20 #include <linux/export.h> 21 #include <linux/debugfs.h> 22 #include <linux/module.h> 23 #include <linux/seq_file.h> 24 #include <linux/sync_file.h> 25 #include <linux/poll.h> 26 #include <linux/dma-resv.h> 27 #include <linux/mm.h> 28 #include <linux/mount.h> 29 #include <linux/pseudo_fs.h> 30 31 #include <uapi/linux/dma-buf.h> 32 #include <uapi/linux/magic.h> 33 34 #include "dma-buf-sysfs-stats.h" 35 36 static inline int is_dma_buf_file(struct file *); 37 38 #if IS_ENABLED(CONFIG_DEBUG_FS) 39 static DEFINE_MUTEX(debugfs_list_mutex); 40 static LIST_HEAD(debugfs_list); 41 42 static void __dma_buf_debugfs_list_add(struct dma_buf *dmabuf) 43 { 44 mutex_lock(&debugfs_list_mutex); 45 list_add(&dmabuf->list_node, &debugfs_list); 46 mutex_unlock(&debugfs_list_mutex); 47 } 48 49 static void __dma_buf_debugfs_list_del(struct dma_buf *dmabuf) 50 { 51 if (!dmabuf) 52 return; 53 54 mutex_lock(&debugfs_list_mutex); 55 list_del(&dmabuf->list_node); 56 mutex_unlock(&debugfs_list_mutex); 57 } 58 #else 59 static void __dma_buf_debugfs_list_add(struct dma_buf *dmabuf) 60 { 61 } 62 63 static void __dma_buf_debugfs_list_del(struct dma_buf *dmabuf) 64 { 65 } 66 #endif 67 68 static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen) 69 { 70 struct dma_buf *dmabuf; 71 char name[DMA_BUF_NAME_LEN]; 72 ssize_t ret = 0; 73 74 dmabuf = dentry->d_fsdata; 75 spin_lock(&dmabuf->name_lock); 76 if (dmabuf->name) 77 ret = strscpy(name, dmabuf->name, sizeof(name)); 78 spin_unlock(&dmabuf->name_lock); 79 80 return dynamic_dname(buffer, buflen, "/%s:%s", 81 dentry->d_name.name, ret > 0 ? name : ""); 82 } 83 84 static void dma_buf_release(struct dentry *dentry) 85 { 86 struct dma_buf *dmabuf; 87 88 dmabuf = dentry->d_fsdata; 89 if (unlikely(!dmabuf)) 90 return; 91 92 BUG_ON(dmabuf->vmapping_counter); 93 94 /* 95 * If you hit this BUG() it could mean: 96 * * There's a file reference imbalance in dma_buf_poll / dma_buf_poll_cb or somewhere else 97 * * dmabuf->cb_in/out.active are non-0 despite no pending fence callback 98 */ 99 BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active); 100 101 dma_buf_stats_teardown(dmabuf); 102 dmabuf->ops->release(dmabuf); 103 104 if (dmabuf->resv == (struct dma_resv *)&dmabuf[1]) 105 dma_resv_fini(dmabuf->resv); 106 107 WARN_ON(!list_empty(&dmabuf->attachments)); 108 module_put(dmabuf->owner); 109 kfree(dmabuf->name); 110 kfree(dmabuf); 111 } 112 113 static int dma_buf_file_release(struct inode *inode, struct file *file) 114 { 115 if (!is_dma_buf_file(file)) 116 return -EINVAL; 117 118 __dma_buf_debugfs_list_del(file->private_data); 119 120 return 0; 121 } 122 123 static const struct dentry_operations dma_buf_dentry_ops = { 124 .d_dname = dmabuffs_dname, 125 .d_release = dma_buf_release, 126 }; 127 128 static struct vfsmount *dma_buf_mnt; 129 130 static int dma_buf_fs_init_context(struct fs_context *fc) 131 { 132 struct pseudo_fs_context *ctx; 133 134 ctx = init_pseudo(fc, DMA_BUF_MAGIC); 135 if (!ctx) 136 return -ENOMEM; 137 ctx->dops = &dma_buf_dentry_ops; 138 return 0; 139 } 140 141 static struct file_system_type dma_buf_fs_type = { 142 .name = "dmabuf", 143 .init_fs_context = dma_buf_fs_init_context, 144 .kill_sb = kill_anon_super, 145 }; 146 147 static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma) 148 { 149 struct dma_buf *dmabuf; 150 151 if (!is_dma_buf_file(file)) 152 return -EINVAL; 153 154 dmabuf = file->private_data; 155 156 /* check if buffer supports mmap */ 157 if (!dmabuf->ops->mmap) 158 return -EINVAL; 159 160 /* check for overflowing the buffer's size */ 161 if (vma->vm_pgoff + vma_pages(vma) > 162 dmabuf->size >> PAGE_SHIFT) 163 return -EINVAL; 164 165 return dmabuf->ops->mmap(dmabuf, vma); 166 } 167 168 static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence) 169 { 170 struct dma_buf *dmabuf; 171 loff_t base; 172 173 if (!is_dma_buf_file(file)) 174 return -EBADF; 175 176 dmabuf = file->private_data; 177 178 /* only support discovering the end of the buffer, 179 * but also allow SEEK_SET to maintain the idiomatic 180 * SEEK_END(0), SEEK_CUR(0) pattern. 181 */ 182 if (whence == SEEK_END) 183 base = dmabuf->size; 184 else if (whence == SEEK_SET) 185 base = 0; 186 else 187 return -EINVAL; 188 189 if (offset != 0) 190 return -EINVAL; 191 192 return base + offset; 193 } 194 195 /** 196 * DOC: implicit fence polling 197 * 198 * To support cross-device and cross-driver synchronization of buffer access 199 * implicit fences (represented internally in the kernel with &struct dma_fence) 200 * can be attached to a &dma_buf. The glue for that and a few related things are 201 * provided in the &dma_resv structure. 202 * 203 * Userspace can query the state of these implicitly tracked fences using poll() 204 * and related system calls: 205 * 206 * - Checking for EPOLLIN, i.e. read access, can be use to query the state of the 207 * most recent write or exclusive fence. 208 * 209 * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of 210 * all attached fences, shared and exclusive ones. 211 * 212 * Note that this only signals the completion of the respective fences, i.e. the 213 * DMA transfers are complete. Cache flushing and any other necessary 214 * preparations before CPU access can begin still need to happen. 215 * 216 * As an alternative to poll(), the set of fences on DMA buffer can be 217 * exported as a &sync_file using &dma_buf_sync_file_export. 218 */ 219 220 static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb) 221 { 222 struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb; 223 struct dma_buf *dmabuf = container_of(dcb->poll, struct dma_buf, poll); 224 unsigned long flags; 225 226 spin_lock_irqsave(&dcb->poll->lock, flags); 227 wake_up_locked_poll(dcb->poll, dcb->active); 228 dcb->active = 0; 229 spin_unlock_irqrestore(&dcb->poll->lock, flags); 230 dma_fence_put(fence); 231 /* Paired with get_file in dma_buf_poll */ 232 fput(dmabuf->file); 233 } 234 235 static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write, 236 struct dma_buf_poll_cb_t *dcb) 237 { 238 struct dma_resv_iter cursor; 239 struct dma_fence *fence; 240 int r; 241 242 dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write), 243 fence) { 244 dma_fence_get(fence); 245 r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb); 246 if (!r) 247 return true; 248 dma_fence_put(fence); 249 } 250 251 return false; 252 } 253 254 static __poll_t dma_buf_poll(struct file *file, poll_table *poll) 255 { 256 struct dma_buf *dmabuf; 257 struct dma_resv *resv; 258 __poll_t events; 259 260 dmabuf = file->private_data; 261 if (!dmabuf || !dmabuf->resv) 262 return EPOLLERR; 263 264 resv = dmabuf->resv; 265 266 poll_wait(file, &dmabuf->poll, poll); 267 268 events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT); 269 if (!events) 270 return 0; 271 272 dma_resv_lock(resv, NULL); 273 274 if (events & EPOLLOUT) { 275 struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out; 276 277 /* Check that callback isn't busy */ 278 spin_lock_irq(&dmabuf->poll.lock); 279 if (dcb->active) 280 events &= ~EPOLLOUT; 281 else 282 dcb->active = EPOLLOUT; 283 spin_unlock_irq(&dmabuf->poll.lock); 284 285 if (events & EPOLLOUT) { 286 /* Paired with fput in dma_buf_poll_cb */ 287 get_file(dmabuf->file); 288 289 if (!dma_buf_poll_add_cb(resv, true, dcb)) 290 /* No callback queued, wake up any other waiters */ 291 dma_buf_poll_cb(NULL, &dcb->cb); 292 else 293 events &= ~EPOLLOUT; 294 } 295 } 296 297 if (events & EPOLLIN) { 298 struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in; 299 300 /* Check that callback isn't busy */ 301 spin_lock_irq(&dmabuf->poll.lock); 302 if (dcb->active) 303 events &= ~EPOLLIN; 304 else 305 dcb->active = EPOLLIN; 306 spin_unlock_irq(&dmabuf->poll.lock); 307 308 if (events & EPOLLIN) { 309 /* Paired with fput in dma_buf_poll_cb */ 310 get_file(dmabuf->file); 311 312 if (!dma_buf_poll_add_cb(resv, false, dcb)) 313 /* No callback queued, wake up any other waiters */ 314 dma_buf_poll_cb(NULL, &dcb->cb); 315 else 316 events &= ~EPOLLIN; 317 } 318 } 319 320 dma_resv_unlock(resv); 321 return events; 322 } 323 324 /** 325 * dma_buf_set_name - Set a name to a specific dma_buf to track the usage. 326 * It could support changing the name of the dma-buf if the same 327 * piece of memory is used for multiple purpose between different devices. 328 * 329 * @dmabuf: [in] dmabuf buffer that will be renamed. 330 * @buf: [in] A piece of userspace memory that contains the name of 331 * the dma-buf. 332 * 333 * Returns 0 on success. If the dma-buf buffer is already attached to 334 * devices, return -EBUSY. 335 * 336 */ 337 static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf) 338 { 339 char *name = strndup_user(buf, DMA_BUF_NAME_LEN); 340 341 if (IS_ERR(name)) 342 return PTR_ERR(name); 343 344 spin_lock(&dmabuf->name_lock); 345 kfree(dmabuf->name); 346 dmabuf->name = name; 347 spin_unlock(&dmabuf->name_lock); 348 349 return 0; 350 } 351 352 #if IS_ENABLED(CONFIG_SYNC_FILE) 353 static long dma_buf_export_sync_file(struct dma_buf *dmabuf, 354 void __user *user_data) 355 { 356 struct dma_buf_export_sync_file arg; 357 enum dma_resv_usage usage; 358 struct dma_fence *fence = NULL; 359 struct sync_file *sync_file; 360 int fd, ret; 361 362 if (copy_from_user(&arg, user_data, sizeof(arg))) 363 return -EFAULT; 364 365 if (arg.flags & ~DMA_BUF_SYNC_RW) 366 return -EINVAL; 367 368 if ((arg.flags & DMA_BUF_SYNC_RW) == 0) 369 return -EINVAL; 370 371 fd = get_unused_fd_flags(O_CLOEXEC); 372 if (fd < 0) 373 return fd; 374 375 usage = dma_resv_usage_rw(arg.flags & DMA_BUF_SYNC_WRITE); 376 ret = dma_resv_get_singleton(dmabuf->resv, usage, &fence); 377 if (ret) 378 goto err_put_fd; 379 380 if (!fence) 381 fence = dma_fence_get_stub(); 382 383 sync_file = sync_file_create(fence); 384 385 dma_fence_put(fence); 386 387 if (!sync_file) { 388 ret = -ENOMEM; 389 goto err_put_fd; 390 } 391 392 arg.fd = fd; 393 if (copy_to_user(user_data, &arg, sizeof(arg))) { 394 ret = -EFAULT; 395 goto err_put_file; 396 } 397 398 fd_install(fd, sync_file->file); 399 400 return 0; 401 402 err_put_file: 403 fput(sync_file->file); 404 err_put_fd: 405 put_unused_fd(fd); 406 return ret; 407 } 408 409 static long dma_buf_import_sync_file(struct dma_buf *dmabuf, 410 const void __user *user_data) 411 { 412 struct dma_buf_import_sync_file arg; 413 struct dma_fence *fence, *f; 414 enum dma_resv_usage usage; 415 struct dma_fence_unwrap iter; 416 unsigned int num_fences; 417 int ret = 0; 418 419 if (copy_from_user(&arg, user_data, sizeof(arg))) 420 return -EFAULT; 421 422 if (arg.flags & ~DMA_BUF_SYNC_RW) 423 return -EINVAL; 424 425 if ((arg.flags & DMA_BUF_SYNC_RW) == 0) 426 return -EINVAL; 427 428 fence = sync_file_get_fence(arg.fd); 429 if (!fence) 430 return -EINVAL; 431 432 usage = (arg.flags & DMA_BUF_SYNC_WRITE) ? DMA_RESV_USAGE_WRITE : 433 DMA_RESV_USAGE_READ; 434 435 num_fences = 0; 436 dma_fence_unwrap_for_each(f, &iter, fence) 437 ++num_fences; 438 439 if (num_fences > 0) { 440 dma_resv_lock(dmabuf->resv, NULL); 441 442 ret = dma_resv_reserve_fences(dmabuf->resv, num_fences); 443 if (!ret) { 444 dma_fence_unwrap_for_each(f, &iter, fence) 445 dma_resv_add_fence(dmabuf->resv, f, usage); 446 } 447 448 dma_resv_unlock(dmabuf->resv); 449 } 450 451 dma_fence_put(fence); 452 453 return ret; 454 } 455 #endif 456 457 static long dma_buf_ioctl(struct file *file, 458 unsigned int cmd, unsigned long arg) 459 { 460 struct dma_buf *dmabuf; 461 struct dma_buf_sync sync; 462 enum dma_data_direction direction; 463 int ret; 464 465 dmabuf = file->private_data; 466 467 switch (cmd) { 468 case DMA_BUF_IOCTL_SYNC: 469 if (copy_from_user(&sync, (void __user *) arg, sizeof(sync))) 470 return -EFAULT; 471 472 if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK) 473 return -EINVAL; 474 475 switch (sync.flags & DMA_BUF_SYNC_RW) { 476 case DMA_BUF_SYNC_READ: 477 direction = DMA_FROM_DEVICE; 478 break; 479 case DMA_BUF_SYNC_WRITE: 480 direction = DMA_TO_DEVICE; 481 break; 482 case DMA_BUF_SYNC_RW: 483 direction = DMA_BIDIRECTIONAL; 484 break; 485 default: 486 return -EINVAL; 487 } 488 489 if (sync.flags & DMA_BUF_SYNC_END) 490 ret = dma_buf_end_cpu_access(dmabuf, direction); 491 else 492 ret = dma_buf_begin_cpu_access(dmabuf, direction); 493 494 return ret; 495 496 case DMA_BUF_SET_NAME_A: 497 case DMA_BUF_SET_NAME_B: 498 return dma_buf_set_name(dmabuf, (const char __user *)arg); 499 500 #if IS_ENABLED(CONFIG_SYNC_FILE) 501 case DMA_BUF_IOCTL_EXPORT_SYNC_FILE: 502 return dma_buf_export_sync_file(dmabuf, (void __user *)arg); 503 case DMA_BUF_IOCTL_IMPORT_SYNC_FILE: 504 return dma_buf_import_sync_file(dmabuf, (const void __user *)arg); 505 #endif 506 507 default: 508 return -ENOTTY; 509 } 510 } 511 512 static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file) 513 { 514 struct dma_buf *dmabuf = file->private_data; 515 516 seq_printf(m, "size:\t%zu\n", dmabuf->size); 517 /* Don't count the temporary reference taken inside procfs seq_show */ 518 seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1); 519 seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name); 520 spin_lock(&dmabuf->name_lock); 521 if (dmabuf->name) 522 seq_printf(m, "name:\t%s\n", dmabuf->name); 523 spin_unlock(&dmabuf->name_lock); 524 } 525 526 static const struct file_operations dma_buf_fops = { 527 .release = dma_buf_file_release, 528 .mmap = dma_buf_mmap_internal, 529 .llseek = dma_buf_llseek, 530 .poll = dma_buf_poll, 531 .unlocked_ioctl = dma_buf_ioctl, 532 .compat_ioctl = compat_ptr_ioctl, 533 .show_fdinfo = dma_buf_show_fdinfo, 534 }; 535 536 /* 537 * is_dma_buf_file - Check if struct file* is associated with dma_buf 538 */ 539 static inline int is_dma_buf_file(struct file *file) 540 { 541 return file->f_op == &dma_buf_fops; 542 } 543 544 static struct file *dma_buf_getfile(size_t size, int flags) 545 { 546 static atomic64_t dmabuf_inode = ATOMIC64_INIT(0); 547 struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb); 548 struct file *file; 549 550 if (IS_ERR(inode)) 551 return ERR_CAST(inode); 552 553 inode->i_size = size; 554 inode_set_bytes(inode, size); 555 556 /* 557 * The ->i_ino acquired from get_next_ino() is not unique thus 558 * not suitable for using it as dentry name by dmabuf stats. 559 * Override ->i_ino with the unique and dmabuffs specific 560 * value. 561 */ 562 inode->i_ino = atomic64_inc_return(&dmabuf_inode); 563 flags &= O_ACCMODE | O_NONBLOCK; 564 file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf", 565 flags, &dma_buf_fops); 566 if (IS_ERR(file)) 567 goto err_alloc_file; 568 569 return file; 570 571 err_alloc_file: 572 iput(inode); 573 return file; 574 } 575 576 /** 577 * DOC: dma buf device access 578 * 579 * For device DMA access to a shared DMA buffer the usual sequence of operations 580 * is fairly simple: 581 * 582 * 1. The exporter defines his exporter instance using 583 * DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private 584 * buffer object into a &dma_buf. It then exports that &dma_buf to userspace 585 * as a file descriptor by calling dma_buf_fd(). 586 * 587 * 2. Userspace passes this file-descriptors to all drivers it wants this buffer 588 * to share with: First the file descriptor is converted to a &dma_buf using 589 * dma_buf_get(). Then the buffer is attached to the device using 590 * dma_buf_attach(). 591 * 592 * Up to this stage the exporter is still free to migrate or reallocate the 593 * backing storage. 594 * 595 * 3. Once the buffer is attached to all devices userspace can initiate DMA 596 * access to the shared buffer. In the kernel this is done by calling 597 * dma_buf_map_attachment() and dma_buf_unmap_attachment(). 598 * 599 * 4. Once a driver is done with a shared buffer it needs to call 600 * dma_buf_detach() (after cleaning up any mappings) and then release the 601 * reference acquired with dma_buf_get() by calling dma_buf_put(). 602 * 603 * For the detailed semantics exporters are expected to implement see 604 * &dma_buf_ops. 605 */ 606 607 /** 608 * dma_buf_export - Creates a new dma_buf, and associates an anon file 609 * with this buffer, so it can be exported. 610 * Also connect the allocator specific data and ops to the buffer. 611 * Additionally, provide a name string for exporter; useful in debugging. 612 * 613 * @exp_info: [in] holds all the export related information provided 614 * by the exporter. see &struct dma_buf_export_info 615 * for further details. 616 * 617 * Returns, on success, a newly created struct dma_buf object, which wraps the 618 * supplied private data and operations for struct dma_buf_ops. On either 619 * missing ops, or error in allocating struct dma_buf, will return negative 620 * error. 621 * 622 * For most cases the easiest way to create @exp_info is through the 623 * %DEFINE_DMA_BUF_EXPORT_INFO macro. 624 */ 625 struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) 626 { 627 struct dma_buf *dmabuf; 628 struct dma_resv *resv = exp_info->resv; 629 struct file *file; 630 size_t alloc_size = sizeof(struct dma_buf); 631 int ret; 632 633 if (WARN_ON(!exp_info->priv || !exp_info->ops 634 || !exp_info->ops->map_dma_buf 635 || !exp_info->ops->unmap_dma_buf 636 || !exp_info->ops->release)) 637 return ERR_PTR(-EINVAL); 638 639 if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin)) 640 return ERR_PTR(-EINVAL); 641 642 if (!try_module_get(exp_info->owner)) 643 return ERR_PTR(-ENOENT); 644 645 file = dma_buf_getfile(exp_info->size, exp_info->flags); 646 if (IS_ERR(file)) { 647 ret = PTR_ERR(file); 648 goto err_module; 649 } 650 651 if (!exp_info->resv) 652 alloc_size += sizeof(struct dma_resv); 653 else 654 /* prevent &dma_buf[1] == dma_buf->resv */ 655 alloc_size += 1; 656 dmabuf = kzalloc(alloc_size, GFP_KERNEL); 657 if (!dmabuf) { 658 ret = -ENOMEM; 659 goto err_file; 660 } 661 662 dmabuf->priv = exp_info->priv; 663 dmabuf->ops = exp_info->ops; 664 dmabuf->size = exp_info->size; 665 dmabuf->exp_name = exp_info->exp_name; 666 dmabuf->owner = exp_info->owner; 667 spin_lock_init(&dmabuf->name_lock); 668 init_waitqueue_head(&dmabuf->poll); 669 dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll; 670 dmabuf->cb_in.active = dmabuf->cb_out.active = 0; 671 INIT_LIST_HEAD(&dmabuf->attachments); 672 673 if (!resv) { 674 dmabuf->resv = (struct dma_resv *)&dmabuf[1]; 675 dma_resv_init(dmabuf->resv); 676 } else { 677 dmabuf->resv = resv; 678 } 679 680 ret = dma_buf_stats_setup(dmabuf, file); 681 if (ret) 682 goto err_dmabuf; 683 684 file->private_data = dmabuf; 685 file->f_path.dentry->d_fsdata = dmabuf; 686 dmabuf->file = file; 687 688 __dma_buf_debugfs_list_add(dmabuf); 689 690 return dmabuf; 691 692 err_dmabuf: 693 if (!resv) 694 dma_resv_fini(dmabuf->resv); 695 kfree(dmabuf); 696 err_file: 697 fput(file); 698 err_module: 699 module_put(exp_info->owner); 700 return ERR_PTR(ret); 701 } 702 EXPORT_SYMBOL_NS_GPL(dma_buf_export, "DMA_BUF"); 703 704 /** 705 * dma_buf_fd - returns a file descriptor for the given struct dma_buf 706 * @dmabuf: [in] pointer to dma_buf for which fd is required. 707 * @flags: [in] flags to give to fd 708 * 709 * On success, returns an associated 'fd'. Else, returns error. 710 */ 711 int dma_buf_fd(struct dma_buf *dmabuf, int flags) 712 { 713 int fd; 714 715 if (!dmabuf || !dmabuf->file) 716 return -EINVAL; 717 718 fd = get_unused_fd_flags(flags); 719 if (fd < 0) 720 return fd; 721 722 fd_install(fd, dmabuf->file); 723 724 return fd; 725 } 726 EXPORT_SYMBOL_NS_GPL(dma_buf_fd, "DMA_BUF"); 727 728 /** 729 * dma_buf_get - returns the struct dma_buf related to an fd 730 * @fd: [in] fd associated with the struct dma_buf to be returned 731 * 732 * On success, returns the struct dma_buf associated with an fd; uses 733 * file's refcounting done by fget to increase refcount. returns ERR_PTR 734 * otherwise. 735 */ 736 struct dma_buf *dma_buf_get(int fd) 737 { 738 struct file *file; 739 740 file = fget(fd); 741 742 if (!file) 743 return ERR_PTR(-EBADF); 744 745 if (!is_dma_buf_file(file)) { 746 fput(file); 747 return ERR_PTR(-EINVAL); 748 } 749 750 return file->private_data; 751 } 752 EXPORT_SYMBOL_NS_GPL(dma_buf_get, "DMA_BUF"); 753 754 /** 755 * dma_buf_put - decreases refcount of the buffer 756 * @dmabuf: [in] buffer to reduce refcount of 757 * 758 * Uses file's refcounting done implicitly by fput(). 759 * 760 * If, as a result of this call, the refcount becomes 0, the 'release' file 761 * operation related to this fd is called. It calls &dma_buf_ops.release vfunc 762 * in turn, and frees the memory allocated for dmabuf when exported. 763 */ 764 void dma_buf_put(struct dma_buf *dmabuf) 765 { 766 if (WARN_ON(!dmabuf || !dmabuf->file)) 767 return; 768 769 fput(dmabuf->file); 770 } 771 EXPORT_SYMBOL_NS_GPL(dma_buf_put, "DMA_BUF"); 772 773 static void mangle_sg_table(struct sg_table *sg_table) 774 { 775 #ifdef CONFIG_DMABUF_DEBUG 776 int i; 777 struct scatterlist *sg; 778 779 /* To catch abuse of the underlying struct page by importers mix 780 * up the bits, but take care to preserve the low SG_ bits to 781 * not corrupt the sgt. The mixing is undone on unmap 782 * before passing the sgt back to the exporter. 783 */ 784 for_each_sgtable_sg(sg_table, sg, i) 785 sg->page_link ^= ~0xffUL; 786 #endif 787 788 } 789 790 static inline bool 791 dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach) 792 { 793 return !!attach->importer_ops; 794 } 795 796 static bool 797 dma_buf_pin_on_map(struct dma_buf_attachment *attach) 798 { 799 return attach->dmabuf->ops->pin && 800 (!dma_buf_attachment_is_dynamic(attach) || 801 !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)); 802 } 803 804 /** 805 * DOC: locking convention 806 * 807 * In order to avoid deadlock situations between dma-buf exports and importers, 808 * all dma-buf API users must follow the common dma-buf locking convention. 809 * 810 * Convention for importers 811 * 812 * 1. Importers must hold the dma-buf reservation lock when calling these 813 * functions: 814 * 815 * - dma_buf_pin() 816 * - dma_buf_unpin() 817 * - dma_buf_map_attachment() 818 * - dma_buf_unmap_attachment() 819 * - dma_buf_vmap() 820 * - dma_buf_vunmap() 821 * 822 * 2. Importers must not hold the dma-buf reservation lock when calling these 823 * functions: 824 * 825 * - dma_buf_attach() 826 * - dma_buf_dynamic_attach() 827 * - dma_buf_detach() 828 * - dma_buf_export() 829 * - dma_buf_fd() 830 * - dma_buf_get() 831 * - dma_buf_put() 832 * - dma_buf_mmap() 833 * - dma_buf_begin_cpu_access() 834 * - dma_buf_end_cpu_access() 835 * - dma_buf_map_attachment_unlocked() 836 * - dma_buf_unmap_attachment_unlocked() 837 * - dma_buf_vmap_unlocked() 838 * - dma_buf_vunmap_unlocked() 839 * 840 * Convention for exporters 841 * 842 * 1. These &dma_buf_ops callbacks are invoked with unlocked dma-buf 843 * reservation and exporter can take the lock: 844 * 845 * - &dma_buf_ops.attach() 846 * - &dma_buf_ops.detach() 847 * - &dma_buf_ops.release() 848 * - &dma_buf_ops.begin_cpu_access() 849 * - &dma_buf_ops.end_cpu_access() 850 * - &dma_buf_ops.mmap() 851 * 852 * 2. These &dma_buf_ops callbacks are invoked with locked dma-buf 853 * reservation and exporter can't take the lock: 854 * 855 * - &dma_buf_ops.pin() 856 * - &dma_buf_ops.unpin() 857 * - &dma_buf_ops.map_dma_buf() 858 * - &dma_buf_ops.unmap_dma_buf() 859 * - &dma_buf_ops.vmap() 860 * - &dma_buf_ops.vunmap() 861 * 862 * 3. Exporters must hold the dma-buf reservation lock when calling these 863 * functions: 864 * 865 * - dma_buf_move_notify() 866 */ 867 868 /** 869 * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list 870 * @dmabuf: [in] buffer to attach device to. 871 * @dev: [in] device to be attached. 872 * @importer_ops: [in] importer operations for the attachment 873 * @importer_priv: [in] importer private pointer for the attachment 874 * 875 * Returns struct dma_buf_attachment pointer for this attachment. Attachments 876 * must be cleaned up by calling dma_buf_detach(). 877 * 878 * Optionally this calls &dma_buf_ops.attach to allow device-specific attach 879 * functionality. 880 * 881 * Returns: 882 * 883 * A pointer to newly created &dma_buf_attachment on success, or a negative 884 * error code wrapped into a pointer on failure. 885 * 886 * Note that this can fail if the backing storage of @dmabuf is in a place not 887 * accessible to @dev, and cannot be moved to a more suitable place. This is 888 * indicated with the error code -EBUSY. 889 */ 890 struct dma_buf_attachment * 891 dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev, 892 const struct dma_buf_attach_ops *importer_ops, 893 void *importer_priv) 894 { 895 struct dma_buf_attachment *attach; 896 int ret; 897 898 if (WARN_ON(!dmabuf || !dev)) 899 return ERR_PTR(-EINVAL); 900 901 if (WARN_ON(importer_ops && !importer_ops->move_notify)) 902 return ERR_PTR(-EINVAL); 903 904 attach = kzalloc(sizeof(*attach), GFP_KERNEL); 905 if (!attach) 906 return ERR_PTR(-ENOMEM); 907 908 attach->dev = dev; 909 attach->dmabuf = dmabuf; 910 if (importer_ops) 911 attach->peer2peer = importer_ops->allow_peer2peer; 912 attach->importer_ops = importer_ops; 913 attach->importer_priv = importer_priv; 914 915 if (dmabuf->ops->attach) { 916 ret = dmabuf->ops->attach(dmabuf, attach); 917 if (ret) 918 goto err_attach; 919 } 920 dma_resv_lock(dmabuf->resv, NULL); 921 list_add(&attach->node, &dmabuf->attachments); 922 dma_resv_unlock(dmabuf->resv); 923 924 return attach; 925 926 err_attach: 927 kfree(attach); 928 return ERR_PTR(ret); 929 } 930 EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, "DMA_BUF"); 931 932 /** 933 * dma_buf_attach - Wrapper for dma_buf_dynamic_attach 934 * @dmabuf: [in] buffer to attach device to. 935 * @dev: [in] device to be attached. 936 * 937 * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static 938 * mapping. 939 */ 940 struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, 941 struct device *dev) 942 { 943 return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL); 944 } 945 EXPORT_SYMBOL_NS_GPL(dma_buf_attach, "DMA_BUF"); 946 947 /** 948 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list 949 * @dmabuf: [in] buffer to detach from. 950 * @attach: [in] attachment to be detached; is free'd after this call. 951 * 952 * Clean up a device attachment obtained by calling dma_buf_attach(). 953 * 954 * Optionally this calls &dma_buf_ops.detach for device-specific detach. 955 */ 956 void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach) 957 { 958 if (WARN_ON(!dmabuf || !attach || dmabuf != attach->dmabuf)) 959 return; 960 961 dma_resv_lock(dmabuf->resv, NULL); 962 list_del(&attach->node); 963 dma_resv_unlock(dmabuf->resv); 964 965 if (dmabuf->ops->detach) 966 dmabuf->ops->detach(dmabuf, attach); 967 968 kfree(attach); 969 } 970 EXPORT_SYMBOL_NS_GPL(dma_buf_detach, "DMA_BUF"); 971 972 /** 973 * dma_buf_pin - Lock down the DMA-buf 974 * @attach: [in] attachment which should be pinned 975 * 976 * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may 977 * call this, and only for limited use cases like scanout and not for temporary 978 * pin operations. It is not permitted to allow userspace to pin arbitrary 979 * amounts of buffers through this interface. 980 * 981 * Buffers must be unpinned by calling dma_buf_unpin(). 982 * 983 * Returns: 984 * 0 on success, negative error code on failure. 985 */ 986 int dma_buf_pin(struct dma_buf_attachment *attach) 987 { 988 struct dma_buf *dmabuf = attach->dmabuf; 989 int ret = 0; 990 991 WARN_ON(!attach->importer_ops); 992 993 dma_resv_assert_held(dmabuf->resv); 994 995 if (dmabuf->ops->pin) 996 ret = dmabuf->ops->pin(attach); 997 998 return ret; 999 } 1000 EXPORT_SYMBOL_NS_GPL(dma_buf_pin, "DMA_BUF"); 1001 1002 /** 1003 * dma_buf_unpin - Unpin a DMA-buf 1004 * @attach: [in] attachment which should be unpinned 1005 * 1006 * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move 1007 * any mapping of @attach again and inform the importer through 1008 * &dma_buf_attach_ops.move_notify. 1009 */ 1010 void dma_buf_unpin(struct dma_buf_attachment *attach) 1011 { 1012 struct dma_buf *dmabuf = attach->dmabuf; 1013 1014 WARN_ON(!attach->importer_ops); 1015 1016 dma_resv_assert_held(dmabuf->resv); 1017 1018 if (dmabuf->ops->unpin) 1019 dmabuf->ops->unpin(attach); 1020 } 1021 EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, "DMA_BUF"); 1022 1023 /** 1024 * dma_buf_map_attachment - Returns the scatterlist table of the attachment; 1025 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the 1026 * dma_buf_ops. 1027 * @attach: [in] attachment whose scatterlist is to be returned 1028 * @direction: [in] direction of DMA transfer 1029 * 1030 * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR 1031 * on error. May return -EINTR if it is interrupted by a signal. 1032 * 1033 * On success, the DMA addresses and lengths in the returned scatterlist are 1034 * PAGE_SIZE aligned. 1035 * 1036 * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that 1037 * the underlying backing storage is pinned for as long as a mapping exists, 1038 * therefore users/importers should not hold onto a mapping for undue amounts of 1039 * time. 1040 * 1041 * Important: Dynamic importers must wait for the exclusive fence of the struct 1042 * dma_resv attached to the DMA-BUF first. 1043 */ 1044 struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach, 1045 enum dma_data_direction direction) 1046 { 1047 struct sg_table *sg_table; 1048 signed long ret; 1049 1050 might_sleep(); 1051 1052 if (WARN_ON(!attach || !attach->dmabuf)) 1053 return ERR_PTR(-EINVAL); 1054 1055 dma_resv_assert_held(attach->dmabuf->resv); 1056 1057 if (dma_buf_pin_on_map(attach)) { 1058 ret = attach->dmabuf->ops->pin(attach); 1059 /* 1060 * Catch exporters making buffers inaccessible even when 1061 * attachments preventing that exist. 1062 */ 1063 WARN_ON_ONCE(ret == EBUSY); 1064 if (ret) 1065 return ERR_PTR(ret); 1066 } 1067 1068 sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction); 1069 if (!sg_table) 1070 sg_table = ERR_PTR(-ENOMEM); 1071 if (IS_ERR(sg_table)) 1072 goto error_unpin; 1073 1074 /* 1075 * Importers with static attachments don't wait for fences. 1076 */ 1077 if (!dma_buf_attachment_is_dynamic(attach)) { 1078 ret = dma_resv_wait_timeout(attach->dmabuf->resv, 1079 DMA_RESV_USAGE_KERNEL, true, 1080 MAX_SCHEDULE_TIMEOUT); 1081 if (ret < 0) 1082 goto error_unmap; 1083 } 1084 mangle_sg_table(sg_table); 1085 1086 #ifdef CONFIG_DMA_API_DEBUG 1087 { 1088 struct scatterlist *sg; 1089 u64 addr; 1090 int len; 1091 int i; 1092 1093 for_each_sgtable_dma_sg(sg_table, sg, i) { 1094 addr = sg_dma_address(sg); 1095 len = sg_dma_len(sg); 1096 if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) { 1097 pr_debug("%s: addr %llx or len %x is not page aligned!\n", 1098 __func__, addr, len); 1099 } 1100 } 1101 } 1102 #endif /* CONFIG_DMA_API_DEBUG */ 1103 return sg_table; 1104 1105 error_unmap: 1106 attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction); 1107 sg_table = ERR_PTR(ret); 1108 1109 error_unpin: 1110 if (dma_buf_pin_on_map(attach)) 1111 attach->dmabuf->ops->unpin(attach); 1112 1113 return sg_table; 1114 } 1115 EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, "DMA_BUF"); 1116 1117 /** 1118 * dma_buf_map_attachment_unlocked - Returns the scatterlist table of the attachment; 1119 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the 1120 * dma_buf_ops. 1121 * @attach: [in] attachment whose scatterlist is to be returned 1122 * @direction: [in] direction of DMA transfer 1123 * 1124 * Unlocked variant of dma_buf_map_attachment(). 1125 */ 1126 struct sg_table * 1127 dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach, 1128 enum dma_data_direction direction) 1129 { 1130 struct sg_table *sg_table; 1131 1132 might_sleep(); 1133 1134 if (WARN_ON(!attach || !attach->dmabuf)) 1135 return ERR_PTR(-EINVAL); 1136 1137 dma_resv_lock(attach->dmabuf->resv, NULL); 1138 sg_table = dma_buf_map_attachment(attach, direction); 1139 dma_resv_unlock(attach->dmabuf->resv); 1140 1141 return sg_table; 1142 } 1143 EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment_unlocked, "DMA_BUF"); 1144 1145 /** 1146 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer;might 1147 * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of 1148 * dma_buf_ops. 1149 * @attach: [in] attachment to unmap buffer from 1150 * @sg_table: [in] scatterlist info of the buffer to unmap 1151 * @direction: [in] direction of DMA transfer 1152 * 1153 * This unmaps a DMA mapping for @attached obtained by dma_buf_map_attachment(). 1154 */ 1155 void dma_buf_unmap_attachment(struct dma_buf_attachment *attach, 1156 struct sg_table *sg_table, 1157 enum dma_data_direction direction) 1158 { 1159 might_sleep(); 1160 1161 if (WARN_ON(!attach || !attach->dmabuf || !sg_table)) 1162 return; 1163 1164 dma_resv_assert_held(attach->dmabuf->resv); 1165 1166 mangle_sg_table(sg_table); 1167 attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction); 1168 1169 if (dma_buf_pin_on_map(attach)) 1170 attach->dmabuf->ops->unpin(attach); 1171 } 1172 EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, "DMA_BUF"); 1173 1174 /** 1175 * dma_buf_unmap_attachment_unlocked - unmaps and decreases usecount of the buffer;might 1176 * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of 1177 * dma_buf_ops. 1178 * @attach: [in] attachment to unmap buffer from 1179 * @sg_table: [in] scatterlist info of the buffer to unmap 1180 * @direction: [in] direction of DMA transfer 1181 * 1182 * Unlocked variant of dma_buf_unmap_attachment(). 1183 */ 1184 void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach, 1185 struct sg_table *sg_table, 1186 enum dma_data_direction direction) 1187 { 1188 might_sleep(); 1189 1190 if (WARN_ON(!attach || !attach->dmabuf || !sg_table)) 1191 return; 1192 1193 dma_resv_lock(attach->dmabuf->resv, NULL); 1194 dma_buf_unmap_attachment(attach, sg_table, direction); 1195 dma_resv_unlock(attach->dmabuf->resv); 1196 } 1197 EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, "DMA_BUF"); 1198 1199 /** 1200 * dma_buf_move_notify - notify attachments that DMA-buf is moving 1201 * 1202 * @dmabuf: [in] buffer which is moving 1203 * 1204 * Informs all attachments that they need to destroy and recreate all their 1205 * mappings. 1206 */ 1207 void dma_buf_move_notify(struct dma_buf *dmabuf) 1208 { 1209 struct dma_buf_attachment *attach; 1210 1211 dma_resv_assert_held(dmabuf->resv); 1212 1213 list_for_each_entry(attach, &dmabuf->attachments, node) 1214 if (attach->importer_ops) 1215 attach->importer_ops->move_notify(attach); 1216 } 1217 EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, "DMA_BUF"); 1218 1219 /** 1220 * DOC: cpu access 1221 * 1222 * There are multiple reasons for supporting CPU access to a dma buffer object: 1223 * 1224 * - Fallback operations in the kernel, for example when a device is connected 1225 * over USB and the kernel needs to shuffle the data around first before 1226 * sending it away. Cache coherency is handled by bracketing any transactions 1227 * with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access() 1228 * access. 1229 * 1230 * Since for most kernel internal dma-buf accesses need the entire buffer, a 1231 * vmap interface is introduced. Note that on very old 32-bit architectures 1232 * vmalloc space might be limited and result in vmap calls failing. 1233 * 1234 * Interfaces: 1235 * 1236 * .. code-block:: c 1237 * 1238 * void *dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map) 1239 * void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map) 1240 * 1241 * The vmap call can fail if there is no vmap support in the exporter, or if 1242 * it runs out of vmalloc space. Note that the dma-buf layer keeps a reference 1243 * count for all vmap access and calls down into the exporter's vmap function 1244 * only when no vmapping exists, and only unmaps it once. Protection against 1245 * concurrent vmap/vunmap calls is provided by taking the &dma_buf.lock mutex. 1246 * 1247 * - For full compatibility on the importer side with existing userspace 1248 * interfaces, which might already support mmap'ing buffers. This is needed in 1249 * many processing pipelines (e.g. feeding a software rendered image into a 1250 * hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION 1251 * framework already supported this and for DMA buffer file descriptors to 1252 * replace ION buffers mmap support was needed. 1253 * 1254 * There is no special interfaces, userspace simply calls mmap on the dma-buf 1255 * fd. But like for CPU access there's a need to bracket the actual access, 1256 * which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that 1257 * DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must 1258 * be restarted. 1259 * 1260 * Some systems might need some sort of cache coherency management e.g. when 1261 * CPU and GPU domains are being accessed through dma-buf at the same time. 1262 * To circumvent this problem there are begin/end coherency markers, that 1263 * forward directly to existing dma-buf device drivers vfunc hooks. Userspace 1264 * can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The 1265 * sequence would be used like following: 1266 * 1267 * - mmap dma-buf fd 1268 * - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write 1269 * to mmap area 3. SYNC_END ioctl. This can be repeated as often as you 1270 * want (with the new data being consumed by say the GPU or the scanout 1271 * device) 1272 * - munmap once you don't need the buffer any more 1273 * 1274 * For correctness and optimal performance, it is always required to use 1275 * SYNC_START and SYNC_END before and after, respectively, when accessing the 1276 * mapped address. Userspace cannot rely on coherent access, even when there 1277 * are systems where it just works without calling these ioctls. 1278 * 1279 * - And as a CPU fallback in userspace processing pipelines. 1280 * 1281 * Similar to the motivation for kernel cpu access it is again important that 1282 * the userspace code of a given importing subsystem can use the same 1283 * interfaces with a imported dma-buf buffer object as with a native buffer 1284 * object. This is especially important for drm where the userspace part of 1285 * contemporary OpenGL, X, and other drivers is huge, and reworking them to 1286 * use a different way to mmap a buffer rather invasive. 1287 * 1288 * The assumption in the current dma-buf interfaces is that redirecting the 1289 * initial mmap is all that's needed. A survey of some of the existing 1290 * subsystems shows that no driver seems to do any nefarious thing like 1291 * syncing up with outstanding asynchronous processing on the device or 1292 * allocating special resources at fault time. So hopefully this is good 1293 * enough, since adding interfaces to intercept pagefaults and allow pte 1294 * shootdowns would increase the complexity quite a bit. 1295 * 1296 * Interface: 1297 * 1298 * .. code-block:: c 1299 * 1300 * int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *, unsigned long); 1301 * 1302 * If the importing subsystem simply provides a special-purpose mmap call to 1303 * set up a mapping in userspace, calling do_mmap with &dma_buf.file will 1304 * equally achieve that for a dma-buf object. 1305 */ 1306 1307 static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf, 1308 enum dma_data_direction direction) 1309 { 1310 bool write = (direction == DMA_BIDIRECTIONAL || 1311 direction == DMA_TO_DEVICE); 1312 struct dma_resv *resv = dmabuf->resv; 1313 long ret; 1314 1315 /* Wait on any implicit rendering fences */ 1316 ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write), 1317 true, MAX_SCHEDULE_TIMEOUT); 1318 if (ret < 0) 1319 return ret; 1320 1321 return 0; 1322 } 1323 1324 /** 1325 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the 1326 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific 1327 * preparations. Coherency is only guaranteed in the specified range for the 1328 * specified access direction. 1329 * @dmabuf: [in] buffer to prepare cpu access for. 1330 * @direction: [in] direction of access. 1331 * 1332 * After the cpu access is complete the caller should call 1333 * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is 1334 * it guaranteed to be coherent with other DMA access. 1335 * 1336 * This function will also wait for any DMA transactions tracked through 1337 * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit 1338 * synchronization this function will only ensure cache coherency, callers must 1339 * ensure synchronization with such DMA transactions on their own. 1340 * 1341 * Can return negative error values, returns 0 on success. 1342 */ 1343 int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, 1344 enum dma_data_direction direction) 1345 { 1346 int ret = 0; 1347 1348 if (WARN_ON(!dmabuf)) 1349 return -EINVAL; 1350 1351 might_lock(&dmabuf->resv->lock.base); 1352 1353 if (dmabuf->ops->begin_cpu_access) 1354 ret = dmabuf->ops->begin_cpu_access(dmabuf, direction); 1355 1356 /* Ensure that all fences are waited upon - but we first allow 1357 * the native handler the chance to do so more efficiently if it 1358 * chooses. A double invocation here will be reasonably cheap no-op. 1359 */ 1360 if (ret == 0) 1361 ret = __dma_buf_begin_cpu_access(dmabuf, direction); 1362 1363 return ret; 1364 } 1365 EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, "DMA_BUF"); 1366 1367 /** 1368 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the 1369 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific 1370 * actions. Coherency is only guaranteed in the specified range for the 1371 * specified access direction. 1372 * @dmabuf: [in] buffer to complete cpu access for. 1373 * @direction: [in] direction of access. 1374 * 1375 * This terminates CPU access started with dma_buf_begin_cpu_access(). 1376 * 1377 * Can return negative error values, returns 0 on success. 1378 */ 1379 int dma_buf_end_cpu_access(struct dma_buf *dmabuf, 1380 enum dma_data_direction direction) 1381 { 1382 int ret = 0; 1383 1384 WARN_ON(!dmabuf); 1385 1386 might_lock(&dmabuf->resv->lock.base); 1387 1388 if (dmabuf->ops->end_cpu_access) 1389 ret = dmabuf->ops->end_cpu_access(dmabuf, direction); 1390 1391 return ret; 1392 } 1393 EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, "DMA_BUF"); 1394 1395 1396 /** 1397 * dma_buf_mmap - Setup up a userspace mmap with the given vma 1398 * @dmabuf: [in] buffer that should back the vma 1399 * @vma: [in] vma for the mmap 1400 * @pgoff: [in] offset in pages where this mmap should start within the 1401 * dma-buf buffer. 1402 * 1403 * This function adjusts the passed in vma so that it points at the file of the 1404 * dma_buf operation. It also adjusts the starting pgoff and does bounds 1405 * checking on the size of the vma. Then it calls the exporters mmap function to 1406 * set up the mapping. 1407 * 1408 * Can return negative error values, returns 0 on success. 1409 */ 1410 int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma, 1411 unsigned long pgoff) 1412 { 1413 if (WARN_ON(!dmabuf || !vma)) 1414 return -EINVAL; 1415 1416 /* check if buffer supports mmap */ 1417 if (!dmabuf->ops->mmap) 1418 return -EINVAL; 1419 1420 /* check for offset overflow */ 1421 if (pgoff + vma_pages(vma) < pgoff) 1422 return -EOVERFLOW; 1423 1424 /* check for overflowing the buffer's size */ 1425 if (pgoff + vma_pages(vma) > 1426 dmabuf->size >> PAGE_SHIFT) 1427 return -EINVAL; 1428 1429 /* readjust the vma */ 1430 vma_set_file(vma, dmabuf->file); 1431 vma->vm_pgoff = pgoff; 1432 1433 return dmabuf->ops->mmap(dmabuf, vma); 1434 } 1435 EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, "DMA_BUF"); 1436 1437 /** 1438 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel 1439 * address space. Same restrictions as for vmap and friends apply. 1440 * @dmabuf: [in] buffer to vmap 1441 * @map: [out] returns the vmap pointer 1442 * 1443 * This call may fail due to lack of virtual mapping address space. 1444 * These calls are optional in drivers. The intended use for them 1445 * is for mapping objects linear in kernel space for high use objects. 1446 * 1447 * To ensure coherency users must call dma_buf_begin_cpu_access() and 1448 * dma_buf_end_cpu_access() around any cpu access performed through this 1449 * mapping. 1450 * 1451 * Returns 0 on success, or a negative errno code otherwise. 1452 */ 1453 int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map) 1454 { 1455 struct iosys_map ptr; 1456 int ret; 1457 1458 iosys_map_clear(map); 1459 1460 if (WARN_ON(!dmabuf)) 1461 return -EINVAL; 1462 1463 dma_resv_assert_held(dmabuf->resv); 1464 1465 if (!dmabuf->ops->vmap) 1466 return -EINVAL; 1467 1468 if (dmabuf->vmapping_counter) { 1469 dmabuf->vmapping_counter++; 1470 BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr)); 1471 *map = dmabuf->vmap_ptr; 1472 return 0; 1473 } 1474 1475 BUG_ON(iosys_map_is_set(&dmabuf->vmap_ptr)); 1476 1477 ret = dmabuf->ops->vmap(dmabuf, &ptr); 1478 if (WARN_ON_ONCE(ret)) 1479 return ret; 1480 1481 dmabuf->vmap_ptr = ptr; 1482 dmabuf->vmapping_counter = 1; 1483 1484 *map = dmabuf->vmap_ptr; 1485 1486 return 0; 1487 } 1488 EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, "DMA_BUF"); 1489 1490 /** 1491 * dma_buf_vmap_unlocked - Create virtual mapping for the buffer object into kernel 1492 * address space. Same restrictions as for vmap and friends apply. 1493 * @dmabuf: [in] buffer to vmap 1494 * @map: [out] returns the vmap pointer 1495 * 1496 * Unlocked version of dma_buf_vmap() 1497 * 1498 * Returns 0 on success, or a negative errno code otherwise. 1499 */ 1500 int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map) 1501 { 1502 int ret; 1503 1504 iosys_map_clear(map); 1505 1506 if (WARN_ON(!dmabuf)) 1507 return -EINVAL; 1508 1509 dma_resv_lock(dmabuf->resv, NULL); 1510 ret = dma_buf_vmap(dmabuf, map); 1511 dma_resv_unlock(dmabuf->resv); 1512 1513 return ret; 1514 } 1515 EXPORT_SYMBOL_NS_GPL(dma_buf_vmap_unlocked, "DMA_BUF"); 1516 1517 /** 1518 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap. 1519 * @dmabuf: [in] buffer to vunmap 1520 * @map: [in] vmap pointer to vunmap 1521 */ 1522 void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map) 1523 { 1524 if (WARN_ON(!dmabuf)) 1525 return; 1526 1527 dma_resv_assert_held(dmabuf->resv); 1528 1529 BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr)); 1530 BUG_ON(dmabuf->vmapping_counter == 0); 1531 BUG_ON(!iosys_map_is_equal(&dmabuf->vmap_ptr, map)); 1532 1533 if (--dmabuf->vmapping_counter == 0) { 1534 if (dmabuf->ops->vunmap) 1535 dmabuf->ops->vunmap(dmabuf, map); 1536 iosys_map_clear(&dmabuf->vmap_ptr); 1537 } 1538 } 1539 EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, "DMA_BUF"); 1540 1541 /** 1542 * dma_buf_vunmap_unlocked - Unmap a vmap obtained by dma_buf_vmap. 1543 * @dmabuf: [in] buffer to vunmap 1544 * @map: [in] vmap pointer to vunmap 1545 */ 1546 void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map) 1547 { 1548 if (WARN_ON(!dmabuf)) 1549 return; 1550 1551 dma_resv_lock(dmabuf->resv, NULL); 1552 dma_buf_vunmap(dmabuf, map); 1553 dma_resv_unlock(dmabuf->resv); 1554 } 1555 EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap_unlocked, "DMA_BUF"); 1556 1557 #ifdef CONFIG_DEBUG_FS 1558 static int dma_buf_debug_show(struct seq_file *s, void *unused) 1559 { 1560 struct dma_buf *buf_obj; 1561 struct dma_buf_attachment *attach_obj; 1562 int count = 0, attach_count; 1563 size_t size = 0; 1564 int ret; 1565 1566 ret = mutex_lock_interruptible(&debugfs_list_mutex); 1567 1568 if (ret) 1569 return ret; 1570 1571 seq_puts(s, "\nDma-buf Objects:\n"); 1572 seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n", 1573 "size", "flags", "mode", "count", "ino"); 1574 1575 list_for_each_entry(buf_obj, &debugfs_list, list_node) { 1576 1577 ret = dma_resv_lock_interruptible(buf_obj->resv, NULL); 1578 if (ret) 1579 goto error_unlock; 1580 1581 1582 spin_lock(&buf_obj->name_lock); 1583 seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n", 1584 buf_obj->size, 1585 buf_obj->file->f_flags, buf_obj->file->f_mode, 1586 file_count(buf_obj->file), 1587 buf_obj->exp_name, 1588 file_inode(buf_obj->file)->i_ino, 1589 buf_obj->name ?: "<none>"); 1590 spin_unlock(&buf_obj->name_lock); 1591 1592 dma_resv_describe(buf_obj->resv, s); 1593 1594 seq_puts(s, "\tAttached Devices:\n"); 1595 attach_count = 0; 1596 1597 list_for_each_entry(attach_obj, &buf_obj->attachments, node) { 1598 seq_printf(s, "\t%s\n", dev_name(attach_obj->dev)); 1599 attach_count++; 1600 } 1601 dma_resv_unlock(buf_obj->resv); 1602 1603 seq_printf(s, "Total %d devices attached\n\n", 1604 attach_count); 1605 1606 count++; 1607 size += buf_obj->size; 1608 } 1609 1610 seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size); 1611 1612 mutex_unlock(&debugfs_list_mutex); 1613 return 0; 1614 1615 error_unlock: 1616 mutex_unlock(&debugfs_list_mutex); 1617 return ret; 1618 } 1619 1620 DEFINE_SHOW_ATTRIBUTE(dma_buf_debug); 1621 1622 static struct dentry *dma_buf_debugfs_dir; 1623 1624 static int dma_buf_init_debugfs(void) 1625 { 1626 struct dentry *d; 1627 int err = 0; 1628 1629 d = debugfs_create_dir("dma_buf", NULL); 1630 if (IS_ERR(d)) 1631 return PTR_ERR(d); 1632 1633 dma_buf_debugfs_dir = d; 1634 1635 d = debugfs_create_file("bufinfo", 0444, dma_buf_debugfs_dir, 1636 NULL, &dma_buf_debug_fops); 1637 if (IS_ERR(d)) { 1638 pr_debug("dma_buf: debugfs: failed to create node bufinfo\n"); 1639 debugfs_remove_recursive(dma_buf_debugfs_dir); 1640 dma_buf_debugfs_dir = NULL; 1641 err = PTR_ERR(d); 1642 } 1643 1644 return err; 1645 } 1646 1647 static void dma_buf_uninit_debugfs(void) 1648 { 1649 debugfs_remove_recursive(dma_buf_debugfs_dir); 1650 } 1651 #else 1652 static inline int dma_buf_init_debugfs(void) 1653 { 1654 return 0; 1655 } 1656 static inline void dma_buf_uninit_debugfs(void) 1657 { 1658 } 1659 #endif 1660 1661 static int __init dma_buf_init(void) 1662 { 1663 int ret; 1664 1665 ret = dma_buf_init_sysfs_statistics(); 1666 if (ret) 1667 return ret; 1668 1669 dma_buf_mnt = kern_mount(&dma_buf_fs_type); 1670 if (IS_ERR(dma_buf_mnt)) 1671 return PTR_ERR(dma_buf_mnt); 1672 1673 dma_buf_init_debugfs(); 1674 return 0; 1675 } 1676 subsys_initcall(dma_buf_init); 1677 1678 static void __exit dma_buf_deinit(void) 1679 { 1680 dma_buf_uninit_debugfs(); 1681 kern_unmount(dma_buf_mnt); 1682 dma_buf_uninit_sysfs_statistics(); 1683 } 1684 __exitcall(dma_buf_deinit); 1685