// SPDX-License-Identifier: GPL-2.0-only
/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and specially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence.h>
#include <linux/dma-fence-unwrap.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/sync_file.h>
#include <linux/poll.h>
#include <linux/dma-resv.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>

#include <uapi/linux/dma-buf.h>
#include <uapi/linux/magic.h>

#include "dma-buf-sysfs-stats.h"

#define CREATE_TRACE_POINTS
#include <trace/events/dma_buf.h>

/*
 * dmabuf->name must be accessed with holding dmabuf->name_lock.
 * we need to take the lock around the tracepoint call itself where
 * it is called in the code.
 *
 * Note: FUNC##_enabled() is a static branch that will only
 * be set when the trace event is enabled.
 */
#define DMA_BUF_TRACE(FUNC, ...)					\
	do {								\
		if (FUNC##_enabled()) {					\
			guard(spinlock)(&dmabuf->name_lock);		\
			FUNC(__VA_ARGS__);				\
		} else if (IS_ENABLED(CONFIG_LOCKDEP)) {		\
			/* Expose this lock when lockdep is enabled */	\
			guard(spinlock)(&dmabuf->name_lock);		\
		}							\
	} while (0)

/* Wrapper to hide the sg_table page link from the importer */
struct dma_buf_sg_table_wrapper {
	struct sg_table *original;
	struct sg_table wrapper;
};

static inline int is_dma_buf_file(struct file *);

/* Protects dmabuf_list below; does not protect any individual dma_buf. */
static DEFINE_MUTEX(dmabuf_list_mutex);
/* Global list of every currently exported DMA-buf. */
static LIST_HEAD(dmabuf_list);

/* Link a newly exported buffer into the global DMA-buf list. */
static void __dma_buf_list_add(struct dma_buf *dmabuf)
{
	mutex_lock(&dmabuf_list_mutex);
	list_add(&dmabuf->list_node, &dmabuf_list);
	mutex_unlock(&dmabuf_list_mutex);
}

/* Unlink a buffer from the global DMA-buf list; NULL is a no-op. */
static void __dma_buf_list_del(struct dma_buf *dmabuf)
{
	if (!dmabuf)
		return;

	mutex_lock(&dmabuf_list_mutex);
	list_del(&dmabuf->list_node);
	mutex_unlock(&dmabuf_list_mutex);
}

/**
 * dma_buf_iter_begin - begin iteration through global list of all DMA buffers
 *
 * Returns the first buffer in the global list of DMA-bufs that's not in the
 * process of being destroyed. Increments that buffer's reference count to
 * prevent buffer destruction. Callers must release the reference, either by
 * continuing iteration with dma_buf_iter_next(), or with dma_buf_put().
 *
 * Return:
 * * First buffer from global list, with refcount elevated
 * * NULL if no active buffers are present
 */
struct dma_buf *dma_buf_iter_begin(void)
{
	struct dma_buf *ret = NULL, *dmabuf;

	/*
	 * The list mutex does not protect a dmabuf's refcount, so it can be
	 * zeroed while we are iterating. We cannot call get_dma_buf() since the
	 * caller may not already own a reference to the buffer.
	 */
	mutex_lock(&dmabuf_list_mutex);
	list_for_each_entry(dmabuf, &dmabuf_list, list_node) {
		/* file_ref_get() fails (and we skip) if the count hit zero */
		if (file_ref_get(&dmabuf->file->f_ref)) {
			ret = dmabuf;
			break;
		}
	}
	mutex_unlock(&dmabuf_list_mutex);
	return ret;
}

/**
 * dma_buf_iter_next - continue iteration through global list of all DMA buffers
 * @dmabuf:	[in]	pointer to dma_buf
 *
 * Decrements the reference count on the provided buffer. Returns the next
 * buffer from the remainder of the global list of DMA-bufs with its reference
 * count incremented. Callers must release the reference, either by continuing
 * iteration with dma_buf_iter_next(), or with dma_buf_put().
 *
 * Return:
 * * Next buffer from global list, with refcount elevated
 * * NULL if no additional active buffers are present
 */
struct dma_buf *dma_buf_iter_next(struct dma_buf *dmabuf)
{
	struct dma_buf *ret = NULL;

	/*
	 * The list mutex does not protect a dmabuf's refcount, so it can be
	 * zeroed while we are iterating. We cannot call get_dma_buf() since the
	 * caller may not already own a reference to the buffer.
	 */
	mutex_lock(&dmabuf_list_mutex);
	/* Drop the iteration reference while the list is stable under the lock */
	dma_buf_put(dmabuf);
	list_for_each_entry_continue(dmabuf, &dmabuf_list, list_node) {
		if (file_ref_get(&dmabuf->file->f_ref)) {
			ret = dmabuf;
			break;
		}
	}
	mutex_unlock(&dmabuf_list_mutex);
	return ret;
}

/* d_dname: render "<exporter>:<name>" path components for procfs/maps output. */
static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	struct dma_buf *dmabuf;
	char name[DMA_BUF_NAME_LEN];
	ssize_t ret = 0;

	dmabuf = dentry->d_fsdata;
	/* Copy the name under the lock; dmabuf->name may be replaced anytime */
	spin_lock(&dmabuf->name_lock);
	if (dmabuf->name)
		ret = strscpy(name, dmabuf->name, sizeof(name));
	spin_unlock(&dmabuf->name_lock);

	return dynamic_dname(buffer, buflen, "/%s:%s",
			     dentry->d_name.name, ret > 0 ?
name : "");
}

/* d_release: final teardown once the last reference to the backing file drops. */
static void dma_buf_release(struct dentry *dentry)
{
	struct dma_buf *dmabuf;

	dmabuf = dentry->d_fsdata;
	if (unlikely(!dmabuf))
		return;

	/* No CPU mappings may survive the buffer */
	BUG_ON(dmabuf->vmapping_counter);

	/*
	 * If you hit this BUG() it could mean:
	 * * There's a file reference imbalance in dma_buf_poll / dma_buf_poll_cb or somewhere else
	 * * dmabuf->cb_in/out.active are non-0 despite no pending fence callback
	 */
	BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);

	dma_buf_stats_teardown(dmabuf);
	dmabuf->ops->release(dmabuf);

	/* Only finalize the reservation object if we embedded it ourselves */
	if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
		dma_resv_fini(dmabuf->resv);

	WARN_ON(!list_empty(&dmabuf->attachments));
	module_put(dmabuf->owner);
	kfree(dmabuf->name);
	kfree(dmabuf);
}

/* file->release: drop the buffer from the global list when the file closes. */
static int dma_buf_file_release(struct inode *inode, struct file *file)
{
	if (!is_dma_buf_file(file))
		return -EINVAL;

	__dma_buf_list_del(file->private_data);

	return 0;
}

static const struct dentry_operations dma_buf_dentry_ops = {
	.d_dname = dmabuffs_dname,
	.d_release = dma_buf_release,
};

/* Internal mount of the dmabuffs pseudo filesystem, set up at module init. */
static struct vfsmount *dma_buf_mnt;

static int dma_buf_fs_init_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx;

	ctx = init_pseudo(fc, DMA_BUF_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->dops = &dma_buf_dentry_ops;
	return 0;
}

static struct file_system_type dma_buf_fs_type = {
	.name = "dmabuf",
	.init_fs_context = dma_buf_fs_init_context,
	.kill_sb = kill_anon_super,
};

/* file->mmap: delegate to the exporter after validating the requested range. */
static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	/* check if buffer supports mmap */
	if (!dmabuf->ops->mmap)
		return -EINVAL;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	DMA_BUF_TRACE(trace_dma_buf_mmap_internal, dmabuf);

	return dmabuf->ops->mmap(dmabuf, vma);
}

/* file->llseek: only size discovery is supported, never an actual seek. */
static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
{
	struct dma_buf *dmabuf;
	loff_t base;

	if (!is_dma_buf_file(file))
		return -EBADF;

	dmabuf = file->private_data;

	/* only support discovering the end of the buffer,
	 * but also allow SEEK_SET to maintain the idiomatic
	 * SEEK_END(0), SEEK_CUR(0) pattern.
	 */
	if (whence == SEEK_END)
		base = dmabuf->size;
	else if (whence == SEEK_SET)
		base = 0;
	else
		return -EINVAL;

	/* Non-zero offsets would imply a real seek, which is not supported */
	if (offset != 0)
		return -EINVAL;

	return base + offset;
}

/**
 * DOC: implicit fence polling
 *
 * To support cross-device and cross-driver synchronization of buffer access
 * implicit fences (represented internally in the kernel with &struct dma_fence)
 * can be attached to a &dma_buf. The glue for that and a few related things are
 * provided in the &dma_resv structure.
 *
 * Userspace can query the state of these implicitly tracked fences using poll()
 * and related system calls:
 *
 * - Checking for EPOLLIN, i.e. read access, can be use to query the state of the
 *   most recent write or exclusive fence.
 *
 * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
 *   all attached fences, shared and exclusive ones.
 *
 * Note that this only signals the completion of the respective fences, i.e. the
 * DMA transfers are complete. Cache flushing and any other necessary
 * preparations before CPU access can begin still need to happen.
 *
 * As an alternative to poll(), the set of fences on DMA buffer can be
 * exported as a &sync_file using &dma_buf_sync_file_export.
 */

/* Fence callback: wake pollers, clear the busy flag, drop fence + file refs. */
static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
	struct dma_buf *dmabuf = container_of(dcb->poll, struct dma_buf, poll);
	unsigned long flags;

	spin_lock_irqsave(&dcb->poll->lock, flags);
	wake_up_locked_poll(dcb->poll, dcb->active);
	dcb->active = 0;
	spin_unlock_irqrestore(&dcb->poll->lock, flags);
	dma_fence_put(fence);
	/* Paired with get_file in dma_buf_poll */
	fput(dmabuf->file);
}

/*
 * Try to attach dcb to the first still-pending fence matching the requested
 * usage. Returns true if a callback was queued, false if all fences have
 * already signaled (dma_fence_add_callback() failing means "already done").
 */
static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
				struct dma_buf_poll_cb_t *dcb)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	int r;

	dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write),
				fence) {
		/* Reference is dropped in dma_buf_poll_cb once it fires */
		dma_fence_get(fence);
		r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
		if (!r)
			return true;
		dma_fence_put(fence);
	}

	return false;
}

/* file->poll: report readiness based on the buffer's implicit fences. */
static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv;
	__poll_t events;

	dmabuf = file->private_data;
	if (!dmabuf || !dmabuf->resv)
		return EPOLLERR;

	resv = dmabuf->resv;

	poll_wait(file, &dmabuf->poll, poll);

	events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
	if (!events)
		return 0;

	dma_resv_lock(resv, NULL);

	if (events & EPOLLOUT) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out;

		/* Check that callback isn't busy */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~EPOLLOUT;
		else
			dcb->active = EPOLLOUT;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & EPOLLOUT) {
			/* Paired with fput in dma_buf_poll_cb */
			get_file(dmabuf->file);

			if (!dma_buf_poll_add_cb(resv, true, dcb))
				/* No callback queued, wake up any other waiters */
				dma_buf_poll_cb(NULL, &dcb->cb);
			else
				events &= ~EPOLLOUT;
		}
	}

	if (events & EPOLLIN) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in;

		/* Check that callback isn't busy */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~EPOLLIN;
		else
			dcb->active = EPOLLIN;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & EPOLLIN) {
			/* Paired with fput in dma_buf_poll_cb */
			get_file(dmabuf->file);

			if (!dma_buf_poll_add_cb(resv, false, dcb))
				/* No callback queued, wake up any other waiters */
				dma_buf_poll_cb(NULL, &dcb->cb);
			else
				events &= ~EPOLLIN;
		}
	}

	dma_resv_unlock(resv);
	return events;
}

/**
 * dma_buf_set_name - Set a name to a specific dma_buf to track the usage.
 * It could support changing the name of the dma-buf if the same
 * piece of memory is used for multiple purpose between different devices.
 *
 * @dmabuf: [in]     dmabuf buffer that will be renamed.
 * @buf:    [in]     A piece of userspace memory that contains the name of
 *                   the dma-buf.
 *
 * Returns 0 on success, or a negative error code from strndup_user() if
 * copying the name from userspace fails.
 *
 */
static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
{
	/* Bounded copy from userspace; at most DMA_BUF_NAME_LEN bytes */
	char *name = strndup_user(buf, DMA_BUF_NAME_LEN);

	if (IS_ERR(name))
		return PTR_ERR(name);

	/* Swap in the new name under name_lock; free any previous one */
	spin_lock(&dmabuf->name_lock);
	kfree(dmabuf->name);
	dmabuf->name = name;
	spin_unlock(&dmabuf->name_lock);

	return 0;
}

#if IS_ENABLED(CONFIG_SYNC_FILE)
/*
 * Collapse the buffer's implicit fences (for the requested access mode) into
 * a single fence wrapped in a new sync_file, and return its fd to userspace.
 */
static long dma_buf_export_sync_file(struct dma_buf *dmabuf,
				     void __user *user_data)
{
	struct dma_buf_export_sync_file arg;
	enum dma_resv_usage usage;
	struct dma_fence *fence = NULL;
	struct sync_file *sync_file;
	int fd, ret;

	if (copy_from_user(&arg, user_data, sizeof(arg)))
		return -EFAULT;

	/* Exactly the READ/WRITE flag bits are allowed, and at least one set */
	if (arg.flags & ~DMA_BUF_SYNC_RW)
		return -EINVAL;

	if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
		return -EINVAL;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	usage = dma_resv_usage_rw(arg.flags & DMA_BUF_SYNC_WRITE);
	ret = dma_resv_get_singleton(dmabuf->resv, usage, &fence);
	if (ret)
		goto err_put_fd;

	/* No fences attached: export an already-signaled stub fence */
	if (!fence)
		fence = dma_fence_get_stub();

	sync_file = sync_file_create(fence);

	/* sync_file holds its own reference now (or creation failed) */
	dma_fence_put(fence);

	if (!sync_file) {
		ret = -ENOMEM;
		goto err_put_fd;
	}

	arg.fd = fd;
	if (copy_to_user(user_data, &arg, sizeof(arg))) {
		ret = -EFAULT;
		goto err_put_file;
	}

	/* Publish the fd only after the copy-out succeeded */
	fd_install(fd, sync_file->file);

	return 0;

err_put_file:
	fput(sync_file->file);
err_put_fd:
	put_unused_fd(fd);
	return ret;
}

/*
 * Add every fence contained in a userspace-supplied sync_file to this
 * buffer's reservation object, with the requested usage.
 */
static long dma_buf_import_sync_file(struct dma_buf *dmabuf,
				     const void __user *user_data)
{
	struct dma_buf_import_sync_file arg;
	struct dma_fence *fence, *f;
	enum dma_resv_usage usage;
	struct dma_fence_unwrap iter;
	unsigned int num_fences;
	int ret = 0;

	if (copy_from_user(&arg, user_data, sizeof(arg)))
		return -EFAULT;

	if (arg.flags & ~DMA_BUF_SYNC_RW)
		return -EINVAL;

	if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
		return -EINVAL;

	fence = sync_file_get_fence(arg.fd);
	if (!fence)
		return -EINVAL;

	usage = (arg.flags & DMA_BUF_SYNC_WRITE) ? DMA_RESV_USAGE_WRITE :
						   DMA_RESV_USAGE_READ;

	/* First pass: count the fences so we can reserve slots up front */
	num_fences = 0;
	dma_fence_unwrap_for_each(f, &iter, fence)
		++num_fences;

	if (num_fences > 0) {
		dma_resv_lock(dmabuf->resv, NULL);

		ret = dma_resv_reserve_fences(dmabuf->resv, num_fences);
		if (!ret) {
			/* Second pass: actually add them */
			dma_fence_unwrap_for_each(f, &iter, fence)
				dma_resv_add_fence(dmabuf->resv, f, usage);
		}

		dma_resv_unlock(dmabuf->resv);
	}

	dma_fence_put(fence);

	return ret;
}
#endif

/* file->unlocked_ioctl: dispatch the dma-buf uapi ioctls. */
static long dma_buf_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct dma_buf *dmabuf;
	struct dma_buf_sync sync;
	enum dma_data_direction direction;
	int ret;

	dmabuf = file->private_data;

	switch (cmd) {
	case DMA_BUF_IOCTL_SYNC:
		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
			return -EFAULT;

		if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
			return -EINVAL;

		switch (sync.flags & DMA_BUF_SYNC_RW) {
		case DMA_BUF_SYNC_READ:
			direction = DMA_FROM_DEVICE;
			break;
		case DMA_BUF_SYNC_WRITE:
			direction = DMA_TO_DEVICE;
			break;
		case DMA_BUF_SYNC_RW:
			direction = DMA_BIDIRECTIONAL;
			break;
		default:
			return -EINVAL;
		}

		if (sync.flags & DMA_BUF_SYNC_END)
			ret = dma_buf_end_cpu_access(dmabuf, direction);
		else
			ret = dma_buf_begin_cpu_access(dmabuf, direction);

		return ret;

	case DMA_BUF_SET_NAME_A:
	case DMA_BUF_SET_NAME_B:
		return dma_buf_set_name(dmabuf, (const char __user *)arg);

#if IS_ENABLED(CONFIG_SYNC_FILE)
	case DMA_BUF_IOCTL_EXPORT_SYNC_FILE:
		return dma_buf_export_sync_file(dmabuf, (void __user *)arg);
	case DMA_BUF_IOCTL_IMPORT_SYNC_FILE:
		return dma_buf_import_sync_file(dmabuf, (const void __user *)arg);
#endif

	default:
		return -ENOTTY;
	}
}

/* file->show_fdinfo: expose size, refcount, exporter and name via procfs. */
static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct dma_buf *dmabuf = file->private_data;

	seq_printf(m, "size:\t%zu\n", dmabuf->size);
	/* Don't count the temporary reference taken inside procfs seq_show */
	seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
	seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
	spin_lock(&dmabuf->name_lock);
	if (dmabuf->name)
		seq_printf(m, "name:\t%s\n", dmabuf->name);
	spin_unlock(&dmabuf->name_lock);
}

static const struct file_operations dma_buf_fops = {
	.release	= dma_buf_file_release,
	.mmap		= dma_buf_mmap_internal,
	.llseek		= dma_buf_llseek,
	.poll		= dma_buf_poll,
	.unlocked_ioctl	= dma_buf_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.show_fdinfo	= dma_buf_show_fdinfo,
};

/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
	return file->f_op == &dma_buf_fops;
}

/* Allocate the anonymous backing file (on dmabuffs) for a new buffer. */
static struct file *dma_buf_getfile(size_t size, int flags)
{
	static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
	struct file *file;

	if (IS_ERR(inode))
		return ERR_CAST(inode);

	inode->i_size = size;
	inode_set_bytes(inode, size);

	/*
	 * The ->i_ino assigned by alloc_anon_inode() is not unique thus
	 * not suitable for using it as dentry name by dmabuf stats.
	 * Override ->i_ino with the unique and dmabuffs specific
	 * value.
	 */
	inode->i_ino = atomic64_inc_return(&dmabuf_inode);
	/* Only access mode and O_NONBLOCK are honoured from the caller */
	flags &= O_ACCMODE | O_NONBLOCK;
	file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
				 flags, &dma_buf_fops);
	if (IS_ERR(file))
		goto err_alloc_file;

	return file;

err_alloc_file:
	iput(inode);
	return file;
}

/**
 * DOC: dma buf device access
 *
 * For device DMA access to a shared DMA buffer the usual sequence of operations
 * is fairly simple:
 *
 * 1. The exporter defines his exporter instance using
 *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
 *    buffer object into a &dma_buf. It then exports that &dma_buf to userspace
 *    as a file descriptor by calling dma_buf_fd().
 *
 * 2. Userspace passes this file-descriptors to all drivers it wants this buffer
 *    to share with: First the file descriptor is converted to a &dma_buf using
 *    dma_buf_get(). Then the buffer is attached to the device using
 *    dma_buf_attach().
 *
 *    Up to this stage the exporter is still free to migrate or reallocate the
 *    backing storage.
 *
 * 3. Once the buffer is attached to all devices userspace can initiate DMA
 *    access to the shared buffer. In the kernel this is done by calling
 *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
 *
 * 4. Once a driver is done with a shared buffer it needs to call
 *    dma_buf_detach() (after cleaning up any mappings) and then release the
 *    reference acquired with dma_buf_get() by calling dma_buf_put().
 *
 * For the detailed semantics exporters are expected to implement see
 * &dma_buf_ops.
 */

/**
 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connect the allocator specific data and ops to the buffer.
 * Additionally, provide a name string for exporter; useful in debugging.
 *
 * @exp_info:	[in]	holds all the export related information provided
 *			by the exporter. see &struct dma_buf_export_info
 *			for further details.
 *
 * Returns, on success, a newly created struct dma_buf object, which wraps the
 * supplied private data and operations for struct dma_buf_ops. On either
 * missing ops, or error in allocating struct dma_buf, will return negative
 * error.
 *
 * For most cases the easiest way to create @exp_info is through the
 * %DEFINE_DMA_BUF_EXPORT_INFO macro.
 */
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv = exp_info->resv;
	struct file *file;
	size_t alloc_size = sizeof(struct dma_buf);
	int ret;

	/* map/unmap/release are mandatory exporter callbacks */
	if (WARN_ON(!exp_info->priv || !exp_info->ops
		    || !exp_info->ops->map_dma_buf
		    || !exp_info->ops->unmap_dma_buf
		    || !exp_info->ops->release))
		return ERR_PTR(-EINVAL);

	/* pin and unpin must be provided together or not at all */
	if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
		return ERR_PTR(-EINVAL);

	/* Keep the exporting module alive as long as the buffer exists */
	if (!try_module_get(exp_info->owner))
		return ERR_PTR(-ENOENT);

	file = dma_buf_getfile(exp_info->size, exp_info->flags);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err_module;
	}

	/* Embed a dma_resv right behind the dma_buf if none was supplied */
	if (!exp_info->resv)
		alloc_size += sizeof(struct dma_resv);
	else
		/* prevent &dma_buf[1] == dma_buf->resv */
		alloc_size += 1;
	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
	if (!dmabuf) {
		ret = -ENOMEM;
		goto err_file;
	}

	dmabuf->priv = exp_info->priv;
	dmabuf->ops = exp_info->ops;
	dmabuf->size = exp_info->size;
	dmabuf->exp_name = exp_info->exp_name;
	dmabuf->owner = exp_info->owner;
	spin_lock_init(&dmabuf->name_lock);
	init_waitqueue_head(&dmabuf->poll);
	dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
	dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
	INIT_LIST_HEAD(&dmabuf->attachments);

	if (!resv) {
		dmabuf->resv = (struct dma_resv *)&dmabuf[1];
		dma_resv_init(dmabuf->resv);
	} else {
		dmabuf->resv = resv;
	}

	ret = dma_buf_stats_setup(dmabuf, file);
	if (ret)
		goto err_dmabuf;

	file->private_data = dmabuf;
	file->f_path.dentry->d_fsdata = dmabuf;
	dmabuf->file = file;

	__dma_buf_list_add(dmabuf);

	DMA_BUF_TRACE(trace_dma_buf_export, dmabuf);

	return dmabuf;

err_dmabuf:
	if (!resv)
		dma_resv_fini(dmabuf->resv);
	kfree(dmabuf);
err_file:
	fput(file);
err_module:
	module_put(exp_info->owner);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_export, "DMA_BUF");

/**
 * dma_buf_fd - returns a file descriptor for the given struct dma_buf
 * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
 * @flags:      [in]    flags to give to fd
 *
 * On success, returns an associated 'fd'. Else, returns error.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	int fd;

	if (!dmabuf || !dmabuf->file)
		return -EINVAL;

	fd = FD_ADD(flags, dmabuf->file);
	if (fd >= 0)
		DMA_BUF_TRACE(trace_dma_buf_fd, dmabuf, fd);

	return fd;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_fd, "DMA_BUF");

/**
 * dma_buf_get - returns the struct dma_buf related to an fd
 * @fd:	[in]	fd associated with the struct dma_buf to be returned
 *
 * On success, returns the struct dma_buf associated with an fd; uses
 * file's refcounting done by fget to increase refcount. returns ERR_PTR
 * otherwise.
823 */ 824 struct dma_buf *dma_buf_get(int fd) 825 { 826 struct file *file; 827 struct dma_buf *dmabuf; 828 829 file = fget(fd); 830 831 if (!file) 832 return ERR_PTR(-EBADF); 833 834 if (!is_dma_buf_file(file)) { 835 fput(file); 836 return ERR_PTR(-EINVAL); 837 } 838 839 dmabuf = file->private_data; 840 841 DMA_BUF_TRACE(trace_dma_buf_get, dmabuf, fd); 842 843 return dmabuf; 844 } 845 EXPORT_SYMBOL_NS_GPL(dma_buf_get, "DMA_BUF"); 846 847 /** 848 * dma_buf_put - decreases refcount of the buffer 849 * @dmabuf: [in] buffer to reduce refcount of 850 * 851 * Uses file's refcounting done implicitly by fput(). 852 * 853 * If, as a result of this call, the refcount becomes 0, the 'release' file 854 * operation related to this fd is called. It calls &dma_buf_ops.release vfunc 855 * in turn, and frees the memory allocated for dmabuf when exported. 856 */ 857 void dma_buf_put(struct dma_buf *dmabuf) 858 { 859 if (WARN_ON(!dmabuf || !dmabuf->file)) 860 return; 861 862 fput(dmabuf->file); 863 864 DMA_BUF_TRACE(trace_dma_buf_put, dmabuf); 865 } 866 EXPORT_SYMBOL_NS_GPL(dma_buf_put, "DMA_BUF"); 867 868 static int dma_buf_wrap_sg_table(struct sg_table **sg_table) 869 { 870 struct scatterlist *to_sg, *from_sg; 871 struct sg_table *from = *sg_table; 872 struct dma_buf_sg_table_wrapper *to; 873 int i, ret; 874 875 if (!IS_ENABLED(CONFIG_DMABUF_DEBUG)) 876 return 0; 877 878 /* 879 * To catch abuse of the underlying struct page by importers copy the 880 * sg_table without copying the page_link and give only the copy back to 881 * the importer. 
882 */ 883 to = kzalloc(sizeof(*to), GFP_KERNEL); 884 if (!to) 885 return -ENOMEM; 886 887 ret = sg_alloc_table(&to->wrapper, from->nents, GFP_KERNEL); 888 if (ret) 889 goto free_to; 890 891 to_sg = to->wrapper.sgl; 892 for_each_sgtable_dma_sg(from, from_sg, i) { 893 to_sg->offset = 0; 894 to_sg->length = 0; 895 sg_assign_page(to_sg, NULL); 896 sg_dma_address(to_sg) = sg_dma_address(from_sg); 897 sg_dma_len(to_sg) = sg_dma_len(from_sg); 898 to_sg = sg_next(to_sg); 899 } 900 901 to->original = from; 902 *sg_table = &to->wrapper; 903 return 0; 904 905 free_to: 906 kfree(to); 907 return ret; 908 } 909 910 static void dma_buf_unwrap_sg_table(struct sg_table **sg_table) 911 { 912 struct dma_buf_sg_table_wrapper *copy; 913 914 if (!IS_ENABLED(CONFIG_DMABUF_DEBUG)) 915 return; 916 917 copy = container_of(*sg_table, typeof(*copy), wrapper); 918 *sg_table = copy->original; 919 sg_free_table(©->wrapper); 920 kfree(copy); 921 } 922 923 static inline bool 924 dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach) 925 { 926 return !!attach->importer_ops; 927 } 928 929 static bool 930 dma_buf_pin_on_map(struct dma_buf_attachment *attach) 931 { 932 return attach->dmabuf->ops->pin && 933 (!dma_buf_attachment_is_dynamic(attach) || 934 !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)); 935 } 936 937 /** 938 * DOC: locking convention 939 * 940 * In order to avoid deadlock situations between dma-buf exports and importers, 941 * all dma-buf API users must follow the common dma-buf locking convention. 942 * 943 * Convention for importers 944 * 945 * 1. Importers must hold the dma-buf reservation lock when calling these 946 * functions: 947 * 948 * - dma_buf_pin() 949 * - dma_buf_unpin() 950 * - dma_buf_map_attachment() 951 * - dma_buf_unmap_attachment() 952 * - dma_buf_vmap() 953 * - dma_buf_vunmap() 954 * 955 * 2. 
Importers must not hold the dma-buf reservation lock when calling these
 * functions:
 *
 * - dma_buf_attach()
 * - dma_buf_dynamic_attach()
 * - dma_buf_detach()
 * - dma_buf_export()
 * - dma_buf_fd()
 * - dma_buf_get()
 * - dma_buf_put()
 * - dma_buf_mmap()
 * - dma_buf_begin_cpu_access()
 * - dma_buf_end_cpu_access()
 * - dma_buf_map_attachment_unlocked()
 * - dma_buf_unmap_attachment_unlocked()
 * - dma_buf_vmap_unlocked()
 * - dma_buf_vunmap_unlocked()
 *
 * Convention for exporters
 *
 * 1. These &dma_buf_ops callbacks are invoked with unlocked dma-buf
 * reservation and exporter can take the lock:
 *
 * - &dma_buf_ops.attach()
 * - &dma_buf_ops.detach()
 * - &dma_buf_ops.release()
 * - &dma_buf_ops.begin_cpu_access()
 * - &dma_buf_ops.end_cpu_access()
 * - &dma_buf_ops.mmap()
 *
 * 2. These &dma_buf_ops callbacks are invoked with locked dma-buf
 * reservation and exporter can't take the lock:
 *
 * - &dma_buf_ops.pin()
 * - &dma_buf_ops.unpin()
 * - &dma_buf_ops.map_dma_buf()
 * - &dma_buf_ops.unmap_dma_buf()
 * - &dma_buf_ops.vmap()
 * - &dma_buf_ops.vunmap()
 *
 * 3. Exporters must hold the dma-buf reservation lock when calling these
 * functions:
 *
 * - dma_buf_move_notify()
 */

/**
 * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
 * @dmabuf:		[in]	buffer to attach device to.
 * @dev:		[in]	device to be attached.
 * @importer_ops:	[in]	importer operations for the attachment
 * @importer_priv:	[in]	importer private pointer for the attachment
 *
 * Returns struct dma_buf_attachment pointer for this attachment. Attachments
 * must be cleaned up by calling dma_buf_detach().
 *
 * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
 * functionality.
 *
 * Returns:
 *
 * A pointer to newly created &dma_buf_attachment on success, or a negative
 * error code wrapped into a pointer on failure.
 *
 * Note that this can fail if the backing storage of @dmabuf is in a place not
 * accessible to @dev, and cannot be moved to a more suitable place. This is
 * indicated with the error code -EBUSY.
 */
struct dma_buf_attachment *
dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
		       const struct dma_buf_attach_ops *importer_ops,
		       void *importer_priv)
{
	struct dma_buf_attachment *attach;
	int ret;

	if (WARN_ON(!dmabuf || !dev))
		return ERR_PTR(-EINVAL);

	/* Dynamic importers must be able to receive move notifications */
	if (WARN_ON(importer_ops && !importer_ops->move_notify))
		return ERR_PTR(-EINVAL);

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return ERR_PTR(-ENOMEM);

	attach->dev = dev;
	attach->dmabuf = dmabuf;
	if (importer_ops)
		attach->peer2peer = importer_ops->allow_peer2peer;
	attach->importer_ops = importer_ops;
	attach->importer_priv = importer_priv;

	/* Exporter-specific attach hook runs before we link the attachment */
	if (dmabuf->ops->attach) {
		ret = dmabuf->ops->attach(dmabuf, attach);
		if (ret)
			goto err_attach;
	}
	dma_resv_lock(dmabuf->resv, NULL);
	list_add(&attach->node, &dmabuf->attachments);
	dma_resv_unlock(dmabuf->resv);

	DMA_BUF_TRACE(trace_dma_buf_dynamic_attach, dmabuf, attach,
		      dma_buf_attachment_is_dynamic(attach), dev);

	return attach;

err_attach:
	kfree(attach);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, "DMA_BUF");

/**
 * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
 * @dmabuf:	[in]	buffer to attach device to.
 * @dev:	[in]	device to be attached.
 *
 * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
 * mapping.
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev)
{
	return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_attach, "DMA_BUF");

/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
 * @dmabuf:	[in]	buffer to detach from.
 * @attach:	[in]	attachment to be detached; is free'd after this call.
 *
 * Clean up a device attachment obtained by calling dma_buf_attach().
 *
 * Optionally this calls &dma_buf_ops.detach for device-specific detach.
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
	if (WARN_ON(!dmabuf || !attach || dmabuf != attach->dmabuf))
		return;

	dma_resv_lock(dmabuf->resv, NULL);
	list_del(&attach->node);
	dma_resv_unlock(dmabuf->resv);

	/* Exporter-specific detach hook runs after the attachment is unlinked */
	if (dmabuf->ops->detach)
		dmabuf->ops->detach(dmabuf, attach);

	DMA_BUF_TRACE(trace_dma_buf_detach, dmabuf, attach,
		      dma_buf_attachment_is_dynamic(attach), attach->dev);

	kfree(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_detach, "DMA_BUF");

/**
 * dma_buf_pin - Lock down the DMA-buf
 * @attach:	[in]	attachment which should be pinned
 *
 * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may
 * call this, and only for limited use cases like scanout and not for temporary
 * pin operations. It is not permitted to allow userspace to pin arbitrary
 * amounts of buffers through this interface.
 *
 * Buffers must be unpinned by calling dma_buf_unpin().
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int dma_buf_pin(struct dma_buf_attachment *attach)
{
	struct dma_buf *dmabuf = attach->dmabuf;
	int ret = 0;

	/* Pinning only makes sense for dynamic attachments. */
	WARN_ON(!attach->importer_ops);

	dma_resv_assert_held(dmabuf->resv);

	/* An exporter without a pin callback never moves its buffers. */
	if (dmabuf->ops->pin)
		ret = dmabuf->ops->pin(attach);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_pin, "DMA_BUF");

/**
 * dma_buf_unpin - Unpin a DMA-buf
 * @attach:	[in]	attachment which should be unpinned
 *
 * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
 * any mapping of @attach again and inform the importer through
 * &dma_buf_attach_ops.move_notify.
 */
void dma_buf_unpin(struct dma_buf_attachment *attach)
{
	struct dma_buf *dmabuf = attach->dmabuf;

	/* Must pair with dma_buf_pin(), which requires dynamic importers. */
	WARN_ON(!attach->importer_ops);

	dma_resv_assert_held(dmabuf->resv);

	if (dmabuf->ops->unpin)
		dmabuf->ops->unpin(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, "DMA_BUF");

/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 *
 * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
 * on error. May return -EINTR if it is interrupted by a signal.
 *
 * On success, the DMA addresses and lengths in the returned scatterlist are
 * PAGE_SIZE aligned.
 *
 * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
 * the underlying backing storage is pinned for as long as a mapping exists,
 * therefore users/importers should not hold onto a mapping for undue amounts of
 * time.
 *
 * Important: Dynamic importers must wait for the exclusive fence of the struct
 * dma_resv attached to the DMA-BUF first.
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
					enum dma_data_direction direction)
{
	struct sg_table *sg_table;
	signed long ret;

	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	dma_resv_assert_held(attach->dmabuf->resv);

	/* Static importers keep the buffer pinned for the mapping lifetime. */
	if (dma_buf_pin_on_map(attach)) {
		ret = attach->dmabuf->ops->pin(attach);
		/*
		 * Catch exporters making buffers inaccessible even when
		 * attachments preventing that exist.
		 */
		WARN_ON_ONCE(ret == -EBUSY);
		if (ret)
			return ERR_PTR(ret);
	}

	/* A NULL return from the exporter is normalized to -ENOMEM. */
	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
	if (!sg_table)
		sg_table = ERR_PTR(-ENOMEM);
	if (IS_ERR(sg_table))
		goto error_unpin;

	/*
	 * Importers with static attachments don't wait for fences.
	 */
	if (!dma_buf_attachment_is_dynamic(attach)) {
		ret = dma_resv_wait_timeout(attach->dmabuf->resv,
					    DMA_RESV_USAGE_KERNEL, true,
					    MAX_SCHEDULE_TIMEOUT);
		if (ret < 0)
			goto error_unmap;
	}
	/* Hide the sg_table page links from the importer (see wrapper struct). */
	ret = dma_buf_wrap_sg_table(&sg_table);
	if (ret)
		goto error_unmap;

	/* Debug-only check of the PAGE_SIZE alignment contract from the doc. */
	if (IS_ENABLED(CONFIG_DMA_API_DEBUG)) {
		struct scatterlist *sg;
		u64 addr;
		int len;
		int i;

		for_each_sgtable_dma_sg(sg_table, sg, i) {
			addr = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
				pr_debug("%s: addr %llx or len %x is not page aligned!\n",
					 __func__, addr, len);
				break;
			}
		}
	}
	return sg_table;

error_unmap:
	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
	sg_table = ERR_PTR(ret);

error_unpin:
	if (dma_buf_pin_on_map(attach))
		attach->dmabuf->ops->unpin(attach);

	return sg_table;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, "DMA_BUF");

/**
 * dma_buf_map_attachment_unlocked - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 *
 * Unlocked variant of dma_buf_map_attachment().
 */
struct sg_table *
dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
				enum dma_data_direction direction)
{
	struct sg_table *sg_table;

	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	/* Take the reservation lock around the locked variant. */
	dma_resv_lock(attach->dmabuf->resv, NULL);
	sg_table = dma_buf_map_attachment(attach, direction);
	dma_resv_unlock(attach->dmabuf->resv);

	return sg_table;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment_unlocked, "DMA_BUF");

/**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
 * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
 * dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:	[in]	direction of DMA transfer
 *
 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
			      struct sg_table *sg_table,
			      enum dma_data_direction direction)
{
	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
		return;

	dma_resv_assert_held(attach->dmabuf->resv);

	/* Recover the exporter's original sg_table before calling it. */
	dma_buf_unwrap_sg_table(&sg_table);
	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);

	/* Drop the pin taken in dma_buf_map_attachment() for static importers. */
	if (dma_buf_pin_on_map(attach))
		attach->dmabuf->ops->unpin(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, "DMA_BUF");

/**
 * dma_buf_unmap_attachment_unlocked - unmaps and decreases usecount of the buffer; might
 * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
 * dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:	[in]	direction of DMA transfer
 *
 * Unlocked variant of dma_buf_unmap_attachment().
 */
void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach,
				       struct sg_table *sg_table,
				       enum dma_data_direction direction)
{
	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
		return;

	/* Take the reservation lock around the locked variant. */
	dma_resv_lock(attach->dmabuf->resv, NULL);
	dma_buf_unmap_attachment(attach, sg_table, direction);
	dma_resv_unlock(attach->dmabuf->resv);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, "DMA_BUF");

/**
 * dma_buf_move_notify - notify attachments that DMA-buf is moving
 *
 * @dmabuf:	[in]	buffer which is moving
 *
 * Informs all attachments that they need to destroy and recreate all their
 * mappings.
 */
void dma_buf_move_notify(struct dma_buf *dmabuf)
{
	struct dma_buf_attachment *attach;

	/* The attachment list is protected by the reservation lock. */
	dma_resv_assert_held(dmabuf->resv);

	list_for_each_entry(attach, &dmabuf->attachments, node)
		if (attach->importer_ops)
			attach->importer_ops->move_notify(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, "DMA_BUF");

/**
 * DOC: cpu access
 *
 * There are multiple reasons for supporting CPU access to a dma buffer object:
 *
 * - Fallback operations in the kernel, for example when a device is connected
 *   over USB and the kernel needs to shuffle the data around first before
 *   sending it away. Cache coherency is handled by bracketing any transactions
 *   with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access().
 *
 *   Since most kernel-internal dma-buf accesses need the entire buffer, a
 *   vmap interface is introduced. Note that on very old 32-bit architectures
 *   vmalloc space might be limited and result in vmap calls failing.
 *
 *   Interfaces:
 *
 *   .. code-block:: c
 *
 *     void *dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
 *     void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
 *
 *   The vmap call can fail if there is no vmap support in the exporter, or if
 *   it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
 *   count for all vmap access and calls down into the exporter's vmap function
 *   only when no vmapping exists, and only unmaps it once. Protection against
 *   concurrent vmap/vunmap calls is provided by taking the &dma_buf.lock mutex.
 *
 * - For full compatibility on the importer side with existing userspace
 *   interfaces, which might already support mmap'ing buffers. This is needed in
 *   many processing pipelines (e.g. feeding a software rendered image into a
 *   hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
 *   framework already supported this and for DMA buffer file descriptors to
 *   replace ION buffers mmap support was needed.
 *
 *   There are no special interfaces; userspace simply calls mmap on the dma-buf
 *   fd. But like for CPU access there's a need to bracket the actual access,
 *   which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
 *   DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
 *   be restarted.
 *
 *   Some systems might need some sort of cache coherency management e.g. when
 *   CPU and GPU domains are being accessed through dma-buf at the same time.
 *   To circumvent this problem there are begin/end coherency markers, that
 *   forward directly to existing dma-buf device drivers vfunc hooks. Userspace
 *   can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
 *   sequence would be used like following:
 *
 *     - mmap dma-buf fd
 *     - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
 *       to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
 *       want (with the new data being consumed by say the GPU or the scanout
 *       device)
 *     - munmap once you don't need the buffer any more
 *
 *   For correctness and optimal performance, it is always required to use
 *   SYNC_START and SYNC_END before and after, respectively, when accessing the
 *   mapped address. Userspace cannot rely on coherent access, even when there
 *   are systems where it just works without calling these ioctls.
 *
 * - And as a CPU fallback in userspace processing pipelines.
 *
 *   Similar to the motivation for kernel cpu access it is again important that
 *   the userspace code of a given importing subsystem can use the same
 *   interfaces with an imported dma-buf buffer object as with a native buffer
 *   object. This is especially important for drm where the userspace part of
 *   contemporary OpenGL, X, and other drivers is huge, and reworking them to
 *   use a different way to mmap a buffer is rather invasive.
 *
 *   The assumption in the current dma-buf interfaces is that redirecting the
 *   initial mmap is all that's needed. A survey of some of the existing
 *   subsystems shows that no driver seems to do any nefarious thing like
 *   syncing up with outstanding asynchronous processing on the device or
 *   allocating special resources at fault time. So hopefully this is good
 *   enough, since adding interfaces to intercept pagefaults and allow pte
 *   shootdowns would increase the complexity quite a bit.
 *
 *   Interface:
 *
 *   ..
code-block:: c
 *
 *     int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *, unsigned long);
 *
 * If the importing subsystem simply provides a special-purpose mmap call to
 * set up a mapping in userspace, calling do_mmap with &dma_buf.file will
 * equally achieve that for a dma-buf object.
 */

/*
 * Wait for the implicit fences tracked in @dmabuf's reservation object
 * before CPU access starts.
 */
static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	/* A CPU write must wait for readers as well as writers. */
	bool write = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_TO_DEVICE);
	struct dma_resv *resv = dmabuf->resv;
	long ret;

	/* Wait on any implicit rendering fences */
	ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write),
				    true, MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

	return 0;
}

/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
 * preparations. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to prepare cpu access for.
 * @direction:	[in]	direction of access.
 *
 * After the cpu access is complete the caller should call
 * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
 * it guaranteed to be coherent with other DMA access.
 *
 * This function will also wait for any DMA transactions tracked through
 * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit
 * synchronization this function will only ensure cache coherency, callers must
 * ensure synchronization with such DMA transactions on their own.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
			     enum dma_data_direction direction)
{
	int ret = 0;

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	/* Lockdep annotation: exporters may take the resv lock here. */
	might_lock(&dmabuf->resv->lock.base);

	if (dmabuf->ops->begin_cpu_access)
		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);

	/* Ensure that all fences are waited upon - but we first allow
	 * the native handler the chance to do so more efficiently if it
	 * chooses. A double invocation here will be reasonably cheap no-op.
	 */
	if (ret == 0)
		ret = __dma_buf_begin_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, "DMA_BUF");

/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to complete cpu access for.
 * @direction:	[in]	direction of access.
 *
 * This terminates CPU access started with dma_buf_begin_cpu_access().
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
			   enum dma_data_direction direction)
{
	int ret = 0;

	WARN_ON(!dmabuf);

	/* Lockdep annotation: exporters may take the resv lock here. */
	might_lock(&dmabuf->resv->lock.base);

	if (dmabuf->ops->end_cpu_access)
		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, "DMA_BUF");


/**
 * dma_buf_mmap - Setup up a userspace mmap with the given vma
 * @dmabuf:	[in]	buffer that should back the vma
 * @vma:	[in]	vma for the mmap
 * @pgoff:	[in]	offset in pages where this mmap should start within the
 *			dma-buf buffer.
1542 * 1543 * This function adjusts the passed in vma so that it points at the file of the 1544 * dma_buf operation. It also adjusts the starting pgoff and does bounds 1545 * checking on the size of the vma. Then it calls the exporters mmap function to 1546 * set up the mapping. 1547 * 1548 * Can return negative error values, returns 0 on success. 1549 */ 1550 int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma, 1551 unsigned long pgoff) 1552 { 1553 if (WARN_ON(!dmabuf || !vma)) 1554 return -EINVAL; 1555 1556 /* check if buffer supports mmap */ 1557 if (!dmabuf->ops->mmap) 1558 return -EINVAL; 1559 1560 /* check for offset overflow */ 1561 if (pgoff + vma_pages(vma) < pgoff) 1562 return -EOVERFLOW; 1563 1564 /* check for overflowing the buffer's size */ 1565 if (pgoff + vma_pages(vma) > 1566 dmabuf->size >> PAGE_SHIFT) 1567 return -EINVAL; 1568 1569 /* readjust the vma */ 1570 vma_set_file(vma, dmabuf->file); 1571 vma->vm_pgoff = pgoff; 1572 1573 DMA_BUF_TRACE(trace_dma_buf_mmap, dmabuf); 1574 1575 return dmabuf->ops->mmap(dmabuf, vma); 1576 } 1577 EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, "DMA_BUF"); 1578 1579 /** 1580 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel 1581 * address space. Same restrictions as for vmap and friends apply. 1582 * @dmabuf: [in] buffer to vmap 1583 * @map: [out] returns the vmap pointer 1584 * 1585 * This call may fail due to lack of virtual mapping address space. 1586 * These calls are optional in drivers. The intended use for them 1587 * is for mapping objects linear in kernel space for high use objects. 1588 * 1589 * To ensure coherency users must call dma_buf_begin_cpu_access() and 1590 * dma_buf_end_cpu_access() around any cpu access performed through this 1591 * mapping. 1592 * 1593 * Returns 0 on success, or a negative errno code otherwise. 
 */
int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct iosys_map ptr;
	int ret;

	/* Make sure @map reads as "no mapping" on every error path. */
	iosys_map_clear(map);

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	dma_resv_assert_held(dmabuf->resv);

	if (!dmabuf->ops->vmap)
		return -EINVAL;

	/* Reuse the cached mapping if the buffer is already vmapped. */
	if (dmabuf->vmapping_counter) {
		dmabuf->vmapping_counter++;
		BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
		*map = dmabuf->vmap_ptr;
		return 0;
	}

	BUG_ON(iosys_map_is_set(&dmabuf->vmap_ptr));

	ret = dmabuf->ops->vmap(dmabuf, &ptr);
	if (WARN_ON_ONCE(ret))
		return ret;

	/* Cache the mapping for subsequent dma_buf_vmap() callers. */
	dmabuf->vmap_ptr = ptr;
	dmabuf->vmapping_counter = 1;

	*map = dmabuf->vmap_ptr;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, "DMA_BUF");

/**
 * dma_buf_vmap_unlocked - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf:	[in]	buffer to vmap
 * @map:	[out]	returns the vmap pointer
 *
 * Unlocked version of dma_buf_vmap()
 *
 * Returns 0 on success, or a negative errno code otherwise.
 */
int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
{
	int ret;

	iosys_map_clear(map);

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	/* Take the reservation lock around the locked variant. */
	dma_resv_lock(dmabuf->resv, NULL);
	ret = dma_buf_vmap(dmabuf, map);
	dma_resv_unlock(dmabuf->resv);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vmap_unlocked, "DMA_BUF");

/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:	[in]	buffer to vunmap
 * @map:	[in]	vmap pointer to vunmap
 */
void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	if (WARN_ON(!dmabuf))
		return;

	dma_resv_assert_held(dmabuf->resv);

	/* Catch unbalanced or mismatched vmap/vunmap pairs. */
	BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
	BUG_ON(dmabuf->vmapping_counter == 0);
	BUG_ON(!iosys_map_is_equal(&dmabuf->vmap_ptr, map));

	/* Only call into the exporter once the last user is gone. */
	if (--dmabuf->vmapping_counter == 0) {
		if (dmabuf->ops->vunmap)
			dmabuf->ops->vunmap(dmabuf, map);
		iosys_map_clear(&dmabuf->vmap_ptr);
	}
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, "DMA_BUF");

/**
 * dma_buf_vunmap_unlocked - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:	[in]	buffer to vunmap
 * @map:	[in]	vmap pointer to vunmap
 */
void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
{
	if (WARN_ON(!dmabuf))
		return;

	/* Take the reservation lock around the locked variant. */
	dma_resv_lock(dmabuf->resv, NULL);
	dma_buf_vunmap(dmabuf, map);
	dma_resv_unlock(dmabuf->resv);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap_unlocked, "DMA_BUF");

#ifdef CONFIG_DEBUG_FS
/*
 * Dump every buffer on the global dmabuf_list, along with its attached
 * devices, to the debugfs "bufinfo" file.
 */
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
	struct dma_buf *buf_obj;
	struct dma_buf_attachment *attach_obj;
	int count = 0, attach_count;
	size_t size = 0;
	int ret;

	ret = mutex_lock_interruptible(&dmabuf_list_mutex);

	if (ret)
		return ret;

	seq_puts(s, "\nDma-buf Objects:\n");
	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n",
		   "size", "flags", "mode", "count", "ino");

	list_for_each_entry(buf_obj, &dmabuf_list, list_node) {

		ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
		if (ret)
			goto error_unlock;


		/* buf_obj->name must be read under name_lock (see top of file). */
		spin_lock(&buf_obj->name_lock);
		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
			   buf_obj->size,
			   buf_obj->file->f_flags, buf_obj->file->f_mode,
			   file_count(buf_obj->file),
			   buf_obj->exp_name,
			   file_inode(buf_obj->file)->i_ino,
			   buf_obj->name ?: "<none>");
		spin_unlock(&buf_obj->name_lock);

		dma_resv_describe(buf_obj->resv, s);

		seq_puts(s, "\tAttached Devices:\n");
		attach_count = 0;

		/* Walking the attachment list requires the resv lock (held). */
		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
			seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
			attach_count++;
		}
		dma_resv_unlock(buf_obj->resv);

		seq_printf(s, "Total %d devices attached\n\n",
			   attach_count);

		count++;
		size += buf_obj->size;
	}

	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);

	mutex_unlock(&dmabuf_list_mutex);
	return 0;

error_unlock:
	mutex_unlock(&dmabuf_list_mutex);
	return ret;
}

DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);

static struct dentry *dma_buf_debugfs_dir;

/* Create /sys/kernel/debug/dma_buf/bufinfo. */
static int dma_buf_init_debugfs(void)
{
	struct dentry *d;
	int err = 0;

	d = debugfs_create_dir("dma_buf", NULL);
	if (IS_ERR(d))
		return PTR_ERR(d);

	dma_buf_debugfs_dir = d;

	d = debugfs_create_file("bufinfo", 0444, dma_buf_debugfs_dir,
				NULL, &dma_buf_debug_fops);
	if (IS_ERR(d)) {
		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
		/* Tear the directory down again; debugfs is best effort. */
		debugfs_remove_recursive(dma_buf_debugfs_dir);
		dma_buf_debugfs_dir = NULL;
		err = PTR_ERR(d);
	}

	return err;
}

static void dma_buf_uninit_debugfs(void)
{
	debugfs_remove_recursive(dma_buf_debugfs_dir);
}
#else
static inline int dma_buf_init_debugfs(void)
{
	return 0;
}
static inline void dma_buf_uninit_debugfs(void)
{
}
#endif

static int __init dma_buf_init(void)
{
	int ret;

	ret = dma_buf_init_sysfs_statistics();
	if (ret)
		return ret;

	/*
	 * NOTE(review): sysfs statistics are not unwound if kern_mount()
	 * fails — presumably acceptable for a subsys initcall; verify.
	 */
	dma_buf_mnt = kern_mount(&dma_buf_fs_type);
	if (IS_ERR(dma_buf_mnt))
		return PTR_ERR(dma_buf_mnt);

	/* Debugfs failure is deliberately not fatal. */
	dma_buf_init_debugfs();
	return 0;
}
subsys_initcall(dma_buf_init);

static void __exit dma_buf_deinit(void)
{
	dma_buf_uninit_debugfs();
	kern_unmount(dma_buf_mnt);
	dma_buf_uninit_sysfs_statistics();
}
__exitcall(dma_buf_deinit);