/*
 * Copyright © 2012 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 *
 */

#include <linux/export.h>
#include <linux/dma-buf.h>
#include <linux/rbtree.h>
#include <linux/module.h>

#include <drm/drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>

#include "drm_internal.h"

MODULE_IMPORT_NS(DMA_BUF);

/**
 * DOC: overview and lifetime rules
 *
 * Similar to GEM global names, PRIME file descriptors are also used to share
 * buffer objects across processes. They offer additional security: as file
 * descriptors must be explicitly sent over UNIX domain sockets to be shared
 * between applications, they can't be guessed like the globally unique GEM
 * names.
 *
 * Drivers that support the PRIME API implement the
 * &drm_gem_object_funcs.export and &drm_driver.gem_prime_import hooks.
 * &dma_buf_ops implementations for drivers are all individually exported
 * for drivers which need to overwrite or reimplement some of them.
 *
 * Reference Counting for GEM Drivers
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * On export, the &dma_buf holds a reference to the exported buffer object,
 * usually a &drm_gem_object. It takes this reference in the PRIME_HANDLE_TO_FD
 * IOCTL, when it first calls &drm_gem_object_funcs.export
 * and stores the exporting GEM object in the &dma_buf.priv field. This
 * reference needs to be released when the final reference to the &dma_buf
 * itself is dropped and its &dma_buf_ops.release function is called. For
 * GEM-based drivers, the &dma_buf should be exported using
 * drm_gem_dmabuf_export() and then released by drm_gem_dmabuf_release().
 *
 * Thus the chain of references always flows in one direction, avoiding loops:
 * importing GEM object -> dma-buf -> exported GEM bo. A further complication
 * is the lookup caches for import and export. These are required to guarantee
 * that any given object will always have only one unique userspace handle.
 * This is required to allow userspace to detect duplicated imports, since
 * some GEM drivers do fail command submissions if a given buffer object is
 * listed more than once. These import and export caches in
 * &drm_prime_file_private only retain a weak reference, which is cleaned up
 * when the corresponding object is released.
 *
 * Self-importing: If userspace is using PRIME as a replacement for flink then
 * it will get a fd->handle request for a GEM object that it created. Drivers
 * should detect this situation and return back the underlying object from the
 * dma-buf private. For GEM based drivers this is handled in
 * drm_gem_prime_import() already.
 */

struct drm_prime_member {
	struct dma_buf *dma_buf;
	uint32_t handle;

	struct rb_node dmabuf_rb;
	struct rb_node handle_rb;
};

static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
				    struct dma_buf *dma_buf, uint32_t handle)
{
	struct drm_prime_member *member;
	struct rb_node **p, *rb;

	member = kmalloc(sizeof(*member), GFP_KERNEL);
	if (!member)
		return -ENOMEM;

	get_dma_buf(dma_buf);
	member->dma_buf = dma_buf;
	member->handle = handle;

	rb = NULL;
	p = &prime_fpriv->dmabufs.rb_node;
	while (*p) {
		struct drm_prime_member *pos;

		rb = *p;
		pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (dma_buf > pos->dma_buf)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&member->dmabuf_rb, rb, p);
	rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs);

	rb = NULL;
	p = &prime_fpriv->handles.rb_node;
	while (*p) {
		struct drm_prime_member *pos;

		rb = *p;
		pos = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (handle > pos->handle)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&member->handle_rb, rb, p);
	rb_insert_color(&member->handle_rb, &prime_fpriv->handles);

	return 0;
}

static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
						      uint32_t handle)
{
	struct rb_node *rb;

	rb = prime_fpriv->handles.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (member->handle == handle)
			return member->dma_buf;
		else if (member->handle < handle)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
				       struct dma_buf *dma_buf,
				       uint32_t *handle)
{
	struct rb_node *rb;

	rb = prime_fpriv->dmabufs.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (member->dma_buf == dma_buf) {
			*handle = member->handle;
			return 0;
		} else if (member->dma_buf < dma_buf) {
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	}

	return -ENOENT;
}

void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
				 uint32_t handle)
{
	struct rb_node *rb;

	mutex_lock(&prime_fpriv->lock);

	rb = prime_fpriv->handles.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (member->handle == handle) {
			rb_erase(&member->handle_rb, &prime_fpriv->handles);
			rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);

			dma_buf_put(member->dma_buf);
			kfree(member);
			break;
		} else if (member->handle < handle) {
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	}

	mutex_unlock(&prime_fpriv->lock);
}

void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
	mutex_init(&prime_fpriv->lock);
	prime_fpriv->dmabufs = RB_ROOT;
	prime_fpriv->handles = RB_ROOT;
}

void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
	/* by now drm_gem_release should've made sure the list is empty */
	WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
}

/**
 * drm_gem_dmabuf_export - &dma_buf export implementation for GEM
 * @dev: parent device for the exported dmabuf
 * @exp_info: the export information used by dma_buf_export()
 *
 * This wraps dma_buf_export() for use by generic GEM drivers that are using
 * drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take
 * a reference to the &drm_device and the exported &drm_gem_object (stored in
 * &dma_buf_export_info.priv) which is released by drm_gem_dmabuf_release().
 *
 * Returns the new dmabuf.
 */
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
				      struct dma_buf_export_info *exp_info)
{
	struct drm_gem_object *obj = exp_info->priv;
	struct dma_buf *dma_buf;

	dma_buf = dma_buf_export(exp_info);
	if (IS_ERR(dma_buf))
		return dma_buf;

	drm_dev_get(dev);
	drm_gem_object_get(obj);
	dma_buf->file->f_mapping = obj->dev->anon_inode->i_mapping;

	return dma_buf;
}
EXPORT_SYMBOL(drm_gem_dmabuf_export);

/**
 * drm_gem_dmabuf_release - &dma_buf release implementation for GEM
 * @dma_buf: buffer to be released
 *
 * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
 * must use this in their &dma_buf_ops structure as the release callback.
 * drm_gem_dmabuf_release() should be used in conjunction with
 * drm_gem_dmabuf_export().
 */
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	/* drop the reference the export fd holds */
	drm_gem_object_put(obj);

	drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);

/**
 * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
 * @dev: drm_device to import into
 * @file_priv: drm file-private structure
 * @prime_fd: fd id of the dma-buf which should be imported
 * @handle: pointer to storage for the handle of the imported buffer object
 *
 * This is the PRIME import function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object. The actual
 * importing of the GEM object from the dma-buf is done through the
 * &drm_driver.gem_prime_import driver callback.
 *
 * Returns 0 on success or a negative error code on failure.
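 *
 * As an illustrative sketch (the foo_ names are hypothetical and not part of
 * this file), a GEM driver normally reaches this helper through the generic
 * ioctl path and only needs to set &drm_driver.gem_prime_import when the
 * default drm_gem_prime_import() is not sufficient::
 *
 *     static const struct drm_driver foo_driver = {
 *             .driver_features = DRIVER_GEM,
 *             .gem_prime_import = foo_gem_prime_import,
 *     };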
 */
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
			       struct drm_file *file_priv, int prime_fd,
			       uint32_t *handle)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	int ret;

	dma_buf = dma_buf_get(prime_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	mutex_lock(&file_priv->prime.lock);

	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
			dma_buf, handle);
	if (ret == 0)
		goto out_put;

	/* never seen this one, need to import */
	mutex_lock(&dev->object_name_lock);
	if (dev->driver->gem_prime_import)
		obj = dev->driver->gem_prime_import(dev, dma_buf);
	else
		obj = drm_gem_prime_import(dev, dma_buf);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	if (obj->dma_buf) {
		WARN_ON(obj->dma_buf != dma_buf);
	} else {
		obj->dma_buf = dma_buf;
		get_dma_buf(dma_buf);
	}

	/* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
	drm_gem_object_put(obj);
	if (ret)
		goto out_put;

	ret = drm_prime_add_buf_handle(&file_priv->prime,
			dma_buf, *handle);
	mutex_unlock(&file_priv->prime.lock);
	if (ret)
		goto fail;

	dma_buf_put(dma_buf);

	return 0;

fail:
	/* hmm, if driver attached, we are relying on the free-object path
	 * to detach.. which seems ok..
	 */
	drm_gem_handle_delete(file_priv, *handle);
	dma_buf_put(dma_buf);
	return ret;

out_unlock:
	mutex_unlock(&dev->object_name_lock);
out_put:
	mutex_unlock(&file_priv->prime.lock);
	dma_buf_put(dma_buf);
	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);

int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (dev->driver->prime_fd_to_handle) {
		return dev->driver->prime_fd_to_handle(dev, file_priv, args->fd,
						       &args->handle);
	}

	return drm_gem_prime_fd_to_handle(dev, file_priv, args->fd, &args->handle);
}

static struct dma_buf *export_and_register_object(struct drm_device *dev,
						  struct drm_gem_object *obj,
						  uint32_t flags)
{
	struct dma_buf *dmabuf;

	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		dmabuf = ERR_PTR(-ENOENT);
		return dmabuf;
	}

	if (obj->funcs && obj->funcs->export)
		dmabuf = obj->funcs->export(obj, flags);
	else
		dmabuf = drm_gem_prime_export(obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		return dmabuf;
	}

	/*
	 * Note that callers do not need to clean up the export cache
	 * since the check for obj->handle_count guarantees that someone
	 * will clean it up.
	 */
	obj->dma_buf = dmabuf;
	get_dma_buf(obj->dma_buf);

	return dmabuf;
}

/**
 * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @handle: buffer handle to export
 * @flags: flags like DRM_CLOEXEC
 * @prime_fd: pointer to storage for the fd id of the created dma-buf
 *
 * This is the PRIME export function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object. The actual
 * exporting from GEM object to a dma-buf is done through the
 * &drm_gem_object_funcs.export callback.
 */
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
			       struct drm_file *file_priv, uint32_t handle,
			       uint32_t flags,
			       int *prime_fd)
{
	struct drm_gem_object *obj;
	int ret = 0;
	struct dma_buf *dmabuf;

	mutex_lock(&file_priv->prime.lock);
	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		ret = -ENOENT;
		goto out_unlock;
	}

	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
	if (dmabuf) {
		get_dma_buf(dmabuf);
		goto out_have_handle;
	}

	mutex_lock(&dev->object_name_lock);
	/* re-export the original imported object */
	if (obj->import_attach) {
		dmabuf = obj->import_attach->dmabuf;
		get_dma_buf(dmabuf);
		goto out_have_obj;
	}

	if (obj->dma_buf) {
		get_dma_buf(obj->dma_buf);
		dmabuf = obj->dma_buf;
		goto out_have_obj;
	}

	dmabuf = export_and_register_object(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		ret = PTR_ERR(dmabuf);
		mutex_unlock(&dev->object_name_lock);
		goto out;
	}

out_have_obj:
	/*
	 * If we've exported this buffer then cheat and add it to the import list
	 * so we get the correct handle back. We must do this under the
	 * protection of dev->object_name_lock to ensure that a racing gem close
	 * ioctl doesn't fail to remove this buffer handle from the cache.
	 */
	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dmabuf, handle);
	mutex_unlock(&dev->object_name_lock);
	if (ret)
		goto fail_put_dmabuf;

out_have_handle:
	ret = dma_buf_fd(dmabuf, flags);
	/*
	 * We must _not_ remove the buffer from the handle cache since the newly
	 * created dma buf is already linked in the global obj->dma_buf pointer,
	 * and that is invariant as long as a userspace gem handle exists.
	 * Closing the handle will clean out the cache anyway, so we don't leak.
	 */
	if (ret < 0) {
		goto fail_put_dmabuf;
	} else {
		*prime_fd = ret;
		ret = 0;
	}

	goto out;

fail_put_dmabuf:
	dma_buf_put(dmabuf);
out:
	drm_gem_object_put(obj);
out_unlock:
	mutex_unlock(&file_priv->prime.lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);

int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	/* check flags are valid */
	if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
		return -EINVAL;

	if (dev->driver->prime_handle_to_fd) {
		return dev->driver->prime_handle_to_fd(dev, file_priv,
						       args->handle, args->flags,
						       &args->fd);
	}
	return drm_gem_prime_handle_to_fd(dev, file_priv, args->handle,
					  args->flags, &args->fd);
}

/**
 * DOC: PRIME Helpers
 *
 * Drivers can implement &drm_gem_object_funcs.export and
 * &drm_driver.gem_prime_import in terms of simpler APIs by using the helper
 * functions drm_gem_prime_export() and drm_gem_prime_import(). These functions
 * implement dma-buf support in terms of some lower-level helpers, which are
 * again exported for drivers to use individually:
 *
 * Exporting buffers
 * ~~~~~~~~~~~~~~~~~
 *
 * Optional pinning of buffers is handled at dma-buf attach and detach time in
 * drm_gem_map_attach() and drm_gem_map_detach(). Backing storage itself is
 * handled by drm_gem_map_dma_buf() and drm_gem_unmap_dma_buf(), which rely on
 * &drm_gem_object_funcs.get_sg_table. If &drm_gem_object_funcs.get_sg_table is
 * unimplemented, exports into another device are rejected.
 *
 * For kernel-internal access there's drm_gem_dmabuf_vmap() and
 * drm_gem_dmabuf_vunmap(). Userspace mmap support is provided by
 * drm_gem_dmabuf_mmap().
 *
 * Note that these export helpers can only be used if the underlying backing
 * storage is fully coherent and either permanently pinned, or it is safe to
 * pin it indefinitely.
 *
 * FIXME: The underlying helper functions are named rather inconsistently.
 *
 * Importing buffers
 * ~~~~~~~~~~~~~~~~~
 *
 * Importing dma-bufs using drm_gem_prime_import() relies on
 * &drm_driver.gem_prime_import_sg_table.
 *
 * Note that similarly to the export helpers this permanently pins the
 * underlying backing storage, which is OK for scanout but not the best option
 * for sharing lots of buffers for rendering.
 */

/**
 * drm_gem_map_attach - dma_buf attach implementation for GEM
 * @dma_buf: buffer to attach device to
 * @attach: buffer attachment data
 *
 * Calls &drm_gem_object_funcs.pin for device specific handling. This can be
 * used as the &dma_buf_ops.attach callback. Must be used together with
 * drm_gem_map_detach().
 *
 * Returns 0 on success, negative error code on failure.
 */
int drm_gem_map_attach(struct dma_buf *dma_buf,
		       struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;

	/*
	 * drm_gem_map_dma_buf() requires obj->get_sg_table(), but drivers
	 * that implement their own ->map_dma_buf() do not.
	 */
	if (dma_buf->ops->map_dma_buf == drm_gem_map_dma_buf &&
	    !obj->funcs->get_sg_table)
		return -ENOSYS;

	return drm_gem_pin(obj);
}
EXPORT_SYMBOL(drm_gem_map_attach);

/**
 * drm_gem_map_detach - dma_buf detach implementation for GEM
 * @dma_buf: buffer to detach from
 * @attach: attachment to be detached
 *
 * Calls &drm_gem_object_funcs.unpin for device specific handling. Cleans up
 * &dma_buf_attachment from drm_gem_map_attach(). This can be used as the
 * &dma_buf_ops.detach callback.
 */
void drm_gem_map_detach(struct dma_buf *dma_buf,
			struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;

	drm_gem_unpin(obj);
}
EXPORT_SYMBOL(drm_gem_map_detach);

/**
 * drm_gem_map_dma_buf - map_dma_buf implementation for GEM
 * @attach: attachment whose scatterlist is to be returned
 * @dir: direction of DMA transfer
 *
 * Calls &drm_gem_object_funcs.get_sg_table and then maps the scatterlist. This
 * can be used as the &dma_buf_ops.map_dma_buf callback. Should be used together
 * with drm_gem_unmap_dma_buf().
 *
 * Returns: sg_table containing the scatterlist to be returned; returns ERR_PTR
 * on error. May return -EINTR if it is interrupted by a signal.
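 *
 * As a rough sketch of how a driver rolling its own &dma_buf_ops might wire
 * up these helpers (the foo_ name is hypothetical; the generic
 * drm_gem_prime_dmabuf_ops table further down in this file is assembled in
 * essentially the same way)::
 *
 *     static const struct dma_buf_ops foo_prime_dmabuf_ops = {
 *             .attach = drm_gem_map_attach,
 *             .detach = drm_gem_map_detach,
 *             .map_dma_buf = drm_gem_map_dma_buf,
 *             .unmap_dma_buf = drm_gem_unmap_dma_buf,
 *             .release = drm_gem_dmabuf_release,
 *             .mmap = drm_gem_dmabuf_mmap,
 *             .vmap = drm_gem_dmabuf_vmap,
 *             .vunmap = drm_gem_dmabuf_vunmap,
 *     };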
 */
struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
				     enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct sg_table *sgt;
	int ret;

	if (WARN_ON(dir == DMA_NONE))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(!obj->funcs->get_sg_table))
		return ERR_PTR(-ENOSYS);

	sgt = obj->funcs->get_sg_table(obj);
	if (IS_ERR(sgt))
		return sgt;

	ret = dma_map_sgtable(attach->dev, sgt, dir,
			      DMA_ATTR_SKIP_CPU_SYNC);
	if (ret) {
		sg_free_table(sgt);
		kfree(sgt);
		sgt = ERR_PTR(ret);
	}

	return sgt;
}
EXPORT_SYMBOL(drm_gem_map_dma_buf);

/**
 * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM
 * @attach: attachment to unmap buffer from
 * @sgt: scatterlist info of the buffer to unmap
 * @dir: direction of DMA transfer
 *
 * This can be used as the &dma_buf_ops.unmap_dma_buf callback.
 */
void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
			   struct sg_table *sgt,
			   enum dma_data_direction dir)
{
	if (!sgt)
		return;

	dma_unmap_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sgt);
	kfree(sgt);
}
EXPORT_SYMBOL(drm_gem_unmap_dma_buf);

/**
 * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
 * @dma_buf: buffer to be mapped
 * @map: the virtual address of the buffer
 *
 * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
 * callback. Calls into &drm_gem_object_funcs.vmap for device specific handling.
 * The kernel virtual address is returned in map.
 *
 * Returns 0 on success or a negative errno code otherwise.
 */
int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct iosys_map *map)
{
	struct drm_gem_object *obj = dma_buf->priv;

	return drm_gem_vmap(obj, map);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vmap);

/**
 * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
 * @dma_buf: buffer to be unmapped
 * @map: the virtual address of the buffer
 *
 * Releases a kernel virtual mapping. This can be used as the
 * &dma_buf_ops.vunmap callback. Calls into &drm_gem_object_funcs.vunmap for
 * device specific handling.
 */
void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map)
{
	struct drm_gem_object *obj = dma_buf->priv;

	drm_gem_vunmap(obj, map);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);

/**
 * drm_gem_prime_mmap - PRIME mmap function for GEM drivers
 * @obj: GEM object
 * @vma: Virtual address range
 *
 * This function sets up a userspace mapping for PRIME exported buffers using
 * the same codepath that is used for regular GEM buffer mapping on the DRM fd.
 * The fake GEM offset is added to vma->vm_pgoff and &drm_driver->fops->mmap is
 * called to set up the mapping.
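 *
 * As an illustrative sketch of typical use (the foo_ name is hypothetical), a
 * driver with its own &dma_buf_ops can simply forward its mmap callback here,
 * which is all drm_gem_dmabuf_mmap() below does::
 *
 *     static int foo_dmabuf_mmap(struct dma_buf *dma_buf,
 *                                struct vm_area_struct *vma)
 *     {
 *             struct drm_gem_object *obj = dma_buf->priv;
 *
 *             return drm_gem_prime_mmap(obj, vma);
 *     }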
 */
int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct drm_file *priv;
	struct file *fil;
	int ret;

	/* Add the fake offset */
	vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);

	if (obj->funcs && obj->funcs->mmap) {
		vma->vm_ops = obj->funcs->vm_ops;

		drm_gem_object_get(obj);
		ret = obj->funcs->mmap(obj, vma);
		if (ret) {
			drm_gem_object_put(obj);
			return ret;
		}
		vma->vm_private_data = obj;
		return 0;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	fil = kzalloc(sizeof(*fil), GFP_KERNEL);
	if (!priv || !fil) {
		ret = -ENOMEM;
		goto out;
	}

	/* Used by drm_gem_mmap() to lookup the GEM object */
	priv->minor = obj->dev->primary;
	fil->private_data = priv;

	ret = drm_vma_node_allow(&obj->vma_node, priv);
	if (ret)
		goto out;

	ret = obj->dev->driver->fops->mmap(fil, vma);

	drm_vma_node_revoke(&obj->vma_node, priv);
out:
	kfree(priv);
	kfree(fil);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_mmap);

/**
 * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM
 * @dma_buf: buffer to be mapped
 * @vma: virtual address range
 *
 * Provides memory mapping for the buffer. This can be used as the
 * &dma_buf_ops.mmap callback. It just forwards to drm_gem_prime_mmap().
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = dma_buf->priv;

	return drm_gem_prime_mmap(obj, vma);
}
EXPORT_SYMBOL(drm_gem_dmabuf_mmap);

static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
	.cache_sgt_mapping = true,
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};

/**
 * drm_prime_pages_to_sg - converts a page array into an sg list
 * @dev: DRM device
 * @pages: pointer to the array of page pointers to convert
 * @nr_pages: length of the page vector
 *
 * This helper creates an sg table object from a set of pages. The driver is
 * responsible for mapping the pages into the importer's address space for use
 * with dma_buf itself.
 *
 * This is useful for implementing &drm_gem_object_funcs.get_sg_table.
 */
struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
				       struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sg;
	size_t max_segment = 0;
	int err;

	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);

	if (dev)
		max_segment = dma_max_mapping_size(dev->dev);
	if (max_segment == 0)
		max_segment = UINT_MAX;
	err = sg_alloc_table_from_pages_segment(sg, pages, nr_pages, 0,
						(unsigned long)nr_pages << PAGE_SHIFT,
						max_segment, GFP_KERNEL);
	if (err) {
		kfree(sg);
		sg = ERR_PTR(err);
	}
	return sg;
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);

/**
 * drm_prime_get_contiguous_size - returns the contiguous size of the buffer
 * @sgt: sg_table describing the buffer to check
 *
 * This helper calculates the contiguous size in the DMA address space
 * of the buffer described by the provided sg_table.
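 *
 * As an illustrative sketch (not taken from a driver in this tree), an import
 * helper that requires physically contiguous memory can use this to reject
 * scattered buffers::
 *
 *     if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
 *             return ERR_PTR(-EINVAL);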
 *
 * This is useful for implementing &drm_driver.gem_prime_import_sg_table.
 */
unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt)
{
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	struct scatterlist *sg;
	unsigned long size = 0;
	int i;

	for_each_sgtable_dma_sg(sgt, sg, i) {
		unsigned int len = sg_dma_len(sg);

		if (!len)
			break;
		if (sg_dma_address(sg) != expected)
			break;
		expected += len;
		size += len;
	}
	return size;
}
EXPORT_SYMBOL(drm_prime_get_contiguous_size);

/**
 * drm_gem_prime_export - helper library implementation of the export callback
 * @obj: GEM object to export
 * @flags: flags like DRM_CLOEXEC and DRM_RDWR
 *
 * This is the implementation of the &drm_gem_object_funcs.export callback for
 * GEM drivers using the PRIME helpers. It is used as the default in
 * drm_gem_prime_handle_to_fd().
 */
struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
				     int flags)
{
	struct drm_device *dev = obj->dev;
	struct dma_buf_export_info exp_info = {
		.exp_name = KBUILD_MODNAME, /* white lie for debug */
		.owner = dev->driver->fops->owner,
		.ops = &drm_gem_prime_dmabuf_ops,
		.size = obj->size,
		.flags = flags,
		.priv = obj,
		.resv = obj->resv,
	};

	return drm_gem_dmabuf_export(dev, &exp_info);
}
EXPORT_SYMBOL(drm_gem_prime_export);

/**
 * drm_gem_prime_import_dev - core implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 * @attach_dev: struct device to dma_buf attach
 *
 * This is the core of drm_gem_prime_import(). It's designed to be called by
 * drivers that want to use a different device structure than &drm_device.dev
 * for attaching via dma_buf. This function calls
 * &drm_driver.gem_prime_import_sg_table internally.
 *
 * Drivers must arrange to call drm_prime_gem_destroy() from their
 * &drm_gem_object_funcs.free hook when using this function.
 */
struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
						struct dma_buf *dma_buf,
						struct device *attach_dev)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct drm_gem_object *obj;
	int ret;

	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	if (!dev->driver->gem_prime_import_sg_table)
		return ERR_PTR(-EINVAL);

	attach = dma_buf_attach(dma_buf, attach_dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;
	obj->resv = dma_buf->resv;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import_dev);

/**
 * drm_gem_prime_import - helper library implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 *
 * This is the implementation of the gem_prime_import function for GEM drivers
 * using the PRIME helpers. Drivers can use this as their
 * &drm_driver.gem_prime_import implementation. It is used as the default
 * implementation in drm_gem_prime_fd_to_handle().
 *
 * Drivers must arrange to call drm_prime_gem_destroy() from their
 * &drm_gem_object_funcs.free hook when using this function.
 */
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	return drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
}
EXPORT_SYMBOL(drm_gem_prime_import);

/**
 * drm_prime_sg_to_page_array - convert an sg table into a page array
 * @sgt: scatter-gather table to convert
 * @pages: array of page pointers to store the pages in
 * @max_entries: size of the passed-in array
 *
 * Exports an sg table into an array of pages.
 *
 * This function is deprecated and its use is strongly discouraged. The page
 * array is only useful for page faults and those can corrupt fields in the
 * struct page if they are not handled by the exporting driver.
 */
int __deprecated drm_prime_sg_to_page_array(struct sg_table *sgt,
					    struct page **pages,
					    int max_entries)
{
	struct sg_page_iter page_iter;
	struct page **p = pages;

	for_each_sgtable_page(sgt, &page_iter, 0) {
		if (WARN_ON(p - pages >= max_entries))
			return -1;
		*p++ = sg_page_iter_page(&page_iter);
	}
	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_array);

/**
 * drm_prime_sg_to_dma_addr_array - convert an sg table into a dma addr array
 * @sgt: scatter-gather table to convert
 * @addrs: array to store the dma bus address of each page
 * @max_entries: size of both the passed-in arrays
 *
 * Exports an sg table into an array of addresses.
 *
 * Drivers should use this in their &drm_driver.gem_prime_import_sg_table
 * implementation.
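 *
 * As an illustrative sketch (the names are hypothetical, not from this file),
 * a &drm_driver.gem_prime_import_sg_table implementation might fill a
 * per-object DMA address array with it::
 *
 *     unsigned int npages = size >> PAGE_SHIFT;
 *     dma_addr_t *addrs;
 *
 *     addrs = kvmalloc_array(npages, sizeof(*addrs), GFP_KERNEL);
 *     if (!addrs)
 *             return ERR_PTR(-ENOMEM);
 *     if (drm_prime_sg_to_dma_addr_array(sgt, addrs, npages) < 0)
 *             return ERR_PTR(-EINVAL);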
 */
int drm_prime_sg_to_dma_addr_array(struct sg_table *sgt, dma_addr_t *addrs,
				   int max_entries)
{
	struct sg_dma_page_iter dma_iter;
	dma_addr_t *a = addrs;

	for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
		if (WARN_ON(a - addrs >= max_entries))
			return -1;
		*a++ = sg_page_iter_dma_address(&dma_iter);
	}
	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_dma_addr_array);

/**
 * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
 * @obj: GEM object which was created from a dma-buf
 * @sg: the sg-table which was pinned at import time
 *
 * This is the cleanup function which GEM drivers need to call when they use
 * drm_gem_prime_import() or drm_gem_prime_import_dev() to import dma-bufs.
 */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	attach = obj->import_attach;
	if (sg)
		dma_buf_unmap_attachment_unlocked(attach, sg, DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	/* remove the reference */
	dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);
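
/*
 * Illustrative sketch (the foo_ names are hypothetical, not drivers in this
 * tree): a driver importing via drm_gem_prime_import() typically calls
 * drm_prime_gem_destroy() from its &drm_gem_object_funcs.free hook, roughly
 * as follows:
 *
 *     static void foo_gem_free_object(struct drm_gem_object *obj)
 *     {
 *             struct foo_gem_object *bo = to_foo_gem_object(obj);
 *
 *             if (obj->import_attach)
 *                     drm_prime_gem_destroy(obj, bo->sgt);
 *
 *             drm_gem_object_release(obj);
 *             kfree(bo);
 *     }
 */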