/*
 * Copyright © 2012 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 *
 */

#include <linux/export.h>
#include <linux/dma-buf.h>
#include <linux/rbtree.h>
#include <drm/drmP.h>
#include <drm/drm_gem.h>

#include "drm_internal.h"

/*
 * DMA-BUF/GEM Object references and lifetime overview:
 *
 * On the export the dma_buf holds a reference to the exporting GEM
 * object. It takes this reference in handle_to_fd_ioctl, when it
 * first calls .prime_export and stores the exporting GEM object in
 * the dma_buf priv. This reference is released when the dma_buf
 * object goes away in the driver .release function.
 *
 * On the import the importing GEM object holds a reference to the
 * dma_buf (which in turn holds a ref to the exporting GEM object).
 * It takes that reference in the fd_to_handle ioctl.
 * It calls dma_buf_get, creates an attachment to it and stores the
 * attachment in the GEM object. The attachment is destroyed when the
 * imported object is destroyed; at that point we remove the attachment
 * and drop the reference to the dma_buf.
 *
 * Thus the chain of references always flows in one direction
 * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
 *
 * Self-importing: if userspace is using PRIME as a replacement for flink
 * then it will get a fd->handle request for a GEM object that it created.
 * Drivers should detect this situation and return back the gem object
 * from the dma-buf private. Prime will do this automatically for drivers that
 * use the drm_gem_prime_{import,export} helpers.
 */

struct drm_prime_member {
	struct dma_buf *dma_buf;
	uint32_t handle;

	struct rb_node dmabuf_rb;
	struct rb_node handle_rb;
};

struct drm_prime_attachment {
	struct sg_table *sgt;
	enum dma_data_direction dir;
};

static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
				    struct dma_buf *dma_buf, uint32_t handle)
{
	struct drm_prime_member *member;
	struct rb_node **p, *rb;

	member = kmalloc(sizeof(*member), GFP_KERNEL);
	if (!member)
		return -ENOMEM;

	get_dma_buf(dma_buf);
	member->dma_buf = dma_buf;
	member->handle = handle;

	rb = NULL;
	p = &prime_fpriv->dmabufs.rb_node;
	while (*p) {
		struct drm_prime_member *pos;

		rb = *p;
		pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (dma_buf > pos->dma_buf)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&member->dmabuf_rb, rb, p);
	rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs);

	rb = NULL;
	p = &prime_fpriv->handles.rb_node;
	while (*p) {
		struct drm_prime_member *pos;

		rb = *p;
		pos = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (handle > pos->handle)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&member->handle_rb, rb, p);
	rb_insert_color(&member->handle_rb, &prime_fpriv->handles);

	return 0;
}

static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
						      uint32_t handle)
{
	struct rb_node *rb;

	rb = prime_fpriv->handles.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (member->handle == handle)
			return member->dma_buf;
		else if (member->handle < handle)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
				       struct dma_buf *dma_buf,
				       uint32_t *handle)
{
	struct rb_node *rb;

	rb = prime_fpriv->dmabufs.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (member->dma_buf == dma_buf) {
			*handle = member->handle;
			return 0;
		} else if (member->dma_buf < dma_buf) {
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	}

	return -ENOENT;
}

static int drm_gem_map_attach(struct dma_buf *dma_buf,
			      struct device *target_dev,
			      struct dma_buf_attachment *attach)
{
	struct drm_prime_attachment *prime_attach;
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
	if (!prime_attach)
		return -ENOMEM;

	prime_attach->dir = DMA_NONE;
	attach->priv = prime_attach;

	if (!dev->driver->gem_prime_pin)
		return 0;

	return dev->driver->gem_prime_pin(obj);
}

static void drm_gem_map_detach(struct dma_buf *dma_buf,
			       struct dma_buf_attachment *attach)
{
	struct drm_prime_attachment *prime_attach = attach->priv;
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;
	struct sg_table *sgt;

	if (dev->driver->gem_prime_unpin)
		dev->driver->gem_prime_unpin(obj);

	if (!prime_attach)
		return;

	sgt = prime_attach->sgt;
	if (sgt) {
		if (prime_attach->dir != DMA_NONE)
			dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
				     prime_attach->dir);
		sg_free_table(sgt);
	}

	kfree(sgt);
	kfree(prime_attach);
	attach->priv = NULL;
}

void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
					struct dma_buf *dma_buf)
{
	struct rb_node *rb;

	rb = prime_fpriv->dmabufs.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (member->dma_buf == dma_buf) {
			rb_erase(&member->handle_rb, &prime_fpriv->handles);
			rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);

			dma_buf_put(dma_buf);
			kfree(member);
			return;
		} else if (member->dma_buf < dma_buf) {
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	}
}

static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
					    enum dma_data_direction dir)
{
	struct drm_prime_attachment *prime_attach = attach->priv;
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct sg_table *sgt;

	if (WARN_ON(dir == DMA_NONE || !prime_attach))
		return ERR_PTR(-EINVAL);

	/* return the cached mapping when possible */
	if (prime_attach->dir == dir)
		return prime_attach->sgt;

	/*
	 * two mappings with different directions for the same attachment are
	 * not allowed
	 */
	if (WARN_ON(prime_attach->dir != DMA_NONE))
		return ERR_PTR(-EBUSY);

	sgt = obj->dev->driver->gem_prime_get_sg_table(obj);

	if (!IS_ERR(sgt)) {
		if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
			sg_free_table(sgt);
			kfree(sgt);
			sgt = ERR_PTR(-ENOMEM);
		} else {
			prime_attach->sgt = sgt;
			prime_attach->dir = dir;
		}
	}

	return sgt;
}

static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
				  struct sg_table *sgt,
				  enum dma_data_direction dir)
{
	/* nothing to be done here */
}

/**
 * drm_gem_dmabuf_export - dma_buf export implementation for GEM
 * @dev: parent device for the exported dmabuf
 * @exp_info: the export information used by dma_buf_export()
 *
 * This wraps dma_buf_export() for use by generic GEM drivers that are using
 * drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take
 * a reference to the drm_device which is released by drm_gem_dmabuf_release().
 *
 * Returns the new dmabuf.
 */
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
				      struct dma_buf_export_info *exp_info)
{
	struct dma_buf *dma_buf;

	dma_buf = dma_buf_export(exp_info);
	if (!IS_ERR(dma_buf))
		drm_dev_ref(dev);

	return dma_buf;
}
EXPORT_SYMBOL(drm_gem_dmabuf_export);
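/*
 * Example: a driver that builds its own &dma_buf_export_info (for instance to
 * plug in its own &dma_buf_ops) can still call drm_gem_dmabuf_export() so the
 * drm_device reference is handled for it. This is only an illustrative
 * sketch; the "foo" names below are hypothetical and not part of this file:
 *
 *	struct dma_buf *foo_gem_prime_export(struct drm_device *dev,
 *					     struct drm_gem_object *obj,
 *					     int flags)
 *	{
 *		struct dma_buf_export_info exp_info = {
 *			.exp_name = KBUILD_MODNAME,
 *			.owner = dev->driver->fops->owner,
 *			.ops = &foo_dmabuf_ops,
 *			.size = obj->size,
 *			.flags = flags,
 *			.priv = obj,
 *		};
 *
 *		return drm_gem_dmabuf_export(dev, &exp_info);
 *	}
 */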
/**
 * drm_gem_dmabuf_release - dma_buf release implementation for GEM
 * @dma_buf: buffer to be released
 *
 * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
 * must use this in their dma_buf ops structure as the release callback.
 * drm_gem_dmabuf_release() should be used in conjunction with
 * drm_gem_dmabuf_export().
 */
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	/* drop the reference on the export fd holds */
	drm_gem_object_unreference_unlocked(obj);

	drm_dev_unref(dev);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);

static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	return dev->driver->gem_prime_vmap(obj);
}

static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	dev->driver->gem_prime_vunmap(obj, vaddr);
}

static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
					unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
					 unsigned long page_num, void *addr)
{

}

static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
				 unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
				  unsigned long page_num, void *addr)
{

}

static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
			       struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	if (!dev->driver->gem_prime_mmap)
		return -ENOSYS;

	return dev->driver->gem_prime_mmap(obj, vma);
}

static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap = drm_gem_dmabuf_kmap,
	.kmap_atomic = drm_gem_dmabuf_kmap_atomic,
	.kunmap = drm_gem_dmabuf_kunmap,
	.kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};

/**
 * DOC: PRIME Helpers
 *
 * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
 * simpler APIs by using the helper functions @drm_gem_prime_export and
 * @drm_gem_prime_import. These functions implement dma-buf support in terms of
 * six lower-level driver callbacks:
 *
 * Export callbacks:
 *
 * * @gem_prime_pin (optional): prepare a GEM object for exporting
 * * @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
 * * @gem_prime_vmap: vmap a buffer exported by your driver
 * * @gem_prime_vunmap: vunmap a buffer exported by your driver
 * * @gem_prime_mmap (optional): mmap a buffer exported by your driver
 *
 * Import callback:
 *
 * * @gem_prime_import_sg_table (import): produce a GEM object from another
 *   driver's scatter/gather table
 */
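/*
 * Example: the usual way to wire the above together is through the driver's
 * &struct drm_driver. A minimal sketch, assuming a hypothetical "foo" driver
 * that provides foo_gem_prime_get_sg_table(), foo_gem_prime_import_sg_table(),
 * foo_gem_prime_vmap(), foo_gem_prime_vunmap() and foo_gem_prime_mmap()
 * (none of which exist in this file):
 *
 *	static struct drm_driver foo_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_PRIME,
 *		.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 *		.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 *		.gem_prime_export = drm_gem_prime_export,
 *		.gem_prime_import = drm_gem_prime_import,
 *		.gem_prime_get_sg_table = foo_gem_prime_get_sg_table,
 *		.gem_prime_import_sg_table = foo_gem_prime_import_sg_table,
 *		.gem_prime_vmap = foo_gem_prime_vmap,
 *		.gem_prime_vunmap = foo_gem_prime_vunmap,
 *		.gem_prime_mmap = foo_gem_prime_mmap,
 *	};
 */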
/**
 * drm_gem_prime_export - helper library implementation of the export callback
 * @dev: drm_device to export from
 * @obj: GEM object to export
 * @flags: flags like DRM_CLOEXEC and DRM_RDWR
 *
 * This is the implementation of the gem_prime_export functions for GEM drivers
 * using the PRIME helpers.
 */
struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
				     struct drm_gem_object *obj,
				     int flags)
{
	struct dma_buf_export_info exp_info = {
		.exp_name = KBUILD_MODNAME, /* white lie for debug */
		.owner = dev->driver->fops->owner,
		.ops = &drm_gem_prime_dmabuf_ops,
		.size = obj->size,
		.flags = flags,
		.priv = obj,
	};

	if (dev->driver->gem_prime_res_obj)
		exp_info.resv = dev->driver->gem_prime_res_obj(obj);

	return drm_gem_dmabuf_export(dev, &exp_info);
}
EXPORT_SYMBOL(drm_gem_prime_export);

static struct dma_buf *export_and_register_object(struct drm_device *dev,
						  struct drm_gem_object *obj,
						  uint32_t flags)
{
	struct dma_buf *dmabuf;

	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		dmabuf = ERR_PTR(-ENOENT);
		return dmabuf;
	}

	dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		return dmabuf;
	}

	/*
	 * Note that callers do not need to clean up the export cache
	 * since the check for obj->handle_count guarantees that someone
	 * will clean it up.
	 */
	obj->dma_buf = dmabuf;
	get_dma_buf(obj->dma_buf);
	/* Grab a new ref since the caller's ref is now used by the dma-buf */
	drm_gem_object_reference(obj);

	return dmabuf;
}

/**
 * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @handle: buffer handle to export
 * @flags: flags like DRM_CLOEXEC
 * @prime_fd: pointer to storage for the fd id of the created dma-buf
 *
 * This is the PRIME export function which must be used by GEM drivers to
 * ensure correct lifetime management of the underlying GEM object. The actual
 * exporting from GEM object to a dma-buf is done through the gem_prime_export
 * driver callback.
 */
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
			       struct drm_file *file_priv, uint32_t handle,
			       uint32_t flags,
			       int *prime_fd)
{
	struct drm_gem_object *obj;
	int ret = 0;
	struct dma_buf *dmabuf;

	mutex_lock(&file_priv->prime.lock);
	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		ret = -ENOENT;
		goto out_unlock;
	}

	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
	if (dmabuf) {
		get_dma_buf(dmabuf);
		goto out_have_handle;
	}

	mutex_lock(&dev->object_name_lock);
	/* re-export the original imported object */
	if (obj->import_attach) {
		dmabuf = obj->import_attach->dmabuf;
		get_dma_buf(dmabuf);
		goto out_have_obj;
	}

	if (obj->dma_buf) {
		get_dma_buf(obj->dma_buf);
		dmabuf = obj->dma_buf;
		goto out_have_obj;
	}

	dmabuf = export_and_register_object(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		ret = PTR_ERR(dmabuf);
		mutex_unlock(&dev->object_name_lock);
		goto out;
	}

out_have_obj:
	/*
	 * If we've exported this buffer then cheat and add it to the import list
	 * so we get the correct handle back.
	 * We must do this under the
	 * protection of dev->object_name_lock to ensure that a racing gem close
	 * ioctl doesn't miss removing this buffer handle from the cache.
	 */
	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dmabuf, handle);
	mutex_unlock(&dev->object_name_lock);
	if (ret)
		goto fail_put_dmabuf;

out_have_handle:
	ret = dma_buf_fd(dmabuf, flags);
	/*
	 * We must _not_ remove the buffer from the handle cache since the newly
	 * created dma buf is already linked in the global obj->dma_buf pointer,
	 * and that is invariant as long as a userspace gem handle exists.
	 * Closing the handle will clean out the cache anyway, so we don't leak.
	 */
	if (ret < 0) {
		goto fail_put_dmabuf;
	} else {
		*prime_fd = ret;
		ret = 0;
	}

	goto out;

fail_put_dmabuf:
	dma_buf_put(dmabuf);
out:
	drm_gem_object_unreference_unlocked(obj);
out_unlock:
	mutex_unlock(&file_priv->prime.lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);

/**
 * drm_gem_prime_import - helper library implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 *
 * This is the implementation of the gem_prime_import functions for GEM drivers
 * using the PRIME helpers.
 */
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct drm_gem_object *obj;
	int ret;

	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem increases
			 * the refcount on the gem itself instead of the f_count
			 * of the dmabuf.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	if (!dev->driver->gem_prime_import_sg_table)
		return ERR_PTR(-EINVAL);

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import);
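/*
 * Example: drm_gem_prime_import() above hands the attachment and sg table to
 * the driver's @gem_prime_import_sg_table callback. A minimal sketch of such
 * a callback, assuming a hypothetical "foo" driver whose foo_bo_create()
 * builds a buffer object around the pages described by the sg table (all
 * names illustrative, not part of this file):
 *
 *	static struct drm_gem_object *
 *	foo_gem_prime_import_sg_table(struct drm_device *dev,
 *				      struct dma_buf_attachment *attach,
 *				      struct sg_table *sgt)
 *	{
 *		struct foo_bo *bo;
 *
 *		bo = foo_bo_create(dev, attach->dmabuf->size, sgt);
 *		if (IS_ERR(bo))
 *			return ERR_CAST(bo);
 *
 *		return &bo->base;
 *	}
 */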
/**
 * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
 * @dev: dev to import the buffer into
 * @file_priv: drm file-private structure
 * @prime_fd: fd id of the dma-buf which should be imported
 * @handle: pointer to storage for the handle of the imported buffer object
 *
 * This is the PRIME import function which must be used by GEM drivers to
 * ensure correct lifetime management of the underlying GEM object. The actual
 * importing of the GEM object from the dma-buf is done through the
 * gem_prime_import driver callback.
 */
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
			       struct drm_file *file_priv, int prime_fd,
			       uint32_t *handle)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	int ret;

	dma_buf = dma_buf_get(prime_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	mutex_lock(&file_priv->prime.lock);

	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
					  dma_buf, handle);
	if (ret == 0)
		goto out_put;

	/* never seen this one, need to import */
	mutex_lock(&dev->object_name_lock);
	obj = dev->driver->gem_prime_import(dev, dma_buf);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	if (obj->dma_buf) {
		WARN_ON(obj->dma_buf != dma_buf);
	} else {
		obj->dma_buf = dma_buf;
		get_dma_buf(dma_buf);
	}

	/* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		goto out_put;

	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dma_buf, *handle);
	mutex_unlock(&file_priv->prime.lock);
	if (ret)
		goto fail;

	dma_buf_put(dma_buf);

	return 0;

fail:
	/* hmm, if driver attached, we are relying on the free-object path
	 * to detach.. which seems ok..
	 */
	drm_gem_handle_delete(file_priv, *handle);
	dma_buf_put(dma_buf);
	return ret;

out_unlock:
	mutex_unlock(&dev->object_name_lock);
out_put:
	mutex_unlock(&file_priv->prime.lock);
	dma_buf_put(dma_buf);
	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);

int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EINVAL;

	if (!dev->driver->prime_handle_to_fd)
		return -ENOSYS;

	/* check flags are valid */
	if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
		return -EINVAL;

	return dev->driver->prime_handle_to_fd(dev, file_priv,
			args->handle, args->flags, &args->fd);
}

int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EINVAL;

	if (!dev->driver->prime_fd_to_handle)
		return -ENOSYS;

	return dev->driver->prime_fd_to_handle(dev, file_priv,
			args->fd, &args->handle);
}
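/*
 * Example: from userspace the two wrappers above are reached through
 * DRM_IOCTL_PRIME_HANDLE_TO_FD and DRM_IOCTL_PRIME_FD_TO_HANDLE. A rough,
 * illustrative sketch (error handling omitted; "drm_fd" and "handle" are
 * assumed to come from the caller):
 *
 *	struct drm_prime_handle args = {
 *		.handle = handle,
 *		.flags = DRM_CLOEXEC,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
 *
 * args.fd then holds the dma-buf fd, ready to be passed to another process
 * or device and turned back into a handle with DRM_IOCTL_PRIME_FD_TO_HANDLE.
 */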
/**
 * drm_prime_pages_to_sg - converts a page array into an sg list
 * @pages: pointer to the array of page pointers to convert
 * @nr_pages: length of the page vector
 *
 * This helper creates an sg table object from a set of pages; the driver is
 * responsible for mapping the pages into the importer's address space for use
 * with dma_buf itself.
 */
struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sg = NULL;
	int ret;

	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!sg) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
				nr_pages << PAGE_SHIFT, GFP_KERNEL);
	if (ret)
		goto out;

	return sg;
out:
	kfree(sg);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);

/**
 * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
 * @sgt: scatter-gather table to convert
 * @pages: array of page pointers to store the page array in
 * @addrs: optional array to store the dma bus address of each page
 * @max_pages: size of both the passed-in arrays
 *
 * Exports an sg table into an array of pages and addresses. This is currently
 * required by the TTM driver in order to do correct fault handling.
 */
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
				     dma_addr_t *addrs, int max_pages)
{
	unsigned count;
	struct scatterlist *sg;
	struct page *page;
	u32 len;
	int pg_index;
	dma_addr_t addr;

	pg_index = 0;
	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
		len = sg->length;
		page = sg_page(sg);
		addr = sg_dma_address(sg);

		while (len > 0) {
			if (WARN_ON(pg_index >= max_pages))
				return -1;
			pages[pg_index] = page;
			if (addrs)
				addrs[pg_index] = addr;

			page++;
			addr += PAGE_SIZE;
			len -= PAGE_SIZE;
			pg_index++;
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);

/**
 * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
 * @obj: GEM object which was created from a dma-buf
 * @sg: the sg-table which was pinned at import time
 *
 * This is the cleanup function which GEM drivers need to call when they use
 * drm_gem_prime_import() to import dma-bufs.
 */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	attach = obj->import_attach;
	if (sg)
		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	/* remove the reference */
	dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);

void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
	mutex_init(&prime_fpriv->lock);
	prime_fpriv->dmabufs = RB_ROOT;
	prime_fpriv->handles = RB_ROOT;
}

void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
	/* by now drm_gem_release should've made sure the list is empty */
	WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
}
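/*
 * Example: drm_prime_gem_destroy() is typically called from a driver's
 * free-object path for objects that carry an import attachment. A minimal
 * sketch, assuming a hypothetical "foo" driver whose foo_bo embeds a
 * drm_gem_object and keeps the imported sg table in bo->sgt (all names
 * illustrative, not part of this file):
 *
 *	static void foo_gem_free_object(struct drm_gem_object *obj)
 *	{
 *		struct foo_bo *bo = to_foo_bo(obj);
 *
 *		if (obj->import_attach)
 *			drm_prime_gem_destroy(obj, bo->sgt);
 *		else
 *			foo_bo_release_pages(bo);
 *
 *		drm_gem_object_release(obj);
 *		kfree(bo);
 *	}
 */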