// SPDX-License-Identifier: GPL-2.0

/*
 * Xen dma-buf functionality for gntdev.
 *
 * DMA buffer implementation is based on drivers/gpu/drm/drm_prime.c.
 *
 * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/dma-buf.h>
#include <linux/dma-direct.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/module.h>

#include <xen/xen.h>
#include <xen/grant_table.h>

#include "gntdev-common.h"
#include "gntdev-dmabuf.h"

MODULE_IMPORT_NS("DMA_BUF");

/*
 * Per-buffer state for one gntdev DMA buffer. The same structure backs
 * both directions: a buffer exported from grant references (u.exp) or a
 * buffer imported from a foreign dma-buf exporter (u.imp) — the two sets
 * of fields are never used at the same time, hence the union.
 */
struct gntdev_dmabuf {
	/* Owning per-file dma-buf context (list heads and lock live there). */
	struct gntdev_dmabuf_priv *priv;
	/* The dma-buf object this entry wraps. */
	struct dma_buf *dmabuf;
	/* Link into priv->exp_list or priv->imp_list. */
	struct list_head next;
	/* File descriptor the buffer was exported/imported with. */
	int fd;

	union {
		struct {
			/* Exported buffers are reference counted. */
			struct kref refcount;

			/* Grant device context that owns the mapping. */
			struct gntdev_priv *priv;
			/* Grant map providing the backing pages. */
			struct gntdev_grant_map *map;
		} exp;
		struct {
			/* Granted references of the imported buffer. */
			grant_ref_t *refs;
			/* Scatter-gather table of the imported buffer. */
			struct sg_table *sgt;
			/* dma-buf attachment of the imported buffer. */
			struct dma_buf_attachment *attach;
		} imp;
	} u;

	/* Number of pages this buffer has. */
	int nr_pages;
	/* Pages of this buffer (only for dma-buf export). */
	struct page **pages;
};

/*
 * One waiter blocked in GNTDEV_IOCTL_DMABUF_EXP_WAIT_RELEASED: completed
 * by dmabuf_exp_release() when the matching exported buffer goes away.
 */
struct gntdev_dmabuf_wait_obj {
	/* Link into gntdev_dmabuf_priv.exp_wait_list. */
	struct list_head next;
	/* Buffer whose release this waiter is waiting for. */
	struct gntdev_dmabuf *gntdev_dmabuf;
	/* Signalled from dmabuf_exp_wait_obj_signal(). */
	struct completion completion;
};

/* Per-attachment state for the exporter side: cached mapping + direction. */
struct gntdev_dmabuf_attachment {
	struct sg_table *sgt;
	enum dma_data_direction dir;
};

/* Per-open-file dma-buf context, allocated by gntdev_dmabuf_init(). */
struct gntdev_dmabuf_priv {
	/* List of exported DMA buffers. */
	struct list_head exp_list;
	/* List of wait objects. */
	struct list_head exp_wait_list;
	/* List of imported DMA buffers. */
	struct list_head imp_list;
	/* This is the lock which protects dma_buf_xxx lists. */
	struct mutex lock;
	/*
	 * We reference this file while exporting dma-bufs, so
	 * the grant device context is not destroyed while there are
	 * external users alive.
	 */
	struct file *filp;
};

/* DMA buffer export support. */

/* Implementation of wait for exported DMA buffer to be released. */

static void dmabuf_exp_release(struct kref *kref);

/*
 * Allocate a wait object for @gntdev_dmabuf and queue it on the wait list.
 * Drops the reference the caller took via dmabuf_exp_wait_obj_get_dmabuf()
 * so that the buffer's final release can fire and wake the waiter.
 */
static struct gntdev_dmabuf_wait_obj *
dmabuf_exp_wait_obj_new(struct gntdev_dmabuf_priv *priv,
			struct gntdev_dmabuf *gntdev_dmabuf)
{
	struct gntdev_dmabuf_wait_obj *obj;

	obj = kzalloc_obj(*obj);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	init_completion(&obj->completion);
	obj->gntdev_dmabuf = gntdev_dmabuf;

	mutex_lock(&priv->lock);
	list_add(&obj->next, &priv->exp_wait_list);
	/* Put our reference and wait for gntdev_dmabuf's release to fire. */
	kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
	mutex_unlock(&priv->lock);
	return obj;
}

/* Unlink a wait object from the wait list (under the lock) and free it. */
static void dmabuf_exp_wait_obj_free(struct gntdev_dmabuf_priv *priv,
				     struct gntdev_dmabuf_wait_obj *obj)
{
	mutex_lock(&priv->lock);
	list_del(&obj->next);
	mutex_unlock(&priv->lock);
	kfree(obj);
}

/*
 * Block until the wait object's completion fires or @wait_to_ms elapses.
 * Returns 0 on wake-up, -ETIMEDOUT on timeout.
 */
static int dmabuf_exp_wait_obj_wait(struct gntdev_dmabuf_wait_obj *obj,
				    u32 wait_to_ms)
{
	if (wait_for_completion_timeout(&obj->completion,
			msecs_to_jiffies(wait_to_ms)) <= 0)
		return -ETIMEDOUT;

	return 0;
}

/*
 * Wake every waiter registered for @gntdev_dmabuf. Called from
 * dmabuf_exp_release(), which runs with priv->lock already held
 * (every kref_put of u.exp.refcount in this file is under that lock),
 * so the wait list is traversed without taking it again here.
 */
static void dmabuf_exp_wait_obj_signal(struct gntdev_dmabuf_priv *priv,
				       struct gntdev_dmabuf *gntdev_dmabuf)
{
	struct gntdev_dmabuf_wait_obj *obj;

	list_for_each_entry(obj, &priv->exp_wait_list, next)
		if (obj->gntdev_dmabuf == gntdev_dmabuf) {
			pr_debug("Found gntdev_dmabuf in the wait list, wake\n");
			complete_all(&obj->completion);
			break;
		}
}

/*
 * Look up an exported buffer by fd; on success take an extra reference
 * (dropped later in dmabuf_exp_wait_obj_new()). Returns ERR_PTR(-ENOENT)
 * when no exported buffer with that fd exists.
 */
static struct gntdev_dmabuf *
dmabuf_exp_wait_obj_get_dmabuf(struct gntdev_dmabuf_priv *priv, int fd)
{
	struct gntdev_dmabuf *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

	mutex_lock(&priv->lock);
	list_for_each_entry(gntdev_dmabuf, &priv->exp_list, next)
		if (gntdev_dmabuf->fd == fd) {
			pr_debug("Found gntdev_dmabuf in the wait list\n");
			kref_get(&gntdev_dmabuf->u.exp.refcount);
			ret = gntdev_dmabuf;
			break;
		}
	mutex_unlock(&priv->lock);
	return ret;
}

/*
 * Wait (up to @wait_to_ms milliseconds) until the exported buffer with
 * file descriptor @fd is released. -ENOENT from the lookup means the
 * buffer is already gone (or the fd was wrong), which the ioctl caller
 * treats accordingly.
 *
 * NOTE(review): if dmabuf_exp_wait_obj_new() fails with -ENOMEM, the
 * reference taken by dmabuf_exp_wait_obj_get_dmabuf() above is never
 * dropped — confirm whether this leak on the allocation-failure path
 * is intentional.
 */
static int dmabuf_exp_wait_released(struct gntdev_dmabuf_priv *priv, int fd,
				    int wait_to_ms)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct gntdev_dmabuf_wait_obj *obj;
	int ret;

	pr_debug("Will wait for dma-buf with fd %d\n", fd);
	/*
	 * Try to find the DMA buffer: if not found means that
	 * either the buffer has already been released or file descriptor
	 * provided is wrong.
	 */
	gntdev_dmabuf = dmabuf_exp_wait_obj_get_dmabuf(priv, fd);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	/*
	 * gntdev_dmabuf still exists and is reference count locked by us now,
	 * so prepare to wait: allocate wait object and add it to the wait list,
	 * so we can find it on release.
	 */
	obj = dmabuf_exp_wait_obj_new(priv, gntdev_dmabuf);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = dmabuf_exp_wait_obj_wait(obj, wait_to_ms);
	dmabuf_exp_wait_obj_free(priv, obj);
	return ret;
}

/* DMA buffer export support.
 */

/*
 * Wrap an array of pages into a freshly allocated sg table.
 * Returns the sg table on success or an ERR_PTR; on failure nothing
 * is left allocated.
 */
static struct sg_table *
dmabuf_pages_to_sgt(struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sgt;
	int ret;

	sgt = kmalloc_obj(*sgt);
	if (!sgt) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0,
					nr_pages << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret)
		goto out;

	return sgt;

out:
	kfree(sgt);
	return ERR_PTR(ret);
}

/*
 * dma_buf_ops.attach: allocate per-attachment state with no mapping yet
 * (dir == DMA_NONE marks "not mapped").
 */
static int dmabuf_exp_ops_attach(struct dma_buf *dma_buf,
				 struct dma_buf_attachment *attach)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach;

	gntdev_dmabuf_attach = kzalloc_obj(*gntdev_dmabuf_attach);
	if (!gntdev_dmabuf_attach)
		return -ENOMEM;

	gntdev_dmabuf_attach->dir = DMA_NONE;
	attach->priv = gntdev_dmabuf_attach;
	return 0;
}

/*
 * dma_buf_ops.detach: tear down the cached mapping (if any) and free the
 * per-attachment state. The actual DMA unmap happens here rather than in
 * unmap_dma_buf, because the mapping is cached across map/unmap calls.
 */
static void dmabuf_exp_ops_detach(struct dma_buf *dma_buf,
				  struct dma_buf_attachment *attach)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;

	if (gntdev_dmabuf_attach) {
		struct sg_table *sgt = gntdev_dmabuf_attach->sgt;

		if (sgt) {
			if (gntdev_dmabuf_attach->dir != DMA_NONE)
				dma_unmap_sgtable(attach->dev, sgt,
						  gntdev_dmabuf_attach->dir,
						  DMA_ATTR_SKIP_CPU_SYNC);
			sg_free_table(sgt);
		}

		kfree(sgt);
		kfree(gntdev_dmabuf_attach);
		attach->priv = NULL;
	}
}

/*
 * dma_buf_ops.map_dma_buf: build (or reuse) the sg table for the buffer's
 * pages and map it for @attach->dev. Only one mapping direction per
 * attachment is supported; a second map with a different direction fails
 * with -EBUSY.
 */
static struct sg_table *
dmabuf_exp_ops_map_dma_buf(struct dma_buf_attachment *attach,
			   enum dma_data_direction dir)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;
	struct gntdev_dmabuf *gntdev_dmabuf = attach->dmabuf->priv;
	struct sg_table *sgt;

	pr_debug("Mapping %d pages for dev %p\n", gntdev_dmabuf->nr_pages,
		 attach->dev);

	if (dir == DMA_NONE || !gntdev_dmabuf_attach)
		return ERR_PTR(-EINVAL);

	/* Return the cached mapping when possible. */
	if (gntdev_dmabuf_attach->dir == dir)
		return gntdev_dmabuf_attach->sgt;

	/*
	 * Two mappings with different directions for the same attachment are
	 * not allowed.
	 */
	if (gntdev_dmabuf_attach->dir != DMA_NONE)
		return ERR_PTR(-EBUSY);

	sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages,
				  gntdev_dmabuf->nr_pages);
	if (!IS_ERR(sgt)) {
		if (dma_map_sgtable(attach->dev, sgt, dir,
				    DMA_ATTR_SKIP_CPU_SYNC)) {
			sg_free_table(sgt);
			kfree(sgt);
			sgt = ERR_PTR(-ENOMEM);
		} else {
			/* Cache for subsequent maps with the same direction. */
			gntdev_dmabuf_attach->sgt = sgt;
			gntdev_dmabuf_attach->dir = dir;
		}
	}
	if (IS_ERR(sgt))
		pr_debug("Failed to map sg table for dev %p\n", attach->dev);
	return sgt;
}

static void dmabuf_exp_ops_unmap_dma_buf(struct dma_buf_attachment *attach,
					 struct sg_table *sgt,
					 enum dma_data_direction dir)
{
	/* Not implemented. The unmap is done at dmabuf_exp_ops_detach(). */
}

/*
 * Final release of an exported buffer: wake any waiters, unlink it from
 * the export list and drop the file reference taken at export time.
 * Runs with priv->lock held by the kref_put() caller.
 */
static void dmabuf_exp_release(struct kref *kref)
{
	struct gntdev_dmabuf *gntdev_dmabuf =
		container_of(kref, struct gntdev_dmabuf, u.exp.refcount);

	dmabuf_exp_wait_obj_signal(gntdev_dmabuf->priv, gntdev_dmabuf);
	list_del(&gntdev_dmabuf->next);
	fput(gntdev_dmabuf->priv->filp);
	kfree(gntdev_dmabuf);
}

/*
 * Unlink @map from the grant device context and drop its reference.
 * gntdev_put_map() is told the map is already removed from the list.
 */
static void dmabuf_exp_remove_map(struct gntdev_priv *priv,
				  struct gntdev_grant_map *map)
{
	mutex_lock(&priv->lock);
	list_del(&map->next);
	gntdev_put_map(NULL /* already removed */, map);
	mutex_unlock(&priv->lock);
}

/*
 * dma_buf_ops.release: the last dma-buf user is gone — tear down the
 * grant map and drop the export refcount (which may free the buffer
 * via dmabuf_exp_release()).
 */
static void dmabuf_exp_ops_release(struct dma_buf *dma_buf)
{
	struct gntdev_dmabuf *gntdev_dmabuf = dma_buf->priv;
	struct gntdev_dmabuf_priv *priv = gntdev_dmabuf->priv;

	dmabuf_exp_remove_map(gntdev_dmabuf->u.exp.priv,
			      gntdev_dmabuf->u.exp.map);
	mutex_lock(&priv->lock);
	kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
	mutex_unlock(&priv->lock);
}

static const struct dma_buf_ops dmabuf_exp_ops = {
	.attach = dmabuf_exp_ops_attach,
	.detach = dmabuf_exp_ops_detach,
	.map_dma_buf = dmabuf_exp_ops_map_dma_buf,
	.unmap_dma_buf = dmabuf_exp_ops_unmap_dma_buf,
	.release = dmabuf_exp_ops_release,
};

/* Argument bundle for dmabuf_exp_from_pages(); fd is the output. */
struct gntdev_dmabuf_export_args {
	struct gntdev_priv *priv;
	struct gntdev_grant_map *map;
	struct gntdev_dmabuf_priv *dmabuf_priv;
	struct device *dev;
	int count;
	struct page **pages;
	u32 fd;
};

/*
 * Export @args->pages as a dma-buf and install it into a new file
 * descriptor, returned via @args->fd. Cleanup on the error paths is
 * scope-based: the fd is released by the get_unused_fd CLASS and the
 * gntdev_dmabuf by __free(kfree), unless ownership is transferred with
 * take_fd()/no_free_ptr() on the success path at the end.
 */
static int dmabuf_exp_from_pages(struct gntdev_dmabuf_export_args *args)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct gntdev_dmabuf *gntdev_dmabuf __free(kfree) = NULL;
	CLASS(get_unused_fd, ret)(O_CLOEXEC);

	if (ret < 0)
		return ret;

	gntdev_dmabuf = kzalloc_obj(*gntdev_dmabuf);
	if (!gntdev_dmabuf)
		return -ENOMEM;

	kref_init(&gntdev_dmabuf->u.exp.refcount);

	gntdev_dmabuf->priv = args->dmabuf_priv;
	gntdev_dmabuf->nr_pages = args->count;
	gntdev_dmabuf->pages = args->pages;
	gntdev_dmabuf->u.exp.priv = args->priv;
	gntdev_dmabuf->u.exp.map = args->map;

	exp_info.exp_name = KBUILD_MODNAME;
	if (args->dev->driver && args->dev->driver->owner)
		exp_info.owner = args->dev->driver->owner;
	else
		exp_info.owner = THIS_MODULE;
	exp_info.ops = &dmabuf_exp_ops;
	exp_info.size = args->count << PAGE_SHIFT;
	exp_info.flags = O_RDWR;
	exp_info.priv = gntdev_dmabuf;

	gntdev_dmabuf->dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(gntdev_dmabuf->dmabuf))
		return PTR_ERR(gntdev_dmabuf->dmabuf);

	gntdev_dmabuf->fd = ret;
	args->fd = ret;

	pr_debug("Exporting DMA buffer with fd %d\n", ret);

	/* Keep the gntdev file alive while external users hold the buffer. */
	get_file(gntdev_dmabuf->priv->filp);
	mutex_lock(&args->dmabuf_priv->lock);
	list_add(&gntdev_dmabuf->next, &args->dmabuf_priv->exp_list);
	mutex_unlock(&args->dmabuf_priv->lock);

	/* Transfer ownership of both the fd and gntdev_dmabuf to the fd table. */
	fd_install(take_fd(ret), no_free_ptr(gntdev_dmabuf)->dmabuf->file);
	return 0;
}

/*
 * Allocate a grant map suitable for export: validates the page count and
 * rejects mutually exclusive WC + COHERENT dma flags.
 */
static struct gntdev_grant_map *
dmabuf_exp_alloc_backing_storage(struct gntdev_priv *priv, int dmabuf_flags,
				 int count)
{
	struct gntdev_grant_map *map;

	if (unlikely(gntdev_test_page_count(count)))
		return ERR_PTR(-EINVAL);

	if ((dmabuf_flags & GNTDEV_DMA_FLAG_WC) &&
	    (dmabuf_flags & GNTDEV_DMA_FLAG_COHERENT)) {
		pr_debug("Wrong dma-buf flags: 0x%x\n", dmabuf_flags);
		return ERR_PTR(-EINVAL);
	}

	map = gntdev_alloc_map(priv, count, dmabuf_flags);
	if (!map)
		return ERR_PTR(-ENOMEM);

	return map;
}

/*
 * Map @count grant references from @domid and export the resulting pages
 * as a dma-buf; the new fd is returned via @fd. On any failure after the
 * map has been published, dmabuf_exp_remove_map() tears it down.
 */
static int dmabuf_exp_from_refs(struct gntdev_priv *priv, int flags,
				int count, u32 domid, u32 *refs, u32 *fd)
{
	struct gntdev_grant_map *map;
	struct gntdev_dmabuf_export_args args;
	int i, ret;

	map = dmabuf_exp_alloc_backing_storage(priv, flags, count);
	if (IS_ERR(map))
		return PTR_ERR(map);

	for (i = 0; i < count; i++) {
		map->grants[i].domid = domid;
		map->grants[i].ref = refs[i];
	}

	mutex_lock(&priv->lock);
	gntdev_add_map(priv, map);
	mutex_unlock(&priv->lock);

	map->flags |= GNTMAP_host_map;
#if defined(CONFIG_X86)
	map->flags |= GNTMAP_device_map;
#endif

	ret = gntdev_map_grant_pages(map);
	if (ret < 0)
		goto out;

	args.priv = priv;
	args.map = map;
	args.dev = priv->dma_dev;
	args.dmabuf_priv = priv->dmabuf_priv;
	args.count = map->count;
	args.pages = map->pages;
	args.fd = -1; /* Shut up unnecessary gcc warning for i386 */

	ret = dmabuf_exp_from_pages(&args);
	if (ret < 0)
		goto out;

	*fd = args.fd;
	return 0;

out:
	dmabuf_exp_remove_map(priv, map);
	return ret;
}

/* DMA buffer import support.
 */

/*
 * Grant @domid access to @count frames (@gfns), storing the resulting
 * grant references in @refs. On failure all claimed references are
 * returned to the pool; any already-granted refs are cleaned up later by
 * dmabuf_imp_end_foreign_access() via the caller's unwind path.
 */
static int
dmabuf_imp_grant_foreign_access(unsigned long *gfns, u32 *refs,
				int count, int domid)
{
	grant_ref_t priv_gref_head;
	int i, ret;

	ret = gnttab_alloc_grant_references(count, &priv_gref_head);
	if (ret < 0) {
		pr_debug("Cannot allocate grant references, ret %d\n", ret);
		return ret;
	}

	for (i = 0; i < count; i++) {
		int cur_ref;

		cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
		if (cur_ref < 0) {
			ret = cur_ref;
			pr_debug("Cannot claim grant reference, ret %d\n", ret);
			goto out;
		}

		gnttab_grant_foreign_access_ref(cur_ref, domid,
						gfns[i], 0);
		refs[i] = cur_ref;
	}

	return 0;

out:
	gnttab_free_grant_references(priv_gref_head);
	return ret;
}

/*
 * End foreign access for every valid grant reference in @refs.
 * Entries still set to INVALID_GRANT_REF (never granted) are skipped.
 */
static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
{
	int i;

	for (i = 0; i < count; i++)
		if (refs[i] != INVALID_GRANT_REF)
			gnttab_end_foreign_access(refs[i], NULL);
}

/* Free an imported buffer's bookkeeping (refs array + the struct itself). */
static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
{
	kfree(gntdev_dmabuf->u.imp.refs);
	kfree(gntdev_dmabuf);
}

/*
 * Allocate import bookkeeping for @count pages, with every grant ref
 * pre-set to INVALID_GRANT_REF so partial-failure cleanup is safe.
 */
static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	int i;

	gntdev_dmabuf = kzalloc_obj(*gntdev_dmabuf);
	if (!gntdev_dmabuf)
		goto fail_no_free;

	gntdev_dmabuf->u.imp.refs = kzalloc_objs(gntdev_dmabuf->u.imp.refs[0],
						 count, GFP_KERNEL);
	if (!gntdev_dmabuf->u.imp.refs)
		goto fail;

	gntdev_dmabuf->nr_pages = count;

	for (i = 0; i < count; i++)
		gntdev_dmabuf->u.imp.refs[i] = INVALID_GRANT_REF;

	return gntdev_dmabuf;

fail:
	dmabuf_imp_free_storage(gntdev_dmabuf);
fail_no_free:
	return ERR_PTR(-ENOMEM);
}

/*
 * Import the dma-buf behind @fd: attach to it, map it for @dev, translate
 * its DMA addresses to gfns and grant @domid access to them. The buffer
 * must start at offset 0 and be exactly @count pages long. Returns the
 * new gntdev_dmabuf (linked on priv->imp_list) or an ERR_PTR; on failure
 * everything acquired so far is unwound in reverse order.
 */
static struct gntdev_dmabuf *
dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
		   int fd, int count, int domid)
{
	struct gntdev_dmabuf *gntdev_dmabuf, *ret;
	struct dma_buf *dma_buf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct sg_dma_page_iter sg_iter;
	unsigned long *gfns;
	int i;

	dma_buf = dma_buf_get(fd);
	if (IS_ERR(dma_buf))
		return ERR_CAST(dma_buf);

	gntdev_dmabuf = dmabuf_imp_alloc_storage(count);
	if (IS_ERR(gntdev_dmabuf)) {
		ret = gntdev_dmabuf;
		goto fail_put;
	}

	gntdev_dmabuf->priv = priv;
	gntdev_dmabuf->fd = fd;

	attach = dma_buf_attach(dma_buf, dev);
	if (IS_ERR(attach)) {
		ret = ERR_CAST(attach);
		goto fail_free_obj;
	}

	gntdev_dmabuf->u.imp.attach = attach;

	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = ERR_CAST(sgt);
		goto fail_detach;
	}

	/* Check that we have zero offset. */
	if (sgt->sgl->offset) {
		ret = ERR_PTR(-EINVAL);
		pr_debug("DMA buffer has %d bytes offset, user-space expects 0\n",
			 sgt->sgl->offset);
		goto fail_unmap;
	}

	/* Check number of pages that imported buffer has. */
	if (attach->dmabuf->size != gntdev_dmabuf->nr_pages << PAGE_SHIFT) {
		ret = ERR_PTR(-EINVAL);
		pr_debug("DMA buffer has %zu pages, user-space expects %d\n",
			 attach->dmabuf->size, gntdev_dmabuf->nr_pages);
		goto fail_unmap;
	}

	gntdev_dmabuf->u.imp.sgt = sgt;

	gfns = kcalloc(count, sizeof(*gfns), GFP_KERNEL);
	if (!gfns) {
		ret = ERR_PTR(-ENOMEM);
		goto fail_unmap;
	}

	/*
	 * Now convert sgt to array of gfns without accessing underlying pages.
	 * It is not allowed to access the underlying struct page of an sg table
	 * exported by DMA-buf, but since we deal with special Xen dma device here
	 * (not a normal physical one) look at the dma addresses in the sg table
	 * and then calculate gfns directly from them.
	 */
	i = 0;
	for_each_sgtable_dma_page(sgt, &sg_iter, 0) {
		dma_addr_t addr = sg_page_iter_dma_address(&sg_iter);
		unsigned long pfn = bfn_to_pfn(XEN_PFN_DOWN(dma_to_phys(dev, addr)));

		gfns[i++] = pfn_to_gfn(pfn);
	}

	ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gfns,
						      gntdev_dmabuf->u.imp.refs,
						      count, domid));
	kfree(gfns);
	if (IS_ERR(ret))
		goto fail_end_access;

	pr_debug("Imported DMA buffer with fd %d\n", fd);

	mutex_lock(&priv->lock);
	list_add(&gntdev_dmabuf->next, &priv->imp_list);
	mutex_unlock(&priv->lock);

	return gntdev_dmabuf;

fail_end_access:
	dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs, count);
fail_unmap:
	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
fail_free_obj:
	dmabuf_imp_free_storage(gntdev_dmabuf);
fail_put:
	dma_buf_put(dma_buf);
	return ret;
}

/*
 * Find the hyper dma-buf by its file descriptor and remove
 * it from the buffer's list.
 */
static struct gntdev_dmabuf *
dmabuf_imp_find_unlink(struct gntdev_dmabuf_priv *priv, int fd)
{
	struct gntdev_dmabuf *q, *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

	mutex_lock(&priv->lock);
	list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next) {
		if (gntdev_dmabuf->fd == fd) {
			pr_debug("Found gntdev_dmabuf in the import list\n");
			ret = gntdev_dmabuf;
			list_del(&gntdev_dmabuf->next);
			break;
		}
	}
	mutex_unlock(&priv->lock);
	return ret;
}

/*
 * Release an imported buffer identified by @fd: end foreign access,
 * unmap and detach from the exporter, drop the dma-buf reference and
 * free the bookkeeping. Returns -ENOENT if no such import exists.
 */
static int dmabuf_imp_release(struct gntdev_dmabuf_priv *priv, u32 fd)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	gntdev_dmabuf = dmabuf_imp_find_unlink(priv, fd);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	pr_debug("Releasing DMA buffer with fd %d\n", fd);

	dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs,
				      gntdev_dmabuf->nr_pages);

	attach = gntdev_dmabuf->u.imp.attach;

	if (gntdev_dmabuf->u.imp.sgt)
		dma_buf_unmap_attachment_unlocked(attach, gntdev_dmabuf->u.imp.sgt,
						  DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	dma_buf_put(dma_buf);

	dmabuf_imp_free_storage(gntdev_dmabuf);
	return 0;
}

/*
 * Release every remaining import. Called from gntdev_dmabuf_fini();
 * iterates imp_list without taking priv->lock — presumably safe because
 * no concurrent users remain at teardown time (NOTE(review): confirm).
 */
static void dmabuf_imp_release_all(struct gntdev_dmabuf_priv *priv)
{
	struct gntdev_dmabuf *q, *gntdev_dmabuf;

	list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next)
		dmabuf_imp_release(priv, gntdev_dmabuf->fd);
}

/* DMA buffer IOCTL support.
 */

/*
 * IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS handler: copy the grant refs from
 * user space, export them as a dma-buf and return the new fd in @u->fd.
 * Rejected outright in PV domains.
 *
 * NOTE(review): if copy_to_user() fails after a successful export, the
 * buffer stays exported while the caller gets -EFAULT and never learns
 * the fd — confirm this window is acceptable.
 */
long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv,
				       struct ioctl_gntdev_dmabuf_exp_from_refs __user *u)
{
	struct ioctl_gntdev_dmabuf_exp_from_refs op;
	u32 *refs;
	long ret;

	if (xen_pv_domain()) {
		pr_debug("Cannot provide dma-buf in a PV domain\n");
		return -EINVAL;
	}

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	if (unlikely(gntdev_test_page_count(op.count)))
		return -EINVAL;

	refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);
	if (!refs)
		return -ENOMEM;

	if (copy_from_user(refs, u->refs, sizeof(*refs) * op.count) != 0) {
		ret = -EFAULT;
		goto out;
	}

	ret = dmabuf_exp_from_refs(priv, op.flags, op.count,
				   op.domid, refs, &op.fd);
	if (ret)
		goto out;

	if (copy_to_user(u, &op, sizeof(op)) != 0)
		ret = -EFAULT;

out:
	kfree(refs);
	return ret;
}

/*
 * IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED handler: block until the
 * exported buffer with op.fd is released, or op.wait_to_ms expires.
 */
long gntdev_ioctl_dmabuf_exp_wait_released(struct gntdev_priv *priv,
					   struct ioctl_gntdev_dmabuf_exp_wait_released __user *u)
{
	struct ioctl_gntdev_dmabuf_exp_wait_released op;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	return dmabuf_exp_wait_released(priv->dmabuf_priv, op.fd,
					op.wait_to_ms);
}

/*
 * IOCTL_GNTDEV_DMABUF_IMP_TO_REFS handler: import the dma-buf behind
 * op.fd and copy the resulting grant refs back to user space. If that
 * final copy fails, the import is rolled back before returning -EFAULT.
 */
long gntdev_ioctl_dmabuf_imp_to_refs(struct gntdev_priv *priv,
				     struct ioctl_gntdev_dmabuf_imp_to_refs __user *u)
{
	struct ioctl_gntdev_dmabuf_imp_to_refs op;
	struct gntdev_dmabuf *gntdev_dmabuf;
	long ret;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	if (unlikely(gntdev_test_page_count(op.count)))
		return -EINVAL;

	gntdev_dmabuf = dmabuf_imp_to_refs(priv->dmabuf_priv,
					   priv->dma_dev, op.fd,
					   op.count, op.domid);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	if (copy_to_user(u->refs, gntdev_dmabuf->u.imp.refs,
			 sizeof(*u->refs) * op.count) != 0) {
		ret = -EFAULT;
		goto out_release;
	}
	return 0;

out_release:
	dmabuf_imp_release(priv->dmabuf_priv, op.fd);
	return ret;
}

/* IOCTL_GNTDEV_DMABUF_IMP_RELEASE handler: drop the import for op.fd. */
long gntdev_ioctl_dmabuf_imp_release(struct gntdev_priv *priv,
				     struct ioctl_gntdev_dmabuf_imp_release __user *u)
{
	struct ioctl_gntdev_dmabuf_imp_release op;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	return dmabuf_imp_release(priv->dmabuf_priv, op.fd);
}

/*
 * Allocate and initialise the per-file dma-buf context. @filp is stored
 * (not referenced here) so exports can pin the file via get_file() later.
 */
struct gntdev_dmabuf_priv *gntdev_dmabuf_init(struct file *filp)
{
	struct gntdev_dmabuf_priv *priv;

	priv = kzalloc_obj(*priv);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	mutex_init(&priv->lock);
	INIT_LIST_HEAD(&priv->exp_list);
	INIT_LIST_HEAD(&priv->exp_wait_list);
	INIT_LIST_HEAD(&priv->imp_list);

	priv->filp = filp;

	return priv;
}

/* Tear down the per-file context: release all imports, then free it. */
void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv)
{
	dmabuf_imp_release_all(priv);
	kfree(priv);
}