// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 */

static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_free_object,
	.print_info = drm_gem_shmem_print_info,
	.pin = drm_gem_shmem_pin,
	.unpin = drm_gem_shmem_unpin,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = drm_gem_shmem_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.mmap = drm_gem_shmem_mmap,
};

static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object)
		obj = dev->driver->gem_create_object(dev, size);
	else
		obj = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	shmem = to_drm_gem_shmem_obj(obj);

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private) {
		drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
	} else {
		ret = drm_gem_object_init(dev, obj, size);
	}
	if (ret)
		goto err_free;

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	mutex_init(&shmem->pages_lock);
	mutex_init(&shmem->vmap_lock);
	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
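/*
 * Example: a minimal sketch of object creation in a driver; the "mydrv"
 * names are hypothetical and only the drm_gem_shmem_*() call is provided
 * by this library. Note that failure is signalled through ERR_PTR(),
 * not NULL:
 *
 *	struct drm_gem_shmem_object *shmem;
 *
 *	shmem = drm_gem_shmem_create(mydrv->drm, size);
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);
 */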
/**
 * drm_gem_shmem_free_object - Free resources associated with a shmem GEM object
 * @obj: GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself. It should be used to implement
 * &drm_gem_object_funcs.free.
 */
void drm_gem_shmem_free_object(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->vmap_use_count);

	if (obj->import_attach) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages(shmem);
	}

	WARN_ON(shmem->pages_use_count);

	drm_gem_object_release(obj);
	mutex_destroy(&shmem->pages_lock);
	mutex_destroy(&shmem->vmap_lock);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free_object);

static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	if (shmem->pages_use_count++ > 0)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	shmem->pages = pages;

	return 0;
}

/*
 * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure that backing pages exist for the shmem GEM object
 * and increases the use count.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	int ret;

	WARN_ON(shmem->base.import_attach);

	ret = mutex_lock_interruptible(&shmem->pages_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_get_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_get_pages);

static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->pages_use_count))
		return;

	if (--shmem->pages_use_count > 0)
		return;

	drm_gem_put_pages(obj, shmem->pages,
			  shmem->pages_mark_dirty_on_put,
			  shmem->pages_mark_accessed_on_put);
	shmem->pages = NULL;
}

/*
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when the
 * count drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	mutex_lock(&shmem->pages_lock);
	drm_gem_shmem_put_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);
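/*
 * Example: drm_gem_shmem_get_pages() and drm_gem_shmem_put_pages() are
 * reference counted and must be balanced. A hedged sketch of a driver
 * touching the backing pages directly; mydrv_use_pages() is hypothetical:
 *
 *	int ret;
 *
 *	ret = drm_gem_shmem_get_pages(shmem);
 *	if (ret)
 *		return ret;
 *
 *	mydrv_use_pages(shmem->pages, shmem->base.size >> PAGE_SHIFT);
 *
 *	drm_gem_shmem_put_pages(shmem);
 */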
/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported. It should only be used to implement
 * &drm_gem_object_funcs.pin.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->base.import_attach);

	return drm_gem_shmem_get_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory. It should only be used to implement &drm_gem_object_funcs.unpin.
 */
void drm_gem_shmem_unpin(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->base.import_attach);

	drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);

static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	if (shmem->vmap_use_count++ > 0) {
		dma_buf_map_set_vaddr(map, shmem->vaddr);
		return 0;
	}

	if (obj->import_attach) {
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
		if (!ret) {
			if (WARN_ON(map->is_iomem)) {
				ret = -EIO;
				goto err_put_pages;
			}
			shmem->vaddr = map->vaddr;
		}
	} else {
		pgprot_t prot = PAGE_KERNEL;

		ret = drm_gem_shmem_get_pages(shmem);
		if (ret)
			goto err_zero_use;

		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr)
			ret = -ENOMEM;
		else
			dma_buf_map_set_vaddr(map, shmem->vaddr);
	}

	if (ret) {
		DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret);
		goto err_put_pages;
	}

	return 0;

err_put_pages:
	if (!obj->import_attach)
		drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ret;
}

/*
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
 *       store.
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object.
 *
 * This function can be used to implement &drm_gem_object_funcs.vmap. But it can
 * also be called by drivers directly, in which case it will hide the
 * differences between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	int ret;

	ret = mutex_lock_interruptible(&shmem->vmap_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_vmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);
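/*
 * Example: a minimal sketch of filling a buffer through a kernel virtual
 * mapping and releasing it again with drm_gem_shmem_vunmap() below; "src"
 * and "len" are hypothetical, and len must not exceed the object size:
 *
 *	struct dma_buf_map map;
 *	int ret;
 *
 *	ret = drm_gem_shmem_vmap(obj, &map);
 *	if (ret)
 *		return ret;
 *
 *	memcpy(map.vaddr, src, len);
 *
 *	drm_gem_shmem_vunmap(obj, &map);
 */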
static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
					struct dma_buf_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->vmap_use_count))
		return;

	if (--shmem->vmap_use_count > 0)
		return;

	if (obj->import_attach)
		dma_buf_vunmap(obj->import_attach->dmabuf, map);
	else
		vunmap(shmem->vaddr);

	shmem->vaddr = NULL;
	drm_gem_shmem_put_pages(shmem);
}

/*
 * drm_gem_shmem_vunmap - Unmap a virtual mapping of a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
 * zero.
 *
 * This function can be used to implement &drm_gem_object_funcs.vunmap. But it can
 * also be called by drivers directly, in which case it will hide the
 * differences between dma-buf imported and natively allocated objects.
 */
void drm_gem_shmem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	mutex_lock(&shmem->vmap_lock);
	drm_gem_shmem_vunmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);

struct drm_gem_shmem_object *
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return shmem;

	/*
	 * Allocate an id in the idr table where the obj is registered;
	 * the handle holds the id that userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);
	if (ret)
		return ERR_PTR(ret);

	return shmem;
}
EXPORT_SYMBOL(drm_gem_shmem_create_with_handle);
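/*
 * Example: a hedged sketch of a driver-private buffer-creation ioctl built
 * on drm_gem_shmem_create_with_handle(); struct mydrv_create_args is
 * hypothetical:
 *
 *	struct mydrv_create_args *args = data;
 *	struct drm_gem_shmem_object *shmem;
 *
 *	shmem = drm_gem_shmem_create_with_handle(file_priv, dev, args->size,
 *						 &args->handle);
 *	return PTR_ERR_OR_ZERO(shmem);
 */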
/* Update madvise status, returns true if not purged, else false. */
int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	mutex_lock(&shmem->pages_lock);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	mutex_unlock(&shmem->pages_lock);

	return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);

void drm_gem_shmem_purge_locked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(!drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sgtable(obj->dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages_locked(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
				 0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge_locked);

bool drm_gem_shmem_purge(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	if (!mutex_trylock(&shmem->pages_lock))
		return false;
	drm_gem_shmem_purge_locked(obj);
	mutex_unlock(&shmem->pages_lock);

	return true;
}
EXPORT_SYMBOL(drm_gem_shmem_purge);
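/*
 * Example: a hedged sketch of a driver shrinker scan callback built on the
 * madvise state and purge helpers. The shrink_list and its locking are
 * hypothetical driver state; objects would be placed on such a list via
 * &drm_gem_shmem_object.madv_list once userspace marks them DONTNEED:
 *
 *	list_for_each_entry(shmem, &mydrv->shrink_list, madv_list) {
 *		if (drm_gem_shmem_is_purgeable(shmem) &&
 *		    drm_gem_shmem_purge(&shmem->base))
 *			freed += shmem->base.size >> PAGE_SHIFT;
 *	}
 */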
/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer, rounding its row size
 * up to an integer number of bytes. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct drm_gem_shmem_object *shmem;

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = args->pitch * args->height;
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = args->pitch * args->height;
	}

	shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);

	return PTR_ERR_OR_ZERO(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
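/*
 * Example: for a 1024x768 dumb buffer at 32 bpp (e.g. XRGB8888), the
 * computation above yields
 *
 *	min_pitch = DIV_ROUND_UP(1024 * 32, 8) = 4096 bytes
 *	size      = 4096 * 768 = 3145728 bytes
 *
 * and the allocation path then rounds the size up to a whole number of
 * pages.
 */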
static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	struct page *page;

	if (vmf->pgoff >= num_pages || WARN_ON_ONCE(!shmem->pages))
		return VM_FAULT_SIGBUS;

	page = shmem->pages[vmf->pgoff];

	return vmf_insert_page(vma, vmf->address, page);
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	int ret;

	WARN_ON(shmem->base.import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	WARN_ON_ONCE(ret != 0);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_put_pages(shmem);
	drm_gem_vm_close(vma);
}

static const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @obj: gem object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects. Drivers which employ the shmem helpers should
 * use this function as their &drm_gem_object_funcs.mmap handler.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	/* Remove the fake offset */
	vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);

	if (obj->import_attach) {
		/* Drop the reference drm_gem_mmap_obj() acquired. */
		drm_gem_object_put(obj);
		vma->vm_private_data = NULL;

		return dma_buf_mmap(obj->dma_buf, vma, 0);
	}

	shmem = to_drm_gem_shmem_obj(obj);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret) {
		drm_gem_vm_close(vma);
		return ret;
	}

	vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	vma->vm_ops = &drm_gem_shmem_vm_ops;

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @p: DRM printer
 * @indent: Tab indentation level
 * @obj: GEM object
 *
 * This implements the &drm_gem_object_funcs.print_info callback.
 */
void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
			      const struct drm_gem_object *obj)
{
	const struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API. Drivers should not call this function
 * directly; instead it should only be used as an implementation for
 * &drm_gem_object_funcs.get_sg_table.
 *
 * Drivers that need to acquire a scatter/gather table for objects should call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or NULL on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->base.import_attach);

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *                               scatter/gather table for a shmem GEM object.
 * @obj: GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and an sg
 * table is created.
 *
 * This is the main function for drivers to get at backing storage, and it hides
 * any difference between dma-buf imported and natively allocated objects.
 * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj)
{
	int ret;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	WARN_ON(obj->import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(&shmem->base);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_free_sgt;

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages(shmem);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
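/*
 * Example: a hedged sketch of a driver programming DMA from the table
 * returned by drm_gem_shmem_get_pages_sgt(); mydrv_program_dma() is
 * hypothetical:
 *
 *	struct sg_table *sgt;
 *	struct scatterlist *sg;
 *	unsigned int i;
 *
 *	sgt = drm_gem_shmem_get_pages_sgt(obj);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *
 *	for_each_sgtable_dma_sg(sgt, sg, i)
 *		mydrv_program_dma(sg_dma_address(sg), sg_dma_len(sg));
 */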
/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                                       another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	DRM_DEBUG_PRIME("size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
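/*
 * Example: drivers typically hook up this import callback, together with
 * the other shmem defaults, through the DRM_GEM_SHMEM_DRIVER_OPS macro
 * from <drm/drm_gem_shmem_helper.h>. A sketch with a hypothetical driver:
 *
 *	static struct drm_driver mydrv_driver = {
 *		...
 *		DRM_GEM_SHMEM_DRIVER_OPS,
 *	};
 */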