// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

MODULE_IMPORT_NS(DMA_BUF);

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 *
 * Functions that operate on the GEM object receive struct &drm_gem_shmem_object.
 * For GEM callback helpers in struct &drm_gem_object functions, see likewise
 * named functions with an _object_ infix (e.g., drm_gem_shmem_object_vmap() wraps
 * drm_gem_shmem_vmap()). These helpers perform the necessary type conversion.
 */

static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_object_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object) {
		obj = dev->driver->gem_create_object(dev, size);
		if (IS_ERR(obj))
			return ERR_CAST(obj);
		shmem = to_drm_gem_shmem_obj(obj);
	} else {
		shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
		if (!shmem)
			return ERR_PTR(-ENOMEM);
		obj = &shmem->base;
	}

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private) {
		drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
	} else {
		ret = drm_gem_object_init(dev, obj, size);
	}
	if (ret) {
		drm_gem_private_object_fini(obj);
		goto err_free;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}
/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
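
/*
 * Example usage (illustrative sketch only, not part of this helper library):
 * a hypothetical driver allocates a shmem-backed object, optionally opts in
 * to write-combine mappings, and drops its creation reference when done.
 * The example_* name is made up for illustration.
 */
static int __maybe_unused example_create_bo(struct drm_device *dev, size_t size)
{
	struct drm_gem_shmem_object *shmem;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	/* Optional: request write-combine kernel and userspace mappings
	 * (assumption: the driver wants WC access, e.g. for scanout buffers).
	 */
	shmem->map_wc = true;

	/* Drop the creation reference; a GEM handle or driver-held pointer
	 * would normally keep the object alive from here on.
	 */
	drm_gem_object_put(&shmem->base);

	return 0;
}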

/**
 * drm_gem_shmem_free - Free resources associated with a shmem GEM object
 * @shmem: shmem GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (obj->import_attach) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		dma_resv_lock(shmem->base.resv, NULL);

		drm_WARN_ON(obj->dev, shmem->vmap_use_count);

		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages(shmem);

		drm_WARN_ON(obj->dev, shmem->pages_use_count);

		dma_resv_unlock(shmem->base.resv);
	}

	drm_gem_object_release(obj);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);
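
/*
 * Example (sketch, hypothetical names): a driver-private buffer object that
 * embeds the shmem GEM object, plus the &drm_driver.gem_create_object hook
 * that allocates it. The base member must stay first so that the kfree()
 * in drm_gem_shmem_free() above frees the whole driver structure.
 */
struct example_bo {
	struct drm_gem_shmem_object base; /* must remain the first member */
	bool flushed;                     /* illustrative driver state */
};

static struct drm_gem_object *__maybe_unused
example_gem_create_object(struct drm_device *dev, size_t size)
{
	struct example_bo *bo;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	/* Leaving bo->base.base.funcs NULL makes __drm_gem_shmem_create()
	 * fall back to the default drm_gem_shmem_funcs.
	 */
	return &bo->base.base;
}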

static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	dma_resv_assert_held(shmem->base.resv);

	if (shmem->pages_use_count++ > 0)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
			    PTR_ERR(pages));
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	/*
	 * TODO: Allocating WC pages which are correctly flushed is only
	 * supported on x86. Ideal solution would be a GFP_WC flag, which also
	 * ttm_pool.c could use.
	 */
#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
#endif

	shmem->pages = pages;

	return 0;
}

/**
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when the
 * use count drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	dma_resv_assert_held(shmem->base.resv);

	if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
		return;

	if (--shmem->pages_use_count > 0)
		return;

#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif

	drm_gem_put_pages(obj, shmem->pages,
			  shmem->pages_mark_dirty_on_put,
			  shmem->pages_mark_accessed_on_put);
	shmem->pages = NULL;
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);

int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
{
	int ret;

	dma_resv_assert_held(shmem->base.resv);

	ret = drm_gem_shmem_get_pages(shmem);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_pin_locked);

void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem)
{
	dma_resv_assert_held(shmem->base.resv);

	drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin_locked);

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	drm_WARN_ON(obj->dev, obj->import_attach);

	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
	if (ret)
		return ret;
	ret = drm_gem_shmem_pin_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, obj->import_attach);

	dma_resv_lock(shmem->base.resv, NULL);
	drm_gem_shmem_unpin_locked(shmem);
	dma_resv_unlock(shmem->base.resv);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);
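
/*
 * Example (sketch): pinning a natively allocated object around a hypothetical
 * hardware operation so its pages cannot be swapped out while the device
 * uses them. The example_* name is illustrative only.
 */
static int __maybe_unused example_use_pinned(struct drm_gem_shmem_object *shmem)
{
	int ret;

	ret = drm_gem_shmem_pin(shmem);
	if (ret)
		return ret;

	/* ... program the hardware against shmem->pages here ... */

	drm_gem_shmem_unpin(shmem);

	return 0;
}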

/**
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
 *       store.
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object. It hides the differences
 * between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
		       struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	if (obj->import_attach) {
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
		if (!ret) {
			if (drm_WARN_ON(obj->dev, map->is_iomem)) {
				dma_buf_vunmap(obj->import_attach->dmabuf, map);
				return -EIO;
			}
		}
	} else {
		pgprot_t prot = PAGE_KERNEL;

		dma_resv_assert_held(shmem->base.resv);

		if (shmem->vmap_use_count++ > 0) {
			iosys_map_set_vaddr(map, shmem->vaddr);
			return 0;
		}

		ret = drm_gem_shmem_get_pages(shmem);
		if (ret)
			goto err_zero_use;

		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr)
			ret = -ENOMEM;
		else
			iosys_map_set_vaddr(map, shmem->vaddr);
	}

	if (ret) {
		drm_dbg_kms(obj->dev, "Failed to vmap pages, error %d\n", ret);
		goto err_put_pages;
	}

	return 0;

err_put_pages:
	if (!obj->import_attach)
		drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);

/**
 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops
 * to zero.
 *
 * This function hides the differences between dma-buf imported and natively
 * allocated objects.
 */
void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
			  struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	if (obj->import_attach) {
		dma_buf_vunmap(obj->import_attach->dmabuf, map);
	} else {
		dma_resv_assert_held(shmem->base.resv);

		if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
			return;

		if (--shmem->vmap_use_count > 0)
			return;

		vunmap(shmem->vaddr);
		drm_gem_shmem_put_pages(shmem);
	}

	shmem->vaddr = NULL;
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);

static int
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	/*
	 * Allocate an id in the idr table where the obj is registered;
	 * the handle holds the id that userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);

	return ret;
}
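
/*
 * Example (sketch): creating and tearing down a CPU mapping with struct
 * iosys_map. This assumes a natively allocated (non-imported) object, for
 * which the caller must hold the object's dma_resv lock across vmap/vunmap.
 * The example_* name and the memset payload are illustrative only.
 */
static int __maybe_unused example_cpu_fill(struct drm_gem_shmem_object *shmem,
					   u8 value)
{
	struct iosys_map map;
	int ret;

	dma_resv_lock(shmem->base.resv, NULL);

	ret = drm_gem_shmem_vmap(shmem, &map);
	if (ret)
		goto out_unlock;

	/* Fill the whole buffer through the kernel mapping. */
	iosys_map_memset(&map, 0, value, shmem->base.size);

	drm_gem_shmem_vunmap(shmem, &map);

out_unlock:
	dma_resv_unlock(shmem->base.resv);

	return ret;
}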

/* Update madvise status, returns true if not purged, else false. */
int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
{
	dma_resv_assert_held(shmem->base.resv);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);

void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct drm_device *dev = obj->dev;

	dma_resv_assert_held(shmem->base.resv);

	drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge);
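
/*
 * Example (sketch): a simplified shrinker-style helper that purges an object
 * userspace has marked as purgeable via madvise. How objects are tracked
 * (e.g. on shmem->madv_list) is driver policy; this only illustrates the
 * madvise/purge contract under the dma_resv lock. example_try_purge() is a
 * hypothetical name.
 */
static bool __maybe_unused example_try_purge(struct drm_gem_shmem_object *shmem)
{
	bool purged = false;

	dma_resv_lock(shmem->base.resv, NULL);

	/* Only purge objects userspace marked MADV_DONTNEED and that hold
	 * no vmap or dma-buf references.
	 */
	if (drm_gem_shmem_is_purgeable(shmem)) {
		drm_gem_shmem_purge(shmem);
		purged = true;
	}

	dma_resv_unlock(shmem->base.resv);

	return purged;
}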

/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = PAGE_ALIGN(args->pitch * args->height);
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = PAGE_ALIGN(args->pitch * args->height);
	}

	return drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
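
/*
 * Example (sketch): a driver with pitch restrictions adjusting the fields set
 * up by userspace before calling into the generic helper, as described in the
 * kernel-doc above. The 64-byte alignment is a hypothetical hardware
 * requirement; example_dumb_create() is an illustrative name.
 */
static int __maybe_unused example_dumb_create(struct drm_file *file,
					      struct drm_device *dev,
					      struct drm_mode_create_dumb *args)
{
	/* Hypothetical hardware requirement: pitches aligned to 64 bytes. */
	args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), 64);
	args->size = args->pitch * args->height;

	return drm_gem_shmem_dumb_create(file, dev, args);
}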

static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret;
	struct page *page;
	pgoff_t page_offset;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	dma_resv_lock(shmem->base.resv, NULL);

	if (page_offset >= num_pages ||
	    drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
	    shmem->madv < 0) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = shmem->pages[page_offset];

		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

	dma_resv_unlock(shmem->base.resv);

	return ret;
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_WARN_ON(obj->dev, obj->import_attach);

	dma_resv_lock(shmem->base.resv, NULL);

	/*
	 * We should have already pinned the pages when the buffer was first
	 * mmap'd, vm_open() just grabs an additional reference for the new
	 * mm the vma is getting copied into (ie. on fork()).
	 */
	if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
		shmem->pages_use_count++;

	dma_resv_unlock(shmem->base.resv);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	dma_resv_lock(shmem->base.resv, NULL);
	drm_gem_shmem_put_pages(shmem);
	dma_resv_unlock(shmem->base.resv);

	drm_gem_vm_close(vma);
}

const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @shmem: shmem GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (obj->import_attach) {
		/* Reset both vm_ops and vm_private_data, so we don't end up with
		 * vm_ops pointing to our implementation if the dma-buf backend
		 * doesn't set those fields.
		 */
		vma->vm_private_data = NULL;
		vma->vm_ops = NULL;

		ret = dma_buf_mmap(obj->dma_buf, vma, 0);

		/* Drop the reference drm_gem_mmap_obj() acquired. */
		if (!ret)
			drm_gem_object_put(obj);

		return ret;
	}

	dma_resv_lock(shmem->base.resv, NULL);
	ret = drm_gem_shmem_get_pages(shmem);
	dma_resv_unlock(shmem->base.resv);

	if (ret)
		return ret;

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
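
/*
 * Example (sketch): drivers normally reach drm_gem_shmem_mmap() through the
 * default GEM file operations. DEFINE_DRM_GEM_FOPS() installs drm_gem_mmap()
 * as the mmap file operation, which dispatches to obj->funcs->mmap, i.e.
 * drm_gem_shmem_object_mmap() for objects using the default shmem funcs.
 * example_fops is a hypothetical name for a driver's &file_operations.
 */
DEFINE_DRM_GEM_FOPS(example_fops);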

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @shmem: shmem GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 */
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
			      struct drm_printer *p, unsigned int indent)
{
	if (shmem->base.import_attach)
		return;

	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Drivers that need to acquire a scatter/gather table for objects need to
 * call drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an error pointer
 * on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, obj->import_attach);

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	drm_WARN_ON(obj->dev, obj->import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(shmem);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_free_sgt;

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages(shmem);
	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *                               scatter/gather table for a shmem GEM object.
 * @shmem: shmem GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
 * table created.
 *
 * This is the main function for drivers to get at backing storage, and it
 * hides any difference between dma-buf imported and natively allocated
 * objects. drm_gem_shmem_get_sg_table() should not be directly called by
 * drivers.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
{
	int ret;
	struct sg_table *sgt;

	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
	if (ret)
		return ERR_PTR(ret);
	sgt = drm_gem_shmem_get_pages_sgt_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	return sgt;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);

/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                                       another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	drm_dbg_prime(dev, "size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);

MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_LICENSE("GPL v2");
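
/*
 * Example (sketch): obtaining a device-mapped scatter/gather table for a
 * hypothetical DMA engine via drm_gem_shmem_get_pages_sgt(). The sgt is
 * cached in the object and released in drm_gem_shmem_free(), so the caller
 * does not free it here. example_dma_setup() is an illustrative name.
 */
static int __maybe_unused example_dma_setup(struct drm_gem_shmem_object *shmem)
{
	struct sg_table *sgt;

	sgt = drm_gem_shmem_get_pages_sgt(shmem);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	/* ... hand sgt->sgl off to the device ... */

	return 0;
}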