// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-map-ops.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/*
 * Cache sync: this is a bit over-complicated, to fit the dma-mapping
 * API.  Really GPU cache is out of scope here (handled on cmdstream)
 * and all we need to do is invalidate newly allocated pages before
 * mapping to CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache that, depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

static void update_lru_active(struct drm_gem_object *obj)
{
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_obj->pages);

	if (msm_obj->pin_count) {
		drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
	} else if (msm_obj->madv == MSM_MADV_WILLNEED) {
		drm_gem_lru_move_tail_locked(&priv->lru.willneed, obj);
	} else {
		GEM_WARN_ON(msm_obj->madv != MSM_MADV_DONTNEED);

		drm_gem_lru_move_tail_locked(&priv->lru.dontneed, obj);
	}
}

static void update_lru_locked(struct drm_gem_object *obj)
{
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(&msm_obj->base);

	if (!msm_obj->pages) {
		GEM_WARN_ON(msm_obj->pin_count);

		drm_gem_lru_move_tail_locked(&priv->lru.unbacked, obj);
	} else {
		update_lru_active(obj);
	}
}

static void update_lru(struct drm_gem_object *obj)
{
	struct msm_drm_private *priv = obj->dev->dev_private;

	mutex_lock(&priv->lru.lock);
	update_lru_locked(obj);
	mutex_unlock(&priv->lru.lock);
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = pfn_to_page(__phys_to_pfn(paddr));
		paddr += PAGE_SIZE;
	}

	return p;
}

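/*
 * For orientation, a sketch of the carveout math above (illustrative,
 * not driver code): a drm_mm node allocated at page offset N in the
 * vram range corresponds to
 *
 *	paddr = priv->vram.paddr + ((dma_addr_t)N << PAGE_SHIFT);
 *
 * and get_pages_vram() fills the page array with the struct pages
 * covering [paddr, paddr + npages * PAGE_SIZE), one page at a time.
 */
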
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & MSM_BO_WC)
			sync_for_device(msm_obj);

		update_lru(obj);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the pages
			 * are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & MSM_BO_WC)
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
			msm_obj->sgt = NULL;
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
		update_lru(obj);
	}
}

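/*
 * A hedged usage sketch of the helpers above (hypothetical caller;
 * the object lock must be held).  get_pages() allocates and does the
 * sync_for_device() clean for WC buffers; put_pages() does the
 * matching sync_for_cpu() before freeing:
 *
 *	msm_gem_lock(obj);
 *	pages = get_pages(obj);
 *	if (!IS_ERR(pages)) {
 *		... access via msm_obj->sgt or the page array ...
 *		put_pages(obj);
 *	}
 *	msm_gem_unlock(obj);
 */
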
static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj,
					      unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);

	if (GEM_WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			      msm_obj->madv, madv);
		return ERR_PTR(-EBUSY);
	}

	return get_pages(obj);
}

/*
 * Update the pin count of the object, call under lru.lock
 */
void msm_gem_pin_obj_locked(struct drm_gem_object *obj)
{
	struct msm_drm_private *priv = obj->dev->dev_private;

	msm_gem_assert_locked(obj);

	to_msm_bo(obj)->pin_count++;
	drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
}

static void pin_obj_locked(struct drm_gem_object *obj)
{
	struct msm_drm_private *priv = obj->dev->dev_private;

	mutex_lock(&priv->lru.lock);
	msm_gem_pin_obj_locked(obj);
	mutex_unlock(&priv->lru.lock);
}

struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
{
	struct page **p;

	msm_gem_lock(obj);
	p = msm_gem_pin_pages_locked(obj, MSM_MADV_WILLNEED);
	if (!IS_ERR(p))
		pin_obj_locked(obj);
	msm_gem_unlock(obj);

	return p;
}

void msm_gem_unpin_pages(struct drm_gem_object *obj)
{
	msm_gem_lock(obj);
	msm_gem_unpin_locked(obj);
	msm_gem_unlock(obj);
}

static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
{
	if (msm_obj->flags & MSM_BO_WC)
		return pgprot_writecombine(prot);
	return prot;
}

static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj.  So, we don't need to hold one here.
	 */
	err = msm_gem_lock_interruptible(obj);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_pfn(vma, vmf->address, pfn);

out_unlock:
	msm_gem_unlock(obj);
out:
	return ret;
}

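/*
 * For orientation, the userspace side this fault handler serves looks
 * roughly like the following (illustrative only; "drm_fd" is a
 * hypothetical DRM device fd, and "offset" is the fake offset from
 * msm_gem_mmap_offset() below, obtained via an ioctl):
 *
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, drm_fd, offset);
 *
 * The first CPU touch of each page in ptr[] faults into
 * msm_gem_fault(), which pins backing pages and inserts the PFN for
 * just that page.
 */
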
/* get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	msm_gem_assert_locked(obj);

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;

	msm_gem_lock(obj);
	offset = mmap_offset(obj);
	msm_gem_unlock(obj);
	return offset;
}

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	msm_gem_assert_locked(obj);

	vma = msm_gem_vma_new(aspace);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	msm_gem_assert_locked(obj);

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

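/*
 * Sketch of the per-address-space vma lifecycle these helpers
 * implement (hedged; object lock held throughout):
 *
 *	vma = lookup_vma(obj, aspace);
 *	if (!vma)
 *		vma = add_vma(obj, aspace);	// then msm_gem_vma_init()
 *	...
 *	del_vma(vma);				// unlink + kfree on teardown
 *
 * get_vma_locked() below wraps exactly this lookup-or-create pattern.
 */
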
/*
 * If close is true, this also closes the VMA (releasing the allocated
 * iova range) in addition to removing the iommu mapping.  In the eviction
 * case (!close), we keep the iova allocated, but only remove the iommu
 * mapping.
 */
static void
put_iova_spaces(struct drm_gem_object *obj, bool close)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	msm_gem_assert_locked(obj);

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_vma_purge(vma);
			if (close)
				msm_gem_vma_close(vma);
		}
	}
}

/* Called with msm_obj locked */
static void
put_iova_vmas(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	msm_gem_assert_locked(obj);

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		del_vma(vma);
	}
}

static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;

	msm_gem_assert_locked(obj);

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		int ret;

		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return vma;

		ret = msm_gem_vma_init(vma, obj->size,
			range_start, range_end);
		if (ret) {
			del_vma(vma);
			return ERR_PTR(ret);
		}
	} else {
		GEM_WARN_ON(vma->iova < range_start);
		GEM_WARN_ON((vma->iova + obj->size) > range_end);
	}

	return vma;
}

int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	int prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	if (msm_obj->flags & MSM_BO_MAP_PRIV)
		prot |= IOMMU_PRIV;

	if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
		prot |= IOMMU_CACHE;

	msm_gem_assert_locked(obj);

	pages = msm_gem_pin_pages_locked(obj, MSM_MADV_WILLNEED);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	return msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size);
}

void msm_gem_unpin_locked(struct drm_gem_object *obj)
{
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);

	mutex_lock(&priv->lru.lock);
	msm_obj->pin_count--;
	GEM_WARN_ON(msm_obj->pin_count < 0);
	update_lru_locked(obj);
	mutex_unlock(&priv->lru.lock);
}

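/*
 * Pin/unpin pairing, as a hedged sketch (hypothetical caller holding
 * the object lock):
 *
 *	pages = msm_gem_pin_pages_locked(obj, MSM_MADV_WILLNEED);
 *	if (!IS_ERR(pages)) {
 *		pin_obj_locked(obj);
 *		... buffer is guaranteed resident here ...
 *		msm_gem_unpin_locked(obj);
 *	}
 *
 * Every successful pin must be balanced by exactly one unpin,
 * otherwise the object can never migrate back to an evictable LRU.
 */
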
/* Special unpin path for use in fence-signaling path, avoiding the need
 * to hold the obj lock by only depending on things that are protected by
 * the LRU lock.  In particular we know that we already have backing and
 * that the object's dma_resv has the fence for the current submit/job,
 * which will prevent us racing against page eviction.
 */
void msm_gem_unpin_active(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_obj->pin_count--;
	GEM_WARN_ON(msm_obj->pin_count < 0);
	update_lru_active(obj);
}

struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
					   struct msm_gem_address_space *aspace)
{
	return get_vma_locked(obj, aspace, 0, U64_MAX);
}

static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;
	int ret;

	msm_gem_assert_locked(obj);

	vma = get_vma_locked(obj, aspace, range_start, range_end);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	ret = msm_gem_pin_vma_locked(obj, vma);
	if (!ret) {
		*iova = vma->iova;
		pin_obj_locked(obj);
	}

	return ret;
}

/*
 * get iova and pin it. Should have a matching put
 * limits iova to specified range (in pages)
 */
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
	msm_gem_unlock(obj);

	return ret;
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
}

/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_vma *vma;
	int ret = 0;

	msm_gem_lock(obj);
	vma = get_vma_locked(obj, aspace, 0, U64_MAX);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
	} else {
		*iova = vma->iova;
	}
	msm_gem_unlock(obj);

	return ret;
}

static int clear_iova(struct drm_gem_object *obj,
		      struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma = lookup_vma(obj, aspace);

	if (!vma)
		return 0;

	msm_gem_vma_purge(vma);
	msm_gem_vma_close(vma);
	del_vma(vma);

	return 0;
}

/*
 * Get the requested iova but don't pin it.  Fails if the requested iova is
 * not available.  Doesn't need a put because iovas are currently valid for
 * the life of the object.
 *
 * Setting an iova of zero will clear the vma.
 */
int msm_gem_set_iova(struct drm_gem_object *obj,
		     struct msm_gem_address_space *aspace, uint64_t iova)
{
	int ret = 0;

	msm_gem_lock(obj);
	if (!iova) {
		ret = clear_iova(obj, aspace);
	} else {
		struct msm_gem_vma *vma;
		vma = get_vma_locked(obj, aspace, iova, iova + obj->size);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
		} else if (GEM_WARN_ON(vma->iova != iova)) {
			clear_iova(obj, aspace);
			ret = -EBUSY;
		}
	}
	msm_gem_unlock(obj);

	return ret;
}

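/*
 * Typical iova usage, as an illustrative sketch (assumes a valid
 * @aspace; error handling abbreviated).  msm_gem_unpin_iova() is
 * defined just below:
 *
 *	uint64_t iova;
 *
 *	ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *	if (ret)
 *		return ret;
 *	... program iova into the GPU/display ...
 *	msm_gem_unpin_iova(obj, aspace);
 */
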
/*
 * Unpin an iova by updating the reference counts.  The memory isn't
 * actually purged until something else (shrinker, mm_notifier,
 * destroy, etc) decides to get rid of it.
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
			struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	msm_gem_lock(obj);
	vma = lookup_vma(obj, aspace);
	if (!GEM_WARN_ON(!vma)) {
		msm_gem_unpin_locked(obj);
	}
	msm_gem_unlock(obj);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

fail:
	return ret;
}

static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	int ret = 0;

	msm_gem_assert_locked(obj);

	if (obj->import_attach)
		return ERR_PTR(-ENODEV);

	pages = msm_gem_pin_pages_locked(obj, madv);
	if (IS_ERR(pages))
		return ERR_CAST(pages);

	pin_obj_locked(obj);

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	msm_gem_unpin_locked(obj);
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;

	msm_gem_lock(obj);
	ret = msm_gem_get_vaddr_locked(obj);
	msm_gem_unlock(obj);

	return ret;
}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);
	GEM_WARN_ON(msm_obj->vmap_count < 1);

	msm_obj->vmap_count--;
	msm_gem_unpin_locked(obj);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	msm_gem_lock(obj);
	msm_gem_put_vaddr_locked(obj);
	msm_gem_unlock(obj);
}

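/*
 * CPU-access sketch (hedged; assumes a non-imported object, and a
 * hypothetical "data"/"len" no larger than obj->size):
 *
 *	void *ptr = msm_gem_get_vaddr(obj);
 *
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	memcpy(ptr, data, len);
 *	msm_gem_put_vaddr(obj);
 *
 * The get/put calls pair a pin with a vmap_count reference, so the
 * shrinker leaves the mapping alone in between.
 */
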
/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);

	mutex_lock(&priv->lru.lock);

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	/* If the obj is inactive, we might need to move it
	 * between inactive lists
	 */
	update_lru_locked(obj);

	mutex_unlock(&priv->lru.lock);

	msm_gem_unlock(obj);

	return (madv != __MSM_MADV_PURGED);
}

void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);
	GEM_WARN_ON(!is_purgeable(msm_obj));

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, true);

	msm_gem_vunmap(obj);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);

	put_iova_vmas(obj);

	mutex_lock(&priv->lru.lock);
	/* A one-way transition: */
	msm_obj->madv = __MSM_MADV_PURGED;
	mutex_unlock(&priv->lru.lock);

	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}

/*
 * Unpin the backing pages and make them available to be swapped out.
 */
void msm_gem_evict(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);
	GEM_WARN_ON(is_unevictable(msm_obj));

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, false);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);

	if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

bool msm_gem_active(struct drm_gem_object *obj)
{
	msm_gem_assert_locked(obj);

	if (to_msm_bo(obj)->pin_count)
		return true;

	return !dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true));
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	if (op & MSM_PREP_BOOST) {
		dma_resv_set_deadline(obj->resv, dma_resv_usage_rw(write),
				      ktime_get());
	}

	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
				    true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

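/*
 * Sketch of the CPU-access handshake implemented by msm_gem_cpu_prep()
 * above and msm_gem_cpu_fini() below (illustrative; "timeout" comes
 * from userspace via the corresponding ioctls):
 *
 *	ret = msm_gem_cpu_prep(obj, MSM_PREP_READ | MSM_PREP_WRITE,
 *			       &timeout);
 *	if (ret)
 *		return ret;	// -EBUSY/-ETIMEDOUT if fences pending
 *	... CPU reads/writes the buffer ...
 *	msm_gem_cpu_fini(obj);
 */
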
int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
		struct msm_gem_stats *stats)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	msm_gem_lock(obj);

	stats->all.count++;
	stats->all.size += obj->size;

	if (msm_gem_active(obj)) {
		stats->active.count++;
		stats->active.size += obj->size;
	}

	if (msm_obj->pages) {
		stats->resident.count++;
		stats->resident.size += obj->size;
	}

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		stats->purged.count++;
		stats->purged.size += obj->size;
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		stats->purgeable.count++;
		stats->purgeable.size += obj->size;
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, msm_gem_active(obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, "      vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list) {
			const char *name, *comm;
			if (vma->aspace) {
				struct msm_gem_address_space *aspace = vma->aspace;
				struct task_struct *task =
					get_pid_task(aspace->pid, PIDTYPE_PID);
				if (task) {
					comm = kstrdup(task->comm, GFP_KERNEL);
					put_task_struct(task);
				} else {
					comm = NULL;
				}
				name = aspace->name;
			} else {
				name = comm = NULL;
			}
			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s]",
				name, comm ? ":" : "", comm ? comm : "",
				vma->aspace, vma->iova,
				vma->mapped ? "mapped" : "unmapped");
			kfree(comm);
		}

		seq_puts(m, "\n");
	}

	dma_resv_describe(robj, m);
	msm_gem_unlock(obj);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_stats stats = {};
	struct msm_gem_object *msm_obj;

	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
	list_for_each_entry(msm_obj, list, node) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, "   ");
		msm_gem_describe(obj, m, &stats);
	}

	seq_printf(m, "Total:     %4d objects, %9zu bytes\n",
			stats.all.count, stats.all.size);
	seq_printf(m, "Active:    %4d objects, %9zu bytes\n",
			stats.active.count, stats.active.size);
	seq_printf(m, "Resident:  %4d objects, %9zu bytes\n",
			stats.resident.count, stats.resident.size);
	seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
			stats.purgeable.count, stats.purgeable.size);
	seq_printf(m, "Purged:    %4d objects, %9zu bytes\n",
			stats.purged.count, stats.purged.size);
}
#endif

/* don't call directly!  Use drm_gem_object_put() */
static void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&priv->obj_lock);
	list_del(&msm_obj->node);
	mutex_unlock(&priv->obj_lock);

	put_iova_spaces(obj, true);

	if (obj->import_attach) {
		GEM_WARN_ON(msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		kvfree(msm_obj->pages);

		put_iova_vmas(obj);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
		put_iova_vmas(obj);
	}

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));

	return 0;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return ret;
}

static enum drm_gem_object_status msm_gem_status(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	enum drm_gem_object_status status = 0;

	if (msm_obj->pages)
		status |= DRM_GEM_OBJECT_RESIDENT;

	if (msm_obj->madv == MSM_MADV_DONTNEED)
		status |= DRM_GEM_OBJECT_PURGEABLE;

	return status;
}

static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs msm_gem_object_funcs = {
	.free = msm_gem_free_object,
	.pin = msm_gem_prime_pin,
	.unpin = msm_gem_prime_unpin,
	.get_sg_table = msm_gem_prime_get_sg_table,
	.vmap = msm_gem_prime_vmap,
	.vunmap = msm_gem_prime_vunmap,
	.mmap = msm_gem_object_mmap,
	.status = msm_gem_status,
	.vm_ops = &vm_ops,
};

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	case MSM_BO_CACHED_COHERENT:
		if (priv->has_cached_coherent)
			break;
		fallthrough;
	default:
		DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	INIT_LIST_HEAD(&msm_obj->node);
	INIT_LIST_HEAD(&msm_obj->vmas);

	*obj = &msm_obj->base;
	(*obj)->funcs = &msm_gem_object_funcs;

	return 0;
}

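/*
 * Both allocation entry points below (msm_gem_new() and
 * msm_gem_import()) funnel through msm_gem_new_impl().  A hedged
 * sketch of the handle-based path, mirroring msm_gem_new_handle()
 * above ("args" stands in for hypothetical ioctl arguments):
 *
 *	uint32_t handle;
 *	int ret = msm_gem_new_handle(dev, file, args->size,
 *				     args->flags, &handle, NULL);
 *
 * On success the handle owns the only long-lived reference; the
 * allocation reference is dropped inside msm_gem_new_handle().
 */
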
struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (GEM_WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		return ERR_PTR(ret);

	msm_obj = to_msm_bo(obj);

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;

		drm_gem_private_object_init(dev, obj, size);

		msm_gem_lock(obj);

		vma = add_vma(obj, NULL);
		msm_gem_unlock(obj);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		msm_gem_lock(obj);
		pages = get_pages(obj);
		msm_gem_unlock(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	drm_gem_lru_move_tail(&priv->lru.unbacked, obj);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto fail;

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

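/*
 * Allocation sketch (illustrative; SZ_64K and the flags are
 * arbitrary):
 *
 *	struct drm_gem_object *obj = msm_gem_new(dev, SZ_64K, MSM_BO_WC);
 *
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *	...
 *	drm_gem_object_put(obj);	// drop the allocation reference
 */
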
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		return ERR_PTR(ret);

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_gem_lock(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		msm_gem_unlock(obj);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
	if (ret) {
		msm_gem_unlock(obj);
		goto fail;
	}

	msm_gem_unlock(obj);

	drm_gem_lru_move_tail(&priv->lru.pinned, obj);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto fail;

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	void *vaddr;
	struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	drm_gem_object_put(obj);

	return ERR_PTR(ret);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);
	drm_gem_object_put(bo);
}

void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}
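
/*
 * Usage sketch for the kernel-internal convenience pair above
 * (hedged; "scratch" and the size are hypothetical):
 *
 *	struct drm_gem_object *scratch;
 *	uint64_t iova;
 *	void *ptr = msm_gem_kernel_new(dev, SZ_4K, MSM_BO_WC,
 *				       aspace, &scratch, &iova);
 *
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	...
 *	msm_gem_kernel_put(scratch, aspace);
 *
 * msm_gem_kernel_put() undoes all three references (vmap, iova pin,
 * object) taken by msm_gem_kernel_new().
 */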