1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2013 Red Hat 4 * Author: Rob Clark <robdclark@gmail.com> 5 */ 6 7 #include <linux/dma-map-ops.h> 8 #include <linux/spinlock.h> 9 #include <linux/shmem_fs.h> 10 #include <linux/dma-buf.h> 11 #include <linux/pfn_t.h> 12 13 #include <drm/drm_prime.h> 14 15 #include "msm_drv.h" 16 #include "msm_fence.h" 17 #include "msm_gem.h" 18 #include "msm_gpu.h" 19 #include "msm_mmu.h" 20 21 static void update_inactive(struct msm_gem_object *msm_obj); 22 23 static dma_addr_t physaddr(struct drm_gem_object *obj) 24 { 25 struct msm_gem_object *msm_obj = to_msm_bo(obj); 26 struct msm_drm_private *priv = obj->dev->dev_private; 27 return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) + 28 priv->vram.paddr; 29 } 30 31 static bool use_pages(struct drm_gem_object *obj) 32 { 33 struct msm_gem_object *msm_obj = to_msm_bo(obj); 34 return !msm_obj->vram_node; 35 } 36 37 /* 38 * Cache sync.. this is a bit over-complicated, to fit dma-mapping 39 * API. Really GPU cache is out of scope here (handled on cmdstream) 40 * and all we need to do is invalidate newly allocated pages before 41 * mapping to CPU as uncached/writecombine. 42 * 43 * On top of this, we have the added headache, that depending on 44 * display generation, the display's iommu may be wired up to either 45 * the toplevel drm device (mdss), or to the mdp sub-node, meaning 46 * that here we either have dma-direct or iommu ops. 47 * 48 * Let this be a cautionary tail of abstraction gone wrong. 49 */ 50 51 static void sync_for_device(struct msm_gem_object *msm_obj) 52 { 53 struct device *dev = msm_obj->base.dev->dev; 54 55 dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0); 56 } 57 58 static void sync_for_cpu(struct msm_gem_object *msm_obj) 59 { 60 struct device *dev = msm_obj->base.dev->dev; 61 62 dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0); 63 } 64 65 /* allocate pages from VRAM carveout, used when no IOMMU: */ 66 static struct page **get_pages_vram(struct drm_gem_object *obj, int npages) 67 { 68 struct msm_gem_object *msm_obj = to_msm_bo(obj); 69 struct msm_drm_private *priv = obj->dev->dev_private; 70 dma_addr_t paddr; 71 struct page **p; 72 int ret, i; 73 74 p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); 75 if (!p) 76 return ERR_PTR(-ENOMEM); 77 78 spin_lock(&priv->vram.lock); 79 ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages); 80 spin_unlock(&priv->vram.lock); 81 if (ret) { 82 kvfree(p); 83 return ERR_PTR(ret); 84 } 85 86 paddr = physaddr(obj); 87 for (i = 0; i < npages; i++) { 88 p[i] = phys_to_page(paddr); 89 paddr += PAGE_SIZE; 90 } 91 92 return p; 93 } 94 95 static struct page **get_pages(struct drm_gem_object *obj) 96 { 97 struct msm_gem_object *msm_obj = to_msm_bo(obj); 98 99 GEM_WARN_ON(!msm_gem_is_locked(obj)); 100 101 if (!msm_obj->pages) { 102 struct drm_device *dev = obj->dev; 103 struct page **p; 104 int npages = obj->size >> PAGE_SHIFT; 105 106 if (use_pages(obj)) 107 p = drm_gem_get_pages(obj); 108 else 109 p = get_pages_vram(obj, npages); 110 111 if (IS_ERR(p)) { 112 DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n", 113 PTR_ERR(p)); 114 return p; 115 } 116 117 msm_obj->pages = p; 118 119 msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages); 120 if (IS_ERR(msm_obj->sgt)) { 121 void *ptr = ERR_CAST(msm_obj->sgt); 122 123 DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n"); 124 msm_obj->sgt = NULL; 125 return ptr; 126 } 127 128 /* For non-cached buffers, ensure the new pages are clean 129 * because display controller, GPU, etc. are not coherent: 130 */ 131 if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) 132 sync_for_device(msm_obj); 133 134 update_inactive(msm_obj); 135 } 136 137 return msm_obj->pages; 138 } 139 140 static void put_pages_vram(struct drm_gem_object *obj) 141 { 142 struct msm_gem_object *msm_obj = to_msm_bo(obj); 143 struct msm_drm_private *priv = obj->dev->dev_private; 144 145 spin_lock(&priv->vram.lock); 146 drm_mm_remove_node(msm_obj->vram_node); 147 spin_unlock(&priv->vram.lock); 148 149 kvfree(msm_obj->pages); 150 } 151 152 static void put_pages(struct drm_gem_object *obj) 153 { 154 struct msm_gem_object *msm_obj = to_msm_bo(obj); 155 156 if (msm_obj->pages) { 157 if (msm_obj->sgt) { 158 /* For non-cached buffers, ensure the new 159 * pages are clean because display controller, 160 * GPU, etc. are not coherent: 161 */ 162 if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) 163 sync_for_cpu(msm_obj); 164 165 sg_free_table(msm_obj->sgt); 166 kfree(msm_obj->sgt); 167 msm_obj->sgt = NULL; 168 } 169 170 if (use_pages(obj)) 171 drm_gem_put_pages(obj, msm_obj->pages, true, false); 172 else 173 put_pages_vram(obj); 174 175 msm_obj->pages = NULL; 176 } 177 } 178 179 struct page **msm_gem_get_pages(struct drm_gem_object *obj) 180 { 181 struct msm_gem_object *msm_obj = to_msm_bo(obj); 182 struct page **p; 183 184 msm_gem_lock(obj); 185 186 if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) { 187 msm_gem_unlock(obj); 188 return ERR_PTR(-EBUSY); 189 } 190 191 p = get_pages(obj); 192 193 if (!IS_ERR(p)) { 194 msm_obj->pin_count++; 195 update_inactive(msm_obj); 196 } 197 198 msm_gem_unlock(obj); 199 return p; 200 } 201 202 void msm_gem_put_pages(struct drm_gem_object *obj) 203 { 204 struct msm_gem_object *msm_obj = to_msm_bo(obj); 205 206 msm_gem_lock(obj); 207 msm_obj->pin_count--; 208 GEM_WARN_ON(msm_obj->pin_count < 0); 209 update_inactive(msm_obj); 210 msm_gem_unlock(obj); 211 } 212 213 static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot) 214 { 215 if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) 216 return pgprot_writecombine(prot); 217 return prot; 218 } 219 220 int msm_gem_mmap_obj(struct drm_gem_object *obj, 221 struct vm_area_struct *vma) 222 { 223 struct msm_gem_object *msm_obj = to_msm_bo(obj); 224 225 vma->vm_flags &= ~VM_PFNMAP; 226 vma->vm_flags |= VM_MIXEDMAP; 227 vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags)); 228 229 return 0; 230 } 231 232 int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma) 233 { 234 int ret; 235 236 ret = drm_gem_mmap(filp, vma); 237 if (ret) { 238 DBG("mmap failed: %d", ret); 239 return ret; 240 } 241 242 return msm_gem_mmap_obj(vma->vm_private_data, vma); 243 } 244 245 static vm_fault_t msm_gem_fault(struct vm_fault *vmf) 246 { 247 struct vm_area_struct *vma = vmf->vma; 248 struct drm_gem_object *obj = vma->vm_private_data; 249 struct msm_gem_object *msm_obj = to_msm_bo(obj); 250 struct page **pages; 251 unsigned long pfn; 252 pgoff_t pgoff; 253 int err; 254 vm_fault_t ret; 255 256 /* 257 * vm_ops.open/drm_gem_mmap_obj and close get and put 258 * a reference on obj. So, we dont need to hold one here. 259 */ 260 err = msm_gem_lock_interruptible(obj); 261 if (err) { 262 ret = VM_FAULT_NOPAGE; 263 goto out; 264 } 265 266 if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) { 267 msm_gem_unlock(obj); 268 return VM_FAULT_SIGBUS; 269 } 270 271 /* make sure we have pages attached now */ 272 pages = get_pages(obj); 273 if (IS_ERR(pages)) { 274 ret = vmf_error(PTR_ERR(pages)); 275 goto out_unlock; 276 } 277 278 /* We don't use vmf->pgoff since that has the fake offset: */ 279 pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT; 280 281 pfn = page_to_pfn(pages[pgoff]); 282 283 VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address, 284 pfn, pfn << PAGE_SHIFT); 285 286 ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV)); 287 out_unlock: 288 msm_gem_unlock(obj); 289 out: 290 return ret; 291 } 292 293 /** get mmap offset */ 294 static uint64_t mmap_offset(struct drm_gem_object *obj) 295 { 296 struct drm_device *dev = obj->dev; 297 int ret; 298 299 GEM_WARN_ON(!msm_gem_is_locked(obj)); 300 301 /* Make it mmapable */ 302 ret = drm_gem_create_mmap_offset(obj); 303 304 if (ret) { 305 DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n"); 306 return 0; 307 } 308 309 return drm_vma_node_offset_addr(&obj->vma_node); 310 } 311 312 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj) 313 { 314 uint64_t offset; 315 316 msm_gem_lock(obj); 317 offset = mmap_offset(obj); 318 msm_gem_unlock(obj); 319 return offset; 320 } 321 322 static struct msm_gem_vma *add_vma(struct drm_gem_object *obj, 323 struct msm_gem_address_space *aspace) 324 { 325 struct msm_gem_object *msm_obj = to_msm_bo(obj); 326 struct msm_gem_vma *vma; 327 328 GEM_WARN_ON(!msm_gem_is_locked(obj)); 329 330 vma = kzalloc(sizeof(*vma), GFP_KERNEL); 331 if (!vma) 332 return ERR_PTR(-ENOMEM); 333 334 vma->aspace = aspace; 335 336 list_add_tail(&vma->list, &msm_obj->vmas); 337 338 return vma; 339 } 340 341 static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj, 342 struct msm_gem_address_space *aspace) 343 { 344 struct msm_gem_object *msm_obj = to_msm_bo(obj); 345 struct msm_gem_vma *vma; 346 347 GEM_WARN_ON(!msm_gem_is_locked(obj)); 348 349 list_for_each_entry(vma, &msm_obj->vmas, list) { 350 if (vma->aspace == aspace) 351 return vma; 352 } 353 354 return NULL; 355 } 356 357 static void del_vma(struct msm_gem_vma *vma) 358 { 359 if (!vma) 360 return; 361 362 list_del(&vma->list); 363 kfree(vma); 364 } 365 366 /* 367 * If close is true, this also closes the VMA (releasing the allocated 368 * iova range) in addition to removing the iommu mapping. In the eviction 369 * case (!close), we keep the iova allocated, but only remove the iommu 370 * mapping. 371 */ 372 static void 373 put_iova_spaces(struct drm_gem_object *obj, bool close) 374 { 375 struct msm_gem_object *msm_obj = to_msm_bo(obj); 376 struct msm_gem_vma *vma; 377 378 GEM_WARN_ON(!msm_gem_is_locked(obj)); 379 380 list_for_each_entry(vma, &msm_obj->vmas, list) { 381 if (vma->aspace) { 382 msm_gem_purge_vma(vma->aspace, vma); 383 if (close) 384 msm_gem_close_vma(vma->aspace, vma); 385 } 386 } 387 } 388 389 /* Called with msm_obj locked */ 390 static void 391 put_iova_vmas(struct drm_gem_object *obj) 392 { 393 struct msm_gem_object *msm_obj = to_msm_bo(obj); 394 struct msm_gem_vma *vma, *tmp; 395 396 GEM_WARN_ON(!msm_gem_is_locked(obj)); 397 398 list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) { 399 del_vma(vma); 400 } 401 } 402 403 static int get_iova_locked(struct drm_gem_object *obj, 404 struct msm_gem_address_space *aspace, uint64_t *iova, 405 u64 range_start, u64 range_end) 406 { 407 struct msm_gem_vma *vma; 408 int ret = 0; 409 410 GEM_WARN_ON(!msm_gem_is_locked(obj)); 411 412 vma = lookup_vma(obj, aspace); 413 414 if (!vma) { 415 vma = add_vma(obj, aspace); 416 if (IS_ERR(vma)) 417 return PTR_ERR(vma); 418 419 ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT, 420 range_start, range_end); 421 if (ret) { 422 del_vma(vma); 423 return ret; 424 } 425 } 426 427 *iova = vma->iova; 428 return 0; 429 } 430 431 static int msm_gem_pin_iova(struct drm_gem_object *obj, 432 struct msm_gem_address_space *aspace) 433 { 434 struct msm_gem_object *msm_obj = to_msm_bo(obj); 435 struct msm_gem_vma *vma; 436 struct page **pages; 437 int ret, prot = IOMMU_READ; 438 439 if (!(msm_obj->flags & MSM_BO_GPU_READONLY)) 440 prot |= IOMMU_WRITE; 441 442 if (msm_obj->flags & MSM_BO_MAP_PRIV) 443 prot |= IOMMU_PRIV; 444 445 if (msm_obj->flags & MSM_BO_CACHED_COHERENT) 446 prot |= IOMMU_CACHE; 447 448 GEM_WARN_ON(!msm_gem_is_locked(obj)); 449 450 if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) 451 return -EBUSY; 452 453 vma = lookup_vma(obj, aspace); 454 if (GEM_WARN_ON(!vma)) 455 return -EINVAL; 456 457 pages = get_pages(obj); 458 if (IS_ERR(pages)) 459 return PTR_ERR(pages); 460 461 ret = msm_gem_map_vma(aspace, vma, prot, 462 msm_obj->sgt, obj->size >> PAGE_SHIFT); 463 464 if (!ret) 465 msm_obj->pin_count++; 466 467 return ret; 468 } 469 470 static int get_and_pin_iova_range_locked(struct drm_gem_object *obj, 471 struct msm_gem_address_space *aspace, uint64_t *iova, 472 u64 range_start, u64 range_end) 473 { 474 u64 local; 475 int ret; 476 477 GEM_WARN_ON(!msm_gem_is_locked(obj)); 478 479 ret = get_iova_locked(obj, aspace, &local, 480 range_start, range_end); 481 482 if (!ret) 483 ret = msm_gem_pin_iova(obj, aspace); 484 485 if (!ret) 486 *iova = local; 487 488 return ret; 489 } 490 491 /* 492 * get iova and pin it. Should have a matching put 493 * limits iova to specified range (in pages) 494 */ 495 int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj, 496 struct msm_gem_address_space *aspace, uint64_t *iova, 497 u64 range_start, u64 range_end) 498 { 499 int ret; 500 501 msm_gem_lock(obj); 502 ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end); 503 msm_gem_unlock(obj); 504 505 return ret; 506 } 507 508 int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj, 509 struct msm_gem_address_space *aspace, uint64_t *iova) 510 { 511 return get_and_pin_iova_range_locked(obj, aspace, iova, 0, U64_MAX); 512 } 513 514 /* get iova and pin it. Should have a matching put */ 515 int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, 516 struct msm_gem_address_space *aspace, uint64_t *iova) 517 { 518 return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX); 519 } 520 521 /* 522 * Get an iova but don't pin it. Doesn't need a put because iovas are currently 523 * valid for the life of the object 524 */ 525 int msm_gem_get_iova(struct drm_gem_object *obj, 526 struct msm_gem_address_space *aspace, uint64_t *iova) 527 { 528 int ret; 529 530 msm_gem_lock(obj); 531 ret = get_iova_locked(obj, aspace, iova, 0, U64_MAX); 532 msm_gem_unlock(obj); 533 534 return ret; 535 } 536 537 /* get iova without taking a reference, used in places where you have 538 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova' 539 */ 540 uint64_t msm_gem_iova(struct drm_gem_object *obj, 541 struct msm_gem_address_space *aspace) 542 { 543 struct msm_gem_vma *vma; 544 545 msm_gem_lock(obj); 546 vma = lookup_vma(obj, aspace); 547 msm_gem_unlock(obj); 548 GEM_WARN_ON(!vma); 549 550 return vma ? vma->iova : 0; 551 } 552 553 /* 554 * Locked variant of msm_gem_unpin_iova() 555 */ 556 void msm_gem_unpin_iova_locked(struct drm_gem_object *obj, 557 struct msm_gem_address_space *aspace) 558 { 559 struct msm_gem_object *msm_obj = to_msm_bo(obj); 560 struct msm_gem_vma *vma; 561 562 GEM_WARN_ON(!msm_gem_is_locked(obj)); 563 564 vma = lookup_vma(obj, aspace); 565 566 if (!GEM_WARN_ON(!vma)) { 567 msm_gem_unmap_vma(aspace, vma); 568 569 msm_obj->pin_count--; 570 GEM_WARN_ON(msm_obj->pin_count < 0); 571 572 update_inactive(msm_obj); 573 } 574 } 575 576 /* 577 * Unpin a iova by updating the reference counts. The memory isn't actually 578 * purged until something else (shrinker, mm_notifier, destroy, etc) decides 579 * to get rid of it 580 */ 581 void msm_gem_unpin_iova(struct drm_gem_object *obj, 582 struct msm_gem_address_space *aspace) 583 { 584 msm_gem_lock(obj); 585 msm_gem_unpin_iova_locked(obj, aspace); 586 msm_gem_unlock(obj); 587 } 588 589 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev, 590 struct drm_mode_create_dumb *args) 591 { 592 args->pitch = align_pitch(args->width, args->bpp); 593 args->size = PAGE_ALIGN(args->pitch * args->height); 594 return msm_gem_new_handle(dev, file, args->size, 595 MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb"); 596 } 597 598 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, 599 uint32_t handle, uint64_t *offset) 600 { 601 struct drm_gem_object *obj; 602 int ret = 0; 603 604 /* GEM does all our handle to object mapping */ 605 obj = drm_gem_object_lookup(file, handle); 606 if (obj == NULL) { 607 ret = -ENOENT; 608 goto fail; 609 } 610 611 *offset = msm_gem_mmap_offset(obj); 612 613 drm_gem_object_put(obj); 614 615 fail: 616 return ret; 617 } 618 619 static void *get_vaddr(struct drm_gem_object *obj, unsigned madv) 620 { 621 struct msm_gem_object *msm_obj = to_msm_bo(obj); 622 int ret = 0; 623 624 GEM_WARN_ON(!msm_gem_is_locked(obj)); 625 626 if (obj->import_attach) 627 return ERR_PTR(-ENODEV); 628 629 if (GEM_WARN_ON(msm_obj->madv > madv)) { 630 DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n", 631 msm_obj->madv, madv); 632 return ERR_PTR(-EBUSY); 633 } 634 635 /* increment vmap_count *before* vmap() call, so shrinker can 636 * check vmap_count (is_vunmapable()) outside of msm_obj lock. 637 * This guarantees that we won't try to msm_gem_vunmap() this 638 * same object from within the vmap() call (while we already 639 * hold msm_obj lock) 640 */ 641 msm_obj->vmap_count++; 642 643 if (!msm_obj->vaddr) { 644 struct page **pages = get_pages(obj); 645 if (IS_ERR(pages)) { 646 ret = PTR_ERR(pages); 647 goto fail; 648 } 649 msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, 650 VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL)); 651 if (msm_obj->vaddr == NULL) { 652 ret = -ENOMEM; 653 goto fail; 654 } 655 656 update_inactive(msm_obj); 657 } 658 659 return msm_obj->vaddr; 660 661 fail: 662 msm_obj->vmap_count--; 663 return ERR_PTR(ret); 664 } 665 666 void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj) 667 { 668 return get_vaddr(obj, MSM_MADV_WILLNEED); 669 } 670 671 void *msm_gem_get_vaddr(struct drm_gem_object *obj) 672 { 673 void *ret; 674 675 msm_gem_lock(obj); 676 ret = msm_gem_get_vaddr_locked(obj); 677 msm_gem_unlock(obj); 678 679 return ret; 680 } 681 682 /* 683 * Don't use this! It is for the very special case of dumping 684 * submits from GPU hangs or faults, were the bo may already 685 * be MSM_MADV_DONTNEED, but we know the buffer is still on the 686 * active list. 687 */ 688 void *msm_gem_get_vaddr_active(struct drm_gem_object *obj) 689 { 690 return get_vaddr(obj, __MSM_MADV_PURGED); 691 } 692 693 void msm_gem_put_vaddr_locked(struct drm_gem_object *obj) 694 { 695 struct msm_gem_object *msm_obj = to_msm_bo(obj); 696 697 GEM_WARN_ON(!msm_gem_is_locked(obj)); 698 GEM_WARN_ON(msm_obj->vmap_count < 1); 699 700 msm_obj->vmap_count--; 701 } 702 703 void msm_gem_put_vaddr(struct drm_gem_object *obj) 704 { 705 msm_gem_lock(obj); 706 msm_gem_put_vaddr_locked(obj); 707 msm_gem_unlock(obj); 708 } 709 710 /* Update madvise status, returns true if not purged, else 711 * false or -errno. 712 */ 713 int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv) 714 { 715 struct msm_gem_object *msm_obj = to_msm_bo(obj); 716 717 msm_gem_lock(obj); 718 719 if (msm_obj->madv != __MSM_MADV_PURGED) 720 msm_obj->madv = madv; 721 722 madv = msm_obj->madv; 723 724 /* If the obj is inactive, we might need to move it 725 * between inactive lists 726 */ 727 if (msm_obj->active_count == 0) 728 update_inactive(msm_obj); 729 730 msm_gem_unlock(obj); 731 732 return (madv != __MSM_MADV_PURGED); 733 } 734 735 void msm_gem_purge(struct drm_gem_object *obj) 736 { 737 struct drm_device *dev = obj->dev; 738 struct msm_gem_object *msm_obj = to_msm_bo(obj); 739 740 GEM_WARN_ON(!msm_gem_is_locked(obj)); 741 GEM_WARN_ON(!is_purgeable(msm_obj)); 742 743 /* Get rid of any iommu mapping(s): */ 744 put_iova_spaces(obj, true); 745 746 msm_gem_vunmap(obj); 747 748 drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping); 749 750 put_pages(obj); 751 752 put_iova_vmas(obj); 753 754 msm_obj->madv = __MSM_MADV_PURGED; 755 update_inactive(msm_obj); 756 757 drm_gem_free_mmap_offset(obj); 758 759 /* Our goal here is to return as much of the memory as 760 * is possible back to the system as we are called from OOM. 761 * To do this we must instruct the shmfs to drop all of its 762 * backing pages, *now*. 763 */ 764 shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1); 765 766 invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 767 0, (loff_t)-1); 768 } 769 770 /* 771 * Unpin the backing pages and make them available to be swapped out. 772 */ 773 void msm_gem_evict(struct drm_gem_object *obj) 774 { 775 struct drm_device *dev = obj->dev; 776 struct msm_gem_object *msm_obj = to_msm_bo(obj); 777 778 GEM_WARN_ON(!msm_gem_is_locked(obj)); 779 GEM_WARN_ON(is_unevictable(msm_obj)); 780 GEM_WARN_ON(!msm_obj->evictable); 781 GEM_WARN_ON(msm_obj->active_count); 782 783 /* Get rid of any iommu mapping(s): */ 784 put_iova_spaces(obj, false); 785 786 drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping); 787 788 put_pages(obj); 789 790 update_inactive(msm_obj); 791 } 792 793 void msm_gem_vunmap(struct drm_gem_object *obj) 794 { 795 struct msm_gem_object *msm_obj = to_msm_bo(obj); 796 797 GEM_WARN_ON(!msm_gem_is_locked(obj)); 798 799 if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj))) 800 return; 801 802 vunmap(msm_obj->vaddr); 803 msm_obj->vaddr = NULL; 804 } 805 806 void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu) 807 { 808 struct msm_gem_object *msm_obj = to_msm_bo(obj); 809 struct msm_drm_private *priv = obj->dev->dev_private; 810 811 might_sleep(); 812 GEM_WARN_ON(!msm_gem_is_locked(obj)); 813 GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED); 814 GEM_WARN_ON(msm_obj->dontneed); 815 816 if (msm_obj->active_count++ == 0) { 817 mutex_lock(&priv->mm_lock); 818 if (msm_obj->evictable) 819 mark_unevictable(msm_obj); 820 list_del(&msm_obj->mm_list); 821 list_add_tail(&msm_obj->mm_list, &gpu->active_list); 822 mutex_unlock(&priv->mm_lock); 823 } 824 } 825 826 void msm_gem_active_put(struct drm_gem_object *obj) 827 { 828 struct msm_gem_object *msm_obj = to_msm_bo(obj); 829 830 might_sleep(); 831 GEM_WARN_ON(!msm_gem_is_locked(obj)); 832 833 if (--msm_obj->active_count == 0) { 834 update_inactive(msm_obj); 835 } 836 } 837 838 static void update_inactive(struct msm_gem_object *msm_obj) 839 { 840 struct msm_drm_private *priv = msm_obj->base.dev->dev_private; 841 842 GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base)); 843 844 if (msm_obj->active_count != 0) 845 return; 846 847 mutex_lock(&priv->mm_lock); 848 849 if (msm_obj->dontneed) 850 mark_unpurgeable(msm_obj); 851 if (msm_obj->evictable) 852 mark_unevictable(msm_obj); 853 854 list_del(&msm_obj->mm_list); 855 if ((msm_obj->madv == MSM_MADV_WILLNEED) && msm_obj->sgt) { 856 list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed); 857 mark_evictable(msm_obj); 858 } else if (msm_obj->madv == MSM_MADV_DONTNEED) { 859 list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed); 860 mark_purgeable(msm_obj); 861 } else { 862 GEM_WARN_ON((msm_obj->madv != __MSM_MADV_PURGED) && msm_obj->sgt); 863 list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned); 864 } 865 866 mutex_unlock(&priv->mm_lock); 867 } 868 869 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout) 870 { 871 bool write = !!(op & MSM_PREP_WRITE); 872 unsigned long remain = 873 op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout); 874 long ret; 875 876 ret = dma_resv_wait_timeout(obj->resv, write, true, remain); 877 if (ret == 0) 878 return remain == 0 ? -EBUSY : -ETIMEDOUT; 879 else if (ret < 0) 880 return ret; 881 882 /* TODO cache maintenance */ 883 884 return 0; 885 } 886 887 int msm_gem_cpu_fini(struct drm_gem_object *obj) 888 { 889 /* TODO cache maintenance */ 890 return 0; 891 } 892 893 #ifdef CONFIG_DEBUG_FS 894 static void describe_fence(struct dma_fence *fence, const char *type, 895 struct seq_file *m) 896 { 897 if (!dma_fence_is_signaled(fence)) 898 seq_printf(m, "\t%9s: %s %s seq %llu\n", type, 899 fence->ops->get_driver_name(fence), 900 fence->ops->get_timeline_name(fence), 901 fence->seqno); 902 } 903 904 void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m, 905 struct msm_gem_stats *stats) 906 { 907 struct msm_gem_object *msm_obj = to_msm_bo(obj); 908 struct dma_resv *robj = obj->resv; 909 struct dma_resv_list *fobj; 910 struct dma_fence *fence; 911 struct msm_gem_vma *vma; 912 uint64_t off = drm_vma_node_start(&obj->vma_node); 913 const char *madv; 914 915 msm_gem_lock(obj); 916 917 stats->all.count++; 918 stats->all.size += obj->size; 919 920 if (is_active(msm_obj)) { 921 stats->active.count++; 922 stats->active.size += obj->size; 923 } 924 925 if (msm_obj->pages) { 926 stats->resident.count++; 927 stats->resident.size += obj->size; 928 } 929 930 switch (msm_obj->madv) { 931 case __MSM_MADV_PURGED: 932 stats->purged.count++; 933 stats->purged.size += obj->size; 934 madv = " purged"; 935 break; 936 case MSM_MADV_DONTNEED: 937 stats->purgeable.count++; 938 stats->purgeable.size += obj->size; 939 madv = " purgeable"; 940 break; 941 case MSM_MADV_WILLNEED: 942 default: 943 madv = ""; 944 break; 945 } 946 947 seq_printf(m, "%08x: %c %2d (%2d) %08llx %p", 948 msm_obj->flags, is_active(msm_obj) ? 'A' : 'I', 949 obj->name, kref_read(&obj->refcount), 950 off, msm_obj->vaddr); 951 952 seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name); 953 954 if (!list_empty(&msm_obj->vmas)) { 955 956 seq_puts(m, " vmas:"); 957 958 list_for_each_entry(vma, &msm_obj->vmas, list) { 959 const char *name, *comm; 960 if (vma->aspace) { 961 struct msm_gem_address_space *aspace = vma->aspace; 962 struct task_struct *task = 963 get_pid_task(aspace->pid, PIDTYPE_PID); 964 if (task) { 965 comm = kstrdup(task->comm, GFP_KERNEL); 966 } else { 967 comm = NULL; 968 } 969 name = aspace->name; 970 } else { 971 name = comm = NULL; 972 } 973 seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]", 974 name, comm ? ":" : "", comm ? comm : "", 975 vma->aspace, vma->iova, 976 vma->mapped ? "mapped" : "unmapped", 977 vma->inuse); 978 kfree(comm); 979 } 980 981 seq_puts(m, "\n"); 982 } 983 984 rcu_read_lock(); 985 fobj = dma_resv_shared_list(robj); 986 if (fobj) { 987 unsigned int i, shared_count = fobj->shared_count; 988 989 for (i = 0; i < shared_count; i++) { 990 fence = rcu_dereference(fobj->shared[i]); 991 describe_fence(fence, "Shared", m); 992 } 993 } 994 995 fence = dma_resv_excl_fence(robj); 996 if (fence) 997 describe_fence(fence, "Exclusive", m); 998 rcu_read_unlock(); 999 1000 msm_gem_unlock(obj); 1001 } 1002 1003 void msm_gem_describe_objects(struct list_head *list, struct seq_file *m) 1004 { 1005 struct msm_gem_stats stats = {}; 1006 struct msm_gem_object *msm_obj; 1007 1008 seq_puts(m, " flags id ref offset kaddr size madv name\n"); 1009 list_for_each_entry(msm_obj, list, node) { 1010 struct drm_gem_object *obj = &msm_obj->base; 1011 seq_puts(m, " "); 1012 msm_gem_describe(obj, m, &stats); 1013 } 1014 1015 seq_printf(m, "Total: %4d objects, %9zu bytes\n", 1016 stats.all.count, stats.all.size); 1017 seq_printf(m, "Active: %4d objects, %9zu bytes\n", 1018 stats.active.count, stats.active.size); 1019 seq_printf(m, "Resident: %4d objects, %9zu bytes\n", 1020 stats.resident.count, stats.resident.size); 1021 seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n", 1022 stats.purgeable.count, stats.purgeable.size); 1023 seq_printf(m, "Purged: %4d objects, %9zu bytes\n", 1024 stats.purged.count, stats.purged.size); 1025 } 1026 #endif 1027 1028 /* don't call directly! Use drm_gem_object_put() */ 1029 void msm_gem_free_object(struct drm_gem_object *obj) 1030 { 1031 struct msm_gem_object *msm_obj = to_msm_bo(obj); 1032 struct drm_device *dev = obj->dev; 1033 struct msm_drm_private *priv = dev->dev_private; 1034 1035 mutex_lock(&priv->obj_lock); 1036 list_del(&msm_obj->node); 1037 mutex_unlock(&priv->obj_lock); 1038 1039 mutex_lock(&priv->mm_lock); 1040 if (msm_obj->dontneed) 1041 mark_unpurgeable(msm_obj); 1042 list_del(&msm_obj->mm_list); 1043 mutex_unlock(&priv->mm_lock); 1044 1045 msm_gem_lock(obj); 1046 1047 /* object should not be on active list: */ 1048 GEM_WARN_ON(is_active(msm_obj)); 1049 1050 put_iova_spaces(obj, true); 1051 1052 if (obj->import_attach) { 1053 GEM_WARN_ON(msm_obj->vaddr); 1054 1055 /* Don't drop the pages for imported dmabuf, as they are not 1056 * ours, just free the array we allocated: 1057 */ 1058 kvfree(msm_obj->pages); 1059 1060 put_iova_vmas(obj); 1061 1062 /* dma_buf_detach() grabs resv lock, so we need to unlock 1063 * prior to drm_prime_gem_destroy 1064 */ 1065 msm_gem_unlock(obj); 1066 1067 drm_prime_gem_destroy(obj, msm_obj->sgt); 1068 } else { 1069 msm_gem_vunmap(obj); 1070 put_pages(obj); 1071 put_iova_vmas(obj); 1072 msm_gem_unlock(obj); 1073 } 1074 1075 drm_gem_object_release(obj); 1076 1077 kfree(msm_obj); 1078 } 1079 1080 /* convenience method to construct a GEM buffer object, and userspace handle */ 1081 int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file, 1082 uint32_t size, uint32_t flags, uint32_t *handle, 1083 char *name) 1084 { 1085 struct drm_gem_object *obj; 1086 int ret; 1087 1088 obj = msm_gem_new(dev, size, flags); 1089 1090 if (IS_ERR(obj)) 1091 return PTR_ERR(obj); 1092 1093 if (name) 1094 msm_gem_object_set_name(obj, "%s", name); 1095 1096 ret = drm_gem_handle_create(file, obj, handle); 1097 1098 /* drop reference from allocate - handle holds it now */ 1099 drm_gem_object_put(obj); 1100 1101 return ret; 1102 } 1103 1104 static const struct vm_operations_struct vm_ops = { 1105 .fault = msm_gem_fault, 1106 .open = drm_gem_vm_open, 1107 .close = drm_gem_vm_close, 1108 }; 1109 1110 static const struct drm_gem_object_funcs msm_gem_object_funcs = { 1111 .free = msm_gem_free_object, 1112 .pin = msm_gem_prime_pin, 1113 .unpin = msm_gem_prime_unpin, 1114 .get_sg_table = msm_gem_prime_get_sg_table, 1115 .vmap = msm_gem_prime_vmap, 1116 .vunmap = msm_gem_prime_vunmap, 1117 .vm_ops = &vm_ops, 1118 }; 1119 1120 static int msm_gem_new_impl(struct drm_device *dev, 1121 uint32_t size, uint32_t flags, 1122 struct drm_gem_object **obj) 1123 { 1124 struct msm_drm_private *priv = dev->dev_private; 1125 struct msm_gem_object *msm_obj; 1126 1127 switch (flags & MSM_BO_CACHE_MASK) { 1128 case MSM_BO_UNCACHED: 1129 case MSM_BO_CACHED: 1130 case MSM_BO_WC: 1131 break; 1132 case MSM_BO_CACHED_COHERENT: 1133 if (priv->has_cached_coherent) 1134 break; 1135 fallthrough; 1136 default: 1137 DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n", 1138 (flags & MSM_BO_CACHE_MASK)); 1139 return -EINVAL; 1140 } 1141 1142 msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL); 1143 if (!msm_obj) 1144 return -ENOMEM; 1145 1146 msm_obj->flags = flags; 1147 msm_obj->madv = MSM_MADV_WILLNEED; 1148 1149 INIT_LIST_HEAD(&msm_obj->vmas); 1150 1151 *obj = &msm_obj->base; 1152 (*obj)->funcs = &msm_gem_object_funcs; 1153 1154 return 0; 1155 } 1156 1157 struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags) 1158 { 1159 struct msm_drm_private *priv = dev->dev_private; 1160 struct msm_gem_object *msm_obj; 1161 struct drm_gem_object *obj = NULL; 1162 bool use_vram = false; 1163 int ret; 1164 1165 size = PAGE_ALIGN(size); 1166 1167 if (!msm_use_mmu(dev)) 1168 use_vram = true; 1169 else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size) 1170 use_vram = true; 1171 1172 if (GEM_WARN_ON(use_vram && !priv->vram.size)) 1173 return ERR_PTR(-EINVAL); 1174 1175 /* Disallow zero sized objects as they make the underlying 1176 * infrastructure grumpy 1177 */ 1178 if (size == 0) 1179 return ERR_PTR(-EINVAL); 1180 1181 ret = msm_gem_new_impl(dev, size, flags, &obj); 1182 if (ret) 1183 goto fail; 1184 1185 msm_obj = to_msm_bo(obj); 1186 1187 if (use_vram) { 1188 struct msm_gem_vma *vma; 1189 struct page **pages; 1190 1191 drm_gem_private_object_init(dev, obj, size); 1192 1193 msm_gem_lock(obj); 1194 1195 vma = add_vma(obj, NULL); 1196 msm_gem_unlock(obj); 1197 if (IS_ERR(vma)) { 1198 ret = PTR_ERR(vma); 1199 goto fail; 1200 } 1201 1202 to_msm_bo(obj)->vram_node = &vma->node; 1203 1204 /* Call chain get_pages() -> update_inactive() tries to 1205 * access msm_obj->mm_list, but it is not initialized yet. 1206 * To avoid NULL pointer dereference error, initialize 1207 * mm_list to be empty. 1208 */ 1209 INIT_LIST_HEAD(&msm_obj->mm_list); 1210 1211 msm_gem_lock(obj); 1212 pages = get_pages(obj); 1213 msm_gem_unlock(obj); 1214 if (IS_ERR(pages)) { 1215 ret = PTR_ERR(pages); 1216 goto fail; 1217 } 1218 1219 vma->iova = physaddr(obj); 1220 } else { 1221 ret = drm_gem_object_init(dev, obj, size); 1222 if (ret) 1223 goto fail; 1224 /* 1225 * Our buffers are kept pinned, so allocating them from the 1226 * MOVABLE zone is a really bad idea, and conflicts with CMA. 1227 * See comments above new_inode() why this is required _and_ 1228 * expected if you're going to pin these pages. 1229 */ 1230 mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER); 1231 } 1232 1233 mutex_lock(&priv->mm_lock); 1234 list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned); 1235 mutex_unlock(&priv->mm_lock); 1236 1237 mutex_lock(&priv->obj_lock); 1238 list_add_tail(&msm_obj->node, &priv->objects); 1239 mutex_unlock(&priv->obj_lock); 1240 1241 return obj; 1242 1243 fail: 1244 drm_gem_object_put(obj); 1245 return ERR_PTR(ret); 1246 } 1247 1248 struct drm_gem_object *msm_gem_import(struct drm_device *dev, 1249 struct dma_buf *dmabuf, struct sg_table *sgt) 1250 { 1251 struct msm_drm_private *priv = dev->dev_private; 1252 struct msm_gem_object *msm_obj; 1253 struct drm_gem_object *obj; 1254 uint32_t size; 1255 int ret, npages; 1256 1257 /* if we don't have IOMMU, don't bother pretending we can import: */ 1258 if (!msm_use_mmu(dev)) { 1259 DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n"); 1260 return ERR_PTR(-EINVAL); 1261 } 1262 1263 size = PAGE_ALIGN(dmabuf->size); 1264 1265 ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj); 1266 if (ret) 1267 goto fail; 1268 1269 drm_gem_private_object_init(dev, obj, size); 1270 1271 npages = size / PAGE_SIZE; 1272 1273 msm_obj = to_msm_bo(obj); 1274 msm_gem_lock(obj); 1275 msm_obj->sgt = sgt; 1276 msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); 1277 if (!msm_obj->pages) { 1278 msm_gem_unlock(obj); 1279 ret = -ENOMEM; 1280 goto fail; 1281 } 1282 1283 ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages); 1284 if (ret) { 1285 msm_gem_unlock(obj); 1286 goto fail; 1287 } 1288 1289 msm_gem_unlock(obj); 1290 1291 mutex_lock(&priv->mm_lock); 1292 list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned); 1293 mutex_unlock(&priv->mm_lock); 1294 1295 mutex_lock(&priv->obj_lock); 1296 list_add_tail(&msm_obj->node, &priv->objects); 1297 mutex_unlock(&priv->obj_lock); 1298 1299 return obj; 1300 1301 fail: 1302 drm_gem_object_put(obj); 1303 return ERR_PTR(ret); 1304 } 1305 1306 void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size, 1307 uint32_t flags, struct msm_gem_address_space *aspace, 1308 struct drm_gem_object **bo, uint64_t *iova) 1309 { 1310 void *vaddr; 1311 struct drm_gem_object *obj = msm_gem_new(dev, size, flags); 1312 int ret; 1313 1314 if (IS_ERR(obj)) 1315 return ERR_CAST(obj); 1316 1317 if (iova) { 1318 ret = msm_gem_get_and_pin_iova(obj, aspace, iova); 1319 if (ret) 1320 goto err; 1321 } 1322 1323 vaddr = msm_gem_get_vaddr(obj); 1324 if (IS_ERR(vaddr)) { 1325 msm_gem_unpin_iova(obj, aspace); 1326 ret = PTR_ERR(vaddr); 1327 goto err; 1328 } 1329 1330 if (bo) 1331 *bo = obj; 1332 1333 return vaddr; 1334 err: 1335 drm_gem_object_put(obj); 1336 1337 return ERR_PTR(ret); 1338 1339 } 1340 1341 void msm_gem_kernel_put(struct drm_gem_object *bo, 1342 struct msm_gem_address_space *aspace) 1343 { 1344 if (IS_ERR_OR_NULL(bo)) 1345 return; 1346 1347 msm_gem_put_vaddr(bo); 1348 msm_gem_unpin_iova(bo, aspace); 1349 drm_gem_object_put(bo); 1350 } 1351 1352 void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...) 1353 { 1354 struct msm_gem_object *msm_obj = to_msm_bo(bo); 1355 va_list ap; 1356 1357 if (!fmt) 1358 return; 1359 1360 va_start(ap, fmt); 1361 vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap); 1362 va_end(ap); 1363 } 1364