/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/anon_inodes.h>
#include <linux/mman.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>

#include <drm/drm_cache.h>

#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_gem_evict.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_gem_mman.h"
#include "i915_mm.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
#include "i915_gem_ttm.h"
#include "i915_vma.h"

static inline bool
__vma_matches(struct vm_area_struct *vma, struct file *filp,
	      unsigned long addr, unsigned long size)
{
	if (vma->vm_file != filp)
		return false;

	return vma->vm_start == addr &&
	       (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
}

/**
 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
 *			 it is mapped to.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 *
 * IMPORTANT:
 *
 * DRM driver writers who look at this function as an example for how to do GEM
 * mmap support, please don't implement mmap support like here. The modern way
 * to implement DRM mmap support is with an mmap offset ioctl (like
 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
 * That way debug tooling like valgrind will understand what's going on, whereas
 * hiding the mmap call in a driver private ioctl will break that. The i915
 * driver only does cpu mmaps this way because we didn't know better.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_mmap *args = data;
	struct drm_i915_gem_object *obj;
	unsigned long addr;

	/*
	 * mmap ioctl is disallowed for all discrete platforms,
	 * and for all platforms with GRAPHICS_VER > 12.
	 */
	if (IS_DGFX(i915) || GRAPHICS_VER_FULL(i915) > IP_VER(12, 0))
		return -EOPNOTSUPP;

	if (args->flags & ~(I915_MMAP_WC))
		return -EINVAL;

	if (args->flags & I915_MMAP_WC && !pat_enabled())
		return -ENODEV;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->base.filp) {
		addr = -ENXIO;
		goto err;
	}

	if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
		addr = -EINVAL;
		goto err;
	}

	addr = vm_mmap(obj->base.filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	if (IS_ERR_VALUE(addr))
		goto err;

	if (args->flags & I915_MMAP_WC) {
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;

		if (mmap_write_lock_killable(mm)) {
			addr = -EINTR;
			goto err;
		}
		vma = find_vma(mm, addr);
		if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
			vma->vm_page_prot =
				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		else
			addr = -ENOMEM;
		mmap_write_unlock(mm);
		if (IS_ERR_VALUE(addr))
			goto err;
	}
	i915_gem_object_put(obj);

	args->addr_ptr = (u64)addr;
	return 0;

err:
	i915_gem_object_put(obj);
	return addr;
}

static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
}

/**
 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
 *
 * A history of the GTT mmap interface:
 *
 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
 *     be aligned and suitable for fencing, and still fit into the available
 *     mappable space left by the pinned display objects. A classic problem
 *     we called the page-fault-of-doom where we would ping-pong between
 *     two objects that could not fit inside the GTT and so the memcpy
 *     would page one object in at the expense of the other between every
 *     single byte.
 *
 * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
 *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
 *     object is too large for the available space (or simply too large
 *     for the mappable aperture!), a view is created instead and faulted
 *     into userspace. (This view is aligned and sized appropriately for
 *     fenced access.)
 *
 * 2 - Recognise WC as a separate cache domain so that we can flush the
 *     delayed writes via GTT before performing direct access via WC.
 *
 * 3 - Remove implicit set-domain(GTT) and synchronisation on initial
 *     pagefault; swapin remains transparent.
 *
 * 4 - Support multiple fault handlers per object depending on object's
 *     backing storage (a.k.a. MMAP_OFFSET).
 *
 * 5 - Support multiple partial mmaps (mmap part of a BO + unmap at an offset,
 *     multiple times with different size and offset).
 *
 * Restrictions:
 *
 *  * snoopable objects cannot be accessed via the GTT. It can cause machine
 *    hangs on some architectures, corruption on others. An attempt to service
 *    a GTT page fault from a snoopable object will generate a SIGBUS.
 *
 *  * the object must be able to fit into RAM (physical memory, though not
 *    limited to the mappable aperture).
 *
 *
 * Caveats:
 *
 *  * a new GTT page fault will synchronize rendering from the GPU and flush
 *    all data to system memory. Subsequent access will not be synchronized.
 *
 *  * all mappings are revoked on runtime device suspend.
 *
 *  * there are only 8, 16 or 32 fence registers to share between all users
 *    (older machines require a fence register for display and blitter access
 *    as well). Contention of the fence registers will cause the previous users
 *    to be unmapped and any new access will generate new page faults.
 *
 *  * running out of memory while servicing a fault may generate a SIGBUS,
 *    rather than the expected SIGSEGV.
 */
int i915_gem_mmap_gtt_version(void)
{
	return 5;
}

static inline struct i915_gtt_view
compute_partial_view(const struct drm_i915_gem_object *obj,
		     pgoff_t page_offset,
		     unsigned int chunk)
{
	struct i915_gtt_view view;

	if (i915_gem_object_is_tiled(obj))
		chunk = roundup(chunk, tile_row_pages(obj) ?: 1);

	view.type = I915_GTT_VIEW_PARTIAL;
	view.partial.offset = rounddown(page_offset, chunk);
	view.partial.size =
		min_t(unsigned int, chunk,
		      (obj->base.size >> PAGE_SHIFT) - view.partial.offset);

	/* If the partial covers the entire object, just create a normal VMA. */
	if (chunk >= obj->base.size >> PAGE_SHIFT)
		view.type = I915_GTT_VIEW_NORMAL;

	return view;
}

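/*
 * For example (assuming 4KiB pages, so MIN_CHUNK_PAGES == 256): an untiled
 * 8MiB object (2048 pages) faulting on page 1000 yields a partial view of
 * 256 pages starting at rounddown(1000, 256) == page 768. Only when the
 * chunk covers the whole object do we use a normal view instead.
 */
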
static vm_fault_t i915_error_to_vmf_fault(int err)
{
	switch (err) {
	default:
		WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err);
		fallthrough;
	case -EIO: /* shmemfs failure from swap device */
	case -EFAULT: /* purged object */
	case -ENODEV: /* bad object, how did you get here! */
	case -ENXIO: /* unable to access backing store (on device) */
		return VM_FAULT_SIGBUS;

	case -ENOMEM: /* our allocation failure */
		return VM_FAULT_OOM;

	case 0:
	case -EAGAIN:
	case -ENOSPC: /* transient failure to evict? */
	case -ENOBUFS: /* temporarily out of fences? */
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	}
}

static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
{
	struct vm_area_struct *area = vmf->vma;
	struct i915_mmap_offset *mmo = area->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;
	unsigned long obj_offset;
	resource_size_t iomap;
	int err;

	/* Sanity check that we allow writing into this object */
	if (unlikely(i915_gem_object_is_readonly(obj) &&
		     area->vm_flags & VM_WRITE))
		return VM_FAULT_SIGBUS;

	if (i915_gem_object_lock_interruptible(obj, NULL))
		return VM_FAULT_NOPAGE;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out;

	iomap = -1;
	if (!i915_gem_object_has_struct_page(obj)) {
		iomap = obj->mm.region->iomap.base;
		iomap -= obj->mm.region->region.start;
	}

	obj_offset = area->vm_pgoff - drm_vma_node_start(&mmo->vma_node);
	/* PTEs are revoked in obj->ops->put_pages() */
	err = remap_io_sg(area,
			  area->vm_start, area->vm_end - area->vm_start,
			  obj->mm.pages->sgl, obj_offset, iomap);

	if (area->vm_flags & VM_WRITE) {
		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
		obj->mm.dirty = true;
	}

	i915_gem_object_unpin_pages(obj);

out:
	i915_gem_object_unlock(obj);
	return i915_error_to_vmf_fault(err);
}

static void set_address_limits(struct vm_area_struct *area,
			       struct i915_vma *vma,
			       unsigned long obj_offset,
			       resource_size_t gmadr_start,
			       unsigned long *start_vaddr,
			       unsigned long *end_vaddr,
			       unsigned long *pfn)
{
	unsigned long vm_start, vm_end, vma_size; /* user's memory parameters */
	long start, end; /* memory boundaries */

	/*
	 * Let's move into the ">> PAGE_SHIFT"
	 * domain to be sure not to lose bits
	 */
	vm_start = area->vm_start >> PAGE_SHIFT;
	vm_end = area->vm_end >> PAGE_SHIFT;
	vma_size = vma->size >> PAGE_SHIFT;

	/*
	 * Calculate the memory boundaries by considering the offset
	 * provided by the user during memory mapping and the offset
	 * provided for the partial mapping.
	 */
	start = vm_start;
	start -= obj_offset;
	start += vma->gtt_view.partial.offset;
	end = start + vma_size;

	start = max_t(long, start, vm_start);
	end = min_t(long, end, vm_end);

	/* Let's move back into the "<< PAGE_SHIFT" domain */
	*start_vaddr = (unsigned long)start << PAGE_SHIFT;
	*end_vaddr = (unsigned long)end << PAGE_SHIFT;

	*pfn = (gmadr_start + i915_ggtt_offset(vma)) >> PAGE_SHIFT;
	*pfn += (*start_vaddr - area->vm_start) >> PAGE_SHIFT;
	*pfn += obj_offset - vma->gtt_view.partial.offset;
}

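/*
 * For example (hypothetical numbers, 4KiB pages): with obj_offset == 0, the
 * whole object mmapped at vm_start and a partial view of 256 pages starting
 * at object page 768, start/end become vm_start plus 768/1024 pages. The two
 * pfn corrections, (*start_vaddr - vm_start) and (obj_offset - partial.offset),
 * cancel out, so the first PTE points at the first aperture page of the view.
 */
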
static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
{
#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
	struct vm_area_struct *area = vmf->vma;
	struct i915_mmap_offset *mmo = area->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *i915 = to_i915(dev);
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	bool write = area->vm_flags & VM_WRITE;
	struct i915_gem_ww_ctx ww;
	unsigned long obj_offset;
	unsigned long start, end; /* memory boundaries */
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	pgoff_t page_offset;
	unsigned long pfn;
	int srcu;
	int ret;

	obj_offset = area->vm_pgoff - drm_vma_node_start(&mmo->vma_node);
	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
	page_offset += obj_offset;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	wakeref = intel_runtime_pm_get(rpm);

	i915_gem_ww_ctx_init(&ww, true);
retry:
	ret = i915_gem_object_lock(obj, &ww);
	if (ret)
		goto err_rpm;

	/* Sanity check that we allow writing into this object */
	if (i915_gem_object_is_readonly(obj) && write) {
		ret = -EFAULT;
		goto err_rpm;
	}

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err_rpm;

	ret = intel_gt_reset_lock_interruptible(ggtt->vm.gt, &srcu);
	if (ret)
		goto err_pages;

	/* Now pin it into the GTT as needed */
	vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
					  PIN_MAPPABLE |
					  PIN_NONBLOCK /* NOWARN */ |
					  PIN_NOEVICT);
	if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
		/* Use a partial view if it is bigger than available space */
		struct i915_gtt_view view =
			compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
		unsigned int flags;

		flags = PIN_MAPPABLE | PIN_NOSEARCH;
		if (view.type == I915_GTT_VIEW_NORMAL)
			flags |= PIN_NONBLOCK; /* avoid warnings for pinned */

		/*
		 * Userspace is now writing through an untracked VMA, abandon
		 * all hope that the hardware is able to track future writes.
		 */

		vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
		if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
			flags = PIN_MAPPABLE;
			view.type = I915_GTT_VIEW_PARTIAL;
			vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
		}

		/*
		 * The entire mappable GGTT is pinned? Unexpected!
		 * Try to evict the object we locked too, as normally we skip it
		 * due to lack of short term pinning inside execbuf.
		 */
		if (vma == ERR_PTR(-ENOSPC)) {
			ret = mutex_lock_interruptible(&ggtt->vm.mutex);
			if (!ret) {
				ret = i915_gem_evict_vm(&ggtt->vm, &ww, NULL);
				mutex_unlock(&ggtt->vm.mutex);
			}
			if (ret)
				goto err_reset;
			vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
		}
	}
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_reset;
	}

	/* Access to snoopable pages through the GTT is incoherent. */
	/*
	 * For objects created by userspace through GEM_CREATE with pat_index
	 * set by the set_pat extension, coherency is managed by userspace;
	 * make sure we don't fail handling the vm fault by calling
	 * i915_gem_object_has_cache_level(), which always returns true for
	 * such objects. Otherwise this helper function would fall back to
	 * checking whether the object is un-cached.
	 */
	if (!(i915_gem_object_has_cache_level(obj, I915_CACHE_NONE) ||
	      HAS_LLC(i915))) {
		ret = -EFAULT;
		goto err_unpin;
	}

	ret = i915_vma_pin_fence(vma);
	if (ret)
		goto err_unpin;

	/*
	 * Gather all the parameters needed to compute the user virtual
	 * address range (start/end) and the PFN (Page Frame Number) to map.
	 */
	set_address_limits(area, vma, obj_offset, ggtt->gmadr.start,
			   &start, &end, &pfn);

	/* Finally, remap it using the new GTT offset */
	ret = remap_io_mapping(area, start, pfn, end - start, &ggtt->iomap);
	if (ret)
		goto err_fence;

	assert_rpm_wakelock_held(rpm);

	/* Mark as being mmapped into userspace for later revocation */
	mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
	if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
		list_add(&obj->userfault_link, &to_gt(i915)->ggtt->userfault_list);
	mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);

	/* Track the mmo associated with the fenced vma */
	vma->mmo = mmo;

	if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
		intel_wakeref_auto(&i915->runtime_pm.userfault_wakeref,
				   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));

	if (write) {
		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
		i915_vma_set_ggtt_write(vma);
		obj->mm.dirty = true;
	}

err_fence:
	i915_vma_unpin_fence(vma);
err_unpin:
	__i915_vma_unpin(vma);
err_reset:
	intel_gt_reset_unlock(ggtt->vm.gt, srcu);
err_pages:
	i915_gem_object_unpin_pages(obj);
err_rpm:
	if (ret == -EDEADLK) {
		ret = i915_gem_ww_ctx_backoff(&ww);
		if (!ret)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	intel_runtime_pm_put(rpm, wakeref);
	return i915_error_to_vmf_fault(ret);
}

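/*
 * Note: vm_fault_gtt() above and vm_access() below both use the usual i915
 * ww-locking retry idiom: i915_gem_ww_ctx_init(&ww, true), take the object
 * lock under the ww context, and on -EDEADLK call i915_gem_ww_ctx_backoff()
 * and jump back to the retry label before i915_gem_ww_ctx_fini().
 */
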
static int
vm_access(struct vm_area_struct *area, unsigned long addr,
	  void *buf, int len, int write)
{
	struct i915_mmap_offset *mmo = area->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;
	struct i915_gem_ww_ctx ww;
	void *vaddr;
	int err = 0;

	if (i915_gem_object_is_readonly(obj) && write)
		return -EACCES;

	addr -= area->vm_start;
	if (range_overflows_t(u64, addr, len, obj->base.size))
		return -EINVAL;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (err)
		goto out;

	/* As this is primarily for debugging, let's focus on simplicity */
	vaddr = i915_gem_object_pin_map(obj, I915_MAP_FORCE_WC);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out;
	}

	if (write) {
		memcpy(vaddr + addr, buf, len);
		__i915_gem_object_flush_map(obj, addr, len);
	} else {
		memcpy(buf, vaddr + addr, len);
	}

	i915_gem_object_unpin_map(obj);
out:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	if (err)
		return err;

	return len;
}

void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	GEM_BUG_ON(!obj->userfault_count);

	for_each_ggtt_vma(vma, obj)
		i915_vma_revoke_mmap(vma);

	GEM_BUG_ON(obj->userfault_count);
}

/*
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly, if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by vm_fault_gtt().
 */
void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	intel_wakeref_t wakeref;

	/*
	 * Serialisation between user GTT access and our code depends upon
	 * revoking the CPU's PTE whilst the mutex is held. The next user
	 * pagefault then has to wait until we release the mutex.
	 *
	 * Note that RPM complicates somewhat by adding an additional
	 * requirement that operations to the GGTT be made holding the RPM
	 * wakeref.
	 */
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	mutex_lock(&to_gt(i915)->ggtt->vm.mutex);

	if (!obj->userfault_count)
		goto out;

	__i915_gem_object_release_mmap_gtt(obj);

	/*
	 * Ensure that the CPU's PTEs are revoked and there are no outstanding
	 * memory transactions from userspace before we return. The TLB
	 * flushing implied by changing the PTEs above *should* be sufficient;
	 * an extra barrier here just provides us with a bit of paranoid
	 * documentation about our requirement to serialise memory writes
	 * before touching registers / GSM.
	 */
	wmb();

out:
	mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

void i915_gem_object_runtime_pm_release_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct ttm_device *bdev = bo->bdev;

	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);

	/*
	 * We have exclusive access here via runtime suspend. All other callers
	 * must first grab the rpm wakeref.
	 */
	GEM_BUG_ON(!obj->userfault_count);
	list_del(&obj->userfault_link);
	obj->userfault_count = 0;
}

void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct i915_mmap_offset *mmo, *mn;

	if (obj->ops->unmap_virtual)
		obj->ops->unmap_virtual(obj);

	spin_lock(&obj->mmo.lock);
	rbtree_postorder_for_each_entry_safe(mmo, mn,
					     &obj->mmo.offsets, offset) {
		/*
		 * vma_node_unmap for GTT mmaps handled already in
		 * __i915_gem_object_release_mmap_gtt
		 */
		if (mmo->mmap_type == I915_MMAP_TYPE_GTT)
			continue;

		spin_unlock(&obj->mmo.lock);
		drm_vma_node_unmap(&mmo->vma_node,
				   obj->base.dev->anon_inode->i_mapping);
		spin_lock(&obj->mmo.lock);
	}
	spin_unlock(&obj->mmo.lock);
}

static struct i915_mmap_offset *
lookup_mmo(struct drm_i915_gem_object *obj,
	   enum i915_mmap_type mmap_type)
{
	struct rb_node *rb;

	spin_lock(&obj->mmo.lock);
	rb = obj->mmo.offsets.rb_node;
	while (rb) {
		struct i915_mmap_offset *mmo =
			rb_entry(rb, typeof(*mmo), offset);

		if (mmo->mmap_type == mmap_type) {
			spin_unlock(&obj->mmo.lock);
			return mmo;
		}

		if (mmo->mmap_type < mmap_type)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}
	spin_unlock(&obj->mmo.lock);

	return NULL;
}

static struct i915_mmap_offset *
insert_mmo(struct drm_i915_gem_object *obj, struct i915_mmap_offset *mmo)
{
	struct rb_node *rb, **p;

	spin_lock(&obj->mmo.lock);
	rb = NULL;
	p = &obj->mmo.offsets.rb_node;
	while (*p) {
		struct i915_mmap_offset *pos;

		rb = *p;
		pos = rb_entry(rb, typeof(*pos), offset);

		if (pos->mmap_type == mmo->mmap_type) {
			spin_unlock(&obj->mmo.lock);
			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
					      &mmo->vma_node);
			kfree(mmo);
			return pos;
		}

		if (pos->mmap_type < mmo->mmap_type)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&mmo->offset, rb, p);
	rb_insert_color(&mmo->offset, &obj->mmo.offsets);
	spin_unlock(&obj->mmo.lock);

	return mmo;
}

static struct i915_mmap_offset *
mmap_offset_attach(struct drm_i915_gem_object *obj,
		   enum i915_mmap_type mmap_type,
		   struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_mmap_offset *mmo;
	int err;

	GEM_BUG_ON(obj->ops->mmap_offset || obj->ops->mmap_ops);

	mmo = lookup_mmo(obj, mmap_type);
	if (mmo)
		goto out;

	mmo = kmalloc(sizeof(*mmo), GFP_KERNEL);
	if (!mmo)
		return ERR_PTR(-ENOMEM);

	mmo->obj = obj;
	mmo->mmap_type = mmap_type;
	drm_vma_node_reset(&mmo->vma_node);

	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
	if (likely(!err))
		goto insert;

	/* Attempt to reap some mmap space from dead objects */
	err = intel_gt_retire_requests_timeout(to_gt(i915), MAX_SCHEDULE_TIMEOUT,
					       NULL);
	if (err)
		goto err;

	i915_gem_drain_freed_objects(i915);
	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
	if (err)
		goto err;

insert:
	mmo = insert_mmo(obj, mmo);
	GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
out:
	if (file)
		drm_vma_node_allow_once(&mmo->vma_node, file);
	return mmo;

err:
	kfree(mmo);
	return ERR_PTR(err);
}

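/*
 * Resolve the requested mmap type to a fake offset for userspace. Objects
 * that provide obj->ops->mmap_offset (TTM-backed) supply their own offset and
 * accept only I915_MMAP_TYPE_FIXED; everything else gets an i915_mmap_offset
 * node for the requested caching mode, provided the backing store is
 * CPU-accessible (or the GTT aperture is used).
 */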
static int
__assign_mmap_offset(struct drm_i915_gem_object *obj,
		     enum i915_mmap_type mmap_type,
		     u64 *offset, struct drm_file *file)
{
	struct i915_mmap_offset *mmo;

	if (i915_gem_object_never_mmap(obj))
		return -ENODEV;

	if (obj->ops->mmap_offset) {
		if (mmap_type != I915_MMAP_TYPE_FIXED)
			return -ENODEV;

		*offset = obj->ops->mmap_offset(obj);
		return 0;
	}

	if (mmap_type == I915_MMAP_TYPE_FIXED)
		return -ENODEV;

	if (mmap_type != I915_MMAP_TYPE_GTT &&
	    !i915_gem_object_has_struct_page(obj) &&
	    !i915_gem_object_has_iomem(obj))
		return -ENODEV;

	mmo = mmap_offset_attach(obj, mmap_type, file);
	if (IS_ERR(mmo))
		return PTR_ERR(mmo);

	*offset = drm_vma_node_offset_addr(&mmo->vma_node);
	return 0;
}

static int
__assign_mmap_offset_handle(struct drm_file *file,
			    u32 handle,
			    enum i915_mmap_type mmap_type,
			    u64 *offset)
{
	struct drm_i915_gem_object *obj;
	int err;

	obj = i915_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (err)
		goto out_put;
	err = __assign_mmap_offset(obj, mmap_type, offset, file);
	i915_gem_object_unlock(obj);
out_put:
	i915_gem_object_put(obj);
	return err;
}

int
i915_gem_dumb_mmap_offset(struct drm_file *file,
			  struct drm_device *dev,
			  u32 handle,
			  u64 *offset)
{
	struct drm_i915_private *i915 = to_i915(dev);
	enum i915_mmap_type mmap_type;

	if (HAS_LMEM(to_i915(dev)))
		mmap_type = I915_MMAP_TYPE_FIXED;
	else if (pat_enabled())
		mmap_type = I915_MMAP_TYPE_WC;
	else if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
		return -ENODEV;
	else
		mmap_type = I915_MMAP_TYPE_GTT;

	return __assign_mmap_offset_handle(file, handle, mmap_type, offset);
}

/**
 * i915_gem_mmap_offset_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_mmap_offset *args = data;
	enum i915_mmap_type type;
	int err;

	/*
	 * Historically we failed to check args.pad and args.offset
	 * and so we cannot use those fields for user input and we cannot
	 * add -EINVAL for them as the ABI is fixed, i.e. old userspace
	 * may be feeding in garbage in those fields.
	 *
	 * if (args->pad) return -EINVAL; is verboten!
	 */

	err = i915_user_extensions(u64_to_user_ptr(args->extensions),
				   NULL, 0, NULL);
	if (err)
		return err;

	switch (args->flags) {
	case I915_MMAP_OFFSET_GTT:
		if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
			return -ENODEV;
		type = I915_MMAP_TYPE_GTT;
		break;

	case I915_MMAP_OFFSET_WC:
		if (!pat_enabled())
			return -ENODEV;
		type = I915_MMAP_TYPE_WC;
		break;

	case I915_MMAP_OFFSET_WB:
		type = I915_MMAP_TYPE_WB;
		break;

	case I915_MMAP_OFFSET_UC:
		if (!pat_enabled())
			return -ENODEV;
		type = I915_MMAP_TYPE_UC;
		break;

	case I915_MMAP_OFFSET_FIXED:
		type = I915_MMAP_TYPE_FIXED;
		break;

	default:
		return -EINVAL;
	}

	return __assign_mmap_offset_handle(file, args->handle, type, &args->offset);
}

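/*
 * Illustrative userspace flow for the offset-based path described above (not
 * part of the driver; uapi names from include/uapi/drm/i915_drm.h):
 *
 *	struct drm_i915_gem_mmap_offset arg = {
 *		.handle = handle,
 *		.flags = I915_MMAP_OFFSET_WC,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, arg.offset);
 *
 * where fd is the opened DRM device fd and handle/size come from the caller.
 */
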
static void vm_open(struct vm_area_struct *vma)
{
	struct i915_mmap_offset *mmo = vma->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;

	GEM_BUG_ON(!obj);
	i915_gem_object_get(obj);
}

static void vm_close(struct vm_area_struct *vma)
{
	struct i915_mmap_offset *mmo = vma->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;

	GEM_BUG_ON(!obj);
	i915_gem_object_put(obj);
}

static const struct vm_operations_struct vm_ops_gtt = {
	.fault = vm_fault_gtt,
	.access = vm_access,
	.open = vm_open,
	.close = vm_close,
};

static const struct vm_operations_struct vm_ops_cpu = {
	.fault = vm_fault_cpu,
	.access = vm_access,
	.open = vm_open,
	.close = vm_close,
};

static int singleton_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = file->private_data;

	cmpxchg(&i915->gem.mmap_singleton, file, NULL);
	drm_dev_put(&i915->drm);

	return 0;
}

static const struct file_operations singleton_fops = {
	.owner = THIS_MODULE,
	.release = singleton_release,
};

static struct file *mmap_singleton(struct drm_i915_private *i915)
{
	struct file *file;

	file = get_file_active(&i915->gem.mmap_singleton);
	if (file)
		return file;

	file = anon_inode_getfile("i915.gem", &singleton_fops, i915, O_RDWR);
	if (IS_ERR(file))
		return file;

	/* Everyone shares a single global address space */
	file->f_mapping = i915->drm.anon_inode->i_mapping;

	smp_store_mb(i915->gem.mmap_singleton, file);
	drm_dev_get(&i915->drm);

	return file;
}

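/*
 * Because every CPU mmap installed by i915_gem_mmap() uses this singleton as
 * vma->vm_file, all such VMAs share the drm anon_inode's address_space, which
 * is what lets i915_gem_object_release_mmap_offset() revoke an object's user
 * PTEs with a single drm_vma_node_unmap() call on that mapping.
 */
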
static int
i915_gem_object_mmap(struct drm_i915_gem_object *obj,
		     struct i915_mmap_offset *mmo,
		     struct vm_area_struct *vma)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct drm_device *dev = &i915->drm;
	struct file *anon;

	if (i915_gem_object_is_readonly(obj)) {
		if (vma->vm_flags & VM_WRITE) {
			i915_gem_object_put(obj);
			return -EINVAL;
		}
		vm_flags_clear(vma, VM_MAYWRITE);
	}

	anon = mmap_singleton(to_i915(dev));
	if (IS_ERR(anon)) {
		i915_gem_object_put(obj);
		return PTR_ERR(anon);
	}

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);

	/*
	 * We keep the ref on mmo->obj, not vm_file, but we require
	 * vma->vm_file->f_mapping, see vma_link(), for later revocation.
	 * Our userspace is accustomed to having per-file resource cleanup
	 * (i.e. contexts, objects and requests) on their close(fd), which
	 * requires avoiding extraneous references to their filp, hence why
	 * we prefer to use an anonymous file for their mmaps.
	 */
	vma_set_file(vma, anon);
	/* Drop the initial creation reference, the vma is now holding one. */
	fput(anon);

	if (obj->ops->mmap_ops) {
		vma->vm_page_prot = pgprot_decrypted(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = obj->ops->mmap_ops;
		vma->vm_private_data = obj->base.vma_node.driver_private;
		return 0;
	}

	vma->vm_private_data = mmo;

	switch (mmo->mmap_type) {
	case I915_MMAP_TYPE_WC:
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_FIXED:
		GEM_WARN_ON(1);
		fallthrough;
	case I915_MMAP_TYPE_WB:
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_UC:
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_GTT:
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_gtt;
		break;
	}
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	return 0;
}

/*
 * This overcomes the limitation in drm_gem_mmap's assignment of a
 * drm_gem_object as the vma->vm_private_data, since we need to be able
 * to resolve multiple mmap offsets which could be tied to a single gem
 * object.
 */
int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_vma_offset_node *node;
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_i915_gem_object *obj = NULL;
	struct i915_mmap_offset *mmo = NULL;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	rcu_read_lock();
	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_lookup_locked(dev->vma_offset_manager,
					    vma->vm_pgoff,
					    vma_pages(vma));
	if (node && drm_vma_node_is_allowed(node, priv)) {
		/*
		 * Skip 0-refcnted objects as they are in the process of being
		 * destroyed and will be invalid when the vma manager lock
		 * is released.
		 */
		if (!node->driver_private) {
			mmo = container_of(node, struct i915_mmap_offset, vma_node);
			obj = i915_gem_object_get_rcu(mmo->obj);

			GEM_BUG_ON(obj && obj->ops->mmap_ops);
		} else {
			obj = i915_gem_object_get_rcu
				(container_of(node, struct drm_i915_gem_object,
					      base.vma_node));

			GEM_BUG_ON(obj && !obj->ops->mmap_ops);
		}
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
	rcu_read_unlock();
	if (!obj)
		return node ? -EACCES : -EINVAL;

	return i915_gem_object_mmap(obj, mmo, vma);
}

int i915_gem_fb_mmap(struct drm_i915_gem_object *obj, struct vm_area_struct *vma)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct drm_device *dev = &i915->drm;
	struct i915_mmap_offset *mmo = NULL;
	enum i915_mmap_type mmap_type;
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	/* handle ttm object */
	if (obj->ops->mmap_ops) {
		/*
		 * The ttm fault handler, ttm_bo_vm_fault_reserved(), uses the
		 * fake offset to calculate the page offset, so set that up.
		 */
		vma->vm_pgoff += drm_vma_node_start(&obj->base.vma_node);
	} else {
		/* handle stolen and smem objects */
		mmap_type = i915_ggtt_has_aperture(ggtt) ? I915_MMAP_TYPE_GTT : I915_MMAP_TYPE_WC;
		mmo = mmap_offset_attach(obj, mmap_type, NULL);
		if (IS_ERR(mmo))
			return PTR_ERR(mmo);

		vma->vm_pgoff += drm_vma_node_start(&mmo->vma_node);
	}

	/*
	 * When we install vm_ops for mmap we are too late for
	 * the vm_ops->open() which increases the ref_count of
	 * this obj; it then gets decreased by the vm_ops->close().
	 * To balance this, increase the obj ref_count here.
	 */
	obj = i915_gem_object_get(obj);
	return i915_gem_object_mmap(obj, mmo, vma);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_mman.c"
#endif