/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/anon_inodes.h>
#include <linux/mman.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>

#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_gem_mman.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
#include "i915_vma.h"

static inline bool
__vma_matches(struct vm_area_struct *vma, struct file *filp,
	      unsigned long addr, unsigned long size)
{
	if (vma->vm_file != filp)
		return false;

	return vma->vm_start == addr &&
	       (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
}

/**
 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
 *			 it is mapped to.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 *
 * IMPORTANT:
 *
 * DRM driver writers who look at this function as an example for how to do GEM
 * mmap support, please don't implement mmap support like here. The modern way
 * to implement DRM mmap support is with an mmap offset ioctl (like
 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
 * That way debug tooling like valgrind will understand what's going on; hiding
 * the mmap call in a driver-private ioctl will break that. The i915 driver only
 * does cpu mmaps this way because we didn't know better.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_mmap *args = data;
	struct drm_i915_gem_object *obj;
	unsigned long addr;

	/*
	 * mmap ioctl is disallowed for all discrete platforms,
	 * and for all platforms with GRAPHICS_VER > 12.
	 */
	if (IS_DGFX(i915) || GRAPHICS_VER(i915) > 12)
		return -EOPNOTSUPP;

	if (args->flags & ~(I915_MMAP_WC))
		return -EINVAL;

	if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
		return -ENODEV;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->base.filp) {
		addr = -ENXIO;
		goto err;
	}

	if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
		addr = -EINVAL;
		goto err;
	}

	addr = vm_mmap(obj->base.filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	if (IS_ERR_VALUE(addr))
		goto err;

	if (args->flags & I915_MMAP_WC) {
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;

		if (mmap_write_lock_killable(mm)) {
			addr = -EINTR;
			goto err;
		}
		vma = find_vma(mm, addr);
		if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
			vma->vm_page_prot =
				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		else
			addr = -ENOMEM;
		mmap_write_unlock(mm);
		if (IS_ERR_VALUE(addr))
			goto err;
	}
	i915_gem_object_put(obj);

	args->addr_ptr = (u64)addr;
	return 0;

err:
	i915_gem_object_put(obj);
	return addr;
}

static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
}

/**
 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
 *
 * A history of the GTT mmap interface:
 *
 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
 *     be aligned and suitable for fencing, and still fit into the available
 *     mappable space left by the pinned display objects. A classic problem
 *     we called the page-fault-of-doom where we would ping-pong between
 *     two objects that could not fit inside the GTT and so the memcpy
 *     would page one object in at the expense of the other between every
 *     single byte.
 *
 * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
 *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
 *     object is too large for the available space (or simply too large
 *     for the mappable aperture!), a view is created instead and faulted
 *     into userspace. (This view is aligned and sized appropriately for
 *     fenced access.)
 *
 * 2 - Recognise WC as a separate cache domain so that we can flush the
 *     delayed writes via GTT before performing direct access via WC.
 *
 * 3 - Remove implicit set-domain(GTT) and synchronisation on initial
 *     pagefault; swapin remains transparent.
 *
 * 4 - Support multiple fault handlers per object depending on object's
 *     backing storage (a.k.a. MMAP_OFFSET).
 *
 * Restrictions:
 *
 *  * snoopable objects cannot be accessed via the GTT. Doing so can cause
 *    machine hangs on some architectures, corruption on others. An attempt
 *    to service a GTT page fault from a snoopable object will generate a
 *    SIGBUS.
 *
 *  * the object must be able to fit into RAM (physical memory, though not
 *    limited to the mappable aperture).
 *
 *
 * Caveats:
 *
 *  * a new GTT page fault will synchronize rendering from the GPU and flush
 *    all data to system memory. Subsequent access will not be synchronized.
 *
 *  * all mappings are revoked on runtime device suspend.
 *
 *  * there are only 8, 16 or 32 fence registers to share between all users
 *    (older machines require a fence register for display and blitter access
 *    as well). Contention of the fence registers will cause the previous users
 *    to be unmapped and any new access will generate new page faults.
183 * 184 * * running out of memory while servicing a fault may generate a SIGBUS, 185 * rather than the expected SIGSEGV. 186 */ 187 int i915_gem_mmap_gtt_version(void) 188 { 189 return 4; 190 } 191 192 static inline struct i915_ggtt_view 193 compute_partial_view(const struct drm_i915_gem_object *obj, 194 pgoff_t page_offset, 195 unsigned int chunk) 196 { 197 struct i915_ggtt_view view; 198 199 if (i915_gem_object_is_tiled(obj)) 200 chunk = roundup(chunk, tile_row_pages(obj) ?: 1); 201 202 view.type = I915_GGTT_VIEW_PARTIAL; 203 view.partial.offset = rounddown(page_offset, chunk); 204 view.partial.size = 205 min_t(unsigned int, chunk, 206 (obj->base.size >> PAGE_SHIFT) - view.partial.offset); 207 208 /* If the partial covers the entire object, just create a normal VMA. */ 209 if (chunk >= obj->base.size >> PAGE_SHIFT) 210 view.type = I915_GGTT_VIEW_NORMAL; 211 212 return view; 213 } 214 215 static vm_fault_t i915_error_to_vmf_fault(int err) 216 { 217 switch (err) { 218 default: 219 WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err); 220 fallthrough; 221 case -EIO: /* shmemfs failure from swap device */ 222 case -EFAULT: /* purged object */ 223 case -ENODEV: /* bad object, how did you get here! */ 224 case -ENXIO: /* unable to access backing store (on device) */ 225 return VM_FAULT_SIGBUS; 226 227 case -ENOMEM: /* our allocation failure */ 228 return VM_FAULT_OOM; 229 230 case 0: 231 case -EAGAIN: 232 case -ENOSPC: /* transient failure to evict? */ 233 case -ERESTARTSYS: 234 case -EINTR: 235 case -EBUSY: 236 /* 237 * EBUSY is ok: this just means that another thread 238 * already did the job. 239 */ 240 return VM_FAULT_NOPAGE; 241 } 242 } 243 244 static vm_fault_t vm_fault_cpu(struct vm_fault *vmf) 245 { 246 struct vm_area_struct *area = vmf->vma; 247 struct i915_mmap_offset *mmo = area->vm_private_data; 248 struct drm_i915_gem_object *obj = mmo->obj; 249 resource_size_t iomap; 250 int err; 251 252 /* Sanity check that we allow writing into this object */ 253 if (unlikely(i915_gem_object_is_readonly(obj) && 254 area->vm_flags & VM_WRITE)) 255 return VM_FAULT_SIGBUS; 256 257 if (i915_gem_object_lock_interruptible(obj, NULL)) 258 return VM_FAULT_NOPAGE; 259 260 err = i915_gem_object_pin_pages(obj); 261 if (err) 262 goto out; 263 264 iomap = -1; 265 if (!i915_gem_object_has_struct_page(obj)) { 266 iomap = obj->mm.region->iomap.base; 267 iomap -= obj->mm.region->region.start; 268 } 269 270 /* PTEs are revoked in obj->ops->put_pages() */ 271 err = remap_io_sg(area, 272 area->vm_start, area->vm_end - area->vm_start, 273 obj->mm.pages->sgl, iomap); 274 275 if (area->vm_flags & VM_WRITE) { 276 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); 277 obj->mm.dirty = true; 278 } 279 280 i915_gem_object_unpin_pages(obj); 281 282 out: 283 i915_gem_object_unlock(obj); 284 return i915_error_to_vmf_fault(err); 285 } 286 287 static vm_fault_t vm_fault_gtt(struct vm_fault *vmf) 288 { 289 #define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT) 290 struct vm_area_struct *area = vmf->vma; 291 struct i915_mmap_offset *mmo = area->vm_private_data; 292 struct drm_i915_gem_object *obj = mmo->obj; 293 struct drm_device *dev = obj->base.dev; 294 struct drm_i915_private *i915 = to_i915(dev); 295 struct intel_runtime_pm *rpm = &i915->runtime_pm; 296 struct i915_ggtt *ggtt = &i915->ggtt; 297 bool write = area->vm_flags & VM_WRITE; 298 struct i915_gem_ww_ctx ww; 299 intel_wakeref_t wakeref; 300 struct i915_vma *vma; 301 pgoff_t page_offset; 302 int srcu; 303 int ret; 304 305 /* We don't use vmf->pgoff since that has the 
	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	wakeref = intel_runtime_pm_get(rpm);

	i915_gem_ww_ctx_init(&ww, true);
retry:
	ret = i915_gem_object_lock(obj, &ww);
	if (ret)
		goto err_rpm;

	/* Sanity check that we allow writing into this object */
	if (i915_gem_object_is_readonly(obj) && write) {
		ret = -EFAULT;
		goto err_rpm;
	}

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err_rpm;

	ret = intel_gt_reset_trylock(ggtt->vm.gt, &srcu);
	if (ret)
		goto err_pages;

	/* Now pin it into the GTT as needed */
	vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
					  PIN_MAPPABLE |
					  PIN_NONBLOCK /* NOWARN */ |
					  PIN_NOEVICT);
	if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
		/* Use a partial view if it is bigger than available space */
		struct i915_ggtt_view view =
			compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
		unsigned int flags;

		flags = PIN_MAPPABLE | PIN_NOSEARCH;
		if (view.type == I915_GGTT_VIEW_NORMAL)
			flags |= PIN_NONBLOCK; /* avoid warnings for pinned */

		/*
		 * Userspace is now writing through an untracked VMA, abandon
		 * all hope that the hardware is able to track future writes.
		 */

		vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
		if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
			flags = PIN_MAPPABLE;
			view.type = I915_GGTT_VIEW_PARTIAL;
			vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
		}

		/* The entire mappable GGTT is pinned? Unexpected! */
		GEM_BUG_ON(vma == ERR_PTR(-ENOSPC));
	}
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_reset;
	}

	/* Access to snoopable pages through the GTT is incoherent.
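	 * Servicing such a fault can hang or corrupt the machine (see the
	 * restrictions listed for i915_gem_mmap_gtt_version() above), so
	 * reject it and raise SIGBUS via -EFAULT instead.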
	 */
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) {
		ret = -EFAULT;
		goto err_unpin;
	}

	ret = i915_vma_pin_fence(vma);
	if (ret)
		goto err_unpin;

	/* Finally, remap it using the new GTT offset */
	ret = remap_io_mapping(area,
			       area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
			       (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
			       min_t(u64, vma->size, area->vm_end - area->vm_start),
			       &ggtt->iomap);
	if (ret)
		goto err_fence;

	assert_rpm_wakelock_held(rpm);

	/* Mark as being mmapped into userspace for later revocation */
	mutex_lock(&i915->ggtt.vm.mutex);
	if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
		list_add(&obj->userfault_link, &i915->ggtt.userfault_list);
	mutex_unlock(&i915->ggtt.vm.mutex);

	/* Track the mmo associated with the fenced vma */
	vma->mmo = mmo;

	if (IS_ACTIVE(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND))
		intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
				   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));

	if (write) {
		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
		i915_vma_set_ggtt_write(vma);
		obj->mm.dirty = true;
	}

err_fence:
	i915_vma_unpin_fence(vma);
err_unpin:
	__i915_vma_unpin(vma);
err_reset:
	intel_gt_reset_unlock(ggtt->vm.gt, srcu);
err_pages:
	i915_gem_object_unpin_pages(obj);
err_rpm:
	if (ret == -EDEADLK) {
		ret = i915_gem_ww_ctx_backoff(&ww);
		if (!ret)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	intel_runtime_pm_put(rpm, wakeref);
	return i915_error_to_vmf_fault(ret);
}

static int
vm_access(struct vm_area_struct *area, unsigned long addr,
	  void *buf, int len, int write)
{
	struct i915_mmap_offset *mmo = area->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;
	struct i915_gem_ww_ctx ww;
	void *vaddr;
	int err = 0;

	if (i915_gem_object_is_readonly(obj) && write)
		return -EACCES;

	addr -= area->vm_start;
	if (addr >= obj->base.size)
		return -EINVAL;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (err)
		goto out;

	/* As this is primarily for debugging, let's focus on simplicity */
	vaddr = i915_gem_object_pin_map(obj, I915_MAP_FORCE_WC);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out;
	}

	if (write) {
		memcpy(vaddr + addr, buf, len);
		__i915_gem_object_flush_map(obj, addr, len);
	} else {
		memcpy(buf, vaddr + addr, len);
	}

	i915_gem_object_unpin_map(obj);
out:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	if (err)
		return err;

	return len;
}

void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	GEM_BUG_ON(!obj->userfault_count);

	for_each_ggtt_vma(vma, obj)
		i915_vma_revoke_mmap(vma);

	GEM_BUG_ON(obj->userfault_count);
}

/*
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by vm_fault_gtt().
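 * (Objects with live GTT mmaps are tracked on i915->ggtt.userfault_list by
 * vm_fault_gtt(), which is also how they are found when all mappings are
 * revoked for runtime suspend.)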
 */
void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	intel_wakeref_t wakeref;

	/*
	 * Serialisation between user GTT access and our code depends upon
	 * revoking the CPU's PTE whilst the mutex is held. The next user
	 * pagefault then has to wait until we release the mutex.
	 *
	 * Note that RPM complicates matters somewhat by adding an additional
	 * requirement that operations to the GGTT be made holding the RPM
	 * wakeref.
	 */
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	mutex_lock(&i915->ggtt.vm.mutex);

	if (!obj->userfault_count)
		goto out;

	__i915_gem_object_release_mmap_gtt(obj);

	/*
	 * Ensure that the CPU's PTEs are revoked and there are no outstanding
	 * memory transactions from userspace before we return. The TLB
	 * flushing implied by changing the PTEs above *should* be sufficient;
	 * an extra barrier here just provides us with a bit of paranoid
	 * documentation about our requirement to serialise memory writes
	 * before touching registers / GSM.
	 */
	wmb();

out:
	mutex_unlock(&i915->ggtt.vm.mutex);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct i915_mmap_offset *mmo, *mn;

	spin_lock(&obj->mmo.lock);
	rbtree_postorder_for_each_entry_safe(mmo, mn,
					     &obj->mmo.offsets, offset) {
		/*
		 * vma_node_unmap for GTT mmaps handled already in
		 * __i915_gem_object_release_mmap_gtt
		 */
		if (mmo->mmap_type == I915_MMAP_TYPE_GTT)
			continue;

		spin_unlock(&obj->mmo.lock);
		drm_vma_node_unmap(&mmo->vma_node,
				   obj->base.dev->anon_inode->i_mapping);
		spin_lock(&obj->mmo.lock);
	}
	spin_unlock(&obj->mmo.lock);
}

static struct i915_mmap_offset *
lookup_mmo(struct drm_i915_gem_object *obj,
	   enum i915_mmap_type mmap_type)
{
	struct rb_node *rb;

	spin_lock(&obj->mmo.lock);
	rb = obj->mmo.offsets.rb_node;
	while (rb) {
		struct i915_mmap_offset *mmo =
			rb_entry(rb, typeof(*mmo), offset);

		if (mmo->mmap_type == mmap_type) {
			spin_unlock(&obj->mmo.lock);
			return mmo;
		}

		if (mmo->mmap_type < mmap_type)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}
	spin_unlock(&obj->mmo.lock);

	return NULL;
}

static struct i915_mmap_offset *
insert_mmo(struct drm_i915_gem_object *obj, struct i915_mmap_offset *mmo)
{
	struct rb_node *rb, **p;

	spin_lock(&obj->mmo.lock);
	rb = NULL;
	p = &obj->mmo.offsets.rb_node;
	while (*p) {
		struct i915_mmap_offset *pos;

		rb = *p;
		pos = rb_entry(rb, typeof(*pos), offset);

		if (pos->mmap_type == mmo->mmap_type) {
			spin_unlock(&obj->mmo.lock);
			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
					      &mmo->vma_node);
			kfree(mmo);
			return pos;
		}

		if (pos->mmap_type < mmo->mmap_type)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&mmo->offset, rb, p);
	rb_insert_color(&mmo->offset, &obj->mmo.offsets);
	spin_unlock(&obj->mmo.lock);

	return mmo;
}

static struct i915_mmap_offset *
mmap_offset_attach(struct drm_i915_gem_object *obj,
		   enum i915_mmap_type mmap_type,
		   struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_mmap_offset *mmo;
	int err;

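	/* Reuse an offset node already attached for this mmap type, if any. */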
	mmo = lookup_mmo(obj, mmap_type);
	if (mmo)
		goto out;

	mmo = kmalloc(sizeof(*mmo), GFP_KERNEL);
	if (!mmo)
		return ERR_PTR(-ENOMEM);

	mmo->obj = obj;
	mmo->mmap_type = mmap_type;
	drm_vma_node_reset(&mmo->vma_node);

	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
	if (likely(!err))
		goto insert;

	/* Attempt to reap some mmap space from dead objects */
	err = intel_gt_retire_requests_timeout(&i915->gt, MAX_SCHEDULE_TIMEOUT);
	if (err)
		goto err;

	i915_gem_drain_freed_objects(i915);
	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
	if (err)
		goto err;

insert:
	mmo = insert_mmo(obj, mmo);
	GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
out:
	if (file)
		drm_vma_node_allow(&mmo->vma_node, file);
	return mmo;

err:
	kfree(mmo);
	return ERR_PTR(err);
}

static int
__assign_mmap_offset(struct drm_file *file,
		     u32 handle,
		     enum i915_mmap_type mmap_type,
		     u64 *offset)
{
	struct drm_i915_gem_object *obj;
	struct i915_mmap_offset *mmo;
	int err;

	obj = i915_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	if (i915_gem_object_never_mmap(obj)) {
		err = -ENODEV;
		goto out;
	}

	if (mmap_type != I915_MMAP_TYPE_GTT &&
	    !i915_gem_object_has_struct_page(obj) &&
	    !i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM)) {
		err = -ENODEV;
		goto out;
	}

	mmo = mmap_offset_attach(obj, mmap_type, file);
	if (IS_ERR(mmo)) {
		err = PTR_ERR(mmo);
		goto out;
	}

	*offset = drm_vma_node_offset_addr(&mmo->vma_node);
	err = 0;
out:
	i915_gem_object_put(obj);
	return err;
}

int
i915_gem_dumb_mmap_offset(struct drm_file *file,
			  struct drm_device *dev,
			  u32 handle,
			  u64 *offset)
{
	enum i915_mmap_type mmap_type;

	if (boot_cpu_has(X86_FEATURE_PAT))
		mmap_type = I915_MMAP_TYPE_WC;
	else if (!i915_ggtt_has_aperture(&to_i915(dev)->ggtt))
		return -ENODEV;
	else
		mmap_type = I915_MMAP_TYPE_GTT;

	return __assign_mmap_offset(file, handle, mmap_type, offset);
}

/**
 * i915_gem_mmap_offset_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in i915_gem_mmap() below, which will set
 * things up so we can get faults in the handlers above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_mmap_offset *args = data;
	enum i915_mmap_type type;
	int err;

	/*
	 * Historically we failed to check args.pad and args.offset
	 * and so we cannot use those fields for user input and we cannot
	 * add -EINVAL for them as the ABI is fixed, i.e. old userspace
	 * may be feeding in garbage in those fields.
	 *
	 * if (args->pad) return -EINVAL; is verboten!
756 */ 757 758 err = i915_user_extensions(u64_to_user_ptr(args->extensions), 759 NULL, 0, NULL); 760 if (err) 761 return err; 762 763 switch (args->flags) { 764 case I915_MMAP_OFFSET_GTT: 765 if (!i915_ggtt_has_aperture(&i915->ggtt)) 766 return -ENODEV; 767 type = I915_MMAP_TYPE_GTT; 768 break; 769 770 case I915_MMAP_OFFSET_WC: 771 if (!boot_cpu_has(X86_FEATURE_PAT)) 772 return -ENODEV; 773 type = I915_MMAP_TYPE_WC; 774 break; 775 776 case I915_MMAP_OFFSET_WB: 777 type = I915_MMAP_TYPE_WB; 778 break; 779 780 case I915_MMAP_OFFSET_UC: 781 if (!boot_cpu_has(X86_FEATURE_PAT)) 782 return -ENODEV; 783 type = I915_MMAP_TYPE_UC; 784 break; 785 786 default: 787 return -EINVAL; 788 } 789 790 return __assign_mmap_offset(file, args->handle, type, &args->offset); 791 } 792 793 static void vm_open(struct vm_area_struct *vma) 794 { 795 struct i915_mmap_offset *mmo = vma->vm_private_data; 796 struct drm_i915_gem_object *obj = mmo->obj; 797 798 GEM_BUG_ON(!obj); 799 i915_gem_object_get(obj); 800 } 801 802 static void vm_close(struct vm_area_struct *vma) 803 { 804 struct i915_mmap_offset *mmo = vma->vm_private_data; 805 struct drm_i915_gem_object *obj = mmo->obj; 806 807 GEM_BUG_ON(!obj); 808 i915_gem_object_put(obj); 809 } 810 811 static const struct vm_operations_struct vm_ops_gtt = { 812 .fault = vm_fault_gtt, 813 .access = vm_access, 814 .open = vm_open, 815 .close = vm_close, 816 }; 817 818 static const struct vm_operations_struct vm_ops_cpu = { 819 .fault = vm_fault_cpu, 820 .access = vm_access, 821 .open = vm_open, 822 .close = vm_close, 823 }; 824 825 static int singleton_release(struct inode *inode, struct file *file) 826 { 827 struct drm_i915_private *i915 = file->private_data; 828 829 cmpxchg(&i915->gem.mmap_singleton, file, NULL); 830 drm_dev_put(&i915->drm); 831 832 return 0; 833 } 834 835 static const struct file_operations singleton_fops = { 836 .owner = THIS_MODULE, 837 .release = singleton_release, 838 }; 839 840 static struct file *mmap_singleton(struct drm_i915_private *i915) 841 { 842 struct file *file; 843 844 rcu_read_lock(); 845 file = READ_ONCE(i915->gem.mmap_singleton); 846 if (file && !get_file_rcu(file)) 847 file = NULL; 848 rcu_read_unlock(); 849 if (file) 850 return file; 851 852 file = anon_inode_getfile("i915.gem", &singleton_fops, i915, O_RDWR); 853 if (IS_ERR(file)) 854 return file; 855 856 /* Everyone shares a single global address space */ 857 file->f_mapping = i915->drm.anon_inode->i_mapping; 858 859 smp_store_mb(i915->gem.mmap_singleton, file); 860 drm_dev_get(&i915->drm); 861 862 return file; 863 } 864 865 /* 866 * This overcomes the limitation in drm_gem_mmap's assignment of a 867 * drm_gem_object as the vma->vm_private_data. Since we need to 868 * be able to resolve multiple mmap offsets which could be tied 869 * to a single gem object. 
 */
int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_vma_offset_node *node;
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_i915_gem_object *obj = NULL;
	struct i915_mmap_offset *mmo = NULL;
	struct file *anon;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	rcu_read_lock();
	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (node && drm_vma_node_is_allowed(node, priv)) {
		/*
		 * Skip 0-refcnted objects as they are in the process of being
		 * destroyed and will be invalid when the vma manager lock
		 * is released.
		 */
		mmo = container_of(node, struct i915_mmap_offset, vma_node);
		obj = i915_gem_object_get_rcu(mmo->obj);
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
	rcu_read_unlock();
	if (!obj)
		return node ? -EACCES : -EINVAL;

	if (i915_gem_object_is_readonly(obj)) {
		if (vma->vm_flags & VM_WRITE) {
			i915_gem_object_put(obj);
			return -EINVAL;
		}
		vma->vm_flags &= ~VM_MAYWRITE;
	}

	anon = mmap_singleton(to_i915(dev));
	if (IS_ERR(anon)) {
		i915_gem_object_put(obj);
		return PTR_ERR(anon);
	}

	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data = mmo;

	/*
	 * We keep the ref on mmo->obj, not vm_file, but we require
	 * vma->vm_file->f_mapping, see vma_link(), for later revocation.
	 * Our userspace is accustomed to having per-file resource cleanup
	 * (i.e. contexts, objects and requests) on their close(fd), which
	 * requires avoiding extraneous references to their filp, hence we
	 * prefer to use an anonymous file for their mmaps.
	 */
	vma_set_file(vma, anon);
	/* Drop the initial creation reference; the vma now holds one. */
	fput(anon);

	switch (mmo->mmap_type) {
	case I915_MMAP_TYPE_WC:
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_WB:
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_UC:
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_GTT:
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_gtt;
		break;
	}
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_mman.c"
#endif