/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/sched/mm.h>
#include <drm/drm_gem.h>

#include "display/intel_frontbuffer.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_tiling.h"
#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_gem_evict.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
#include "i915_vma.h"

static struct kmem_cache *slab_vmas;

static struct i915_vma *i915_vma_alloc(void)
{
	return kmem_cache_zalloc(slab_vmas, GFP_KERNEL);
}

static void i915_vma_free(struct i915_vma *vma)
{
	return kmem_cache_free(slab_vmas, vma);
}

#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
	char buf[512];

	if (!vma->node.stack) {
		DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
				 vma->node.start, vma->node.size, reason);
		return;
	}

	stack_depot_snprint(vma->node.stack, buf, sizeof(buf), 0);
	DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
			 vma->node.start, vma->node.size, reason, buf);
}

#else

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
}

#endif

static inline struct i915_vma *active_to_vma(struct i915_active *ref)
{
	return container_of(ref, typeof(struct i915_vma), active);
}

static int __i915_vma_active(struct i915_active *ref)
{
	return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
}

static void __i915_vma_retire(struct i915_active *ref)
{
	i915_vma_put(active_to_vma(ref));
}

static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct i915_vma *pos = ERR_PTR(-E2BIG);
	struct i915_vma *vma;
	struct rb_node *rb, **p;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);

	vma = i915_vma_alloc();
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	kref_init(&vma->ref);
	vma->vm = i915_vm_get(vm);
	vma->ops = &vm->vma_ops;
	vma->obj = obj;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire, 0);

	/* Declare ourselves safe for use inside shrinkers */
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		might_lock(&vma->active.mutex);
		fs_reclaim_release(GFP_KERNEL);
	}

	INIT_LIST_HEAD(&vma->closed_link);

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size > obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		} else if (view->type == I915_GGTT_VIEW_REMAPPED) {
			vma->size = intel_remapped_info_size(&view->remapped);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	spin_lock(&obj->vma.lock);

	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_unlock;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size || /* overflow */
			     vma->fence_size > vm->total))
			goto err_unlock;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		__set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
	}

	rb = NULL;
	p = &obj->vma.tree.rb_node;
	while (*p) {
		long cmp;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);

		/*
		 * If the view already exists in the tree, another thread
		 * already created a matching vma, so return the older instance
		 * and dispose of ours.
		 */
		cmp = i915_vma_compare(pos, vm, view);
		if (cmp < 0)
			p = &rb->rb_right;
		else if (cmp > 0)
			p = &rb->rb_left;
		else
			goto err_unlock;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma.tree);

	if (i915_vma_is_ggtt(vma))
		/*
		 * We put the GGTT vma at the start of the vma-list, followed
		 * by the ppGGTT vma.
		 * This allows us to break early when
		 * iterating over only the GGTT vma for an object, see
		 * for_each_ggtt_vma()
		 */
		list_add(&vma->obj_link, &obj->vma.list);
	else
		list_add_tail(&vma->obj_link, &obj->vma.list);

	spin_unlock(&obj->vma.lock);

	return vma;

err_unlock:
	spin_unlock(&obj->vma.lock);
err_vma:
	i915_vm_put(vm);
	i915_vma_free(vma);
	return pos;
}

static struct i915_vma *
i915_vma_lookup(struct drm_i915_gem_object *obj,
		struct i915_address_space *vm,
		const struct i915_ggtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma.tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	GEM_BUG_ON(view && !i915_is_ggtt_or_dpt(vm));
	GEM_BUG_ON(!atomic_read(&vm->open));

	spin_lock(&obj->vma.lock);
	vma = i915_vma_lookup(obj, vm, view);
	spin_unlock(&obj->vma.lock);

	/* vma_create() will resolve the race if another creates the vma */
	if (unlikely(!vma))
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	return vma;
}

struct i915_vma_work {
	struct dma_fence_work base;
	struct i915_address_space *vm;
	struct i915_vm_pt_stash stash;
	struct i915_vma *vma;
	struct drm_i915_gem_object *pinned;
	struct i915_sw_dma_fence_cb cb;
	enum i915_cache_level cache_level;
	unsigned int flags;
};

static void __vma_bind(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
	struct i915_vma *vma = vw->vma;

	vma->ops->bind_vma(vw->vm, &vw->stash,
			   vma, vw->cache_level, vw->flags);
}

static void __vma_release(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);

	if (vw->pinned) {
		__i915_gem_object_unpin_pages(vw->pinned);
		i915_gem_object_put(vw->pinned);
	}

	i915_vm_free_pt_stash(vw->vm, &vw->stash);
	i915_vm_put(vw->vm);
}

static const struct dma_fence_work_ops bind_ops = {
	.name = "bind",
	.work = __vma_bind,
	.release = __vma_release,
};

struct i915_vma_work *i915_vma_work(void)
{
	struct i915_vma_work *vw;

	vw = kzalloc(sizeof(*vw), GFP_KERNEL);
	if (!vw)
		return NULL;

	dma_fence_work_init(&vw->base, &bind_ops);
	vw->base.dma.error = -EAGAIN; /* disable the worker by default */

	return vw;
}
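
/*
 * Illustrative sketch (not compiled into the driver): the usual way the
 * helpers above fit together is the lookup-then-pin flow used by callers
 * such as execbuf. The surrounding locking/error handling of a real caller
 * is elided and the flow below is only an assumption of typical usage; the
 * calls themselves are the ones implemented in this file.
 *
 *	struct i915_vma *vma;
 *	int err;
 *
 *	vma = i915_vma_instance(obj, vm, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 *	err = i915_vma_pin_ww(vma, ww, 0, 0, PIN_USER);
 *	if (err)
 *		return err;
 *
 *	... use vma->node.start as the GPU virtual address ...
 *
 *	i915_vma_unpin(vma);
 */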
int i915_vma_wait_for_bind(struct i915_vma *vma)
{
	int err = 0;

	if (rcu_access_pointer(vma->active.excl.fence)) {
		struct dma_fence *fence;

		rcu_read_lock();
		fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
		rcu_read_unlock();
		if (fence) {
			err = dma_fence_wait(fence, true);
			dma_fence_put(fence);
		}
	}

	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
static int i915_vma_verify_bind_complete(struct i915_vma *vma)
{
	struct dma_fence *fence = i915_active_fence_get(&vma->active.excl);
	int err;

	if (!fence)
		return 0;

	if (dma_fence_is_signaled(fence))
		err = fence->error;
	else
		err = -EBUSY;

	dma_fence_put(fence);

	return err;
}
#else
#define i915_vma_verify_bind_complete(_vma) 0
#endif

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 * @work: preallocated worker for allocating and binding the PTE
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma,
		  enum i915_cache_level cache_level,
		  u32 flags,
		  struct i915_vma_work *work)
{
	u32 bind_flags;
	u32 vma_flags;

	lockdep_assert_held(&vma->vm->mutex);
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > vma->node.size);

	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
					      vma->node.size,
					      vma->vm->total)))
		return -ENODEV;

	if (GEM_DEBUG_WARN_ON(!flags))
		return -EINVAL;

	bind_flags = flags;
	bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	vma_flags = atomic_read(&vma->flags);
	vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	bind_flags &= ~vma_flags;
	if (bind_flags == 0)
		return 0;

	GEM_BUG_ON(!atomic_read(&vma->pages_count));

	trace_i915_vma_bind(vma, bind_flags);
	if (work && bind_flags & vma->vm->bind_async_flags) {
		struct dma_fence *prev;

		work->vma = vma;
		work->cache_level = cache_level;
		work->flags = bind_flags;

		/*
		 * Note we only want to chain up to the migration fence on
		 * the pages (not the object itself). As we don't track that
		 * yet, we have to use the exclusive fence instead.
		 *
		 * Also note that we do not want to track the async vma as
		 * part of the obj->resv->excl_fence as it only affects
		 * execution and not content or object's backing store lifetime.
		 */
		prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
		if (prev) {
			__i915_sw_fence_await_dma_fence(&work->base.chain,
							prev,
							&work->cb);
			dma_fence_put(prev);
		}

		work->base.dma.error = 0; /* enable the queue_work() */

		__i915_gem_object_pin_pages(vma->obj);
		work->pinned = i915_gem_object_get(vma->obj);
	} else {
		if (vma->obj) {
			int ret;

			ret = i915_gem_object_wait_moving_fence(vma->obj, true);
			if (ret)
				return ret;
		}
		vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags);
	}

	if (vma->obj)
		set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags);

	atomic_or(bind_flags, &vma->flags);
	return 0;
}

void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;
	int err;

	if (!i915_gem_object_is_lmem(vma->obj)) {
		if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
			err = -ENODEV;
			goto err;
		}
	}

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
	GEM_BUG_ON(i915_vma_verify_bind_complete(vma));

	ptr = READ_ONCE(vma->iomap);
	if (ptr == NULL) {
		/*
		 * TODO: consider just using i915_gem_object_pin_map() for lmem
		 * instead, which already supports mapping non-contiguous chunks
		 * of pages; that way we can also drop the
		 * I915_BO_ALLOC_CONTIGUOUS when allocating the object.
		 */
		if (i915_gem_object_is_lmem(vma->obj))
			ptr = i915_gem_object_lmem_io_map(vma->obj, 0,
							  vma->obj->base.size);
		else
			ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
						vma->node.start,
						vma->node.size);
		if (ptr == NULL) {
			err = -ENOMEM;
			goto err;
		}

		if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
			io_mapping_unmap(ptr);
			ptr = vma->iomap;
		}
	}

	__i915_vma_pin(vma);

	err = i915_vma_pin_fence(vma);
	if (err)
		goto err_unpin;

	i915_vma_set_ggtt_write(vma);

	/* NB Access through the GTT requires the device to be awake. */
	return ptr;

err_unpin:
	__i915_vma_unpin(vma);
err:
	return IO_ERR_PTR(err);
}

void i915_vma_flush_writes(struct i915_vma *vma)
{
	if (i915_vma_unset_ggtt_write(vma))
		intel_gt_flush_ggtt_writes(vma->vm->gt);
}

void i915_vma_unpin_iomap(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->iomap == NULL);

	i915_vma_flush_writes(vma);

	i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
}

void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;
	GEM_BUG_ON(!obj);

	i915_vma_unpin(vma);

	if (flags & I915_VMA_RELEASE_MAP)
		i915_gem_object_unpin_map(obj);

	i915_gem_object_put(obj);
}

bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
		return true;

	if (vma->node.size < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	fenceable = (vma->node.size >= vma->fence_size &&
		     IS_ALIGNED(vma->node.start, vma->fence_alignment));

	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	else
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (!i915_vm_has_cache_coloring(vma->vm))
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(node))
		return false;

	return true;
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	unsigned long color;
	u64 start, end;
	int ret;

	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
			  size, flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -ENOSPC;
	}

	color = 0;
	if (i915_vm_has_cache_coloring(vma->vm))
		color = vma->obj->cache_level;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;

		if (!IS_ALIGNED(offset, alignment) ||
		    range_overflows(offset, size, end))
			return -EINVAL;

		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
					   size, offset, color,
					   flags);
		if (ret)
			return ret;
	} else {
		/*
		 * We only support huge gtt pages through the 48b PPGTT,
		 * however we also don't want to force any alignment for
		 * objects which need to be tightly packed into the low 32bits.
		 *
		 * Note that we assume that GGTT are limited to 4GiB for the
		 * foreseeable future. See also i915_ggtt_offset().
		 */
		if (upper_32_bits(end - 1) &&
		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
			/*
			 * We can't mix 64K and 4K PTEs in the same page-table
			 * (2M block), and so to avoid the ugliness and
			 * complexity of coloring we opt for just aligning 64K
			 * objects to 2M.
			 */
			u64 page_alignment =
				rounddown_pow_of_two(vma->page_sizes.sg |
						     I915_GTT_PAGE_SIZE_2M);

			/*
			 * Check we don't expand for the limited Global GTT
			 * (mappable aperture is even more precious!). This
			 * also checks that we exclude the aliasing-ppgtt.
			 */
			GEM_BUG_ON(i915_vma_is_ggtt(vma));

			alignment = max(alignment, page_alignment);

			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
		}

		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
					  size, alignment, color,
					  start, end, flags);
		if (ret)
			return ret;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));

	list_add_tail(&vma->vm_link, &vma->vm->bound_list);

	return 0;
}

static void
i915_vma_detach(struct i915_vma *vma)
{
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

	/*
	 * And finally now the object is completely decoupled from this
	 * vma, we can drop its hold on the backing storage and allow
	 * it to be reaped by the shrinker.
	 */
	list_del(&vma->vm_link);
}

static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
{
	unsigned int bound;
	bool pinned = true;

	bound = atomic_read(&vma->flags);
	do {
		if (unlikely(flags & ~bound))
			return false;

		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
			return false;

		if (!(bound & I915_VMA_PIN_MASK))
			goto unpinned;

		GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));

	return true;

unpinned:
	/*
	 * If pin_count==0, but we are bound, check under the lock to avoid
	 * racing with a concurrent i915_vma_unbind().
	 */
	mutex_lock(&vma->vm->mutex);
	do {
		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) {
			pinned = false;
			break;
		}

		if (unlikely(flags & ~bound)) {
			pinned = false;
			break;
		}
	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
	mutex_unlock(&vma->vm->mutex);

	return pinned;
}

static struct scatterlist *
rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
	     unsigned int width, unsigned int height,
	     unsigned int src_stride, unsigned int dst_stride,
	     struct sg_table *st, struct scatterlist *sg)
{
	unsigned int column, row;
	unsigned int src_idx;

	for (column = 0; column < width; column++) {
		unsigned int left;

		src_idx = src_stride * (height - 1) + column + offset;
		for (row = 0; row < height; row++) {
			st->nents++;
			/*
			 * We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * All we need are the DMA addresses.
			 */
			sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
			sg_dma_address(sg) =
				i915_gem_object_get_dma_address(obj, src_idx);
			sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
			sg = sg_next(sg);
			src_idx -= src_stride;
		}

		left = (dst_stride - height) * I915_GTT_PAGE_SIZE;

		if (!left)
			continue;

		st->nents++;

		/*
		 * The DE ignores the PTEs for the padding tiles, the sg entry
		 * here is just a convenience to indicate how many padding PTEs
		 * to insert at this spot.
		 */
		sg_set_page(sg, NULL, left, 0);
		sg_dma_address(sg) = 0;
		sg_dma_len(sg) = left;
		sg = sg_next(sg);
	}

	return sg;
}

static noinline struct sg_table *
intel_rotate_pages(struct intel_rotation_info *rot_info,
		   struct drm_i915_gem_object *obj)
{
	unsigned int size = intel_rotation_info_size(rot_info);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	int ret = -ENOMEM;
	int i;

	/* Allocate target SG list. */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, size, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	st->nents = 0;
	sg = st->sgl;

	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
		sg = rotate_pages(obj, rot_info->plane[i].offset,
				  rot_info->plane[i].width, rot_info->plane[i].height,
				  rot_info->plane[i].src_stride,
				  rot_info->plane[i].dst_stride,
				  st, sg);

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:

	drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
		obj->base.size, rot_info->plane[0].width,
		rot_info->plane[0].height, size);

	return ERR_PTR(ret);
}

static struct scatterlist *
remap_pages(struct drm_i915_gem_object *obj,
	    unsigned int offset, unsigned int alignment_pad,
	    unsigned int width, unsigned int height,
	    unsigned int src_stride, unsigned int dst_stride,
	    struct sg_table *st, struct scatterlist *sg)
{
	unsigned int row;

	if (!width || !height)
		return sg;

	if (alignment_pad) {
		st->nents++;

		/*
		 * The DE ignores the PTEs for the padding tiles, the sg entry
		 * here is just a convenience to indicate how many padding PTEs
		 * to insert at this spot.
		 */
		sg_set_page(sg, NULL, alignment_pad * 4096, 0);
		sg_dma_address(sg) = 0;
		sg_dma_len(sg) = alignment_pad * 4096;
		sg = sg_next(sg);
	}

	for (row = 0; row < height; row++) {
		unsigned int left = width * I915_GTT_PAGE_SIZE;

		while (left) {
			dma_addr_t addr;
			unsigned int length;

			/*
			 * We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * All we need are the DMA addresses.
			 */

			addr = i915_gem_object_get_dma_address_len(obj, offset, &length);

			length = min(left, length);

			st->nents++;

			sg_set_page(sg, NULL, length, 0);
			sg_dma_address(sg) = addr;
			sg_dma_len(sg) = length;
			sg = sg_next(sg);

			offset += length / I915_GTT_PAGE_SIZE;
			left -= length;
		}

		offset += src_stride - width;

		left = (dst_stride - width) * I915_GTT_PAGE_SIZE;

		if (!left)
			continue;

		st->nents++;

		/*
		 * The DE ignores the PTEs for the padding tiles, the sg entry
		 * here is just a convenience to indicate how many padding PTEs
		 * to insert at this spot.
		 */
		sg_set_page(sg, NULL, left, 0);
		sg_dma_address(sg) = 0;
		sg_dma_len(sg) = left;
		sg = sg_next(sg);
	}

	return sg;
}

static noinline struct sg_table *
intel_remap_pages(struct intel_remapped_info *rem_info,
		  struct drm_i915_gem_object *obj)
{
	unsigned int size = intel_remapped_info_size(rem_info);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int gtt_offset = 0;
	int ret = -ENOMEM;
	int i;

	/* Allocate target SG list. */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, size, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	st->nents = 0;
	sg = st->sgl;

	for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
		unsigned int alignment_pad = 0;

		if (rem_info->plane_alignment)
			alignment_pad = ALIGN(gtt_offset, rem_info->plane_alignment) - gtt_offset;

		sg = remap_pages(obj,
				 rem_info->plane[i].offset, alignment_pad,
				 rem_info->plane[i].width, rem_info->plane[i].height,
				 rem_info->plane[i].src_stride, rem_info->plane[i].dst_stride,
				 st, sg);

		gtt_offset += alignment_pad +
			      rem_info->plane[i].dst_stride * rem_info->plane[i].height;
	}

	i915_sg_trim(st);

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:

	drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
		obj->base.size, rem_info->plane[0].width,
		rem_info->plane[0].height, size);

	return ERR_PTR(ret);
}

static noinline struct sg_table *
intel_partial_pages(const struct i915_ggtt_view *view,
		    struct drm_i915_gem_object *obj)
{
	struct sg_table *st;
	struct scatterlist *sg, *iter;
	unsigned int count = view->partial.size;
	unsigned int offset;
	int ret = -ENOMEM;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, count, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	iter = i915_gem_object_get_sg_dma(obj, view->partial.offset, &offset);
	GEM_BUG_ON(!iter);

	sg = st->sgl;
	st->nents = 0;
	do {
		unsigned int len;

		len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT),
			  count << PAGE_SHIFT);
		sg_set_page(sg, NULL, len, 0);
		sg_dma_address(sg) =
			sg_dma_address(iter) + (offset << PAGE_SHIFT);
		sg_dma_len(sg) = len;

		st->nents++;
		count -= len >> PAGE_SHIFT;
		if (count == 0) {
			sg_mark_end(sg);
			i915_sg_trim(st); /* Drop any unused tail entries. */

			return st;
		}

		sg = __sg_next(sg);
		iter = __sg_next(iter);
		offset = 0;
	} while (1);

err_sg_alloc:
	kfree(st);
err_st_alloc:
	return ERR_PTR(ret);
}

static int
__i915_vma_get_pages(struct i915_vma *vma)
{
	struct sg_table *pages;
	int ret;

	/*
	 * The vma->pages are only valid within the lifespan of the borrowed
	 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
	 * must be the vma->pages. A simple rule is that vma->pages must only
	 * be accessed when the obj->mm.pages are pinned.
	 */
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));

	switch (vma->ggtt_view.type) {
	default:
		GEM_BUG_ON(vma->ggtt_view.type);
		fallthrough;
	case I915_GGTT_VIEW_NORMAL:
		pages = vma->obj->mm.pages;
		break;

	case I915_GGTT_VIEW_ROTATED:
		pages =
			intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
		break;

	case I915_GGTT_VIEW_REMAPPED:
		pages =
			intel_remap_pages(&vma->ggtt_view.remapped, vma->obj);
		break;

	case I915_GGTT_VIEW_PARTIAL:
		pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
		break;
	}

	ret = 0;
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		pages = NULL;
		drm_err(&vma->vm->i915->drm,
			"Failed to get pages for VMA view type %u (%d)!\n",
			vma->ggtt_view.type, ret);
	}

	vma->pages = pages;

	return ret;
}

I915_SELFTEST_EXPORT int i915_vma_get_pages(struct i915_vma *vma)
{
	int err;

	if (atomic_add_unless(&vma->pages_count, 1, 0))
		return 0;

	err = i915_gem_object_pin_pages(vma->obj);
	if (err)
		return err;

	err = __i915_vma_get_pages(vma);
	if (err)
		goto err_unpin;

	vma->page_sizes = vma->obj->mm.page_sizes;
	atomic_inc(&vma->pages_count);

	return 0;

err_unpin:
	__i915_gem_object_unpin_pages(vma->obj);

	return err;
}

static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
{
	/* We allocate under vma_get_pages, so beware the shrinker */
	struct sg_table *pages = READ_ONCE(vma->pages);

	GEM_BUG_ON(atomic_read(&vma->pages_count) < count);

	if (atomic_sub_return(count, &vma->pages_count) == 0) {
		/*
		 * The atomic_sub_return is a read barrier for the READ_ONCE of
		 * vma->pages above.
		 *
		 * READ_ONCE is safe because this is either called from the same
		 * function (i915_vma_pin_ww), or guarded by vma->vm->mutex.
		 *
		 * TODO: We're leaving vma->pages dangling, until vma->obj->resv
		 * lock is required.
		 */
		if (pages != vma->obj->mm.pages) {
			sg_free_table(pages);
			kfree(pages);
		}

		i915_gem_object_unpin_pages(vma->obj);
	}
}

I915_SELFTEST_EXPORT void i915_vma_put_pages(struct i915_vma *vma)
{
	if (atomic_add_unless(&vma->pages_count, -1, 1))
		return;

	__vma_put_pages(vma, 1);
}

static void vma_unbind_pages(struct i915_vma *vma)
{
	unsigned int count;

	lockdep_assert_held(&vma->vm->mutex);

	/* The upper portion of pages_count is the number of bindings */
	count = atomic_read(&vma->pages_count);
	count >>= I915_VMA_PAGES_BIAS;
	GEM_BUG_ON(!count);

	__vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
}

int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		    u64 size, u64 alignment, u64 flags)
{
	struct i915_vma_work *work = NULL;
	struct dma_fence *moving = NULL;
	intel_wakeref_t wakeref = 0;
	unsigned int bound;
	int err;

	assert_vma_held(vma);
	GEM_BUG_ON(!ww);

	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);

	GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));

	/* First try and grab the pin without rebinding the vma */
	if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK))
		return 0;

	err = i915_vma_get_pages(vma);
	if (err)
		return err;

	if (flags & PIN_GLOBAL)
		wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);

	moving = vma->obj ? i915_gem_object_get_moving_fence(vma->obj) : NULL;
	if (flags & vma->vm->bind_async_flags || moving) {
		/* lock VM */
		err = i915_vm_lock_objects(vma->vm, ww);
		if (err)
			goto err_rpm;

		work = i915_vma_work();
		if (!work) {
			err = -ENOMEM;
			goto err_rpm;
		}

		work->vm = i915_vm_get(vma->vm);

		dma_fence_work_chain(&work->base, moving);

		/* Allocate enough page directories to cover the used PTEs */
		if (vma->vm->allocate_va_range) {
			err = i915_vm_alloc_pt_stash(vma->vm,
						     &work->stash,
						     vma->size);
			if (err)
				goto err_fence;

			err = i915_vm_map_pt_stash(vma->vm, &work->stash);
			if (err)
				goto err_fence;
		}
	}

	/*
	 * Differentiate between user/kernel vma inside the aliasing-ppgtt.
	 *
	 * We conflate the Global GTT with the user's vma when using the
	 * aliasing-ppgtt, but it is still vitally important to try and
	 * keep the use cases distinct. For example, userptr objects are
	 * not allowed inside the Global GTT as that will cause lock
	 * inversions when we have to evict them in the mmu_notifier callbacks -
	 * but they are allowed to be part of the user ppGTT which can never
	 * be mapped. As such we try to give the distinct users of the same
	 * mutex, distinct lockclasses [equivalent to how we keep i915_ggtt
	 * and i915_ppgtt separate].
	 *
	 * NB this may cause us to mask real lock inversions -- while the
	 * code is safe today, lockdep may not be able to spot future
	 * transgressions.
	 */
	err = mutex_lock_interruptible_nested(&vma->vm->mutex,
					      !(flags & PIN_GLOBAL));
	if (err)
		goto err_fence;

	/* No more allocations allowed now we hold vm->mutex */

	if (unlikely(i915_vma_is_closed(vma))) {
		err = -ENOENT;
		goto err_unlock;
	}

	bound = atomic_read(&vma->flags);
	if (unlikely(bound & I915_VMA_ERROR)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
		err = -EAGAIN; /* pins are meant to be fairly temporary */
		goto err_unlock;
	}

	if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
		__i915_vma_pin(vma);
		goto err_unlock;
	}

	err = i915_active_acquire(&vma->active);
	if (err)
		goto err_unlock;

	if (!(bound & I915_VMA_BIND_MASK)) {
		err = i915_vma_insert(vma, size, alignment, flags);
		if (err)
			goto err_active;

		if (i915_is_ggtt(vma->vm))
			__i915_vma_set_map_and_fenceable(vma);
	}

	GEM_BUG_ON(!vma->pages);
	err = i915_vma_bind(vma,
			    vma->obj->cache_level,
			    flags, work);
	if (err)
		goto err_remove;

	/* There should only be at most 2 active bindings (user, global) */
	GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
	atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
	list_move_tail(&vma->vm_link, &vma->vm->bound_list);

	__i915_vma_pin(vma);
	GEM_BUG_ON(!i915_vma_is_pinned(vma));
	GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));

err_remove:
	if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
		i915_vma_detach(vma);
		drm_mm_remove_node(&vma->node);
	}
err_active:
	i915_active_release(&vma->active);
err_unlock:
	mutex_unlock(&vma->vm->mutex);
err_fence:
	if (work)
		dma_fence_work_commit_imm(&work->base);
err_rpm:
	if (wakeref)
		intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);

	if (moving)
		dma_fence_put(moving);

	i915_vma_put_pages(vma);
	return err;
}

static void flush_idle_contexts(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		intel_engine_flush_barriers(engine);

	intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
}

static int __i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
			   u32 align, unsigned int flags)
{
	struct i915_address_space *vm = vma->vm;
	int err;

	do {
		err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL);

		if (err != -ENOSPC) {
			if (!err) {
				err = i915_vma_wait_for_bind(vma);
				if (err)
					i915_vma_unpin(vma);
			}
			return err;
		}

		/* Unlike i915_vma_pin, we don't take no for an answer! */
		flush_idle_contexts(vm->gt);
		if (mutex_lock_interruptible(&vm->mutex) == 0) {
			i915_gem_evict_vm(vm);
			mutex_unlock(&vm->mutex);
		}
	} while (1);
}

int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		  u32 align, unsigned int flags)
{
	struct i915_gem_ww_ctx _ww;
	int err;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));

	if (ww)
		return __i915_ggtt_pin(vma, ww, align, flags);

#ifdef CONFIG_LOCKDEP
	WARN_ON(dma_resv_held(vma->obj->base.resv));
#endif

	for_i915_gem_ww(&_ww, err, true) {
		err = i915_gem_object_lock(vma->obj, &_ww);
		if (!err)
			err = __i915_ggtt_pin(vma, &_ww, align, flags);
	}

	return err;
}

static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
{
	/*
	 * We defer actually closing, unbinding and destroying the VMA until
	 * the next idle point, or if the object is freed in the meantime. By
	 * postponing the unbind, we allow for it to be resurrected by the
	 * client, avoiding the work required to rebind the VMA. This is
	 * advantageous for DRI, where the client/server pass objects
	 * between themselves, temporarily opening a local VMA to the
	 * object, and then closing it again. The same object is then reused
	 * on the next frame (or two, depending on the depth of the swap queue)
	 * causing us to rebind the VMA once more. This ends up being a lot
	 * of wasted work for the steady state.
	 */
	GEM_BUG_ON(i915_vma_is_closed(vma));
	list_add(&vma->closed_link, &gt->closed_vma);
}

void i915_vma_close(struct i915_vma *vma)
{
	struct intel_gt *gt = vma->vm->gt;
	unsigned long flags;

	if (i915_vma_is_ggtt(vma))
		return;

	GEM_BUG_ON(!atomic_read(&vma->open_count));
	if (atomic_dec_and_lock_irqsave(&vma->open_count,
					&gt->closed_lock,
					flags)) {
		__vma_close(vma, gt);
		spin_unlock_irqrestore(&gt->closed_lock, flags);
	}
}

static void __i915_vma_remove_closed(struct i915_vma *vma)
{
	struct intel_gt *gt = vma->vm->gt;

	spin_lock_irq(&gt->closed_lock);
	list_del_init(&vma->closed_link);
	spin_unlock_irq(&gt->closed_lock);
}

void i915_vma_reopen(struct i915_vma *vma)
{
	if (i915_vma_is_closed(vma))
		__i915_vma_remove_closed(vma);
}

void i915_vma_release(struct kref *ref)
{
	struct i915_vma *vma = container_of(ref, typeof(*vma), ref);
	struct drm_i915_gem_object *obj = vma->obj;

	if (drm_mm_node_allocated(&vma->node)) {
		mutex_lock(&vma->vm->mutex);
		atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
		WARN_ON(__i915_vma_unbind(vma));
		mutex_unlock(&vma->vm->mutex);
		GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
	}
	GEM_BUG_ON(i915_vma_is_active(vma));

	spin_lock(&obj->vma.lock);
	list_del(&vma->obj_link);
	if (!RB_EMPTY_NODE(&vma->obj_node))
		rb_erase(&vma->obj_node, &obj->vma.tree);
	spin_unlock(&obj->vma.lock);

	__i915_vma_remove_closed(vma);
	i915_vm_put(vma->vm);

	i915_active_fini(&vma->active);
	i915_vma_free(vma);
}

void i915_vma_parked(struct intel_gt *gt)
{
	struct i915_vma *vma, *next;
	LIST_HEAD(closed);

	spin_lock_irq(&gt->closed_lock);
	list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_address_space *vm = vma->vm;

		/* XXX All to avoid keeping a reference on i915_vma itself */

		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		if (!i915_vm_tryopen(vm)) {
			i915_gem_object_put(obj);
			continue;
		}

		list_move(&vma->closed_link, &closed);
	}
	spin_unlock_irq(&gt->closed_lock);

	/* As the GT is held idle, no vma can be reopened as we destroy them */
	list_for_each_entry_safe(vma, next, &closed, closed_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_address_space *vm = vma->vm;

		INIT_LIST_HEAD(&vma->closed_link);
		__i915_vma_put(vma);

		i915_gem_object_put(obj);
		i915_vm_close(vm);
	}
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}

void i915_vma_revoke_mmap(struct i915_vma *vma)
{
	struct drm_vma_offset_node *node;
	u64 vma_offset;

	if (!i915_vma_has_userfault(vma))
		return;

	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
	GEM_BUG_ON(!vma->obj->userfault_count);

	node = &vma->mmo->vma_node;
	vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
			    drm_vma_node_offset_addr(node) + vma_offset,
			    vma->size,
			    1);

	i915_vma_unset_userfault(vma);
	if (!--vma->obj->userfault_count)
		list_del(&vma->obj->userfault_link);
}

static int
__i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma)
{
	return __i915_request_await_exclusive(rq, &vma->active);
}

static int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
{
	int err;

	GEM_BUG_ON(!i915_vma_is_pinned(vma));

	/* Wait for the vma to be bound before we start! */
	err = __i915_request_await_bind(rq, vma);
	if (err)
		return err;

	return i915_active_add_request(&vma->active, rq);
}

int _i915_vma_move_to_active(struct i915_vma *vma,
			     struct i915_request *rq,
			     struct dma_fence *fence,
			     unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int err;

	assert_object_held(obj);

	err = __i915_vma_move_to_active(vma, rq);
	if (unlikely(err))
		return err;

	if (flags & EXEC_OBJECT_WRITE) {
		struct intel_frontbuffer *front;

		front = __intel_frontbuffer_get(obj);
		if (unlikely(front)) {
			if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
				i915_active_add_request(&front->write, rq);
			intel_frontbuffer_put(front);
		}

		if (fence) {
			dma_resv_add_excl_fence(vma->obj->base.resv, fence);
			obj->write_domain = I915_GEM_DOMAIN_RENDER;
			obj->read_domains = 0;
		}
	} else {
		if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
			err = dma_resv_reserve_shared(vma->obj->base.resv, 1);
			if (unlikely(err))
				return err;
		}

		if (fence) {
			dma_resv_add_shared_fence(vma->obj->base.resv, fence);
			obj->write_domain = 0;
		}
	}

	if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
		i915_active_add_request(&vma->fence->active, rq);

	obj->read_domains |= I915_GEM_GPU_DOMAINS;
	obj->mm.dirty = true;

	GEM_BUG_ON(!i915_vma_is_active(vma));
	return 0;
}

void __i915_vma_evict(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (i915_vma_is_map_and_fenceable(vma)) {
		/* Force a pagefault for domain tracking on next user access */
		i915_vma_revoke_mmap(vma);

		/*
		 * Check that we have flushed all writes through the GGTT
		 * before the unbind; otherwise, due to the non-strict nature
		 * of those indirect writes, they may end up referencing the
		 * GGTT PTE after the unbind.
		 *
		 * Note that we may be concurrently poking at the GGTT_WRITE
		 * bit from set-domain, as we mark all GGTT vma associated
		 * with an object. We know this is for another vma, as we
		 * are currently unbinding this one -- so if this vma will be
		 * reused, it will be refaulted and have its dirty bit set
		 * before the next write.
		 */
		i915_vma_flush_writes(vma);

		/* release the fence reg _after_ flushing */
		i915_vma_revoke_fence(vma);

		__i915_vma_iounmap(vma);
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	}
	GEM_BUG_ON(vma->fence);
	GEM_BUG_ON(i915_vma_has_userfault(vma));

	if (likely(atomic_read(&vma->vm->open))) {
		trace_i915_vma_unbind(vma);
		vma->ops->unbind_vma(vma->vm, vma);
	}
	atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
		   &vma->flags);

	i915_vma_detach(vma);
	vma_unbind_pages(vma);
}

int __i915_vma_unbind(struct i915_vma *vma)
{
	int ret;

	lockdep_assert_held(&vma->vm->mutex);

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EAGAIN;
	}

	/*
	 * After confirming that no one else is pinning this vma, wait for
	 * any laggards who may have crept in during the wait (through
	 * a residual pin skipping the vm->mutex) to complete.
	 */
	ret = i915_vma_sync(vma);
	if (ret)
		return ret;

	GEM_BUG_ON(i915_vma_is_active(vma));
	__i915_vma_evict(vma);

	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
	return 0;
}

int i915_vma_unbind(struct i915_vma *vma)
{
	struct i915_address_space *vm = vma->vm;
	intel_wakeref_t wakeref = 0;
	int err;

	/* Optimistic wait before taking the mutex */
	err = i915_vma_sync(vma);
	if (err)
		return err;

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EAGAIN;
	}

	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
		/* XXX not always required: nop_clear_range */
		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);

	err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
	if (err)
		goto out_rpm;

	err = __i915_vma_unbind(vma);
	mutex_unlock(&vm->mutex);

out_rpm:
	if (wakeref)
		intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
	return err;
}

struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
{
	i915_gem_object_make_unshrinkable(vma->obj);
	return vma;
}

void i915_vma_make_shrinkable(struct i915_vma *vma)
{
	i915_gem_object_make_shrinkable(vma->obj);
}

void i915_vma_make_purgeable(struct i915_vma *vma)
{
	i915_gem_object_make_purgeable(vma->obj);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif

void i915_vma_module_exit(void)
{
	kmem_cache_destroy(slab_vmas);
}

int __init i915_vma_module_init(void)
{
	slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
	if (!slab_vmas)
		return -ENOMEM;

	return 0;
}
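
/*
 * Illustrative sketch (not part of the driver): the module hooks above are
 * expected to be paired by the top-level module code, roughly as below. The
 * placement within the real caller's init/exit ordering is an assumption and
 * its error unwinding is elided here.
 *
 *	err = i915_vma_module_init();
 *	if (err)
 *		return err;
 *	...
 *	i915_vma_module_exit();
 */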