/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/sched/mm.h>
#include <drm/drm_gem.h>

#include "display/intel_frontbuffer.h"

#include "gt/intel_engine.h"
#include "gt/intel_gt.h"

#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
#include "i915_vma.h"

static struct i915_global_vma {
	struct i915_global base;
	struct kmem_cache *slab_vmas;
} global;

struct i915_vma *i915_vma_alloc(void)
{
	return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL);
}

void i915_vma_free(struct i915_vma *vma)
{
	return kmem_cache_free(global.slab_vmas, vma);
}

#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
	unsigned long *entries;
	unsigned int nr_entries;
	char buf[512];

	if (!vma->node.stack) {
		DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
				 vma->node.start, vma->node.size, reason);
		return;
	}

	nr_entries = stack_depot_fetch(vma->node.stack, &entries);
	stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0);
	DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
			 vma->node.start, vma->node.size, reason, buf);
}

#else

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
}

#endif

static inline struct i915_vma *active_to_vma(struct i915_active *ref)
{
	return container_of(ref, typeof(struct i915_vma), active);
}

static int __i915_vma_active(struct i915_active *ref)
{
	return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
}

__i915_active_call
static void __i915_vma_retire(struct i915_active *ref)
{
	i915_vma_put(active_to_vma(ref));
}

static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	struct rb_node *rb, **p;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);

	vma = i915_vma_alloc();
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	mutex_init(&vma->pages_mutex);
	vma->vm = i915_vm_get(vm);
	vma->ops = &vm->vma_ops;
	vma->obj = obj;
	vma->resv = obj->base.resv;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire);

	/* Declare ourselves safe for use inside shrinkers */
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		might_lock(&vma->active.mutex);
		fs_reclaim_release(GFP_KERNEL);
	}

	INIT_LIST_HEAD(&vma->closed_link);

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size > obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		} else if (view->type == I915_GGTT_VIEW_REMAPPED) {
			vma->size = intel_remapped_info_size(&view->remapped);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_vma;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size || /* overflow */
			     vma->fence_size > vm->total))
			goto err_vma;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		__set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
	}

	spin_lock(&obj->vma.lock);

	rb = NULL;
	p = &obj->vma.tree.rb_node;
	while (*p) {
		struct i915_vma *pos;
		long cmp;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);

		/*
		 * If the view already exists in the tree, another thread
		 * already created a matching vma, so return the older instance
		 * and dispose of ours.
		 */
		cmp = i915_vma_compare(pos, vm, view);
		if (cmp == 0) {
			spin_unlock(&obj->vma.lock);
			i915_vma_free(vma);
			return pos;
		}

		if (cmp < 0)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma.tree);

	if (i915_vma_is_ggtt(vma))
		/*
		 * We put the GGTT vma at the start of the vma-list, followed
		 * by the ppGTT vma. This allows us to break early when
		 * iterating over only the GGTT vma for an object, see
		 * for_each_ggtt_vma().
		 */
		list_add(&vma->obj_link, &obj->vma.list);
	else
		list_add_tail(&vma->obj_link, &obj->vma.list);

	spin_unlock(&obj->vma.lock);

	return vma;

err_vma:
	i915_vma_free(vma);
	return ERR_PTR(-E2BIG);
}

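/*
 * Illustrative sketch (not compiled here): because vma_create() above keeps
 * GGTT vmas at the head of obj->vma.list, for_each_ggtt_vma() from
 * i915_vma.h can stop walking at the first non-GGTT entry. The counter name
 * is hypothetical, and the caller is assumed to hold whatever protects
 * obj->vma.list (e.g. obj->vma.lock) for the duration of the walk:
 *
 *	struct i915_vma *vma;
 *	unsigned int nr_ggtt = 0; // hypothetical counter
 *
 *	for_each_ggtt_vma(vma, obj)
 *		nr_ggtt++; // visits only the leading GGTT vmas
 */
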
static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma.tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	GEM_BUG_ON(view && !i915_is_ggtt(vm));
	GEM_BUG_ON(!atomic_read(&vm->open));

	spin_lock(&obj->vma.lock);
	vma = vma_lookup(obj, vm, view);
	spin_unlock(&obj->vma.lock);

	/* vma_create() will resolve the race if another creates the vma */
	if (unlikely(!vma))
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	return vma;
}

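/*
 * Typical usage, modelled on callers such as i915_gem_object_ggtt_pin()
 * (sketch only; error handling abbreviated, and @i915, @obj and @err are
 * assumed from the caller's context):
 *
 *	struct i915_vma *vma;
 *
 *	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
 */
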
struct i915_vma_work {
	struct dma_fence_work base;
	struct i915_vma *vma;
	enum i915_cache_level cache_level;
	unsigned int flags;
};

static int __vma_bind(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
	struct i915_vma *vma = vw->vma;
	int err;

	err = vma->ops->bind_vma(vma, vw->cache_level, vw->flags);
	if (err)
		atomic_or(I915_VMA_ERROR, &vma->flags);

	if (vma->obj)
		__i915_gem_object_unpin_pages(vma->obj);

	return err;
}

static const struct dma_fence_work_ops bind_ops = {
	.name = "bind",
	.work = __vma_bind,
};

struct i915_vma_work *i915_vma_work(void)
{
	struct i915_vma_work *vw;

	vw = kzalloc(sizeof(*vw), GFP_KERNEL);
	if (!vw)
		return NULL;

	dma_fence_work_init(&vw->base, &bind_ops);
	vw->base.dma.error = -EAGAIN; /* disable the worker by default */

	return vw;
}

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 * @work: preallocated worker for allocating and binding the PTE
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma,
		  enum i915_cache_level cache_level,
		  u32 flags,
		  struct i915_vma_work *work)
{
	u32 bind_flags;
	u32 vma_flags;
	int ret;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > vma->node.size);

	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
					      vma->node.size,
					      vma->vm->total)))
		return -ENODEV;

	if (GEM_DEBUG_WARN_ON(!flags))
		return -EINVAL;

	bind_flags = flags;
	bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	vma_flags = atomic_read(&vma->flags);
	vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
	if (flags & PIN_UPDATE)
		bind_flags |= vma_flags;
	else
		bind_flags &= ~vma_flags;
	if (bind_flags == 0)
		return 0;

	GEM_BUG_ON(!vma->pages);

	trace_i915_vma_bind(vma, bind_flags);
	if (work && (bind_flags & ~vma_flags) & vma->vm->bind_async_flags) {
		work->vma = vma;
		work->cache_level = cache_level;
		work->flags = bind_flags | I915_VMA_ALLOC;

		/*
		 * Note we only want to chain up to the migration fence on
		 * the pages (not the object itself). As we don't track that,
		 * yet, we have to use the exclusive fence instead.
		 *
		 * Also note that we do not want to track the async vma as
		 * part of the obj->resv->excl_fence as it only affects
		 * execution and not content or object's backing store lifetime.
		 */
		GEM_BUG_ON(i915_active_has_exclusive(&vma->active));
		i915_active_set_exclusive(&vma->active, &work->base.dma);
		work->base.dma.error = 0; /* enable the queue_work() */

		if (vma->obj)
			__i915_gem_object_pin_pages(vma->obj);
	} else {
		GEM_BUG_ON((bind_flags & ~vma_flags) & vma->vm->bind_async_flags);
		ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
		if (ret)
			return ret;
	}

	atomic_or(bind_flags, &vma->flags);
	return 0;
}

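/*
 * The asynchronous path above is driven by i915_vma_pin(); condensed, the
 * pattern is (sketch under the same locking assumptions as i915_vma_pin(),
 * i.e. vm->mutex held around the bind):
 *
 *	struct i915_vma_work *work = i915_vma_work();
 *
 *	err = i915_vma_bind(vma, cache_level, flags, work);
 *	...
 *	dma_fence_work_commit(&work->base);
 *
 * i915_vma_bind() clears dma.error to arm the worker; the commit then queues
 * it, or signals the fence as a no-op if the worker was never armed.
 */
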
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;
	int err;

	/* Access through the GTT requires the device to be awake. */
	assert_rpm_wakelock_held(&vma->vm->i915->runtime_pm);
	if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
		err = -ENODEV;
		goto err;
	}

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));

	ptr = READ_ONCE(vma->iomap);
	if (ptr == NULL) {
		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
					vma->node.start,
					vma->node.size);
		if (ptr == NULL) {
			err = -ENOMEM;
			goto err;
		}

		if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
			io_mapping_unmap(ptr);
			ptr = vma->iomap;
		}
	}

	__i915_vma_pin(vma);

	err = i915_vma_pin_fence(vma);
	if (err)
		goto err_unpin;

	i915_vma_set_ggtt_write(vma);
	return ptr;

err_unpin:
	__i915_vma_unpin(vma);
err:
	return IO_ERR_PTR(err);
}

void i915_vma_flush_writes(struct i915_vma *vma)
{
	if (i915_vma_unset_ggtt_write(vma))
		intel_gt_flush_ggtt_writes(vma->vm->gt);
}

void i915_vma_unpin_iomap(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->iomap == NULL);

	i915_vma_flush_writes(vma);

	i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
}

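/*
 * Sketch of the expected pin/unpin pairing; holding the runtime-pm wakeref
 * is the caller's responsibility, and @value and @offset are hypothetical:
 *
 *	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 *
 *	map = i915_vma_pin_iomap(vma);
 *	if (!IS_ERR(map)) {
 *		writel(value, map + offset);
 *		i915_vma_unpin_iomap(vma);
 *	}
 *
 *	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 */
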
void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;
	GEM_BUG_ON(!obj);

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	if (flags & I915_VMA_RELEASE_MAP)
		i915_gem_object_unpin_map(obj);

	i915_gem_object_put(obj);
}

bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
		return true;

	if (vma->node.size < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	fenceable = (vma->node.size >= vma->fence_size &&
		     IS_ALIGNED(vma->node.start, vma->fence_alignment));

	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	else
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (!i915_vm_has_cache_coloring(vma->vm))
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(node))
		return false;

	return true;
}

static void assert_bind_count(const struct drm_i915_gem_object *obj)
{
	/*
	 * Combine the assertion that the object is bound and that we have
	 * pinned its pages. But we should never have bound the object
	 * more than we have pinned its pages. (For complete accuracy, we
	 * assume that no one else is pinning the pages, but as a rough
	 * assertion that we will not run into problems later, this will do!)
	 */
	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < atomic_read(&obj->bind_count));
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry, to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	unsigned long color;
	u64 start, end;
	int ret;

	GEM_BUG_ON(i915_vma_is_closed(vma));
	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	/*
	 * If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
			  size, flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -ENOSPC;
	}

	color = 0;
	if (vma->obj && i915_vm_has_cache_coloring(vma->vm))
		color = vma->obj->cache_level;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;

		if (!IS_ALIGNED(offset, alignment) ||
		    range_overflows(offset, size, end))
			return -EINVAL;

		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
					   size, offset, color,
					   flags);
		if (ret)
			return ret;
	} else {
		/*
		 * We only support huge gtt pages through the 48b PPGTT,
		 * however we also don't want to force any alignment for
		 * objects which need to be tightly packed into the low 32bits.
		 *
		 * Note that we assume that GGTT are limited to 4GiB for the
		 * foreseeable future. See also i915_ggtt_offset().
		 */
		if (upper_32_bits(end - 1) &&
		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
			/*
			 * We can't mix 64K and 4K PTEs in the same page-table
			 * (2M block), and so to avoid the ugliness and
			 * complexity of coloring we opt for just aligning 64K
			 * objects to 2M.
			 */
			u64 page_alignment =
				rounddown_pow_of_two(vma->page_sizes.sg |
						     I915_GTT_PAGE_SIZE_2M);

			/*
			 * Check we don't expand for the limited Global GTT
			 * (mappable aperture is even more precious!). This
			 * also checks that we exclude the aliasing-ppgtt.
			 */
			GEM_BUG_ON(i915_vma_is_ggtt(vma));

			alignment = max(alignment, page_alignment);

			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
		}

		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
					  size, alignment, color,
					  start, end, flags);
		if (ret)
			return ret;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));

	list_add_tail(&vma->vm_link, &vma->vm->bound_list);

	if (vma->obj) {
		atomic_inc(&vma->obj->bind_count);
		assert_bind_count(vma->obj);
	}

	return 0;
}

static void
i915_vma_remove(struct i915_vma *vma)
{
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

	list_del(&vma->vm_link);

	/*
	 * Since the unbound list is global, only move to that list if
	 * no more VMAs exist.
	 */
	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		/*
		 * And finally now the object is completely decoupled from this
		 * vma, we can drop its hold on the backing storage and allow
		 * it to be reaped by the shrinker.
		 */
		atomic_dec(&obj->bind_count);
		assert_bind_count(obj);
	}

	drm_mm_remove_node(&vma->node);
}

static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
{
	unsigned int bound;
	bool pinned = true;

	bound = atomic_read(&vma->flags);
	do {
		if (unlikely(flags & ~bound))
			return false;

		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
			return false;

		if (!(bound & I915_VMA_PIN_MASK))
			goto unpinned;

		GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));

	return true;

unpinned:
	/*
	 * If pin_count==0, but we are bound, check under the lock to avoid
	 * racing with a concurrent i915_vma_unbind().
	 */
	mutex_lock(&vma->vm->mutex);
	do {
		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) {
			pinned = false;
			break;
		}

		if (unlikely(flags & ~bound)) {
			pinned = false;
			break;
		}
	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
	mutex_unlock(&vma->vm->mutex);

	return pinned;
}

static int vma_get_pages(struct i915_vma *vma)
{
	int err = 0;

	if (atomic_add_unless(&vma->pages_count, 1, 0))
		return 0;

	/* Allocations ahoy! */
	if (mutex_lock_interruptible(&vma->pages_mutex))
		return -EINTR;

	if (!atomic_read(&vma->pages_count)) {
		if (vma->obj) {
			err = i915_gem_object_pin_pages(vma->obj);
			if (err)
				goto unlock;
		}

		err = vma->ops->set_pages(vma);
		if (err) {
			if (vma->obj)
				i915_gem_object_unpin_pages(vma->obj);
			goto unlock;
		}
	}
	atomic_inc(&vma->pages_count);

unlock:
	mutex_unlock(&vma->pages_mutex);

	return err;
}

static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
{
	/* We allocate under vma_get_pages, so beware the shrinker */
	mutex_lock_nested(&vma->pages_mutex, SINGLE_DEPTH_NESTING);
	GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
	if (atomic_sub_return(count, &vma->pages_count) == 0) {
		vma->ops->clear_pages(vma);
		GEM_BUG_ON(vma->pages);
		if (vma->obj)
			i915_gem_object_unpin_pages(vma->obj);
	}
	mutex_unlock(&vma->pages_mutex);
}

static void vma_put_pages(struct i915_vma *vma)
{
	if (atomic_add_unless(&vma->pages_count, -1, 1))
		return;

	__vma_put_pages(vma, 1);
}

static void vma_unbind_pages(struct i915_vma *vma)
{
	unsigned int count;

	lockdep_assert_held(&vma->vm->mutex);

	/* The upper portion of pages_count is the number of bindings */
	count = atomic_read(&vma->pages_count);
	count >>= I915_VMA_PAGES_BIAS;
	GEM_BUG_ON(!count);

	__vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
}

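/*
 * The pages_count word is split by I915_VMA_PAGES_BIAS (see i915_vma.h): the
 * low bits count vma_get_pages() callers, the high bits count bindings. That
 * is why i915_vma_pin() below does
 *
 *	atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
 *
 * taking one page reference and one binding count in a single atomic, which
 * vma_unbind_pages() releases again at unbind time.
 */
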
int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct i915_vma_work *work = NULL;
	unsigned int bound;
	int err;

	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);

	GEM_BUG_ON(flags & PIN_UPDATE);
	GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));

	/* First try and grab the pin without rebinding the vma */
	if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK))
		return 0;

	err = vma_get_pages(vma);
	if (err)
		return err;

	if (flags & vma->vm->bind_async_flags) {
		work = i915_vma_work();
		if (!work) {
			err = -ENOMEM;
			goto err_pages;
		}
	}

	/* No more allocations allowed once we hold vm->mutex */
	err = mutex_lock_interruptible(&vma->vm->mutex);
	if (err)
		goto err_fence;

	bound = atomic_read(&vma->flags);
	if (unlikely(bound & I915_VMA_ERROR)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
		err = -EAGAIN; /* pins are meant to be fairly temporary */
		goto err_unlock;
	}

	if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
		__i915_vma_pin(vma);
		goto err_unlock;
	}

	err = i915_active_acquire(&vma->active);
	if (err)
		goto err_unlock;

	if (!(bound & I915_VMA_BIND_MASK)) {
		err = i915_vma_insert(vma, size, alignment, flags);
		if (err)
			goto err_active;

		if (i915_is_ggtt(vma->vm))
			__i915_vma_set_map_and_fenceable(vma);
	}

	GEM_BUG_ON(!vma->pages);
	err = i915_vma_bind(vma,
			    vma->obj ? vma->obj->cache_level : 0,
			    flags, work);
	if (err)
		goto err_remove;

	/* There should only be at most 2 active bindings (user, global) */
	GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
	atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
	list_move_tail(&vma->vm_link, &vma->vm->bound_list);

	__i915_vma_pin(vma);
	GEM_BUG_ON(!i915_vma_is_pinned(vma));
	GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));

err_remove:
	if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK))
		i915_vma_remove(vma);
err_active:
	i915_active_release(&vma->active);
err_unlock:
	mutex_unlock(&vma->vm->mutex);
err_fence:
	if (work)
		dma_fence_work_commit(&work->base);
err_pages:
	vma_put_pages(vma);
	return err;
}

void i915_vma_close(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	unsigned long flags;

	GEM_BUG_ON(i915_vma_is_closed(vma));

	/*
	 * We defer actually closing, unbinding and destroying the VMA until
	 * the next idle point, or if the object is freed in the meantime. By
	 * postponing the unbind, we allow for it to be resurrected by the
	 * client, avoiding the work required to rebind the VMA. This is
	 * advantageous for DRI, where the client/server pass objects
	 * between themselves, temporarily opening a local VMA to the
	 * object, and then closing it again. The same object is then reused
	 * on the next frame (or two, depending on the depth of the swap queue)
	 * causing us to rebind the VMA once more. This ends up being a lot
	 * of wasted work for the steady state.
	 */
	spin_lock_irqsave(&i915->gt.closed_lock, flags);
	list_add(&vma->closed_link, &i915->gt.closed_vma);
	spin_unlock_irqrestore(&i915->gt.closed_lock, flags);
}

static void __i915_vma_remove_closed(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;

	spin_lock_irq(&i915->gt.closed_lock);
	list_del_init(&vma->closed_link);
	spin_unlock_irq(&i915->gt.closed_lock);
}

void i915_vma_reopen(struct i915_vma *vma)
{
	if (i915_vma_is_closed(vma))
		__i915_vma_remove_closed(vma);
}

void i915_vma_destroy(struct i915_vma *vma)
{
	if (drm_mm_node_allocated(&vma->node)) {
		mutex_lock(&vma->vm->mutex);
		atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
		WARN_ON(__i915_vma_unbind(vma));
		mutex_unlock(&vma->vm->mutex);
		GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
	}
	GEM_BUG_ON(i915_vma_is_active(vma));

	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		spin_lock(&obj->vma.lock);
		list_del(&vma->obj_link);
		rb_erase(&vma->obj_node, &obj->vma.tree);
		spin_unlock(&obj->vma.lock);
	}

	__i915_vma_remove_closed(vma);
	i915_vm_put(vma->vm);

	i915_active_fini(&vma->active);
	i915_vma_free(vma);
}

void i915_vma_parked(struct drm_i915_private *i915)
{
	struct i915_vma *vma, *next;

	spin_lock_irq(&i915->gt.closed_lock);
	list_for_each_entry_safe(vma, next, &i915->gt.closed_vma, closed_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_address_space *vm = vma->vm;

		/* XXX All to avoid keeping a reference on i915_vma itself */

		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		if (!i915_vm_tryopen(vm)) {
			i915_gem_object_put(obj);
			obj = NULL;
		}

		spin_unlock_irq(&i915->gt.closed_lock);

		if (obj) {
			i915_vma_destroy(vma);
			i915_gem_object_put(obj);
		}

		i915_vm_close(vm);

		/* Restart after dropping lock */
		spin_lock_irq(&i915->gt.closed_lock);
		next = list_first_entry(&i915->gt.closed_vma,
					typeof(*next), closed_link);
	}
	spin_unlock_irq(&i915->gt.closed_lock);
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}

void i915_vma_revoke_mmap(struct i915_vma *vma)
{
	struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
	u64 vma_offset;

	lockdep_assert_held(&vma->vm->mutex);

	if (!i915_vma_has_userfault(vma))
		return;

	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
	GEM_BUG_ON(!vma->obj->userfault_count);

	vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
			    drm_vma_node_offset_addr(node) + vma_offset,
			    vma->size,
			    1);

	i915_vma_unset_userfault(vma);
	if (!--vma->obj->userfault_count)
		list_del(&vma->obj->userfault_link);
}

int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
{
	int err;

	GEM_BUG_ON(!i915_vma_is_pinned(vma));

	/* Wait for the vma to be bound before we start! */
	err = i915_request_await_active(rq, &vma->active);
	if (err)
		return err;

	return i915_active_add_request(&vma->active, rq);
}

int i915_vma_move_to_active(struct i915_vma *vma,
			    struct i915_request *rq,
			    unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int err;

	assert_object_held(obj);

	err = __i915_vma_move_to_active(vma, rq);
	if (unlikely(err))
		return err;

	if (flags & EXEC_OBJECT_WRITE) {
		if (intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CS))
			i915_active_add_request(&obj->frontbuffer->write, rq);

		dma_resv_add_excl_fence(vma->resv, &rq->fence);
		obj->write_domain = I915_GEM_DOMAIN_RENDER;
		obj->read_domains = 0;
	} else {
		err = dma_resv_reserve_shared(vma->resv, 1);
		if (unlikely(err))
			return err;

		dma_resv_add_shared_fence(vma->resv, &rq->fence);
		obj->write_domain = 0;
	}
	obj->read_domains |= I915_GEM_GPU_DOMAINS;
	obj->mm.dirty = true;

	GEM_BUG_ON(!i915_vma_is_active(vma));
	return 0;
}

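/*
 * Illustrative execbuf-style usage (sketch; assumes the caller has already
 * pinned the vma and built the request @rq):
 *
 *	i915_vma_lock(vma);
 *	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 *	i915_vma_unlock(vma);
 *
 * i915_vma_lock() takes the object's dma_resv lock, satisfying the
 * assert_object_held() above.
 */
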
int __i915_vma_unbind(struct i915_vma *vma)
{
	int ret;

	lockdep_assert_held(&vma->vm->mutex);

	/*
	 * First wait upon any activity as retiring the request may
	 * have side-effects such as unpinning or even unbinding this vma.
	 *
	 * XXX Actually waiting under the vm->mutex is a hindrance and
	 * should be pipelined wherever possible. In cases where that is
	 * unavoidable, we should lift the wait to before the mutex.
	 */
	ret = i915_vma_sync(vma);
	if (ret)
		return ret;

	GEM_BUG_ON(i915_vma_is_active(vma));
	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EBUSY;
	}

	GEM_BUG_ON(i915_vma_is_active(vma));
	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_map_and_fenceable(vma)) {
		/*
		 * Check that we have flushed all writes through the GGTT
		 * before the unbind; otherwise, due to the non-strict nature
		 * of those indirect writes, they may end up referencing the
		 * GGTT PTE after the unbind.
		 */
		i915_vma_flush_writes(vma);
		GEM_BUG_ON(i915_vma_has_ggtt_write(vma));

		/* release the fence reg _after_ flushing */
		ret = i915_vma_revoke_fence(vma);
		if (ret)
			return ret;

		/* Force a pagefault for domain tracking on next user access */
		i915_vma_revoke_mmap(vma);

		__i915_vma_iounmap(vma);
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	}
	GEM_BUG_ON(vma->fence);
	GEM_BUG_ON(i915_vma_has_userfault(vma));

	if (likely(atomic_read(&vma->vm->open))) {
		trace_i915_vma_unbind(vma);
		vma->ops->unbind_vma(vma);
	}
	atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR), &vma->flags);

	vma_unbind_pages(vma);
	i915_vma_remove(vma);

	return 0;
}

int i915_vma_unbind(struct i915_vma *vma)
{
	struct i915_address_space *vm = vma->vm;
	int err;

	err = mutex_lock_interruptible(&vm->mutex);
	if (err)
		return err;

	err = __i915_vma_unbind(vma);
	mutex_unlock(&vm->mutex);

	return err;
}

struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
{
	i915_gem_object_make_unshrinkable(vma->obj);
	return vma;
}

void i915_vma_make_shrinkable(struct i915_vma *vma)
{
	i915_gem_object_make_shrinkable(vma->obj);
}

void i915_vma_make_purgeable(struct i915_vma *vma)
{
	i915_gem_object_make_purgeable(vma->obj);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif

static void i915_global_vma_shrink(void)
{
	kmem_cache_shrink(global.slab_vmas);
}

static void i915_global_vma_exit(void)
{
	kmem_cache_destroy(global.slab_vmas);
}

static struct i915_global_vma global = { {
	.shrink = i915_global_vma_shrink,
	.exit = i915_global_vma_exit,
} };

int __init i915_global_vma_init(void)
{
	global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
	if (!global.slab_vmas)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}