/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_vma.h"

#include "i915_drv.h"
#include "intel_ringbuffer.h"
#include "intel_frontbuffer.h"

#include <drm/drm_gem.h>

#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
	unsigned long *entries;
	unsigned int nr_entries;
	char buf[512];

	if (!vma->node.stack) {
		DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
				 vma->node.start, vma->node.size, reason);
		return;
	}

	nr_entries = stack_depot_fetch(vma->node.stack, &entries);
	stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0);
	DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
			 vma->node.start, vma->node.size, reason, buf);
}

#else

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
}

#endif

static void obj_bump_mru(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	spin_lock(&i915->mm.obj_lock);
	if (obj->bind_count)
		list_move_tail(&obj->mm.link, &i915->mm.bound_list);
	spin_unlock(&i915->mm.obj_lock);

	obj->mm.dirty = true; /* be paranoid */
}

static void __i915_vma_retire(struct i915_active *ref)
{
	struct i915_vma *vma = container_of(ref, typeof(*vma), active);
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(!i915_gem_object_is_active(obj));
	if (--obj->active_count)
		return;

	/* Prune the shared fence arrays iff completely idle (inc. external) */
	if (reservation_object_trylock(obj->resv)) {
		if (reservation_object_test_signaled_rcu(obj->resv, true))
			reservation_object_add_excl_fence(obj->resv, NULL);
		reservation_object_unlock(obj->resv);
	}

	/*
	 * Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to, of course!)
	 */
	obj_bump_mru(obj);

	if (i915_gem_object_has_active_reference(obj)) {
		i915_gem_object_clear_active_reference(obj);
		i915_gem_object_put(obj);
	}
}

static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	struct rb_node *rb, **p;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);

	vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	i915_active_init(vm->i915, &vma->active, __i915_vma_retire);
	INIT_ACTIVE_REQUEST(&vma->last_fence);

	vma->vm = vm;
	vma->ops = &vm->vma_ops;
	vma->obj = obj;
	vma->resv = obj->resv;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size > obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_vma;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size || /* overflow */
			     vma->fence_size > vm->total))
			goto err_vma;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		vma->flags |= I915_VMA_GGTT;
	}

	spin_lock(&obj->vma.lock);

	rb = NULL;
	p = &obj->vma.tree.rb_node;
	while (*p) {
		struct i915_vma *pos;
		long cmp;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);

		/*
		 * If the view already exists in the tree, another thread
		 * already created a matching vma, so return the older instance
		 * and dispose of ours.
		 */
		cmp = i915_vma_compare(pos, vm, view);
		if (cmp == 0) {
			spin_unlock(&obj->vma.lock);
			kmem_cache_free(vm->i915->vmas, vma);
			return pos;
		}

		if (cmp < 0)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma.tree);

	if (i915_vma_is_ggtt(vma))
		/*
		 * We put the GGTT vma at the start of the vma-list, followed
		 * by the ppGTT vma. This allows us to break early when
		 * iterating over only the GGTT vma for an object, see
		 * for_each_ggtt_vma().
		 */
		list_add(&vma->obj_link, &obj->vma.list);
	else
		list_add_tail(&vma->obj_link, &obj->vma.list);

	spin_unlock(&obj->vma.lock);

	mutex_lock(&vm->mutex);
	list_add(&vma->vm_link, &vm->unbound_list);
	mutex_unlock(&vm->mutex);

	return vma;

err_vma:
	kmem_cache_free(vm->i915->vmas, vma);
	return ERR_PTR(-E2BIG);
}

static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma.tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Must be called with struct_mutex held.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	GEM_BUG_ON(view && !i915_is_ggtt(vm));
	GEM_BUG_ON(vm->closed);

	spin_lock(&obj->vma.lock);
	vma = vma_lookup(obj, vm, view);
	spin_unlock(&obj->vma.lock);

	/* vma_create() will resolve the race if another creates the vma */
	if (unlikely(!vma))
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	return vma;
}
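
/*
 * Usage sketch (editor's illustration, not driver code): a typical caller
 * resolves the singleton GGTT VMA for an object and then pins it. The names
 * obj, i915 and err are stand-ins; the caller is assumed to hold
 * struct_mutex and a reference on the object, and error unwinding is
 * abbreviated.
 *
 *	struct i915_vma *vma;
 *	int err;
 *
 *	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
 *	if (err)
 *		return err;
 *
 * Repeated calls with the same (obj, vm, view) triple return the same
 * struct i915_vma, so callers must not assume exclusive ownership.
 */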

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
		  u32 flags)
{
	u32 bind_flags;
	u32 vma_flags;
	int ret;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > vma->node.size);

	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
					      vma->node.size,
					      vma->vm->total)))
		return -ENODEV;

	if (GEM_DEBUG_WARN_ON(!flags))
		return -EINVAL;

	bind_flags = 0;
	if (flags & PIN_GLOBAL)
		bind_flags |= I915_VMA_GLOBAL_BIND;
	if (flags & PIN_USER)
		bind_flags |= I915_VMA_LOCAL_BIND;

	vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
	if (flags & PIN_UPDATE)
		bind_flags |= vma_flags;
	else
		bind_flags &= ~vma_flags;
	if (bind_flags == 0)
		return 0;

	GEM_BUG_ON(!vma->pages);

	trace_i915_vma_bind(vma, bind_flags);
	ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
	if (ret)
		return ret;

	vma->flags |= bind_flags;
	return 0;
}
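
/*
 * Summary of the flag handling above (an editor's note restating the code,
 * not new behaviour):
 *
 *	PIN_GLOBAL -> I915_VMA_GLOBAL_BIND (insert PTEs into the GGTT)
 *	PIN_USER   -> I915_VMA_LOCAL_BIND  (insert PTEs into the ppGTT)
 *	PIN_UPDATE -> also rewrite PTEs for bindings that already exist
 *
 * For example, a vma already holding I915_VMA_GLOBAL_BIND that is bound
 * again with only PIN_GLOBAL yields bind_flags == 0 and returns early
 * without touching the PTEs.
 */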

void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;
	int err;

	/* Access through the GTT requires the device to be awake. */
	assert_rpm_wakelock_held(vma->vm->i915);

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
		err = -ENODEV;
		goto err;
	}

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);

	ptr = vma->iomap;
	if (ptr == NULL) {
		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
					vma->node.start,
					vma->node.size);
		if (ptr == NULL) {
			err = -ENOMEM;
			goto err;
		}

		vma->iomap = ptr;
	}

	__i915_vma_pin(vma);

	err = i915_vma_pin_fence(vma);
	if (err)
		goto err_unpin;

	i915_vma_set_ggtt_write(vma);
	return ptr;

err_unpin:
	__i915_vma_unpin(vma);
err:
	return IO_ERR_PTR(err);
}
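
/*
 * Usage sketch (editor's illustration; value and offset are placeholders,
 * and the vma is assumed to be pinned into the mappable GGTT aperture with
 * a runtime-pm wakeref held):
 *
 *	void __iomem *map;
 *
 *	map = i915_vma_pin_iomap(vma);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 *
 *	writel(value, map + offset);
 *
 *	i915_vma_unpin_iomap(vma);
 *
 * The WC iomapping is cached in vma->iomap and persists until the vma is
 * unbound, so pin/unpin cycles after the first are cheap.
 */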

void i915_vma_flush_writes(struct i915_vma *vma)
{
	if (!i915_vma_has_ggtt_write(vma))
		return;

	i915_gem_flush_ggtt_writes(vma->vm->i915);

	i915_vma_unset_ggtt_write(vma);
}

void i915_vma_unpin_iomap(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	GEM_BUG_ON(vma->iomap == NULL);

	i915_vma_flush_writes(vma);

	i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
}

void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;
	GEM_BUG_ON(!obj);

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	if (flags & I915_VMA_RELEASE_MAP)
		i915_gem_object_unpin_map(obj);

	__i915_gem_object_release_unless_active(obj);
}

bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (vma->node.size < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}
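
/*
 * Editor's note on the PIN_OFFSET_* checks above: the bias or fixed address
 * is carried in the upper bits of @flags and recovered with PIN_OFFSET_MASK,
 * e.g. (illustrative, with bias a page-aligned address)
 *
 *	flags = PIN_OFFSET_BIAS | bias;
 *	...
 *	misplaced = vma->node.start < (flags & PIN_OFFSET_MASK);
 *
 * so a vma currently placed below the requested bias is reported as
 * misplaced and must be rebound.
 */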

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	/*
	 * Explicitly disable for rotated VMA since the display does not
	 * need the fence and the VMA is not accessible to other users.
	 */
	if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
		return;

	fenceable = (vma->node.size >= vma->fence_size &&
		     IS_ALIGNED(vma->node.start, vma->fence_alignment));

	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		vma->flags |= I915_VMA_CAN_FENCE;
	else
		vma->flags &= ~I915_VMA_CAN_FENCE;
}

static bool color_differs(struct drm_mm_node *node, unsigned long color)
{
	return node->allocated && node->color != color;
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (vma->vm->mm.color_adjust == NULL)
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(node))
		return false;

	return true;
}

static void assert_bind_count(const struct drm_i915_gem_object *obj)
{
	/*
	 * Combine the assertion that the object is bound and that we have
	 * pinned its pages. But we should never have bound the object
	 * more than we have pinned its pages. (For complete accuracy, we
	 * assume that no one else is pinning the pages, but as a rough
	 * assertion that we will not run into problems later, this will do!)
	 */
	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry, to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct drm_i915_private *dev_priv = vma->vm->i915;
	unsigned int cache_level;
	u64 start, end;
	int ret;

	GEM_BUG_ON(i915_vma_is_closed(vma));
	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	/*
	 * If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
			  size, flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -ENOSPC;
	}

	if (vma->obj) {
		ret = i915_gem_object_pin_pages(vma->obj);
		if (ret)
			return ret;

		cache_level = vma->obj->cache_level;
	} else {
		cache_level = 0;
	}

	GEM_BUG_ON(vma->pages);

	ret = vma->ops->set_pages(vma);
	if (ret)
		goto err_unpin;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;

		if (!IS_ALIGNED(offset, alignment) ||
		    range_overflows(offset, size, end)) {
			ret = -EINVAL;
			goto err_clear;
		}

		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
					   size, offset, cache_level,
					   flags);
		if (ret)
			goto err_clear;
	} else {
		/*
		 * We only support huge gtt pages through the 48b PPGTT,
		 * however we also don't want to force any alignment for
		 * objects which need to be tightly packed into the low 32bits.
		 *
		 * Note that we assume that the GGTT is limited to 4GiB for the
		 * foreseeable future. See also i915_ggtt_offset().
		 */
		if (upper_32_bits(end - 1) &&
		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
			/*
			 * We can't mix 64K and 4K PTEs in the same page-table
			 * (2M block), and so to avoid the ugliness and
			 * complexity of coloring we opt for just aligning 64K
			 * objects to 2M.
			 */
			u64 page_alignment =
				rounddown_pow_of_two(vma->page_sizes.sg |
						     I915_GTT_PAGE_SIZE_2M);
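
			/*
			 * Worked example (editor's note): for an object
			 * backed by 64K pages, page_sizes.sg contains
			 * I915_GTT_PAGE_SIZE_64K (0x10000), so
			 * rounddown_pow_of_two(0x10000 | 0x200000) ==
			 * 0x200000 and the vma is aligned to 2M. Objects
			 * using only 4K pages never reach this path, as
			 * the enclosing condition filters them out.
			 */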

			/*
			 * Check we don't expand for the limited Global GTT
			 * (mappable aperture is even more precious!). This
			 * also checks that we exclude the aliasing-ppgtt.
			 */
			GEM_BUG_ON(i915_vma_is_ggtt(vma));

			alignment = max(alignment, page_alignment);

			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
		}

		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
					  size, alignment, cache_level,
					  start, end, flags);
		if (ret)
			goto err_clear;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));

	mutex_lock(&vma->vm->mutex);
	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
	mutex_unlock(&vma->vm->mutex);

	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		spin_lock(&dev_priv->mm.obj_lock);
		list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
		obj->bind_count++;
		spin_unlock(&dev_priv->mm.obj_lock);

		assert_bind_count(obj);
	}

	return 0;

err_clear:
	vma->ops->clear_pages(vma);
err_unpin:
	if (vma->obj)
		i915_gem_object_unpin_pages(vma->obj);
	return ret;
}

static void
i915_vma_remove(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

	vma->ops->clear_pages(vma);

	mutex_lock(&vma->vm->mutex);
	drm_mm_remove_node(&vma->node);
	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
	mutex_unlock(&vma->vm->mutex);

	/*
	 * Since the unbound list is global, only move to that list if
	 * no more VMAs exist.
	 */
	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		spin_lock(&i915->mm.obj_lock);
		if (--obj->bind_count == 0)
			list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
		spin_unlock(&i915->mm.obj_lock);

		/*
		 * And finally now the object is completely decoupled from this
		 * vma, we can drop its hold on the backing storage and allow
		 * it to be reaped by the shrinker.
		 */
		i915_gem_object_unpin_pages(obj);
		assert_bind_count(obj);
	}
}

int __i915_vma_do_pin(struct i915_vma *vma,
		      u64 size, u64 alignment, u64 flags)
{
	const unsigned int bound = vma->flags;
	int ret;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
	GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));

	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
		ret = -EBUSY;
		goto err_unpin;
	}

	if ((bound & I915_VMA_BIND_MASK) == 0) {
		ret = i915_vma_insert(vma, size, alignment, flags);
		if (ret)
			goto err_unpin;
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	ret = i915_vma_bind(vma, vma->obj ? vma->obj->cache_level : 0, flags);
	if (ret)
		goto err_remove;

	GEM_BUG_ON((vma->flags & I915_VMA_BIND_MASK) == 0);

	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
		__i915_vma_set_map_and_fenceable(vma);

	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
	return 0;

err_remove:
	if ((bound & I915_VMA_BIND_MASK) == 0) {
		i915_vma_remove(vma);
		GEM_BUG_ON(vma->pages);
		GEM_BUG_ON(vma->flags & I915_VMA_BIND_MASK);
	}
err_unpin:
	__i915_vma_unpin(vma);
	return ret;
}
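
/*
 * Usage sketch (editor's illustration): callers normally reach this through
 * the i915_vma_pin() wrapper in i915_vma.h, which short-cuts when the vma
 * is already bound as requested, e.g. pinning into the mappable aperture
 * with a 4K minimum alignment:
 *
 *	err = i915_vma_pin(vma, 0, 4096, PIN_GLOBAL | PIN_MAPPABLE);
 *	if (err)
 *		return err;
 *	...
 *	i915_vma_unpin(vma);
 */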

void i915_vma_close(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	GEM_BUG_ON(i915_vma_is_closed(vma));
	vma->flags |= I915_VMA_CLOSED;

	/*
	 * We defer actually closing, unbinding and destroying the VMA until
	 * the next idle point, or if the object is freed in the meantime. By
	 * postponing the unbind, we allow for it to be resurrected by the
	 * client, avoiding the work required to rebind the VMA. This is
	 * advantageous for DRI, where the client/server pass objects
	 * between themselves, temporarily opening a local VMA to the
	 * object, and then closing it again. The same object is then reused
	 * on the next frame (or two, depending on the depth of the swap queue)
	 * causing us to rebind the VMA once more. This ends up being a lot
	 * of wasted work for the steady state.
	 */
	list_add_tail(&vma->closed_link, &vma->vm->i915->gt.closed_vma);
}

void i915_vma_reopen(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	if (vma->flags & I915_VMA_CLOSED) {
		vma->flags &= ~I915_VMA_CLOSED;
		list_del(&vma->closed_link);
	}
}

static void __i915_vma_destroy(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;

	GEM_BUG_ON(vma->node.allocated);
	GEM_BUG_ON(vma->fence);

	GEM_BUG_ON(i915_active_request_isset(&vma->last_fence));

	mutex_lock(&vma->vm->mutex);
	list_del(&vma->vm_link);
	mutex_unlock(&vma->vm->mutex);

	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		spin_lock(&obj->vma.lock);
		list_del(&vma->obj_link);
		rb_erase(&vma->obj_node, &vma->obj->vma.tree);
		spin_unlock(&obj->vma.lock);
	}

	i915_active_fini(&vma->active);

	kmem_cache_free(i915->vmas, vma);
}

void i915_vma_destroy(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	GEM_BUG_ON(i915_vma_is_active(vma));
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (i915_vma_is_closed(vma))
		list_del(&vma->closed_link);

	WARN_ON(i915_vma_unbind(vma));
	__i915_vma_destroy(vma);
}

void i915_vma_parked(struct drm_i915_private *i915)
{
	struct i915_vma *vma, *next;

	list_for_each_entry_safe(vma, next, &i915->gt.closed_vma, closed_link) {
		GEM_BUG_ON(!i915_vma_is_closed(vma));
		i915_vma_destroy(vma);
	}

	GEM_BUG_ON(!list_empty(&i915->gt.closed_vma));
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}

void i915_vma_revoke_mmap(struct i915_vma *vma)
{
	struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
	u64 vma_offset;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	if (!i915_vma_has_userfault(vma))
		return;

	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
	GEM_BUG_ON(!vma->obj->userfault_count);

	vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
			    drm_vma_node_offset_addr(node) + vma_offset,
			    vma->size,
			    1);

	i915_vma_unset_userfault(vma);
	if (!--vma->obj->userfault_count)
		list_del(&vma->obj->userfault_link);
}
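
/*
 * Worked example of the range computation above (editor's note): for a
 * partial view starting at page 16 of the object, partial.offset is 16 and
 * vma_offset is 16 << PAGE_SHIFT == 64KiB, so only the CPU PTEs covering
 * vma->size bytes from drm_vma_node_offset_addr(node) + 64KiB are zapped.
 * A normal view reads 0 from the zeroed union, revoking the mmap from the
 * start of the object.
 */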

static void export_fence(struct i915_vma *vma,
			 struct i915_request *rq,
			 unsigned int flags)
{
	struct reservation_object *resv = vma->resv;

	/*
	 * Ignore errors from failing to allocate the new fence; we can't
	 * handle an error right now. The worst case should be missed
	 * synchronisation leading to rendering corruption.
	 */
	reservation_object_lock(resv, NULL);
	if (flags & EXEC_OBJECT_WRITE)
		reservation_object_add_excl_fence(resv, &rq->fence);
	else if (reservation_object_reserve_shared(resv, 1) == 0)
		reservation_object_add_shared_fence(resv, &rq->fence);
	reservation_object_unlock(resv);
}

int i915_vma_move_to_active(struct i915_vma *vma,
			    struct i915_request *rq,
			    unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;

	lockdep_assert_held(&rq->i915->drm.struct_mutex);
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	/*
	 * Add a reference if we're newly entering the active list.
	 * The order in which we add operations to the retirement queue is
	 * vital here: mark_active adds to the start of the callback list,
	 * such that subsequent callbacks are called first. Therefore we
	 * add the active reference first and queue for it to be dropped
	 * *last*.
	 */
	if (!vma->active.count)
		obj->active_count++;

	if (unlikely(i915_active_ref(&vma->active, rq->fence.context, rq))) {
		if (!vma->active.count)
			obj->active_count--;
		return -ENOMEM;
	}

	GEM_BUG_ON(!i915_vma_is_active(vma));
	GEM_BUG_ON(!obj->active_count);

	obj->write_domain = 0;
	if (flags & EXEC_OBJECT_WRITE) {
		obj->write_domain = I915_GEM_DOMAIN_RENDER;

		if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
			__i915_active_request_set(&obj->frontbuffer_write, rq);

		obj->read_domains = 0;
	}
	obj->read_domains |= I915_GEM_GPU_DOMAINS;

	if (flags & EXEC_OBJECT_NEEDS_FENCE)
		__i915_active_request_set(&vma->last_fence, rq);

	export_fence(vma, rq, flags);
	return 0;
}
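
/*
 * Usage sketch (editor's illustration): execbuffer marks each vma it has
 * pinned for a request as active before the request is submitted, e.g.
 *
 *	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 *	if (err)
 *		return err;
 *
 * For a write, this also updates the GEM domains, bumps frontbuffer
 * tracking for scanout objects and exports rq->fence to the object's
 * reservation object so that other drivers wait upon the write.
 */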

int i915_vma_unbind(struct i915_vma *vma)
{
	int ret;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	/*
	 * First wait upon any activity as retiring the request may
	 * have side-effects such as unpinning or even unbinding this vma.
	 */
	might_sleep();
	if (i915_vma_is_active(vma)) {
		/*
		 * When a closed VMA is retired, it is unbound - eek.
		 * In order to prevent it from being recursively closed,
		 * take a pin on the vma so that the second unbind is
		 * aborted.
		 *
		 * Even more scary is that the retire callback may free
		 * the object (last active vma). To prevent the explosion
		 * we defer the actual object free to a worker that can
		 * only proceed once it acquires the struct_mutex (which
		 * we currently hold, therefore it cannot free this object
		 * before we are finished).
		 */
		__i915_vma_pin(vma);

		ret = i915_active_wait(&vma->active);
		if (ret)
			goto unpin;

		ret = i915_active_request_retire(&vma->last_fence,
						 &vma->vm->i915->drm.struct_mutex);
unpin:
		__i915_vma_unpin(vma);
		if (ret)
			return ret;
	}
	GEM_BUG_ON(i915_vma_is_active(vma));

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EBUSY;
	}

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_map_and_fenceable(vma)) {
		/*
		 * Check that we have flushed all writes through the GGTT
		 * before the unbind; otherwise, due to the non-strict nature
		 * of those indirect writes, they may end up referencing the
		 * GGTT PTE after the unbind.
		 */
		i915_vma_flush_writes(vma);
		GEM_BUG_ON(i915_vma_has_ggtt_write(vma));

		/* release the fence reg _after_ flushing */
		ret = i915_vma_put_fence(vma);
		if (ret)
			return ret;

		/* Force a pagefault for domain tracking on next user access */
		i915_vma_revoke_mmap(vma);

		__i915_vma_iounmap(vma);
		vma->flags &= ~I915_VMA_CAN_FENCE;
	}
	GEM_BUG_ON(vma->fence);
	GEM_BUG_ON(i915_vma_has_userfault(vma));

	if (likely(!vma->vm->closed)) {
		trace_i915_vma_unbind(vma);
		vma->ops->unbind_vma(vma);
	}
	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);

	i915_vma_remove(vma);

	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif