xref: /linux/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c (revision df02351331671abb26788bc13f6d276e26ae068f)
1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24 
25 #include <linux/list_sort.h>
26 #include <linux/prime_numbers.h>
27 
28 #include "gem/i915_gem_context.h"
29 #include "gem/i915_gem_internal.h"
30 #include "gem/i915_gem_lmem.h"
31 #include "gem/i915_gem_region.h"
32 #include "gem/selftests/mock_context.h"
33 #include "gt/intel_context.h"
34 #include "gt/intel_gpu_commands.h"
35 #include "gt/intel_gtt.h"
36 
37 #include "i915_random.h"
38 #include "i915_selftest.h"
39 #include "i915_vma_resource.h"
40 
41 #include "mock_drm.h"
42 #include "mock_gem_device.h"
43 #include "mock_gtt.h"
44 #include "igt_flush_test.h"
45 
46 static void cleanup_freed_objects(struct drm_i915_private *i915)
47 {
48 	i915_gem_drain_freed_objects(i915);
49 }
50 
51 static void fake_free_pages(struct drm_i915_gem_object *obj,
52 			    struct sg_table *pages)
53 {
54 	sg_free_table(pages);
55 	kfree(pages);
56 }
57 
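/*
 * fake_get_pages() provides "backing storage" without allocating any real
 * memory: every scatterlist entry (in chunks of up to 2GiB) points at the
 * same bogus pfn, PFN_BIAS, and the dma address is simply that page's
 * physical address. The contents are never meant to be touched; the
 * selftests only care about exercising the GTT with arbitrarily large
 * objects.
 */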
58 static int fake_get_pages(struct drm_i915_gem_object *obj)
59 {
60 #define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
61 #define PFN_BIAS 0x1000
62 	struct sg_table *pages;
63 	struct scatterlist *sg;
64 	typeof(obj->base.size) rem;
65 
66 	pages = kmalloc(sizeof(*pages), GFP);
67 	if (!pages)
68 		return -ENOMEM;
69 
70 	rem = round_up(obj->base.size, BIT(31)) >> 31;
71 	/* restricted by sg_alloc_table */
72 	if (overflows_type(rem, unsigned int)) {
73 		kfree(pages);
74 		return -E2BIG;
75 	}
76 
77 	if (sg_alloc_table(pages, rem, GFP)) {
78 		kfree(pages);
79 		return -ENOMEM;
80 	}
81 
82 	rem = obj->base.size;
83 	for (sg = pages->sgl; sg; sg = sg_next(sg)) {
84 		unsigned long len = min_t(typeof(rem), rem, BIT(31));
85 
86 		GEM_BUG_ON(!len);
87 		sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
88 		sg_dma_address(sg) = page_to_phys(sg_page(sg));
89 		sg_dma_len(sg) = len;
90 
91 		rem -= len;
92 	}
93 	GEM_BUG_ON(rem);
94 
95 	__i915_gem_object_set_pages(obj, pages);
96 
97 	return 0;
98 #undef GFP
99 }
100 
101 static void fake_put_pages(struct drm_i915_gem_object *obj,
102 			   struct sg_table *pages)
103 {
104 	fake_free_pages(obj, pages);
105 	obj->mm.dirty = false;
106 }
107 
108 static const struct drm_i915_gem_object_ops fake_ops = {
109 	.name = "fake-gem",
110 	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
111 	.get_pages = fake_get_pages,
112 	.put_pages = fake_put_pages,
113 };
114 
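/*
 * Create a GEM object backed by the fake page machinery above: no real
 * memory is allocated for its contents, only the scatterlists describing
 * them. The "backing storage" is built once at creation time, so an object
 * that cannot even allocate its sg_table is reported as -ENOMEM here.
 */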
115 static struct drm_i915_gem_object *
116 fake_dma_object(struct drm_i915_private *i915, u64 size)
117 {
118 	static struct lock_class_key lock_class;
119 	struct drm_i915_gem_object *obj;
120 
121 	GEM_BUG_ON(!size);
122 	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
123 
124 	if (overflows_type(size, obj->base.size))
125 		return ERR_PTR(-E2BIG);
126 
127 	obj = i915_gem_object_alloc();
128 	if (!obj)
129 		goto err;
130 
131 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
132 	i915_gem_object_init(obj, &fake_ops, &lock_class, 0);
133 
134 	i915_gem_object_set_volatile(obj);
135 
136 	obj->write_domain = I915_GEM_DOMAIN_CPU;
137 	obj->read_domains = I915_GEM_DOMAIN_CPU;
138 	obj->pat_index = i915_gem_get_pat_index(i915, I915_CACHE_NONE);
139 
140 	/* Preallocate the "backing storage" */
141 	if (i915_gem_object_pin_pages_unlocked(obj))
142 		goto err_obj;
143 
144 	i915_gem_object_unpin_pages(obj);
145 	return obj;
146 
147 err_obj:
148 	i915_gem_object_put(obj);
149 err:
150 	return ERR_PTR(-ENOMEM);
151 }
152 
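/*
 * igt_ppgtt_alloc() pokes the ppGTT page-table allocator directly: first
 * allocating and clearing progressively larger ranges starting at offset
 * zero, then growing a single range incrementally, with both passes capped
 * by the amount of RAM in the system.
 */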
153 static int igt_ppgtt_alloc(void *arg)
154 {
155 	struct drm_i915_private *dev_priv = arg;
156 	struct i915_ppgtt *ppgtt;
157 	struct i915_gem_ww_ctx ww;
158 	u64 size, last, limit;
159 	int err = 0;
160 
161 	/* Allocate a ppgtt and try to fill the entire range */
162 
163 	if (!HAS_PPGTT(dev_priv))
164 		return 0;
165 
166 	ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
167 	if (IS_ERR(ppgtt))
168 		return PTR_ERR(ppgtt);
169 
170 	if (!ppgtt->vm.allocate_va_range)
171 		goto ppgtt_vm_put;
172 
173 	/*
174 	 * While we only allocate the page tables here, and so could
175 	 * address a much larger GTT than would actually fit into RAM,
176 	 * a practical limit is the number of physical pages in the system.
177 	 * This should ensure that we do not run into the oomkiller during
178 	 * the test and wilfully take down the machine.
179 	 */
180 	limit = totalram_pages() << PAGE_SHIFT;
181 	limit = min(ppgtt->vm.total, limit);
182 
183 	i915_gem_ww_ctx_init(&ww, false);
184 retry:
185 	err = i915_vm_lock_objects(&ppgtt->vm, &ww);
186 	if (err)
187 		goto err_ppgtt_cleanup;
188 
189 	/* Check we can allocate the entire range */
190 	for (size = 4096; size <= limit; size <<= 2) {
191 		struct i915_vm_pt_stash stash = {};
192 
193 		err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size);
194 		if (err)
195 			goto err_ppgtt_cleanup;
196 
197 		err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
198 		if (err) {
199 			i915_vm_free_pt_stash(&ppgtt->vm, &stash);
200 			goto err_ppgtt_cleanup;
201 		}
202 
203 		ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, size);
204 		cond_resched();
205 
206 		ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
207 
208 		i915_vm_free_pt_stash(&ppgtt->vm, &stash);
209 	}
210 
211 	/* Check we can incrementally allocate the entire range */
212 	for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) {
213 		struct i915_vm_pt_stash stash = {};
214 
215 		err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size - last);
216 		if (err)
217 			goto err_ppgtt_cleanup;
218 
219 		err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
220 		if (err) {
221 			i915_vm_free_pt_stash(&ppgtt->vm, &stash);
222 			goto err_ppgtt_cleanup;
223 		}
224 
225 		ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash,
226 					    last, size - last);
227 		cond_resched();
228 
229 		i915_vm_free_pt_stash(&ppgtt->vm, &stash);
230 	}
231 
232 err_ppgtt_cleanup:
233 	if (err == -EDEADLK) {
234 		err = i915_gem_ww_ctx_backoff(&ww);
235 		if (!err)
236 			goto retry;
237 	}
238 	i915_gem_ww_ctx_fini(&ww);
239 ppgtt_vm_put:
240 	i915_vm_put(&ppgtt->vm);
241 	return err;
242 }
243 
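/*
 * lowlevel_hole() bypasses the VMA layer and drives vm->insert_entries()
 * and vm->clear_range() directly, using a mock i915_vma_resource pointed
 * at a fake object, binding power-of-two sized chunks at randomised
 * offsets throughout the hole.
 */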
244 static int lowlevel_hole(struct i915_address_space *vm,
245 			 u64 hole_start, u64 hole_end,
246 			 unsigned long end_time)
247 {
248 	const unsigned int min_alignment =
249 		i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
250 	I915_RND_STATE(seed_prng);
251 	struct i915_vma_resource *mock_vma_res;
252 	unsigned int size;
253 
254 	mock_vma_res = kzalloc(sizeof(*mock_vma_res), GFP_KERNEL);
255 	if (!mock_vma_res)
256 		return -ENOMEM;
257 
258 	/* Keep creating larger objects until one cannot fit into the hole */
259 	for (size = 12; (hole_end - hole_start) >> size; size++) {
260 		I915_RND_SUBSTATE(prng, seed_prng);
261 		struct drm_i915_gem_object *obj;
262 		unsigned int *order, count, n;
263 		u64 hole_size, aligned_size;
264 
265 		aligned_size = max_t(u32, ilog2(min_alignment), size);
266 		hole_size = (hole_end - hole_start) >> aligned_size;
267 		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
268 			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
269 		count = hole_size >> 1;
270 		if (!count) {
271 			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
272 				 __func__, hole_start, hole_end, size, hole_size);
273 			break;
274 		}
275 
276 		do {
277 			order = i915_random_order(count, &prng);
278 			if (order)
279 				break;
280 		} while (count >>= 1);
281 		if (!count) {
282 			kfree(mock_vma_res);
283 			return -ENOMEM;
284 		}
285 		GEM_BUG_ON(!order);
286 
287 		GEM_BUG_ON(count * BIT_ULL(aligned_size) > vm->total);
288 		GEM_BUG_ON(hole_start + count * BIT_ULL(aligned_size) > hole_end);
289 
290 		/*
291 		 * Ignore allocation failures (i.e. don't report them as
292 		 * a test failure) as we are purposefully allocating very
293 		 * large objects without checking that we have sufficient
294 		 * memory. We expect to hit -ENOMEM.
295 		 */
296 
297 		obj = fake_dma_object(vm->i915, BIT_ULL(size));
298 		if (IS_ERR(obj)) {
299 			kfree(order);
300 			break;
301 		}
302 
303 		GEM_BUG_ON(obj->base.size != BIT_ULL(size));
304 
305 		if (i915_gem_object_pin_pages_unlocked(obj)) {
306 			i915_gem_object_put(obj);
307 			kfree(order);
308 			break;
309 		}
310 
311 		for (n = 0; n < count; n++) {
312 			u64 addr = hole_start + order[n] * BIT_ULL(aligned_size);
313 			intel_wakeref_t wakeref;
314 
315 			GEM_BUG_ON(addr + BIT_ULL(aligned_size) > vm->total);
316 
317 			if (igt_timeout(end_time,
318 					"%s timed out before %d/%d\n",
319 					__func__, n, count)) {
320 				hole_end = hole_start; /* quit */
321 				break;
322 			}
323 
324 			if (vm->allocate_va_range) {
325 				struct i915_vm_pt_stash stash = {};
326 				struct i915_gem_ww_ctx ww;
327 				int err;
328 
329 				i915_gem_ww_ctx_init(&ww, false);
330 retry:
331 				err = i915_vm_lock_objects(vm, &ww);
332 				if (err)
333 					goto alloc_vm_end;
334 
335 				err = -ENOMEM;
336 				if (i915_vm_alloc_pt_stash(vm, &stash,
337 							   BIT_ULL(size)))
338 					goto alloc_vm_end;
339 
340 				err = i915_vm_map_pt_stash(vm, &stash);
341 				if (!err)
342 					vm->allocate_va_range(vm, &stash,
343 							      addr, BIT_ULL(size));
344 				i915_vm_free_pt_stash(vm, &stash);
345 alloc_vm_end:
346 				if (err == -EDEADLK) {
347 					err = i915_gem_ww_ctx_backoff(&ww);
348 					if (!err)
349 						goto retry;
350 				}
351 				i915_gem_ww_ctx_fini(&ww);
352 
353 				if (err)
354 					break;
355 			}
356 
357 			mock_vma_res->bi.pages = obj->mm.pages;
358 			mock_vma_res->node_size = BIT_ULL(aligned_size);
359 			mock_vma_res->start = addr;
360 
361 			with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
362 			  vm->insert_entries(vm, mock_vma_res,
363 					     i915_gem_get_pat_index(vm->i915,
364 								    I915_CACHE_NONE),
365 					     0);
366 		}
367 		count = n;
368 
369 		i915_random_reorder(order, count, &prng);
370 		for (n = 0; n < count; n++) {
371 			u64 addr = hole_start + order[n] * BIT_ULL(aligned_size);
372 			intel_wakeref_t wakeref;
373 
374 			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
375 			with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
376 				vm->clear_range(vm, addr, BIT_ULL(size));
377 		}
378 
379 		i915_gem_object_unpin_pages(obj);
380 		i915_gem_object_put(obj);
381 
382 		kfree(order);
383 
384 		cleanup_freed_objects(vm->i915);
385 	}
386 
387 	kfree(mock_vma_res);
388 	return 0;
389 }
390 
391 static void close_object_list(struct list_head *objects,
392 			      struct i915_address_space *vm)
393 {
394 	struct drm_i915_gem_object *obj, *on;
395 	int __maybe_unused ignored;
396 
397 	list_for_each_entry_safe(obj, on, objects, st_link) {
398 		struct i915_vma *vma;
399 
400 		vma = i915_vma_instance(obj, vm, NULL);
401 		if (!IS_ERR(vma))
402 			ignored = i915_vma_unbind_unlocked(vma);
403 
404 		list_del(&obj->st_link);
405 		i915_gem_object_put(obj);
406 	}
407 }
408 
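/*
 * fill_hole() packs objects whose page counts follow the powers of a prime
 * against both edges of the hole ("top-down" and "bottom-up"), then checks
 * in forward and reverse list order that every VMA remained exactly where
 * it was pinned before unbinding the lot.
 */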
409 static int fill_hole(struct i915_address_space *vm,
410 		     u64 hole_start, u64 hole_end,
411 		     unsigned long end_time)
412 {
413 	const u64 hole_size = hole_end - hole_start;
414 	struct drm_i915_gem_object *obj;
415 	const unsigned int min_alignment =
416 		i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
417 	const unsigned long max_pages =
418 		min_t(u64, ULONG_MAX - 1, (hole_size / 2) >> ilog2(min_alignment));
419 	const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
420 	unsigned long npages, prime, flags;
421 	struct i915_vma *vma;
422 	LIST_HEAD(objects);
423 	int err;
424 
425 	/* Try binding many VMA working inwards from either edge */
426 
427 	flags = PIN_OFFSET_FIXED | PIN_USER;
428 	if (i915_is_ggtt(vm))
429 		flags |= PIN_GLOBAL;
430 
431 	for_each_prime_number_from(prime, 2, max_step) {
432 		for (npages = 1; npages <= max_pages; npages *= prime) {
433 			const u64 full_size = npages << PAGE_SHIFT;
434 			const struct {
435 				const char *name;
436 				u64 offset;
437 				int step;
438 			} phases[] = {
439 				{ "top-down", hole_end, -1, },
440 				{ "bottom-up", hole_start, 1, },
441 				{ }
442 			}, *p;
443 
444 			obj = fake_dma_object(vm->i915, full_size);
445 			if (IS_ERR(obj))
446 				break;
447 
448 			list_add(&obj->st_link, &objects);
449 
450 			/*
451 			 * Align differing sized objects against the edges, and
452 			 * check we don't walk off into the void when binding
453 			 * them into the GTT.
454 			 */
455 			for (p = phases; p->name; p++) {
456 				u64 offset;
457 
458 				offset = p->offset;
459 				list_for_each_entry(obj, &objects, st_link) {
460 					u64 aligned_size = round_up(obj->base.size,
461 								    min_alignment);
462 
463 					vma = i915_vma_instance(obj, vm, NULL);
464 					if (IS_ERR(vma))
465 						continue;
466 
467 					if (p->step < 0) {
468 						if (offset < hole_start + aligned_size)
469 							break;
470 						offset -= aligned_size;
471 					}
472 
473 					err = i915_vma_pin(vma, 0, 0, offset | flags);
474 					if (err) {
475 						pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
476 						       __func__, p->name, err, npages, prime, offset);
477 						goto err;
478 					}
479 
480 					if (!drm_mm_node_allocated(&vma->node) ||
481 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
482 						pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
483 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
484 						       offset);
485 						err = -EINVAL;
486 						goto err;
487 					}
488 
489 					i915_vma_unpin(vma);
490 
491 					if (p->step > 0) {
492 						if (offset + aligned_size > hole_end)
493 							break;
494 						offset += aligned_size;
495 					}
496 				}
497 
498 				offset = p->offset;
499 				list_for_each_entry(obj, &objects, st_link) {
500 					u64 aligned_size = round_up(obj->base.size,
501 								    min_alignment);
502 
503 					vma = i915_vma_instance(obj, vm, NULL);
504 					if (IS_ERR(vma))
505 						continue;
506 
507 					if (p->step < 0) {
508 						if (offset < hole_start + aligned_size)
509 							break;
510 						offset -= aligned_size;
511 					}
512 
513 					if (!drm_mm_node_allocated(&vma->node) ||
514 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
515 						pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
516 						       __func__, p->name, vma->node.start, vma->node.size,
517 						       offset);
518 						err = -EINVAL;
519 						goto err;
520 					}
521 
522 					err = i915_vma_unbind_unlocked(vma);
523 					if (err) {
524 						pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
525 						       __func__, p->name, vma->node.start, vma->node.size,
526 						       err);
527 						goto err;
528 					}
529 
530 					if (p->step > 0) {
531 						if (offset + aligned_size > hole_end)
532 							break;
533 						offset += aligned_size;
534 					}
535 				}
536 
537 				offset = p->offset;
538 				list_for_each_entry_reverse(obj, &objects, st_link) {
539 					u64 aligned_size = round_up(obj->base.size,
540 								    min_alignment);
541 
542 					vma = i915_vma_instance(obj, vm, NULL);
543 					if (IS_ERR(vma))
544 						continue;
545 
546 					if (p->step < 0) {
547 						if (offset < hole_start + aligned_size)
548 							break;
549 						offset -= aligned_size;
550 					}
551 
552 					err = i915_vma_pin(vma, 0, 0, offset | flags);
553 					if (err) {
554 						pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
555 						       __func__, p->name, err, npages, prime, offset);
556 						goto err;
557 					}
558 
559 					if (!drm_mm_node_allocated(&vma->node) ||
560 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
561 						pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
562 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
563 						       offset);
564 						err = -EINVAL;
565 						goto err;
566 					}
567 
568 					i915_vma_unpin(vma);
569 
570 					if (p->step > 0) {
571 						if (offset + aligned_size > hole_end)
572 							break;
573 						offset += aligned_size;
574 					}
575 				}
576 
577 				offset = p->offset;
578 				list_for_each_entry_reverse(obj, &objects, st_link) {
579 					u64 aligned_size = round_up(obj->base.size,
580 								    min_alignment);
581 
582 					vma = i915_vma_instance(obj, vm, NULL);
583 					if (IS_ERR(vma))
584 						continue;
585 
586 					if (p->step < 0) {
587 						if (offset < hole_start + aligned_size)
588 							break;
589 						offset -= aligned_size;
590 					}
591 
592 					if (!drm_mm_node_allocated(&vma->node) ||
593 					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
594 						pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
595 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
596 						       offset);
597 						err = -EINVAL;
598 						goto err;
599 					}
600 
601 					err = i915_vma_unbind_unlocked(vma);
602 					if (err) {
603 						pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
604 						       __func__, p->name, vma->node.start, vma->node.size,
605 						       err);
606 						goto err;
607 					}
608 
609 					if (p->step > 0) {
610 						if (offset + aligned_size > hole_end)
611 							break;
612 						offset += aligned_size;
613 					}
614 				}
615 			}
616 
617 			if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
618 					__func__, npages, prime)) {
619 				err = -EINTR;
620 				goto err;
621 			}
622 		}
623 
624 		close_object_list(&objects, vm);
625 		cleanup_freed_objects(vm->i915);
626 	}
627 
628 	return 0;
629 
630 err:
631 	close_object_list(&objects, vm);
632 	return err;
633 }
634 
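/*
 * walk_hole() binds a single VMA at every suitably aligned offset within
 * the hole in turn, verifying each placement and unbinding before moving
 * on, for a series of prime-sized objects.
 */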
635 static int walk_hole(struct i915_address_space *vm,
636 		     u64 hole_start, u64 hole_end,
637 		     unsigned long end_time)
638 {
639 	const u64 hole_size = hole_end - hole_start;
640 	const unsigned long max_pages =
641 		min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
642 	unsigned long min_alignment;
643 	unsigned long flags;
644 	u64 size;
645 
646 	/* Try binding a single VMA in different positions within the hole */
647 
648 	flags = PIN_OFFSET_FIXED | PIN_USER;
649 	if (i915_is_ggtt(vm))
650 		flags |= PIN_GLOBAL;
651 
652 	min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
653 
654 	for_each_prime_number_from(size, 1, max_pages) {
655 		struct drm_i915_gem_object *obj;
656 		struct i915_vma *vma;
657 		u64 addr;
658 		int err = 0;
659 
660 		obj = fake_dma_object(vm->i915, size << PAGE_SHIFT);
661 		if (IS_ERR(obj))
662 			break;
663 
664 		vma = i915_vma_instance(obj, vm, NULL);
665 		if (IS_ERR(vma)) {
666 			err = PTR_ERR(vma);
667 			goto err_put;
668 		}
669 
670 		for (addr = hole_start;
671 		     addr + obj->base.size < hole_end;
672 		     addr += round_up(obj->base.size, min_alignment)) {
673 			err = i915_vma_pin(vma, 0, 0, addr | flags);
674 			if (err) {
675 				pr_err("%s bind failed at %llx + %llx [hole %llx- %llx] with err=%d\n",
676 				       __func__, addr, vma->size,
677 				       hole_start, hole_end, err);
678 				goto err_put;
679 			}
680 			i915_vma_unpin(vma);
681 
682 			if (!drm_mm_node_allocated(&vma->node) ||
683 			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
684 				pr_err("%s incorrect at %llx + %llx\n",
685 				       __func__, addr, vma->size);
686 				err = -EINVAL;
687 				goto err_put;
688 			}
689 
690 			err = i915_vma_unbind_unlocked(vma);
691 			if (err) {
692 				pr_err("%s unbind failed at %llx + %llx with err=%d\n",
693 				       __func__, addr, vma->size, err);
694 				goto err_put;
695 			}
696 
697 			GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
698 
699 			if (igt_timeout(end_time,
700 					"%s timed out at %llx\n",
701 					__func__, addr)) {
702 				err = -EINTR;
703 				goto err_put;
704 			}
705 		}
706 
707 err_put:
708 		i915_gem_object_put(obj);
709 		if (err)
710 			return err;
711 
712 		cleanup_freed_objects(vm->i915);
713 	}
714 
715 	return 0;
716 }
717 
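/*
 * pot_hole() pins a two-page object so that it straddles every
 * power-of-two boundary within the hole, stepping the boundary size down
 * from the top of the hole to twice the minimum alignment.
 */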
718 static int pot_hole(struct i915_address_space *vm,
719 		    u64 hole_start, u64 hole_end,
720 		    unsigned long end_time)
721 {
722 	struct drm_i915_gem_object *obj;
723 	struct i915_vma *vma;
724 	unsigned int min_alignment;
725 	unsigned long flags;
726 	unsigned int pot;
727 	int err = 0;
728 
729 	flags = PIN_OFFSET_FIXED | PIN_USER;
730 	if (i915_is_ggtt(vm))
731 		flags |= PIN_GLOBAL;
732 
733 	min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
734 
735 	obj = i915_gem_object_create_internal(vm->i915, 2 * I915_GTT_PAGE_SIZE);
736 	if (IS_ERR(obj))
737 		return PTR_ERR(obj);
738 
739 	vma = i915_vma_instance(obj, vm, NULL);
740 	if (IS_ERR(vma)) {
741 		err = PTR_ERR(vma);
742 		goto err_obj;
743 	}
744 
745 	/* Insert a pair of pages across every pot boundary within the hole */
746 	for (pot = fls64(hole_end - 1) - 1;
747 	     pot > ilog2(2 * min_alignment);
748 	     pot--) {
749 		u64 step = BIT_ULL(pot);
750 		u64 addr;
751 
752 		for (addr = round_up(hole_start + min_alignment, step) - min_alignment;
753 		     hole_end > addr && hole_end - addr >= 2 * min_alignment;
754 		     addr += step) {
755 			err = i915_vma_pin(vma, 0, 0, addr | flags);
756 			if (err) {
757 				pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
758 				       __func__,
759 				       addr,
760 				       hole_start, hole_end,
761 				       err);
762 				goto err_obj;
763 			}
764 
765 			if (!drm_mm_node_allocated(&vma->node) ||
766 			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
767 				pr_err("%s incorrect at %llx + %llx\n",
768 				       __func__, addr, vma->size);
769 				i915_vma_unpin(vma);
770 				err = i915_vma_unbind_unlocked(vma);
771 				err = -EINVAL;
772 				goto err_obj;
773 			}
774 
775 			i915_vma_unpin(vma);
776 			err = i915_vma_unbind_unlocked(vma);
777 			GEM_BUG_ON(err);
778 		}
779 
780 		if (igt_timeout(end_time,
781 				"%s timed out after %d/%d\n",
782 				__func__, pot, fls64(hole_end - 1) - 1)) {
783 			err = -EINTR;
784 			goto err_obj;
785 		}
786 	}
787 
788 err_obj:
789 	i915_gem_object_put(obj);
790 	return err;
791 }
792 
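/*
 * drunk_hole() is the randomised walk: for each power-of-two object size
 * it visits the candidate offsets within the hole in random order, pinning
 * and immediately unbinding at each one.
 */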
793 static int drunk_hole(struct i915_address_space *vm,
794 		      u64 hole_start, u64 hole_end,
795 		      unsigned long end_time)
796 {
797 	I915_RND_STATE(prng);
798 	unsigned int min_alignment;
799 	unsigned int size;
800 	unsigned long flags;
801 
802 	flags = PIN_OFFSET_FIXED | PIN_USER;
803 	if (i915_is_ggtt(vm))
804 		flags |= PIN_GLOBAL;
805 
806 	min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
807 
808 	/* Keep creating larger objects until one cannot fit into the hole */
809 	for (size = 12; (hole_end - hole_start) >> size; size++) {
810 		struct drm_i915_gem_object *obj;
811 		unsigned int *order, count, n;
812 		struct i915_vma *vma;
813 		u64 hole_size, aligned_size;
814 		int err = -ENODEV;
815 
816 		aligned_size = max_t(u32, ilog2(min_alignment), size);
817 		hole_size = (hole_end - hole_start) >> aligned_size;
818 		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
819 			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
820 		count = hole_size >> 1;
821 		if (!count) {
822 			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
823 				 __func__, hole_start, hole_end, size, hole_size);
824 			break;
825 		}
826 
827 		do {
828 			order = i915_random_order(count, &prng);
829 			if (order)
830 				break;
831 		} while (count >>= 1);
832 		if (!count)
833 			return -ENOMEM;
834 		GEM_BUG_ON(!order);
835 
836 		/*
837 		 * Ignore allocation failures (i.e. don't report them as
838 		 * a test failure) as we are purposefully allocating very
839 		 * large objects without checking that we have sufficient
840 		 * memory. We expect to hit -ENOMEM.
841 		 */
842 
843 		obj = fake_dma_object(vm->i915, BIT_ULL(size));
844 		if (IS_ERR(obj)) {
845 			kfree(order);
846 			break;
847 		}
848 
849 		vma = i915_vma_instance(obj, vm, NULL);
850 		if (IS_ERR(vma)) {
851 			err = PTR_ERR(vma);
852 			goto err_obj;
853 		}
854 
855 		GEM_BUG_ON(vma->size != BIT_ULL(size));
856 
857 		for (n = 0; n < count; n++) {
858 			u64 addr = hole_start + order[n] * BIT_ULL(aligned_size);
859 
860 			err = i915_vma_pin(vma, 0, 0, addr | flags);
861 			if (err) {
862 				pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
863 				       __func__,
864 				       addr, BIT_ULL(size),
865 				       hole_start, hole_end,
866 				       err);
867 				goto err_obj;
868 			}
869 
870 			if (!drm_mm_node_allocated(&vma->node) ||
871 			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
872 				pr_err("%s incorrect at %llx + %llx\n",
873 				       __func__, addr, BIT_ULL(size));
874 				i915_vma_unpin(vma);
875 				err = i915_vma_unbind_unlocked(vma);
876 				err = -EINVAL;
877 				goto err_obj;
878 			}
879 
880 			i915_vma_unpin(vma);
881 			err = i915_vma_unbind_unlocked(vma);
882 			GEM_BUG_ON(err);
883 
884 			if (igt_timeout(end_time,
885 					"%s timed out after %d/%d\n",
886 					__func__, n, count)) {
887 				err = -EINTR;
888 				goto err_obj;
889 			}
890 		}
891 
892 err_obj:
893 		i915_gem_object_put(obj);
894 		kfree(order);
895 		if (err)
896 			return err;
897 
898 		cleanup_freed_objects(vm->i915);
899 	}
900 
901 	return 0;
902 }
903 
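/*
 * __shrink_hole() fills the hole with objects of doubling size while
 * shrink_hole() below uses vm->fault_attr to inject allocation faults at
 * varying (prime) intervals, exercising the error/shrinker paths of the
 * page-table allocator.
 */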
904 static int __shrink_hole(struct i915_address_space *vm,
905 			 u64 hole_start, u64 hole_end,
906 			 unsigned long end_time)
907 {
908 	struct drm_i915_gem_object *obj;
909 	unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
910 	unsigned int min_alignment;
911 	unsigned int order = 12;
912 	LIST_HEAD(objects);
913 	int err = 0;
914 	u64 addr;
915 
916 	min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
917 
918 	/* Fill the hole with objects of doubling size */
919 	for (addr = hole_start; addr < hole_end; ) {
920 		struct i915_vma *vma;
921 		u64 size = BIT_ULL(order++);
922 
923 		size = min(size, hole_end - addr);
924 		obj = fake_dma_object(vm->i915, size);
925 		if (IS_ERR(obj)) {
926 			err = PTR_ERR(obj);
927 			break;
928 		}
929 
930 		list_add(&obj->st_link, &objects);
931 
932 		vma = i915_vma_instance(obj, vm, NULL);
933 		if (IS_ERR(vma)) {
934 			err = PTR_ERR(vma);
935 			break;
936 		}
937 
938 		GEM_BUG_ON(vma->size != size);
939 
940 		err = i915_vma_pin(vma, 0, 0, addr | flags);
941 		if (err) {
942 			pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
943 			       __func__, addr, size, hole_start, hole_end, err);
944 			break;
945 		}
946 
947 		if (!drm_mm_node_allocated(&vma->node) ||
948 		    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
949 			pr_err("%s incorrect at %llx + %llx\n",
950 			       __func__, addr, size);
951 			i915_vma_unpin(vma);
952 			err = i915_vma_unbind_unlocked(vma);
953 			err = -EINVAL;
954 			break;
955 		}
956 
957 		i915_vma_unpin(vma);
958 		addr += round_up(size, min_alignment);
959 
960 		/*
961 		 * Since we are injecting allocation faults at random intervals,
962 		 * wait for this allocation to complete before we change the
963 		 * faultinjection.
964 		 */
965 		err = i915_vma_sync(vma);
966 		if (err)
967 			break;
968 
969 		if (igt_timeout(end_time,
970 				"%s timed out at offset %llx [%llx - %llx]\n",
971 				__func__, addr, hole_start, hole_end)) {
972 			err = -EINTR;
973 			break;
974 		}
975 	}
976 
977 	close_object_list(&objects, vm);
978 	cleanup_freed_objects(vm->i915);
979 	return err;
980 }
981 
982 static int shrink_hole(struct i915_address_space *vm,
983 		       u64 hole_start, u64 hole_end,
984 		       unsigned long end_time)
985 {
986 	unsigned long prime;
987 	int err;
988 
989 	vm->fault_attr.probability = 999;
990 	atomic_set(&vm->fault_attr.times, -1);
991 
992 	for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
993 		vm->fault_attr.interval = prime;
994 		err = __shrink_hole(vm, hole_start, hole_end, end_time);
995 		if (err)
996 			break;
997 	}
998 
999 	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
1000 
1001 	return err;
1002 }
1003 
1004 static int shrink_boom(struct i915_address_space *vm,
1005 		       u64 hole_start, u64 hole_end,
1006 		       unsigned long end_time)
1007 {
1008 	unsigned int sizes[] = { SZ_2M, SZ_1G };
1009 	struct drm_i915_gem_object *purge;
1010 	struct drm_i915_gem_object *explode;
1011 	int err;
1012 	int i;
1013 
1014 	/*
1015 	 * Catch the case which shrink_hole seems to miss. The setup here
1016 	 * requires invoking the shrinker as we do the alloc_pt/alloc_pd, while
1017 	 * ensuring that all vma associated with the respective pd/pdp are
1018 	 * unpinned at the time.
1019 	 */
1020 
1021 	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
1022 		unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
1023 		unsigned int size = sizes[i];
1024 		struct i915_vma *vma;
1025 
1026 		purge = fake_dma_object(vm->i915, size);
1027 		if (IS_ERR(purge))
1028 			return PTR_ERR(purge);
1029 
1030 		vma = i915_vma_instance(purge, vm, NULL);
1031 		if (IS_ERR(vma)) {
1032 			err = PTR_ERR(vma);
1033 			goto err_purge;
1034 		}
1035 
1036 		err = i915_vma_pin(vma, 0, 0, flags);
1037 		if (err)
1038 			goto err_purge;
1039 
1040 		/* Should now be ripe for purging */
1041 		i915_vma_unpin(vma);
1042 
1043 		explode = fake_dma_object(vm->i915, size);
1044 		if (IS_ERR(explode)) {
1045 			err = PTR_ERR(explode);
1046 			goto err_purge;
1047 		}
1048 
1049 		vm->fault_attr.probability = 100;
1050 		vm->fault_attr.interval = 1;
1051 		atomic_set(&vm->fault_attr.times, -1);
1052 
1053 		vma = i915_vma_instance(explode, vm, NULL);
1054 		if (IS_ERR(vma)) {
1055 			err = PTR_ERR(vma);
1056 			goto err_explode;
1057 		}
1058 
1059 		err = i915_vma_pin(vma, 0, 0, flags | size);
1060 		if (err)
1061 			goto err_explode;
1062 
1063 		i915_vma_unpin(vma);
1064 
1065 		i915_gem_object_put(purge);
1066 		i915_gem_object_put(explode);
1067 
1068 		memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
1069 		cleanup_freed_objects(vm->i915);
1070 	}
1071 
1072 	return 0;
1073 
1074 err_explode:
1075 	i915_gem_object_put(explode);
1076 err_purge:
1077 	i915_gem_object_put(purge);
1078 	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
1079 	return err;
1080 }
1081 
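/*
 * misaligned_case() pins a single object from the given memory region at
 * the requested offset and, if the pin succeeds, verifies that vma->size
 * and the drm_mm node size were rounded up to the GTT page size actually
 * used for the binding.
 */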
1082 static int misaligned_case(struct i915_address_space *vm, struct intel_memory_region *mr,
1083 			   u64 addr, u64 size, unsigned long flags)
1084 {
1085 	struct drm_i915_gem_object *obj;
1086 	struct i915_vma *vma;
1087 	int err = 0;
1088 	u64 expected_vma_size, expected_node_size;
1089 	bool is_stolen = mr->type == INTEL_MEMORY_STOLEN_SYSTEM ||
1090 			 mr->type == INTEL_MEMORY_STOLEN_LOCAL;
1091 
1092 	obj = i915_gem_object_create_region(mr, size, 0, I915_BO_ALLOC_GPU_ONLY);
1093 	if (IS_ERR(obj)) {
1094 		/* if iGVT-g or DMAR is active, stolen mem will be uninitialized */
1095 		if (PTR_ERR(obj) == -ENODEV && is_stolen)
1096 			return 0;
1097 		return PTR_ERR(obj);
1098 	}
1099 
1100 	vma = i915_vma_instance(obj, vm, NULL);
1101 	if (IS_ERR(vma)) {
1102 		err = PTR_ERR(vma);
1103 		goto err_put;
1104 	}
1105 
1106 	err = i915_vma_pin(vma, 0, 0, addr | flags);
1107 	if (err)
1108 		goto err_put;
1109 	i915_vma_unpin(vma);
1110 
1111 	if (!drm_mm_node_allocated(&vma->node)) {
1112 		err = -EINVAL;
1113 		goto err_put;
1114 	}
1115 
1116 	if (i915_vma_misplaced(vma, 0, 0, addr | flags)) {
1117 		err = -EINVAL;
1118 		goto err_put;
1119 	}
1120 
1121 	expected_vma_size = round_up(size, 1 << (ffs(vma->resource->page_sizes_gtt) - 1));
1122 	expected_node_size = expected_vma_size;
1123 
1124 	if (HAS_64K_PAGES(vm->i915) && i915_gem_object_is_lmem(obj)) {
1125 		expected_vma_size = round_up(size, I915_GTT_PAGE_SIZE_64K);
1126 		expected_node_size = round_up(size, I915_GTT_PAGE_SIZE_64K);
1127 	}
1128 
1129 	if (vma->size != expected_vma_size || vma->node.size != expected_node_size) {
1130 		err = i915_vma_unbind_unlocked(vma);
1131 		err = -EBADSLT;
1132 		goto err_put;
1133 	}
1134 
1135 	err = i915_vma_unbind_unlocked(vma);
1136 	if (err)
1137 		goto err_put;
1138 
1139 	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
1140 
1141 err_put:
1142 	i915_gem_object_put(obj);
1143 	cleanup_freed_objects(vm->i915);
1144 	return err;
1145 }
1146 
1147 static int misaligned_pin(struct i915_address_space *vm,
1148 			  u64 hole_start, u64 hole_end,
1149 			  unsigned long end_time)
1150 {
1151 	struct intel_memory_region *mr;
1152 	enum intel_region_id id;
1153 	unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
1154 	int err = 0;
1155 	u64 hole_size = hole_end - hole_start;
1156 
1157 	if (i915_is_ggtt(vm))
1158 		flags |= PIN_GLOBAL;
1159 
1160 	for_each_memory_region(mr, vm->i915, id) {
1161 		u64 min_alignment = i915_vm_min_alignment(vm, mr->type);
1162 		u64 size = min_alignment;
1163 		u64 addr = round_down(hole_start + (hole_size / 2), min_alignment);
1164 
1165 		/* avoid -ENOSPC on very small hole setups */
1166 		if (hole_size < 3 * min_alignment)
1167 			continue;
1168 
1169 		/* we can't test < 4k alignment due to flags being encoded in lower bits */
1170 		if (min_alignment != I915_GTT_PAGE_SIZE_4K) {
1171 			err = misaligned_case(vm, mr, addr + (min_alignment / 2), size, flags);
1172 			/* misaligned should error with -EINVAL */
1173 			if (!err)
1174 				err = -EBADSLT;
1175 			if (err != -EINVAL)
1176 				return err;
1177 		}
1178 
1179 		/* test for vma->size expansion to min page size */
1180 		err = misaligned_case(vm, mr, addr, PAGE_SIZE, flags);
1181 		if (err)
1182 			return err;
1183 
1184 		/* test for intermediate size not expanding vma->size for large alignments */
1185 		err = misaligned_case(vm, mr, addr, size / 2, flags);
1186 		if (err)
1187 			return err;
1188 	}
1189 
1190 	return 0;
1191 }
1192 
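/*
 * exercise_ppgtt() creates a fresh full ppGTT and hands its entire address
 * range to one of the hole exercisers above.
 */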
1193 static int exercise_ppgtt(struct drm_i915_private *dev_priv,
1194 			  int (*func)(struct i915_address_space *vm,
1195 				      u64 hole_start, u64 hole_end,
1196 				      unsigned long end_time))
1197 {
1198 	struct i915_ppgtt *ppgtt;
1199 	IGT_TIMEOUT(end_time);
1200 	struct file *file;
1201 	int err;
1202 
1203 	if (!HAS_FULL_PPGTT(dev_priv))
1204 		return 0;
1205 
1206 	file = mock_file(dev_priv);
1207 	if (IS_ERR(file))
1208 		return PTR_ERR(file);
1209 
1210 	ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
1211 	if (IS_ERR(ppgtt)) {
1212 		err = PTR_ERR(ppgtt);
1213 		goto out_free;
1214 	}
1215 	GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
1216 	assert_vm_alive(&ppgtt->vm);
1217 
1218 	err = func(&ppgtt->vm, 0, ppgtt->vm.total, end_time);
1219 
1220 	i915_vm_put(&ppgtt->vm);
1221 
1222 out_free:
1223 	fput(file);
1224 	return err;
1225 }
1226 
1227 static int igt_ppgtt_fill(void *arg)
1228 {
1229 	return exercise_ppgtt(arg, fill_hole);
1230 }
1231 
1232 static int igt_ppgtt_walk(void *arg)
1233 {
1234 	return exercise_ppgtt(arg, walk_hole);
1235 }
1236 
1237 static int igt_ppgtt_pot(void *arg)
1238 {
1239 	return exercise_ppgtt(arg, pot_hole);
1240 }
1241 
1242 static int igt_ppgtt_drunk(void *arg)
1243 {
1244 	return exercise_ppgtt(arg, drunk_hole);
1245 }
1246 
1247 static int igt_ppgtt_lowlevel(void *arg)
1248 {
1249 	return exercise_ppgtt(arg, lowlevel_hole);
1250 }
1251 
1252 static int igt_ppgtt_shrink(void *arg)
1253 {
1254 	return exercise_ppgtt(arg, shrink_hole);
1255 }
1256 
1257 static int igt_ppgtt_shrink_boom(void *arg)
1258 {
1259 	return exercise_ppgtt(arg, shrink_boom);
1260 }
1261 
1262 static int igt_ppgtt_misaligned_pin(void *arg)
1263 {
1264 	return exercise_ppgtt(arg, misaligned_pin);
1265 }
1266 
1267 static int sort_holes(void *priv, const struct list_head *A,
1268 		      const struct list_head *B)
1269 {
1270 	struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
1271 	struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);
1272 
1273 	if (a->start < b->start)
1274 		return -1;
1275 	else
1276 		return 1;
1277 }
1278 
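/*
 * exercise_ggtt() runs the exerciser over every hole currently present in
 * the live GGTT; the hole list is re-sorted and re-walked from the start
 * after each run, since the exerciser itself manipulates the drm_mm.
 */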
1279 static int exercise_ggtt(struct drm_i915_private *i915,
1280 			 int (*func)(struct i915_address_space *vm,
1281 				     u64 hole_start, u64 hole_end,
1282 				     unsigned long end_time))
1283 {
1284 	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
1285 	u64 hole_start, hole_end, last = 0;
1286 	struct drm_mm_node *node;
1287 	IGT_TIMEOUT(end_time);
1288 	int err = 0;
1289 
1290 restart:
1291 	list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
1292 	drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
1293 		if (hole_start < last)
1294 			continue;
1295 
1296 		if (ggtt->vm.mm.color_adjust)
1297 			ggtt->vm.mm.color_adjust(node, 0,
1298 						 &hole_start, &hole_end);
1299 		if (hole_start >= hole_end)
1300 			continue;
1301 
1302 		err = func(&ggtt->vm, hole_start, hole_end, end_time);
1303 		if (err)
1304 			break;
1305 
1306 		/* As we have manipulated the drm_mm, the list may be corrupt */
1307 		last = hole_end;
1308 		goto restart;
1309 	}
1310 
1311 	return err;
1312 }
1313 
1314 static int igt_ggtt_fill(void *arg)
1315 {
1316 	return exercise_ggtt(arg, fill_hole);
1317 }
1318 
1319 static int igt_ggtt_walk(void *arg)
1320 {
1321 	return exercise_ggtt(arg, walk_hole);
1322 }
1323 
1324 static int igt_ggtt_pot(void *arg)
1325 {
1326 	return exercise_ggtt(arg, pot_hole);
1327 }
1328 
1329 static int igt_ggtt_drunk(void *arg)
1330 {
1331 	return exercise_ggtt(arg, drunk_hole);
1332 }
1333 
1334 static int igt_ggtt_lowlevel(void *arg)
1335 {
1336 	return exercise_ggtt(arg, lowlevel_hole);
1337 }
1338 
1339 static int igt_ggtt_misaligned_pin(void *arg)
1340 {
1341 	return exercise_ggtt(arg, misaligned_pin);
1342 }
1343 
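/*
 * igt_ggtt_page() checks ggtt->vm.insert_page() by pointing many GGTT PTEs
 * inside the mappable aperture at the same dma page, writing a distinct
 * dword through each mapping and reading the values back in a different
 * random order.
 */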
1344 static int igt_ggtt_page(void *arg)
1345 {
1346 	const unsigned int count = PAGE_SIZE/sizeof(u32);
1347 	I915_RND_STATE(prng);
1348 	struct drm_i915_private *i915 = arg;
1349 	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
1350 	struct drm_i915_gem_object *obj;
1351 	intel_wakeref_t wakeref;
1352 	struct drm_mm_node tmp;
1353 	unsigned int *order, n;
1354 	int err;
1355 
1356 	if (!i915_ggtt_has_aperture(ggtt))
1357 		return 0;
1358 
1359 	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
1360 	if (IS_ERR(obj))
1361 		return PTR_ERR(obj);
1362 
1363 	err = i915_gem_object_pin_pages_unlocked(obj);
1364 	if (err)
1365 		goto out_free;
1366 
1367 	memset(&tmp, 0, sizeof(tmp));
1368 	mutex_lock(&ggtt->vm.mutex);
1369 	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
1370 					  count * PAGE_SIZE, 0,
1371 					  I915_COLOR_UNEVICTABLE,
1372 					  0, ggtt->mappable_end,
1373 					  DRM_MM_INSERT_LOW);
1374 	mutex_unlock(&ggtt->vm.mutex);
1375 	if (err)
1376 		goto out_unpin;
1377 
1378 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1379 
1380 	for (n = 0; n < count; n++) {
1381 		u64 offset = tmp.start + n * PAGE_SIZE;
1382 
1383 		ggtt->vm.insert_page(&ggtt->vm,
1384 				     i915_gem_object_get_dma_address(obj, 0),
1385 				     offset,
1386 				     i915_gem_get_pat_index(i915,
1387 							    I915_CACHE_NONE),
1388 				     0);
1389 	}
1390 
1391 	order = i915_random_order(count, &prng);
1392 	if (!order) {
1393 		err = -ENOMEM;
1394 		goto out_remove;
1395 	}
1396 
1397 	for (n = 0; n < count; n++) {
1398 		u64 offset = tmp.start + order[n] * PAGE_SIZE;
1399 		u32 __iomem *vaddr;
1400 
1401 		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
1402 		iowrite32(n, vaddr + n);
1403 		io_mapping_unmap_atomic(vaddr);
1404 	}
1405 	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
1406 
1407 	i915_random_reorder(order, count, &prng);
1408 	for (n = 0; n < count; n++) {
1409 		u64 offset = tmp.start + order[n] * PAGE_SIZE;
1410 		u32 __iomem *vaddr;
1411 		u32 val;
1412 
1413 		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
1414 		val = ioread32(vaddr + n);
1415 		io_mapping_unmap_atomic(vaddr);
1416 
1417 		if (val != n) {
1418 			pr_err("insert page failed: found %d, expected %d\n",
1419 			       val, n);
1420 			err = -EINVAL;
1421 			break;
1422 		}
1423 	}
1424 
1425 	kfree(order);
1426 out_remove:
1427 	ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
1428 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1429 	mutex_lock(&ggtt->vm.mutex);
1430 	drm_mm_remove_node(&tmp);
1431 	mutex_unlock(&ggtt->vm.mutex);
1432 out_unpin:
1433 	i915_gem_object_unpin_pages(obj);
1434 out_free:
1435 	i915_gem_object_put(obj);
1436 	return err;
1437 }
1438 
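/*
 * track_vma_bind() fakes the bookkeeping of a real bind for the mock
 * device: it pins the object's pages, points the vma (and its resource)
 * at them and moves the vma onto the vm's bound_list.
 */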
1439 static void track_vma_bind(struct i915_vma *vma)
1440 {
1441 	struct drm_i915_gem_object *obj = vma->obj;
1442 
1443 	__i915_gem_object_pin_pages(obj);
1444 
1445 	GEM_BUG_ON(atomic_read(&vma->pages_count));
1446 	atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
1447 	__i915_gem_object_pin_pages(obj);
1448 	vma->pages = obj->mm.pages;
1449 	vma->resource->bi.pages = vma->pages;
1450 
1451 	mutex_lock(&vma->vm->mutex);
1452 	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
1453 	mutex_unlock(&vma->vm->mutex);
1454 }
1455 
1456 static int exercise_mock(struct drm_i915_private *i915,
1457 			 int (*func)(struct i915_address_space *vm,
1458 				     u64 hole_start, u64 hole_end,
1459 				     unsigned long end_time))
1460 {
1461 	const u64 limit = totalram_pages() << PAGE_SHIFT;
1462 	struct i915_address_space *vm;
1463 	struct i915_gem_context *ctx;
1464 	IGT_TIMEOUT(end_time);
1465 	int err;
1466 
1467 	ctx = mock_context(i915, "mock");
1468 	if (!ctx)
1469 		return -ENOMEM;
1470 
1471 	vm = i915_gem_context_get_eb_vm(ctx);
1472 	err = func(vm, 0, min(vm->total, limit), end_time);
1473 	i915_vm_put(vm);
1474 
1475 	mock_context_close(ctx);
1476 	return err;
1477 }
1478 
1479 static int igt_mock_fill(void *arg)
1480 {
1481 	struct i915_ggtt *ggtt = arg;
1482 
1483 	return exercise_mock(ggtt->vm.i915, fill_hole);
1484 }
1485 
1486 static int igt_mock_walk(void *arg)
1487 {
1488 	struct i915_ggtt *ggtt = arg;
1489 
1490 	return exercise_mock(ggtt->vm.i915, walk_hole);
1491 }
1492 
1493 static int igt_mock_pot(void *arg)
1494 {
1495 	struct i915_ggtt *ggtt = arg;
1496 
1497 	return exercise_mock(ggtt->vm.i915, pot_hole);
1498 }
1499 
1500 static int igt_mock_drunk(void *arg)
1501 {
1502 	struct i915_ggtt *ggtt = arg;
1503 
1504 	return exercise_mock(ggtt->vm.i915, drunk_hole);
1505 }
1506 
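/*
 * reserve_gtt_with_resource() wraps i915_gem_gtt_reserve(), supplying the
 * i915_vma_resource that a real bind would normally have allocated, so the
 * reservation is tracked like any other binding.
 */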
1507 static int reserve_gtt_with_resource(struct i915_vma *vma, u64 offset)
1508 {
1509 	struct i915_address_space *vm = vma->vm;
1510 	struct i915_vma_resource *vma_res;
1511 	struct drm_i915_gem_object *obj = vma->obj;
1512 	int err;
1513 
1514 	vma_res = i915_vma_resource_alloc();
1515 	if (IS_ERR(vma_res))
1516 		return PTR_ERR(vma_res);
1517 
1518 	mutex_lock(&vm->mutex);
1519 	err = i915_gem_gtt_reserve(vm, NULL, &vma->node, obj->base.size,
1520 				   offset,
1521 				   obj->pat_index,
1522 				   0);
1523 	if (!err) {
1524 		i915_vma_resource_init_from_vma(vma_res, vma);
1525 		vma->resource = vma_res;
1526 	} else {
1527 		kfree(vma_res);
1528 	}
1529 	mutex_unlock(&vm->mutex);
1530 
1531 	return err;
1532 }
1533 
1534 static int igt_gtt_reserve(void *arg)
1535 {
1536 	struct i915_ggtt *ggtt = arg;
1537 	struct drm_i915_gem_object *obj, *on;
1538 	I915_RND_STATE(prng);
1539 	LIST_HEAD(objects);
1540 	u64 total;
1541 	int err = -ENODEV;
1542 
1543 	/*
1544 	 * i915_gem_gtt_reserve() tries to reserve the precise range
1545 	 * for the node, and evicts if it has to. So our test checks that
1546 	 * it can give us the requested space and prevent overlaps.
1547 	 */
1548 
1549 	/* Start by filling the GGTT */
1550 	for (total = 0;
1551 	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1552 	     total += 2 * I915_GTT_PAGE_SIZE) {
1553 		struct i915_vma *vma;
1554 
1555 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
1556 						      2 * PAGE_SIZE);
1557 		if (IS_ERR(obj)) {
1558 			err = PTR_ERR(obj);
1559 			goto out;
1560 		}
1561 
1562 		err = i915_gem_object_pin_pages_unlocked(obj);
1563 		if (err) {
1564 			i915_gem_object_put(obj);
1565 			goto out;
1566 		}
1567 
1568 		list_add(&obj->st_link, &objects);
1569 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1570 		if (IS_ERR(vma)) {
1571 			err = PTR_ERR(vma);
1572 			goto out;
1573 		}
1574 
1575 		err = reserve_gtt_with_resource(vma, total);
1576 		if (err) {
1577 			pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
1578 			       total, ggtt->vm.total, err);
1579 			goto out;
1580 		}
1581 		track_vma_bind(vma);
1582 
1583 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1584 		if (vma->node.start != total ||
1585 		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1586 			pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1587 			       vma->node.start, vma->node.size,
1588 			       total, 2*I915_GTT_PAGE_SIZE);
1589 			err = -EINVAL;
1590 			goto out;
1591 		}
1592 	}
1593 
1594 	/* Now we start forcing evictions */
1595 	for (total = I915_GTT_PAGE_SIZE;
1596 	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1597 	     total += 2 * I915_GTT_PAGE_SIZE) {
1598 		struct i915_vma *vma;
1599 
1600 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
1601 						      2 * PAGE_SIZE);
1602 		if (IS_ERR(obj)) {
1603 			err = PTR_ERR(obj);
1604 			goto out;
1605 		}
1606 
1607 		err = i915_gem_object_pin_pages_unlocked(obj);
1608 		if (err) {
1609 			i915_gem_object_put(obj);
1610 			goto out;
1611 		}
1612 
1613 		list_add(&obj->st_link, &objects);
1614 
1615 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1616 		if (IS_ERR(vma)) {
1617 			err = PTR_ERR(vma);
1618 			goto out;
1619 		}
1620 
1621 		err = reserve_gtt_with_resource(vma, total);
1622 		if (err) {
1623 			pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
1624 			       total, ggtt->vm.total, err);
1625 			goto out;
1626 		}
1627 		track_vma_bind(vma);
1628 
1629 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1630 		if (vma->node.start != total ||
1631 		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1632 			pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1633 			       vma->node.start, vma->node.size,
1634 			       total, 2*I915_GTT_PAGE_SIZE);
1635 			err = -EINVAL;
1636 			goto out;
1637 		}
1638 	}
1639 
1640 	/* And then try at random */
1641 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1642 		struct i915_vma *vma;
1643 		u64 offset;
1644 
1645 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1646 		if (IS_ERR(vma)) {
1647 			err = PTR_ERR(vma);
1648 			goto out;
1649 		}
1650 
1651 		err = i915_vma_unbind_unlocked(vma);
1652 		if (err) {
1653 			pr_err("i915_vma_unbind failed with err=%d!\n", err);
1654 			goto out;
1655 		}
1656 
1657 		offset = igt_random_offset(&prng,
1658 					   0, ggtt->vm.total,
1659 					   2 * I915_GTT_PAGE_SIZE,
1660 					   I915_GTT_MIN_ALIGNMENT);
1661 
1662 		err = reserve_gtt_with_resource(vma, offset);
1663 		if (err) {
1664 			pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
1665 			       total, ggtt->vm.total, err);
1666 			goto out;
1667 		}
1668 		track_vma_bind(vma);
1669 
1670 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1671 		if (vma->node.start != offset ||
1672 		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1673 			pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1674 			       vma->node.start, vma->node.size,
1675 			       offset, 2*I915_GTT_PAGE_SIZE);
1676 			err = -EINVAL;
1677 			goto out;
1678 		}
1679 	}
1680 
1681 out:
1682 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1683 		i915_gem_object_unpin_pages(obj);
1684 		i915_gem_object_put(obj);
1685 	}
1686 	return err;
1687 }
1688 
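/*
 * insert_gtt_with_resource() is the i915_gem_gtt_insert() counterpart of
 * reserve_gtt_with_resource() above: allocate anywhere in the GTT rather
 * than at a fixed offset.
 */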
1689 static int insert_gtt_with_resource(struct i915_vma *vma)
1690 {
1691 	struct i915_address_space *vm = vma->vm;
1692 	struct i915_vma_resource *vma_res;
1693 	struct drm_i915_gem_object *obj = vma->obj;
1694 	int err;
1695 
1696 	vma_res = i915_vma_resource_alloc();
1697 	if (IS_ERR(vma_res))
1698 		return PTR_ERR(vma_res);
1699 
1700 	mutex_lock(&vm->mutex);
1701 	err = i915_gem_gtt_insert(vm, NULL, &vma->node, obj->base.size, 0,
1702 				  obj->pat_index, 0, vm->total, 0);
1703 	if (!err) {
1704 		i915_vma_resource_init_from_vma(vma_res, vma);
1705 		vma->resource = vma_res;
1706 	} else {
1707 		kfree(vma_res);
1708 	}
1709 	mutex_unlock(&vm->mutex);
1710 
1711 	return err;
1712 }
1713 
1714 static int igt_gtt_insert(void *arg)
1715 {
1716 	struct i915_ggtt *ggtt = arg;
1717 	struct drm_i915_gem_object *obj, *on;
1718 	struct drm_mm_node tmp = {};
1719 	const struct invalid_insert {
1720 		u64 size;
1721 		u64 alignment;
1722 		u64 start, end;
1723 	} invalid_insert[] = {
1724 		{
1725 			ggtt->vm.total + I915_GTT_PAGE_SIZE, 0,
1726 			0, ggtt->vm.total,
1727 		},
1728 		{
1729 			2*I915_GTT_PAGE_SIZE, 0,
1730 			0, I915_GTT_PAGE_SIZE,
1731 		},
1732 		{
1733 			-(u64)I915_GTT_PAGE_SIZE, 0,
1734 			0, 4*I915_GTT_PAGE_SIZE,
1735 		},
1736 		{
1737 			-(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
1738 			0, 4*I915_GTT_PAGE_SIZE,
1739 		},
1740 		{
1741 			I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
1742 			I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
1743 		},
1744 		{}
1745 	}, *ii;
1746 	LIST_HEAD(objects);
1747 	u64 total;
1748 	int err = -ENODEV;
1749 
1750 	/*
1751 	 * i915_gem_gtt_insert() tries to allocate some free space in the GTT
1752 	 * to the node, evicting if required.
1753 	 */
1754 
1755 	/* Check a couple of obviously invalid requests */
1756 	for (ii = invalid_insert; ii->size; ii++) {
1757 		mutex_lock(&ggtt->vm.mutex);
1758 		err = i915_gem_gtt_insert(&ggtt->vm, NULL, &tmp,
1759 					  ii->size, ii->alignment,
1760 					  I915_COLOR_UNEVICTABLE,
1761 					  ii->start, ii->end,
1762 					  0);
1763 		mutex_unlock(&ggtt->vm.mutex);
1764 		if (err != -ENOSPC) {
1765 			pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
1766 			       ii->size, ii->alignment, ii->start, ii->end,
1767 			       err);
1768 			return -EINVAL;
1769 		}
1770 	}
1771 
1772 	/* Start by filling the GGTT */
1773 	for (total = 0;
1774 	     total + I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1775 	     total += I915_GTT_PAGE_SIZE) {
1776 		struct i915_vma *vma;
1777 
1778 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
1779 						      I915_GTT_PAGE_SIZE);
1780 		if (IS_ERR(obj)) {
1781 			err = PTR_ERR(obj);
1782 			goto out;
1783 		}
1784 
1785 		err = i915_gem_object_pin_pages_unlocked(obj);
1786 		if (err) {
1787 			i915_gem_object_put(obj);
1788 			goto out;
1789 		}
1790 
1791 		list_add(&obj->st_link, &objects);
1792 
1793 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1794 		if (IS_ERR(vma)) {
1795 			err = PTR_ERR(vma);
1796 			goto out;
1797 		}
1798 
1799 		err = insert_gtt_with_resource(vma);
1800 		if (err == -ENOSPC) {
1801 			/* maxed out the GGTT space */
1802 			i915_gem_object_put(obj);
1803 			break;
1804 		}
1805 		if (err) {
1806 			pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
1807 			       total, ggtt->vm.total, err);
1808 			goto out;
1809 		}
1810 		track_vma_bind(vma);
1811 		__i915_vma_pin(vma);
1812 
1813 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1814 	}
1815 
1816 	list_for_each_entry(obj, &objects, st_link) {
1817 		struct i915_vma *vma;
1818 
1819 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1820 		if (IS_ERR(vma)) {
1821 			err = PTR_ERR(vma);
1822 			goto out;
1823 		}
1824 
1825 		if (!drm_mm_node_allocated(&vma->node)) {
1826 			pr_err("VMA was unexpectedly evicted!\n");
1827 			err = -EINVAL;
1828 			goto out;
1829 		}
1830 
1831 		__i915_vma_unpin(vma);
1832 	}
1833 
1834 	/* If we then reinsert, we should find the same hole */
1835 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1836 		struct i915_vma *vma;
1837 		u64 offset;
1838 
1839 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1840 		if (IS_ERR(vma)) {
1841 			err = PTR_ERR(vma);
1842 			goto out;
1843 		}
1844 
1845 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1846 		offset = vma->node.start;
1847 
1848 		err = i915_vma_unbind_unlocked(vma);
1849 		if (err) {
1850 			pr_err("i915_vma_unbind failed with err=%d!\n", err);
1851 			goto out;
1852 		}
1853 
1854 		err = insert_gtt_with_resource(vma);
1855 		if (err) {
1856 			pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
1857 			       total, ggtt->vm.total, err);
1858 			goto out;
1859 		}
1860 		track_vma_bind(vma);
1861 
1862 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1863 		if (vma->node.start != offset) {
1864 			pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
1865 			       offset, vma->node.start);
1866 			err = -EINVAL;
1867 			goto out;
1868 		}
1869 	}
1870 
1871 	/* And then force evictions */
1872 	for (total = 0;
1873 	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1874 	     total += 2 * I915_GTT_PAGE_SIZE) {
1875 		struct i915_vma *vma;
1876 
1877 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
1878 						      2 * I915_GTT_PAGE_SIZE);
1879 		if (IS_ERR(obj)) {
1880 			err = PTR_ERR(obj);
1881 			goto out;
1882 		}
1883 
1884 		err = i915_gem_object_pin_pages_unlocked(obj);
1885 		if (err) {
1886 			i915_gem_object_put(obj);
1887 			goto out;
1888 		}
1889 
1890 		list_add(&obj->st_link, &objects);
1891 
1892 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1893 		if (IS_ERR(vma)) {
1894 			err = PTR_ERR(vma);
1895 			goto out;
1896 		}
1897 
1898 		err = insert_gtt_with_resource(vma);
1899 		if (err) {
1900 			pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
1901 			       total, ggtt->vm.total, err);
1902 			goto out;
1903 		}
1904 		track_vma_bind(vma);
1905 
1906 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1907 	}
1908 
1909 out:
1910 	list_for_each_entry_safe(obj, on, &objects, st_link) {
1911 		i915_gem_object_unpin_pages(obj);
1912 		i915_gem_object_put(obj);
1913 	}
1914 	return err;
1915 }
1916 
1917 int i915_gem_gtt_mock_selftests(void)
1918 {
1919 	static const struct i915_subtest tests[] = {
1920 		SUBTEST(igt_mock_drunk),
1921 		SUBTEST(igt_mock_walk),
1922 		SUBTEST(igt_mock_pot),
1923 		SUBTEST(igt_mock_fill),
1924 		SUBTEST(igt_gtt_reserve),
1925 		SUBTEST(igt_gtt_insert),
1926 	};
1927 	struct drm_i915_private *i915;
1928 	struct intel_gt *gt;
1929 	int err;
1930 
1931 	i915 = mock_gem_device();
1932 	if (!i915)
1933 		return -ENOMEM;
1934 
1935 	/* allocate the ggtt */
1936 	err = intel_gt_assign_ggtt(to_gt(i915));
1937 	if (err)
1938 		goto out_put;
1939 
1940 	gt = to_gt(i915);
1941 
1942 	mock_init_ggtt(gt);
1943 
1944 	err = i915_subtests(tests, gt->ggtt);
1945 
1946 	mock_device_flush(i915);
1947 	i915_gem_drain_freed_objects(i915);
1948 	mock_fini_ggtt(gt->ggtt);
1949 
1950 out_put:
1951 	mock_destroy_device(i915);
1952 	return err;
1953 }
1954 
1955 int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
1956 {
1957 	static const struct i915_subtest tests[] = {
1958 		SUBTEST(igt_ppgtt_alloc),
1959 		SUBTEST(igt_ppgtt_lowlevel),
1960 		SUBTEST(igt_ppgtt_drunk),
1961 		SUBTEST(igt_ppgtt_walk),
1962 		SUBTEST(igt_ppgtt_pot),
1963 		SUBTEST(igt_ppgtt_fill),
1964 		SUBTEST(igt_ppgtt_shrink),
1965 		SUBTEST(igt_ppgtt_shrink_boom),
1966 		SUBTEST(igt_ppgtt_misaligned_pin),
1967 		SUBTEST(igt_ggtt_lowlevel),
1968 		SUBTEST(igt_ggtt_drunk),
1969 		SUBTEST(igt_ggtt_walk),
1970 		SUBTEST(igt_ggtt_pot),
1971 		SUBTEST(igt_ggtt_fill),
1972 		SUBTEST(igt_ggtt_page),
1973 		SUBTEST(igt_ggtt_misaligned_pin),
1974 	};
1975 
1976 	GEM_BUG_ON(offset_in_page(to_gt(i915)->ggtt->vm.total));
1977 
1978 	return i915_live_subtests(tests, i915);
1979 }
1980