/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/list_sort.h>
#include <linux/prime_numbers.h>

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_context.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gtt.h"

#include "i915_random.h"
#include "i915_selftest.h"
#include "i915_vma_resource.h"

#include "mock_drm.h"
#include "mock_gem_device.h"
#include "mock_gtt.h"
#include "igt_flush_test.h"

static void cleanup_freed_objects(struct drm_i915_private *i915)
{
	i915_gem_drain_freed_objects(i915);
}

static void fake_free_pages(struct drm_i915_gem_object *obj,
			    struct sg_table *pages)
{
	sg_free_table(pages);
	kfree(pages);
}

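/*
 * Provide "backing storage" without consuming real memory: every sg entry
 * points at the same pfn (PFN_BIAS) and its dma address is simply the
 * corresponding physical address, in chunks of up to 2GiB. Only the
 * sg_table itself is really allocated, so the selftests can fabricate
 * objects far larger than available RAM.
 */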
static int fake_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
#define PFN_BIAS 0x1000
	struct sg_table *pages;
	struct scatterlist *sg;
	typeof(obj->base.size) rem;

	pages = kmalloc(sizeof(*pages), GFP);
	if (!pages)
		return -ENOMEM;

	rem = round_up(obj->base.size, BIT(31)) >> 31;
	/* restricted by sg_alloc_table */
	if (overflows_type(rem, unsigned int)) {
		kfree(pages);
		return -E2BIG;
	}

	if (sg_alloc_table(pages, rem, GFP)) {
		kfree(pages);
		return -ENOMEM;
	}

	rem = obj->base.size;
	for (sg = pages->sgl; sg; sg = sg_next(sg)) {
		unsigned long len = min_t(typeof(rem), rem, BIT(31));

		GEM_BUG_ON(!len);
		sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
		sg_dma_address(sg) = page_to_phys(sg_page(sg));
		sg_dma_len(sg) = len;

		rem -= len;
	}
	GEM_BUG_ON(rem);

	__i915_gem_object_set_pages(obj, pages);

	return 0;
#undef GFP
}

static void fake_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	fake_free_pages(obj, pages);
	obj->mm.dirty = false;
}

static const struct drm_i915_gem_object_ops fake_ops = {
	.name = "fake-gem",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_pages,
	.put_pages = fake_put_pages,
};

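/*
 * Create a GEM object backed by the fake scatterlist above. The pages are
 * pinned once up front so that any -ENOMEM from building the sg_table is
 * reported here rather than deep inside a test loop.
 */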
static struct drm_i915_gem_object *
fake_dma_object(struct drm_i915_private *i915, u64 size)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		goto err;

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &fake_ops, &lock_class, 0);

	i915_gem_object_set_volatile(obj);

	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->pat_index = i915_gem_get_pat_index(i915, I915_CACHE_NONE);

	/* Preallocate the "backing storage" */
	if (i915_gem_object_pin_pages_unlocked(obj))
		goto err_obj;

	i915_gem_object_unpin_pages(obj);
	return obj;

err_obj:
	i915_gem_object_put(obj);
err:
	return ERR_PTR(-ENOMEM);
}

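/*
 * Page-table allocations follow a stash protocol: i915_vm_alloc_pt_stash()
 * preallocates enough page-table pages to cover the requested range,
 * i915_vm_map_pt_stash() maps them, allocate_va_range() then consumes
 * pages from the stash (so it cannot fail for lack of memory), and any
 * leftovers are released with i915_vm_free_pt_stash().
 */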
static int igt_ppgtt_alloc(void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	struct i915_ppgtt *ppgtt;
	struct i915_gem_ww_ctx ww;
	u64 size, last, limit;
	int err = 0;

	/* Allocate a ppgtt and try to fill the entire range */

	if (!HAS_PPGTT(dev_priv))
		return 0;

	ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	if (!ppgtt->vm.allocate_va_range)
		goto ppgtt_vm_put;

	/*
	 * While we only allocate the page tables here and so we could
	 * address a much larger GTT than we could actually fit into
	 * RAM, a practical limit is the number of physical pages in the
	 * system. This should ensure that we do not run into the oomkiller
	 * during the test and take down the machine wilfully.
	 */
	limit = totalram_pages() << PAGE_SHIFT;
	limit = min(ppgtt->vm.total, limit);

	i915_gem_ww_ctx_init(&ww, false);
retry:
	err = i915_vm_lock_objects(&ppgtt->vm, &ww);
	if (err)
		goto err_ppgtt_cleanup;

	/* Check we can allocate the entire range */
	for (size = 4096; size <= limit; size <<= 2) {
		struct i915_vm_pt_stash stash = {};

		err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size);
		if (err)
			goto err_ppgtt_cleanup;

		err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
		if (err) {
			i915_vm_free_pt_stash(&ppgtt->vm, &stash);
			goto err_ppgtt_cleanup;
		}

		ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, size);
		cond_resched();

		ppgtt->vm.clear_range(&ppgtt->vm, 0, size);

		i915_vm_free_pt_stash(&ppgtt->vm, &stash);
	}

	/* Check we can incrementally allocate the entire range */
	for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) {
		struct i915_vm_pt_stash stash = {};

		err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size - last);
		if (err)
			goto err_ppgtt_cleanup;

		err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
		if (err) {
			i915_vm_free_pt_stash(&ppgtt->vm, &stash);
			goto err_ppgtt_cleanup;
		}

		ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash,
					    last, size - last);
		cond_resched();

		i915_vm_free_pt_stash(&ppgtt->vm, &stash);
	}

err_ppgtt_cleanup:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
ppgtt_vm_put:
	i915_vm_put(&ppgtt->vm);
	return err;
}

static int lowlevel_hole(struct i915_address_space *vm,
			 u64 hole_start, u64 hole_end,
			 unsigned long end_time)
{
	const unsigned int min_alignment =
		i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
	I915_RND_STATE(seed_prng);
	struct i915_vma_resource *mock_vma_res;
	unsigned int size;

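	/*
	 * Bypass the i915_vma machinery entirely: we hand-roll a minimal
	 * vma_resource and call vm->insert_entries()/vm->clear_range()
	 * directly, so only the lowest-level PTE paths are exercised.
	 */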
	mock_vma_res = kzalloc(sizeof(*mock_vma_res), GFP_KERNEL);
	if (!mock_vma_res)
		return -ENOMEM;

	/* Keep creating larger objects until one cannot fit into the hole */
	for (size = 12; (hole_end - hole_start) >> size; size++) {
		I915_RND_SUBSTATE(prng, seed_prng);
		struct drm_i915_gem_object *obj;
		unsigned int *order, count, n;
		u64 hole_size, aligned_size;

		aligned_size = max_t(u32, ilog2(min_alignment), size);
		hole_size = (hole_end - hole_start) >> aligned_size;
		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
		count = hole_size >> 1;
		if (!count) {
			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
				 __func__, hole_start, hole_end, size, hole_size);
			break;
		}

		do {
			order = i915_random_order(count, &prng);
			if (order)
				break;
		} while (count >>= 1);
		if (!count) {
			kfree(mock_vma_res);
			return -ENOMEM;
		}
		GEM_BUG_ON(!order);

		GEM_BUG_ON(count * BIT_ULL(aligned_size) > vm->total);
		GEM_BUG_ON(hole_start + count * BIT_ULL(aligned_size) > hole_end);

		/*
		 * Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */

		obj = fake_dma_object(vm->i915, BIT_ULL(size));
		if (IS_ERR(obj)) {
			kfree(order);
			break;
		}

		GEM_BUG_ON(obj->base.size != BIT_ULL(size));

		if (i915_gem_object_pin_pages_unlocked(obj)) {
			i915_gem_object_put(obj);
			kfree(order);
			break;
		}

		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(aligned_size);
			intel_wakeref_t wakeref;

			GEM_BUG_ON(addr + BIT_ULL(aligned_size) > vm->total);

			if (igt_timeout(end_time,
					"%s timed out before %d/%d\n",
					__func__, n, count)) {
				hole_end = hole_start; /* quit */
				break;
			}

			if (vm->allocate_va_range) {
				struct i915_vm_pt_stash stash = {};
				struct i915_gem_ww_ctx ww;
				int err;

				i915_gem_ww_ctx_init(&ww, false);
retry:
				err = i915_vm_lock_objects(vm, &ww);
				if (err)
					goto alloc_vm_end;

				err = -ENOMEM;
				if (i915_vm_alloc_pt_stash(vm, &stash,
							   BIT_ULL(size)))
					goto alloc_vm_end;

				err = i915_vm_map_pt_stash(vm, &stash);
				if (!err)
					vm->allocate_va_range(vm, &stash,
							      addr, BIT_ULL(size));
				i915_vm_free_pt_stash(vm, &stash);
alloc_vm_end:
				if (err == -EDEADLK) {
					err = i915_gem_ww_ctx_backoff(&ww);
					if (!err)
						goto retry;
				}
				i915_gem_ww_ctx_fini(&ww);

				if (err)
					break;
			}

			mock_vma_res->bi.pages = obj->mm.pages;
			mock_vma_res->node_size = BIT_ULL(aligned_size);
			mock_vma_res->start = addr;

			with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
				vm->insert_entries(vm, mock_vma_res,
						   i915_gem_get_pat_index(vm->i915,
									  I915_CACHE_NONE),
						   0);
		}
		count = n;

		i915_random_reorder(order, count, &prng);
		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(aligned_size);
			intel_wakeref_t wakeref;

			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
			with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
				vm->clear_range(vm, addr, BIT_ULL(size));
		}

		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);

		kfree(order);

		cleanup_freed_objects(vm->i915);
	}

	kfree(mock_vma_res);
	return 0;
}

static void close_object_list(struct list_head *objects,
			      struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj, *on;
	int __maybe_unused ignored;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, vm, NULL);
		if (!IS_ERR(vma))
			ignored = i915_vma_unbind_unlocked(vma);

		list_del(&obj->st_link);
		i915_gem_object_put(obj);
	}
}

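/*
 * For each phase we make four walks over the object list: pin every vma at
 * its expected offset (stepping inwards from the hole edge) and verify its
 * placement, walk again to check nothing moved before unbinding, then
 * repeat both walks over the list in reverse order.
 */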
static int fill_hole(struct i915_address_space *vm,
		     u64 hole_start, u64 hole_end,
		     unsigned long end_time)
{
	const u64 hole_size = hole_end - hole_start;
	struct drm_i915_gem_object *obj;
	const unsigned int min_alignment =
		i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
	const unsigned long max_pages =
		min_t(u64, ULONG_MAX - 1, (hole_size / 2) >> ilog2(min_alignment));
	const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
	unsigned long npages, prime, flags;
	struct i915_vma *vma;
	LIST_HEAD(objects);
	int err;

	/* Try binding many VMA working inwards from either edge */

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	for_each_prime_number_from(prime, 2, max_step) {
		for (npages = 1; npages <= max_pages; npages *= prime) {
			const u64 full_size = npages << PAGE_SHIFT;
			const struct {
				const char *name;
				u64 offset;
				int step;
			} phases[] = {
				{ "top-down", hole_end, -1, },
				{ "bottom-up", hole_start, 1, },
				{ }
			}, *p;

			obj = fake_dma_object(vm->i915, full_size);
			if (IS_ERR(obj))
				break;

			list_add(&obj->st_link, &objects);

			/*
			 * Align differing sized objects against the edges, and
			 * check we don't walk off into the void when binding
			 * them into the GTT.
			 */
			for (p = phases; p->name; p++) {
				u64 offset;

				offset = p->offset;
				list_for_each_entry(obj, &objects, st_link) {
					u64 aligned_size = round_up(obj->base.size,
								    min_alignment);

					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + aligned_size)
							break;
						offset -= aligned_size;
					}

					err = i915_vma_pin(vma, 0, 0, offset | flags);
					if (err) {
						pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
						       __func__, p->name, err, npages, prime, offset);
						goto err;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					i915_vma_unpin(vma);

					if (p->step > 0) {
						if (offset + aligned_size > hole_end)
							break;
						offset += aligned_size;
					}
				}

				offset = p->offset;
				list_for_each_entry(obj, &objects, st_link) {
					u64 aligned_size = round_up(obj->base.size,
								    min_alignment);

					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + aligned_size)
							break;
						offset -= aligned_size;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       offset);
						err = -EINVAL;
						goto err;
					}

					err = i915_vma_unbind_unlocked(vma);
					if (err) {
						pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       err);
						goto err;
					}

					if (p->step > 0) {
						if (offset + aligned_size > hole_end)
							break;
						offset += aligned_size;
					}
				}

				offset = p->offset;
				list_for_each_entry_reverse(obj, &objects, st_link) {
					u64 aligned_size = round_up(obj->base.size,
								    min_alignment);

					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + aligned_size)
							break;
						offset -= aligned_size;
					}

					err = i915_vma_pin(vma, 0, 0, offset | flags);
					if (err) {
						pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
						       __func__, p->name, err, npages, prime, offset);
						goto err;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					i915_vma_unpin(vma);

					if (p->step > 0) {
						if (offset + aligned_size > hole_end)
							break;
						offset += aligned_size;
					}
				}

				offset = p->offset;
				list_for_each_entry_reverse(obj, &objects, st_link) {
					u64 aligned_size = round_up(obj->base.size,
								    min_alignment);

					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + aligned_size)
							break;
						offset -= aligned_size;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					err = i915_vma_unbind_unlocked(vma);
					if (err) {
						pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       err);
						goto err;
					}

					if (p->step > 0) {
						if (offset + aligned_size > hole_end)
							break;
						offset += aligned_size;
					}
				}
			}

			if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
					__func__, npages, prime)) {
				err = -EINTR;
				goto err;
			}
		}

		close_object_list(&objects, vm);
		cleanup_freed_objects(vm->i915);
	}

	return 0;

err:
	close_object_list(&objects, vm);
	return err;
}

static int walk_hole(struct i915_address_space *vm,
		     u64 hole_start, u64 hole_end,
		     unsigned long end_time)
{
	const u64 hole_size = hole_end - hole_start;
	const unsigned long max_pages =
		min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
	unsigned long min_alignment;
	unsigned long flags;
	u64 size;

	/* Try binding a single VMA in different positions within the hole */

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);

	for_each_prime_number_from(size, 1, max_pages) {
		struct drm_i915_gem_object *obj;
		struct i915_vma *vma;
		u64 addr;
		int err = 0;

		obj = fake_dma_object(vm->i915, size << PAGE_SHIFT);
		if (IS_ERR(obj))
			break;

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_put;
		}

		for (addr = hole_start;
		     addr + obj->base.size < hole_end;
		     addr += round_up(obj->base.size, min_alignment)) {
			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s bind failed at %llx + %llx [hole %llx - %llx] with err=%d\n",
				       __func__, addr, vma->size,
				       hole_start, hole_end, err);
				goto err_put;
			}
			i915_vma_unpin(vma);

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, vma->size);
				err = -EINVAL;
				goto err_put;
			}

			err = i915_vma_unbind_unlocked(vma);
			if (err) {
				pr_err("%s unbind failed at %llx + %llx with err=%d\n",
				       __func__, addr, vma->size, err);
				goto err_put;
			}

			GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

			if (igt_timeout(end_time,
					"%s timed out at %llx\n",
					__func__, addr)) {
				err = -EINTR;
				goto err_put;
			}
		}

err_put:
		i915_gem_object_put(obj);
		if (err)
			return err;

		cleanup_freed_objects(vm->i915);
	}

	return 0;
}

static int pot_hole(struct i915_address_space *vm,
		    u64 hole_start, u64 hole_end,
		    unsigned long end_time)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned int min_alignment;
	unsigned long flags;
	unsigned int pot;
	int err = 0;

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);

	obj = i915_gem_object_create_internal(vm->i915, 2 * I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

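	/*
	 * The loop below walks addr so that the two pages straddle each
	 * power-of-two boundary: e.g. with a 4K min_alignment and pot == 20
	 * (a 1M step), the object is pinned at 1M - 4K, 2M - 4K, ...,
	 * covering the last page below and the first page above every 1M
	 * boundary.
	 */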
	/* Insert a pair of pages across every pot boundary within the hole */
	for (pot = fls64(hole_end - 1) - 1;
	     pot > ilog2(2 * min_alignment);
	     pot--) {
		u64 step = BIT_ULL(pot);
		u64 addr;

		for (addr = round_up(hole_start + min_alignment, step) - min_alignment;
		     hole_end > addr && hole_end - addr >= 2 * min_alignment;
		     addr += step) {
			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
				       __func__,
				       addr,
				       hole_start, hole_end,
				       err);
				goto err_obj;
			}

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, vma->size);
				i915_vma_unpin(vma);
				err = i915_vma_unbind_unlocked(vma);
				err = -EINVAL;
				goto err_obj;
			}

			i915_vma_unpin(vma);
			err = i915_vma_unbind_unlocked(vma);
			GEM_BUG_ON(err);
		}

		if (igt_timeout(end_time,
				"%s timed out after %d/%d\n",
				__func__, pot, fls64(hole_end - 1) - 1)) {
			err = -EINTR;
			goto err_obj;
		}
	}

err_obj:
	i915_gem_object_put(obj);
	return err;
}

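/*
 * Like walk_hole, but visit the aligned slots of the hole in a random
 * permutation (from i915_random_order) rather than sequentially, so that
 * page-table allocation and teardown are exercised in arbitrary order.
 */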
static int drunk_hole(struct i915_address_space *vm,
		      u64 hole_start, u64 hole_end,
		      unsigned long end_time)
{
	I915_RND_STATE(prng);
	unsigned int min_alignment;
	unsigned int size;
	unsigned long flags;

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);

	/* Keep creating larger objects until one cannot fit into the hole */
	for (size = 12; (hole_end - hole_start) >> size; size++) {
		struct drm_i915_gem_object *obj;
		unsigned int *order, count, n;
		struct i915_vma *vma;
		u64 hole_size, aligned_size;
		int err = -ENODEV;

		aligned_size = max_t(u32, ilog2(min_alignment), size);
		hole_size = (hole_end - hole_start) >> aligned_size;
		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
		count = hole_size >> 1;
		if (!count) {
			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
				 __func__, hole_start, hole_end, size, hole_size);
			break;
		}

		do {
			order = i915_random_order(count, &prng);
			if (order)
				break;
		} while (count >>= 1);
		if (!count)
			return -ENOMEM;
		GEM_BUG_ON(!order);

		/*
		 * Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */

		obj = fake_dma_object(vm->i915, BIT_ULL(size));
		if (IS_ERR(obj)) {
			kfree(order);
			break;
		}

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_obj;
		}

		GEM_BUG_ON(vma->size != BIT_ULL(size));

		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(aligned_size);

			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
				       __func__,
				       addr, BIT_ULL(size),
				       hole_start, hole_end,
				       err);
				goto err_obj;
			}

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, BIT_ULL(size));
				i915_vma_unpin(vma);
				err = i915_vma_unbind_unlocked(vma);
				err = -EINVAL;
				goto err_obj;
			}

			i915_vma_unpin(vma);
			err = i915_vma_unbind_unlocked(vma);
			GEM_BUG_ON(err);

			if (igt_timeout(end_time,
					"%s timed out after %d/%d\n",
					__func__, n, count)) {
				err = -EINTR;
				goto err_obj;
			}
		}

err_obj:
		i915_gem_object_put(obj);
		kfree(order);
		if (err)
			return err;

		cleanup_freed_objects(vm->i915);
	}

	return 0;
}

static int __shrink_hole(struct i915_address_space *vm,
			 u64 hole_start, u64 hole_end,
			 unsigned long end_time)
{
	struct drm_i915_gem_object *obj;
	unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
	unsigned int min_alignment;
	unsigned int order = 12;
	LIST_HEAD(objects);
	int err = 0;
	u64 addr;

	min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);

	/* Keep creating larger objects until one cannot fit into the hole */
	for (addr = hole_start; addr < hole_end; ) {
		struct i915_vma *vma;
		u64 size = BIT_ULL(order++);

		size = min(size, hole_end - addr);
		obj = fake_dma_object(vm->i915, size);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			break;
		}

		GEM_BUG_ON(vma->size != size);

		err = i915_vma_pin(vma, 0, 0, addr | flags);
		if (err) {
			pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
			       __func__, addr, size, hole_start, hole_end, err);
			break;
		}

		if (!drm_mm_node_allocated(&vma->node) ||
		    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
			pr_err("%s incorrect at %llx + %llx\n",
			       __func__, addr, size);
			i915_vma_unpin(vma);
			err = i915_vma_unbind_unlocked(vma);
			err = -EINVAL;
			break;
		}

		i915_vma_unpin(vma);
		addr += round_up(size, min_alignment);

		/*
		 * Since we are injecting allocation faults at random intervals,
		 * wait for this allocation to complete before we change the
		 * fault injection.
		 */
		err = i915_vma_sync(vma);
		if (err)
			break;

		if (igt_timeout(end_time,
				"%s timed out at offset %llx [%llx - %llx]\n",
				__func__, addr, hole_start, hole_end)) {
			err = -EINTR;
			break;
		}
	}

	close_object_list(&objects, vm);
	cleanup_freed_objects(vm->i915);
	return err;
}

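/*
 * vm->fault_attr is a standard fault_attr from the fault-injection
 * framework: probability 999 (effectively "always") with times == -1 (no
 * limit) makes every eligible page-table allocation fail, while
 * interval == prime makes only every prime-th allocation eligible.
 * Stepping the interval through the primes moves the injected failure to
 * a different point of the allocation chain on each pass.
 */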
static int shrink_hole(struct i915_address_space *vm,
		       u64 hole_start, u64 hole_end,
		       unsigned long end_time)
{
	unsigned long prime;
	int err;

	vm->fault_attr.probability = 999;
	atomic_set(&vm->fault_attr.times, -1);

	for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
		vm->fault_attr.interval = prime;
		err = __shrink_hole(vm, hole_start, hole_end, end_time);
		if (err)
			break;
	}

	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));

	return err;
}

static int shrink_boom(struct i915_address_space *vm,
		       u64 hole_start, u64 hole_end,
		       unsigned long end_time)
{
	unsigned int sizes[] = { SZ_2M, SZ_1G };
	struct drm_i915_gem_object *purge;
	struct drm_i915_gem_object *explode;
	int err;
	int i;

	/*
	 * Catch the case which shrink_hole seems to miss. The setup here
	 * requires invoking the shrinker as we do the alloc_pt/alloc_pd, while
	 * ensuring that all vma associated with the respective pd/pdp are
	 * unpinned at the time.
	 */

	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
		unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
		unsigned int size = sizes[i];
		struct i915_vma *vma;

		purge = fake_dma_object(vm->i915, size);
		if (IS_ERR(purge))
			return PTR_ERR(purge);

		vma = i915_vma_instance(purge, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_purge;
		}

		err = i915_vma_pin(vma, 0, 0, flags);
		if (err)
			goto err_purge;

		/* Should now be ripe for purging */
		i915_vma_unpin(vma);

		explode = fake_dma_object(vm->i915, size);
		if (IS_ERR(explode)) {
			err = PTR_ERR(explode);
			goto err_purge;
		}

		vm->fault_attr.probability = 100;
		vm->fault_attr.interval = 1;
		atomic_set(&vm->fault_attr.times, -1);

		vma = i915_vma_instance(explode, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_explode;
		}

		err = i915_vma_pin(vma, 0, 0, flags | size);
		if (err)
			goto err_explode;

		i915_vma_unpin(vma);

		i915_gem_object_put(purge);
		i915_gem_object_put(explode);

		memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
		cleanup_freed_objects(vm->i915);
	}

	return 0;

err_explode:
	i915_gem_object_put(explode);
err_purge:
	i915_gem_object_put(purge);
	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
	return err;
}

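/*
 * Pin an object of @size at @addr and check both that the placement is
 * honoured and that vma->size/node.size are expanded to the region's
 * minimum page size. Note that PIN_OFFSET_FIXED packs the address into
 * the same u64 as the PIN_* flag bits, which is why offsets below 4K
 * alignment cannot be expressed here.
 */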
static int misaligned_case(struct i915_address_space *vm, struct intel_memory_region *mr,
			   u64 addr, u64 size, unsigned long flags)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err = 0;
	u64 expected_vma_size, expected_node_size;
	bool is_stolen = mr->type == INTEL_MEMORY_STOLEN_SYSTEM ||
			 mr->type == INTEL_MEMORY_STOLEN_LOCAL;

	obj = i915_gem_object_create_region(mr, size, 0, I915_BO_ALLOC_GPU_ONLY);
	if (IS_ERR(obj)) {
		/* if iGVT-g or DMAR is active, stolen mem will be uninitialized */
		if (PTR_ERR(obj) == -ENODEV && is_stolen)
			return 0;
		return PTR_ERR(obj);
	}

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_put;
	}

	err = i915_vma_pin(vma, 0, 0, addr | flags);
	if (err)
		goto err_put;
	i915_vma_unpin(vma);

	if (!drm_mm_node_allocated(&vma->node)) {
		err = -EINVAL;
		goto err_put;
	}

	if (i915_vma_misplaced(vma, 0, 0, addr | flags)) {
		err = -EINVAL;
		goto err_put;
	}

	/* make sure page_sizes_gtt has been populated before use */
	if (i915_is_ggtt(vm) && intel_vm_no_concurrent_access_wa(vm->i915))
		i915_vma_wait_for_bind(vma);

	expected_vma_size = round_up(size, 1 << (ffs(vma->resource->page_sizes_gtt) - 1));
	expected_node_size = expected_vma_size;

	if (HAS_64K_PAGES(vm->i915) && i915_gem_object_is_lmem(obj)) {
		expected_vma_size = round_up(size, I915_GTT_PAGE_SIZE_64K);
		expected_node_size = round_up(size, I915_GTT_PAGE_SIZE_64K);
	}

	if (vma->size != expected_vma_size || vma->node.size != expected_node_size) {
		err = i915_vma_unbind_unlocked(vma);
		err = -EBADSLT;
		goto err_put;
	}

	err = i915_vma_unbind_unlocked(vma);
	if (err)
		goto err_put;

	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

err_put:
	i915_gem_object_put(obj);
	cleanup_freed_objects(vm->i915);
	return err;
}

static int misaligned_pin(struct i915_address_space *vm,
			  u64 hole_start, u64 hole_end,
			  unsigned long end_time)
{
	struct intel_memory_region *mr;
	enum intel_region_id id;
	unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
	int err = 0;
	u64 hole_size = hole_end - hole_start;

	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	for_each_memory_region(mr, vm->i915, id) {
		u64 min_alignment = i915_vm_min_alignment(vm, mr->type);
		u64 size = min_alignment;
		u64 addr = round_down(hole_start + (hole_size / 2), min_alignment);

		/* avoid -ENOSPC on very small hole setups */
		if (hole_size < 3 * min_alignment)
			continue;

		/* we can't test < 4k alignment due to flags being encoded in lower bits */
		if (min_alignment != I915_GTT_PAGE_SIZE_4K) {
			err = misaligned_case(vm, mr, addr + (min_alignment / 2), size, flags);
			/* misaligned should error with -EINVAL */
			if (!err)
				err = -EBADSLT;
			if (err != -EINVAL)
				return err;
		}

		/* test for vma->size expansion to min page size */
		err = misaligned_case(vm, mr, addr, PAGE_SIZE, flags);
		if (err)
			return err;

		/* test for intermediate size not expanding vma->size for large alignments */
		err = misaligned_case(vm, mr, addr, size / 2, flags);
		if (err)
			return err;
	}

	return 0;
}

static int exercise_ppgtt(struct drm_i915_private *dev_priv,
			  int (*func)(struct i915_address_space *vm,
				      u64 hole_start, u64 hole_end,
				      unsigned long end_time))
{
	struct i915_ppgtt *ppgtt;
	IGT_TIMEOUT(end_time);
	struct file *file;
	int err;

	if (!HAS_FULL_PPGTT(dev_priv))
		return 0;

	file = mock_file(dev_priv);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
	if (IS_ERR(ppgtt)) {
		err = PTR_ERR(ppgtt);
		goto out_free;
	}
	GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
	assert_vm_alive(&ppgtt->vm);

	err = func(&ppgtt->vm, 0, ppgtt->vm.total, end_time);

	i915_vm_put(&ppgtt->vm);

out_free:
	fput(file);
	return err;
}

static int igt_ppgtt_fill(void *arg)
{
	return exercise_ppgtt(arg, fill_hole);
}

static int igt_ppgtt_walk(void *arg)
{
	return exercise_ppgtt(arg, walk_hole);
}

static int igt_ppgtt_pot(void *arg)
{
	return exercise_ppgtt(arg, pot_hole);
}

static int igt_ppgtt_drunk(void *arg)
{
	return exercise_ppgtt(arg, drunk_hole);
}

static int igt_ppgtt_lowlevel(void *arg)
{
	return exercise_ppgtt(arg, lowlevel_hole);
}

static int igt_ppgtt_shrink(void *arg)
{
	return exercise_ppgtt(arg, shrink_hole);
}

static int igt_ppgtt_shrink_boom(void *arg)
{
	return exercise_ppgtt(arg, shrink_boom);
}

static int igt_ppgtt_misaligned_pin(void *arg)
{
	return exercise_ppgtt(arg, misaligned_pin);
}

static int sort_holes(void *priv, const struct list_head *A,
		      const struct list_head *B)
{
	struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
	struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);

	if (a->start < b->start)
		return -1;
	else
		return 1;
}

static int exercise_ggtt(struct drm_i915_private *i915,
			 int (*func)(struct i915_address_space *vm,
				     u64 hole_start, u64 hole_end,
				     unsigned long end_time))
{
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	u64 hole_start, hole_end, last = 0;
	struct drm_mm_node *node;
	IGT_TIMEOUT(end_time);
	int err = 0;

restart:
	list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
	drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
		if (hole_start < last)
			continue;

		if (ggtt->vm.mm.color_adjust)
			ggtt->vm.mm.color_adjust(node, 0,
						 &hole_start, &hole_end);
		if (hole_start >= hole_end)
			continue;

		err = func(&ggtt->vm, hole_start, hole_end, end_time);
		if (err)
			break;

		/* As we have manipulated the drm_mm, the list may be corrupt */
		last = hole_end;
		goto restart;
	}

	return err;
}

static int igt_ggtt_fill(void *arg)
{
	return exercise_ggtt(arg, fill_hole);
}

static int igt_ggtt_walk(void *arg)
{
	return exercise_ggtt(arg, walk_hole);
}

static int igt_ggtt_pot(void *arg)
{
	return exercise_ggtt(arg, pot_hole);
}

static int igt_ggtt_drunk(void *arg)
{
	return exercise_ggtt(arg, drunk_hole);
}

static int igt_ggtt_lowlevel(void *arg)
{
	return exercise_ggtt(arg, lowlevel_hole);
}

static int igt_ggtt_misaligned_pin(void *arg)
{
	return exercise_ggtt(arg, misaligned_pin);
}

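/*
 * Alias PAGE_SIZE/sizeof(u32) consecutive GGTT PTEs to one physical page
 * with insert_page(), write a distinct dword through each mapping via the
 * WC aperture, then read them back (in a fresh random order) to verify
 * that every PTE really pointed at that one page.
 */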
static int igt_ggtt_page(void *arg)
{
	const unsigned int count = PAGE_SIZE/sizeof(u32);
	I915_RND_STATE(prng);
	struct drm_i915_private *i915 = arg;
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;
	struct drm_mm_node tmp;
	unsigned int *order, n;
	int err;

	if (!i915_ggtt_has_aperture(ggtt))
		return 0;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err)
		goto out_free;

	memset(&tmp, 0, sizeof(tmp));
	mutex_lock(&ggtt->vm.mutex);
	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
					  count * PAGE_SIZE, 0,
					  I915_COLOR_UNEVICTABLE,
					  0, ggtt->mappable_end,
					  DRM_MM_INSERT_LOW);
	mutex_unlock(&ggtt->vm.mutex);
	if (err)
		goto out_unpin;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + n * PAGE_SIZE;

		ggtt->vm.insert_page(&ggtt->vm,
				     i915_gem_object_get_dma_address(obj, 0),
				     offset,
				     i915_gem_get_pat_index(i915,
							    I915_CACHE_NONE),
				     0);
	}

	order = i915_random_order(count, &prng);
	if (!order) {
		err = -ENOMEM;
		goto out_remove;
	}

	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + order[n] * PAGE_SIZE;
		u32 __iomem *vaddr;

		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
		iowrite32(n, vaddr + n);
		io_mapping_unmap_atomic(vaddr);
	}
	intel_gt_flush_ggtt_writes(ggtt->vm.gt);

	i915_random_reorder(order, count, &prng);
	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + order[n] * PAGE_SIZE;
		u32 __iomem *vaddr;
		u32 val;

		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
		val = ioread32(vaddr + n);
		io_mapping_unmap_atomic(vaddr);

		if (val != n) {
			pr_err("insert page failed: found %d, expected %d\n",
			       val, n);
			err = -EINVAL;
			break;
		}
	}

	kfree(order);
out_remove:
	ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_lock(&ggtt->vm.mutex);
	drm_mm_remove_node(&tmp);
	mutex_unlock(&ggtt->vm.mutex);
out_unpin:
	i915_gem_object_unpin_pages(obj);
out_free:
	i915_gem_object_put(obj);
	return err;
}

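/*
 * Fake a successful bind for the mock GTT: take extra references on the
 * object's pages on behalf of the vma, point the vma (and its resource) at
 * the object's sg_table, and move it onto the VM's bound_list so that
 * eviction treats it as a live binding.
 */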
static void track_vma_bind(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	__i915_gem_object_pin_pages(obj);

	GEM_BUG_ON(atomic_read(&vma->pages_count));
	atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
	__i915_gem_object_pin_pages(obj);
	vma->pages = obj->mm.pages;
	vma->resource->bi.pages = vma->pages;

	mutex_lock(&vma->vm->mutex);
	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
	mutex_unlock(&vma->vm->mutex);
}

static int exercise_mock(struct drm_i915_private *i915,
			 int (*func)(struct i915_address_space *vm,
				     u64 hole_start, u64 hole_end,
				     unsigned long end_time))
{
	const u64 limit = totalram_pages() << PAGE_SHIFT;
	struct i915_address_space *vm;
	struct i915_gem_context *ctx;
	IGT_TIMEOUT(end_time);
	int err;

	ctx = mock_context(i915, "mock");
	if (!ctx)
		return -ENOMEM;

	vm = i915_gem_context_get_eb_vm(ctx);
	err = func(vm, 0, min(vm->total, limit), end_time);
	i915_vm_put(vm);

	mock_context_close(ctx);
	return err;
}

static int igt_mock_fill(void *arg)
{
	struct i915_ggtt *ggtt = arg;

	return exercise_mock(ggtt->vm.i915, fill_hole);
}

static int igt_mock_walk(void *arg)
{
	struct i915_ggtt *ggtt = arg;

	return exercise_mock(ggtt->vm.i915, walk_hole);
}

static int igt_mock_pot(void *arg)
{
	struct i915_ggtt *ggtt = arg;

	return exercise_mock(ggtt->vm.i915, pot_hole);
}

static int igt_mock_drunk(void *arg)
{
	struct i915_ggtt *ggtt = arg;

	return exercise_mock(ggtt->vm.i915, drunk_hole);
}

static int reserve_gtt_with_resource(struct i915_vma *vma, u64 offset)
{
	struct i915_address_space *vm = vma->vm;
	struct i915_vma_resource *vma_res;
	struct drm_i915_gem_object *obj = vma->obj;
	int err;

	vma_res = i915_vma_resource_alloc();
	if (IS_ERR(vma_res))
		return PTR_ERR(vma_res);

	mutex_lock(&vm->mutex);
	err = i915_gem_gtt_reserve(vm, NULL, &vma->node, obj->base.size,
				   offset,
				   obj->pat_index,
				   0);
	if (!err) {
		i915_vma_resource_init_from_vma(vma_res, vma);
		vma->resource = vma_res;
	} else {
		kfree(vma_res);
	}
	mutex_unlock(&vm->mutex);

	return err;
}

static int igt_gtt_reserve(void *arg)
{
	struct i915_ggtt *ggtt = arg;
	struct drm_i915_gem_object *obj, *on;
	I915_RND_STATE(prng);
	LIST_HEAD(objects);
	u64 total;
	int err = -ENODEV;

	/*
	 * i915_gem_gtt_reserve() tries to reserve the precise range
	 * for the node, and evicts if it has to. So our test checks that
	 * it can give us the requested space and prevent overlaps.
	 */

	/* Start by filling the GGTT */
	for (total = 0;
	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
	     total += 2 * I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(ggtt->vm.i915,
						      2 * PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages_unlocked(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);
		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = reserve_gtt_with_resource(vma, total);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
			       total, ggtt->vm.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != total ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
			       vma->node.start, vma->node.size,
			       total, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

	/* Now we start forcing evictions */
	for (total = I915_GTT_PAGE_SIZE;
	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
	     total += 2 * I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(ggtt->vm.i915,
						      2 * PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages_unlocked(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = reserve_gtt_with_resource(vma, total);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
			       total, ggtt->vm.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != total ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
			       vma->node.start, vma->node.size,
			       total, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

	/* And then try at random */
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		struct i915_vma *vma;
		u64 offset;

		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_vma_unbind_unlocked(vma);
		if (err) {
			pr_err("i915_vma_unbind failed with err=%d!\n", err);
			goto out;
		}

		offset = igt_random_offset(&prng,
					   0, ggtt->vm.total,
					   2 * I915_GTT_PAGE_SIZE,
					   I915_GTT_MIN_ALIGNMENT);

		err = reserve_gtt_with_resource(vma, offset);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
			       total, ggtt->vm.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != offset ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
			       vma->node.start, vma->node.size,
			       offset, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

out:
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);
	}
	return err;
}

static int insert_gtt_with_resource(struct i915_vma *vma)
{
	struct i915_address_space *vm = vma->vm;
	struct i915_vma_resource *vma_res;
	struct drm_i915_gem_object *obj = vma->obj;
	int err;

	vma_res = i915_vma_resource_alloc();
	if (IS_ERR(vma_res))
		return PTR_ERR(vma_res);

	mutex_lock(&vm->mutex);
	err = i915_gem_gtt_insert(vm, NULL, &vma->node, obj->base.size, 0,
				  obj->pat_index, 0, vm->total, 0);
	if (!err) {
		i915_vma_resource_init_from_vma(vma_res, vma);
		vma->resource = vma_res;
	} else {
		kfree(vma_res);
	}
	mutex_unlock(&vm->mutex);

	return err;
}

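/*
 * The invalid_insert table below enumerates requests that can never be
 * satisfied: a node larger than the whole GTT, a node larger than its
 * [start, end) window, sizes that wrap around u64, and an alignment that
 * leaves no legal position inside the window. Each must fail with -ENOSPC
 * rather than corrupting the drm_mm.
 */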
static int igt_gtt_insert(void *arg)
{
	struct i915_ggtt *ggtt = arg;
	struct drm_i915_gem_object *obj, *on;
	struct drm_mm_node tmp = {};
	const struct invalid_insert {
		u64 size;
		u64 alignment;
		u64 start, end;
	} invalid_insert[] = {
		{
			ggtt->vm.total + I915_GTT_PAGE_SIZE, 0,
			0, ggtt->vm.total,
		},
		{
			2*I915_GTT_PAGE_SIZE, 0,
			0, I915_GTT_PAGE_SIZE,
		},
		{
			-(u64)I915_GTT_PAGE_SIZE, 0,
			0, 4*I915_GTT_PAGE_SIZE,
		},
		{
			-(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
			0, 4*I915_GTT_PAGE_SIZE,
		},
		{
			I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
			I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
		},
		{}
	}, *ii;
	LIST_HEAD(objects);
	u64 total;
	int err = -ENODEV;

	/*
	 * i915_gem_gtt_insert() tries to allocate some free space in the GTT
	 * for the node, evicting if required.
	 */

	/* Check a couple of obviously invalid requests */
	for (ii = invalid_insert; ii->size; ii++) {
		mutex_lock(&ggtt->vm.mutex);
		err = i915_gem_gtt_insert(&ggtt->vm, NULL, &tmp,
					  ii->size, ii->alignment,
					  I915_COLOR_UNEVICTABLE,
					  ii->start, ii->end,
					  0);
		mutex_unlock(&ggtt->vm.mutex);
		if (err != -ENOSPC) {
			pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
			       ii->size, ii->alignment, ii->start, ii->end,
			       err);
			return -EINVAL;
		}
	}

	/* Start by filling the GGTT */
	for (total = 0;
	     total + I915_GTT_PAGE_SIZE <= ggtt->vm.total;
	     total += I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(ggtt->vm.i915,
						      I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages_unlocked(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = insert_gtt_with_resource(vma);
		if (err == -ENOSPC) {
			/* maxed out the GGTT space */
			i915_gem_object_put(obj);
			break;
		}
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
			       total, ggtt->vm.total, err);
			goto out;
		}
		track_vma_bind(vma);
		__i915_vma_pin(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	}

	list_for_each_entry(obj, &objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		if (!drm_mm_node_allocated(&vma->node)) {
			pr_err("VMA was unexpectedly evicted!\n");
			err = -EINVAL;
			goto out;
		}

		__i915_vma_unpin(vma);
	}

	/* If we then reinsert, we should find the same hole */
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		struct i915_vma *vma;
		u64 offset;

		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		offset = vma->node.start;

		err = i915_vma_unbind_unlocked(vma);
		if (err) {
			pr_err("i915_vma_unbind failed with err=%d!\n", err);
			goto out;
		}

		err = insert_gtt_with_resource(vma);
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
			       total, ggtt->vm.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != offset) {
			pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
			       offset, vma->node.start);
			err = -EINVAL;
			goto out;
		}
	}

	/* And then force evictions */
	for (total = 0;
	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
	     total += 2 * I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(ggtt->vm.i915,
						      2 * I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages_unlocked(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = insert_gtt_with_resource(vma);
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
			       total, ggtt->vm.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	}

out:
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);
	}
	return err;
}

int i915_gem_gtt_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_drunk),
		SUBTEST(igt_mock_walk),
		SUBTEST(igt_mock_pot),
		SUBTEST(igt_mock_fill),
		SUBTEST(igt_gtt_reserve),
		SUBTEST(igt_gtt_insert),
	};
	struct drm_i915_private *i915;
	struct intel_gt *gt;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	/* allocate the ggtt */
	err = intel_gt_assign_ggtt(to_gt(i915));
	if (err)
		goto out_put;

	gt = to_gt(i915);

	mock_init_ggtt(gt);

	err = i915_subtests(tests, gt->ggtt);

	mock_device_flush(i915);
	i915_gem_drain_freed_objects(i915);
	mock_fini_ggtt(gt->ggtt);

out_put:
	mock_destroy_device(i915);
	return err;
}

int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_ppgtt_alloc),
		SUBTEST(igt_ppgtt_lowlevel),
		SUBTEST(igt_ppgtt_drunk),
		SUBTEST(igt_ppgtt_walk),
		SUBTEST(igt_ppgtt_pot),
		SUBTEST(igt_ppgtt_fill),
		SUBTEST(igt_ppgtt_shrink),
		SUBTEST(igt_ppgtt_shrink_boom),
		SUBTEST(igt_ppgtt_misaligned_pin),
		SUBTEST(igt_ggtt_lowlevel),
		SUBTEST(igt_ggtt_drunk),
		SUBTEST(igt_ggtt_walk),
		SUBTEST(igt_ggtt_pot),
		SUBTEST(igt_ggtt_fill),
		SUBTEST(igt_ggtt_page),
		SUBTEST(igt_ggtt_misaligned_pin),
	};

	GEM_BUG_ON(offset_in_page(to_gt(i915)->ggtt->vm.total));

	return i915_live_subtests(tests, i915);
}
1984