xref: /linux/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c (revision 38cb89a6c924c35d7d17ed13ccd3952c82b4e0d1)
1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2016 Intel Corporation
5  */
6 
7 #include <linux/highmem.h>
8 #include <linux/prime_numbers.h>
9 
10 #include <drm/drm_print.h>
11 
12 #include "gem/i915_gem_internal.h"
13 #include "gem/i915_gem_lmem.h"
14 #include "gem/i915_gem_region.h"
15 #include "gem/i915_gem_ttm.h"
16 #include "gem/i915_gem_ttm_move.h"
17 #include "gt/intel_engine_pm.h"
18 #include "gt/intel_gpu_commands.h"
19 #include "gt/intel_gt.h"
20 #include "gt/intel_gt_pm.h"
21 #include "gt/intel_migrate.h"
22 #include "i915_reg.h"
23 #include "i915_ttm_buddy_manager.h"
24 
25 #include "huge_gem_object.h"
26 #include "i915_selftest.h"
27 #include "selftests/i915_random.h"
28 #include "selftests/igt_flush_test.h"
29 #include "selftests/igt_reset.h"
30 #include "selftests/igt_mmap.h"
31 
32 struct tile {
33 	unsigned int width;
34 	unsigned int height;
35 	unsigned int stride;
36 	unsigned int size;
37 	unsigned int tiling;
38 	unsigned int swizzle;
39 };
40 
41 static u64 swizzle_bit(unsigned int bit, u64 offset)
42 {
43 	return (offset & BIT_ULL(bit)) >> (bit - 6);
44 }
45 
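/*
 * Convert a linear byte offset into the offset we expect the write to land
 * at in the backing store, by applying the fence tiling layout (X or Y
 * major) and then the bit-6 swizzle pattern by hand.
 */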
46 static u64 tiled_offset(const struct tile *tile, u64 v)
47 {
48 	u64 x, y;
49 
50 	if (tile->tiling == I915_TILING_NONE)
51 		return v;
52 
53 	y = div64_u64_rem(v, tile->stride, &x);
54 	v = div64_u64_rem(y, tile->height, &y) * tile->stride * tile->height;
55 
56 	if (tile->tiling == I915_TILING_X) {
57 		v += y * tile->width;
58 		v += div64_u64_rem(x, tile->width, &x) << tile->size;
59 		v += x;
60 	} else if (tile->width == 128) {
61 		const unsigned int ytile_span = 16;
62 		const unsigned int ytile_height = 512;
63 
64 		v += y * ytile_span;
65 		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
66 		v += x;
67 	} else {
68 		const unsigned int ytile_span = 32;
69 		const unsigned int ytile_height = 256;
70 
71 		v += y * ytile_span;
72 		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
73 		v += x;
74 	}
75 
76 	switch (tile->swizzle) {
77 	case I915_BIT_6_SWIZZLE_9:
78 		v ^= swizzle_bit(9, v);
79 		break;
80 	case I915_BIT_6_SWIZZLE_9_10:
81 		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
82 		break;
83 	case I915_BIT_6_SWIZZLE_9_11:
84 		v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
85 		break;
86 	case I915_BIT_6_SWIZZLE_9_10_11:
87 		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
88 		break;
89 	}
90 
91 	return v;
92 }
93 
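/*
 * Pick a single random page of the object, pin a partial GGTT view that
 * covers it, write the page index through the iomap, and then verify on
 * the CPU side (using tiled_offset()) that the write landed in the
 * expected page of the backing store.
 */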
94 static int check_partial_mapping(struct drm_i915_gem_object *obj,
95 				 const struct tile *tile,
96 				 struct rnd_state *prng)
97 {
98 	const unsigned long npages = obj->base.size / PAGE_SIZE;
99 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
100 	struct i915_gtt_view view;
101 	struct i915_vma *vma;
102 	unsigned long offset;
103 	unsigned long page;
104 	u32 __iomem *io;
105 	struct page *p;
106 	unsigned int n;
107 	u32 *cpu;
108 	int err;
109 
110 	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
111 	if (err) {
112 		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
113 		       tile->tiling, tile->stride, err);
114 		return err;
115 	}
116 
117 	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
118 	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);
119 
120 	i915_gem_object_lock(obj, NULL);
121 	err = i915_gem_object_set_to_gtt_domain(obj, true);
122 	i915_gem_object_unlock(obj);
123 	if (err) {
124 		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
125 		return err;
126 	}
127 
128 	page = i915_prandom_u32_max_state(npages, prng);
129 	view = compute_partial_view(obj, page, MIN_CHUNK_PAGES);
130 
131 	vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
132 	if (IS_ERR(vma)) {
133 		pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
134 		       page, (int)PTR_ERR(vma));
135 		return PTR_ERR(vma);
136 	}
137 
138 	n = page - view.partial.offset;
139 	GEM_BUG_ON(n >= view.partial.size);
140 
141 	io = i915_vma_pin_iomap(vma);
142 	i915_vma_unpin(vma);
143 	if (IS_ERR(io)) {
144 		pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
145 		       page, (int)PTR_ERR(io));
146 		err = PTR_ERR(io);
147 		goto out;
148 	}
149 
150 	iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
151 	i915_vma_unpin_iomap(vma);
152 
153 	offset = tiled_offset(tile, page << PAGE_SHIFT);
154 	if (offset >= obj->base.size)
155 		goto out;
156 
157 	intel_gt_flush_ggtt_writes(to_gt(i915));
158 
159 	p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
160 	cpu = kmap(p) + offset_in_page(offset);
161 	drm_clflush_virt_range(cpu, sizeof(*cpu));
162 	if (*cpu != (u32)page) {
163 		pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%lu + %u [0x%lx]) of 0x%x, found 0x%x\n",
164 		       page, n,
165 		       view.partial.offset,
166 		       view.partial.size,
167 		       vma->size >> PAGE_SHIFT,
168 		       tile->tiling ? tile_row_pages(obj) : 0,
169 		       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
170 		       offset >> PAGE_SHIFT,
171 		       (unsigned int)offset_in_page(offset),
172 		       offset,
173 		       (u32)page, *cpu);
174 		err = -EINVAL;
175 	}
176 	*cpu = 0;
177 	drm_clflush_virt_range(cpu, sizeof(*cpu));
178 	kunmap(p);
179 
180 out:
181 	i915_gem_object_lock(obj, NULL);
182 	i915_vma_destroy(vma);
183 	i915_gem_object_unlock(obj);
184 	return err;
185 }
186 
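/*
 * As check_partial_mapping(), but instead of a single random page, walk a
 * prime-numbered selection of pages across the whole object until the
 * timeout expires, using a fresh partial GGTT view for each page.
 */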
187 static int check_partial_mappings(struct drm_i915_gem_object *obj,
188 				  const struct tile *tile,
189 				  unsigned long end_time)
190 {
191 	const unsigned int nreal = obj->scratch / PAGE_SIZE;
192 	const unsigned long npages = obj->base.size / PAGE_SIZE;
193 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
194 	struct i915_vma *vma;
195 	unsigned long page;
196 	int err;
197 
198 	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
199 	if (err) {
200 		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
201 		       tile->tiling, tile->stride, err);
202 		return err;
203 	}
204 
205 	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
206 	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);
207 
208 	i915_gem_object_lock(obj, NULL);
209 	err = i915_gem_object_set_to_gtt_domain(obj, true);
210 	i915_gem_object_unlock(obj);
211 	if (err) {
212 		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
213 		return err;
214 	}
215 
216 	for_each_prime_number_from(page, 1, npages) {
217 		struct i915_gtt_view view =
218 			compute_partial_view(obj, page, MIN_CHUNK_PAGES);
219 		unsigned long offset;
220 		u32 __iomem *io;
221 		struct page *p;
222 		unsigned int n;
223 		u32 *cpu;
224 
225 		GEM_BUG_ON(view.partial.size > nreal);
226 		cond_resched();
227 
228 		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
229 		if (IS_ERR(vma)) {
230 			pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
231 			       page, (int)PTR_ERR(vma));
232 			return PTR_ERR(vma);
233 		}
234 
235 		n = page - view.partial.offset;
236 		GEM_BUG_ON(n >= view.partial.size);
237 
238 		io = i915_vma_pin_iomap(vma);
239 		i915_vma_unpin(vma);
240 		if (IS_ERR(io)) {
241 			pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
242 			       page, (int)PTR_ERR(io));
243 			return PTR_ERR(io);
244 		}
245 
246 		iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
247 		i915_vma_unpin_iomap(vma);
248 
249 		offset = tiled_offset(tile, page << PAGE_SHIFT);
250 		if (offset >= obj->base.size)
251 			continue;
252 
253 		intel_gt_flush_ggtt_writes(to_gt(i915));
254 
255 		p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
256 		cpu = kmap(p) + offset_in_page(offset);
257 		drm_clflush_virt_range(cpu, sizeof(*cpu));
258 		if (*cpu != (u32)page) {
259 			pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%lu + %u [0x%lx]) of 0x%x, found 0x%x\n",
260 			       page, n,
261 			       view.partial.offset,
262 			       view.partial.size,
263 			       vma->size >> PAGE_SHIFT,
264 			       tile->tiling ? tile_row_pages(obj) : 0,
265 			       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
266 			       offset >> PAGE_SHIFT,
267 			       (unsigned int)offset_in_page(offset),
268 			       offset,
269 			       (u32)page, *cpu);
270 			err = -EINVAL;
271 		}
272 		*cpu = 0;
273 		drm_clflush_virt_range(cpu, sizeof(*cpu));
274 		kunmap(p);
275 		if (err)
276 			return err;
277 
278 		i915_gem_object_lock(obj, NULL);
279 		i915_vma_destroy(vma);
280 		i915_gem_object_unlock(obj);
281 
282 		if (igt_timeout(end_time,
283 				"%s: timed out after tiling=%d stride=%d\n",
284 				__func__, tile->tiling, tile->stride))
285 			return -EINTR;
286 	}
287 
288 	return 0;
289 }
290 
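/*
 * Fill in the platform-specific tile geometry (width, height and tile size
 * shift) for the requested tiling mode and return the maximum fence pitch
 * expressed in tile widths.
 */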
291 static unsigned int
292 setup_tile_size(struct tile *tile, struct drm_i915_private *i915)
293 {
294 	if (GRAPHICS_VER(i915) <= 2) {
295 		tile->height = 16;
296 		tile->width = 128;
297 		tile->size = 11;
298 	} else if (tile->tiling == I915_TILING_Y &&
299 		   HAS_128_BYTE_Y_TILING(i915)) {
300 		tile->height = 32;
301 		tile->width = 128;
302 		tile->size = 12;
303 	} else {
304 		tile->height = 8;
305 		tile->width = 512;
306 		tile->size = 12;
307 	}
308 
309 	if (GRAPHICS_VER(i915) < 4)
310 		return 8192 / tile->width;
311 	else if (GRAPHICS_VER(i915) < 7)
312 		return 128 * I965_FENCE_MAX_PITCH_VAL / tile->width;
313 	else
314 		return 128 * GEN7_FENCE_MAX_PITCH_VAL / tile->width;
315 }
316 
317 static int igt_partial_tiling(void *arg)
318 {
319 	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
320 	struct drm_i915_private *i915 = arg;
321 	struct drm_i915_gem_object *obj;
322 	intel_wakeref_t wakeref;
323 	int tiling;
324 	int err;
325 
326 	if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
327 		return 0;
328 
329 	/* We want to check the page mapping and fencing of a large object
330 	 * mmapped through the GTT. The object we create is larger than can
331 	 * possibly be mmapped as a whole, and so we must use partial GGTT vmas.
332 	 * We then check that a write through each partial GGTT vma ends up
333 	 * in the right set of pages within the object, and with the expected
334 	 * tiling, which we verify by manual swizzling.
335 	 */
336 
337 	obj = huge_gem_object(i915,
338 			      nreal << PAGE_SHIFT,
339 			      (1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
340 	if (IS_ERR(obj))
341 		return PTR_ERR(obj);
342 
343 	err = i915_gem_object_pin_pages_unlocked(obj);
344 	if (err) {
345 		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
346 		       nreal, obj->base.size / PAGE_SIZE, err);
347 		goto out;
348 	}
349 
350 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
351 
352 	if (1) {
353 		IGT_TIMEOUT(end);
354 		struct tile tile;
355 
356 		tile.height = 1;
357 		tile.width = 1;
358 		tile.size = 0;
359 		tile.stride = 0;
360 		tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
361 		tile.tiling = I915_TILING_NONE;
362 
363 		err = check_partial_mappings(obj, &tile, end);
364 		if (err && err != -EINTR)
365 			goto out_unlock;
366 	}
367 
368 	for (tiling = I915_TILING_X; tiling <= I915_TILING_Y; tiling++) {
369 		IGT_TIMEOUT(end);
370 		unsigned int max_pitch;
371 		unsigned int pitch;
372 		struct tile tile;
373 
374 		if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
375 			/*
376 			 * The swizzling pattern is actually unknown as it
377 			 * varies based on physical address of each page.
378 			 * See i915_gem_detect_bit_6_swizzle().
379 			 */
380 			break;
381 
382 		tile.tiling = tiling;
383 		switch (tiling) {
384 		case I915_TILING_X:
385 			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x;
386 			break;
387 		case I915_TILING_Y:
388 			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y;
389 			break;
390 		}
391 
392 		GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN);
393 		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
394 		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
395 			continue;
396 
397 		max_pitch = setup_tile_size(&tile, i915);
398 
399 		for (pitch = max_pitch; pitch; pitch >>= 1) {
400 			tile.stride = tile.width * pitch;
401 			err = check_partial_mappings(obj, &tile, end);
402 			if (err == -EINTR)
403 				goto next_tiling;
404 			if (err)
405 				goto out_unlock;
406 
407 			if (pitch > 2 && GRAPHICS_VER(i915) >= 4) {
408 				tile.stride = tile.width * (pitch - 1);
409 				err = check_partial_mappings(obj, &tile, end);
410 				if (err == -EINTR)
411 					goto next_tiling;
412 				if (err)
413 					goto out_unlock;
414 			}
415 
416 			if (pitch < max_pitch && GRAPHICS_VER(i915) >= 4) {
417 				tile.stride = tile.width * (pitch + 1);
418 				err = check_partial_mappings(obj, &tile, end);
419 				if (err == -EINTR)
420 					goto next_tiling;
421 				if (err)
422 					goto out_unlock;
423 			}
424 		}
425 
426 		if (GRAPHICS_VER(i915) >= 4) {
427 			for_each_prime_number(pitch, max_pitch) {
428 				tile.stride = tile.width * pitch;
429 				err = check_partial_mappings(obj, &tile, end);
430 				if (err == -EINTR)
431 					goto next_tiling;
432 				if (err)
433 					goto out_unlock;
434 			}
435 		}
436 
437 next_tiling: ;
438 	}
439 
440 out_unlock:
441 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
442 	i915_gem_object_unpin_pages(obj);
443 out:
444 	i915_gem_object_put(obj);
445 	return err;
446 }
447 
448 static int igt_smoke_tiling(void *arg)
449 {
450 	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
451 	struct drm_i915_private *i915 = arg;
452 	struct drm_i915_gem_object *obj;
453 	intel_wakeref_t wakeref;
454 	I915_RND_STATE(prng);
455 	unsigned long count;
456 	IGT_TIMEOUT(end);
457 	int err;
458 
459 	if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
460 		return 0;
461 
462 	/*
463 	 * igt_partial_tiling() does an exhaustive check of partial tiling
464 	 * chunking, but will undoubtedly run out of time. Here, we do a
465 	 * randomised search and hope that over many 1s runs with different
466 	 * seeds we will do a thorough check.
467 	 *
468 	 * Remember to look at the st_seed if we see a flip-flop in BAT!
469 	 */
470 
471 	if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
472 		return 0;
473 
474 	obj = huge_gem_object(i915,
475 			      nreal << PAGE_SHIFT,
476 			      (1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
477 	if (IS_ERR(obj))
478 		return PTR_ERR(obj);
479 
480 	err = i915_gem_object_pin_pages_unlocked(obj);
481 	if (err) {
482 		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
483 		       nreal, obj->base.size / PAGE_SIZE, err);
484 		goto out;
485 	}
486 
487 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
488 
489 	count = 0;
490 	do {
491 		struct tile tile;
492 
493 		tile.tiling =
494 			i915_prandom_u32_max_state(I915_TILING_Y + 1, &prng);
495 		switch (tile.tiling) {
496 		case I915_TILING_NONE:
497 			tile.height = 1;
498 			tile.width = 1;
499 			tile.size = 0;
500 			tile.stride = 0;
501 			tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
502 			break;
503 
504 		case I915_TILING_X:
505 			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x;
506 			break;
507 		case I915_TILING_Y:
508 			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y;
509 			break;
510 		}
511 
512 		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
513 		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
514 			continue;
515 
516 		if (tile.tiling != I915_TILING_NONE) {
517 			unsigned int max_pitch = setup_tile_size(&tile, i915);
518 
519 			tile.stride =
520 				i915_prandom_u32_max_state(max_pitch, &prng);
521 			tile.stride = (1 + tile.stride) * tile.width;
522 			if (GRAPHICS_VER(i915) < 4)
523 				tile.stride = rounddown_pow_of_two(tile.stride);
524 		}
525 
526 		err = check_partial_mapping(obj, &tile, &prng);
527 		if (err)
528 			break;
529 
530 		count++;
531 	} while (!__igt_timeout(end, NULL));
532 
533 	pr_info("%s: Completed %lu trials\n", __func__, count);
534 
535 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
536 	i915_gem_object_unpin_pages(obj);
537 out:
538 	i915_gem_object_put(obj);
539 	return err;
540 }
541 
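/*
 * Submit a write request against the object from every uabi engine and then
 * drop our reference, so the object stays alive only through its active
 * requests and is freed asynchronously once they retire.
 */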
542 static int make_obj_busy(struct drm_i915_gem_object *obj)
543 {
544 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
545 	struct intel_engine_cs *engine;
546 
547 	for_each_uabi_engine(engine, i915) {
548 		struct i915_request *rq;
549 		struct i915_vma *vma;
550 		struct i915_gem_ww_ctx ww;
551 		int err;
552 
553 		vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
554 		if (IS_ERR(vma))
555 			return PTR_ERR(vma);
556 
557 		i915_gem_ww_ctx_init(&ww, false);
558 retry:
559 		err = i915_gem_object_lock(obj, &ww);
560 		if (!err)
561 			err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
562 		if (err)
563 			goto err;
564 
565 		rq = intel_engine_create_kernel_request(engine);
566 		if (IS_ERR(rq)) {
567 			err = PTR_ERR(rq);
568 			goto err_unpin;
569 		}
570 
571 		err = i915_vma_move_to_active(vma, rq,
572 					      EXEC_OBJECT_WRITE);
573 
574 		i915_request_add(rq);
575 err_unpin:
576 		i915_vma_unpin(vma);
577 err:
578 		if (err == -EDEADLK) {
579 			err = i915_gem_ww_ctx_backoff(&ww);
580 			if (!err)
581 				goto retry;
582 		}
583 		i915_gem_ww_ctx_fini(&ww);
584 		if (err)
585 			return err;
586 	}
587 
588 	i915_gem_object_put(obj); /* leave it only alive via its active ref */
589 	return 0;
590 }
591 
592 static enum i915_mmap_type default_mapping(struct drm_i915_private *i915)
593 {
594 	if (HAS_LMEM(i915))
595 		return I915_MMAP_TYPE_FIXED;
596 
597 	return I915_MMAP_TYPE_GTT;
598 }
599 
600 static struct drm_i915_gem_object *
601 create_sys_or_internal(struct drm_i915_private *i915,
602 		       unsigned long size)
603 {
604 	if (HAS_LMEM(i915)) {
605 		struct intel_memory_region *sys_region =
606 			i915->mm.regions[INTEL_REGION_SMEM];
607 
608 		return __i915_gem_object_create_user(i915, size, &sys_region, 1);
609 	}
610 
611 	return i915_gem_object_create_internal(i915, size);
612 }
613 
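/*
 * Try to allocate an mmap offset for a freshly created object of the given
 * size and check that the attempt fails (or succeeds) with the expected
 * error code.
 */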
614 static bool assert_mmap_offset(struct drm_i915_private *i915,
615 			       unsigned long size,
616 			       int expected)
617 {
618 	struct drm_i915_gem_object *obj;
619 	u64 offset;
620 	int ret;
621 
622 	obj = create_sys_or_internal(i915, size);
623 	if (IS_ERR(obj))
624 		return expected && expected == PTR_ERR(obj);
625 
626 	ret = __assign_mmap_offset(obj, default_mapping(i915), &offset, NULL);
627 	i915_gem_object_put(obj);
628 
629 	return ret == expected;
630 }
631 
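/*
 * Stop the shrinker and the background retire worker (and keep the GT awake)
 * so that nothing frees or retires objects behind our back while we
 * deliberately exhaust the mmap offset space.
 */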
632 static void disable_retire_worker(struct drm_i915_private *i915)
633 {
634 	i915_gem_driver_unregister__shrinker(i915);
635 	intel_gt_pm_get_untracked(to_gt(i915));
636 	cancel_delayed_work_sync(&to_gt(i915)->requests.retire_work);
637 }
638 
639 static void restore_retire_worker(struct drm_i915_private *i915)
640 {
641 	igt_flush_test(i915);
642 	intel_gt_pm_put_untracked(to_gt(i915));
643 	i915_gem_driver_register__shrinker(i915);
644 }
645 
646 static void mmap_offset_lock(struct drm_i915_private *i915)
647 	__acquires(&i915->drm.vma_offset_manager->vm_lock)
648 {
649 	write_lock(&i915->drm.vma_offset_manager->vm_lock);
650 }
651 
652 static void mmap_offset_unlock(struct drm_i915_private *i915)
653 	__releases(&i915->drm.vma_offset_manager->vm_lock)
654 {
655 	write_unlock(&i915->drm.vma_offset_manager->vm_lock);
656 }
657 
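/*
 * Shrink the mmap offset (VMA manager) address space down to a single page
 * and verify that offset allocation succeeds only when it should: a
 * page-sized object just fits and anything larger must fail. Finally, churn
 * a few busy objects that die while the space is exhausted.
 */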
658 static int igt_mmap_offset_exhaustion(void *arg)
659 {
660 	struct drm_i915_private *i915 = arg;
661 	struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
662 	struct drm_i915_gem_object *obj;
663 	struct drm_mm_node *hole, *next;
664 	int loop, err = 0;
665 	u64 offset;
666 	int enospc = HAS_LMEM(i915) ? -ENXIO : -ENOSPC;
667 
668 	/* Disable background reaper */
669 	disable_retire_worker(i915);
670 	GEM_BUG_ON(!to_gt(i915)->awake);
671 	intel_gt_retire_requests(to_gt(i915));
672 	i915_gem_drain_freed_objects(i915);
673 
674 	/* Trim the device mmap space to only a page */
675 	mmap_offset_lock(i915);
676 	loop = 1; /* PAGE_SIZE units */
677 	list_for_each_entry_safe(hole, next, &mm->hole_stack, hole_stack) {
678 		struct drm_mm_node *resv;
679 
680 		resv = kzalloc(sizeof(*resv), GFP_NOWAIT);
681 		if (!resv) {
682 			err = -ENOMEM;
683 			goto out_park;
684 		}
685 
686 		resv->start = drm_mm_hole_node_start(hole) + loop;
687 		resv->size = hole->hole_size - loop;
688 		resv->color = -1ul;
689 		loop = 0;
690 
691 		if (!resv->size) {
692 			kfree(resv);
693 			continue;
694 		}
695 
696 		pr_debug("Reserving hole [%llx + %llx]\n",
697 			 resv->start, resv->size);
698 
699 		err = drm_mm_reserve_node(mm, resv);
700 		if (err) {
701 			pr_err("Failed to trim VMA manager, err=%d\n", err);
702 			kfree(resv);
703 			goto out_park;
704 		}
705 	}
706 	GEM_BUG_ON(!list_is_singular(&mm->hole_stack));
707 	mmap_offset_unlock(i915);
708 
709 	/* Just fits! */
710 	if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
711 		pr_err("Unable to insert object into single page hole\n");
712 		err = -EINVAL;
713 		goto out;
714 	}
715 
716 	/* Too large */
717 	if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, enospc)) {
718 		pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
719 		err = -EINVAL;
720 		goto out;
721 	}
722 
723 	/* Fill the hole, further allocation attempts should then fail */
724 	obj = create_sys_or_internal(i915, PAGE_SIZE);
725 	if (IS_ERR(obj)) {
726 		err = PTR_ERR(obj);
727 		pr_err("Unable to create object for reclaimed hole\n");
728 		goto out;
729 	}
730 
731 	err = __assign_mmap_offset(obj, default_mapping(i915), &offset, NULL);
732 	if (err) {
733 		pr_err("Unable to insert object into reclaimed hole\n");
734 		goto err_obj;
735 	}
736 
737 	if (!assert_mmap_offset(i915, PAGE_SIZE, enospc)) {
738 		pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
739 		err = -EINVAL;
740 		goto err_obj;
741 	}
742 
743 	i915_gem_object_put(obj);
744 
745 	/* Now fill with busy dead objects that we expect to reap */
746 	for (loop = 0; loop < 3; loop++) {
747 		if (intel_gt_is_wedged(to_gt(i915)))
748 			break;
749 
750 		obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
751 		if (IS_ERR(obj)) {
752 			err = PTR_ERR(obj);
753 			goto out;
754 		}
755 
756 		err = make_obj_busy(obj);
757 		if (err) {
758 			pr_err("[loop %d] Failed to busy the object\n", loop);
759 			goto err_obj;
760 		}
761 	}
762 
763 out:
764 	mmap_offset_lock(i915);
765 out_park:
766 	drm_mm_for_each_node_safe(hole, next, mm) {
767 		if (hole->color != -1ul)
768 			continue;
769 
770 		drm_mm_remove_node(hole);
771 		kfree(hole);
772 	}
773 	mmap_offset_unlock(i915);
774 	restore_retire_worker(i915);
775 	return err;
776 err_obj:
777 	i915_gem_object_put(obj);
778 	goto out;
779 }
780 
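/*
 * gtt_set()/wc_set() fill the object with POISON_INUSE through a GGTT or WC
 * mapping; the mmap tests then overwrite it with POISON_FREE from userspace,
 * and gtt_check()/wc_check() verify that the new pattern reached the backing
 * store.
 */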
781 static int gtt_set(struct drm_i915_gem_object *obj)
782 {
783 	intel_wakeref_t wakeref;
784 	struct i915_vma *vma;
785 	void __iomem *map;
786 	int err = 0;
787 
788 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
789 	if (IS_ERR(vma))
790 		return PTR_ERR(vma);
791 
792 	wakeref = intel_gt_pm_get(vma->vm->gt);
793 	map = i915_vma_pin_iomap(vma);
794 	i915_vma_unpin(vma);
795 	if (IS_ERR(map)) {
796 		err = PTR_ERR(map);
797 		goto out;
798 	}
799 
800 	memset_io(map, POISON_INUSE, obj->base.size);
801 	i915_vma_unpin_iomap(vma);
802 
803 out:
804 	intel_gt_pm_put(vma->vm->gt, wakeref);
805 	return err;
806 }
807 
808 static int gtt_check(struct drm_i915_gem_object *obj)
809 {
810 	intel_wakeref_t wakeref;
811 	struct i915_vma *vma;
812 	void __iomem *map;
813 	int err = 0;
814 
815 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
816 	if (IS_ERR(vma))
817 		return PTR_ERR(vma);
818 
819 	wakeref = intel_gt_pm_get(vma->vm->gt);
820 	map = i915_vma_pin_iomap(vma);
821 	i915_vma_unpin(vma);
822 	if (IS_ERR(map)) {
823 		err = PTR_ERR(map);
824 		goto out;
825 	}
826 
827 	if (memchr_inv((void __force *)map, POISON_FREE, obj->base.size)) {
828 		pr_err("%s: Write via mmap did not land in backing store (GTT)\n",
829 		       obj->mm.region->name);
830 		err = -EINVAL;
831 	}
832 	i915_vma_unpin_iomap(vma);
833 
834 out:
835 	intel_gt_pm_put(vma->vm->gt, wakeref);
836 	return err;
837 }
838 
839 static int wc_set(struct drm_i915_gem_object *obj)
840 {
841 	void *vaddr;
842 
843 	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
844 	if (IS_ERR(vaddr))
845 		return PTR_ERR(vaddr);
846 
847 	memset(vaddr, POISON_INUSE, obj->base.size);
848 	i915_gem_object_flush_map(obj);
849 	i915_gem_object_unpin_map(obj);
850 
851 	return 0;
852 }
853 
854 static int wc_check(struct drm_i915_gem_object *obj)
855 {
856 	void *vaddr;
857 	int err = 0;
858 
859 	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
860 	if (IS_ERR(vaddr))
861 		return PTR_ERR(vaddr);
862 
863 	if (memchr_inv(vaddr, POISON_FREE, obj->base.size)) {
864 		pr_err("%s: Write via mmap did not land in backing store (WC)\n",
865 		       obj->mm.region->name);
866 		err = -EINVAL;
867 	}
868 	i915_gem_object_unpin_map(obj);
869 
870 	return err;
871 }
872 
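/*
 * Report whether the object can be mapped with the requested mmap type:
 * backends that provide their own mmap_offset hook only support the FIXED
 * type, GTT mmaps need a mappable aperture, and other CPU mmaps need either
 * struct pages or iomem backing.
 */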
873 static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
874 {
875 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
876 	bool no_map;
877 
878 	if (obj->ops->mmap_offset)
879 		return type == I915_MMAP_TYPE_FIXED;
880 	else if (type == I915_MMAP_TYPE_FIXED)
881 		return false;
882 
883 	if (type == I915_MMAP_TYPE_GTT &&
884 	    !i915_ggtt_has_aperture(to_gt(i915)->ggtt))
885 		return false;
886 
887 	i915_gem_object_lock(obj, NULL);
888 	no_map = (type != I915_MMAP_TYPE_GTT &&
889 		  !i915_gem_object_has_struct_page(obj) &&
890 		  !i915_gem_object_has_iomem(obj));
891 	i915_gem_object_unlock(obj);
892 
893 	return !no_map;
894 }
895 
896 #define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24))
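/*
 * Map the object into userspace with the given mmap type, read back the
 * POISON_INUSE pattern and replace it with POISON_FREE through the mapping,
 * then check that the write reached the backing store.
 */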
897 static int __igt_mmap(struct drm_i915_private *i915,
898 		      struct drm_i915_gem_object *obj,
899 		      enum i915_mmap_type type)
900 {
901 	struct vm_area_struct *area;
902 	unsigned long addr;
903 	int err, i;
904 	u64 offset;
905 
906 	if (!can_mmap(obj, type))
907 		return 0;
908 
909 	err = wc_set(obj);
910 	if (err == -ENXIO)
911 		err = gtt_set(obj);
912 	if (err)
913 		return err;
914 
915 	err = __assign_mmap_offset(obj, type, &offset, NULL);
916 	if (err)
917 		return err;
918 
919 	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
920 	if (IS_ERR_VALUE(addr))
921 		return addr;
922 
923 	pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr);
924 
925 	mmap_read_lock(current->mm);
926 	area = vma_lookup(current->mm, addr);
927 	mmap_read_unlock(current->mm);
928 	if (!area) {
929 		pr_err("%s: Did not create a vm_area_struct for the mmap\n",
930 		       obj->mm.region->name);
931 		err = -EINVAL;
932 		goto out_unmap;
933 	}
934 
935 	for (i = 0; i < obj->base.size / sizeof(u32); i++) {
936 		u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
937 		u32 x;
938 
939 		if (get_user(x, ux)) {
940 			pr_err("%s: Unable to read from mmap, offset:%zd\n",
941 			       obj->mm.region->name, i * sizeof(x));
942 			err = -EFAULT;
943 			goto out_unmap;
944 		}
945 
946 		if (x != expand32(POISON_INUSE)) {
947 			pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
948 			       obj->mm.region->name,
949 			       i * sizeof(x), x, expand32(POISON_INUSE));
950 			err = -EINVAL;
951 			goto out_unmap;
952 		}
953 
954 		x = expand32(POISON_FREE);
955 		if (put_user(x, ux)) {
956 			pr_err("%s: Unable to write to mmap, offset:%zd\n",
957 			       obj->mm.region->name, i * sizeof(x));
958 			err = -EFAULT;
959 			goto out_unmap;
960 		}
961 	}
962 
963 	if (type == I915_MMAP_TYPE_GTT)
964 		intel_gt_flush_ggtt_writes(to_gt(i915));
965 
966 	err = wc_check(obj);
967 	if (err == -ENXIO)
968 		err = gtt_check(obj);
969 out_unmap:
970 	vm_munmap(addr, obj->base.size);
971 	return err;
972 }
973 
974 static int igt_mmap(void *arg)
975 {
976 	struct drm_i915_private *i915 = arg;
977 	struct intel_memory_region *mr;
978 	enum intel_region_id id;
979 
980 	for_each_memory_region(mr, i915, id) {
981 		unsigned long sizes[] = {
982 			PAGE_SIZE,
983 			mr->min_page_size,
984 			SZ_4M,
985 		};
986 		int i;
987 
988 		if (mr->private)
989 			continue;
990 
991 		for (i = 0; i < ARRAY_SIZE(sizes); i++) {
992 			struct drm_i915_gem_object *obj;
993 			int err;
994 
995 			obj = __i915_gem_object_create_user(i915, sizes[i], &mr, 1);
996 			if (obj == ERR_PTR(-ENODEV))
997 				continue;
998 
999 			if (IS_ERR(obj))
1000 				return PTR_ERR(obj);
1001 
1002 			err = __igt_mmap(i915, obj, I915_MMAP_TYPE_GTT);
1003 			if (err == 0)
1004 				err = __igt_mmap(i915, obj, I915_MMAP_TYPE_WC);
1005 			if (err == 0)
1006 				err = __igt_mmap(i915, obj, I915_MMAP_TYPE_FIXED);
1007 
1008 			i915_gem_object_put(obj);
1009 			if (err)
1010 				return err;
1011 		}
1012 	}
1013 
1014 	return 0;
1015 }
1016 
1017 static void igt_close_objects(struct drm_i915_private *i915,
1018 			      struct list_head *objects)
1019 {
1020 	struct drm_i915_gem_object *obj, *on;
1021 
1022 	list_for_each_entry_safe(obj, on, objects, st_link) {
1023 		i915_gem_object_lock(obj, NULL);
1024 		if (i915_gem_object_has_pinned_pages(obj))
1025 			i915_gem_object_unpin_pages(obj);
1026 		/* No polluting the memory region between tests */
1027 		__i915_gem_object_put_pages(obj);
1028 		i915_gem_object_unlock(obj);
1029 		list_del(&obj->st_link);
1030 		i915_gem_object_put(obj);
1031 	}
1032 
1033 	cond_resched();
1034 
1035 	i915_gem_drain_freed_objects(i915);
1036 }
1037 
1038 static void igt_make_evictable(struct list_head *objects)
1039 {
1040 	struct drm_i915_gem_object *obj;
1041 
1042 	list_for_each_entry(obj, objects, st_link) {
1043 		i915_gem_object_lock(obj, NULL);
1044 		if (i915_gem_object_has_pinned_pages(obj))
1045 			i915_gem_object_unpin_pages(obj);
1046 		i915_gem_object_unlock(obj);
1047 	}
1048 
1049 	cond_resched();
1050 }
1051 
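/*
 * Exhaust the CPU-visible (mappable) portion of the region by allocating and
 * pinning progressively smaller objects until even a min_page_size
 * allocation fails, keeping them on @objects for later cleanup.
 */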
1052 static int igt_fill_mappable(struct intel_memory_region *mr,
1053 			     struct list_head *objects)
1054 {
1055 	u64 size, total;
1056 	int err;
1057 
1058 	total = 0;
1059 	size = resource_size(&mr->io);
1060 	do {
1061 		struct drm_i915_gem_object *obj;
1062 
1063 		obj = i915_gem_object_create_region(mr, size, 0, 0);
1064 		if (IS_ERR(obj)) {
1065 			err = PTR_ERR(obj);
1066 			goto err_close;
1067 		}
1068 
1069 		list_add(&obj->st_link, objects);
1070 
1071 		err = i915_gem_object_pin_pages_unlocked(obj);
1072 		if (err) {
1073 			if (err != -ENXIO && err != -ENOMEM)
1074 				goto err_close;
1075 
1076 			if (size == mr->min_page_size) {
1077 				err = 0;
1078 				break;
1079 			}
1080 
1081 			size >>= 1;
1082 			continue;
1083 		}
1084 
1085 		total += obj->base.size;
1086 	} while (1);
1087 
1088 	pr_info("%s filled=%lluMiB\n", __func__, total >> 20);
1089 	return 0;
1090 
1091 err_close:
1092 	igt_close_objects(mr->i915, objects);
1093 	return err;
1094 }
1095 
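/*
 * Fault in the mmap at @addr and check the POISON_INUSE pattern written
 * earlier by the GPU clear, replacing it with POISON_FREE. If @unfaultable
 * is set, every access is instead expected to fail (a failed get_user() here
 * standing in for the user-visible SIGBUS).
 */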
1096 static int ___igt_mmap_migrate(struct drm_i915_private *i915,
1097 			       struct drm_i915_gem_object *obj,
1098 			       unsigned long addr,
1099 			       bool unfaultable)
1100 {
1101 	int i;
1102 
1103 	pr_info("igt_mmap(%s, %d) @ %lx\n",
1104 		obj->mm.region->name, I915_MMAP_TYPE_FIXED, addr);
1105 
1106 	for (i = 0; i < obj->base.size / sizeof(u32); i++) {
1107 		u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
1108 		u32 x;
1109 
1110 		if (get_user(x, ux)) {
1111 			if (!unfaultable) {
1112 				pr_err("%s: Unable to read from mmap, offset:%zd\n",
1113 				       obj->mm.region->name, i * sizeof(x));
1114 				return -EFAULT;
1115 			}
1116 
1117 			continue;
1118 		}
1119 
1120 		if (unfaultable) {
1121 			pr_err("%s: Faulted unmappable memory\n",
1122 			       obj->mm.region->name);
1123 			return -EINVAL;
1124 		}
1125 
1126 		if (x != expand32(POISON_INUSE)) {
1127 			pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
1128 			       obj->mm.region->name,
1129 			       i * sizeof(x), x, expand32(POISON_INUSE));
1130 			return -EINVAL;
1131 		}
1132 
1133 		x = expand32(POISON_FREE);
1134 		if (put_user(x, ux)) {
1135 			pr_err("%s: Unable to write to mmap, offset:%zd\n",
1136 			       obj->mm.region->name, i * sizeof(x));
1137 			return -EFAULT;
1138 		}
1139 	}
1140 
1141 	if (unfaultable)
1142 		return 0;
1143 
1144 	obj->flags &= ~I915_BO_ALLOC_GPU_ONLY;
1145 	return wc_check(obj);
1146 }
1147 
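/*
 * Modifiers for __igt_mmap_migrate(): TOPDOWN marks the object GPU-only so
 * it is placed in the non-mappable portion, FILL exhausts the mappable
 * portion beforehand, EVICTABLE unpins the filler objects so they can be
 * evicted, UNFAULTABLE means the CPU fault is expected to fail, and FAIL_GPU
 * injects a migration failure while servicing the fault.
 */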
1148 #define IGT_MMAP_MIGRATE_TOPDOWN     (1 << 0)
1149 #define IGT_MMAP_MIGRATE_FILL        (1 << 1)
1150 #define IGT_MMAP_MIGRATE_EVICTABLE   (1 << 2)
1151 #define IGT_MMAP_MIGRATE_UNFAULTABLE (1 << 3)
1152 #define IGT_MMAP_MIGRATE_FAIL_GPU    (1 << 4)
1153 static int __igt_mmap_migrate(struct intel_memory_region **placements,
1154 			      int n_placements,
1155 			      struct intel_memory_region *expected_mr,
1156 			      unsigned int flags)
1157 {
1158 	struct drm_i915_private *i915 = placements[0]->i915;
1159 	struct drm_i915_gem_object *obj;
1160 	struct i915_request *rq = NULL;
1161 	struct vm_area_struct *area;
1162 	struct file *mock_file;
1163 	unsigned long addr;
1164 	LIST_HEAD(objects);
1165 	u64 offset;
1166 	int err;
1167 
1168 	obj = __i915_gem_object_create_user(i915, PAGE_SIZE,
1169 					    placements,
1170 					    n_placements);
1171 	if (IS_ERR(obj))
1172 		return PTR_ERR(obj);
1173 
1174 	if (flags & IGT_MMAP_MIGRATE_TOPDOWN)
1175 		obj->flags |= I915_BO_ALLOC_GPU_ONLY;
1176 
1177 	err = __assign_mmap_offset(obj, I915_MMAP_TYPE_FIXED, &offset, NULL);
1178 	if (err)
1179 		goto out_put;
1180 
1181 	/*
1182 	 * Pretend to open("/dev/dri/card0"), which will eventually create a GEM
1183 	 * context along with multiple GEM objects (for paging structures and
1184 	 * scratch) that are placed in the mappable portion of GPU memory.
1185 	 * Calling fput() on the file places the objects' cleanup routines on
1186 	 * delayed workqueues, which execute after an unspecified amount of time.
1187 	 * Keep the file open until migration and page fault checks are done to
1188 	 * make sure object cleanup is not executed after igt_fill_mappable()
1189 	 * finishes and before migration is attempted - that would leave a gap
1190 	 * large enough for the migration to succeed, when we'd expect it to fail.
1191 	 */
1192 	mock_file = mock_drm_getfile(i915->drm.primary, O_RDWR);
1193 	if (IS_ERR(mock_file)) {
1194 		err = PTR_ERR(mock_file);
		goto out_put;
	}
1195 
1196 	addr = igt_mmap_offset_with_file(i915, offset, obj->base.size,
1197 					 PROT_WRITE, MAP_SHARED, mock_file);
1198 	if (IS_ERR_VALUE(addr)) {
1199 		err = addr;
1200 		goto out_fput;
1201 	}
1202 
1203 	mmap_read_lock(current->mm);
1204 	area = vma_lookup(current->mm, addr);
1205 	mmap_read_unlock(current->mm);
1206 	if (!area) {
1207 		pr_err("%s: Did not create a vm_area_struct for the mmap\n",
1208 		       obj->mm.region->name);
1209 		err = -EINVAL;
1210 		goto out_addr;
1211 	}
1212 
1213 	if (flags & IGT_MMAP_MIGRATE_FILL) {
1214 		err = igt_fill_mappable(placements[0], &objects);
1215 		if (err)
1216 			goto out_addr;
1217 	}
1218 
1219 	err = i915_gem_object_lock(obj, NULL);
1220 	if (err)
1221 		goto out_addr;
1222 
1223 	err = i915_gem_object_pin_pages(obj);
1224 	if (err) {
1225 		i915_gem_object_unlock(obj);
1226 		goto out_addr;
1227 	}
1228 
1229 	err = intel_context_migrate_clear(to_gt(i915)->migrate.context, NULL,
1230 					  obj->mm.pages->sgl, obj->pat_index,
1231 					  i915_gem_object_is_lmem(obj),
1232 					  expand32(POISON_INUSE), &rq);
1233 	i915_gem_object_unpin_pages(obj);
1234 	if (rq && !err) {
1235 		err = dma_resv_reserve_fences(obj->base.resv, 1);
1236 		if (!err)
1237 			dma_resv_add_fence(obj->base.resv, &rq->fence,
1238 					   DMA_RESV_USAGE_KERNEL);
1239 		i915_request_put(rq);
1240 	}
1241 	i915_gem_object_unlock(obj);
1242 	if (err)
1243 		goto out_addr;
1244 
1245 	if (flags & IGT_MMAP_MIGRATE_EVICTABLE)
1246 		igt_make_evictable(&objects);
1247 
1248 	if (flags & IGT_MMAP_MIGRATE_FAIL_GPU) {
1249 		err = i915_gem_object_lock(obj, NULL);
1250 		if (err)
1251 			goto out_addr;
1252 
1253 		/*
1254 		 * Ensure we only simulate the gpu failure when faulting the
1255 		 * pages.
1256 		 */
1257 		err = i915_gem_object_wait_moving_fence(obj, true);
1258 		i915_gem_object_unlock(obj);
1259 		if (err)
1260 			goto out_addr;
1261 		i915_ttm_migrate_set_failure_modes(true, false);
1262 	}
1263 
1264 	err = ___igt_mmap_migrate(i915, obj, addr,
1265 				  flags & IGT_MMAP_MIGRATE_UNFAULTABLE);
1266 
1267 	if (!err && obj->mm.region != expected_mr) {
1268 		pr_err("%s region mismatch %s\n", __func__, expected_mr->name);
1269 		err = -EINVAL;
1270 	}
1271 
1272 	if (flags & IGT_MMAP_MIGRATE_FAIL_GPU) {
1273 		struct intel_gt *gt;
1274 		unsigned int id;
1275 
1276 		i915_ttm_migrate_set_failure_modes(false, false);
1277 
1278 		for_each_gt(gt, i915, id) {
1279 			intel_wakeref_t wakeref;
1280 			bool wedged;
1281 
1282 			mutex_lock(&gt->reset.mutex);
1283 			wedged = test_bit(I915_WEDGED, &gt->reset.flags);
1284 			mutex_unlock(&gt->reset.mutex);
1285 			if (!wedged) {
1286 				pr_err("gt(%u) not wedged\n", id);
1287 				err = -EINVAL;
1288 				continue;
1289 			}
1290 
1291 			wakeref = intel_runtime_pm_get(gt->uncore->rpm);
1292 			igt_global_reset_lock(gt);
1293 			intel_gt_reset(gt, ALL_ENGINES, NULL);
1294 			igt_global_reset_unlock(gt);
1295 			intel_runtime_pm_put(gt->uncore->rpm, wakeref);
1296 		}
1297 
1298 		if (!i915_gem_object_has_unknown_state(obj)) {
1299 			pr_err("object missing unknown_state\n");
1300 			err = -EINVAL;
1301 		}
1302 	}
1303 
1304 out_addr:
1305 	vm_munmap(addr, obj->base.size);
1306 
1307 out_fput:
1308 	fput(mock_file);
1309 
1310 out_put:
1311 	i915_gem_object_put(obj);
1312 	igt_close_objects(i915, &objects);
1313 	return err;
1314 }
1315 
1316 static int igt_mmap_migrate(void *arg)
1317 {
1318 	struct drm_i915_private *i915 = arg;
1319 	struct intel_memory_region *system = i915->mm.regions[INTEL_REGION_SMEM];
1320 	struct intel_memory_region *mr;
1321 	enum intel_region_id id;
1322 
1323 	for_each_memory_region(mr, i915, id) {
1324 		struct intel_memory_region *mixed[] = { mr, system };
1325 		struct intel_memory_region *single[] = { mr };
1326 		struct ttm_resource_manager *man = mr->region_private;
1327 		struct resource saved_io;
1328 		int err;
1329 
1330 		if (mr->private)
1331 			continue;
1332 
1333 		if (!resource_size(&mr->io))
1334 			continue;
1335 
1336 		/*
1337 		 * For testing purposes let's force small BAR, if not already
1338 		 * present.
1339 		 */
1340 		saved_io = mr->io;
1341 		if (resource_size(&mr->io) == mr->total) {
1342 			resource_size_t io_size = resource_size(&mr->io);
1343 
1344 			io_size = rounddown_pow_of_two(io_size >> 1);
1345 			if (io_size < PAGE_SIZE)
1346 				continue;
1347 
1348 			mr->io = DEFINE_RES_MEM(mr->io.start, io_size);
1349 			i915_ttm_buddy_man_force_visible_size(man,
1350 							      io_size >> PAGE_SHIFT);
1351 		}
1352 
1353 		/*
1354 		 * Allocate in the mappable portion, should be no surprises here.
1355 		 */
1356 		err = __igt_mmap_migrate(mixed, ARRAY_SIZE(mixed), mr, 0);
1357 		if (err)
1358 			goto out_io_size;
1359 
1360 		/*
1361 		 * Allocate in the non-mappable portion, but force migrating to
1362 		 * the mappable portion on fault (LMEM -> LMEM)
1363 		 */
1364 		err = __igt_mmap_migrate(single, ARRAY_SIZE(single), mr,
1365 					 IGT_MMAP_MIGRATE_TOPDOWN |
1366 					 IGT_MMAP_MIGRATE_FILL |
1367 					 IGT_MMAP_MIGRATE_EVICTABLE);
1368 		if (err)
1369 			goto out_io_size;
1370 
1371 		/*
1372 		 * Allocate in the non-mappable portion, but force spilling into
1373 		 * system memory on fault (LMEM -> SMEM)
1374 		 */
1375 		err = __igt_mmap_migrate(mixed, ARRAY_SIZE(mixed), system,
1376 					 IGT_MMAP_MIGRATE_TOPDOWN |
1377 					 IGT_MMAP_MIGRATE_FILL);
1378 		if (err)
1379 			goto out_io_size;
1380 
1381 		/*
1382 		 * Allocate in the non-mappable portion, but since the mappable
1383 		 * portion is already full and we can't spill to system memory,
1384 		 * we should expect the fault to fail.
1385 		 */
1386 		err = __igt_mmap_migrate(single, ARRAY_SIZE(single), mr,
1387 					 IGT_MMAP_MIGRATE_TOPDOWN |
1388 					 IGT_MMAP_MIGRATE_FILL |
1389 					 IGT_MMAP_MIGRATE_UNFAULTABLE);
1390 		if (err)
1391 			goto out_io_size;
1392 
1393 		/*
1394 		 * Allocate in the non-mappable portion, but force migrating to
1395 		 * the mappable portion on fault (LMEM -> LMEM). We then also
1396 		 * simulate a gpu error when moving the pages while faulting
1397 		 * them in, which should result in wedging the gpu and returning
1398 		 * SIGBUS from the fault handler, since we can't fall back to
1399 		 * memcpy.
1400 		 */
1401 		err = __igt_mmap_migrate(single, ARRAY_SIZE(single), mr,
1402 					 IGT_MMAP_MIGRATE_TOPDOWN |
1403 					 IGT_MMAP_MIGRATE_FILL |
1404 					 IGT_MMAP_MIGRATE_EVICTABLE |
1405 					 IGT_MMAP_MIGRATE_FAIL_GPU |
1406 					 IGT_MMAP_MIGRATE_UNFAULTABLE);
1407 out_io_size:
1408 		mr->io = saved_io;
1409 		i915_ttm_buddy_man_force_visible_size(man,
1410 						      resource_size(&mr->io) >> PAGE_SHIFT);
1411 		if (err)
1412 			return err;
1413 	}
1414 
1415 	return 0;
1416 }
1417 
1418 static const char *repr_mmap_type(enum i915_mmap_type type)
1419 {
1420 	switch (type) {
1421 	case I915_MMAP_TYPE_GTT: return "gtt";
1422 	case I915_MMAP_TYPE_WB: return "wb";
1423 	case I915_MMAP_TYPE_WC: return "wc";
1424 	case I915_MMAP_TYPE_UC: return "uc";
1425 	case I915_MMAP_TYPE_FIXED: return "fixed";
1426 	default: return "unknown";
1427 	}
1428 }
1429 
1430 static bool can_access(struct drm_i915_gem_object *obj)
1431 {
1432 	bool access;
1433 
1434 	i915_gem_object_lock(obj, NULL);
1435 	access = i915_gem_object_has_struct_page(obj) ||
1436 		i915_gem_object_has_iomem(obj);
1437 	i915_gem_object_unlock(obj);
1438 
1439 	return access;
1440 }
1441 
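/*
 * Exercise ptrace-style access via access_process_vm(): write a pattern
 * through the user mapping, read it back with access_process_vm(), write a
 * second pattern the same way and confirm it is visible through the mapping
 * again.
 */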
1442 static int __igt_mmap_access(struct drm_i915_private *i915,
1443 			     struct drm_i915_gem_object *obj,
1444 			     enum i915_mmap_type type)
1445 {
1446 	unsigned long __user *ptr;
1447 	unsigned long A, B;
1448 	unsigned long x, y;
1449 	unsigned long addr;
1450 	int err;
1451 	u64 offset;
1452 
1453 	memset(&A, 0xAA, sizeof(A));
1454 	memset(&B, 0xBB, sizeof(B));
1455 
1456 	if (!can_mmap(obj, type) || !can_access(obj))
1457 		return 0;
1458 
1459 	err = __assign_mmap_offset(obj, type, &offset, NULL);
1460 	if (err)
1461 		return err;
1462 
1463 	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
1464 	if (IS_ERR_VALUE(addr))
1465 		return addr;
1466 	ptr = (unsigned long __user *)addr;
1467 
1468 	err = __put_user(A, ptr);
1469 	if (err) {
1470 		pr_err("%s(%s): failed to write into user mmap\n",
1471 		       obj->mm.region->name, repr_mmap_type(type));
1472 		goto out_unmap;
1473 	}
1474 
1475 	intel_gt_flush_ggtt_writes(to_gt(i915));
1476 
1477 	err = access_process_vm(current, addr, &x, sizeof(x), 0);
1478 	if (err != sizeof(x)) {
1479 		pr_err("%s(%s): access_process_vm() read failed\n",
1480 		       obj->mm.region->name, repr_mmap_type(type));
1481 		goto out_unmap;
1482 	}
1483 
1484 	err = access_process_vm(current, addr, &B, sizeof(B), FOLL_WRITE);
1485 	if (err != sizeof(B)) {
1486 		pr_err("%s(%s): access_process_vm() write failed\n",
1487 		       obj->mm.region->name, repr_mmap_type(type));
1488 		goto out_unmap;
1489 	}
1490 
1491 	intel_gt_flush_ggtt_writes(to_gt(i915));
1492 
1493 	err = __get_user(y, ptr);
1494 	if (err) {
1495 		pr_err("%s(%s): failed to read from user mmap\n",
1496 		       obj->mm.region->name, repr_mmap_type(type));
1497 		goto out_unmap;
1498 	}
1499 
1500 	if (x != A || y != B) {
1501 		pr_err("%s(%s): failed to read/write values, found (%lx, %lx)\n",
1502 		       obj->mm.region->name, repr_mmap_type(type),
1503 		       x, y);
1504 		err = -EINVAL;
1505 		goto out_unmap;
1506 	}
1507 
1508 out_unmap:
1509 	vm_munmap(addr, obj->base.size);
1510 	return err;
1511 }
1512 
1513 static int igt_mmap_access(void *arg)
1514 {
1515 	struct drm_i915_private *i915 = arg;
1516 	struct intel_memory_region *mr;
1517 	enum intel_region_id id;
1518 
1519 	for_each_memory_region(mr, i915, id) {
1520 		struct drm_i915_gem_object *obj;
1521 		int err;
1522 
1523 		if (mr->private)
1524 			continue;
1525 
1526 		obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
1527 		if (obj == ERR_PTR(-ENODEV))
1528 			continue;
1529 
1530 		if (IS_ERR(obj))
1531 			return PTR_ERR(obj);
1532 
1533 		err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_GTT);
1534 		if (err == 0)
1535 			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WB);
1536 		if (err == 0)
1537 			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WC);
1538 		if (err == 0)
1539 			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_UC);
1540 		if (err == 0)
1541 			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_FIXED);
1542 
1543 		i915_gem_object_put(obj);
1544 		if (err)
1545 			return err;
1546 	}
1547 
1548 	return 0;
1549 }
1550 
1551 static int __igt_mmap_gpu(struct drm_i915_private *i915,
1552 			  struct drm_i915_gem_object *obj,
1553 			  enum i915_mmap_type type)
1554 {
1555 	struct intel_engine_cs *engine;
1556 	unsigned long addr;
1557 	u32 __user *ux;
1558 	u32 bbe;
1559 	int err;
1560 	u64 offset;
1561 
1562 	/*
1563 	 * Verify that the mmap access into the backing store aligns with
1564 	 * that of the GPU, i.e. that mmap is indeed writing into the same
1565 	 * page as being read by the GPU.
1566 	 */
1567 
1568 	if (!can_mmap(obj, type))
1569 		return 0;
1570 
1571 	err = wc_set(obj);
1572 	if (err == -ENXIO)
1573 		err = gtt_set(obj);
1574 	if (err)
1575 		return err;
1576 
1577 	err = __assign_mmap_offset(obj, type, &offset, NULL);
1578 	if (err)
1579 		return err;
1580 
1581 	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
1582 	if (IS_ERR_VALUE(addr))
1583 		return addr;
1584 
1585 	ux = u64_to_user_ptr((u64)addr);
1586 	bbe = MI_BATCH_BUFFER_END;
1587 	if (put_user(bbe, ux)) {
1588 		pr_err("%s: Unable to write to mmap\n", obj->mm.region->name);
1589 		err = -EFAULT;
1590 		goto out_unmap;
1591 	}
1592 
1593 	if (type == I915_MMAP_TYPE_GTT)
1594 		intel_gt_flush_ggtt_writes(to_gt(i915));
1595 
1596 	for_each_uabi_engine(engine, i915) {
1597 		struct i915_request *rq;
1598 		struct i915_vma *vma;
1599 		struct i915_gem_ww_ctx ww;
1600 
1601 		vma = i915_vma_instance(obj, engine->kernel_context->vm, NULL);
1602 		if (IS_ERR(vma)) {
1603 			err = PTR_ERR(vma);
1604 			goto out_unmap;
1605 		}
1606 
1607 		i915_gem_ww_ctx_init(&ww, false);
1608 retry:
1609 		err = i915_gem_object_lock(obj, &ww);
1610 		if (!err)
1611 			err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
1612 		if (err)
1613 			goto out_ww;
1614 
1615 		rq = i915_request_create(engine->kernel_context);
1616 		if (IS_ERR(rq)) {
1617 			err = PTR_ERR(rq);
1618 			goto out_unpin;
1619 		}
1620 
1621 		err = i915_vma_move_to_active(vma, rq, 0);
1622 
1623 		err = engine->emit_bb_start(rq, i915_vma_offset(vma), 0, 0);
1624 		i915_request_get(rq);
1625 		i915_request_add(rq);
1626 
1627 		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
1628 			struct drm_printer p =
1629 				drm_info_printer(engine->i915->drm.dev);
1630 
1631 			pr_err("%s(%s, %s): Failed to execute batch\n",
1632 			       __func__, engine->name, obj->mm.region->name);
1633 			intel_engine_dump(engine, &p,
1634 					  "%s\n", engine->name);
1635 
1636 			intel_gt_set_wedged(engine->gt);
1637 			err = -EIO;
1638 		}
1639 		i915_request_put(rq);
1640 
1641 out_unpin:
1642 		i915_vma_unpin(vma);
1643 out_ww:
1644 		if (err == -EDEADLK) {
1645 			err = i915_gem_ww_ctx_backoff(&ww);
1646 			if (!err)
1647 				goto retry;
1648 		}
1649 		i915_gem_ww_ctx_fini(&ww);
1650 		if (err)
1651 			goto out_unmap;
1652 	}
1653 
1654 out_unmap:
1655 	vm_munmap(addr, obj->base.size);
1656 	return err;
1657 }
1658 
1659 static int igt_mmap_gpu(void *arg)
1660 {
1661 	struct drm_i915_private *i915 = arg;
1662 	struct intel_memory_region *mr;
1663 	enum intel_region_id id;
1664 
1665 	for_each_memory_region(mr, i915, id) {
1666 		struct drm_i915_gem_object *obj;
1667 		int err;
1668 
1669 		if (mr->private)
1670 			continue;
1671 
1672 		obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
1673 		if (obj == ERR_PTR(-ENODEV))
1674 			continue;
1675 
1676 		if (IS_ERR(obj))
1677 			return PTR_ERR(obj);
1678 
1679 		err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_GTT);
1680 		if (err == 0)
1681 			err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_WC);
1682 		if (err == 0)
1683 			err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_FIXED);
1684 
1685 		i915_gem_object_put(obj);
1686 		if (err)
1687 			return err;
1688 	}
1689 
1690 	return 0;
1691 }
1692 
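/*
 * Page-table walkers used with apply_to_page_range() to assert that the PTEs
 * backing the mmap are either all present (after prefaulting) or all cleared
 * (after the mmap has been revoked).
 */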
1693 static int check_present_pte(pte_t *pte, unsigned long addr, void *data)
1694 {
1695 	pte_t ptent = ptep_get(pte);
1696 
1697 	if (!pte_present(ptent) || pte_none(ptent)) {
1698 		pr_err("missing PTE:%lx\n",
1699 		       (addr - (unsigned long)data) >> PAGE_SHIFT);
1700 		return -EINVAL;
1701 	}
1702 
1703 	return 0;
1704 }
1705 
1706 static int check_absent_pte(pte_t *pte, unsigned long addr, void *data)
1707 {
1708 	pte_t ptent = ptep_get(pte);
1709 
1710 	if (pte_present(ptent) && !pte_none(ptent)) {
1711 		pr_err("present PTE:%lx; expected to be revoked\n",
1712 		       (addr - (unsigned long)data) >> PAGE_SHIFT);
1713 		return -EINVAL;
1714 	}
1715 
1716 	return 0;
1717 }
1718 
1719 static int check_present(unsigned long addr, unsigned long len)
1720 {
1721 	return apply_to_page_range(current->mm, addr, len,
1722 				   check_present_pte, (void *)addr);
1723 }
1724 
1725 static int check_absent(unsigned long addr, unsigned long len)
1726 {
1727 	return apply_to_page_range(current->mm, addr, len,
1728 				   check_absent_pte, (void *)addr);
1729 }
1730 
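/*
 * Touch one byte in every page of the userspace range so that each page is
 * faulted in before we start inspecting the PTEs.
 */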
1731 static int prefault_range(u64 start, u64 len)
1732 {
1733 	const char __user *addr, *end;
1734 	char __maybe_unused c;
1735 	int err;
1736 
1737 	addr = u64_to_user_ptr(start);
1738 	end = addr + len;
1739 
1740 	for (; addr < end; addr += PAGE_SIZE) {
1741 		err = __get_user(c, addr);
1742 		if (err)
1743 			return err;
1744 	}
1745 
1746 	return __get_user(c, end - 1);
1747 }
1748 
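/*
 * Check that unbinding the object (and, for CPU mmaps, dropping its pages)
 * revokes the userspace PTEs that were faulted in beforehand.
 */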
1749 static int __igt_mmap_revoke(struct drm_i915_private *i915,
1750 			     struct drm_i915_gem_object *obj,
1751 			     enum i915_mmap_type type)
1752 {
1753 	unsigned long addr;
1754 	int err;
1755 	u64 offset;
1756 
1757 	if (!can_mmap(obj, type))
1758 		return 0;
1759 
1760 	err = __assign_mmap_offset(obj, type, &offset, NULL);
1761 	if (err)
1762 		return err;
1763 
1764 	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
1765 	if (IS_ERR_VALUE(addr))
1766 		return addr;
1767 
1768 	err = prefault_range(addr, obj->base.size);
1769 	if (err)
1770 		goto out_unmap;
1771 
1772 	err = check_present(addr, obj->base.size);
1773 	if (err) {
1774 		pr_err("%s: was not present\n", obj->mm.region->name);
1775 		goto out_unmap;
1776 	}
1777 
1778 	/*
1779 	 * After unbinding the object from the GGTT, its address may be reused
1780 	 * for other objects. Ergo we have to revoke the previous mmap PTE
1781 	 * access as it no longer points to the same object.
1782 	 */
1783 	i915_gem_object_lock(obj, NULL);
1784 	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
1785 	i915_gem_object_unlock(obj);
1786 	if (err) {
1787 		pr_err("Failed to unbind object!\n");
1788 		goto out_unmap;
1789 	}
1790 
1791 	if (type != I915_MMAP_TYPE_GTT) {
1792 		i915_gem_object_lock(obj, NULL);
1793 		__i915_gem_object_put_pages(obj);
1794 		i915_gem_object_unlock(obj);
1795 		if (i915_gem_object_has_pages(obj)) {
1796 			pr_err("Failed to put-pages object!\n");
1797 			err = -EINVAL;
1798 			goto out_unmap;
1799 		}
1800 	}
1801 
1802 	err = check_absent(addr, obj->base.size);
1803 	if (err) {
1804 		pr_err("%s: was not absent\n", obj->mm.region->name);
1805 		goto out_unmap;
1806 	}
1807 
1808 out_unmap:
1809 	vm_munmap(addr, obj->base.size);
1810 	return err;
1811 }
1812 
1813 static int igt_mmap_revoke(void *arg)
1814 {
1815 	struct drm_i915_private *i915 = arg;
1816 	struct intel_memory_region *mr;
1817 	enum intel_region_id id;
1818 
1819 	for_each_memory_region(mr, i915, id) {
1820 		struct drm_i915_gem_object *obj;
1821 		int err;
1822 
1823 		if (mr->private)
1824 			continue;
1825 
1826 		obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
1827 		if (obj == ERR_PTR(-ENODEV))
1828 			continue;
1829 
1830 		if (IS_ERR(obj))
1831 			return PTR_ERR(obj);
1832 
1833 		err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_GTT);
1834 		if (err == 0)
1835 			err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_WC);
1836 		if (err == 0)
1837 			err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_FIXED);
1838 
1839 		i915_gem_object_put(obj);
1840 		if (err)
1841 			return err;
1842 	}
1843 
1844 	return 0;
1845 }
1846 
1847 int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
1848 {
1849 	int ret;
1850 	bool unuse_mm = false;
1851 	static const struct i915_subtest tests[] = {
1852 		SUBTEST(igt_partial_tiling),
1853 		SUBTEST(igt_smoke_tiling),
1854 		SUBTEST(igt_mmap_offset_exhaustion),
1855 		SUBTEST(igt_mmap),
1856 		SUBTEST(igt_mmap_migrate),
1857 		SUBTEST(igt_mmap_access),
1858 		SUBTEST(igt_mmap_revoke),
1859 		SUBTEST(igt_mmap_gpu),
1860 	};
1861 
1862 	if (!current->mm) {
1863 		kthread_use_mm(current->active_mm);
1864 		unuse_mm = true;
1865 	}
1866 
1867 	ret = i915_live_subtests(tests, i915);
1868 
1869 	if (unuse_mm)
1870 		kthread_unuse_mm(current->active_mm);
1871 
1872 	return ret;
1873 }
1874