xref: /linux/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c (revision 44343e8b250abb2f6bfd615493ca07a7f11f3cc2)
1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2016 Intel Corporation
5  */
6 
7 #include <linux/highmem.h>
8 #include <linux/prime_numbers.h>
9 
10 #include "gem/i915_gem_internal.h"
11 #include "gem/i915_gem_lmem.h"
12 #include "gem/i915_gem_region.h"
13 #include "gem/i915_gem_ttm.h"
14 #include "gem/i915_gem_ttm_move.h"
15 #include "gt/intel_engine_pm.h"
16 #include "gt/intel_gpu_commands.h"
17 #include "gt/intel_gt.h"
18 #include "gt/intel_gt_pm.h"
19 #include "gt/intel_migrate.h"
20 #include "i915_reg.h"
21 #include "i915_ttm_buddy_manager.h"
22 
23 #include "huge_gem_object.h"
24 #include "i915_selftest.h"
25 #include "selftests/i915_random.h"
26 #include "selftests/igt_flush_test.h"
27 #include "selftests/igt_reset.h"
28 #include "selftests/igt_mmap.h"
29 
30 struct tile {
31 	unsigned int width;
32 	unsigned int height;
33 	unsigned int stride;
34 	unsigned int size;
35 	unsigned int tiling;
36 	unsigned int swizzle;
37 };
38 
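/*
 * Bit 6 swizzling XORs address bit 6 with one or more higher address bits.
 * swizzle_bit() extracts the requested bit from the offset and shifts it
 * down into the bit 6 position so tiled_offset() can XOR it in below.
 */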
39 static u64 swizzle_bit(unsigned int bit, u64 offset)
40 {
41 	return (offset & BIT_ULL(bit)) >> (bit - 6);
42 }
43 
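/*
 * Translate an offset in the linear (fenced) GTT view into the offset
 * within the object's backing store where that byte actually lands once
 * the X/Y tile layout and the platform's bit 6 swizzle are applied. The
 * partial mapping checks below use this to find, with the CPU, a value
 * written through a tiled GGTT mmap.
 */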
44 static u64 tiled_offset(const struct tile *tile, u64 v)
45 {
46 	u64 x, y;
47 
48 	if (tile->tiling == I915_TILING_NONE)
49 		return v;
50 
51 	y = div64_u64_rem(v, tile->stride, &x);
52 	v = div64_u64_rem(y, tile->height, &y) * tile->stride * tile->height;
53 
54 	if (tile->tiling == I915_TILING_X) {
55 		v += y * tile->width;
56 		v += div64_u64_rem(x, tile->width, &x) << tile->size;
57 		v += x;
58 	} else if (tile->width == 128) {
59 		const unsigned int ytile_span = 16;
60 		const unsigned int ytile_height = 512;
61 
62 		v += y * ytile_span;
63 		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
64 		v += x;
65 	} else {
66 		const unsigned int ytile_span = 32;
67 		const unsigned int ytile_height = 256;
68 
69 		v += y * ytile_span;
70 		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
71 		v += x;
72 	}
73 
74 	switch (tile->swizzle) {
75 	case I915_BIT_6_SWIZZLE_9:
76 		v ^= swizzle_bit(9, v);
77 		break;
78 	case I915_BIT_6_SWIZZLE_9_10:
79 		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
80 		break;
81 	case I915_BIT_6_SWIZZLE_9_11:
82 		v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
83 		break;
84 	case I915_BIT_6_SWIZZLE_9_10_11:
85 		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
86 		break;
87 	}
88 
89 	return v;
90 }
91 
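/*
 * Write a page's index through a pinned partial GGTT view of a randomly
 * chosen page and verify, via the CPU, that the value landed at the tiled
 * offset computed for that page.
 */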
92 static int check_partial_mapping(struct drm_i915_gem_object *obj,
93 				 const struct tile *tile,
94 				 struct rnd_state *prng)
95 {
96 	const unsigned long npages = obj->base.size / PAGE_SIZE;
97 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
98 	struct i915_gtt_view view;
99 	struct i915_vma *vma;
100 	unsigned long offset;
101 	unsigned long page;
102 	u32 __iomem *io;
103 	struct page *p;
104 	unsigned int n;
105 	u32 *cpu;
106 	int err;
107 
108 	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
109 	if (err) {
110 		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
111 		       tile->tiling, tile->stride, err);
112 		return err;
113 	}
114 
115 	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
116 	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);
117 
118 	i915_gem_object_lock(obj, NULL);
119 	err = i915_gem_object_set_to_gtt_domain(obj, true);
120 	i915_gem_object_unlock(obj);
121 	if (err) {
122 		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
123 		return err;
124 	}
125 
126 	page = i915_prandom_u32_max_state(npages, prng);
127 	view = compute_partial_view(obj, page, MIN_CHUNK_PAGES);
128 
129 	vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
130 	if (IS_ERR(vma)) {
131 		pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
132 		       page, (int)PTR_ERR(vma));
133 		return PTR_ERR(vma);
134 	}
135 
136 	n = page - view.partial.offset;
137 	GEM_BUG_ON(n >= view.partial.size);
138 
139 	io = i915_vma_pin_iomap(vma);
140 	i915_vma_unpin(vma);
141 	if (IS_ERR(io)) {
142 		pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
143 		       page, (int)PTR_ERR(io));
144 		err = PTR_ERR(io);
145 		goto out;
146 	}
147 
148 	iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
149 	i915_vma_unpin_iomap(vma);
150 
151 	offset = tiled_offset(tile, page << PAGE_SHIFT);
152 	if (offset >= obj->base.size)
153 		goto out;
154 
155 	intel_gt_flush_ggtt_writes(to_gt(i915));
156 
157 	p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
158 	cpu = kmap(p) + offset_in_page(offset);
159 	drm_clflush_virt_range(cpu, sizeof(*cpu));
160 	if (*cpu != (u32)page) {
161 		pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%lu + %u [0x%lx]) of 0x%x, found 0x%x\n",
162 		       page, n,
163 		       view.partial.offset,
164 		       view.partial.size,
165 		       vma->size >> PAGE_SHIFT,
166 		       tile->tiling ? tile_row_pages(obj) : 0,
167 		       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
168 		       offset >> PAGE_SHIFT,
169 		       (unsigned int)offset_in_page(offset),
170 		       offset,
171 		       (u32)page, *cpu);
172 		err = -EINVAL;
173 	}
174 	*cpu = 0;
175 	drm_clflush_virt_range(cpu, sizeof(*cpu));
176 	kunmap(p);
177 
178 out:
179 	i915_gem_object_lock(obj, NULL);
180 	i915_vma_destroy(vma);
181 	i915_gem_object_unlock(obj);
182 	return err;
183 }
184 
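/*
 * As check_partial_mapping(), but iterate over a prime-numbered selection
 * of pages across the whole object for the given tiling, until the
 * selftest timeout expires.
 */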
185 static int check_partial_mappings(struct drm_i915_gem_object *obj,
186 				  const struct tile *tile,
187 				  unsigned long end_time)
188 {
189 	const unsigned int nreal = obj->scratch / PAGE_SIZE;
190 	const unsigned long npages = obj->base.size / PAGE_SIZE;
191 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
192 	struct i915_vma *vma;
193 	unsigned long page;
194 	int err;
195 
196 	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
197 	if (err) {
198 		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
199 		       tile->tiling, tile->stride, err);
200 		return err;
201 	}
202 
203 	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
204 	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);
205 
206 	i915_gem_object_lock(obj, NULL);
207 	err = i915_gem_object_set_to_gtt_domain(obj, true);
208 	i915_gem_object_unlock(obj);
209 	if (err) {
210 		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
211 		return err;
212 	}
213 
214 	for_each_prime_number_from(page, 1, npages) {
215 		struct i915_gtt_view view =
216 			compute_partial_view(obj, page, MIN_CHUNK_PAGES);
217 		unsigned long offset;
218 		u32 __iomem *io;
219 		struct page *p;
220 		unsigned int n;
221 		u32 *cpu;
222 
223 		GEM_BUG_ON(view.partial.size > nreal);
224 		cond_resched();
225 
226 		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
227 		if (IS_ERR(vma)) {
228 			pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
229 			       page, (int)PTR_ERR(vma));
230 			return PTR_ERR(vma);
231 		}
232 
233 		n = page - view.partial.offset;
234 		GEM_BUG_ON(n >= view.partial.size);
235 
236 		io = i915_vma_pin_iomap(vma);
237 		i915_vma_unpin(vma);
238 		if (IS_ERR(io)) {
239 			pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
240 			       page, (int)PTR_ERR(io));
241 			return PTR_ERR(io);
242 		}
243 
244 		iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
245 		i915_vma_unpin_iomap(vma);
246 
247 		offset = tiled_offset(tile, page << PAGE_SHIFT);
248 		if (offset >= obj->base.size)
249 			continue;
250 
251 		intel_gt_flush_ggtt_writes(to_gt(i915));
252 
253 		p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
254 		cpu = kmap(p) + offset_in_page(offset);
255 		drm_clflush_virt_range(cpu, sizeof(*cpu));
256 		if (*cpu != (u32)page) {
257 			pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%lu + %u [0x%lx]) of 0x%x, found 0x%x\n",
258 			       page, n,
259 			       view.partial.offset,
260 			       view.partial.size,
261 			       vma->size >> PAGE_SHIFT,
262 			       tile->tiling ? tile_row_pages(obj) : 0,
263 			       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
264 			       offset >> PAGE_SHIFT,
265 			       (unsigned int)offset_in_page(offset),
266 			       offset,
267 			       (u32)page, *cpu);
268 			err = -EINVAL;
269 		}
270 		*cpu = 0;
271 		drm_clflush_virt_range(cpu, sizeof(*cpu));
272 		kunmap(p);
273 		if (err)
274 			return err;
275 
276 		i915_gem_object_lock(obj, NULL);
277 		i915_vma_destroy(vma);
278 		i915_gem_object_unlock(obj);
279 
280 		if (igt_timeout(end_time,
281 				"%s: timed out after tiling=%d stride=%d\n",
282 				__func__, tile->tiling, tile->stride))
283 			return -EINTR;
284 	}
285 
286 	return 0;
287 }
288 
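/*
 * Fill in the tile geometry (width, height and log2 size in bytes) for
 * this platform and tiling mode, and return the maximum pitch, measured
 * in tile widths, that the fence hardware supports.
 */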
289 static unsigned int
290 setup_tile_size(struct tile *tile, struct drm_i915_private *i915)
291 {
292 	if (GRAPHICS_VER(i915) <= 2) {
293 		tile->height = 16;
294 		tile->width = 128;
295 		tile->size = 11;
296 	} else if (tile->tiling == I915_TILING_Y &&
297 		   HAS_128_BYTE_Y_TILING(i915)) {
298 		tile->height = 32;
299 		tile->width = 128;
300 		tile->size = 12;
301 	} else {
302 		tile->height = 8;
303 		tile->width = 512;
304 		tile->size = 12;
305 	}
306 
307 	if (GRAPHICS_VER(i915) < 4)
308 		return 8192 / tile->width;
309 	else if (GRAPHICS_VER(i915) < 7)
310 		return 128 * I965_FENCE_MAX_PITCH_VAL / tile->width;
311 	else
312 		return 128 * GEN7_FENCE_MAX_PITCH_VAL / tile->width;
313 }
314 
315 static int igt_partial_tiling(void *arg)
316 {
317 	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
318 	struct drm_i915_private *i915 = arg;
319 	struct drm_i915_gem_object *obj;
320 	intel_wakeref_t wakeref;
321 	int tiling;
322 	int err;
323 
324 	if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
325 		return 0;
326 
327 	/* We want to check the page mapping and fencing of a large object
328 	 * mmapped through the GTT. The object we create is larger than can
329 	 * possibly be mmapped as a whole, and so we must use partial GGTT vmas.
330 	 * We then check that a write through each partial GGTT vma ends up
331 	 * in the right set of pages within the object, and with the expected
332 	 * tiling, which we verify by manual swizzling.
333 	 */
334 
335 	obj = huge_gem_object(i915,
336 			      nreal << PAGE_SHIFT,
337 			      (1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
338 	if (IS_ERR(obj))
339 		return PTR_ERR(obj);
340 
341 	err = i915_gem_object_pin_pages_unlocked(obj);
342 	if (err) {
343 		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
344 		       nreal, obj->base.size / PAGE_SIZE, err);
345 		goto out;
346 	}
347 
348 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
349 
350 	if (1) {
351 		IGT_TIMEOUT(end);
352 		struct tile tile;
353 
354 		tile.height = 1;
355 		tile.width = 1;
356 		tile.size = 0;
357 		tile.stride = 0;
358 		tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
359 		tile.tiling = I915_TILING_NONE;
360 
361 		err = check_partial_mappings(obj, &tile, end);
362 		if (err && err != -EINTR)
363 			goto out_unlock;
364 	}
365 
366 	for (tiling = I915_TILING_X; tiling <= I915_TILING_Y; tiling++) {
367 		IGT_TIMEOUT(end);
368 		unsigned int max_pitch;
369 		unsigned int pitch;
370 		struct tile tile;
371 
372 		if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
373 			/*
374 			 * The swizzling pattern is actually unknown as it
375 			 * varies based on physical address of each page.
376 			 * See i915_gem_detect_bit_6_swizzle().
377 			 */
378 			break;
379 
380 		tile.tiling = tiling;
381 		switch (tiling) {
382 		case I915_TILING_X:
383 			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x;
384 			break;
385 		case I915_TILING_Y:
386 			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y;
387 			break;
388 		}
389 
390 		GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN);
391 		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
392 		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
393 			continue;
394 
395 		max_pitch = setup_tile_size(&tile, i915);
396 
397 		for (pitch = max_pitch; pitch; pitch >>= 1) {
398 			tile.stride = tile.width * pitch;
399 			err = check_partial_mappings(obj, &tile, end);
400 			if (err == -EINTR)
401 				goto next_tiling;
402 			if (err)
403 				goto out_unlock;
404 
405 			if (pitch > 2 && GRAPHICS_VER(i915) >= 4) {
406 				tile.stride = tile.width * (pitch - 1);
407 				err = check_partial_mappings(obj, &tile, end);
408 				if (err == -EINTR)
409 					goto next_tiling;
410 				if (err)
411 					goto out_unlock;
412 			}
413 
414 			if (pitch < max_pitch && GRAPHICS_VER(i915) >= 4) {
415 				tile.stride = tile.width * (pitch + 1);
416 				err = check_partial_mappings(obj, &tile, end);
417 				if (err == -EINTR)
418 					goto next_tiling;
419 				if (err)
420 					goto out_unlock;
421 			}
422 		}
423 
424 		if (GRAPHICS_VER(i915) >= 4) {
425 			for_each_prime_number(pitch, max_pitch) {
426 				tile.stride = tile.width * pitch;
427 				err = check_partial_mappings(obj, &tile, end);
428 				if (err == -EINTR)
429 					goto next_tiling;
430 				if (err)
431 					goto out_unlock;
432 			}
433 		}
434 
435 next_tiling: ;
436 	}
437 
438 out_unlock:
439 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
440 	i915_gem_object_unpin_pages(obj);
441 out:
442 	i915_gem_object_put(obj);
443 	return err;
444 }
445 
446 static int igt_smoke_tiling(void *arg)
447 {
448 	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
449 	struct drm_i915_private *i915 = arg;
450 	struct drm_i915_gem_object *obj;
451 	intel_wakeref_t wakeref;
452 	I915_RND_STATE(prng);
453 	unsigned long count;
454 	IGT_TIMEOUT(end);
455 	int err;
456 
457 	if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
458 		return 0;
459 
460 	/*
461 	 * igt_partial_tiling() does an exhaustive check of partial tiling
462 	 * chunking, but will undoubtedly run out of time. Here, we do a
463 	 * randomised search and hope that over many 1s runs with different
464 	 * seeds we will do a thorough check.
465 	 *
466 	 * Remember to look at the st_seed if we see a flip-flop in BAT!
467 	 */
468 
469 	if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
470 		return 0;
471 
472 	obj = huge_gem_object(i915,
473 			      nreal << PAGE_SHIFT,
474 			      (1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
475 	if (IS_ERR(obj))
476 		return PTR_ERR(obj);
477 
478 	err = i915_gem_object_pin_pages_unlocked(obj);
479 	if (err) {
480 		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
481 		       nreal, obj->base.size / PAGE_SIZE, err);
482 		goto out;
483 	}
484 
485 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
486 
487 	count = 0;
488 	do {
489 		struct tile tile;
490 
491 		tile.tiling =
492 			i915_prandom_u32_max_state(I915_TILING_Y + 1, &prng);
493 		switch (tile.tiling) {
494 		case I915_TILING_NONE:
495 			tile.height = 1;
496 			tile.width = 1;
497 			tile.size = 0;
498 			tile.stride = 0;
499 			tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
500 			break;
501 
502 		case I915_TILING_X:
503 			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x;
504 			break;
505 		case I915_TILING_Y:
506 			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y;
507 			break;
508 		}
509 
510 		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
511 		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
512 			continue;
513 
514 		if (tile.tiling != I915_TILING_NONE) {
515 			unsigned int max_pitch = setup_tile_size(&tile, i915);
516 
517 			tile.stride =
518 				i915_prandom_u32_max_state(max_pitch, &prng);
519 			tile.stride = (1 + tile.stride) * tile.width;
520 			if (GRAPHICS_VER(i915) < 4)
521 				tile.stride = rounddown_pow_of_two(tile.stride);
522 		}
523 
524 		err = check_partial_mapping(obj, &tile, &prng);
525 		if (err)
526 			break;
527 
528 		count++;
529 	} while (!__igt_timeout(end, NULL));
530 
531 	pr_info("%s: Completed %lu trials\n", __func__, count);
532 
533 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
534 	i915_gem_object_unpin_pages(obj);
535 out:
536 	i915_gem_object_put(obj);
537 	return err;
538 }
539 
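/*
 * Submit a write to the object from every uabi engine and then drop our
 * reference, so the object stays alive only through its active requests
 * and must be reaped once they retire.
 */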
540 static int make_obj_busy(struct drm_i915_gem_object *obj)
541 {
542 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
543 	struct intel_engine_cs *engine;
544 
545 	for_each_uabi_engine(engine, i915) {
546 		struct i915_request *rq;
547 		struct i915_vma *vma;
548 		struct i915_gem_ww_ctx ww;
549 		int err;
550 
551 		vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
552 		if (IS_ERR(vma))
553 			return PTR_ERR(vma);
554 
555 		i915_gem_ww_ctx_init(&ww, false);
556 retry:
557 		err = i915_gem_object_lock(obj, &ww);
558 		if (!err)
559 			err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
560 		if (err)
561 			goto err;
562 
563 		rq = intel_engine_create_kernel_request(engine);
564 		if (IS_ERR(rq)) {
565 			err = PTR_ERR(rq);
566 			goto err_unpin;
567 		}
568 
569 		err = i915_vma_move_to_active(vma, rq,
570 					      EXEC_OBJECT_WRITE);
571 
572 		i915_request_add(rq);
573 err_unpin:
574 		i915_vma_unpin(vma);
575 err:
576 		if (err == -EDEADLK) {
577 			err = i915_gem_ww_ctx_backoff(&ww);
578 			if (!err)
579 				goto retry;
580 		}
581 		i915_gem_ww_ctx_fini(&ww);
582 		if (err)
583 			return err;
584 	}
585 
586 	i915_gem_object_put(obj); /* leave it alive only via its active ref */
587 	return 0;
588 }
589 
590 static enum i915_mmap_type default_mapping(struct drm_i915_private *i915)
591 {
592 	if (HAS_LMEM(i915))
593 		return I915_MMAP_TYPE_FIXED;
594 
595 	return I915_MMAP_TYPE_GTT;
596 }
597 
598 static struct drm_i915_gem_object *
599 create_sys_or_internal(struct drm_i915_private *i915,
600 		       unsigned long size)
601 {
602 	if (HAS_LMEM(i915)) {
603 		struct intel_memory_region *sys_region =
604 			i915->mm.regions[INTEL_REGION_SMEM];
605 
606 		return __i915_gem_object_create_user(i915, size, &sys_region, 1);
607 	}
608 
609 	return i915_gem_object_create_internal(i915, size);
610 }
611 
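/*
 * Create an object of the given size and check that assigning it an mmap
 * offset fails (or succeeds) with the expected error code.
 */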
612 static bool assert_mmap_offset(struct drm_i915_private *i915,
613 			       unsigned long size,
614 			       int expected)
615 {
616 	struct drm_i915_gem_object *obj;
617 	u64 offset;
618 	int ret;
619 
620 	obj = create_sys_or_internal(i915, size);
621 	if (IS_ERR(obj))
622 		return expected && expected == PTR_ERR(obj);
623 
624 	ret = __assign_mmap_offset(obj, default_mapping(i915), &offset, NULL);
625 	i915_gem_object_put(obj);
626 
627 	return ret == expected;
628 }
629 
630 static void disable_retire_worker(struct drm_i915_private *i915)
631 {
632 	i915_gem_driver_unregister__shrinker(i915);
633 	intel_gt_pm_get_untracked(to_gt(i915));
634 	cancel_delayed_work_sync(&to_gt(i915)->requests.retire_work);
635 }
636 
637 static void restore_retire_worker(struct drm_i915_private *i915)
638 {
639 	igt_flush_test(i915);
640 	intel_gt_pm_put_untracked(to_gt(i915));
641 	i915_gem_driver_register__shrinker(i915);
642 }
643 
644 static void mmap_offset_lock(struct drm_i915_private *i915)
645 	__acquires(&i915->drm.vma_offset_manager->vm_lock)
646 {
647 	write_lock(&i915->drm.vma_offset_manager->vm_lock);
648 }
649 
650 static void mmap_offset_unlock(struct drm_i915_private *i915)
651 	__releases(&i915->drm.vma_offset_manager->vm_lock)
652 {
653 	write_unlock(&i915->drm.vma_offset_manager->vm_lock);
654 }
655 
656 static int igt_mmap_offset_exhaustion(void *arg)
657 {
658 	struct drm_i915_private *i915 = arg;
659 	struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
660 	struct drm_i915_gem_object *obj;
661 	struct drm_mm_node *hole, *next;
662 	int loop, err = 0;
663 	u64 offset;
664 	int enospc = HAS_LMEM(i915) ? -ENXIO : -ENOSPC;
665 
666 	/* Disable background reaper */
667 	disable_retire_worker(i915);
668 	GEM_BUG_ON(!to_gt(i915)->awake);
669 	intel_gt_retire_requests(to_gt(i915));
670 	i915_gem_drain_freed_objects(i915);
671 
672 	/* Trim the device mmap space to only a page */
673 	mmap_offset_lock(i915);
674 	loop = 1; /* PAGE_SIZE units */
675 	list_for_each_entry_safe(hole, next, &mm->hole_stack, hole_stack) {
676 		struct drm_mm_node *resv;
677 
678 		resv = kzalloc(sizeof(*resv), GFP_NOWAIT);
679 		if (!resv) {
680 			err = -ENOMEM;
681 			goto out_park;
682 		}
683 
684 		resv->start = drm_mm_hole_node_start(hole) + loop;
685 		resv->size = hole->hole_size - loop;
686 		resv->color = -1ul;
687 		loop = 0;
688 
689 		if (!resv->size) {
690 			kfree(resv);
691 			continue;
692 		}
693 
694 		pr_debug("Reserving hole [%llx + %llx]\n",
695 			 resv->start, resv->size);
696 
697 		err = drm_mm_reserve_node(mm, resv);
698 		if (err) {
699 			pr_err("Failed to trim VMA manager, err=%d\n", err);
700 			kfree(resv);
701 			goto out_park;
702 		}
703 	}
704 	GEM_BUG_ON(!list_is_singular(&mm->hole_stack));
705 	mmap_offset_unlock(i915);
706 
707 	/* Just fits! */
708 	if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
709 		pr_err("Unable to insert object into single page hole\n");
710 		err = -EINVAL;
711 		goto out;
712 	}
713 
714 	/* Too large */
715 	if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, enospc)) {
716 		pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
717 		err = -EINVAL;
718 		goto out;
719 	}
720 
721 	/* Fill the hole, further allocation attempts should then fail */
722 	obj = create_sys_or_internal(i915, PAGE_SIZE);
723 	if (IS_ERR(obj)) {
724 		err = PTR_ERR(obj);
725 		pr_err("Unable to create object for reclaimed hole\n");
726 		goto out;
727 	}
728 
729 	err = __assign_mmap_offset(obj, default_mapping(i915), &offset, NULL);
730 	if (err) {
731 		pr_err("Unable to insert object into reclaimed hole\n");
732 		goto err_obj;
733 	}
734 
735 	if (!assert_mmap_offset(i915, PAGE_SIZE, enospc)) {
736 		pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
737 		err = -EINVAL;
738 		goto err_obj;
739 	}
740 
741 	i915_gem_object_put(obj);
742 
743 	/* Now fill with busy dead objects that we expect to reap */
744 	for (loop = 0; loop < 3; loop++) {
745 		if (intel_gt_is_wedged(to_gt(i915)))
746 			break;
747 
748 		obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
749 		if (IS_ERR(obj)) {
750 			err = PTR_ERR(obj);
751 			goto out;
752 		}
753 
754 		err = make_obj_busy(obj);
755 		if (err) {
756 			pr_err("[loop %d] Failed to busy the object\n", loop);
757 			goto err_obj;
758 		}
759 	}
760 
761 out:
762 	mmap_offset_lock(i915);
763 out_park:
764 	drm_mm_for_each_node_safe(hole, next, mm) {
765 		if (hole->color != -1ul)
766 			continue;
767 
768 		drm_mm_remove_node(hole);
769 		kfree(hole);
770 	}
771 	mmap_offset_unlock(i915);
772 	restore_retire_worker(i915);
773 	return err;
774 err_obj:
775 	i915_gem_object_put(obj);
776 	goto out;
777 }
778 
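/*
 * gtt_set()/wc_set() fill the object with POISON_INUSE through a kernel
 * GGTT iomap or WC mapping; gtt_check()/wc_check() then verify that the
 * POISON_FREE pattern written via the user mmap reached the backing store.
 */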
779 static int gtt_set(struct drm_i915_gem_object *obj)
780 {
781 	intel_wakeref_t wakeref;
782 	struct i915_vma *vma;
783 	void __iomem *map;
784 	int err = 0;
785 
786 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
787 	if (IS_ERR(vma))
788 		return PTR_ERR(vma);
789 
790 	wakeref = intel_gt_pm_get(vma->vm->gt);
791 	map = i915_vma_pin_iomap(vma);
792 	i915_vma_unpin(vma);
793 	if (IS_ERR(map)) {
794 		err = PTR_ERR(map);
795 		goto out;
796 	}
797 
798 	memset_io(map, POISON_INUSE, obj->base.size);
799 	i915_vma_unpin_iomap(vma);
800 
801 out:
802 	intel_gt_pm_put(vma->vm->gt, wakeref);
803 	return err;
804 }
805 
806 static int gtt_check(struct drm_i915_gem_object *obj)
807 {
808 	intel_wakeref_t wakeref;
809 	struct i915_vma *vma;
810 	void __iomem *map;
811 	int err = 0;
812 
813 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
814 	if (IS_ERR(vma))
815 		return PTR_ERR(vma);
816 
817 	wakeref = intel_gt_pm_get(vma->vm->gt);
818 	map = i915_vma_pin_iomap(vma);
819 	i915_vma_unpin(vma);
820 	if (IS_ERR(map)) {
821 		err = PTR_ERR(map);
822 		goto out;
823 	}
824 
825 	if (memchr_inv((void __force *)map, POISON_FREE, obj->base.size)) {
826 		pr_err("%s: Write via mmap did not land in backing store (GTT)\n",
827 		       obj->mm.region->name);
828 		err = -EINVAL;
829 	}
830 	i915_vma_unpin_iomap(vma);
831 
832 out:
833 	intel_gt_pm_put(vma->vm->gt, wakeref);
834 	return err;
835 }
836 
837 static int wc_set(struct drm_i915_gem_object *obj)
838 {
839 	void *vaddr;
840 
841 	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
842 	if (IS_ERR(vaddr))
843 		return PTR_ERR(vaddr);
844 
845 	memset(vaddr, POISON_INUSE, obj->base.size);
846 	i915_gem_object_flush_map(obj);
847 	i915_gem_object_unpin_map(obj);
848 
849 	return 0;
850 }
851 
852 static int wc_check(struct drm_i915_gem_object *obj)
853 {
854 	void *vaddr;
855 	int err = 0;
856 
857 	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
858 	if (IS_ERR(vaddr))
859 		return PTR_ERR(vaddr);
860 
861 	if (memchr_inv(vaddr, POISON_FREE, obj->base.size)) {
862 		pr_err("%s: Write via mmap did not land in backing store (WC)\n",
863 		       obj->mm.region->name);
864 		err = -EINVAL;
865 	}
866 	i915_gem_object_unpin_map(obj);
867 
868 	return err;
869 }
870 
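/*
 * Not every mmap type is valid for every object: objects providing their
 * own mmap_offset only expose the fixed mapping, a GTT mmap needs a
 * mappable aperture, and the remaining CPU mmap types need struct pages
 * or iomem to map.
 */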
871 static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
872 {
873 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
874 	bool no_map;
875 
876 	if (obj->ops->mmap_offset)
877 		return type == I915_MMAP_TYPE_FIXED;
878 	else if (type == I915_MMAP_TYPE_FIXED)
879 		return false;
880 
881 	if (type == I915_MMAP_TYPE_GTT &&
882 	    !i915_ggtt_has_aperture(to_gt(i915)->ggtt))
883 		return false;
884 
885 	i915_gem_object_lock(obj, NULL);
886 	no_map = (type != I915_MMAP_TYPE_GTT &&
887 		  !i915_gem_object_has_struct_page(obj) &&
888 		  !i915_gem_object_has_iomem(obj));
889 	i915_gem_object_unlock(obj);
890 
891 	return !no_map;
892 }
893 
894 #define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24))
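/*
 * Fill the object with POISON_INUSE via a kernel mapping, then through a
 * user mmap of the requested type check that every u32 reads back as
 * POISON_INUSE and overwrite it with POISON_FREE, finally verifying from
 * the kernel side that the writes reached the backing store.
 */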
895 static int __igt_mmap(struct drm_i915_private *i915,
896 		      struct drm_i915_gem_object *obj,
897 		      enum i915_mmap_type type)
898 {
899 	struct vm_area_struct *area;
900 	unsigned long addr;
901 	int err, i;
902 	u64 offset;
903 
904 	if (!can_mmap(obj, type))
905 		return 0;
906 
907 	err = wc_set(obj);
908 	if (err == -ENXIO)
909 		err = gtt_set(obj);
910 	if (err)
911 		return err;
912 
913 	err = __assign_mmap_offset(obj, type, &offset, NULL);
914 	if (err)
915 		return err;
916 
917 	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
918 	if (IS_ERR_VALUE(addr))
919 		return addr;
920 
921 	pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr);
922 
923 	mmap_read_lock(current->mm);
924 	area = vma_lookup(current->mm, addr);
925 	mmap_read_unlock(current->mm);
926 	if (!area) {
927 		pr_err("%s: Did not create a vm_area_struct for the mmap\n",
928 		       obj->mm.region->name);
929 		err = -EINVAL;
930 		goto out_unmap;
931 	}
932 
933 	for (i = 0; i < obj->base.size / sizeof(u32); i++) {
934 		u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
935 		u32 x;
936 
937 		if (get_user(x, ux)) {
938 			pr_err("%s: Unable to read from mmap, offset:%zd\n",
939 			       obj->mm.region->name, i * sizeof(x));
940 			err = -EFAULT;
941 			goto out_unmap;
942 		}
943 
944 		if (x != expand32(POISON_INUSE)) {
945 			pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
946 			       obj->mm.region->name,
947 			       i * sizeof(x), x, expand32(POISON_INUSE));
948 			err = -EINVAL;
949 			goto out_unmap;
950 		}
951 
952 		x = expand32(POISON_FREE);
953 		if (put_user(x, ux)) {
954 			pr_err("%s: Unable to write to mmap, offset:%zd\n",
955 			       obj->mm.region->name, i * sizeof(x));
956 			err = -EFAULT;
957 			goto out_unmap;
958 		}
959 	}
960 
961 	if (type == I915_MMAP_TYPE_GTT)
962 		intel_gt_flush_ggtt_writes(to_gt(i915));
963 
964 	err = wc_check(obj);
965 	if (err == -ENXIO)
966 		err = gtt_check(obj);
967 out_unmap:
968 	vm_munmap(addr, obj->base.size);
969 	return err;
970 }
971 
972 static int igt_mmap(void *arg)
973 {
974 	struct drm_i915_private *i915 = arg;
975 	struct intel_memory_region *mr;
976 	enum intel_region_id id;
977 
978 	for_each_memory_region(mr, i915, id) {
979 		unsigned long sizes[] = {
980 			PAGE_SIZE,
981 			mr->min_page_size,
982 			SZ_4M,
983 		};
984 		int i;
985 
986 		if (mr->private)
987 			continue;
988 
989 		for (i = 0; i < ARRAY_SIZE(sizes); i++) {
990 			struct drm_i915_gem_object *obj;
991 			int err;
992 
993 			obj = __i915_gem_object_create_user(i915, sizes[i], &mr, 1);
994 			if (obj == ERR_PTR(-ENODEV))
995 				continue;
996 
997 			if (IS_ERR(obj))
998 				return PTR_ERR(obj);
999 
1000 			err = __igt_mmap(i915, obj, I915_MMAP_TYPE_GTT);
1001 			if (err == 0)
1002 				err = __igt_mmap(i915, obj, I915_MMAP_TYPE_WC);
1003 			if (err == 0)
1004 				err = __igt_mmap(i915, obj, I915_MMAP_TYPE_FIXED);
1005 
1006 			i915_gem_object_put(obj);
1007 			if (err)
1008 				return err;
1009 		}
1010 	}
1011 
1012 	return 0;
1013 }
1014 
1015 static void igt_close_objects(struct drm_i915_private *i915,
1016 			      struct list_head *objects)
1017 {
1018 	struct drm_i915_gem_object *obj, *on;
1019 
1020 	list_for_each_entry_safe(obj, on, objects, st_link) {
1021 		i915_gem_object_lock(obj, NULL);
1022 		if (i915_gem_object_has_pinned_pages(obj))
1023 			i915_gem_object_unpin_pages(obj);
1024 		/* No polluting the memory region between tests */
1025 		__i915_gem_object_put_pages(obj);
1026 		i915_gem_object_unlock(obj);
1027 		list_del(&obj->st_link);
1028 		i915_gem_object_put(obj);
1029 	}
1030 
1031 	cond_resched();
1032 
1033 	i915_gem_drain_freed_objects(i915);
1034 }
1035 
1036 static void igt_make_evictable(struct list_head *objects)
1037 {
1038 	struct drm_i915_gem_object *obj;
1039 
1040 	list_for_each_entry(obj, objects, st_link) {
1041 		i915_gem_object_lock(obj, NULL);
1042 		if (i915_gem_object_has_pinned_pages(obj))
1043 			i915_gem_object_unpin_pages(obj);
1044 		i915_gem_object_unlock(obj);
1045 	}
1046 
1047 	cond_resched();
1048 }
1049 
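/*
 * Fill the CPU-visible (io) portion of the region with pinned objects,
 * halving the allocation size whenever pinning fails, until even the
 * region's minimum page size no longer fits.
 */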
1050 static int igt_fill_mappable(struct intel_memory_region *mr,
1051 			     struct list_head *objects)
1052 {
1053 	u64 size, total;
1054 	int err;
1055 
1056 	total = 0;
1057 	size = resource_size(&mr->io);
1058 	do {
1059 		struct drm_i915_gem_object *obj;
1060 
1061 		obj = i915_gem_object_create_region(mr, size, 0, 0);
1062 		if (IS_ERR(obj)) {
1063 			err = PTR_ERR(obj);
1064 			goto err_close;
1065 		}
1066 
1067 		list_add(&obj->st_link, objects);
1068 
1069 		err = i915_gem_object_pin_pages_unlocked(obj);
1070 		if (err) {
1071 			if (err != -ENXIO && err != -ENOMEM)
1072 				goto err_close;
1073 
1074 			if (size == mr->min_page_size) {
1075 				err = 0;
1076 				break;
1077 			}
1078 
1079 			size >>= 1;
1080 			continue;
1081 		}
1082 
1083 		total += obj->base.size;
1084 	} while (1);
1085 
1086 	pr_info("%s filled=%lluMiB\n", __func__, total >> 20);
1087 	return 0;
1088 
1089 err_close:
1090 	igt_close_objects(mr->i915, objects);
1091 	return err;
1092 }
1093 
1094 static int ___igt_mmap_migrate(struct drm_i915_private *i915,
1095 			       struct drm_i915_gem_object *obj,
1096 			       unsigned long addr,
1097 			       bool unfaultable)
1098 {
1099 	int i;
1100 
1101 	pr_info("igt_mmap(%s, %d) @ %lx\n",
1102 		obj->mm.region->name, I915_MMAP_TYPE_FIXED, addr);
1103 
1104 	for (i = 0; i < obj->base.size / sizeof(u32); i++) {
1105 		u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
1106 		u32 x;
1107 
1108 		if (get_user(x, ux)) {
1109 			if (!unfaultable) {
1110 				pr_err("%s: Unable to read from mmap, offset:%zd\n",
1111 				       obj->mm.region->name, i * sizeof(x));
1112 				return -EFAULT;
1113 			}
1114 
1115 			continue;
1116 		}
1117 
1118 		if (unfaultable) {
1119 			pr_err("%s: Faulted unmappable memory\n",
1120 			       obj->mm.region->name);
1121 			return -EINVAL;
1122 		}
1123 
1124 		if (x != expand32(POISON_INUSE)) {
1125 			pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
1126 			       obj->mm.region->name,
1127 			       i * sizeof(x), x, expand32(POISON_INUSE));
1128 			return -EINVAL;
1129 		}
1130 
1131 		x = expand32(POISON_FREE);
1132 		if (put_user(x, ux)) {
1133 			pr_err("%s: Unable to write to mmap, offset:%zd\n",
1134 			       obj->mm.region->name, i * sizeof(x));
1135 			return -EFAULT;
1136 		}
1137 	}
1138 
1139 	if (unfaultable)
1140 		return 0;
1141 
1142 	obj->flags &= ~I915_BO_ALLOC_GPU_ONLY;
1143 	return wc_check(obj);
1144 }
1145 
1146 #define IGT_MMAP_MIGRATE_TOPDOWN     (1 << 0)
1147 #define IGT_MMAP_MIGRATE_FILL        (1 << 1)
1148 #define IGT_MMAP_MIGRATE_EVICTABLE   (1 << 2)
1149 #define IGT_MMAP_MIGRATE_UNFAULTABLE (1 << 3)
1150 #define IGT_MMAP_MIGRATE_FAIL_GPU    (1 << 4)
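/*
 * Create an object with the given placements, clear it to POISON_INUSE
 * using the migrate context, then fault it through a fixed mmap and check
 * that it ends up in @expected_mr. The flags above optionally force a
 * GPU-only (non-mappable) placement, pre-fill the mappable portion, make
 * that filler evictable, inject a GPU copy failure, or declare that the
 * fault itself is expected to fail.
 */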
1151 static int __igt_mmap_migrate(struct intel_memory_region **placements,
1152 			      int n_placements,
1153 			      struct intel_memory_region *expected_mr,
1154 			      unsigned int flags)
1155 {
1156 	struct drm_i915_private *i915 = placements[0]->i915;
1157 	struct drm_i915_gem_object *obj;
1158 	struct i915_request *rq = NULL;
1159 	struct vm_area_struct *area;
1160 	unsigned long addr;
1161 	LIST_HEAD(objects);
1162 	u64 offset;
1163 	int err;
1164 
1165 	obj = __i915_gem_object_create_user(i915, PAGE_SIZE,
1166 					    placements,
1167 					    n_placements);
1168 	if (IS_ERR(obj))
1169 		return PTR_ERR(obj);
1170 
1171 	if (flags & IGT_MMAP_MIGRATE_TOPDOWN)
1172 		obj->flags |= I915_BO_ALLOC_GPU_ONLY;
1173 
1174 	err = __assign_mmap_offset(obj, I915_MMAP_TYPE_FIXED, &offset, NULL);
1175 	if (err)
1176 		goto out_put;
1177 
1178 	/*
1179 	 * This will eventually create a GEM context, due to opening a dummy drm
1180 	 * file, which needs a tiny amount of mappable device memory for the
1181 	 * top-level paging structures (and perhaps scratch), so make sure we
1182 	 * allocate early, to avoid tears.
1183 	 */
1184 	addr = igt_mmap_offset(i915, offset, obj->base.size,
1185 			       PROT_WRITE, MAP_SHARED);
1186 	if (IS_ERR_VALUE(addr)) {
1187 		err = addr;
1188 		goto out_put;
1189 	}
1190 
1191 	mmap_read_lock(current->mm);
1192 	area = vma_lookup(current->mm, addr);
1193 	mmap_read_unlock(current->mm);
1194 	if (!area) {
1195 		pr_err("%s: Did not create a vm_area_struct for the mmap\n",
1196 		       obj->mm.region->name);
1197 		err = -EINVAL;
1198 		goto out_addr;
1199 	}
1200 
1201 	if (flags & IGT_MMAP_MIGRATE_FILL) {
1202 		err = igt_fill_mappable(placements[0], &objects);
1203 		if (err)
1204 			goto out_addr;
1205 	}
1206 
1207 	err = i915_gem_object_lock(obj, NULL);
1208 	if (err)
1209 		goto out_addr;
1210 
1211 	err = i915_gem_object_pin_pages(obj);
1212 	if (err) {
1213 		i915_gem_object_unlock(obj);
1214 		goto out_addr;
1215 	}
1216 
1217 	err = intel_context_migrate_clear(to_gt(i915)->migrate.context, NULL,
1218 					  obj->mm.pages->sgl, obj->pat_index,
1219 					  i915_gem_object_is_lmem(obj),
1220 					  expand32(POISON_INUSE), &rq);
1221 	i915_gem_object_unpin_pages(obj);
1222 	if (rq && !err) {
1223 		err = dma_resv_reserve_fences(obj->base.resv, 1);
1224 		if (!err)
1225 			dma_resv_add_fence(obj->base.resv, &rq->fence,
1226 					   DMA_RESV_USAGE_KERNEL);
1227 		i915_request_put(rq);
1228 	}
1229 	i915_gem_object_unlock(obj);
1230 	if (err)
1231 		goto out_addr;
1232 
1233 	if (flags & IGT_MMAP_MIGRATE_EVICTABLE)
1234 		igt_make_evictable(&objects);
1235 
1236 	if (flags & IGT_MMAP_MIGRATE_FAIL_GPU) {
1237 		err = i915_gem_object_lock(obj, NULL);
1238 		if (err)
1239 			goto out_addr;
1240 
1241 		/*
1242 		 * Ensure we only simulate the gpu failure when faulting the
1243 		 * pages.
1244 		 */
1245 		err = i915_gem_object_wait_moving_fence(obj, true);
1246 		i915_gem_object_unlock(obj);
1247 		if (err)
1248 			goto out_addr;
1249 		i915_ttm_migrate_set_failure_modes(true, false);
1250 	}
1251 
1252 	err = ___igt_mmap_migrate(i915, obj, addr,
1253 				  flags & IGT_MMAP_MIGRATE_UNFAULTABLE);
1254 
1255 	if (!err && obj->mm.region != expected_mr) {
1256 		pr_err("%s region mismatch %s\n", __func__, expected_mr->name);
1257 		err = -EINVAL;
1258 	}
1259 
1260 	if (flags & IGT_MMAP_MIGRATE_FAIL_GPU) {
1261 		struct intel_gt *gt;
1262 		unsigned int id;
1263 
1264 		i915_ttm_migrate_set_failure_modes(false, false);
1265 
1266 		for_each_gt(gt, i915, id) {
1267 			intel_wakeref_t wakeref;
1268 			bool wedged;
1269 
1270 			mutex_lock(&gt->reset.mutex);
1271 			wedged = test_bit(I915_WEDGED, &gt->reset.flags);
1272 			mutex_unlock(&gt->reset.mutex);
1273 			if (!wedged) {
1274 				pr_err("gt(%u) not wedged\n", id);
1275 				err = -EINVAL;
1276 				continue;
1277 			}
1278 
1279 			wakeref = intel_runtime_pm_get(gt->uncore->rpm);
1280 			igt_global_reset_lock(gt);
1281 			intel_gt_reset(gt, ALL_ENGINES, NULL);
1282 			igt_global_reset_unlock(gt);
1283 			intel_runtime_pm_put(gt->uncore->rpm, wakeref);
1284 		}
1285 
1286 		if (!i915_gem_object_has_unknown_state(obj)) {
1287 			pr_err("object missing unknown_state\n");
1288 			err = -EINVAL;
1289 		}
1290 	}
1291 
1292 out_addr:
1293 	vm_munmap(addr, obj->base.size);
1294 
1295 out_put:
1296 	i915_gem_object_put(obj);
1297 	igt_close_objects(i915, &objects);
1298 	return err;
1299 }
1300 
1301 static int igt_mmap_migrate(void *arg)
1302 {
1303 	struct drm_i915_private *i915 = arg;
1304 	struct intel_memory_region *system = i915->mm.regions[INTEL_REGION_SMEM];
1305 	struct intel_memory_region *mr;
1306 	enum intel_region_id id;
1307 
1308 	for_each_memory_region(mr, i915, id) {
1309 		struct intel_memory_region *mixed[] = { mr, system };
1310 		struct intel_memory_region *single[] = { mr };
1311 		struct ttm_resource_manager *man = mr->region_private;
1312 		struct resource saved_io;
1313 		int err;
1314 
1315 		if (mr->private)
1316 			continue;
1317 
1318 		if (!resource_size(&mr->io))
1319 			continue;
1320 
1321 		/*
1322 		 * For testing purposes let's force a small BAR, if not already
1323 		 * present.
1324 		 */
1325 		saved_io = mr->io;
1326 		if (resource_size(&mr->io) == mr->total) {
1327 			resource_size_t io_size = resource_size(&mr->io);
1328 
1329 			io_size = rounddown_pow_of_two(io_size >> 1);
1330 			if (io_size < PAGE_SIZE)
1331 				continue;
1332 
1333 			mr->io = DEFINE_RES_MEM(mr->io.start, io_size);
1334 			i915_ttm_buddy_man_force_visible_size(man,
1335 							      io_size >> PAGE_SHIFT);
1336 		}
1337 
1338 		/*
1339 		 * Allocate in the mappable portion; there should be no surprises here.
1340 		 */
1341 		err = __igt_mmap_migrate(mixed, ARRAY_SIZE(mixed), mr, 0);
1342 		if (err)
1343 			goto out_io_size;
1344 
1345 		/*
1346 		 * Allocate in the non-mappable portion, but force migrating to
1347 		 * the mappable portion on fault (LMEM -> LMEM)
1348 		 */
1349 		err = __igt_mmap_migrate(single, ARRAY_SIZE(single), mr,
1350 					 IGT_MMAP_MIGRATE_TOPDOWN |
1351 					 IGT_MMAP_MIGRATE_FILL |
1352 					 IGT_MMAP_MIGRATE_EVICTABLE);
1353 		if (err)
1354 			goto out_io_size;
1355 
1356 		/*
1357 		 * Allocate in the non-mappable portion, but force spilling into
1358 		 * system memory on fault (LMEM -> SMEM)
1359 		 */
1360 		err = __igt_mmap_migrate(mixed, ARRAY_SIZE(mixed), system,
1361 					 IGT_MMAP_MIGRATE_TOPDOWN |
1362 					 IGT_MMAP_MIGRATE_FILL);
1363 		if (err)
1364 			goto out_io_size;
1365 
1366 		/*
1367 		 * Allocate in the non-mappable portion, but since the mappable
1368 		 * portion is already full, and we can't spill to system memory,
1369 		 * then we should expect the fault to fail.
1370 		 */
1371 		err = __igt_mmap_migrate(single, ARRAY_SIZE(single), mr,
1372 					 IGT_MMAP_MIGRATE_TOPDOWN |
1373 					 IGT_MMAP_MIGRATE_FILL |
1374 					 IGT_MMAP_MIGRATE_UNFAULTABLE);
1375 		if (err)
1376 			goto out_io_size;
1377 
1378 		/*
1379 		 * Allocate in the non-mappable portion, but force migrating to
1380 		 * the mappable portion on fault (LMEM -> LMEM). We then also
1381 		 * simulate a gpu error while moving the pages during the
1382 		 * fault, which should result in wedging the gpu and returning
1383 		 * SIGBUS from the fault handler, since we can't fall back to
1384 		 * memcpy.
1385 		 */
1386 		err = __igt_mmap_migrate(single, ARRAY_SIZE(single), mr,
1387 					 IGT_MMAP_MIGRATE_TOPDOWN |
1388 					 IGT_MMAP_MIGRATE_FILL |
1389 					 IGT_MMAP_MIGRATE_EVICTABLE |
1390 					 IGT_MMAP_MIGRATE_FAIL_GPU |
1391 					 IGT_MMAP_MIGRATE_UNFAULTABLE);
1392 out_io_size:
1393 		mr->io = saved_io;
1394 		i915_ttm_buddy_man_force_visible_size(man,
1395 						      resource_size(&mr->io) >> PAGE_SHIFT);
1396 		if (err)
1397 			return err;
1398 	}
1399 
1400 	return 0;
1401 }
1402 
1403 static const char *repr_mmap_type(enum i915_mmap_type type)
1404 {
1405 	switch (type) {
1406 	case I915_MMAP_TYPE_GTT: return "gtt";
1407 	case I915_MMAP_TYPE_WB: return "wb";
1408 	case I915_MMAP_TYPE_WC: return "wc";
1409 	case I915_MMAP_TYPE_UC: return "uc";
1410 	case I915_MMAP_TYPE_FIXED: return "fixed";
1411 	default: return "unknown";
1412 	}
1413 }
1414 
1415 static bool can_access(struct drm_i915_gem_object *obj)
1416 {
1417 	bool access;
1418 
1419 	i915_gem_object_lock(obj, NULL);
1420 	access = i915_gem_object_has_struct_page(obj) ||
1421 		i915_gem_object_has_iomem(obj);
1422 	i915_gem_object_unlock(obj);
1423 
1424 	return access;
1425 }
1426 
1427 static int __igt_mmap_access(struct drm_i915_private *i915,
1428 			     struct drm_i915_gem_object *obj,
1429 			     enum i915_mmap_type type)
1430 {
1431 	unsigned long __user *ptr;
1432 	unsigned long A, B;
1433 	unsigned long x, y;
1434 	unsigned long addr;
1435 	int err;
1436 	u64 offset;
1437 
1438 	memset(&A, 0xAA, sizeof(A));
1439 	memset(&B, 0xBB, sizeof(B));
1440 
1441 	if (!can_mmap(obj, type) || !can_access(obj))
1442 		return 0;
1443 
1444 	err = __assign_mmap_offset(obj, type, &offset, NULL);
1445 	if (err)
1446 		return err;
1447 
1448 	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
1449 	if (IS_ERR_VALUE(addr))
1450 		return addr;
1451 	ptr = (unsigned long __user *)addr;
1452 
1453 	err = __put_user(A, ptr);
1454 	if (err) {
1455 		pr_err("%s(%s): failed to write into user mmap\n",
1456 		       obj->mm.region->name, repr_mmap_type(type));
1457 		goto out_unmap;
1458 	}
1459 
1460 	intel_gt_flush_ggtt_writes(to_gt(i915));
1461 
1462 	err = access_process_vm(current, addr, &x, sizeof(x), 0);
1463 	if (err != sizeof(x)) {
1464 		pr_err("%s(%s): access_process_vm() read failed\n",
1465 		       obj->mm.region->name, repr_mmap_type(type));
1466 		goto out_unmap;
1467 	}
1468 
1469 	err = access_process_vm(current, addr, &B, sizeof(B), FOLL_WRITE);
1470 	if (err != sizeof(B)) {
1471 		pr_err("%s(%s): access_process_vm() write failed\n",
1472 		       obj->mm.region->name, repr_mmap_type(type));
1473 		goto out_unmap;
1474 	}
1475 
1476 	intel_gt_flush_ggtt_writes(to_gt(i915));
1477 
1478 	err = __get_user(y, ptr);
1479 	if (err) {
1480 		pr_err("%s(%s): failed to read from user mmap\n",
1481 		       obj->mm.region->name, repr_mmap_type(type));
1482 		goto out_unmap;
1483 	}
1484 
1485 	if (x != A || y != B) {
1486 		pr_err("%s(%s): failed to read/write values, found (%lx, %lx)\n",
1487 		       obj->mm.region->name, repr_mmap_type(type),
1488 		       x, y);
1489 		err = -EINVAL;
1490 		goto out_unmap;
1491 	}
1492 
1493 out_unmap:
1494 	vm_munmap(addr, obj->base.size);
1495 	return err;
1496 }
1497 
1498 static int igt_mmap_access(void *arg)
1499 {
1500 	struct drm_i915_private *i915 = arg;
1501 	struct intel_memory_region *mr;
1502 	enum intel_region_id id;
1503 
1504 	for_each_memory_region(mr, i915, id) {
1505 		struct drm_i915_gem_object *obj;
1506 		int err;
1507 
1508 		if (mr->private)
1509 			continue;
1510 
1511 		obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
1512 		if (obj == ERR_PTR(-ENODEV))
1513 			continue;
1514 
1515 		if (IS_ERR(obj))
1516 			return PTR_ERR(obj);
1517 
1518 		err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_GTT);
1519 		if (err == 0)
1520 			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WB);
1521 		if (err == 0)
1522 			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WC);
1523 		if (err == 0)
1524 			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_UC);
1525 		if (err == 0)
1526 			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_FIXED);
1527 
1528 		i915_gem_object_put(obj);
1529 		if (err)
1530 			return err;
1531 	}
1532 
1533 	return 0;
1534 }
1535 
1536 static int __igt_mmap_gpu(struct drm_i915_private *i915,
1537 			  struct drm_i915_gem_object *obj,
1538 			  enum i915_mmap_type type)
1539 {
1540 	struct intel_engine_cs *engine;
1541 	unsigned long addr;
1542 	u32 __user *ux;
1543 	u32 bbe;
1544 	int err;
1545 	u64 offset;
1546 
1547 	/*
1548 	 * Verify that the mmap access into the backing store aligns with
1549 	 * that of the GPU, i.e. that mmap is indeed writing into the same
1550 	 * page as is being read by the GPU.
1551 	 */
1552 
1553 	if (!can_mmap(obj, type))
1554 		return 0;
1555 
1556 	err = wc_set(obj);
1557 	if (err == -ENXIO)
1558 		err = gtt_set(obj);
1559 	if (err)
1560 		return err;
1561 
1562 	err = __assign_mmap_offset(obj, type, &offset, NULL);
1563 	if (err)
1564 		return err;
1565 
1566 	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
1567 	if (IS_ERR_VALUE(addr))
1568 		return addr;
1569 
1570 	ux = u64_to_user_ptr((u64)addr);
1571 	bbe = MI_BATCH_BUFFER_END;
1572 	if (put_user(bbe, ux)) {
1573 		pr_err("%s: Unable to write to mmap\n", obj->mm.region->name);
1574 		err = -EFAULT;
1575 		goto out_unmap;
1576 	}
1577 
1578 	if (type == I915_MMAP_TYPE_GTT)
1579 		intel_gt_flush_ggtt_writes(to_gt(i915));
1580 
1581 	for_each_uabi_engine(engine, i915) {
1582 		struct i915_request *rq;
1583 		struct i915_vma *vma;
1584 		struct i915_gem_ww_ctx ww;
1585 
1586 		vma = i915_vma_instance(obj, engine->kernel_context->vm, NULL);
1587 		if (IS_ERR(vma)) {
1588 			err = PTR_ERR(vma);
1589 			goto out_unmap;
1590 		}
1591 
1592 		i915_gem_ww_ctx_init(&ww, false);
1593 retry:
1594 		err = i915_gem_object_lock(obj, &ww);
1595 		if (!err)
1596 			err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
1597 		if (err)
1598 			goto out_ww;
1599 
1600 		rq = i915_request_create(engine->kernel_context);
1601 		if (IS_ERR(rq)) {
1602 			err = PTR_ERR(rq);
1603 			goto out_unpin;
1604 		}
1605 
1606 		err = i915_vma_move_to_active(vma, rq, 0);
1607 
1608 		err = engine->emit_bb_start(rq, i915_vma_offset(vma), 0, 0);
1609 		i915_request_get(rq);
1610 		i915_request_add(rq);
1611 
1612 		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
1613 			struct drm_printer p =
1614 				drm_info_printer(engine->i915->drm.dev);
1615 
1616 			pr_err("%s(%s, %s): Failed to execute batch\n",
1617 			       __func__, engine->name, obj->mm.region->name);
1618 			intel_engine_dump(engine, &p,
1619 					  "%s\n", engine->name);
1620 
1621 			intel_gt_set_wedged(engine->gt);
1622 			err = -EIO;
1623 		}
1624 		i915_request_put(rq);
1625 
1626 out_unpin:
1627 		i915_vma_unpin(vma);
1628 out_ww:
1629 		if (err == -EDEADLK) {
1630 			err = i915_gem_ww_ctx_backoff(&ww);
1631 			if (!err)
1632 				goto retry;
1633 		}
1634 		i915_gem_ww_ctx_fini(&ww);
1635 		if (err)
1636 			goto out_unmap;
1637 	}
1638 
1639 out_unmap:
1640 	vm_munmap(addr, obj->base.size);
1641 	return err;
1642 }
1643 
1644 static int igt_mmap_gpu(void *arg)
1645 {
1646 	struct drm_i915_private *i915 = arg;
1647 	struct intel_memory_region *mr;
1648 	enum intel_region_id id;
1649 
1650 	for_each_memory_region(mr, i915, id) {
1651 		struct drm_i915_gem_object *obj;
1652 		int err;
1653 
1654 		if (mr->private)
1655 			continue;
1656 
1657 		obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
1658 		if (obj == ERR_PTR(-ENODEV))
1659 			continue;
1660 
1661 		if (IS_ERR(obj))
1662 			return PTR_ERR(obj);
1663 
1664 		err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_GTT);
1665 		if (err == 0)
1666 			err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_WC);
1667 		if (err == 0)
1668 			err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_FIXED);
1669 
1670 		i915_gem_object_put(obj);
1671 		if (err)
1672 			return err;
1673 	}
1674 
1675 	return 0;
1676 }
1677 
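/*
 * apply_to_page_range() callbacks: after prefaulting, every PTE in the
 * mmap should be present; after revoking the mmap, every PTE should have
 * been cleared.
 */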
1678 static int check_present_pte(pte_t *pte, unsigned long addr, void *data)
1679 {
1680 	pte_t ptent = ptep_get(pte);
1681 
1682 	if (!pte_present(ptent) || pte_none(ptent)) {
1683 		pr_err("missing PTE:%lx\n",
1684 		       (addr - (unsigned long)data) >> PAGE_SHIFT);
1685 		return -EINVAL;
1686 	}
1687 
1688 	return 0;
1689 }
1690 
1691 static int check_absent_pte(pte_t *pte, unsigned long addr, void *data)
1692 {
1693 	pte_t ptent = ptep_get(pte);
1694 
1695 	if (pte_present(ptent) && !pte_none(ptent)) {
1696 		pr_err("present PTE:%lx; expected to be revoked\n",
1697 		       (addr - (unsigned long)data) >> PAGE_SHIFT);
1698 		return -EINVAL;
1699 	}
1700 
1701 	return 0;
1702 }
1703 
1704 static int check_present(unsigned long addr, unsigned long len)
1705 {
1706 	return apply_to_page_range(current->mm, addr, len,
1707 				   check_present_pte, (void *)addr);
1708 }
1709 
1710 static int check_absent(unsigned long addr, unsigned long len)
1711 {
1712 	return apply_to_page_range(current->mm, addr, len,
1713 				   check_absent_pte, (void *)addr);
1714 }
1715 
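/*
 * Touch one byte in every page of the range (plus the final byte) so the
 * whole mmap is faulted in before we inspect its PTEs.
 */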
1716 static int prefault_range(u64 start, u64 len)
1717 {
1718 	const char __user *addr, *end;
1719 	char __maybe_unused c;
1720 	int err;
1721 
1722 	addr = u64_to_user_ptr(start);
1723 	end = addr + len;
1724 
1725 	for (; addr < end; addr += PAGE_SIZE) {
1726 		err = __get_user(c, addr);
1727 		if (err)
1728 			return err;
1729 	}
1730 
1731 	return __get_user(c, end - 1);
1732 }
1733 
1734 static int __igt_mmap_revoke(struct drm_i915_private *i915,
1735 			     struct drm_i915_gem_object *obj,
1736 			     enum i915_mmap_type type)
1737 {
1738 	unsigned long addr;
1739 	int err;
1740 	u64 offset;
1741 
1742 	if (!can_mmap(obj, type))
1743 		return 0;
1744 
1745 	err = __assign_mmap_offset(obj, type, &offset, NULL);
1746 	if (err)
1747 		return err;
1748 
1749 	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
1750 	if (IS_ERR_VALUE(addr))
1751 		return addr;
1752 
1753 	err = prefault_range(addr, obj->base.size);
1754 	if (err)
1755 		goto out_unmap;
1756 
1757 	err = check_present(addr, obj->base.size);
1758 	if (err) {
1759 		pr_err("%s: was not present\n", obj->mm.region->name);
1760 		goto out_unmap;
1761 	}
1762 
1763 	/*
1764 	 * After unbinding the object from the GGTT, its address may be reused
1765 	 * for other objects. Ergo we have to revoke the previous mmap PTE
1766 	 * access as it no longer points to the same object.
1767 	 */
1768 	i915_gem_object_lock(obj, NULL);
1769 	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
1770 	i915_gem_object_unlock(obj);
1771 	if (err) {
1772 		pr_err("Failed to unbind object!\n");
1773 		goto out_unmap;
1774 	}
1775 
1776 	if (type != I915_MMAP_TYPE_GTT) {
1777 		i915_gem_object_lock(obj, NULL);
1778 		__i915_gem_object_put_pages(obj);
1779 		i915_gem_object_unlock(obj);
1780 		if (i915_gem_object_has_pages(obj)) {
1781 			pr_err("Failed to put-pages object!\n");
1782 			err = -EINVAL;
1783 			goto out_unmap;
1784 		}
1785 	}
1786 
1787 	err = check_absent(addr, obj->base.size);
1788 	if (err) {
1789 		pr_err("%s: was not absent\n", obj->mm.region->name);
1790 		goto out_unmap;
1791 	}
1792 
1793 out_unmap:
1794 	vm_munmap(addr, obj->base.size);
1795 	return err;
1796 }
1797 
1798 static int igt_mmap_revoke(void *arg)
1799 {
1800 	struct drm_i915_private *i915 = arg;
1801 	struct intel_memory_region *mr;
1802 	enum intel_region_id id;
1803 
1804 	for_each_memory_region(mr, i915, id) {
1805 		struct drm_i915_gem_object *obj;
1806 		int err;
1807 
1808 		if (mr->private)
1809 			continue;
1810 
1811 		obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
1812 		if (obj == ERR_PTR(-ENODEV))
1813 			continue;
1814 
1815 		if (IS_ERR(obj))
1816 			return PTR_ERR(obj);
1817 
1818 		err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_GTT);
1819 		if (err == 0)
1820 			err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_WC);
1821 		if (err == 0)
1822 			err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_FIXED);
1823 
1824 		i915_gem_object_put(obj);
1825 		if (err)
1826 			return err;
1827 	}
1828 
1829 	return 0;
1830 }
1831 
1832 int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
1833 {
1834 	int ret;
1835 	bool unuse_mm = false;
1836 	static const struct i915_subtest tests[] = {
1837 		SUBTEST(igt_partial_tiling),
1838 		SUBTEST(igt_smoke_tiling),
1839 		SUBTEST(igt_mmap_offset_exhaustion),
1840 		SUBTEST(igt_mmap),
1841 		SUBTEST(igt_mmap_migrate),
1842 		SUBTEST(igt_mmap_access),
1843 		SUBTEST(igt_mmap_revoke),
1844 		SUBTEST(igt_mmap_gpu),
1845 	};
1846 
1847 	if (!current->mm) {
1848 		kthread_use_mm(current->active_mm);
1849 		unuse_mm = true;
1850 	}
1851 
1852 	ret = i915_live_subtests(tests, i915);
1853 
1854 	if (unuse_mm)
1855 		kthread_unuse_mm(current->active_mm);
1856 
1857 	return ret;
1858 }
1859