/* xref: /linux/drivers/gpu/drm/i915/gt/selftest_workarounds.c (revision 6fdcba32711044c35c0e1b094cbd8f3f0b4472c9) */
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "gem/i915_gem_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "i915_selftest.h"
#include "intel_reset.h"

#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/mock_drm.h"

#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"

static const struct wo_register {
	enum intel_platform platform;
	u32 reg;
} wo_registers[] = {
	{ INTEL_GEMINILAKE, 0x731c }
};

struct wa_lists {
	struct i915_wa_list gt_wa_list;
	struct {
		struct i915_wa_list wa_list;
		struct i915_wa_list ctx_wa_list;
	} engine[I915_NUM_ENGINES];
};

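/*
 * Submit the request and wait briefly for it to retire; flag -EIO if the
 * wait times out so callers treat a stuck request as a failure.
 */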
static int request_add_sync(struct i915_request *rq, int err)
{
	i915_request_get(rq);
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -EIO;
	i915_request_put(rq);

	return err;
}

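/* Submit the request and wait for the spinner to start executing on the GPU. */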
static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
{
	int err = 0;

	i915_request_get(rq);
	i915_request_add(rq);
	if (spin && !igt_wait_for_spinner(spin, rq))
		err = -ETIMEDOUT;
	i915_request_put(rq);

	return err;
}

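/*
 * Build reference copies of the GT, engine and context workaround lists so
 * they can later be compared against what the hardware reports after a reset.
 */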
static void
reference_lists_init(struct intel_gt *gt, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	memset(lists, 0, sizeof(*lists));

	wa_init_start(&lists->gt_wa_list, "GT_REF", "global");
	gt_init_workarounds(gt->i915, &lists->gt_wa_list);
	wa_init_finish(&lists->gt_wa_list);

	for_each_engine(engine, gt, id) {
		struct i915_wa_list *wal = &lists->engine[id].wa_list;

		wa_init_start(wal, "REF", engine->name);
		engine_init_workarounds(engine, wal);
		wa_init_finish(wal);

		__intel_engine_init_ctx_wa(engine,
					   &lists->engine[id].ctx_wa_list,
					   "CTX_REF");
	}
}

static void
reference_lists_fini(struct intel_gt *gt, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		intel_wa_list_free(&lists->engine[id].wa_list);

	intel_wa_list_free(&lists->gt_wa_list);
}

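/*
 * Use MI_STORE_REGISTER_MEM from a request on @engine to copy each
 * RING_FORCE_TO_NONPRIV slot into a freshly allocated buffer object,
 * which is returned for later inspection.
 */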
static struct drm_i915_gem_object *
read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
	const u32 base = engine->mmio_base;
	struct drm_i915_gem_object *result;
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 srm, *cs;
	int err;
	int i;

	result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(result))
		return result;

	i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);

	cs = i915_gem_object_pin_map(result, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_obj;
	}
	memset(cs, 0xc5, PAGE_SIZE);
	i915_gem_object_flush_map(result);
	i915_gem_object_unpin_map(result);

	vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err)
		goto err_obj;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_pin;
	}

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err)
		goto err_req;

	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	if (INTEL_GEN(ctx->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		*cs++ = srm;
		*cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
		*cs++ = 0;
	}
	intel_ring_advance(rq, cs);

	i915_request_add(rq);
	i915_vma_unpin(vma);

	return result;

err_req:
	i915_request_add(rq);
err_pin:
	i915_vma_unpin(vma);
err_obj:
	i915_gem_object_put(result);
	return ERR_PTR(err);
}

static u32
get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
{
	i915_reg_t reg = i < engine->whitelist.count ?
			 engine->whitelist.list[i].reg :
			 RING_NOPID(engine->mmio_base);

	return i915_mmio_reg_offset(reg);
}

static void
print_results(const struct intel_engine_cs *engine, const u32 *results)
{
	unsigned int i;

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = results[i];

		pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
			i, expected, actual);
	}
}

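/*
 * Read back every RING_NONPRIV slot and verify it matches the whitelist we
 * expect for this engine (unused slots should point at RING_NOPID).
 */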
static int check_whitelist(struct i915_gem_context *ctx,
			   struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *results;
	struct intel_wedge_me wedge;
	u32 *vaddr;
	int err;
	int i;

	results = read_nonprivs(ctx, engine);
	if (IS_ERR(results))
		return PTR_ERR(results);

	err = 0;
	i915_gem_object_lock(results);
	intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! */
		err = i915_gem_object_set_to_cpu_domain(results, false);
	i915_gem_object_unlock(results);
	if (intel_gt_is_wedged(engine->gt))
		err = -EIO;
	if (err)
		goto out_put;

	vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = vaddr[i];

		if (expected != actual) {
			print_results(engine, vaddr);
			pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
			       i, expected, actual);

			err = -EINVAL;
			break;
		}
	}

	i915_gem_object_unpin_map(results);
out_put:
	i915_gem_object_put(results);
	return err;
}

static int do_device_reset(struct intel_engine_cs *engine)
{
	intel_gt_reset(engine->gt, engine->mask, "live_workarounds");
	return 0;
}

static int do_engine_reset(struct intel_engine_cs *engine)
{
	return intel_engine_reset(engine, "live_workarounds");
}

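/*
 * Keep a spinner running on a throwaway kernel context so that the engine is
 * busy in a different context when the reset is triggered.
 */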
static int
switch_to_scratch_context(struct intel_engine_cs *engine,
			  struct igt_spinner *spin)
{
	struct i915_gem_context *ctx;
	struct intel_context *ce;
	struct i915_request *rq;
	int err = 0;

	ctx = kernel_context(engine->i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	GEM_BUG_ON(i915_gem_context_is_bannable(ctx));

	ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
	GEM_BUG_ON(IS_ERR(ce));

	rq = igt_spinner_create_request(spin, ce, MI_NOOP);

	intel_context_put(ce);

	if (IS_ERR(rq)) {
		spin = NULL;
		err = PTR_ERR(rq);
		goto err;
	}

	err = request_add_spin(rq, spin);
err:
	if (err && spin)
		igt_spinner_end(spin);

	kernel_context_close(ctx);
	return err;
}

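/*
 * Check the whitelist before and after the requested reset, both in the
 * original context and in a fresh context created afterwards.
 */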
static int check_whitelist_across_reset(struct intel_engine_cs *engine,
					int (*reset)(struct intel_engine_cs *),
					const char *name)
{
	struct drm_i915_private *i915 = engine->i915;
	struct i915_gem_context *ctx, *tmp;
	struct igt_spinner spin;
	intel_wakeref_t wakeref;
	int err;

	pr_info("Checking %d whitelisted registers on %s (RING_NONPRIV) [%s]\n",
		engine->whitelist.count, engine->name, name);

	ctx = kernel_context(i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	err = igt_spinner_init(&spin, engine->gt);
	if (err)
		goto out_ctx;

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Invalid whitelist *before* %s reset!\n", name);
		goto out_spin;
	}

	err = switch_to_scratch_context(engine, &spin);
	if (err)
		goto out_spin;

	with_intel_runtime_pm(engine->uncore->rpm, wakeref)
		err = reset(engine);

	igt_spinner_end(&spin);

	if (err) {
		pr_err("%s reset failed\n", name);
		goto out_spin;
	}

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Whitelist not preserved in context across %s reset!\n",
		       name);
		goto out_spin;
	}

	tmp = kernel_context(i915);
	if (IS_ERR(tmp)) {
		err = PTR_ERR(tmp);
		goto out_spin;
	}
	kernel_context_close(ctx);
	ctx = tmp;

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
		       name);
		goto out_spin;
	}

out_spin:
	igt_spinner_fini(&spin);
out_ctx:
	kernel_context_close(ctx);
	return err;
}

static struct i915_vma *create_batch(struct i915_gem_context *ctx)
{
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_internal(ctx->i915, 16 * PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vm = i915_gem_context_get_vm_rcu(ctx);
	vma = i915_vma_instance(obj, vm, NULL);
	i915_vm_put(vm);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err_obj;

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

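/*
 * Model how a write lands in a register with reserved bits: an rsvd value of
 * 0x0000ffff denotes a masked register, where the high word of the write
 * selects which bits of the low word are updated.
 */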
static u32 reg_write(u32 old, u32 new, u32 rsvd)
{
	if (rsvd == 0x0000ffff) {
		old &= ~(new >> 16);
		old |= new & (new >> 16);
	} else {
		old &= ~rsvd;
		old |= new & rsvd;
	}

	return old;
}

static bool wo_register(struct intel_engine_cs *engine, u32 reg)
{
	enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
	int i;

	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	     RING_FORCE_TO_NONPRIV_ACCESS_WR)
		return true;

	for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
		if (wo_registers[i].platform == platform &&
		    wo_registers[i].reg == reg)
			return true;
	}

	return false;
}

static bool ro_register(u32 reg)
{
	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	     RING_FORCE_TO_NONPRIV_ACCESS_RD)
		return true;

	return false;
}

static int whitelist_writable_count(struct intel_engine_cs *engine)
{
	int count = engine->whitelist.count;
	int i;

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))
			count--;
	}

	return count;
}

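/*
 * For each writable whitelisted register, write a series of bit patterns from
 * an unprivileged batch, store the value read back after every write, and
 * compare the results against the expected write behaviour.
 */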
static int check_dirty_whitelist(struct i915_gem_context *ctx,
				 struct intel_engine_cs *engine)
{
	const u32 values[] = {
		0x00000000,
		0x01010101,
		0x10100101,
		0x03030303,
		0x30300303,
		0x05050505,
		0x50500505,
		0x0f0f0f0f,
		0xf00ff00f,
		0x10101010,
		0xf0f01010,
		0x30303030,
		0xa0a03030,
		0x50505050,
		0xc0c05050,
		0xf0f0f0f0,
		0x11111111,
		0x33333333,
		0x55555555,
		0x0000ffff,
		0x00ff00ff,
		0xff0000ff,
		0xffff00ff,
		0xffffffff,
	};
	struct i915_address_space *vm;
	struct i915_vma *scratch;
	struct i915_vma *batch;
	int err = 0, i, v;
	u32 *cs, *results;

	vm = i915_gem_context_get_vm_rcu(ctx);
	scratch = create_scratch(vm, 2 * ARRAY_SIZE(values) + 1);
	i915_vm_put(vm);
	if (IS_ERR(scratch))
		return PTR_ERR(scratch);

	batch = create_batch(ctx);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_scratch;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
		u64 addr = scratch->node.start;
		struct i915_request *rq;
		u32 srm, lrm, rsvd;
		u32 expect;
		int idx;
		bool ro_reg;

		if (wo_register(engine, reg))
			continue;

		ro_reg = ro_register(reg);

		/* Clear non priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

		srm = MI_STORE_REGISTER_MEM;
		lrm = MI_LOAD_REGISTER_MEM;
		if (INTEL_GEN(ctx->i915) >= 8)
			lrm++, srm++;

		pr_debug("%s: Writing garbage to %x\n",
			 engine->name, reg);

		cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
		if (IS_ERR(cs)) {
			err = PTR_ERR(cs);
			goto out_batch;
		}

		/* SRM original */
		*cs++ = srm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = ~values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		GEM_BUG_ON(idx * sizeof(u32) > scratch->size);

		/* LRM original -- don't leave garbage in the context! */
		*cs++ = lrm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		*cs++ = MI_BATCH_BUFFER_END;

		i915_gem_object_flush_map(batch->obj);
		i915_gem_object_unpin_map(batch->obj);
		intel_gt_chipset_flush(engine->gt);

		rq = igt_request_alloc(ctx, engine);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_batch;
		}

		if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
			err = engine->emit_init_breadcrumb(rq);
			if (err)
				goto err_request;
		}

		i915_vma_lock(batch);
		err = i915_request_await_object(rq, batch->obj, false);
		if (err == 0)
			err = i915_vma_move_to_active(batch, rq, 0);
		i915_vma_unlock(batch);
		if (err)
			goto err_request;

		err = engine->emit_bb_start(rq,
					    batch->node.start, PAGE_SIZE,
					    0);
		if (err)
			goto err_request;

err_request:
		err = request_add_sync(rq, err);
		if (err) {
			pr_err("%s: Futzing %x timed out; cancelling test\n",
			       engine->name, reg);
			intel_gt_set_wedged(engine->gt);
			goto out_batch;
		}

		results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
		if (IS_ERR(results)) {
			err = PTR_ERR(results);
			goto out_batch;
		}

		GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
		if (!ro_reg) {
			/* detect write masking */
			rsvd = results[ARRAY_SIZE(values)];
			if (!rsvd) {
				pr_err("%s: Unable to write to whitelisted register %x\n",
				       engine->name, reg);
				err = -EINVAL;
				goto out_unpin;
			}
		}

		expect = results[0];
		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			if (ro_reg)
				expect = results[0];
			else
				expect = reg_write(expect, values[v], rsvd);

			if (results[idx] != expect)
				err++;
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			if (ro_reg)
				expect = results[0];
			else
				expect = reg_write(expect, ~values[v], rsvd);

			if (results[idx] != expect)
				err++;
			idx++;
		}
		if (err) {
			pr_err("%s: %d mismatches between values written to whitelisted register [%x] and values read back!\n",
			       engine->name, err, reg);

			if (ro_reg)
				pr_info("%s: Whitelisted read-only register: %x, original value %08x\n",
					engine->name, reg, results[0]);
			else
				pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
					engine->name, reg, results[0], rsvd);

			expect = results[0];
			idx = 1;
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = values[v];

				if (ro_reg)
					expect = results[0];
				else
					expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = ~values[v];

				if (ro_reg)
					expect = results[0];
				else
					expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}

			err = -EINVAL;
		}
out_unpin:
		i915_gem_object_unpin_map(scratch->obj);
		if (err)
			break;
	}

	if (igt_flush_test(ctx->i915))
		err = -EIO;
out_batch:
	i915_vma_unpin_and_release(&batch, 0);
out_scratch:
	i915_vma_unpin_and_release(&scratch, 0);
	return err;
}

static int live_dirty_whitelist(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	struct drm_file *file;
	int err = 0;

	/* Can the user write to the whitelisted registers? */

	if (INTEL_GEN(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */
		return 0;

	file = mock_file(gt->i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = live_context(gt->i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_file;
	}

	for_each_engine(engine, gt, id) {
		if (engine->whitelist.count == 0)
			continue;

		err = check_dirty_whitelist(ctx, engine);
		if (err)
			goto out_file;
	}

out_file:
	mock_file_free(gt->i915, file);
	return err;
}

static int live_reset_whitelist(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/* If we reset the gpu, we should not lose the RING_NONPRIV */
	igt_global_reset_lock(gt);

	for_each_engine(engine, gt, id) {
		if (engine->whitelist.count == 0)
			continue;

		if (intel_has_reset_engine(gt)) {
			err = check_whitelist_across_reset(engine,
							   do_engine_reset,
							   "engine");
			if (err)
				goto out;
		}

		if (intel_has_gpu_reset(gt)) {
			err = check_whitelist_across_reset(engine,
							   do_device_reset,
							   "device");
			if (err)
				goto out;
		}
	}

out:
	igt_global_reset_unlock(gt);
	return err;
}

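/*
 * From within @ctx, use MI_STORE_REGISTER_MEM to copy every whitelisted
 * register for @engine into the @results vma.
 */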
static int read_whitelisted_registers(struct i915_gem_context *ctx,
				      struct intel_engine_cs *engine,
				      struct i915_vma *results)
{
	struct i915_request *rq;
	int i, err = 0;
	u32 srm, *cs;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_vma_lock(results);
	err = i915_request_await_object(rq, results->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(results);
	if (err)
		goto err_req;

	srm = MI_STORE_REGISTER_MEM;
	if (INTEL_GEN(ctx->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u64 offset = results->node.start + sizeof(u32) * i;
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		/* Clear non priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

		*cs++ = srm;
		*cs++ = reg;
		*cs++ = lower_32_bits(offset);
		*cs++ = upper_32_bits(offset);
	}
	intel_ring_advance(rq, cs);

err_req:
	return request_add_sync(rq, err);
}

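/*
 * From an unprivileged user batch in @ctx, write 0xffffffff to every writable
 * whitelisted register on @engine.
 */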
static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
				       struct intel_engine_cs *engine)
{
	struct i915_request *rq;
	struct i915_vma *batch;
	int i, err = 0;
	u32 *cs;

	batch = create_batch(ctx);
	if (IS_ERR(batch))
		return PTR_ERR(batch);

	cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_batch;
	}

	*cs++ = MI_LOAD_REGISTER_IMM(whitelist_writable_count(engine));
	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))
			continue;

		/* Clear non priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

		*cs++ = reg;
		*cs++ = 0xffffffff;
	}
	*cs++ = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(batch->obj);
	intel_gt_chipset_flush(engine->gt);

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
		err = engine->emit_init_breadcrumb(rq);
		if (err)
			goto err_request;
	}

	i915_vma_lock(batch);
	err = i915_request_await_object(rq, batch->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(batch, rq, 0);
	i915_vma_unlock(batch);
	if (err)
		goto err_request;

	/* Perform the writes from an unprivileged "user" batch */
	err = engine->emit_bb_start(rq, batch->node.start, 0, 0);

err_request:
	err = request_add_sync(rq, err);

err_unpin:
	i915_gem_object_unpin_map(batch->obj);
err_batch:
	i915_vma_unpin_and_release(&batch, 0);
	return err;
}

struct regmask {
	i915_reg_t reg;
	unsigned long gen_mask;
};

static bool find_reg(struct drm_i915_private *i915,
		     i915_reg_t reg,
		     const struct regmask *tbl,
		     unsigned long count)
{
	u32 offset = i915_mmio_reg_offset(reg);

	while (count--) {
		if (INTEL_INFO(i915)->gen_mask & tbl->gen_mask &&
		    i915_mmio_reg_offset(tbl->reg) == offset)
			return true;
		tbl++;
	}

	return false;
}

static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Alas, we must pardon some whitelists. Mistakes already made */
	static const struct regmask pardon[] = {
		{ GEN9_CTX_PREEMPT_REG, INTEL_GEN_MASK(9, 9) },
		{ GEN8_L3SQCREG4, INTEL_GEN_MASK(9, 9) },
	};

	return find_reg(i915, reg, pardon, ARRAY_SIZE(pardon));
}

static bool result_eq(struct intel_engine_cs *engine,
		      u32 a, u32 b, i915_reg_t reg)
{
	if (a != b && !pardon_reg(engine->i915, reg)) {
		pr_err("Whitelisted register 0x%4x not context saved: A=%08x, B=%08x\n",
		       i915_mmio_reg_offset(reg), a, b);
		return false;
	}

	return true;
}

static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Some registers do not seem to behave and our writes are unreadable */
	static const struct regmask wo[] = {
		{ GEN9_SLICE_COMMON_ECO_CHICKEN1, INTEL_GEN_MASK(9, 9) },
	};

	return find_reg(i915, reg, wo, ARRAY_SIZE(wo));
}

static bool result_neq(struct intel_engine_cs *engine,
		       u32 a, u32 b, i915_reg_t reg)
{
	if (a == b && !writeonly_reg(engine->i915, reg)) {
		pr_err("Whitelist register 0x%4x:%08x was unwritable\n",
		       i915_mmio_reg_offset(reg), a);
		return false;
	}

	return true;
}

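/*
 * Compare the two sets of captured register values slot by slot using the
 * supplied predicate, skipping read-only entries.
 */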
static int
check_whitelisted_registers(struct intel_engine_cs *engine,
			    struct i915_vma *A,
			    struct i915_vma *B,
			    bool (*fn)(struct intel_engine_cs *engine,
				       u32 a, u32 b,
				       i915_reg_t reg))
{
	u32 *a, *b;
	int i, err;

	a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
	if (IS_ERR(a))
		return PTR_ERR(a);

	b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
	if (IS_ERR(b)) {
		err = PTR_ERR(b);
		goto err_a;
	}

	err = 0;
	for (i = 0; i < engine->whitelist.count; i++) {
		const struct i915_wa *wa = &engine->whitelist.list[i];

		if (i915_mmio_reg_offset(wa->reg) &
		    RING_FORCE_TO_NONPRIV_ACCESS_RD)
			continue;

		if (!fn(engine, a[i], b[i], wa->reg))
			err = -EINVAL;
	}

	i915_gem_object_unpin_map(B->obj);
err_a:
	i915_gem_object_unpin_map(A->obj);
	return err;
}

static int live_isolated_whitelist(void *arg)
{
	struct intel_gt *gt = arg;
	struct {
		struct i915_gem_context *ctx;
		struct i915_vma *scratch[2];
	} client[2] = {};
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i, err = 0;

	/*
	 * Check that a write into a whitelist register works, but is
	 * invisible to a second context.
	 */

	if (!intel_engines_has_context_isolation(gt->i915))
		return 0;

	for (i = 0; i < ARRAY_SIZE(client); i++) {
		struct i915_address_space *vm;
		struct i915_gem_context *c;

		c = kernel_context(gt->i915);
		if (IS_ERR(c)) {
			err = PTR_ERR(c);
			goto err;
		}

		vm = i915_gem_context_get_vm_rcu(c);

		client[i].scratch[0] = create_scratch(vm, 1024);
		if (IS_ERR(client[i].scratch[0])) {
			err = PTR_ERR(client[i].scratch[0]);
			i915_vm_put(vm);
			kernel_context_close(c);
			goto err;
		}

		client[i].scratch[1] = create_scratch(vm, 1024);
		if (IS_ERR(client[i].scratch[1])) {
			err = PTR_ERR(client[i].scratch[1]);
			i915_vma_unpin_and_release(&client[i].scratch[0], 0);
			i915_vm_put(vm);
			kernel_context_close(c);
			goto err;
		}

		client[i].ctx = c;
		i915_vm_put(vm);
	}

	for_each_engine(engine, gt, id) {
		if (!engine->kernel_context->vm)
			continue;

		if (!whitelist_writable_count(engine))
			continue;

		/* Read default values */
		err = read_whitelisted_registers(client[0].ctx, engine,
						 client[0].scratch[0]);
		if (err)
			goto err;

		/* Try to overwrite registers (should only affect ctx0) */
		err = scrub_whitelisted_registers(client[0].ctx, engine);
		if (err)
			goto err;

		/* Read values from ctx1; we expect these to be defaults */
		err = read_whitelisted_registers(client[1].ctx, engine,
						 client[1].scratch[0]);
		if (err)
			goto err;

		/* Verify that both reads return the same default values */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[1].scratch[0],
						  result_eq);
		if (err)
			goto err;

		/* Read back the updated values in ctx0 */
		err = read_whitelisted_registers(client[0].ctx, engine,
						 client[0].scratch[1]);
		if (err)
			goto err;

		/* User should be granted privilege to overwrite regs */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[0].scratch[1],
						  result_neq);
		if (err)
			goto err;
	}

err:
	for (i = 0; i < ARRAY_SIZE(client); i++) {
		if (!client[i].ctx)
			break;

		i915_vma_unpin_and_release(&client[i].scratch[1], 0);
		i915_vma_unpin_and_release(&client[i].scratch[0], 0);
		kernel_context_close(client[i].ctx);
	}

	if (igt_flush_test(gt->i915))
		err = -EIO;

	return err;
}

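/*
 * Check that the GT and per-engine workaround lists still match the reference
 * copies captured by reference_lists_init().
 */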
static bool
verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
		const char *str)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	bool ok = true;

	ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);

	for_each_gem_engine(ce, i915_gem_context_engines(ctx), it) {
		enum intel_engine_id id = ce->engine->id;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].wa_list,
					    str) == 0;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].ctx_wa_list,
					    str) == 0;
	}

	return ok;
}

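/* Verify that the workarounds are still applied after a full GPU reset. */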
static int
live_gpu_reset_workarounds(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_gem_context *ctx;
	intel_wakeref_t wakeref;
	struct wa_lists lists;
	bool ok;

	if (!intel_has_gpu_reset(gt))
		return 0;

	ctx = kernel_context(gt->i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	i915_gem_context_lock_engines(ctx);

	pr_info("Verifying after GPU reset...\n");

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	reference_lists_init(gt, &lists);

	ok = verify_wa_lists(ctx, &lists, "before reset");
	if (!ok)
		goto out;

	intel_gt_reset(gt, ALL_ENGINES, "live_workarounds");

	ok = verify_wa_lists(ctx, &lists, "after reset");

out:
	i915_gem_context_unlock_engines(ctx);
	kernel_context_close(ctx);
	reference_lists_fini(gt, &lists);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);

	return ok ? 0 : -ESRCH;
}

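/*
 * Verify that the workarounds survive per-engine resets, both while the
 * engine is idle and while it is busy running a spinner.
 */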
static int
live_engine_reset_workarounds(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_gem_engines_iter it;
	struct i915_gem_context *ctx;
	struct intel_context *ce;
	struct igt_spinner spin;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	struct wa_lists lists;
	int ret = 0;

	if (!intel_has_reset_engine(gt))
		return 0;

	ctx = kernel_context(gt->i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	reference_lists_init(gt, &lists);

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		struct intel_engine_cs *engine = ce->engine;
		bool ok;

		pr_info("Verifying after %s reset...\n", engine->name);

		ok = verify_wa_lists(ctx, &lists, "before reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}

		intel_engine_reset(engine, "live_workarounds");

		ok = verify_wa_lists(ctx, &lists, "after idle reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}

		ret = igt_spinner_init(&spin, engine->gt);
		if (ret)
			goto err;

		rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			igt_spinner_fini(&spin);
			goto err;
		}

		ret = request_add_spin(rq, &spin);
		if (ret) {
			pr_err("Spinner failed to start\n");
			igt_spinner_fini(&spin);
			goto err;
		}

		intel_engine_reset(engine, "live_workarounds");

		igt_spinner_end(&spin);
		igt_spinner_fini(&spin);

		ok = verify_wa_lists(ctx, &lists, "after busy reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}
	}
err:
	i915_gem_context_unlock_engines(ctx);
	reference_lists_fini(gt, &lists);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);
	kernel_context_close(ctx);

	igt_flush_test(gt->i915);

	return ret;
}

int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_dirty_whitelist),
		SUBTEST(live_reset_whitelist),
		SUBTEST(live_isolated_whitelist),
		SUBTEST(live_gpu_reset_workarounds),
		SUBTEST(live_engine_reset_workarounds),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return intel_gt_live_subtests(tests, &i915->gt);
}