// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
#include "i915_selftest.h"

#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/intel_scheduler_helpers.h"

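/*
 * Fixture shared by the subtests: the platform's expected MOCS settings,
 * with @mocs/@l3cc aliasing @table only if the platform exposes those
 * register sets, plus a scratch page pinned in the GGTT (CPU-mapped at
 * @vaddr) into which the live register values are copied back for checking.
 */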
struct live_mocs {
	struct drm_i915_mocs_table table;
	struct drm_i915_mocs_table *mocs;
	struct drm_i915_mocs_table *l3cc;
	struct i915_vma *scratch;
	void *vaddr;
};

static struct intel_context *mocs_context_create(struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return ce;

	/* We build large requests to read the registers from the ring */
	ce->ring_size = SZ_16K;

	return ce;
}

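/*
 * Submit @rq and synchronously wait (up to 1/5s) for it to complete,
 * folding a timeout into any error already carried in @err.
 */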
static int request_add_sync(struct i915_request *rq, int err)
{
	i915_request_get(rq);
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -ETIME;
	i915_request_put(rq);

	return err;
}

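/*
 * Submit @rq and wait for the spinner to start executing on the GPU,
 * rather than for the request to complete.
 */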
static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
{
	int err = 0;

	i915_request_get(rq);
	i915_request_add(rq);
	if (spin && !igt_wait_for_spinner(spin, rq))
		err = -ETIME;
	i915_request_put(rq);

	return err;
}

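/*
 * Look up the platform's expected MOCS settings, note which register sets
 * (engine/global MOCS, render L3CC) it provides, and pin a scratch page in
 * the GGTT, CPU-mapped write-back, to receive the register readback.
 */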
static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt)
{
	unsigned int flags;
	int err;

	memset(arg, 0, sizeof(*arg));

	flags = get_mocs_settings(gt->i915, &arg->table);
	if (!flags)
		return -EINVAL;

	if (flags & HAS_RENDER_L3CC)
		arg->l3cc = &arg->table;

	if (flags & (HAS_GLOBAL_MOCS | HAS_ENGINE_MOCS))
		arg->mocs = &arg->table;

	arg->scratch =
		__vm_create_scratch_for_read_pinned(&gt->ggtt->vm, PAGE_SIZE);
	if (IS_ERR(arg->scratch))
		return PTR_ERR(arg->scratch);

	arg->vaddr = i915_gem_object_pin_map_unlocked(arg->scratch->obj, I915_MAP_WB);
	if (IS_ERR(arg->vaddr)) {
		err = PTR_ERR(arg->vaddr);
		goto err_scratch;
	}

	return 0;

err_scratch:
	i915_vma_unpin_and_release(&arg->scratch, 0);
	return err;
}

static void live_mocs_fini(struct live_mocs *arg)
{
	i915_vma_unpin_and_release(&arg->scratch, I915_VMA_RELEASE_MAP);
}

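/*
 * Emit @count SRM (MI_STORE_REGISTER_MEM) commands into the ring, copying
 * consecutive dword registers starting at @addr into the scratch buffer at
 * GGTT offset *@offset, advancing *@offset past the values written.
 */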
static int read_regs(struct i915_request *rq,
		     u32 addr, unsigned int count,
		     u32 *offset)
{
	unsigned int i;
	u32 *cs;

	GEM_BUG_ON(!IS_ALIGNED(*offset, sizeof(u32)));

	cs = intel_ring_begin(rq, 4 * count);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	for (i = 0; i < count; i++) {
		*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
		*cs++ = addr;
		*cs++ = *offset;
		*cs++ = 0;

		addr += sizeof(u32);
		*offset += sizeof(u32);
	}

	intel_ring_advance(rq, cs);

	return 0;
}

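/*
 * Read back the MOCS control registers, using either the single global
 * table or the per-engine copy, depending on the platform.
 */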
static int read_mocs_table(struct i915_request *rq,
			   const struct drm_i915_mocs_table *table,
			   u32 *offset)
{
	u32 addr;

	if (!table)
		return 0;

	if (HAS_GLOBAL_MOCS_REGISTERS(rq->engine->i915))
		addr = global_mocs_offset();
	else
		addr = mocs_offset(rq->engine);

	return read_regs(rq, addr, table->n_entries, offset);
}

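/*
 * The L3CC values are 16 bits wide and packed two per LNCFCMOCS register,
 * so only half as many dword registers need to be read back.
 */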
static int read_l3cc_table(struct i915_request *rq,
			   const struct drm_i915_mocs_table *table,
			   u32 *offset)
{
	u32 addr = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));

	if (!table)
		return 0;

	return read_regs(rq, addr, (table->n_entries + 1) / 2, offset);
}

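/* Compare each dword read back against the expected MOCS control value. */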
static int check_mocs_table(struct intel_engine_cs *engine,
			    const struct drm_i915_mocs_table *table,
			    u32 **vaddr)
{
	unsigned int i;
	u32 expect;

	if (!table)
		return 0;

	for_each_mocs(expect, table, i) {
		if (**vaddr != expect) {
			pr_err("%s: Invalid MOCS[%d] entry, found %08x, expected %08x\n",
			       engine->name, i, **vaddr, expect);
			return -EINVAL;
		}
		++*vaddr;
	}

	return 0;
}

static bool mcr_range(struct drm_i915_private *i915, u32 offset)
{
	/*
	 * Registers in this range are affected by the MCR selector
	 * which only controls CPU-initiated MMIO. Routing does not
	 * work for CS access so we cannot verify them on this path.
	 */
	return GRAPHICS_VER(i915) >= 8 && offset >= 0xb000 && offset <= 0xb4ff;
}

static int check_l3cc_table(struct intel_engine_cs *engine,
			    const struct drm_i915_mocs_table *table,
			    u32 **vaddr)
{
	/* Can we read the MCR range 0xb000 directly? See intel_workarounds! */
	u32 reg = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));
	unsigned int i;
	u32 expect;

	if (!table)
		return 0;

	for_each_l3cc(expect, table, i) {
		if (!mcr_range(engine->i915, reg) && **vaddr != expect) {
			pr_err("%s: Invalid L3CC[%d] entry, found %08x, expected %08x\n",
			       engine->name, i, **vaddr, expect);
			return -EINVAL;
		}
		++*vaddr;
		reg += 4;
	}

	return 0;
}

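/*
 * Poison the scratch page with STACK_MAGIC, submit a single request that
 * stores the MOCS (and, on the render engine, L3CC) registers into it,
 * then compare the CPU-visible results against the expected tables. The
 * final assertion checks that exactly as many dwords were verified as
 * were written back.
 */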
static int check_mocs_engine(struct live_mocs *arg,
			     struct intel_context *ce)
{
	struct i915_vma *vma = arg->scratch;
	struct i915_request *rq;
	u32 offset;
	u32 *vaddr;
	int err;

	memset32(arg->vaddr, STACK_MAGIC, PAGE_SIZE / sizeof(u32));

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	err = igt_vma_move_to_active_unlocked(vma, rq, EXEC_OBJECT_WRITE);

	/* Read the mocs tables back using SRM */
	offset = i915_ggtt_offset(vma);
	if (!err)
		err = read_mocs_table(rq, arg->mocs, &offset);
	if (!err && ce->engine->class == RENDER_CLASS)
		err = read_l3cc_table(rq, arg->l3cc, &offset);
	offset -= i915_ggtt_offset(vma);
	GEM_BUG_ON(offset > PAGE_SIZE);

	err = request_add_sync(rq, err);
	if (err)
		return err;

	/* Compare the results against the expected tables */
	vaddr = arg->vaddr;
	if (!err)
		err = check_mocs_table(ce->engine, arg->mocs, &vaddr);
	if (!err && ce->engine->class == RENDER_CLASS)
		err = check_l3cc_table(ce->engine, arg->l3cc, &vaddr);
	if (err)
		return err;

	GEM_BUG_ON(arg->vaddr + offset != vaddr);
	return 0;
}

static int live_mocs_kernel(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct live_mocs mocs;
	int err;

	/* Basic check that the system is configured with the expected mocs table */

	err = live_mocs_init(&mocs, gt);
	if (err)
		return err;

	for_each_engine(engine, gt, id) {
		intel_engine_pm_get(engine);
		err = check_mocs_engine(&mocs, engine->kernel_context);
		intel_engine_pm_put(engine);
		if (err)
			break;
	}

	live_mocs_fini(&mocs);
	return err;
}

static int live_mocs_clean(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct live_mocs mocs;
	int err;

	/* Every new context should see the same mocs table */

	err = live_mocs_init(&mocs, gt);
	if (err)
		return err;

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;

		ce = mocs_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			break;
		}

		err = check_mocs_engine(&mocs, ce);
		intel_context_put(ce);
		if (err)
			break;
	}

	live_mocs_fini(&mocs);
	return err;
}

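/*
 * Reset the engine while it is busy executing a spinner. Without GuC
 * submission the reset is triggered directly; with GuC the hang is left
 * for the GuC to detect and reset on its own (the caller has already
 * switched the engine to a fast-reset scheduling policy). Either way,
 * wait for the spinning request to be killed before returning.
 */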
static int active_engine_reset(struct intel_context *ce,
			       const char *reason,
			       bool using_guc)
{
	struct igt_spinner spin;
	struct i915_request *rq;
	int err;

	err = igt_spinner_init(&spin, ce->engine->gt);
	if (err)
		return err;

	rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
	if (IS_ERR(rq)) {
		igt_spinner_fini(&spin);
		return PTR_ERR(rq);
	}

	err = request_add_spin(rq, &spin);
	if (err == 0 && !using_guc)
		err = intel_engine_reset(ce->engine, reason);

	/* Ensure the reset happens and kills the engine */
	if (err == 0)
		err = intel_selftest_wait_for_rq(rq);

	igt_spinner_end(&spin);
	igt_spinner_fini(&spin);

	return err;
}

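/*
 * Verify the MOCS programming survives each flavour of reset available:
 * an idle engine reset (skipped under GuC submission), a reset while the
 * engine is actively spinning, and finally a full GT reset.
 */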
static int __live_mocs_reset(struct live_mocs *mocs,
			     struct intel_context *ce, bool using_guc)
{
	struct intel_gt *gt = ce->engine->gt;
	int err;

	if (intel_has_reset_engine(gt)) {
		if (!using_guc) {
			err = intel_engine_reset(ce->engine, "mocs");
			if (err)
				return err;

			err = check_mocs_engine(mocs, ce);
			if (err)
				return err;
		}

		err = active_engine_reset(ce, "mocs", using_guc);
		if (err)
			return err;

		err = check_mocs_engine(mocs, ce);
		if (err)
			return err;
	}

	if (intel_has_gpu_reset(gt)) {
		intel_gt_reset(gt, ce->engine->mask, "mocs");

		err = check_mocs_engine(mocs, ce);
		if (err)
			return err;
	}

	return 0;
}

static int live_mocs_reset(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct live_mocs mocs;
	int err = 0;

	/* Check that the mocs setup is retained over per-engine and global resets */

	err = live_mocs_init(&mocs, gt);
	if (err)
		return err;

	igt_global_reset_lock(gt);
	for_each_engine(engine, gt, id) {
		bool using_guc = intel_engine_uses_guc(engine);
		struct intel_selftest_saved_policy saved;
		struct intel_context *ce;
		int err2;

		err = intel_selftest_modify_policy(engine, &saved,
						   SELFTEST_SCHEDULER_MODIFY_FAST_RESET);
		if (err)
			break;

		ce = mocs_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			goto restore;
		}

		intel_engine_pm_get(engine);

		err = __live_mocs_reset(&mocs, ce, using_guc);

		intel_engine_pm_put(engine);
		intel_context_put(ce);

restore:
		err2 = intel_selftest_restore_policy(engine, &saved);
		if (err == 0)
			err = err2;
		if (err)
			break;
	}
	igt_global_reset_unlock(gt);

	live_mocs_fini(&mocs);
	return err;
}

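/*
 * Entry point for the i915 live selftest runner; skip quietly on
 * platforms without a MOCS table.
 */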
int intel_mocs_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_mocs_kernel),
		SUBTEST(live_mocs_clean),
		SUBTEST(live_mocs_reset),
	};
	struct drm_i915_mocs_table table;

	if (!get_mocs_settings(i915, &table))
		return 0;

	return intel_gt_live_subtests(tests, to_gt(i915));
}
453