// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */
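
/*
 * NUM_STEPS is the number of steps used when sweeping a frequency
 * softlimit across the RPn-RP0 range. delay_for_h2g() waits H2G_DELAY
 * microseconds so a host-to-GuC request has taken effect before the
 * test samples frequencies. FREQUENCY_REQ_UNIT is the granularity of
 * GuC frequency requests (multiples of 50/3 MHz), used as slack when
 * comparing a requested frequency against a softlimit.
 */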
#define NUM_STEPS 5
#define H2G_DELAY 50000
#define delay_for_h2g() usleep_range(H2G_DELAY, H2G_DELAY + 10000)
#define FREQUENCY_REQ_UNIT	DIV_ROUND_CLOSEST(GT_FREQUENCY_MULTIPLIER, \
						  GEN9_FREQ_SCALER)
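
/*
 * run_test() modes: sweep the min softlimit up (VARY_MIN), sweep the
 * max softlimit down (VARY_MAX), pin to RP0 and check that pcode
 * grants it (MAX_GRANTED), compare RAPL power draw at RPn vs RP0
 * (SLPC_POWER), or run the RP0 check concurrently on every GT
 * (TILE_INTERACTION).
 */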
enum test_type {
	VARY_MIN,
	VARY_MAX,
	MAX_GRANTED,
	SLPC_POWER,
	TILE_INTERACTION,
};
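
/*
 * Per-GT kthread worker context for the tile interaction test; result
 * holds the run_test() return value for that GT.
 */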
struct slpc_thread {
	struct kthread_worker *worker;
	struct kthread_work work;
	struct intel_gt *gt;
	int result;
};
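
/*
 * Thin wrappers around the SLPC set-param interface: on success, wait
 * for the H2G request to complete so that subsequent frequency reads
 * observe the new softlimit.
 */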
static int slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 freq)
{
	int ret;

	ret = intel_guc_slpc_set_min_freq(slpc, freq);
	if (ret)
		pr_err("Could not set min frequency to [%u]\n", freq);
	else /* Delay to ensure h2g completes */
		delay_for_h2g();

	return ret;
}

static int slpc_set_max_freq(struct intel_guc_slpc *slpc, u32 freq)
{
	int ret;

	ret = intel_guc_slpc_set_max_freq(slpc, freq);
	if (ret)
		pr_err("Could not set max frequency to [%u]\n", freq);
	else /* Delay to ensure h2g completes */
		delay_for_h2g();

	return ret;
}
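
/*
 * Pin SLPC to a single frequency by setting both softlimits to the
 * same value.
 */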
static int slpc_set_freq(struct intel_gt *gt, u32 freq)
{
	int err;
	struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;

	err = slpc_set_max_freq(slpc, freq);
	if (err) {
		pr_err("Unable to update max freq\n");
		return err;
	}

	err = slpc_set_min_freq(slpc, freq);
	if (err) {
		pr_err("Unable to update min freq\n");
		return err;
	}

	return 0;
}
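
/*
 * Undo the test setup: restore the saved min/max softlimits and
 * re-enable the efficient frequency.
 */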
static int slpc_restore_freq(struct intel_guc_slpc *slpc, u32 min, u32 max)
{
	int err;

	err = slpc_set_max_freq(slpc, max);
	if (err) {
		pr_err("Unable to restore max freq\n");
		return err;
	}

	err = slpc_set_min_freq(slpc, min);
	if (err) {
		pr_err("Unable to restore min freq\n");
		return err;
	}

	err = intel_guc_slpc_set_ignore_eff_freq(slpc, false);
	if (err) {
		pr_err("Unable to restore efficient freq\n");
		return err;
	}

	return 0;
}
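
/*
 * Pin to @freq, then write back the actual frequency reached and the
 * RAPL power measured at that frequency.
 */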
static int measure_power_at_freq(struct intel_gt *gt, int *freq, u64 *power)
{
	int err;

	err = slpc_set_freq(gt, *freq);
	if (err)
		return err;
	*freq = intel_rps_read_actual_frequency(&gt->rps);
	*power = measure_power(&gt->rps, freq);

	return 0;
}
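
/*
 * Lower the max softlimit from RP0 towards RPn in NUM_STEPS steps,
 * checking at each step that the GuC request does not exceed the limit
 * (within one request unit) and tracking the highest actual frequency
 * seen.
 */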
static int vary_max_freq(struct intel_guc_slpc *slpc, struct intel_rps *rps,
			 u32 *max_act_freq)
{
	u32 step, max_freq, req_freq;
	u32 act_freq;
	int err = 0;

	/* Go from max to min in NUM_STEPS steps */
	step = (slpc->rp0_freq - slpc->min_freq) / NUM_STEPS;
	*max_act_freq = slpc->min_freq;
	for (max_freq = slpc->rp0_freq; max_freq > slpc->min_freq;
				max_freq -= step) {
		err = slpc_set_max_freq(slpc, max_freq);
		if (err)
			break;

		req_freq = intel_rps_read_punit_req_frequency(rps);

		/* GuC requests freq in multiples of 50/3 MHz */
		if (req_freq > (max_freq + FREQUENCY_REQ_UNIT)) {
			pr_err("SWReq is %u, should be at most %u\n", req_freq,
			       max_freq + FREQUENCY_REQ_UNIT);
			err = -EINVAL;
		}

		act_freq = intel_rps_read_actual_frequency(rps);
		if (act_freq > *max_act_freq)
			*max_act_freq = act_freq;

		if (err)
			break;
	}

	return err;
}
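
/*
 * Raise the min softlimit from RPn towards RP0 in NUM_STEPS steps,
 * checking at each step that the GuC request does not fall below the
 * limit (within one request unit) and tracking the highest actual
 * frequency seen.
 */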
static int vary_min_freq(struct intel_guc_slpc *slpc, struct intel_rps *rps,
			 u32 *max_act_freq)
{
	u32 step, min_freq, req_freq;
	u32 act_freq;
	int err = 0;

	/* Go from min to max in NUM_STEPS steps */
	step = (slpc->rp0_freq - slpc->min_freq) / NUM_STEPS;
	*max_act_freq = slpc->min_freq;
	for (min_freq = slpc->min_freq; min_freq < slpc->rp0_freq;
				min_freq += step) {
		err = slpc_set_min_freq(slpc, min_freq);
		if (err)
			break;

		req_freq = intel_rps_read_punit_req_frequency(rps);

		/* GuC requests freq in multiples of 50/3 MHz */
		if (req_freq < (min_freq - FREQUENCY_REQ_UNIT)) {
			pr_err("SWReq is %u, should be at least %u\n", req_freq,
			       min_freq - FREQUENCY_REQ_UNIT);
			err = -EINVAL;
		}

		act_freq = intel_rps_read_actual_frequency(rps);
		if (act_freq > *max_act_freq)
			*max_act_freq = act_freq;

		if (err)
			break;
	}

	return err;
}
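
/*
 * Compare RAPL power draw at RPn and RP0: running at the lower
 * frequency must also draw less power.
 */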
static int slpc_power(struct intel_gt *gt, struct intel_engine_cs *engine)
{
	struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
	struct {
		u64 power;
		int freq;
	} min, max;
	int err = 0;

	/*
	 * Our fundamental assumption is that running at lower frequency
	 * actually saves power. Let's see if our RAPL measurement supports
	 * that theory.
	 */
	if (!librapl_supported(gt->i915))
		return 0;

	min.freq = slpc->min_freq;
	err = measure_power_at_freq(gt, &min.freq, &min.power);
	if (err)
		return err;

	max.freq = slpc->rp0_freq;
	err = measure_power_at_freq(gt, &max.freq, &max.power);
	if (err)
		return err;

	pr_info("%s: min:%llumW @ %uMHz, max:%llumW @ %uMHz\n",
		engine->name,
		min.power, min.freq,
		max.power, max.freq);

	if (10 * min.freq >= 9 * max.freq) {
		pr_notice("Could not control frequency, ran at [%uMHz, %uMHz]\n",
			  min.freq, max.freq);
	}

	if (11 * min.power > 10 * max.power) {
		pr_err("%s: did not conserve power when setting lower frequency!\n",
		       engine->name);
		err = -EINVAL;
	}

	/* Restore min/max frequencies */
	slpc_set_max_freq(slpc, slpc->rp0_freq);
	slpc_set_min_freq(slpc, slpc->min_freq);

	return err;
}
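
/*
 * Request RP0 by raising the min softlimit to it and verify that
 * either the actual frequency reaches RP0 or pcode reports a
 * throttling reason.
 */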
static int max_granted_freq(struct intel_guc_slpc *slpc, struct intel_rps *rps, u32 *max_act_freq)
{
	struct intel_gt *gt = rps_to_gt(rps);
	u32 perf_limit_reasons;
	int err = 0;

	err = slpc_set_min_freq(slpc, slpc->rp0_freq);
	if (err)
		return err;

	*max_act_freq = intel_rps_read_actual_frequency(rps);
	if (*max_act_freq != slpc->rp0_freq) {
		/* Check if there was some throttling by pcode */
		perf_limit_reasons = intel_uncore_read(gt->uncore,
						       intel_gt_perf_limit_reasons_reg(gt));

		/* If not, this is an error */
		if (!(perf_limit_reasons & GT0_PERF_LIMIT_REASONS_MASK)) {
			pr_err("Pcode did not grant max freq\n");
			err = -EINVAL;
		} else {
			pr_info("Pcode throttled frequency 0x%x\n", perf_limit_reasons);
		}
	}

	return err;
}
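
/*
 * Common test body: save the current softlimits, open up the full
 * RPn-RP0 range with the efficient frequency disabled, run a spinner
 * on each capable engine while exercising the requested test mode,
 * then restore the original limits.
 */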
static int run_test(struct intel_gt *gt, int test_type)
{
	struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
	struct intel_rps *rps = &gt->rps;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	struct igt_spinner spin;
	u32 slpc_min_freq, slpc_max_freq;
	int err = 0;
	int ret;

	if (!intel_uc_uses_guc_slpc(&gt->uc))
		return 0;

	if (slpc->min_freq == slpc->rp0_freq) {
		pr_err("Min/Max are fused to the same value\n");
		return -EINVAL;
	}

	if (igt_spinner_init(&spin, gt))
		return -ENOMEM;

	if (intel_guc_slpc_get_max_freq(slpc, &slpc_max_freq)) {
		pr_err("Could not get SLPC max freq\n");
		igt_spinner_fini(&spin);
		return -EIO;
	}

	if (intel_guc_slpc_get_min_freq(slpc, &slpc_min_freq)) {
		pr_err("Could not get SLPC min freq\n");
		igt_spinner_fini(&spin);
		return -EIO;
	}

	/*
	 * Set min frequency to RPn so that we can test the whole
	 * range of RPn-RP0.
	 */
	err = slpc_set_min_freq(slpc, slpc->min_freq);
	if (err) {
		pr_err("Unable to update min freq!\n");
		igt_spinner_fini(&spin);
		return err;
	}

	/*
	 * Turn off efficient frequency so RPn/RP0 ranges are obeyed.
	 */
	err = intel_guc_slpc_set_ignore_eff_freq(slpc, true);
	if (err) {
		pr_err("Unable to turn off efficient freq!\n");
		igt_spinner_fini(&spin);
		return err;
	}

	intel_gt_pm_wait_for_idle(gt);
	wakeref = intel_gt_pm_get(gt);
	for_each_engine(engine, gt, id) {
		struct i915_request *rq;
		u32 max_act_freq = 0;

		if (!intel_engine_can_store_dword(engine))
			continue;

		st_engine_heartbeat_disable(engine);

		rq = igt_spinner_create_request(&spin,
						engine->kernel_context,
						MI_NOOP);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			st_engine_heartbeat_enable(engine);
			break;
		}

		i915_request_add(rq);

		if (!igt_wait_for_spinner(&spin, rq)) {
			pr_err("%s: Spinner did not start\n",
			       engine->name);
			igt_spinner_end(&spin);
			st_engine_heartbeat_enable(engine);
			intel_gt_set_wedged(engine->gt);
			err = -EIO;
			break;
		}

		switch (test_type) {
		case VARY_MIN:
			err = vary_min_freq(slpc, rps, &max_act_freq);
			break;

		case VARY_MAX:
			err = vary_max_freq(slpc, rps, &max_act_freq);
			break;

		case MAX_GRANTED:
		case TILE_INTERACTION:
			/* Media engines have a different RP0 */
			if (gt->type != GT_MEDIA && (engine->class == VIDEO_DECODE_CLASS ||
						     engine->class == VIDEO_ENHANCEMENT_CLASS)) {
				igt_spinner_end(&spin);
				st_engine_heartbeat_enable(engine);
				err = 0;
				continue;
			}

			err = max_granted_freq(slpc, rps, &max_act_freq);
			break;

		case SLPC_POWER:
			err = slpc_power(gt, engine);
			break;
		}

		if (test_type != SLPC_POWER) {
			pr_info("Max actual frequency for %s was %u\n",
				engine->name, max_act_freq);

			/* Actual frequency should rise above min */
			if (max_act_freq <= slpc->min_freq) {
				pr_err("Actual freq did not rise above min\n");
				pr_err("Perf Limit Reasons: 0x%x\n",
				       intel_uncore_read(gt->uncore,
							 intel_gt_perf_limit_reasons_reg(gt)));
				err = -EINVAL;
			}
		}

		igt_spinner_end(&spin);
		st_engine_heartbeat_enable(engine);

		if (err)
			break;
	}

	/* Restore min/max/efficient frequencies, preserving any test error */
	ret = slpc_restore_freq(slpc, slpc_min_freq, slpc_max_freq);
	if (!err)
		err = ret;

	if (igt_flush_test(gt->i915))
		err = -EIO;

	intel_gt_pm_put(gt, wakeref);
	igt_spinner_fini(&spin);
	intel_gt_pm_wait_for_idle(gt);

	return err;
}
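
/* Check frequency scaling while sweeping the min softlimit up to RP0. */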
static int live_slpc_vary_min(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_gt *gt;
	unsigned int i;
	int ret = 0;

	for_each_gt(gt, i915, i) {
		ret = run_test(gt, VARY_MIN);
		if (ret)
			return ret;
	}

	return ret;
}
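
/* Check frequency scaling while sweeping the max softlimit down to RPn. */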
static int live_slpc_vary_max(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_gt *gt;
	unsigned int i;
	int ret = 0;

	for_each_gt(gt, i915, i) {
		ret = run_test(gt, VARY_MAX);
		if (ret)
			return ret;
	}

	return ret;
}

/* Check if pcode can grant RP0 */
static int live_slpc_max_granted(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_gt *gt;
	unsigned int i;
	int ret = 0;

	for_each_gt(gt, i915, i) {
		ret = run_test(gt, MAX_GRANTED);
		if (ret)
			return ret;
	}

	return ret;
}
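
/* Check that running at a lower frequency draws less power. */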
static int live_slpc_power(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_gt *gt;
	unsigned int i;
	int ret = 0;

	for_each_gt(gt, i915, i) {
		ret = run_test(gt, SLPC_POWER);
		if (ret)
			return ret;
	}

	return ret;
}
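
/* Worker body for the tile interaction test: one RP0 check per GT. */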
static void slpc_spinner_thread(struct kthread_work *work)
{
	struct slpc_thread *thread = container_of(work, typeof(*thread), work);

	thread->result = run_test(thread->gt, TILE_INTERACTION);
}
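
/*
 * Run the RP0 grant check on all GTs in parallel to verify that per-GT
 * frequency management does not interfere across tiles.
 */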
static int live_slpc_tile_interaction(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_gt *gt;
	struct slpc_thread *threads;
	int i = 0, ret = 0;

	threads = kcalloc(I915_MAX_GT, sizeof(*threads), GFP_KERNEL);
	if (!threads)
		return -ENOMEM;

	for_each_gt(gt, i915, i) {
		threads[i].worker = kthread_create_worker(0, "igt/slpc_parallel:%d", gt->info.id);
		if (IS_ERR(threads[i].worker)) {
			ret = PTR_ERR(threads[i].worker);
			break;
		}

		threads[i].gt = gt;
		kthread_init_work(&threads[i].work, slpc_spinner_thread);
		kthread_queue_work(threads[i].worker, &threads[i].work);
	}

	for_each_gt(gt, i915, i) {
		int status;

		if (IS_ERR_OR_NULL(threads[i].worker))
			continue;

		kthread_flush_work(&threads[i].work);
		status = READ_ONCE(threads[i].result);
		if (status && !ret) {
			pr_err("%s GT %d failed\n", __func__, gt->info.id);
			ret = status;
		}
		kthread_destroy_worker(threads[i].worker);
	}

	kfree(threads);
	return ret;
}
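
/*
 * Entry point for the SLPC live selftests; skipped entirely if any GT
 * is already wedged.
 */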
int intel_slpc_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_slpc_vary_max),
		SUBTEST(live_slpc_vary_min),
		SUBTEST(live_slpc_max_granted),
		SUBTEST(live_slpc_power),
		SUBTEST(live_slpc_tile_interaction),
	};

	struct intel_gt *gt;
	unsigned int i;

	for_each_gt(gt, i915, i) {
		if (intel_gt_is_wedged(gt))
			return 0;
	}

	return i915_live_subtests(tests, i915);
}