// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/sort.h>

#include "intel_engine_regs.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_pm.h"
#include "intel_rc6.h"

#include "selftest_llc.h"
#include "selftest_rc6.h"
#include "selftest_rps.h"

static int cmp_u64(const void *A, const void *B)
{
	const u64 *a = A, *b = B;

	if (*a < *b)
		return -1;
	else if (*a > *b)
		return 1;
	else
		return 0;
}

static int cmp_u32(const void *A, const void *B)
{
	const u32 *a = A, *b = B;

	if (*a < *b)
		return -1;
	else if (*a > *b)
		return 1;
	else
		return 0;
}

static u32 read_timestamp(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	/* On i965 the first read tends to give a stale value */
	ENGINE_READ_FW(engine, RING_TIMESTAMP);

	if (GRAPHICS_VER(i915) == 5 || IS_G4X(i915))
		return ENGINE_READ_FW(engine, RING_TIMESTAMP_UDW);
	else
		return ENGINE_READ_FW(engine, RING_TIMESTAMP);
}

static void measure_clocks(struct intel_engine_cs *engine,
			   u32 *out_cycles, ktime_t *out_dt)
{
	ktime_t dt[5];
	u32 cycles[5];
	int i;

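	/*
	 * Take five back-to-back samples of the CS timestamp delta and the
	 * ktime delta over the same ~1ms window, with interrupts disabled
	 * so the two clocks are read together.
	 */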
	for (i = 0; i < 5; i++) {
		local_irq_disable();
		cycles[i] = -read_timestamp(engine);
		dt[i] = ktime_get();

		udelay(1000);

		cycles[i] += read_timestamp(engine);
		dt[i] = ktime_sub(ktime_get(), dt[i]);
		local_irq_enable();
	}

	/*
	 * Use the median of both cycle/dt; close enough. Sort the samples,
	 * discard the fastest and slowest, and take a weighted average of
	 * the middle three with the median counted twice.
	 */
	sort(cycles, 5, sizeof(*cycles), cmp_u32, NULL);
	*out_cycles = (cycles[1] + 2 * cycles[2] + cycles[3]) / 4;

	sort(dt, 5, sizeof(*dt), cmp_u64, NULL);
	*out_dt = div_u64(dt[1] + 2 * dt[2] + dt[3], 4);
}

static int live_gt_clocks(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = 0;

	if (!gt->clock_frequency) { /* unknown */
		pr_info("CS_TIMESTAMP frequency unknown\n");
		return 0;
	}

	if (GRAPHICS_VER(gt->i915) < 4) /* Any CS_TIMESTAMP? */
		return 0;

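	/*
	 * Keep the GT awake and hold forcewake across the measurements so
	 * the CS timestamp registers stay powered and the ENGINE_READ_FW()
	 * accesses remain valid.
	 */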
	wakeref = intel_gt_pm_get(gt);
	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);

	for_each_engine(engine, gt, id) {
		u32 cycles;
		u32 expected;
		u64 time;
		ktime_t dt;

		if (GRAPHICS_VER(engine->i915) < 7 && engine->id != RCS0)
			continue;

		measure_clocks(engine, &cycles, &dt);

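		/* Convert the measured ticks to ns, and the measured ns to ticks */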
		time = intel_gt_clock_interval_to_ns(engine->gt, cycles);
		expected = intel_gt_ns_to_clock_interval(engine->gt, dt);

		pr_info("%s: TIMESTAMP %d cycles [%lldns] in %lldns [%d cycles], using CS clock frequency of %uKHz\n",
			engine->name, cycles, time, dt, expected,
			engine->gt->clock_frequency / 1000);

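		/*
		 * The two conversions must agree with the direct measurements
		 * to within a ratio of 8/9 to 9/8 (roughly +/-12%).
		 */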
		if (9 * time < 8 * dt || 8 * time > 9 * dt) {
			pr_err("%s: CS ticks did not match walltime!\n",
			       engine->name);
			err = -EINVAL;
			break;
		}

		if (9 * expected < 8 * cycles || 8 * expected > 9 * cycles) {
			pr_err("%s: walltime did not match CS ticks!\n",
			       engine->name);
			err = -EINVAL;
			break;
		}
	}

	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
	intel_gt_pm_put(gt, wakeref);

	return err;
}

static int live_gt_resume(void *arg)
{
	struct intel_gt *gt = arg;
	IGT_TIMEOUT(end_time);
	int err;

	/* Do several suspend/resume cycles to check we don't explode! */
	do {
		intel_gt_suspend_prepare(gt);
		intel_gt_suspend_late(gt);

		if (gt->rc6.enabled) {
			pr_err("rc6 still enabled after suspend!\n");
			intel_gt_set_wedged_on_init(gt);
			err = -EINVAL;
			break;
		}

		err = intel_gt_resume(gt);
		if (err)
			break;

		if (gt->rc6.supported && !gt->rc6.enabled) {
			pr_err("rc6 not enabled upon resume!\n");
			intel_gt_set_wedged_on_init(gt);
			err = -EINVAL;
			break;
		}

		err = st_llc_verify(&gt->llc);
		if (err) {
			pr_err("llc state not restored upon resume!\n");
			intel_gt_set_wedged_on_init(gt);
			break;
		}
	} while (!__igt_timeout(end_time, NULL));

	return err;
}

int intel_gt_pm_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_gt_clocks),
		SUBTEST(live_rc6_manual),
		SUBTEST(live_rps_clock_interval),
		SUBTEST(live_rps_control),
		SUBTEST(live_rps_frequency_cs),
		SUBTEST(live_rps_frequency_srm),
		SUBTEST(live_rps_power),
		SUBTEST(live_rps_interrupt),
		SUBTEST(live_rps_dynamic),
		SUBTEST(live_gt_resume),
	};

	if (intel_gt_is_wedged(to_gt(i915)))
		return 0;

	return intel_gt_live_subtests(tests, to_gt(i915));
}

int intel_gt_pm_late_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		/*
		 * These tests may leave the system in an undesirable state.
		 * They are intended to be run last in CI and the system
		 * rebooted afterwards.
		 */
		SUBTEST(live_rc6_ctx_wa),
	};

	if (intel_gt_is_wedged(to_gt(i915)))
		return 0;

	return intel_gt_live_subtests(tests, to_gt(i915));
}