xref: /linux/drivers/gpu/drm/i915/selftests/intel_uncore.c (revision db5d28c0bfe566908719bec8e25443aabecbb802)
1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24 
25 #include "../i915_selftest.h"
26 
27 #include "gt/intel_gt.h"
28 
/*
 * Validate a forcewake range table: entries must be strictly ascending,
 * each must span at least one register, and — when @is_watertight — the
 * table must also have no gaps between consecutive entries.
 *
 * Returns 0 on success or -EINVAL on the first malformed entry.
 */
static int intel_fw_table_check(const struct intel_forcewake_range *ranges,
				unsigned int num_ranges,
				bool is_watertight)
{
	s32 prev = -1;
	unsigned int i;

	for (i = 0; i < num_ranges; i++, ranges++) {
		/* A watertight table has no holes between entries */
		if (is_watertight && (s32)ranges->start != prev + 1) {
			pr_err("%s: entry[%d]:(%x, %x) is not watertight to previous (%x)\n",
			       __func__, i, ranges->start, ranges->end, prev);
			return -EINVAL;
		}

		/* Entries must be sorted in strictly ascending order */
		if ((s32)ranges->start <= prev) {
			pr_err("%s: entry[%d]:(%x, %x) is less than the previous (%x)\n",
			       __func__, i, ranges->start, ranges->end, prev);
			return -EINVAL;
		}

		/* Each entry must describe a non-empty forward range */
		if (ranges->end <= ranges->start) {
			pr_err("%s: entry[%d]:(%x, %x) has negative length\n",
			       __func__, i, ranges->start, ranges->end);
			return -EINVAL;
		}

		prev = ranges->end;
	}

	return 0;
}
63 
intel_shadow_table_check(void)64 static int intel_shadow_table_check(void)
65 {
66 	struct {
67 		const struct i915_range *regs;
68 		unsigned int size;
69 	} range_lists[] = {
70 		{ gen8_shadowed_regs, ARRAY_SIZE(gen8_shadowed_regs) },
71 		{ gen11_shadowed_regs, ARRAY_SIZE(gen11_shadowed_regs) },
72 		{ gen12_shadowed_regs, ARRAY_SIZE(gen12_shadowed_regs) },
73 		{ dg2_shadowed_regs, ARRAY_SIZE(dg2_shadowed_regs) },
74 		{ mtl_shadowed_regs, ARRAY_SIZE(mtl_shadowed_regs) },
75 		{ xelpmp_shadowed_regs, ARRAY_SIZE(xelpmp_shadowed_regs) },
76 	};
77 	const struct i915_range *range;
78 	unsigned int i, j;
79 	s32 prev;
80 
81 	for (j = 0; j < ARRAY_SIZE(range_lists); ++j) {
82 		range = range_lists[j].regs;
83 		for (i = 0, prev = -1; i < range_lists[j].size; i++, range++) {
84 			if (range->end < range->start) {
85 				pr_err("%s: range[%d]:(%06x-%06x) has end before start\n",
86 				       __func__, i, range->start, range->end);
87 				return -EINVAL;
88 			}
89 
90 			if (prev >= (s32)range->start) {
91 				pr_err("%s: range[%d]:(%06x-%06x) is before end of previous (%06x)\n",
92 				       __func__, i, range->start, range->end, prev);
93 				return -EINVAL;
94 			}
95 
96 			if (range->start % 4) {
97 				pr_err("%s: range[%d]:(%06x-%06x) has non-dword-aligned start\n",
98 				       __func__, i, range->start, range->end);
99 				return -EINVAL;
100 			}
101 
102 			prev = range->end;
103 		}
104 	}
105 
106 	return 0;
107 }
108 
intel_uncore_mock_selftests(void)109 int intel_uncore_mock_selftests(void)
110 {
111 	struct {
112 		const struct intel_forcewake_range *ranges;
113 		unsigned int num_ranges;
114 		bool is_watertight;
115 	} fw[] = {
116 		{ __vlv_fw_ranges, ARRAY_SIZE(__vlv_fw_ranges), false },
117 		{ __chv_fw_ranges, ARRAY_SIZE(__chv_fw_ranges), false },
118 		{ __gen9_fw_ranges, ARRAY_SIZE(__gen9_fw_ranges), true },
119 		{ __gen11_fw_ranges, ARRAY_SIZE(__gen11_fw_ranges), true },
120 		{ __gen12_fw_ranges, ARRAY_SIZE(__gen12_fw_ranges), true },
121 		{ __mtl_fw_ranges, ARRAY_SIZE(__mtl_fw_ranges), true },
122 		{ __xelpmp_fw_ranges, ARRAY_SIZE(__xelpmp_fw_ranges), true },
123 	};
124 	int err, i;
125 
126 	for (i = 0; i < ARRAY_SIZE(fw); i++) {
127 		err = intel_fw_table_check(fw[i].ranges,
128 					   fw[i].num_ranges,
129 					   fw[i].is_watertight);
130 		if (err)
131 			return err;
132 	}
133 
134 	err = intel_shadow_table_check();
135 	if (err)
136 		return err;
137 
138 	return 0;
139 }
140 
live_forcewake_ops(void * arg)141 static int live_forcewake_ops(void *arg)
142 {
143 	static const struct reg {
144 		const char *name;
145 		u8 min_graphics_ver;
146 		u8 max_graphics_ver;
147 		unsigned long platforms;
148 		unsigned int offset;
149 	} registers[] = {
150 		{
151 			"RING_START",
152 			6, 7,
153 			0x38,
154 		},
155 		{
156 			"RING_MI_MODE",
157 			8, U8_MAX,
158 			0x9c,
159 		}
160 	};
161 	const struct reg *r;
162 	struct intel_gt *gt = arg;
163 	struct intel_uncore_forcewake_domain *domain;
164 	struct intel_uncore *uncore = gt->uncore;
165 	struct intel_engine_cs *engine;
166 	enum intel_engine_id id;
167 	intel_wakeref_t wakeref;
168 	unsigned int tmp;
169 	int err = 0;
170 
171 	GEM_BUG_ON(gt->awake);
172 
173 	/* vlv/chv with their pcu behave differently wrt reads */
174 	if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915)) {
175 		pr_debug("PCU fakes forcewake badly; skipping\n");
176 		return 0;
177 	}
178 
179 	/*
180 	 * Not quite as reliable across the gen as one would hope.
181 	 *
182 	 * Either our theory of operation is incorrect, or there remain
183 	 * external parties interfering with the powerwells.
184 	 *
185 	 * https://bugs.freedesktop.org/show_bug.cgi?id=110210
186 	 */
187 	if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
188 		return 0;
189 
190 	/* We have to pick carefully to get the exact behaviour we need */
191 	for (r = registers; r->name; r++)
192 		if (IS_GRAPHICS_VER(gt->i915, r->min_graphics_ver, r->max_graphics_ver))
193 			break;
194 	if (!r->name) {
195 		pr_debug("Forcewaked register not known for %s; skipping\n",
196 			 intel_platform_name(INTEL_INFO(gt->i915)->platform));
197 		return 0;
198 	}
199 
200 	wakeref = intel_runtime_pm_get(uncore->rpm);
201 
202 	for_each_fw_domain(domain, uncore, tmp) {
203 		smp_store_mb(domain->active, false);
204 		if (!hrtimer_cancel(&domain->timer))
205 			continue;
206 
207 		intel_uncore_fw_release_timer(&domain->timer);
208 	}
209 
210 	for_each_engine(engine, gt, id) {
211 		i915_reg_t mmio = _MMIO(engine->mmio_base + r->offset);
212 		u32 __iomem *reg = intel_uncore_regs(uncore) + engine->mmio_base + r->offset;
213 		enum forcewake_domains fw_domains;
214 		u32 val;
215 
216 		if (!engine->default_state)
217 			continue;
218 
219 		fw_domains = intel_uncore_forcewake_for_reg(uncore, mmio,
220 							    FW_REG_READ);
221 		if (!fw_domains)
222 			continue;
223 
224 		for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
225 			if (!domain->wake_count)
226 				continue;
227 
228 			pr_err("fw_domain %s still active, aborting test!\n",
229 			       intel_uncore_forcewake_domain_to_str(domain->id));
230 			err = -EINVAL;
231 			goto out_rpm;
232 		}
233 
234 		intel_uncore_forcewake_get(uncore, fw_domains);
235 		val = readl(reg);
236 		intel_uncore_forcewake_put(uncore, fw_domains);
237 
238 		/* Flush the forcewake release (delayed onto a timer) */
239 		for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
240 			smp_store_mb(domain->active, false);
241 			if (hrtimer_cancel(&domain->timer))
242 				intel_uncore_fw_release_timer(&domain->timer);
243 
244 			preempt_disable();
245 			err = wait_ack_clear(domain, FORCEWAKE_KERNEL);
246 			preempt_enable();
247 			if (err) {
248 				pr_err("Failed to clear fw_domain %s\n",
249 				       intel_uncore_forcewake_domain_to_str(domain->id));
250 				goto out_rpm;
251 			}
252 		}
253 
254 		if (!val) {
255 			pr_err("%s:%s was zero while fw was held!\n",
256 			       engine->name, r->name);
257 			err = -EINVAL;
258 			goto out_rpm;
259 		}
260 
261 		/* We then expect the read to return 0 outside of the fw */
262 		if (wait_for(readl(reg) == 0, 100)) {
263 			pr_err("%s:%s=%0x, fw_domains 0x%x still up after 100ms!\n",
264 			       engine->name, r->name, readl(reg), fw_domains);
265 			err = -ETIMEDOUT;
266 			goto out_rpm;
267 		}
268 	}
269 
270 out_rpm:
271 	intel_runtime_pm_put(uncore->rpm, wakeref);
272 	return err;
273 }
274 
/*
 * Check that unclaimed-mmio detection is stable: every register offset
 * that reads without an unclaimed-access report while all forcewake is
 * held must also read cleanly after forcewake has been reset.
 */
static int live_forcewake_domains(void *arg)
{
#define FW_RANGE 0x40000
	struct intel_gt *gt = arg;
	struct intel_uncore *uncore = gt->uncore;
	unsigned long *valid;
	u32 offset;
	int err;

	/* Requires unclaimed-access reporting (FPGA_DBG or vlv/chv) */
	if (!HAS_FPGA_DBG_UNCLAIMED(gt->i915) &&
	    !IS_VALLEYVIEW(gt->i915) &&
	    !IS_CHERRYVIEW(gt->i915))
		return 0;

	/*
	 * This test may lockup the machine or cause GPU hangs afterwards.
	 */
	if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
		return 0;

	/* One bit per byte offset in [0, FW_RANGE); only dwords are set */
	valid = bitmap_zalloc(FW_RANGE, GFP_KERNEL);
	if (!valid)
		return -ENOMEM;

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	/* First pass: record which offsets read cleanly under forcewake */
	check_for_unclaimed_mmio(uncore);
	for (offset = 0; offset < FW_RANGE; offset += 4) {
		i915_reg_t reg = { offset };

		intel_uncore_posting_read_fw(uncore, reg);
		if (!check_for_unclaimed_mmio(uncore))
			set_bit(offset, valid);
	}

	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

	/* Second pass: same offsets must still read cleanly after reset */
	err = 0;
	for_each_set_bit(offset, valid, FW_RANGE) {
		i915_reg_t reg = { offset };

		iosf_mbi_punit_acquire();
		intel_uncore_forcewake_reset(uncore);
		iosf_mbi_punit_release();

		/* Clear any stale unclaimed state before probing */
		check_for_unclaimed_mmio(uncore);

		intel_uncore_posting_read_fw(uncore, reg);
		if (check_for_unclaimed_mmio(uncore)) {
			pr_err("Unclaimed mmio read to register 0x%04x\n",
			       offset);
			err = -EINVAL;
		}
	}

	bitmap_free(valid);
	return err;
}
333 
live_fw_table(void * arg)334 static int live_fw_table(void *arg)
335 {
336 	struct intel_gt *gt = arg;
337 
338 	/* Confirm the table we load is still valid */
339 	return intel_fw_table_check(gt->uncore->fw_domains_table,
340 				    gt->uncore->fw_domains_table_entries,
341 				    GRAPHICS_VER(gt->i915) >= 9);
342 }
343 
/*
 * Entry point for the uncore live selftests: registers the subtests
 * below to run against the primary GT of @i915.
 */
int intel_uncore_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_fw_table),
		SUBTEST(live_forcewake_ops),
		SUBTEST(live_forcewake_domains),
	};

	return intel_gt_live_subtests(tests, to_gt(i915));
}
354