xref: /linux/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c (revision 815e260a18a3af4dab59025ee99a7156c0e8b5e0)
1 // SPDX-License-Identifier: MIT
2 
3 /*
4  * Copyright © 2019 Intel Corporation
5  */
6 
7 #include <linux/seq_file.h>
8 #include <linux/string_helpers.h>
9 
10 #include <drm/drm_print.h>
11 
12 #include "i915_drv.h"
13 #include "i915_reg.h"
14 #include "intel_gt.h"
15 #include "intel_gt_clock_utils.h"
16 #include "intel_gt_debugfs.h"
17 #include "intel_gt_pm.h"
18 #include "intel_gt_pm_debugfs.h"
19 #include "intel_gt_regs.h"
20 #include "intel_llc.h"
21 #include "intel_mchbar_regs.h"
22 #include "intel_pcode.h"
23 #include "intel_rc6.h"
24 #include "intel_rps.h"
25 #include "intel_runtime_pm.h"
26 #include "intel_uncore.h"
27 #include "vlv_iosf_sb.h"
28 
/*
 * Pin the GT awake on behalf of userspace (debugfs "forcewake_user" file).
 * Takes an untracked GT PM reference and, on gen6+, a user forcewake
 * reference; gt->user_wakeref counts the outstanding userspace holds.
 * Undone, in reverse order, by intel_gt_pm_debugfs_forcewake_user_release().
 */
void intel_gt_pm_debugfs_forcewake_user_open(struct intel_gt *gt)
{
	atomic_inc(&gt->user_wakeref);
	intel_gt_pm_get_untracked(gt);
	/* User forcewake is only taken on gen6+ (matching the release path) */
	if (GRAPHICS_VER(gt->i915) >= 6)
		intel_uncore_forcewake_user_get(gt->uncore);
}
36 
/*
 * Drop the references taken by intel_gt_pm_debugfs_forcewake_user_open(),
 * in reverse order: user forcewake (gen6+ only), then the untracked GT PM
 * reference, then the userspace hold count.
 */
void intel_gt_pm_debugfs_forcewake_user_release(struct intel_gt *gt)
{
	if (GRAPHICS_VER(gt->i915) >= 6)
		intel_uncore_forcewake_user_put(gt->uncore);
	intel_gt_pm_put_untracked(gt);
	atomic_dec(&gt->user_wakeref);
}
44 
45 static int forcewake_user_open(struct inode *inode, struct file *file)
46 {
47 	struct intel_gt *gt = inode->i_private;
48 
49 	intel_gt_pm_debugfs_forcewake_user_open(gt);
50 
51 	return 0;
52 }
53 
54 static int forcewake_user_release(struct inode *inode, struct file *file)
55 {
56 	struct intel_gt *gt = inode->i_private;
57 
58 	intel_gt_pm_debugfs_forcewake_user_release(gt);
59 
60 	return 0;
61 }
62 
/* No read/write methods: open pins forcewake, the final close drops it. */
static const struct file_operations forcewake_user_fops = {
	.owner = THIS_MODULE,
	.open = forcewake_user_open,
	.release = forcewake_user_release,
};
68 
69 static int fw_domains_show(struct seq_file *m, void *data)
70 {
71 	struct intel_gt *gt = m->private;
72 	struct intel_uncore *uncore = gt->uncore;
73 	struct intel_uncore_forcewake_domain *fw_domain;
74 	unsigned int tmp;
75 
76 	spin_lock_irq(&uncore->lock);
77 
78 	seq_printf(m, "user.bypass_count = %u\n",
79 		   uncore->user_forcewake_count);
80 
81 	for_each_fw_domain(fw_domain, uncore, tmp)
82 		seq_printf(m, "%s.wake_count = %u\n",
83 			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
84 			   READ_ONCE(fw_domain->wake_count));
85 
86 	spin_unlock_irq(&uncore->lock);
87 
88 	return 0;
89 }
90 DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(fw_domains);
91 
92 static int vlv_drpc(struct seq_file *m)
93 {
94 	struct intel_gt *gt = m->private;
95 	struct intel_uncore *uncore = gt->uncore;
96 	u32 rcctl1, pw_status, mt_fwake_req;
97 
98 	mt_fwake_req = intel_uncore_read_fw(uncore, FORCEWAKE_MT);
99 	pw_status = intel_uncore_read(uncore, VLV_GTLC_PW_STATUS);
100 	rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL);
101 
102 	seq_printf(m, "RC6 Enabled: %s\n",
103 		   str_yes_no(rcctl1 & (GEN7_RC_CTL_TO_MODE |
104 					GEN6_RC_CTL_EI_MODE(1))));
105 	seq_printf(m, "Multi-threaded Forcewake Request: 0x%x\n", mt_fwake_req);
106 	seq_printf(m, "Render Power Well: %s\n",
107 		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
108 	seq_printf(m, "Media Power Well: %s\n",
109 		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
110 
111 	intel_rc6_print_residency(m, "Render RC6 residency since boot:", INTEL_RC6_RES_RC6);
112 	intel_rc6_print_residency(m, "Media RC6 residency since boot:", INTEL_RC6_RES_VLV_MEDIA);
113 
114 	return fw_domains_show(m, NULL);
115 }
116 
/*
 * Dump gen6..gen12 (pre-MTL) render C-state info: RC control enables, the
 * current RC level from GT_CORE_STATUS, gen9+ power-well gating, the RC6
 * residency counters and, on gen6/7 only, the pcode RC6 voltage IDs.
 */
static int gen6_drpc(struct seq_file *m)
{
	struct intel_gt *gt = m->private;
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	u32 gt_core_status, mt_fwake_req, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;

	/* Raw (_fw) accessors: sampled without forcewake handling */
	mt_fwake_req = intel_uncore_read_fw(uncore, FORCEWAKE_MT);
	gt_core_status = intel_uncore_read_fw(uncore, GEN6_GT_CORE_STATUS);

	rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL);
	/* Power-well gating is only reported on gen9+ */
	if (GRAPHICS_VER(i915) >= 9) {
		gen9_powergate_enable =
			intel_uncore_read(uncore, GEN9_PG_ENABLE);
		gen9_powergate_status =
			intel_uncore_read(uncore, GEN9_PWRGT_DOMAIN_STATUS);
	}

	/* RC6 voltage IDs are fetched via pcode on gen6/7 only */
	if (GRAPHICS_VER(i915) <= 7)
		snb_pcode_read(gt->uncore, GEN6_PCODE_READ_RC6VIDS, &rc6vids, NULL);

	seq_printf(m, "RC1e Enabled: %s\n",
		   str_yes_no(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   str_yes_no(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (GRAPHICS_VER(i915) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			   str_yes_no(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			   str_yes_no(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   str_yes_no(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   str_yes_no(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	/* Decode the current RC level from GT_CORE_STATUS */
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   str_yes_no(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	seq_printf(m, "Multi-threaded Forcewake Request: 0x%x\n", mt_fwake_req);
	if (GRAPHICS_VER(i915) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	intel_rc6_print_residency(m, "RC6 \"Locked to RPn\" residency since boot:",
				  INTEL_RC6_RES_RC6_LOCKED);
	intel_rc6_print_residency(m, "RC6 residency since boot:", INTEL_RC6_RES_RC6);
	intel_rc6_print_residency(m, "RC6+ residency since boot:", INTEL_RC6_RES_RC6p);
	intel_rc6_print_residency(m, "RC6++ residency since boot:", INTEL_RC6_RES_RC6pp);

	if (GRAPHICS_VER(i915) <= 7) {
		/* rc6vids packs three 8-bit VID fields: RC6, RC6+, RC6++ */
		seq_printf(m, "RC6   voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
		seq_printf(m, "RC6+  voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
		seq_printf(m, "RC6++ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	}

	/* Append the forcewake domain state to the same file */
	return fw_domains_show(m, NULL);
}
205 
/*
 * Dump Ironlake (gen5) render-standby state: MEMMODECTL boost/P-state
 * configuration, the CRSTANDVID voltage IDs and the current RSx state
 * decoded from RSTDBYCTL.
 */
static int ilk_drpc(struct seq_file *m)
{
	struct intel_gt *gt = m->private;
	struct intel_uncore *uncore = gt->uncore;
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;

	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
	rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL);
	crstandvid = intel_uncore_read16(uncore, CRSTANDVID);

	seq_printf(m, "HD boost: %s\n",
		   str_yes_no(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   str_yes_no(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   str_yes_no(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   str_yes_no(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	/* RS1/RS2 VIDs sit in the low and high bytes of CRSTANDVID */
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   str_yes_no(!(rstdbyctl & RCX_SW_EXIT)));
	/* Decode the current render-standby state from RSTDBYCTL */
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}
264 
265 static int mtl_drpc(struct seq_file *m)
266 {
267 	struct intel_gt *gt = m->private;
268 	struct intel_uncore *uncore = gt->uncore;
269 	u32 gt_core_status, rcctl1, mt_fwake_req;
270 	u32 mtl_powergate_enable = 0, mtl_powergate_status = 0;
271 
272 	mt_fwake_req = intel_uncore_read_fw(uncore, FORCEWAKE_MT);
273 	gt_core_status = intel_uncore_read(uncore, MTL_MIRROR_TARGET_WP1);
274 
275 	rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL);
276 	mtl_powergate_enable = intel_uncore_read(uncore, GEN9_PG_ENABLE);
277 	mtl_powergate_status = intel_uncore_read(uncore,
278 						 GEN9_PWRGT_DOMAIN_STATUS);
279 
280 	seq_printf(m, "RC6 Enabled: %s\n",
281 		   str_yes_no(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
282 	if (gt->type == GT_MEDIA) {
283 		seq_printf(m, "Media Well Gating Enabled: %s\n",
284 			   str_yes_no(mtl_powergate_enable & GEN9_MEDIA_PG_ENABLE));
285 	} else {
286 		seq_printf(m, "Render Well Gating Enabled: %s\n",
287 			   str_yes_no(mtl_powergate_enable & GEN9_RENDER_PG_ENABLE));
288 	}
289 
290 	seq_puts(m, "Current RC state: ");
291 	switch (REG_FIELD_GET(MTL_CC_MASK, gt_core_status)) {
292 	case MTL_CC0:
293 		seq_puts(m, "RC0\n");
294 		break;
295 	case MTL_CC6:
296 		seq_puts(m, "RC6\n");
297 		break;
298 	default:
299 		seq_puts(m, "Unknown\n");
300 		break;
301 	}
302 
303 	seq_printf(m, "Multi-threaded Forcewake Request: 0x%x\n", mt_fwake_req);
304 	if (gt->type == GT_MEDIA)
305 		seq_printf(m, "Media Power Well: %s\n",
306 			   (mtl_powergate_status &
307 			    GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
308 	else
309 		seq_printf(m, "Render Power Well: %s\n",
310 			   (mtl_powergate_status &
311 			    GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
312 
313 	/* Works for both render and media gt's */
314 	intel_rc6_print_residency(m, "RC6 residency since boot:", INTEL_RC6_RES_RC6);
315 
316 	return fw_domains_show(m, NULL);
317 }
318 
319 static int drpc_show(struct seq_file *m, void *unused)
320 {
321 	struct intel_gt *gt = m->private;
322 	struct drm_i915_private *i915 = gt->i915;
323 	intel_wakeref_t wakeref;
324 	int err = -ENODEV;
325 
326 	with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
327 		if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
328 			err = mtl_drpc(m);
329 		else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
330 			err = vlv_drpc(m);
331 		else if (GRAPHICS_VER(i915) >= 6)
332 			err = gen6_drpc(m);
333 		else
334 			err = ilk_drpc(m);
335 	}
336 
337 	return err;
338 }
339 DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(drpc);
340 
/**
 * intel_gt_pm_frequency_dump - dump GPU frequency state for a GT
 * @gt: the GT to report on
 * @p: printer used for the output
 *
 * Holds a runtime PM wakeref for the duration of the register reads. The
 * output format depends on platform: gen5 P-state registers, VLV/CHV punit
 * frequency status, gen6+ RPS state, or a "no P-state info" message.
 */
void intel_gt_pm_frequency_dump(struct intel_gt *gt, struct drm_printer *p)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	struct intel_rps *rps = &gt->rps;
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(uncore->rpm);

	if (GRAPHICS_VER(i915) == 5) {
		u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
		u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);

		drm_printf(p, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		drm_printf(p, "Requested VID: %d\n", rgvswctl & 0x3f);
		drm_printf(p, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		drm_printf(p, "Current P-state: %d\n",
			   REG_FIELD_GET(MEMSTAT_PSTATE_MASK, rgvstat));
	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		u32 rpmodectl, freq_sts;

		rpmodectl = intel_uncore_read(uncore, GEN6_RP_CONTROL);
		drm_printf(p, "Video Turbo Mode: %s\n",
			   str_yes_no(rpmodectl & GEN6_RP_MEDIA_TURBO));
		drm_printf(p, "HW control enabled: %s\n",
			   str_yes_no(rpmodectl & GEN6_RP_ENABLE));
		drm_printf(p, "SW control enabled: %s\n",
			   str_yes_no((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) == GEN6_RP_MEDIA_SW_MODE));

		/* The frequency status register is read via the punit sideband */
		vlv_iosf_sb_get(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
		freq_sts = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT, PUNIT_REG_GPU_FREQ_STS);
		vlv_iosf_sb_put(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));

		drm_printf(p, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);

		drm_printf(p, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, (freq_sts >> 8) & 0xff));

		drm_printf(p, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->cur_freq));

		drm_printf(p, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->max_freq));

		drm_printf(p, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->min_freq));

		drm_printf(p, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->idle_freq));

		drm_printf(p, "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(rps, rps->efficient_freq));
	} else if (GRAPHICS_VER(i915) >= 6) {
		gen6_rps_frequency_dump(rps, p);
	} else {
		drm_puts(p, "no P-state info available\n");
	}

	intel_runtime_pm_put(uncore->rpm, wakeref);
}
402 
403 static int frequency_show(struct seq_file *m, void *unused)
404 {
405 	struct intel_gt *gt = m->private;
406 	struct drm_printer p = drm_seq_file_printer(m);
407 
408 	intel_gt_pm_frequency_dump(gt, &p);
409 
410 	return 0;
411 }
412 DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(frequency);
413 
414 static int llc_show(struct seq_file *m, void *data)
415 {
416 	struct intel_gt *gt = m->private;
417 	struct drm_i915_private *i915 = gt->i915;
418 	const bool edram = GRAPHICS_VER(i915) > 8;
419 	struct intel_rps *rps = &gt->rps;
420 	unsigned int max_gpu_freq, min_gpu_freq;
421 	intel_wakeref_t wakeref;
422 	int gpu_freq, ia_freq;
423 
424 	seq_printf(m, "LLC: %s\n", str_yes_no(HAS_LLC(i915)));
425 	seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
426 		   i915->edram_size_mb);
427 
428 	min_gpu_freq = rps->min_freq;
429 	max_gpu_freq = rps->max_freq;
430 	if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) {
431 		/* Convert GT frequency to 50 HZ units */
432 		min_gpu_freq /= GEN9_FREQ_SCALER;
433 		max_gpu_freq /= GEN9_FREQ_SCALER;
434 	}
435 
436 	seq_puts(m, "GPU freq (MHz)\tEffective GPU freq (MHz)\tEffective Ring freq (MHz)\n");
437 
438 	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
439 	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
440 		ia_freq = gpu_freq;
441 		snb_pcode_read(gt->uncore, GEN6_PCODE_READ_MIN_FREQ_TABLE,
442 			       &ia_freq, NULL);
443 		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
444 			   intel_gpu_freq(rps,
445 					  (gpu_freq *
446 					   (IS_GEN9_BC(i915) ||
447 					    GRAPHICS_VER(i915) >= 11 ?
448 					    GEN9_FREQ_SCALER : 1))),
449 			   ((ia_freq >> 0) & 0xff) * 100,
450 			   ((ia_freq >> 8) & 0xff) * 100);
451 	}
452 	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
453 
454 	return 0;
455 }
456 
/* The llc debugfs file is only created on platforms that have an LLC. */
static bool llc_eval(void *data)
{
	struct intel_gt *gt = data;

	return HAS_LLC(gt->i915);
}

DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(llc);
465 
466 static const char *rps_power_to_str(unsigned int power)
467 {
468 	static const char * const strings[] = {
469 		[LOW_POWER] = "low power",
470 		[BETWEEN] = "mixed",
471 		[HIGH_POWER] = "high power",
472 	};
473 
474 	if (power >= ARRAY_SIZE(strings) || !strings[power])
475 		return "unknown";
476 
477 	return strings[power];
478 }
479 
/*
 * Summarise RPS state: enable/active flags, GT busyness, outstanding
 * boosts, the configured frequency limits and, while RPS is active on
 * gen6+, the hardware up/down evaluation-interval counters.
 */
static int rps_boost_show(struct seq_file *m, void *data)
{
	struct intel_gt *gt = m->private;
	struct drm_i915_private *i915 = gt->i915;
	struct intel_rps *rps = &gt->rps;

	seq_printf(m, "RPS enabled? %s\n",
		   str_yes_no(intel_rps_is_enabled(rps)));
	seq_printf(m, "RPS active? %s\n",
		   str_yes_no(intel_rps_is_active(rps)));
	seq_printf(m, "GPU busy? %s, %llums\n",
		   str_yes_no(gt->awake),
		   ktime_to_ms(intel_gt_get_awake_time(gt)));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
	seq_printf(m, "Frequency requested %d, actual %d\n",
		   intel_gpu_freq(rps, rps->cur_freq),
		   intel_rps_read_actual_frequency(rps));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(rps, rps->min_freq),
		   intel_gpu_freq(rps, rps->min_freq_softlimit),
		   intel_gpu_freq(rps, rps->max_freq_softlimit),
		   intel_gpu_freq(rps, rps->max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(rps, rps->idle_freq),
		   intel_gpu_freq(rps, rps->efficient_freq),
		   intel_gpu_freq(rps, rps->boost_freq));

	seq_printf(m, "Wait boosts: %d\n", READ_ONCE(rps->boosts));

	if (GRAPHICS_VER(i915) >= 6 && intel_rps_is_active(rps)) {
		struct intel_uncore *uncore = gt->uncore;
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		/* Hold forcewake across the four raw (_fw) counter reads */
		intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
		rpup = intel_uncore_read_fw(uncore, GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = intel_uncore_read_fw(uncore, GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = intel_uncore_read_fw(uncore, GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = intel_uncore_read_fw(uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power.mode));
		/* Guard against division by zero if the EI counters read back 0 */
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->power.up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->power.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}
537 
538 static bool rps_eval(void *data)
539 {
540 	struct intel_gt *gt = data;
541 
542 	if (intel_guc_slpc_is_used(gt_to_guc(gt)))
543 		return false;
544 	else
545 		return HAS_RPS(gt->i915);
546 }
547 
548 DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(rps_boost);
549 
/* Read the GT's perf-limit-reasons register into @val. */
static int perf_limit_reasons_get(void *data, u64 *val)
{
	struct intel_gt *gt = data;
	intel_wakeref_t wakeref;

	/* Take a runtime PM wakeref around the MMIO read */
	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
		*val = intel_uncore_read(gt->uncore, intel_gt_perf_limit_reasons_reg(gt));

	return 0;
}
560 
/*
 * Any write to the file clears the sticky "log" half of the register;
 * the written value itself is ignored.
 */
static int perf_limit_reasons_clear(void *data, u64 val)
{
	struct intel_gt *gt = data;
	intel_wakeref_t wakeref;

	/*
	 * Clear the upper 16 "log" bits, the lower 16 "status" bits are
	 * read-only. The upper 16 "log" bits are identical to the lower 16
	 * "status" bits except that the "log" bits remain set until cleared.
	 */
	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
		intel_uncore_rmw(gt->uncore, intel_gt_perf_limit_reasons_reg(gt),
				 GT0_PERF_LIMIT_REASONS_LOG_MASK, 0);

	return 0;
}
577 
/* Only create the file when the GT has a valid perf-limit-reasons register. */
static bool perf_limit_reasons_eval(void *data)
{
	struct intel_gt *gt = data;

	return i915_mmio_reg_valid(intel_gt_perf_limit_reasons_reg(gt));
}

DEFINE_SIMPLE_ATTRIBUTE(perf_limit_reasons_fops, perf_limit_reasons_get,
			perf_limit_reasons_clear, "0x%llx\n");
587 
588 void intel_gt_pm_debugfs_register(struct intel_gt *gt, struct dentry *root)
589 {
590 	static const struct intel_gt_debugfs_file files[] = {
591 		{ "drpc", &drpc_fops, NULL },
592 		{ "frequency", &frequency_fops, NULL },
593 		{ "forcewake", &fw_domains_fops, NULL },
594 		{ "forcewake_user", &forcewake_user_fops, NULL},
595 		{ "llc", &llc_fops, llc_eval },
596 		{ "rps_boost", &rps_boost_fops, rps_eval },
597 		{ "perf_limit_reasons", &perf_limit_reasons_fops, perf_limit_reasons_eval },
598 	};
599 
600 	intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt);
601 }
602