// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_guc_pc.h"

#include <linux/delay.h>
#include <linux/ktime.h>

#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <generated/xe_wa_oob.h>

#include "abi/guc_actions_slpc_abi.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_gt_idle.h"
#include "xe_gt_printk.h"
#include "xe_gt_throttle.h"
#include "xe_gt_types.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_pcode.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_wa.h"

#define MCHBAR_MIRROR_BASE_SNB	0x140000

#define RP_STATE_CAP		XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5998)
#define   RP0_MASK		REG_GENMASK(7, 0)
#define   RP1_MASK		REG_GENMASK(15, 8)
#define   RPN_MASK		REG_GENMASK(23, 16)

#define FREQ_INFO_REC	XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5ef0)
#define   RPE_MASK		REG_GENMASK(15, 8)
#define   RPA_MASK		REG_GENMASK(31, 16)

#define GT_PERF_STATUS		XE_REG(0x1381b4)
#define   CAGF_MASK	REG_GENMASK(19, 11)

#define GT_FREQUENCY_MULTIPLIER	50
#define GT_FREQUENCY_SCALER	3

#define LNL_MERT_FREQ_CAP	800
#define BMG_MERT_FREQ_CAP	2133
#define BMG_MIN_FREQ		1200

#define SLPC_RESET_TIMEOUT_MS 5 /* roughly 5ms, but no need for precision */
#define SLPC_RESET_EXTENDED_TIMEOUT_MS 1000 /* To be used only at pc_start */

/**
 * DOC: GuC Power Conservation (PC)
 *
 * GuC Power Conservation (PC) supports multiple features for the most
 * efficient and performant use of the GT when GuC submission is enabled,
 * including frequency management, Render-C states management, and various
 * algorithms for power balancing.
 *
 * Single Loop Power Conservation (SLPC) is the name given to the suite of
 * connected power conservation features in the GuC firmware. The firmware
 * exposes a programming interface to the host for the control of SLPC.
 *
 * Frequency management:
 * =====================
 *
 * The Xe driver enables SLPC with all of its default features and frequency
 * selection, which varies per platform.
 *
 * Render-C States:
 * ================
 *
 * Render-C states are also a GuC PC feature that is now enabled in Xe for
 * all platforms.
 *
 */

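/*
 * Informal sketch of the flow implemented below (derived from this file,
 * not an ABI statement): the host resets SLPC with a HOST2GUC_PC_SLPC_REQUEST
 * carrying SLPC_EVENT_RESET and the GGTT address of the shared data buffer,
 * polls header.global_state until it reads SLPC_GLOBAL_STATE_RUNNING, and
 * only then issues PARAMETER_SET/UNSET or QUERY_TASK_STATE events against
 * the same interface.
 */
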
static struct xe_guc *pc_to_guc(struct xe_guc_pc *pc)
{
	return container_of(pc, struct xe_guc, pc);
}

static struct xe_guc_ct *pc_to_ct(struct xe_guc_pc *pc)
{
	return &pc_to_guc(pc)->ct;
}

static struct xe_gt *pc_to_gt(struct xe_guc_pc *pc)
{
	return guc_to_gt(pc_to_guc(pc));
}

static struct xe_device *pc_to_xe(struct xe_guc_pc *pc)
{
	return guc_to_xe(pc_to_guc(pc));
}

static struct iosys_map *pc_to_maps(struct xe_guc_pc *pc)
{
	return &pc->bo->vmap;
}

#define slpc_shared_data_read(pc_, field_) \
	xe_map_rd_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
			struct slpc_shared_data, field_)

#define slpc_shared_data_write(pc_, field_, val_) \
	xe_map_wr_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
			struct slpc_shared_data, field_, val_)

#define SLPC_EVENT(id, count) \
	(FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID, id) | \
	 FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, count))
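
/*
 * As an illustration, SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2) packs the
 * event id and its argument count into the first request dword, per the
 * HOST2GUC_PC_SLPC_REQUEST message layout in abi/guc_actions_slpc_abi.h.
 */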

static int wait_for_pc_state(struct xe_guc_pc *pc,
			     enum slpc_global_state state,
			     int timeout_ms)
{
	int timeout_us = 1000 * timeout_ms;
	int slept, wait = 10;

	xe_device_assert_mem_access(pc_to_xe(pc));

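	/*
	 * Poll with exponential backoff: the sleep starts at 10 us and
	 * doubles each iteration, with the final interval clamped so the
	 * accumulated wait never exceeds the requested timeout.
	 */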
	for (slept = 0; slept < timeout_us;) {
		if (slpc_shared_data_read(pc, header.global_state) == state)
			return 0;

		usleep_range(wait, wait << 1);
		slept += wait;
		wait <<= 1;
		if (slept + wait > timeout_us)
			wait = timeout_us - slept;
	}

	return -ETIMEDOUT;
}

static int pc_action_reset(struct xe_guc_pc *pc)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_RESET, 2),
		xe_bo_ggtt_addr(pc->bo),
		0,
	};
	int ret;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
		xe_gt_err(pc_to_gt(pc), "GuC PC reset failed: %pe\n",
			  ERR_PTR(ret));

	return ret;
}

static int pc_action_query_task_state(struct xe_guc_pc *pc)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
		xe_bo_ggtt_addr(pc->bo),
		0,
	};
	int ret;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
			      SLPC_RESET_TIMEOUT_MS))
		return -EAGAIN;

	/* Blocking here to ensure the results are ready before reading them */
	ret = xe_guc_ct_send_block(ct, action, ARRAY_SIZE(action));
	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
		xe_gt_err(pc_to_gt(pc), "GuC PC query task state failed: %pe\n",
			  ERR_PTR(ret));

	return ret;
}

static int pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
		id,
		value,
	};
	int ret;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
			      SLPC_RESET_TIMEOUT_MS))
		return -EAGAIN;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
		xe_gt_err(pc_to_gt(pc), "GuC PC set param[%u]=%u failed: %pe\n",
			  id, value, ERR_PTR(ret));

	return ret;
}

static int pc_action_unset_param(struct xe_guc_pc *pc, u8 id)
{
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1),
		id,
	};
	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
	int ret;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
			      SLPC_RESET_TIMEOUT_MS))
		return -EAGAIN;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
		xe_gt_err(pc_to_gt(pc), "GuC PC unset param failed: %pe\n",
			  ERR_PTR(ret));

	return ret;
}

static int pc_action_setup_gucrc(struct xe_guc_pc *pc, u32 mode)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_SETUP_PC_GUCRC,
		mode,
	};
	int ret;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
		xe_gt_err(pc_to_gt(pc), "GuC RC enable mode=%u failed: %pe\n",
			  mode, ERR_PTR(ret));
	return ret;
}

static u32 decode_freq(u32 raw)
{
	return DIV_ROUND_CLOSEST(raw * GT_FREQUENCY_MULTIPLIER,
				 GT_FREQUENCY_SCALER);
}

static u32 encode_freq(u32 freq)
{
	return DIV_ROUND_CLOSEST(freq * GT_FREQUENCY_SCALER,
				 GT_FREQUENCY_MULTIPLIER);
}

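/*
 * Worked example: the ratio unit is GT_FREQUENCY_MULTIPLIER /
 * GT_FREQUENCY_SCALER = 50/3 MHz (~16.666 MHz), so a raw ratio of 24
 * decodes to DIV_ROUND_CLOSEST(24 * 50, 3) = 400 MHz, and encode_freq(400)
 * gives back 24.
 */
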
static u32 pc_get_min_freq(struct xe_guc_pc *pc)
{
	u32 freq;

	freq = FIELD_GET(SLPC_MIN_UNSLICE_FREQ_MASK,
			 slpc_shared_data_read(pc, task_state_data.freq));

	return decode_freq(freq);
}

static void pc_set_manual_rp_ctrl(struct xe_guc_pc *pc, bool enable)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 state = enable ? RPSWCTL_ENABLE : RPSWCTL_DISABLE;

	/* Allow/Disallow punit to process software freq requests */
	xe_mmio_write32(&gt->mmio, RP_CONTROL, state);
}

static void pc_set_cur_freq(struct xe_guc_pc *pc, u32 freq)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 rpnswreq;

	pc_set_manual_rp_ctrl(pc, true);

	/* Req freq is in units of 16.66 MHz */
	rpnswreq = REG_FIELD_PREP(REQ_RATIO_MASK, encode_freq(freq));
	xe_mmio_write32(&gt->mmio, RPNSWREQ, rpnswreq);

	/* Sleep for a small time to allow pcode to respond */
	usleep_range(100, 300);

	pc_set_manual_rp_ctrl(pc, false);
}

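/*
 * For instance, pc_set_cur_freq(pc, 600) encodes 600 MHz as the ratio
 * DIV_ROUND_CLOSEST(600 * 3, 50) = 36 and writes it into the
 * REQ_RATIO_MASK field of RPNSWREQ while software control is briefly
 * enabled.
 */
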
static int pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
{
	/*
	 * Only check against the RPn..RP0 range. If max < min,
	 * min becomes a fixed request.
	 */
	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
		return -EINVAL;

	/*
	 * GuC policy is to elevate the minimum frequency to the efficient
	 * level. Our goal is to have the admin's choice respected.
	 */
	pc_action_set_param(pc, SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
			    freq < pc->rpe_freq);

	return pc_action_set_param(pc,
				   SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
				   freq);
}

static int pc_get_max_freq(struct xe_guc_pc *pc)
{
	u32 freq;

	freq = FIELD_GET(SLPC_MAX_UNSLICE_FREQ_MASK,
			 slpc_shared_data_read(pc, task_state_data.freq));

	return decode_freq(freq);
}

static int pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
{
	/*
	 * Only check against the RPn..RP0 range. If max < min,
	 * min becomes a fixed request.
	 * Also, overclocking is not supported.
	 */
	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
		return -EINVAL;

	return pc_action_set_param(pc,
				   SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
				   freq);
}

static void mtl_update_rpa_value(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(&gt->mmio, MTL_MPA_FREQUENCY);
	else
		reg = xe_mmio_read32(&gt->mmio, MTL_GT_RPA_FREQUENCY);

	pc->rpa_freq = decode_freq(REG_FIELD_GET(MTL_RPA_MASK, reg));
}

static void mtl_update_rpe_value(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(&gt->mmio, MTL_MPE_FREQUENCY);
	else
		reg = xe_mmio_read32(&gt->mmio, MTL_GT_RPE_FREQUENCY);

	pc->rpe_freq = decode_freq(REG_FIELD_GET(MTL_RPE_MASK, reg));
}

static void tgl_update_rpa_value(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 reg;

	/*
	 * For PVC we still need to use the fused RP0 as an approximation of
	 * RPa. On other platforms we get the resolved RPa directly from
	 * PCODE, at a different register.
	 */
	if (xe->info.platform == XE_PVC) {
		reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
		pc->rpa_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
	} else {
		reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC);
		pc->rpa_freq = REG_FIELD_GET(RPA_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
	}
}

static void tgl_update_rpe_value(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 reg;

	/*
	 * For PVC we still need to use the fused RP1 as an approximation of
	 * RPe. On other platforms we get the resolved RPe directly from
	 * PCODE, at a different register.
	 */
	if (xe->info.platform == XE_PVC) {
		reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
		pc->rpe_freq = REG_FIELD_GET(RP1_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
	} else {
		reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC);
		pc->rpe_freq = REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
	}
}

static void pc_update_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);

	if (GRAPHICS_VERx100(xe) >= 1270) {
		mtl_update_rpa_value(pc);
		mtl_update_rpe_value(pc);
	} else {
		tgl_update_rpa_value(pc);
		tgl_update_rpe_value(pc);
	}

	/*
	 * RPe is decided at runtime by PCODE. In the rare case where that's
	 * smaller than the fused min, we will trust PCODE and use it as our
	 * minimum.
	 */
	pc->rpn_freq = min(pc->rpn_freq, pc->rpe_freq);
}

/**
 * xe_guc_pc_get_act_freq - Get the actual running frequency
 * @pc: The GuC PC
 *
 * Returns: The actual running frequency, which might be 0 if the GT is in a
 * Render-C sleep state (RC6).
 */
u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 freq;

	/* When in RC6, the actual frequency reported will be 0. */
	if (GRAPHICS_VERx100(xe) >= 1270) {
		freq = xe_mmio_read32(&gt->mmio, MTL_MIRROR_TARGET_WP1);
		freq = REG_FIELD_GET(MTL_CAGF_MASK, freq);
	} else {
		freq = xe_mmio_read32(&gt->mmio, GT_PERF_STATUS);
		freq = REG_FIELD_GET(CAGF_MASK, freq);
	}

	freq = decode_freq(freq);

	return freq;
}

static u32 get_cur_freq(struct xe_gt *gt)
{
	u32 freq;

	freq = xe_mmio_read32(&gt->mmio, RPNSWREQ);
	freq = REG_FIELD_GET(REQ_RATIO_MASK, freq);
	return decode_freq(freq);
}

/**
 * xe_guc_pc_get_cur_freq_fw - Get the requested frequency with forcewake held
 * @pc: The GuC PC
 *
 * Returns: The requested frequency for that GT instance.
 */
u32 xe_guc_pc_get_cur_freq_fw(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	return get_cur_freq(gt);
}

/**
 * xe_guc_pc_get_cur_freq - Get the current requested frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -ETIMEDOUT if the GT forcewake domain could not be acquired.
 */
int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq)
{
	struct xe_gt *gt = pc_to_gt(pc);
	unsigned int fw_ref;

	/*
	 * GuC SLPC plays with the current freq request when GuCRC is enabled.
	 * Block RC6 for a more reliable read.
	 */
	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT)) {
		xe_force_wake_put(gt_to_fw(gt), fw_ref);
		return -ETIMEDOUT;
	}

	*freq = get_cur_freq(gt);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	return 0;
}
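
/*
 * Example usage (illustrative only; the calling context and locals are
 * assumptions, not part of this file):
 *
 *	u32 freq;
 *	int err;
 *
 *	err = xe_guc_pc_get_cur_freq(&gt->uc.guc.pc, &freq);
 *	if (!err)
 *		drm_info(&xe->drm, "requested freq: %u MHz\n", freq);
 *	else
 *		...forcewake could not be acquired, try again later...
 */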

/**
 * xe_guc_pc_get_rp0_freq - Get the RP0 freq
 * @pc: The GuC PC
 *
 * Returns: RP0 freq.
 */
u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc)
{
	return pc->rp0_freq;
}

/**
 * xe_guc_pc_get_rpa_freq - Get the RPa freq
 * @pc: The GuC PC
 *
 * Returns: RPa freq.
 */
u32 xe_guc_pc_get_rpa_freq(struct xe_guc_pc *pc)
{
	pc_update_rp_values(pc);

	return pc->rpa_freq;
}

/**
 * xe_guc_pc_get_rpe_freq - Get the RPe freq
 * @pc: The GuC PC
 *
 * Returns: RPe freq.
 */
u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc)
{
	pc_update_rp_values(pc);

	return pc->rpe_freq;
}

/**
 * xe_guc_pc_get_rpn_freq - Get the RPn freq
 * @pc: The GuC PC
 *
 * Returns: RPn freq.
 */
u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc)
{
	return pc->rpn_freq;
}

/**
 * xe_guc_pc_get_min_freq - Get the min operational frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
 */
int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq)
{
	int ret;

	xe_device_assert_mem_access(pc_to_xe(pc));

	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_action_query_task_state(pc);
	if (ret)
		goto out;

	*freq = pc_get_min_freq(pc);

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

/**
 * xe_guc_pc_set_min_freq - Set the minimum operational frequency
 * @pc: The GuC PC
 * @freq: The selected minimum frequency
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset),
 *         -EINVAL if value out of bounds.
 */
int xe_guc_pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
{
	int ret;

	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_set_min_freq(pc, freq);
	if (ret)
		goto out;

	pc->user_requested_min = freq;

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

/**
 * xe_guc_pc_get_max_freq - Get the maximum operational frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
 */
int xe_guc_pc_get_max_freq(struct xe_guc_pc *pc, u32 *freq)
{
	int ret;

	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_action_query_task_state(pc);
	if (ret)
		goto out;

	*freq = pc_get_max_freq(pc);

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

/**
 * xe_guc_pc_set_max_freq - Set the maximum operational frequency
 * @pc: The GuC PC
 * @freq: The selected maximum frequency value
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset),
 *         -EINVAL if value out of bounds.
 */
int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
{
	int ret;

	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_set_max_freq(pc, freq);
	if (ret)
		goto out;

	pc->user_requested_max = freq;

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}
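
/*
 * Example (illustrative): a caller such as a sysfs store handler would
 * typically propagate or retry on -EAGAIN, since the GuC PC may be in the
 * middle of a GT reset:
 *
 *	int err = xe_guc_pc_set_max_freq(pc, 1500);
 *
 *	if (err == -EAGAIN)
 *		...try again later...
 *	else if (err == -EINVAL)
 *		...1500 MHz is outside the [RPn, RP0] range...
 */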

/**
 * xe_guc_pc_c_status - Get the current GT C state
 * @pc: Xe_GuC_PC instance
 */
enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg, gt_c_state;

	if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
		reg = xe_mmio_read32(&gt->mmio, MTL_MIRROR_TARGET_WP1);
		gt_c_state = REG_FIELD_GET(MTL_CC_MASK, reg);
	} else {
		reg = xe_mmio_read32(&gt->mmio, GT_CORE_STATUS);
		gt_c_state = REG_FIELD_GET(RCN_MASK, reg);
	}

	switch (gt_c_state) {
	case GT_C6:
		return GT_IDLE_C6;
	case GT_C0:
		return GT_IDLE_C0;
	default:
		return GT_IDLE_UNKNOWN;
	}
}

/**
 * xe_guc_pc_rc6_residency - Get the RC6 residency counter
 * @pc: Xe_GuC_PC instance
 */
u64 xe_guc_pc_rc6_residency(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	reg = xe_mmio_read32(&gt->mmio, GT_GFX_RC6);

	return reg;
}

/**
 * xe_guc_pc_mc6_residency - Get the MC6 residency counter
 * @pc: Xe_GuC_PC instance
 */
u64 xe_guc_pc_mc6_residency(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u64 reg;

	reg = xe_mmio_read32(&gt->mmio, MTL_MEDIA_MC6);

	return reg;
}

static void mtl_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	xe_device_assert_mem_access(pc_to_xe(pc));

	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(&gt->mmio, MTL_MEDIAP_STATE_CAP);
	else
		reg = xe_mmio_read32(&gt->mmio, MTL_RP_STATE_CAP);

	pc->rp0_freq = decode_freq(REG_FIELD_GET(MTL_RP0_CAP_MASK, reg));

	pc->rpn_freq = decode_freq(REG_FIELD_GET(MTL_RPN_CAP_MASK, reg));
}

static void tgl_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 reg;

	xe_device_assert_mem_access(pc_to_xe(pc));

	if (xe->info.platform == XE_PVC)
		reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
	else
		reg = xe_mmio_read32(&gt->mmio, RP_STATE_CAP);
	pc->rp0_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
	pc->rpn_freq = REG_FIELD_GET(RPN_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}

static void pc_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);

	if (GRAPHICS_VERx100(xe) >= 1270)
		mtl_init_fused_rp_values(pc);
	else
		tgl_init_fused_rp_values(pc);
}

static u32 pc_max_freq_cap(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	if (XE_WA(gt, 22019338487)) {
		if (xe_gt_is_media_type(gt))
			return min(LNL_MERT_FREQ_CAP, pc->rp0_freq);
		else
			return min(BMG_MERT_FREQ_CAP, pc->rp0_freq);
	} else {
		return pc->rp0_freq;
	}
}

/**
 * xe_guc_pc_raise_unslice - Initialize RPx values and request a higher GT
 * frequency to allow faster GuC load times
 * @pc: Xe_GuC_PC instance
 */
void xe_guc_pc_raise_unslice(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
	pc_set_cur_freq(pc, pc_max_freq_cap(pc));
}

/**
 * xe_guc_pc_init_early - Initialize RPx values
 * @pc: Xe_GuC_PC instance
 */
void xe_guc_pc_init_early(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
	pc_init_fused_rp_values(pc);
}

static int pc_adjust_freq_bounds(struct xe_guc_pc *pc)
{
	struct xe_tile *tile = gt_to_tile(pc_to_gt(pc));
	int ret;

	lockdep_assert_held(&pc->freq_lock);

	ret = pc_action_query_task_state(pc);
	if (ret)
		goto out;

	/*
	 * GuC defaults to some RPmax that is not actually achievable without
	 * overclocking. Let's adjust it to the hardware RP0, which is the
	 * regular maximum.
	 */
	if (pc_get_max_freq(pc) > pc->rp0_freq) {
		ret = pc_set_max_freq(pc, pc->rp0_freq);
		if (ret)
			goto out;
	}

	/*
	 * The same thing happens on server platforms, where the min is
	 * listed as RPMax.
	 */
	if (pc_get_min_freq(pc) > pc->rp0_freq)
		ret = pc_set_min_freq(pc, pc->rp0_freq);

	if (XE_WA(tile->primary_gt, 14022085890))
		ret = pc_set_min_freq(pc, max(BMG_MIN_FREQ, pc_get_min_freq(pc)));

out:
	return ret;
}

static int pc_adjust_requested_freq(struct xe_guc_pc *pc)
{
	int ret = 0;

	lockdep_assert_held(&pc->freq_lock);

	if (pc->user_requested_min != 0) {
		ret = pc_set_min_freq(pc, pc->user_requested_min);
		if (ret)
			return ret;
	}

	if (pc->user_requested_max != 0) {
		ret = pc_set_max_freq(pc, pc->user_requested_max);
		if (ret)
			return ret;
	}

	return ret;
}

static int pc_set_mert_freq_cap(struct xe_guc_pc *pc)
{
	int ret = 0;

	if (XE_WA(pc_to_gt(pc), 22019338487)) {
		/*
		 * Get updated min/max and stash them.
		 */
		ret = xe_guc_pc_get_min_freq(pc, &pc->stashed_min_freq);
		if (!ret)
			ret = xe_guc_pc_get_max_freq(pc, &pc->stashed_max_freq);
		if (ret)
			return ret;

		/*
		 * Ensure min and max are bound by MERT_FREQ_CAP until the
		 * driver loads.
		 */
		mutex_lock(&pc->freq_lock);
		ret = pc_set_min_freq(pc, min(pc->rpe_freq, pc_max_freq_cap(pc)));
		if (!ret)
			ret = pc_set_max_freq(pc, min(pc->rp0_freq, pc_max_freq_cap(pc)));
		mutex_unlock(&pc->freq_lock);
	}

	return ret;
}

/**
 * xe_guc_pc_restore_stashed_freq - Set min/max back to stashed values
 * @pc: The GuC PC
 *
 * Returns: 0 on success,
 *          error code on failure
 */
int xe_guc_pc_restore_stashed_freq(struct xe_guc_pc *pc)
{
	int ret = 0;

	if (IS_SRIOV_VF(pc_to_xe(pc)) || pc_to_xe(pc)->info.skip_guc_pc)
		return 0;

	mutex_lock(&pc->freq_lock);
	ret = pc_set_max_freq(pc, pc->stashed_max_freq);
	if (!ret)
		ret = pc_set_min_freq(pc, pc->stashed_min_freq);
	mutex_unlock(&pc->freq_lock);

	return ret;
}

/**
 * xe_guc_pc_gucrc_disable - Disable GuC RC
 * @pc: Xe_GuC_PC instance
 *
 * Disables GuC RC by taking control of RC6 back from GuC.
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);
	struct xe_gt *gt = pc_to_gt(pc);
	unsigned int fw_ref;
	int ret = 0;

	if (xe->info.skip_guc_pc)
		return 0;

	ret = pc_action_setup_gucrc(pc, GUCRC_HOST_CONTROL);
	if (ret)
		return ret;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
		xe_force_wake_put(gt_to_fw(gt), fw_ref);
		return -ETIMEDOUT;
	}

	xe_gt_idle_disable_c6(gt);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);

	return 0;
}

/**
 * xe_guc_pc_override_gucrc_mode - Override GUCRC mode
 * @pc: Xe_GuC_PC instance
 * @mode: new value of the mode.
 *
 * Return: 0 on success, negative error code on error
 */
int xe_guc_pc_override_gucrc_mode(struct xe_guc_pc *pc, enum slpc_gucrc_mode mode)
{
	int ret;

	xe_pm_runtime_get(pc_to_xe(pc));
	ret = pc_action_set_param(pc, SLPC_PARAM_PWRGATE_RC_MODE, mode);
	xe_pm_runtime_put(pc_to_xe(pc));

	return ret;
}

/**
 * xe_guc_pc_unset_gucrc_mode - Unset GUCRC mode override
 * @pc: Xe_GuC_PC instance
 *
 * Return: 0 on success, negative error code on error
 */
int xe_guc_pc_unset_gucrc_mode(struct xe_guc_pc *pc)
{
	int ret;

	xe_pm_runtime_get(pc_to_xe(pc));
	ret = pc_action_unset_param(pc, SLPC_PARAM_PWRGATE_RC_MODE);
	xe_pm_runtime_put(pc_to_xe(pc));

	return ret;
}

static void pc_init_pcode_freq(struct xe_guc_pc *pc)
{
	u32 min = DIV_ROUND_CLOSEST(pc->rpn_freq, GT_FREQUENCY_MULTIPLIER);
	u32 max = DIV_ROUND_CLOSEST(pc->rp0_freq, GT_FREQUENCY_MULTIPLIER);

	XE_WARN_ON(xe_pcode_init_min_freq_table(gt_to_tile(pc_to_gt(pc)), min, max));
}
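
/*
 * Unit note (worked example): as the conversion above suggests, the PCODE
 * min-freq table is fed values in units of GT_FREQUENCY_MULTIPLIER
 * (50 MHz); an rpn_freq of 300 MHz, say, is passed down as
 * DIV_ROUND_CLOSEST(300, 50) = 6.
 */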

static int pc_init_freqs(struct xe_guc_pc *pc)
{
	int ret;

	mutex_lock(&pc->freq_lock);

	ret = pc_adjust_freq_bounds(pc);
	if (ret)
		goto out;

	ret = pc_adjust_requested_freq(pc);
	if (ret)
		goto out;

	pc_update_rp_values(pc);

	pc_init_pcode_freq(pc);

	/*
	 * The frequencies are only really ready for use once the
	 * user-requested ones have been restored.
	 */
	pc->freq_ready = true;

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

static int pc_action_set_strategy(struct xe_guc_pc *pc, u32 val)
{
	return pc_action_set_param(pc, SLPC_PARAM_STRATEGIES, val);
}

/**
 * xe_guc_pc_start - Start GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_pc_start(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);
	struct xe_gt *gt = pc_to_gt(pc);
	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
	unsigned int fw_ref;
	ktime_t earlier;
	int ret;

	xe_gt_assert(gt, xe_device_uc_enabled(xe));

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT)) {
		xe_force_wake_put(gt_to_fw(gt), fw_ref);
		return -ETIMEDOUT;
	}

	if (xe->info.skip_guc_pc) {
		if (xe->info.platform != XE_PVC)
			xe_gt_idle_enable_c6(gt);

		/* Request max possible since dynamic freq mgmt is not enabled */
		pc_set_cur_freq(pc, UINT_MAX);

		ret = 0;
		goto out;
	}

	xe_map_memset(xe, &pc->bo->vmap, 0, 0, size);
	slpc_shared_data_write(pc, header.size, size);

	earlier = ktime_get();
	ret = pc_action_reset(pc);
	if (ret)
		goto out;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
			      SLPC_RESET_TIMEOUT_MS)) {
		xe_gt_warn(gt, "GuC PC start taking longer than normal [freq = %dMHz (req = %dMHz), perf_limit_reasons = 0x%08X]\n",
			   xe_guc_pc_get_act_freq(pc), get_cur_freq(gt),
			   xe_gt_throttle_get_limit_reasons(gt));

		if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
				      SLPC_RESET_EXTENDED_TIMEOUT_MS)) {
			xe_gt_err(gt, "GuC PC Start failed: Dynamic GT frequency control and GT sleep states are now disabled.\n");
			ret = -EIO;
			goto out;
		}

		xe_gt_warn(gt, "GuC PC excessive start time: %lldms\n",
			   ktime_ms_delta(ktime_get(), earlier));
	}

	ret = pc_init_freqs(pc);
	if (ret)
		goto out;

	ret = pc_set_mert_freq_cap(pc);
	if (ret)
		goto out;

	if (xe->info.platform == XE_PVC) {
		xe_guc_pc_gucrc_disable(pc);
		ret = 0;
		goto out;
	}

	ret = pc_action_setup_gucrc(pc, GUCRC_FIRMWARE_CONTROL);
	if (ret)
		goto out;

	/* Enable SLPC Optimized Strategy for compute */
	ret = pc_action_set_strategy(pc, SLPC_OPTIMIZED_STRATEGY_COMPUTE);

out:
	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	return ret;
}

/**
 * xe_guc_pc_stop - Stop GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 *
 * Return: 0 (this currently cannot fail).
 */
int xe_guc_pc_stop(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);

	if (xe->info.skip_guc_pc) {
		xe_gt_idle_disable_c6(pc_to_gt(pc));
		return 0;
	}

	mutex_lock(&pc->freq_lock);
	pc->freq_ready = false;
	mutex_unlock(&pc->freq_lock);

	return 0;
}

/**
 * xe_guc_pc_fini_hw - Finalize GuC's Power Conservation component
 * @arg: opaque pointer that should point to Xe_GuC_PC instance
 */
static void xe_guc_pc_fini_hw(void *arg)
{
	struct xe_guc_pc *pc = arg;
	struct xe_device *xe = pc_to_xe(pc);
	unsigned int fw_ref;

	if (xe_device_wedged(xe))
		return;

	fw_ref = xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL);
	xe_guc_pc_gucrc_disable(pc);
	XE_WARN_ON(xe_guc_pc_stop(pc));

	/* Bind requested freq to mert_freq_cap before unload */
	pc_set_cur_freq(pc, min(pc_max_freq_cap(pc), pc->rpe_freq));

	xe_force_wake_put(gt_to_fw(pc_to_gt(pc)), fw_ref);
}

/**
 * xe_guc_pc_init - Initialize GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_pc_init(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_bo *bo;
	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
	int err;

	if (xe->info.skip_guc_pc)
		return 0;

	err = drmm_mutex_init(&xe->drm, &pc->freq_lock);
	if (err)
		return err;

	bo = xe_managed_bo_create_pin_map(xe, tile, size,
					  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
					  XE_BO_FLAG_GGTT |
					  XE_BO_FLAG_GGTT_INVALIDATE |
					  XE_BO_FLAG_PINNED_NORESTORE);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	pc->bo = bo;

	return devm_add_action_or_reset(xe->drm.dev, xe_guc_pc_fini_hw, pc);
}

static const char *pc_get_state_string(struct xe_guc_pc *pc)
{
	switch (slpc_shared_data_read(pc, header.global_state)) {
	case SLPC_GLOBAL_STATE_NOT_RUNNING:
		return "not running";
	case SLPC_GLOBAL_STATE_INITIALIZING:
		return "initializing";
	case SLPC_GLOBAL_STATE_RESETTING:
		return "resetting";
	case SLPC_GLOBAL_STATE_RUNNING:
		return "running";
	case SLPC_GLOBAL_STATE_SHUTTING_DOWN:
		return "shutting down";
	case SLPC_GLOBAL_STATE_ERROR:
		return "error";
	default:
		return "unknown";
	}
}

/**
 * xe_guc_pc_print - Print GuC's Power Conservation information for debug
 * @pc: Xe_GuC_PC instance
 * @p: drm_printer
 */
void xe_guc_pc_print(struct xe_guc_pc *pc, struct drm_printer *p)
{
	drm_printf(p, "SLPC Shared Data Header:\n");
	drm_printf(p, "\tSize: %x\n", slpc_shared_data_read(pc, header.size));
	drm_printf(p, "\tGlobal State: %s\n", pc_get_state_string(pc));

	if (pc_action_query_task_state(pc))
		return;

	drm_printf(p, "\nSLPC Tasks Status:\n");
	drm_printf(p, "\tGTPERF enabled: %s\n",
		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
			      SLPC_GTPERF_TASK_ENABLED));
	drm_printf(p, "\tDCC enabled: %s\n",
		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
			      SLPC_DCC_TASK_ENABLED));
	drm_printf(p, "\tDCC in use: %s\n",
		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
			      SLPC_IN_DCC));
	drm_printf(p, "\tBalancer enabled: %s\n",
		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
			      SLPC_BALANCER_ENABLED));
	drm_printf(p, "\tIBC enabled: %s\n",
		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
			      SLPC_IBC_TASK_ENABLED));
	drm_printf(p, "\tBalancer IA LMT enabled: %s\n",
		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
			      SLPC_BALANCER_IA_LMT_ENABLED));
	drm_printf(p, "\tBalancer IA LMT active: %s\n",
		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
			      SLPC_BALANCER_IA_LMT_ACTIVE));
}