xref: /linux/drivers/gpu/drm/xe/xe_guc_pc.c (revision f9f0b4a1f35d39a1a2a2f8ec46eb7b81efc70a63)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include "xe_guc_pc.h"
7 
8 #include <linux/cleanup.h>
9 #include <linux/delay.h>
10 #include <linux/iopoll.h>
11 #include <linux/jiffies.h>
12 #include <linux/ktime.h>
13 #include <linux/wait_bit.h>
14 
15 #include <drm/drm_managed.h>
16 #include <drm/drm_print.h>
17 #include <generated/xe_device_wa_oob.h>
18 #include <generated/xe_wa_oob.h>
19 
20 #include "abi/guc_actions_slpc_abi.h"
21 #include "regs/xe_gt_regs.h"
22 #include "regs/xe_regs.h"
23 #include "xe_bo.h"
24 #include "xe_device.h"
25 #include "xe_force_wake.h"
26 #include "xe_gt.h"
27 #include "xe_gt_idle.h"
28 #include "xe_gt_printk.h"
29 #include "xe_gt_throttle.h"
30 #include "xe_gt_types.h"
31 #include "xe_guc.h"
32 #include "xe_guc_ct.h"
33 #include "xe_map.h"
34 #include "xe_mmio.h"
35 #include "xe_pcode.h"
36 #include "xe_pm.h"
37 #include "xe_sriov.h"
38 #include "xe_wa.h"
39 
40 #define MCHBAR_MIRROR_BASE_SNB	0x140000
41 
42 #define RP_STATE_CAP		XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5998)
43 #define   RP0_MASK		REG_GENMASK(7, 0)
44 #define   RP1_MASK		REG_GENMASK(15, 8)
45 #define   RPN_MASK		REG_GENMASK(23, 16)
46 
47 #define FREQ_INFO_REC	XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5ef0)
48 #define   RPE_MASK		REG_GENMASK(15, 8)
49 #define   RPA_MASK		REG_GENMASK(31, 16)
50 
51 #define GT_PERF_STATUS		XE_REG(0x1381b4)
52 #define   CAGF_MASK	REG_GENMASK(19, 11)
53 
54 #define GT_FREQUENCY_MULTIPLIER	50
55 #define GT_FREQUENCY_SCALER	3
56 
57 #define LNL_MERT_FREQ_CAP	800
58 #define BMG_MERT_FREQ_CAP	2133
59 #define BMG_MIN_FREQ		1200
60 #define BMG_MERT_FLUSH_FREQ_CAP	2600
61 
62 #define SLPC_RESET_TIMEOUT_MS 5 /* roughly 5ms, but no need for precision */
63 #define SLPC_RESET_EXTENDED_TIMEOUT_MS 1000 /* To be used only at pc_start */
64 #define SLPC_ACT_FREQ_TIMEOUT_MS 100
65 
66 /**
67  * DOC: GuC Power Conservation (PC)
68  *
69  * GuC Power Conservation (PC) supports multiple features for the most
70  * efficient and performing use of the GT when GuC submission is enabled,
71  * including frequency management, Render-C states management, and various
72  * algorithms for power balancing.
73  *
74  * Single Loop Power Conservation (SLPC) is the name given to the suite of
75  * connected power conservation features in the GuC firmware. The firmware
76  * exposes a programming interface to the host for the control of SLPC.
77  *
78  * Frequency management:
79  * ---------------------
80  *
81  * Xe driver enables SLPC with all of its defaults features and frequency
82  * selection, which varies per platform.
83  *
84  * Power profiles add another level of control to SLPC. When power saving
85  * profile is chosen, SLPC will use conservative thresholds to ramp frequency,
86  * thus saving power. Base profile is default and ensures balanced performance
87  * for any workload.
88  *
89  * Render-C States:
90  * ----------------
91  *
92  * Render-C states is also a GuC PC feature that is now enabled in Xe for
93  * all platforms.
94  *
95  * Implementation details:
96  * -----------------------
97  * The implementation for GuC Power Management features is split as follows:
98  *
99  * xe_guc_rc:  Logic for handling GuC RC
100  * xe_gt_idle: Host side logic for RC6 and Coarse Power gating (CPG)
101  * xe_guc_pc:  Logic for all other SLPC related features
102  *
103  * There is some cross interaction between these where host C6 will need to be
104  * enabled when we plan to skip GuC RC. Also, the GuC RC mode is currently
105  * overridden through 0x3003 which is an SLPC H2G call.
106  */
107 
/* Upcast: @pc is embedded in struct xe_guc, so recover the owning GuC. */
static struct xe_guc *pc_to_guc(struct xe_guc_pc *pc)
{
	return container_of(pc, struct xe_guc, pc);
}

/* CT channel used for all SLPC host-to-GuC (H2G) requests. */
static struct xe_guc_ct *pc_to_ct(struct xe_guc_pc *pc)
{
	return &pc_to_guc(pc)->ct;
}

/* GT that this PC instance controls. */
static struct xe_gt *pc_to_gt(struct xe_guc_pc *pc)
{
	return guc_to_gt(pc_to_guc(pc));
}

/* Owning xe device; used for asserts, platform and wedged checks. */
static struct xe_device *pc_to_xe(struct xe_guc_pc *pc)
{
	return guc_to_xe(pc_to_guc(pc));
}

/* CPU mapping of the SLPC shared-data buffer object. */
static struct iosys_map *pc_to_maps(struct xe_guc_pc *pc)
{
	return &pc->bo->vmap;
}
132 
/* Read/write one field of the SLPC shared-data page mapped at offset 0 of pc->bo. */
#define slpc_shared_data_read(pc_, field_) \
	xe_map_rd_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
			struct slpc_shared_data, field_)

#define slpc_shared_data_write(pc_, field_, val_) \
	xe_map_wr_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
			struct slpc_shared_data, field_, val_)

/* Pack an SLPC event id and argument count into dword 1 of the H2G request. */
#define SLPC_EVENT(id, count) \
	(FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID, id) | \
	 FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, count))
144 
/*
 * Poll the SLPC global state published in the shared-data page until it
 * reaches @target_state, checking every 20us for up to @timeout_ms.
 *
 * Return: 0 on success, negative error code from poll_timeout_us() on
 * timeout.
 */
static int wait_for_pc_state(struct xe_guc_pc *pc,
			     enum slpc_global_state target_state,
			     int timeout_ms)
{
	enum slpc_global_state state;

	xe_device_assert_mem_access(pc_to_xe(pc));

	return poll_timeout_us(state = slpc_shared_data_read(pc, header.global_state),
			       state == target_state,
			       20, timeout_ms * USEC_PER_MSEC, false);
}
157 
158 static int wait_for_flush_complete(struct xe_guc_pc *pc)
159 {
160 	const unsigned long timeout = msecs_to_jiffies(30);
161 
162 	if (!wait_var_event_timeout(&pc->flush_freq_limit,
163 				    !atomic_read(&pc->flush_freq_limit),
164 				    timeout))
165 		return -ETIMEDOUT;
166 
167 	return 0;
168 }
169 
/*
 * Poll the actual (hardware) frequency every 20us until it drops to
 * @max_limit or below, or SLPC_ACT_FREQ_TIMEOUT_MS elapses.
 *
 * Return: 0 on success, negative error code on timeout.
 */
static int wait_for_act_freq_max_limit(struct xe_guc_pc *pc, u32 max_limit)
{
	u32 freq;

	return poll_timeout_us(freq = xe_guc_pc_get_act_freq(pc),
			       freq <= max_limit,
			       20, SLPC_ACT_FREQ_TIMEOUT_MS * USEC_PER_MSEC, false);
}
178 
/*
 * Ask GuC to (re)initialize SLPC, handing over the GGTT address of the
 * shared-data buffer. Fire-and-forget: completion is observed separately via
 * wait_for_pc_state(). Send failures are logged unless the device is wedged
 * and the CT send was cancelled for that reason.
 */
static int pc_action_reset(struct xe_guc_pc *pc)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_RESET, 2),
		xe_bo_ggtt_addr(pc->bo),
		0,
	};
	int ret;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
		xe_gt_err(pc_to_gt(pc), "GuC PC reset failed: %pe\n",
			  ERR_PTR(ret));

	return ret;
}
197 
/*
 * Ask GuC to refresh the task-state snapshot in the SLPC shared data.
 * Requires SLPC to be RUNNING; returns -EAGAIN otherwise (e.g. mid-reset).
 * Uses a blocking CT send so the shared data is valid for reading as soon
 * as this returns 0.
 */
static int pc_action_query_task_state(struct xe_guc_pc *pc)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
		xe_bo_ggtt_addr(pc->bo),
		0,
	};
	int ret;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
			      SLPC_RESET_TIMEOUT_MS))
		return -EAGAIN;

	/* Blocking here to ensure the results are ready before reading them */
	ret = xe_guc_ct_send_block(ct, action, ARRAY_SIZE(action));
	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
		xe_gt_err(pc_to_gt(pc), "GuC PC query task state failed: %pe\n",
			  ERR_PTR(ret));

	return ret;
}
221 
/*
 * Set one SLPC parameter (@id) to @value via a non-blocking H2G request.
 * Requires SLPC to be RUNNING; returns -EAGAIN otherwise (e.g. mid-reset).
 */
static int pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
		id,
		value,
	};
	int ret;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
			      SLPC_RESET_TIMEOUT_MS))
		return -EAGAIN;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
		xe_gt_err(pc_to_gt(pc), "GuC PC set param[%u]=%u failed: %pe\n",
			  id, value, ERR_PTR(ret));

	return ret;
}
244 
245 static int pc_action_unset_param(struct xe_guc_pc *pc, u8 id)
246 {
247 	u32 action[] = {
248 		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
249 		SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1),
250 		id,
251 	};
252 	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
253 	int ret;
254 
255 	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
256 			      SLPC_RESET_TIMEOUT_MS))
257 		return -EAGAIN;
258 
259 	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
260 	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
261 		xe_gt_err(pc_to_gt(pc), "GuC PC unset param failed: %pe",
262 			  ERR_PTR(ret));
263 
264 	return ret;
265 }
266 
/**
 * xe_guc_pc_action_set_param() - Set value of SLPC param
 * @pc: Xe_GuC_PC instance
 * @id: Param id
 * @value: Value to set
 *
 * This function can be used to set any SLPC param. The caller must hold an
 * active memory-access (runtime PM) reference, which is asserted here.
 *
 * Return: 0 on success, negative error code on failure.
 */
int xe_guc_pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value)
{
	xe_device_assert_mem_access(pc_to_xe(pc));
	return pc_action_set_param(pc, id, value);
}
282 
/**
 * xe_guc_pc_action_unset_param() - Revert to default value
 * @pc: Xe_GuC_PC instance
 * @id: Param id
 *
 * This function can be used to revert any SLPC param to its default value.
 * The caller must hold an active memory-access (runtime PM) reference,
 * which is asserted here.
 *
 * Return: 0 on success, negative error code on failure.
 */
int xe_guc_pc_action_unset_param(struct xe_guc_pc *pc, u8 id)
{
	xe_device_assert_mem_access(pc_to_xe(pc));
	return pc_action_unset_param(pc, id);
}
297 
298 static u32 decode_freq(u32 raw)
299 {
300 	return DIV_ROUND_CLOSEST(raw * GT_FREQUENCY_MULTIPLIER,
301 				 GT_FREQUENCY_SCALER);
302 }
303 
304 static u32 encode_freq(u32 freq)
305 {
306 	return DIV_ROUND_CLOSEST(freq * GT_FREQUENCY_SCALER,
307 				 GT_FREQUENCY_MULTIPLIER);
308 }
309 
310 static u32 pc_get_min_freq(struct xe_guc_pc *pc)
311 {
312 	u32 freq;
313 
314 	freq = FIELD_GET(SLPC_MIN_UNSLICE_FREQ_MASK,
315 			 slpc_shared_data_read(pc, task_state_data.freq));
316 
317 	return decode_freq(freq);
318 }
319 
/*
 * Toggle manual RPS control: while enabled, punit honours software frequency
 * requests written to RPNSWREQ instead of SLPC's.
 */
static void pc_set_manual_rp_ctrl(struct xe_guc_pc *pc, bool enable)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 state = enable ? RPSWCTL_ENABLE : RPSWCTL_DISABLE;

	/* Allow/Disallow punit to process software freq requests */
	xe_mmio_write32(&gt->mmio, RP_CONTROL, state);
}
328 
/*
 * Directly request @freq (MHz) from pcode, bypassing SLPC. The sequence is
 * order-critical: enable manual RPS control, write the request, give pcode
 * time to react, then hand control back.
 */
static void pc_set_cur_freq(struct xe_guc_pc *pc, u32 freq)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 rpnswreq;

	pc_set_manual_rp_ctrl(pc, true);

	/* Req freq is in units of 16.66 Mhz */
	rpnswreq = REG_FIELD_PREP(REQ_RATIO_MASK, encode_freq(freq));
	xe_mmio_write32(&gt->mmio, RPNSWREQ, rpnswreq);

	/* Sleep for a small time to allow pcode to respond */
	usleep_range(100, 300);

	pc_set_manual_rp_ctrl(pc, false);
}
345 
/*
 * Request a new SLPC minimum frequency, in MHz. Values outside the
 * RPn..RP0 fused range are rejected with -EINVAL.
 */
static int pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
{
	/*
	 * Let's only check for the rpn-rp0 range. If max < min,
	 * min becomes a fixed request.
	 */
	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
		return -EINVAL;

	/*
	 * GuC policy is to elevate minimum frequency to the efficient levels
	 * Our goal is to have the admin choices respected.
	 *
	 * NOTE(review): the return value of this first set is not checked;
	 * the min-freq request below proceeds regardless - confirm this
	 * best-effort behavior is intended.
	 */
	pc_action_set_param(pc, SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
			    freq < xe_guc_pc_get_rpe_freq(pc));

	return pc_action_set_param(pc,
				   SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
				   freq);
}
366 
367 static int pc_get_max_freq(struct xe_guc_pc *pc)
368 {
369 	u32 freq;
370 
371 	freq = FIELD_GET(SLPC_MAX_UNSLICE_FREQ_MASK,
372 			 slpc_shared_data_read(pc, task_state_data.freq));
373 
374 	return decode_freq(freq);
375 }
376 
/*
 * Request a new SLPC maximum frequency, in MHz. Values outside the
 * RPn..RP0 fused range are rejected with -EINVAL (no overclocking).
 */
static int pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
{
	/*
	 * Let's only check for the rpn-rp0 range. If max < min,
	 * min becomes a fixed request.
	 * Also, overclocking is not supported.
	 */
	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
		return -EINVAL;

	return pc_action_set_param(pc,
				   SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
				   freq);
}
391 
/*
 * MTL+ (graphics IP 12.70+): read the resolved RPa frequency from the
 * media or primary GT register, converted to MHz.
 */
static u32 mtl_get_rpa_freq(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(&gt->mmio, MTL_MPA_FREQUENCY);
	else
		reg = xe_mmio_read32(&gt->mmio, MTL_GT_RPA_FREQUENCY);

	return decode_freq(REG_FIELD_GET(MTL_RPA_MASK, reg));
}
404 
/*
 * MTL+ (graphics IP 12.70+): read the resolved RPe (efficient) frequency
 * from the media or primary GT register, converted to MHz.
 */
static u32 mtl_get_rpe_freq(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(&gt->mmio, MTL_MPE_FREQUENCY);
	else
		reg = xe_mmio_read32(&gt->mmio, MTL_GT_RPE_FREQUENCY);

	return decode_freq(REG_FIELD_GET(MTL_RPE_MASK, reg));
}
417 
418 static u32 pvc_get_rpa_freq(struct xe_guc_pc *pc)
419 {
420 	/*
421 	 * For PVC we still need to use fused RP0 as the approximation for RPa
422 	 * For other platforms than PVC we get the resolved RPa directly from
423 	 * PCODE at a different register
424 	 */
425 
426 	struct xe_gt *gt = pc_to_gt(pc);
427 	u32 reg;
428 
429 	reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
430 	return REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
431 }
432 
433 static u32 tgl_get_rpa_freq(struct xe_guc_pc *pc)
434 {
435 	struct xe_gt *gt = pc_to_gt(pc);
436 	u32 reg;
437 
438 	reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC);
439 	return REG_FIELD_GET(RPA_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
440 }
441 
/* PVC: approximate RPe with the fused RP1 value, in MHz. */
static u32 pvc_get_rpe_freq(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	/*
	 * For PVC we still need to use fused RP1 as the approximation for RPe
	 */
	reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
	return REG_FIELD_GET(RP1_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}
453 
/* Pre-MTL (non-PVC): read the resolved RPe from FREQ_INFO_REC, in MHz. */
static u32 tgl_get_rpe_freq(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	/*
	 * For other platforms than PVC, we get the resolved RPe directly from
	 * PCODE at a different register
	 */
	reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC);
	return REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}
466 
/**
 * xe_guc_pc_get_act_freq - Get Actual running frequency
 * @pc: The GuC PC
 *
 * Returns: The Actual running frequency. Which might be 0 if GT is in Render-C sleep state (RC6).
 */
u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 freq;

	/* When in RC6, actual frequency reported will be 0. */
	if (GRAPHICS_VERx100(xe) >= 1270) {
		/* MTL+: CAGF lives in the workpoint mirror register */
		freq = xe_mmio_read32(&gt->mmio, MTL_MIRROR_TARGET_WP1);
		freq = REG_FIELD_GET(MTL_CAGF_MASK, freq);
	} else {
		freq = xe_mmio_read32(&gt->mmio, GT_PERF_STATUS);
		freq = REG_FIELD_GET(CAGF_MASK, freq);
	}

	/* Hardware reports ratio units; convert to MHz */
	freq = decode_freq(freq);

	return freq;
}
492 
493 static u32 get_cur_freq(struct xe_gt *gt)
494 {
495 	u32 freq;
496 
497 	freq = xe_mmio_read32(&gt->mmio, RPNSWREQ);
498 	freq = REG_FIELD_GET(REQ_RATIO_MASK, freq);
499 	return decode_freq(freq);
500 }
501 
/**
 * xe_guc_pc_get_cur_freq_fw - With fw held, get requested frequency
 * @pc: The GuC PC
 *
 * Caller must already hold the XE_FW_GT forcewake domain (asserted here);
 * use xe_guc_pc_get_cur_freq() otherwise.
 *
 * Returns: the requested frequency for that GT instance
 */
u32 xe_guc_pc_get_cur_freq_fw(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	return get_cur_freq(gt);
}
516 
/**
 * xe_guc_pc_get_cur_freq - Get Current requested frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Takes the XE_FW_GT forcewake for the duration of the read (released by
 * the scope-based CLASS cleanup).
 *
 * Returns: 0 on success,
 *         -ETIMEDOUT if the forcewake domain could not be acquired.
 */
int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq)
{
	struct xe_gt *gt = pc_to_gt(pc);

	/*
	 * GuC SLPC plays with cur freq request when GuCRC is enabled
	 * Block RC6 for a more reliable read.
	 */
	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
	if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FW_GT))
		return -ETIMEDOUT;

	*freq = get_cur_freq(gt);

	return 0;
}
541 
/**
 * xe_guc_pc_get_rp0_freq - Get the RP0 freq
 * @pc: The GuC PC
 *
 * Returns: RP0 freq (MHz), cached at init by pc_init_fused_rp_values().
 */
u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc)
{
	return pc->rp0_freq;
}
552 
553 /**
554  * xe_guc_pc_get_rpa_freq - Get the RPa freq
555  * @pc: The GuC PC
556  *
557  * Returns: RPa freq.
558  */
559 u32 xe_guc_pc_get_rpa_freq(struct xe_guc_pc *pc)
560 {
561 	struct xe_gt *gt = pc_to_gt(pc);
562 	struct xe_device *xe = gt_to_xe(gt);
563 
564 	if (GRAPHICS_VERx100(xe) == 1260)
565 		return pvc_get_rpa_freq(pc);
566 	else if (GRAPHICS_VERx100(xe) >= 1270)
567 		return mtl_get_rpa_freq(pc);
568 	else
569 		return tgl_get_rpa_freq(pc);
570 }
571 
/**
 * xe_guc_pc_get_rpe_freq - Get the RPe freq
 * @pc: The GuC PC
 *
 * Reads the platform-appropriate register: fused RP1 approximation on PVC
 * (graphics IP 12.60), resolved RPe on MTL+ (12.70+), FREQ_INFO_REC
 * otherwise.
 *
 * Returns: RPe freq (MHz).
 */
u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);
	u32 freq;

	if (GRAPHICS_VERx100(xe) == 1260)
		freq = pvc_get_rpe_freq(pc);
	else if (GRAPHICS_VERx100(xe) >= 1270)
		freq = mtl_get_rpe_freq(pc);
	else
		freq = tgl_get_rpe_freq(pc);

	return freq;
}
592 
/**
 * xe_guc_pc_get_rpn_freq - Get the RPn freq
 * @pc: The GuC PC
 *
 * Returns: RPn freq (MHz), cached at init by pc_init_fused_rp_values().
 */
u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc)
{
	return pc->rpn_freq;
}
603 
/*
 * Read the effective min frequency from GuC with pc->freq_lock already held.
 * Refreshes the shared-data snapshot first via a blocking task-state query.
 *
 * Return: 0 on success, -EAGAIN if the PC is not ready (e.g. mid GT reset),
 * or a CT send error.
 */
static int xe_guc_pc_get_min_freq_locked(struct xe_guc_pc *pc, u32 *freq)
{
	int ret;

	lockdep_assert_held(&pc->freq_lock);

	/* Might be in the middle of a gt reset */
	if (!pc->freq_ready)
		return -EAGAIN;

	ret = pc_action_query_task_state(pc);
	if (ret)
		return ret;

	*freq = pc_get_min_freq(pc);

	return 0;
}
622 
/**
 * xe_guc_pc_get_min_freq - Get the min operational frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
 */
int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq)
{
	/* Scope-based lock; released automatically on return */
	guard(mutex)(&pc->freq_lock);

	return xe_guc_pc_get_min_freq_locked(pc, freq);
}
637 
/*
 * Set the min frequency with pc->freq_lock already held, recording the value
 * as a user request so it survives GT resets (see pc_adjust_requested_freq()).
 *
 * Return: 0 on success, -EAGAIN if the PC is not ready, -EINVAL if @freq is
 * out of the RPn..RP0 range.
 */
static int xe_guc_pc_set_min_freq_locked(struct xe_guc_pc *pc, u32 freq)
{
	int ret;

	lockdep_assert_held(&pc->freq_lock);

	/* Might be in the middle of a gt reset */
	if (!pc->freq_ready)
		return -EAGAIN;

	ret = pc_set_min_freq(pc, freq);
	if (ret)
		return ret;

	/* Remember the admin's choice so it can be restored after reset */
	pc->user_requested_min = freq;

	return 0;
}
656 
/**
 * xe_guc_pc_set_min_freq - Set the minimal operational frequency
 * @pc: The GuC PC
 * @freq: The selected minimal frequency
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset),
 *         -EINVAL if value out of bounds.
 */
int xe_guc_pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
{
	/* Scope-based lock; released automatically on return */
	guard(mutex)(&pc->freq_lock);

	return xe_guc_pc_set_min_freq_locked(pc, freq);
}
672 
/*
 * Read the effective max frequency from GuC with pc->freq_lock already held.
 * Refreshes the shared-data snapshot first via a blocking task-state query.
 *
 * Return: 0 on success, -EAGAIN if the PC is not ready (e.g. mid GT reset),
 * or a CT send error.
 */
static int xe_guc_pc_get_max_freq_locked(struct xe_guc_pc *pc, u32 *freq)
{
	int ret;

	lockdep_assert_held(&pc->freq_lock);

	/* Might be in the middle of a gt reset */
	if (!pc->freq_ready)
		return -EAGAIN;

	ret = pc_action_query_task_state(pc);
	if (ret)
		return ret;

	*freq = pc_get_max_freq(pc);

	return 0;
}
691 
/**
 * xe_guc_pc_get_max_freq - Get Maximum operational frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
 */
int xe_guc_pc_get_max_freq(struct xe_guc_pc *pc, u32 *freq)
{
	/* Scope-based lock; released automatically on return */
	guard(mutex)(&pc->freq_lock);

	return xe_guc_pc_get_max_freq_locked(pc, freq);
}
706 
/*
 * Set the max frequency with pc->freq_lock already held, recording the value
 * as a user request so it survives GT resets (see pc_adjust_requested_freq()).
 *
 * Return: 0 on success, -EAGAIN if the PC is not ready, -EINVAL if @freq is
 * out of the RPn..RP0 range.
 */
static int xe_guc_pc_set_max_freq_locked(struct xe_guc_pc *pc, u32 freq)
{
	int ret;

	lockdep_assert_held(&pc->freq_lock);

	/* Might be in the middle of a gt reset */
	if (!pc->freq_ready)
		return -EAGAIN;

	ret = pc_set_max_freq(pc, freq);
	if (ret)
		return ret;

	/* Remember the admin's choice so it can be restored after reset */
	pc->user_requested_max = freq;

	return 0;
}
725 
/**
 * xe_guc_pc_set_max_freq - Set the maximum operational frequency
 * @pc: The GuC PC
 * @freq: The selected maximum frequency value
 *
 * With WA 22019338487 active, first waits (up to 30ms, outside the freq
 * lock) for any in-progress L2-flush frequency cap to be lifted.
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset),
 *         -EINVAL if value out of bounds.
 */
int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
{
	if (XE_GT_WA(pc_to_gt(pc), 22019338487)) {
		if (wait_for_flush_complete(pc) != 0)
			return -EAGAIN;
	}

	guard(mutex)(&pc->freq_lock);

	return xe_guc_pc_set_max_freq_locked(pc, freq);
}
746 
/**
 * xe_guc_pc_c_status - get the current GT C state
 * @pc: XE_GuC_PC instance
 *
 * Decodes the C-state field from the platform-specific status register:
 * MTL_MIRROR_TARGET_WP1 on graphics IP 12.70+, GT_CORE_STATUS otherwise.
 */
enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg, gt_c_state;

	if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
		reg = xe_mmio_read32(&gt->mmio, MTL_MIRROR_TARGET_WP1);
		gt_c_state = REG_FIELD_GET(MTL_CC_MASK, reg);
	} else {
		reg = xe_mmio_read32(&gt->mmio, GT_CORE_STATUS);
		gt_c_state = REG_FIELD_GET(RCN_MASK, reg);
	}

	/* Map the raw hardware encoding to the driver's idle-state enum */
	switch (gt_c_state) {
	case GT_C6:
		return GT_IDLE_C6;
	case GT_C0:
		return GT_IDLE_C0;
	default:
		return GT_IDLE_UNKNOWN;
	}
}
773 
/**
 * xe_guc_pc_rc6_residency - rc6 residency counter
 * @pc: Xe_GuC_PC instance
 *
 * Returns the raw 32-bit GT_GFX_RC6 counter value; units and wrap handling
 * are the caller's concern.
 */
u64 xe_guc_pc_rc6_residency(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	reg = xe_mmio_read32(&gt->mmio, GT_GFX_RC6);

	return reg;
}
787 
/**
 * xe_guc_pc_mc6_residency - mc6 residency counter
 * @pc: Xe_GuC_PC instance
 *
 * Returns the raw MTL_MEDIA_MC6 counter value. The register read is 32-bit;
 * the u64 local/return mirrors the rc6 counterpart's interface.
 */
u64 xe_guc_pc_mc6_residency(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u64 reg;

	reg = xe_mmio_read32(&gt->mmio, MTL_MEDIA_MC6);

	return reg;
}
801 
/*
 * MTL+ (graphics IP 12.70+): cache the fused RP0/RPn frequencies (MHz) in
 * @pc from the media or primary GT capability register.
 */
static void mtl_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	xe_device_assert_mem_access(pc_to_xe(pc));

	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(&gt->mmio, MTL_MEDIAP_STATE_CAP);
	else
		reg = xe_mmio_read32(&gt->mmio, MTL_RP_STATE_CAP);

	pc->rp0_freq = decode_freq(REG_FIELD_GET(MTL_RP0_CAP_MASK, reg));

	pc->rpn_freq = decode_freq(REG_FIELD_GET(MTL_RPN_CAP_MASK, reg));
}
818 
/*
 * Pre-MTL: cache the fused RP0/RPn frequencies (MHz) in @pc. PVC uses its
 * own capability register; fields here are in 50 MHz units.
 */
static void tgl_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 reg;

	xe_device_assert_mem_access(pc_to_xe(pc));

	if (xe->info.platform == XE_PVC)
		reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
	else
		reg = xe_mmio_read32(&gt->mmio, RP_STATE_CAP);
	pc->rp0_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
	pc->rpn_freq = REG_FIELD_GET(RPN_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}
834 
/* Cache fused RP0/RPn in @pc using the platform-appropriate register layout. */
static void pc_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270)
		mtl_init_fused_rp_values(pc);
	else
		tgl_init_fused_rp_values(pc);
}
845 
846 static u32 pc_max_freq_cap(struct xe_guc_pc *pc)
847 {
848 	struct xe_gt *gt = pc_to_gt(pc);
849 
850 	if (XE_GT_WA(gt, 22019338487)) {
851 		if (xe_gt_is_media_type(gt))
852 			return min(LNL_MERT_FREQ_CAP, pc->rp0_freq);
853 		else
854 			return min(BMG_MERT_FREQ_CAP, pc->rp0_freq);
855 	} else {
856 		return pc->rp0_freq;
857 	}
858 }
859 
/**
 * xe_guc_pc_raise_unslice - Initialize RPx values and request a higher GT
 * frequency to allow faster GuC load times
 * @pc: Xe_GuC_PC instance
 *
 * Caller must hold the XE_FW_GT forcewake domain (asserted here). The
 * request goes directly to pcode since SLPC is not running yet.
 */
void xe_guc_pc_raise_unslice(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
	pc_set_cur_freq(pc, pc_max_freq_cap(pc));
}
872 
/**
 * xe_guc_pc_init_early - Initialize RPx values
 * @pc: Xe_GuC_PC instance
 *
 * Caller must hold the XE_FW_GT forcewake domain (asserted here). Caches
 * fused RP0/RPn in @pc for later range checks.
 */
void xe_guc_pc_init_early(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
	pc_init_fused_rp_values(pc);
}
884 
/*
 * Clamp GuC's default min/max to the fused RP0 and apply platform minimums.
 * Called with pc->freq_lock held during pc_init_freqs().
 *
 * Return: 0 on success, negative error code from the underlying SLPC
 * queries/sets.
 */
static int pc_adjust_freq_bounds(struct xe_guc_pc *pc)
{
	struct xe_tile *tile = gt_to_tile(pc_to_gt(pc));
	int ret;

	lockdep_assert_held(&pc->freq_lock);

	ret = pc_action_query_task_state(pc);
	if (ret)
		goto out;

	/*
	 * GuC defaults to some RPmax that is not actually achievable without
	 * overclocking. Let's adjust it to the Hardware RP0, which is the
	 * regular maximum
	 */
	if (pc_get_max_freq(pc) > pc->rp0_freq) {
		ret = pc_set_max_freq(pc, pc->rp0_freq);
		if (ret)
			goto out;
	}

	/*
	 * Same thing happens for Server platforms where min is listed as
	 * RPMax
	 */
	if (pc_get_min_freq(pc) > pc->rp0_freq)
		ret = pc_set_min_freq(pc, pc->rp0_freq);

	/*
	 * NOTE(review): this WA set unconditionally overwrites 'ret' from the
	 * min clamp above - a failure there would be masked. Confirm intended.
	 */
	if (XE_DEVICE_WA(tile_to_xe(tile), 14022085890))
		ret = pc_set_min_freq(pc, max(BMG_MIN_FREQ, pc_get_min_freq(pc)));

out:
	return ret;
}
920 
/*
 * Re-apply the user's previously requested min/max after a GuC/GT reset.
 * A value of 0 means "never set by the user" and is skipped. Called with
 * pc->freq_lock held.
 *
 * Return: 0 on success, first failing set's error code otherwise.
 */
static int pc_adjust_requested_freq(struct xe_guc_pc *pc)
{
	int ret = 0;

	lockdep_assert_held(&pc->freq_lock);

	if (pc->user_requested_min != 0) {
		ret = pc_set_min_freq(pc, pc->user_requested_min);
		if (ret)
			return ret;
	}

	if (pc->user_requested_max != 0) {
		ret = pc_set_max_freq(pc, pc->user_requested_max);
		if (ret)
			return ret;
	}

	return ret;
}
941 
942 static bool needs_flush_freq_limit(struct xe_guc_pc *pc)
943 {
944 	struct xe_gt *gt = pc_to_gt(pc);
945 
946 	return  XE_GT_WA(gt, 22019338487) &&
947 		pc->rp0_freq > BMG_MERT_FLUSH_FREQ_CAP;
948 }
949 
/**
 * xe_guc_pc_apply_flush_freq_limit() - Limit max GT freq during L2 flush
 * @pc: the xe_guc_pc object
 *
 * As per the WA, reduce max GT frequency during L2 cache flush. The current
 * max is stashed for xe_guc_pc_remove_flush_freq_limit() to restore, and
 * flush_freq_limit gates concurrent user max-freq writes.
 */
void xe_guc_pc_apply_flush_freq_limit(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 max_freq;
	int ret;

	if (!needs_flush_freq_limit(pc))
		return;

	guard(mutex)(&pc->freq_lock);

	ret = xe_guc_pc_get_max_freq_locked(pc, &max_freq);
	if (!ret && max_freq > BMG_MERT_FLUSH_FREQ_CAP) {
		ret = pc_set_max_freq(pc, BMG_MERT_FLUSH_FREQ_CAP);
		if (ret) {
			xe_gt_err_once(gt, "Failed to cap max freq on flush to %u, %pe\n",
				       BMG_MERT_FLUSH_FREQ_CAP, ERR_PTR(ret));
			return;
		}

		/* Signal user max-freq writers to wait until the flush ends */
		atomic_set(&pc->flush_freq_limit, 1);

		/*
		 * If user has previously changed max freq, stash that value to
		 * restore later, otherwise use the current max. New user
		 * requests wait on flush.
		 */
		if (pc->user_requested_max != 0)
			pc->stashed_max_freq = pc->user_requested_max;
		else
			pc->stashed_max_freq = max_freq;
	}

	/*
	 * Wait for actual freq to go below the flush cap: even if the previous
	 * max was below cap, the current one might still be above it
	 */
	ret = wait_for_act_freq_max_limit(pc, BMG_MERT_FLUSH_FREQ_CAP);
	if (ret)
		xe_gt_err_once(gt, "Actual freq did not reduce to %u, %pe\n",
			       BMG_MERT_FLUSH_FREQ_CAP, ERR_PTR(ret));
}
998 
999 /**
1000  * xe_guc_pc_remove_flush_freq_limit() - Remove max GT freq limit after L2 flush completes.
1001  * @pc: the xe_guc_pc object
1002  *
1003  * Retrieve the previous GT max frequency value.
1004  */
1005 void xe_guc_pc_remove_flush_freq_limit(struct xe_guc_pc *pc)
1006 {
1007 	struct xe_gt *gt = pc_to_gt(pc);
1008 	int ret = 0;
1009 
1010 	if (!needs_flush_freq_limit(pc))
1011 		return;
1012 
1013 	if (!atomic_read(&pc->flush_freq_limit))
1014 		return;
1015 
1016 	mutex_lock(&pc->freq_lock);
1017 
1018 	ret = pc_set_max_freq(&gt->uc.guc.pc, pc->stashed_max_freq);
1019 	if (ret)
1020 		xe_gt_err_once(gt, "Failed to restore max freq %u:%d",
1021 			       pc->stashed_max_freq, ret);
1022 
1023 	atomic_set(&pc->flush_freq_limit, 0);
1024 	mutex_unlock(&pc->freq_lock);
1025 	wake_up_var(&pc->flush_freq_limit);
1026 }
1027 
/*
 * WA 22019338487: stash the current min/max and temporarily bound both by
 * the MERT frequency cap until driver load completes; the stashed values
 * are restored by xe_guc_pc_restore_stashed_freq().
 *
 * Return: 0 on success (or when the WA does not apply), negative error code
 * otherwise.
 */
static int pc_set_mert_freq_cap(struct xe_guc_pc *pc)
{
	int ret;

	if (!XE_GT_WA(pc_to_gt(pc), 22019338487))
		return 0;

	guard(mutex)(&pc->freq_lock);

	/*
	 * Get updated min/max and stash them.
	 */
	ret = xe_guc_pc_get_min_freq_locked(pc, &pc->stashed_min_freq);
	if (!ret)
		ret = xe_guc_pc_get_max_freq_locked(pc, &pc->stashed_max_freq);
	if (ret)
		return ret;

	/*
	 * Ensure min and max are bound by MERT_FREQ_CAP until driver loads.
	 */
	ret = pc_set_min_freq(pc, min(xe_guc_pc_get_rpe_freq(pc), pc_max_freq_cap(pc)));
	if (!ret)
		ret = pc_set_max_freq(pc, min(pc->rp0_freq, pc_max_freq_cap(pc)));

	return ret;
}
1055 
/**
 * xe_guc_pc_restore_stashed_freq - Set min/max back to stashed values
 * @pc: The GuC PC
 *
 * Undoes pc_set_mert_freq_cap(). No-op on VFs and when GuC PC is skipped.
 *
 * Returns: 0 on success,
 *          error code on failure
 */
int xe_guc_pc_restore_stashed_freq(struct xe_guc_pc *pc)
{
	int ret = 0;

	if (IS_SRIOV_VF(pc_to_xe(pc)) || pc_to_xe(pc)->info.skip_guc_pc)
		return 0;

	mutex_lock(&pc->freq_lock);
	ret = pc_set_max_freq(pc, pc->stashed_max_freq);
	if (!ret)
		ret = pc_set_min_freq(pc, pc->stashed_min_freq);
	mutex_unlock(&pc->freq_lock);

	return ret;
}
1078 
/*
 * Program pcode's min-frequency table with the fused RPn..RP0 range,
 * converted to 50 MHz units. A failure is only warned about.
 */
static void pc_init_pcode_freq(struct xe_guc_pc *pc)
{
	u32 min = DIV_ROUND_CLOSEST(pc->rpn_freq, GT_FREQUENCY_MULTIPLIER);
	u32 max = DIV_ROUND_CLOSEST(pc->rp0_freq, GT_FREQUENCY_MULTIPLIER);

	XE_WARN_ON(xe_pcode_init_min_freq_table(gt_to_tile(pc_to_gt(pc)), min, max));
}
1086 
/*
 * Bring the frequency machinery up after an SLPC (re)start: clamp GuC
 * defaults, re-apply user requests, program pcode, and only then mark the
 * frequencies ready for the sysfs/get/set paths.
 *
 * Return: 0 on success, negative error code otherwise (freq_ready stays
 * false on failure).
 */
static int pc_init_freqs(struct xe_guc_pc *pc)
{
	int ret;

	mutex_lock(&pc->freq_lock);

	ret = pc_adjust_freq_bounds(pc);
	if (ret)
		goto out;

	ret = pc_adjust_requested_freq(pc);
	if (ret)
		goto out;

	pc_init_pcode_freq(pc);

	/*
	 * The frequencies are really ready for use only after the user
	 * requested ones got restored.
	 */
	pc->freq_ready = true;

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}
1113 
1114 static int pc_action_set_strategy(struct xe_guc_pc *pc, u32 val)
1115 {
1116 	int ret = 0;
1117 
1118 	ret = pc_action_set_param(pc,
1119 				  SLPC_PARAM_STRATEGIES,
1120 				  val);
1121 
1122 	return ret;
1123 }
1124 
1125 static const char *power_profile_to_string(struct xe_guc_pc *pc)
1126 {
1127 	switch (pc->power_profile) {
1128 	case SLPC_POWER_PROFILE_BASE:
1129 		return "base";
1130 	case SLPC_POWER_PROFILE_POWER_SAVING:
1131 		return "power_saving";
1132 	default:
1133 		return "invalid";
1134 	}
1135 }
1136 
/*
 * Format the available power profiles into @profile, with the active one
 * bracketed (sysfs-style list).
 *
 * NOTE(review): sprintf() is unbounded - assumes @profile holds at least
 * ~32 bytes; confirm against the sysfs caller's buffer.
 */
void xe_guc_pc_get_power_profile(struct xe_guc_pc *pc, char *profile)
{
	switch (pc->power_profile) {
	case SLPC_POWER_PROFILE_BASE:
		sprintf(profile, "[%s]    %s\n", "base", "power_saving");
		break;
	case SLPC_POWER_PROFILE_POWER_SAVING:
		sprintf(profile, "%s    [%s]\n", "base", "power_saving");
		break;
	default:
		sprintf(profile, "invalid");
	}
}
1150 
1151 int xe_guc_pc_set_power_profile(struct xe_guc_pc *pc, const char *buf)
1152 {
1153 	int ret = 0;
1154 	u32 val;
1155 
1156 	if (strncmp("base", buf, strlen("base")) == 0)
1157 		val = SLPC_POWER_PROFILE_BASE;
1158 	else if (strncmp("power_saving", buf, strlen("power_saving")) == 0)
1159 		val = SLPC_POWER_PROFILE_POWER_SAVING;
1160 	else
1161 		return -EINVAL;
1162 
1163 	guard(mutex)(&pc->freq_lock);
1164 	guard(xe_pm_runtime_noresume)(pc_to_xe(pc));
1165 
1166 	ret = pc_action_set_param(pc,
1167 				  SLPC_PARAM_POWER_PROFILE,
1168 				  val);
1169 	if (ret)
1170 		xe_gt_err_once(pc_to_gt(pc), "Failed to set power profile to %d: %pe\n",
1171 			       val, ERR_PTR(ret));
1172 	else
1173 		pc->power_profile = val;
1174 
1175 	return ret;
1176 }
1177 
1178 static int pc_action_set_dcc(struct xe_guc_pc *pc, bool enable)
1179 {
1180 	int ret;
1181 
1182 	ret = pc_action_set_param(pc,
1183 				  SLPC_PARAM_TASK_ENABLE_DCC,
1184 				  enable);
1185 	if (!ret)
1186 		return pc_action_set_param(pc,
1187 					   SLPC_PARAM_TASK_DISABLE_DCC,
1188 					   !enable);
1189 	else
1190 		return ret;
1191 }
1192 
1193 static int pc_modify_defaults(struct xe_guc_pc *pc)
1194 {
1195 	struct xe_device *xe = pc_to_xe(pc);
1196 	struct xe_gt *gt = pc_to_gt(pc);
1197 	int ret = 0;
1198 
1199 	if (xe->info.platform == XE_PANTHERLAKE) {
1200 		ret = pc_action_set_dcc(pc, false);
1201 		if (unlikely(ret))
1202 			xe_gt_err(gt, "Failed to modify DCC default: %pe\n", ERR_PTR(ret));
1203 	}
1204 
1205 	return ret;
1206 }
1207 
1208 /**
1209  * xe_guc_pc_start - Start GuC's Power Conservation component
1210  * @pc: Xe_GuC_PC instance
1211  */
1212 int xe_guc_pc_start(struct xe_guc_pc *pc)
1213 {
1214 	struct xe_device *xe = pc_to_xe(pc);
1215 	struct xe_gt *gt = pc_to_gt(pc);
1216 	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
1217 	ktime_t earlier;
1218 	int ret;
1219 
1220 	xe_gt_assert(gt, xe_device_uc_enabled(xe));
1221 
1222 	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
1223 	if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FW_GT))
1224 		return -ETIMEDOUT;
1225 
1226 	if (xe->info.skip_guc_pc) {
1227 		/* Request max possible since dynamic freq mgmt is not enabled */
1228 		pc_set_cur_freq(pc, UINT_MAX);
1229 		return 0;
1230 	}
1231 
1232 	xe_map_memset(xe, &pc->bo->vmap, 0, 0, size);
1233 	slpc_shared_data_write(pc, header.size, size);
1234 
1235 	earlier = ktime_get();
1236 	ret = pc_action_reset(pc);
1237 	if (ret)
1238 		return ret;
1239 
1240 	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
1241 			      SLPC_RESET_TIMEOUT_MS)) {
1242 		xe_gt_warn(gt, "GuC PC start taking longer than normal [freq = %dMHz (req = %dMHz), perf_limit_reasons = 0x%08X]\n",
1243 			   xe_guc_pc_get_act_freq(pc), get_cur_freq(gt),
1244 			   xe_gt_throttle_get_limit_reasons(gt));
1245 
1246 		if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
1247 				      SLPC_RESET_EXTENDED_TIMEOUT_MS)) {
1248 			xe_gt_err(gt, "GuC PC Start failed: Dynamic GT frequency control and GT sleep states are now disabled.\n");
1249 			return -EIO;
1250 		}
1251 
1252 		xe_gt_warn(gt, "GuC PC excessive start time: %lldms",
1253 			   ktime_ms_delta(ktime_get(), earlier));
1254 	}
1255 
1256 	ret = pc_modify_defaults(pc);
1257 	if (ret)
1258 		return ret;
1259 
1260 	ret = pc_init_freqs(pc);
1261 	if (ret)
1262 		return ret;
1263 
1264 	ret = pc_set_mert_freq_cap(pc);
1265 	if (ret)
1266 		return ret;
1267 
1268 	/* Enable SLPC Optimized Strategy for compute */
1269 	ret = pc_action_set_strategy(pc, SLPC_OPTIMIZED_STRATEGY_COMPUTE);
1270 
1271 	/* Set cached value of power_profile */
1272 	ret = xe_guc_pc_set_power_profile(pc, power_profile_to_string(pc));
1273 	if (unlikely(ret))
1274 		xe_gt_err(gt, "Failed to set SLPC power profile: %pe\n", ERR_PTR(ret));
1275 
1276 	return ret;
1277 }
1278 
1279 /**
1280  * xe_guc_pc_stop - Stop GuC's Power Conservation component
1281  * @pc: Xe_GuC_PC instance
1282  */
1283 int xe_guc_pc_stop(struct xe_guc_pc *pc)
1284 {
1285 	struct xe_device *xe = pc_to_xe(pc);
1286 
1287 	if (xe->info.skip_guc_pc)
1288 		return 0;
1289 
1290 	mutex_lock(&pc->freq_lock);
1291 	pc->freq_ready = false;
1292 	mutex_unlock(&pc->freq_lock);
1293 
1294 	return 0;
1295 }
1296 
1297 /**
1298  * xe_guc_pc_fini_hw - Finalize GuC's Power Conservation component
1299  * @arg: opaque pointer that should point to Xe_GuC_PC instance
1300  */
1301 static void xe_guc_pc_fini_hw(void *arg)
1302 {
1303 	struct xe_guc_pc *pc = arg;
1304 	struct xe_device *xe = pc_to_xe(pc);
1305 
1306 	if (xe_device_wedged(xe))
1307 		return;
1308 
1309 	CLASS(xe_force_wake, fw_ref)(gt_to_fw(pc_to_gt(pc)), XE_FW_GT);
1310 	XE_WARN_ON(xe_guc_pc_stop(pc));
1311 
1312 	/* Bind requested freq to mert_freq_cap before unload */
1313 	pc_set_cur_freq(pc, min(pc_max_freq_cap(pc), xe_guc_pc_get_rpe_freq(pc)));
1314 }
1315 
1316 /**
1317  * xe_guc_pc_init - Initialize GuC's Power Conservation component
1318  * @pc: Xe_GuC_PC instance
1319  */
1320 int xe_guc_pc_init(struct xe_guc_pc *pc)
1321 {
1322 	struct xe_gt *gt = pc_to_gt(pc);
1323 	struct xe_tile *tile = gt_to_tile(gt);
1324 	struct xe_device *xe = gt_to_xe(gt);
1325 	struct xe_bo *bo;
1326 	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
1327 	int err;
1328 
1329 	if (xe->info.skip_guc_pc)
1330 		return 0;
1331 
1332 	err = drmm_mutex_init(&xe->drm, &pc->freq_lock);
1333 	if (err)
1334 		return err;
1335 
1336 	bo = xe_managed_bo_create_pin_map(xe, tile, size,
1337 					  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
1338 					  XE_BO_FLAG_GGTT |
1339 					  XE_BO_FLAG_GGTT_INVALIDATE |
1340 					  XE_BO_FLAG_PINNED_NORESTORE);
1341 	if (IS_ERR(bo))
1342 		return PTR_ERR(bo);
1343 
1344 	pc->bo = bo;
1345 
1346 	pc->power_profile = SLPC_POWER_PROFILE_BASE;
1347 
1348 	return devm_add_action_or_reset(xe->drm.dev, xe_guc_pc_fini_hw, pc);
1349 }
1350 
1351 static const char *pc_get_state_string(struct xe_guc_pc *pc)
1352 {
1353 	switch (slpc_shared_data_read(pc, header.global_state)) {
1354 	case SLPC_GLOBAL_STATE_NOT_RUNNING:
1355 		return "not running";
1356 	case SLPC_GLOBAL_STATE_INITIALIZING:
1357 		return "initializing";
1358 	case SLPC_GLOBAL_STATE_RESETTING:
1359 		return "resetting";
1360 	case SLPC_GLOBAL_STATE_RUNNING:
1361 		return "running";
1362 	case SLPC_GLOBAL_STATE_SHUTTING_DOWN:
1363 		return "shutting down";
1364 	case SLPC_GLOBAL_STATE_ERROR:
1365 		return "error";
1366 	default:
1367 		return "unknown";
1368 	}
1369 }
1370 
1371 /**
1372  * xe_guc_pc_print - Print GuC's Power Conservation information for debug
1373  * @pc: Xe_GuC_PC instance
1374  * @p: drm_printer
1375  */
1376 void xe_guc_pc_print(struct xe_guc_pc *pc, struct drm_printer *p)
1377 {
1378 	drm_printf(p, "SLPC Shared Data Header:\n");
1379 	drm_printf(p, "\tSize: %x\n", slpc_shared_data_read(pc, header.size));
1380 	drm_printf(p, "\tGlobal State: %s\n", pc_get_state_string(pc));
1381 
1382 	if (pc_action_query_task_state(pc))
1383 		return;
1384 
1385 	drm_printf(p, "\nSLPC Tasks Status:\n");
1386 	drm_printf(p, "\tGTPERF enabled: %s\n",
1387 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1388 			      SLPC_GTPERF_TASK_ENABLED));
1389 	drm_printf(p, "\tDCC enabled: %s\n",
1390 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1391 			      SLPC_DCC_TASK_ENABLED));
1392 	drm_printf(p, "\tDCC in use: %s\n",
1393 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1394 			      SLPC_IN_DCC));
1395 	drm_printf(p, "\tBalancer enabled: %s\n",
1396 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1397 			      SLPC_BALANCER_ENABLED));
1398 	drm_printf(p, "\tIBC enabled: %s\n",
1399 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1400 			      SLPC_IBC_TASK_ENABLED));
1401 	drm_printf(p, "\tBalancer IA LMT enabled: %s\n",
1402 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1403 			      SLPC_BALANCER_IA_LMT_ENABLED));
1404 	drm_printf(p, "\tBalancer IA LMT active: %s\n",
1405 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1406 			      SLPC_BALANCER_IA_LMT_ACTIVE));
1407 }
1408