xref: /linux/drivers/gpu/drm/xe/xe_guc_pc.c (revision 08c98f3f2bd7bf5dddd98b17c7caf94d07fad107)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include "xe_guc_pc.h"
7 
8 #include <linux/cleanup.h>
9 #include <linux/delay.h>
10 #include <linux/iopoll.h>
11 #include <linux/jiffies.h>
12 #include <linux/ktime.h>
13 #include <linux/wait_bit.h>
14 
15 #include <drm/drm_managed.h>
16 #include <drm/drm_print.h>
17 #include <generated/xe_wa_oob.h>
18 
19 #include "abi/guc_actions_slpc_abi.h"
20 #include "regs/xe_gt_regs.h"
21 #include "regs/xe_regs.h"
22 #include "xe_bo.h"
23 #include "xe_device.h"
24 #include "xe_force_wake.h"
25 #include "xe_gt.h"
26 #include "xe_gt_idle.h"
27 #include "xe_gt_printk.h"
28 #include "xe_gt_throttle.h"
29 #include "xe_gt_types.h"
30 #include "xe_guc.h"
31 #include "xe_guc_ct.h"
32 #include "xe_map.h"
33 #include "xe_mmio.h"
34 #include "xe_pcode.h"
35 #include "xe_pm.h"
36 #include "xe_sriov.h"
37 #include "xe_wa.h"
38 
39 #define MCHBAR_MIRROR_BASE_SNB	0x140000
40 
41 #define RP_STATE_CAP		XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5998)
42 #define   RP0_MASK		REG_GENMASK(7, 0)
43 #define   RP1_MASK		REG_GENMASK(15, 8)
44 #define   RPN_MASK		REG_GENMASK(23, 16)
45 
46 #define FREQ_INFO_REC	XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5ef0)
47 #define   RPE_MASK		REG_GENMASK(15, 8)
48 #define   RPA_MASK		REG_GENMASK(31, 16)
49 
50 #define GT_PERF_STATUS		XE_REG(0x1381b4)
51 #define   CAGF_MASK	REG_GENMASK(19, 11)
52 
53 #define GT_FREQUENCY_MULTIPLIER	50
54 #define GT_FREQUENCY_SCALER	3
55 
56 #define LNL_MERT_FREQ_CAP	800
57 #define BMG_MERT_FREQ_CAP	2133
58 #define BMG_MIN_FREQ		1200
59 #define BMG_MERT_FLUSH_FREQ_CAP	2600
60 
61 #define SLPC_RESET_TIMEOUT_MS 5 /* roughly 5ms, but no need for precision */
62 #define SLPC_RESET_EXTENDED_TIMEOUT_MS 1000 /* To be used only at pc_start */
63 #define SLPC_ACT_FREQ_TIMEOUT_MS 100
64 
65 /**
66  * DOC: GuC Power Conservation (PC)
67  *
68  * GuC Power Conservation (PC) supports multiple features for the most
 * efficient and performant use of the GT when GuC submission is enabled,
70  * including frequency management, Render-C states management, and various
71  * algorithms for power balancing.
72  *
73  * Single Loop Power Conservation (SLPC) is the name given to the suite of
74  * connected power conservation features in the GuC firmware. The firmware
75  * exposes a programming interface to the host for the control of SLPC.
76  *
77  * Frequency management:
78  * =====================
79  *
 * The Xe driver enables SLPC with all of its default features and a
 * frequency selection that varies per platform.
82  *
 * Power profiles add another level of control to SLPC. When the power
 * saving profile is chosen, SLPC uses conservative thresholds to ramp
 * frequency, thus saving power. The base profile is the default and
 * ensures balanced performance for any workload.
87  *
88  * Render-C States:
89  * ================
90  *
 * Render-C state management is also a GuC PC feature, now enabled in Xe
 * for all platforms.
93  *
94  */
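
/*
 * As an illustration of the frequency management interface below, here is a
 * minimal sketch of a caller clamping the SLPC frequency range. The function
 * name and the MHz values are hypothetical; xe_guc_pc_set_min_freq() and
 * xe_guc_pc_set_max_freq() are the real entry points from this file:
 *
 *	static int example_limit_freq_range(struct xe_guc_pc *pc)
 *	{
 *		int ret;
 *
 *		ret = xe_guc_pc_set_min_freq(pc, 500);
 *		if (ret)
 *			return ret;
 *
 *		return xe_guc_pc_set_max_freq(pc, 1500);
 *	}
 *
 * Both helpers validate against the fused RPn..RP0 range and return -EINVAL
 * for out-of-bounds requests.
 */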
95 
96 static struct xe_guc *pc_to_guc(struct xe_guc_pc *pc)
97 {
98 	return container_of(pc, struct xe_guc, pc);
99 }
100 
101 static struct xe_guc_ct *pc_to_ct(struct xe_guc_pc *pc)
102 {
103 	return &pc_to_guc(pc)->ct;
104 }
105 
106 static struct xe_gt *pc_to_gt(struct xe_guc_pc *pc)
107 {
108 	return guc_to_gt(pc_to_guc(pc));
109 }
110 
111 static struct xe_device *pc_to_xe(struct xe_guc_pc *pc)
112 {
113 	return guc_to_xe(pc_to_guc(pc));
114 }
115 
116 static struct iosys_map *pc_to_maps(struct xe_guc_pc *pc)
117 {
118 	return &pc->bo->vmap;
119 }
120 
121 #define slpc_shared_data_read(pc_, field_) \
122 	xe_map_rd_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
123 			struct slpc_shared_data, field_)
124 
125 #define slpc_shared_data_write(pc_, field_, val_) \
126 	xe_map_wr_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
127 			struct slpc_shared_data, field_, val_)
128 
129 #define SLPC_EVENT(id, count) \
130 	(FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID, id) | \
131 	 FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, count))
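
/*
 * For example, SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2) packs the event id
 * and an argument count of 2 into the second u32 of the H2G request, so a
 * complete message (as built in pc_action_set_param() below) looks like:
 *
 *	u32 action[] = {
 *		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
 *		SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
 *		id,
 *		value,
 *	};
 */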
132 
133 static int wait_for_pc_state(struct xe_guc_pc *pc,
134 			     enum slpc_global_state target_state,
135 			     int timeout_ms)
136 {
137 	enum slpc_global_state state;
138 
139 	xe_device_assert_mem_access(pc_to_xe(pc));
140 
141 	return poll_timeout_us(state = slpc_shared_data_read(pc, header.global_state),
142 			       state == target_state,
143 			       20, timeout_ms * USEC_PER_MSEC, false);
144 }
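
/*
 * Note on the polling above: the first expression is re-evaluated every
 * 20 us until the condition holds or the timeout expires, returning 0 on
 * success and a negative error on timeout. Callers map any failure to
 * -EAGAIN, since the SLPC state machine is expected to converge shortly
 * after a reset.
 */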
145 
146 static int wait_for_flush_complete(struct xe_guc_pc *pc)
147 {
148 	const unsigned long timeout = msecs_to_jiffies(30);
149 
150 	if (!wait_var_event_timeout(&pc->flush_freq_limit,
151 				    !atomic_read(&pc->flush_freq_limit),
152 				    timeout))
153 		return -ETIMEDOUT;
154 
155 	return 0;
156 }
157 
158 static int wait_for_act_freq_max_limit(struct xe_guc_pc *pc, u32 max_limit)
159 {
160 	u32 freq;
161 
162 	return poll_timeout_us(freq = xe_guc_pc_get_act_freq(pc),
163 			       freq <= max_limit,
164 			       20, SLPC_ACT_FREQ_TIMEOUT_MS * USEC_PER_MSEC, false);
165 }
166 
167 static int pc_action_reset(struct xe_guc_pc *pc)
168 {
169 	struct xe_guc_ct *ct = pc_to_ct(pc);
170 	u32 action[] = {
171 		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
172 		SLPC_EVENT(SLPC_EVENT_RESET, 2),
173 		xe_bo_ggtt_addr(pc->bo),
174 		0,
175 	};
176 	int ret;
177 
178 	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
179 	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
180 		xe_gt_err(pc_to_gt(pc), "GuC PC reset failed: %pe\n",
181 			  ERR_PTR(ret));
182 
183 	return ret;
184 }
185 
186 static int pc_action_query_task_state(struct xe_guc_pc *pc)
187 {
188 	struct xe_guc_ct *ct = pc_to_ct(pc);
189 	u32 action[] = {
190 		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
191 		SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
192 		xe_bo_ggtt_addr(pc->bo),
193 		0,
194 	};
195 	int ret;
196 
197 	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
198 			      SLPC_RESET_TIMEOUT_MS))
199 		return -EAGAIN;
200 
201 	/* Blocking here to ensure the results are ready before reading them */
202 	ret = xe_guc_ct_send_block(ct, action, ARRAY_SIZE(action));
203 	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
204 		xe_gt_err(pc_to_gt(pc), "GuC PC query task state failed: %pe\n",
205 			  ERR_PTR(ret));
206 
207 	return ret;
208 }
209 
210 static int pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value)
211 {
212 	struct xe_guc_ct *ct = pc_to_ct(pc);
213 	u32 action[] = {
214 		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
215 		SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
216 		id,
217 		value,
218 	};
219 	int ret;
220 
221 	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
222 			      SLPC_RESET_TIMEOUT_MS))
223 		return -EAGAIN;
224 
225 	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
226 	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
227 		xe_gt_err(pc_to_gt(pc), "GuC PC set param[%u]=%u failed: %pe\n",
228 			  id, value, ERR_PTR(ret));
229 
230 	return ret;
231 }
232 
233 static int pc_action_unset_param(struct xe_guc_pc *pc, u8 id)
234 {
235 	u32 action[] = {
236 		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
237 		SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1),
238 		id,
239 	};
240 	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
241 	int ret;
242 
243 	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
244 			      SLPC_RESET_TIMEOUT_MS))
245 		return -EAGAIN;
246 
247 	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
248 	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
		xe_gt_err(pc_to_gt(pc), "GuC PC unset param failed: %pe\n",
250 			  ERR_PTR(ret));
251 
252 	return ret;
253 }
254 
255 static int pc_action_setup_gucrc(struct xe_guc_pc *pc, u32 mode)
256 {
257 	struct xe_guc_ct *ct = pc_to_ct(pc);
258 	u32 action[] = {
259 		GUC_ACTION_HOST2GUC_SETUP_PC_GUCRC,
260 		mode,
261 	};
262 	int ret;
263 
264 	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
265 	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
266 		xe_gt_err(pc_to_gt(pc), "GuC RC enable mode=%u failed: %pe\n",
267 			  mode, ERR_PTR(ret));
268 	return ret;
269 }
270 
271 static u32 decode_freq(u32 raw)
272 {
273 	return DIV_ROUND_CLOSEST(raw * GT_FREQUENCY_MULTIPLIER,
274 				 GT_FREQUENCY_SCALER);
275 }
276 
277 static u32 encode_freq(u32 freq)
278 {
279 	return DIV_ROUND_CLOSEST(freq * GT_FREQUENCY_SCALER,
280 				 GT_FREQUENCY_MULTIPLIER);
281 }
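
/*
 * The hardware expresses frequency as a ratio in units of 50/3 MHz
 * (~16.67 MHz), hence the multiplier/scaler pair above. Worked examples:
 * decode_freq(18) = 18 * 50 / 3 = 300 MHz, and conversely
 * encode_freq(300) = 300 * 3 / 50 = 18. The rounding division means
 * frequencies that are not multiples of 50/3 MHz map to the closest
 * representable ratio.
 */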
282 
283 static u32 pc_get_min_freq(struct xe_guc_pc *pc)
284 {
285 	u32 freq;
286 
287 	freq = FIELD_GET(SLPC_MIN_UNSLICE_FREQ_MASK,
288 			 slpc_shared_data_read(pc, task_state_data.freq));
289 
290 	return decode_freq(freq);
291 }
292 
293 static void pc_set_manual_rp_ctrl(struct xe_guc_pc *pc, bool enable)
294 {
295 	struct xe_gt *gt = pc_to_gt(pc);
296 	u32 state = enable ? RPSWCTL_ENABLE : RPSWCTL_DISABLE;
297 
298 	/* Allow/Disallow punit to process software freq requests */
299 	xe_mmio_write32(&gt->mmio, RP_CONTROL, state);
300 }
301 
302 static void pc_set_cur_freq(struct xe_guc_pc *pc, u32 freq)
303 {
304 	struct xe_gt *gt = pc_to_gt(pc);
305 	u32 rpnswreq;
306 
307 	pc_set_manual_rp_ctrl(pc, true);
308 
	/* Req freq is in units of 16.66 MHz */
310 	rpnswreq = REG_FIELD_PREP(REQ_RATIO_MASK, encode_freq(freq));
311 	xe_mmio_write32(&gt->mmio, RPNSWREQ, rpnswreq);
312 
313 	/* Sleep for a small time to allow pcode to respond */
314 	usleep_range(100, 300);
315 
316 	pc_set_manual_rp_ctrl(pc, false);
317 }
318 
319 static int pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
320 {
321 	/*
322 	 * Let's only check for the rpn-rp0 range. If max < min,
323 	 * min becomes a fixed request.
324 	 */
325 	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
326 		return -EINVAL;
327 
328 	/*
	 * GuC policy is to elevate the minimum frequency to the efficient
	 * level. Our goal is to have the admin's choice respected.
331 	 */
332 	pc_action_set_param(pc, SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
333 			    freq < pc->rpe_freq);
334 
335 	return pc_action_set_param(pc,
336 				   SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
337 				   freq);
338 }
339 
static u32 pc_get_max_freq(struct xe_guc_pc *pc)
341 {
342 	u32 freq;
343 
344 	freq = FIELD_GET(SLPC_MAX_UNSLICE_FREQ_MASK,
345 			 slpc_shared_data_read(pc, task_state_data.freq));
346 
347 	return decode_freq(freq);
348 }
349 
350 static int pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
351 {
352 	/*
353 	 * Let's only check for the rpn-rp0 range. If max < min,
354 	 * min becomes a fixed request.
355 	 * Also, overclocking is not supported.
356 	 */
357 	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
358 		return -EINVAL;
359 
360 	return pc_action_set_param(pc,
361 				   SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
362 				   freq);
363 }
364 
365 static void mtl_update_rpa_value(struct xe_guc_pc *pc)
366 {
367 	struct xe_gt *gt = pc_to_gt(pc);
368 	u32 reg;
369 
370 	if (xe_gt_is_media_type(gt))
371 		reg = xe_mmio_read32(&gt->mmio, MTL_MPA_FREQUENCY);
372 	else
373 		reg = xe_mmio_read32(&gt->mmio, MTL_GT_RPA_FREQUENCY);
374 
375 	pc->rpa_freq = decode_freq(REG_FIELD_GET(MTL_RPA_MASK, reg));
376 }
377 
378 static void mtl_update_rpe_value(struct xe_guc_pc *pc)
379 {
380 	struct xe_gt *gt = pc_to_gt(pc);
381 	u32 reg;
382 
383 	if (xe_gt_is_media_type(gt))
384 		reg = xe_mmio_read32(&gt->mmio, MTL_MPE_FREQUENCY);
385 	else
386 		reg = xe_mmio_read32(&gt->mmio, MTL_GT_RPE_FREQUENCY);
387 
388 	pc->rpe_freq = decode_freq(REG_FIELD_GET(MTL_RPE_MASK, reg));
389 }
390 
391 static void tgl_update_rpa_value(struct xe_guc_pc *pc)
392 {
393 	struct xe_gt *gt = pc_to_gt(pc);
394 	struct xe_device *xe = gt_to_xe(gt);
395 	u32 reg;
396 
397 	/*
	 * For PVC we still need to use the fused RP0 as the approximation for
	 * RPa. For platforms other than PVC we get the resolved RPa directly
	 * from PCODE at a different register.
401 	 */
402 	if (xe->info.platform == XE_PVC) {
403 		reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
404 		pc->rpa_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
405 	} else {
406 		reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC);
407 		pc->rpa_freq = REG_FIELD_GET(RPA_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
408 	}
409 }
410 
411 static void tgl_update_rpe_value(struct xe_guc_pc *pc)
412 {
413 	struct xe_gt *gt = pc_to_gt(pc);
414 	struct xe_device *xe = gt_to_xe(gt);
415 	u32 reg;
416 
417 	/*
	 * For PVC we still need to use the fused RP1 as the approximation for
	 * RPe. For platforms other than PVC we get the resolved RPe directly
	 * from PCODE at a different register.
421 	 */
422 	if (xe->info.platform == XE_PVC) {
423 		reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
424 		pc->rpe_freq = REG_FIELD_GET(RP1_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
425 	} else {
426 		reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC);
427 		pc->rpe_freq = REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
428 	}
429 }
430 
431 static void pc_update_rp_values(struct xe_guc_pc *pc)
432 {
433 	struct xe_gt *gt = pc_to_gt(pc);
434 	struct xe_device *xe = gt_to_xe(gt);
435 
436 	if (GRAPHICS_VERx100(xe) >= 1270) {
437 		mtl_update_rpa_value(pc);
438 		mtl_update_rpe_value(pc);
439 	} else {
440 		tgl_update_rpa_value(pc);
441 		tgl_update_rpe_value(pc);
442 	}
443 
444 	/*
445 	 * RPe is decided at runtime by PCODE. In the rare case where that's
	 * smaller than the fused min, we will trust the PCODE and use it
	 * as our minimum.
448 	 */
449 	pc->rpn_freq = min(pc->rpn_freq, pc->rpe_freq);
450 }
451 
452 /**
453  * xe_guc_pc_get_act_freq - Get Actual running frequency
454  * @pc: The GuC PC
455  *
 * Returns: The actual running frequency, which might be 0 if the GT is in a
 * Render-C sleep state (RC6).
457  */
458 u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc)
459 {
460 	struct xe_gt *gt = pc_to_gt(pc);
461 	struct xe_device *xe = gt_to_xe(gt);
462 	u32 freq;
463 
464 	/* When in RC6, actual frequency reported will be 0. */
465 	if (GRAPHICS_VERx100(xe) >= 1270) {
466 		freq = xe_mmio_read32(&gt->mmio, MTL_MIRROR_TARGET_WP1);
467 		freq = REG_FIELD_GET(MTL_CAGF_MASK, freq);
468 	} else {
469 		freq = xe_mmio_read32(&gt->mmio, GT_PERF_STATUS);
470 		freq = REG_FIELD_GET(CAGF_MASK, freq);
471 	}
472 
473 	freq = decode_freq(freq);
474 
475 	return freq;
476 }
477 
478 static u32 get_cur_freq(struct xe_gt *gt)
479 {
480 	u32 freq;
481 
482 	freq = xe_mmio_read32(&gt->mmio, RPNSWREQ);
483 	freq = REG_FIELD_GET(REQ_RATIO_MASK, freq);
484 	return decode_freq(freq);
485 }
486 
487 /**
 * xe_guc_pc_get_cur_freq_fw - Get the requested frequency; caller must hold GT forcewake
 * @pc: The GuC PC
 *
 * Returns: The requested frequency for that GT instance.
492  */
493 u32 xe_guc_pc_get_cur_freq_fw(struct xe_guc_pc *pc)
494 {
495 	struct xe_gt *gt = pc_to_gt(pc);
496 
497 	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
498 
499 	return get_cur_freq(gt);
500 }
501 
502 /**
503  * xe_guc_pc_get_cur_freq - Get Current requested frequency
504  * @pc: The GuC PC
505  * @freq: A pointer to a u32 where the freq value will be returned
506  *
 * Returns: 0 on success,
 *         -ETIMEDOUT if GT forcewake could not be acquired.
509  */
510 int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq)
511 {
512 	struct xe_gt *gt = pc_to_gt(pc);
513 	unsigned int fw_ref;
514 
515 	/*
	 * GuC SLPC plays with the cur freq request when GuCRC is enabled.
	 * Block RC6 for a more reliable read.
518 	 */
519 	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
520 	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT)) {
521 		xe_force_wake_put(gt_to_fw(gt), fw_ref);
522 		return -ETIMEDOUT;
523 	}
524 
525 	*freq = get_cur_freq(gt);
526 
527 	xe_force_wake_put(gt_to_fw(gt), fw_ref);
528 	return 0;
529 }
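
/*
 * A minimal sketch contrasting the two readings above: the requested
 * frequency (what was asked of the hardware) and the actual frequency (what
 * the GT is currently running at, 0 while in RC6). The function name is
 * hypothetical; the accessors are the real ones defined above:
 *
 *	static void example_log_freqs(struct xe_guc_pc *pc)
 *	{
 *		u32 req = 0, act = xe_guc_pc_get_act_freq(pc);
 *
 *		if (!xe_guc_pc_get_cur_freq(pc, &req))
 *			xe_gt_info(pc_to_gt(pc), "req %u MHz, act %u MHz\n",
 *				   req, act);
 *	}
 */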
530 
531 /**
532  * xe_guc_pc_get_rp0_freq - Get the RP0 freq
533  * @pc: The GuC PC
534  *
535  * Returns: RP0 freq.
536  */
537 u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc)
538 {
539 	return pc->rp0_freq;
540 }
541 
542 /**
543  * xe_guc_pc_get_rpa_freq - Get the RPa freq
544  * @pc: The GuC PC
545  *
546  * Returns: RPa freq.
547  */
548 u32 xe_guc_pc_get_rpa_freq(struct xe_guc_pc *pc)
549 {
550 	pc_update_rp_values(pc);
551 
552 	return pc->rpa_freq;
553 }
554 
555 /**
556  * xe_guc_pc_get_rpe_freq - Get the RPe freq
557  * @pc: The GuC PC
558  *
559  * Returns: RPe freq.
560  */
561 u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc)
562 {
563 	pc_update_rp_values(pc);
564 
565 	return pc->rpe_freq;
566 }
567 
568 /**
569  * xe_guc_pc_get_rpn_freq - Get the RPn freq
570  * @pc: The GuC PC
571  *
572  * Returns: RPn freq.
573  */
574 u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc)
575 {
576 	return pc->rpn_freq;
577 }
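
/*
 * The RPx accessors above expose the frequency ladder: RP0 (maximum), RPa
 * (achievable), RPe (efficient) and RPn (minimum), with
 * RPn <= RPe <= RPa <= RP0. A hedged sketch of dumping the range, with a
 * hypothetical function name:
 *
 *	static void example_show_range(struct xe_guc_pc *pc, struct drm_printer *p)
 *	{
 *		drm_printf(p, "RPn %u, RPe %u, RPa %u, RP0 %u (MHz)\n",
 *			   xe_guc_pc_get_rpn_freq(pc),
 *			   xe_guc_pc_get_rpe_freq(pc),
 *			   xe_guc_pc_get_rpa_freq(pc),
 *			   xe_guc_pc_get_rp0_freq(pc));
 *	}
 */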
578 
579 static int xe_guc_pc_get_min_freq_locked(struct xe_guc_pc *pc, u32 *freq)
580 {
581 	int ret;
582 
583 	lockdep_assert_held(&pc->freq_lock);
584 
585 	/* Might be in the middle of a gt reset */
586 	if (!pc->freq_ready)
587 		return -EAGAIN;
588 
589 	ret = pc_action_query_task_state(pc);
590 	if (ret)
591 		return ret;
592 
593 	*freq = pc_get_min_freq(pc);
594 
595 	return 0;
596 }
597 
598 /**
599  * xe_guc_pc_get_min_freq - Get the min operational frequency
600  * @pc: The GuC PC
601  * @freq: A pointer to a u32 where the freq value will be returned
602  *
603  * Returns: 0 on success,
604  *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
605  */
606 int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq)
607 {
608 	guard(mutex)(&pc->freq_lock);
609 
610 	return xe_guc_pc_get_min_freq_locked(pc, freq);
611 }
612 
613 static int xe_guc_pc_set_min_freq_locked(struct xe_guc_pc *pc, u32 freq)
614 {
615 	int ret;
616 
617 	lockdep_assert_held(&pc->freq_lock);
618 
619 	/* Might be in the middle of a gt reset */
620 	if (!pc->freq_ready)
621 		return -EAGAIN;
622 
623 	ret = pc_set_min_freq(pc, freq);
624 	if (ret)
625 		return ret;
626 
627 	pc->user_requested_min = freq;
628 
629 	return 0;
630 }
631 
632 /**
633  * xe_guc_pc_set_min_freq - Set the minimal operational frequency
634  * @pc: The GuC PC
635  * @freq: The selected minimal frequency
636  *
637  * Returns: 0 on success,
638  *         -EAGAIN if GuC PC not ready (likely in middle of a reset),
639  *         -EINVAL if value out of bounds.
640  */
641 int xe_guc_pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
642 {
643 	guard(mutex)(&pc->freq_lock);
644 
645 	return xe_guc_pc_set_min_freq_locked(pc, freq);
646 }
647 
648 static int xe_guc_pc_get_max_freq_locked(struct xe_guc_pc *pc, u32 *freq)
649 {
650 	int ret;
651 
652 	lockdep_assert_held(&pc->freq_lock);
653 
654 	/* Might be in the middle of a gt reset */
655 	if (!pc->freq_ready)
656 		return -EAGAIN;
657 
658 	ret = pc_action_query_task_state(pc);
659 	if (ret)
660 		return ret;
661 
662 	*freq = pc_get_max_freq(pc);
663 
664 	return 0;
665 }
666 
667 /**
668  * xe_guc_pc_get_max_freq - Get Maximum operational frequency
669  * @pc: The GuC PC
670  * @freq: A pointer to a u32 where the freq value will be returned
671  *
672  * Returns: 0 on success,
673  *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
674  */
675 int xe_guc_pc_get_max_freq(struct xe_guc_pc *pc, u32 *freq)
676 {
677 	guard(mutex)(&pc->freq_lock);
678 
679 	return xe_guc_pc_get_max_freq_locked(pc, freq);
680 }
681 
682 static int xe_guc_pc_set_max_freq_locked(struct xe_guc_pc *pc, u32 freq)
683 {
684 	int ret;
685 
686 	lockdep_assert_held(&pc->freq_lock);
687 
688 	/* Might be in the middle of a gt reset */
689 	if (!pc->freq_ready)
690 		return -EAGAIN;
691 
692 	ret = pc_set_max_freq(pc, freq);
693 	if (ret)
694 		return ret;
695 
696 	pc->user_requested_max = freq;
697 
698 	return 0;
699 }
700 
701 /**
702  * xe_guc_pc_set_max_freq - Set the maximum operational frequency
703  * @pc: The GuC PC
704  * @freq: The selected maximum frequency value
705  *
706  * Returns: 0 on success,
707  *         -EAGAIN if GuC PC not ready (likely in middle of a reset),
708  *         -EINVAL if value out of bounds.
709  */
710 int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
711 {
712 	if (XE_GT_WA(pc_to_gt(pc), 22019338487)) {
713 		if (wait_for_flush_complete(pc) != 0)
714 			return -EAGAIN;
715 	}
716 
717 	guard(mutex)(&pc->freq_lock);
718 
719 	return xe_guc_pc_set_max_freq_locked(pc, freq);
720 }
721 
722 /**
 * xe_guc_pc_c_status - Get the current GT C state
 * @pc: Xe_GuC_PC instance
 *
 * Return: GT_IDLE_C0, GT_IDLE_C6 or GT_IDLE_UNKNOWN.
725  */
726 enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc)
727 {
728 	struct xe_gt *gt = pc_to_gt(pc);
729 	u32 reg, gt_c_state;
730 
731 	if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
732 		reg = xe_mmio_read32(&gt->mmio, MTL_MIRROR_TARGET_WP1);
733 		gt_c_state = REG_FIELD_GET(MTL_CC_MASK, reg);
734 	} else {
735 		reg = xe_mmio_read32(&gt->mmio, GT_CORE_STATUS);
736 		gt_c_state = REG_FIELD_GET(RCN_MASK, reg);
737 	}
738 
739 	switch (gt_c_state) {
740 	case GT_C6:
741 		return GT_IDLE_C6;
742 	case GT_C0:
743 		return GT_IDLE_C0;
744 	default:
745 		return GT_IDLE_UNKNOWN;
746 	}
747 }
748 
749 /**
 * xe_guc_pc_rc6_residency - RC6 residency counter
 * @pc: Xe_GuC_PC instance
 *
 * Return: raw RC6 residency counter value.
752  */
753 u64 xe_guc_pc_rc6_residency(struct xe_guc_pc *pc)
754 {
755 	struct xe_gt *gt = pc_to_gt(pc);
756 	u32 reg;
757 
758 	reg = xe_mmio_read32(&gt->mmio, GT_GFX_RC6);
759 
760 	return reg;
761 }
762 
763 /**
 * xe_guc_pc_mc6_residency - MC6 residency counter
 * @pc: Xe_GuC_PC instance
 *
 * Return: raw MC6 residency counter value.
766  */
767 u64 xe_guc_pc_mc6_residency(struct xe_guc_pc *pc)
768 {
769 	struct xe_gt *gt = pc_to_gt(pc);
770 	u64 reg;
771 
772 	reg = xe_mmio_read32(&gt->mmio, MTL_MEDIA_MC6);
773 
774 	return reg;
775 }
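
/*
 * The residency counters above return raw, monotonically increasing
 * register values. A sketch of sampling RC6 residency over an interval
 * (hypothetical helper; counter wrap handling is deliberately omitted):
 *
 *	static u64 example_rc6_delta(struct xe_guc_pc *pc, unsigned int ms)
 *	{
 *		u64 start = xe_guc_pc_rc6_residency(pc);
 *
 *		msleep(ms);
 *		return xe_guc_pc_rc6_residency(pc) - start;
 *	}
 */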
776 
777 static void mtl_init_fused_rp_values(struct xe_guc_pc *pc)
778 {
779 	struct xe_gt *gt = pc_to_gt(pc);
780 	u32 reg;
781 
782 	xe_device_assert_mem_access(pc_to_xe(pc));
783 
784 	if (xe_gt_is_media_type(gt))
785 		reg = xe_mmio_read32(&gt->mmio, MTL_MEDIAP_STATE_CAP);
786 	else
787 		reg = xe_mmio_read32(&gt->mmio, MTL_RP_STATE_CAP);
788 
789 	pc->rp0_freq = decode_freq(REG_FIELD_GET(MTL_RP0_CAP_MASK, reg));
790 
791 	pc->rpn_freq = decode_freq(REG_FIELD_GET(MTL_RPN_CAP_MASK, reg));
792 }
793 
794 static void tgl_init_fused_rp_values(struct xe_guc_pc *pc)
795 {
796 	struct xe_gt *gt = pc_to_gt(pc);
797 	struct xe_device *xe = gt_to_xe(gt);
798 	u32 reg;
799 
800 	xe_device_assert_mem_access(pc_to_xe(pc));
801 
802 	if (xe->info.platform == XE_PVC)
803 		reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
804 	else
805 		reg = xe_mmio_read32(&gt->mmio, RP_STATE_CAP);
806 	pc->rp0_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
807 	pc->rpn_freq = REG_FIELD_GET(RPN_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
808 }
809 
810 static void pc_init_fused_rp_values(struct xe_guc_pc *pc)
811 {
812 	struct xe_gt *gt = pc_to_gt(pc);
813 	struct xe_device *xe = gt_to_xe(gt);
814 
815 	if (GRAPHICS_VERx100(xe) >= 1270)
816 		mtl_init_fused_rp_values(pc);
817 	else
818 		tgl_init_fused_rp_values(pc);
819 }
820 
821 static u32 pc_max_freq_cap(struct xe_guc_pc *pc)
822 {
823 	struct xe_gt *gt = pc_to_gt(pc);
824 
825 	if (XE_GT_WA(gt, 22019338487)) {
826 		if (xe_gt_is_media_type(gt))
827 			return min(LNL_MERT_FREQ_CAP, pc->rp0_freq);
828 		else
829 			return min(BMG_MERT_FREQ_CAP, pc->rp0_freq);
830 	} else {
831 		return pc->rp0_freq;
832 	}
833 }
834 
835 /**
 * xe_guc_pc_raise_unslice - Request a higher GT frequency to allow faster
 * GuC load times
838  * @pc: Xe_GuC_PC instance
839  */
840 void xe_guc_pc_raise_unslice(struct xe_guc_pc *pc)
841 {
842 	struct xe_gt *gt = pc_to_gt(pc);
843 
844 	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
845 	pc_set_cur_freq(pc, pc_max_freq_cap(pc));
846 }
847 
848 /**
849  * xe_guc_pc_init_early - Initialize RPx values
850  * @pc: Xe_GuC_PC instance
851  */
852 void xe_guc_pc_init_early(struct xe_guc_pc *pc)
853 {
854 	struct xe_gt *gt = pc_to_gt(pc);
855 
856 	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
857 	pc_init_fused_rp_values(pc);
858 }
859 
860 static int pc_adjust_freq_bounds(struct xe_guc_pc *pc)
861 {
862 	struct xe_tile *tile = gt_to_tile(pc_to_gt(pc));
863 	int ret;
864 
865 	lockdep_assert_held(&pc->freq_lock);
866 
867 	ret = pc_action_query_task_state(pc);
868 	if (ret)
869 		goto out;
870 
871 	/*
872 	 * GuC defaults to some RPmax that is not actually achievable without
	 * overclocking. Let's adjust it to the hardware RP0, which is the
	 * regular maximum.
875 	 */
876 	if (pc_get_max_freq(pc) > pc->rp0_freq) {
877 		ret = pc_set_max_freq(pc, pc->rp0_freq);
878 		if (ret)
879 			goto out;
880 	}
881 
882 	/*
	 * The same thing happens on server platforms, where min is listed
	 * as RPmax.
885 	 */
886 	if (pc_get_min_freq(pc) > pc->rp0_freq)
887 		ret = pc_set_min_freq(pc, pc->rp0_freq);
888 
889 	if (XE_GT_WA(tile->primary_gt, 14022085890))
890 		ret = pc_set_min_freq(pc, max(BMG_MIN_FREQ, pc_get_min_freq(pc)));
891 
892 out:
893 	return ret;
894 }
895 
896 static int pc_adjust_requested_freq(struct xe_guc_pc *pc)
897 {
898 	int ret = 0;
899 
900 	lockdep_assert_held(&pc->freq_lock);
901 
902 	if (pc->user_requested_min != 0) {
903 		ret = pc_set_min_freq(pc, pc->user_requested_min);
904 		if (ret)
905 			return ret;
906 	}
907 
908 	if (pc->user_requested_max != 0) {
909 		ret = pc_set_max_freq(pc, pc->user_requested_max);
910 		if (ret)
911 			return ret;
912 	}
913 
914 	return ret;
915 }
916 
917 static bool needs_flush_freq_limit(struct xe_guc_pc *pc)
918 {
919 	struct xe_gt *gt = pc_to_gt(pc);
920 
	return XE_GT_WA(gt, 22019338487) &&
922 		pc->rp0_freq > BMG_MERT_FLUSH_FREQ_CAP;
923 }
924 
925 /**
926  * xe_guc_pc_apply_flush_freq_limit() - Limit max GT freq during L2 flush
927  * @pc: the xe_guc_pc object
928  *
929  * As per the WA, reduce max GT frequency during L2 cache flush
930  */
931 void xe_guc_pc_apply_flush_freq_limit(struct xe_guc_pc *pc)
932 {
933 	struct xe_gt *gt = pc_to_gt(pc);
934 	u32 max_freq;
935 	int ret;
936 
937 	if (!needs_flush_freq_limit(pc))
938 		return;
939 
940 	guard(mutex)(&pc->freq_lock);
941 
942 	ret = xe_guc_pc_get_max_freq_locked(pc, &max_freq);
943 	if (!ret && max_freq > BMG_MERT_FLUSH_FREQ_CAP) {
944 		ret = pc_set_max_freq(pc, BMG_MERT_FLUSH_FREQ_CAP);
945 		if (ret) {
946 			xe_gt_err_once(gt, "Failed to cap max freq on flush to %u, %pe\n",
947 				       BMG_MERT_FLUSH_FREQ_CAP, ERR_PTR(ret));
948 			return;
949 		}
950 
951 		atomic_set(&pc->flush_freq_limit, 1);
952 
953 		/*
954 		 * If user has previously changed max freq, stash that value to
955 		 * restore later, otherwise use the current max. New user
956 		 * requests wait on flush.
957 		 */
958 		if (pc->user_requested_max != 0)
959 			pc->stashed_max_freq = pc->user_requested_max;
960 		else
961 			pc->stashed_max_freq = max_freq;
962 	}
963 
964 	/*
965 	 * Wait for actual freq to go below the flush cap: even if the previous
966 	 * max was below cap, the current one might still be above it
967 	 */
968 	ret = wait_for_act_freq_max_limit(pc, BMG_MERT_FLUSH_FREQ_CAP);
969 	if (ret)
970 		xe_gt_err_once(gt, "Actual freq did not reduce to %u, %pe\n",
971 			       BMG_MERT_FLUSH_FREQ_CAP, ERR_PTR(ret));
972 }
973 
974 /**
975  * xe_guc_pc_remove_flush_freq_limit() - Remove max GT freq limit after L2 flush completes.
976  * @pc: the xe_guc_pc object
977  *
 * Restore the previous GT max frequency value.
979  */
980 void xe_guc_pc_remove_flush_freq_limit(struct xe_guc_pc *pc)
981 {
982 	struct xe_gt *gt = pc_to_gt(pc);
983 	int ret = 0;
984 
985 	if (!needs_flush_freq_limit(pc))
986 		return;
987 
988 	if (!atomic_read(&pc->flush_freq_limit))
989 		return;
990 
991 	mutex_lock(&pc->freq_lock);
992 
	ret = pc_set_max_freq(pc, pc->stashed_max_freq);
994 	if (ret)
		xe_gt_err_once(gt, "Failed to restore max freq %u: %d\n",
996 			       pc->stashed_max_freq, ret);
997 
998 	atomic_set(&pc->flush_freq_limit, 0);
999 	mutex_unlock(&pc->freq_lock);
1000 	wake_up_var(&pc->flush_freq_limit);
1001 }
1002 
1003 static int pc_set_mert_freq_cap(struct xe_guc_pc *pc)
1004 {
1005 	int ret;
1006 
1007 	if (!XE_GT_WA(pc_to_gt(pc), 22019338487))
1008 		return 0;
1009 
1010 	guard(mutex)(&pc->freq_lock);
1011 
1012 	/*
1013 	 * Get updated min/max and stash them.
1014 	 */
1015 	ret = xe_guc_pc_get_min_freq_locked(pc, &pc->stashed_min_freq);
1016 	if (!ret)
1017 		ret = xe_guc_pc_get_max_freq_locked(pc, &pc->stashed_max_freq);
1018 	if (ret)
1019 		return ret;
1020 
1021 	/*
1022 	 * Ensure min and max are bound by MERT_FREQ_CAP until driver loads.
1023 	 */
1024 	ret = pc_set_min_freq(pc, min(pc->rpe_freq, pc_max_freq_cap(pc)));
1025 	if (!ret)
1026 		ret = pc_set_max_freq(pc, min(pc->rp0_freq, pc_max_freq_cap(pc)));
1027 
1028 	return ret;
1029 }
1030 
1031 /**
1032  * xe_guc_pc_restore_stashed_freq - Set min/max back to stashed values
1033  * @pc: The GuC PC
1034  *
1035  * Returns: 0 on success,
1036  *          error code on failure
1037  */
1038 int xe_guc_pc_restore_stashed_freq(struct xe_guc_pc *pc)
1039 {
1040 	int ret = 0;
1041 
1042 	if (IS_SRIOV_VF(pc_to_xe(pc)) || pc_to_xe(pc)->info.skip_guc_pc)
1043 		return 0;
1044 
1045 	mutex_lock(&pc->freq_lock);
1046 	ret = pc_set_max_freq(pc, pc->stashed_max_freq);
1047 	if (!ret)
1048 		ret = pc_set_min_freq(pc, pc->stashed_min_freq);
1049 	mutex_unlock(&pc->freq_lock);
1050 
1051 	return ret;
1052 }
1053 
1054 /**
1055  * xe_guc_pc_gucrc_disable - Disable GuC RC
1056  * @pc: Xe_GuC_PC instance
1057  *
1058  * Disables GuC RC by taking control of RC6 back from GuC.
1059  *
1060  * Return: 0 on success, negative error code on error.
1061  */
1062 int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc)
1063 {
1064 	struct xe_device *xe = pc_to_xe(pc);
1065 	struct xe_gt *gt = pc_to_gt(pc);
1066 	int ret = 0;
1067 
1068 	if (xe->info.skip_guc_pc)
1069 		return 0;
1070 
1071 	ret = pc_action_setup_gucrc(pc, GUCRC_HOST_CONTROL);
1072 	if (ret)
1073 		return ret;
1074 
1075 	return xe_gt_idle_disable_c6(gt);
1076 }
1077 
1078 /**
1079  * xe_guc_pc_override_gucrc_mode - override GUCRC mode
1080  * @pc: Xe_GuC_PC instance
1081  * @mode: new value of the mode.
1082  *
1083  * Return: 0 on success, negative error code on error
1084  */
1085 int xe_guc_pc_override_gucrc_mode(struct xe_guc_pc *pc, enum slpc_gucrc_mode mode)
1086 {
1087 	int ret;
1088 
1089 	xe_pm_runtime_get(pc_to_xe(pc));
1090 	ret = pc_action_set_param(pc, SLPC_PARAM_PWRGATE_RC_MODE, mode);
1091 	xe_pm_runtime_put(pc_to_xe(pc));
1092 
1093 	return ret;
1094 }
1095 
1096 /**
1097  * xe_guc_pc_unset_gucrc_mode - unset GUCRC mode override
1098  * @pc: Xe_GuC_PC instance
1099  *
1100  * Return: 0 on success, negative error code on error
1101  */
1102 int xe_guc_pc_unset_gucrc_mode(struct xe_guc_pc *pc)
1103 {
1104 	int ret;
1105 
1106 	xe_pm_runtime_get(pc_to_xe(pc));
1107 	ret = pc_action_unset_param(pc, SLPC_PARAM_PWRGATE_RC_MODE);
1108 	xe_pm_runtime_put(pc_to_xe(pc));
1109 
1110 	return ret;
1111 }
1112 
1113 static void pc_init_pcode_freq(struct xe_guc_pc *pc)
1114 {
1115 	u32 min = DIV_ROUND_CLOSEST(pc->rpn_freq, GT_FREQUENCY_MULTIPLIER);
1116 	u32 max = DIV_ROUND_CLOSEST(pc->rp0_freq, GT_FREQUENCY_MULTIPLIER);
1117 
1118 	XE_WARN_ON(xe_pcode_init_min_freq_table(gt_to_tile(pc_to_gt(pc)), min, max));
1119 }
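
/*
 * PCODE takes the min/max table bounds as ratios in units of
 * GT_FREQUENCY_MULTIPLIER (50 MHz). As a worked example with illustrative,
 * non-fused numbers: an RPn of 300 MHz and an RP0 of 2000 MHz are passed to
 * xe_pcode_init_min_freq_table() as min = 6 and max = 40.
 */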
1120 
1121 static int pc_init_freqs(struct xe_guc_pc *pc)
1122 {
1123 	int ret;
1124 
1125 	mutex_lock(&pc->freq_lock);
1126 
1127 	ret = pc_adjust_freq_bounds(pc);
1128 	if (ret)
1129 		goto out;
1130 
1131 	ret = pc_adjust_requested_freq(pc);
1132 	if (ret)
1133 		goto out;
1134 
1135 	pc_update_rp_values(pc);
1136 
1137 	pc_init_pcode_freq(pc);
1138 
1139 	/*
	 * The frequencies are only really ready for use once the
	 * user-requested ones have been restored.
1142 	 */
1143 	pc->freq_ready = true;
1144 
1145 out:
1146 	mutex_unlock(&pc->freq_lock);
1147 	return ret;
1148 }
1149 
1150 static int pc_action_set_strategy(struct xe_guc_pc *pc, u32 val)
1151 {
	return pc_action_set_param(pc, SLPC_PARAM_STRATEGIES, val);
1159 }
1160 
1161 static const char *power_profile_to_string(struct xe_guc_pc *pc)
1162 {
1163 	switch (pc->power_profile) {
1164 	case SLPC_POWER_PROFILE_BASE:
1165 		return "base";
1166 	case SLPC_POWER_PROFILE_POWER_SAVING:
1167 		return "power_saving";
1168 	default:
1169 		return "invalid";
1170 	}
1171 }
1172 
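/**
 * xe_guc_pc_get_power_profile - Print the current SLPC power profile
 * @pc: The GuC PC
 * @profile: Output buffer; lists the available profiles with the active one
 *           bracketed, e.g. "[base]    power_saving"
 */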
1173 void xe_guc_pc_get_power_profile(struct xe_guc_pc *pc, char *profile)
1174 {
1175 	switch (pc->power_profile) {
1176 	case SLPC_POWER_PROFILE_BASE:
1177 		sprintf(profile, "[%s]    %s\n", "base", "power_saving");
1178 		break;
1179 	case SLPC_POWER_PROFILE_POWER_SAVING:
1180 		sprintf(profile, "%s    [%s]\n", "base", "power_saving");
1181 		break;
1182 	default:
		sprintf(profile, "invalid\n");
1184 	}
1185 }
1186 
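/**
 * xe_guc_pc_set_power_profile - Select the SLPC power profile
 * @pc: The GuC PC
 * @buf: Profile name, either "base" or "power_saving"
 *
 * Returns: 0 on success, -EINVAL for an unrecognized profile name, or a
 *          negative error code if the GuC request fails.
 */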
1187 int xe_guc_pc_set_power_profile(struct xe_guc_pc *pc, const char *buf)
1188 {
1189 	int ret = 0;
1190 	u32 val;
1191 
1192 	if (strncmp("base", buf, strlen("base")) == 0)
1193 		val = SLPC_POWER_PROFILE_BASE;
1194 	else if (strncmp("power_saving", buf, strlen("power_saving")) == 0)
1195 		val = SLPC_POWER_PROFILE_POWER_SAVING;
1196 	else
1197 		return -EINVAL;
1198 
1199 	guard(mutex)(&pc->freq_lock);
1200 	xe_pm_runtime_get_noresume(pc_to_xe(pc));
1201 
1202 	ret = pc_action_set_param(pc,
1203 				  SLPC_PARAM_POWER_PROFILE,
1204 				  val);
1205 	if (ret)
1206 		xe_gt_err_once(pc_to_gt(pc), "Failed to set power profile to %d: %pe\n",
1207 			       val, ERR_PTR(ret));
1208 	else
1209 		pc->power_profile = val;
1210 
1211 	xe_pm_runtime_put(pc_to_xe(pc));
1212 
1213 	return ret;
1214 }
1215 
1216 /**
1217  * xe_guc_pc_start - Start GuC's Power Conservation component
1218  * @pc: Xe_GuC_PC instance
1219  */
1220 int xe_guc_pc_start(struct xe_guc_pc *pc)
1221 {
1222 	struct xe_device *xe = pc_to_xe(pc);
1223 	struct xe_gt *gt = pc_to_gt(pc);
1224 	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
1225 	unsigned int fw_ref;
1226 	ktime_t earlier;
1227 	int ret;
1228 
1229 	xe_gt_assert(gt, xe_device_uc_enabled(xe));
1230 
1231 	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
1232 	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT)) {
1233 		xe_force_wake_put(gt_to_fw(gt), fw_ref);
1234 		return -ETIMEDOUT;
1235 	}
1236 
1237 	if (xe->info.skip_guc_pc) {
1238 		if (xe->info.platform != XE_PVC)
1239 			xe_gt_idle_enable_c6(gt);
1240 
1241 		/* Request max possible since dynamic freq mgmt is not enabled */
1242 		pc_set_cur_freq(pc, UINT_MAX);
1243 
1244 		ret = 0;
1245 		goto out;
1246 	}
1247 
1248 	xe_map_memset(xe, &pc->bo->vmap, 0, 0, size);
1249 	slpc_shared_data_write(pc, header.size, size);
1250 
1251 	earlier = ktime_get();
1252 	ret = pc_action_reset(pc);
1253 	if (ret)
1254 		goto out;
1255 
1256 	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
1257 			      SLPC_RESET_TIMEOUT_MS)) {
1258 		xe_gt_warn(gt, "GuC PC start taking longer than normal [freq = %dMHz (req = %dMHz), perf_limit_reasons = 0x%08X]\n",
1259 			   xe_guc_pc_get_act_freq(pc), get_cur_freq(gt),
1260 			   xe_gt_throttle_get_limit_reasons(gt));
1261 
1262 		if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
1263 				      SLPC_RESET_EXTENDED_TIMEOUT_MS)) {
1264 			xe_gt_err(gt, "GuC PC Start failed: Dynamic GT frequency control and GT sleep states are now disabled.\n");
1265 			ret = -EIO;
1266 			goto out;
1267 		}
1268 
		xe_gt_warn(gt, "GuC PC excessive start time: %lldms\n",
1270 			   ktime_ms_delta(ktime_get(), earlier));
1271 	}
1272 
1273 	ret = pc_init_freqs(pc);
1274 	if (ret)
1275 		goto out;
1276 
1277 	ret = pc_set_mert_freq_cap(pc);
1278 	if (ret)
1279 		goto out;
1280 
1281 	if (xe->info.platform == XE_PVC) {
1282 		xe_guc_pc_gucrc_disable(pc);
1283 		ret = 0;
1284 		goto out;
1285 	}
1286 
1287 	ret = pc_action_setup_gucrc(pc, GUCRC_FIRMWARE_CONTROL);
1288 	if (ret)
1289 		goto out;
1290 
	/* Enable SLPC Optimized Strategy for compute */
	ret = pc_action_set_strategy(pc, SLPC_OPTIMIZED_STRATEGY_COMPUTE);
	if (ret)
		goto out;
1293 
1294 	/* Set cached value of power_profile */
1295 	ret = xe_guc_pc_set_power_profile(pc, power_profile_to_string(pc));
1296 	if (unlikely(ret))
1297 		xe_gt_err(gt, "Failed to set SLPC power profile: %pe\n", ERR_PTR(ret));
1298 
1299 out:
1300 	xe_force_wake_put(gt_to_fw(gt), fw_ref);
1301 	return ret;
1302 }
1303 
1304 /**
1305  * xe_guc_pc_stop - Stop GuC's Power Conservation component
1306  * @pc: Xe_GuC_PC instance
1307  */
1308 int xe_guc_pc_stop(struct xe_guc_pc *pc)
1309 {
1310 	struct xe_device *xe = pc_to_xe(pc);
1311 
1312 	if (xe->info.skip_guc_pc) {
1313 		xe_gt_idle_disable_c6(pc_to_gt(pc));
1314 		return 0;
1315 	}
1316 
1317 	mutex_lock(&pc->freq_lock);
1318 	pc->freq_ready = false;
1319 	mutex_unlock(&pc->freq_lock);
1320 
1321 	return 0;
1322 }
1323 
1324 /**
1325  * xe_guc_pc_fini_hw - Finalize GuC's Power Conservation component
1326  * @arg: opaque pointer that should point to Xe_GuC_PC instance
1327  */
1328 static void xe_guc_pc_fini_hw(void *arg)
1329 {
1330 	struct xe_guc_pc *pc = arg;
1331 	struct xe_device *xe = pc_to_xe(pc);
1332 	unsigned int fw_ref;
1333 
1334 	if (xe_device_wedged(xe))
1335 		return;
1336 
1337 	fw_ref = xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL);
1338 	xe_guc_pc_gucrc_disable(pc);
1339 	XE_WARN_ON(xe_guc_pc_stop(pc));
1340 
1341 	/* Bind requested freq to mert_freq_cap before unload */
1342 	pc_set_cur_freq(pc, min(pc_max_freq_cap(pc), pc->rpe_freq));
1343 
1344 	xe_force_wake_put(gt_to_fw(pc_to_gt(pc)), fw_ref);
1345 }
1346 
1347 /**
1348  * xe_guc_pc_init - Initialize GuC's Power Conservation component
1349  * @pc: Xe_GuC_PC instance
1350  */
1351 int xe_guc_pc_init(struct xe_guc_pc *pc)
1352 {
1353 	struct xe_gt *gt = pc_to_gt(pc);
1354 	struct xe_tile *tile = gt_to_tile(gt);
1355 	struct xe_device *xe = gt_to_xe(gt);
1356 	struct xe_bo *bo;
1357 	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
1358 	int err;
1359 
1360 	if (xe->info.skip_guc_pc)
1361 		return 0;
1362 
1363 	err = drmm_mutex_init(&xe->drm, &pc->freq_lock);
1364 	if (err)
1365 		return err;
1366 
1367 	bo = xe_managed_bo_create_pin_map(xe, tile, size,
1368 					  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
1369 					  XE_BO_FLAG_GGTT |
1370 					  XE_BO_FLAG_GGTT_INVALIDATE |
1371 					  XE_BO_FLAG_PINNED_NORESTORE);
1372 	if (IS_ERR(bo))
1373 		return PTR_ERR(bo);
1374 
1375 	pc->bo = bo;
1376 
1377 	pc->power_profile = SLPC_POWER_PROFILE_BASE;
1378 
1379 	return devm_add_action_or_reset(xe->drm.dev, xe_guc_pc_fini_hw, pc);
1380 }
1381 
1382 static const char *pc_get_state_string(struct xe_guc_pc *pc)
1383 {
1384 	switch (slpc_shared_data_read(pc, header.global_state)) {
1385 	case SLPC_GLOBAL_STATE_NOT_RUNNING:
1386 		return "not running";
1387 	case SLPC_GLOBAL_STATE_INITIALIZING:
1388 		return "initializing";
1389 	case SLPC_GLOBAL_STATE_RESETTING:
1390 		return "resetting";
1391 	case SLPC_GLOBAL_STATE_RUNNING:
1392 		return "running";
1393 	case SLPC_GLOBAL_STATE_SHUTTING_DOWN:
1394 		return "shutting down";
1395 	case SLPC_GLOBAL_STATE_ERROR:
1396 		return "error";
1397 	default:
1398 		return "unknown";
1399 	}
1400 }
1401 
1402 /**
1403  * xe_guc_pc_print - Print GuC's Power Conservation information for debug
1404  * @pc: Xe_GuC_PC instance
1405  * @p: drm_printer
1406  */
1407 void xe_guc_pc_print(struct xe_guc_pc *pc, struct drm_printer *p)
1408 {
1409 	drm_printf(p, "SLPC Shared Data Header:\n");
1410 	drm_printf(p, "\tSize: %x\n", slpc_shared_data_read(pc, header.size));
1411 	drm_printf(p, "\tGlobal State: %s\n", pc_get_state_string(pc));
1412 
1413 	if (pc_action_query_task_state(pc))
1414 		return;
1415 
1416 	drm_printf(p, "\nSLPC Tasks Status:\n");
1417 	drm_printf(p, "\tGTPERF enabled: %s\n",
1418 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1419 			      SLPC_GTPERF_TASK_ENABLED));
1420 	drm_printf(p, "\tDCC enabled: %s\n",
1421 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1422 			      SLPC_DCC_TASK_ENABLED));
1423 	drm_printf(p, "\tDCC in use: %s\n",
1424 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1425 			      SLPC_IN_DCC));
1426 	drm_printf(p, "\tBalancer enabled: %s\n",
1427 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1428 			      SLPC_BALANCER_ENABLED));
1429 	drm_printf(p, "\tIBC enabled: %s\n",
1430 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1431 			      SLPC_IBC_TASK_ENABLED));
1432 	drm_printf(p, "\tBalancer IA LMT enabled: %s\n",
1433 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1434 			      SLPC_BALANCER_IA_LMT_ENABLED));
1435 	drm_printf(p, "\tBalancer IA LMT active: %s\n",
1436 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1437 			      SLPC_BALANCER_IA_LMT_ACTIVE));
1438 }
1439