xref: /linux/drivers/gpu/drm/xe/xe_guc_pc.c (revision f6e8dc9edf963dbc99085e54f6ced6da9daa6100)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include "xe_guc_pc.h"
7 
8 #include <linux/cleanup.h>
9 #include <linux/delay.h>
10 #include <linux/iopoll.h>
11 #include <linux/jiffies.h>
12 #include <linux/ktime.h>
13 #include <linux/wait_bit.h>
14 
15 #include <drm/drm_managed.h>
16 #include <drm/drm_print.h>
17 #include <generated/xe_device_wa_oob.h>
18 #include <generated/xe_wa_oob.h>
19 
20 #include "abi/guc_actions_slpc_abi.h"
21 #include "regs/xe_gt_regs.h"
22 #include "regs/xe_regs.h"
23 #include "xe_bo.h"
24 #include "xe_device.h"
25 #include "xe_force_wake.h"
26 #include "xe_gt.h"
27 #include "xe_gt_idle.h"
28 #include "xe_gt_printk.h"
29 #include "xe_gt_throttle.h"
30 #include "xe_gt_types.h"
31 #include "xe_guc.h"
32 #include "xe_guc_ct.h"
33 #include "xe_map.h"
34 #include "xe_mmio.h"
35 #include "xe_pcode.h"
36 #include "xe_pm.h"
37 #include "xe_sriov.h"
38 #include "xe_wa.h"
39 
40 #define MCHBAR_MIRROR_BASE_SNB	0x140000
41 
42 #define RP_STATE_CAP		XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5998)
43 #define   RP0_MASK		REG_GENMASK(7, 0)
44 #define   RP1_MASK		REG_GENMASK(15, 8)
45 #define   RPN_MASK		REG_GENMASK(23, 16)
46 
47 #define FREQ_INFO_REC	XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5ef0)
48 #define   RPE_MASK		REG_GENMASK(15, 8)
49 #define   RPA_MASK		REG_GENMASK(31, 16)
50 
51 #define GT_PERF_STATUS		XE_REG(0x1381b4)
52 #define   CAGF_MASK	REG_GENMASK(19, 11)
53 
54 #define GT_FREQUENCY_MULTIPLIER	50
55 #define GT_FREQUENCY_SCALER	3
56 
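/*
 * Frequency limits, in MHz, used by the frequency-related workarounds below
 * (see pc_max_freq_cap(), pc_adjust_freq_bounds() and the L2 flush frequency
 * cap handling).
 */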
57 #define LNL_MERT_FREQ_CAP	800
58 #define BMG_MERT_FREQ_CAP	2133
59 #define BMG_MIN_FREQ		1200
60 #define BMG_MERT_FLUSH_FREQ_CAP	2600
61 
62 #define SLPC_RESET_TIMEOUT_MS 5 /* roughly 5ms, but no need for precision */
63 #define SLPC_RESET_EXTENDED_TIMEOUT_MS 1000 /* To be used only at pc_start */
64 #define SLPC_ACT_FREQ_TIMEOUT_MS 100
65 
66 /**
67  * DOC: GuC Power Conservation (PC)
68  *
69  * GuC Power Conservation (PC) supports multiple features for the most
70  * efficient and performant use of the GT when GuC submission is enabled,
71  * including frequency management, Render-C state management, and various
72  * algorithms for power balancing.
73  *
74  * Single Loop Power Conservation (SLPC) is the name given to the suite of
75  * connected power conservation features in the GuC firmware. The firmware
76  * exposes a programming interface to the host for the control of SLPC.
77  *
78  * Frequency management:
79  * =====================
80  *
81  * The Xe driver enables SLPC with all of its default features and frequency
82  * selection, which varies per platform.
83  *
84  * Power profiles add another level of control to SLPC. When the power saving
85  * profile is chosen, SLPC uses conservative thresholds to ramp frequency,
86  * thus saving power. The base profile is the default and ensures balanced
87  * performance for any workload.
88  *
89  * Render-C States:
90  * ================
91  *
92  * Render-C states are also a GuC PC feature that is now enabled in Xe for
93  * all platforms.
94  *
95  */
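
/*
 * Illustrative usage sketch (not functional code): the controls described
 * above are driven through entry points in this file, e.g.:
 *
 *	xe_guc_pc_set_min_freq(pc, freq_mhz);
 *	xe_guc_pc_set_max_freq(pc, freq_mhz);
 *	xe_guc_pc_set_power_profile(pc, "power_saving");
 *
 * all of which are forwarded to SLPC as H2G SLPC_EVENT_PARAMETER_SET
 * requests via pc_action_set_param().
 */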
96 
97 static struct xe_guc *pc_to_guc(struct xe_guc_pc *pc)
98 {
99 	return container_of(pc, struct xe_guc, pc);
100 }
101 
102 static struct xe_guc_ct *pc_to_ct(struct xe_guc_pc *pc)
103 {
104 	return &pc_to_guc(pc)->ct;
105 }
106 
107 static struct xe_gt *pc_to_gt(struct xe_guc_pc *pc)
108 {
109 	return guc_to_gt(pc_to_guc(pc));
110 }
111 
112 static struct xe_device *pc_to_xe(struct xe_guc_pc *pc)
113 {
114 	return guc_to_xe(pc_to_guc(pc));
115 }
116 
117 static struct iosys_map *pc_to_maps(struct xe_guc_pc *pc)
118 {
119 	return &pc->bo->vmap;
120 }
121 
122 #define slpc_shared_data_read(pc_, field_) \
123 	xe_map_rd_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
124 			struct slpc_shared_data, field_)
125 
126 #define slpc_shared_data_write(pc_, field_, val_) \
127 	xe_map_wr_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
128 			struct slpc_shared_data, field_, val_)
129 
130 #define SLPC_EVENT(id, count) \
131 	(FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID, id) | \
132 	 FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, count))
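
/*
 * For example, the reset action below sends SLPC_EVENT(SLPC_EVENT_RESET, 2):
 * the event id and the number of argument dwords that follow, packed into the
 * second word of the H2G SLPC request.
 */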
133 
134 static int wait_for_pc_state(struct xe_guc_pc *pc,
135 			     enum slpc_global_state target_state,
136 			     int timeout_ms)
137 {
138 	enum slpc_global_state state;
139 
140 	xe_device_assert_mem_access(pc_to_xe(pc));
141 
142 	return poll_timeout_us(state = slpc_shared_data_read(pc, header.global_state),
143 			       state == target_state,
144 			       20, timeout_ms * USEC_PER_MSEC, false);
145 }
146 
147 static int wait_for_flush_complete(struct xe_guc_pc *pc)
148 {
149 	const unsigned long timeout = msecs_to_jiffies(30);
150 
151 	if (!wait_var_event_timeout(&pc->flush_freq_limit,
152 				    !atomic_read(&pc->flush_freq_limit),
153 				    timeout))
154 		return -ETIMEDOUT;
155 
156 	return 0;
157 }
158 
159 static int wait_for_act_freq_max_limit(struct xe_guc_pc *pc, u32 max_limit)
160 {
161 	u32 freq;
162 
163 	return poll_timeout_us(freq = xe_guc_pc_get_act_freq(pc),
164 			       freq <= max_limit,
165 			       20, SLPC_ACT_FREQ_TIMEOUT_MS * USEC_PER_MSEC, false);
166 }
167 
168 static int pc_action_reset(struct xe_guc_pc *pc)
169 {
170 	struct xe_guc_ct *ct = pc_to_ct(pc);
171 	u32 action[] = {
172 		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
173 		SLPC_EVENT(SLPC_EVENT_RESET, 2),
174 		xe_bo_ggtt_addr(pc->bo),
175 		0,
176 	};
177 	int ret;
178 
179 	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
180 	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
181 		xe_gt_err(pc_to_gt(pc), "GuC PC reset failed: %pe\n",
182 			  ERR_PTR(ret));
183 
184 	return ret;
185 }
186 
187 static int pc_action_query_task_state(struct xe_guc_pc *pc)
188 {
189 	struct xe_guc_ct *ct = pc_to_ct(pc);
190 	u32 action[] = {
191 		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
192 		SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
193 		xe_bo_ggtt_addr(pc->bo),
194 		0,
195 	};
196 	int ret;
197 
198 	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
199 			      SLPC_RESET_TIMEOUT_MS))
200 		return -EAGAIN;
201 
202 	/* Blocking here to ensure the results are ready before reading them */
203 	ret = xe_guc_ct_send_block(ct, action, ARRAY_SIZE(action));
204 	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
205 		xe_gt_err(pc_to_gt(pc), "GuC PC query task state failed: %pe\n",
206 			  ERR_PTR(ret));
207 
208 	return ret;
209 }
210 
211 static int pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value)
212 {
213 	struct xe_guc_ct *ct = pc_to_ct(pc);
214 	u32 action[] = {
215 		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
216 		SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
217 		id,
218 		value,
219 	};
220 	int ret;
221 
222 	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
223 			      SLPC_RESET_TIMEOUT_MS))
224 		return -EAGAIN;
225 
226 	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
227 	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
228 		xe_gt_err(pc_to_gt(pc), "GuC PC set param[%u]=%u failed: %pe\n",
229 			  id, value, ERR_PTR(ret));
230 
231 	return ret;
232 }
233 
234 static int pc_action_unset_param(struct xe_guc_pc *pc, u8 id)
235 {
236 	u32 action[] = {
237 		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
238 		SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1),
239 		id,
240 	};
241 	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
242 	int ret;
243 
244 	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
245 			      SLPC_RESET_TIMEOUT_MS))
246 		return -EAGAIN;
247 
248 	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
249 	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
250 		xe_gt_err(pc_to_gt(pc), "GuC PC unset param failed: %pe\n",
251 			  ERR_PTR(ret));
252 
253 	return ret;
254 }
255 
256 static int pc_action_setup_gucrc(struct xe_guc_pc *pc, u32 mode)
257 {
258 	struct xe_guc_ct *ct = pc_to_ct(pc);
259 	u32 action[] = {
260 		GUC_ACTION_HOST2GUC_SETUP_PC_GUCRC,
261 		mode,
262 	};
263 	int ret;
264 
265 	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
266 	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
267 		xe_gt_err(pc_to_gt(pc), "GuC RC enable mode=%u failed: %pe\n",
268 			  mode, ERR_PTR(ret));
269 	return ret;
270 }
271 
272 static u32 decode_freq(u32 raw)
273 {
274 	return DIV_ROUND_CLOSEST(raw * GT_FREQUENCY_MULTIPLIER,
275 				 GT_FREQUENCY_SCALER);
276 }
277 
278 static u32 encode_freq(u32 freq)
279 {
280 	return DIV_ROUND_CLOSEST(freq * GT_FREQUENCY_SCALER,
281 				 GT_FREQUENCY_MULTIPLIER);
282 }
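
/*
 * Worked example (illustrative): the hardware expresses frequency ratios in
 * units of 50/3 MHz (roughly 16.66 MHz), so encode_freq(2000) =
 * DIV_ROUND_CLOSEST(2000 * 3, 50) = 120 ratio units, and decode_freq(120) =
 * DIV_ROUND_CLOSEST(120 * 50, 3) = 2000 MHz.
 */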
283 
284 static u32 pc_get_min_freq(struct xe_guc_pc *pc)
285 {
286 	u32 freq;
287 
288 	freq = FIELD_GET(SLPC_MIN_UNSLICE_FREQ_MASK,
289 			 slpc_shared_data_read(pc, task_state_data.freq));
290 
291 	return decode_freq(freq);
292 }
293 
294 static void pc_set_manual_rp_ctrl(struct xe_guc_pc *pc, bool enable)
295 {
296 	struct xe_gt *gt = pc_to_gt(pc);
297 	u32 state = enable ? RPSWCTL_ENABLE : RPSWCTL_DISABLE;
298 
299 	/* Allow/Disallow punit to process software freq requests */
300 	xe_mmio_write32(&gt->mmio, RP_CONTROL, state);
301 }
302 
303 static void pc_set_cur_freq(struct xe_guc_pc *pc, u32 freq)
304 {
305 	struct xe_gt *gt = pc_to_gt(pc);
306 	u32 rpnswreq;
307 
308 	pc_set_manual_rp_ctrl(pc, true);
309 
310 	/* Requested freq is in units of 16.66 MHz */
311 	rpnswreq = REG_FIELD_PREP(REQ_RATIO_MASK, encode_freq(freq));
312 	xe_mmio_write32(&gt->mmio, RPNSWREQ, rpnswreq);
313 
314 	/* Sleep for a small time to allow pcode to respond */
315 	usleep_range(100, 300);
316 
317 	pc_set_manual_rp_ctrl(pc, false);
318 }
319 
320 static int pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
321 {
322 	/*
323 	 * Let's only check for the rpn-rp0 range. If max < min,
324 	 * min becomes a fixed request.
325 	 */
326 	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
327 		return -EINVAL;
328 
329 	/*
330 	 * GuC policy is to elevate the minimum frequency to the efficient level.
331 	 * Our goal is to have the admin's choices respected.
332 	 */
333 	pc_action_set_param(pc, SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
334 			    freq < pc->rpe_freq);
335 
336 	return pc_action_set_param(pc,
337 				   SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
338 				   freq);
339 }
340 
341 static int pc_get_max_freq(struct xe_guc_pc *pc)
342 {
343 	u32 freq;
344 
345 	freq = FIELD_GET(SLPC_MAX_UNSLICE_FREQ_MASK,
346 			 slpc_shared_data_read(pc, task_state_data.freq));
347 
348 	return decode_freq(freq);
349 }
350 
351 static int pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
352 {
353 	/*
354 	 * Let's only check for the rpn-rp0 range. If max < min,
355 	 * min becomes a fixed request.
356 	 * Also, overclocking is not supported.
357 	 */
358 	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
359 		return -EINVAL;
360 
361 	return pc_action_set_param(pc,
362 				   SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
363 				   freq);
364 }
365 
366 static void mtl_update_rpa_value(struct xe_guc_pc *pc)
367 {
368 	struct xe_gt *gt = pc_to_gt(pc);
369 	u32 reg;
370 
371 	if (xe_gt_is_media_type(gt))
372 		reg = xe_mmio_read32(&gt->mmio, MTL_MPA_FREQUENCY);
373 	else
374 		reg = xe_mmio_read32(&gt->mmio, MTL_GT_RPA_FREQUENCY);
375 
376 	pc->rpa_freq = decode_freq(REG_FIELD_GET(MTL_RPA_MASK, reg));
377 }
378 
379 static void mtl_update_rpe_value(struct xe_guc_pc *pc)
380 {
381 	struct xe_gt *gt = pc_to_gt(pc);
382 	u32 reg;
383 
384 	if (xe_gt_is_media_type(gt))
385 		reg = xe_mmio_read32(&gt->mmio, MTL_MPE_FREQUENCY);
386 	else
387 		reg = xe_mmio_read32(&gt->mmio, MTL_GT_RPE_FREQUENCY);
388 
389 	pc->rpe_freq = decode_freq(REG_FIELD_GET(MTL_RPE_MASK, reg));
390 }
391 
392 static void tgl_update_rpa_value(struct xe_guc_pc *pc)
393 {
394 	struct xe_gt *gt = pc_to_gt(pc);
395 	struct xe_device *xe = gt_to_xe(gt);
396 	u32 reg;
397 
398 	/*
399 	 * For PVC we still need to use the fused RP0 as the approximation for
400 	 * RPa. For platforms other than PVC, we get the resolved RPa directly
401 	 * from PCODE, at a different register.
402 	 */
403 	if (xe->info.platform == XE_PVC) {
404 		reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
405 		pc->rpa_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
406 	} else {
407 		reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC);
408 		pc->rpa_freq = REG_FIELD_GET(RPA_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
409 	}
410 }
411 
412 static void tgl_update_rpe_value(struct xe_guc_pc *pc)
413 {
414 	struct xe_gt *gt = pc_to_gt(pc);
415 	struct xe_device *xe = gt_to_xe(gt);
416 	u32 reg;
417 
418 	/*
419 	 * For PVC we still need to use the fused RP1 as the approximation for
420 	 * RPe. For platforms other than PVC, we get the resolved RPe directly
421 	 * from PCODE, at a different register.
422 	 */
423 	if (xe->info.platform == XE_PVC) {
424 		reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
425 		pc->rpe_freq = REG_FIELD_GET(RP1_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
426 	} else {
427 		reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC);
428 		pc->rpe_freq = REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
429 	}
430 }
431 
432 static void pc_update_rp_values(struct xe_guc_pc *pc)
433 {
434 	struct xe_gt *gt = pc_to_gt(pc);
435 	struct xe_device *xe = gt_to_xe(gt);
436 
437 	if (GRAPHICS_VERx100(xe) >= 1270) {
438 		mtl_update_rpa_value(pc);
439 		mtl_update_rpe_value(pc);
440 	} else {
441 		tgl_update_rpa_value(pc);
442 		tgl_update_rpe_value(pc);
443 	}
444 
445 	/*
446 	 * RPe is decided at runtime by PCODE. In the rare case where that's
447 	 * smaller than the fused min, we will trust PCODE and use that
448 	 * as our minimum.
449 	 */
450 	pc->rpn_freq = min(pc->rpn_freq, pc->rpe_freq);
451 }
452 
453 /**
454  * xe_guc_pc_get_act_freq - Get Actual running frequency
455  * @pc: The GuC PC
456  *
457  * Returns: The actual running frequency, which might be 0 if the GT is in a Render-C sleep state (RC6).
458  */
459 u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc)
460 {
461 	struct xe_gt *gt = pc_to_gt(pc);
462 	struct xe_device *xe = gt_to_xe(gt);
463 	u32 freq;
464 
465 	/* When in RC6, actual frequency reported will be 0. */
466 	if (GRAPHICS_VERx100(xe) >= 1270) {
467 		freq = xe_mmio_read32(&gt->mmio, MTL_MIRROR_TARGET_WP1);
468 		freq = REG_FIELD_GET(MTL_CAGF_MASK, freq);
469 	} else {
470 		freq = xe_mmio_read32(&gt->mmio, GT_PERF_STATUS);
471 		freq = REG_FIELD_GET(CAGF_MASK, freq);
472 	}
473 
474 	freq = decode_freq(freq);
475 
476 	return freq;
477 }
478 
479 static u32 get_cur_freq(struct xe_gt *gt)
480 {
481 	u32 freq;
482 
483 	freq = xe_mmio_read32(&gt->mmio, RPNSWREQ);
484 	freq = REG_FIELD_GET(REQ_RATIO_MASK, freq);
485 	return decode_freq(freq);
486 }
487 
488 /**
489  * xe_guc_pc_get_cur_freq_fw - With forcewake held, get the requested frequency
490  * @pc: The GuC PC
491  *
492  * Returns: the requested frequency for that GT instance
493  */
494 u32 xe_guc_pc_get_cur_freq_fw(struct xe_guc_pc *pc)
495 {
496 	struct xe_gt *gt = pc_to_gt(pc);
497 
498 	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
499 
500 	return get_cur_freq(gt);
501 }
502 
503 /**
504  * xe_guc_pc_get_cur_freq - Get Current requested frequency
505  * @pc: The GuC PC
506  * @freq: A pointer to a u32 where the freq value will be returned
507  *
508  * Returns: 0 on success,
509  *         -ETIMEDOUT if the GT forcewake could not be acquired.
510  */
511 int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq)
512 {
513 	struct xe_gt *gt = pc_to_gt(pc);
514 	unsigned int fw_ref;
515 
516 	/*
517 	 * GuC SLPC plays with the cur freq request when GuCRC is enabled.
518 	 * Block RC6 for a more reliable read.
519 	 */
520 	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
521 	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT)) {
522 		xe_force_wake_put(gt_to_fw(gt), fw_ref);
523 		return -ETIMEDOUT;
524 	}
525 
526 	*freq = get_cur_freq(gt);
527 
528 	xe_force_wake_put(gt_to_fw(gt), fw_ref);
529 	return 0;
530 }
531 
532 /**
533  * xe_guc_pc_get_rp0_freq - Get the RP0 freq
534  * @pc: The GuC PC
535  *
536  * Returns: RP0 freq.
537  */
538 u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc)
539 {
540 	return pc->rp0_freq;
541 }
542 
543 /**
544  * xe_guc_pc_get_rpa_freq - Get the RPa freq
545  * @pc: The GuC PC
546  *
547  * Returns: RPa freq.
548  */
549 u32 xe_guc_pc_get_rpa_freq(struct xe_guc_pc *pc)
550 {
551 	pc_update_rp_values(pc);
552 
553 	return pc->rpa_freq;
554 }
555 
556 /**
557  * xe_guc_pc_get_rpe_freq - Get the RPe freq
558  * @pc: The GuC PC
559  *
560  * Returns: RPe freq.
561  */
562 u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc)
563 {
564 	pc_update_rp_values(pc);
565 
566 	return pc->rpe_freq;
567 }
568 
569 /**
570  * xe_guc_pc_get_rpn_freq - Get the RPn freq
571  * @pc: The GuC PC
572  *
573  * Returns: RPn freq.
574  */
575 u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc)
576 {
577 	return pc->rpn_freq;
578 }
579 
580 static int xe_guc_pc_get_min_freq_locked(struct xe_guc_pc *pc, u32 *freq)
581 {
582 	int ret;
583 
584 	lockdep_assert_held(&pc->freq_lock);
585 
586 	/* Might be in the middle of a gt reset */
587 	if (!pc->freq_ready)
588 		return -EAGAIN;
589 
590 	ret = pc_action_query_task_state(pc);
591 	if (ret)
592 		return ret;
593 
594 	*freq = pc_get_min_freq(pc);
595 
596 	return 0;
597 }
598 
599 /**
600  * xe_guc_pc_get_min_freq - Get the min operational frequency
601  * @pc: The GuC PC
602  * @freq: A pointer to a u32 where the freq value will be returned
603  *
604  * Returns: 0 on success,
605  *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
606  */
607 int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq)
608 {
609 	guard(mutex)(&pc->freq_lock);
610 
611 	return xe_guc_pc_get_min_freq_locked(pc, freq);
612 }
613 
614 static int xe_guc_pc_set_min_freq_locked(struct xe_guc_pc *pc, u32 freq)
615 {
616 	int ret;
617 
618 	lockdep_assert_held(&pc->freq_lock);
619 
620 	/* Might be in the middle of a gt reset */
621 	if (!pc->freq_ready)
622 		return -EAGAIN;
623 
624 	ret = pc_set_min_freq(pc, freq);
625 	if (ret)
626 		return ret;
627 
628 	pc->user_requested_min = freq;
629 
630 	return 0;
631 }
632 
633 /**
634  * xe_guc_pc_set_min_freq - Set the minimal operational frequency
635  * @pc: The GuC PC
636  * @freq: The selected minimal frequency
637  *
638  * Returns: 0 on success,
639  *         -EAGAIN if GuC PC not ready (likely in middle of a reset),
640  *         -EINVAL if value out of bounds.
641  */
642 int xe_guc_pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
643 {
644 	guard(mutex)(&pc->freq_lock);
645 
646 	return xe_guc_pc_set_min_freq_locked(pc, freq);
647 }
648 
649 static int xe_guc_pc_get_max_freq_locked(struct xe_guc_pc *pc, u32 *freq)
650 {
651 	int ret;
652 
653 	lockdep_assert_held(&pc->freq_lock);
654 
655 	/* Might be in the middle of a gt reset */
656 	if (!pc->freq_ready)
657 		return -EAGAIN;
658 
659 	ret = pc_action_query_task_state(pc);
660 	if (ret)
661 		return ret;
662 
663 	*freq = pc_get_max_freq(pc);
664 
665 	return 0;
666 }
667 
668 /**
669  * xe_guc_pc_get_max_freq - Get Maximum operational frequency
670  * @pc: The GuC PC
671  * @freq: A pointer to a u32 where the freq value will be returned
672  *
673  * Returns: 0 on success,
674  *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
675  */
676 int xe_guc_pc_get_max_freq(struct xe_guc_pc *pc, u32 *freq)
677 {
678 	guard(mutex)(&pc->freq_lock);
679 
680 	return xe_guc_pc_get_max_freq_locked(pc, freq);
681 }
682 
683 static int xe_guc_pc_set_max_freq_locked(struct xe_guc_pc *pc, u32 freq)
684 {
685 	int ret;
686 
687 	lockdep_assert_held(&pc->freq_lock);
688 
689 	/* Might be in the middle of a gt reset */
690 	if (!pc->freq_ready)
691 		return -EAGAIN;
692 
693 	ret = pc_set_max_freq(pc, freq);
694 	if (ret)
695 		return ret;
696 
697 	pc->user_requested_max = freq;
698 
699 	return 0;
700 }
701 
702 /**
703  * xe_guc_pc_set_max_freq - Set the maximum operational frequency
704  * @pc: The GuC PC
705  * @freq: The selected maximum frequency value
706  *
707  * Returns: 0 on success,
708  *         -EAGAIN if GuC PC not ready (likely in middle of a reset),
709  *         -EINVAL if value out of bounds.
710  */
711 int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
712 {
713 	if (XE_GT_WA(pc_to_gt(pc), 22019338487)) {
714 		if (wait_for_flush_complete(pc) != 0)
715 			return -EAGAIN;
716 	}
717 
718 	guard(mutex)(&pc->freq_lock);
719 
720 	return xe_guc_pc_set_max_freq_locked(pc, freq);
721 }
722 
723 /**
724  * xe_guc_pc_c_status - get the current GT C state
725  * @pc: XE_GuC_PC instance
726  */
727 enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc)
728 {
729 	struct xe_gt *gt = pc_to_gt(pc);
730 	u32 reg, gt_c_state;
731 
732 	if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
733 		reg = xe_mmio_read32(&gt->mmio, MTL_MIRROR_TARGET_WP1);
734 		gt_c_state = REG_FIELD_GET(MTL_CC_MASK, reg);
735 	} else {
736 		reg = xe_mmio_read32(&gt->mmio, GT_CORE_STATUS);
737 		gt_c_state = REG_FIELD_GET(RCN_MASK, reg);
738 	}
739 
740 	switch (gt_c_state) {
741 	case GT_C6:
742 		return GT_IDLE_C6;
743 	case GT_C0:
744 		return GT_IDLE_C0;
745 	default:
746 		return GT_IDLE_UNKNOWN;
747 	}
748 }
749 
750 /**
751  * xe_guc_pc_rc6_residency - rc6 residency counter
752  * @pc: Xe_GuC_PC instance
753  */
754 u64 xe_guc_pc_rc6_residency(struct xe_guc_pc *pc)
755 {
756 	struct xe_gt *gt = pc_to_gt(pc);
757 	u32 reg;
758 
759 	reg = xe_mmio_read32(&gt->mmio, GT_GFX_RC6);
760 
761 	return reg;
762 }
763 
764 /**
765  * xe_guc_pc_mc6_residency - mc6 residency counter
766  * @pc: Xe_GuC_PC instance
767  */
768 u64 xe_guc_pc_mc6_residency(struct xe_guc_pc *pc)
769 {
770 	struct xe_gt *gt = pc_to_gt(pc);
771 	u64 reg;
772 
773 	reg = xe_mmio_read32(&gt->mmio, MTL_MEDIA_MC6);
774 
775 	return reg;
776 }
777 
778 static void mtl_init_fused_rp_values(struct xe_guc_pc *pc)
779 {
780 	struct xe_gt *gt = pc_to_gt(pc);
781 	u32 reg;
782 
783 	xe_device_assert_mem_access(pc_to_xe(pc));
784 
785 	if (xe_gt_is_media_type(gt))
786 		reg = xe_mmio_read32(&gt->mmio, MTL_MEDIAP_STATE_CAP);
787 	else
788 		reg = xe_mmio_read32(&gt->mmio, MTL_RP_STATE_CAP);
789 
790 	pc->rp0_freq = decode_freq(REG_FIELD_GET(MTL_RP0_CAP_MASK, reg));
791 
792 	pc->rpn_freq = decode_freq(REG_FIELD_GET(MTL_RPN_CAP_MASK, reg));
793 }
794 
795 static void tgl_init_fused_rp_values(struct xe_guc_pc *pc)
796 {
797 	struct xe_gt *gt = pc_to_gt(pc);
798 	struct xe_device *xe = gt_to_xe(gt);
799 	u32 reg;
800 
801 	xe_device_assert_mem_access(pc_to_xe(pc));
802 
803 	if (xe->info.platform == XE_PVC)
804 		reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
805 	else
806 		reg = xe_mmio_read32(&gt->mmio, RP_STATE_CAP);
807 	pc->rp0_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
808 	pc->rpn_freq = REG_FIELD_GET(RPN_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
809 }
810 
811 static void pc_init_fused_rp_values(struct xe_guc_pc *pc)
812 {
813 	struct xe_gt *gt = pc_to_gt(pc);
814 	struct xe_device *xe = gt_to_xe(gt);
815 
816 	if (GRAPHICS_VERx100(xe) >= 1270)
817 		mtl_init_fused_rp_values(pc);
818 	else
819 		tgl_init_fused_rp_values(pc);
820 }
821 
822 static u32 pc_max_freq_cap(struct xe_guc_pc *pc)
823 {
824 	struct xe_gt *gt = pc_to_gt(pc);
825 
826 	if (XE_GT_WA(gt, 22019338487)) {
827 		if (xe_gt_is_media_type(gt))
828 			return min(LNL_MERT_FREQ_CAP, pc->rp0_freq);
829 		else
830 			return min(BMG_MERT_FREQ_CAP, pc->rp0_freq);
831 	} else {
832 		return pc->rp0_freq;
833 	}
834 }
835 
836 /**
837  * xe_guc_pc_raise_unslice - Request a higher GT frequency to allow faster
838  * GuC load times
839  * @pc: Xe_GuC_PC instance
840  */
841 void xe_guc_pc_raise_unslice(struct xe_guc_pc *pc)
842 {
843 	struct xe_gt *gt = pc_to_gt(pc);
844 
845 	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
846 	pc_set_cur_freq(pc, pc_max_freq_cap(pc));
847 }
848 
849 /**
850  * xe_guc_pc_init_early - Initialize RPx values
851  * @pc: Xe_GuC_PC instance
852  */
853 void xe_guc_pc_init_early(struct xe_guc_pc *pc)
854 {
855 	struct xe_gt *gt = pc_to_gt(pc);
856 
857 	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
858 	pc_init_fused_rp_values(pc);
859 }
860 
861 static int pc_adjust_freq_bounds(struct xe_guc_pc *pc)
862 {
863 	struct xe_tile *tile = gt_to_tile(pc_to_gt(pc));
864 	int ret;
865 
866 	lockdep_assert_held(&pc->freq_lock);
867 
868 	ret = pc_action_query_task_state(pc);
869 	if (ret)
870 		goto out;
871 
872 	/*
873 	 * GuC defaults to some RPmax that is not actually achievable without
874 	 * overclocking. Let's adjust it to the hardware RP0, which is the
875 	 * regular maximum.
876 	 */
877 	if (pc_get_max_freq(pc) > pc->rp0_freq) {
878 		ret = pc_set_max_freq(pc, pc->rp0_freq);
879 		if (ret)
880 			goto out;
881 	}
882 
883 	/*
884 	 * The same thing happens for server platforms, where the min is
885 	 * listed as RPMax.
886 	 */
887 	if (pc_get_min_freq(pc) > pc->rp0_freq)
888 		ret = pc_set_min_freq(pc, pc->rp0_freq);
889 
890 	if (XE_DEVICE_WA(tile_to_xe(tile), 14022085890))
891 		ret = pc_set_min_freq(pc, max(BMG_MIN_FREQ, pc_get_min_freq(pc)));
892 
893 out:
894 	return ret;
895 }
896 
897 static int pc_adjust_requested_freq(struct xe_guc_pc *pc)
898 {
899 	int ret = 0;
900 
901 	lockdep_assert_held(&pc->freq_lock);
902 
903 	if (pc->user_requested_min != 0) {
904 		ret = pc_set_min_freq(pc, pc->user_requested_min);
905 		if (ret)
906 			return ret;
907 	}
908 
909 	if (pc->user_requested_max != 0) {
910 		ret = pc_set_max_freq(pc, pc->user_requested_max);
911 		if (ret)
912 			return ret;
913 	}
914 
915 	return ret;
916 }
917 
918 static bool needs_flush_freq_limit(struct xe_guc_pc *pc)
919 {
920 	struct xe_gt *gt = pc_to_gt(pc);
921 
922 	return  XE_GT_WA(gt, 22019338487) &&
923 		pc->rp0_freq > BMG_MERT_FLUSH_FREQ_CAP;
924 }
925 
926 /**
927  * xe_guc_pc_apply_flush_freq_limit() - Limit max GT freq during L2 flush
928  * @pc: the xe_guc_pc object
929  *
930  * As per the WA, reduce max GT frequency during L2 cache flush
931  */
932 void xe_guc_pc_apply_flush_freq_limit(struct xe_guc_pc *pc)
933 {
934 	struct xe_gt *gt = pc_to_gt(pc);
935 	u32 max_freq;
936 	int ret;
937 
938 	if (!needs_flush_freq_limit(pc))
939 		return;
940 
941 	guard(mutex)(&pc->freq_lock);
942 
943 	ret = xe_guc_pc_get_max_freq_locked(pc, &max_freq);
944 	if (!ret && max_freq > BMG_MERT_FLUSH_FREQ_CAP) {
945 		ret = pc_set_max_freq(pc, BMG_MERT_FLUSH_FREQ_CAP);
946 		if (ret) {
947 			xe_gt_err_once(gt, "Failed to cap max freq on flush to %u, %pe\n",
948 				       BMG_MERT_FLUSH_FREQ_CAP, ERR_PTR(ret));
949 			return;
950 		}
951 
952 		atomic_set(&pc->flush_freq_limit, 1);
953 
954 		/*
955 		 * If user has previously changed max freq, stash that value to
956 		 * restore later, otherwise use the current max. New user
957 		 * requests wait on flush.
958 		 */
959 		if (pc->user_requested_max != 0)
960 			pc->stashed_max_freq = pc->user_requested_max;
961 		else
962 			pc->stashed_max_freq = max_freq;
963 	}
964 
965 	 * Wait for the actual freq to drop below the flush cap: even if the
966 	 * previous max was below the cap, the actual freq might still exceed it.
967 	 * max was below cap, the current one might still be above it
968 	 */
969 	ret = wait_for_act_freq_max_limit(pc, BMG_MERT_FLUSH_FREQ_CAP);
970 	if (ret)
971 		xe_gt_err_once(gt, "Actual freq did not reduce to %u, %pe\n",
972 			       BMG_MERT_FLUSH_FREQ_CAP, ERR_PTR(ret));
973 }
974 
975 /**
976  * xe_guc_pc_remove_flush_freq_limit() - Remove max GT freq limit after L2 flush completes.
977  * @pc: the xe_guc_pc object
978  *
979  * Restore the previous GT max frequency value.
980  */
981 void xe_guc_pc_remove_flush_freq_limit(struct xe_guc_pc *pc)
982 {
983 	struct xe_gt *gt = pc_to_gt(pc);
984 	int ret = 0;
985 
986 	if (!needs_flush_freq_limit(pc))
987 		return;
988 
989 	if (!atomic_read(&pc->flush_freq_limit))
990 		return;
991 
992 	mutex_lock(&pc->freq_lock);
993 
994 	ret = pc_set_max_freq(&gt->uc.guc.pc, pc->stashed_max_freq);
995 	if (ret)
996 		xe_gt_err_once(gt, "Failed to restore max freq %u:%d\n",
997 			       pc->stashed_max_freq, ret);
998 
999 	atomic_set(&pc->flush_freq_limit, 0);
1000 	mutex_unlock(&pc->freq_lock);
1001 	wake_up_var(&pc->flush_freq_limit);
1002 }
1003 
1004 static int pc_set_mert_freq_cap(struct xe_guc_pc *pc)
1005 {
1006 	int ret;
1007 
1008 	if (!XE_GT_WA(pc_to_gt(pc), 22019338487))
1009 		return 0;
1010 
1011 	guard(mutex)(&pc->freq_lock);
1012 
1013 	/*
1014 	 * Get updated min/max and stash them.
1015 	 */
1016 	ret = xe_guc_pc_get_min_freq_locked(pc, &pc->stashed_min_freq);
1017 	if (!ret)
1018 		ret = xe_guc_pc_get_max_freq_locked(pc, &pc->stashed_max_freq);
1019 	if (ret)
1020 		return ret;
1021 
1022 	/*
1023 	 * Ensure min and max are bound by MERT_FREQ_CAP until driver loads.
1024 	 */
1025 	ret = pc_set_min_freq(pc, min(pc->rpe_freq, pc_max_freq_cap(pc)));
1026 	if (!ret)
1027 		ret = pc_set_max_freq(pc, min(pc->rp0_freq, pc_max_freq_cap(pc)));
1028 
1029 	return ret;
1030 }
1031 
1032 /**
1033  * xe_guc_pc_restore_stashed_freq - Set min/max back to stashed values
1034  * @pc: The GuC PC
1035  *
1036  * Returns: 0 on success,
1037  *          error code on failure
1038  */
1039 int xe_guc_pc_restore_stashed_freq(struct xe_guc_pc *pc)
1040 {
1041 	int ret = 0;
1042 
1043 	if (IS_SRIOV_VF(pc_to_xe(pc)) || pc_to_xe(pc)->info.skip_guc_pc)
1044 		return 0;
1045 
1046 	mutex_lock(&pc->freq_lock);
1047 	ret = pc_set_max_freq(pc, pc->stashed_max_freq);
1048 	if (!ret)
1049 		ret = pc_set_min_freq(pc, pc->stashed_min_freq);
1050 	mutex_unlock(&pc->freq_lock);
1051 
1052 	return ret;
1053 }
1054 
1055 /**
1056  * xe_guc_pc_gucrc_disable - Disable GuC RC
1057  * @pc: Xe_GuC_PC instance
1058  *
1059  * Disables GuC RC by taking control of RC6 back from GuC.
1060  *
1061  * Return: 0 on success, negative error code on error.
1062  */
1063 int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc)
1064 {
1065 	struct xe_device *xe = pc_to_xe(pc);
1066 	struct xe_gt *gt = pc_to_gt(pc);
1067 	int ret = 0;
1068 
1069 	if (xe->info.skip_guc_pc)
1070 		return 0;
1071 
1072 	ret = pc_action_setup_gucrc(pc, GUCRC_HOST_CONTROL);
1073 	if (ret)
1074 		return ret;
1075 
1076 	return xe_gt_idle_disable_c6(gt);
1077 }
1078 
1079 /**
1080  * xe_guc_pc_override_gucrc_mode - override GUCRC mode
1081  * @pc: Xe_GuC_PC instance
1082  * @mode: new value of the mode.
1083  *
1084  * Return: 0 on success, negative error code on error
1085  */
1086 int xe_guc_pc_override_gucrc_mode(struct xe_guc_pc *pc, enum slpc_gucrc_mode mode)
1087 {
1088 	int ret;
1089 
1090 	xe_pm_runtime_get(pc_to_xe(pc));
1091 	ret = pc_action_set_param(pc, SLPC_PARAM_PWRGATE_RC_MODE, mode);
1092 	xe_pm_runtime_put(pc_to_xe(pc));
1093 
1094 	return ret;
1095 }
1096 
1097 /**
1098  * xe_guc_pc_unset_gucrc_mode - unset GUCRC mode override
1099  * @pc: Xe_GuC_PC instance
1100  *
1101  * Return: 0 on success, negative error code on error
1102  */
1103 int xe_guc_pc_unset_gucrc_mode(struct xe_guc_pc *pc)
1104 {
1105 	int ret;
1106 
1107 	xe_pm_runtime_get(pc_to_xe(pc));
1108 	ret = pc_action_unset_param(pc, SLPC_PARAM_PWRGATE_RC_MODE);
1109 	xe_pm_runtime_put(pc_to_xe(pc));
1110 
1111 	return ret;
1112 }
1113 
1114 static void pc_init_pcode_freq(struct xe_guc_pc *pc)
1115 {
1116 	u32 min = DIV_ROUND_CLOSEST(pc->rpn_freq, GT_FREQUENCY_MULTIPLIER);
1117 	u32 max = DIV_ROUND_CLOSEST(pc->rp0_freq, GT_FREQUENCY_MULTIPLIER);
1118 
1119 	XE_WARN_ON(xe_pcode_init_min_freq_table(gt_to_tile(pc_to_gt(pc)), min, max));
1120 }
1121 
1122 static int pc_init_freqs(struct xe_guc_pc *pc)
1123 {
1124 	int ret;
1125 
1126 	mutex_lock(&pc->freq_lock);
1127 
1128 	ret = pc_adjust_freq_bounds(pc);
1129 	if (ret)
1130 		goto out;
1131 
1132 	ret = pc_adjust_requested_freq(pc);
1133 	if (ret)
1134 		goto out;
1135 
1136 	pc_update_rp_values(pc);
1137 
1138 	pc_init_pcode_freq(pc);
1139 
1140 	/*
1141 	 * The frequencies are really ready for use only after the
1142 	 * user-requested ones have been restored.
1143 	 */
1144 	pc->freq_ready = true;
1145 
1146 out:
1147 	mutex_unlock(&pc->freq_lock);
1148 	return ret;
1149 }
1150 
1151 static int pc_action_set_strategy(struct xe_guc_pc *pc, u32 val)
1152 {
1153 	int ret = 0;
1154 
1155 	ret = pc_action_set_param(pc,
1156 				  SLPC_PARAM_STRATEGIES,
1157 				  val);
1158 
1159 	return ret;
1160 }
1161 
1162 static const char *power_profile_to_string(struct xe_guc_pc *pc)
1163 {
1164 	switch (pc->power_profile) {
1165 	case SLPC_POWER_PROFILE_BASE:
1166 		return "base";
1167 	case SLPC_POWER_PROFILE_POWER_SAVING:
1168 		return "power_saving";
1169 	default:
1170 		return "invalid";
1171 	}
1172 }
1173 
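/**
 * xe_guc_pc_get_power_profile - Print the available power profiles
 * @pc: Xe_GuC_PC instance
 * @profile: A char buffer where the list of profiles is written, with the
 *           currently selected one in brackets
 */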
1174 void xe_guc_pc_get_power_profile(struct xe_guc_pc *pc, char *profile)
1175 {
1176 	switch (pc->power_profile) {
1177 	case SLPC_POWER_PROFILE_BASE:
1178 		sprintf(profile, "[%s]    %s\n", "base", "power_saving");
1179 		break;
1180 	case SLPC_POWER_PROFILE_POWER_SAVING:
1181 		sprintf(profile, "%s    [%s]\n", "base", "power_saving");
1182 		break;
1183 	default:
1184 		sprintf(profile, "invalid");
1185 	}
1186 }
1187 
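/**
 * xe_guc_pc_set_power_profile - Select the SLPC power profile
 * @pc: Xe_GuC_PC instance
 * @buf: The requested profile name, either "base" or "power_saving"
 *
 * Return: 0 on success,
 *         -EINVAL if the profile name is not recognized,
 *         or a negative error code if the GuC request fails.
 */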
1188 int xe_guc_pc_set_power_profile(struct xe_guc_pc *pc, const char *buf)
1189 {
1190 	int ret = 0;
1191 	u32 val;
1192 
1193 	if (strncmp("base", buf, strlen("base")) == 0)
1194 		val = SLPC_POWER_PROFILE_BASE;
1195 	else if (strncmp("power_saving", buf, strlen("power_saving")) == 0)
1196 		val = SLPC_POWER_PROFILE_POWER_SAVING;
1197 	else
1198 		return -EINVAL;
1199 
1200 	guard(mutex)(&pc->freq_lock);
1201 	xe_pm_runtime_get_noresume(pc_to_xe(pc));
1202 
1203 	ret = pc_action_set_param(pc,
1204 				  SLPC_PARAM_POWER_PROFILE,
1205 				  val);
1206 	if (ret)
1207 		xe_gt_err_once(pc_to_gt(pc), "Failed to set power profile to %d: %pe\n",
1208 			       val, ERR_PTR(ret));
1209 	else
1210 		pc->power_profile = val;
1211 
1212 	xe_pm_runtime_put(pc_to_xe(pc));
1213 
1214 	return ret;
1215 }
1216 
1217 /**
1218  * xe_guc_pc_start - Start GuC's Power Conservation component
1219  * @pc: Xe_GuC_PC instance
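 *
 * Return: 0 on success, negative error code on error.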
1220  */
1221 int xe_guc_pc_start(struct xe_guc_pc *pc)
1222 {
1223 	struct xe_device *xe = pc_to_xe(pc);
1224 	struct xe_gt *gt = pc_to_gt(pc);
1225 	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
1226 	unsigned int fw_ref;
1227 	ktime_t earlier;
1228 	int ret;
1229 
1230 	xe_gt_assert(gt, xe_device_uc_enabled(xe));
1231 
1232 	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
1233 	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT)) {
1234 		xe_force_wake_put(gt_to_fw(gt), fw_ref);
1235 		return -ETIMEDOUT;
1236 	}
1237 
1238 	if (xe->info.skip_guc_pc) {
1239 		if (xe->info.platform != XE_PVC)
1240 			xe_gt_idle_enable_c6(gt);
1241 
1242 		/* Request max possible since dynamic freq mgmt is not enabled */
1243 		pc_set_cur_freq(pc, UINT_MAX);
1244 
1245 		ret = 0;
1246 		goto out;
1247 	}
1248 
1249 	xe_map_memset(xe, &pc->bo->vmap, 0, 0, size);
1250 	slpc_shared_data_write(pc, header.size, size);
1251 
1252 	earlier = ktime_get();
1253 	ret = pc_action_reset(pc);
1254 	if (ret)
1255 		goto out;
1256 
1257 	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
1258 			      SLPC_RESET_TIMEOUT_MS)) {
1259 		xe_gt_warn(gt, "GuC PC start taking longer than normal [freq = %dMHz (req = %dMHz), perf_limit_reasons = 0x%08X]\n",
1260 			   xe_guc_pc_get_act_freq(pc), get_cur_freq(gt),
1261 			   xe_gt_throttle_get_limit_reasons(gt));
1262 
1263 		if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
1264 				      SLPC_RESET_EXTENDED_TIMEOUT_MS)) {
1265 			xe_gt_err(gt, "GuC PC Start failed: Dynamic GT frequency control and GT sleep states are now disabled.\n");
1266 			ret = -EIO;
1267 			goto out;
1268 		}
1269 
1270 		xe_gt_warn(gt, "GuC PC excessive start time: %lldms\n",
1271 			   ktime_ms_delta(ktime_get(), earlier));
1272 	}
1273 
1274 	ret = pc_init_freqs(pc);
1275 	if (ret)
1276 		goto out;
1277 
1278 	ret = pc_set_mert_freq_cap(pc);
1279 	if (ret)
1280 		goto out;
1281 
1282 	if (xe->info.platform == XE_PVC) {
1283 		xe_guc_pc_gucrc_disable(pc);
1284 		ret = 0;
1285 		goto out;
1286 	}
1287 
1288 	ret = pc_action_setup_gucrc(pc, GUCRC_FIRMWARE_CONTROL);
1289 	if (ret)
1290 		goto out;
1291 
1292 	/* Enable SLPC Optimized Strategy for compute */
1293 	ret = pc_action_set_strategy(pc, SLPC_OPTIMIZED_STRATEGY_COMPUTE);
1294 
1295 	/* Set cached value of power_profile */
1296 	ret = xe_guc_pc_set_power_profile(pc, power_profile_to_string(pc));
1297 	if (unlikely(ret))
1298 		xe_gt_err(gt, "Failed to set SLPC power profile: %pe\n", ERR_PTR(ret));
1299 
1300 out:
1301 	xe_force_wake_put(gt_to_fw(gt), fw_ref);
1302 	return ret;
1303 }
1304 
1305 /**
1306  * xe_guc_pc_stop - Stop GuC's Power Conservation component
1307  * @pc: Xe_GuC_PC instance
1308  */
1309 int xe_guc_pc_stop(struct xe_guc_pc *pc)
1310 {
1311 	struct xe_device *xe = pc_to_xe(pc);
1312 
1313 	if (xe->info.skip_guc_pc) {
1314 		xe_gt_idle_disable_c6(pc_to_gt(pc));
1315 		return 0;
1316 	}
1317 
1318 	mutex_lock(&pc->freq_lock);
1319 	pc->freq_ready = false;
1320 	mutex_unlock(&pc->freq_lock);
1321 
1322 	return 0;
1323 }
1324 
1325 /**
1326  * xe_guc_pc_fini_hw - Finalize GuC's Power Conservation component
1327  * @arg: opaque pointer that should point to Xe_GuC_PC instance
1328  */
1329 static void xe_guc_pc_fini_hw(void *arg)
1330 {
1331 	struct xe_guc_pc *pc = arg;
1332 	struct xe_device *xe = pc_to_xe(pc);
1333 	unsigned int fw_ref;
1334 
1335 	if (xe_device_wedged(xe))
1336 		return;
1337 
1338 	fw_ref = xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL);
1339 	xe_guc_pc_gucrc_disable(pc);
1340 	XE_WARN_ON(xe_guc_pc_stop(pc));
1341 
1342 	/* Bind requested freq to mert_freq_cap before unload */
1343 	pc_set_cur_freq(pc, min(pc_max_freq_cap(pc), pc->rpe_freq));
1344 
1345 	xe_force_wake_put(gt_to_fw(pc_to_gt(pc)), fw_ref);
1346 }
1347 
1348 /**
1349  * xe_guc_pc_init - Initialize GuC's Power Conservation component
1350  * @pc: Xe_GuC_PC instance
1351  */
1352 int xe_guc_pc_init(struct xe_guc_pc *pc)
1353 {
1354 	struct xe_gt *gt = pc_to_gt(pc);
1355 	struct xe_tile *tile = gt_to_tile(gt);
1356 	struct xe_device *xe = gt_to_xe(gt);
1357 	struct xe_bo *bo;
1358 	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
1359 	int err;
1360 
1361 	if (xe->info.skip_guc_pc)
1362 		return 0;
1363 
1364 	err = drmm_mutex_init(&xe->drm, &pc->freq_lock);
1365 	if (err)
1366 		return err;
1367 
1368 	bo = xe_managed_bo_create_pin_map(xe, tile, size,
1369 					  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
1370 					  XE_BO_FLAG_GGTT |
1371 					  XE_BO_FLAG_GGTT_INVALIDATE |
1372 					  XE_BO_FLAG_PINNED_NORESTORE);
1373 	if (IS_ERR(bo))
1374 		return PTR_ERR(bo);
1375 
1376 	pc->bo = bo;
1377 
1378 	pc->power_profile = SLPC_POWER_PROFILE_BASE;
1379 
1380 	return devm_add_action_or_reset(xe->drm.dev, xe_guc_pc_fini_hw, pc);
1381 }
1382 
1383 static const char *pc_get_state_string(struct xe_guc_pc *pc)
1384 {
1385 	switch (slpc_shared_data_read(pc, header.global_state)) {
1386 	case SLPC_GLOBAL_STATE_NOT_RUNNING:
1387 		return "not running";
1388 	case SLPC_GLOBAL_STATE_INITIALIZING:
1389 		return "initializing";
1390 	case SLPC_GLOBAL_STATE_RESETTING:
1391 		return "resetting";
1392 	case SLPC_GLOBAL_STATE_RUNNING:
1393 		return "running";
1394 	case SLPC_GLOBAL_STATE_SHUTTING_DOWN:
1395 		return "shutting down";
1396 	case SLPC_GLOBAL_STATE_ERROR:
1397 		return "error";
1398 	default:
1399 		return "unknown";
1400 	}
1401 }
1402 
1403 /**
1404  * xe_guc_pc_print - Print GuC's Power Conservation information for debug
1405  * @pc: Xe_GuC_PC instance
1406  * @p: drm_printer
1407  */
1408 void xe_guc_pc_print(struct xe_guc_pc *pc, struct drm_printer *p)
1409 {
1410 	drm_printf(p, "SLPC Shared Data Header:\n");
1411 	drm_printf(p, "\tSize: %x\n", slpc_shared_data_read(pc, header.size));
1412 	drm_printf(p, "\tGlobal State: %s\n", pc_get_state_string(pc));
1413 
1414 	if (pc_action_query_task_state(pc))
1415 		return;
1416 
1417 	drm_printf(p, "\nSLPC Tasks Status:\n");
1418 	drm_printf(p, "\tGTPERF enabled: %s\n",
1419 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1420 			      SLPC_GTPERF_TASK_ENABLED));
1421 	drm_printf(p, "\tDCC enabled: %s\n",
1422 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1423 			      SLPC_DCC_TASK_ENABLED));
1424 	drm_printf(p, "\tDCC in use: %s\n",
1425 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1426 			      SLPC_IN_DCC));
1427 	drm_printf(p, "\tBalancer enabled: %s\n",
1428 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1429 			      SLPC_BALANCER_ENABLED));
1430 	drm_printf(p, "\tIBC enabled: %s\n",
1431 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1432 			      SLPC_IBC_TASK_ENABLED));
1433 	drm_printf(p, "\tBalancer IA LMT enabled: %s\n",
1434 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1435 			      SLPC_BALANCER_IA_LMT_ENABLED));
1436 	drm_printf(p, "\tBalancer IA LMT active: %s\n",
1437 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1438 			      SLPC_BALANCER_IA_LMT_ACTIVE));
1439 }
1440