xref: /linux/drivers/gpu/drm/xe/xe_guc_pc.c (revision 55a42f78ffd386e01a5404419f8c5ded7db70a21)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include "xe_guc_pc.h"
7 
8 #include <linux/cleanup.h>
9 #include <linux/delay.h>
10 #include <linux/jiffies.h>
11 #include <linux/ktime.h>
12 #include <linux/wait_bit.h>
13 
14 #include <drm/drm_managed.h>
15 #include <drm/drm_print.h>
16 #include <generated/xe_wa_oob.h>
17 
18 #include "abi/guc_actions_slpc_abi.h"
19 #include "regs/xe_gt_regs.h"
20 #include "regs/xe_regs.h"
21 #include "xe_bo.h"
22 #include "xe_device.h"
23 #include "xe_force_wake.h"
24 #include "xe_gt.h"
25 #include "xe_gt_idle.h"
26 #include "xe_gt_printk.h"
27 #include "xe_gt_throttle.h"
28 #include "xe_gt_types.h"
29 #include "xe_guc.h"
30 #include "xe_guc_ct.h"
31 #include "xe_map.h"
32 #include "xe_mmio.h"
33 #include "xe_pcode.h"
34 #include "xe_pm.h"
35 #include "xe_sriov.h"
36 #include "xe_wa.h"
37 
38 #define MCHBAR_MIRROR_BASE_SNB	0x140000
39 
40 #define RP_STATE_CAP		XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5998)
41 #define   RP0_MASK		REG_GENMASK(7, 0)
42 #define   RP1_MASK		REG_GENMASK(15, 8)
43 #define   RPN_MASK		REG_GENMASK(23, 16)
44 
45 #define FREQ_INFO_REC	XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5ef0)
46 #define   RPE_MASK		REG_GENMASK(15, 8)
47 #define   RPA_MASK		REG_GENMASK(31, 16)
48 
49 #define GT_PERF_STATUS		XE_REG(0x1381b4)
50 #define   CAGF_MASK	REG_GENMASK(19, 11)
51 
52 #define GT_FREQUENCY_MULTIPLIER	50
53 #define GT_FREQUENCY_SCALER	3
54 
55 #define LNL_MERT_FREQ_CAP	800
56 #define BMG_MERT_FREQ_CAP	2133
57 #define BMG_MIN_FREQ		1200
58 #define BMG_MERT_FLUSH_FREQ_CAP	2600
59 
60 #define SLPC_RESET_TIMEOUT_MS 5 /* roughly 5ms, but no need for precision */
61 #define SLPC_RESET_EXTENDED_TIMEOUT_MS 1000 /* To be used only at pc_start */
62 #define SLPC_ACT_FREQ_TIMEOUT_MS 100
63 
64 /**
65  * DOC: GuC Power Conservation (PC)
66  *
67  * GuC Power Conservation (PC) supports multiple features for the most
68  * efficient and best-performing use of the GT when GuC submission is
69  * enabled, including frequency management, Render-C state management, and
70  * various algorithms for power balancing.
71  *
72  * Single Loop Power Conservation (SLPC) is the name given to the suite of
73  * connected power conservation features in the GuC firmware. The firmware
74  * exposes a programming interface to the host for the control of SLPC.
75  *
76  * Frequency management:
77  * =====================
78  *
79  * The Xe driver enables SLPC with all of its default features; the default
80  * frequency selection varies per platform.
81  *
82  * Power profiles add another level of control to SLPC. When the power-saving
83  * profile is chosen, SLPC uses conservative thresholds to ramp frequency,
84  * thus saving power. The base profile is the default and ensures balanced
85  * performance for any workload.
86  *
87  * Render-C States:
88  * ================
89  *
90  * Render-C states are also a GuC PC feature, now enabled in Xe for
91  * all platforms.
92  *
93  */
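
/*
 * A minimal usage sketch (not part of the driver) of the frequency interface
 * implemented below; the clamp_gt_freq() helper name and the 300/1200 MHz
 * values are hypothetical:
 *
 *	static int clamp_gt_freq(struct xe_guc_pc *pc)
 *	{
 *		int ret;
 *
 *		// Both setters return -EAGAIN while SLPC is mid-reset and
 *		// -EINVAL for values outside the fused RPn..RP0 range.
 *		ret = xe_guc_pc_set_min_freq(pc, 300);
 *		if (ret)
 *			return ret;
 *		return xe_guc_pc_set_max_freq(pc, 1200);
 *	}
 */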
94 
95 static struct xe_guc *pc_to_guc(struct xe_guc_pc *pc)
96 {
97 	return container_of(pc, struct xe_guc, pc);
98 }
99 
100 static struct xe_guc_ct *pc_to_ct(struct xe_guc_pc *pc)
101 {
102 	return &pc_to_guc(pc)->ct;
103 }
104 
105 static struct xe_gt *pc_to_gt(struct xe_guc_pc *pc)
106 {
107 	return guc_to_gt(pc_to_guc(pc));
108 }
109 
110 static struct xe_device *pc_to_xe(struct xe_guc_pc *pc)
111 {
112 	return guc_to_xe(pc_to_guc(pc));
113 }
114 
115 static struct iosys_map *pc_to_maps(struct xe_guc_pc *pc)
116 {
117 	return &pc->bo->vmap;
118 }
119 
120 #define slpc_shared_data_read(pc_, field_) \
121 	xe_map_rd_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
122 			struct slpc_shared_data, field_)
123 
124 #define slpc_shared_data_write(pc_, field_, val_) \
125 	xe_map_wr_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
126 			struct slpc_shared_data, field_, val_)
127 
128 #define SLPC_EVENT(id, count) \
129 	(FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID, id) | \
130 	 FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, count))
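
/*
 * Sketch of how SLPC_EVENT() is used: it packs the event id and its argument
 * count into the second dword of a HOST2GUC_PC_SLPC_REQUEST message. For
 * example, SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2) builds the dword for a
 * two-argument parameter-set request, as in pc_action_set_param() below.
 */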
131 
132 static int wait_for_pc_state(struct xe_guc_pc *pc,
133 			     enum slpc_global_state state,
134 			     int timeout_ms)
135 {
136 	int timeout_us = 1000 * timeout_ms;
137 	int slept, wait = 10;
138 
139 	xe_device_assert_mem_access(pc_to_xe(pc));
140 
141 	for (slept = 0; slept < timeout_us;) {
142 		if (slpc_shared_data_read(pc, header.global_state) == state)
143 			return 0;
144 
145 		usleep_range(wait, wait << 1);
146 		slept += wait;
147 		wait <<= 1;
148 		if (slept + wait > timeout_us)
149 			wait = timeout_us - slept;
150 	}
151 
152 	return -ETIMEDOUT;
153 }
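
/*
 * Note on the polling above: the sleep interval starts at 10 us and doubles
 * each iteration (10, 20, 40, ... us), with the final interval clamped so the
 * accumulated sleep time never exceeds the timeout. A 5 ms timeout is
 * therefore covered by roughly nine sleeps instead of 500 fixed 10 us polls;
 * usleep_range() may of course sleep longer than requested.
 */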
154 
155 static int wait_for_flush_complete(struct xe_guc_pc *pc)
156 {
157 	const unsigned long timeout = msecs_to_jiffies(30);
158 
159 	if (!wait_var_event_timeout(&pc->flush_freq_limit,
160 				    !atomic_read(&pc->flush_freq_limit),
161 				    timeout))
162 		return -ETIMEDOUT;
163 
164 	return 0;
165 }
166 
167 static int wait_for_act_freq_limit(struct xe_guc_pc *pc, u32 freq)
168 {
169 	int timeout_us = SLPC_ACT_FREQ_TIMEOUT_MS * USEC_PER_MSEC;
170 	int slept, wait = 10;
171 
172 	for (slept = 0; slept < timeout_us;) {
173 		if (xe_guc_pc_get_act_freq(pc) <= freq)
174 			return 0;
175 
176 		usleep_range(wait, wait << 1);
177 		slept += wait;
178 		wait <<= 1;
179 		if (slept + wait > timeout_us)
180 			wait = timeout_us - slept;
181 	}
182 
183 	return -ETIMEDOUT;
184 }

185 static int pc_action_reset(struct xe_guc_pc *pc)
186 {
187 	struct xe_guc_ct *ct = pc_to_ct(pc);
188 	u32 action[] = {
189 		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
190 		SLPC_EVENT(SLPC_EVENT_RESET, 2),
191 		xe_bo_ggtt_addr(pc->bo),
192 		0,
193 	};
194 	int ret;
195 
196 	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
197 	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
198 		xe_gt_err(pc_to_gt(pc), "GuC PC reset failed: %pe\n",
199 			  ERR_PTR(ret));
200 
201 	return ret;
202 }
203 
204 static int pc_action_query_task_state(struct xe_guc_pc *pc)
205 {
206 	struct xe_guc_ct *ct = pc_to_ct(pc);
207 	u32 action[] = {
208 		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
209 		SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
210 		xe_bo_ggtt_addr(pc->bo),
211 		0,
212 	};
213 	int ret;
214 
215 	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
216 			      SLPC_RESET_TIMEOUT_MS))
217 		return -EAGAIN;
218 
219 	/* Blocking here to ensure the results are ready before reading them */
220 	ret = xe_guc_ct_send_block(ct, action, ARRAY_SIZE(action));
221 	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
222 		xe_gt_err(pc_to_gt(pc), "GuC PC query task state failed: %pe\n",
223 			  ERR_PTR(ret));
224 
225 	return ret;
226 }
227 
228 static int pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value)
229 {
230 	struct xe_guc_ct *ct = pc_to_ct(pc);
231 	u32 action[] = {
232 		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
233 		SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
234 		id,
235 		value,
236 	};
237 	int ret;
238 
239 	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
240 			      SLPC_RESET_TIMEOUT_MS))
241 		return -EAGAIN;
242 
243 	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
244 	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
245 		xe_gt_err(pc_to_gt(pc), "GuC PC set param[%u]=%u failed: %pe\n",
246 			  id, value, ERR_PTR(ret));
247 
248 	return ret;
249 }
250 
251 static int pc_action_unset_param(struct xe_guc_pc *pc, u8 id)
252 {
253 	u32 action[] = {
254 		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
255 		SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1),
256 		id,
257 	};
258 	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
259 	int ret;
260 
261 	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
262 			      SLPC_RESET_TIMEOUT_MS))
263 		return -EAGAIN;
264 
265 	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
266 	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
267 		xe_gt_err(pc_to_gt(pc), "GuC PC unset param failed: %pe\n",
268 			  ERR_PTR(ret));
269 
270 	return ret;
271 }
272 
273 static int pc_action_setup_gucrc(struct xe_guc_pc *pc, u32 mode)
274 {
275 	struct xe_guc_ct *ct = pc_to_ct(pc);
276 	u32 action[] = {
277 		GUC_ACTION_HOST2GUC_SETUP_PC_GUCRC,
278 		mode,
279 	};
280 	int ret;
281 
282 	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
283 	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
284 		xe_gt_err(pc_to_gt(pc), "GuC RC enable mode=%u failed: %pe\n",
285 			  mode, ERR_PTR(ret));
286 	return ret;
287 }
288 
289 static u32 decode_freq(u32 raw)
290 {
291 	return DIV_ROUND_CLOSEST(raw * GT_FREQUENCY_MULTIPLIER,
292 				 GT_FREQUENCY_SCALER);
293 }
294 
295 static u32 encode_freq(u32 freq)
296 {
297 	return DIV_ROUND_CLOSEST(freq * GT_FREQUENCY_SCALER,
298 				 GT_FREQUENCY_MULTIPLIER);
299 }
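
/*
 * The hardware ratio unit above is GT_FREQUENCY_MULTIPLIER /
 * GT_FREQUENCY_SCALER = 50/3 MHz (~16.67 MHz). For example, a raw ratio of
 * 60 decodes to 60 * 50 / 3 = 1000 MHz, and encode_freq(1000) returns 60;
 * frequencies that are not multiples of the unit round to the closest ratio.
 */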
300 
301 static u32 pc_get_min_freq(struct xe_guc_pc *pc)
302 {
303 	u32 freq;
304 
305 	freq = FIELD_GET(SLPC_MIN_UNSLICE_FREQ_MASK,
306 			 slpc_shared_data_read(pc, task_state_data.freq));
307 
308 	return decode_freq(freq);
309 }
310 
311 static void pc_set_manual_rp_ctrl(struct xe_guc_pc *pc, bool enable)
312 {
313 	struct xe_gt *gt = pc_to_gt(pc);
314 	u32 state = enable ? RPSWCTL_ENABLE : RPSWCTL_DISABLE;
315 
316 	/* Allow/Disallow punit to process software freq requests */
317 	xe_mmio_write32(&gt->mmio, RP_CONTROL, state);
318 }
319 
320 static void pc_set_cur_freq(struct xe_guc_pc *pc, u32 freq)
321 {
322 	struct xe_gt *gt = pc_to_gt(pc);
323 	u32 rpnswreq;
324 
325 	pc_set_manual_rp_ctrl(pc, true);
326 
327 	/* Req freq is in units of 16.66 MHz */
328 	rpnswreq = REG_FIELD_PREP(REQ_RATIO_MASK, encode_freq(freq));
329 	xe_mmio_write32(&gt->mmio, RPNSWREQ, rpnswreq);
330 
331 	/* Sleep for a small time to allow pcode to respond */
332 	usleep_range(100, 300);
333 
334 	pc_set_manual_rp_ctrl(pc, false);
335 }
336 
337 static int pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
338 {
339 	/*
340 	 * Let's only check for the rpn-rp0 range. If max < min,
341 	 * min becomes a fixed request.
342 	 */
343 	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
344 		return -EINVAL;
345 
346 	/*
347 	 * GuC policy is to elevate the minimum frequency to the efficient level.
348 	 * Our goal is to have the admin's choices respected.
349 	 */
350 	pc_action_set_param(pc, SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
351 			    freq < pc->rpe_freq);
352 
353 	return pc_action_set_param(pc,
354 				   SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
355 				   freq);
356 }
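
/*
 * Example of the policy above, assuming a hypothetical part with
 * RPe = 1300 MHz: pc_set_min_freq(pc, 900) sets
 * SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY to 1 so the 900 MHz floor is
 * honored, while pc_set_min_freq(pc, 1500) sets it back to 0 and lets SLPC
 * elevate the minimum to the efficient level again.
 */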
357 
358 static int pc_get_max_freq(struct xe_guc_pc *pc)
359 {
360 	u32 freq;
361 
362 	freq = FIELD_GET(SLPC_MAX_UNSLICE_FREQ_MASK,
363 			 slpc_shared_data_read(pc, task_state_data.freq));
364 
365 	return decode_freq(freq);
366 }
367 
368 static int pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
369 {
370 	/*
371 	 * Let's only check for the rpn-rp0 range. If max < min,
372 	 * min becomes a fixed request.
373 	 * Also, overclocking is not supported.
374 	 */
375 	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
376 		return -EINVAL;
377 
378 	return pc_action_set_param(pc,
379 				   SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
380 				   freq);
381 }
382 
383 static void mtl_update_rpa_value(struct xe_guc_pc *pc)
384 {
385 	struct xe_gt *gt = pc_to_gt(pc);
386 	u32 reg;
387 
388 	if (xe_gt_is_media_type(gt))
389 		reg = xe_mmio_read32(&gt->mmio, MTL_MPA_FREQUENCY);
390 	else
391 		reg = xe_mmio_read32(&gt->mmio, MTL_GT_RPA_FREQUENCY);
392 
393 	pc->rpa_freq = decode_freq(REG_FIELD_GET(MTL_RPA_MASK, reg));
394 }
395 
396 static void mtl_update_rpe_value(struct xe_guc_pc *pc)
397 {
398 	struct xe_gt *gt = pc_to_gt(pc);
399 	u32 reg;
400 
401 	if (xe_gt_is_media_type(gt))
402 		reg = xe_mmio_read32(&gt->mmio, MTL_MPE_FREQUENCY);
403 	else
404 		reg = xe_mmio_read32(&gt->mmio, MTL_GT_RPE_FREQUENCY);
405 
406 	pc->rpe_freq = decode_freq(REG_FIELD_GET(MTL_RPE_MASK, reg));
407 }
408 
409 static void tgl_update_rpa_value(struct xe_guc_pc *pc)
410 {
411 	struct xe_gt *gt = pc_to_gt(pc);
412 	struct xe_device *xe = gt_to_xe(gt);
413 	u32 reg;
414 
415 	/*
416 	 * For PVC we still need to use the fused RP0 as the approximation for
417 	 * RPa. For platforms other than PVC we get the resolved RPa directly
418 	 * from PCODE, at a different register.
419 	 */
420 	if (xe->info.platform == XE_PVC) {
421 		reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
422 		pc->rpa_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
423 	} else {
424 		reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC);
425 		pc->rpa_freq = REG_FIELD_GET(RPA_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
426 	}
427 }
428 
429 static void tgl_update_rpe_value(struct xe_guc_pc *pc)
430 {
431 	struct xe_gt *gt = pc_to_gt(pc);
432 	struct xe_device *xe = gt_to_xe(gt);
433 	u32 reg;
434 
435 	/*
436 	 * For PVC we still need to use the fused RP1 as the approximation for
437 	 * RPe. For platforms other than PVC we get the resolved RPe directly
438 	 * from PCODE, at a different register.
439 	 */
440 	if (xe->info.platform == XE_PVC) {
441 		reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
442 		pc->rpe_freq = REG_FIELD_GET(RP1_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
443 	} else {
444 		reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC);
445 		pc->rpe_freq = REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
446 	}
447 }
448 
449 static void pc_update_rp_values(struct xe_guc_pc *pc)
450 {
451 	struct xe_gt *gt = pc_to_gt(pc);
452 	struct xe_device *xe = gt_to_xe(gt);
453 
454 	if (GRAPHICS_VERx100(xe) >= 1270) {
455 		mtl_update_rpa_value(pc);
456 		mtl_update_rpe_value(pc);
457 	} else {
458 		tgl_update_rpa_value(pc);
459 		tgl_update_rpe_value(pc);
460 	}
461 
462 	/*
463 	 * RPe is decided at runtime by PCODE. In the rare case where that's
464 	 * smaller than the fused min, we will trust PCODE and use that
465 	 * as our minimum.
466 	 */
467 	pc->rpn_freq = min(pc->rpn_freq, pc->rpe_freq);
468 }
469 
470 /**
471  * xe_guc_pc_get_act_freq - Get Actual running frequency
472  * @pc: The GuC PC
473  *
474  * Returns: The actual running frequency, which might be 0 if the GT is in a Render-C sleep state (RC6).
475  */
476 u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc)
477 {
478 	struct xe_gt *gt = pc_to_gt(pc);
479 	struct xe_device *xe = gt_to_xe(gt);
480 	u32 freq;
481 
482 	/* When in RC6, actual frequency reported will be 0. */
483 	if (GRAPHICS_VERx100(xe) >= 1270) {
484 		freq = xe_mmio_read32(&gt->mmio, MTL_MIRROR_TARGET_WP1);
485 		freq = REG_FIELD_GET(MTL_CAGF_MASK, freq);
486 	} else {
487 		freq = xe_mmio_read32(&gt->mmio, GT_PERF_STATUS);
488 		freq = REG_FIELD_GET(CAGF_MASK, freq);
489 	}
490 
491 	freq = decode_freq(freq);
492 
493 	return freq;
494 }
495 
496 static u32 get_cur_freq(struct xe_gt *gt)
497 {
498 	u32 freq;
499 
500 	freq = xe_mmio_read32(&gt->mmio, RPNSWREQ);
501 	freq = REG_FIELD_GET(REQ_RATIO_MASK, freq);
502 	return decode_freq(freq);
503 }
504 
505 /**
506  * xe_guc_pc_get_cur_freq_fw - With force wake held, get requested frequency
507  * @pc: The GuC PC
508  *
509  * Returns: the requested frequency for that GT instance
510  */
511 u32 xe_guc_pc_get_cur_freq_fw(struct xe_guc_pc *pc)
512 {
513 	struct xe_gt *gt = pc_to_gt(pc);
514 
515 	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
516 
517 	return get_cur_freq(gt);
518 }
519 
520 /**
521  * xe_guc_pc_get_cur_freq - Get Current requested frequency
522  * @pc: The GuC PC
523  * @freq: A pointer to a u32 where the freq value will be returned
524  *
525  * Returns: 0 on success,
526  *         -EAGAIN if GuC PC not ready (likely in the middle of a reset).
527  */
528 int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq)
529 {
530 	struct xe_gt *gt = pc_to_gt(pc);
531 	unsigned int fw_ref;
532 
533 	/*
534 	 * GuC SLPC plays with cur freq request when GuCRC is enabled
535 	 * Block RC6 for a more reliable read.
536 	 */
537 	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
538 	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT)) {
539 		xe_force_wake_put(gt_to_fw(gt), fw_ref);
540 		return -ETIMEDOUT;
541 	}
542 
543 	*freq = get_cur_freq(gt);
544 
545 	xe_force_wake_put(gt_to_fw(gt), fw_ref);
546 	return 0;
547 }
548 
549 /**
550  * xe_guc_pc_get_rp0_freq - Get the RP0 freq
551  * @pc: The GuC PC
552  *
553  * Returns: RP0 freq.
554  */
555 u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc)
556 {
557 	return pc->rp0_freq;
558 }
559 
560 /**
561  * xe_guc_pc_get_rpa_freq - Get the RPa freq
562  * @pc: The GuC PC
563  *
564  * Returns: RPa freq.
565  */
566 u32 xe_guc_pc_get_rpa_freq(struct xe_guc_pc *pc)
567 {
568 	pc_update_rp_values(pc);
569 
570 	return pc->rpa_freq;
571 }
572 
573 /**
574  * xe_guc_pc_get_rpe_freq - Get the RPe freq
575  * @pc: The GuC PC
576  *
577  * Returns: RPe freq.
578  */
579 u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc)
580 {
581 	pc_update_rp_values(pc);
582 
583 	return pc->rpe_freq;
584 }
585 
586 /**
587  * xe_guc_pc_get_rpn_freq - Get the RPn freq
588  * @pc: The GuC PC
589  *
590  * Returns: RPn freq.
591  */
592 u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc)
593 {
594 	return pc->rpn_freq;
595 }
596 
597 static int xe_guc_pc_get_min_freq_locked(struct xe_guc_pc *pc, u32 *freq)
598 {
599 	int ret;
600 
601 	lockdep_assert_held(&pc->freq_lock);
602 
603 	/* Might be in the middle of a gt reset */
604 	if (!pc->freq_ready)
605 		return -EAGAIN;
606 
607 	ret = pc_action_query_task_state(pc);
608 	if (ret)
609 		return ret;
610 
611 	*freq = pc_get_min_freq(pc);
612 
613 	return 0;
614 }
615 
616 /**
617  * xe_guc_pc_get_min_freq - Get the min operational frequency
618  * @pc: The GuC PC
619  * @freq: A pointer to a u32 where the freq value will be returned
620  *
621  * Returns: 0 on success,
622  *         -EAGAIN if GuC PC not ready (likely in the middle of a reset).
623  */
624 int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq)
625 {
626 	guard(mutex)(&pc->freq_lock);
627 
628 	return xe_guc_pc_get_min_freq_locked(pc, freq);
629 }
630 
631 static int xe_guc_pc_set_min_freq_locked(struct xe_guc_pc *pc, u32 freq)
632 {
633 	int ret;
634 
635 	lockdep_assert_held(&pc->freq_lock);
636 
637 	/* Might be in the middle of a gt reset */
638 	if (!pc->freq_ready)
639 		return -EAGAIN;
640 
641 	ret = pc_set_min_freq(pc, freq);
642 	if (ret)
643 		return ret;
644 
645 	pc->user_requested_min = freq;
646 
647 	return 0;
648 }
649 
650 /**
651  * xe_guc_pc_set_min_freq - Set the minimal operational frequency
652  * @pc: The GuC PC
653  * @freq: The selected minimal frequency
654  *
655  * Returns: 0 on success,
656  *         -EAGAIN if GuC PC not ready (likely in the middle of a reset),
657  *         -EINVAL if value out of bounds.
658  */
659 int xe_guc_pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
660 {
661 	guard(mutex)(&pc->freq_lock);
662 
663 	return xe_guc_pc_set_min_freq_locked(pc, freq);
664 }
665 
666 static int xe_guc_pc_get_max_freq_locked(struct xe_guc_pc *pc, u32 *freq)
667 {
668 	int ret;
669 
670 	lockdep_assert_held(&pc->freq_lock);
671 
672 	/* Might be in the middle of a gt reset */
673 	if (!pc->freq_ready)
674 		return -EAGAIN;
675 
676 	ret = pc_action_query_task_state(pc);
677 	if (ret)
678 		return ret;
679 
680 	*freq = pc_get_max_freq(pc);
681 
682 	return 0;
683 }
684 
685 /**
686  * xe_guc_pc_get_max_freq - Get Maximum operational frequency
687  * @pc: The GuC PC
688  * @freq: A pointer to a u32 where the freq value will be returned
689  *
690  * Returns: 0 on success,
691  *         -EAGAIN if GuC PC not ready (likely in the middle of a reset).
692  */
693 int xe_guc_pc_get_max_freq(struct xe_guc_pc *pc, u32 *freq)
694 {
695 	guard(mutex)(&pc->freq_lock);
696 
697 	return xe_guc_pc_get_max_freq_locked(pc, freq);
698 }
699 
700 static int xe_guc_pc_set_max_freq_locked(struct xe_guc_pc *pc, u32 freq)
701 {
702 	int ret;
703 
704 	lockdep_assert_held(&pc->freq_lock);
705 
706 	/* Might be in the middle of a gt reset */
707 	if (!pc->freq_ready)
708 		return -EAGAIN;
709 
710 	ret = pc_set_max_freq(pc, freq);
711 	if (ret)
712 		return ret;
713 
714 	pc->user_requested_max = freq;
715 
716 	return 0;
717 }
718 
719 /**
720  * xe_guc_pc_set_max_freq - Set the maximum operational frequency
721  * @pc: The GuC PC
722  * @freq: The selected maximum frequency value
723  *
724  * Returns: 0 on success,
725  *         -EAGAIN if GuC PC not ready (likely in the middle of a reset),
726  *         -EINVAL if value out of bounds.
727  */
728 int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
729 {
730 	if (XE_GT_WA(pc_to_gt(pc), 22019338487)) {
731 		if (wait_for_flush_complete(pc) != 0)
732 			return -EAGAIN;
733 	}
734 
735 	guard(mutex)(&pc->freq_lock);
736 
737 	return xe_guc_pc_set_max_freq_locked(pc, freq);
738 }
739 
740 /**
741  * xe_guc_pc_c_status - get the current GT C state
742  * @pc: XE_GuC_PC instance
743  */
744 enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc)
745 {
746 	struct xe_gt *gt = pc_to_gt(pc);
747 	u32 reg, gt_c_state;
748 
749 	if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
750 		reg = xe_mmio_read32(&gt->mmio, MTL_MIRROR_TARGET_WP1);
751 		gt_c_state = REG_FIELD_GET(MTL_CC_MASK, reg);
752 	} else {
753 		reg = xe_mmio_read32(&gt->mmio, GT_CORE_STATUS);
754 		gt_c_state = REG_FIELD_GET(RCN_MASK, reg);
755 	}
756 
757 	switch (gt_c_state) {
758 	case GT_C6:
759 		return GT_IDLE_C6;
760 	case GT_C0:
761 		return GT_IDLE_C0;
762 	default:
763 		return GT_IDLE_UNKNOWN;
764 	}
765 }
766 
767 /**
768  * xe_guc_pc_rc6_residency - RC6 residency counter
769  * @pc: Xe_GuC_PC instance
770  */
771 u64 xe_guc_pc_rc6_residency(struct xe_guc_pc *pc)
772 {
773 	struct xe_gt *gt = pc_to_gt(pc);
774 	u32 reg;
775 
776 	reg = xe_mmio_read32(&gt->mmio, GT_GFX_RC6);
777 
778 	return reg;
779 }
780 
781 /**
782  * xe_guc_pc_mc6_residency - mc6 residency counter
783  * @pc: Xe_GuC_PC instance
784  */
785 u64 xe_guc_pc_mc6_residency(struct xe_guc_pc *pc)
786 {
787 	struct xe_gt *gt = pc_to_gt(pc);
788 	u64 reg;
789 
790 	reg = xe_mmio_read32(&gt->mmio, MTL_MEDIA_MC6);
791 
792 	return reg;
793 }
794 
795 static void mtl_init_fused_rp_values(struct xe_guc_pc *pc)
796 {
797 	struct xe_gt *gt = pc_to_gt(pc);
798 	u32 reg;
799 
800 	xe_device_assert_mem_access(pc_to_xe(pc));
801 
802 	if (xe_gt_is_media_type(gt))
803 		reg = xe_mmio_read32(&gt->mmio, MTL_MEDIAP_STATE_CAP);
804 	else
805 		reg = xe_mmio_read32(&gt->mmio, MTL_RP_STATE_CAP);
806 
807 	pc->rp0_freq = decode_freq(REG_FIELD_GET(MTL_RP0_CAP_MASK, reg));
808 
809 	pc->rpn_freq = decode_freq(REG_FIELD_GET(MTL_RPN_CAP_MASK, reg));
810 }
811 
812 static void tgl_init_fused_rp_values(struct xe_guc_pc *pc)
813 {
814 	struct xe_gt *gt = pc_to_gt(pc);
815 	struct xe_device *xe = gt_to_xe(gt);
816 	u32 reg;
817 
818 	xe_device_assert_mem_access(pc_to_xe(pc));
819 
820 	if (xe->info.platform == XE_PVC)
821 		reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
822 	else
823 		reg = xe_mmio_read32(&gt->mmio, RP_STATE_CAP);
824 	pc->rp0_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
825 	pc->rpn_freq = REG_FIELD_GET(RPN_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
826 }
827 
828 static void pc_init_fused_rp_values(struct xe_guc_pc *pc)
829 {
830 	struct xe_gt *gt = pc_to_gt(pc);
831 	struct xe_device *xe = gt_to_xe(gt);
832 
833 	if (GRAPHICS_VERx100(xe) >= 1270)
834 		mtl_init_fused_rp_values(pc);
835 	else
836 		tgl_init_fused_rp_values(pc);
837 }
838 
839 static u32 pc_max_freq_cap(struct xe_guc_pc *pc)
840 {
841 	struct xe_gt *gt = pc_to_gt(pc);
842 
843 	if (XE_GT_WA(gt, 22019338487)) {
844 		if (xe_gt_is_media_type(gt))
845 			return min(LNL_MERT_FREQ_CAP, pc->rp0_freq);
846 		else
847 			return min(BMG_MERT_FREQ_CAP, pc->rp0_freq);
848 	} else {
849 		return pc->rp0_freq;
850 	}
851 }
852 
853 /**
854  * xe_guc_pc_raise_unslice - Request a higher GT frequency to allow faster
855  * GuC load times
856  * @pc: Xe_GuC_PC instance
857  */
858 void xe_guc_pc_raise_unslice(struct xe_guc_pc *pc)
859 {
860 	struct xe_gt *gt = pc_to_gt(pc);
861 
862 	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
863 	pc_set_cur_freq(pc, pc_max_freq_cap(pc));
864 }
865 
866 /**
867  * xe_guc_pc_init_early - Initialize RPx values
868  * @pc: Xe_GuC_PC instance
869  */
870 void xe_guc_pc_init_early(struct xe_guc_pc *pc)
871 {
872 	struct xe_gt *gt = pc_to_gt(pc);
873 
874 	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
875 	pc_init_fused_rp_values(pc);
876 }
877 
878 static int pc_adjust_freq_bounds(struct xe_guc_pc *pc)
879 {
880 	struct xe_tile *tile = gt_to_tile(pc_to_gt(pc));
881 	int ret;
882 
883 	lockdep_assert_held(&pc->freq_lock);
884 
885 	ret = pc_action_query_task_state(pc);
886 	if (ret)
887 		goto out;
888 
889 	/*
890 	 * GuC defaults to some RPmax that is not actually achievable without
891 	 * overclocking. Let's adjust it to the hardware RP0, which is the
892 	 * regular maximum.
893 	 */
894 	if (pc_get_max_freq(pc) > pc->rp0_freq) {
895 		ret = pc_set_max_freq(pc, pc->rp0_freq);
896 		if (ret)
897 			goto out;
898 	}
899 
900 	/*
901 	 * The same thing happens on server platforms, where the min is listed
902 	 * as RPmax.
903 	 */
904 	if (pc_get_min_freq(pc) > pc->rp0_freq)
905 		ret = pc_set_min_freq(pc, pc->rp0_freq);
906 
907 	if (XE_GT_WA(tile->primary_gt, 14022085890))
908 		ret = pc_set_min_freq(pc, max(BMG_MIN_FREQ, pc_get_min_freq(pc)));
909 
910 out:
911 	return ret;
912 }
913 
914 static int pc_adjust_requested_freq(struct xe_guc_pc *pc)
915 {
916 	int ret = 0;
917 
918 	lockdep_assert_held(&pc->freq_lock);
919 
920 	if (pc->user_requested_min != 0) {
921 		ret = pc_set_min_freq(pc, pc->user_requested_min);
922 		if (ret)
923 			return ret;
924 	}
925 
926 	if (pc->user_requested_max != 0) {
927 		ret = pc_set_max_freq(pc, pc->user_requested_max);
928 		if (ret)
929 			return ret;
930 	}
931 
932 	return ret;
933 }
934 
935 static bool needs_flush_freq_limit(struct xe_guc_pc *pc)
936 {
937 	struct xe_gt *gt = pc_to_gt(pc);
938 
939 	return XE_GT_WA(gt, 22019338487) &&
940 		pc->rp0_freq > BMG_MERT_FLUSH_FREQ_CAP;
941 }
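
/*
 * The two helpers below are expected to bracket an L2 cache flush on parts
 * affected by WA 22019338487: apply() caps the max frequency at
 * BMG_MERT_FLUSH_FREQ_CAP and waits for the actual frequency to drop below
 * it, remove() restores the stashed max. While flush_freq_limit is set, new
 * user max-frequency requests block in wait_for_flush_complete() via
 * xe_guc_pc_set_max_freq().
 */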
942 
943 /**
944  * xe_guc_pc_apply_flush_freq_limit() - Limit max GT freq during L2 flush
945  * @pc: the xe_guc_pc object
946  *
947  * As per the WA, reduce max GT frequency during L2 cache flush
948  */
949 void xe_guc_pc_apply_flush_freq_limit(struct xe_guc_pc *pc)
950 {
951 	struct xe_gt *gt = pc_to_gt(pc);
952 	u32 max_freq;
953 	int ret;
954 
955 	if (!needs_flush_freq_limit(pc))
956 		return;
957 
958 	guard(mutex)(&pc->freq_lock);
959 
960 	ret = xe_guc_pc_get_max_freq_locked(pc, &max_freq);
961 	if (!ret && max_freq > BMG_MERT_FLUSH_FREQ_CAP) {
962 		ret = pc_set_max_freq(pc, BMG_MERT_FLUSH_FREQ_CAP);
963 		if (ret) {
964 			xe_gt_err_once(gt, "Failed to cap max freq on flush to %u, %pe\n",
965 				       BMG_MERT_FLUSH_FREQ_CAP, ERR_PTR(ret));
966 			return;
967 		}
968 
969 		atomic_set(&pc->flush_freq_limit, 1);
970 
971 		/*
972 		 * If user has previously changed max freq, stash that value to
973 		 * restore later, otherwise use the current max. New user
974 		 * requests wait on flush.
975 		 */
976 		if (pc->user_requested_max != 0)
977 			pc->stashed_max_freq = pc->user_requested_max;
978 		else
979 			pc->stashed_max_freq = max_freq;
980 	}
981 
982 	/*
983 	 * Wait for actual freq to go below the flush cap: even if the previous
984 	 * max was below cap, the current one might still be above it
985 	 */
986 	ret = wait_for_act_freq_limit(pc, BMG_MERT_FLUSH_FREQ_CAP);
987 	if (ret)
988 		xe_gt_err_once(gt, "Actual freq did not reduce to %u, %pe\n",
989 			       BMG_MERT_FLUSH_FREQ_CAP, ERR_PTR(ret));
990 }
991 
992 /**
993  * xe_guc_pc_remove_flush_freq_limit() - Remove max GT freq limit after L2 flush completes.
994  * @pc: the xe_guc_pc object
995  *
996  * Restore the previous GT max frequency value.
997  */
998 void xe_guc_pc_remove_flush_freq_limit(struct xe_guc_pc *pc)
999 {
1000 	struct xe_gt *gt = pc_to_gt(pc);
1001 	int ret = 0;
1002 
1003 	if (!needs_flush_freq_limit(pc))
1004 		return;
1005 
1006 	if (!atomic_read(&pc->flush_freq_limit))
1007 		return;
1008 
1009 	mutex_lock(&pc->freq_lock);
1010 
1011 	ret = pc_set_max_freq(pc, pc->stashed_max_freq);
1012 	if (ret)
1013 		xe_gt_err_once(gt, "Failed to restore max freq %u:%d\n",
1014 			       pc->stashed_max_freq, ret);
1015 
1016 	atomic_set(&pc->flush_freq_limit, 0);
1017 	mutex_unlock(&pc->freq_lock);
1018 	wake_up_var(&pc->flush_freq_limit);
1019 }
1020 
1021 static int pc_set_mert_freq_cap(struct xe_guc_pc *pc)
1022 {
1023 	int ret;
1024 
1025 	if (!XE_GT_WA(pc_to_gt(pc), 22019338487))
1026 		return 0;
1027 
1028 	guard(mutex)(&pc->freq_lock);
1029 
1030 	/*
1031 	 * Get updated min/max and stash them.
1032 	 */
1033 	ret = xe_guc_pc_get_min_freq_locked(pc, &pc->stashed_min_freq);
1034 	if (!ret)
1035 		ret = xe_guc_pc_get_max_freq_locked(pc, &pc->stashed_max_freq);
1036 	if (ret)
1037 		return ret;
1038 
1039 	/*
1040 	 * Ensure min and max are bound by MERT_FREQ_CAP until driver loads.
1041 	 */
1042 	ret = pc_set_min_freq(pc, min(pc->rpe_freq, pc_max_freq_cap(pc)));
1043 	if (!ret)
1044 		ret = pc_set_max_freq(pc, min(pc->rp0_freq, pc_max_freq_cap(pc)));
1045 
1046 	return ret;
1047 }
1048 
1049 /**
1050  * xe_guc_pc_restore_stashed_freq - Set min/max back to stashed values
1051  * @pc: The GuC PC
1052  *
1053  * Returns: 0 on success,
1054  *          error code on failure
1055  */
1056 int xe_guc_pc_restore_stashed_freq(struct xe_guc_pc *pc)
1057 {
1058 	int ret = 0;
1059 
1060 	if (IS_SRIOV_VF(pc_to_xe(pc)) || pc_to_xe(pc)->info.skip_guc_pc)
1061 		return 0;
1062 
1063 	mutex_lock(&pc->freq_lock);
1064 	ret = pc_set_max_freq(pc, pc->stashed_max_freq);
1065 	if (!ret)
1066 		ret = pc_set_min_freq(pc, pc->stashed_min_freq);
1067 	mutex_unlock(&pc->freq_lock);
1068 
1069 	return ret;
1070 }
1071 
1072 /**
1073  * xe_guc_pc_gucrc_disable - Disable GuC RC
1074  * @pc: Xe_GuC_PC instance
1075  *
1076  * Disables GuC RC by taking control of RC6 back from GuC.
1077  *
1078  * Return: 0 on success, negative error code on error.
1079  */
1080 int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc)
1081 {
1082 	struct xe_device *xe = pc_to_xe(pc);
1083 	struct xe_gt *gt = pc_to_gt(pc);
1084 	int ret = 0;
1085 
1086 	if (xe->info.skip_guc_pc)
1087 		return 0;
1088 
1089 	ret = pc_action_setup_gucrc(pc, GUCRC_HOST_CONTROL);
1090 	if (ret)
1091 		return ret;
1092 
1093 	return xe_gt_idle_disable_c6(gt);
1094 }
1095 
1096 /**
1097  * xe_guc_pc_override_gucrc_mode - override GUCRC mode
1098  * @pc: Xe_GuC_PC instance
1099  * @mode: new value of the mode.
1100  *
1101  * Return: 0 on success, negative error code on error
1102  */
1103 int xe_guc_pc_override_gucrc_mode(struct xe_guc_pc *pc, enum slpc_gucrc_mode mode)
1104 {
1105 	int ret;
1106 
1107 	xe_pm_runtime_get(pc_to_xe(pc));
1108 	ret = pc_action_set_param(pc, SLPC_PARAM_PWRGATE_RC_MODE, mode);
1109 	xe_pm_runtime_put(pc_to_xe(pc));
1110 
1111 	return ret;
1112 }
1113 
1114 /**
1115  * xe_guc_pc_unset_gucrc_mode - unset GUCRC mode override
1116  * @pc: Xe_GuC_PC instance
1117  *
1118  * Return: 0 on success, negative error code on error
1119  */
1120 int xe_guc_pc_unset_gucrc_mode(struct xe_guc_pc *pc)
1121 {
1122 	int ret;
1123 
1124 	xe_pm_runtime_get(pc_to_xe(pc));
1125 	ret = pc_action_unset_param(pc, SLPC_PARAM_PWRGATE_RC_MODE);
1126 	xe_pm_runtime_put(pc_to_xe(pc));
1127 
1128 	return ret;
1129 }
1130 
1131 static void pc_init_pcode_freq(struct xe_guc_pc *pc)
1132 {
1133 	u32 min = DIV_ROUND_CLOSEST(pc->rpn_freq, GT_FREQUENCY_MULTIPLIER);
1134 	u32 max = DIV_ROUND_CLOSEST(pc->rp0_freq, GT_FREQUENCY_MULTIPLIER);
1135 
1136 	XE_WARN_ON(xe_pcode_init_min_freq_table(gt_to_tile(pc_to_gt(pc)), min, max));
1137 }
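
/*
 * pcode takes frequencies in units of GT_FREQUENCY_MULTIPLIER (50 MHz). As
 * a worked example with hypothetical fused values, RPn = 300 MHz and
 * RP0 = 2000 MHz would be reported to the min-freq table as 6 and 40.
 */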
1138 
1139 static int pc_init_freqs(struct xe_guc_pc *pc)
1140 {
1141 	int ret;
1142 
1143 	mutex_lock(&pc->freq_lock);
1144 
1145 	ret = pc_adjust_freq_bounds(pc);
1146 	if (ret)
1147 		goto out;
1148 
1149 	ret = pc_adjust_requested_freq(pc);
1150 	if (ret)
1151 		goto out;
1152 
1153 	pc_update_rp_values(pc);
1154 
1155 	pc_init_pcode_freq(pc);
1156 
1157 	/*
1158 	 * The frequencies are really ready for use only after the
1159 	 * user-requested ones have been restored.
1160 	 */
1161 	pc->freq_ready = true;
1162 
1163 out:
1164 	mutex_unlock(&pc->freq_lock);
1165 	return ret;
1166 }
1167 
1168 static int pc_action_set_strategy(struct xe_guc_pc *pc, u32 val)
1169 {
1170 	return pc_action_set_param(pc, SLPC_PARAM_STRATEGIES, val);
1177 }
1178 
1179 static const char *power_profile_to_string(struct xe_guc_pc *pc)
1180 {
1181 	switch (pc->power_profile) {
1182 	case SLPC_POWER_PROFILE_BASE:
1183 		return "base";
1184 	case SLPC_POWER_PROFILE_POWER_SAVING:
1185 		return "power_saving";
1186 	default:
1187 		return "invalid";
1188 	}
1189 }
1190 
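/**
 * xe_guc_pc_get_power_profile - Format the current power profile selection
 * @pc: Xe_GuC_PC instance
 * @profile: buffer that receives the formatted profile string, with the
 *           active profile in brackets
 */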
1191 void xe_guc_pc_get_power_profile(struct xe_guc_pc *pc, char *profile)
1192 {
1193 	switch (pc->power_profile) {
1194 	case SLPC_POWER_PROFILE_BASE:
1195 		sprintf(profile, "[%s]    %s\n", "base", "power_saving");
1196 		break;
1197 	case SLPC_POWER_PROFILE_POWER_SAVING:
1198 		sprintf(profile, "%s    [%s]\n", "base", "power_saving");
1199 		break;
1200 	default:
1201 		sprintf(profile, "invalid\n");
1202 	}
1203 }
1204 
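/**
 * xe_guc_pc_set_power_profile - Select the SLPC power profile
 * @pc: Xe_GuC_PC instance
 * @buf: profile name, either "base" or "power_saving"
 *
 * Return: 0 on success, negative error code on failure.
 */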
1205 int xe_guc_pc_set_power_profile(struct xe_guc_pc *pc, const char *buf)
1206 {
1207 	int ret = 0;
1208 	u32 val;
1209 
1210 	if (strncmp("base", buf, strlen("base")) == 0)
1211 		val = SLPC_POWER_PROFILE_BASE;
1212 	else if (strncmp("power_saving", buf, strlen("power_saving")) == 0)
1213 		val = SLPC_POWER_PROFILE_POWER_SAVING;
1214 	else
1215 		return -EINVAL;
1216 
1217 	guard(mutex)(&pc->freq_lock);
1218 	xe_pm_runtime_get_noresume(pc_to_xe(pc));
1219 
1220 	ret = pc_action_set_param(pc,
1221 				  SLPC_PARAM_POWER_PROFILE,
1222 				  val);
1223 	if (ret)
1224 		xe_gt_err_once(pc_to_gt(pc), "Failed to set power profile to %d: %pe\n",
1225 			       val, ERR_PTR(ret));
1226 	else
1227 		pc->power_profile = val;
1228 
1229 	xe_pm_runtime_put(pc_to_xe(pc));
1230 
1231 	return ret;
1232 }
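
/*
 * Illustrative sketch (not part of the driver) of how a sysfs store handler
 * might use the setter above; the store_profile() name is hypothetical:
 *
 *	static int store_profile(struct xe_guc_pc *pc, const char *buf)
 *	{
 *		// Accepts "base" or "power_saving"; anything else is -EINVAL.
 *		return xe_guc_pc_set_power_profile(pc, buf);
 *	}
 */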
1233 
1234 /**
1235  * xe_guc_pc_start - Start GuC's Power Conservation component
1236  * @pc: Xe_GuC_PC instance
1237  */
1238 int xe_guc_pc_start(struct xe_guc_pc *pc)
1239 {
1240 	struct xe_device *xe = pc_to_xe(pc);
1241 	struct xe_gt *gt = pc_to_gt(pc);
1242 	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
1243 	unsigned int fw_ref;
1244 	ktime_t earlier;
1245 	int ret;
1246 
1247 	xe_gt_assert(gt, xe_device_uc_enabled(xe));
1248 
1249 	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
1250 	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT)) {
1251 		xe_force_wake_put(gt_to_fw(gt), fw_ref);
1252 		return -ETIMEDOUT;
1253 	}
1254 
1255 	if (xe->info.skip_guc_pc) {
1256 		if (xe->info.platform != XE_PVC)
1257 			xe_gt_idle_enable_c6(gt);
1258 
1259 		/* Request max possible since dynamic freq mgmt is not enabled */
1260 		pc_set_cur_freq(pc, UINT_MAX);
1261 
1262 		ret = 0;
1263 		goto out;
1264 	}
1265 
1266 	xe_map_memset(xe, &pc->bo->vmap, 0, 0, size);
1267 	slpc_shared_data_write(pc, header.size, size);
1268 
1269 	earlier = ktime_get();
1270 	ret = pc_action_reset(pc);
1271 	if (ret)
1272 		goto out;
1273 
1274 	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
1275 			      SLPC_RESET_TIMEOUT_MS)) {
1276 		xe_gt_warn(gt, "GuC PC start taking longer than normal [freq = %dMHz (req = %dMHz), perf_limit_reasons = 0x%08X]\n",
1277 			   xe_guc_pc_get_act_freq(pc), get_cur_freq(gt),
1278 			   xe_gt_throttle_get_limit_reasons(gt));
1279 
1280 		if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
1281 				      SLPC_RESET_EXTENDED_TIMEOUT_MS)) {
1282 			xe_gt_err(gt, "GuC PC Start failed: Dynamic GT frequency control and GT sleep states are now disabled.\n");
1283 			ret = -EIO;
1284 			goto out;
1285 		}
1286 
1287 		xe_gt_warn(gt, "GuC PC excessive start time: %lldms\n",
1288 			   ktime_ms_delta(ktime_get(), earlier));
1289 	}
1290 
1291 	ret = pc_init_freqs(pc);
1292 	if (ret)
1293 		goto out;
1294 
1295 	ret = pc_set_mert_freq_cap(pc);
1296 	if (ret)
1297 		goto out;
1298 
1299 	if (xe->info.platform == XE_PVC) {
1300 		xe_guc_pc_gucrc_disable(pc);
1301 		ret = 0;
1302 		goto out;
1303 	}
1304 
1305 	ret = pc_action_setup_gucrc(pc, GUCRC_FIRMWARE_CONTROL);
1306 	if (ret)
1307 		goto out;
1308 
1309 	/* Enable SLPC Optimized Strategy for compute */
1310 	ret = pc_action_set_strategy(pc, SLPC_OPTIMIZED_STRATEGY_COMPUTE);
	if (ret)
		goto out;

1312 	/* Set cached value of power_profile */
1313 	ret = xe_guc_pc_set_power_profile(pc, power_profile_to_string(pc));
1314 	if (unlikely(ret))
1315 		xe_gt_err(gt, "Failed to set SLPC power profile: %pe\n", ERR_PTR(ret));
1316 
1317 out:
1318 	xe_force_wake_put(gt_to_fw(gt), fw_ref);
1319 	return ret;
1320 }
1321 
1322 /**
1323  * xe_guc_pc_stop - Stop GuC's Power Conservation component
1324  * @pc: Xe_GuC_PC instance
1325  */
1326 int xe_guc_pc_stop(struct xe_guc_pc *pc)
1327 {
1328 	struct xe_device *xe = pc_to_xe(pc);
1329 
1330 	if (xe->info.skip_guc_pc) {
1331 		xe_gt_idle_disable_c6(pc_to_gt(pc));
1332 		return 0;
1333 	}
1334 
1335 	mutex_lock(&pc->freq_lock);
1336 	pc->freq_ready = false;
1337 	mutex_unlock(&pc->freq_lock);
1338 
1339 	return 0;
1340 }
1341 
1342 /**
1343  * xe_guc_pc_fini_hw - Finalize GuC's Power Conservation component
1344  * @arg: opaque pointer that should point to Xe_GuC_PC instance
1345  */
1346 static void xe_guc_pc_fini_hw(void *arg)
1347 {
1348 	struct xe_guc_pc *pc = arg;
1349 	struct xe_device *xe = pc_to_xe(pc);
1350 	unsigned int fw_ref;
1351 
1352 	if (xe_device_wedged(xe))
1353 		return;
1354 
1355 	fw_ref = xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL);
1356 	xe_guc_pc_gucrc_disable(pc);
1357 	XE_WARN_ON(xe_guc_pc_stop(pc));
1358 
1359 	/* Bind requested freq to mert_freq_cap before unload */
1360 	pc_set_cur_freq(pc, min(pc_max_freq_cap(pc), pc->rpe_freq));
1361 
1362 	xe_force_wake_put(gt_to_fw(pc_to_gt(pc)), fw_ref);
1363 }
1364 
1365 /**
1366  * xe_guc_pc_init - Initialize GuC's Power Conservation component
1367  * @pc: Xe_GuC_PC instance
1368  */
1369 int xe_guc_pc_init(struct xe_guc_pc *pc)
1370 {
1371 	struct xe_gt *gt = pc_to_gt(pc);
1372 	struct xe_tile *tile = gt_to_tile(gt);
1373 	struct xe_device *xe = gt_to_xe(gt);
1374 	struct xe_bo *bo;
1375 	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
1376 	int err;
1377 
1378 	if (xe->info.skip_guc_pc)
1379 		return 0;
1380 
1381 	err = drmm_mutex_init(&xe->drm, &pc->freq_lock);
1382 	if (err)
1383 		return err;
1384 
1385 	bo = xe_managed_bo_create_pin_map(xe, tile, size,
1386 					  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
1387 					  XE_BO_FLAG_GGTT |
1388 					  XE_BO_FLAG_GGTT_INVALIDATE |
1389 					  XE_BO_FLAG_PINNED_NORESTORE);
1390 	if (IS_ERR(bo))
1391 		return PTR_ERR(bo);
1392 
1393 	pc->bo = bo;
1394 
1395 	pc->power_profile = SLPC_POWER_PROFILE_BASE;
1396 
1397 	return devm_add_action_or_reset(xe->drm.dev, xe_guc_pc_fini_hw, pc);
1398 }
1399 
1400 static const char *pc_get_state_string(struct xe_guc_pc *pc)
1401 {
1402 	switch (slpc_shared_data_read(pc, header.global_state)) {
1403 	case SLPC_GLOBAL_STATE_NOT_RUNNING:
1404 		return "not running";
1405 	case SLPC_GLOBAL_STATE_INITIALIZING:
1406 		return "initializing";
1407 	case SLPC_GLOBAL_STATE_RESETTING:
1408 		return "resetting";
1409 	case SLPC_GLOBAL_STATE_RUNNING:
1410 		return "running";
1411 	case SLPC_GLOBAL_STATE_SHUTTING_DOWN:
1412 		return "shutting down";
1413 	case SLPC_GLOBAL_STATE_ERROR:
1414 		return "error";
1415 	default:
1416 		return "unknown";
1417 	}
1418 }
1419 
1420 /**
1421  * xe_guc_pc_print - Print GuC's Power Conservation information for debug
1422  * @pc: Xe_GuC_PC instance
1423  * @p: drm_printer
1424  */
1425 void xe_guc_pc_print(struct xe_guc_pc *pc, struct drm_printer *p)
1426 {
1427 	drm_printf(p, "SLPC Shared Data Header:\n");
1428 	drm_printf(p, "\tSize: %x\n", slpc_shared_data_read(pc, header.size));
1429 	drm_printf(p, "\tGlobal State: %s\n", pc_get_state_string(pc));
1430 
1431 	if (pc_action_query_task_state(pc))
1432 		return;
1433 
1434 	drm_printf(p, "\nSLPC Tasks Status:\n");
1435 	drm_printf(p, "\tGTPERF enabled: %s\n",
1436 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1437 			      SLPC_GTPERF_TASK_ENABLED));
1438 	drm_printf(p, "\tDCC enabled: %s\n",
1439 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1440 			      SLPC_DCC_TASK_ENABLED));
1441 	drm_printf(p, "\tDCC in use: %s\n",
1442 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1443 			      SLPC_IN_DCC));
1444 	drm_printf(p, "\tBalancer enabled: %s\n",
1445 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1446 			      SLPC_BALANCER_ENABLED));
1447 	drm_printf(p, "\tIBC enabled: %s\n",
1448 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1449 			      SLPC_IBC_TASK_ENABLED));
1450 	drm_printf(p, "\tBalancer IA LMT enabled: %s\n",
1451 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1452 			      SLPC_BALANCER_IA_LMT_ENABLED));
1453 	drm_printf(p, "\tBalancer IA LMT active: %s\n",
1454 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1455 			      SLPC_BALANCER_IA_LMT_ACTIVE));
1456 }
1457