// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_guc_pc.h"

#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/wait_bit.h>

#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <generated/xe_device_wa_oob.h>
#include <generated/xe_wa_oob.h>

#include "abi/guc_actions_slpc_abi.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_gt_idle.h"
#include "xe_gt_printk.h"
#include "xe_gt_throttle.h"
#include "xe_gt_types.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_pcode.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_wa.h"

#define MCHBAR_MIRROR_BASE_SNB	0x140000

#define RP_STATE_CAP		XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5998)
#define   RP0_MASK		REG_GENMASK(7, 0)
#define   RP1_MASK		REG_GENMASK(15, 8)
#define   RPN_MASK		REG_GENMASK(23, 16)

#define FREQ_INFO_REC	XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5ef0)
#define   RPE_MASK		REG_GENMASK(15, 8)
#define   RPA_MASK		REG_GENMASK(31, 16)

#define GT_PERF_STATUS		XE_REG(0x1381b4)
#define   CAGF_MASK	REG_GENMASK(19, 11)

#define GT_FREQUENCY_MULTIPLIER	50
#define GT_FREQUENCY_SCALER	3

#define LNL_MERT_FREQ_CAP	800
#define BMG_MERT_FREQ_CAP	2133
#define BMG_MIN_FREQ		1200
#define BMG_MERT_FLUSH_FREQ_CAP	2600

#define SLPC_RESET_TIMEOUT_MS 5 /* roughly 5ms, but no need for precision */
#define SLPC_RESET_EXTENDED_TIMEOUT_MS 1000 /* To be used only at pc_start */
#define SLPC_ACT_FREQ_TIMEOUT_MS 100

/**
 * DOC: GuC Power Conservation (PC)
 *
 * GuC Power Conservation (PC) supports multiple features for the most
 * efficient and performant use of the GT when GuC submission is enabled,
 * including frequency management, Render-C states management, and various
 * algorithms for power balancing.
 *
 * Single Loop Power Conservation (SLPC) is the name given to the suite of
 * connected power conservation features in the GuC firmware. The firmware
 * exposes a programming interface to the host for the control of SLPC.
 *
 * Frequency management:
 * =====================
 *
 * The Xe driver enables SLPC with all of its default features and frequency
 * selection, which varies per platform.
 *
 * Power profiles add another level of control to SLPC. When the power-saving
 * profile is chosen, SLPC uses conservative thresholds to ramp frequency,
 * thus saving power. The base profile is the default and ensures balanced
 * performance for any workload.
 *
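 * Host-side control of the SLPC minimum and maximum frequencies funnels
 * through xe_guc_pc_set_min_freq() and xe_guc_pc_set_max_freq() below.
 *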
 * Render-C States:
 * ================
 *
 * Render-C states are also a GuC PC feature, now enabled in Xe for
 * all platforms.
 *
 */

static struct xe_guc *pc_to_guc(struct xe_guc_pc *pc)
{
	return container_of(pc, struct xe_guc, pc);
}

static struct xe_guc_ct *pc_to_ct(struct xe_guc_pc *pc)
{
	return &pc_to_guc(pc)->ct;
}

static struct xe_gt *pc_to_gt(struct xe_guc_pc *pc)
{
	return guc_to_gt(pc_to_guc(pc));
}

static struct xe_device *pc_to_xe(struct xe_guc_pc *pc)
{
	return guc_to_xe(pc_to_guc(pc));
}

static struct iosys_map *pc_to_maps(struct xe_guc_pc *pc)
{
	return &pc->bo->vmap;
}

#define slpc_shared_data_read(pc_, field_) \
	xe_map_rd_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
			struct slpc_shared_data, field_)

#define slpc_shared_data_write(pc_, field_, val_) \
	xe_map_wr_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
			struct slpc_shared_data, field_, val_)

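/*
 * Build the first SLPC request dword: the event id plus the number of event
 * arguments that follow it in the H2G message.
 */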
#define SLPC_EVENT(id, count) \
	(FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID, id) | \
	 FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, count))

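/* Poll the SLPC global state in shared data until it reaches @target_state. */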
static int wait_for_pc_state(struct xe_guc_pc *pc,
			     enum slpc_global_state target_state,
			     int timeout_ms)
{
	enum slpc_global_state state;

	xe_device_assert_mem_access(pc_to_xe(pc));

	return poll_timeout_us(state = slpc_shared_data_read(pc, header.global_state),
			       state == target_state,
			       20, timeout_ms * USEC_PER_MSEC, false);
}

static int wait_for_flush_complete(struct xe_guc_pc *pc)
{
	const unsigned long timeout = msecs_to_jiffies(30);

	if (!wait_var_event_timeout(&pc->flush_freq_limit,
				    !atomic_read(&pc->flush_freq_limit),
				    timeout))
		return -ETIMEDOUT;

	return 0;
}

static int wait_for_act_freq_max_limit(struct xe_guc_pc *pc, u32 max_limit)
{
	u32 freq;

	return poll_timeout_us(freq = xe_guc_pc_get_act_freq(pc),
			       freq <= max_limit,
			       20, SLPC_ACT_FREQ_TIMEOUT_MS * USEC_PER_MSEC, false);
}

static int pc_action_reset(struct xe_guc_pc *pc)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_RESET, 2),
		xe_bo_ggtt_addr(pc->bo),
		0,
	};
	int ret;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
		xe_gt_err(pc_to_gt(pc), "GuC PC reset failed: %pe\n",
			  ERR_PTR(ret));

	return ret;
}

static int pc_action_query_task_state(struct xe_guc_pc *pc)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
		xe_bo_ggtt_addr(pc->bo),
		0,
	};
	int ret;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
			      SLPC_RESET_TIMEOUT_MS))
		return -EAGAIN;

	/* Blocking here to ensure the results are ready before reading them */
	ret = xe_guc_ct_send_block(ct, action, ARRAY_SIZE(action));
	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
		xe_gt_err(pc_to_gt(pc), "GuC PC query task state failed: %pe\n",
			  ERR_PTR(ret));

	return ret;
}

static int pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
		id,
		value,
	};
	int ret;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
			      SLPC_RESET_TIMEOUT_MS))
		return -EAGAIN;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
		xe_gt_err(pc_to_gt(pc), "GuC PC set param[%u]=%u failed: %pe\n",
			  id, value, ERR_PTR(ret));

	return ret;
}

static int pc_action_unset_param(struct xe_guc_pc *pc, u8 id)
{
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1),
		id,
	};
	struct xe_guc_ct *ct = pc_to_ct(pc);
	int ret;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
			      SLPC_RESET_TIMEOUT_MS))
		return -EAGAIN;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
		xe_gt_err(pc_to_gt(pc), "GuC PC unset param failed: %pe\n",
			  ERR_PTR(ret));

	return ret;
}

static int pc_action_setup_gucrc(struct xe_guc_pc *pc, u32 mode)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_SETUP_PC_GUCRC,
		mode,
	};
	int ret;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
		xe_gt_err(pc_to_gt(pc), "GuC RC enable mode=%u failed: %pe\n",
			  mode, ERR_PTR(ret));
	return ret;
}

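/*
 * SLPC deals in hardware frequency ratios, where one ratio unit equals
 * GT_FREQUENCY_MULTIPLIER / GT_FREQUENCY_SCALER = 50/3 ~= 16.67 MHz, while
 * the driver interfaces use MHz. For example, a raw ratio of 120 decodes to
 * 120 * 50 / 3 = 2000 MHz.
 */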
static u32 decode_freq(u32 raw)
{
	return DIV_ROUND_CLOSEST(raw * GT_FREQUENCY_MULTIPLIER,
				 GT_FREQUENCY_SCALER);
}

static u32 encode_freq(u32 freq)
{
	return DIV_ROUND_CLOSEST(freq * GT_FREQUENCY_SCALER,
				 GT_FREQUENCY_MULTIPLIER);
}

static u32 pc_get_min_freq(struct xe_guc_pc *pc)
{
	u32 freq;

	freq = FIELD_GET(SLPC_MIN_UNSLICE_FREQ_MASK,
			 slpc_shared_data_read(pc, task_state_data.freq));

	return decode_freq(freq);
}

static void pc_set_manual_rp_ctrl(struct xe_guc_pc *pc, bool enable)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 state = enable ? RPSWCTL_ENABLE : RPSWCTL_DISABLE;

	/* Allow/Disallow punit to process software freq requests */
	xe_mmio_write32(&gt->mmio, RP_CONTROL, state);
}

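/*
 * Request a frequency from the punit directly, bypassing SLPC, by briefly
 * enabling manual software RP control around the RPNSWREQ write.
 */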
static void pc_set_cur_freq(struct xe_guc_pc *pc, u32 freq)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 rpnswreq;

	pc_set_manual_rp_ctrl(pc, true);

	/* Req freq is in units of 16.66 MHz */
	rpnswreq = REG_FIELD_PREP(REQ_RATIO_MASK, encode_freq(freq));
	xe_mmio_write32(&gt->mmio, RPNSWREQ, rpnswreq);

	/* Sleep for a short time to allow pcode to respond */
	usleep_range(100, 300);

	pc_set_manual_rp_ctrl(pc, false);
}

static int pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
{
	/*
	 * Let's only check for the rpn-rp0 range. If max < min,
	 * min becomes a fixed request.
	 */
	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
		return -EINVAL;

	/*
	 * GuC policy is to elevate minimum frequency to the efficient level.
	 * Our goal is to have the admin's choices respected.
	 */
	pc_action_set_param(pc, SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
			    freq < xe_guc_pc_get_rpe_freq(pc));

	return pc_action_set_param(pc,
				   SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
				   freq);
}

static int pc_get_max_freq(struct xe_guc_pc *pc)
{
	u32 freq;

	freq = FIELD_GET(SLPC_MAX_UNSLICE_FREQ_MASK,
			 slpc_shared_data_read(pc, task_state_data.freq));

	return decode_freq(freq);
}

static int pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
{
	/*
	 * Let's only check for the rpn-rp0 range. If max < min,
	 * min becomes a fixed request.
	 * Also, overclocking is not supported.
	 */
	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
		return -EINVAL;

	return pc_action_set_param(pc,
				   SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
				   freq);
}

static u32 mtl_get_rpa_freq(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(&gt->mmio, MTL_MPA_FREQUENCY);
	else
		reg = xe_mmio_read32(&gt->mmio, MTL_GT_RPA_FREQUENCY);

	return decode_freq(REG_FIELD_GET(MTL_RPA_MASK, reg));
}

static u32 mtl_get_rpe_freq(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(&gt->mmio, MTL_MPE_FREQUENCY);
	else
		reg = xe_mmio_read32(&gt->mmio, MTL_GT_RPE_FREQUENCY);

	return decode_freq(REG_FIELD_GET(MTL_RPE_MASK, reg));
}

static u32 pvc_get_rpa_freq(struct xe_guc_pc *pc)
{
	/*
	 * For PVC we still need to use the fused RP0 as the approximation for
	 * RPa. Platforms other than PVC get the resolved RPa directly from
	 * PCODE at a different register.
	 */

	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
	return REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}

static u32 tgl_get_rpa_freq(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC);
	return REG_FIELD_GET(RPA_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}

static u32 pvc_get_rpe_freq(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	/*
	 * For PVC we still need to use the fused RP1 as the approximation
	 * for RPe.
	 */
	reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
	return REG_FIELD_GET(RP1_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}

static u32 tgl_get_rpe_freq(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	/*
	 * For platforms other than PVC, we get the resolved RPe directly from
	 * PCODE at a different register.
	 */
	reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC);
	return REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}

/**
 * xe_guc_pc_get_act_freq - Get Actual running frequency
 * @pc: The GuC PC
 *
 * Returns: the actual running frequency, which might be 0 if the GT is in a
 * Render-C sleep state (RC6).
 */
u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 freq;

	/* When in RC6, actual frequency reported will be 0. */
	if (GRAPHICS_VERx100(xe) >= 1270) {
		freq = xe_mmio_read32(&gt->mmio, MTL_MIRROR_TARGET_WP1);
		freq = REG_FIELD_GET(MTL_CAGF_MASK, freq);
	} else {
		freq = xe_mmio_read32(&gt->mmio, GT_PERF_STATUS);
		freq = REG_FIELD_GET(CAGF_MASK, freq);
	}

	freq = decode_freq(freq);

	return freq;
}

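/*
 * Read back the frequency currently requested via RPNSWREQ. Callers hold GT
 * forcewake so that RC6 does not disturb the read.
 */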
static u32 get_cur_freq(struct xe_gt *gt)
{
	u32 freq;

	freq = xe_mmio_read32(&gt->mmio, RPNSWREQ);
	freq = REG_FIELD_GET(REQ_RATIO_MASK, freq);
	return decode_freq(freq);
}

/**
 * xe_guc_pc_get_cur_freq_fw - With forcewake held, get the requested frequency
 * @pc: The GuC PC
 *
 * Returns: the requested frequency for that GT instance
 */
u32 xe_guc_pc_get_cur_freq_fw(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	return get_cur_freq(gt);
}

/**
 * xe_guc_pc_get_cur_freq - Get Current requested frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -ETIMEDOUT if GT forcewake could not be acquired.
 */
int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq)
{
	struct xe_gt *gt = pc_to_gt(pc);
	unsigned int fw_ref;

	/*
	 * GuC SLPC plays with cur freq request when GuCRC is enabled.
	 * Block RC6 for a more reliable read.
	 */
	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT)) {
		xe_force_wake_put(gt_to_fw(gt), fw_ref);
		return -ETIMEDOUT;
	}

	*freq = get_cur_freq(gt);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	return 0;
}

/**
 * xe_guc_pc_get_rp0_freq - Get the RP0 freq
 * @pc: The GuC PC
 *
 * Returns: RP0 freq.
 */
u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc)
{
	return pc->rp0_freq;
}

/**
 * xe_guc_pc_get_rpa_freq - Get the RPa freq
 * @pc: The GuC PC
 *
 * Returns: RPa freq.
 */
u32 xe_guc_pc_get_rpa_freq(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);

	if (GRAPHICS_VERx100(xe) == 1260)
		return pvc_get_rpa_freq(pc);
	else if (GRAPHICS_VERx100(xe) >= 1270)
		return mtl_get_rpa_freq(pc);
	else
		return tgl_get_rpa_freq(pc);
}

/**
 * xe_guc_pc_get_rpe_freq - Get the RPe freq
 * @pc: The GuC PC
 *
 * Returns: RPe freq.
 */
u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);
	u32 freq;

	if (GRAPHICS_VERx100(xe) == 1260)
		freq = pvc_get_rpe_freq(pc);
	else if (GRAPHICS_VERx100(xe) >= 1270)
		freq = mtl_get_rpe_freq(pc);
	else
		freq = tgl_get_rpe_freq(pc);

	return freq;
}

/**
 * xe_guc_pc_get_rpn_freq - Get the RPn freq
 * @pc: The GuC PC
 *
 * Returns: RPn freq.
 */
u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc)
{
	return pc->rpn_freq;
}

static int xe_guc_pc_get_min_freq_locked(struct xe_guc_pc *pc, u32 *freq)
{
	int ret;

	lockdep_assert_held(&pc->freq_lock);

	/* Might be in the middle of a gt reset */
	if (!pc->freq_ready)
		return -EAGAIN;

	ret = pc_action_query_task_state(pc);
	if (ret)
		return ret;

	*freq = pc_get_min_freq(pc);

	return 0;
}

/**
 * xe_guc_pc_get_min_freq - Get the min operational frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
 */
int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq)
{
	guard(mutex)(&pc->freq_lock);

	return xe_guc_pc_get_min_freq_locked(pc, freq);
}

static int xe_guc_pc_set_min_freq_locked(struct xe_guc_pc *pc, u32 freq)
{
	int ret;

	lockdep_assert_held(&pc->freq_lock);

	/* Might be in the middle of a gt reset */
	if (!pc->freq_ready)
		return -EAGAIN;

	ret = pc_set_min_freq(pc, freq);
	if (ret)
		return ret;

	pc->user_requested_min = freq;

	return 0;
}

/**
 * xe_guc_pc_set_min_freq - Set the minimum operational frequency
 * @pc: The GuC PC
 * @freq: The selected minimum frequency
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset),
 *         -EINVAL if value out of bounds.
 */
int xe_guc_pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
{
	guard(mutex)(&pc->freq_lock);

	return xe_guc_pc_set_min_freq_locked(pc, freq);
}

static int xe_guc_pc_get_max_freq_locked(struct xe_guc_pc *pc, u32 *freq)
{
	int ret;

	lockdep_assert_held(&pc->freq_lock);

	/* Might be in the middle of a gt reset */
	if (!pc->freq_ready)
		return -EAGAIN;

	ret = pc_action_query_task_state(pc);
	if (ret)
		return ret;

	*freq = pc_get_max_freq(pc);

	return 0;
}

/**
 * xe_guc_pc_get_max_freq - Get Maximum operational frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
 */
int xe_guc_pc_get_max_freq(struct xe_guc_pc *pc, u32 *freq)
{
	guard(mutex)(&pc->freq_lock);

	return xe_guc_pc_get_max_freq_locked(pc, freq);
}

static int xe_guc_pc_set_max_freq_locked(struct xe_guc_pc *pc, u32 freq)
{
	int ret;

	lockdep_assert_held(&pc->freq_lock);

	/* Might be in the middle of a gt reset */
	if (!pc->freq_ready)
		return -EAGAIN;

	ret = pc_set_max_freq(pc, freq);
	if (ret)
		return ret;

	pc->user_requested_max = freq;

	return 0;
}

/**
 * xe_guc_pc_set_max_freq - Set the maximum operational frequency
 * @pc: The GuC PC
 * @freq: The selected maximum frequency value
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset),
 *         -EINVAL if value out of bounds.
 */
int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
{
	if (XE_GT_WA(pc_to_gt(pc), 22019338487)) {
		if (wait_for_flush_complete(pc) != 0)
			return -EAGAIN;
	}

	guard(mutex)(&pc->freq_lock);

	return xe_guc_pc_set_max_freq_locked(pc, freq);
}

/**
 * xe_guc_pc_c_status - get the current GT C state
 * @pc: XE_GuC_PC instance
 *
 * Return: the current GT C state (GT_IDLE_C0, GT_IDLE_C6 or GT_IDLE_UNKNOWN).
 */
enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg, gt_c_state;

	if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
		reg = xe_mmio_read32(&gt->mmio, MTL_MIRROR_TARGET_WP1);
		gt_c_state = REG_FIELD_GET(MTL_CC_MASK, reg);
	} else {
		reg = xe_mmio_read32(&gt->mmio, GT_CORE_STATUS);
		gt_c_state = REG_FIELD_GET(RCN_MASK, reg);
	}

	switch (gt_c_state) {
	case GT_C6:
		return GT_IDLE_C6;
	case GT_C0:
		return GT_IDLE_C0;
	default:
		return GT_IDLE_UNKNOWN;
	}
}

/**
 * xe_guc_pc_rc6_residency - rc6 residency counter
 * @pc: Xe_GuC_PC instance
 *
 * Return: the raw RC6 residency counter value.
 */
u64 xe_guc_pc_rc6_residency(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	reg = xe_mmio_read32(&gt->mmio, GT_GFX_RC6);

	return reg;
}

/**
 * xe_guc_pc_mc6_residency - mc6 residency counter
 * @pc: Xe_GuC_PC instance
 *
 * Return: the raw MC6 residency counter value.
 */
u64 xe_guc_pc_mc6_residency(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u64 reg;

	reg = xe_mmio_read32(&gt->mmio, MTL_MEDIA_MC6);

	return reg;
}

static void mtl_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	xe_device_assert_mem_access(pc_to_xe(pc));

	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(&gt->mmio, MTL_MEDIAP_STATE_CAP);
	else
		reg = xe_mmio_read32(&gt->mmio, MTL_RP_STATE_CAP);

	pc->rp0_freq = decode_freq(REG_FIELD_GET(MTL_RP0_CAP_MASK, reg));

	pc->rpn_freq = decode_freq(REG_FIELD_GET(MTL_RPN_CAP_MASK, reg));
}

static void tgl_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 reg;

	xe_device_assert_mem_access(pc_to_xe(pc));

	if (xe->info.platform == XE_PVC)
		reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
	else
		reg = xe_mmio_read32(&gt->mmio, RP_STATE_CAP);
	pc->rp0_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
	pc->rpn_freq = REG_FIELD_GET(RPN_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}

static void pc_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);

	if (GRAPHICS_VERx100(xe) >= 1270)
		mtl_init_fused_rp_values(pc);
	else
		tgl_init_fused_rp_values(pc);
}

static u32 pc_max_freq_cap(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	if (XE_GT_WA(gt, 22019338487)) {
		if (xe_gt_is_media_type(gt))
			return min(LNL_MERT_FREQ_CAP, pc->rp0_freq);
		else
			return min(BMG_MERT_FREQ_CAP, pc->rp0_freq);
	} else {
		return pc->rp0_freq;
	}
}

/**
 * xe_guc_pc_raise_unslice - Initialize RPx values and request a higher GT
 * frequency to allow faster GuC load times
 * @pc: Xe_GuC_PC instance
 */
void xe_guc_pc_raise_unslice(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
	pc_set_cur_freq(pc, pc_max_freq_cap(pc));
}

/**
 * xe_guc_pc_init_early - Initialize RPx values
 * @pc: Xe_GuC_PC instance
 */
void xe_guc_pc_init_early(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
	pc_init_fused_rp_values(pc);
}

static int pc_adjust_freq_bounds(struct xe_guc_pc *pc)
{
	struct xe_tile *tile = gt_to_tile(pc_to_gt(pc));
	int ret;

	lockdep_assert_held(&pc->freq_lock);

	ret = pc_action_query_task_state(pc);
	if (ret)
		goto out;

	/*
	 * GuC defaults to some RPmax that is not actually achievable without
	 * overclocking. Let's adjust it to the Hardware RP0, which is the
	 * regular maximum.
	 */
	if (pc_get_max_freq(pc) > pc->rp0_freq) {
		ret = pc_set_max_freq(pc, pc->rp0_freq);
		if (ret)
			goto out;
	}

	/*
	 * The same thing happens on server platforms, where min is listed as
	 * RPmax.
	 */
	if (pc_get_min_freq(pc) > pc->rp0_freq)
		ret = pc_set_min_freq(pc, pc->rp0_freq);

	if (XE_DEVICE_WA(tile_to_xe(tile), 14022085890))
		ret = pc_set_min_freq(pc, max(BMG_MIN_FREQ, pc_get_min_freq(pc)));

out:
	return ret;
}

static int pc_adjust_requested_freq(struct xe_guc_pc *pc)
{
	int ret = 0;

	lockdep_assert_held(&pc->freq_lock);

	if (pc->user_requested_min != 0) {
		ret = pc_set_min_freq(pc, pc->user_requested_min);
		if (ret)
			return ret;
	}

	if (pc->user_requested_max != 0) {
		ret = pc_set_max_freq(pc, pc->user_requested_max);
		if (ret)
			return ret;
	}

	return ret;
}

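/*
 * WA 22019338487 additionally requires capping the GT frequency while an L2
 * cache flush is in progress; that only matters when the fused RP0 exceeds
 * BMG_MERT_FLUSH_FREQ_CAP.
 */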
static bool needs_flush_freq_limit(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	return XE_GT_WA(gt, 22019338487) &&
	       pc->rp0_freq > BMG_MERT_FLUSH_FREQ_CAP;
}

/**
 * xe_guc_pc_apply_flush_freq_limit() - Limit max GT freq during L2 flush
 * @pc: the xe_guc_pc object
 *
 * As per the WA, reduce max GT frequency during L2 cache flush
 */
void xe_guc_pc_apply_flush_freq_limit(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 max_freq;
	int ret;

	if (!needs_flush_freq_limit(pc))
		return;

	guard(mutex)(&pc->freq_lock);

	ret = xe_guc_pc_get_max_freq_locked(pc, &max_freq);
	if (!ret && max_freq > BMG_MERT_FLUSH_FREQ_CAP) {
		ret = pc_set_max_freq(pc, BMG_MERT_FLUSH_FREQ_CAP);
		if (ret) {
			xe_gt_err_once(gt, "Failed to cap max freq on flush to %u, %pe\n",
				       BMG_MERT_FLUSH_FREQ_CAP, ERR_PTR(ret));
			return;
		}

		atomic_set(&pc->flush_freq_limit, 1);

		/*
		 * If user has previously changed max freq, stash that value to
		 * restore later, otherwise use the current max. New user
		 * requests wait on flush.
		 */
		if (pc->user_requested_max != 0)
			pc->stashed_max_freq = pc->user_requested_max;
		else
			pc->stashed_max_freq = max_freq;
	}

	/*
	 * Wait for the actual freq to go below the flush cap: even if the
	 * previous max was below the cap, the current one might still be
	 * above it.
	 */
	ret = wait_for_act_freq_max_limit(pc, BMG_MERT_FLUSH_FREQ_CAP);
	if (ret)
		xe_gt_err_once(gt, "Actual freq did not reduce to %u, %pe\n",
			       BMG_MERT_FLUSH_FREQ_CAP, ERR_PTR(ret));
}

/**
 * xe_guc_pc_remove_flush_freq_limit() - Remove max GT freq limit after L2 flush completes.
 * @pc: the xe_guc_pc object
 *
 * Restore the previous GT max frequency value.
 */
void xe_guc_pc_remove_flush_freq_limit(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	int ret = 0;

	if (!needs_flush_freq_limit(pc))
		return;

	if (!atomic_read(&pc->flush_freq_limit))
		return;

	mutex_lock(&pc->freq_lock);

	ret = pc_set_max_freq(pc, pc->stashed_max_freq);
	if (ret)
		xe_gt_err_once(gt, "Failed to restore max freq %u:%d\n",
			       pc->stashed_max_freq, ret);

	atomic_set(&pc->flush_freq_limit, 0);
	mutex_unlock(&pc->freq_lock);
	wake_up_var(&pc->flush_freq_limit);
}

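/*
 * WA 22019338487: keep both min and max bounded by the MERT frequency cap
 * until the driver has fully loaded; the pre-cap values are stashed and
 * restored later via xe_guc_pc_restore_stashed_freq().
 */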
static int pc_set_mert_freq_cap(struct xe_guc_pc *pc)
{
	int ret;

	if (!XE_GT_WA(pc_to_gt(pc), 22019338487))
		return 0;

	guard(mutex)(&pc->freq_lock);

	/*
	 * Get updated min/max and stash them.
	 */
	ret = xe_guc_pc_get_min_freq_locked(pc, &pc->stashed_min_freq);
	if (!ret)
		ret = xe_guc_pc_get_max_freq_locked(pc, &pc->stashed_max_freq);
	if (ret)
		return ret;

	/*
	 * Ensure min and max are bound by MERT_FREQ_CAP until driver loads.
	 */
	ret = pc_set_min_freq(pc, min(xe_guc_pc_get_rpe_freq(pc), pc_max_freq_cap(pc)));
	if (!ret)
		ret = pc_set_max_freq(pc, min(pc->rp0_freq, pc_max_freq_cap(pc)));

	return ret;
}

/**
 * xe_guc_pc_restore_stashed_freq - Set min/max back to stashed values
 * @pc: The GuC PC
 *
 * Returns: 0 on success,
 *          error code on failure
 */
int xe_guc_pc_restore_stashed_freq(struct xe_guc_pc *pc)
{
	int ret = 0;

	if (IS_SRIOV_VF(pc_to_xe(pc)) || pc_to_xe(pc)->info.skip_guc_pc)
		return 0;

	mutex_lock(&pc->freq_lock);
	ret = pc_set_max_freq(pc, pc->stashed_max_freq);
	if (!ret)
		ret = pc_set_min_freq(pc, pc->stashed_min_freq);
	mutex_unlock(&pc->freq_lock);

	return ret;
}

/**
 * xe_guc_pc_gucrc_disable - Disable GuC RC
 * @pc: Xe_GuC_PC instance
 *
 * Disables GuC RC by taking control of RC6 back from GuC.
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);
	struct xe_gt *gt = pc_to_gt(pc);
	int ret = 0;

	if (xe->info.skip_guc_pc)
		return 0;

	ret = pc_action_setup_gucrc(pc, GUCRC_HOST_CONTROL);
	if (ret)
		return ret;

	return xe_gt_idle_disable_c6(gt);
}

/**
 * xe_guc_pc_override_gucrc_mode - override GUCRC mode
 * @pc: Xe_GuC_PC instance
 * @mode: new value of the mode.
 *
 * Return: 0 on success, negative error code on error
 */
int xe_guc_pc_override_gucrc_mode(struct xe_guc_pc *pc, enum slpc_gucrc_mode mode)
{
	int ret;

	xe_pm_runtime_get(pc_to_xe(pc));
	ret = pc_action_set_param(pc, SLPC_PARAM_PWRGATE_RC_MODE, mode);
	xe_pm_runtime_put(pc_to_xe(pc));

	return ret;
}

/**
 * xe_guc_pc_unset_gucrc_mode - unset GUCRC mode override
 * @pc: Xe_GuC_PC instance
 *
 * Return: 0 on success, negative error code on error
 */
int xe_guc_pc_unset_gucrc_mode(struct xe_guc_pc *pc)
{
	int ret;

	xe_pm_runtime_get(pc_to_xe(pc));
	ret = pc_action_unset_param(pc, SLPC_PARAM_PWRGATE_RC_MODE);
	xe_pm_runtime_put(pc_to_xe(pc));

	return ret;
}

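/*
 * Hand PCODE the fused min/max frequency table, expressed in units of
 * GT_FREQUENCY_MULTIPLIER (50 MHz).
 */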
static void pc_init_pcode_freq(struct xe_guc_pc *pc)
{
	u32 min = DIV_ROUND_CLOSEST(pc->rpn_freq, GT_FREQUENCY_MULTIPLIER);
	u32 max = DIV_ROUND_CLOSEST(pc->rp0_freq, GT_FREQUENCY_MULTIPLIER);

	XE_WARN_ON(xe_pcode_init_min_freq_table(gt_to_tile(pc_to_gt(pc)), min, max));
}

static int pc_init_freqs(struct xe_guc_pc *pc)
{
	int ret;

	mutex_lock(&pc->freq_lock);

	ret = pc_adjust_freq_bounds(pc);
	if (ret)
		goto out;

	ret = pc_adjust_requested_freq(pc);
	if (ret)
		goto out;

	pc_init_pcode_freq(pc);

	/*
	 * The frequencies are really ready for use only after the
	 * user-requested ones have been restored.
	 */
	pc->freq_ready = true;

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

static int pc_action_set_strategy(struct xe_guc_pc *pc, u32 val)
{
	int ret = 0;

	ret = pc_action_set_param(pc,
				  SLPC_PARAM_STRATEGIES,
				  val);

	return ret;
}

static const char *power_profile_to_string(struct xe_guc_pc *pc)
{
	switch (pc->power_profile) {
	case SLPC_POWER_PROFILE_BASE:
		return "base";
	case SLPC_POWER_PROFILE_POWER_SAVING:
		return "power_saving";
	default:
		return "invalid";
	}
}

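/**
 * xe_guc_pc_get_power_profile - Read back the current power profile selection
 * @pc: The GuC PC
 * @profile: output buffer; the active profile is printed enclosed in brackets
 */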
void xe_guc_pc_get_power_profile(struct xe_guc_pc *pc, char *profile)
{
	switch (pc->power_profile) {
	case SLPC_POWER_PROFILE_BASE:
		sprintf(profile, "[%s]    %s\n", "base", "power_saving");
		break;
	case SLPC_POWER_PROFILE_POWER_SAVING:
		sprintf(profile, "%s    [%s]\n", "base", "power_saving");
		break;
	default:
		sprintf(profile, "invalid");
	}
}

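/**
 * xe_guc_pc_set_power_profile - Select a SLPC power profile
 * @pc: The GuC PC
 * @buf: profile name, either "base" or "power_saving"
 *
 * Return: 0 on success, -EINVAL if @buf does not name a known profile,
 * negative error code on other errors.
 */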
int xe_guc_pc_set_power_profile(struct xe_guc_pc *pc, const char *buf)
{
	int ret = 0;
	u32 val;

	if (strncmp("base", buf, strlen("base")) == 0)
		val = SLPC_POWER_PROFILE_BASE;
	else if (strncmp("power_saving", buf, strlen("power_saving")) == 0)
		val = SLPC_POWER_PROFILE_POWER_SAVING;
	else
		return -EINVAL;

	guard(mutex)(&pc->freq_lock);
	xe_pm_runtime_get_noresume(pc_to_xe(pc));

	ret = pc_action_set_param(pc,
				  SLPC_PARAM_POWER_PROFILE,
				  val);
	if (ret)
		xe_gt_err_once(pc_to_gt(pc), "Failed to set power profile to %d: %pe\n",
			       val, ERR_PTR(ret));
	else
		pc->power_profile = val;

	xe_pm_runtime_put(pc_to_xe(pc));

	return ret;
}

/**
 * xe_guc_pc_start - Start GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_pc_start(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);
	struct xe_gt *gt = pc_to_gt(pc);
	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
	unsigned int fw_ref;
	ktime_t earlier;
	int ret;

	xe_gt_assert(gt, xe_device_uc_enabled(xe));

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT)) {
		xe_force_wake_put(gt_to_fw(gt), fw_ref);
		return -ETIMEDOUT;
	}

	if (xe->info.skip_guc_pc) {
		if (xe->info.platform != XE_PVC)
			xe_gt_idle_enable_c6(gt);

		/* Request max possible since dynamic freq mgmt is not enabled */
		pc_set_cur_freq(pc, UINT_MAX);

		ret = 0;
		goto out;
	}

	xe_map_memset(xe, &pc->bo->vmap, 0, 0, size);
	slpc_shared_data_write(pc, header.size, size);

	earlier = ktime_get();
	ret = pc_action_reset(pc);
	if (ret)
		goto out;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
			      SLPC_RESET_TIMEOUT_MS)) {
		xe_gt_warn(gt, "GuC PC start taking longer than normal [freq = %dMHz (req = %dMHz), perf_limit_reasons = 0x%08X]\n",
			   xe_guc_pc_get_act_freq(pc), get_cur_freq(gt),
			   xe_gt_throttle_get_limit_reasons(gt));

		if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
				      SLPC_RESET_EXTENDED_TIMEOUT_MS)) {
			xe_gt_err(gt, "GuC PC Start failed: Dynamic GT frequency control and GT sleep states are now disabled.\n");
			ret = -EIO;
			goto out;
		}

		xe_gt_warn(gt, "GuC PC excessive start time: %lldms\n",
			   ktime_ms_delta(ktime_get(), earlier));
	}

	ret = pc_init_freqs(pc);
	if (ret)
		goto out;

	ret = pc_set_mert_freq_cap(pc);
	if (ret)
		goto out;

	if (xe->info.platform == XE_PVC) {
		xe_guc_pc_gucrc_disable(pc);
		ret = 0;
		goto out;
	}

	ret = pc_action_setup_gucrc(pc, GUCRC_FIRMWARE_CONTROL);
	if (ret)
		goto out;

	/* Enable SLPC Optimized Strategy for compute */
	ret = pc_action_set_strategy(pc, SLPC_OPTIMIZED_STRATEGY_COMPUTE);
	if (ret)
		goto out;

	/* Set cached value of power_profile */
	ret = xe_guc_pc_set_power_profile(pc, power_profile_to_string(pc));
	if (unlikely(ret))
		xe_gt_err(gt, "Failed to set SLPC power profile: %pe\n", ERR_PTR(ret));

out:
	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	return ret;
}

/**
 * xe_guc_pc_stop - Stop GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 */
int xe_guc_pc_stop(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);

	if (xe->info.skip_guc_pc) {
		xe_gt_idle_disable_c6(pc_to_gt(pc));
		return 0;
	}

	mutex_lock(&pc->freq_lock);
	pc->freq_ready = false;
	mutex_unlock(&pc->freq_lock);

	return 0;
}

/**
 * xe_guc_pc_fini_hw - Finalize GuC's Power Conservation component
 * @arg: opaque pointer that should point to Xe_GuC_PC instance
 */
static void xe_guc_pc_fini_hw(void *arg)
{
	struct xe_guc_pc *pc = arg;
	struct xe_device *xe = pc_to_xe(pc);
	unsigned int fw_ref;

	if (xe_device_wedged(xe))
		return;

	fw_ref = xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL);
	xe_guc_pc_gucrc_disable(pc);
	XE_WARN_ON(xe_guc_pc_stop(pc));

	/* Bind requested freq to mert_freq_cap before unload */
	pc_set_cur_freq(pc, min(pc_max_freq_cap(pc), xe_guc_pc_get_rpe_freq(pc)));

	xe_force_wake_put(gt_to_fw(pc_to_gt(pc)), fw_ref);
}

/**
 * xe_guc_pc_init - Initialize GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 */
int xe_guc_pc_init(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_bo *bo;
	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
	int err;

	if (xe->info.skip_guc_pc)
		return 0;

	err = drmm_mutex_init(&xe->drm, &pc->freq_lock);
	if (err)
		return err;

	bo = xe_managed_bo_create_pin_map(xe, tile, size,
					  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
					  XE_BO_FLAG_GGTT |
					  XE_BO_FLAG_GGTT_INVALIDATE |
					  XE_BO_FLAG_PINNED_NORESTORE);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	pc->bo = bo;

	pc->power_profile = SLPC_POWER_PROFILE_BASE;

	return devm_add_action_or_reset(xe->drm.dev, xe_guc_pc_fini_hw, pc);
}

static const char *pc_get_state_string(struct xe_guc_pc *pc)
{
	switch (slpc_shared_data_read(pc, header.global_state)) {
	case SLPC_GLOBAL_STATE_NOT_RUNNING:
		return "not running";
	case SLPC_GLOBAL_STATE_INITIALIZING:
		return "initializing";
	case SLPC_GLOBAL_STATE_RESETTING:
		return "resetting";
	case SLPC_GLOBAL_STATE_RUNNING:
		return "running";
	case SLPC_GLOBAL_STATE_SHUTTING_DOWN:
		return "shutting down";
	case SLPC_GLOBAL_STATE_ERROR:
		return "error";
	default:
		return "unknown";
	}
}

/**
 * xe_guc_pc_print - Print GuC's Power Conservation information for debug
 * @pc: Xe_GuC_PC instance
 * @p: drm_printer
 */
void xe_guc_pc_print(struct xe_guc_pc *pc, struct drm_printer *p)
{
	drm_printf(p, "SLPC Shared Data Header:\n");
	drm_printf(p, "\tSize: %x\n", slpc_shared_data_read(pc, header.size));
	drm_printf(p, "\tGlobal State: %s\n", pc_get_state_string(pc));

	if (pc_action_query_task_state(pc))
		return;

	drm_printf(p, "\nSLPC Tasks Status:\n");
	drm_printf(p, "\tGTPERF enabled: %s\n",
		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
			      SLPC_GTPERF_TASK_ENABLED));
	drm_printf(p, "\tDCC enabled: %s\n",
		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
			      SLPC_DCC_TASK_ENABLED));
	drm_printf(p, "\tDCC in use: %s\n",
		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
			      SLPC_IN_DCC));
	drm_printf(p, "\tBalancer enabled: %s\n",
		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
			      SLPC_BALANCER_ENABLED));
	drm_printf(p, "\tIBC enabled: %s\n",
		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
			      SLPC_IBC_TASK_ENABLED));
	drm_printf(p, "\tBalancer IA LMT enabled: %s\n",
		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
			      SLPC_BALANCER_IA_LMT_ENABLED));
	drm_printf(p, "\tBalancer IA LMT active: %s\n",
		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
			      SLPC_BALANCER_IA_LMT_ACTIVE));
}