xref: /linux/drivers/gpu/drm/xe/xe_guc_pc.c (revision 9156bf442ee56c0f883aa4c81af9c8471eef6846)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include "xe_guc_pc.h"
7 
8 #include <linux/cleanup.h>
9 #include <linux/delay.h>
10 #include <linux/iopoll.h>
11 #include <linux/jiffies.h>
12 #include <linux/ktime.h>
13 #include <linux/wait_bit.h>
14 
15 #include <drm/drm_managed.h>
16 #include <drm/drm_print.h>
17 #include <generated/xe_device_wa_oob.h>
18 #include <generated/xe_wa_oob.h>
19 
20 #include "abi/guc_actions_slpc_abi.h"
21 #include "regs/xe_gt_regs.h"
22 #include "regs/xe_regs.h"
23 #include "xe_bo.h"
24 #include "xe_device.h"
25 #include "xe_force_wake.h"
26 #include "xe_gt.h"
27 #include "xe_gt_idle.h"
28 #include "xe_gt_printk.h"
29 #include "xe_gt_throttle.h"
30 #include "xe_gt_types.h"
31 #include "xe_guc.h"
32 #include "xe_guc_ct.h"
33 #include "xe_map.h"
34 #include "xe_mmio.h"
35 #include "xe_pcode.h"
36 #include "xe_pm.h"
37 #include "xe_sriov.h"
38 #include "xe_wa.h"
39 
40 #define MCHBAR_MIRROR_BASE_SNB	0x140000
41 
42 #define RP_STATE_CAP		XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5998)
43 #define   RP0_MASK		REG_GENMASK(7, 0)
44 #define   RP1_MASK		REG_GENMASK(15, 8)
45 #define   RPN_MASK		REG_GENMASK(23, 16)
46 
47 #define FREQ_INFO_REC	XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5ef0)
48 #define   RPE_MASK		REG_GENMASK(15, 8)
49 #define   RPA_MASK		REG_GENMASK(31, 16)
50 
51 #define GT_PERF_STATUS		XE_REG(0x1381b4)
52 #define   CAGF_MASK	REG_GENMASK(19, 11)
53 
54 #define GT_FREQUENCY_MULTIPLIER	50
55 #define GT_FREQUENCY_SCALER	3
56 
57 #define LNL_MERT_FREQ_CAP	800
58 #define BMG_MERT_FREQ_CAP	2133
59 #define BMG_MIN_FREQ		1200
60 #define BMG_MERT_FLUSH_FREQ_CAP	2600
61 
62 #define SLPC_RESET_TIMEOUT_MS 5 /* roughly 5ms, but no need for precision */
63 #define SLPC_RESET_EXTENDED_TIMEOUT_MS 1000 /* To be used only at pc_start */
64 #define SLPC_ACT_FREQ_TIMEOUT_MS 100
65 
66 /**
67  * DOC: GuC Power Conservation (PC)
68  *
69  * GuC Power Conservation (PC) supports multiple features for the most
70  * efficient and performing use of the GT when GuC submission is enabled,
71  * including frequency management, Render-C states management, and various
72  * algorithms for power balancing.
73  *
74  * Single Loop Power Conservation (SLPC) is the name given to the suite of
75  * connected power conservation features in the GuC firmware. The firmware
76  * exposes a programming interface to the host for the control of SLPC.
77  *
78  * Frequency management:
79  * ---------------------
80  *
 * Xe driver enables SLPC with all of its default features and frequency
82  * selection, which varies per platform.
83  *
84  * Power profiles add another level of control to SLPC. When power saving
85  * profile is chosen, SLPC will use conservative thresholds to ramp frequency,
86  * thus saving power. Base profile is default and ensures balanced performance
87  * for any workload.
88  *
89  * Render-C States:
90  * ----------------
91  *
 * Render-C states are also a GuC PC feature, now enabled in Xe for
 * all platforms.
94  *
95  */
96 
/* Walk from the embedded PC sub-struct back to its owning xe_guc. */
static struct xe_guc *pc_to_guc(struct xe_guc_pc *pc)
{
	return container_of(pc, struct xe_guc, pc);
}

/* CT channel used for all host-to-GuC SLPC requests. */
static struct xe_guc_ct *pc_to_ct(struct xe_guc_pc *pc)
{
	return &pc_to_guc(pc)->ct;
}

/* GT that owns this PC instance. */
static struct xe_gt *pc_to_gt(struct xe_guc_pc *pc)
{
	return guc_to_gt(pc_to_guc(pc));
}

/* Device that owns this PC instance. */
static struct xe_device *pc_to_xe(struct xe_guc_pc *pc)
{
	return guc_to_xe(pc_to_guc(pc));
}

/* CPU mapping of the SLPC shared-data buffer object. */
static struct iosys_map *pc_to_maps(struct xe_guc_pc *pc)
{
	return &pc->bo->vmap;
}
121 
122 #define slpc_shared_data_read(pc_, field_) \
123 	xe_map_rd_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
124 			struct slpc_shared_data, field_)
125 
126 #define slpc_shared_data_write(pc_, field_, val_) \
127 	xe_map_wr_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
128 			struct slpc_shared_data, field_, val_)
129 
130 #define SLPC_EVENT(id, count) \
131 	(FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID, id) | \
132 	 FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, count))
133 
/*
 * Poll the SLPC global state published in shared data until it equals
 * @target_state, sampling every 20us for up to @timeout_ms.
 *
 * Returns 0 once the state matches, or a negative error code if the
 * timeout expires first.
 */
static int wait_for_pc_state(struct xe_guc_pc *pc,
			     enum slpc_global_state target_state,
			     int timeout_ms)
{
	enum slpc_global_state state;

	/* Reading shared data requires the device to be awake */
	xe_device_assert_mem_access(pc_to_xe(pc));

	return poll_timeout_us(state = slpc_shared_data_read(pc, header.global_state),
			       state == target_state,
			       20, timeout_ms * USEC_PER_MSEC, false);
}
146 
147 static int wait_for_flush_complete(struct xe_guc_pc *pc)
148 {
149 	const unsigned long timeout = msecs_to_jiffies(30);
150 
151 	if (!wait_var_event_timeout(&pc->flush_freq_limit,
152 				    !atomic_read(&pc->flush_freq_limit),
153 				    timeout))
154 		return -ETIMEDOUT;
155 
156 	return 0;
157 }
158 
/*
 * Poll the actual GT frequency until it drops to @max_limit (MHz) or
 * below, sampling every 20us for up to SLPC_ACT_FREQ_TIMEOUT_MS.
 *
 * Returns 0 on success, or a negative error code on timeout.
 */
static int wait_for_act_freq_max_limit(struct xe_guc_pc *pc, u32 max_limit)
{
	u32 freq;

	return poll_timeout_us(freq = xe_guc_pc_get_act_freq(pc),
			       freq <= max_limit,
			       20, SLPC_ACT_FREQ_TIMEOUT_MS * USEC_PER_MSEC, false);
}
167 
/*
 * Send the SLPC RESET event, pointing GuC at the shared-data buffer.
 * Errors are logged unless the device is wedged and the send was
 * cancelled as a consequence.
 */
static int pc_action_reset(struct xe_guc_pc *pc)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_RESET, 2),
		/* Two event args: GGTT address of shared data, upper 32 bits 0 */
		xe_bo_ggtt_addr(pc->bo),
		0,
	};
	int ret;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
		xe_gt_err(pc_to_gt(pc), "GuC PC reset failed: %pe\n",
			  ERR_PTR(ret));

	return ret;
}
186 
/*
 * Ask GuC to dump its current task state into the shared-data buffer so
 * pc_get_{min,max}_freq() can read fresh values.
 *
 * Returns 0 on success, -EAGAIN if SLPC is not yet running, or a
 * negative error code from the CT send.
 */
static int pc_action_query_task_state(struct xe_guc_pc *pc)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
		xe_bo_ggtt_addr(pc->bo),
		0,
	};
	int ret;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
			      SLPC_RESET_TIMEOUT_MS))
		return -EAGAIN;

	/* Blocking here to ensure the results are ready before reading them */
	ret = xe_guc_ct_send_block(ct, action, ARRAY_SIZE(action));
	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
		xe_gt_err(pc_to_gt(pc), "GuC PC query task state failed: %pe\n",
			  ERR_PTR(ret));

	return ret;
}
210 
/*
 * Set SLPC parameter @id to @value via a H2G request.
 *
 * Returns 0 on success, -EAGAIN if SLPC is not yet running, or a
 * negative error code from the CT send.
 */
static int pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
		id,
		value,
	};
	int ret;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
			      SLPC_RESET_TIMEOUT_MS))
		return -EAGAIN;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
		xe_gt_err(pc_to_gt(pc), "GuC PC set param[%u]=%u failed: %pe\n",
			  id, value, ERR_PTR(ret));

	return ret;
}
233 
234 static int pc_action_unset_param(struct xe_guc_pc *pc, u8 id)
235 {
236 	u32 action[] = {
237 		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
238 		SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1),
239 		id,
240 	};
241 	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
242 	int ret;
243 
244 	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
245 			      SLPC_RESET_TIMEOUT_MS))
246 		return -EAGAIN;
247 
248 	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
249 	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
250 		xe_gt_err(pc_to_gt(pc), "GuC PC unset param failed: %pe",
251 			  ERR_PTR(ret));
252 
253 	return ret;
254 }
255 
/*
 * Configure the GuC RC ownership @mode (e.g. GUCRC_HOST_CONTROL to take
 * RC6 control back from GuC).
 *
 * Returns 0 on success or a negative error code from the CT send.
 */
static int pc_action_setup_gucrc(struct xe_guc_pc *pc, u32 mode)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_SETUP_PC_GUCRC,
		mode,
	};
	int ret;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
		xe_gt_err(pc_to_gt(pc), "GuC RC enable mode=%u failed: %pe\n",
			  mode, ERR_PTR(ret));
	return ret;
}
271 
/*
 * Hardware/SLPC express frequency as a ratio in units of
 * GT_FREQUENCY_MULTIPLIER / GT_FREQUENCY_SCALER MHz (50/3 ~= 16.67 MHz).
 * Convert a raw ratio to MHz, rounding to nearest.
 */
static u32 decode_freq(u32 raw)
{
	return DIV_ROUND_CLOSEST(raw * GT_FREQUENCY_MULTIPLIER,
				 GT_FREQUENCY_SCALER);
}

/* Inverse of decode_freq(): convert MHz to the hardware ratio units. */
static u32 encode_freq(u32 freq)
{
	return DIV_ROUND_CLOSEST(freq * GT_FREQUENCY_SCALER,
				 GT_FREQUENCY_MULTIPLIER);
}
283 
284 static u32 pc_get_min_freq(struct xe_guc_pc *pc)
285 {
286 	u32 freq;
287 
288 	freq = FIELD_GET(SLPC_MIN_UNSLICE_FREQ_MASK,
289 			 slpc_shared_data_read(pc, task_state_data.freq));
290 
291 	return decode_freq(freq);
292 }
293 
/*
 * Toggle manual RPS control: when enabled, punit honors software
 * frequency requests written to RPNSWREQ.
 */
static void pc_set_manual_rp_ctrl(struct xe_guc_pc *pc, bool enable)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 state = enable ? RPSWCTL_ENABLE : RPSWCTL_DISABLE;

	/* Allow/Disallow punit to process software freq requests */
	xe_mmio_write32(&gt->mmio, RP_CONTROL, state);
}
302 
/*
 * Directly request frequency @freq (MHz) from pcode, bypassing SLPC:
 * briefly enable manual RP control, write the encoded ratio, give pcode
 * time to act, then hand control back.
 */
static void pc_set_cur_freq(struct xe_guc_pc *pc, u32 freq)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 rpnswreq;

	pc_set_manual_rp_ctrl(pc, true);

	/* Req freq is in units of 16.66 Mhz */
	rpnswreq = REG_FIELD_PREP(REQ_RATIO_MASK, encode_freq(freq));
	xe_mmio_write32(&gt->mmio, RPNSWREQ, rpnswreq);

	/* Sleep for a small time to allow pcode to respond */
	usleep_range(100, 300);

	pc_set_manual_rp_ctrl(pc, false);
}
319 
320 static int pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
321 {
322 	/*
323 	 * Let's only check for the rpn-rp0 range. If max < min,
324 	 * min becomes a fixed request.
325 	 */
326 	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
327 		return -EINVAL;
328 
329 	/*
330 	 * GuC policy is to elevate minimum frequency to the efficient levels
331 	 * Our goal is to have the admin choices respected.
332 	 */
333 	pc_action_set_param(pc, SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
334 			    freq < xe_guc_pc_get_rpe_freq(pc));
335 
336 	return pc_action_set_param(pc,
337 				   SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
338 				   freq);
339 }
340 
341 static int pc_get_max_freq(struct xe_guc_pc *pc)
342 {
343 	u32 freq;
344 
345 	freq = FIELD_GET(SLPC_MAX_UNSLICE_FREQ_MASK,
346 			 slpc_shared_data_read(pc, task_state_data.freq));
347 
348 	return decode_freq(freq);
349 }
350 
/*
 * Program SLPC's global maximum frequency to @freq (MHz).
 *
 * Returns 0 on success, -EINVAL if @freq is outside [RPn, RP0], or a
 * negative error code from the GuC request.
 */
static int pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
{
	/*
	 * Let's only check for the rpn-rp0 range. If max < min,
	 * min becomes a fixed request.
	 * Also, overclocking is not supported.
	 */
	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
		return -EINVAL;

	return pc_action_set_param(pc,
				   SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
				   freq);
}
365 
/*
 * MTL+ path: read the resolved RPa (achievable) frequency in MHz from
 * the media or primary GT mirror register.
 */
static u32 mtl_get_rpa_freq(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(&gt->mmio, MTL_MPA_FREQUENCY);
	else
		reg = xe_mmio_read32(&gt->mmio, MTL_GT_RPA_FREQUENCY);

	return decode_freq(REG_FIELD_GET(MTL_RPA_MASK, reg));
}
378 
379 static u32 mtl_get_rpe_freq(struct xe_guc_pc *pc)
380 {
381 	struct xe_gt *gt = pc_to_gt(pc);
382 	u32 reg;
383 
384 	if (xe_gt_is_media_type(gt))
385 		reg = xe_mmio_read32(&gt->mmio, MTL_MPE_FREQUENCY);
386 	else
387 		reg = xe_mmio_read32(&gt->mmio, MTL_GT_RPE_FREQUENCY);
388 
389 	return decode_freq(REG_FIELD_GET(MTL_RPE_MASK, reg));
390 }
391 
static u32 pvc_get_rpa_freq(struct xe_guc_pc *pc)
{
	/*
	 * For PVC we still need to use fused RP0 as the approximation for RPa
	 * For other platforms than PVC we get the resolved RPa directly from
	 * PCODE at a different register
	 */

	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
	/* Fused values are in units of GT_FREQUENCY_MULTIPLIER (50 MHz) */
	return REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}
406 
407 static u32 tgl_get_rpa_freq(struct xe_guc_pc *pc)
408 {
409 	struct xe_gt *gt = pc_to_gt(pc);
410 	u32 reg;
411 
412 	reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC);
413 	return REG_FIELD_GET(RPA_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
414 }
415 
/* PVC path: approximate RPe (efficient frequency, MHz) with fused RP1. */
static u32 pvc_get_rpe_freq(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	/*
	 * For PVC we still need to use fused RP1 as the approximation for RPe
	 */
	reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
	/* Fused values are in units of GT_FREQUENCY_MULTIPLIER (50 MHz) */
	return REG_FIELD_GET(RP1_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}
427 
/* Pre-MTL (non-PVC) path: RPe as resolved by PCODE, in MHz. */
static u32 tgl_get_rpe_freq(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	/*
	 * For other platforms than PVC, we get the resolved RPe directly from
	 * PCODE at a different register
	 */
	reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC);
	return REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}
440 
/**
 * xe_guc_pc_get_act_freq - Get Actual running frequency
 * @pc: The GuC PC
 *
 * Returns: The Actual running frequency. Which might be 0 if GT is in Render-C sleep state (RC6).
 */
u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 freq;

	/* When in RC6, actual frequency reported will be 0. */
	if (GRAPHICS_VERx100(xe) >= 1270) {
		/* MTL+ mirrors the CAGF field in a different register/layout */
		freq = xe_mmio_read32(&gt->mmio, MTL_MIRROR_TARGET_WP1);
		freq = REG_FIELD_GET(MTL_CAGF_MASK, freq);
	} else {
		freq = xe_mmio_read32(&gt->mmio, GT_PERF_STATUS);
		freq = REG_FIELD_GET(CAGF_MASK, freq);
	}

	/* Raw ratio -> MHz */
	freq = decode_freq(freq);

	return freq;
}
466 
467 static u32 get_cur_freq(struct xe_gt *gt)
468 {
469 	u32 freq;
470 
471 	freq = xe_mmio_read32(&gt->mmio, RPNSWREQ);
472 	freq = REG_FIELD_GET(REQ_RATIO_MASK, freq);
473 	return decode_freq(freq);
474 }
475 
/**
 * xe_guc_pc_get_cur_freq_fw - With fw held, get requested frequency
 * @pc: The GuC PC
 *
 * Caller must already hold the XE_FW_GT forcewake domain (asserted).
 *
 * Returns: the requested frequency for that GT instance
 */
u32 xe_guc_pc_get_cur_freq_fw(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	return get_cur_freq(gt);
}
490 
/**
 * xe_guc_pc_get_cur_freq - Get Current requested frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -ETIMEDOUT if the forcewake reference could not be taken.
 */
int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq)
{
	struct xe_gt *gt = pc_to_gt(pc);

	/*
	 * GuC SLPC plays with cur freq request when GuCRC is enabled
	 * Block RC6 for a more reliable read.
	 */
	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
	if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FW_GT))
		return -ETIMEDOUT;

	*freq = get_cur_freq(gt);

	return 0;
}
515 
/**
 * xe_guc_pc_get_rp0_freq - Get the RP0 freq
 * @pc: The GuC PC
 *
 * Returns: RP0 freq (MHz), cached by pc_init_fused_rp_values().
 */
u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc)
{
	return pc->rp0_freq;
}
526 
527 /**
528  * xe_guc_pc_get_rpa_freq - Get the RPa freq
529  * @pc: The GuC PC
530  *
531  * Returns: RPa freq.
532  */
533 u32 xe_guc_pc_get_rpa_freq(struct xe_guc_pc *pc)
534 {
535 	struct xe_gt *gt = pc_to_gt(pc);
536 	struct xe_device *xe = gt_to_xe(gt);
537 
538 	if (GRAPHICS_VERx100(xe) == 1260)
539 		return pvc_get_rpa_freq(pc);
540 	else if (GRAPHICS_VERx100(xe) >= 1270)
541 		return mtl_get_rpa_freq(pc);
542 	else
543 		return tgl_get_rpa_freq(pc);
544 }
545 
546 /**
547  * xe_guc_pc_get_rpe_freq - Get the RPe freq
548  * @pc: The GuC PC
549  *
550  * Returns: RPe freq.
551  */
552 u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc)
553 {
554 	struct xe_device *xe = pc_to_xe(pc);
555 	u32 freq;
556 
557 	if (GRAPHICS_VERx100(xe) == 1260)
558 		freq = pvc_get_rpe_freq(pc);
559 	else if (GRAPHICS_VERx100(xe) >= 1270)
560 		freq = mtl_get_rpe_freq(pc);
561 	else
562 		freq = tgl_get_rpe_freq(pc);
563 
564 	return freq;
565 }
566 
/**
 * xe_guc_pc_get_rpn_freq - Get the RPn freq
 * @pc: The GuC PC
 *
 * Returns: RPn freq (MHz), cached by pc_init_fused_rp_values().
 */
u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc)
{
	return pc->rpn_freq;
}
577 
/*
 * Locked worker for xe_guc_pc_get_min_freq(): refresh SLPC task state
 * then decode the min frequency. Caller must hold pc->freq_lock.
 */
static int xe_guc_pc_get_min_freq_locked(struct xe_guc_pc *pc, u32 *freq)
{
	int ret;

	lockdep_assert_held(&pc->freq_lock);

	/* Might be in the middle of a gt reset */
	if (!pc->freq_ready)
		return -EAGAIN;

	ret = pc_action_query_task_state(pc);
	if (ret)
		return ret;

	*freq = pc_get_min_freq(pc);

	return 0;
}
596 
/**
 * xe_guc_pc_get_min_freq - Get the min operational frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
 */
int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq)
{
	guard(mutex)(&pc->freq_lock);

	return xe_guc_pc_get_min_freq_locked(pc, freq);
}
611 
/*
 * Locked worker for xe_guc_pc_set_min_freq(): program the new min and
 * remember it as the user's request so it survives SLPC resets.
 * Caller must hold pc->freq_lock.
 */
static int xe_guc_pc_set_min_freq_locked(struct xe_guc_pc *pc, u32 freq)
{
	int ret;

	lockdep_assert_held(&pc->freq_lock);

	/* Might be in the middle of a gt reset */
	if (!pc->freq_ready)
		return -EAGAIN;

	ret = pc_set_min_freq(pc, freq);
	if (ret)
		return ret;

	/* Stashed so pc_adjust_requested_freq() can re-apply after reset */
	pc->user_requested_min = freq;

	return 0;
}
630 
/**
 * xe_guc_pc_set_min_freq - Set the minimal operational frequency
 * @pc: The GuC PC
 * @freq: The selected minimal frequency
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset),
 *         -EINVAL if value out of bounds.
 */
int xe_guc_pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
{
	guard(mutex)(&pc->freq_lock);

	return xe_guc_pc_set_min_freq_locked(pc, freq);
}
646 
/*
 * Locked worker for xe_guc_pc_get_max_freq(): refresh SLPC task state
 * then decode the max frequency. Caller must hold pc->freq_lock.
 */
static int xe_guc_pc_get_max_freq_locked(struct xe_guc_pc *pc, u32 *freq)
{
	int ret;

	lockdep_assert_held(&pc->freq_lock);

	/* Might be in the middle of a gt reset */
	if (!pc->freq_ready)
		return -EAGAIN;

	ret = pc_action_query_task_state(pc);
	if (ret)
		return ret;

	*freq = pc_get_max_freq(pc);

	return 0;
}
665 
/**
 * xe_guc_pc_get_max_freq - Get Maximum operational frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
 */
int xe_guc_pc_get_max_freq(struct xe_guc_pc *pc, u32 *freq)
{
	guard(mutex)(&pc->freq_lock);

	return xe_guc_pc_get_max_freq_locked(pc, freq);
}
680 
/*
 * Locked worker for xe_guc_pc_set_max_freq(): program the new max and
 * remember it as the user's request so it survives SLPC resets.
 * Caller must hold pc->freq_lock.
 */
static int xe_guc_pc_set_max_freq_locked(struct xe_guc_pc *pc, u32 freq)
{
	int ret;

	lockdep_assert_held(&pc->freq_lock);

	/* Might be in the middle of a gt reset */
	if (!pc->freq_ready)
		return -EAGAIN;

	ret = pc_set_max_freq(pc, freq);
	if (ret)
		return ret;

	/* Stashed so pc_adjust_requested_freq() can re-apply after reset */
	pc->user_requested_max = freq;

	return 0;
}
699 
/**
 * xe_guc_pc_set_max_freq - Set the maximum operational frequency
 * @pc: The GuC PC
 * @freq: The selected maximum frequency value
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset),
 *         -EINVAL if value out of bounds.
 */
int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
{
	/* Wa_22019338487: don't race an in-progress L2-flush freq cap */
	if (XE_GT_WA(pc_to_gt(pc), 22019338487)) {
		if (wait_for_flush_complete(pc) != 0)
			return -EAGAIN;
	}

	guard(mutex)(&pc->freq_lock);

	return xe_guc_pc_set_max_freq_locked(pc, freq);
}
720 
/**
 * xe_guc_pc_c_status - get the current GT C state
 * @pc: XE_GuC_PC instance
 *
 * Returns: GT_IDLE_C0, GT_IDLE_C6, or GT_IDLE_UNKNOWN for any other
 * hardware-reported state.
 */
enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg, gt_c_state;

	/* MTL+ mirrors the C-state field in a different register/layout */
	if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
		reg = xe_mmio_read32(&gt->mmio, MTL_MIRROR_TARGET_WP1);
		gt_c_state = REG_FIELD_GET(MTL_CC_MASK, reg);
	} else {
		reg = xe_mmio_read32(&gt->mmio, GT_CORE_STATUS);
		gt_c_state = REG_FIELD_GET(RCN_MASK, reg);
	}

	switch (gt_c_state) {
	case GT_C6:
		return GT_IDLE_C6;
	case GT_C0:
		return GT_IDLE_C0;
	default:
		return GT_IDLE_UNKNOWN;
	}
}
747 
/**
 * xe_guc_pc_rc6_residency - rc6 residency counter
 * @pc: Xe_GuC_PC instance
 *
 * Returns the raw 32-bit GT_GFX_RC6 counter widened to u64; the
 * register units/wraparound handling are up to the caller.
 */
u64 xe_guc_pc_rc6_residency(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	reg = xe_mmio_read32(&gt->mmio, GT_GFX_RC6);

	return reg;
}
761 
/**
 * xe_guc_pc_mc6_residency - mc6 residency counter
 * @pc: Xe_GuC_PC instance
 *
 * Returns the media MC6 residency counter. Note only a 32-bit MMIO
 * read is performed even though the local is u64 — presumably the
 * register is 32 bits wide; confirm against the register spec.
 */
u64 xe_guc_pc_mc6_residency(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u64 reg;

	reg = xe_mmio_read32(&gt->mmio, MTL_MEDIA_MC6);

	return reg;
}
775 
/*
 * MTL+ path: cache fused RP0/RPn (MHz) from the media or primary GT
 * state-cap register into pc->rp0_freq / pc->rpn_freq.
 */
static void mtl_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	xe_device_assert_mem_access(pc_to_xe(pc));

	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(&gt->mmio, MTL_MEDIAP_STATE_CAP);
	else
		reg = xe_mmio_read32(&gt->mmio, MTL_RP_STATE_CAP);

	pc->rp0_freq = decode_freq(REG_FIELD_GET(MTL_RP0_CAP_MASK, reg));

	pc->rpn_freq = decode_freq(REG_FIELD_GET(MTL_RPN_CAP_MASK, reg));
}
792 
/*
 * Pre-MTL path: cache fused RP0/RPn (MHz) from RP_STATE_CAP (or the
 * PVC-specific copy) into pc->rp0_freq / pc->rpn_freq.
 */
static void tgl_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 reg;

	xe_device_assert_mem_access(pc_to_xe(pc));

	if (xe->info.platform == XE_PVC)
		reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
	else
		reg = xe_mmio_read32(&gt->mmio, RP_STATE_CAP);
	/* Fused values are in units of GT_FREQUENCY_MULTIPLIER (50 MHz) */
	pc->rp0_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
	pc->rpn_freq = REG_FIELD_GET(RPN_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}
808 
/* Dispatch fused RP0/RPn initialization to the platform-specific reader. */
static void pc_init_fused_rp_values(struct xe_guc_pc *pc)
{
	if (GRAPHICS_VERx100(pc_to_xe(pc)) >= 1270)
		mtl_init_fused_rp_values(pc);
	else
		tgl_init_fused_rp_values(pc);
}
819 
820 static u32 pc_max_freq_cap(struct xe_guc_pc *pc)
821 {
822 	struct xe_gt *gt = pc_to_gt(pc);
823 
824 	if (XE_GT_WA(gt, 22019338487)) {
825 		if (xe_gt_is_media_type(gt))
826 			return min(LNL_MERT_FREQ_CAP, pc->rp0_freq);
827 		else
828 			return min(BMG_MERT_FREQ_CAP, pc->rp0_freq);
829 	} else {
830 		return pc->rp0_freq;
831 	}
832 }
833 
/**
 * xe_guc_pc_raise_unslice - Initialize RPx values and request a higher GT
 * frequency to allow faster GuC load times
 * @pc: Xe_GuC_PC instance
 *
 * Caller must hold the XE_FW_GT forcewake domain (asserted).
 */
void xe_guc_pc_raise_unslice(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
	/* Directly request the (possibly WA-capped) max via pcode */
	pc_set_cur_freq(pc, pc_max_freq_cap(pc));
}
846 
/**
 * xe_guc_pc_init_early - Initialize RPx values
 * @pc: Xe_GuC_PC instance
 *
 * Caches the fused RP0/RPn frequencies. Caller must hold the XE_FW_GT
 * forcewake domain (asserted).
 */
void xe_guc_pc_init_early(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
	pc_init_fused_rp_values(pc);
}
858 
/*
 * Clamp GuC's post-reset min/max defaults to the fused hardware range
 * and apply platform workarounds. Caller must hold pc->freq_lock.
 */
static int pc_adjust_freq_bounds(struct xe_guc_pc *pc)
{
	struct xe_tile *tile = gt_to_tile(pc_to_gt(pc));
	int ret;

	lockdep_assert_held(&pc->freq_lock);

	/* Refresh task_state_data so pc_get_{min,max}_freq() read fresh values */
	ret = pc_action_query_task_state(pc);
	if (ret)
		goto out;

	/*
	 * GuC defaults to some RPmax that is not actually achievable without
	 * overclocking. Let's adjust it to the Hardware RP0, which is the
	 * regular maximum
	 */
	if (pc_get_max_freq(pc) > pc->rp0_freq) {
		ret = pc_set_max_freq(pc, pc->rp0_freq);
		if (ret)
			goto out;
	}

	/*
	 * Same thing happens for Server platforms where min is listed as
	 * RPMax
	 */
	if (pc_get_min_freq(pc) > pc->rp0_freq)
		ret = pc_set_min_freq(pc, pc->rp0_freq);

	/* Wa_14022085890: never let the min drop below BMG_MIN_FREQ */
	if (XE_DEVICE_WA(tile_to_xe(tile), 14022085890))
		ret = pc_set_min_freq(pc, max(BMG_MIN_FREQ, pc_get_min_freq(pc)));

out:
	return ret;
}
894 
895 static int pc_adjust_requested_freq(struct xe_guc_pc *pc)
896 {
897 	int ret = 0;
898 
899 	lockdep_assert_held(&pc->freq_lock);
900 
901 	if (pc->user_requested_min != 0) {
902 		ret = pc_set_min_freq(pc, pc->user_requested_min);
903 		if (ret)
904 			return ret;
905 	}
906 
907 	if (pc->user_requested_max != 0) {
908 		ret = pc_set_max_freq(pc, pc->user_requested_max);
909 		if (ret)
910 			return ret;
911 	}
912 
913 	return ret;
914 }
915 
916 static bool needs_flush_freq_limit(struct xe_guc_pc *pc)
917 {
918 	struct xe_gt *gt = pc_to_gt(pc);
919 
920 	return  XE_GT_WA(gt, 22019338487) &&
921 		pc->rp0_freq > BMG_MERT_FLUSH_FREQ_CAP;
922 }
923 
/**
 * xe_guc_pc_apply_flush_freq_limit() - Limit max GT freq during L2 flush
 * @pc: the xe_guc_pc object
 *
 * As per the WA, reduce max GT frequency during L2 cache flush
 */
void xe_guc_pc_apply_flush_freq_limit(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 max_freq;
	int ret;

	if (!needs_flush_freq_limit(pc))
		return;

	guard(mutex)(&pc->freq_lock);

	ret = xe_guc_pc_get_max_freq_locked(pc, &max_freq);
	if (!ret && max_freq > BMG_MERT_FLUSH_FREQ_CAP) {
		ret = pc_set_max_freq(pc, BMG_MERT_FLUSH_FREQ_CAP);
		if (ret) {
			xe_gt_err_once(gt, "Failed to cap max freq on flush to %u, %pe\n",
				       BMG_MERT_FLUSH_FREQ_CAP, ERR_PTR(ret));
			return;
		}

		/* Signals wait_for_flush_complete() waiters once cleared */
		atomic_set(&pc->flush_freq_limit, 1);

		/*
		 * If user has previously changed max freq, stash that value to
		 * restore later, otherwise use the current max. New user
		 * requests wait on flush.
		 */
		if (pc->user_requested_max != 0)
			pc->stashed_max_freq = pc->user_requested_max;
		else
			pc->stashed_max_freq = max_freq;
	}

	/*
	 * Wait for actual freq to go below the flush cap: even if the previous
	 * max was below cap, the current one might still be above it
	 */
	ret = wait_for_act_freq_max_limit(pc, BMG_MERT_FLUSH_FREQ_CAP);
	if (ret)
		xe_gt_err_once(gt, "Actual freq did not reduce to %u, %pe\n",
			       BMG_MERT_FLUSH_FREQ_CAP, ERR_PTR(ret));
}
972 
973 /**
974  * xe_guc_pc_remove_flush_freq_limit() - Remove max GT freq limit after L2 flush completes.
975  * @pc: the xe_guc_pc object
976  *
977  * Retrieve the previous GT max frequency value.
978  */
979 void xe_guc_pc_remove_flush_freq_limit(struct xe_guc_pc *pc)
980 {
981 	struct xe_gt *gt = pc_to_gt(pc);
982 	int ret = 0;
983 
984 	if (!needs_flush_freq_limit(pc))
985 		return;
986 
987 	if (!atomic_read(&pc->flush_freq_limit))
988 		return;
989 
990 	mutex_lock(&pc->freq_lock);
991 
992 	ret = pc_set_max_freq(&gt->uc.guc.pc, pc->stashed_max_freq);
993 	if (ret)
994 		xe_gt_err_once(gt, "Failed to restore max freq %u:%d",
995 			       pc->stashed_max_freq, ret);
996 
997 	atomic_set(&pc->flush_freq_limit, 0);
998 	mutex_unlock(&pc->freq_lock);
999 	wake_up_var(&pc->flush_freq_limit);
1000 }
1001 
/*
 * Wa_22019338487: bound min/max by the MERT frequency cap until driver
 * load completes, stashing the current values so
 * xe_guc_pc_restore_stashed_freq() can put them back.
 */
static int pc_set_mert_freq_cap(struct xe_guc_pc *pc)
{
	int ret;

	if (!XE_GT_WA(pc_to_gt(pc), 22019338487))
		return 0;

	guard(mutex)(&pc->freq_lock);

	/*
	 * Get updated min/max and stash them.
	 */
	ret = xe_guc_pc_get_min_freq_locked(pc, &pc->stashed_min_freq);
	if (!ret)
		ret = xe_guc_pc_get_max_freq_locked(pc, &pc->stashed_max_freq);
	if (ret)
		return ret;

	/*
	 * Ensure min and max are bound by MERT_FREQ_CAP until driver loads.
	 */
	ret = pc_set_min_freq(pc, min(xe_guc_pc_get_rpe_freq(pc), pc_max_freq_cap(pc)));
	if (!ret)
		ret = pc_set_max_freq(pc, min(pc->rp0_freq, pc_max_freq_cap(pc)));

	return ret;
}
1029 
/**
 * xe_guc_pc_restore_stashed_freq - Set min/max back to stashed values
 * @pc: The GuC PC
 *
 * Undoes pc_set_mert_freq_cap(). No-op (returns 0) on SR-IOV VFs and
 * when GuC PC is skipped entirely.
 *
 * Returns: 0 on success,
 *          error code on failure
 */
int xe_guc_pc_restore_stashed_freq(struct xe_guc_pc *pc)
{
	int ret = 0;

	if (IS_SRIOV_VF(pc_to_xe(pc)) || pc_to_xe(pc)->info.skip_guc_pc)
		return 0;

	mutex_lock(&pc->freq_lock);
	ret = pc_set_max_freq(pc, pc->stashed_max_freq);
	if (!ret)
		ret = pc_set_min_freq(pc, pc->stashed_min_freq);
	mutex_unlock(&pc->freq_lock);

	return ret;
}
1052 
/**
 * xe_guc_pc_gucrc_disable - Disable GuC RC
 * @pc: Xe_GuC_PC instance
 *
 * Disables GuC RC by taking control of RC6 back from GuC.
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);
	struct xe_gt *gt = pc_to_gt(pc);
	int ret = 0;

	if (xe->info.skip_guc_pc)
		return 0;

	/* Hand RC control to the host, then force C6 off */
	ret = pc_action_setup_gucrc(pc, GUCRC_HOST_CONTROL);
	if (ret)
		return ret;

	return xe_gt_idle_disable_c6(gt);
}
1076 
/**
 * xe_guc_pc_override_gucrc_mode - override GUCRC mode
 * @pc: Xe_GuC_PC instance
 * @mode: new value of the mode.
 *
 * Holds a runtime-PM reference for the duration of the GuC request.
 *
 * Return: 0 on success, negative error code on error
 */
int xe_guc_pc_override_gucrc_mode(struct xe_guc_pc *pc, enum slpc_gucrc_mode mode)
{
	guard(xe_pm_runtime)(pc_to_xe(pc));
	return pc_action_set_param(pc, SLPC_PARAM_PWRGATE_RC_MODE, mode);
}
1089 
/**
 * xe_guc_pc_unset_gucrc_mode - unset GUCRC mode override
 * @pc: Xe_GuC_PC instance
 *
 * Reverts the mode set by xe_guc_pc_override_gucrc_mode() to the GuC
 * default. Holds a runtime-PM reference for the duration of the request.
 *
 * Return: 0 on success, negative error code on error
 */
int xe_guc_pc_unset_gucrc_mode(struct xe_guc_pc *pc)
{
	guard(xe_pm_runtime)(pc_to_xe(pc));
	return pc_action_unset_param(pc, SLPC_PARAM_PWRGATE_RC_MODE);
}
1101 
/*
 * Hand pcode the fused min/max converted from MHz to units of
 * GT_FREQUENCY_MULTIPLIER (50 MHz) for its frequency table.
 */
static void pc_init_pcode_freq(struct xe_guc_pc *pc)
{
	u32 min = DIV_ROUND_CLOSEST(pc->rpn_freq, GT_FREQUENCY_MULTIPLIER);
	u32 max = DIV_ROUND_CLOSEST(pc->rp0_freq, GT_FREQUENCY_MULTIPLIER);

	XE_WARN_ON(xe_pcode_init_min_freq_table(gt_to_tile(pc_to_gt(pc)), min, max));
}
1109 
/*
 * Bring the frequency machinery up after an SLPC (re)start: clamp GuC
 * defaults, restore user requests, seed the pcode table and finally
 * mark frequencies as usable.
 */
static int pc_init_freqs(struct xe_guc_pc *pc)
{
	int ret;

	mutex_lock(&pc->freq_lock);

	ret = pc_adjust_freq_bounds(pc);
	if (ret)
		goto out;

	ret = pc_adjust_requested_freq(pc);
	if (ret)
		goto out;

	pc_init_pcode_freq(pc);

	/*
	 * The frequencies are really ready for use only after the user
	 * requested ones got restored.
	 */
	pc->freq_ready = true;

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}
1136 
1137 static int pc_action_set_strategy(struct xe_guc_pc *pc, u32 val)
1138 {
1139 	int ret = 0;
1140 
1141 	ret = pc_action_set_param(pc,
1142 				  SLPC_PARAM_STRATEGIES,
1143 				  val);
1144 
1145 	return ret;
1146 }
1147 
1148 static const char *power_profile_to_string(struct xe_guc_pc *pc)
1149 {
1150 	switch (pc->power_profile) {
1151 	case SLPC_POWER_PROFILE_BASE:
1152 		return "base";
1153 	case SLPC_POWER_PROFILE_POWER_SAVING:
1154 		return "power_saving";
1155 	default:
1156 		return "invalid";
1157 	}
1158 }
1159 
1160 void xe_guc_pc_get_power_profile(struct xe_guc_pc *pc, char *profile)
1161 {
1162 	switch (pc->power_profile) {
1163 	case SLPC_POWER_PROFILE_BASE:
1164 		sprintf(profile, "[%s]    %s\n", "base", "power_saving");
1165 		break;
1166 	case SLPC_POWER_PROFILE_POWER_SAVING:
1167 		sprintf(profile, "%s    [%s]\n", "base", "power_saving");
1168 		break;
1169 	default:
1170 		sprintf(profile, "invalid");
1171 	}
1172 }
1173 
1174 int xe_guc_pc_set_power_profile(struct xe_guc_pc *pc, const char *buf)
1175 {
1176 	int ret = 0;
1177 	u32 val;
1178 
1179 	if (strncmp("base", buf, strlen("base")) == 0)
1180 		val = SLPC_POWER_PROFILE_BASE;
1181 	else if (strncmp("power_saving", buf, strlen("power_saving")) == 0)
1182 		val = SLPC_POWER_PROFILE_POWER_SAVING;
1183 	else
1184 		return -EINVAL;
1185 
1186 	guard(mutex)(&pc->freq_lock);
1187 	guard(xe_pm_runtime_noresume)(pc_to_xe(pc));
1188 
1189 	ret = pc_action_set_param(pc,
1190 				  SLPC_PARAM_POWER_PROFILE,
1191 				  val);
1192 	if (ret)
1193 		xe_gt_err_once(pc_to_gt(pc), "Failed to set power profile to %d: %pe\n",
1194 			       val, ERR_PTR(ret));
1195 	else
1196 		pc->power_profile = val;
1197 
1198 	return ret;
1199 }
1200 
1201 static int pc_action_set_dcc(struct xe_guc_pc *pc, bool enable)
1202 {
1203 	int ret;
1204 
1205 	ret = pc_action_set_param(pc,
1206 				  SLPC_PARAM_TASK_ENABLE_DCC,
1207 				  enable);
1208 	if (!ret)
1209 		return pc_action_set_param(pc,
1210 					   SLPC_PARAM_TASK_DISABLE_DCC,
1211 					   !enable);
1212 	else
1213 		return ret;
1214 }
1215 
1216 static int pc_modify_defaults(struct xe_guc_pc *pc)
1217 {
1218 	struct xe_device *xe = pc_to_xe(pc);
1219 	struct xe_gt *gt = pc_to_gt(pc);
1220 	int ret = 0;
1221 
1222 	if (xe->info.platform == XE_PANTHERLAKE) {
1223 		ret = pc_action_set_dcc(pc, false);
1224 		if (unlikely(ret))
1225 			xe_gt_err(gt, "Failed to modify DCC default: %pe\n", ERR_PTR(ret));
1226 	}
1227 
1228 	return ret;
1229 }
1230 
1231 /**
1232  * xe_guc_pc_start - Start GuC's Power Conservation component
1233  * @pc: Xe_GuC_PC instance
1234  */
1235 int xe_guc_pc_start(struct xe_guc_pc *pc)
1236 {
1237 	struct xe_device *xe = pc_to_xe(pc);
1238 	struct xe_gt *gt = pc_to_gt(pc);
1239 	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
1240 	ktime_t earlier;
1241 	int ret;
1242 
1243 	xe_gt_assert(gt, xe_device_uc_enabled(xe));
1244 
1245 	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
1246 	if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FW_GT))
1247 		return -ETIMEDOUT;
1248 
1249 	if (xe->info.skip_guc_pc) {
1250 		if (xe->info.platform != XE_PVC)
1251 			xe_gt_idle_enable_c6(gt);
1252 
1253 		/* Request max possible since dynamic freq mgmt is not enabled */
1254 		pc_set_cur_freq(pc, UINT_MAX);
1255 		return 0;
1256 	}
1257 
1258 	xe_map_memset(xe, &pc->bo->vmap, 0, 0, size);
1259 	slpc_shared_data_write(pc, header.size, size);
1260 
1261 	earlier = ktime_get();
1262 	ret = pc_action_reset(pc);
1263 	if (ret)
1264 		return ret;
1265 
1266 	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
1267 			      SLPC_RESET_TIMEOUT_MS)) {
1268 		xe_gt_warn(gt, "GuC PC start taking longer than normal [freq = %dMHz (req = %dMHz), perf_limit_reasons = 0x%08X]\n",
1269 			   xe_guc_pc_get_act_freq(pc), get_cur_freq(gt),
1270 			   xe_gt_throttle_get_limit_reasons(gt));
1271 
1272 		if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
1273 				      SLPC_RESET_EXTENDED_TIMEOUT_MS)) {
1274 			xe_gt_err(gt, "GuC PC Start failed: Dynamic GT frequency control and GT sleep states are now disabled.\n");
1275 			return -EIO;
1276 		}
1277 
1278 		xe_gt_warn(gt, "GuC PC excessive start time: %lldms",
1279 			   ktime_ms_delta(ktime_get(), earlier));
1280 	}
1281 
1282 	ret = pc_modify_defaults(pc);
1283 	if (ret)
1284 		return ret;
1285 
1286 	ret = pc_init_freqs(pc);
1287 	if (ret)
1288 		return ret;
1289 
1290 	ret = pc_set_mert_freq_cap(pc);
1291 	if (ret)
1292 		return ret;
1293 
1294 	if (xe->info.platform == XE_PVC) {
1295 		xe_guc_pc_gucrc_disable(pc);
1296 		return 0;
1297 	}
1298 
1299 	ret = pc_action_setup_gucrc(pc, GUCRC_FIRMWARE_CONTROL);
1300 	if (ret)
1301 		return ret;
1302 
1303 	/* Enable SLPC Optimized Strategy for compute */
1304 	ret = pc_action_set_strategy(pc, SLPC_OPTIMIZED_STRATEGY_COMPUTE);
1305 
1306 	/* Set cached value of power_profile */
1307 	ret = xe_guc_pc_set_power_profile(pc, power_profile_to_string(pc));
1308 	if (unlikely(ret))
1309 		xe_gt_err(gt, "Failed to set SLPC power profile: %pe\n", ERR_PTR(ret));
1310 
1311 	return ret;
1312 }
1313 
1314 /**
1315  * xe_guc_pc_stop - Stop GuC's Power Conservation component
1316  * @pc: Xe_GuC_PC instance
1317  */
1318 int xe_guc_pc_stop(struct xe_guc_pc *pc)
1319 {
1320 	struct xe_device *xe = pc_to_xe(pc);
1321 
1322 	if (xe->info.skip_guc_pc) {
1323 		xe_gt_idle_disable_c6(pc_to_gt(pc));
1324 		return 0;
1325 	}
1326 
1327 	mutex_lock(&pc->freq_lock);
1328 	pc->freq_ready = false;
1329 	mutex_unlock(&pc->freq_lock);
1330 
1331 	return 0;
1332 }
1333 
1334 /**
1335  * xe_guc_pc_fini_hw - Finalize GuC's Power Conservation component
1336  * @arg: opaque pointer that should point to Xe_GuC_PC instance
1337  */
1338 static void xe_guc_pc_fini_hw(void *arg)
1339 {
1340 	struct xe_guc_pc *pc = arg;
1341 	struct xe_device *xe = pc_to_xe(pc);
1342 
1343 	if (xe_device_wedged(xe))
1344 		return;
1345 
1346 	CLASS(xe_force_wake, fw_ref)(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL);
1347 	xe_guc_pc_gucrc_disable(pc);
1348 	XE_WARN_ON(xe_guc_pc_stop(pc));
1349 
1350 	/* Bind requested freq to mert_freq_cap before unload */
1351 	pc_set_cur_freq(pc, min(pc_max_freq_cap(pc), xe_guc_pc_get_rpe_freq(pc)));
1352 }
1353 
1354 /**
1355  * xe_guc_pc_init - Initialize GuC's Power Conservation component
1356  * @pc: Xe_GuC_PC instance
1357  */
1358 int xe_guc_pc_init(struct xe_guc_pc *pc)
1359 {
1360 	struct xe_gt *gt = pc_to_gt(pc);
1361 	struct xe_tile *tile = gt_to_tile(gt);
1362 	struct xe_device *xe = gt_to_xe(gt);
1363 	struct xe_bo *bo;
1364 	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
1365 	int err;
1366 
1367 	if (xe->info.skip_guc_pc)
1368 		return 0;
1369 
1370 	err = drmm_mutex_init(&xe->drm, &pc->freq_lock);
1371 	if (err)
1372 		return err;
1373 
1374 	bo = xe_managed_bo_create_pin_map(xe, tile, size,
1375 					  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
1376 					  XE_BO_FLAG_GGTT |
1377 					  XE_BO_FLAG_GGTT_INVALIDATE |
1378 					  XE_BO_FLAG_PINNED_NORESTORE);
1379 	if (IS_ERR(bo))
1380 		return PTR_ERR(bo);
1381 
1382 	pc->bo = bo;
1383 
1384 	pc->power_profile = SLPC_POWER_PROFILE_BASE;
1385 
1386 	return devm_add_action_or_reset(xe->drm.dev, xe_guc_pc_fini_hw, pc);
1387 }
1388 
1389 static const char *pc_get_state_string(struct xe_guc_pc *pc)
1390 {
1391 	switch (slpc_shared_data_read(pc, header.global_state)) {
1392 	case SLPC_GLOBAL_STATE_NOT_RUNNING:
1393 		return "not running";
1394 	case SLPC_GLOBAL_STATE_INITIALIZING:
1395 		return "initializing";
1396 	case SLPC_GLOBAL_STATE_RESETTING:
1397 		return "resetting";
1398 	case SLPC_GLOBAL_STATE_RUNNING:
1399 		return "running";
1400 	case SLPC_GLOBAL_STATE_SHUTTING_DOWN:
1401 		return "shutting down";
1402 	case SLPC_GLOBAL_STATE_ERROR:
1403 		return "error";
1404 	default:
1405 		return "unknown";
1406 	}
1407 }
1408 
1409 /**
1410  * xe_guc_pc_print - Print GuC's Power Conservation information for debug
1411  * @pc: Xe_GuC_PC instance
1412  * @p: drm_printer
1413  */
1414 void xe_guc_pc_print(struct xe_guc_pc *pc, struct drm_printer *p)
1415 {
1416 	drm_printf(p, "SLPC Shared Data Header:\n");
1417 	drm_printf(p, "\tSize: %x\n", slpc_shared_data_read(pc, header.size));
1418 	drm_printf(p, "\tGlobal State: %s\n", pc_get_state_string(pc));
1419 
1420 	if (pc_action_query_task_state(pc))
1421 		return;
1422 
1423 	drm_printf(p, "\nSLPC Tasks Status:\n");
1424 	drm_printf(p, "\tGTPERF enabled: %s\n",
1425 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1426 			      SLPC_GTPERF_TASK_ENABLED));
1427 	drm_printf(p, "\tDCC enabled: %s\n",
1428 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1429 			      SLPC_DCC_TASK_ENABLED));
1430 	drm_printf(p, "\tDCC in use: %s\n",
1431 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1432 			      SLPC_IN_DCC));
1433 	drm_printf(p, "\tBalancer enabled: %s\n",
1434 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1435 			      SLPC_BALANCER_ENABLED));
1436 	drm_printf(p, "\tIBC enabled: %s\n",
1437 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1438 			      SLPC_IBC_TASK_ENABLED));
1439 	drm_printf(p, "\tBalancer IA LMT enabled: %s\n",
1440 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1441 			      SLPC_BALANCER_IA_LMT_ENABLED));
1442 	drm_printf(p, "\tBalancer IA LMT active: %s\n",
1443 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1444 			      SLPC_BALANCER_IA_LMT_ACTIVE));
1445 }
1446