// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_guc_pc.h"

#include <linux/delay.h>

#include <drm/drm_managed.h>
#include <generated/xe_wa_oob.h>

#include "abi/guc_actions_slpc_abi.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_gt_idle.h"
#include "xe_gt_printk.h"
#include "xe_gt_types.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_pcode.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_wa.h"

#define MCHBAR_MIRROR_BASE_SNB	0x140000

#define RP_STATE_CAP		XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5998)
#define   RP0_MASK		REG_GENMASK(7, 0)
#define   RP1_MASK		REG_GENMASK(15, 8)
#define   RPN_MASK		REG_GENMASK(23, 16)

#define FREQ_INFO_REC	XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5ef0)
#define   RPE_MASK		REG_GENMASK(15, 8)
#define   RPA_MASK		REG_GENMASK(31, 16)

#define GT_PERF_STATUS		XE_REG(0x1381b4)
#define   CAGF_MASK	REG_GENMASK(19, 11)

#define GT_FREQUENCY_MULTIPLIER	50
#define GT_FREQUENCY_SCALER	3

#define LNL_MERT_FREQ_CAP	800
#define BMG_MERT_FREQ_CAP	2133

/**
 * DOC: GuC Power Conservation (PC)
 *
 * GuC Power Conservation (PC) supports multiple features for the most
 * efficient and performant use of the GT when GuC submission is enabled,
 * including frequency management, Render-C states management, and various
 * algorithms for power balancing.
 *
 * Single Loop Power Conservation (SLPC) is the name given to the suite of
 * connected power conservation features in the GuC firmware. The firmware
 * exposes a programming interface to the host for the control of SLPC.
 *
 * Frequency management:
 * =====================
 *
 * The Xe driver enables SLPC with all of its default features; the frequency
 * selection varies per platform.
 *
 * Render-C States:
 * ================
 *
 * Render-C states are also a GuC PC feature, and they are enabled in Xe on
 * all platforms.
 */
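/*
 * Illustration (hypothetical caller, not part of this file): a userspace
 * frequency request typically lands on one of the xe_guc_pc_set_*_freq()
 * helpers below, which translate it into a
 * GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST message over the CT channel:
 *
 *	err = xe_guc_pc_set_max_freq(pc, 1000);
 *	if (err == -EAGAIN)
 *		(GuC PC not ready yet, e.g. in the middle of a GT reset)
 */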

static struct xe_guc *pc_to_guc(struct xe_guc_pc *pc)
{
	return container_of(pc, struct xe_guc, pc);
}

static struct xe_guc_ct *pc_to_ct(struct xe_guc_pc *pc)
{
	return &pc_to_guc(pc)->ct;
}

static struct xe_gt *pc_to_gt(struct xe_guc_pc *pc)
{
	return guc_to_gt(pc_to_guc(pc));
}

static struct xe_device *pc_to_xe(struct xe_guc_pc *pc)
{
	return guc_to_xe(pc_to_guc(pc));
}

static struct iosys_map *pc_to_maps(struct xe_guc_pc *pc)
{
	return &pc->bo->vmap;
}

#define slpc_shared_data_read(pc_, field_) \
	xe_map_rd_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
			struct slpc_shared_data, field_)

#define slpc_shared_data_write(pc_, field_, val_) \
	xe_map_wr_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
			struct slpc_shared_data, field_, val_)

#define SLPC_EVENT(id, count) \
	(FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID, id) | \
	 FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, count))
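/*
 * For example, SLPC_EVENT(SLPC_EVENT_RESET, 2) packs the event id and the
 * argument count (here: two dwords, the GGTT address of the shared data and
 * a reserved zero) into the second u32 of the H2G request.
 */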

static int wait_for_pc_state(struct xe_guc_pc *pc,
			     enum slpc_global_state state)
{
	int timeout_us = 5000; /* roughly 5ms, but no need for precision */
	int slept, wait = 10;

	xe_device_assert_mem_access(pc_to_xe(pc));

	for (slept = 0; slept < timeout_us;) {
		if (slpc_shared_data_read(pc, header.global_state) == state)
			return 0;

		usleep_range(wait, wait << 1);
		slept += wait;
		wait <<= 1;
		if (slept + wait > timeout_us)
			wait = timeout_us - slept;
	}

	return -ETIMEDOUT;
}
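/*
 * The poll above backs off exponentially: it sleeps for ~10us, ~20us, ~40us
 * and so on, with the last interval clamped so the total sleep time stays
 * within the ~5ms budget before -ETIMEDOUT is returned.
 */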

static int pc_action_reset(struct xe_guc_pc *pc)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_RESET, 2),
		xe_bo_ggtt_addr(pc->bo),
		0,
	};
	int ret;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		xe_gt_err(pc_to_gt(pc), "GuC PC reset failed: %pe\n",
			  ERR_PTR(ret));

	return ret;
}

static int pc_action_query_task_state(struct xe_guc_pc *pc)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
		xe_bo_ggtt_addr(pc->bo),
		0,
	};
	int ret;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
		return -EAGAIN;

	/* Blocking here to ensure the results are ready before reading them */
	ret = xe_guc_ct_send_block(ct, action, ARRAY_SIZE(action));
	if (ret)
		xe_gt_err(pc_to_gt(pc), "GuC PC query task state failed: %pe\n",
			  ERR_PTR(ret));

	return ret;
}

static int pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
		id,
		value,
	};
	int ret;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
		return -EAGAIN;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		xe_gt_err(pc_to_gt(pc), "GuC PC set param[%u]=%u failed: %pe\n",
			  id, value, ERR_PTR(ret));

	return ret;
}

static int pc_action_unset_param(struct xe_guc_pc *pc, u8 id)
{
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1),
		id,
	};
	struct xe_guc_ct *ct = pc_to_ct(pc);
	int ret;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
		return -EAGAIN;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		xe_gt_err(pc_to_gt(pc), "GuC PC unset param failed: %pe\n",
			  ERR_PTR(ret));

	return ret;
}

static int pc_action_setup_gucrc(struct xe_guc_pc *pc, u32 mode)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_SETUP_PC_GUCRC,
		mode,
	};
	int ret;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		xe_gt_err(pc_to_gt(pc), "GuC RC enable mode=%u failed: %pe\n",
			  mode, ERR_PTR(ret));
	return ret;
}

static u32 decode_freq(u32 raw)
{
	return DIV_ROUND_CLOSEST(raw * GT_FREQUENCY_MULTIPLIER,
				 GT_FREQUENCY_SCALER);
}

static u32 encode_freq(u32 freq)
{
	return DIV_ROUND_CLOSEST(freq * GT_FREQUENCY_SCALER,
				 GT_FREQUENCY_MULTIPLIER);
}
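/*
 * Worked example of the two helpers above: frequencies are expressed in
 * hardware ratio units of 50/3 MHz (~16.67 MHz), so decode_freq(18) is
 * DIV_ROUND_CLOSEST(18 * 50, 3) = 300 MHz, and encode_freq(300) is
 * DIV_ROUND_CLOSEST(300 * 3, 50) = 18 again.
 */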

static u32 pc_get_min_freq(struct xe_guc_pc *pc)
{
	u32 freq;

	freq = FIELD_GET(SLPC_MIN_UNSLICE_FREQ_MASK,
			 slpc_shared_data_read(pc, task_state_data.freq));

	return decode_freq(freq);
}

static void pc_set_manual_rp_ctrl(struct xe_guc_pc *pc, bool enable)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 state = enable ? RPSWCTL_ENABLE : RPSWCTL_DISABLE;

	/* Allow/Disallow punit to process software freq requests */
	xe_mmio_write32(&gt->mmio, RP_CONTROL, state);
}

static void pc_set_cur_freq(struct xe_guc_pc *pc, u32 freq)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 rpnswreq;

	pc_set_manual_rp_ctrl(pc, true);

	/* Req freq is in units of 16.67 MHz (50/3 MHz) */
	rpnswreq = REG_FIELD_PREP(REQ_RATIO_MASK, encode_freq(freq));
	xe_mmio_write32(&gt->mmio, RPNSWREQ, rpnswreq);

	/* Sleep briefly to allow pcode to respond */
	usleep_range(100, 300);

	pc_set_manual_rp_ctrl(pc, false);
}

static int pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
{
	/*
	 * Let's only check for the RPn-RP0 range. If max < min,
	 * min becomes a fixed request.
	 */
	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
		return -EINVAL;

	/*
	 * GuC policy is to elevate the minimum frequency to the efficient
	 * level. Our goal is to have the admin's choice respected.
	 */
	pc_action_set_param(pc, SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
			    freq < pc->rpe_freq);

	return pc_action_set_param(pc,
				   SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
				   freq);
}
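/*
 * Illustrative numbers for the helper above: with RPn = 300 MHz,
 * RPe = 600 MHz and RP0 = 1200 MHz, pc_set_min_freq(pc, 400) passes the
 * range check and, because 400 < RPe, also asks the GuC to ignore the
 * efficient frequency, so the 400 MHz floor is honoured rather than being
 * raised to 600 MHz.
 */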

static int pc_get_max_freq(struct xe_guc_pc *pc)
{
	u32 freq;

	freq = FIELD_GET(SLPC_MAX_UNSLICE_FREQ_MASK,
			 slpc_shared_data_read(pc, task_state_data.freq));

	return decode_freq(freq);
}

static int pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
{
	/*
	 * Let's only check for the RPn-RP0 range. If max < min,
	 * min becomes a fixed request.
	 * Also, overclocking is not supported.
	 */
	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
		return -EINVAL;

	return pc_action_set_param(pc,
				   SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
				   freq);
}

static void mtl_update_rpa_value(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(&gt->mmio, MTL_MPA_FREQUENCY);
	else
		reg = xe_mmio_read32(&gt->mmio, MTL_GT_RPA_FREQUENCY);

	pc->rpa_freq = decode_freq(REG_FIELD_GET(MTL_RPA_MASK, reg));
}

static void mtl_update_rpe_value(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(&gt->mmio, MTL_MPE_FREQUENCY);
	else
		reg = xe_mmio_read32(&gt->mmio, MTL_GT_RPE_FREQUENCY);

	pc->rpe_freq = decode_freq(REG_FIELD_GET(MTL_RPE_MASK, reg));
}

static void tgl_update_rpa_value(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 reg;

	/*
	 * For PVC we still need to use the fused value as the approximation
	 * for RPa. For platforms other than PVC we get the resolved RPa
	 * directly from PCODE at a different register.
	 */
	if (xe->info.platform == XE_PVC)
		reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
	else
		reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC);

	pc->rpa_freq = REG_FIELD_GET(RPA_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}

static void tgl_update_rpe_value(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 reg;

	/*
	 * For PVC we still need to use fused RP1 as the approximation for
	 * RPe. For platforms other than PVC we get the resolved RPe directly
	 * from PCODE at a different register.
	 */
	if (xe->info.platform == XE_PVC)
		reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
	else
		reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC);

	pc->rpe_freq = REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}

static void pc_update_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);

	if (GRAPHICS_VERx100(xe) >= 1270) {
		mtl_update_rpa_value(pc);
		mtl_update_rpe_value(pc);
	} else {
		tgl_update_rpa_value(pc);
		tgl_update_rpe_value(pc);
	}

	/*
	 * RPe is decided at runtime by PCODE. In the rare case where that's
	 * smaller than the fused min, we will trust the PCODE and use that
	 * as our minimum one.
	 */
	pc->rpn_freq = min(pc->rpn_freq, pc->rpe_freq);
}
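/*
 * Example of the clamp above (illustrative numbers): if PCODE resolves RPe
 * to 400 MHz while the fused RPn reads 450 MHz, rpn_freq is lowered to
 * 400 MHz so the advertised minimum never exceeds the efficient frequency.
 */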

/**
 * xe_guc_pc_get_act_freq - Get the actual running frequency
 * @pc: The GuC PC
 *
 * Returns: The actual running frequency, which may be 0 if the GT is in a
 * Render-C sleep state (RC6).
 */
u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 freq;

	/* When in RC6, the actual frequency reported will be 0. */
	if (GRAPHICS_VERx100(xe) >= 1270) {
		freq = xe_mmio_read32(&gt->mmio, MTL_MIRROR_TARGET_WP1);
		freq = REG_FIELD_GET(MTL_CAGF_MASK, freq);
	} else {
		freq = xe_mmio_read32(&gt->mmio, GT_PERF_STATUS);
		freq = REG_FIELD_GET(CAGF_MASK, freq);
	}

	freq = decode_freq(freq);

	return freq;
}

/**
 * xe_guc_pc_get_cur_freq - Get the currently requested frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -ETIMEDOUT if the GT forcewake could not be taken.
 */
int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq)
{
	struct xe_gt *gt = pc_to_gt(pc);
	unsigned int fw_ref;

	/*
	 * GuC SLPC plays with the cur freq request when GuCRC is enabled.
	 * Block RC6 for a more reliable read.
	 */
	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT)) {
		xe_force_wake_put(gt_to_fw(gt), fw_ref);
		return -ETIMEDOUT;
	}

	*freq = xe_mmio_read32(&gt->mmio, RPNSWREQ);

	*freq = REG_FIELD_GET(REQ_RATIO_MASK, *freq);
	*freq = decode_freq(*freq);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	return 0;
}
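/*
 * Illustrative use (hypothetical debug code, not part of this file):
 *
 *	u32 freq;
 *	int err = xe_guc_pc_get_cur_freq(pc, &freq);
 *
 *	if (!err)
 *		drm_printf(p, "requested: %u MHz\n", freq);
 */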

/**
 * xe_guc_pc_get_rp0_freq - Get the RP0 freq
 * @pc: The GuC PC
 *
 * Returns: RP0 freq.
 */
u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc)
{
	return pc->rp0_freq;
}

/**
 * xe_guc_pc_get_rpa_freq - Get the RPa freq
 * @pc: The GuC PC
 *
 * Returns: RPa freq.
 */
u32 xe_guc_pc_get_rpa_freq(struct xe_guc_pc *pc)
{
	pc_update_rp_values(pc);

	return pc->rpa_freq;
}

/**
 * xe_guc_pc_get_rpe_freq - Get the RPe freq
 * @pc: The GuC PC
 *
 * Returns: RPe freq.
 */
u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc)
{
	pc_update_rp_values(pc);

	return pc->rpe_freq;
}

/**
 * xe_guc_pc_get_rpn_freq - Get the RPn freq
 * @pc: The GuC PC
 *
 * Returns: RPn freq.
 */
u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc)
{
	return pc->rpn_freq;
}

/**
 * xe_guc_pc_get_min_freq - Get the min operational frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
 */
int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq)
{
	int ret;

	xe_device_assert_mem_access(pc_to_xe(pc));

	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_action_query_task_state(pc);
	if (ret)
		goto out;

	*freq = pc_get_min_freq(pc);

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

/**
 * xe_guc_pc_set_min_freq - Set the minimal operational frequency
 * @pc: The GuC PC
 * @freq: The selected minimal frequency
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset),
 *         -EINVAL if value out of bounds.
 */
int xe_guc_pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
{
	int ret;

	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_set_min_freq(pc, freq);
	if (ret)
		goto out;

	pc->user_requested_min = freq;

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

/**
 * xe_guc_pc_get_max_freq - Get the maximum operational frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
 */
int xe_guc_pc_get_max_freq(struct xe_guc_pc *pc, u32 *freq)
{
	int ret;

	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_action_query_task_state(pc);
	if (ret)
		goto out;

	*freq = pc_get_max_freq(pc);

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

/**
 * xe_guc_pc_set_max_freq - Set the maximum operational frequency
 * @pc: The GuC PC
 * @freq: The selected maximum frequency value
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset),
 *         -EINVAL if value out of bounds.
 */
int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
{
	int ret;

	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_set_max_freq(pc, freq);
	if (ret)
		goto out;

	pc->user_requested_max = freq;

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}
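/*
 * Illustrative use (hypothetical sysfs store handler, not part of this
 * file): parse the user value and let the helper validate the range:
 *
 *	u32 freq;
 *
 *	if (kstrtou32(buf, 0, &freq))
 *		return -EINVAL;
 *	err = xe_guc_pc_set_max_freq(pc, freq);
 *
 * where err is -EINVAL if freq falls outside the RPn..RP0 range.
 */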

/**
 * xe_guc_pc_c_status - get the current GT C state
 * @pc: Xe_GuC_PC instance
 *
 * Return: GT_IDLE_C0, GT_IDLE_C6 or GT_IDLE_UNKNOWN.
 */
enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg, gt_c_state;

	if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
		reg = xe_mmio_read32(&gt->mmio, MTL_MIRROR_TARGET_WP1);
		gt_c_state = REG_FIELD_GET(MTL_CC_MASK, reg);
	} else {
		reg = xe_mmio_read32(&gt->mmio, GT_CORE_STATUS);
		gt_c_state = REG_FIELD_GET(RCN_MASK, reg);
	}

	switch (gt_c_state) {
	case GT_C6:
		return GT_IDLE_C6;
	case GT_C0:
		return GT_IDLE_C0;
	default:
		return GT_IDLE_UNKNOWN;
	}
}

/**
 * xe_guc_pc_rc6_residency - RC6 residency counter
 * @pc: Xe_GuC_PC instance
 *
 * Return: the raw GT_GFX_RC6 counter value.
 */
u64 xe_guc_pc_rc6_residency(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	reg = xe_mmio_read32(&gt->mmio, GT_GFX_RC6);

	return reg;
}

/**
 * xe_guc_pc_mc6_residency - MC6 residency counter
 * @pc: Xe_GuC_PC instance
 *
 * Return: the raw MTL_MEDIA_MC6 counter value.
 */
u64 xe_guc_pc_mc6_residency(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u64 reg;

	reg = xe_mmio_read32(&gt->mmio, MTL_MEDIA_MC6);

	return reg;
}

static void mtl_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	xe_device_assert_mem_access(pc_to_xe(pc));

	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(&gt->mmio, MTL_MEDIAP_STATE_CAP);
	else
		reg = xe_mmio_read32(&gt->mmio, MTL_RP_STATE_CAP);

	pc->rp0_freq = decode_freq(REG_FIELD_GET(MTL_RP0_CAP_MASK, reg));
	pc->rpn_freq = decode_freq(REG_FIELD_GET(MTL_RPN_CAP_MASK, reg));
}

static void tgl_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 reg;

	xe_device_assert_mem_access(pc_to_xe(pc));

	if (xe->info.platform == XE_PVC)
		reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
	else
		reg = xe_mmio_read32(&gt->mmio, RP_STATE_CAP);
	pc->rp0_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
	pc->rpn_freq = REG_FIELD_GET(RPN_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}

static void pc_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);

	if (GRAPHICS_VERx100(xe) >= 1270)
		mtl_init_fused_rp_values(pc);
	else
		tgl_init_fused_rp_values(pc);
}

static u32 pc_max_freq_cap(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	if (XE_WA(gt, 22019338487)) {
		if (xe_gt_is_media_type(gt))
			return min(LNL_MERT_FREQ_CAP, pc->rp0_freq);
		else
			return min(BMG_MERT_FREQ_CAP, pc->rp0_freq);
	} else {
		return pc->rp0_freq;
	}
}
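/*
 * Illustrative effect of the workaround cap above: with WA 22019338487
 * active and (hypothetical numbers) rp0_freq fused at 1950 MHz, a media GT
 * is capped at LNL_MERT_FREQ_CAP (800 MHz), while a primary GT is capped at
 * BMG_MERT_FREQ_CAP (2133 MHz) and so stays at 1950 MHz.
 */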

/**
 * xe_guc_pc_raise_unslice - Request a higher GT frequency to allow faster
 * GuC load times
 * @pc: Xe_GuC_PC instance
 */
void xe_guc_pc_raise_unslice(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
	pc_set_cur_freq(pc, pc_max_freq_cap(pc));
}

/**
 * xe_guc_pc_init_early - Initialize RPx values
 * @pc: Xe_GuC_PC instance
 */
void xe_guc_pc_init_early(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
	pc_init_fused_rp_values(pc);
}

static int pc_adjust_freq_bounds(struct xe_guc_pc *pc)
{
	int ret;

	lockdep_assert_held(&pc->freq_lock);

	ret = pc_action_query_task_state(pc);
	if (ret)
		goto out;

	/*
	 * GuC defaults to some RPmax that is not actually achievable without
	 * overclocking. Let's adjust it to the hardware RP0, which is the
	 * regular maximum.
	 */
	if (pc_get_max_freq(pc) > pc->rp0_freq) {
		ret = pc_set_max_freq(pc, pc->rp0_freq);
		if (ret)
			goto out;
	}

	/*
	 * The same thing happens on server platforms, where the min is
	 * listed as RPmax.
	 */
	if (pc_get_min_freq(pc) > pc->rp0_freq)
		ret = pc_set_min_freq(pc, pc->rp0_freq);

out:
	return ret;
}

static int pc_adjust_requested_freq(struct xe_guc_pc *pc)
{
	int ret = 0;

	lockdep_assert_held(&pc->freq_lock);

	if (pc->user_requested_min != 0) {
		ret = pc_set_min_freq(pc, pc->user_requested_min);
		if (ret)
			return ret;
	}

	if (pc->user_requested_max != 0) {
		ret = pc_set_max_freq(pc, pc->user_requested_max);
		if (ret)
			return ret;
	}

	return ret;
}

static int pc_set_mert_freq_cap(struct xe_guc_pc *pc)
{
	int ret = 0;

	if (XE_WA(pc_to_gt(pc), 22019338487)) {
		/* Get the updated min/max and stash them. */
		ret = xe_guc_pc_get_min_freq(pc, &pc->stashed_min_freq);
		if (!ret)
			ret = xe_guc_pc_get_max_freq(pc, &pc->stashed_max_freq);
		if (ret)
			return ret;

		/*
		 * Ensure min and max are bound by MERT_FREQ_CAP until the
		 * driver has finished loading.
		 */
		mutex_lock(&pc->freq_lock);
		ret = pc_set_min_freq(pc, min(pc->rpe_freq, pc_max_freq_cap(pc)));
		if (!ret)
			ret = pc_set_max_freq(pc, min(pc->rp0_freq, pc_max_freq_cap(pc)));
		mutex_unlock(&pc->freq_lock);
	}

	return ret;
}

/**
 * xe_guc_pc_restore_stashed_freq - Set min/max back to stashed values
 * @pc: The GuC PC
 *
 * Returns: 0 on success,
 *          error code on failure
 */
int xe_guc_pc_restore_stashed_freq(struct xe_guc_pc *pc)
{
	int ret = 0;

	if (IS_SRIOV_VF(pc_to_xe(pc)) || pc_to_xe(pc)->info.skip_guc_pc)
		return 0;

	mutex_lock(&pc->freq_lock);
	ret = pc_set_max_freq(pc, pc->stashed_max_freq);
	if (!ret)
		ret = pc_set_min_freq(pc, pc->stashed_min_freq);
	mutex_unlock(&pc->freq_lock);

	return ret;
}

/**
 * xe_guc_pc_gucrc_disable - Disable GuC RC
 * @pc: Xe_GuC_PC instance
 *
 * Disables GuC RC by taking control of RC6 back from GuC.
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);
	struct xe_gt *gt = pc_to_gt(pc);
	unsigned int fw_ref;
	int ret = 0;

	if (xe->info.skip_guc_pc)
		return 0;

	ret = pc_action_setup_gucrc(pc, GUCRC_HOST_CONTROL);
	if (ret)
		return ret;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
		xe_force_wake_put(gt_to_fw(gt), fw_ref);
		return -ETIMEDOUT;
	}

	xe_gt_idle_disable_c6(gt);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);

	return 0;
}

/**
 * xe_guc_pc_override_gucrc_mode - override GUCRC mode
 * @pc: Xe_GuC_PC instance
 * @mode: new value of the mode.
 *
 * Return: 0 on success, negative error code on error
 */
int xe_guc_pc_override_gucrc_mode(struct xe_guc_pc *pc, enum slpc_gucrc_mode mode)
{
	int ret;

	xe_pm_runtime_get(pc_to_xe(pc));
	ret = pc_action_set_param(pc, SLPC_PARAM_PWRGATE_RC_MODE, mode);
	xe_pm_runtime_put(pc_to_xe(pc));

	return ret;
}

/**
 * xe_guc_pc_unset_gucrc_mode - unset GUCRC mode override
 * @pc: Xe_GuC_PC instance
 *
 * Return: 0 on success, negative error code on error
 */
int xe_guc_pc_unset_gucrc_mode(struct xe_guc_pc *pc)
{
	int ret;

	xe_pm_runtime_get(pc_to_xe(pc));
	ret = pc_action_unset_param(pc, SLPC_PARAM_PWRGATE_RC_MODE);
	xe_pm_runtime_put(pc_to_xe(pc));

	return ret;
}

static void pc_init_pcode_freq(struct xe_guc_pc *pc)
{
	u32 min = DIV_ROUND_CLOSEST(pc->rpn_freq, GT_FREQUENCY_MULTIPLIER);
	u32 max = DIV_ROUND_CLOSEST(pc->rp0_freq, GT_FREQUENCY_MULTIPLIER);

	XE_WARN_ON(xe_pcode_init_min_freq_table(gt_to_tile(pc_to_gt(pc)), min, max));
}
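/*
 * Note the different unit here: the pcode min-freq table takes frequencies
 * in units of GT_FREQUENCY_MULTIPLIER (50 MHz), not 50/3 MHz; e.g. an RPn
 * of 300 MHz is passed to pcode as DIV_ROUND_CLOSEST(300, 50) = 6.
 */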

static int pc_init_freqs(struct xe_guc_pc *pc)
{
	int ret;

	mutex_lock(&pc->freq_lock);

	ret = pc_adjust_freq_bounds(pc);
	if (ret)
		goto out;

	ret = pc_adjust_requested_freq(pc);
	if (ret)
		goto out;

	pc_update_rp_values(pc);

	pc_init_pcode_freq(pc);

	/*
	 * The frequencies are only really ready for use once the
	 * user-requested ones have been restored.
	 */
	pc->freq_ready = true;

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

/**
 * xe_guc_pc_start - Start GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_pc_start(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);
	struct xe_gt *gt = pc_to_gt(pc);
	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
	unsigned int fw_ref;
	int ret;

	xe_gt_assert(gt, xe_device_uc_enabled(xe));

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT)) {
		xe_force_wake_put(gt_to_fw(gt), fw_ref);
		return -ETIMEDOUT;
	}

	if (xe->info.skip_guc_pc) {
		if (xe->info.platform != XE_PVC)
			xe_gt_idle_enable_c6(gt);

		/* Request max possible since dynamic freq mgmt is not enabled */
		pc_set_cur_freq(pc, UINT_MAX);

		ret = 0;
		goto out;
	}

	memset(pc->bo->vmap.vaddr, 0, size);
	slpc_shared_data_write(pc, header.size, size);

	ret = pc_action_reset(pc);
	if (ret)
		goto out;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING)) {
		xe_gt_err(gt, "GuC PC Start failed\n");
		ret = -EIO;
		goto out;
	}

	ret = pc_init_freqs(pc);
	if (ret)
		goto out;

	ret = pc_set_mert_freq_cap(pc);
	if (ret)
		goto out;

	if (xe->info.platform == XE_PVC) {
		xe_guc_pc_gucrc_disable(pc);
		ret = 0;
		goto out;
	}

	ret = pc_action_setup_gucrc(pc, GUCRC_FIRMWARE_CONTROL);

out:
	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	return ret;
}
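/*
 * The start handshake above is observable through the shared page:
 * pc_action_reset() posts SLPC_EVENT_RESET with the GGTT address of pc->bo,
 * and the firmware acknowledges by updating header.global_state, which the
 * host reads through the CPU mapping. E.g. (illustrative debug snippet, not
 * part of the driver):
 *
 *	u32 state = slpc_shared_data_read(pc, header.global_state);
 *	xe_gt_dbg(pc_to_gt(pc), "SLPC global state: %u\n", state);
 */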

/**
 * xe_guc_pc_stop - Stop GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_pc_stop(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);

	if (xe->info.skip_guc_pc) {
		xe_gt_idle_disable_c6(pc_to_gt(pc));
		return 0;
	}

	mutex_lock(&pc->freq_lock);
	pc->freq_ready = false;
	mutex_unlock(&pc->freq_lock);

	return 0;
}

/**
 * xe_guc_pc_fini_hw - Finalize GuC's Power Conservation component
 * @arg: opaque pointer that should point to Xe_GuC_PC instance
 */
static void xe_guc_pc_fini_hw(void *arg)
{
	struct xe_guc_pc *pc = arg;
	struct xe_device *xe = pc_to_xe(pc);
	unsigned int fw_ref;

	if (xe_device_wedged(xe))
		return;

	fw_ref = xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL);
	xe_guc_pc_gucrc_disable(pc);
	XE_WARN_ON(xe_guc_pc_stop(pc));

	/* Bind requested freq to mert_freq_cap before unload */
	pc_set_cur_freq(pc, min(pc_max_freq_cap(pc), pc->rpe_freq));

	xe_force_wake_put(gt_to_fw(pc_to_gt(pc)), fw_ref);
}

/**
 * xe_guc_pc_init - Initialize GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_pc_init(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_bo *bo;
	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
	int err;

	if (xe->info.skip_guc_pc)
		return 0;

	err = drmm_mutex_init(&xe->drm, &pc->freq_lock);
	if (err)
		return err;

	bo = xe_managed_bo_create_pin_map(xe, tile, size,
					  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
					  XE_BO_FLAG_GGTT |
					  XE_BO_FLAG_GGTT_INVALIDATE);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	pc->bo = bo;

	return devm_add_action_or_reset(xe->drm.dev, xe_guc_pc_fini_hw, pc);
}