// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_guc_pc.h"

#include <linux/delay.h>

#include <drm/drm_managed.h>

#include "abi/guc_actions_abi.h"
#include "abi/guc_actions_slpc_abi.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_idle.h"
#include "xe_gt_sysfs.h"
#include "xe_gt_types.h"
#include "xe_guc_ct.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_pcode.h"

#define MCHBAR_MIRROR_BASE_SNB	0x140000

#define RP_STATE_CAP		XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5998)
#define   RP0_MASK		REG_GENMASK(7, 0)
#define   RP1_MASK		REG_GENMASK(15, 8)
#define   RPN_MASK		REG_GENMASK(23, 16)

#define FREQ_INFO_REC	XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5ef0)
#define   RPE_MASK		REG_GENMASK(15, 8)

#define GT_PERF_STATUS		XE_REG(0x1381b4)
#define   CAGF_MASK	REG_GENMASK(19, 11)

#define GT_FREQUENCY_MULTIPLIER	50
#define GT_FREQUENCY_SCALER	3

/**
 * DOC: GuC Power Conservation (PC)
 *
 * GuC Power Conservation (PC) supports multiple features for the most
 * efficient and performant use of the GT when GuC submission is enabled,
 * including frequency management, Render-C states management, and various
 * algorithms for power balancing.
 *
 * Single Loop Power Conservation (SLPC) is the name given to the suite of
 * connected power conservation features in the GuC firmware. The firmware
 * exposes a programming interface to the host for the control of SLPC.
 *
 * Frequency management:
 * =====================
 *
 * The Xe driver enables SLPC with all of its default features and frequency
 * selection, which varies per platform.
 *
 * Render-C States:
 * ================
 *
 * Render-C state management is also a GuC PC feature that is now enabled in
 * Xe for all platforms.
 *
 */

static struct xe_guc *
pc_to_guc(struct xe_guc_pc *pc)
{
	return container_of(pc, struct xe_guc, pc);
}

static struct xe_device *
pc_to_xe(struct xe_guc_pc *pc)
{
	struct xe_guc *guc = pc_to_guc(pc);
	struct xe_gt *gt = container_of(guc, struct xe_gt, uc.guc);

	return gt_to_xe(gt);
}

static struct xe_gt *
pc_to_gt(struct xe_guc_pc *pc)
{
	return container_of(pc, struct xe_gt, uc.guc.pc);
}

static struct iosys_map *
pc_to_maps(struct xe_guc_pc *pc)
{
	return &pc->bo->vmap;
}

#define slpc_shared_data_read(pc_, field_) \
	xe_map_rd_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
			struct slpc_shared_data, field_)

#define slpc_shared_data_write(pc_, field_, val_) \
	xe_map_wr_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
			struct slpc_shared_data, field_, val_)

#define SLPC_EVENT(id, count) \
	(FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID, id) | \
	 FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, count))
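
/*
 * For example, the pc_action_*() helpers below send
 * GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST H2G messages whose second dword is
 * built with SLPC_EVENT(): SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2) packs
 * the event id together with an argument count of 2, with the parameter id
 * and value following as the remaining dwords of the action array.
 */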

static int wait_for_pc_state(struct xe_guc_pc *pc,
			     enum slpc_global_state state)
{
	int timeout_us = 5000; /* roughly 5ms, but no need for precision */
	int slept, wait = 10;

	xe_device_assert_mem_access(pc_to_xe(pc));

	for (slept = 0; slept < timeout_us;) {
		if (slpc_shared_data_read(pc, header.global_state) == state)
			return 0;

		usleep_range(wait, wait << 1);
		slept += wait;
		wait <<= 1;
		if (slept + wait > timeout_us)
			wait = timeout_us - slept;
	}

	return -ETIMEDOUT;
}
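
/*
 * The poll above backs off exponentially: it sleeps at least 10us, then
 * 20us, 40us, and so on, clamping the final sleep so that the accounted
 * usleep_range() lower bounds never exceed the 5000us budget
 * (10 + 20 + ... + 1280 + 2450 = 5000).
 */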

static int pc_action_reset(struct xe_guc_pc *pc)
{
	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
	int ret;
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_RESET, 2),
		xe_bo_ggtt_addr(pc->bo),
		0,
	};

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		drm_err(&pc_to_xe(pc)->drm, "GuC PC reset: %pe", ERR_PTR(ret));

	return ret;
}

static int pc_action_query_task_state(struct xe_guc_pc *pc)
{
	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
	int ret;
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
		xe_bo_ggtt_addr(pc->bo),
		0,
	};

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
		return -EAGAIN;

	/* Blocking here to ensure the results are ready before reading them */
	ret = xe_guc_ct_send_block(ct, action, ARRAY_SIZE(action));
	if (ret)
		drm_err(&pc_to_xe(pc)->drm,
			"GuC PC query task state failed: %pe", ERR_PTR(ret));

	return ret;
}

static int pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value)
{
	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
	int ret;
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
		id,
		value,
	};

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
		return -EAGAIN;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		drm_err(&pc_to_xe(pc)->drm, "GuC PC set param failed: %pe",
			ERR_PTR(ret));

	return ret;
}

static int pc_action_setup_gucrc(struct xe_guc_pc *pc, u32 mode)
{
	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
	u32 action[] = {
		XE_GUC_ACTION_SETUP_PC_GUCRC,
		mode,
	};
	int ret;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		drm_err(&pc_to_xe(pc)->drm, "GuC RC enable failed: %pe",
			ERR_PTR(ret));
	return ret;
}

static u32 decode_freq(u32 raw)
{
	return DIV_ROUND_CLOSEST(raw * GT_FREQUENCY_MULTIPLIER,
				 GT_FREQUENCY_SCALER);
}

static u32 encode_freq(u32 freq)
{
	return DIV_ROUND_CLOSEST(freq * GT_FREQUENCY_SCALER,
				 GT_FREQUENCY_MULTIPLIER);
}
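
/*
 * The ratio registers count frequency in units of
 * GT_FREQUENCY_MULTIPLIER / GT_FREQUENCY_SCALER = 50/3 MHz (~16.67 MHz).
 * For example, decode_freq(18) = DIV_ROUND_CLOSEST(18 * 50, 3) = 300 MHz,
 * and encode_freq(300) = DIV_ROUND_CLOSEST(300 * 3, 50) = 18 again.
 */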

static u32 pc_get_min_freq(struct xe_guc_pc *pc)
{
	u32 freq;

	freq = FIELD_GET(SLPC_MIN_UNSLICE_FREQ_MASK,
			 slpc_shared_data_read(pc, task_state_data.freq));

	return decode_freq(freq);
}

static void pc_set_manual_rp_ctrl(struct xe_guc_pc *pc, bool enable)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 state = enable ? RPSWCTL_ENABLE : RPSWCTL_DISABLE;

	/* Allow/Disallow punit to process software freq requests */
	xe_mmio_write32(gt, RP_CONTROL, state);
}

static void pc_set_cur_freq(struct xe_guc_pc *pc, u32 freq)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 rpnswreq;

	pc_set_manual_rp_ctrl(pc, true);

	/* Req freq is in units of 16.66 MHz */
	rpnswreq = REG_FIELD_PREP(REQ_RATIO_MASK, encode_freq(freq));
	xe_mmio_write32(gt, RPNSWREQ, rpnswreq);

	/* Sleep for a small time to allow pcode to respond */
	usleep_range(100, 300);

	pc_set_manual_rp_ctrl(pc, false);
}

static int pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
{
	/*
	 * Let's only check for the rpn-rp0 range. If max < min,
	 * min becomes a fixed request.
	 */
	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
		return -EINVAL;

	/*
	 * GuC policy is to elevate minimum frequency to the efficient level.
	 * Our goal is to have the admin choices respected.
	 */
	pc_action_set_param(pc, SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
			    freq < pc->rpe_freq);

	return pc_action_set_param(pc,
				   SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
				   freq);
}
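
/*
 * A worked example of the policy above, with illustrative values only:
 * given RPn = 300 MHz, RPe = 1100 MHz and RP0 = 1800 MHz, a requested min
 * of 500 MHz sets SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY so that SLPC does
 * not elevate the floor back to RPe, while a requested min of 1200 MHz
 * clears it and lets the efficient-frequency policy operate.
 */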

static int pc_get_max_freq(struct xe_guc_pc *pc)
{
	u32 freq;

	freq = FIELD_GET(SLPC_MAX_UNSLICE_FREQ_MASK,
			 slpc_shared_data_read(pc, task_state_data.freq));

	return decode_freq(freq);
}

static int pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
{
	/*
	 * Let's only check for the rpn-rp0 range. If max < min,
	 * min becomes a fixed request.
	 * Also, overclocking is not supported.
	 */
	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
		return -EINVAL;

	return pc_action_set_param(pc,
				   SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
				   freq);
}

static void mtl_update_rpe_value(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(gt, MTL_MPE_FREQUENCY);
	else
		reg = xe_mmio_read32(gt, MTL_GT_RPE_FREQUENCY);

	pc->rpe_freq = decode_freq(REG_FIELD_GET(MTL_RPE_MASK, reg));
}

static void tgl_update_rpe_value(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 reg;

	/*
	 * For PVC we still need to use fused RP1 as the approximation for RPe.
	 * For platforms other than PVC, we get the resolved RPe directly from
	 * PCODE at a different register.
	 */
	if (xe->info.platform == XE_PVC)
		reg = xe_mmio_read32(gt, PVC_RP_STATE_CAP);
	else
		reg = xe_mmio_read32(gt, FREQ_INFO_REC);

	pc->rpe_freq = REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}

static void pc_update_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);

	if (GRAPHICS_VERx100(xe) >= 1270)
		mtl_update_rpe_value(pc);
	else
		tgl_update_rpe_value(pc);

	/*
	 * RPe is decided at runtime by PCODE. In the rare case where that's
	 * smaller than the fused min, we will trust PCODE and use that as
	 * our minimum.
	 */
	pc->rpn_freq = min(pc->rpn_freq, pc->rpe_freq);
}
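
/*
 * Illustrative numbers (not taken from any real fusing): should PCODE
 * resolve RPe to 300 MHz on a part whose fused RPn is 350 MHz, the min()
 * above lowers rpn_freq to 300 MHz, trusting PCODE for the minimum.
 */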

/**
 * xe_guc_pc_get_act_freq - Get Actual running frequency
 * @pc: The GuC PC
 *
 * Returns: The actual running frequency, which might be 0 if the GT is in a
 * Render-C sleep state (RC6).
 */
u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 freq;

	/* When in RC6, actual frequency reported will be 0. */
	if (GRAPHICS_VERx100(xe) >= 1270) {
		freq = xe_mmio_read32(gt, MTL_MIRROR_TARGET_WP1);
		freq = REG_FIELD_GET(MTL_CAGF_MASK, freq);
	} else {
		freq = xe_mmio_read32(gt, GT_PERF_STATUS);
		freq = REG_FIELD_GET(CAGF_MASK, freq);
	}

	freq = decode_freq(freq);

	return freq;
}

/**
 * xe_guc_pc_get_cur_freq - Get Current requested frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
 */
int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq)
{
	struct xe_gt *gt = pc_to_gt(pc);
	int ret;

	/*
	 * GuC SLPC plays with cur freq request when GuCRC is enabled.
	 * Block RC6 for a more reliable read.
	 */
	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (ret)
		return ret;

	*freq = xe_mmio_read32(gt, RPNSWREQ);

	*freq = REG_FIELD_GET(REQ_RATIO_MASK, *freq);
	*freq = decode_freq(*freq);

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
	return 0;
}

/**
 * xe_guc_pc_get_rp0_freq - Get the RP0 freq
 * @pc: The GuC PC
 *
 * Returns: RP0 freq.
 */
u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc)
{
	return pc->rp0_freq;
}

/**
 * xe_guc_pc_get_rpe_freq - Get the RPe freq
 * @pc: The GuC PC
 *
 * Returns: RPe freq.
 */
u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc)
{
	pc_update_rp_values(pc);

	return pc->rpe_freq;
}

/**
 * xe_guc_pc_get_rpn_freq - Get the RPn freq
 * @pc: The GuC PC
 *
 * Returns: RPn freq.
 */
u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc)
{
	return pc->rpn_freq;
}

/**
 * xe_guc_pc_get_min_freq - Get the min operational frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
 */
int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq)
{
	struct xe_gt *gt = pc_to_gt(pc);
	int ret;

	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	/*
	 * GuC SLPC plays with min freq request when GuCRC is enabled.
	 * Block RC6 for a more reliable read.
	 */
	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (ret)
		goto out;

	ret = pc_action_query_task_state(pc);
	if (ret)
		goto fw;

	*freq = pc_get_min_freq(pc);

fw:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

/**
 * xe_guc_pc_set_min_freq - Set the minimal operational frequency
 * @pc: The GuC PC
 * @freq: The selected minimal frequency
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset),
 *         -EINVAL if value out of bounds.
 */
int xe_guc_pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
{
	int ret;

	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_set_min_freq(pc, freq);
	if (ret)
		goto out;

	pc->user_requested_min = freq;

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

/**
 * xe_guc_pc_get_max_freq - Get Maximum operational frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
 */
int xe_guc_pc_get_max_freq(struct xe_guc_pc *pc, u32 *freq)
{
	int ret;

	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_action_query_task_state(pc);
	if (ret)
		goto out;

	*freq = pc_get_max_freq(pc);

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

/**
 * xe_guc_pc_set_max_freq - Set the maximum operational frequency
 * @pc: The GuC PC
 * @freq: The selected maximum frequency value
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset),
 *         -EINVAL if value out of bounds.
 */
int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
{
	int ret;

	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_set_max_freq(pc, freq);
	if (ret)
		goto out;

	pc->user_requested_max = freq;

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

/**
 * xe_guc_pc_c_status - get the current GT C state
 * @pc: Xe_GuC_PC instance
 */
enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg, gt_c_state;

	if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
		reg = xe_mmio_read32(gt, MTL_MIRROR_TARGET_WP1);
		gt_c_state = REG_FIELD_GET(MTL_CC_MASK, reg);
	} else {
		reg = xe_mmio_read32(gt, GT_CORE_STATUS);
		gt_c_state = REG_FIELD_GET(RCN_MASK, reg);
	}

	switch (gt_c_state) {
	case GT_C6:
		return GT_IDLE_C6;
	case GT_C0:
		return GT_IDLE_C0;
	default:
		return GT_IDLE_UNKNOWN;
	}
}

/**
 * xe_guc_pc_rc6_residency - rc6 residency counter
 * @pc: Xe_GuC_PC instance
 */
u64 xe_guc_pc_rc6_residency(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	reg = xe_mmio_read32(gt, GT_GFX_RC6);

	return reg;
}

/**
 * xe_guc_pc_mc6_residency - mc6 residency counter
 * @pc: Xe_GuC_PC instance
 */
u64 xe_guc_pc_mc6_residency(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u64 reg;

	reg = xe_mmio_read32(gt, MTL_MEDIA_MC6);

	return reg;
}

static void mtl_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	xe_device_assert_mem_access(pc_to_xe(pc));

	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(gt, MTL_MEDIAP_STATE_CAP);
	else
		reg = xe_mmio_read32(gt, MTL_RP_STATE_CAP);

	pc->rp0_freq = decode_freq(REG_FIELD_GET(MTL_RP0_CAP_MASK, reg));

	pc->rpn_freq = decode_freq(REG_FIELD_GET(MTL_RPN_CAP_MASK, reg));
}

static void tgl_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 reg;

	xe_device_assert_mem_access(pc_to_xe(pc));

	if (xe->info.platform == XE_PVC)
		reg = xe_mmio_read32(gt, PVC_RP_STATE_CAP);
	else
		reg = xe_mmio_read32(gt, RP_STATE_CAP);
	pc->rp0_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
	pc->rpn_freq = REG_FIELD_GET(RPN_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}

static void pc_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);

	if (GRAPHICS_VERx100(xe) >= 1270)
		mtl_init_fused_rp_values(pc);
	else
		tgl_init_fused_rp_values(pc);
}

/**
 * xe_guc_pc_init_early - Initialize RPx values and request a higher GT
 * frequency to allow faster GuC load times
 * @pc: Xe_GuC_PC instance
 */
void xe_guc_pc_init_early(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
	pc_init_fused_rp_values(pc);
	pc_set_cur_freq(pc, pc->rp0_freq);
}

static int pc_adjust_freq_bounds(struct xe_guc_pc *pc)
{
	int ret;

	lockdep_assert_held(&pc->freq_lock);

	ret = pc_action_query_task_state(pc);
	if (ret)
		goto out;

	/*
	 * GuC defaults to some RPmax that is not actually achievable without
	 * overclocking. Let's adjust it to the Hardware RP0, which is the
	 * regular maximum.
	 */
	if (pc_get_max_freq(pc) > pc->rp0_freq) {
		ret = pc_set_max_freq(pc, pc->rp0_freq);
		if (ret)
			goto out;
	}

	/*
	 * The same thing happens on server platforms, where min is listed as
	 * RPMax.
	 */
	if (pc_get_min_freq(pc) > pc->rp0_freq)
		ret = pc_set_min_freq(pc, pc->rp0_freq);

out:
	return ret;
}

static int pc_adjust_requested_freq(struct xe_guc_pc *pc)
{
	int ret = 0;

	lockdep_assert_held(&pc->freq_lock);

	if (pc->user_requested_min != 0) {
		ret = pc_set_min_freq(pc, pc->user_requested_min);
		if (ret)
			return ret;
	}

	if (pc->user_requested_max != 0) {
		ret = pc_set_max_freq(pc, pc->user_requested_max);
		if (ret)
			return ret;
	}

	return ret;
}

/**
 * xe_guc_pc_gucrc_disable - Disable GuC RC
 * @pc: Xe_GuC_PC instance
 *
 * Disables GuC RC by taking control of RC6 back from GuC.
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);
	struct xe_gt *gt = pc_to_gt(pc);
	int ret = 0;

	if (xe->info.skip_guc_pc)
		return 0;

	ret = pc_action_setup_gucrc(pc, XE_GUCRC_HOST_CONTROL);
	if (ret)
		return ret;

	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (ret)
		return ret;

	xe_gt_idle_disable_c6(gt);

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));

	return 0;
}

static void pc_init_pcode_freq(struct xe_guc_pc *pc)
{
	u32 min = DIV_ROUND_CLOSEST(pc->rpn_freq, GT_FREQUENCY_MULTIPLIER);
	u32 max = DIV_ROUND_CLOSEST(pc->rp0_freq, GT_FREQUENCY_MULTIPLIER);

	XE_WARN_ON(xe_pcode_init_min_freq_table(pc_to_gt(pc), min, max));
}
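
/*
 * The divisions above pass frequencies to PCODE in units of
 * GT_FREQUENCY_MULTIPLIER (50 MHz): e.g. an rpn_freq of 300 MHz
 * (illustrative value) goes into the min freq table as 6.
 */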

static int pc_init_freqs(struct xe_guc_pc *pc)
{
	int ret;

	mutex_lock(&pc->freq_lock);

	ret = pc_adjust_freq_bounds(pc);
	if (ret)
		goto out;

	ret = pc_adjust_requested_freq(pc);
	if (ret)
		goto out;

	pc_update_rp_values(pc);

	pc_init_pcode_freq(pc);

	/*
	 * The frequencies are only really ready for use after the
	 * user-requested ones have been restored.
	 */
	pc->freq_ready = true;

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

/**
 * xe_guc_pc_start - Start GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 */
int xe_guc_pc_start(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);
	struct xe_gt *gt = pc_to_gt(pc);
	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
	int ret;

	xe_gt_assert(gt, xe_device_uc_enabled(xe));

	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (ret)
		return ret;

	if (xe->info.skip_guc_pc) {
		if (xe->info.platform != XE_PVC)
			xe_gt_idle_enable_c6(gt);

		/* Request max possible since dynamic freq mgmt is not enabled */
		pc_set_cur_freq(pc, UINT_MAX);

		ret = 0;
		goto out;
	}

	memset(pc->bo->vmap.vaddr, 0, size);
	slpc_shared_data_write(pc, header.size, size);

	ret = pc_action_reset(pc);
	if (ret)
		goto out;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING)) {
		drm_err(&pc_to_xe(pc)->drm, "GuC PC Start failed\n");
		ret = -EIO;
		goto out;
	}

	ret = pc_init_freqs(pc);
	if (ret)
		goto out;

	if (xe->info.platform == XE_PVC) {
		xe_guc_pc_gucrc_disable(pc);
		ret = 0;
		goto out;
	}

	ret = pc_action_setup_gucrc(pc, XE_GUCRC_FIRMWARE_CONTROL);

out:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
	return ret;
}

/**
 * xe_guc_pc_stop - Stop GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 */
int xe_guc_pc_stop(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);

	if (xe->info.skip_guc_pc) {
		xe_gt_idle_disable_c6(pc_to_gt(pc));
		return 0;
	}

	mutex_lock(&pc->freq_lock);
	pc->freq_ready = false;
	mutex_unlock(&pc->freq_lock);

	return 0;
}

/**
 * xe_guc_pc_fini - Finalize GuC's Power Conservation component
 * @drm: DRM device
 * @arg: opaque pointer that should point to Xe_GuC_PC instance
 */
static void xe_guc_pc_fini(struct drm_device *drm, void *arg)
{
	struct xe_guc_pc *pc = arg;
	struct xe_device *xe = pc_to_xe(pc);

	if (xe->info.skip_guc_pc) {
		xe_gt_idle_disable_c6(pc_to_gt(pc));
		return;
	}

	XE_WARN_ON(xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL));
	XE_WARN_ON(xe_guc_pc_gucrc_disable(pc));
	XE_WARN_ON(xe_guc_pc_stop(pc));
	xe_force_wake_put(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL);
}

/**
 * xe_guc_pc_init - Initialize GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 */
int xe_guc_pc_init(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_bo *bo;
	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
	int err;

	if (xe->info.skip_guc_pc)
		return 0;

	err = drmm_mutex_init(&xe->drm, &pc->freq_lock);
	if (err)
		return err;

	bo = xe_managed_bo_create_pin_map(xe, tile, size,
					  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
					  XE_BO_FLAG_GGTT |
					  XE_BO_FLAG_GGTT_INVALIDATE);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	pc->bo = bo;

	return drmm_add_action_or_reset(&xe->drm, xe_guc_pc_fini, pc);
}