// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_guc_pc.h"

#include <linux/delay.h>

#include <drm/drm_managed.h>

#include "abi/guc_actions_abi.h"
#include "abi/guc_actions_slpc_abi.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_gt_idle.h"
#include "xe_gt_sysfs.h"
#include "xe_gt_types.h"
#include "xe_guc_ct.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_pcode.h"

#define MCHBAR_MIRROR_BASE_SNB	0x140000

#define RP_STATE_CAP		XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5998)
#define   RP0_MASK		REG_GENMASK(7, 0)
#define   RP1_MASK		REG_GENMASK(15, 8)
#define   RPN_MASK		REG_GENMASK(23, 16)

#define FREQ_INFO_REC	XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5ef0)
#define   RPE_MASK		REG_GENMASK(15, 8)

#define GT_PERF_STATUS		XE_REG(0x1381b4)
#define   CAGF_MASK	REG_GENMASK(19, 11)

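/*
 * Frequencies are exchanged with the hardware as ratios in units of
 * GT_FREQUENCY_MULTIPLIER / GT_FREQUENCY_SCALER MHz (50/3 ~= 16.67 MHz).
 */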
#define GT_FREQUENCY_MULTIPLIER	50
#define GT_FREQUENCY_SCALER	3

/**
 * DOC: GuC Power Conservation (PC)
 *
 * GuC Power Conservation (PC) supports multiple features for the most
 * efficient and performant use of the GT when GuC submission is enabled,
 * including frequency management, Render-C states management, and various
 * algorithms for power balancing.
 *
 * Single Loop Power Conservation (SLPC) is the name given to the suite of
 * connected power conservation features in the GuC firmware. The firmware
 * exposes a programming interface to the host for the control of SLPC.
 *
 * Frequency management:
 * =====================
 *
 * The Xe driver enables SLPC with all of its default features and frequency
 * selection, which varies per platform.
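 *
 * The min/max frequency bounds can be adjusted at runtime through
 * xe_guc_pc_set_min_freq() and xe_guc_pc_set_max_freq(); user requests are
 * reapplied when GuC PC restarts, e.g. after a GT reset.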
 *
 * Render-C States:
 * ================
 *
 * Render-C state management is also a GuC PC feature, now enabled in Xe for
 * all platforms.
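 *
 * GuC RC can be disabled, handing control of RC6 back to the host, via
 * xe_guc_pc_gucrc_disable().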
 *
 */

static struct xe_guc *
pc_to_guc(struct xe_guc_pc *pc)
{
	return container_of(pc, struct xe_guc, pc);
}

static struct xe_device *
pc_to_xe(struct xe_guc_pc *pc)
{
	struct xe_guc *guc = pc_to_guc(pc);
	struct xe_gt *gt = container_of(guc, struct xe_gt, uc.guc);

	return gt_to_xe(gt);
}

static struct xe_gt *
pc_to_gt(struct xe_guc_pc *pc)
{
	return container_of(pc, struct xe_gt, uc.guc.pc);
}

static struct iosys_map *
pc_to_maps(struct xe_guc_pc *pc)
{
	return &pc->bo->vmap;
}

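/*
 * The SLPC shared data buffer may live in VRAM, so all field accesses go
 * through the iosys_map helpers rather than plain pointer dereferences.
 */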
#define slpc_shared_data_read(pc_, field_) \
	xe_map_rd_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
			struct slpc_shared_data, field_)

#define slpc_shared_data_write(pc_, field_, val_) \
	xe_map_wr_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
			struct slpc_shared_data, field_, val_)

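/*
 * H2G SLPC requests pack the event id and its argument count into a single
 * dword of the request message; the arguments themselves follow.
 */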
#define SLPC_EVENT(id, count) \
	(FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID, id) | \
	 FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, count))

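/*
 * Poll the SLPC shared data until the GuC reports the requested global
 * state. The wait interval starts at 10 us and doubles on each iteration,
 * clamped so the accumulated wait stays within the ~5 ms budget.
 */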
static int wait_for_pc_state(struct xe_guc_pc *pc,
			     enum slpc_global_state state)
{
	int timeout_us = 5000; /* roughly 5 ms, but no need for precision */
	int slept, wait = 10;

	xe_device_assert_mem_access(pc_to_xe(pc));

	for (slept = 0; slept < timeout_us;) {
		if (slpc_shared_data_read(pc, header.global_state) == state)
			return 0;

		usleep_range(wait, wait << 1);
		slept += wait;
		wait <<= 1;
		if (slept + wait > timeout_us)
			wait = timeout_us - slept;
	}

	return -ETIMEDOUT;
}

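/*
 * The reset event takes two arguments: the GGTT address of the SLPC shared
 * data buffer, and a second dword that is left at zero here.
 */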
static int pc_action_reset(struct xe_guc_pc *pc)
{
	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
	int ret;
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_RESET, 2),
		xe_bo_ggtt_addr(pc->bo),
		0,
	};

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		drm_err(&pc_to_xe(pc)->drm, "GuC PC reset failed: %pe",
			ERR_PTR(ret));

	return ret;
}

static int pc_action_query_task_state(struct xe_guc_pc *pc)
{
	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
	int ret;
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
		xe_bo_ggtt_addr(pc->bo),
		0,
	};

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
		return -EAGAIN;

	/* Blocking here to ensure the results are ready before reading them */
	ret = xe_guc_ct_send_block(ct, action, ARRAY_SIZE(action));
	if (ret)
		drm_err(&pc_to_xe(pc)->drm,
			"GuC PC query task state failed: %pe", ERR_PTR(ret));

	return ret;
}

static int pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value)
{
	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
	int ret;
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
		id,
		value,
	};

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
		return -EAGAIN;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		drm_err(&pc_to_xe(pc)->drm, "GuC PC set param failed: %pe",
			ERR_PTR(ret));

	return ret;
}

static int pc_action_setup_gucrc(struct xe_guc_pc *pc, u32 mode)
{
	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
	u32 action[] = {
		XE_GUC_ACTION_SETUP_PC_GUCRC,
		mode,
	};
	int ret;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		drm_err(&pc_to_xe(pc)->drm, "GuC RC enable failed: %pe",
			ERR_PTR(ret));
	return ret;
}

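/*
 * decode_freq()/encode_freq() convert between the hardware ratio and MHz.
 * For example, a raw ratio of 18 decodes to 18 * 50 / 3 = 300 MHz, and
 * encode_freq(300) yields 18 again.
 */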
static u32 decode_freq(u32 raw)
{
	return DIV_ROUND_CLOSEST(raw * GT_FREQUENCY_MULTIPLIER,
				 GT_FREQUENCY_SCALER);
}

static u32 encode_freq(u32 freq)
{
	return DIV_ROUND_CLOSEST(freq * GT_FREQUENCY_SCALER,
				 GT_FREQUENCY_MULTIPLIER);
}

static u32 pc_get_min_freq(struct xe_guc_pc *pc)
{
	u32 freq;

	freq = FIELD_GET(SLPC_MIN_UNSLICE_FREQ_MASK,
			 slpc_shared_data_read(pc, task_state_data.freq));

	return decode_freq(freq);
}

static void pc_set_manual_rp_ctrl(struct xe_guc_pc *pc, bool enable)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 state = enable ? RPSWCTL_ENABLE : RPSWCTL_DISABLE;

	/* Allow/Disallow punit to process software freq requests */
	xe_mmio_write32(gt, RP_CONTROL, state);
}

static void pc_set_cur_freq(struct xe_guc_pc *pc, u32 freq)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 rpnswreq;

	pc_set_manual_rp_ctrl(pc, true);

	/* Req freq is in units of 16.66 MHz */
	rpnswreq = REG_FIELD_PREP(REQ_RATIO_MASK, encode_freq(freq));
	xe_mmio_write32(gt, RPNSWREQ, rpnswreq);

	/* Sleep for a small time to allow pcode to respond */
	usleep_range(100, 300);

	pc_set_manual_rp_ctrl(pc, false);
}

static int pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
{
	/*
	 * Let's only check for the rpn-rp0 range. If max < min,
	 * min becomes a fixed request.
	 */
	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
		return -EINVAL;

	/*
	 * GuC policy is to elevate minimum frequency to the efficient levels.
	 * Our goal is to have the admin choices respected.
	 */
	pc_action_set_param(pc, SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
			    freq < pc->rpe_freq);

	return pc_action_set_param(pc,
				   SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
				   freq);
}

static int pc_get_max_freq(struct xe_guc_pc *pc)
{
	u32 freq;

	freq = FIELD_GET(SLPC_MAX_UNSLICE_FREQ_MASK,
			 slpc_shared_data_read(pc, task_state_data.freq));

	return decode_freq(freq);
}

static int pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
{
	/*
	 * Let's only check for the rpn-rp0 range. If max < min,
	 * min becomes a fixed request.
	 * Also, overclocking is not supported.
	 */
	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
		return -EINVAL;

	return pc_action_set_param(pc,
				   SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
				   freq);
}

static void mtl_update_rpe_value(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(gt, MTL_MPE_FREQUENCY);
	else
		reg = xe_mmio_read32(gt, MTL_GT_RPE_FREQUENCY);

	pc->rpe_freq = decode_freq(REG_FIELD_GET(MTL_RPE_MASK, reg));
}

static void tgl_update_rpe_value(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 reg;

	/*
	 * For PVC we still need to use fused RP1 as the approximation for RPe.
	 * For platforms other than PVC we get the resolved RPe directly from
	 * PCODE at a different register.
	 */
	if (xe->info.platform == XE_PVC)
		reg = xe_mmio_read32(gt, PVC_RP_STATE_CAP);
	else
		reg = xe_mmio_read32(gt, FREQ_INFO_REC);

	pc->rpe_freq = REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}

static void pc_update_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);

	if (GRAPHICS_VERx100(xe) >= 1270)
		mtl_update_rpe_value(pc);
	else
		tgl_update_rpe_value(pc);

	/*
	 * RPe is decided at runtime by PCODE. In the rare case where that's
	 * smaller than the fused min, we will trust the PCODE and use it as
	 * our minimum.
	 */
	pc->rpn_freq = min(pc->rpn_freq, pc->rpe_freq);
}

/**
 * xe_guc_pc_get_act_freq - Get actual running frequency
 * @pc: The GuC PC
 *
 * Returns: The actual running frequency, which might be 0 if the GT is in a
 * Render-C sleep state (RC6).
 */
u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 freq;

	/* When in RC6, actual frequency reported will be 0. */
	if (GRAPHICS_VERx100(xe) >= 1270) {
		freq = xe_mmio_read32(gt, MTL_MIRROR_TARGET_WP1);
		freq = REG_FIELD_GET(MTL_CAGF_MASK, freq);
	} else {
		freq = xe_mmio_read32(gt, GT_PERF_STATUS);
		freq = REG_FIELD_GET(CAGF_MASK, freq);
	}

	freq = decode_freq(freq);

	return freq;
}

/**
 * xe_guc_pc_get_cur_freq - Get current requested frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
 */
int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq)
{
	struct xe_gt *gt = pc_to_gt(pc);
	int ret;

	/*
	 * GuC SLPC plays with cur freq request when GuCRC is enabled.
	 * Block RC6 for a more reliable read.
	 */
	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (ret)
		return ret;

	*freq = xe_mmio_read32(gt, RPNSWREQ);

	*freq = REG_FIELD_GET(REQ_RATIO_MASK, *freq);
	*freq = decode_freq(*freq);

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
	return 0;
}

/**
 * xe_guc_pc_get_rp0_freq - Get the RP0 freq
 * @pc: The GuC PC
 *
 * Returns: RP0 freq.
 */
u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc)
{
	return pc->rp0_freq;
}

/**
 * xe_guc_pc_get_rpe_freq - Get the RPe freq
 * @pc: The GuC PC
 *
 * Returns: RPe freq.
 */
u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc)
{
	pc_update_rp_values(pc);

	return pc->rpe_freq;
}

/**
 * xe_guc_pc_get_rpn_freq - Get the RPn freq
 * @pc: The GuC PC
 *
 * Returns: RPn freq.
 */
u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc)
{
	return pc->rpn_freq;
}

/**
 * xe_guc_pc_get_min_freq - Get the min operational frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
 */
int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq)
{
	struct xe_gt *gt = pc_to_gt(pc);
	int ret;

	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	/*
	 * GuC SLPC plays with min freq request when GuCRC is enabled.
	 * Block RC6 for a more reliable read.
	 */
	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (ret)
		goto out;

	ret = pc_action_query_task_state(pc);
	if (ret)
		goto fw;

	*freq = pc_get_min_freq(pc);

fw:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

/**
 * xe_guc_pc_set_min_freq - Set the minimal operational frequency
 * @pc: The GuC PC
 * @freq: The selected minimal frequency
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset),
 *         -EINVAL if value out of bounds.
 */
int xe_guc_pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
{
	int ret;

	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_set_min_freq(pc, freq);
	if (ret)
		goto out;

	pc->user_requested_min = freq;

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

/**
 * xe_guc_pc_get_max_freq - Get the maximum operational frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
 */
int xe_guc_pc_get_max_freq(struct xe_guc_pc *pc, u32 *freq)
{
	int ret;

	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_action_query_task_state(pc);
	if (ret)
		goto out;

	*freq = pc_get_max_freq(pc);

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

/**
 * xe_guc_pc_set_max_freq - Set the maximum operational frequency
 * @pc: The GuC PC
 * @freq: The selected maximum frequency value
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset),
 *         -EINVAL if value out of bounds.
 */
int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
{
	int ret;

	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_set_max_freq(pc, freq);
	if (ret)
		goto out;

	pc->user_requested_max = freq;

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

/**
 * xe_guc_pc_c_status - get the current GT C state
 * @pc: Xe_GuC_PC instance
 *
 * Return: GT_IDLE_C0, GT_IDLE_C6 or GT_IDLE_UNKNOWN.
 */
enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg, gt_c_state;

	if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
		reg = xe_mmio_read32(gt, MTL_MIRROR_TARGET_WP1);
		gt_c_state = REG_FIELD_GET(MTL_CC_MASK, reg);
	} else {
		reg = xe_mmio_read32(gt, GT_CORE_STATUS);
		gt_c_state = REG_FIELD_GET(RCN_MASK, reg);
	}

	switch (gt_c_state) {
	case GT_C6:
		return GT_IDLE_C6;
	case GT_C0:
		return GT_IDLE_C0;
	default:
		return GT_IDLE_UNKNOWN;
	}
}

/**
 * xe_guc_pc_rc6_residency - rc6 residency counter
 * @pc: Xe_GuC_PC instance
 *
 * Return: raw value of the GT_GFX_RC6 residency counter.
 */
u64 xe_guc_pc_rc6_residency(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	reg = xe_mmio_read32(gt, GT_GFX_RC6);

	return reg;
}

/**
 * xe_guc_pc_mc6_residency - mc6 residency counter
 * @pc: Xe_GuC_PC instance
 *
 * Return: raw value of the MTL_MEDIA_MC6 residency counter.
 */
u64 xe_guc_pc_mc6_residency(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u64 reg;

	reg = xe_mmio_read32(gt, MTL_MEDIA_MC6);

	return reg;
}

static void mtl_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	xe_device_assert_mem_access(pc_to_xe(pc));

	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(gt, MTL_MEDIAP_STATE_CAP);
	else
		reg = xe_mmio_read32(gt, MTL_RP_STATE_CAP);

	pc->rp0_freq = decode_freq(REG_FIELD_GET(MTL_RP0_CAP_MASK, reg));

	pc->rpn_freq = decode_freq(REG_FIELD_GET(MTL_RPN_CAP_MASK, reg));
}

static void tgl_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 reg;

	xe_device_assert_mem_access(pc_to_xe(pc));

	if (xe->info.platform == XE_PVC)
		reg = xe_mmio_read32(gt, PVC_RP_STATE_CAP);
	else
		reg = xe_mmio_read32(gt, RP_STATE_CAP);
	pc->rp0_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
	pc->rpn_freq = REG_FIELD_GET(RPN_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}

static void pc_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);

	if (GRAPHICS_VERx100(xe) >= 1270)
		mtl_init_fused_rp_values(pc);
	else
		tgl_init_fused_rp_values(pc);
}

/**
 * xe_guc_pc_init_early - Initialize RPx values and request a higher GT
 * frequency to allow faster GuC load times
 * @pc: Xe_GuC_PC instance
 */
void xe_guc_pc_init_early(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
	pc_init_fused_rp_values(pc);
	pc_set_cur_freq(pc, pc->rp0_freq);
}

static int pc_adjust_freq_bounds(struct xe_guc_pc *pc)
{
	int ret;

	lockdep_assert_held(&pc->freq_lock);

	ret = pc_action_query_task_state(pc);
	if (ret)
		goto out;

	/*
	 * GuC defaults to some RPmax that is not actually achievable without
	 * overclocking. Let's adjust it to the Hardware RP0, which is the
	 * regular maximum.
	 */
	if (pc_get_max_freq(pc) > pc->rp0_freq) {
		ret = pc_set_max_freq(pc, pc->rp0_freq);
		if (ret)
			goto out;
	}

	/*
	 * The same thing happens on server platforms, where min is listed as
	 * RPMax.
	 */
	if (pc_get_min_freq(pc) > pc->rp0_freq)
		ret = pc_set_min_freq(pc, pc->rp0_freq);

out:
	return ret;
}

static int pc_adjust_requested_freq(struct xe_guc_pc *pc)
{
	int ret = 0;

	lockdep_assert_held(&pc->freq_lock);

	if (pc->user_requested_min != 0) {
		ret = pc_set_min_freq(pc, pc->user_requested_min);
		if (ret)
			return ret;
	}

	if (pc->user_requested_max != 0) {
		ret = pc_set_max_freq(pc, pc->user_requested_max);
		if (ret)
			return ret;
	}

	return ret;
}

/**
 * xe_guc_pc_gucrc_disable - Disable GuC RC
 * @pc: Xe_GuC_PC instance
 *
 * Disables GuC RC by taking control of RC6 back from GuC.
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);
	struct xe_gt *gt = pc_to_gt(pc);
	int ret = 0;

	if (xe->info.skip_guc_pc)
		return 0;

	ret = pc_action_setup_gucrc(pc, XE_GUCRC_HOST_CONTROL);
	if (ret)
		return ret;

	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (ret)
		return ret;

	xe_gt_idle_disable_c6(gt);

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));

	return 0;
}

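/*
 * The PCODE min frequency table is programmed in units of
 * GT_FREQUENCY_MULTIPLIER (50 MHz), hence the conversions from MHz below.
 */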
static void pc_init_pcode_freq(struct xe_guc_pc *pc)
{
	u32 min = DIV_ROUND_CLOSEST(pc->rpn_freq, GT_FREQUENCY_MULTIPLIER);
	u32 max = DIV_ROUND_CLOSEST(pc->rp0_freq, GT_FREQUENCY_MULTIPLIER);

	XE_WARN_ON(xe_pcode_init_min_freq_table(pc_to_gt(pc), min, max));
}

static int pc_init_freqs(struct xe_guc_pc *pc)
{
	int ret;

	mutex_lock(&pc->freq_lock);

	ret = pc_adjust_freq_bounds(pc);
	if (ret)
		goto out;

	ret = pc_adjust_requested_freq(pc);
	if (ret)
		goto out;

	pc_update_rp_values(pc);

	pc_init_pcode_freq(pc);

	/*
	 * The frequencies are really ready for use only after the
	 * user-requested ones have been restored.
	 */
	pc->freq_ready = true;

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

/**
 * xe_guc_pc_start - Start GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_pc_start(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);
	struct xe_gt *gt = pc_to_gt(pc);
	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
	int ret;

	xe_gt_assert(gt, xe_device_uc_enabled(xe));

	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (ret)
		return ret;

	if (xe->info.skip_guc_pc) {
		if (xe->info.platform != XE_PVC)
			xe_gt_idle_enable_c6(gt);

		/* Request max possible since dynamic freq mgmt is not enabled */
		pc_set_cur_freq(pc, UINT_MAX);

		ret = 0;
		goto out;
	}

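	/*
	 * Clear the SLPC shared data so no stale state survives a GT reset;
	 * the header size is the only field the host pre-populates.
	 */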
	memset(pc->bo->vmap.vaddr, 0, size);
	slpc_shared_data_write(pc, header.size, size);

	ret = pc_action_reset(pc);
	if (ret)
		goto out;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING)) {
		drm_err(&pc_to_xe(pc)->drm, "GuC PC Start failed\n");
		ret = -EIO;
		goto out;
	}

	ret = pc_init_freqs(pc);
	if (ret)
		goto out;

	if (xe->info.platform == XE_PVC) {
		xe_guc_pc_gucrc_disable(pc);
		ret = 0;
		goto out;
	}

	ret = pc_action_setup_gucrc(pc, XE_GUCRC_FIRMWARE_CONTROL);

out:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
	return ret;
}

/**
 * xe_guc_pc_stop - Stop GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_pc_stop(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);

	if (xe->info.skip_guc_pc) {
		xe_gt_idle_disable_c6(pc_to_gt(pc));
		return 0;
	}

	mutex_lock(&pc->freq_lock);
	pc->freq_ready = false;
	mutex_unlock(&pc->freq_lock);

	return 0;
}

/**
 * xe_guc_pc_fini_hw - Finalize GuC's Power Conservation component
 * @arg: opaque pointer that should point to the Xe_GuC_PC instance
 */
static void xe_guc_pc_fini_hw(void *arg)
{
	struct xe_guc_pc *pc = arg;
	struct xe_device *xe = pc_to_xe(pc);

	if (xe_device_wedged(xe))
		return;

	XE_WARN_ON(xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL));
	XE_WARN_ON(xe_guc_pc_gucrc_disable(pc));
	XE_WARN_ON(xe_guc_pc_stop(pc));
	xe_force_wake_put(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL);
}

/**
 * xe_guc_pc_init - Initialize GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_pc_init(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_bo *bo;
	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
	int err;

	if (xe->info.skip_guc_pc)
		return 0;

	err = drmm_mutex_init(&xe->drm, &pc->freq_lock);
	if (err)
		return err;

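	/*
	 * The SLPC shared data buffer is GGTT-mapped so the GuC can access
	 * it; on discrete GPUs it is placed in VRAM. The managed create
	 * helper ties the buffer's lifetime to the DRM device.
	 */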
	bo = xe_managed_bo_create_pin_map(xe, tile, size,
					  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
					  XE_BO_FLAG_GGTT |
					  XE_BO_FLAG_GGTT_INVALIDATE);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	pc->bo = bo;

	return devm_add_action_or_reset(xe->drm.dev, xe_guc_pc_fini_hw, pc);
}