// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_guc_pc.h"

#include <linux/delay.h>

#include <drm/drm_managed.h>
#include <generated/xe_wa_oob.h>

#include "abi/guc_actions_slpc_abi.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_gt_idle.h"
#include "xe_gt_printk.h"
#include "xe_gt_types.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_pcode.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_wa.h"

#define MCHBAR_MIRROR_BASE_SNB	0x140000

#define RP_STATE_CAP		XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5998)
#define   RP0_MASK		REG_GENMASK(7, 0)
#define   RP1_MASK		REG_GENMASK(15, 8)
#define   RPN_MASK		REG_GENMASK(23, 16)

#define FREQ_INFO_REC	XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5ef0)
#define   RPE_MASK		REG_GENMASK(15, 8)

#define GT_PERF_STATUS		XE_REG(0x1381b4)
#define   CAGF_MASK	REG_GENMASK(19, 11)

#define GT_FREQUENCY_MULTIPLIER	50
#define GT_FREQUENCY_SCALER	3

#define LNL_MERT_FREQ_CAP	800
#define BMG_MERT_FREQ_CAP	2133

/**
 * DOC: GuC Power Conservation (PC)
 *
 * GuC Power Conservation (PC) supports multiple features for the most
 * efficient and performant use of the GT when GuC submission is enabled,
 * including frequency management, Render-C state management, and various
 * algorithms for power balancing.
 *
 * Single Loop Power Conservation (SLPC) is the name given to the suite of
 * connected power conservation features in the GuC firmware. The firmware
 * exposes a programming interface to the host for the control of SLPC.
 *
 * Frequency management:
 * =====================
 *
 * The Xe driver enables SLPC with all of its default features and frequency
 * selection, which varies per platform.
 *
 * Render-C States:
 * ================
 *
 * Render-C states are also a GuC PC feature, now enabled in Xe for all
 * platforms.
 */

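/*
 * Illustrative usage sketch (not part of this file): the rest of the driver
 * drives this component through the xe_guc_pc_* entry points. Assuming a
 * valid &struct xe_gt pointer named gt, with the PC instance embedded at
 * gt->uc.guc.pc:
 *
 *	u32 freq;
 *
 *	if (!xe_guc_pc_get_cur_freq(&gt->uc.guc.pc, &freq))
 *		drm_info(&gt_to_xe(gt)->drm, "current freq: %u MHz\n", freq);
 */
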
static struct xe_guc *pc_to_guc(struct xe_guc_pc *pc)
{
	return container_of(pc, struct xe_guc, pc);
}

static struct xe_guc_ct *pc_to_ct(struct xe_guc_pc *pc)
{
	return &pc_to_guc(pc)->ct;
}

static struct xe_gt *pc_to_gt(struct xe_guc_pc *pc)
{
	return guc_to_gt(pc_to_guc(pc));
}

static struct xe_device *pc_to_xe(struct xe_guc_pc *pc)
{
	return guc_to_xe(pc_to_guc(pc));
}

static struct iosys_map *pc_to_maps(struct xe_guc_pc *pc)
{
	return &pc->bo->vmap;
}

#define slpc_shared_data_read(pc_, field_) \
	xe_map_rd_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
			struct slpc_shared_data, field_)

#define slpc_shared_data_write(pc_, field_, val_) \
	xe_map_wr_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
			struct slpc_shared_data, field_, val_)

#define SLPC_EVENT(id, count) \
	(FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID, id) | \
	 FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, count))

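/*
 * Worked example (illustrative): pc_action_set_param() below sends
 * SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2), i.e. a request DWORD carrying
 * the event id in HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID and an argument
 * count of 2 (the parameter id and its value) in
 * HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC.
 */

/*
 * Poll the SLPC global state with an exponentially growing sleep interval
 * (10us, 20us, 40us, ...), clamped so that the total time slept does not
 * exceed the 5ms budget.
 */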
static int wait_for_pc_state(struct xe_guc_pc *pc,
			     enum slpc_global_state state)
{
	int timeout_us = 5000; /* roughly 5ms, but no need for precision */
	int slept, wait = 10;

	xe_device_assert_mem_access(pc_to_xe(pc));

	for (slept = 0; slept < timeout_us;) {
		if (slpc_shared_data_read(pc, header.global_state) == state)
			return 0;

		usleep_range(wait, wait << 1);
		slept += wait;
		wait <<= 1;
		if (slept + wait > timeout_us)
			wait = timeout_us - slept;
	}

	return -ETIMEDOUT;
}

static int pc_action_reset(struct xe_guc_pc *pc)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_RESET, 2),
		xe_bo_ggtt_addr(pc->bo),
		0,
	};
	int ret;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		xe_gt_err(pc_to_gt(pc), "GuC PC reset failed: %pe\n",
			  ERR_PTR(ret));

	return ret;
}

static int pc_action_query_task_state(struct xe_guc_pc *pc)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
		xe_bo_ggtt_addr(pc->bo),
		0,
	};
	int ret;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
		return -EAGAIN;

	/* Blocking here to ensure the results are ready before reading them */
	ret = xe_guc_ct_send_block(ct, action, ARRAY_SIZE(action));
	if (ret)
		xe_gt_err(pc_to_gt(pc), "GuC PC query task state failed: %pe\n",
			  ERR_PTR(ret));

	return ret;
}

static int pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
		id,
		value,
	};
	int ret;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
		return -EAGAIN;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		xe_gt_err(pc_to_gt(pc), "GuC PC set param[%u]=%u failed: %pe\n",
			  id, value, ERR_PTR(ret));

	return ret;
}

static int pc_action_unset_param(struct xe_guc_pc *pc, u8 id)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1),
		id,
	};
	int ret;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
		return -EAGAIN;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		xe_gt_err(pc_to_gt(pc), "GuC PC unset param failed: %pe\n",
			  ERR_PTR(ret));

	return ret;
}

static int pc_action_setup_gucrc(struct xe_guc_pc *pc, u32 mode)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_SETUP_PC_GUCRC,
		mode,
	};
	int ret;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		xe_gt_err(pc_to_gt(pc), "GuC RC enable mode=%u failed: %pe\n",
			  mode, ERR_PTR(ret));
	return ret;
}

static u32 decode_freq(u32 raw)
{
	return DIV_ROUND_CLOSEST(raw * GT_FREQUENCY_MULTIPLIER,
				 GT_FREQUENCY_SCALER);
}

static u32 encode_freq(u32 freq)
{
	return DIV_ROUND_CLOSEST(freq * GT_FREQUENCY_SCALER,
				 GT_FREQUENCY_MULTIPLIER);
}
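
/*
 * Worked example: the hardware ratio unit is 50/3 MHz (~16.67 MHz), so a
 * raw ratio of 48 decodes to DIV_ROUND_CLOSEST(48 * 50, 3) = 800 MHz, and
 * encode_freq(800) = DIV_ROUND_CLOSEST(800 * 3, 50) = 48 round-trips back.
 */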

static u32 pc_get_min_freq(struct xe_guc_pc *pc)
{
	u32 freq;

	freq = FIELD_GET(SLPC_MIN_UNSLICE_FREQ_MASK,
			 slpc_shared_data_read(pc, task_state_data.freq));

	return decode_freq(freq);
}

static void pc_set_manual_rp_ctrl(struct xe_guc_pc *pc, bool enable)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 state = enable ? RPSWCTL_ENABLE : RPSWCTL_DISABLE;

	/* Allow/Disallow punit to process software freq requests */
	xe_mmio_write32(gt, RP_CONTROL, state);
}

static void pc_set_cur_freq(struct xe_guc_pc *pc, u32 freq)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 rpnswreq;

	pc_set_manual_rp_ctrl(pc, true);

	/* Req freq is in units of 16.67 MHz (50/3 MHz) */
	rpnswreq = REG_FIELD_PREP(REQ_RATIO_MASK, encode_freq(freq));
	xe_mmio_write32(gt, RPNSWREQ, rpnswreq);

	/* Sleep for a short time to allow pcode to respond */
	usleep_range(100, 300);

	pc_set_manual_rp_ctrl(pc, false);
}

static int pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
{
	/*
	 * Let's only check for the rpn-rp0 range. If max < min,
	 * min becomes a fixed request.
	 */
	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
		return -EINVAL;

	/*
	 * GuC policy is to elevate the minimum frequency to the efficient
	 * level. Our goal is to have the admin's choices respected.
	 */
	pc_action_set_param(pc, SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
			    freq < pc->rpe_freq);

	return pc_action_set_param(pc,
				   SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
				   freq);
}
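
/*
 * Illustrative example (hypothetical numbers): with an rpe_freq of
 * 1300 MHz, requesting a min of 800 MHz sets
 * SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY so GuC does not silently raise the
 * floor back to RPe, while requesting a min of 1400 MHz clears it again.
 */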

static int pc_get_max_freq(struct xe_guc_pc *pc)
{
	u32 freq;

	freq = FIELD_GET(SLPC_MAX_UNSLICE_FREQ_MASK,
			 slpc_shared_data_read(pc, task_state_data.freq));

	return decode_freq(freq);
}

static int pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
{
	/*
	 * Let's only check for the rpn-rp0 range. If max < min,
	 * min becomes a fixed request.
	 * Also, overclocking is not supported.
	 */
	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
		return -EINVAL;

	return pc_action_set_param(pc,
				   SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
				   freq);
}

static void mtl_update_rpe_value(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(gt, MTL_MPE_FREQUENCY);
	else
		reg = xe_mmio_read32(gt, MTL_GT_RPE_FREQUENCY);

	pc->rpe_freq = decode_freq(REG_FIELD_GET(MTL_RPE_MASK, reg));
}

static void tgl_update_rpe_value(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 reg;

	/*
	 * For PVC we still need to use the fused RP1 as an approximation for
	 * RPe. For platforms other than PVC we get the resolved RPe directly
	 * from PCODE at a different register.
	 */
	if (xe->info.platform == XE_PVC)
		reg = xe_mmio_read32(gt, PVC_RP_STATE_CAP);
	else
		reg = xe_mmio_read32(gt, FREQ_INFO_REC);

	pc->rpe_freq = REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}

static void pc_update_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);

	if (GRAPHICS_VERx100(xe) >= 1270)
		mtl_update_rpe_value(pc);
	else
		tgl_update_rpe_value(pc);

	/*
	 * RPe is decided at runtime by PCODE. In the rare case where that's
	 * smaller than the fused min, we will trust the PCODE and use that
	 * as our minimum.
	 */
	pc->rpn_freq = min(pc->rpn_freq, pc->rpe_freq);
}

/**
 * xe_guc_pc_get_act_freq - Get the actual running frequency
 * @pc: The GuC PC
 *
 * Returns: the actual running frequency, which might be 0 if the GT is in a
 * Render-C sleep state (RC6).
 */
u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 freq;

	/* When in RC6, the actual frequency reported will be 0. */
	if (GRAPHICS_VERx100(xe) >= 1270) {
		freq = xe_mmio_read32(gt, MTL_MIRROR_TARGET_WP1);
		freq = REG_FIELD_GET(MTL_CAGF_MASK, freq);
	} else {
		freq = xe_mmio_read32(gt, GT_PERF_STATUS);
		freq = REG_FIELD_GET(CAGF_MASK, freq);
	}

	freq = decode_freq(freq);

	return freq;
}

/**
 * xe_guc_pc_get_cur_freq - Get the currently requested frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
 */
int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq)
{
	struct xe_gt *gt = pc_to_gt(pc);
	int ret;

	/*
	 * GuC SLPC plays with the cur freq request when GuCRC is enabled.
	 * Block RC6 for a more reliable read.
	 */
	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (ret)
		return ret;

	*freq = xe_mmio_read32(gt, RPNSWREQ);

	*freq = REG_FIELD_GET(REQ_RATIO_MASK, *freq);
	*freq = decode_freq(*freq);

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
	return 0;
}

/**
 * xe_guc_pc_get_rp0_freq - Get the RP0 freq
 * @pc: The GuC PC
 *
 * Returns: RP0 freq.
 */
u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc)
{
	return pc->rp0_freq;
}

/**
 * xe_guc_pc_get_rpe_freq - Get the RPe freq
 * @pc: The GuC PC
 *
 * Returns: RPe freq.
 */
u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc)
{
	pc_update_rp_values(pc);

	return pc->rpe_freq;
}

/**
 * xe_guc_pc_get_rpn_freq - Get the RPn freq
 * @pc: The GuC PC
 *
 * Returns: RPn freq.
 */
u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc)
{
	return pc->rpn_freq;
}

/**
 * xe_guc_pc_get_min_freq - Get the min operational frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
 */
int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq)
{
	struct xe_gt *gt = pc_to_gt(pc);
	int ret;

	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	/*
	 * GuC SLPC plays with the min freq request when GuCRC is enabled.
	 * Block RC6 for a more reliable read.
	 */
	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (ret)
		goto out;

	ret = pc_action_query_task_state(pc);
	if (ret)
		goto fw;

	*freq = pc_get_min_freq(pc);

fw:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

/**
 * xe_guc_pc_set_min_freq - Set the minimum operational frequency
 * @pc: The GuC PC
 * @freq: The selected minimum frequency
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset),
 *         -EINVAL if value out of bounds.
 */
int xe_guc_pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
{
	int ret;

	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_set_min_freq(pc, freq);
	if (ret)
		goto out;

	pc->user_requested_min = freq;

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

/**
 * xe_guc_pc_get_max_freq - Get the maximum operational frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
 */
int xe_guc_pc_get_max_freq(struct xe_guc_pc *pc, u32 *freq)
{
	int ret;

	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_action_query_task_state(pc);
	if (ret)
		goto out;

	*freq = pc_get_max_freq(pc);

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

/**
 * xe_guc_pc_set_max_freq - Set the maximum operational frequency
 * @pc: The GuC PC
 * @freq: The selected maximum frequency value
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset),
 *         -EINVAL if value out of bounds.
 */
int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
{
	int ret;

	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_set_max_freq(pc, freq);
	if (ret)
		goto out;

	pc->user_requested_max = freq;

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

/**
 * xe_guc_pc_c_status - get the current GT C state
 * @pc: Xe_GuC_PC instance
 *
 * Return: the current GT C state (GT_IDLE_C0, GT_IDLE_C6 or GT_IDLE_UNKNOWN).
 */
enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg, gt_c_state;

	if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
		reg = xe_mmio_read32(gt, MTL_MIRROR_TARGET_WP1);
		gt_c_state = REG_FIELD_GET(MTL_CC_MASK, reg);
	} else {
		reg = xe_mmio_read32(gt, GT_CORE_STATUS);
		gt_c_state = REG_FIELD_GET(RCN_MASK, reg);
	}

	switch (gt_c_state) {
	case GT_C6:
		return GT_IDLE_C6;
	case GT_C0:
		return GT_IDLE_C0;
	default:
		return GT_IDLE_UNKNOWN;
	}
}

/**
 * xe_guc_pc_rc6_residency - rc6 residency counter
 * @pc: Xe_GuC_PC instance
 *
 * Return: the raw RC6 residency counter (GT_GFX_RC6 register value).
 */
u64 xe_guc_pc_rc6_residency(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	reg = xe_mmio_read32(gt, GT_GFX_RC6);

	return reg;
}

/**
 * xe_guc_pc_mc6_residency - mc6 residency counter
 * @pc: Xe_GuC_PC instance
 *
 * Return: the raw MC6 residency counter (MTL_MEDIA_MC6 register value).
 */
u64 xe_guc_pc_mc6_residency(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u64 reg;

	reg = xe_mmio_read32(gt, MTL_MEDIA_MC6);

	return reg;
}

static void mtl_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	xe_device_assert_mem_access(pc_to_xe(pc));

	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(gt, MTL_MEDIAP_STATE_CAP);
	else
		reg = xe_mmio_read32(gt, MTL_RP_STATE_CAP);

	pc->rp0_freq = decode_freq(REG_FIELD_GET(MTL_RP0_CAP_MASK, reg));

	pc->rpn_freq = decode_freq(REG_FIELD_GET(MTL_RPN_CAP_MASK, reg));
}

static void tgl_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 reg;

	xe_device_assert_mem_access(pc_to_xe(pc));

	if (xe->info.platform == XE_PVC)
		reg = xe_mmio_read32(gt, PVC_RP_STATE_CAP);
	else
		reg = xe_mmio_read32(gt, RP_STATE_CAP);
	pc->rp0_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
	pc->rpn_freq = REG_FIELD_GET(RPN_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}

static void pc_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);

	if (GRAPHICS_VERx100(xe) >= 1270)
		mtl_init_fused_rp_values(pc);
	else
		tgl_init_fused_rp_values(pc);
}

static u32 pc_max_freq_cap(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	if (XE_WA(gt, 22019338487)) {
		if (xe_gt_is_media_type(gt))
			return min(LNL_MERT_FREQ_CAP, pc->rp0_freq);
		else
			return min(BMG_MERT_FREQ_CAP, pc->rp0_freq);
	} else {
		return pc->rp0_freq;
	}
}
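
/*
 * Illustrative example (hypothetical RP0): with workaround 22019338487
 * active on a primary GT whose fused RP0 is 2400 MHz, frequency requests
 * are capped at BMG_MERT_FREQ_CAP (2133 MHz); without the workaround the
 * cap is simply RP0.
 */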

/**
 * xe_guc_pc_raise_unslice - Request a higher GT frequency to allow faster
 * GuC load times
 * @pc: Xe_GuC_PC instance
 */
void xe_guc_pc_raise_unslice(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
	pc_set_cur_freq(pc, pc_max_freq_cap(pc));
}

/**
 * xe_guc_pc_init_early - Initialize RPx values
 * @pc: Xe_GuC_PC instance
 */
void xe_guc_pc_init_early(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
	pc_init_fused_rp_values(pc);
}

static int pc_adjust_freq_bounds(struct xe_guc_pc *pc)
{
	int ret;

	lockdep_assert_held(&pc->freq_lock);

	ret = pc_action_query_task_state(pc);
	if (ret)
		goto out;

	/*
	 * GuC defaults to some RPmax that is not actually achievable without
	 * overclocking. Let's adjust it to the hardware RP0, which is the
	 * regular maximum.
	 */
	if (pc_get_max_freq(pc) > pc->rp0_freq) {
		ret = pc_set_max_freq(pc, pc->rp0_freq);
		if (ret)
			goto out;
	}

	/*
	 * The same thing happens on server platforms, where the min is
	 * listed as RPmax.
	 */
	if (pc_get_min_freq(pc) > pc->rp0_freq)
		ret = pc_set_min_freq(pc, pc->rp0_freq);

out:
	return ret;
}

static int pc_adjust_requested_freq(struct xe_guc_pc *pc)
{
	int ret = 0;

	lockdep_assert_held(&pc->freq_lock);

	if (pc->user_requested_min != 0) {
		ret = pc_set_min_freq(pc, pc->user_requested_min);
		if (ret)
			return ret;
	}

	if (pc->user_requested_max != 0) {
		ret = pc_set_max_freq(pc, pc->user_requested_max);
		if (ret)
			return ret;
	}

	return ret;
}

static int pc_set_mert_freq_cap(struct xe_guc_pc *pc)
{
	int ret = 0;

	if (XE_WA(pc_to_gt(pc), 22019338487)) {
		/*
		 * Get the updated min/max and stash them.
		 */
		ret = xe_guc_pc_get_min_freq(pc, &pc->stashed_min_freq);
		if (!ret)
			ret = xe_guc_pc_get_max_freq(pc, &pc->stashed_max_freq);
		if (ret)
			return ret;

		/*
		 * Ensure min and max are bound by MERT_FREQ_CAP until the
		 * driver loads.
		 */
		mutex_lock(&pc->freq_lock);
		ret = pc_set_min_freq(pc, min(pc->rpe_freq, pc_max_freq_cap(pc)));
		if (!ret)
			ret = pc_set_max_freq(pc, min(pc->rp0_freq, pc_max_freq_cap(pc)));
		mutex_unlock(&pc->freq_lock);
	}

	return ret;
}

/**
 * xe_guc_pc_restore_stashed_freq - Set min/max back to stashed values
 * @pc: The GuC PC
 *
 * Returns: 0 on success,
 *          error code on failure
 */
int xe_guc_pc_restore_stashed_freq(struct xe_guc_pc *pc)
{
	int ret = 0;

	if (IS_SRIOV_VF(pc_to_xe(pc)) || pc_to_xe(pc)->info.skip_guc_pc)
		return 0;

	mutex_lock(&pc->freq_lock);
	ret = pc_set_max_freq(pc, pc->stashed_max_freq);
	if (!ret)
		ret = pc_set_min_freq(pc, pc->stashed_min_freq);
	mutex_unlock(&pc->freq_lock);

	return ret;
}

/**
 * xe_guc_pc_gucrc_disable - Disable GuC RC
 * @pc: Xe_GuC_PC instance
 *
 * Disables GuC RC by taking control of RC6 back from GuC.
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);
	struct xe_gt *gt = pc_to_gt(pc);
	int ret = 0;

	if (xe->info.skip_guc_pc)
		return 0;

	ret = pc_action_setup_gucrc(pc, GUCRC_HOST_CONTROL);
	if (ret)
		return ret;

	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (ret)
		return ret;

	xe_gt_idle_disable_c6(gt);

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));

	return 0;
}

/**
 * xe_guc_pc_override_gucrc_mode - override GUCRC mode
 * @pc: Xe_GuC_PC instance
 * @mode: new value of the mode.
 *
 * Return: 0 on success, negative error code on error
 */
int xe_guc_pc_override_gucrc_mode(struct xe_guc_pc *pc, enum slpc_gucrc_mode mode)
{
	int ret;

	xe_pm_runtime_get(pc_to_xe(pc));
	ret = pc_action_set_param(pc, SLPC_PARAM_PWRGATE_RC_MODE, mode);
	xe_pm_runtime_put(pc_to_xe(pc));

	return ret;
}

/**
 * xe_guc_pc_unset_gucrc_mode - unset GUCRC mode override
 * @pc: Xe_GuC_PC instance
 *
 * Return: 0 on success, negative error code on error
 */
int xe_guc_pc_unset_gucrc_mode(struct xe_guc_pc *pc)
{
	int ret;

	xe_pm_runtime_get(pc_to_xe(pc));
	ret = pc_action_unset_param(pc, SLPC_PARAM_PWRGATE_RC_MODE);
	xe_pm_runtime_put(pc_to_xe(pc));

	return ret;
}

static void pc_init_pcode_freq(struct xe_guc_pc *pc)
{
	u32 min = DIV_ROUND_CLOSEST(pc->rpn_freq, GT_FREQUENCY_MULTIPLIER);
	u32 max = DIV_ROUND_CLOSEST(pc->rp0_freq, GT_FREQUENCY_MULTIPLIER);

	XE_WARN_ON(xe_pcode_init_min_freq_table(pc_to_gt(pc), min, max));
}
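
/*
 * Worked example: the pcode min freq table is programmed here in units of
 * GT_FREQUENCY_MULTIPLIER (50 MHz), so a hypothetical rpn_freq of 300 MHz
 * and rp0_freq of 2000 MHz become min = 6 and max = 40.
 */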

static int pc_init_freqs(struct xe_guc_pc *pc)
{
	int ret;

	mutex_lock(&pc->freq_lock);

	ret = pc_adjust_freq_bounds(pc);
	if (ret)
		goto out;

	ret = pc_adjust_requested_freq(pc);
	if (ret)
		goto out;

	pc_update_rp_values(pc);

	pc_init_pcode_freq(pc);

	/*
	 * The frequencies are really ready for use only after the
	 * user-requested ones have been restored.
	 */
	pc->freq_ready = true;

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

/**
 * xe_guc_pc_start - Start GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_pc_start(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);
	struct xe_gt *gt = pc_to_gt(pc);
	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
	int ret;

	xe_gt_assert(gt, xe_device_uc_enabled(xe));

	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (ret)
		return ret;

	if (xe->info.skip_guc_pc) {
		if (xe->info.platform != XE_PVC)
			xe_gt_idle_enable_c6(gt);

		/* Request max possible since dynamic freq mgmt is not enabled */
		pc_set_cur_freq(pc, UINT_MAX);

		ret = 0;
		goto out;
	}

	memset(pc->bo->vmap.vaddr, 0, size);
	slpc_shared_data_write(pc, header.size, size);

	ret = pc_action_reset(pc);
	if (ret)
		goto out;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING)) {
		xe_gt_err(gt, "GuC PC Start failed\n");
		ret = -EIO;
		goto out;
	}

	ret = pc_init_freqs(pc);
	if (ret)
		goto out;

	ret = pc_set_mert_freq_cap(pc);
	if (ret)
		goto out;

	if (xe->info.platform == XE_PVC) {
		xe_guc_pc_gucrc_disable(pc);
		ret = 0;
		goto out;
	}

	ret = pc_action_setup_gucrc(pc, GUCRC_FIRMWARE_CONTROL);

out:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
	return ret;
}

/**
 * xe_guc_pc_stop - Stop GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_pc_stop(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);

	if (xe->info.skip_guc_pc) {
		xe_gt_idle_disable_c6(pc_to_gt(pc));
		return 0;
	}

	mutex_lock(&pc->freq_lock);
	pc->freq_ready = false;
	mutex_unlock(&pc->freq_lock);

	return 0;
}

/**
 * xe_guc_pc_fini_hw - Finalize GuC's Power Conservation component
 * @arg: opaque pointer that should point to Xe_GuC_PC instance
 */
static void xe_guc_pc_fini_hw(void *arg)
{
	struct xe_guc_pc *pc = arg;
	struct xe_device *xe = pc_to_xe(pc);

	if (xe_device_wedged(xe))
		return;

	XE_WARN_ON(xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL));
	XE_WARN_ON(xe_guc_pc_gucrc_disable(pc));
	XE_WARN_ON(xe_guc_pc_stop(pc));

	/* Bind requested freq to mert_freq_cap before unload */
	pc_set_cur_freq(pc, min(pc_max_freq_cap(pc), pc->rpe_freq));

	xe_force_wake_put(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL);
}

/**
 * xe_guc_pc_init - Initialize GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_pc_init(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_bo *bo;
	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
	int err;

	if (xe->info.skip_guc_pc)
		return 0;

	err = drmm_mutex_init(&xe->drm, &pc->freq_lock);
	if (err)
		return err;

	bo = xe_managed_bo_create_pin_map(xe, tile, size,
					  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
					  XE_BO_FLAG_GGTT |
					  XE_BO_FLAG_GGTT_INVALIDATE);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	pc->bo = bo;

	return devm_add_action_or_reset(xe->drm.dev, xe_guc_pc_fini_hw, pc);
}
1085