xref: /linux/drivers/gpu/drm/xe/xe_guc_pc.c (revision 36f353a1ebf88280f58d1ebfe2731251d9159456)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include "xe_guc_pc.h"
7 
8 #include <linux/delay.h>
9 
10 #include <drm/drm_managed.h>
11 
12 #include "abi/guc_actions_abi.h"
13 #include "abi/guc_actions_slpc_abi.h"
14 #include "regs/xe_gt_regs.h"
15 #include "regs/xe_regs.h"
16 #include "xe_bo.h"
17 #include "xe_device.h"
18 #include "xe_gt.h"
19 #include "xe_gt_idle.h"
20 #include "xe_gt_sysfs.h"
21 #include "xe_gt_types.h"
22 #include "xe_guc_ct.h"
23 #include "xe_map.h"
24 #include "xe_mmio.h"
25 #include "xe_pcode.h"
26 
27 #define MCHBAR_MIRROR_BASE_SNB	0x140000
28 
29 #define RP_STATE_CAP		XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5998)
30 #define   RP0_MASK		REG_GENMASK(7, 0)
31 #define   RP1_MASK		REG_GENMASK(15, 8)
32 #define   RPN_MASK		REG_GENMASK(23, 16)
33 
34 #define FREQ_INFO_REC	XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5ef0)
35 #define   RPE_MASK		REG_GENMASK(15, 8)
36 
37 #define GT_PERF_STATUS		XE_REG(0x1381b4)
38 #define   CAGF_MASK	REG_GENMASK(19, 11)
39 
40 #define GT_FREQUENCY_MULTIPLIER	50
41 #define GT_FREQUENCY_SCALER	3
42 
43 /**
44  * DOC: GuC Power Conservation (PC)
45  *
46  * GuC Power Conservation (PC) supports multiple features for the most
47  * efficient and performing use of the GT when GuC submission is enabled,
48  * including frequency management, Render-C states management, and various
49  * algorithms for power balancing.
50  *
51  * Single Loop Power Conservation (SLPC) is the name given to the suite of
52  * connected power conservation features in the GuC firmware. The firmware
53  * exposes a programming interface to the host for the control of SLPC.
54  *
55  * Frequency management:
56  * =====================
57  *
58  * Xe driver enables SLPC with all of its default features and frequency
59  * selection, which varies per platform.
60  *
61  * Render-C States:
62  * ================
63  *
64  * Render-C states support is also a GuC PC feature that is now enabled in Xe for
65  * all platforms.
66  *
67  */
68 
/* Resolve the xe_guc that embeds this PC component. */
static struct xe_guc *
pc_to_guc(struct xe_guc_pc *pc)
{
	return container_of(pc, struct xe_guc, pc);
}
74 
75 static struct xe_device *
76 pc_to_xe(struct xe_guc_pc *pc)
77 {
78 	struct xe_guc *guc = pc_to_guc(pc);
79 	struct xe_gt *gt = container_of(guc, struct xe_gt, uc.guc);
80 
81 	return gt_to_xe(gt);
82 }
83 
/* Resolve the xe_gt this PC component belongs to. */
static struct xe_gt *
pc_to_gt(struct xe_guc_pc *pc)
{
	return container_of(pc, struct xe_gt, uc.guc.pc);
}
89 
/* CPU mapping of the SLPC shared-data buffer object. */
static struct iosys_map *
pc_to_maps(struct xe_guc_pc *pc)
{
	return &pc->bo->vmap;
}
95 
96 #define slpc_shared_data_read(pc_, field_) \
97 	xe_map_rd_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
98 			struct slpc_shared_data, field_)
99 
100 #define slpc_shared_data_write(pc_, field_, val_) \
101 	xe_map_wr_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
102 			struct slpc_shared_data, field_, val_)
103 
104 #define SLPC_EVENT(id, count) \
105 	(FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID, id) | \
106 	 FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, count))
107 
/*
 * Poll the SLPC global state in the shared-data buffer until it reaches
 * @state, using an exponential backoff (starting at 10us and doubling),
 * capped so the total sleep stays within the ~5ms budget.
 *
 * Return: 0 once @state is observed, -ETIMEDOUT otherwise.
 */
static int wait_for_pc_state(struct xe_guc_pc *pc,
			     enum slpc_global_state state)
{
	int timeout_us = 5000; /* roughly 5ms, but no need for precision */
	int slept, wait = 10;

	xe_device_assert_mem_access(pc_to_xe(pc));

	for (slept = 0; slept < timeout_us;) {
		if (slpc_shared_data_read(pc, header.global_state) == state)
			return 0;

		usleep_range(wait, wait << 1);
		slept += wait;
		wait <<= 1;
		/* Clamp the last sleep so we never overshoot the budget */
		if (slept + wait > timeout_us)
			wait = timeout_us - slept;
	}

	return -ETIMEDOUT;
}
129 
130 static int pc_action_reset(struct xe_guc_pc *pc)
131 {
132 	struct  xe_guc_ct *ct = &pc_to_guc(pc)->ct;
133 	int ret;
134 	u32 action[] = {
135 		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
136 		SLPC_EVENT(SLPC_EVENT_RESET, 2),
137 		xe_bo_ggtt_addr(pc->bo),
138 		0,
139 	};
140 
141 	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
142 	if (ret)
143 		drm_err(&pc_to_xe(pc)->drm, "GuC PC reset: %pe", ERR_PTR(ret));
144 
145 	return ret;
146 }
147 
148 static int pc_action_shutdown(struct xe_guc_pc *pc)
149 {
150 	struct  xe_guc_ct *ct = &pc_to_guc(pc)->ct;
151 	int ret;
152 	u32 action[] = {
153 		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
154 		SLPC_EVENT(SLPC_EVENT_SHUTDOWN, 2),
155 		xe_bo_ggtt_addr(pc->bo),
156 		0,
157 	};
158 
159 	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
160 	if (ret)
161 		drm_err(&pc_to_xe(pc)->drm, "GuC PC shutdown %pe",
162 			ERR_PTR(ret));
163 
164 	return ret;
165 }
166 
/*
 * Ask GuC to refresh the task-state section of the SLPC shared data.
 *
 * Sends a blocking H2G so the shared buffer is guaranteed to hold fresh
 * results by the time this returns 0.
 *
 * Return: 0 on success, -EAGAIN if SLPC is not running (e.g. mid-reset),
 * or a CT send error.
 */
static int pc_action_query_task_state(struct xe_guc_pc *pc)
{
	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
	int ret;
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
		xe_bo_ggtt_addr(pc->bo),
		0,
	};

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
		return -EAGAIN;

	/* Blocking here to ensure the results are ready before reading them */
	ret = xe_guc_ct_send_block(ct, action, ARRAY_SIZE(action));
	if (ret)
		drm_err(&pc_to_xe(pc)->drm,
			"GuC PC query task state failed: %pe", ERR_PTR(ret));

	return ret;
}
189 
/*
 * Set a single SLPC parameter (@id) to @value via H2G.
 *
 * Return: 0 on success, -EAGAIN if SLPC is not running (e.g. mid-reset),
 * or a CT send error.
 */
static int pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value)
{
	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
	int ret;
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
		id,
		value,
	};

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
		return -EAGAIN;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		drm_err(&pc_to_xe(pc)->drm, "GuC PC set param failed: %pe",
			ERR_PTR(ret));

	return ret;
}
211 
212 static int pc_action_setup_gucrc(struct xe_guc_pc *pc, u32 mode)
213 {
214 	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
215 	u32 action[] = {
216 		XE_GUC_ACTION_SETUP_PC_GUCRC,
217 		mode,
218 	};
219 	int ret;
220 
221 	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
222 	if (ret)
223 		drm_err(&pc_to_xe(pc)->drm, "GuC RC enable failed: %pe",
224 			ERR_PTR(ret));
225 	return ret;
226 }
227 
/* Convert a raw SLPC/PCODE ratio (units of 50/3 MHz) into MHz. */
static u32 decode_freq(u32 raw)
{
	return DIV_ROUND_CLOSEST(raw * GT_FREQUENCY_MULTIPLIER,
				 GT_FREQUENCY_SCALER);
}
233 
/* Convert a frequency in MHz into a raw ratio (units of 50/3 MHz). */
static u32 encode_freq(u32 freq)
{
	return DIV_ROUND_CLOSEST(freq * GT_FREQUENCY_SCALER,
				 GT_FREQUENCY_MULTIPLIER);
}
239 
/*
 * Read back the SLPC minimum unslice frequency (MHz) from the shared data.
 * Callers must have refreshed the task state first (pc_action_query_task_state).
 */
static u32 pc_get_min_freq(struct xe_guc_pc *pc)
{
	u32 freq;

	freq = FIELD_GET(SLPC_MIN_UNSLICE_FREQ_MASK,
			 slpc_shared_data_read(pc, task_state_data.freq));

	return decode_freq(freq);
}
249 
250 static void pc_set_manual_rp_ctrl(struct xe_guc_pc *pc, bool enable)
251 {
252 	struct xe_gt *gt = pc_to_gt(pc);
253 	u32 state = enable ? RPSWCTL_ENABLE : RPSWCTL_DISABLE;
254 
255 	/* Allow/Disallow punit to process software freq requests */
256 	xe_mmio_write32(gt, RP_CONTROL, state);
257 }
258 
/*
 * Directly request @freq (MHz) from the punit via a software frequency
 * request, bypassing SLPC. Manual RP control is enabled only for the
 * duration of the request.
 */
static void pc_set_cur_freq(struct xe_guc_pc *pc, u32 freq)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 rpnswreq;

	pc_set_manual_rp_ctrl(pc, true);

	/* Req freq is in units of 16.66 Mhz */
	rpnswreq = REG_FIELD_PREP(REQ_RATIO_MASK, encode_freq(freq));
	xe_mmio_write32(gt, RPNSWREQ, rpnswreq);

	/* Sleep for a small time to allow pcode to respond */
	usleep_range(100, 300);

	pc_set_manual_rp_ctrl(pc, false);
}
275 
276 static int pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
277 {
278 	/*
279 	 * Let's only check for the rpn-rp0 range. If max < min,
280 	 * min becomes a fixed request.
281 	 */
282 	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
283 		return -EINVAL;
284 
285 	/*
286 	 * GuC policy is to elevate minimum frequency to the efficient levels
287 	 * Our goal is to have the admin choices respected.
288 	 */
289 	pc_action_set_param(pc, SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
290 			    freq < pc->rpe_freq);
291 
292 	return pc_action_set_param(pc,
293 				   SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
294 				   freq);
295 }
296 
297 static int pc_get_max_freq(struct xe_guc_pc *pc)
298 {
299 	u32 freq;
300 
301 	freq = FIELD_GET(SLPC_MAX_UNSLICE_FREQ_MASK,
302 			 slpc_shared_data_read(pc, task_state_data.freq));
303 
304 	return decode_freq(freq);
305 }
306 
307 static int pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
308 {
309 	/*
310 	 * Let's only check for the rpn-rp0 range. If max < min,
311 	 * min becomes a fixed request.
312 	 * Also, overclocking is not supported.
313 	 */
314 	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
315 		return -EINVAL;
316 
317 	return pc_action_set_param(pc,
318 				   SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
319 				   freq);
320 }
321 
/* Refresh pc->rpe_freq from PCODE on MTL-class (graphics ver >= 12.70) GTs. */
static void mtl_update_rpe_value(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	/* Media and render GTs expose RPe through different registers */
	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(gt, MTL_MPE_FREQUENCY);
	else
		reg = xe_mmio_read32(gt, MTL_GT_RPE_FREQUENCY);

	pc->rpe_freq = decode_freq(REG_FIELD_GET(MTL_RPE_MASK, reg));
}
334 
/* Refresh pc->rpe_freq on pre-MTL platforms (graphics ver < 12.70). */
static void tgl_update_rpe_value(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 reg;

	/*
	 * For PVC we still need to use fused RP1 as the approximation for RPe
	 * For other platforms than PVC we get the resolved RPe directly from
	 * PCODE at a different register
	 */
	if (xe->info.platform == XE_PVC)
		reg = xe_mmio_read32(gt, PVC_RP_STATE_CAP);
	else
		reg = xe_mmio_read32(gt, FREQ_INFO_REC);

	/* RPe here is in units of 50 MHz, no 50/3 scaler on these platforms */
	pc->rpe_freq = REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}
353 
/* Refresh the runtime RPe value and re-clamp RPn against it. */
static void pc_update_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);

	if (GRAPHICS_VERx100(xe) >= 1270)
		mtl_update_rpe_value(pc);
	else
		tgl_update_rpe_value(pc);

	/*
	 * RPe is decided at runtime by PCODE. In the rare case where that's
	 * smaller than the fused min, we will trust the PCODE and use that
	 * as our minimum one.
	 */
	pc->rpn_freq = min(pc->rpn_freq, pc->rpe_freq);
}
371 
372 /**
373  * xe_guc_pc_get_act_freq - Get Actual running frequency
374  * @pc: The GuC PC
375  *
376  * Returns: The Actual running frequency. Which might be 0 if GT is in Render-C sleep state (RC6).
377  */
378 u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc)
379 {
380 	struct xe_gt *gt = pc_to_gt(pc);
381 	struct xe_device *xe = gt_to_xe(gt);
382 	u32 freq;
383 
384 	xe_device_mem_access_get(gt_to_xe(gt));
385 
386 	/* When in RC6, actual frequency reported will be 0. */
387 	if (GRAPHICS_VERx100(xe) >= 1270) {
388 		freq = xe_mmio_read32(gt, MTL_MIRROR_TARGET_WP1);
389 		freq = REG_FIELD_GET(MTL_CAGF_MASK, freq);
390 	} else {
391 		freq = xe_mmio_read32(gt, GT_PERF_STATUS);
392 		freq = REG_FIELD_GET(CAGF_MASK, freq);
393 	}
394 
395 	freq = decode_freq(freq);
396 
397 	xe_device_mem_access_put(gt_to_xe(gt));
398 
399 	return freq;
400 }
401 
/**
 * xe_guc_pc_get_cur_freq - Get Current requested frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
 */
int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq)
{
	struct xe_gt *gt = pc_to_gt(pc);
	int ret;

	xe_device_mem_access_get(gt_to_xe(gt));
	/*
	 * GuC SLPC plays with cur freq request when GuCRC is enabled
	 * Block RC6 for a more reliable read.
	 */
	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (ret)
		goto out;

	/* Raw request register holds a ratio; convert it back to MHz */
	*freq = xe_mmio_read32(gt, RPNSWREQ);

	*freq = REG_FIELD_GET(REQ_RATIO_MASK, *freq);
	*freq = decode_freq(*freq);

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
out:
	xe_device_mem_access_put(gt_to_xe(gt));
	return ret;
}
434 
/**
 * xe_guc_pc_get_rp0_freq - Get the RP0 freq
 * @pc: The GuC PC
 *
 * Returns: RP0 freq (cached fused value, in MHz).
 */
u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc)
{
	return pc->rp0_freq;
}
445 
/**
 * xe_guc_pc_get_rpe_freq - Get the RPe freq
 * @pc: The GuC PC
 *
 * RPe is resolved at runtime by PCODE, so it is re-read here rather than
 * returned from a stale cache.
 *
 * Returns: RPe freq (in MHz).
 */
u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);

	xe_device_mem_access_get(xe);
	pc_update_rp_values(pc);
	xe_device_mem_access_put(xe);

	return pc->rpe_freq;
}
463 
/**
 * xe_guc_pc_get_rpn_freq - Get the RPn freq
 * @pc: The GuC PC
 *
 * Returns: RPn freq (cached value, in MHz; may have been clamped to RPe).
 */
u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc)
{
	return pc->rpn_freq;
}
474 
/**
 * xe_guc_pc_get_min_freq - Get the min operational frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
 */
int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq)
{
	struct xe_gt *gt = pc_to_gt(pc);
	int ret;

	xe_device_mem_access_get(pc_to_xe(pc));
	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	/*
	 * GuC SLPC plays with min freq request when GuCRC is enabled
	 * Block RC6 for a more reliable read.
	 */
	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (ret)
		goto out;

	/* Refresh shared data before reading the min value out of it */
	ret = pc_action_query_task_state(pc);
	if (ret)
		goto fw;

	*freq = pc_get_min_freq(pc);

fw:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
out:
	mutex_unlock(&pc->freq_lock);
	xe_device_mem_access_put(pc_to_xe(pc));
	return ret;
}
517 
/**
 * xe_guc_pc_set_min_freq - Set the minimal operational frequency
 * @pc: The GuC PC
 * @freq: The selected minimal frequency
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset),
 *         -EINVAL if value out of bounds.
 */
int xe_guc_pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
{
	int ret;

	xe_device_mem_access_get(pc_to_xe(pc));
	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_set_min_freq(pc, freq);
	if (ret)
		goto out;

	/* Remember the choice so it can be restored after a GuC PC reset */
	pc->user_requested_min = freq;

out:
	mutex_unlock(&pc->freq_lock);
	xe_device_mem_access_put(pc_to_xe(pc));

	return ret;
}
551 
/**
 * xe_guc_pc_get_max_freq - Get Maximum operational frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
 */
int xe_guc_pc_get_max_freq(struct xe_guc_pc *pc, u32 *freq)
{
	int ret;

	xe_device_mem_access_get(pc_to_xe(pc));
	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	/* Refresh shared data before reading the max value out of it */
	ret = pc_action_query_task_state(pc);
	if (ret)
		goto out;

	*freq = pc_get_max_freq(pc);

out:
	mutex_unlock(&pc->freq_lock);
	xe_device_mem_access_put(pc_to_xe(pc));
	return ret;
}
583 
/**
 * xe_guc_pc_set_max_freq - Set the maximum operational frequency
 * @pc: The GuC PC
 * @freq: The selected maximum frequency value
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset),
 *         -EINVAL if value out of bounds.
 */
int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
{
	int ret;

	xe_device_mem_access_get(pc_to_xe(pc));
	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_set_max_freq(pc, freq);
	if (ret)
		goto out;

	/* Remember the choice so it can be restored after a GuC PC reset */
	pc->user_requested_max = freq;

out:
	mutex_unlock(&pc->freq_lock);
	xe_device_mem_access_put(pc_to_xe(pc));
	return ret;
}
616 
/**
 * xe_guc_pc_c_status - get the current GT C state
 * @pc: XE_GuC_PC instance
 *
 * Returns: GT_IDLE_C0 or GT_IDLE_C6 when the hardware reports one of those
 * states, GT_IDLE_UNKNOWN otherwise.
 */
enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg, gt_c_state;

	xe_device_mem_access_get(gt_to_xe(gt));

	/* The C-state field lives in different registers pre/post 12.70 */
	if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
		reg = xe_mmio_read32(gt, MTL_MIRROR_TARGET_WP1);
		gt_c_state = REG_FIELD_GET(MTL_CC_MASK, reg);
	} else {
		reg = xe_mmio_read32(gt, GT_CORE_STATUS);
		gt_c_state = REG_FIELD_GET(RCN_MASK, reg);
	}

	xe_device_mem_access_put(gt_to_xe(gt));

	switch (gt_c_state) {
	case GT_C6:
		return GT_IDLE_C6;
	case GT_C0:
		return GT_IDLE_C0;
	default:
		return GT_IDLE_UNKNOWN;
	}
}
647 
/**
 * xe_guc_pc_rc6_residency - rc6 residency counter
 * @pc: Xe_GuC_PC instance
 *
 * Returns: the raw 32-bit GT_GFX_RC6 counter value (hardware units).
 */
u64 xe_guc_pc_rc6_residency(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	xe_device_mem_access_get(gt_to_xe(gt));
	reg = xe_mmio_read32(gt, GT_GFX_RC6);
	xe_device_mem_access_put(gt_to_xe(gt));

	return reg;
}
663 
/**
 * xe_guc_pc_mc6_residency - mc6 residency counter
 * @pc: Xe_GuC_PC instance
 *
 * Returns: the raw 32-bit MTL_MEDIA_MC6 counter value (hardware units).
 */
u64 xe_guc_pc_mc6_residency(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u64 reg;

	xe_device_mem_access_get(gt_to_xe(gt));
	reg = xe_mmio_read32(gt, MTL_MEDIA_MC6);
	xe_device_mem_access_put(gt_to_xe(gt));

	return reg;
}
679 
/* Cache fused RP0/RPn from hardware on MTL-class (graphics ver >= 12.70) GTs. */
static void mtl_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	xe_device_assert_mem_access(pc_to_xe(pc));

	/* Media and render GTs expose the state caps in different registers */
	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(gt, MTL_MEDIAP_STATE_CAP);
	else
		reg = xe_mmio_read32(gt, MTL_RP_STATE_CAP);

	pc->rp0_freq = decode_freq(REG_FIELD_GET(MTL_RP0_CAP_MASK, reg));

	pc->rpn_freq = decode_freq(REG_FIELD_GET(MTL_RPN_CAP_MASK, reg));
}
696 
/* Cache fused RP0/RPn from hardware on pre-MTL platforms. */
static void tgl_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 reg;

	xe_device_assert_mem_access(pc_to_xe(pc));

	/* PVC exposes the state caps at a platform-specific offset */
	if (xe->info.platform == XE_PVC)
		reg = xe_mmio_read32(gt, PVC_RP_STATE_CAP);
	else
		reg = xe_mmio_read32(gt, RP_STATE_CAP);
	/* Caps here are in units of 50 MHz, no 50/3 scaler on these platforms */
	pc->rp0_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
	pc->rpn_freq = REG_FIELD_GET(RPN_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}
712 
/* Dispatch fused RP0/RPn initialization to the platform-specific helper. */
static void pc_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);

	if (GRAPHICS_VERx100(xe) >= 1270)
		mtl_init_fused_rp_values(pc);
	else
		tgl_init_fused_rp_values(pc);
}
723 
/**
 * xe_guc_pc_init_early - Initialize RPx values and request a higher GT
 * frequency to allow faster GuC load times
 * @pc: Xe_GuC_PC instance
 *
 * Caller must hold the GT forcewake domain (asserted below).
 */
void xe_guc_pc_init_early(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
	pc_init_fused_rp_values(pc);
	/* Request RP0 directly (SLPC is not up yet) to speed up GuC load */
	pc_set_cur_freq(pc, pc->rp0_freq);
}
737 
738 static int pc_adjust_freq_bounds(struct xe_guc_pc *pc)
739 {
740 	int ret;
741 
742 	lockdep_assert_held(&pc->freq_lock);
743 
744 	ret = pc_action_query_task_state(pc);
745 	if (ret)
746 		return ret;
747 
748 	/*
749 	 * GuC defaults to some RPmax that is not actually achievable without
750 	 * overclocking. Let's adjust it to the Hardware RP0, which is the
751 	 * regular maximum
752 	 */
753 	if (pc_get_max_freq(pc) > pc->rp0_freq)
754 		pc_set_max_freq(pc, pc->rp0_freq);
755 
756 	/*
757 	 * Same thing happens for Server platforms where min is listed as
758 	 * RPMax
759 	 */
760 	if (pc_get_min_freq(pc) > pc->rp0_freq)
761 		pc_set_min_freq(pc, pc->rp0_freq);
762 
763 	return 0;
764 }
765 
766 static int pc_adjust_requested_freq(struct xe_guc_pc *pc)
767 {
768 	int ret = 0;
769 
770 	lockdep_assert_held(&pc->freq_lock);
771 
772 	if (pc->user_requested_min != 0) {
773 		ret = pc_set_min_freq(pc, pc->user_requested_min);
774 		if (ret)
775 			return ret;
776 	}
777 
778 	if (pc->user_requested_max != 0) {
779 		ret = pc_set_max_freq(pc, pc->user_requested_max);
780 		if (ret)
781 			return ret;
782 	}
783 
784 	return ret;
785 }
786 
/**
 * xe_guc_pc_gucrc_disable - Disable GuC RC
 * @pc: Xe_GuC_PC instance
 *
 * Disables GuC RC by taking control of RC6 back from GuC.
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);
	struct xe_gt *gt = pc_to_gt(pc);
	int ret = 0;

	/* Nothing to hand back when GuC PC was never engaged */
	if (xe->info.skip_guc_pc)
		return 0;

	xe_device_mem_access_get(pc_to_xe(pc));

	/* First give RC control back to the host... */
	ret = pc_action_setup_gucrc(pc, XE_GUCRC_HOST_CONTROL);
	if (ret)
		goto out;

	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (ret)
		goto out;

	/* ...then actually turn off C6 while holding forcewake */
	xe_gt_idle_disable_c6(gt);

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));

out:
	xe_device_mem_access_put(pc_to_xe(pc));
	return ret;
}
822 
/* Hand PCODE the min/max table bounds, converted to units of 50 MHz. */
static void pc_init_pcode_freq(struct xe_guc_pc *pc)
{
	u32 min = DIV_ROUND_CLOSEST(pc->rpn_freq, GT_FREQUENCY_MULTIPLIER);
	u32 max = DIV_ROUND_CLOSEST(pc->rp0_freq, GT_FREQUENCY_MULTIPLIER);

	XE_WARN_ON(xe_pcode_init_min_freq_table(pc_to_gt(pc), min, max));
}
830 
/*
 * Bring the frequency state to a usable point after SLPC starts: clamp
 * GuC defaults, restore user requests, refresh RPe and seed PCODE.
 *
 * Return: 0 on success, or the first adjustment error.
 */
static int pc_init_freqs(struct xe_guc_pc *pc)
{
	int ret;

	mutex_lock(&pc->freq_lock);

	ret = pc_adjust_freq_bounds(pc);
	if (ret)
		goto out;

	ret = pc_adjust_requested_freq(pc);
	if (ret)
		goto out;

	pc_update_rp_values(pc);

	pc_init_pcode_freq(pc);

	/*
	 * The frequencies are really ready for use only after the user
	 * requested ones got restored.
	 */
	pc->freq_ready = true;

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}
859 
/**
 * xe_guc_pc_start - Start GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 *
 * Resets SLPC via the shared-data buffer, waits for it to reach the
 * RUNNING state, initializes the frequency bounds and (except on PVC)
 * hands Render-C control to the GuC firmware.
 *
 * Return: 0 on success, negative error code on failure.
 */
int xe_guc_pc_start(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);
	struct xe_gt *gt = pc_to_gt(pc);
	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
	int ret;

	xe_gt_assert(gt, xe_device_uc_enabled(xe));

	xe_device_mem_access_get(pc_to_xe(pc));

	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (ret)
		goto out_fail_force_wake;

	/* Without GuC PC, the host manages C6 and frequency directly */
	if (xe->info.skip_guc_pc) {
		if (xe->info.platform != XE_PVC)
			xe_gt_idle_enable_c6(gt);

		/* Request max possible since dynamic freq mgmt is not enabled */
		pc_set_cur_freq(pc, UINT_MAX);

		ret = 0;
		goto out;
	}

	/* GuC expects a zeroed shared-data buffer with only the size set */
	memset(pc->bo->vmap.vaddr, 0, size);
	slpc_shared_data_write(pc, header.size, size);

	ret = pc_action_reset(pc);
	if (ret)
		goto out;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING)) {
		drm_err(&pc_to_xe(pc)->drm, "GuC PC Start failed\n");
		ret = -EIO;
		goto out;
	}

	ret = pc_init_freqs(pc);
	if (ret)
		goto out;

	/* PVC does not use GuC-controlled Render-C states */
	if (xe->info.platform == XE_PVC) {
		xe_guc_pc_gucrc_disable(pc);
		ret = 0;
		goto out;
	}

	ret = pc_action_setup_gucrc(pc, XE_GUCRC_FIRMWARE_CONTROL);

out:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
out_fail_force_wake:
	xe_device_mem_access_put(pc_to_xe(pc));
	return ret;
}
921 
/**
 * xe_guc_pc_stop - Stop GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 *
 * Marks the frequency state as not ready, shuts SLPC down and waits for
 * it to report NOT_RUNNING.
 *
 * Return: 0 on success, negative error code on failure.
 */
int xe_guc_pc_stop(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);
	int ret;

	xe_device_mem_access_get(pc_to_xe(pc));

	if (xe->info.skip_guc_pc) {
		xe_gt_idle_disable_c6(pc_to_gt(pc));
		ret = 0;
		goto out;
	}

	/* Block freq sysfs/query paths (-EAGAIN) while SLPC goes down */
	mutex_lock(&pc->freq_lock);
	pc->freq_ready = false;
	mutex_unlock(&pc->freq_lock);

	ret = pc_action_shutdown(pc);
	if (ret)
		goto out;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_NOT_RUNNING)) {
		drm_err(&pc_to_xe(pc)->drm, "GuC PC Shutdown failed\n");
		ret = -EIO;
	}

out:
	xe_device_mem_access_put(pc_to_xe(pc));
	return ret;
}
956 
957 /**
958  * xe_guc_pc_fini - Finalize GuC's Power Conservation component
959  * @drm: DRM device
960  * @arg: opaque pointer that should point to Xe_GuC_PC instance
961  */
962 static void xe_guc_pc_fini(struct drm_device *drm, void *arg)
963 {
964 	struct xe_guc_pc *pc = arg;
965 	struct xe_device *xe = pc_to_xe(pc);
966 
967 	if (xe->info.skip_guc_pc) {
968 		xe_device_mem_access_get(xe);
969 		xe_gt_idle_disable_c6(pc_to_gt(pc));
970 		xe_device_mem_access_put(xe);
971 		return;
972 	}
973 
974 	xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL);
975 	XE_WARN_ON(xe_guc_pc_gucrc_disable(pc));
976 	XE_WARN_ON(xe_guc_pc_stop(pc));
977 	xe_force_wake_put(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL);
978 }
979 
/**
 * xe_guc_pc_init - Initialize GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 *
 * Allocates the SLPC shared-data buffer in GGTT (VRAM on discrete) and
 * registers the managed teardown action.
 *
 * Return: 0 on success, negative error code on failure.
 */
int xe_guc_pc_init(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_bo *bo;
	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
	int err;

	if (xe->info.skip_guc_pc)
		return 0;

	err = drmm_mutex_init(&xe->drm, &pc->freq_lock);
	if (err)
		return err;

	bo = xe_managed_bo_create_pin_map(xe, tile, size,
					  XE_BO_CREATE_VRAM_IF_DGFX(tile) |
					  XE_BO_CREATE_GGTT_BIT);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	pc->bo = bo;

	err = drmm_add_action_or_reset(&xe->drm, xe_guc_pc_fini, pc);
	if (err)
		return err;

	return 0;
}
1014