xref: /linux/drivers/gpu/drm/xe/xe_guc_pc.c (revision 9fd2da71c301184d98fe37674ca8d017d1ce6600)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include "xe_guc_pc.h"
7 
8 #include <linux/cleanup.h>
9 #include <linux/delay.h>
10 #include <linux/jiffies.h>
11 #include <linux/ktime.h>
12 #include <linux/wait_bit.h>
13 
14 #include <drm/drm_managed.h>
15 #include <drm/drm_print.h>
16 #include <generated/xe_wa_oob.h>
17 
18 #include "abi/guc_actions_slpc_abi.h"
19 #include "regs/xe_gt_regs.h"
20 #include "regs/xe_regs.h"
21 #include "xe_bo.h"
22 #include "xe_device.h"
23 #include "xe_force_wake.h"
24 #include "xe_gt.h"
25 #include "xe_gt_idle.h"
26 #include "xe_gt_printk.h"
27 #include "xe_gt_throttle.h"
28 #include "xe_gt_types.h"
29 #include "xe_guc.h"
30 #include "xe_guc_ct.h"
31 #include "xe_map.h"
32 #include "xe_mmio.h"
33 #include "xe_pcode.h"
34 #include "xe_pm.h"
35 #include "xe_sriov.h"
36 #include "xe_wa.h"
37 
38 #define MCHBAR_MIRROR_BASE_SNB	0x140000
39 
40 #define RP_STATE_CAP		XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5998)
41 #define   RP0_MASK		REG_GENMASK(7, 0)
42 #define   RP1_MASK		REG_GENMASK(15, 8)
43 #define   RPN_MASK		REG_GENMASK(23, 16)
44 
45 #define FREQ_INFO_REC	XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5ef0)
46 #define   RPE_MASK		REG_GENMASK(15, 8)
47 #define   RPA_MASK		REG_GENMASK(31, 16)
48 
49 #define GT_PERF_STATUS		XE_REG(0x1381b4)
50 #define   CAGF_MASK	REG_GENMASK(19, 11)
51 
52 #define GT_FREQUENCY_MULTIPLIER	50
53 #define GT_FREQUENCY_SCALER	3
54 
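/* Platform frequency caps and floors, in MHz, used by the workaround handling below */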
55 #define LNL_MERT_FREQ_CAP	800
56 #define BMG_MERT_FREQ_CAP	2133
57 #define BMG_MIN_FREQ		1200
58 #define BMG_MERT_FLUSH_FREQ_CAP	2600
59 
60 #define SLPC_RESET_TIMEOUT_MS 5 /* roughly 5ms, but no need for precision */
61 #define SLPC_RESET_EXTENDED_TIMEOUT_MS 1000 /* To be used only at pc_start */
62 #define SLPC_ACT_FREQ_TIMEOUT_MS 100
63 
64 /**
65  * DOC: GuC Power Conservation (PC)
66  *
67  * GuC Power Conservation (PC) supports multiple features for the most
68  * efficient and performant use of the GT when GuC submission is enabled,
69  * including frequency management, Render-C states management, and various
70  * algorithms for power balancing.
71  *
72  * Single Loop Power Conservation (SLPC) is the name given to the suite of
73  * connected power conservation features in the GuC firmware. The firmware
74  * exposes a programming interface to the host for the control of SLPC.
75  *
76  * Frequency management:
77  * =====================
78  *
79  * The Xe driver enables SLPC with all of its default features and with the
80  * default frequency selection, which varies per platform.
81  *
82  * Render-C States:
83  * ================
84  *
85  * Render-C states are also a GuC PC feature, now enabled in Xe for
86  * all platforms.
87  *
88  */
89 
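/*
 * Rough lifecycle of this component as used by the rest of the driver (an
 * informal sketch based on the functions below, not a stable contract):
 *
 *   xe_guc_pc_init()         - allocate the SLPC shared data buffer
 *   xe_guc_pc_init_early()   - read the fused RP0/RPn frequencies
 *   xe_guc_pc_start()        - reset SLPC, set up frequencies and GuC RC
 *   xe_guc_pc_set_*_freq()   - runtime tuning (e.g. from sysfs)
 *   xe_guc_pc_stop()         - quiesce on GT reset
 *   xe_guc_pc_fini_hw()      - hand RC6 control back to the host on unload
 */
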
90 static struct xe_guc *pc_to_guc(struct xe_guc_pc *pc)
91 {
92 	return container_of(pc, struct xe_guc, pc);
93 }
94 
95 static struct xe_guc_ct *pc_to_ct(struct xe_guc_pc *pc)
96 {
97 	return &pc_to_guc(pc)->ct;
98 }
99 
100 static struct xe_gt *pc_to_gt(struct xe_guc_pc *pc)
101 {
102 	return guc_to_gt(pc_to_guc(pc));
103 }
104 
105 static struct xe_device *pc_to_xe(struct xe_guc_pc *pc)
106 {
107 	return guc_to_xe(pc_to_guc(pc));
108 }
109 
110 static struct iosys_map *pc_to_maps(struct xe_guc_pc *pc)
111 {
112 	return &pc->bo->vmap;
113 }
114 
115 #define slpc_shared_data_read(pc_, field_) \
116 	xe_map_rd_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
117 			struct slpc_shared_data, field_)
118 
119 #define slpc_shared_data_write(pc_, field_, val_) \
120 	xe_map_wr_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
121 			struct slpc_shared_data, field_, val_)
122 
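/*
 * SLPC_EVENT() builds the second dword of a HOST2GUC_PC_SLPC_REQUEST message,
 * packing the SLPC event id together with the number of arguments that follow.
 */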
123 #define SLPC_EVENT(id, count) \
124 	(FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID, id) | \
125 	 FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, count))
126 
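/*
 * Poll the SLPC global state in the shared data page until it reaches the
 * requested state, backing off exponentially (starting at 10 us) until
 * roughly timeout_ms has elapsed.
 */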
127 static int wait_for_pc_state(struct xe_guc_pc *pc,
128 			     enum slpc_global_state state,
129 			     int timeout_ms)
130 {
131 	int timeout_us = 1000 * timeout_ms;
132 	int slept, wait = 10;
133 
134 	xe_device_assert_mem_access(pc_to_xe(pc));
135 
136 	for (slept = 0; slept < timeout_us;) {
137 		if (slpc_shared_data_read(pc, header.global_state) == state)
138 			return 0;
139 
140 		usleep_range(wait, wait << 1);
141 		slept += wait;
142 		wait <<= 1;
143 		if (slept + wait > timeout_us)
144 			wait = timeout_us - slept;
145 	}
146 
147 	return -ETIMEDOUT;
148 }
149 
150 static int wait_for_flush_complete(struct xe_guc_pc *pc)
151 {
152 	const unsigned long timeout = msecs_to_jiffies(30);
153 
154 	if (!wait_var_event_timeout(&pc->flush_freq_limit,
155 				    !atomic_read(&pc->flush_freq_limit),
156 				    timeout))
157 		return -ETIMEDOUT;
158 
159 	return 0;
160 }
161 
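/*
 * Poll the actual frequency until it drops to or below @freq, using the same
 * exponential backoff as wait_for_pc_state().
 */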
162 static int wait_for_act_freq_limit(struct xe_guc_pc *pc, u32 freq)
163 {
164 	int timeout_us = SLPC_ACT_FREQ_TIMEOUT_MS * USEC_PER_MSEC;
165 	int slept, wait = 10;
166 
167 	for (slept = 0; slept < timeout_us;) {
168 		if (xe_guc_pc_get_act_freq(pc) <= freq)
169 			return 0;
170 
171 		usleep_range(wait, wait << 1);
172 		slept += wait;
173 		wait <<= 1;
174 		if (slept + wait > timeout_us)
175 			wait = timeout_us - slept;
176 	}
177 
178 	return -ETIMEDOUT;
179 }

180 static int pc_action_reset(struct xe_guc_pc *pc)
181 {
182 	struct xe_guc_ct *ct = pc_to_ct(pc);
183 	u32 action[] = {
184 		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
185 		SLPC_EVENT(SLPC_EVENT_RESET, 2),
186 		xe_bo_ggtt_addr(pc->bo),
187 		0,
188 	};
189 	int ret;
190 
191 	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
192 	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
193 		xe_gt_err(pc_to_gt(pc), "GuC PC reset failed: %pe\n",
194 			  ERR_PTR(ret));
195 
196 	return ret;
197 }
198 
199 static int pc_action_query_task_state(struct xe_guc_pc *pc)
200 {
201 	struct xe_guc_ct *ct = pc_to_ct(pc);
202 	u32 action[] = {
203 		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
204 		SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
205 		xe_bo_ggtt_addr(pc->bo),
206 		0,
207 	};
208 	int ret;
209 
210 	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
211 			      SLPC_RESET_TIMEOUT_MS))
212 		return -EAGAIN;
213 
214 	/* Blocking here to ensure the results are ready before reading them */
215 	ret = xe_guc_ct_send_block(ct, action, ARRAY_SIZE(action));
216 	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
217 		xe_gt_err(pc_to_gt(pc), "GuC PC query task state failed: %pe\n",
218 			  ERR_PTR(ret));
219 
220 	return ret;
221 }
222 
223 static int pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value)
224 {
225 	struct xe_guc_ct *ct = pc_to_ct(pc);
226 	u32 action[] = {
227 		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
228 		SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
229 		id,
230 		value,
231 	};
232 	int ret;
233 
234 	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
235 			      SLPC_RESET_TIMEOUT_MS))
236 		return -EAGAIN;
237 
238 	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
239 	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
240 		xe_gt_err(pc_to_gt(pc), "GuC PC set param[%u]=%u failed: %pe\n",
241 			  id, value, ERR_PTR(ret));
242 
243 	return ret;
244 }
245 
246 static int pc_action_unset_param(struct xe_guc_pc *pc, u8 id)
247 {
248 	u32 action[] = {
249 		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
250 		SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1),
251 		id,
252 	};
253 	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
254 	int ret;
255 
256 	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
257 			      SLPC_RESET_TIMEOUT_MS))
258 		return -EAGAIN;
259 
260 	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
261 	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
262 		xe_gt_err(pc_to_gt(pc), "GuC PC unset param failed: %pe\n",
263 			  ERR_PTR(ret));
264 
265 	return ret;
266 }
267 
268 static int pc_action_setup_gucrc(struct xe_guc_pc *pc, u32 mode)
269 {
270 	struct xe_guc_ct *ct = pc_to_ct(pc);
271 	u32 action[] = {
272 		GUC_ACTION_HOST2GUC_SETUP_PC_GUCRC,
273 		mode,
274 	};
275 	int ret;
276 
277 	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
278 	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
279 		xe_gt_err(pc_to_gt(pc), "GuC RC enable mode=%u failed: %pe\n",
280 			  mode, ERR_PTR(ret));

281 	return ret;
282 }
283 
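/*
 * SLPC and the hardware frequency registers express frequency as a ratio in
 * units of GT_FREQUENCY_MULTIPLIER / GT_FREQUENCY_SCALER MHz (50/3, i.e.
 * roughly 16.67 MHz per step). decode_freq() converts such a ratio to MHz and
 * encode_freq() does the reverse.
 */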
284 static u32 decode_freq(u32 raw)
285 {
286 	return DIV_ROUND_CLOSEST(raw * GT_FREQUENCY_MULTIPLIER,
287 				 GT_FREQUENCY_SCALER);
288 }
289 
290 static u32 encode_freq(u32 freq)
291 {
292 	return DIV_ROUND_CLOSEST(freq * GT_FREQUENCY_SCALER,
293 				 GT_FREQUENCY_MULTIPLIER);
294 }
295 
296 static u32 pc_get_min_freq(struct xe_guc_pc *pc)
297 {
298 	u32 freq;
299 
300 	freq = FIELD_GET(SLPC_MIN_UNSLICE_FREQ_MASK,
301 			 slpc_shared_data_read(pc, task_state_data.freq));
302 
303 	return decode_freq(freq);
304 }
305 
306 static void pc_set_manual_rp_ctrl(struct xe_guc_pc *pc, bool enable)
307 {
308 	struct xe_gt *gt = pc_to_gt(pc);
309 	u32 state = enable ? RPSWCTL_ENABLE : RPSWCTL_DISABLE;
310 
311 	/* Allow/Disallow punit to process software freq requests */
312 	xe_mmio_write32(&gt->mmio, RP_CONTROL, state);
313 }
314 
315 static void pc_set_cur_freq(struct xe_guc_pc *pc, u32 freq)
316 {
317 	struct xe_gt *gt = pc_to_gt(pc);
318 	u32 rpnswreq;
319 
320 	pc_set_manual_rp_ctrl(pc, true);
321 
322 	/* Req freq is in units of 16.67 MHz */
323 	rpnswreq = REG_FIELD_PREP(REQ_RATIO_MASK, encode_freq(freq));
324 	xe_mmio_write32(&gt->mmio, RPNSWREQ, rpnswreq);
325 
326 	/* Sleep for a small time to allow pcode to respond */
327 	usleep_range(100, 300);
328 
329 	pc_set_manual_rp_ctrl(pc, false);
330 }
331 
332 static int pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
333 {
334 	/*
335 	 * Let's only check for the rpn-rp0 range. If max < min,
336 	 * min becomes a fixed request.
337 	 */
338 	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
339 		return -EINVAL;
340 
341 	/*
342 	 * GuC policy is to elevate the minimum frequency to the efficient levels.
343 	 * Our goal is to have the admin's choices respected.
344 	 */
345 	pc_action_set_param(pc, SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
346 			    freq < pc->rpe_freq);
347 
348 	return pc_action_set_param(pc,
349 				   SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
350 				   freq);
351 }
352 
353 static int pc_get_max_freq(struct xe_guc_pc *pc)
354 {
355 	u32 freq;
356 
357 	freq = FIELD_GET(SLPC_MAX_UNSLICE_FREQ_MASK,
358 			 slpc_shared_data_read(pc, task_state_data.freq));
359 
360 	return decode_freq(freq);
361 }
362 
363 static int pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
364 {
365 	/*
366 	 * Let's only check for the rpn-rp0 range. If max < min,
367 	 * min becomes a fixed request.
368 	 * Also, overclocking is not supported.
369 	 */
370 	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
371 		return -EINVAL;
372 
373 	return pc_action_set_param(pc,
374 				   SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
375 				   freq);
376 }
377 
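/*
 * RPx terminology used below: RP0 is the fused maximum frequency, RPn the
 * fused minimum, and RPe the "efficient" frequency resolved at runtime by
 * PCODE. RPa is an additional PCODE-reported reference point, approximated
 * by the fused RP0 on PVC.
 */
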
378 static void mtl_update_rpa_value(struct xe_guc_pc *pc)
379 {
380 	struct xe_gt *gt = pc_to_gt(pc);
381 	u32 reg;
382 
383 	if (xe_gt_is_media_type(gt))
384 		reg = xe_mmio_read32(&gt->mmio, MTL_MPA_FREQUENCY);
385 	else
386 		reg = xe_mmio_read32(&gt->mmio, MTL_GT_RPA_FREQUENCY);
387 
388 	pc->rpa_freq = decode_freq(REG_FIELD_GET(MTL_RPA_MASK, reg));
389 }
390 
391 static void mtl_update_rpe_value(struct xe_guc_pc *pc)
392 {
393 	struct xe_gt *gt = pc_to_gt(pc);
394 	u32 reg;
395 
396 	if (xe_gt_is_media_type(gt))
397 		reg = xe_mmio_read32(&gt->mmio, MTL_MPE_FREQUENCY);
398 	else
399 		reg = xe_mmio_read32(&gt->mmio, MTL_GT_RPE_FREQUENCY);
400 
401 	pc->rpe_freq = decode_freq(REG_FIELD_GET(MTL_RPE_MASK, reg));
402 }
403 
404 static void tgl_update_rpa_value(struct xe_guc_pc *pc)
405 {
406 	struct xe_gt *gt = pc_to_gt(pc);
407 	struct xe_device *xe = gt_to_xe(gt);
408 	u32 reg;
409 
410 	/*
411 	 * For PVC we still need to use the fused RP0 as the approximation for RPa.
412 	 * For platforms other than PVC we get the resolved RPa directly from
413 	 * PCODE, at a different register.
414 	 */
415 	if (xe->info.platform == XE_PVC) {
416 		reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
417 		pc->rpa_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
418 	} else {
419 		reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC);
420 		pc->rpa_freq = REG_FIELD_GET(RPA_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
421 	}
422 }
423 
424 static void tgl_update_rpe_value(struct xe_guc_pc *pc)
425 {
426 	struct xe_gt *gt = pc_to_gt(pc);
427 	struct xe_device *xe = gt_to_xe(gt);
428 	u32 reg;
429 
430 	/*
431 	 * For PVC we still need to use the fused RP1 as the approximation for RPe.
432 	 * For platforms other than PVC we get the resolved RPe directly from
433 	 * PCODE, at a different register.
434 	 */
435 	if (xe->info.platform == XE_PVC) {
436 		reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
437 		pc->rpe_freq = REG_FIELD_GET(RP1_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
438 	} else {
439 		reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC);
440 		pc->rpe_freq = REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
441 	}
442 }
443 
444 static void pc_update_rp_values(struct xe_guc_pc *pc)
445 {
446 	struct xe_gt *gt = pc_to_gt(pc);
447 	struct xe_device *xe = gt_to_xe(gt);
448 
449 	if (GRAPHICS_VERx100(xe) >= 1270) {
450 		mtl_update_rpa_value(pc);
451 		mtl_update_rpe_value(pc);
452 	} else {
453 		tgl_update_rpa_value(pc);
454 		tgl_update_rpe_value(pc);
455 	}
456 
457 	/*
458 	 * RPe is decided at runtime by PCODE. In the rare case where that's
459 	 * smaller than the fused min, we will trust the PCODE and use that
460 	 * as our minimum.
461 	 */
462 	pc->rpn_freq = min(pc->rpn_freq, pc->rpe_freq);
463 }
464 
465 /**
466  * xe_guc_pc_get_act_freq - Get the actual running frequency
467  * @pc: The GuC PC
468  *
469  * Returns: The actual running frequency, which might be 0 if the GT is in a Render-C sleep state (RC6).
470  */
471 u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc)
472 {
473 	struct xe_gt *gt = pc_to_gt(pc);
474 	struct xe_device *xe = gt_to_xe(gt);
475 	u32 freq;
476 
477 	/* When in RC6, actual frequency reported will be 0. */
478 	if (GRAPHICS_VERx100(xe) >= 1270) {
479 		freq = xe_mmio_read32(&gt->mmio, MTL_MIRROR_TARGET_WP1);
480 		freq = REG_FIELD_GET(MTL_CAGF_MASK, freq);
481 	} else {
482 		freq = xe_mmio_read32(&gt->mmio, GT_PERF_STATUS);
483 		freq = REG_FIELD_GET(CAGF_MASK, freq);
484 	}
485 
486 	freq = decode_freq(freq);
487 
488 	return freq;
489 }
490 
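/* Read back the last software frequency request (RPNSWREQ), in MHz */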
491 static u32 get_cur_freq(struct xe_gt *gt)
492 {
493 	u32 freq;
494 
495 	freq = xe_mmio_read32(&gt->mmio, RPNSWREQ);
496 	freq = REG_FIELD_GET(REQ_RATIO_MASK, freq);
497 	return decode_freq(freq);
498 }
499 
500 /**
501  * xe_guc_pc_get_cur_freq_fw - With fw held, get requested frequency
502  * @pc: The GuC PC
503  *
504  * Returns: the requested frequency for that GT instance
505  */
506 u32 xe_guc_pc_get_cur_freq_fw(struct xe_guc_pc *pc)
507 {
508 	struct xe_gt *gt = pc_to_gt(pc);
509 
510 	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
511 
512 	return get_cur_freq(gt);
513 }
514 
515 /**
516  * xe_guc_pc_get_cur_freq - Get Current requested frequency
517  * @pc: The GuC PC
518  * @freq: A pointer to a u32 where the freq value will be returned
519  *
520  * Returns: 0 on success,
521  *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
522  */
523 int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq)
524 {
525 	struct xe_gt *gt = pc_to_gt(pc);
526 	unsigned int fw_ref;
527 
528 	/*
529 	 * GuC SLPC plays with the cur freq request when GuCRC is enabled.
530 	 * Block RC6 for a more reliable read.
531 	 */
532 	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
533 	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT)) {
534 		xe_force_wake_put(gt_to_fw(gt), fw_ref);
535 		return -ETIMEDOUT;
536 	}
537 
538 	*freq = get_cur_freq(gt);
539 
540 	xe_force_wake_put(gt_to_fw(gt), fw_ref);
541 	return 0;
542 }
543 
544 /**
545  * xe_guc_pc_get_rp0_freq - Get the RP0 freq
546  * @pc: The GuC PC
547  *
548  * Returns: RP0 freq.
549  */
550 u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc)
551 {
552 	return pc->rp0_freq;
553 }
554 
555 /**
556  * xe_guc_pc_get_rpa_freq - Get the RPa freq
557  * @pc: The GuC PC
558  *
559  * Returns: RPa freq.
560  */
561 u32 xe_guc_pc_get_rpa_freq(struct xe_guc_pc *pc)
562 {
563 	pc_update_rp_values(pc);
564 
565 	return pc->rpa_freq;
566 }
567 
568 /**
569  * xe_guc_pc_get_rpe_freq - Get the RPe freq
570  * @pc: The GuC PC
571  *
572  * Returns: RPe freq.
573  */
574 u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc)
575 {
576 	pc_update_rp_values(pc);
577 
578 	return pc->rpe_freq;
579 }
580 
581 /**
582  * xe_guc_pc_get_rpn_freq - Get the RPn freq
583  * @pc: The GuC PC
584  *
585  * Returns: RPn freq.
586  */
587 u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc)
588 {
589 	return pc->rpn_freq;
590 }
591 
592 static int xe_guc_pc_get_min_freq_locked(struct xe_guc_pc *pc, u32 *freq)
593 {
594 	int ret;
595 
596 	lockdep_assert_held(&pc->freq_lock);
597 
598 	/* Might be in the middle of a gt reset */
599 	if (!pc->freq_ready)
600 		return -EAGAIN;
601 
602 	ret = pc_action_query_task_state(pc);
603 	if (ret)
604 		return ret;
605 
606 	*freq = pc_get_min_freq(pc);
607 
608 	return 0;
609 }
610 
611 /**
612  * xe_guc_pc_get_min_freq - Get the min operational frequency
613  * @pc: The GuC PC
614  * @freq: A pointer to a u32 where the freq value will be returned
615  *
616  * Returns: 0 on success,
617  *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
618  */
619 int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq)
620 {
621 	guard(mutex)(&pc->freq_lock);
622 
623 	return xe_guc_pc_get_min_freq_locked(pc, freq);
624 }
625 
626 static int xe_guc_pc_set_min_freq_locked(struct xe_guc_pc *pc, u32 freq)
627 {
628 	int ret;
629 
630 	lockdep_assert_held(&pc->freq_lock);
631 
632 	/* Might be in the middle of a gt reset */
633 	if (!pc->freq_ready)
634 		return -EAGAIN;
635 
636 	ret = pc_set_min_freq(pc, freq);
637 	if (ret)
638 		return ret;
639 
640 	pc->user_requested_min = freq;
641 
642 	return 0;
643 }
644 
645 /**
646  * xe_guc_pc_set_min_freq - Set the minimum operational frequency
647  * @pc: The GuC PC
648  * @freq: The selected minimal frequency
649  *
650  * Returns: 0 on success,
651  *         -EAGAIN if GuC PC not ready (likely in middle of a reset),
652  *         -EINVAL if value out of bounds.
653  */
654 int xe_guc_pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
655 {
656 	guard(mutex)(&pc->freq_lock);
657 
658 	return xe_guc_pc_set_min_freq_locked(pc, freq);
659 }
660 
661 static int xe_guc_pc_get_max_freq_locked(struct xe_guc_pc *pc, u32 *freq)
662 {
663 	int ret;
664 
665 	lockdep_assert_held(&pc->freq_lock);
666 
667 	/* Might be in the middle of a gt reset */
668 	if (!pc->freq_ready)
669 		return -EAGAIN;
670 
671 	ret = pc_action_query_task_state(pc);
672 	if (ret)
673 		return ret;
674 
675 	*freq = pc_get_max_freq(pc);
676 
677 	return 0;
678 }
679 
680 /**
681  * xe_guc_pc_get_max_freq - Get Maximum operational frequency
682  * @pc: The GuC PC
683  * @freq: A pointer to a u32 where the freq value will be returned
684  *
685  * Returns: 0 on success,
686  *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
687  */
688 int xe_guc_pc_get_max_freq(struct xe_guc_pc *pc, u32 *freq)
689 {
690 	guard(mutex)(&pc->freq_lock);
691 
692 	return xe_guc_pc_get_max_freq_locked(pc, freq);
693 }
694 
695 static int xe_guc_pc_set_max_freq_locked(struct xe_guc_pc *pc, u32 freq)
696 {
697 	int ret;
698 
699 	lockdep_assert_held(&pc->freq_lock);
700 
701 	/* Might be in the middle of a gt reset */
702 	if (!pc->freq_ready)
703 		return -EAGAIN;
704 
705 	ret = pc_set_max_freq(pc, freq);
706 	if (ret)
707 		return ret;
708 
709 	pc->user_requested_max = freq;
710 
711 	return 0;
712 }
713 
714 /**
715  * xe_guc_pc_set_max_freq - Set the maximum operational frequency
716  * @pc: The GuC PC
717  * @freq: The selected maximum frequency value
718  *
719  * Returns: 0 on success,
720  *         -EAGAIN if GuC PC not ready (likely in middle of a reset),
721  *         -EINVAL if value out of bounds.
722  */
723 int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
724 {
725 	if (XE_GT_WA(pc_to_gt(pc), 22019338487)) {
726 		if (wait_for_flush_complete(pc) != 0)
727 			return -EAGAIN;
728 	}
729 
730 	guard(mutex)(&pc->freq_lock);
731 
732 	return xe_guc_pc_set_max_freq_locked(pc, freq);
733 }
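
/*
 * Illustrative sketch (pin_gt_freq() is hypothetical, not part of this file):
 * pinning the GT to a fixed frequency amounts to setting max and min to the
 * same value, e.g.:
 *
 *	static int pin_gt_freq(struct xe_guc_pc *pc, u32 freq)
 *	{
 *		int ret = xe_guc_pc_set_max_freq(pc, freq);
 *
 *		return ret ?: xe_guc_pc_set_min_freq(pc, freq);
 *	}
 *
 * Both setters return -EINVAL if freq falls outside the [RPn, RP0] range and
 * -EAGAIN while a GT reset is in flight.
 */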
734 
735 /**
736  * xe_guc_pc_c_status - get the current GT C state
737  * @pc: Xe_GuC_PC instance
 *
 * Return: the current GT C state (GT_IDLE_C0, GT_IDLE_C6 or GT_IDLE_UNKNOWN).
738  */
739 enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc)
740 {
741 	struct xe_gt *gt = pc_to_gt(pc);
742 	u32 reg, gt_c_state;
743 
744 	if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
745 		reg = xe_mmio_read32(&gt->mmio, MTL_MIRROR_TARGET_WP1);
746 		gt_c_state = REG_FIELD_GET(MTL_CC_MASK, reg);
747 	} else {
748 		reg = xe_mmio_read32(&gt->mmio, GT_CORE_STATUS);
749 		gt_c_state = REG_FIELD_GET(RCN_MASK, reg);
750 	}
751 
752 	switch (gt_c_state) {
753 	case GT_C6:
754 		return GT_IDLE_C6;
755 	case GT_C0:
756 		return GT_IDLE_C0;
757 	default:
758 		return GT_IDLE_UNKNOWN;
759 	}
760 }
761 
762 /**
763  * xe_guc_pc_rc6_residency - rc6 residency counter
764  * @pc: Xe_GuC_PC instance
 *
 * Return: the raw GT_GFX_RC6 residency counter value.
765  */
766 u64 xe_guc_pc_rc6_residency(struct xe_guc_pc *pc)
767 {
768 	struct xe_gt *gt = pc_to_gt(pc);
769 	u32 reg;
770 
771 	reg = xe_mmio_read32(&gt->mmio, GT_GFX_RC6);
772 
773 	return reg;
774 }
775 
776 /**
777  * xe_guc_pc_mc6_residency - mc6 residency counter
778  * @pc: Xe_GuC_PC instance
 *
 * Return: the raw MTL_MEDIA_MC6 residency counter value.
779  */
780 u64 xe_guc_pc_mc6_residency(struct xe_guc_pc *pc)
781 {
782 	struct xe_gt *gt = pc_to_gt(pc);
783 	u64 reg;
784 
785 	reg = xe_mmio_read32(&gt->mmio, MTL_MEDIA_MC6);
786 
787 	return reg;
788 }
789 
790 static void mtl_init_fused_rp_values(struct xe_guc_pc *pc)
791 {
792 	struct xe_gt *gt = pc_to_gt(pc);
793 	u32 reg;
794 
795 	xe_device_assert_mem_access(pc_to_xe(pc));
796 
797 	if (xe_gt_is_media_type(gt))
798 		reg = xe_mmio_read32(&gt->mmio, MTL_MEDIAP_STATE_CAP);
799 	else
800 		reg = xe_mmio_read32(&gt->mmio, MTL_RP_STATE_CAP);
801 
802 	pc->rp0_freq = decode_freq(REG_FIELD_GET(MTL_RP0_CAP_MASK, reg));
803 
804 	pc->rpn_freq = decode_freq(REG_FIELD_GET(MTL_RPN_CAP_MASK, reg));
805 }
806 
807 static void tgl_init_fused_rp_values(struct xe_guc_pc *pc)
808 {
809 	struct xe_gt *gt = pc_to_gt(pc);
810 	struct xe_device *xe = gt_to_xe(gt);
811 	u32 reg;
812 
813 	xe_device_assert_mem_access(pc_to_xe(pc));
814 
815 	if (xe->info.platform == XE_PVC)
816 		reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
817 	else
818 		reg = xe_mmio_read32(&gt->mmio, RP_STATE_CAP);
819 	pc->rp0_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
820 	pc->rpn_freq = REG_FIELD_GET(RPN_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
821 }
822 
823 static void pc_init_fused_rp_values(struct xe_guc_pc *pc)
824 {
825 	struct xe_gt *gt = pc_to_gt(pc);
826 	struct xe_device *xe = gt_to_xe(gt);
827 
828 	if (GRAPHICS_VERx100(xe) >= 1270)
829 		mtl_init_fused_rp_values(pc);
830 	else
831 		tgl_init_fused_rp_values(pc);
832 }
833 
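/*
 * Maximum frequency the driver will request: the MERT frequency cap (media
 * vs. render value) when workaround 22019338487 applies, RP0 otherwise.
 */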
834 static u32 pc_max_freq_cap(struct xe_guc_pc *pc)
835 {
836 	struct xe_gt *gt = pc_to_gt(pc);
837 
838 	if (XE_GT_WA(gt, 22019338487)) {
839 		if (xe_gt_is_media_type(gt))
840 			return min(LNL_MERT_FREQ_CAP, pc->rp0_freq);
841 		else
842 			return min(BMG_MERT_FREQ_CAP, pc->rp0_freq);
843 	} else {
844 		return pc->rp0_freq;
845 	}
846 }
847 
848 /**
849  * xe_guc_pc_raise_unslice - Request a higher GT frequency to allow faster
850  * GuC load times
851  * @pc: Xe_GuC_PC instance
852  */
853 void xe_guc_pc_raise_unslice(struct xe_guc_pc *pc)
854 {
855 	struct xe_gt *gt = pc_to_gt(pc);
856 
857 	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
858 	pc_set_cur_freq(pc, pc_max_freq_cap(pc));
859 }
860 
861 /**
862  * xe_guc_pc_init_early - Initialize RPx values
863  * @pc: Xe_GuC_PC instance
864  */
865 void xe_guc_pc_init_early(struct xe_guc_pc *pc)
866 {
867 	struct xe_gt *gt = pc_to_gt(pc);
868 
869 	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
870 	pc_init_fused_rp_values(pc);
871 }
872 
873 static int pc_adjust_freq_bounds(struct xe_guc_pc *pc)
874 {
875 	struct xe_tile *tile = gt_to_tile(pc_to_gt(pc));
876 	int ret;
877 
878 	lockdep_assert_held(&pc->freq_lock);
879 
880 	ret = pc_action_query_task_state(pc);
881 	if (ret)
882 		goto out;
883 
884 	/*
885 	 * GuC defaults to some RPmax that is not actually achievable without
886 	 * overclocking. Let's adjust it to the Hardware RP0, which is the
887 	 * regular maximum
888 	 */
889 	if (pc_get_max_freq(pc) > pc->rp0_freq) {
890 		ret = pc_set_max_freq(pc, pc->rp0_freq);
891 		if (ret)
892 			goto out;
893 	}
894 
895 	/*
896 	 * The same thing happens on server platforms, where the min is listed
897 	 * as RPmax.
898 	 */
899 	if (pc_get_min_freq(pc) > pc->rp0_freq)
900 		ret = pc_set_min_freq(pc, pc->rp0_freq);
901 
902 	if (XE_GT_WA(tile->primary_gt, 14022085890))
903 		ret = pc_set_min_freq(pc, max(BMG_MIN_FREQ, pc_get_min_freq(pc)));
904 
905 out:
906 	return ret;
907 }
908 
909 static int pc_adjust_requested_freq(struct xe_guc_pc *pc)
910 {
911 	int ret = 0;
912 
913 	lockdep_assert_held(&pc->freq_lock);
914 
915 	if (pc->user_requested_min != 0) {
916 		ret = pc_set_min_freq(pc, pc->user_requested_min);
917 		if (ret)
918 			return ret;
919 	}
920 
921 	if (pc->user_requested_max != 0) {
922 		ret = pc_set_max_freq(pc, pc->user_requested_max);
923 		if (ret)
924 			return ret;
925 	}
926 
927 	return ret;
928 }
929 
930 static bool needs_flush_freq_limit(struct xe_guc_pc *pc)
931 {
932 	struct xe_gt *gt = pc_to_gt(pc);
933 
934 	return  XE_GT_WA(gt, 22019338487) &&
935 		pc->rp0_freq > BMG_MERT_FLUSH_FREQ_CAP;
936 }
937 
938 /**
939  * xe_guc_pc_apply_flush_freq_limit() - Limit max GT freq during L2 flush
940  * @pc: the xe_guc_pc object
941  *
942  * As per the WA, reduce max GT frequency during L2 cache flush
943  */
944 void xe_guc_pc_apply_flush_freq_limit(struct xe_guc_pc *pc)
945 {
946 	struct xe_gt *gt = pc_to_gt(pc);
947 	u32 max_freq;
948 	int ret;
949 
950 	if (!needs_flush_freq_limit(pc))
951 		return;
952 
953 	guard(mutex)(&pc->freq_lock);
954 
955 	ret = xe_guc_pc_get_max_freq_locked(pc, &max_freq);
956 	if (!ret && max_freq > BMG_MERT_FLUSH_FREQ_CAP) {
957 		ret = pc_set_max_freq(pc, BMG_MERT_FLUSH_FREQ_CAP);
958 		if (ret) {
959 			xe_gt_err_once(gt, "Failed to cap max freq on flush to %u, %pe\n",
960 				       BMG_MERT_FLUSH_FREQ_CAP, ERR_PTR(ret));
961 			return;
962 		}
963 
964 		atomic_set(&pc->flush_freq_limit, 1);
965 
966 		/*
967 		 * If user has previously changed max freq, stash that value to
968 		 * restore later, otherwise use the current max. New user
969 		 * requests wait on flush.
970 		 */
971 		if (pc->user_requested_max != 0)
972 			pc->stashed_max_freq = pc->user_requested_max;
973 		else
974 			pc->stashed_max_freq = max_freq;
975 	}
976 
977 	/*
978 	 * Wait for actual freq to go below the flush cap: even if the previous
979 	 * max was below cap, the current one might still be above it
980 	 */
981 	ret = wait_for_act_freq_limit(pc, BMG_MERT_FLUSH_FREQ_CAP);
982 	if (ret)
983 		xe_gt_err_once(gt, "Actual freq did not reduce to %u, %pe\n",
984 			       BMG_MERT_FLUSH_FREQ_CAP, ERR_PTR(ret));
985 }
986 
987 /**
988  * xe_guc_pc_remove_flush_freq_limit() - Remove max GT freq limit after L2 flush completes.
989  * @pc: the xe_guc_pc object
990  *
991  * Restore the previous GT max frequency value.
992  */
993 void xe_guc_pc_remove_flush_freq_limit(struct xe_guc_pc *pc)
994 {
995 	struct xe_gt *gt = pc_to_gt(pc);
996 	int ret = 0;
997 
998 	if (!needs_flush_freq_limit(pc))
999 		return;
1000 
1001 	if (!atomic_read(&pc->flush_freq_limit))
1002 		return;
1003 
1004 	mutex_lock(&pc->freq_lock);
1005 
1006 	ret = pc_set_max_freq(pc, pc->stashed_max_freq);
1007 	if (ret)
1008 		xe_gt_err_once(gt, "Failed to restore max freq %u:%d\n",
1009 			       pc->stashed_max_freq, ret);
1010 
1011 	atomic_set(&pc->flush_freq_limit, 0);
1012 	mutex_unlock(&pc->freq_lock);
1013 	wake_up_var(&pc->flush_freq_limit);
1014 }
1015 
1016 static int pc_set_mert_freq_cap(struct xe_guc_pc *pc)
1017 {
1018 	int ret;
1019 
1020 	if (!XE_GT_WA(pc_to_gt(pc), 22019338487))
1021 		return 0;
1022 
1023 	guard(mutex)(&pc->freq_lock);
1024 
1025 	/*
1026 	 * Get updated min/max and stash them.
1027 	 */
1028 	ret = xe_guc_pc_get_min_freq_locked(pc, &pc->stashed_min_freq);
1029 	if (!ret)
1030 		ret = xe_guc_pc_get_max_freq_locked(pc, &pc->stashed_max_freq);
1031 	if (ret)
1032 		return ret;
1033 
1034 	/*
1035 	 * Ensure min and max are bound by MERT_FREQ_CAP until driver loads.
1036 	 */
1037 	ret = pc_set_min_freq(pc, min(pc->rpe_freq, pc_max_freq_cap(pc)));
1038 	if (!ret)
1039 		ret = pc_set_max_freq(pc, min(pc->rp0_freq, pc_max_freq_cap(pc)));
1040 
1041 	return ret;
1042 }
1043 
1044 /**
1045  * xe_guc_pc_restore_stashed_freq - Set min/max back to stashed values
1046  * @pc: The GuC PC
1047  *
1048  * Returns: 0 on success,
1049  *          error code on failure
1050  */
1051 int xe_guc_pc_restore_stashed_freq(struct xe_guc_pc *pc)
1052 {
1053 	int ret = 0;
1054 
1055 	if (IS_SRIOV_VF(pc_to_xe(pc)) || pc_to_xe(pc)->info.skip_guc_pc)
1056 		return 0;
1057 
1058 	mutex_lock(&pc->freq_lock);
1059 	ret = pc_set_max_freq(pc, pc->stashed_max_freq);
1060 	if (!ret)
1061 		ret = pc_set_min_freq(pc, pc->stashed_min_freq);
1062 	mutex_unlock(&pc->freq_lock);
1063 
1064 	return ret;
1065 }
1066 
1067 /**
1068  * xe_guc_pc_gucrc_disable - Disable GuC RC
1069  * @pc: Xe_GuC_PC instance
1070  *
1071  * Disables GuC RC by taking control of RC6 back from GuC.
1072  *
1073  * Return: 0 on success, negative error code on error.
1074  */
1075 int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc)
1076 {
1077 	struct xe_device *xe = pc_to_xe(pc);
1078 	struct xe_gt *gt = pc_to_gt(pc);
1079 	int ret = 0;
1080 
1081 	if (xe->info.skip_guc_pc)
1082 		return 0;
1083 
1084 	ret = pc_action_setup_gucrc(pc, GUCRC_HOST_CONTROL);
1085 	if (ret)
1086 		return ret;
1087 
1088 	return xe_gt_idle_disable_c6(gt);
1089 }
1090 
1091 /**
1092  * xe_guc_pc_override_gucrc_mode - override GUCRC mode
1093  * @pc: Xe_GuC_PC instance
1094  * @mode: new value of the mode.
1095  *
1096  * Return: 0 on success, negative error code on error
1097  */
1098 int xe_guc_pc_override_gucrc_mode(struct xe_guc_pc *pc, enum slpc_gucrc_mode mode)
1099 {
1100 	int ret;
1101 
1102 	xe_pm_runtime_get(pc_to_xe(pc));
1103 	ret = pc_action_set_param(pc, SLPC_PARAM_PWRGATE_RC_MODE, mode);
1104 	xe_pm_runtime_put(pc_to_xe(pc));
1105 
1106 	return ret;
1107 }
1108 
1109 /**
1110  * xe_guc_pc_unset_gucrc_mode - unset GUCRC mode override
1111  * @pc: Xe_GuC_PC instance
1112  *
1113  * Return: 0 on success, negative error code on error
1114  */
1115 int xe_guc_pc_unset_gucrc_mode(struct xe_guc_pc *pc)
1116 {
1117 	int ret;
1118 
1119 	xe_pm_runtime_get(pc_to_xe(pc));
1120 	ret = pc_action_unset_param(pc, SLPC_PARAM_PWRGATE_RC_MODE);
1121 	xe_pm_runtime_put(pc_to_xe(pc));
1122 
1123 	return ret;
1124 }
1125 
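/*
 * Program the pcode min-frequency table with the fused RPn/RP0 bounds,
 * converted from MHz to units of GT_FREQUENCY_MULTIPLIER (50 MHz).
 */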
1126 static void pc_init_pcode_freq(struct xe_guc_pc *pc)
1127 {
1128 	u32 min = DIV_ROUND_CLOSEST(pc->rpn_freq, GT_FREQUENCY_MULTIPLIER);
1129 	u32 max = DIV_ROUND_CLOSEST(pc->rp0_freq, GT_FREQUENCY_MULTIPLIER);
1130 
1131 	XE_WARN_ON(xe_pcode_init_min_freq_table(gt_to_tile(pc_to_gt(pc)), min, max));
1132 }
1133 
1134 static int pc_init_freqs(struct xe_guc_pc *pc)
1135 {
1136 	int ret;
1137 
1138 	mutex_lock(&pc->freq_lock);
1139 
1140 	ret = pc_adjust_freq_bounds(pc);
1141 	if (ret)
1142 		goto out;
1143 
1144 	ret = pc_adjust_requested_freq(pc);
1145 	if (ret)
1146 		goto out;
1147 
1148 	pc_update_rp_values(pc);
1149 
1150 	pc_init_pcode_freq(pc);
1151 
1152 	/*
1153 	 * The frequencies are really ready for use only after the
1154 	 * user-requested ones have been restored.
1155 	 */
1156 	pc->freq_ready = true;
1157 
1158 out:
1159 	mutex_unlock(&pc->freq_lock);
1160 	return ret;
1161 }
1162 
1163 static int pc_action_set_strategy(struct xe_guc_pc *pc, u32 val)
1164 {
1165 	int ret = 0;
1166 
1167 	ret = pc_action_set_param(pc,
1168 				  SLPC_PARAM_STRATEGIES,
1169 				  val);
1170 
1171 	return ret;
1172 }
1173 
1174 /**
1175  * xe_guc_pc_start - Start GuC's Power Conservation component
1176  * @pc: Xe_GuC_PC instance
 *
 * Return: 0 on success, negative error code on error.
1177  */
1178 int xe_guc_pc_start(struct xe_guc_pc *pc)
1179 {
1180 	struct xe_device *xe = pc_to_xe(pc);
1181 	struct xe_gt *gt = pc_to_gt(pc);
1182 	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
1183 	unsigned int fw_ref;
1184 	ktime_t earlier;
1185 	int ret;
1186 
1187 	xe_gt_assert(gt, xe_device_uc_enabled(xe));
1188 
1189 	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
1190 	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT)) {
1191 		xe_force_wake_put(gt_to_fw(gt), fw_ref);
1192 		return -ETIMEDOUT;
1193 	}
1194 
1195 	if (xe->info.skip_guc_pc) {
1196 		if (xe->info.platform != XE_PVC)
1197 			xe_gt_idle_enable_c6(gt);
1198 
1199 		/* Request max possible since dynamic freq mgmt is not enabled */
1200 		pc_set_cur_freq(pc, UINT_MAX);
1201 
1202 		ret = 0;
1203 		goto out;
1204 	}
1205 
1206 	xe_map_memset(xe, &pc->bo->vmap, 0, 0, size);
1207 	slpc_shared_data_write(pc, header.size, size);
1208 
1209 	earlier = ktime_get();
1210 	ret = pc_action_reset(pc);
1211 	if (ret)
1212 		goto out;
1213 
1214 	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
1215 			      SLPC_RESET_TIMEOUT_MS)) {
1216 		xe_gt_warn(gt, "GuC PC start taking longer than normal [freq = %dMHz (req = %dMHz), perf_limit_reasons = 0x%08X]\n",
1217 			   xe_guc_pc_get_act_freq(pc), get_cur_freq(gt),
1218 			   xe_gt_throttle_get_limit_reasons(gt));
1219 
1220 		if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
1221 				      SLPC_RESET_EXTENDED_TIMEOUT_MS)) {
1222 			xe_gt_err(gt, "GuC PC Start failed: Dynamic GT frequency control and GT sleep states are now disabled.\n");
1223 			ret = -EIO;
1224 			goto out;
1225 		}
1226 
1227 		xe_gt_warn(gt, "GuC PC excessive start time: %lldms\n",
1228 			   ktime_ms_delta(ktime_get(), earlier));
1229 	}
1230 
1231 	ret = pc_init_freqs(pc);
1232 	if (ret)
1233 		goto out;
1234 
1235 	ret = pc_set_mert_freq_cap(pc);
1236 	if (ret)
1237 		goto out;
1238 
1239 	if (xe->info.platform == XE_PVC) {
1240 		xe_guc_pc_gucrc_disable(pc);
1241 		ret = 0;
1242 		goto out;
1243 	}
1244 
1245 	ret = pc_action_setup_gucrc(pc, GUCRC_FIRMWARE_CONTROL);
1246 	if (ret)
1247 		goto out;
1248 
1249 	/* Enable SLPC Optimized Strategy for compute */
1250 	ret = pc_action_set_strategy(pc, SLPC_OPTIMIZED_STRATEGY_COMPUTE);
1251 
1252 out:
1253 	xe_force_wake_put(gt_to_fw(gt), fw_ref);
1254 	return ret;
1255 }
1256 
1257 /**
1258  * xe_guc_pc_stop - Stop GuC's Power Conservation component
1259  * @pc: Xe_GuC_PC instance
1260  */
1261 int xe_guc_pc_stop(struct xe_guc_pc *pc)
1262 {
1263 	struct xe_device *xe = pc_to_xe(pc);
1264 
1265 	if (xe->info.skip_guc_pc) {
1266 		xe_gt_idle_disable_c6(pc_to_gt(pc));
1267 		return 0;
1268 	}
1269 
1270 	mutex_lock(&pc->freq_lock);
1271 	pc->freq_ready = false;
1272 	mutex_unlock(&pc->freq_lock);
1273 
1274 	return 0;
1275 }
1276 
1277 /**
1278  * xe_guc_pc_fini_hw - Finalize GuC's Power Conservation component
1279  * @arg: opaque pointer that should point to Xe_GuC_PC instance
1280  */
1281 static void xe_guc_pc_fini_hw(void *arg)
1282 {
1283 	struct xe_guc_pc *pc = arg;
1284 	struct xe_device *xe = pc_to_xe(pc);
1285 	unsigned int fw_ref;
1286 
1287 	if (xe_device_wedged(xe))
1288 		return;
1289 
1290 	fw_ref = xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL);
1291 	xe_guc_pc_gucrc_disable(pc);
1292 	XE_WARN_ON(xe_guc_pc_stop(pc));
1293 
1294 	/* Clamp the requested freq to the MERT freq cap before unload */
1295 	pc_set_cur_freq(pc, min(pc_max_freq_cap(pc), pc->rpe_freq));
1296 
1297 	xe_force_wake_put(gt_to_fw(pc_to_gt(pc)), fw_ref);
1298 }
1299 
1300 /**
1301  * xe_guc_pc_init - Initialize GuC's Power Conservation component
1302  * @pc: Xe_GuC_PC instance
 *
 * Return: 0 on success, negative error code on error.
1303  */
1304 int xe_guc_pc_init(struct xe_guc_pc *pc)
1305 {
1306 	struct xe_gt *gt = pc_to_gt(pc);
1307 	struct xe_tile *tile = gt_to_tile(gt);
1308 	struct xe_device *xe = gt_to_xe(gt);
1309 	struct xe_bo *bo;
1310 	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
1311 	int err;
1312 
1313 	if (xe->info.skip_guc_pc)
1314 		return 0;
1315 
1316 	err = drmm_mutex_init(&xe->drm, &pc->freq_lock);
1317 	if (err)
1318 		return err;
1319 
1320 	bo = xe_managed_bo_create_pin_map(xe, tile, size,
1321 					  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
1322 					  XE_BO_FLAG_GGTT |
1323 					  XE_BO_FLAG_GGTT_INVALIDATE |
1324 					  XE_BO_FLAG_PINNED_NORESTORE);
1325 	if (IS_ERR(bo))
1326 		return PTR_ERR(bo);
1327 
1328 	pc->bo = bo;
1329 
1330 	return devm_add_action_or_reset(xe->drm.dev, xe_guc_pc_fini_hw, pc);
1331 }
1332 
1333 static const char *pc_get_state_string(struct xe_guc_pc *pc)
1334 {
1335 	switch (slpc_shared_data_read(pc, header.global_state)) {
1336 	case SLPC_GLOBAL_STATE_NOT_RUNNING:
1337 		return "not running";
1338 	case SLPC_GLOBAL_STATE_INITIALIZING:
1339 		return "initializing";
1340 	case SLPC_GLOBAL_STATE_RESETTING:
1341 		return "resetting";
1342 	case SLPC_GLOBAL_STATE_RUNNING:
1343 		return "running";
1344 	case SLPC_GLOBAL_STATE_SHUTTING_DOWN:
1345 		return "shutting down";
1346 	case SLPC_GLOBAL_STATE_ERROR:
1347 		return "error";
1348 	default:
1349 		return "unknown";
1350 	}
1351 }
1352 
1353 /**
1354  * xe_guc_pc_print - Print GuC's Power Conservation information for debug
1355  * @pc: Xe_GuC_PC instance
1356  * @p: drm_printer
1357  */
1358 void xe_guc_pc_print(struct xe_guc_pc *pc, struct drm_printer *p)
1359 {
1360 	drm_printf(p, "SLPC Shared Data Header:\n");
1361 	drm_printf(p, "\tSize: %x\n", slpc_shared_data_read(pc, header.size));
1362 	drm_printf(p, "\tGlobal State: %s\n", pc_get_state_string(pc));
1363 
1364 	if (pc_action_query_task_state(pc))
1365 		return;
1366 
1367 	drm_printf(p, "\nSLPC Tasks Status:\n");
1368 	drm_printf(p, "\tGTPERF enabled: %s\n",
1369 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1370 			      SLPC_GTPERF_TASK_ENABLED));
1371 	drm_printf(p, "\tDCC enabled: %s\n",
1372 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1373 			      SLPC_DCC_TASK_ENABLED));
1374 	drm_printf(p, "\tDCC in use: %s\n",
1375 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1376 			      SLPC_IN_DCC));
1377 	drm_printf(p, "\tBalancer enabled: %s\n",
1378 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1379 			      SLPC_BALANCER_ENABLED));
1380 	drm_printf(p, "\tIBC enabled: %s\n",
1381 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1382 			      SLPC_IBC_TASK_ENABLED));
1383 	drm_printf(p, "\tBalancer IA LMT enabled: %s\n",
1384 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1385 			      SLPC_BALANCER_IA_LMT_ENABLED));
1386 	drm_printf(p, "\tBalancer IA LMT active: %s\n",
1387 		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
1388 			      SLPC_BALANCER_IA_LMT_ACTIVE));
1389 }
1390