// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_guc_pc.h"

#include <linux/delay.h>
#include <linux/ktime.h>

#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <generated/xe_wa_oob.h>

#include "abi/guc_actions_slpc_abi.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_gt_idle.h"
#include "xe_gt_printk.h"
#include "xe_gt_throttle.h"
#include "xe_gt_types.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_pcode.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_wa.h"

#define MCHBAR_MIRROR_BASE_SNB	0x140000

#define RP_STATE_CAP		XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5998)
#define   RP0_MASK		REG_GENMASK(7, 0)
#define   RP1_MASK		REG_GENMASK(15, 8)
#define   RPN_MASK		REG_GENMASK(23, 16)

#define FREQ_INFO_REC	XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5ef0)
#define   RPE_MASK		REG_GENMASK(15, 8)
#define   RPA_MASK		REG_GENMASK(31, 16)

#define GT_PERF_STATUS		XE_REG(0x1381b4)
#define   CAGF_MASK	REG_GENMASK(19, 11)

#define GT_FREQUENCY_MULTIPLIER	50
#define GT_FREQUENCY_SCALER	3
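
/*
 * SLPC and the frequency registers express GT frequency as a ratio in
 * units of GT_FREQUENCY_MULTIPLIER / GT_FREQUENCY_SCALER MHz, i.e.
 * 50/3 MHz (~16.67 MHz). decode_freq() and encode_freq() below convert
 * between the raw ratio and MHz; e.g. a raw value of 18 decodes to 300 MHz.
 */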

#define LNL_MERT_FREQ_CAP	800
#define BMG_MERT_FREQ_CAP	2133

#define SLPC_RESET_TIMEOUT_MS 5 /* roughly 5ms, but no need for precision */
#define SLPC_RESET_EXTENDED_TIMEOUT_MS 1000 /* To be used only at pc_start */

/**
 * DOC: GuC Power Conservation (PC)
 *
 * GuC Power Conservation (PC) supports multiple features for the most
 * efficient and performant use of the GT when GuC submission is enabled,
 * including frequency management, Render-C states management, and various
 * algorithms for power balancing.
 *
 * Single Loop Power Conservation (SLPC) is the name given to the suite of
 * connected power conservation features in the GuC firmware. The firmware
 * exposes a programming interface to the host for the control of SLPC.
 *
 * Frequency management:
 * =====================
 *
 * The Xe driver enables SLPC with all of its default features and frequency
 * selection, which varies per platform.
 *
 * Render-C States:
 * ================
 *
 * Render-C states are also a GuC PC feature and are now enabled in Xe for
 * all platforms.
 *
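 * Driver flow:
 * ============
 *
 * xe_guc_pc_init() allocates the SLPC shared data buffer, xe_guc_pc_start()
 * resets SLPC and waits for it to reach the running state, and
 * xe_guc_pc_stop() / xe_guc_pc_fini_hw() hand control back to the host on
 * unload. Throughout this file, RP0 is the maximum frequency, RPe the
 * efficient one, RPn the minimum one, and RPa the achievable one.
 *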
 */

static struct xe_guc *pc_to_guc(struct xe_guc_pc *pc)
{
	return container_of(pc, struct xe_guc, pc);
}

static struct xe_guc_ct *pc_to_ct(struct xe_guc_pc *pc)
{
	return &pc_to_guc(pc)->ct;
}

static struct xe_gt *pc_to_gt(struct xe_guc_pc *pc)
{
	return guc_to_gt(pc_to_guc(pc));
}

static struct xe_device *pc_to_xe(struct xe_guc_pc *pc)
{
	return guc_to_xe(pc_to_guc(pc));
}

static struct iosys_map *pc_to_maps(struct xe_guc_pc *pc)
{
	return &pc->bo->vmap;
}

#define slpc_shared_data_read(pc_, field_) \
	xe_map_rd_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
			struct slpc_shared_data, field_)

#define slpc_shared_data_write(pc_, field_, val_) \
	xe_map_wr_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
			struct slpc_shared_data, field_, val_)

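/*
 * SLPC requests are sent to GuC as H2G actions; dword 1 of the message
 * packs the SLPC event id together with the number of arguments that
 * follow it.
 */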
#define SLPC_EVENT(id, count) \
	(FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID, id) | \
	 FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, count))

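/*
 * Poll the SLPC global state in the shared data blob until it reaches
 * @state, backing off exponentially from 10 us, and give up after
 * roughly @timeout_ms.
 */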
static int wait_for_pc_state(struct xe_guc_pc *pc,
			     enum slpc_global_state state,
			     int timeout_ms)
{
	int timeout_us = 1000 * timeout_ms;
	int slept, wait = 10;

	xe_device_assert_mem_access(pc_to_xe(pc));

	for (slept = 0; slept < timeout_us;) {
		if (slpc_shared_data_read(pc, header.global_state) == state)
			return 0;

		usleep_range(wait, wait << 1);
		slept += wait;
		wait <<= 1;
		if (slept + wait > timeout_us)
			wait = timeout_us - slept;
	}

	return -ETIMEDOUT;
}

static int pc_action_reset(struct xe_guc_pc *pc)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_RESET, 2),
		xe_bo_ggtt_addr(pc->bo),
		0,
	};
	int ret;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		xe_gt_err(pc_to_gt(pc), "GuC PC reset failed: %pe\n",
			  ERR_PTR(ret));

	return ret;
}

static int pc_action_query_task_state(struct xe_guc_pc *pc)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
		xe_bo_ggtt_addr(pc->bo),
		0,
	};
	int ret;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
			      SLPC_RESET_TIMEOUT_MS))
		return -EAGAIN;

	/* Blocking here to ensure the results are ready before reading them */
	ret = xe_guc_ct_send_block(ct, action, ARRAY_SIZE(action));
	if (ret)
		xe_gt_err(pc_to_gt(pc), "GuC PC query task state failed: %pe\n",
			  ERR_PTR(ret));

	return ret;
}

static int pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
		id,
		value,
	};
	int ret;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
			      SLPC_RESET_TIMEOUT_MS))
		return -EAGAIN;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		xe_gt_err(pc_to_gt(pc), "GuC PC set param[%u]=%u failed: %pe\n",
			  id, value, ERR_PTR(ret));

	return ret;
}

static int pc_action_unset_param(struct xe_guc_pc *pc, u8 id)
{
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1),
		id,
	};
	struct xe_guc_ct *ct = pc_to_ct(pc);
	int ret;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
			      SLPC_RESET_TIMEOUT_MS))
		return -EAGAIN;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		xe_gt_err(pc_to_gt(pc), "GuC PC unset param failed: %pe\n",
			  ERR_PTR(ret));

	return ret;
}

static int pc_action_setup_gucrc(struct xe_guc_pc *pc, u32 mode)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_SETUP_PC_GUCRC,
		mode,
	};
	int ret;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		xe_gt_err(pc_to_gt(pc), "GuC RC enable mode=%u failed: %pe\n",
			  mode, ERR_PTR(ret));
	return ret;
}

static u32 decode_freq(u32 raw)
{
	return DIV_ROUND_CLOSEST(raw * GT_FREQUENCY_MULTIPLIER,
				 GT_FREQUENCY_SCALER);
}

static u32 encode_freq(u32 freq)
{
	return DIV_ROUND_CLOSEST(freq * GT_FREQUENCY_SCALER,
				 GT_FREQUENCY_MULTIPLIER);
}

static u32 pc_get_min_freq(struct xe_guc_pc *pc)
{
	u32 freq;

	freq = FIELD_GET(SLPC_MIN_UNSLICE_FREQ_MASK,
			 slpc_shared_data_read(pc, task_state_data.freq));

	return decode_freq(freq);
}

static void pc_set_manual_rp_ctrl(struct xe_guc_pc *pc, bool enable)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 state = enable ? RPSWCTL_ENABLE : RPSWCTL_DISABLE;

	/* Allow/Disallow punit to process software freq requests */
	xe_mmio_write32(&gt->mmio, RP_CONTROL, state);
}

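/*
 * Write a frequency request directly to the hardware, bypassing SLPC.
 * Only meaningful while GuC/SLPC is not in control of the frequency,
 * e.g. before GuC load or when GuC PC is skipped entirely.
 */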
static void pc_set_cur_freq(struct xe_guc_pc *pc, u32 freq)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 rpnswreq;

	pc_set_manual_rp_ctrl(pc, true);

	/* Req freq is in units of 16.67 MHz */
	rpnswreq = REG_FIELD_PREP(REQ_RATIO_MASK, encode_freq(freq));
	xe_mmio_write32(&gt->mmio, RPNSWREQ, rpnswreq);

	/* Sleep for a small time to allow pcode to respond */
	usleep_range(100, 300);

	pc_set_manual_rp_ctrl(pc, false);
}

static int pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
{
	/*
	 * Let's only check for the rpn-rp0 range. If max < min,
	 * min becomes a fixed request.
	 */
	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
		return -EINVAL;

	/*
	 * GuC policy is to elevate minimum frequency to the efficient levels.
	 * Our goal is to have the admin choices respected.
	 */
	pc_action_set_param(pc, SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
			    freq < pc->rpe_freq);

	return pc_action_set_param(pc,
				   SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
				   freq);
}

static int pc_get_max_freq(struct xe_guc_pc *pc)
{
	u32 freq;

	freq = FIELD_GET(SLPC_MAX_UNSLICE_FREQ_MASK,
			 slpc_shared_data_read(pc, task_state_data.freq));

	return decode_freq(freq);
}

static int pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
{
	/*
	 * Let's only check for the rpn-rp0 range. If max < min,
	 * min becomes a fixed request.
	 * Also, overclocking is not supported.
	 */
	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
		return -EINVAL;

	return pc_action_set_param(pc,
				   SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
				   freq);
}

static void mtl_update_rpa_value(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(&gt->mmio, MTL_MPA_FREQUENCY);
	else
		reg = xe_mmio_read32(&gt->mmio, MTL_GT_RPA_FREQUENCY);

	pc->rpa_freq = decode_freq(REG_FIELD_GET(MTL_RPA_MASK, reg));
}

static void mtl_update_rpe_value(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(&gt->mmio, MTL_MPE_FREQUENCY);
	else
		reg = xe_mmio_read32(&gt->mmio, MTL_GT_RPE_FREQUENCY);

	pc->rpe_freq = decode_freq(REG_FIELD_GET(MTL_RPE_MASK, reg));
}

static void tgl_update_rpa_value(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 reg;

	/*
	 * For PVC we still need to use fused RP0 as the approximation for RPa.
	 * For platforms other than PVC we get the resolved RPa directly from
	 * PCODE at a different register.
	 */
	if (xe->info.platform == XE_PVC) {
		reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
		pc->rpa_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
	} else {
		reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC);
		pc->rpa_freq = REG_FIELD_GET(RPA_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
	}
}

static void tgl_update_rpe_value(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 reg;

	/*
	 * For PVC we still need to use fused RP1 as the approximation for RPe.
	 * For platforms other than PVC we get the resolved RPe directly from
	 * PCODE at a different register.
	 */
	if (xe->info.platform == XE_PVC) {
		reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
		pc->rpe_freq = REG_FIELD_GET(RP1_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
	} else {
		reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC);
		pc->rpe_freq = REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
	}
}

static void pc_update_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);

	if (GRAPHICS_VERx100(xe) >= 1270) {
		mtl_update_rpa_value(pc);
		mtl_update_rpe_value(pc);
	} else {
		tgl_update_rpa_value(pc);
		tgl_update_rpe_value(pc);
	}

	/*
	 * RPe is decided at runtime by PCODE. In the rare case where that's
	 * smaller than the fused min, we will trust the PCODE and use that
	 * as our minimum one.
	 */
	pc->rpn_freq = min(pc->rpn_freq, pc->rpe_freq);
}

/**
 * xe_guc_pc_get_act_freq - Get Actual running frequency
 * @pc: The GuC PC
 *
 * Returns: The actual running frequency, which might be 0 if the GT is in the
 * Render-C sleep state (RC6).
 */
u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 freq;

	/* When in RC6, actual frequency reported will be 0. */
	if (GRAPHICS_VERx100(xe) >= 1270) {
		freq = xe_mmio_read32(&gt->mmio, MTL_MIRROR_TARGET_WP1);
		freq = REG_FIELD_GET(MTL_CAGF_MASK, freq);
	} else {
		freq = xe_mmio_read32(&gt->mmio, GT_PERF_STATUS);
		freq = REG_FIELD_GET(CAGF_MASK, freq);
	}

	freq = decode_freq(freq);

	return freq;
}

static u32 get_cur_freq(struct xe_gt *gt)
{
	u32 freq;

	freq = xe_mmio_read32(&gt->mmio, RPNSWREQ);
	freq = REG_FIELD_GET(REQ_RATIO_MASK, freq);
	return decode_freq(freq);
}

/**
 * xe_guc_pc_get_cur_freq_fw - With fw held, get requested frequency
 * @pc: The GuC PC
 *
 * Returns: the requested frequency for that GT instance
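 *
 * A sketch of a caller acquiring the required forcewake domain (the
 * &gt->uc.guc.pc path below is illustrative):
 *
 * .. code-block:: c
 *
 *	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
 *	if (xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT))
 *		freq = xe_guc_pc_get_cur_freq_fw(&gt->uc.guc.pc);
 *	xe_force_wake_put(gt_to_fw(gt), fw_ref);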
 */
u32 xe_guc_pc_get_cur_freq_fw(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	return get_cur_freq(gt);
}

/**
 * xe_guc_pc_get_cur_freq - Get Current requested frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
 */
int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq)
{
	struct xe_gt *gt = pc_to_gt(pc);
	unsigned int fw_ref;

	/*
	 * GuC SLPC plays with cur freq request when GuCRC is enabled
	 * Block RC6 for a more reliable read.
	 */
	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT)) {
		xe_force_wake_put(gt_to_fw(gt), fw_ref);
		return -ETIMEDOUT;
	}

	*freq = get_cur_freq(gt);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	return 0;
}

/**
 * xe_guc_pc_get_rp0_freq - Get the RP0 freq
 * @pc: The GuC PC
 *
 * Returns: RP0 freq.
 */
u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc)
{
	return pc->rp0_freq;
}

/**
 * xe_guc_pc_get_rpa_freq - Get the RPa freq
 * @pc: The GuC PC
 *
 * Returns: RPa freq.
 */
u32 xe_guc_pc_get_rpa_freq(struct xe_guc_pc *pc)
{
	pc_update_rp_values(pc);

	return pc->rpa_freq;
}

/**
 * xe_guc_pc_get_rpe_freq - Get the RPe freq
 * @pc: The GuC PC
 *
 * Returns: RPe freq.
 */
u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc)
{
	pc_update_rp_values(pc);

	return pc->rpe_freq;
}

/**
 * xe_guc_pc_get_rpn_freq - Get the RPn freq
 * @pc: The GuC PC
 *
 * Returns: RPn freq.
 */
u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc)
{
	return pc->rpn_freq;
}

/**
 * xe_guc_pc_get_min_freq - Get the min operational frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
 */
int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq)
{
	int ret;

	xe_device_assert_mem_access(pc_to_xe(pc));

	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_action_query_task_state(pc);
	if (ret)
		goto out;

	*freq = pc_get_min_freq(pc);

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

/**
 * xe_guc_pc_set_min_freq - Set the minimal operational frequency
 * @pc: The GuC PC
 * @freq: The selected minimal frequency
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset),
 *         -EINVAL if value out of bounds.
 */
int xe_guc_pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
{
	int ret;

	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_set_min_freq(pc, freq);
	if (ret)
		goto out;

	pc->user_requested_min = freq;

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

/**
 * xe_guc_pc_get_max_freq - Get Maximum operational frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
 */
int xe_guc_pc_get_max_freq(struct xe_guc_pc *pc, u32 *freq)
{
	int ret;

	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_action_query_task_state(pc);
	if (ret)
		goto out;

	*freq = pc_get_max_freq(pc);

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

/**
 * xe_guc_pc_set_max_freq - Set the maximum operational frequency
 * @pc: The GuC PC
 * @freq: The selected maximum frequency value
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset),
 *         -EINVAL if value out of bounds.
 */
int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
{
	int ret;

	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_set_max_freq(pc, freq);
	if (ret)
		goto out;

	pc->user_requested_max = freq;

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

/**
 * xe_guc_pc_c_status - get the current GT C state
 * @pc: XE_GuC_PC instance
 */
enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg, gt_c_state;

	if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
		reg = xe_mmio_read32(&gt->mmio, MTL_MIRROR_TARGET_WP1);
		gt_c_state = REG_FIELD_GET(MTL_CC_MASK, reg);
	} else {
		reg = xe_mmio_read32(&gt->mmio, GT_CORE_STATUS);
		gt_c_state = REG_FIELD_GET(RCN_MASK, reg);
	}

	switch (gt_c_state) {
	case GT_C6:
		return GT_IDLE_C6;
	case GT_C0:
		return GT_IDLE_C0;
	default:
		return GT_IDLE_UNKNOWN;
	}
}

/**
 * xe_guc_pc_rc6_residency - rc6 residency counter
 * @pc: Xe_GuC_PC instance
 */
u64 xe_guc_pc_rc6_residency(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	reg = xe_mmio_read32(&gt->mmio, GT_GFX_RC6);

	return reg;
}

/**
 * xe_guc_pc_mc6_residency - mc6 residency counter
 * @pc: Xe_GuC_PC instance
 */
u64 xe_guc_pc_mc6_residency(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u64 reg;

	reg = xe_mmio_read32(&gt->mmio, MTL_MEDIA_MC6);

	return reg;
}

static void mtl_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	xe_device_assert_mem_access(pc_to_xe(pc));

	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(&gt->mmio, MTL_MEDIAP_STATE_CAP);
	else
		reg = xe_mmio_read32(&gt->mmio, MTL_RP_STATE_CAP);

	pc->rp0_freq = decode_freq(REG_FIELD_GET(MTL_RP0_CAP_MASK, reg));

	pc->rpn_freq = decode_freq(REG_FIELD_GET(MTL_RPN_CAP_MASK, reg));
}

static void tgl_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 reg;

	xe_device_assert_mem_access(pc_to_xe(pc));

	if (xe->info.platform == XE_PVC)
		reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
	else
		reg = xe_mmio_read32(&gt->mmio, RP_STATE_CAP);
	pc->rp0_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
	pc->rpn_freq = REG_FIELD_GET(RPN_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}

static void pc_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);

	if (GRAPHICS_VERx100(xe) >= 1270)
		mtl_init_fused_rp_values(pc);
	else
		tgl_init_fused_rp_values(pc);
}

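/*
 * Maximum frequency the driver may request: with workaround 22019338487
 * active this is capped to a GT-type specific MERT limit, otherwise the
 * fused RP0 is the ceiling.
 */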
static u32 pc_max_freq_cap(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	if (XE_WA(gt, 22019338487)) {
		if (xe_gt_is_media_type(gt))
			return min(LNL_MERT_FREQ_CAP, pc->rp0_freq);
		else
			return min(BMG_MERT_FREQ_CAP, pc->rp0_freq);
	} else {
		return pc->rp0_freq;
	}
}

/**
 * xe_guc_pc_raise_unslice - Request a higher unslice (GT) frequency to allow
 * faster GuC load times
 * @pc: Xe_GuC_PC instance
 */
void xe_guc_pc_raise_unslice(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
	pc_set_cur_freq(pc, pc_max_freq_cap(pc));
}

/**
 * xe_guc_pc_init_early - Initialize RPx values
 * @pc: Xe_GuC_PC instance
 */
void xe_guc_pc_init_early(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
	pc_init_fused_rp_values(pc);
}

static int pc_adjust_freq_bounds(struct xe_guc_pc *pc)
{
	int ret;

	lockdep_assert_held(&pc->freq_lock);

	ret = pc_action_query_task_state(pc);
	if (ret)
		goto out;

	/*
	 * GuC defaults to some RPmax that is not actually achievable without
	 * overclocking. Let's adjust it to the Hardware RP0, which is the
	 * regular maximum.
	 */
	if (pc_get_max_freq(pc) > pc->rp0_freq) {
		ret = pc_set_max_freq(pc, pc->rp0_freq);
		if (ret)
			goto out;
	}

	/*
	 * The same thing happens on server platforms, where the min is listed
	 * as RPMax.
	 */
	if (pc_get_min_freq(pc) > pc->rp0_freq)
		ret = pc_set_min_freq(pc, pc->rp0_freq);

out:
	return ret;
}

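/*
 * Re-apply any min/max previously requested by the user, e.g. after a GT
 * reset has brought SLPC back up with its defaults.
 */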
static int pc_adjust_requested_freq(struct xe_guc_pc *pc)
{
	int ret = 0;

	lockdep_assert_held(&pc->freq_lock);

	if (pc->user_requested_min != 0) {
		ret = pc_set_min_freq(pc, pc->user_requested_min);
		if (ret)
			return ret;
	}

	if (pc->user_requested_max != 0) {
		ret = pc_set_max_freq(pc, pc->user_requested_max);
		if (ret)
			return ret;
	}

	return ret;
}

static int pc_set_mert_freq_cap(struct xe_guc_pc *pc)
{
	int ret = 0;

	if (XE_WA(pc_to_gt(pc), 22019338487)) {
		/*
		 * Get updated min/max and stash them.
		 */
		ret = xe_guc_pc_get_min_freq(pc, &pc->stashed_min_freq);
		if (!ret)
			ret = xe_guc_pc_get_max_freq(pc, &pc->stashed_max_freq);
		if (ret)
			return ret;

		/*
		 * Ensure min and max are bound by MERT_FREQ_CAP until driver loads.
		 */
		mutex_lock(&pc->freq_lock);
		ret = pc_set_min_freq(pc, min(pc->rpe_freq, pc_max_freq_cap(pc)));
		if (!ret)
			ret = pc_set_max_freq(pc, min(pc->rp0_freq, pc_max_freq_cap(pc)));
		mutex_unlock(&pc->freq_lock);
	}

	return ret;
}

/**
 * xe_guc_pc_restore_stashed_freq - Set min/max back to stashed values
 * @pc: The GuC PC
 *
 * Returns: 0 on success,
 *          error code on failure
 */
int xe_guc_pc_restore_stashed_freq(struct xe_guc_pc *pc)
{
	int ret = 0;

	if (IS_SRIOV_VF(pc_to_xe(pc)) || pc_to_xe(pc)->info.skip_guc_pc)
		return 0;

	mutex_lock(&pc->freq_lock);
	ret = pc_set_max_freq(pc, pc->stashed_max_freq);
	if (!ret)
		ret = pc_set_min_freq(pc, pc->stashed_min_freq);
	mutex_unlock(&pc->freq_lock);

	return ret;
}

/**
 * xe_guc_pc_gucrc_disable - Disable GuC RC
 * @pc: Xe_GuC_PC instance
 *
 * Disables GuC RC by taking control of RC6 back from GuC.
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);
	struct xe_gt *gt = pc_to_gt(pc);
	unsigned int fw_ref;
	int ret = 0;

	if (xe->info.skip_guc_pc)
		return 0;

	ret = pc_action_setup_gucrc(pc, GUCRC_HOST_CONTROL);
	if (ret)
		return ret;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
		xe_force_wake_put(gt_to_fw(gt), fw_ref);
		return -ETIMEDOUT;
	}

	xe_gt_idle_disable_c6(gt);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);

	return 0;
}

/**
 * xe_guc_pc_override_gucrc_mode - override GUCRC mode
 * @pc: Xe_GuC_PC instance
 * @mode: new value of the mode.
 *
 * Return: 0 on success, negative error code on error
 */
int xe_guc_pc_override_gucrc_mode(struct xe_guc_pc *pc, enum slpc_gucrc_mode mode)
{
	int ret;

	xe_pm_runtime_get(pc_to_xe(pc));
	ret = pc_action_set_param(pc, SLPC_PARAM_PWRGATE_RC_MODE, mode);
	xe_pm_runtime_put(pc_to_xe(pc));

	return ret;
}

/**
 * xe_guc_pc_unset_gucrc_mode - unset GUCRC mode override
 * @pc: Xe_GuC_PC instance
 *
 * Return: 0 on success, negative error code on error
 */
int xe_guc_pc_unset_gucrc_mode(struct xe_guc_pc *pc)
{
	int ret;

	xe_pm_runtime_get(pc_to_xe(pc));
	ret = pc_action_unset_param(pc, SLPC_PARAM_PWRGATE_RC_MODE);
	xe_pm_runtime_put(pc_to_xe(pc));

	return ret;
}

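/*
 * Let PCODE know the fused frequency range. The bounds are passed in
 * units of GT_FREQUENCY_MULTIPLIER (50 MHz), hence the conversion from
 * MHz here.
 */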
static void pc_init_pcode_freq(struct xe_guc_pc *pc)
{
	u32 min = DIV_ROUND_CLOSEST(pc->rpn_freq, GT_FREQUENCY_MULTIPLIER);
	u32 max = DIV_ROUND_CLOSEST(pc->rp0_freq, GT_FREQUENCY_MULTIPLIER);

	XE_WARN_ON(xe_pcode_init_min_freq_table(gt_to_tile(pc_to_gt(pc)), min, max));
}

static int pc_init_freqs(struct xe_guc_pc *pc)
{
	int ret;

	mutex_lock(&pc->freq_lock);

	ret = pc_adjust_freq_bounds(pc);
	if (ret)
		goto out;

	ret = pc_adjust_requested_freq(pc);
	if (ret)
		goto out;

	pc_update_rp_values(pc);

	pc_init_pcode_freq(pc);

	/*
	 * The frequencies are really ready for use only after the user
	 * requested ones got restored.
	 */
	pc->freq_ready = true;

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

static int pc_action_set_strategy(struct xe_guc_pc *pc, u32 val)
{
	int ret = 0;

	ret = pc_action_set_param(pc,
				  SLPC_PARAM_STRATEGIES,
				  val);

	return ret;
}

/**
 * xe_guc_pc_start - Start GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 */
int xe_guc_pc_start(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);
	struct xe_gt *gt = pc_to_gt(pc);
	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
	unsigned int fw_ref;
	ktime_t earlier;
	int ret;

	xe_gt_assert(gt, xe_device_uc_enabled(xe));

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT)) {
		xe_force_wake_put(gt_to_fw(gt), fw_ref);
		return -ETIMEDOUT;
	}

	if (xe->info.skip_guc_pc) {
		if (xe->info.platform != XE_PVC)
			xe_gt_idle_enable_c6(gt);

		/* Request max possible since dynamic freq mgmt is not enabled */
		pc_set_cur_freq(pc, UINT_MAX);

		ret = 0;
		goto out;
	}

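	/* (Re)initialize the SLPC shared data blob before handing it to GuC */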
	xe_map_memset(xe, &pc->bo->vmap, 0, 0, size);
	slpc_shared_data_write(pc, header.size, size);

	earlier = ktime_get();
	ret = pc_action_reset(pc);
	if (ret)
		goto out;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
			      SLPC_RESET_TIMEOUT_MS)) {
		xe_gt_warn(gt, "GuC PC start taking longer than normal [freq = %dMHz (req = %dMHz), perf_limit_reasons = 0x%08X]\n",
			   xe_guc_pc_get_act_freq(pc), get_cur_freq(gt),
			   xe_gt_throttle_get_limit_reasons(gt));

		if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
				      SLPC_RESET_EXTENDED_TIMEOUT_MS)) {
			xe_gt_err(gt, "GuC PC Start failed: Dynamic GT frequency control and GT sleep states are now disabled.\n");
			ret = -EIO;
			goto out;
		}

		xe_gt_warn(gt, "GuC PC excessive start time: %lldms\n",
			   ktime_ms_delta(ktime_get(), earlier));
	}

	ret = pc_init_freqs(pc);
	if (ret)
		goto out;

	ret = pc_set_mert_freq_cap(pc);
	if (ret)
		goto out;

	if (xe->info.platform == XE_PVC) {
		xe_guc_pc_gucrc_disable(pc);
		ret = 0;
		goto out;
	}

	ret = pc_action_setup_gucrc(pc, GUCRC_FIRMWARE_CONTROL);
	if (ret)
		goto out;

	/* Enable SLPC Optimized Strategy for compute */
	ret = pc_action_set_strategy(pc, SLPC_OPTIMIZED_STRATEGY_COMPUTE);

out:
	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	return ret;
}

/**
 * xe_guc_pc_stop - Stop GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 */
int xe_guc_pc_stop(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);

	if (xe->info.skip_guc_pc) {
		xe_gt_idle_disable_c6(pc_to_gt(pc));
		return 0;
	}

	mutex_lock(&pc->freq_lock);
	pc->freq_ready = false;
	mutex_unlock(&pc->freq_lock);

	return 0;
}

/**
 * xe_guc_pc_fini_hw - Finalize GuC's Power Conservation component
 * @arg: opaque pointer that should point to Xe_GuC_PC instance
 */
static void xe_guc_pc_fini_hw(void *arg)
{
	struct xe_guc_pc *pc = arg;
	struct xe_device *xe = pc_to_xe(pc);
	unsigned int fw_ref;

	if (xe_device_wedged(xe))
		return;

	fw_ref = xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL);
	xe_guc_pc_gucrc_disable(pc);
	XE_WARN_ON(xe_guc_pc_stop(pc));

	/* Bind requested freq to mert_freq_cap before unload */
	pc_set_cur_freq(pc, min(pc_max_freq_cap(pc), pc->rpe_freq));

	xe_force_wake_put(gt_to_fw(pc_to_gt(pc)), fw_ref);
}

/**
 * xe_guc_pc_init - Initialize GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 */
int xe_guc_pc_init(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_bo *bo;
	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
	int err;

	if (xe->info.skip_guc_pc)
		return 0;

	err = drmm_mutex_init(&xe->drm, &pc->freq_lock);
	if (err)
		return err;

	bo = xe_managed_bo_create_pin_map(xe, tile, size,
					  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
					  XE_BO_FLAG_GGTT |
					  XE_BO_FLAG_GGTT_INVALIDATE |
					  XE_BO_FLAG_PINNED_NORESTORE);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	pc->bo = bo;

	return devm_add_action_or_reset(xe->drm.dev, xe_guc_pc_fini_hw, pc);
}

static const char *pc_get_state_string(struct xe_guc_pc *pc)
{
	switch (slpc_shared_data_read(pc, header.global_state)) {
	case SLPC_GLOBAL_STATE_NOT_RUNNING:
		return "not running";
	case SLPC_GLOBAL_STATE_INITIALIZING:
		return "initializing";
	case SLPC_GLOBAL_STATE_RESETTING:
		return "resetting";
	case SLPC_GLOBAL_STATE_RUNNING:
		return "running";
	case SLPC_GLOBAL_STATE_SHUTTING_DOWN:
		return "shutting down";
	case SLPC_GLOBAL_STATE_ERROR:
		return "error";
	default:
		return "unknown";
	}
}

/**
 * xe_guc_pc_print - Print GuC's Power Conservation information for debug
 * @pc: Xe_GuC_PC instance
 * @p: drm_printer
 */
void xe_guc_pc_print(struct xe_guc_pc *pc, struct drm_printer *p)
{
	drm_printf(p, "SLPC Shared Data Header:\n");
	drm_printf(p, "\tSize: %x\n", slpc_shared_data_read(pc, header.size));
	drm_printf(p, "\tGlobal State: %s\n", pc_get_state_string(pc));

	if (pc_action_query_task_state(pc))
		return;

	drm_printf(p, "\nSLPC Tasks Status:\n");
	drm_printf(p, "\tGTPERF enabled: %s\n",
		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
			      SLPC_GTPERF_TASK_ENABLED));
	drm_printf(p, "\tDCC enabled: %s\n",
		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
			      SLPC_DCC_TASK_ENABLED));
	drm_printf(p, "\tDCC in use: %s\n",
		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
			      SLPC_IN_DCC));
	drm_printf(p, "\tBalancer enabled: %s\n",
		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
			      SLPC_BALANCER_ENABLED));
	drm_printf(p, "\tIBC enabled: %s\n",
		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
			      SLPC_IBC_TASK_ENABLED));
	drm_printf(p, "\tBalancer IA LMT enabled: %s\n",
		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
			      SLPC_BALANCER_IA_LMT_ENABLED));
	drm_printf(p, "\tBalancer IA LMT active: %s\n",
		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
			      SLPC_BALANCER_IA_LMT_ACTIVE));
}