xref: /linux/drivers/gpu/drm/xe/xe_guc_pc.c (revision 08516de501fae647fb29bf3b62718de56cc24014)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_guc_pc.h"

#include <linux/delay.h>

#include <drm/drm_managed.h>

#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_sysfs.h"
#include "xe_gt_types.h"
#include "xe_guc_ct.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_pcode.h"

#define MCHBAR_MIRROR_BASE_SNB	0x140000

#define GEN6_RP_STATE_CAP	XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5998)
#define   RP0_MASK		REG_GENMASK(7, 0)
#define   RP1_MASK		REG_GENMASK(15, 8)
#define   RPN_MASK		REG_GENMASK(23, 16)

#define GEN10_FREQ_INFO_REC	XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5ef0)
#define   RPE_MASK		REG_GENMASK(15, 8)

#define GEN12_RPSTAT1		XE_REG(0x1381b4)
#define   GEN12_CAGF_MASK	REG_GENMASK(19, 11)

#define MTL_MIRROR_TARGET_WP1	XE_REG(0xc60)
#define   MTL_CAGF_MASK		REG_GENMASK(8, 0)

#define GT_FREQUENCY_MULTIPLIER	50
#define GEN9_FREQ_SCALER	3

/**
 * DOC: GuC Power Conservation (PC)
 *
 * GuC Power Conservation (PC) supports multiple features for efficient,
 * high-performance use of the GT when GuC submission is enabled, including
 * frequency management, Render-C states management, and various algorithms
 * for power balancing.
 *
 * Single Loop Power Conservation (SLPC) is the name given to the suite of
 * connected power conservation features in the GuC firmware. The firmware
 * exposes a programming interface to the host for the control of SLPC.
 *
 * Frequency management:
 * =====================
 *
 * The Xe driver enables SLPC with all of its default features; the default
 * frequency selection varies per platform.
 * Xe's GuC PC provides a sysfs API for frequency management:
 *
 * device/gt#/freq_* *read-only* files:
 * - freq_act: The actual resolved frequency decided by PCODE.
 * - freq_cur: The current one requested by GuC PC to the Hardware.
 * - freq_rpn: The Render Performance (RP) N level, which is the minimum one.
 * - freq_rpe: The Render Performance (RP) E level, which is the efficient one.
 * - freq_rp0: The Render Performance (RP) 0 level, which is the maximum one.
 *
 * device/gt#/freq_* *read-write* files:
 * - freq_min: GuC PC min request.
 * - freq_max: GuC PC max request.
 *             If max <= min, then freq_min becomes a fixed frequency request
 *             (see the example below).
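 *
 * For example, a fixed frequency request can be made by pinning max and then
 * min to the same value (illustrative shell session; the sysfs path to the
 * gt# directory varies by device)::
 *
 *	echo 1000 > device/gt0/freq_max
 *	echo 1000 > device/gt0/freq_min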
 *
 * Render-C States:
 * ================
 *
 * Render-C states are also a GuC PC feature, enabled in Xe for all
 * platforms.
 * Xe's GuC PC provides a sysfs API for Render-C States:
 *
 * device/gt#/rc* *read-only* files:
 * - rc_status: Provides the immediate status of Render-C (rc0 or rc6).
 * - rc6_residency: Provides the rc6_residency counter in units of 1.28 uSec.
 *                  Prone to overflows.
 */

static struct xe_guc *
pc_to_guc(struct xe_guc_pc *pc)
{
	return container_of(pc, struct xe_guc, pc);
}

static struct xe_device *
pc_to_xe(struct xe_guc_pc *pc)
{
	struct xe_guc *guc = pc_to_guc(pc);
	struct xe_gt *gt = container_of(guc, struct xe_gt, uc.guc);

	return gt_to_xe(gt);
}

static struct xe_gt *
pc_to_gt(struct xe_guc_pc *pc)
{
	return container_of(pc, struct xe_gt, uc.guc.pc);
}

static struct xe_guc_pc *
dev_to_pc(struct device *dev)
{
	return &kobj_to_gt(&dev->kobj)->uc.guc.pc;
}

static struct iosys_map *
pc_to_maps(struct xe_guc_pc *pc)
{
	return &pc->bo->vmap;
}

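/*
 * Helpers to access fields of the SLPC shared data structure through the
 * buffer object's iosys_map, so reads and writes work whether the BO lives
 * in system memory or VRAM.
 */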
#define slpc_shared_data_read(pc_, field_) \
	xe_map_rd_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
			struct slpc_shared_data, field_)

#define slpc_shared_data_write(pc_, field_, val_) \
	xe_map_wr_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
			struct slpc_shared_data, field_, val_)

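/* Pack an SLPC event id and its argument count into one H2G message dword */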
#define SLPC_EVENT(id, count) \
	(FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID, id) | \
	 FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, count))

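/*
 * Poll the SLPC global state written back by GuC in the shared data,
 * doubling the sleep interval each round (exponential backoff) until
 * roughly 5ms have elapsed.
 */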
static int wait_for_pc_state(struct xe_guc_pc *pc,
			     enum slpc_global_state state)
{
	int timeout_us = 5000; /* roughly 5ms, but no need for precision */
	int slept, wait = 10;

	xe_device_assert_mem_access(pc_to_xe(pc));

	for (slept = 0; slept < timeout_us;) {
		if (slpc_shared_data_read(pc, header.global_state) == state)
			return 0;

		usleep_range(wait, wait << 1);
		slept += wait;
		wait <<= 1;
		if (slept + wait > timeout_us)
			wait = timeout_us - slept;
	}

	return -ETIMEDOUT;
}

static int pc_action_reset(struct xe_guc_pc *pc)
{
	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
	int ret;
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_RESET, 2),
		xe_bo_ggtt_addr(pc->bo),
		0,
	};

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		drm_err(&pc_to_xe(pc)->drm, "GuC PC reset failed: %pe",
			ERR_PTR(ret));

	return ret;
}

static int pc_action_shutdown(struct xe_guc_pc *pc)
{
	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
	int ret;
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_SHUTDOWN, 2),
		xe_bo_ggtt_addr(pc->bo),
		0,
	};

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		drm_err(&pc_to_xe(pc)->drm, "GuC PC shutdown failed: %pe",
			ERR_PTR(ret));

	return ret;
}

static int pc_action_query_task_state(struct xe_guc_pc *pc)
{
	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
	int ret;
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
		xe_bo_ggtt_addr(pc->bo),
		0,
	};

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
		return -EAGAIN;

	/* Blocking here to ensure the results are ready before reading them */
	ret = xe_guc_ct_send_block(ct, action, ARRAY_SIZE(action));
	if (ret)
		drm_err(&pc_to_xe(pc)->drm,
			"GuC PC query task state failed: %pe", ERR_PTR(ret));

	return ret;
}

static int pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value)
{
	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
	int ret;
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
		id,
		value,
	};

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
		return -EAGAIN;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		drm_err(&pc_to_xe(pc)->drm, "GuC PC set param failed: %pe",
			ERR_PTR(ret));

	return ret;
}

static int pc_action_setup_gucrc(struct xe_guc_pc *pc, u32 mode)
{
	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
	u32 action[] = {
		XE_GUC_ACTION_SETUP_PC_GUCRC,
		mode,
	};
	int ret;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		drm_err(&pc_to_xe(pc)->drm, "GuC RC enable failed: %pe",
			ERR_PTR(ret));
	return ret;
}

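/*
 * SLPC and the frequency registers report frequency as a ratio in units of
 * 50/3 MHz (~16.67 MHz); convert such a ratio to MHz.
 */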
static u32 decode_freq(u32 raw)
{
	return DIV_ROUND_CLOSEST(raw * GT_FREQUENCY_MULTIPLIER,
				 GEN9_FREQ_SCALER);
}

static u32 pc_get_min_freq(struct xe_guc_pc *pc)
{
	u32 freq;

	freq = FIELD_GET(SLPC_MIN_UNSLICE_FREQ_MASK,
			 slpc_shared_data_read(pc, task_state_data.freq));

	return decode_freq(freq);
}

static int pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
{
	/*
	 * Let's only check for the rpn-rp0 range. If max < min,
	 * min becomes a fixed request.
	 */
	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
		return -EINVAL;

	/*
	 * GuC policy is to elevate minimum frequency to the efficient levels.
	 * Our goal is to have the admin choices respected.
	 */
	pc_action_set_param(pc, SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
			    freq < pc->rpe_freq);

	return pc_action_set_param(pc,
				   SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
				   freq);
}

static int pc_get_max_freq(struct xe_guc_pc *pc)
{
	u32 freq;

	freq = FIELD_GET(SLPC_MAX_UNSLICE_FREQ_MASK,
			 slpc_shared_data_read(pc, task_state_data.freq));

	return decode_freq(freq);
}

static int pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
{
	/*
	 * Let's only check for the rpn-rp0 range. If max < min,
	 * min becomes a fixed request.
	 * Also, overclocking is not supported.
	 */
	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
		return -EINVAL;

	return pc_action_set_param(pc,
				   SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
				   freq);
}

static void mtl_update_rpe_value(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(gt, MTL_MPE_FREQUENCY);
	else
		reg = xe_mmio_read32(gt, MTL_GT_RPE_FREQUENCY);

	pc->rpe_freq = REG_FIELD_GET(MTL_RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}

static void tgl_update_rpe_value(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 reg;

	/*
	 * For PVC we still need to use the fused RP1 as an approximation for
	 * RPe. For platforms other than PVC we get the resolved RPe directly
	 * from PCODE at a different register.
	 */
	if (xe->info.platform == XE_PVC)
		reg = xe_mmio_read32(gt, PVC_RP_STATE_CAP);
	else
		reg = xe_mmio_read32(gt, GEN10_FREQ_INFO_REC);

	pc->rpe_freq = REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}

static void pc_update_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);

	if (xe->info.platform == XE_METEORLAKE)
		mtl_update_rpe_value(pc);
	else
		tgl_update_rpe_value(pc);

	/*
	 * RPe is decided at runtime by PCODE. In the rare case where that's
	 * smaller than the fused min, we will trust the PCODE and use that
	 * as our minimum one.
	 */
	pc->rpn_freq = min(pc->rpn_freq, pc->rpe_freq);
}

static ssize_t freq_act_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct kobject *kobj = &dev->kobj;
	struct xe_gt *gt = kobj_to_gt(kobj);
	struct xe_device *xe = gt_to_xe(gt);
	u32 freq;
	ssize_t ret;

	xe_device_mem_access_get(xe);
	/*
	 * When in RC6, actual frequency is 0. Let's block RC6 so we are able
	 * to verify that our freq requests are really happening.
	 */
	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (ret)
		goto out;

	if (xe->info.platform == XE_METEORLAKE) {
		freq = xe_mmio_read32(gt, MTL_MIRROR_TARGET_WP1);
		freq = REG_FIELD_GET(MTL_CAGF_MASK, freq);
	} else {
		freq = xe_mmio_read32(gt, GEN12_RPSTAT1);
		freq = REG_FIELD_GET(GEN12_CAGF_MASK, freq);
	}

	ret = sysfs_emit(buf, "%d\n", decode_freq(freq));

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
out:
	xe_device_mem_access_put(xe);
	return ret;
}
static DEVICE_ATTR_RO(freq_act);

static ssize_t freq_cur_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct kobject *kobj = &dev->kobj;
	struct xe_gt *gt = kobj_to_gt(kobj);
	u32 freq;
	ssize_t ret;

	xe_device_mem_access_get(gt_to_xe(gt));
	/*
	 * GuC SLPC plays with cur freq request when GuCRC is enabled.
	 * Block RC6 for a more reliable read.
	 */
	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (ret)
		goto out;

	freq = xe_mmio_read32(gt, RPNSWREQ);

	freq = REG_FIELD_GET(REQ_RATIO_MASK, freq);
	ret = sysfs_emit(buf, "%d\n", decode_freq(freq));

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
out:
	xe_device_mem_access_put(gt_to_xe(gt));
	return ret;
}
static DEVICE_ATTR_RO(freq_cur);

static ssize_t freq_rp0_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct xe_guc_pc *pc = dev_to_pc(dev);

	return sysfs_emit(buf, "%d\n", pc->rp0_freq);
}
static DEVICE_ATTR_RO(freq_rp0);

static ssize_t freq_rpe_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct xe_guc_pc *pc = dev_to_pc(dev);

	pc_update_rp_values(pc);
	return sysfs_emit(buf, "%d\n", pc->rpe_freq);
}
static DEVICE_ATTR_RO(freq_rpe);

static ssize_t freq_rpn_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct xe_guc_pc *pc = dev_to_pc(dev);

	return sysfs_emit(buf, "%d\n", pc->rpn_freq);
}
static DEVICE_ATTR_RO(freq_rpn);

static ssize_t freq_min_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct xe_guc_pc *pc = dev_to_pc(dev);
	struct xe_gt *gt = pc_to_gt(pc);
	ssize_t ret;

	xe_device_mem_access_get(pc_to_xe(pc));
	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	/*
	 * GuC SLPC plays with min freq request when GuCRC is enabled.
	 * Block RC6 for a more reliable read.
	 */
	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (ret)
		goto out;

	ret = pc_action_query_task_state(pc);
	if (ret)
		goto fw;

	ret = sysfs_emit(buf, "%d\n", pc_get_min_freq(pc));

fw:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
out:
	mutex_unlock(&pc->freq_lock);
	xe_device_mem_access_put(pc_to_xe(pc));
	return ret;
}

static ssize_t freq_min_store(struct device *dev, struct device_attribute *attr,
			      const char *buff, size_t count)
{
	struct xe_guc_pc *pc = dev_to_pc(dev);
	u32 freq;
	ssize_t ret;

	ret = kstrtou32(buff, 0, &freq);
	if (ret)
		return ret;

	xe_device_mem_access_get(pc_to_xe(pc));
	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_set_min_freq(pc, freq);
	if (ret)
		goto out;

	pc->user_requested_min = freq;

out:
	mutex_unlock(&pc->freq_lock);
	xe_device_mem_access_put(pc_to_xe(pc));
	return ret ?: count;
}
static DEVICE_ATTR_RW(freq_min);

static ssize_t freq_max_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct xe_guc_pc *pc = dev_to_pc(dev);
	ssize_t ret;

	xe_device_mem_access_get(pc_to_xe(pc));
	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_action_query_task_state(pc);
	if (ret)
		goto out;

	ret = sysfs_emit(buf, "%d\n", pc_get_max_freq(pc));

out:
	mutex_unlock(&pc->freq_lock);
	xe_device_mem_access_put(pc_to_xe(pc));
	return ret;
}

static ssize_t freq_max_store(struct device *dev, struct device_attribute *attr,
			      const char *buff, size_t count)
{
	struct xe_guc_pc *pc = dev_to_pc(dev);
	u32 freq;
	ssize_t ret;

	ret = kstrtou32(buff, 0, &freq);
	if (ret)
		return ret;

	xe_device_mem_access_get(pc_to_xe(pc));
	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_set_max_freq(pc, freq);
	if (ret)
		goto out;

	pc->user_requested_max = freq;

out:
	mutex_unlock(&pc->freq_lock);
	xe_device_mem_access_put(pc_to_xe(pc));
	return ret ?: count;
}
static DEVICE_ATTR_RW(freq_max);

static ssize_t rc_status_show(struct device *dev,
			      struct device_attribute *attr, char *buff)
{
	struct xe_guc_pc *pc = dev_to_pc(dev);
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	xe_device_mem_access_get(gt_to_xe(gt));
	reg = xe_mmio_read32(gt, GT_CORE_STATUS);
	xe_device_mem_access_put(gt_to_xe(gt));

	switch (REG_FIELD_GET(RCN_MASK, reg)) {
	case GT_RC6:
		return sysfs_emit(buff, "rc6\n");
	case GT_RC0:
		return sysfs_emit(buff, "rc0\n");
	default:
		return -ENOENT;
	}
}
static DEVICE_ATTR_RO(rc_status);

static ssize_t rc6_residency_show(struct device *dev,
				  struct device_attribute *attr, char *buff)
{
	struct xe_guc_pc *pc = dev_to_pc(dev);
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;
	ssize_t ret;

	xe_device_mem_access_get(pc_to_xe(pc));
	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (ret)
		goto out;

	reg = xe_mmio_read32(gt, GT_GFX_RC6);
	ret = sysfs_emit(buff, "%u\n", reg);

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
out:
	xe_device_mem_access_put(pc_to_xe(pc));
	return ret;
}
static DEVICE_ATTR_RO(rc6_residency);

static const struct attribute *pc_attrs[] = {
	&dev_attr_freq_act.attr,
	&dev_attr_freq_cur.attr,
	&dev_attr_freq_rp0.attr,
	&dev_attr_freq_rpe.attr,
	&dev_attr_freq_rpn.attr,
	&dev_attr_freq_min.attr,
	&dev_attr_freq_max.attr,
	&dev_attr_rc_status.attr,
	&dev_attr_rc6_residency.attr,
	NULL
};

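/*
 * Read the fused RP0 and RPN limits. MTL exposes separate state-cap
 * registers for the media GT and the primary GT.
 */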
static void mtl_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	xe_device_assert_mem_access(pc_to_xe(pc));

	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(gt, MTL_MEDIAP_STATE_CAP);
	else
		reg = xe_mmio_read32(gt, MTL_RP_STATE_CAP);
	pc->rp0_freq = REG_FIELD_GET(MTL_RP0_CAP_MASK, reg) *
		GT_FREQUENCY_MULTIPLIER;
	pc->rpn_freq = REG_FIELD_GET(MTL_RPN_CAP_MASK, reg) *
		GT_FREQUENCY_MULTIPLIER;
}

static void tgl_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 reg;

	xe_device_assert_mem_access(pc_to_xe(pc));

	if (xe->info.platform == XE_PVC)
		reg = xe_mmio_read32(gt, PVC_RP_STATE_CAP);
	else
		reg = xe_mmio_read32(gt, GEN6_RP_STATE_CAP);
	pc->rp0_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
	pc->rpn_freq = REG_FIELD_GET(RPN_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}

static void pc_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);

	if (xe->info.platform == XE_METEORLAKE)
		mtl_init_fused_rp_values(pc);
	else
		tgl_init_fused_rp_values(pc);
}

static int pc_adjust_freq_bounds(struct xe_guc_pc *pc)
{
	int ret;

	lockdep_assert_held(&pc->freq_lock);

	ret = pc_action_query_task_state(pc);
	if (ret)
		return ret;

	/*
	 * GuC defaults to some RPmax that is not actually achievable without
	 * overclocking. Let's adjust it to the Hardware RP0, which is the
	 * regular maximum.
	 */
	if (pc_get_max_freq(pc) > pc->rp0_freq)
		pc_set_max_freq(pc, pc->rp0_freq);

	/*
	 * Same thing happens for Server platforms where min is listed as
	 * RPMax.
	 */
	if (pc_get_min_freq(pc) > pc->rp0_freq)
		pc_set_min_freq(pc, pc->rp0_freq);

	return 0;
}

static int pc_adjust_requested_freq(struct xe_guc_pc *pc)
{
	int ret = 0;

	lockdep_assert_held(&pc->freq_lock);

	if (pc->user_requested_min != 0) {
		ret = pc_set_min_freq(pc, pc->user_requested_min);
		if (ret)
			return ret;
	}

	if (pc->user_requested_max != 0) {
		ret = pc_set_max_freq(pc, pc->user_requested_max);
		if (ret)
			return ret;
	}

	return ret;
}

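/*
 * Hand Render-C control back to the host: ask GuC to stop managing RC,
 * then clear the power-gate and RC control registers directly.
 */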
static int pc_gucrc_disable(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	int ret;

	xe_device_assert_mem_access(pc_to_xe(pc));

	ret = pc_action_setup_gucrc(pc, XE_GUCRC_HOST_CONTROL);
	if (ret)
		return ret;

	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (ret)
		return ret;

	xe_mmio_write32(gt, PG_ENABLE, 0);
	xe_mmio_write32(gt, RC_CONTROL, 0);
	xe_mmio_write32(gt, RC_STATE, 0);

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
	return 0;
}

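/*
 * The PCODE min frequency table is programmed in ratio units (multiples of
 * 50 MHz), so convert the MHz bounds back to ratios.
 */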
static void pc_init_pcode_freq(struct xe_guc_pc *pc)
{
	u32 min = DIV_ROUND_CLOSEST(pc->rpn_freq, GT_FREQUENCY_MULTIPLIER);
	u32 max = DIV_ROUND_CLOSEST(pc->rp0_freq, GT_FREQUENCY_MULTIPLIER);

	XE_WARN_ON(xe_pcode_init_min_freq_table(pc_to_gt(pc), min, max));
}

static int pc_init_freqs(struct xe_guc_pc *pc)
{
	int ret;

	mutex_lock(&pc->freq_lock);

	ret = pc_adjust_freq_bounds(pc);
	if (ret)
		goto out;

	ret = pc_adjust_requested_freq(pc);
	if (ret)
		goto out;

	pc_update_rp_values(pc);

	pc_init_pcode_freq(pc);

	/*
	 * The frequencies are only really ready for use once the
	 * user-requested ones have been restored.
	 */
	pc->freq_ready = true;

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

/**
 * xe_guc_pc_start - Start GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
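 *
 * Return: 0 on success, negative error code on failure.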
 */
int xe_guc_pc_start(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);
	struct xe_gt *gt = pc_to_gt(pc);
	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
	int ret;

	XE_WARN_ON(!xe_device_guc_submission_enabled(xe));

	xe_device_mem_access_get(pc_to_xe(pc));

	/* The BO may be placed in VRAM, so clear it through the iosys_map */
	xe_map_memset(pc_to_xe(pc), &pc->bo->vmap, 0, 0, size);
	slpc_shared_data_write(pc, header.size, size);

	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (ret)
		goto out_fail_force_wake;

	ret = pc_action_reset(pc);
	if (ret)
		goto out;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING)) {
		drm_err(&pc_to_xe(pc)->drm, "GuC PC Start failed\n");
		ret = -EIO;
		goto out;
	}

	ret = pc_init_freqs(pc);
	if (ret)
		goto out;

	if (xe->info.platform == XE_PVC) {
		pc_gucrc_disable(pc);
		ret = 0;
		goto out;
	}

	ret = pc_action_setup_gucrc(pc, XE_GUCRC_FIRMWARE_CONTROL);

out:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
out_fail_force_wake:
	xe_device_mem_access_put(pc_to_xe(pc));
	return ret;
}

/**
 * xe_guc_pc_stop - Stop GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
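 *
 * Return: 0 on success, negative error code on failure.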
 */
int xe_guc_pc_stop(struct xe_guc_pc *pc)
{
	int ret;

	xe_device_mem_access_get(pc_to_xe(pc));

	ret = pc_gucrc_disable(pc);
	if (ret)
		goto out;

	mutex_lock(&pc->freq_lock);
	pc->freq_ready = false;
	mutex_unlock(&pc->freq_lock);

	ret = pc_action_shutdown(pc);
	if (ret)
		goto out;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_NOT_RUNNING)) {
		drm_err(&pc_to_xe(pc)->drm, "GuC PC Shutdown failed\n");
		ret = -EIO;
	}

out:
	xe_device_mem_access_put(pc_to_xe(pc));
	return ret;
}

static void pc_fini(struct drm_device *drm, void *arg)
{
	struct xe_guc_pc *pc = arg;

	XE_WARN_ON(xe_guc_pc_stop(pc));
	sysfs_remove_files(pc_to_gt(pc)->sysfs, pc_attrs);
	xe_bo_unpin_map_no_vm(pc->bo);
}

/**
 * xe_guc_pc_init - Initialize GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
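 *
 * Return: 0 on success, negative error code on failure.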
 */
int xe_guc_pc_init(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_bo *bo;
	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
	int err;

	mutex_init(&pc->freq_lock);

	bo = xe_bo_create_pin_map(xe, tile, NULL, size,
				  ttm_bo_type_kernel,
				  XE_BO_CREATE_VRAM_IF_DGFX(tile) |
				  XE_BO_CREATE_GGTT_BIT);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	pc->bo = bo;

	pc_init_fused_rp_values(pc);

	err = sysfs_create_files(gt->sysfs, pc_attrs);
	if (err)
		return err;

	err = drmm_add_action_or_reset(&xe->drm, pc_fini, pc);
	if (err)
		return err;

	return 0;
}
921