// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include <drm/drm_managed.h>
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_types.h"
#include "xe_gt_sysfs.h"
#include "xe_guc_ct.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_pcode.h"
#include "i915_reg_defs.h"
#include "i915_reg.h"

#include "intel_mchbar_regs.h"

/* Fields of GEN6_RP_STATE_CAP; to be merged when the definition moves to Xe */
#define   RP0_MASK	REG_GENMASK(7, 0)
#define   RP1_MASK	REG_GENMASK(15, 8)
#define   RPN_MASK	REG_GENMASK(23, 16)

#define GEN10_FREQ_INFO_REC	_MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5ef0)
#define   RPE_MASK		REG_GENMASK(15, 8)

#include "gt/intel_gt_regs.h"
/* Field of GEN6_RPNSWREQ; to be merged when the definition moves to Xe */
#define   REQ_RATIO_MASK	REG_GENMASK(31, 23)

/* Field of GEN6_GT_CORE_STATUS; to be merged when the definition moves to Xe */
#define   RCN_MASK	REG_GENMASK(2, 0)

#define GEN12_RPSTAT1		_MMIO(0x1381b4)
#define   GEN12_CAGF_MASK	REG_GENMASK(19, 11)

#define GT_FREQUENCY_MULTIPLIER	50
#define GEN9_FREQ_SCALER	3

/**
 * DOC: GuC Power Conservation (PC)
 *
 * GuC Power Conservation (PC) supports multiple features for the most
 * efficient and performing use of the GT when GuC submission is enabled,
 * including frequency management, Render-C states management, and various
 * algorithms for power balancing.
 *
 * Single Loop Power Conservation (SLPC) is the name given to the suite of
 * connected power conservation features in the GuC firmware. The firmware
 * exposes a programming interface to the host for the control of SLPC.
 *
 * Frequency management:
 * =====================
 *
 * The Xe driver enables SLPC with all of its default features and frequency
 * selection, which varies per platform.
 * Xe's GuC PC provides a sysfs API for frequency management:
 *
 * device/gt#/freq_* *read-only* files:
 * - freq_act: The actual resolved frequency decided by PCODE.
 * - freq_cur: The current frequency requested by GuC PC to the hardware.
 * - freq_rpn: The Render Performance (RP) N level, which is the minimum one.
 * - freq_rpe: The Render Performance (RP) E level, which is the efficient one.
 * - freq_rp0: The Render Performance (RP) 0 level, which is the maximum one.
 *
 * device/gt#/freq_* *read-write* files:
 * - freq_min: GuC PC min request.
 * - freq_max: GuC PC max request.
 *             If max <= min, then freq_min becomes a fixed frequency request.
 *
 * Render-C States:
 * ================
 *
 * Render-C states are also a GuC PC feature, now enabled in Xe for
 * all platforms.
 * Xe's GuC PC provides a sysfs API for Render-C States:
 *
 * device/gt#/rc* *read-only* files:
 * - rc_status: Provides the immediate status of Render-C (rc0 or rc6).
 * - rc6_residency: Provides the rc6_residency counter in units of 1.28 usec.
 *                  Prone to overflows.
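 *
 * As an illustration (the exact sysfs location of the gt# directory is
 * system dependent; the paths below assume card0):
 *
 * .. code-block:: console
 *
 *	$ cat /sys/class/drm/card0/device/gt0/freq_act
 *	$ echo 500 > /sys/class/drm/card0/device/gt0/freq_min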
 */

static struct xe_guc *
pc_to_guc(struct xe_guc_pc *pc)
{
	return container_of(pc, struct xe_guc, pc);
}

static struct xe_device *
pc_to_xe(struct xe_guc_pc *pc)
{
	struct xe_guc *guc = pc_to_guc(pc);
	struct xe_gt *gt = container_of(guc, struct xe_gt, uc.guc);

	return gt_to_xe(gt);
}

static struct xe_gt *
pc_to_gt(struct xe_guc_pc *pc)
{
	return container_of(pc, struct xe_gt, uc.guc.pc);
}

static struct xe_guc_pc *
dev_to_pc(struct device *dev)
{
	return &kobj_to_gt(&dev->kobj)->uc.guc.pc;
}

static struct iosys_map *
pc_to_maps(struct xe_guc_pc *pc)
{
	return &pc->bo->vmap;
}

#define slpc_shared_data_read(pc_, field_) \
	xe_map_rd_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
			struct slpc_shared_data, field_)

#define slpc_shared_data_write(pc_, field_, val_) \
	xe_map_wr_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
			struct slpc_shared_data, field_, val_)

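/*
 * As an example of the accessors above, pc_is_in_state() below reads the
 * SLPC global state straight out of the GGTT-mapped shared buffer with:
 *
 *	slpc_shared_data_read(pc, header.global_state)
 */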
#define SLPC_EVENT(id, count) \
	(FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID, id) | \
	 FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, count))

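/*
 * SLPC_EVENT() packs an event id and its argument count into the dword that
 * follows the H2G action header. For instance, pc_action_reset() below sends
 * GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST with SLPC_EVENT(SLPC_EVENT_RESET, 2)
 * and two event arguments: the GGTT address of the shared data buffer and 0.
 */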
static bool pc_is_in_state(struct xe_guc_pc *pc, enum slpc_global_state state)
{
	xe_device_assert_mem_access(pc_to_xe(pc));
	return slpc_shared_data_read(pc, header.global_state) == state;
}

static int pc_action_reset(struct xe_guc_pc *pc)
{
	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
	int ret;
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_RESET, 2),
		xe_bo_ggtt_addr(pc->bo),
		0,
	};

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		drm_err(&pc_to_xe(pc)->drm, "GuC PC reset: %pe", ERR_PTR(ret));

	return ret;
}

static int pc_action_shutdown(struct xe_guc_pc *pc)
{
	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
	int ret;
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_SHUTDOWN, 2),
		xe_bo_ggtt_addr(pc->bo),
		0,
	};

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		drm_err(&pc_to_xe(pc)->drm, "GuC PC shutdown: %pe",
			ERR_PTR(ret));

	return ret;
}

static int pc_action_query_task_state(struct xe_guc_pc *pc)
{
	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
	int ret;
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
		xe_bo_ggtt_addr(pc->bo),
		0,
	};

	if (!pc_is_in_state(pc, SLPC_GLOBAL_STATE_RUNNING))
		return -EAGAIN;

	/* Blocking here to ensure the results are ready before reading them */
	ret = xe_guc_ct_send_block(ct, action, ARRAY_SIZE(action));
	if (ret)
		drm_err(&pc_to_xe(pc)->drm,
			"GuC PC query task state failed: %pe", ERR_PTR(ret));

	return ret;
}

static int pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value)
{
	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
	int ret;
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
		id,
		value,
	};

	if (!pc_is_in_state(pc, SLPC_GLOBAL_STATE_RUNNING))
		return -EAGAIN;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		drm_err(&pc_to_xe(pc)->drm, "GuC PC set param failed: %pe",
			ERR_PTR(ret));

	return ret;
}

static int pc_action_setup_gucrc(struct xe_guc_pc *pc, u32 mode)
{
	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
	u32 action[] = {
		XE_GUC_ACTION_SETUP_PC_GUCRC,
		mode,
	};
	int ret;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret)
		drm_err(&pc_to_xe(pc)->drm, "GuC RC enable failed: %pe",
			ERR_PTR(ret));
	return ret;
}

static u32 decode_freq(u32 raw)
{
	return DIV_ROUND_CLOSEST(raw * GT_FREQUENCY_MULTIPLIER,
				 GEN9_FREQ_SCALER);
}
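/*
 * decode_freq() converts a raw ratio as read from hardware into MHz.
 * For example, a raw value of 24 decodes to
 * DIV_ROUND_CLOSEST(24 * 50, 3) = 400 MHz.
 */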

static u32 pc_get_min_freq(struct xe_guc_pc *pc)
{
	u32 freq;

	freq = FIELD_GET(SLPC_MIN_UNSLICE_FREQ_MASK,
			 slpc_shared_data_read(pc, task_state_data.freq));

	return decode_freq(freq);
}

static int pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
{
	/*
	 * Let's only check for the rpn-rp0 range. If max < min,
	 * min becomes a fixed request.
	 */
	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
		return -EINVAL;

	/*
	 * GuC policy is to elevate minimum frequency to the efficient level.
	 * Our goal is to have the admin choices respected.
	 */
	pc_action_set_param(pc, SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
			    freq < pc->rpe_freq);

	return pc_action_set_param(pc,
				   SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
				   freq);
}
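/*
 * To illustrate the interplay above: with RPe = 600 MHz, a call such as
 * pc_set_min_freq(pc, 400) first sets SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY
 * (since 400 < 600), so GuC will honor the 400 MHz minimum instead of
 * raising the floor back to RPe. The numbers are only an example.
 */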

static u32 pc_get_max_freq(struct xe_guc_pc *pc)
{
	u32 freq;

	freq = FIELD_GET(SLPC_MAX_UNSLICE_FREQ_MASK,
			 slpc_shared_data_read(pc, task_state_data.freq));

	return decode_freq(freq);
}

static int pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
{
	/*
	 * Let's only check for the rpn-rp0 range. If max < min,
	 * min becomes a fixed request.
	 * Also, overclocking is not supported.
	 */
	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
		return -EINVAL;

	return pc_action_set_param(pc,
				   SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
				   freq);
}

static void pc_update_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 reg;

	/*
	 * For PVC we still need to use the fused RP1 as the approximation of
	 * RPe. For platforms other than PVC we get the resolved RPe directly
	 * from PCODE at a different register.
	 */
	if (xe->info.platform == XE_PVC)
		reg = xe_mmio_read32(gt, PVC_RP_STATE_CAP.reg);
	else
		reg = xe_mmio_read32(gt, GEN10_FREQ_INFO_REC.reg);

	pc->rpe_freq = REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER;

	/*
	 * RPe is decided at runtime by PCODE. In the rare case where that's
	 * smaller than the fused min, we will trust the PCODE and use that
	 * as our minimum one.
	 */
	pc->rpn_freq = min(pc->rpn_freq, pc->rpe_freq);
}
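/*
 * Example of the clamp above: if the fused RPn is 350 MHz but PCODE resolves
 * RPe to 300 MHz at runtime, rpn_freq is lowered to 300 MHz so that the
 * advertised range stays consistent.
 */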

static ssize_t freq_act_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct kobject *kobj = &dev->kobj;
	struct xe_gt *gt = kobj_to_gt(kobj);
	u32 freq;
	ssize_t ret;

	/*
	 * When in RC6, actual frequency is 0. Let's block RC6 so we are able
	 * to verify that our freq requests are really happening.
	 */
	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (ret)
		return ret;

	xe_device_mem_access_get(gt_to_xe(gt));
	freq = xe_mmio_read32(gt, GEN12_RPSTAT1.reg);
	xe_device_mem_access_put(gt_to_xe(gt));

	freq = REG_FIELD_GET(GEN12_CAGF_MASK, freq);
	ret = sysfs_emit(buf, "%d\n", decode_freq(freq));

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
	return ret;
}
static DEVICE_ATTR_RO(freq_act);

static ssize_t freq_cur_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct kobject *kobj = &dev->kobj;
	struct xe_gt *gt = kobj_to_gt(kobj);
	u32 freq;
	ssize_t ret;

	/*
	 * GuC SLPC plays with cur freq request when GuCRC is enabled.
	 * Block RC6 for a more reliable read.
	 */
	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (ret)
		return ret;

	xe_device_mem_access_get(gt_to_xe(gt));
	freq = xe_mmio_read32(gt, GEN6_RPNSWREQ.reg);
	xe_device_mem_access_put(gt_to_xe(gt));

	freq = REG_FIELD_GET(REQ_RATIO_MASK, freq);
	ret = sysfs_emit(buf, "%d\n", decode_freq(freq));

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
	return ret;
}
static DEVICE_ATTR_RO(freq_cur);

static ssize_t freq_rp0_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct xe_guc_pc *pc = dev_to_pc(dev);

	return sysfs_emit(buf, "%d\n", pc->rp0_freq);
}
static DEVICE_ATTR_RO(freq_rp0);

static ssize_t freq_rpe_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct xe_guc_pc *pc = dev_to_pc(dev);

	pc_update_rp_values(pc);
	return sysfs_emit(buf, "%d\n", pc->rpe_freq);
}
static DEVICE_ATTR_RO(freq_rpe);

static ssize_t freq_rpn_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct xe_guc_pc *pc = dev_to_pc(dev);

	return sysfs_emit(buf, "%d\n", pc->rpn_freq);
}
static DEVICE_ATTR_RO(freq_rpn);

static ssize_t freq_min_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct xe_guc_pc *pc = dev_to_pc(dev);
	struct xe_gt *gt = pc_to_gt(pc);
	ssize_t ret;

	xe_device_mem_access_get(pc_to_xe(pc));
	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	/*
	 * GuC SLPC plays with min freq request when GuCRC is enabled.
	 * Block RC6 for a more reliable read.
	 */
	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (ret)
		goto out;

	ret = pc_action_query_task_state(pc);
	if (ret)
		goto fw;

	ret = sysfs_emit(buf, "%d\n", pc_get_min_freq(pc));

fw:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
out:
	mutex_unlock(&pc->freq_lock);
	xe_device_mem_access_put(pc_to_xe(pc));
	return ret;
}

static ssize_t freq_min_store(struct device *dev, struct device_attribute *attr,
			      const char *buff, size_t count)
{
	struct xe_guc_pc *pc = dev_to_pc(dev);
	u32 freq;
	ssize_t ret;

	ret = kstrtou32(buff, 0, &freq);
	if (ret)
		return ret;

	xe_device_mem_access_get(pc_to_xe(pc));
	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_set_min_freq(pc, freq);
	if (ret)
		goto out;

	pc->user_requested_min = freq;

out:
	mutex_unlock(&pc->freq_lock);
	xe_device_mem_access_put(pc_to_xe(pc));
	return ret ?: count;
}
static DEVICE_ATTR_RW(freq_min);

static ssize_t freq_max_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct xe_guc_pc *pc = dev_to_pc(dev);
	ssize_t ret;

	xe_device_mem_access_get(pc_to_xe(pc));
	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_action_query_task_state(pc);
	if (ret)
		goto out;

	ret = sysfs_emit(buf, "%d\n", pc_get_max_freq(pc));

out:
	mutex_unlock(&pc->freq_lock);
	xe_device_mem_access_put(pc_to_xe(pc));
	return ret;
}

static ssize_t freq_max_store(struct device *dev, struct device_attribute *attr,
			      const char *buff, size_t count)
{
	struct xe_guc_pc *pc = dev_to_pc(dev);
	u32 freq;
	ssize_t ret;

	ret = kstrtou32(buff, 0, &freq);
	if (ret)
		return ret;

	xe_device_mem_access_get(pc_to_xe(pc));
	mutex_lock(&pc->freq_lock);
	if (!pc->freq_ready) {
		/* Might be in the middle of a gt reset */
		ret = -EAGAIN;
		goto out;
	}

	ret = pc_set_max_freq(pc, freq);
	if (ret)
		goto out;

	pc->user_requested_max = freq;

out:
	mutex_unlock(&pc->freq_lock);
	xe_device_mem_access_put(pc_to_xe(pc));
	return ret ?: count;
}
static DEVICE_ATTR_RW(freq_max);
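/*
 * Usage note: as documented in the DOC section above, writing a freq_max
 * that is <= freq_min turns the minimum into a fixed frequency request;
 * e.g. storing 500 to both files pins the GuC PC request at 500 MHz
 * (provided 500 MHz lies within the RPn..RP0 range).
 */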

static ssize_t rc_status_show(struct device *dev,
			      struct device_attribute *attr, char *buff)
{
	struct xe_guc_pc *pc = dev_to_pc(dev);
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	xe_device_mem_access_get(gt_to_xe(gt));
	reg = xe_mmio_read32(gt, GEN6_GT_CORE_STATUS.reg);
	xe_device_mem_access_put(gt_to_xe(gt));

	switch (REG_FIELD_GET(RCN_MASK, reg)) {
	case GEN6_RC6:
		return sysfs_emit(buff, "rc6\n");
	case GEN6_RC0:
		return sysfs_emit(buff, "rc0\n");
	default:
		return -ENOENT;
	}
}
static DEVICE_ATTR_RO(rc_status);

static ssize_t rc6_residency_show(struct device *dev,
				  struct device_attribute *attr, char *buff)
{
	struct xe_guc_pc *pc = dev_to_pc(dev);
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;
	ssize_t ret;

	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (ret)
		return ret;

	xe_device_mem_access_get(pc_to_xe(pc));
	reg = xe_mmio_read32(gt, GEN6_GT_GFX_RC6.reg);
	xe_device_mem_access_put(pc_to_xe(pc));

	ret = sysfs_emit(buff, "%u\n", reg);

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
	return ret;
}
static DEVICE_ATTR_RO(rc6_residency);

static const struct attribute *pc_attrs[] = {
	&dev_attr_freq_act.attr,
	&dev_attr_freq_cur.attr,
	&dev_attr_freq_rp0.attr,
	&dev_attr_freq_rpe.attr,
	&dev_attr_freq_rpn.attr,
	&dev_attr_freq_min.attr,
	&dev_attr_freq_max.attr,
	&dev_attr_rc_status.attr,
	&dev_attr_rc6_residency.attr,
	NULL
};

static void pc_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 reg;

	xe_device_assert_mem_access(pc_to_xe(pc));

	if (xe->info.platform == XE_PVC)
		reg = xe_mmio_read32(gt, PVC_RP_STATE_CAP.reg);
	else
		reg = xe_mmio_read32(gt, GEN6_RP_STATE_CAP.reg);
	pc->rp0_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
	pc->rpn_freq = REG_FIELD_GET(RPN_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}
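/*
 * The fused RP fields are ratios in units of GT_FREQUENCY_MULTIPLIER (50).
 * For example, a fused RP0 field value of 24 yields
 * rp0_freq = 24 * 50 = 1200 MHz.
 */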

static int pc_adjust_freq_bounds(struct xe_guc_pc *pc)
{
	int ret;

	lockdep_assert_held(&pc->freq_lock);

	ret = pc_action_query_task_state(pc);
	if (ret)
		return ret;

	/*
	 * GuC defaults to some RPmax that is not actually achievable without
	 * overclocking. Let's adjust it to the Hardware RP0, which is the
	 * regular maximum.
	 */
	if (pc_get_max_freq(pc) > pc->rp0_freq)
		pc_set_max_freq(pc, pc->rp0_freq);

	/*
	 * Same thing happens for server platforms, where min is listed as
	 * RPMax.
	 */
	if (pc_get_min_freq(pc) > pc->rp0_freq)
		pc_set_min_freq(pc, pc->rp0_freq);

	return 0;
}

static int pc_adjust_requested_freq(struct xe_guc_pc *pc)
{
	int ret = 0;

	lockdep_assert_held(&pc->freq_lock);

	if (pc->user_requested_min != 0) {
		ret = pc_set_min_freq(pc, pc->user_requested_min);
		if (ret)
			return ret;
	}

	if (pc->user_requested_max != 0) {
		ret = pc_set_max_freq(pc, pc->user_requested_max);
		if (ret)
			return ret;
	}

	return ret;
}

static int pc_gucrc_disable(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	int ret;

	xe_device_assert_mem_access(pc_to_xe(pc));

	ret = pc_action_setup_gucrc(pc, XE_GUCRC_HOST_CONTROL);
	if (ret)
		return ret;

	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (ret)
		return ret;

	xe_mmio_write32(gt, GEN9_PG_ENABLE.reg, 0);
	xe_mmio_write32(gt, GEN6_RC_CONTROL.reg, 0);
	xe_mmio_write32(gt, GEN6_RC_STATE.reg, 0);

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
	return 0;
}

static void pc_init_pcode_freq(struct xe_guc_pc *pc)
{
	u32 min = DIV_ROUND_CLOSEST(pc->rpn_freq, GT_FREQUENCY_MULTIPLIER);
	u32 max = DIV_ROUND_CLOSEST(pc->rp0_freq, GT_FREQUENCY_MULTIPLIER);

	XE_WARN_ON(xe_pcode_init_min_freq_table(pc_to_gt(pc), min, max));
}
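/*
 * Note that pc_init_pcode_freq() hands the range back to PCODE in the same
 * 50 MHz ratio units, not in MHz: e.g. rpn_freq = 300 and rp0_freq = 1200
 * become min = 6 and max = 24 in the min-freq-table setup call.
 */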

static int pc_init_freqs(struct xe_guc_pc *pc)
{
	int ret;

	mutex_lock(&pc->freq_lock);

	ret = pc_adjust_freq_bounds(pc);
	if (ret)
		goto out;

	ret = pc_adjust_requested_freq(pc);
	if (ret)
		goto out;

	pc_update_rp_values(pc);

	pc_init_pcode_freq(pc);

	/*
	 * The frequencies are really ready for use only after the user
	 * requested ones got restored.
	 */
	pc->freq_ready = true;

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

/**
 * xe_guc_pc_start - Start GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 *
 * Return: 0 on success, negative error code on failure.
 */
int xe_guc_pc_start(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);
	struct xe_gt *gt = pc_to_gt(pc);
	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
	int ret;

	XE_WARN_ON(!xe_device_guc_submission_enabled(xe));

	xe_device_mem_access_get(pc_to_xe(pc));

	memset(pc->bo->vmap.vaddr, 0, size);
	slpc_shared_data_write(pc, header.size, size);

	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (ret) {
		/* Balance the mem_access reference taken above */
		xe_device_mem_access_put(pc_to_xe(pc));
		return ret;
	}

	ret = pc_action_reset(pc);
	if (ret)
		goto out;

	if (wait_for(pc_is_in_state(pc, SLPC_GLOBAL_STATE_RUNNING), 5)) {
		drm_err(&pc_to_xe(pc)->drm, "GuC PC Start failed\n");
		ret = -EIO;
		goto out;
	}

	ret = pc_init_freqs(pc);
	if (ret)
		goto out;

	if (xe->info.platform == XE_PVC) {
		pc_gucrc_disable(pc);
		ret = 0;
		goto out;
	}

	ret = pc_action_setup_gucrc(pc, XE_GUCRC_FIRMWARE_CONTROL);

out:
	xe_device_mem_access_put(pc_to_xe(pc));
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
	return ret;
}

/**
 * xe_guc_pc_stop - Stop GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 *
 * Return: 0 on success, negative error code on failure.
 */
int xe_guc_pc_stop(struct xe_guc_pc *pc)
{
	int ret;

	xe_device_mem_access_get(pc_to_xe(pc));

	ret = pc_gucrc_disable(pc);
	if (ret)
		goto out;

	mutex_lock(&pc->freq_lock);
	pc->freq_ready = false;
	mutex_unlock(&pc->freq_lock);

	ret = pc_action_shutdown(pc);
	if (ret)
		goto out;

	if (wait_for(pc_is_in_state(pc, SLPC_GLOBAL_STATE_NOT_RUNNING), 5)) {
		drm_err(&pc_to_xe(pc)->drm, "GuC PC Shutdown failed\n");
		ret = -EIO;
	}

out:
	xe_device_mem_access_put(pc_to_xe(pc));
	return ret;
}

static void pc_fini(struct drm_device *drm, void *arg)
{
	struct xe_guc_pc *pc = arg;

	XE_WARN_ON(xe_guc_pc_stop(pc));
	sysfs_remove_files(pc_to_gt(pc)->sysfs, pc_attrs);
	xe_bo_unpin_map_no_vm(pc->bo);
}

/**
 * xe_guc_pc_init - Initialize GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 *
 * Return: 0 on success, negative error code on failure.
 */
int xe_guc_pc_init(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_bo *bo;
	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
	int err;

	mutex_init(&pc->freq_lock);

	bo = xe_bo_create_pin_map(xe, gt, NULL, size,
				  ttm_bo_type_kernel,
				  XE_BO_CREATE_VRAM_IF_DGFX(gt) |
				  XE_BO_CREATE_GGTT_BIT);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	pc->bo = bo;

	pc_init_fused_rp_values(pc);

	err = sysfs_create_files(gt->sysfs, pc_attrs);
	if (err)
		return err;

	err = drmm_add_action_or_reset(&xe->drm, pc_fini, pc);
	if (err)
		return err;

	return 0;
}