xref: /linux/drivers/gpu/drm/xe/xe_gt_idle.c (revision 37aeccf5f839c155e8c9100937a01059b24e61b5)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2023 Intel Corporation
4  */
5 
6 #include <drm/drm_managed.h>
7 
8 #include "xe_force_wake.h"
9 #include "xe_device.h"
10 #include "xe_gt.h"
11 #include "xe_gt_idle.h"
12 #include "xe_gt_sysfs.h"
13 #include "xe_guc_pc.h"
14 #include "regs/xe_gt_regs.h"
15 #include "xe_macros.h"
16 #include "xe_mmio.h"
17 #include "xe_pm.h"
18 #include "xe_sriov.h"
19 
20 /**
21  * DOC: Xe GT Idle
22  *
23  * Contains functions that init GT idle features like C6
24  *
25  * device/gt#/gtidle/name - name of the state
26  * device/gt#/gtidle/idle_residency_ms - Provides residency of the idle state in ms
27  * device/gt#/gtidle/idle_status - Provides current idle state
28  */
29 
30 static struct xe_gt_idle *dev_to_gtidle(struct device *dev)
31 {
32 	struct kobject *kobj = &dev->kobj;
33 
34 	return &kobj_to_gt(kobj->parent)->gtidle;
35 }
36 
37 static struct xe_gt *gtidle_to_gt(struct xe_gt_idle *gtidle)
38 {
39 	return container_of(gtidle, struct xe_gt, gtidle);
40 }
41 
42 static struct xe_guc_pc *gtidle_to_pc(struct xe_gt_idle *gtidle)
43 {
44 	return &gtidle_to_gt(gtidle)->uc.guc.pc;
45 }
46 
47 static struct xe_device *
48 pc_to_xe(struct xe_guc_pc *pc)
49 {
50 	struct xe_guc *guc = container_of(pc, struct xe_guc, pc);
51 	struct xe_gt *gt = container_of(guc, struct xe_gt, uc.guc);
52 
53 	return gt_to_xe(gt);
54 }
55 
56 static const char *gt_idle_state_to_string(enum xe_gt_idle_state state)
57 {
58 	switch (state) {
59 	case GT_IDLE_C0:
60 		return "gt-c0";
61 	case GT_IDLE_C6:
62 		return "gt-c6";
63 	default:
64 		return "unknown";
65 	}
66 }
67 
68 static u64 get_residency_ms(struct xe_gt_idle *gtidle, u64 cur_residency)
69 {
70 	u64 delta, overflow_residency, prev_residency;
71 
72 	overflow_residency = BIT_ULL(32);
73 
74 	/*
75 	 * Counter wrap handling
76 	 * Store previous hw counter values for counter wrap-around handling
77 	 * Relying on sufficient frequency of queries otherwise counters can still wrap.
78 	 */
79 	prev_residency = gtidle->prev_residency;
80 	gtidle->prev_residency = cur_residency;
81 
82 	/* delta */
83 	if (cur_residency >= prev_residency)
84 		delta = cur_residency - prev_residency;
85 	else
86 		delta = cur_residency + (overflow_residency - prev_residency);
87 
88 	/* Add delta to extended raw driver copy of idle residency */
89 	cur_residency = gtidle->cur_residency + delta;
90 	gtidle->cur_residency = cur_residency;
91 
92 	/* residency multiplier in ns, convert to ms */
93 	cur_residency = mul_u64_u32_div(cur_residency, gtidle->residency_multiplier, 1e6);
94 
95 	return cur_residency;
96 }
97 
/**
 * xe_gt_idle_enable_pg - enable coarse power gating (CPG) for a GT
 * @gt: GT object
 *
 * Builds the render/media/VD-box power-gate enable mask from the engines
 * present on @gt, caches it in gtidle->powergate_enable and writes it to
 * the POWERGATE_ENABLE register under forcewake. No-op for SR-IOV VFs
 * (PF owns these registers) and for PVC (CPG disabled there).
 */
void xe_gt_idle_enable_pg(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_gt_idle *gtidle = &gt->gtidle;
	struct xe_mmio *mmio = &gt->mmio;
	u32 vcs_mask, vecs_mask;
	int i, j;

	if (IS_SRIOV_VF(xe))
		return;

	/* Disable CPG for PVC */
	if (xe->info.platform == XE_PVC)
		return;

	xe_device_assert_mem_access(gt_to_xe(gt));

	vcs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_VIDEO_DECODE);
	vecs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_VIDEO_ENHANCE);

	/*
	 * Media power-gate only if media engines exist; note plain '='
	 * (re)initializes the cached mask here, the later ORs build on it.
	 */
	if (vcs_mask || vecs_mask)
		gtidle->powergate_enable = MEDIA_POWERGATE_ENABLE;

	if (!xe_gt_is_media_type(gt))
		gtidle->powergate_enable |= RENDER_POWERGATE_ENABLE;

	/* Per-VD-box HCP and MFX/VDENC power-gate bits for each present VCS */
	for (i = XE_HW_ENGINE_VCS0, j = 0; i <= XE_HW_ENGINE_VCS7; ++i, ++j) {
		if ((gt->info.engine_mask & BIT(i)))
			gtidle->powergate_enable |= (VDN_HCP_POWERGATE_ENABLE(j) |
						     VDN_MFXVDENC_POWERGATE_ENABLE(j));
	}

	XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FW_GT));
	if (xe->info.skip_guc_pc) {
		/*
		 * GuC sets the hysteresis value when GuC PC is enabled
		 * else set it to 25 (25 * 1.28us)
		 */
		xe_mmio_write32(mmio, MEDIA_POWERGATE_IDLE_HYSTERESIS, 25);
		xe_mmio_write32(mmio, RENDER_POWERGATE_IDLE_HYSTERESIS, 25);
	}

	/* Commit the assembled mask to HW while forcewake is still held */
	xe_mmio_write32(mmio, POWERGATE_ENABLE, gtidle->powergate_enable);
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FW_GT));
}
143 
144 void xe_gt_idle_disable_pg(struct xe_gt *gt)
145 {
146 	struct xe_gt_idle *gtidle = &gt->gtidle;
147 
148 	if (IS_SRIOV_VF(gt_to_xe(gt)))
149 		return;
150 
151 	xe_device_assert_mem_access(gt_to_xe(gt));
152 	gtidle->powergate_enable = 0;
153 
154 	XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FW_GT));
155 	xe_mmio_write32(&gt->mmio, POWERGATE_ENABLE, gtidle->powergate_enable);
156 	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FW_GT));
157 }
158 
159 /**
160  * xe_gt_idle_pg_print - Xe powergating info
161  * @gt: GT object
162  * @p: drm_printer.
163  *
164  * This function prints the powergating information
165  *
166  * Return: 0 on success, negative error code otherwise
167  */
int xe_gt_idle_pg_print(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_gt_idle *gtidle = &gt->gtidle;
	struct xe_device *xe = gt_to_xe(gt);
	enum xe_gt_idle_state state;
	u32 pg_enabled, pg_status = 0;
	u32 vcs_mask, vecs_mask;
	int err, n;
	/*
	 * Media Slices
	 *
	 * Slice 0: VCS0, VCS1, VECS0
	 * Slice 1: VCS2, VCS3, VECS1
	 * Slice 2: VCS4, VCS5, VECS2
	 * Slice 3: VCS6, VCS7, VECS3
	 */
	static const struct {
		u64 engines;      /* mask of engines making up this media slice */
		u32 status_bit;   /* awake-status bit in POWERGATE_DOMAIN_STATUS */
	} media_slices[] = {
		{(BIT(XE_HW_ENGINE_VCS0) | BIT(XE_HW_ENGINE_VCS1) |
		  BIT(XE_HW_ENGINE_VECS0)), MEDIA_SLICE0_AWAKE_STATUS},

		{(BIT(XE_HW_ENGINE_VCS2) | BIT(XE_HW_ENGINE_VCS3) |
		   BIT(XE_HW_ENGINE_VECS1)), MEDIA_SLICE1_AWAKE_STATUS},

		{(BIT(XE_HW_ENGINE_VCS4) | BIT(XE_HW_ENGINE_VCS5) |
		   BIT(XE_HW_ENGINE_VECS2)), MEDIA_SLICE2_AWAKE_STATUS},

		{(BIT(XE_HW_ENGINE_VCS6) | BIT(XE_HW_ENGINE_VCS7) |
		   BIT(XE_HW_ENGINE_VECS3)), MEDIA_SLICE3_AWAKE_STATUS},
	};

	/* CPG is disabled on PVC (see xe_gt_idle_enable_pg), nothing to report */
	if (xe->info.platform == XE_PVC) {
		drm_printf(p, "Power Gating not supported\n");
		return 0;
	}

	state = gtidle->idle_status(gtidle_to_pc(gtidle));
	pg_enabled = gtidle->powergate_enable;

	/* Do not wake the GT to read powergating status */
	if (state != GT_IDLE_C6) {
		err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
		if (err)
			return err;

		/* GT is awake: read live enable/status instead of the cache */
		pg_enabled = xe_mmio_read32(&gt->mmio, POWERGATE_ENABLE);
		pg_status = xe_mmio_read32(&gt->mmio, POWERGATE_DOMAIN_STATUS);

		XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FW_GT));
	}

	/* Render PG info only applies when a render engine is present */
	if (gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK) {
		drm_printf(p, "Render Power Gating Enabled: %s\n",
			   str_yes_no(pg_enabled & RENDER_POWERGATE_ENABLE));

		drm_printf(p, "Render Power Gate Status: %s\n",
			   str_up_down(pg_status & RENDER_AWAKE_STATUS));
	}

	vcs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_VIDEO_DECODE);
	vecs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_VIDEO_ENHANCE);

	/* Print media CPG status only if media is present */
	if (vcs_mask || vecs_mask) {
		drm_printf(p, "Media Power Gating Enabled: %s\n",
			   str_yes_no(pg_enabled & MEDIA_POWERGATE_ENABLE));

		/* One status line per media slice that has any engine fused in */
		for (n = 0; n < ARRAY_SIZE(media_slices); n++)
			if (gt->info.engine_mask & media_slices[n].engines)
				drm_printf(p, "Media Slice%d Power Gate Status: %s\n", n,
					   str_up_down(pg_status & media_slices[n].status_bit));
	}
	return 0;
}
244 
245 static ssize_t name_show(struct device *dev,
246 			 struct device_attribute *attr, char *buff)
247 {
248 	struct xe_gt_idle *gtidle = dev_to_gtidle(dev);
249 	struct xe_guc_pc *pc = gtidle_to_pc(gtidle);
250 	ssize_t ret;
251 
252 	xe_pm_runtime_get(pc_to_xe(pc));
253 	ret = sysfs_emit(buff, "%s\n", gtidle->name);
254 	xe_pm_runtime_put(pc_to_xe(pc));
255 
256 	return ret;
257 }
258 static DEVICE_ATTR_RO(name);
259 
260 static ssize_t idle_status_show(struct device *dev,
261 				struct device_attribute *attr, char *buff)
262 {
263 	struct xe_gt_idle *gtidle = dev_to_gtidle(dev);
264 	struct xe_guc_pc *pc = gtidle_to_pc(gtidle);
265 	enum xe_gt_idle_state state;
266 
267 	xe_pm_runtime_get(pc_to_xe(pc));
268 	state = gtidle->idle_status(pc);
269 	xe_pm_runtime_put(pc_to_xe(pc));
270 
271 	return sysfs_emit(buff, "%s\n", gt_idle_state_to_string(state));
272 }
273 static DEVICE_ATTR_RO(idle_status);
274 
275 static ssize_t idle_residency_ms_show(struct device *dev,
276 				      struct device_attribute *attr, char *buff)
277 {
278 	struct xe_gt_idle *gtidle = dev_to_gtidle(dev);
279 	struct xe_guc_pc *pc = gtidle_to_pc(gtidle);
280 	u64 residency;
281 
282 	xe_pm_runtime_get(pc_to_xe(pc));
283 	residency = gtidle->idle_residency(pc);
284 	xe_pm_runtime_put(pc_to_xe(pc));
285 
286 	return sysfs_emit(buff, "%llu\n", get_residency_ms(gtidle, residency));
287 }
288 static DEVICE_ATTR_RO(idle_residency_ms);
289 
/* NULL-terminated attribute list exposed under device/gt#/gtidle/ */
static const struct attribute *gt_idle_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_idle_status.attr,
	&dev_attr_idle_residency_ms.attr,
	NULL,
};
296 
297 static void gt_idle_fini(void *arg)
298 {
299 	struct kobject *kobj = arg;
300 	struct xe_gt *gt = kobj_to_gt(kobj->parent);
301 
302 	xe_gt_idle_disable_pg(gt);
303 
304 	if (gt_to_xe(gt)->info.skip_guc_pc) {
305 		XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FW_GT));
306 		xe_gt_idle_disable_c6(gt);
307 		xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
308 	}
309 
310 	sysfs_remove_files(kobj, gt_idle_attrs);
311 	kobject_put(kobj);
312 }
313 
314 int xe_gt_idle_init(struct xe_gt_idle *gtidle)
315 {
316 	struct xe_gt *gt = gtidle_to_gt(gtidle);
317 	struct xe_device *xe = gt_to_xe(gt);
318 	struct kobject *kobj;
319 	int err;
320 
321 	if (IS_SRIOV_VF(xe))
322 		return 0;
323 
324 	kobj = kobject_create_and_add("gtidle", gt->sysfs);
325 	if (!kobj)
326 		return -ENOMEM;
327 
328 	if (xe_gt_is_media_type(gt)) {
329 		snprintf(gtidle->name, sizeof(gtidle->name), "gt%d-mc", gt->info.id);
330 		gtidle->idle_residency = xe_guc_pc_mc6_residency;
331 	} else {
332 		snprintf(gtidle->name, sizeof(gtidle->name), "gt%d-rc", gt->info.id);
333 		gtidle->idle_residency = xe_guc_pc_rc6_residency;
334 	}
335 
336 	/* Multiplier for Residency counter in units of 1.28us */
337 	gtidle->residency_multiplier = 1280;
338 	gtidle->idle_status = xe_guc_pc_c_status;
339 
340 	err = sysfs_create_files(kobj, gt_idle_attrs);
341 	if (err) {
342 		kobject_put(kobj);
343 		return err;
344 	}
345 
346 	xe_gt_idle_enable_pg(gt);
347 
348 	return devm_add_action_or_reset(xe->drm.dev, gt_idle_fini, kobj);
349 }
350 
351 void xe_gt_idle_enable_c6(struct xe_gt *gt)
352 {
353 	xe_device_assert_mem_access(gt_to_xe(gt));
354 	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
355 
356 	if (IS_SRIOV_VF(gt_to_xe(gt)))
357 		return;
358 
359 	/* Units of 1280 ns for a total of 5s */
360 	xe_mmio_write32(&gt->mmio, RC_IDLE_HYSTERSIS, 0x3B9ACA);
361 	/* Enable RC6 */
362 	xe_mmio_write32(&gt->mmio, RC_CONTROL,
363 			RC_CTL_HW_ENABLE | RC_CTL_TO_MODE | RC_CTL_RC6_ENABLE);
364 }
365 
366 void xe_gt_idle_disable_c6(struct xe_gt *gt)
367 {
368 	xe_device_assert_mem_access(gt_to_xe(gt));
369 	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
370 
371 	if (IS_SRIOV_VF(gt_to_xe(gt)))
372 		return;
373 
374 	xe_mmio_write32(&gt->mmio, RC_CONTROL, 0);
375 	xe_mmio_write32(&gt->mmio, RC_STATE, 0);
376 }
377