xref: /linux/drivers/gpu/drm/xe/xe_guc_engine_activity.c (revision b729ea271e849c88f91ba51208e7ca3fb2f1bc4c)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2025 Intel Corporation
 */

#include <drm/drm_managed.h>

#include "abi/guc_actions_abi.h"
#include "regs/xe_gt_regs.h"

#include "xe_bo.h"
#include "xe_force_wake.h"
#include "xe_gt_printk.h"
#include "xe_guc.h"
#include "xe_guc_engine_activity.h"
#include "xe_guc_ct.h"
#include "xe_hw_engine.h"
#include "xe_map.h"
#include "xe_mmio.h"

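/*
 * quanta_ratio values reported by the GuC are scaled against TOTAL_QUANTA,
 * i.e. a ratio of 0x8000 means the engine is allotted the full sampling
 * period (see get_engine_total_ticks()).
 */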
#define TOTAL_QUANTA 0x8000

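/* iosys_map of the GuC-written activity record for @hwe within the activity buffer */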
static struct iosys_map engine_activity_map(struct xe_guc *guc, struct xe_hw_engine *hwe)
{
	struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
	struct engine_activity_buffer *buffer = &engine_activity->device_buffer;
	u16 guc_class = xe_engine_class_to_guc_class(hwe->class);
	size_t offset;

	offset = offsetof(struct guc_engine_activity_data,
			  engine_activity[guc_class][hwe->logical_instance]);

	return IOSYS_MAP_INIT_OFFSET(&buffer->activity_bo->vmap, offset);
}

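/* iosys_map of the GuC-written global activity metadata */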
static struct iosys_map engine_metadata_map(struct xe_guc *guc)
{
	struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
	struct engine_activity_buffer *buffer = &engine_activity->device_buffer;

	return buffer->metadata_bo->vmap;
}

static int allocate_engine_activity_group(struct xe_guc *guc)
{
	struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
	struct xe_device *xe = guc_to_xe(guc);
	u32 num_activity_group = 1; /* Will be modified for VF */

	engine_activity->eag = drmm_kcalloc(&xe->drm, num_activity_group,
					    sizeof(struct engine_activity_group), GFP_KERNEL);

	if (!engine_activity->eag)
		return -ENOMEM;

	engine_activity->num_activity_group = num_activity_group;

	return 0;
}

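/*
 * The metadata buffer is kept in system memory, while the per-engine activity
 * buffer goes to VRAM on discrete parts; both are pinned and GGTT-mapped so
 * the GuC can write into them.
 */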
static int allocate_engine_activity_buffers(struct xe_guc *guc,
					    struct engine_activity_buffer *buffer)
{
	u32 metadata_size = sizeof(struct guc_engine_activity_metadata);
	u32 size = sizeof(struct guc_engine_activity_data);
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_bo *bo, *metadata_bo;

	metadata_bo = xe_bo_create_pin_map(gt_to_xe(gt), tile, NULL, PAGE_ALIGN(metadata_size),
					   ttm_bo_type_kernel, XE_BO_FLAG_SYSTEM |
					   XE_BO_FLAG_GGTT | XE_BO_FLAG_GGTT_INVALIDATE);

	if (IS_ERR(metadata_bo))
		return PTR_ERR(metadata_bo);

	bo = xe_bo_create_pin_map(gt_to_xe(gt), tile, NULL, PAGE_ALIGN(size),
				  ttm_bo_type_kernel, XE_BO_FLAG_VRAM_IF_DGFX(tile) |
				  XE_BO_FLAG_GGTT | XE_BO_FLAG_GGTT_INVALIDATE);

	if (IS_ERR(bo)) {
		xe_bo_unpin_map_no_vm(metadata_bo);
		return PTR_ERR(bo);
	}

	buffer->metadata_bo = metadata_bo;
	buffer->activity_bo = bo;
	return 0;
}

static void free_engine_activity_buffers(struct engine_activity_buffer *buffer)
{
	xe_bo_unpin_map_no_vm(buffer->metadata_bo);
	xe_bo_unpin_map_no_vm(buffer->activity_bo);
}

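/*
 * Software-tracked activity state for @hwe; only a single activity group
 * (index 0) exists for now, see allocate_engine_activity_group().
 */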
static struct engine_activity *hw_engine_to_engine_activity(struct xe_hw_engine *hwe)
{
	struct xe_guc *guc = &hwe->gt->uc.guc;
	struct engine_activity_group *eag = &guc->engine_activity.eag[0];
	u16 guc_class = xe_engine_class_to_guc_class(hwe->class);

	return &eag->engine[guc_class][hwe->logical_instance];
}

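/* Convert a CPU time delta in ns to GuC TSC ticks at the given frequency */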
static u64 cpu_ns_to_guc_tsc_tick(ktime_t ns, u32 freq)
{
	return mul_u64_u32_div(ns, freq, NSEC_PER_SEC);
}

#define read_engine_activity_record(xe_, map_, field_) \
	xe_map_rd_field(xe_, map_, 0, struct guc_engine_activity, field_)

#define read_metadata_record(xe_, map_, field_) \
	xe_map_rd_field(xe_, map_, 0, struct guc_engine_activity_metadata, field_)

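/*
 * Accumulate the active (busy) ticks the GuC has published for @hwe.
 *
 * New values are folded into ea->total only when both the global and the
 * per-engine change counters have moved, i.e. the GuC has written a fresh
 * snapshot. If the engine was running when that snapshot was taken
 * (last_update_tick != 0), the time elapsed since then is estimated from the
 * GPM timestamp and reported via ea->active on top of the accumulated total.
 */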
static u64 get_engine_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe)
{
	struct engine_activity *ea = hw_engine_to_engine_activity(hwe);
	struct guc_engine_activity *cached_activity = &ea->activity;
	struct guc_engine_activity_metadata *cached_metadata = &ea->metadata;
	struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
	struct iosys_map activity_map, metadata_map;
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gt *gt = guc_to_gt(guc);
	u32 last_update_tick, global_change_num;
	u64 active_ticks, gpm_ts;
	u16 change_num;

	activity_map = engine_activity_map(guc, hwe);
	metadata_map = engine_metadata_map(guc);
	global_change_num = read_metadata_record(xe, &metadata_map, global_change_num);

	/* GuC has not initialized activity data yet, return 0 */
	if (!global_change_num)
		goto update;

	if (global_change_num == cached_metadata->global_change_num)
		goto update;

	cached_metadata->global_change_num = global_change_num;
	change_num = read_engine_activity_record(xe, &activity_map, change_num);

	if (!change_num || change_num == cached_activity->change_num)
		goto update;

	/* read engine activity values */
	last_update_tick = read_engine_activity_record(xe, &activity_map, last_update_tick);
	active_ticks = read_engine_activity_record(xe, &activity_map, active_ticks);

	/* activity calculations */
	ea->running = !!last_update_tick;
	ea->total += active_ticks - cached_activity->active_ticks;
	ea->active = 0;

	/* cache the counter */
	cached_activity->change_num = change_num;
	cached_activity->last_update_tick = last_update_tick;
	cached_activity->active_ticks = active_ticks;

update:
	if (ea->running) {
		gpm_ts = xe_mmio_read64_2x32(&gt->mmio, MISC_STATUS_0) >>
			 engine_activity->gpm_timestamp_shift;
		ea->active = lower_32_bits(gpm_ts) - cached_activity->last_update_tick;
	}

	return ea->total + ea->active;
}

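/*
 * Accumulate the total ticks available to @hwe.
 *
 * The CPU time elapsed since the last query is scaled by
 * quanta_ratio / TOTAL_QUANTA (the share of the period allotted to this
 * engine), carrying the sub-quantum remainder forward to avoid drift, and the
 * result is converted to GuC TSC ticks so it is directly comparable with the
 * active ticks.
 */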
static u64 get_engine_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe)
{
	struct engine_activity *ea = hw_engine_to_engine_activity(hwe);
	struct guc_engine_activity_metadata *cached_metadata = &ea->metadata;
	struct guc_engine_activity *cached_activity = &ea->activity;
	struct iosys_map activity_map, metadata_map;
	struct xe_device *xe = guc_to_xe(guc);
	ktime_t now, cpu_delta;
	u64 numerator;
	u16 quanta_ratio;

	activity_map = engine_activity_map(guc, hwe);
	metadata_map = engine_metadata_map(guc);

	if (!cached_metadata->guc_tsc_frequency_hz)
		cached_metadata->guc_tsc_frequency_hz = read_metadata_record(xe, &metadata_map,
									     guc_tsc_frequency_hz);

	quanta_ratio = read_engine_activity_record(xe, &activity_map, quanta_ratio);
	cached_activity->quanta_ratio = quanta_ratio;

	/* Total ticks calculations */
	now = ktime_get();
	cpu_delta = now - ea->last_cpu_ts;
	ea->last_cpu_ts = now;
	numerator = (ea->quanta_remainder_ns + cpu_delta) * cached_activity->quanta_ratio;
	ea->quanta_ns += numerator / TOTAL_QUANTA;
	ea->quanta_remainder_ns = numerator % TOTAL_QUANTA;
	ea->quanta = cpu_ns_to_guc_tsc_tick(ea->quanta_ns, cached_metadata->guc_tsc_frequency_hz);

	return ea->quanta;
}

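/*
 * Pass the GGTT addresses of the metadata and activity buffers to the GuC so
 * it starts publishing engine activity data into them.
 */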
static int enable_engine_activity_stats(struct xe_guc *guc)
{
	struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
	struct engine_activity_buffer *buffer = &engine_activity->device_buffer;
	u32 action[] = {
		XE_GUC_ACTION_SET_DEVICE_ENGINE_ACTIVITY_BUFFER,
		xe_bo_ggtt_addr(buffer->metadata_bo),
		0,
		xe_bo_ggtt_addr(buffer->activity_bo),
		0,
	};

	/* Blocking here to ensure the buffers are ready before reading them */
	return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
}

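/* Record the CPU timestamp baseline used by the total ticks accounting */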
static void engine_activity_set_cpu_ts(struct xe_guc *guc)
{
	struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
	struct engine_activity_group *eag = &engine_activity->eag[0];
	int i, j;

	for (i = 0; i < GUC_MAX_ENGINE_CLASSES; i++)
		for (j = 0; j < GUC_MAX_INSTANCES_PER_CLASS; j++)
			eag->engine[i][j].last_cpu_ts = ktime_get();
}

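/*
 * Shift that brings the GPM timestamp read from MISC_STATUS_0 into the same
 * tick granularity the GuC reports in, derived from the CTC shift parameter
 * in RPM_CONFIG0.
 */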
static u32 gpm_timestamp_shift(struct xe_gt *gt)
{
	u32 reg;

	reg = xe_mmio_read32(&gt->mmio, RPM_CONFIG0);

	return 3 - REG_FIELD_GET(RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK, reg);
}

/**
 * xe_guc_engine_activity_active_ticks - Get engine active ticks
 * @guc: The GuC object
 * @hwe: The hw_engine object
 *
 * Return: accumulated ticks @hwe was active since engine activity stats were enabled.
 */
u64 xe_guc_engine_activity_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe)
{
	return get_engine_active_ticks(guc, hwe);
}

/**
 * xe_guc_engine_activity_total_ticks - Get engine total ticks
 * @guc: The GuC object
 * @hwe: The hw_engine object
 *
 * Return: accumulated quanta of ticks allocated for the engine.
 */
u64 xe_guc_engine_activity_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe)
{
	return get_engine_total_ticks(guc, hwe);
}
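
/*
 * A minimal sketch of how a caller (the xe PMU, for instance) could turn the
 * two counters above into a busyness percentage; sample_busyness() and its
 * prev_* bookkeeping are illustrative only, not part of this file:
 *
 *	static u64 sample_busyness(struct xe_guc *guc, struct xe_hw_engine *hwe,
 *				   u64 *prev_active, u64 *prev_total)
 *	{
 *		u64 active = xe_guc_engine_activity_active_ticks(guc, hwe);
 *		u64 total = xe_guc_engine_activity_total_ticks(guc, hwe);
 *		u64 busy_pct = 0;
 *
 *		if (total > *prev_total)
 *			busy_pct = div64_u64(100 * (active - *prev_active),
 *					     total - *prev_total);
 *
 *		*prev_active = active;
 *		*prev_total = total;
 *		return busy_pct;
 *	}
 */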

/**
 * xe_guc_engine_activity_enable_stats - Enable engine activity stats
 * @guc: The GuC object
 *
 * Enable engine activity stats and set initial timestamps.
 */
void xe_guc_engine_activity_enable_stats(struct xe_guc *guc)
{
	int ret;

	ret = enable_engine_activity_stats(guc);
	if (ret)
		xe_gt_err(guc_to_gt(guc), "failed to enable activity stats %d\n", ret);
	else
		engine_activity_set_cpu_ts(guc);
}

static void engine_activity_fini(void *arg)
{
	struct xe_guc_engine_activity *engine_activity = arg;
	struct engine_activity_buffer *buffer = &engine_activity->device_buffer;

	free_engine_activity_buffers(buffer);
}

/**
 * xe_guc_engine_activity_init - Initialize the engine activity data
 * @guc: The GuC object
 *
 * Return: 0 on success, negative error code otherwise.
 */
int xe_guc_engine_activity_init(struct xe_guc *guc)
{
	struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = gt_to_xe(gt);
	int ret;

	if (IS_SRIOV_VF(xe))
		return 0;

	ret = allocate_engine_activity_group(guc);
	if (ret) {
		xe_gt_err(gt, "failed to allocate engine activity group (%pe)\n", ERR_PTR(ret));
		return ret;
	}

	ret = allocate_engine_activity_buffers(guc, &engine_activity->device_buffer);
	if (ret) {
		xe_gt_err(gt, "failed to allocate engine activity buffers (%pe)\n", ERR_PTR(ret));
		return ret;
	}

	engine_activity->gpm_timestamp_shift = gpm_timestamp_shift(gt);

	return devm_add_action_or_reset(gt_to_xe(gt)->drm.dev, engine_activity_fini,
					engine_activity);
}