/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright © 2022 Intel Corporation
 */

#undef TRACE_SYSTEM
#define TRACE_SYSTEM xe

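/*
 * Trace headers are read more than once by <trace/define_trace.h>, once per
 * expansion stage; the TRACE_HEADER_MULTI_READ side of the guard below is
 * what permits those repeated inclusions.
 */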
#if !defined(_XE_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _XE_TRACE_H_

#include <linux/tracepoint.h>
#include <linux/types.h>

#include "xe_exec_queue_types.h"
#include "xe_gpu_scheduler_types.h"
#include "xe_gt_tlb_invalidation_types.h"
#include "xe_gt_types.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_sched_job.h"
#include "xe_vm.h"

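/*
 * Helpers for the "dev" string recorded by every event below: resolve an
 * xe_device, a GT or an exec queue back to the underlying DRM device name.
 */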
#define __dev_name_xe(xe)	dev_name((xe)->drm.dev)
#define __dev_name_gt(gt)	__dev_name_xe(gt_to_xe((gt)))
#define __dev_name_eq(q)	__dev_name_gt((q)->gt)

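/*
 * GT TLB invalidation fences: one event class, instantiated below for each
 * step in a fence's life (create, work_func, cb, send, recv, signal,
 * timeout). Each event logs the fence pointer and its seqno.
 */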
DECLARE_EVENT_CLASS(xe_gt_tlb_invalidation_fence,
		    TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
		    TP_ARGS(xe, fence),

		    TP_STRUCT__entry(
			     __string(dev, __dev_name_xe(xe))
			     __field(struct xe_gt_tlb_invalidation_fence *, fence)
			     __field(int, seqno)
			     ),

		    TP_fast_assign(
			   __assign_str(dev);
			   __entry->fence = fence;
			   __entry->seqno = fence->seqno;
			   ),

		    TP_printk("dev=%s, fence=%p, seqno=%d",
			      __get_str(dev), __entry->fence, __entry->seqno)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_create,
	     TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(xe, fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence,
	     xe_gt_tlb_invalidation_fence_work_func,
	     TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(xe, fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_cb,
	     TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(xe, fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_send,
	     TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(xe, fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_recv,
	     TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(xe, fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_signal,
	     TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(xe, fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_timeout,
	     TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(xe, fence)
);

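/*
 * Exec queue events: a shared snapshot (engine class, logical mask, GT id,
 * width, GuC id, GuC state and flags) logged at each point in a queue's
 * lifecycle, from create through register/deregister to destroy.
 */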
DECLARE_EVENT_CLASS(xe_exec_queue,
		    TP_PROTO(struct xe_exec_queue *q),
		    TP_ARGS(q),

		    TP_STRUCT__entry(
			     __string(dev, __dev_name_eq(q))
			     __field(enum xe_engine_class, class)
			     __field(u32, logical_mask)
			     __field(u8, gt_id)
			     __field(u16, width)
			     __field(u16, guc_id)
			     __field(u32, guc_state)
			     __field(u32, flags)
			     ),

		    TP_fast_assign(
			   __assign_str(dev);
			   __entry->class = q->class;
			   __entry->logical_mask = q->logical_mask;
			   __entry->gt_id = q->gt->info.id;
			   __entry->width = q->width;
			   __entry->guc_id = q->guc->id;
			   __entry->guc_state = atomic_read(&q->guc->state);
			   __entry->flags = q->flags;
			   ),

		    TP_printk("dev=%s, %d:0x%x, gt=%d, width=%d, guc_id=%d, guc_state=0x%x, flags=0x%x",
			      __get_str(dev), __entry->class, __entry->logical_mask,
			      __entry->gt_id, __entry->width, __entry->guc_id,
			      __entry->guc_state, __entry->flags)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_create,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_supress_resume,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_submit,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_enable,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_disable,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_done,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_register,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_deregister,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_deregister_done,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_close,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_kill,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_cleanup_entity,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_destroy,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_reset,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_memory_cat_error,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_stop,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_resubmit,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_lr_cleanup,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

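/*
 * Scheduler job events: log the job's scheduler seqno and LRC seqno along
 * with the owning queue's GuC id/state, its fence, error and batch address,
 * so a job can be matched against the queue events above.
 */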
DECLARE_EVENT_CLASS(xe_sched_job,
		    TP_PROTO(struct xe_sched_job *job),
		    TP_ARGS(job),

		    TP_STRUCT__entry(
			     __string(dev, __dev_name_eq(job->q))
			     __field(u32, seqno)
			     __field(u32, lrc_seqno)
			     __field(u16, guc_id)
			     __field(u32, guc_state)
			     __field(u32, flags)
			     __field(int, error)
			     __field(struct dma_fence *, fence)
			     __field(u64, batch_addr)
			     ),

		    TP_fast_assign(
			   __assign_str(dev);
			   __entry->seqno = xe_sched_job_seqno(job);
			   __entry->lrc_seqno = xe_sched_job_lrc_seqno(job);
			   __entry->guc_id = job->q->guc->id;
			   __entry->guc_state =
			   atomic_read(&job->q->guc->state);
			   __entry->flags = job->q->flags;
			   __entry->error = job->fence ? job->fence->error : 0;
			   __entry->fence = job->fence;
			   __entry->batch_addr = (u64)job->ptrs[0].batch_addr;
			   ),

		    TP_printk("dev=%s, fence=%p, seqno=%u, lrc_seqno=%u, guc_id=%d, batch_addr=0x%012llx, guc_state=0x%x, flags=0x%x, error=%d",
			      __get_str(dev), __entry->fence, __entry->seqno,
			      __entry->lrc_seqno, __entry->guc_id,
			      __entry->batch_addr, __entry->guc_state,
			      __entry->flags, __entry->error)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_create,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_exec,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_run,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_free,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_timedout,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_set_error,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_ban,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

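/*
 * Scheduler messages: private_data carries the destination exec queue, so
 * the events log the message opcode against that queue's GuC id.
 */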
DECLARE_EVENT_CLASS(xe_sched_msg,
		    TP_PROTO(struct xe_sched_msg *msg),
		    TP_ARGS(msg),

		    TP_STRUCT__entry(
			     __string(dev, __dev_name_eq(((struct xe_exec_queue *)msg->private_data)))
			     __field(u32, opcode)
			     __field(u16, guc_id)
			     ),

		    TP_fast_assign(
			   __assign_str(dev);
			   __entry->opcode = msg->opcode;
			   __entry->guc_id =
			   ((struct xe_exec_queue *)msg->private_data)->guc->id;
			   ),

		    TP_printk("dev=%s, guc_id=%d, opcode=%u", __get_str(dev), __entry->guc_id,
			      __entry->opcode)
);

DEFINE_EVENT(xe_sched_msg, xe_sched_msg_add,
	     TP_PROTO(struct xe_sched_msg *msg),
	     TP_ARGS(msg)
);

DEFINE_EVENT(xe_sched_msg, xe_sched_msg_recv,
	     TP_PROTO(struct xe_sched_msg *msg),
	     TP_ARGS(msg)
);

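/* Hardware fence events: log the underlying dma-fence context/seqno pair. */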
DECLARE_EVENT_CLASS(xe_hw_fence,
		    TP_PROTO(struct xe_hw_fence *fence),
		    TP_ARGS(fence),

		    TP_STRUCT__entry(
			     __string(dev, __dev_name_xe(fence->xe))
			     __field(u64, ctx)
			     __field(u32, seqno)
			     __field(struct xe_hw_fence *, fence)
			     ),

		    TP_fast_assign(
			   __assign_str(dev);
			   __entry->ctx = fence->dma.context;
			   __entry->seqno = fence->dma.seqno;
			   __entry->fence = fence;
			   ),

		    TP_printk("dev=%s, ctx=0x%016llx, fence=%p, seqno=%u",
			      __get_str(dev), __entry->ctx, __entry->fence, __entry->seqno)
);

DEFINE_EVENT(xe_hw_fence, xe_hw_fence_create,
	     TP_PROTO(struct xe_hw_fence *fence),
	     TP_ARGS(fence)
);

DEFINE_EVENT(xe_hw_fence, xe_hw_fence_signal,
	     TP_PROTO(struct xe_hw_fence *fence),
	     TP_ARGS(fence)
);

DEFINE_EVENT(xe_hw_fence, xe_hw_fence_try_signal,
	     TP_PROTO(struct xe_hw_fence *fence),
	     TP_ARGS(fence)
);

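/*
 * MMIO register accesses. The up-to-64-bit value is printed as two 32-bit
 * halves, low dword first.
 */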
TRACE_EVENT(xe_reg_rw,
	TP_PROTO(struct xe_gt *gt, bool write, u32 reg, u64 val, int len),

	TP_ARGS(gt, write, reg, val, len),

	TP_STRUCT__entry(
		__string(dev, __dev_name_gt(gt))
		__field(u64, val)
		__field(u32, reg)
		__field(u16, write)
		__field(u16, len)
		),

	TP_fast_assign(
		__assign_str(dev);
		__entry->val = val;
		__entry->reg = reg;
		__entry->write = write;
		__entry->len = len;
		),

	TP_printk("dev=%s, %s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
		  __get_str(dev), __entry->write ? "write" : "read",
		  __entry->reg, __entry->len,
		  (u32)(__entry->val & 0xffffffff),
		  (u32)(__entry->val >> 32))
);

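/*
 * Runtime PM transitions; the caller is recorded so %pS can print the
 * function that triggered the get/put or suspend/resume.
 */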
DECLARE_EVENT_CLASS(xe_pm_runtime,
		    TP_PROTO(struct xe_device *xe, void *caller),
		    TP_ARGS(xe, caller),

		    TP_STRUCT__entry(
			     __string(dev, __dev_name_xe(xe))
			     __field(void *, caller)
			     ),

		    TP_fast_assign(
			   __assign_str(dev);
			   __entry->caller = caller;
			   ),

		    TP_printk("dev=%s caller_function=%pS", __get_str(dev), __entry->caller)
);

DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_get,
	     TP_PROTO(struct xe_device *xe, void *caller),
	     TP_ARGS(xe, caller)
);

DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_put,
	     TP_PROTO(struct xe_device *xe, void *caller),
	     TP_ARGS(xe, caller)
);

DEFINE_EVENT(xe_pm_runtime, xe_pm_resume,
	     TP_PROTO(struct xe_device *xe, void *caller),
	     TP_ARGS(xe, caller)
);

DEFINE_EVENT(xe_pm_runtime, xe_pm_suspend,
	     TP_PROTO(struct xe_device *xe, void *caller),
	     TP_ARGS(xe, caller)
);

DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_resume,
	     TP_PROTO(struct xe_device *xe, void *caller),
	     TP_ARGS(xe, caller)
);

DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_suspend,
	     TP_PROTO(struct xe_device *xe, void *caller),
	     TP_ARGS(xe, caller)
);

DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_get_ioctl,
	     TP_PROTO(struct xe_device *xe, void *caller),
	     TP_ARGS(xe, caller)
);

#endif

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/xe
#define TRACE_INCLUDE_FILE xe_trace
#include <trace/define_trace.h>
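
/*
 * Illustrative usage (not part of this header): each DEFINE_EVENT above
 * generates a trace_<event>() wrapper for call sites, e.g.
 *
 *	trace_xe_exec_queue_create(q);
 *
 * and the whole "xe" trace system can then be enabled from tracefs:
 *
 *	echo 1 > /sys/kernel/tracing/events/xe/enable
 *	cat /sys/kernel/tracing/trace_pipe
 */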