xref: /linux/drivers/gpu/drm/xe/xe_trace.h (revision dd08ebf6c3525a7ea2186e636df064ea47281987)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright © 2022 Intel Corporation
 */

#undef TRACE_SYSTEM
#define TRACE_SYSTEM xe

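/*
 * Usage sketch (not part of the original source): with TRACE_SYSTEM set
 * to "xe", every event declared below is exposed under tracefs once the
 * driver is loaded. Assuming the usual tracefs mount point:
 *
 *	echo 1 > /sys/kernel/tracing/events/xe/xe_bo_move/enable
 *	cat /sys/kernel/tracing/trace_pipe
 */
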
#if !defined(_XE_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _XE_TRACE_H_

#include <linux/types.h>
#include <linux/tracepoint.h>

#include "xe_bo_types.h"
#include "xe_engine_types.h"
#include "xe_gpu_scheduler_types.h"
#include "xe_gt_types.h"
#include "xe_guc_engine_types.h"
#include "xe_sched_job.h"
#include "xe_vm_types.h"

DECLARE_EVENT_CLASS(xe_bo,
		    TP_PROTO(struct xe_bo *bo),
		    TP_ARGS(bo),

		    TP_STRUCT__entry(
			     __field(size_t, size)
			     __field(u32, flags)
			     __field(u64, vm)
			     ),

		    TP_fast_assign(
			   __entry->size = bo->size;
			   __entry->flags = bo->flags;
			   __entry->vm = (u64)bo->vm;
			   ),

		    TP_printk("size=%zu, flags=0x%02x, vm=0x%016llx",
			      __entry->size, __entry->flags, __entry->vm)
);

DEFINE_EVENT(xe_bo, xe_bo_cpu_fault,
	     TP_PROTO(struct xe_bo *bo),
	     TP_ARGS(bo)
);

DEFINE_EVENT(xe_bo, xe_bo_move,
	     TP_PROTO(struct xe_bo *bo),
	     TP_ARGS(bo)
);
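
/*
 * Illustrative call site (a sketch, not code from this file): each
 * DEFINE_EVENT(xe_bo, <name>, ...) above generates a trace_<name>()
 * helper and a trace_<name>_enabled() check, so a hypothetical caller
 * on the BO move path would emit:
 *
 *	if (trace_xe_bo_move_enabled())
 *		trace_xe_bo_move(bo);
 *
 * The _enabled() guard only matters when argument setup is costly;
 * plain trace_xe_bo_move(bo) is the common form.
 */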

DECLARE_EVENT_CLASS(xe_engine,
		    TP_PROTO(struct xe_engine *e),
		    TP_ARGS(e),

		    TP_STRUCT__entry(
			     __field(enum xe_engine_class, class)
			     __field(u32, logical_mask)
			     __field(u8, gt_id)
			     __field(u16, width)
			     __field(u16, guc_id)
			     __field(u32, guc_state)
			     __field(u32, flags)
			     ),

		    TP_fast_assign(
			   __entry->class = e->class;
			   __entry->logical_mask = e->logical_mask;
			   __entry->gt_id = e->gt->info.id;
			   __entry->width = e->width;
			   __entry->guc_id = e->guc->id;
			   __entry->guc_state = atomic_read(&e->guc->state);
			   __entry->flags = e->flags;
			   ),

		    TP_printk("%d:0x%x, gt=%d, width=%d, guc_id=%d, guc_state=0x%x, flags=0x%x",
			      __entry->class, __entry->logical_mask,
			      __entry->gt_id, __entry->width, __entry->guc_id,
			      __entry->guc_state, __entry->flags)
);

DEFINE_EVENT(xe_engine, xe_engine_create,
	     TP_PROTO(struct xe_engine *e),
	     TP_ARGS(e)
);

DEFINE_EVENT(xe_engine, xe_engine_supress_resume,
	     TP_PROTO(struct xe_engine *e),
	     TP_ARGS(e)
);

DEFINE_EVENT(xe_engine, xe_engine_submit,
	     TP_PROTO(struct xe_engine *e),
	     TP_ARGS(e)
);

DEFINE_EVENT(xe_engine, xe_engine_scheduling_enable,
	     TP_PROTO(struct xe_engine *e),
	     TP_ARGS(e)
);

DEFINE_EVENT(xe_engine, xe_engine_scheduling_disable,
	     TP_PROTO(struct xe_engine *e),
	     TP_ARGS(e)
);

DEFINE_EVENT(xe_engine, xe_engine_scheduling_done,
	     TP_PROTO(struct xe_engine *e),
	     TP_ARGS(e)
);

DEFINE_EVENT(xe_engine, xe_engine_register,
	     TP_PROTO(struct xe_engine *e),
	     TP_ARGS(e)
);

DEFINE_EVENT(xe_engine, xe_engine_deregister,
	     TP_PROTO(struct xe_engine *e),
	     TP_ARGS(e)
);

DEFINE_EVENT(xe_engine, xe_engine_deregister_done,
	     TP_PROTO(struct xe_engine *e),
	     TP_ARGS(e)
);

DEFINE_EVENT(xe_engine, xe_engine_close,
	     TP_PROTO(struct xe_engine *e),
	     TP_ARGS(e)
);

DEFINE_EVENT(xe_engine, xe_engine_kill,
	     TP_PROTO(struct xe_engine *e),
	     TP_ARGS(e)
);

DEFINE_EVENT(xe_engine, xe_engine_cleanup_entity,
	     TP_PROTO(struct xe_engine *e),
	     TP_ARGS(e)
);

DEFINE_EVENT(xe_engine, xe_engine_destroy,
	     TP_PROTO(struct xe_engine *e),
	     TP_ARGS(e)
);

DEFINE_EVENT(xe_engine, xe_engine_reset,
	     TP_PROTO(struct xe_engine *e),
	     TP_ARGS(e)
);

DEFINE_EVENT(xe_engine, xe_engine_memory_cat_error,
	     TP_PROTO(struct xe_engine *e),
	     TP_ARGS(e)
);

DEFINE_EVENT(xe_engine, xe_engine_stop,
	     TP_PROTO(struct xe_engine *e),
	     TP_ARGS(e)
);

DEFINE_EVENT(xe_engine, xe_engine_resubmit,
	     TP_PROTO(struct xe_engine *e),
	     TP_ARGS(e)
);
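
/*
 * Illustrative call sites (a sketch under an assumed submission flow,
 * not code from this file): a GuC submission backend would typically
 * emit these at the points the names suggest, e.g.:
 *
 *	trace_xe_engine_register(e);	// before the GuC register H2G
 *	trace_xe_engine_submit(e);	// when a job is handed to the ring
 */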

DECLARE_EVENT_CLASS(xe_sched_job,
		    TP_PROTO(struct xe_sched_job *job),
		    TP_ARGS(job),

		    TP_STRUCT__entry(
			     __field(u32, seqno)
			     __field(u16, guc_id)
			     __field(u32, guc_state)
			     __field(u32, flags)
			     __field(int, error)
			     __field(u64, fence)
			     __field(u64, batch_addr)
			     ),

		    TP_fast_assign(
			   __entry->seqno = xe_sched_job_seqno(job);
			   __entry->guc_id = job->engine->guc->id;
			   __entry->guc_state =
			   atomic_read(&job->engine->guc->state);
			   __entry->flags = job->engine->flags;
			   __entry->error = job->fence->error;
			   __entry->fence = (u64)job->fence;
			   __entry->batch_addr = (u64)job->batch_addr[0];
			   ),

		    TP_printk("fence=0x%016llx, seqno=%u, guc_id=%d, batch_addr=0x%012llx, guc_state=0x%x, flags=0x%x, error=%d",
			      __entry->fence, __entry->seqno, __entry->guc_id,
			      __entry->batch_addr, __entry->guc_state,
			      __entry->flags, __entry->error)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_create,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_exec,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_run,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_free,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_timedout,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_set_error,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_ban,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);
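
/*
 * Illustrative lifecycle (a sketch with a hypothetical caller, not code
 * from this file): the events above bracket a job's life, roughly:
 *
 *	trace_xe_sched_job_create(job);	// after the job is built
 *	trace_xe_sched_job_run(job);	// when the backend starts it
 *	trace_xe_sched_job_free(job);	// when the scheduler releases it
 *
 * xe_sched_job_set_error/ban/timedout fire on the corresponding error
 * paths.
 */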

DECLARE_EVENT_CLASS(xe_sched_msg,
		    TP_PROTO(struct xe_sched_msg *msg),
		    TP_ARGS(msg),

		    TP_STRUCT__entry(
			     __field(u32, opcode)
			     __field(u16, guc_id)
			     ),

		    TP_fast_assign(
			   __entry->opcode = msg->opcode;
			   __entry->guc_id =
			   ((struct xe_engine *)msg->private_data)->guc->id;
			   ),

		    TP_printk("guc_id=%d, opcode=%u", __entry->guc_id,
			      __entry->opcode)
);

DEFINE_EVENT(xe_sched_msg, xe_sched_msg_add,
	     TP_PROTO(struct xe_sched_msg *msg),
	     TP_ARGS(msg)
);

DEFINE_EVENT(xe_sched_msg, xe_sched_msg_recv,
	     TP_PROTO(struct xe_sched_msg *msg),
	     TP_ARGS(msg)
);

DECLARE_EVENT_CLASS(xe_hw_fence,
		    TP_PROTO(struct xe_hw_fence *fence),
		    TP_ARGS(fence),

		    TP_STRUCT__entry(
			     __field(u64, ctx)
			     __field(u32, seqno)
			     __field(u64, fence)
			     ),

		    TP_fast_assign(
			   __entry->ctx = fence->dma.context;
			   __entry->seqno = fence->dma.seqno;
			   __entry->fence = (u64)fence;
			   ),

		    TP_printk("ctx=0x%016llx, fence=0x%016llx, seqno=%u",
			      __entry->ctx, __entry->fence, __entry->seqno)
);

DEFINE_EVENT(xe_hw_fence, xe_hw_fence_create,
	     TP_PROTO(struct xe_hw_fence *fence),
	     TP_ARGS(fence)
);

DEFINE_EVENT(xe_hw_fence, xe_hw_fence_signal,
	     TP_PROTO(struct xe_hw_fence *fence),
	     TP_ARGS(fence)
);

DEFINE_EVENT(xe_hw_fence, xe_hw_fence_try_signal,
	     TP_PROTO(struct xe_hw_fence *fence),
	     TP_ARGS(fence)
);

DEFINE_EVENT(xe_hw_fence, xe_hw_fence_free,
	     TP_PROTO(struct xe_hw_fence *fence),
	     TP_ARGS(fence)
);
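
/*
 * Background note (a sketch-level aside, not from this file):
 * xe_hw_fence wraps a struct dma_fence ("dma"), so the ctx/seqno
 * captured above identify the same object that the generic dma_fence
 * tracepoints (include/trace/events/dma_fence.h) report when the core
 * signals &fence->dma; the events here add the driver-side view of
 * that lifecycle.
 */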

DECLARE_EVENT_CLASS(xe_vma,
		    TP_PROTO(struct xe_vma *vma),
		    TP_ARGS(vma),

		    TP_STRUCT__entry(
			     __field(u64, vma)
			     __field(u32, asid)
			     __field(u64, start)
			     __field(u64, end)
			     __field(u64, ptr)
			     ),

		    TP_fast_assign(
			   __entry->vma = (u64)vma;
			   __entry->asid = vma->vm->usm.asid;
			   __entry->start = vma->start;
			   __entry->end = vma->end;
			   __entry->ptr = (u64)vma->userptr.ptr;
			   ),

		    TP_printk("vma=0x%016llx, asid=0x%05x, start=0x%012llx, end=0x%012llx, ptr=0x%012llx",
			      __entry->vma, __entry->asid, __entry->start,
			      __entry->end, __entry->ptr)
);

DEFINE_EVENT(xe_vma, xe_vma_flush,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_pagefault,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_acc,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_fail,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_bind,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_pf_bind,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_unbind,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_userptr_rebind_worker,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_userptr_rebind_exec,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_rebind_worker,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_rebind_exec,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_userptr_invalidate,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_usm_invalidate,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_evict,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_userptr_invalidate_complete,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DECLARE_EVENT_CLASS(xe_vm,
		    TP_PROTO(struct xe_vm *vm),
		    TP_ARGS(vm),

		    TP_STRUCT__entry(
			     __field(u64, vm)
			     __field(u32, asid)
			     ),

		    TP_fast_assign(
			   __entry->vm = (u64)vm;
			   __entry->asid = vm->usm.asid;
			   ),

		    TP_printk("vm=0x%016llx, asid=0x%05x", __entry->vm,
			      __entry->asid)
);

DEFINE_EVENT(xe_vm, xe_vm_create,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_free,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_cpu_bind,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_restart,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_enter,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_retry,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_exit,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DECLARE_EVENT_CLASS(xe_guc_ct_flow_control,
		    TP_PROTO(u32 _head, u32 _tail, u32 size, u32 space, u32 len),
		    TP_ARGS(_head, _tail, size, space, len),

		    TP_STRUCT__entry(
			     __field(u32, _head)
			     __field(u32, _tail)
			     __field(u32, size)
			     __field(u32, space)
			     __field(u32, len)
			     ),

		    TP_fast_assign(
			   __entry->_head = _head;
			   __entry->_tail = _tail;
			   __entry->size = size;
			   __entry->space = space;
			   __entry->len = len;
			   ),

		    TP_printk("head=%u, tail=%u, size=%u, space=%u, len=%u",
			      __entry->_head, __entry->_tail, __entry->size,
			      __entry->space, __entry->len)
);

DEFINE_EVENT(xe_guc_ct_flow_control, xe_guc_ct_h2g_flow_control,
	     TP_PROTO(u32 _head, u32 _tail, u32 size, u32 space, u32 len),
	     TP_ARGS(_head, _tail, size, space, len)
);

DEFINE_EVENT(xe_guc_ct_flow_control, xe_guc_ct_g2h_flow_control,
	     TP_PROTO(u32 _head, u32 _tail, u32 size, u32 space, u32 len),
	     TP_ARGS(_head, _tail, size, space, len)
);
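
/*
 * Example of the rendered trace text for one of these events
 * (illustrative field values only):
 *
 *	xe_guc_ct_h2g_flow_control: head=12, tail=40, size=1024,
 *		space=996, len=28
 */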

#endif

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/xe
#define TRACE_INCLUDE_FILE xe_trace
#include <trace/define_trace.h>