/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright © 2022 Intel Corporation
 */

#undef TRACE_SYSTEM
#define TRACE_SYSTEM xe

#if !defined(_XE_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _XE_TRACE_H_

#include <linux/tracepoint.h>
#include <linux/types.h>

#include "xe_bo.h"
#include "xe_bo_types.h"
#include "xe_exec_queue_types.h"
#include "xe_gpu_scheduler_types.h"
#include "xe_gt_tlb_invalidation_types.h"
#include "xe_gt_types.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_sched_job.h"
#include "xe_vm.h"

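/*
 * Each DECLARE_EVENT_CLASS() below defines a common record layout, and
 * every DEFINE_EVENT() stamps out one named tracepoint sharing that
 * layout. A minimal usage sketch, assuming tracefs is mounted at
 * /sys/kernel/tracing:
 *
 *	# echo 1 > /sys/kernel/tracing/events/xe/enable
 *	# cat /sys/kernel/tracing/trace_pipe
 *
 * Individual events can be toggled via events/xe/<event_name>/enable.
 */

/*
 * Lifecycle of a GT TLB invalidation fence: create, send to the GuC,
 * receive the completion, run the callback/worker, then signal or
 * time out.
 */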
DECLARE_EVENT_CLASS(xe_gt_tlb_invalidation_fence,
		    TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
		    TP_ARGS(fence),

		    TP_STRUCT__entry(
			     __field(struct xe_gt_tlb_invalidation_fence *, fence)
			     __field(int, seqno)
			     ),

		    TP_fast_assign(
			   __entry->fence = fence;
			   __entry->seqno = fence->seqno;
			   ),

		    TP_printk("fence=%p, seqno=%d",
			      __entry->fence, __entry->seqno)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_create,
	     TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence,
	     xe_gt_tlb_invalidation_fence_work_func,
	     TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_cb,
	     TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_send,
	     TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_recv,
	     TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_signal,
	     TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_timeout,
	     TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(fence)
);

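/*
 * Buffer-object events: record size, creation flags and owning VM so a
 * CPU fault (and the move event below) can be attributed to a BO.
 */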
DECLARE_EVENT_CLASS(xe_bo,
		    TP_PROTO(struct xe_bo *bo),
		    TP_ARGS(bo),

		    TP_STRUCT__entry(
			     __field(size_t, size)
			     __field(u32, flags)
			     __field(struct xe_vm *, vm)
			     ),

		    TP_fast_assign(
			   __entry->size = bo->size;
			   __entry->flags = bo->flags;
			   __entry->vm = bo->vm;
			   ),

		    TP_printk("size=%zu, flags=0x%02x, vm=%p",
			      __entry->size, __entry->flags, __entry->vm)
);

DEFINE_EVENT(xe_bo, xe_bo_cpu_fault,
	     TP_PROTO(struct xe_bo *bo),
	     TP_ARGS(bo)
);

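/*
 * Standalone TRACE_EVENT: a BO migration between memory placements.
 * move_lacks_source appears to be set when there is no source copy to
 * migrate from, i.e. the destination is only populated, not copied.
 */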
TRACE_EVENT(xe_bo_move,
	    TP_PROTO(struct xe_bo *bo, u32 new_placement, u32 old_placement,
		     bool move_lacks_source),
	    TP_ARGS(bo, new_placement, old_placement, move_lacks_source),
	    TP_STRUCT__entry(
		     __field(struct xe_bo *, bo)
		     __field(size_t, size)
		     __field(u32, new_placement)
		     __field(u32, old_placement)
		     __array(char, device_id, 12)
		     __field(bool, move_lacks_source)
		     ),

	    TP_fast_assign(
		   __entry->bo = bo;
		   __entry->size = bo->size;
		   __entry->new_placement = new_placement;
		   __entry->old_placement = old_placement;
		   strscpy(__entry->device_id, dev_name(xe_bo_device(__entry->bo)->drm.dev), 12);
		   __entry->move_lacks_source = move_lacks_source;
		   ),
	    TP_printk("move_lacks_source:%s, migrate object %p [size %zu] from %s to %s device_id:%s",
		      __entry->move_lacks_source ? "yes" : "no", __entry->bo, __entry->size,
		      xe_mem_type_to_name[__entry->old_placement],
		      xe_mem_type_to_name[__entry->new_placement], __entry->device_id)
);

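/*
 * Exec-queue events: engine class, logical engine mask, GT id, queue
 * width and GuC id/state, covering the queue's lifecycle from create
 * through register/deregister to destroy.
 */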
DECLARE_EVENT_CLASS(xe_exec_queue,
		    TP_PROTO(struct xe_exec_queue *q),
		    TP_ARGS(q),

		    TP_STRUCT__entry(
			     __field(enum xe_engine_class, class)
			     __field(u32, logical_mask)
			     __field(u8, gt_id)
			     __field(u16, width)
			     __field(u16, guc_id)
			     __field(u32, guc_state)
			     __field(u32, flags)
			     ),

		    TP_fast_assign(
			   __entry->class = q->class;
			   __entry->logical_mask = q->logical_mask;
			   __entry->gt_id = q->gt->info.id;
			   __entry->width = q->width;
			   __entry->guc_id = q->guc->id;
			   __entry->guc_state = atomic_read(&q->guc->state);
			   __entry->flags = q->flags;
			   ),

		    TP_printk("%d:0x%x, gt=%d, width=%d, guc_id=%d, guc_state=0x%x, flags=0x%x",
			      __entry->class, __entry->logical_mask,
			      __entry->gt_id, __entry->width, __entry->guc_id,
			      __entry->guc_state, __entry->flags)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_create,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_supress_resume,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_submit,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_enable,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_disable,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_done,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_register,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_deregister,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_deregister_done,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_close,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_kill,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_cleanup_entity,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_destroy,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_reset,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_memory_cat_error,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_stop,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_resubmit,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_lr_cleanup,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

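/*
 * Scheduler-job events: tie a job's fence and seqno to its queue's GuC
 * id/state and the first batch buffer address, from creation through
 * execution to free/timeout/ban.
 */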
DECLARE_EVENT_CLASS(xe_sched_job,
		    TP_PROTO(struct xe_sched_job *job),
		    TP_ARGS(job),

		    TP_STRUCT__entry(
			     __field(u32, seqno)
			     __field(u16, guc_id)
			     __field(u32, guc_state)
			     __field(u32, flags)
			     __field(int, error)
			     __field(u64, fence)
			     __field(u64, batch_addr)
			     ),

		    TP_fast_assign(
			   __entry->seqno = xe_sched_job_seqno(job);
			   __entry->guc_id = job->q->guc->id;
			   __entry->guc_state = atomic_read(&job->q->guc->state);
			   __entry->flags = job->q->flags;
			   __entry->error = job->fence->error;
			   __entry->fence = (unsigned long)job->fence;
			   __entry->batch_addr = (u64)job->batch_addr[0];
			   ),

		    TP_printk("fence=0x%016llx, seqno=%u, guc_id=%d, batch_addr=0x%012llx, guc_state=0x%x, flags=0x%x, error=%d",
			      __entry->fence, __entry->seqno, __entry->guc_id,
			      __entry->batch_addr, __entry->guc_state,
			      __entry->flags, __entry->error)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_create,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_exec,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_run,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_free,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_timedout,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_set_error,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_ban,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

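/*
 * Scheduler-message events: opcode plus the GuC id of the exec queue
 * stashed in the message's private_data.
 */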
DECLARE_EVENT_CLASS(xe_sched_msg,
		    TP_PROTO(struct xe_sched_msg *msg),
		    TP_ARGS(msg),

		    TP_STRUCT__entry(
			     __field(u32, opcode)
			     __field(u16, guc_id)
			     ),

		    TP_fast_assign(
			   __entry->opcode = msg->opcode;
			   __entry->guc_id =
			   ((struct xe_exec_queue *)msg->private_data)->guc->id;
			   ),

		    TP_printk("guc_id=%d, opcode=%u", __entry->guc_id,
			      __entry->opcode)
);

DEFINE_EVENT(xe_sched_msg, xe_sched_msg_add,
	     TP_PROTO(struct xe_sched_msg *msg),
	     TP_ARGS(msg)
);

DEFINE_EVENT(xe_sched_msg, xe_sched_msg_recv,
	     TP_PROTO(struct xe_sched_msg *msg),
	     TP_ARGS(msg)
);

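/*
 * Hardware-fence events: dma-fence context and seqno for each HW fence,
 * from creation to signal/free.
 */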
DECLARE_EVENT_CLASS(xe_hw_fence,
		    TP_PROTO(struct xe_hw_fence *fence),
		    TP_ARGS(fence),

		    TP_STRUCT__entry(
			     __field(u64, ctx)
			     __field(u32, seqno)
			     __field(struct xe_hw_fence *, fence)
			     ),

		    TP_fast_assign(
			   __entry->ctx = fence->dma.context;
			   __entry->seqno = fence->dma.seqno;
			   __entry->fence = fence;
			   ),

		    TP_printk("ctx=0x%016llx, fence=%p, seqno=%u",
			      __entry->ctx, __entry->fence, __entry->seqno)
);

DEFINE_EVENT(xe_hw_fence, xe_hw_fence_create,
	     TP_PROTO(struct xe_hw_fence *fence),
	     TP_ARGS(fence)
);

DEFINE_EVENT(xe_hw_fence, xe_hw_fence_signal,
	     TP_PROTO(struct xe_hw_fence *fence),
	     TP_ARGS(fence)
);

DEFINE_EVENT(xe_hw_fence, xe_hw_fence_try_signal,
	     TP_PROTO(struct xe_hw_fence *fence),
	     TP_ARGS(fence)
);

DEFINE_EVENT(xe_hw_fence, xe_hw_fence_free,
	     TP_PROTO(struct xe_hw_fence *fence),
	     TP_ARGS(fence)
);

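/*
 * VMA events: address range, VM address-space id (ASID) and userptr, for
 * binds/unbinds, page faults, rebinds and invalidations.
 */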
DECLARE_EVENT_CLASS(xe_vma,
		    TP_PROTO(struct xe_vma *vma),
		    TP_ARGS(vma),

		    TP_STRUCT__entry(
			     __field(struct xe_vma *, vma)
			     __field(u32, asid)
			     __field(u64, start)
			     __field(u64, end)
			     __field(u64, ptr)
			     ),

		    TP_fast_assign(
			   __entry->vma = vma;
			   __entry->asid = xe_vma_vm(vma)->usm.asid;
			   __entry->start = xe_vma_start(vma);
			   __entry->end = xe_vma_end(vma) - 1;
			   __entry->ptr = xe_vma_userptr(vma);
			   ),

		    TP_printk("vma=%p, asid=0x%05x, start=0x%012llx, end=0x%012llx, userptr=0x%012llx",
			      __entry->vma, __entry->asid, __entry->start,
			      __entry->end, __entry->ptr)
);

DEFINE_EVENT(xe_vma, xe_vma_flush,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_pagefault,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_acc,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_fail,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_bind,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_pf_bind,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_unbind,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_userptr_rebind_worker,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_userptr_rebind_exec,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_rebind_worker,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_rebind_exec,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_userptr_invalidate,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_invalidate,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_evict,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_userptr_invalidate_complete,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

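/*
 * VM events: VM pointer and ASID, covering create/free, CPU binds and the
 * rebind worker's enter/retry/exit.
 */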
DECLARE_EVENT_CLASS(xe_vm,
		    TP_PROTO(struct xe_vm *vm),
		    TP_ARGS(vm),

		    TP_STRUCT__entry(
			     __field(struct xe_vm *, vm)
			     __field(u32, asid)
			     ),

		    TP_fast_assign(
			   __entry->vm = vm;
			   __entry->asid = vm->usm.asid;
			   ),

		    TP_printk("vm=%p, asid=0x%05x", __entry->vm,
			      __entry->asid)
);

DEFINE_EVENT(xe_vm, xe_vm_kill,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_create,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_free,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_cpu_bind,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_restart,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_enter,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_retry,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_exit,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

/* GuC */
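/*
 * CT (command transport) flow-control events: ring head/tail, size,
 * remaining space and message length. The g2h variant reuses the class
 * via DEFINE_EVENT_PRINT, which overrides only the TP_printk() format.
 */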
DECLARE_EVENT_CLASS(xe_guc_ct_flow_control,
		    TP_PROTO(u32 _head, u32 _tail, u32 size, u32 space, u32 len),
		    TP_ARGS(_head, _tail, size, space, len),

		    TP_STRUCT__entry(
			     __field(u32, _head)
			     __field(u32, _tail)
			     __field(u32, size)
			     __field(u32, space)
			     __field(u32, len)
			     ),

		    TP_fast_assign(
			   __entry->_head = _head;
			   __entry->_tail = _tail;
			   __entry->size = size;
			   __entry->space = space;
			   __entry->len = len;
			   ),

		    TP_printk("h2g flow control: head=%u, tail=%u, size=%u, space=%u, len=%u",
			      __entry->_head, __entry->_tail, __entry->size,
			      __entry->space, __entry->len)
);

DEFINE_EVENT(xe_guc_ct_flow_control, xe_guc_ct_h2g_flow_control,
	     TP_PROTO(u32 _head, u32 _tail, u32 size, u32 space, u32 len),
	     TP_ARGS(_head, _tail, size, space, len)
);

DEFINE_EVENT_PRINT(xe_guc_ct_flow_control, xe_guc_ct_g2h_flow_control,
		   TP_PROTO(u32 _head, u32 _tail, u32 size, u32 space, u32 len),
		   TP_ARGS(_head, _tail, size, space, len),

		   TP_printk("g2h flow control: head=%u, tail=%u, size=%u, space=%u, len=%u",
			     __entry->_head, __entry->_tail, __entry->size,
			     __entry->space, __entry->len)
);

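/*
 * Raw CT buffer traffic per GT: action code, length and ring position for
 * H2G and G2H messages.
 */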
DECLARE_EVENT_CLASS(xe_guc_ctb,
		    TP_PROTO(u8 gt_id, u32 action, u32 len, u32 _head, u32 tail),
		    TP_ARGS(gt_id, action, len, _head, tail),

		    TP_STRUCT__entry(
				__field(u8, gt_id)
				__field(u32, action)
				__field(u32, len)
				__field(u32, tail)
				__field(u32, _head)
		    ),

		    TP_fast_assign(
			    __entry->gt_id = gt_id;
			    __entry->action = action;
			    __entry->len = len;
			    __entry->tail = tail;
			    __entry->_head = _head;
		    ),

		    TP_printk("gt%d: H2G CTB: action=0x%x, len=%d, tail=%d, head=%d",
			      __entry->gt_id, __entry->action, __entry->len,
			      __entry->tail, __entry->_head)
);

DEFINE_EVENT(xe_guc_ctb, xe_guc_ctb_h2g,
	     TP_PROTO(u8 gt_id, u32 action, u32 len, u32 _head, u32 tail),
	     TP_ARGS(gt_id, action, len, _head, tail)
);

DEFINE_EVENT_PRINT(xe_guc_ctb, xe_guc_ctb_g2h,
		   TP_PROTO(u8 gt_id, u32 action, u32 len, u32 _head, u32 tail),
		   TP_ARGS(gt_id, action, len, _head, tail),

		   TP_printk("gt%d: G2H CTB: action=0x%x, len=%d, tail=%d, head=%d",
			     __entry->gt_id, __entry->action, __entry->len,
			     __entry->tail, __entry->_head)
);

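/*
 * Each DEFINE_EVENT() above generates a trace_<name>() helper that the
 * driver calls at the matching point in the code path, e.g. (an
 * illustrative sketch, not a verbatim call site):
 *
 *	trace_xe_sched_job_run(job);
 *	trace_xe_gt_tlb_invalidation_fence_send(fence);
 *
 * The calls compile to a static-branch no-op unless the event is enabled.
 */
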
#endif /* !_XE_TRACE_H_ || TRACE_HEADER_MULTI_READ */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/xe
#define TRACE_INCLUDE_FILE xe_trace
#include <trace/define_trace.h>