/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright © 2022 Intel Corporation
 */

#undef TRACE_SYSTEM
#define TRACE_SYSTEM xe
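
/*
 * This header is intentionally not single-include: the
 * trace/define_trace.h include at the bottom of the file re-reads it
 * with TRACE_HEADER_MULTI_READ defined, so the guard below admits a
 * second pass in which the event macros expand to their definitions.
 */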

#if !defined(_XE_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _XE_TRACE_H_

#include <linux/tracepoint.h>
#include <linux/types.h>

#include "xe_bo.h"
#include "xe_bo_types.h"
#include "xe_exec_queue_types.h"
#include "xe_gpu_scheduler_types.h"
#include "xe_gt_tlb_invalidation_types.h"
#include "xe_gt_types.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_sched_job.h"
#include "xe_vm.h"

DECLARE_EVENT_CLASS(xe_gt_tlb_invalidation_fence,
		    TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
		    TP_ARGS(fence),

		    TP_STRUCT__entry(
			     __field(struct xe_gt_tlb_invalidation_fence *, fence)
			     __field(int, seqno)
			     ),

		    TP_fast_assign(
			   __entry->fence = fence;
			   __entry->seqno = fence->seqno;
			   ),

		    TP_printk("fence=%p, seqno=%d",
			      __entry->fence, __entry->seqno)
);
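
/*
 * Each DEFINE_EVENT below instantiates the class above as a tracepoint
 * named trace_<event>(). An illustrative call site (a sketch, not a
 * verbatim line from the driver):
 *
 *	trace_xe_gt_tlb_invalidation_fence_send(fence);
 */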

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_create,
	     TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence,
	     xe_gt_tlb_invalidation_fence_work_func,
	     TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_cb,
	     TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_send,
	     TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_recv,
	     TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_signal,
	     TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_timeout,
	     TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(fence)
);

DECLARE_EVENT_CLASS(xe_bo,
		    TP_PROTO(struct xe_bo *bo),
		    TP_ARGS(bo),

		    TP_STRUCT__entry(
			     __field(size_t, size)
			     __field(u32, flags)
			     __field(struct xe_vm *, vm)
			     ),

		    TP_fast_assign(
			   __entry->size = bo->size;
			   __entry->flags = bo->flags;
			   __entry->vm = bo->vm;
			   ),

		    TP_printk("size=%zu, flags=0x%02x, vm=%p",
			      __entry->size, __entry->flags, __entry->vm)
);
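
/*
 * Illustrative use from a CPU fault path (sketch only):
 *
 *	trace_xe_bo_cpu_fault(bo);
 */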

DEFINE_EVENT(xe_bo, xe_bo_cpu_fault,
	     TP_PROTO(struct xe_bo *bo),
	     TP_ARGS(bo)
);

TRACE_EVENT(xe_bo_move,
	    TP_PROTO(struct xe_bo *bo, u32 new_placement, u32 old_placement),
	    TP_ARGS(bo, new_placement, old_placement),
	    TP_STRUCT__entry(
		     __field(struct xe_bo *, bo)
		     __field(size_t, size)
		     __field(u32, new_placement)
		     __field(u32, old_placement)
		     __array(char, device_id, 12)
		     ),

	    TP_fast_assign(
		   __entry->bo = bo;
		   __entry->size = bo->size;
		   __entry->new_placement = new_placement;
		   __entry->old_placement = old_placement;
		   strscpy(__entry->device_id,
			   dev_name(xe_bo_device(__entry->bo)->drm.dev),
			   sizeof(__entry->device_id));
		   ),
	    TP_printk("migrate object %p [size %zu] from %s to %s device_id:%s",
		      __entry->bo, __entry->size, xe_mem_type_to_name[__entry->old_placement],
		      xe_mem_type_to_name[__entry->new_placement], __entry->device_id)
);
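
/*
 * Illustrative call from the BO migration path (a sketch; the
 * old_mem/new_mem names are assumed, standing in for TTM mem_type
 * values that index xe_mem_type_to_name[]):
 *
 *	trace_xe_bo_move(bo, new_mem->mem_type, old_mem->mem_type);
 */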

DECLARE_EVENT_CLASS(xe_exec_queue,
		    TP_PROTO(struct xe_exec_queue *q),
		    TP_ARGS(q),

		    TP_STRUCT__entry(
			     __field(enum xe_engine_class, class)
			     __field(u32, logical_mask)
			     __field(u8, gt_id)
			     __field(u16, width)
			     __field(u16, guc_id)
			     __field(u32, guc_state)
			     __field(u32, flags)
			     ),

		    TP_fast_assign(
			   __entry->class = q->class;
			   __entry->logical_mask = q->logical_mask;
			   __entry->gt_id = q->gt->info.id;
			   __entry->width = q->width;
			   __entry->guc_id = q->guc->id;
			   __entry->guc_state = atomic_read(&q->guc->state);
			   __entry->flags = q->flags;
			   ),

		    TP_printk("%d:0x%x, gt=%d, width=%d, guc_id=%d, guc_state=0x%x, flags=0x%x",
			      __entry->class, __entry->logical_mask,
			      __entry->gt_id, __entry->width, __entry->guc_id,
			      __entry->guc_state, __entry->flags)
);
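
/*
 * The events below cover the exec queue lifecycle (create through
 * destroy) plus GuC scheduling transitions. Illustrative call (sketch):
 *
 *	trace_xe_exec_queue_submit(q);
 */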

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_create,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_supress_resume,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_submit,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_enable,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_disable,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_done,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_register,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_deregister,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_deregister_done,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_close,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_kill,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_cleanup_entity,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_destroy,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_reset,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_memory_cat_error,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_stop,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_resubmit,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_lr_cleanup,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DECLARE_EVENT_CLASS(xe_sched_job,
		    TP_PROTO(struct xe_sched_job *job),
		    TP_ARGS(job),

		    TP_STRUCT__entry(
			     __field(u32, seqno)
			     __field(u16, guc_id)
			     __field(u32, guc_state)
			     __field(u32, flags)
			     __field(int, error)
			     __field(u64, fence)
			     __field(u64, batch_addr)
			     ),

		    TP_fast_assign(
			   __entry->seqno = xe_sched_job_seqno(job);
			   __entry->guc_id = job->q->guc->id;
			   __entry->guc_state =
				   atomic_read(&job->q->guc->state);
			   __entry->flags = job->q->flags;
			   __entry->error = job->fence->error;
			   __entry->fence = (unsigned long)job->fence;
			   __entry->batch_addr = (u64)job->batch_addr[0];
			   ),

		    TP_printk("fence=0x%016llx, seqno=%u, guc_id=%d, batch_addr=0x%012llx, guc_state=0x%x, flags=0x%x, error=%d",
			      __entry->fence, __entry->seqno, __entry->guc_id,
			      __entry->batch_addr, __entry->guc_state,
			      __entry->flags, __entry->error)
);
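
/*
 * Note that the fence is recorded by the value of its pointer (a u64 in
 * the ring buffer) and never dereferenced at print time. Illustrative
 * call (sketch):
 *
 *	trace_xe_sched_job_create(job);
 */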

DEFINE_EVENT(xe_sched_job, xe_sched_job_create,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_exec,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_run,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_free,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_timedout,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_set_error,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_ban,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DECLARE_EVENT_CLASS(xe_sched_msg,
		    TP_PROTO(struct xe_sched_msg *msg),
		    TP_ARGS(msg),

		    TP_STRUCT__entry(
			     __field(u32, opcode)
			     __field(u16, guc_id)
			     ),

		    TP_fast_assign(
			   __entry->opcode = msg->opcode;
			   __entry->guc_id =
				   ((struct xe_exec_queue *)msg->private_data)->guc->id;
			   ),

		    TP_printk("guc_id=%d, opcode=%u", __entry->guc_id,
			      __entry->opcode)
);
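
/*
 * As the TP_fast_assign cast shows, msg->private_data is expected to
 * carry the owning xe_exec_queue. Illustrative call (sketch):
 *
 *	trace_xe_sched_msg_add(msg);
 */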

DEFINE_EVENT(xe_sched_msg, xe_sched_msg_add,
	     TP_PROTO(struct xe_sched_msg *msg),
	     TP_ARGS(msg)
);

DEFINE_EVENT(xe_sched_msg, xe_sched_msg_recv,
	     TP_PROTO(struct xe_sched_msg *msg),
	     TP_ARGS(msg)
);

DECLARE_EVENT_CLASS(xe_hw_fence,
		    TP_PROTO(struct xe_hw_fence *fence),
		    TP_ARGS(fence),

		    TP_STRUCT__entry(
			     __field(u64, ctx)
			     __field(u32, seqno)
			     __field(struct xe_hw_fence *, fence)
			     ),

		    TP_fast_assign(
			   __entry->ctx = fence->dma.context;
			   __entry->seqno = fence->dma.seqno;
			   __entry->fence = fence;
			   ),

		    TP_printk("ctx=0x%016llx, fence=%p, seqno=%u",
			      __entry->ctx, __entry->fence, __entry->seqno)
);
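
/*
 * ctx and seqno are taken from the embedded struct dma_fence.
 * Illustrative call (sketch):
 *
 *	trace_xe_hw_fence_signal(fence);
 */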

DEFINE_EVENT(xe_hw_fence, xe_hw_fence_create,
	     TP_PROTO(struct xe_hw_fence *fence),
	     TP_ARGS(fence)
);

DEFINE_EVENT(xe_hw_fence, xe_hw_fence_signal,
	     TP_PROTO(struct xe_hw_fence *fence),
	     TP_ARGS(fence)
);

DEFINE_EVENT(xe_hw_fence, xe_hw_fence_try_signal,
	     TP_PROTO(struct xe_hw_fence *fence),
	     TP_ARGS(fence)
);

DEFINE_EVENT(xe_hw_fence, xe_hw_fence_free,
	     TP_PROTO(struct xe_hw_fence *fence),
	     TP_ARGS(fence)
);

DECLARE_EVENT_CLASS(xe_vma,
		    TP_PROTO(struct xe_vma *vma),
		    TP_ARGS(vma),

		    TP_STRUCT__entry(
			     __field(struct xe_vma *, vma)
			     __field(u32, asid)
			     __field(u64, start)
			     __field(u64, end)
			     __field(u64, ptr)
			     ),

		    TP_fast_assign(
			   __entry->vma = vma;
			   __entry->asid = xe_vma_vm(vma)->usm.asid;
			   __entry->start = xe_vma_start(vma);
			   __entry->end = xe_vma_end(vma) - 1;
			   __entry->ptr = xe_vma_userptr(vma);
			   ),

		    TP_printk("vma=%p, asid=0x%05x, start=0x%012llx, end=0x%012llx, userptr=0x%012llx",
			      __entry->vma, __entry->asid, __entry->start,
			      __entry->end, __entry->ptr)
);
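
/*
 * The recorded end address is inclusive (xe_vma_end() - 1).
 * Illustrative call (sketch):
 *
 *	trace_xe_vma_bind(vma);
 */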

DEFINE_EVENT(xe_vma, xe_vma_flush,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_pagefault,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_acc,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_fail,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_bind,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_pf_bind,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_unbind,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_userptr_rebind_worker,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_userptr_rebind_exec,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_rebind_worker,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_rebind_exec,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_userptr_invalidate,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_usm_invalidate,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_evict,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_userptr_invalidate_complete,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DECLARE_EVENT_CLASS(xe_vm,
		    TP_PROTO(struct xe_vm *vm),
		    TP_ARGS(vm),

		    TP_STRUCT__entry(
			     __field(struct xe_vm *, vm)
			     __field(u32, asid)
			     ),

		    TP_fast_assign(
			   __entry->vm = vm;
			   __entry->asid = vm->usm.asid;
			   ),

		    TP_printk("vm=%p, asid=0x%05x", __entry->vm,
			      __entry->asid)
);
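
/*
 * Illustrative call (sketch):
 *
 *	trace_xe_vm_create(vm);
 */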

DEFINE_EVENT(xe_vm, xe_vm_kill,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_create,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_free,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_cpu_bind,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_restart,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_enter,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_retry,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_exit,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

/* GuC */
DECLARE_EVENT_CLASS(xe_guc_ct_flow_control,
		    TP_PROTO(u32 _head, u32 _tail, u32 size, u32 space, u32 len),
		    TP_ARGS(_head, _tail, size, space, len),

		    TP_STRUCT__entry(
			     __field(u32, _head)
			     __field(u32, _tail)
			     __field(u32, size)
			     __field(u32, space)
			     __field(u32, len)
			     ),

		    TP_fast_assign(
			   __entry->_head = _head;
			   __entry->_tail = _tail;
			   __entry->size = size;
			   __entry->space = space;
			   __entry->len = len;
			   ),

		    TP_printk("h2g flow control: head=%u, tail=%u, size=%u, space=%u, len=%u",
			      __entry->_head, __entry->_tail, __entry->size,
			      __entry->space, __entry->len)
);

DEFINE_EVENT(xe_guc_ct_flow_control, xe_guc_ct_h2g_flow_control,
	     TP_PROTO(u32 _head, u32 _tail, u32 size, u32 space, u32 len),
	     TP_ARGS(_head, _tail, size, space, len)
);

DEFINE_EVENT_PRINT(xe_guc_ct_flow_control, xe_guc_ct_g2h_flow_control,
		   TP_PROTO(u32 _head, u32 _tail, u32 size, u32 space, u32 len),
		   TP_ARGS(_head, _tail, size, space, len),

		   TP_printk("g2h flow control: head=%u, tail=%u, size=%u, space=%u, len=%u",
			     __entry->_head, __entry->_tail, __entry->size,
			     __entry->space, __entry->len)
);
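
/*
 * DEFINE_EVENT_PRINT reuses the class's entry layout and TP_fast_assign
 * but substitutes its own TP_printk, so the g2h event shares the h2g
 * record format while printing a distinct prefix. Illustrative call
 * (sketch):
 *
 *	trace_xe_guc_ct_g2h_flow_control(head, tail, size, space, len);
 */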

DECLARE_EVENT_CLASS(xe_guc_ctb,
		    TP_PROTO(u8 gt_id, u32 action, u32 len, u32 _head, u32 tail),
		    TP_ARGS(gt_id, action, len, _head, tail),

		    TP_STRUCT__entry(
				__field(u8, gt_id)
				__field(u32, action)
				__field(u32, len)
				__field(u32, tail)
				__field(u32, _head)
		    ),

		    TP_fast_assign(
			    __entry->gt_id = gt_id;
			    __entry->action = action;
			    __entry->len = len;
			    __entry->tail = tail;
			    __entry->_head = _head;
		    ),

		    TP_printk("gt%d: H2G CTB: action=0x%x, len=%d, tail=%d, head=%d",
			      __entry->gt_id, __entry->action, __entry->len,
			      __entry->tail, __entry->_head)
);

DEFINE_EVENT(xe_guc_ctb, xe_guc_ctb_h2g,
	     TP_PROTO(u8 gt_id, u32 action, u32 len, u32 _head, u32 tail),
	     TP_ARGS(gt_id, action, len, _head, tail)
);

DEFINE_EVENT_PRINT(xe_guc_ctb, xe_guc_ctb_g2h,
		   TP_PROTO(u8 gt_id, u32 action, u32 len, u32 _head, u32 tail),
		   TP_ARGS(gt_id, action, len, _head, tail),

		   TP_printk("gt%d: G2H CTB: action=0x%x, len=%d, tail=%d, head=%d",
			     __entry->gt_id, __entry->action, __entry->len,
			     __entry->tail, __entry->_head)
);
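
/*
 * Illustrative call from the CT write/read paths (a sketch; argument
 * names assumed):
 *
 *	trace_xe_guc_ctb_h2g(gt_id, action, len, head, tail);
 */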

#endif

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/xe
#define TRACE_INCLUDE_FILE xe_trace
#include <trace/define_trace.h>
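
/*
 * Exactly one .c file in the driver defines CREATE_TRACE_POINTS before
 * including this header (conventionally xe_trace.c); that single pass
 * makes the include above emit the tracepoint definitions rather than
 * only their declarations.
 */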