/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright © 2022 Intel Corporation
 */

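/*
 * Tracepoints for the Xe driver.
 *
 * This header follows the standard kernel tracepoint pattern: the
 * TRACE_HEADER_MULTI_READ guard below lets <trace/define_trace.h>
 * (included at the bottom) re-read this file, and a single C file in
 * the driver defines CREATE_TRACE_POINTS before including it to emit
 * the tracepoint bodies. Each DEFINE_EVENT(class, name, ...) generates
 * a trace_<name>() helper for call sites, e.g. trace_xe_vm_create(vm).
 */
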
#undef TRACE_SYSTEM
#define TRACE_SYSTEM xe

#if !defined(_XE_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _XE_TRACE_H_

#include <linux/tracepoint.h>
#include <linux/types.h>

#include "xe_bo.h"
#include "xe_bo_types.h"
#include "xe_exec_queue_types.h"
#include "xe_gpu_scheduler_types.h"
#include "xe_gt_tlb_invalidation_types.h"
#include "xe_gt_types.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_sched_job.h"
#include "xe_vm.h"

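/*
 * GT TLB invalidation fence events: each event logs the fence pointer
 * and its seqno at one stage of an invalidation's lifetime (create,
 * send, recv, signal, timeout, ...), e.g.
 * trace_xe_gt_tlb_invalidation_fence_send(fence).
 */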
DECLARE_EVENT_CLASS(xe_gt_tlb_invalidation_fence,
		    TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
		    TP_ARGS(fence),

		    TP_STRUCT__entry(
			     __field(struct xe_gt_tlb_invalidation_fence *, fence)
			     __field(int, seqno)
			     ),

		    TP_fast_assign(
			   __entry->fence = fence;
			   __entry->seqno = fence->seqno;
			   ),

		    TP_printk("fence=%p, seqno=%d",
			      __entry->fence, __entry->seqno)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_create,
	     TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence,
	     xe_gt_tlb_invalidation_fence_work_func,
	     TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_cb,
	     TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_send,
	     TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_recv,
	     TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_signal,
	     TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_timeout,
	     TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(fence)
);

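/* Buffer object events: log the BO's size, flags and owning VM. */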
DECLARE_EVENT_CLASS(xe_bo,
		    TP_PROTO(struct xe_bo *bo),
		    TP_ARGS(bo),

		    TP_STRUCT__entry(
			     __field(size_t, size)
			     __field(u32, flags)
			     __field(struct xe_vm *, vm)
			     ),

		    TP_fast_assign(
			   __entry->size = bo->size;
			   __entry->flags = bo->flags;
			   __entry->vm = bo->vm;
			   ),

		    TP_printk("size=%zu, flags=0x%02x, vm=%p",
			      __entry->size, __entry->flags, __entry->vm)
);

DEFINE_EVENT(xe_bo, xe_bo_cpu_fault,
	     TP_PROTO(struct xe_bo *bo),
	     TP_ARGS(bo)
);

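/*
 * BO migration event: records the old and new placement (decoded via
 * xe_mem_type_to_name[]), the device name, and whether the move has no
 * source copy to migrate from (move_lacks_source).
 */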
TRACE_EVENT(xe_bo_move,
	    TP_PROTO(struct xe_bo *bo, u32 new_placement, u32 old_placement,
		     bool move_lacks_source),
	    TP_ARGS(bo, new_placement, old_placement, move_lacks_source),
	    TP_STRUCT__entry(
		     __field(struct xe_bo *, bo)
		     __field(size_t, size)
		     __field(u32, new_placement)
		     __field(u32, old_placement)
		     __array(char, device_id, 12)
		     __field(bool, move_lacks_source)
		     ),

	    TP_fast_assign(
		   __entry->bo = bo;
		   __entry->size = bo->size;
		   __entry->new_placement = new_placement;
		   __entry->old_placement = old_placement;
		   strscpy(__entry->device_id, dev_name(xe_bo_device(__entry->bo)->drm.dev), 12);
		   __entry->move_lacks_source = move_lacks_source;
		   ),
	    TP_printk("move_lacks_source:%s, migrate object %p [size %zu] from %s to %s device_id:%s",
		      __entry->move_lacks_source ? "yes" : "no", __entry->bo, __entry->size,
		      xe_mem_type_to_name[__entry->old_placement],
		      xe_mem_type_to_name[__entry->new_placement], __entry->device_id)
);

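/*
 * Exec queue events: log the engine class, logical engine mask, GT id,
 * queue width, and the GuC id/state/flags at each lifecycle and
 * scheduling transition (create, submit, enable/disable, reset, ...).
 */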
DECLARE_EVENT_CLASS(xe_exec_queue,
		    TP_PROTO(struct xe_exec_queue *q),
		    TP_ARGS(q),

		    TP_STRUCT__entry(
			     __field(enum xe_engine_class, class)
			     __field(u32, logical_mask)
			     __field(u8, gt_id)
			     __field(u16, width)
			     __field(u16, guc_id)
			     __field(u32, guc_state)
			     __field(u32, flags)
			     ),

		    TP_fast_assign(
			   __entry->class = q->class;
			   __entry->logical_mask = q->logical_mask;
			   __entry->gt_id = q->gt->info.id;
			   __entry->width = q->width;
			   __entry->guc_id = q->guc->id;
			   __entry->guc_state = atomic_read(&q->guc->state);
			   __entry->flags = q->flags;
			   ),

		    TP_printk("%d:0x%x, gt=%d, width=%d, guc_id=%d, guc_state=0x%x, flags=0x%x",
			      __entry->class, __entry->logical_mask,
			      __entry->gt_id, __entry->width, __entry->guc_id,
			      __entry->guc_state, __entry->flags)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_create,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_supress_resume,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_submit,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_enable,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_disable,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_done,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_register,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_deregister,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_deregister_done,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_close,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_kill,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_cleanup_entity,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_destroy,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_reset,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_memory_cat_error,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_stop,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_resubmit,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_lr_cleanup,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

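/*
 * Scheduler job events: log the job and LRC seqnos, the backing
 * dma_fence and its error, the owning queue's GuC id/state/flags, and
 * the address of the first batch buffer.
 */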
DECLARE_EVENT_CLASS(xe_sched_job,
		    TP_PROTO(struct xe_sched_job *job),
		    TP_ARGS(job),

		    TP_STRUCT__entry(
			     __field(u32, seqno)
			     __field(u32, lrc_seqno)
			     __field(u16, guc_id)
			     __field(u32, guc_state)
			     __field(u32, flags)
			     __field(int, error)
			     __field(struct dma_fence *, fence)
			     __field(u64, batch_addr)
			     ),

		    TP_fast_assign(
			   __entry->seqno = xe_sched_job_seqno(job);
			   __entry->lrc_seqno = xe_sched_job_lrc_seqno(job);
			   __entry->guc_id = job->q->guc->id;
			   __entry->guc_state =
				   atomic_read(&job->q->guc->state);
			   __entry->flags = job->q->flags;
			   __entry->error = job->fence ? job->fence->error : 0;
			   __entry->fence = job->fence;
			   __entry->batch_addr = (u64)job->ptrs[0].batch_addr;
			   ),

		    TP_printk("fence=%p, seqno=%u, lrc_seqno=%u, guc_id=%d, batch_addr=0x%012llx, guc_state=0x%x, flags=0x%x, error=%d",
			      __entry->fence, __entry->seqno,
			      __entry->lrc_seqno, __entry->guc_id,
			      __entry->batch_addr, __entry->guc_state,
			      __entry->flags, __entry->error)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_create,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_exec,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_run,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_free,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_timedout,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_set_error,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_ban,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

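/*
 * Scheduler message events: msg->private_data carries the target exec
 * queue, so the message opcode is logged with that queue's GuC id.
 */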
DECLARE_EVENT_CLASS(xe_sched_msg,
		    TP_PROTO(struct xe_sched_msg *msg),
		    TP_ARGS(msg),

		    TP_STRUCT__entry(
			     __field(u32, opcode)
			     __field(u16, guc_id)
			     ),

		    TP_fast_assign(
			   __entry->opcode = msg->opcode;
			   __entry->guc_id =
				   ((struct xe_exec_queue *)msg->private_data)->guc->id;
			   ),

		    TP_printk("guc_id=%d, opcode=%u", __entry->guc_id,
			      __entry->opcode)
);

DEFINE_EVENT(xe_sched_msg, xe_sched_msg_add,
	     TP_PROTO(struct xe_sched_msg *msg),
	     TP_ARGS(msg)
);

DEFINE_EVENT(xe_sched_msg, xe_sched_msg_recv,
	     TP_PROTO(struct xe_sched_msg *msg),
	     TP_ARGS(msg)
);

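/*
 * Hardware fence events: log the dma_fence context/seqno pair and the
 * fence pointer itself.
 */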
DECLARE_EVENT_CLASS(xe_hw_fence,
		    TP_PROTO(struct xe_hw_fence *fence),
		    TP_ARGS(fence),

		    TP_STRUCT__entry(
			     __field(u64, ctx)
			     __field(u32, seqno)
			     __field(struct xe_hw_fence *, fence)
			     ),

		    TP_fast_assign(
			   __entry->ctx = fence->dma.context;
			   __entry->seqno = fence->dma.seqno;
			   __entry->fence = fence;
			   ),

		    TP_printk("ctx=0x%016llx, fence=%p, seqno=%u",
			      __entry->ctx, __entry->fence, __entry->seqno)
);

DEFINE_EVENT(xe_hw_fence, xe_hw_fence_create,
	     TP_PROTO(struct xe_hw_fence *fence),
	     TP_ARGS(fence)
);

DEFINE_EVENT(xe_hw_fence, xe_hw_fence_signal,
	     TP_PROTO(struct xe_hw_fence *fence),
	     TP_ARGS(fence)
);

DEFINE_EVENT(xe_hw_fence, xe_hw_fence_try_signal,
	     TP_PROTO(struct xe_hw_fence *fence),
	     TP_ARGS(fence)
);

DEFINE_EVENT(xe_hw_fence, xe_hw_fence_free,
	     TP_PROTO(struct xe_hw_fence *fence),
	     TP_ARGS(fence)
);

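/*
 * VMA events: log the VMA pointer, the VM's address space id (asid),
 * the VMA's [start, end] range (end is logged inclusive) and its
 * userptr address, across the bind/unbind/invalidate/pagefault paths.
 */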
DECLARE_EVENT_CLASS(xe_vma,
		    TP_PROTO(struct xe_vma *vma),
		    TP_ARGS(vma),

		    TP_STRUCT__entry(
			     __field(struct xe_vma *, vma)
			     __field(u32, asid)
			     __field(u64, start)
			     __field(u64, end)
			     __field(u64, ptr)
			     ),

		    TP_fast_assign(
			   __entry->vma = vma;
			   __entry->asid = xe_vma_vm(vma)->usm.asid;
			   __entry->start = xe_vma_start(vma);
			   __entry->end = xe_vma_end(vma) - 1;
			   __entry->ptr = xe_vma_userptr(vma);
			   ),

		    TP_printk("vma=%p, asid=0x%05x, start=0x%012llx, end=0x%012llx, userptr=0x%012llx",
			      __entry->vma, __entry->asid, __entry->start,
			      __entry->end, __entry->ptr)
);

DEFINE_EVENT(xe_vma, xe_vma_flush,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_pagefault,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_acc,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_fail,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_bind,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_pf_bind,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_unbind,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_userptr_rebind_worker,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_userptr_rebind_exec,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_rebind_worker,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_rebind_exec,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_userptr_invalidate,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_invalidate,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_evict,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_userptr_invalidate_complete,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

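/* VM events: log the VM pointer and its address space id. */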
DECLARE_EVENT_CLASS(xe_vm,
		    TP_PROTO(struct xe_vm *vm),
		    TP_ARGS(vm),

		    TP_STRUCT__entry(
			     __field(struct xe_vm *, vm)
			     __field(u32, asid)
			     ),

		    TP_fast_assign(
			   __entry->vm = vm;
			   __entry->asid = vm->usm.asid;
			   ),

		    TP_printk("vm=%p, asid=0x%05x", __entry->vm,
			      __entry->asid)
);

DEFINE_EVENT(xe_vm, xe_vm_kill,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_create,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_free,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_cpu_bind,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_restart,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_enter,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_retry,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_exit,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

/* GuC */
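/*
 * CT flow control events: snapshot the CT ring state (head, tail,
 * total size, free space) together with the length of the message
 * being sent or received.
 */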
DECLARE_EVENT_CLASS(xe_guc_ct_flow_control,
		    TP_PROTO(u32 _head, u32 _tail, u32 size, u32 space, u32 len),
		    TP_ARGS(_head, _tail, size, space, len),

		    TP_STRUCT__entry(
			     __field(u32, _head)
			     __field(u32, _tail)
			     __field(u32, size)
			     __field(u32, space)
			     __field(u32, len)
			     ),

		    TP_fast_assign(
			   __entry->_head = _head;
			   __entry->_tail = _tail;
			   __entry->size = size;
			   __entry->space = space;
			   __entry->len = len;
			   ),

		    TP_printk("h2g flow control: head=%u, tail=%u, size=%u, space=%u, len=%u",
			      __entry->_head, __entry->_tail, __entry->size,
			      __entry->space, __entry->len)
);

DEFINE_EVENT(xe_guc_ct_flow_control, xe_guc_ct_h2g_flow_control,
	     TP_PROTO(u32 _head, u32 _tail, u32 size, u32 space, u32 len),
	     TP_ARGS(_head, _tail, size, space, len)
);

DEFINE_EVENT_PRINT(xe_guc_ct_flow_control, xe_guc_ct_g2h_flow_control,
		   TP_PROTO(u32 _head, u32 _tail, u32 size, u32 space, u32 len),
		   TP_ARGS(_head, _tail, size, space, len),

		   TP_printk("g2h flow control: head=%u, tail=%u, size=%u, space=%u, len=%u",
			     __entry->_head, __entry->_tail, __entry->size,
			     __entry->space, __entry->len)
);

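/*
 * CTB events: log the GT id, the GuC action code and the message
 * length along with the CT ring head and tail for each H2G/G2H
 * transfer.
 */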
DECLARE_EVENT_CLASS(xe_guc_ctb,
		    TP_PROTO(u8 gt_id, u32 action, u32 len, u32 _head, u32 tail),
		    TP_ARGS(gt_id, action, len, _head, tail),

		    TP_STRUCT__entry(
				__field(u8, gt_id)
				__field(u32, action)
				__field(u32, len)
				__field(u32, tail)
				__field(u32, _head)
		    ),

		    TP_fast_assign(
			    __entry->gt_id = gt_id;
			    __entry->action = action;
			    __entry->len = len;
			    __entry->tail = tail;
			    __entry->_head = _head;
		    ),

		    TP_printk("gt%d: H2G CTB: action=0x%x, len=%d, tail=%d, head=%d",
			      __entry->gt_id, __entry->action, __entry->len,
			      __entry->tail, __entry->_head)
);

DEFINE_EVENT(xe_guc_ctb, xe_guc_ctb_h2g,
	     TP_PROTO(u8 gt_id, u32 action, u32 len, u32 _head, u32 tail),
	     TP_ARGS(gt_id, action, len, _head, tail)
);

DEFINE_EVENT_PRINT(xe_guc_ctb, xe_guc_ctb_g2h,
		   TP_PROTO(u8 gt_id, u32 action, u32 len, u32 _head, u32 tail),
		   TP_ARGS(gt_id, action, len, _head, tail),

		   TP_printk("gt%d: G2H CTB: action=0x%x, len=%d, tail=%d, head=%d",
			     __entry->gt_id, __entry->action, __entry->len,
			     __entry->tail, __entry->_head)
);

#endif

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/xe
#define TRACE_INCLUDE_FILE xe_trace
#include <trace/define_trace.h>