xref: /linux/drivers/gpu/drm/xe/xe_trace.h (revision 69050f8d6d075dc01af7a5f2f550a8067510366f)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #undef TRACE_SYSTEM
7 #define TRACE_SYSTEM xe
8 
9 #if !defined(_XE_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
10 #define _XE_TRACE_H_
11 
12 #include <linux/tracepoint.h>
13 #include <linux/types.h>
14 
15 #include "xe_exec_queue_types.h"
16 #include "xe_exec_queue.h"
17 #include "xe_gpu_scheduler_types.h"
18 #include "xe_gt_types.h"
19 #include "xe_guc_exec_queue_types.h"
20 #include "xe_sched_job.h"
21 #include "xe_tlb_inval_types.h"
22 #include "xe_vm.h"
23 
/*
 * Helpers to resolve the device name string recorded in every event below.
 * All of them funnel into dev_name() on the xe device's underlying DRM
 * device; the tile/gt/exec-queue variants just walk up to the xe_device.
 */
#define __dev_name_xe(xe)	dev_name((xe)->drm.dev)
#define __dev_name_tile(tile)	__dev_name_xe(tile_to_xe((tile)))
#define __dev_name_gt(gt)	__dev_name_xe(gt_to_xe((gt)))
#define __dev_name_eq(q)	__dev_name_gt((q)->gt)
28 
/*
 * Event class for TLB invalidation fence tracepoints. Each event records
 * the device name, the fence pointer (as an opaque identifier for
 * correlating send/recv/signal/timeout events on the same fence) and the
 * fence's seqno.
 */
DECLARE_EVENT_CLASS(xe_tlb_inval_fence,
		    TP_PROTO(struct xe_device *xe, struct xe_tlb_inval_fence *fence),
		    TP_ARGS(xe, fence),

		    TP_STRUCT__entry(
			     __string(dev, __dev_name_xe(xe))
			     __field(struct xe_tlb_inval_fence *, fence)
			     __field(int, seqno)
			     ),

		    TP_fast_assign(
			   __assign_str(dev);
			   __entry->fence = fence;
			   __entry->seqno = fence->seqno;
			   ),

		    TP_printk("dev=%s, fence=%p, seqno=%d",
			      __get_str(dev), __entry->fence, __entry->seqno)
);
48 
/*
 * Instantiations of the xe_tlb_inval_fence class. All four share the same
 * payload and format; only the tracepoint name differs, marking distinct
 * points in the fence's lifetime (send/recv/signal/timeout — names chosen
 * by the callers; semantics follow from the call sites, not this header).
 */
DEFINE_EVENT(xe_tlb_inval_fence, xe_tlb_inval_fence_send,
	     TP_PROTO(struct xe_device *xe, struct xe_tlb_inval_fence *fence),
	     TP_ARGS(xe, fence)
);

DEFINE_EVENT(xe_tlb_inval_fence, xe_tlb_inval_fence_recv,
	     TP_PROTO(struct xe_device *xe, struct xe_tlb_inval_fence *fence),
	     TP_ARGS(xe, fence)
);

DEFINE_EVENT(xe_tlb_inval_fence, xe_tlb_inval_fence_signal,
	     TP_PROTO(struct xe_device *xe, struct xe_tlb_inval_fence *fence),
	     TP_ARGS(xe, fence)
);

DEFINE_EVENT(xe_tlb_inval_fence, xe_tlb_inval_fence_timeout,
	     TP_PROTO(struct xe_device *xe, struct xe_tlb_inval_fence *fence),
	     TP_ARGS(xe, fence)
);
68 
/*
 * Event class for exec-queue lifecycle tracepoints. Snapshots the queue's
 * identifying state at the time of the event: engine class, logical engine
 * mask, GT id, width (number of hardware contexts), GuC id, the GuC-side
 * state bits (read atomically from q->guc->state) and the queue flags.
 */
DECLARE_EVENT_CLASS(xe_exec_queue,
		    TP_PROTO(struct xe_exec_queue *q),
		    TP_ARGS(q),

		    TP_STRUCT__entry(
			     __string(dev, __dev_name_eq(q))
			     __field(enum xe_engine_class, class)
			     __field(u32, logical_mask)
			     __field(u8, gt_id)
			     __field(u16, width)
			     __field(u16, guc_id)
			     __field(u32, guc_state)
			     __field(u32, flags)
			     ),

		    TP_fast_assign(
			   __assign_str(dev);
			   __entry->class = q->class;
			   __entry->logical_mask = q->logical_mask;
			   __entry->gt_id = q->gt->info.id;
			   __entry->width = q->width;
			   __entry->guc_id = q->guc->id;
			   __entry->guc_state = atomic_read(&q->guc->state);
			   __entry->flags = q->flags;
			   ),

		    TP_printk("dev=%s, %d:0x%x, gt=%d, width=%d, guc_id=%d, guc_state=0x%x, flags=0x%x",
			      __get_str(dev), __entry->class, __entry->logical_mask,
			      __entry->gt_id, __entry->width, __entry->guc_id,
			      __entry->guc_state, __entry->flags)
);
100 
101 DECLARE_EVENT_CLASS(xe_exec_queue_multi_queue,
102 		    TP_PROTO(struct xe_exec_queue *q),
103 		    TP_ARGS(q),
104 
105 		    TP_STRUCT__entry(
106 			     __string(dev, __dev_name_eq(q))
107 			     __field(enum xe_engine_class, class)
108 			     __field(u32, logical_mask)
109 			     __field(u8, gt_id)
110 			     __field(u16, width)
111 			     __field(u32, guc_id)
112 			     __field(u32, guc_state)
113 			     __field(u32, flags)
114 			     __field(u32, primary)
115 			     ),
116 
117 		    TP_fast_assign(
118 			   __assign_str(dev);
119 			   __entry->class = q->class;
120 			   __entry->logical_mask = q->logical_mask;
121 			   __entry->gt_id = q->gt->info.id;
122 			   __entry->width = q->width;
123 			   __entry->guc_id = q->guc->id;
124 			   __entry->guc_state = atomic_read(&q->guc->state);
125 			   __entry->flags = q->flags;
126 			   __entry->primary = xe_exec_queue_multi_queue_primary(q)->guc->id;
127 			   ),
128 
129 		    TP_printk("dev=%s, %d:0x%x, gt=%d, width=%d guc_id=%d, guc_state=0x%x, flags=0x%x, primary=%d",
130 			      __get_str(dev), __entry->class, __entry->logical_mask,
131 			      __entry->gt_id, __entry->width, __entry->guc_id,
132 			      __entry->guc_state, __entry->flags,
133 			      __entry->primary)
134 );
135 
/*
 * Instantiations of the exec-queue event classes. Each DEFINE_EVENT below
 * reuses the class payload/format under its own tracepoint name; the names
 * mark distinct points in the queue's lifetime (create, submit, register,
 * teardown, error paths, GuC scheduling transitions).
 *
 * NOTE(review): "supress" in xe_exec_queue_supress_resume is a typo for
 * "suppress", but the tracepoint name is visible to userspace tooling and
 * must not be renamed here.
 */
DEFINE_EVENT(xe_exec_queue, xe_exec_queue_create,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

/* Uses the multi-queue class, which additionally records the primary guc_id */
DEFINE_EVENT(xe_exec_queue_multi_queue, xe_exec_queue_create_multi_queue,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_supress_resume,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_submit,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_enable,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_disable,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_done,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_register,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_deregister,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_deregister_done,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_close,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_kill,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_cleanup_entity,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_destroy,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_reset,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_memory_cat_error,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_cgp_context_error,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_stop,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_resubmit,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);
230 
/*
 * Event class for scheduler-job tracepoints. Snapshots the job's seqnos
 * (scheduler and LRC), its queue's GT/GuC identity and flags, the fence
 * pointer and its error (0 when the job has no fence yet), and the first
 * batch buffer address.
 */
DECLARE_EVENT_CLASS(xe_sched_job,
		    TP_PROTO(struct xe_sched_job *job),
		    TP_ARGS(job),

		    TP_STRUCT__entry(
			     __string(dev, __dev_name_eq(job->q))
			     __field(u32, seqno)
			     __field(u32, lrc_seqno)
			     __field(u8, gt_id)
			     __field(u16, guc_id)
			     __field(u32, guc_state)
			     __field(u32, flags)
			     __field(int, error)
			     __field(struct dma_fence *, fence)
			     __field(u64, batch_addr)
			     ),

		    TP_fast_assign(
			   __assign_str(dev);
			   __entry->seqno = xe_sched_job_seqno(job);
			   __entry->lrc_seqno = xe_sched_job_lrc_seqno(job);
			   __entry->gt_id = job->q->gt->info.id;
			   __entry->guc_id = job->q->guc->id;
			   __entry->guc_state =
			   atomic_read(&job->q->guc->state);
			   __entry->flags = job->q->flags;
			   /* job->fence may be NULL; report error 0 in that case */
			   __entry->error = job->fence ? job->fence->error : 0;
			   __entry->fence = job->fence;
			   __entry->batch_addr = (u64)job->ptrs[0].batch_addr;
			   ),

		    TP_printk("dev=%s, fence=%p, seqno=%u, lrc_seqno=%u, gt=%u, guc_id=%d, batch_addr=0x%012llx, guc_state=0x%x, flags=0x%x, error=%d",
			      __get_str(dev), __entry->fence, __entry->seqno,
			      __entry->lrc_seqno, __entry->gt_id, __entry->guc_id,
			      __entry->batch_addr, __entry->guc_state,
			      __entry->flags, __entry->error)
);
268 
/*
 * Instantiations of the xe_sched_job class, one per point in the job's
 * lifetime (create, exec, run, free, timeout, error set, ban). All share
 * the class payload and format; only the tracepoint name differs.
 */
DEFINE_EVENT(xe_sched_job, xe_sched_job_create,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_exec,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_run,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_free,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_timedout,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_set_error,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_ban,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);
303 
304 DECLARE_EVENT_CLASS(xe_sched_msg,
305 		    TP_PROTO(struct xe_sched_msg *msg),
306 		    TP_ARGS(msg),
307 
308 		    TP_STRUCT__entry(
309 			     __string(dev, __dev_name_eq(((struct xe_exec_queue *)msg->private_data)))
310 			     __field(u32, opcode)
311 			     __field(u16, guc_id)
312 			     __field(u8, gt_id)
313 			     ),
314 
315 		    TP_fast_assign(
316 			   __assign_str(dev);
317 			   __entry->opcode = msg->opcode;
318 			   __entry->guc_id =
319 			   ((struct xe_exec_queue *)msg->private_data)->guc->id;
320 			   __entry->gt_id =
321 			   ((struct xe_exec_queue *)msg->private_data)->gt->info.id;
322 			   ),
323 
324 		    TP_printk("dev=%s, gt=%u guc_id=%d, opcode=%u", __get_str(dev), __entry->gt_id, __entry->guc_id,
325 			      __entry->opcode)
326 );
327 
/*
 * Instantiations of the xe_sched_msg class: one event when a message is
 * added and one when it is received (per the names; fired by the callers).
 */
DEFINE_EVENT(xe_sched_msg, xe_sched_msg_add,
	     TP_PROTO(struct xe_sched_msg *msg),
	     TP_ARGS(msg)
);

DEFINE_EVENT(xe_sched_msg, xe_sched_msg_recv,
	     TP_PROTO(struct xe_sched_msg *msg),
	     TP_ARGS(msg)
);
337 
/*
 * Event class for hardware fence tracepoints. Records the dma-fence
 * context and seqno plus the fence pointer itself as an opaque id for
 * correlating create/signal events on the same fence.
 */
DECLARE_EVENT_CLASS(xe_hw_fence,
		    TP_PROTO(struct xe_hw_fence *fence),
		    TP_ARGS(fence),

		    TP_STRUCT__entry(
			     __string(dev, __dev_name_xe(fence->xe))
			     __field(u64, ctx)
			     __field(u32, seqno)
			     __field(struct xe_hw_fence *, fence)
			     ),

		    TP_fast_assign(
			   __assign_str(dev);
			   __entry->ctx = fence->dma.context;
			   __entry->seqno = fence->dma.seqno;
			   __entry->fence = fence;
			   ),

		    TP_printk("dev=%s, ctx=0x%016llx, fence=%p, seqno=%u",
			      __get_str(dev), __entry->ctx, __entry->fence, __entry->seqno)
);
359 
/*
 * Instantiations of the xe_hw_fence class for fence creation, signaling,
 * and signal attempts. Shared payload/format; names differ.
 */
DEFINE_EVENT(xe_hw_fence, xe_hw_fence_create,
	     TP_PROTO(struct xe_hw_fence *fence),
	     TP_ARGS(fence)
);

DEFINE_EVENT(xe_hw_fence, xe_hw_fence_signal,
	     TP_PROTO(struct xe_hw_fence *fence),
	     TP_ARGS(fence)
);

DEFINE_EVENT(xe_hw_fence, xe_hw_fence_try_signal,
	     TP_PROTO(struct xe_hw_fence *fence),
	     TP_ARGS(fence)
);
374 
/*
 * MMIO register access tracepoint. Records direction (read/write), the
 * register offset, access length in bytes, and the 64-bit value, which is
 * printed as a (low 32, high 32) pair.
 */
TRACE_EVENT(xe_reg_rw,
	TP_PROTO(struct xe_mmio *mmio, bool write, u32 reg, u64 val, int len),

	TP_ARGS(mmio, write, reg, val, len),

	TP_STRUCT__entry(
		__string(dev, __dev_name_tile(mmio->tile))
		__field(u64, val)
		__field(u32, reg)
		__field(u16, write)
		__field(u16, len)
		),

	TP_fast_assign(
		__assign_str(dev);
		__entry->val = val;
		__entry->reg = reg;
		__entry->write = write;
		__entry->len = len;
		),

	TP_printk("dev=%s, %s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
		  __get_str(dev), __entry->write ? "write" : "read",
		  __entry->reg, __entry->len,
		  (u32)(__entry->val & 0xffffffff),
		  (u32)(__entry->val >> 32))
);
402 
/*
 * Event class for runtime-PM tracepoints. Records only the device name
 * and the caller's address, printed symbolically via %pS so the trace
 * shows which function triggered the PM transition.
 */
DECLARE_EVENT_CLASS(xe_pm_runtime,
		    TP_PROTO(struct xe_device *xe, void *caller),
		    TP_ARGS(xe, caller),

		    TP_STRUCT__entry(
			     __string(dev, __dev_name_xe(xe))
			     __field(void *, caller)
			     ),

		    TP_fast_assign(
			   __assign_str(dev);
			   __entry->caller = caller;
			   ),

		    TP_printk("dev=%s caller_function=%pS", __get_str(dev), __entry->caller)
);
419 
/*
 * Instantiations of the xe_pm_runtime class covering runtime-PM get/put,
 * system suspend/resume, runtime suspend/resume, and the ioctl-path get.
 * All share the caller-address payload; only the names differ.
 */
DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_get,
	     TP_PROTO(struct xe_device *xe, void *caller),
	     TP_ARGS(xe, caller)
);

DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_put,
	     TP_PROTO(struct xe_device *xe, void *caller),
	     TP_ARGS(xe, caller)
);

DEFINE_EVENT(xe_pm_runtime, xe_pm_resume,
	     TP_PROTO(struct xe_device *xe, void *caller),
	     TP_ARGS(xe, caller)
);

DEFINE_EVENT(xe_pm_runtime, xe_pm_suspend,
	     TP_PROTO(struct xe_device *xe, void *caller),
	     TP_ARGS(xe, caller)
);

DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_resume,
	     TP_PROTO(struct xe_device *xe, void *caller),
	     TP_ARGS(xe, caller)
);

DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_suspend,
	     TP_PROTO(struct xe_device *xe, void *caller),
	     TP_ARGS(xe, caller)
);

DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_get_ioctl,
	     TP_PROTO(struct xe_device *xe, void *caller),
	     TP_ARGS(xe, caller)
);
454 
/*
 * EU stall data read tracepoint. Records the slice/subslice the data came
 * from, the buffer read/write pointers, the size of this read, and the
 * running total read size.
 */
TRACE_EVENT(xe_eu_stall_data_read,
	    TP_PROTO(u8 slice, u8 subslice,
		     u32 read_ptr, u32 write_ptr,
		     size_t read_size, size_t total_size),
	    TP_ARGS(slice, subslice,
		    read_ptr, write_ptr,
		    read_size, total_size),

	    TP_STRUCT__entry(__field(u8, slice)
			     __field(u8, subslice)
			     __field(u32, read_ptr)
			     __field(u32, write_ptr)
			     __field(size_t, read_size)
			     __field(size_t, total_size)
			     ),

	    TP_fast_assign(__entry->slice = slice;
			   __entry->subslice = subslice;
			   __entry->read_ptr = read_ptr;
			   __entry->write_ptr = write_ptr;
			   __entry->read_size = read_size;
			   __entry->total_size = total_size;
			   ),

	    TP_printk("slice: %u subslice: %u read ptr: 0x%x write ptr: 0x%x read size: %zu total read size: %zu",
		      __entry->slice, __entry->subslice,
		      __entry->read_ptr, __entry->write_ptr,
		      __entry->read_size, __entry->total_size)
);
484 
/*
 * Fired when an exec queue's job count exceeds the per-queue maximum
 * (max_cnt, supplied by the caller). Records the queue's engine class,
 * logical mask, GuC id, and the limit that was hit.
 */
TRACE_EVENT(xe_exec_queue_reach_max_job_count,
	    TP_PROTO(struct xe_exec_queue *q, int max_cnt),
	    TP_ARGS(q, max_cnt),

	    TP_STRUCT__entry(__string(dev, __dev_name_eq(q))
			     __field(enum xe_engine_class, class)
			     __field(u32, logical_mask)
			     __field(u16, guc_id)
			     __field(int, max_cnt)
			     ),

	    TP_fast_assign(__assign_str(dev);
			   __entry->class = q->class;
			   __entry->logical_mask = q->logical_mask;
			   __entry->guc_id = q->guc->id;
			   __entry->max_cnt = max_cnt;
			   ),

	    TP_printk("dev=%s, job count exceeded the maximum limit (%d) per exec queue. engine_class=0x%x, logical_mask=0x%x, guc_id=%d",
		      __get_str(dev), __entry->max_cnt,
		      __entry->class, __entry->logical_mask, __entry->guc_id)
);
507 
#endif

/*
 * This part must be outside protection: trace/define_trace.h re-includes
 * this header (located via TRACE_INCLUDE_PATH/TRACE_INCLUDE_FILE) with
 * TRACE_HEADER_MULTI_READ set, expanding the event macros above a second
 * time to emit the actual tracepoint definitions.
 */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/xe
#define TRACE_INCLUDE_FILE xe_trace
#include <trace/define_trace.h>
516