/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright © 2022 Intel Corporation
 */

#undef TRACE_SYSTEM
#define TRACE_SYSTEM xe

#if !defined(_XE_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _XE_TRACE_H_

#include <linux/tracepoint.h>
#include <linux/types.h>

#include "xe_exec_queue_types.h"
#include "xe_gpu_scheduler_types.h"
#include "xe_gt_tlb_invalidation_types.h"
#include "xe_gt_types.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_sched_job.h"
#include "xe_vm.h"

#define __dev_name_xe(xe)	dev_name((xe)->drm.dev)
#define __dev_name_tile(tile)	__dev_name_xe(tile_to_xe((tile)))
#define __dev_name_gt(gt)	__dev_name_xe(gt_to_xe((gt)))
#define __dev_name_eq(q)	__dev_name_gt((q)->gt)

DECLARE_EVENT_CLASS(xe_gt_tlb_invalidation_fence,
		    TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
		    TP_ARGS(xe, fence),

		    TP_STRUCT__entry(
			     __string(dev, __dev_name_xe(xe))
			     __field(struct xe_gt_tlb_invalidation_fence *, fence)
			     __field(int, seqno)
			     ),

		    TP_fast_assign(
			   __assign_str(dev);
			   __entry->fence = fence;
			   __entry->seqno = fence->seqno;
			   ),

		    TP_printk("dev=%s, fence=%p, seqno=%d",
			      __get_str(dev), __entry->fence, __entry->seqno)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_create,
	     TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(xe, fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence,
	     xe_gt_tlb_invalidation_fence_work_func,
	     TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(xe, fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_cb,
	     TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(xe, fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_send,
	     TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(xe, fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_recv,
	     TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(xe, fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_signal,
	     TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(xe, fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_timeout,
	     TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(xe, fence)
);

DECLARE_EVENT_CLASS(xe_exec_queue,
		    TP_PROTO(struct xe_exec_queue *q),
		    TP_ARGS(q),

		    TP_STRUCT__entry(
			     __string(dev, __dev_name_eq(q))
			     __field(enum xe_engine_class, class)
			     __field(u32, logical_mask)
			     __field(u8, gt_id)
			     __field(u16, width)
			     __field(u16, guc_id)
			     __field(u32, guc_state)
			     __field(u32, flags)
			     ),

		    TP_fast_assign(
			   __assign_str(dev);
			   __entry->class = q->class;
			   __entry->logical_mask = q->logical_mask;
			   __entry->gt_id = q->gt->info.id;
			   __entry->width = q->width;
			   __entry->guc_id = q->guc->id;
			   __entry->guc_state = atomic_read(&q->guc->state);
			   __entry->flags = q->flags;
			   ),

		    TP_printk("dev=%s, %d:0x%x, gt=%d, width=%d, guc_id=%d, guc_state=0x%x, flags=0x%x",
			      __get_str(dev), __entry->class, __entry->logical_mask,
			      __entry->gt_id, __entry->width, __entry->guc_id,
			      __entry->guc_state, __entry->flags)
);
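/*
 * Usage sketch, not part of this header: each DEFINE_EVENT() below
 * expands to a trace_<name>() helper, so (with this header included)
 * queue lifecycle code emits an event as, e.g.:
 *
 *	trace_xe_exec_queue_create(q);
 *
 * Tracepoints are backed by static branches, so a disabled event
 * costs close to nothing at the call site.
 */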
DEFINE_EVENT(xe_exec_queue, xe_exec_queue_create,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_supress_resume,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_submit,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_enable,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_disable,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_done,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_register,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_deregister,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_deregister_done,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_close,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_kill,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_cleanup_entity,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_destroy,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_reset,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_memory_cat_error,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_stop,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_resubmit,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_lr_cleanup,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DECLARE_EVENT_CLASS(xe_sched_job,
		    TP_PROTO(struct xe_sched_job *job),
		    TP_ARGS(job),

		    TP_STRUCT__entry(
			     __string(dev, __dev_name_eq(job->q))
			     __field(u32, seqno)
			     __field(u32, lrc_seqno)
			     __field(u16, guc_id)
			     __field(u32, guc_state)
			     __field(u32, flags)
			     __field(int, error)
			     __field(struct dma_fence *, fence)
			     __field(u64, batch_addr)
			     ),

		    TP_fast_assign(
			   __assign_str(dev);
			   __entry->seqno = xe_sched_job_seqno(job);
			   __entry->lrc_seqno = xe_sched_job_lrc_seqno(job);
			   __entry->guc_id = job->q->guc->id;
			   __entry->guc_state =
			   atomic_read(&job->q->guc->state);
			   __entry->flags = job->q->flags;
			   __entry->error = job->fence ? job->fence->error : 0;
			   __entry->fence = job->fence;
			   __entry->batch_addr = (u64)job->ptrs[0].batch_addr;
			   ),

		    TP_printk("dev=%s, fence=%p, seqno=%u, lrc_seqno=%u, guc_id=%d, batch_addr=0x%012llx, guc_state=0x%x, flags=0x%x, error=%d",
			      __get_str(dev), __entry->fence, __entry->seqno,
			      __entry->lrc_seqno, __entry->guc_id,
			      __entry->batch_addr, __entry->guc_state,
			      __entry->flags, __entry->error)
);
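/*
 * Illustrative emitting site (the real call sites live in the
 * driver's submission paths): a run_job() scheduler callback would
 * trace a job just before handing it to the hardware, e.g.:
 *
 *	trace_xe_sched_job_run(job);
 */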
DEFINE_EVENT(xe_sched_job, xe_sched_job_create,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_exec,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_run,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_free,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_timedout,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_set_error,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_ban,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DECLARE_EVENT_CLASS(xe_sched_msg,
		    TP_PROTO(struct xe_sched_msg *msg),
		    TP_ARGS(msg),

		    TP_STRUCT__entry(
			     __string(dev, __dev_name_eq(((struct xe_exec_queue *)msg->private_data)))
			     __field(u32, opcode)
			     __field(u16, guc_id)
			     ),

		    TP_fast_assign(
			   __assign_str(dev);
			   __entry->opcode = msg->opcode;
			   __entry->guc_id =
			   ((struct xe_exec_queue *)msg->private_data)->guc->id;
			   ),

		    TP_printk("dev=%s, guc_id=%d, opcode=%u", __get_str(dev), __entry->guc_id,
			      __entry->opcode)
);

DEFINE_EVENT(xe_sched_msg, xe_sched_msg_add,
	     TP_PROTO(struct xe_sched_msg *msg),
	     TP_ARGS(msg)
);

DEFINE_EVENT(xe_sched_msg, xe_sched_msg_recv,
	     TP_PROTO(struct xe_sched_msg *msg),
	     TP_ARGS(msg)
);

DECLARE_EVENT_CLASS(xe_hw_fence,
		    TP_PROTO(struct xe_hw_fence *fence),
		    TP_ARGS(fence),

		    TP_STRUCT__entry(
			     __string(dev, __dev_name_xe(fence->xe))
			     __field(u64, ctx)
			     __field(u32, seqno)
			     __field(struct xe_hw_fence *, fence)
			     ),

		    TP_fast_assign(
			   __assign_str(dev);
			   __entry->ctx = fence->dma.context;
			   __entry->seqno = fence->dma.seqno;
			   __entry->fence = fence;
			   ),

		    TP_printk("dev=%s, ctx=0x%016llx, fence=%p, seqno=%u",
			      __get_str(dev), __entry->ctx, __entry->fence, __entry->seqno)
);

DEFINE_EVENT(xe_hw_fence, xe_hw_fence_create,
	     TP_PROTO(struct xe_hw_fence *fence),
	     TP_ARGS(fence)
);

DEFINE_EVENT(xe_hw_fence, xe_hw_fence_signal,
	     TP_PROTO(struct xe_hw_fence *fence),
	     TP_ARGS(fence)
);

DEFINE_EVENT(xe_hw_fence, xe_hw_fence_try_signal,
	     TP_PROTO(struct xe_hw_fence *fence),
	     TP_ARGS(fence)
);

TRACE_EVENT(xe_reg_rw,
	    TP_PROTO(struct xe_mmio *mmio, bool write, u32 reg, u64 val, int len),

	    TP_ARGS(mmio, write, reg, val, len),

	    TP_STRUCT__entry(
		     __string(dev, __dev_name_tile(mmio->tile))
		     __field(u64, val)
		     __field(u32, reg)
		     __field(u16, write)
		     __field(u16, len)
		     ),

	    TP_fast_assign(
		   __assign_str(dev);
		   __entry->val = val;
		   __entry->reg = reg;
		   __entry->write = write;
		   __entry->len = len;
		   ),

	    TP_printk("dev=%s, %s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
		      __get_str(dev), __entry->write ? "write" : "read",
		      __entry->reg, __entry->len,
		      (u32)(__entry->val & 0xffffffff),
		      (u32)(__entry->val >> 32))
);
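/*
 * Hypothetical call, for illustration only: an MMIO accessor wrapper
 * would record a 32-bit register write as
 *
 *	trace_xe_reg_rw(mmio, true, reg, (u64)val, sizeof(u32));
 *
 * val is carried as a u64 so one event covers 32- and 64-bit
 * accesses; TP_printk() above splits it back into low/high halves.
 */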
"write" : "read", 368 __entry->reg, __entry->len, 369 (u32)(__entry->val & 0xffffffff), 370 (u32)(__entry->val >> 32)) 371 ); 372 373 DECLARE_EVENT_CLASS(xe_pm_runtime, 374 TP_PROTO(struct xe_device *xe, void *caller), 375 TP_ARGS(xe, caller), 376 377 TP_STRUCT__entry( 378 __string(dev, __dev_name_xe(xe)) 379 __field(void *, caller) 380 ), 381 382 TP_fast_assign( 383 __assign_str(dev); 384 __entry->caller = caller; 385 ), 386 387 TP_printk("dev=%s caller_function=%pS", __get_str(dev), __entry->caller) 388 ); 389 390 DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_get, 391 TP_PROTO(struct xe_device *xe, void *caller), 392 TP_ARGS(xe, caller) 393 ); 394 395 DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_put, 396 TP_PROTO(struct xe_device *xe, void *caller), 397 TP_ARGS(xe, caller) 398 ); 399 400 DEFINE_EVENT(xe_pm_runtime, xe_pm_resume, 401 TP_PROTO(struct xe_device *xe, void *caller), 402 TP_ARGS(xe, caller) 403 ); 404 405 DEFINE_EVENT(xe_pm_runtime, xe_pm_suspend, 406 TP_PROTO(struct xe_device *xe, void *caller), 407 TP_ARGS(xe, caller) 408 ); 409 410 DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_resume, 411 TP_PROTO(struct xe_device *xe, void *caller), 412 TP_ARGS(xe, caller) 413 ); 414 415 DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_suspend, 416 TP_PROTO(struct xe_device *xe, void *caller), 417 TP_ARGS(xe, caller) 418 ); 419 420 DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_get_ioctl, 421 TP_PROTO(struct xe_device *xe, void *caller), 422 TP_ARGS(xe, caller) 423 ); 424 425 #endif 426 427 /* This part must be outside protection */ 428 #undef TRACE_INCLUDE_PATH 429 #undef TRACE_INCLUDE_FILE 430 #define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/xe 431 #define TRACE_INCLUDE_FILE xe_trace 432 #include <trace/define_trace.h> 433