/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright © 2022 Intel Corporation
 */

#undef TRACE_SYSTEM
#define TRACE_SYSTEM xe

#if !defined(_XE_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _XE_TRACE_H_

#include <linux/tracepoint.h>
#include <linux/types.h>

#include "xe_bo_types.h"
#include "xe_engine_types.h"
#include "xe_gpu_scheduler_types.h"
#include "xe_gt_tlb_invalidation_types.h"
#include "xe_gt_types.h"
#include "xe_guc_engine_types.h"
#include "xe_sched_job.h"
#include "xe_vm_types.h"

DECLARE_EVENT_CLASS(xe_gt_tlb_invalidation_fence,
	TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
	TP_ARGS(fence),

	TP_STRUCT__entry(
		__field(u64, fence)
		__field(int, seqno)
	),

	TP_fast_assign(
		/* Cast through unsigned long so 32-bit builds don't warn */
		__entry->fence = (unsigned long)fence;
		__entry->seqno = fence->seqno;
	),

	TP_printk("fence=0x%016llx, seqno=%d",
		  __entry->fence, __entry->seqno)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_create,
	TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
	TP_ARGS(fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence,
	     xe_gt_tlb_invalidation_fence_work_func,
	TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
	TP_ARGS(fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_cb,
	TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
	TP_ARGS(fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_send,
	TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
	TP_ARGS(fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_recv,
	TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
	TP_ARGS(fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_signal,
	TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
	TP_ARGS(fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_timeout,
	TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
	TP_ARGS(fence)
);

DECLARE_EVENT_CLASS(xe_bo,
	TP_PROTO(struct xe_bo *bo),
	TP_ARGS(bo),

	TP_STRUCT__entry(
		__field(size_t, size)
		__field(u32, flags)
		__field(u64, vm)
	),

	TP_fast_assign(
		__entry->size = bo->size;
		__entry->flags = bo->flags;
		__entry->vm = (unsigned long)bo->vm;
	),

	TP_printk("size=%zu, flags=0x%02x, vm=0x%016llx",
		  __entry->size, __entry->flags, __entry->vm)
);

DEFINE_EVENT(xe_bo, xe_bo_cpu_fault,
	TP_PROTO(struct xe_bo *bo),
	TP_ARGS(bo)
);

DEFINE_EVENT(xe_bo, xe_bo_move,
	TP_PROTO(struct xe_bo *bo),
	TP_ARGS(bo)
);

DECLARE_EVENT_CLASS(xe_engine,
	TP_PROTO(struct xe_engine *e),
	TP_ARGS(e),

	TP_STRUCT__entry(
		__field(enum xe_engine_class, class)
		__field(u32, logical_mask)
		__field(u8, gt_id)
		__field(u16, width)
		__field(u16, guc_id)
		__field(u32, guc_state)
		__field(u32, flags)
	),

	TP_fast_assign(
		__entry->class = e->class;
		__entry->logical_mask = e->logical_mask;
		__entry->gt_id = e->gt->info.id;
		__entry->width = e->width;
		__entry->guc_id = e->guc->id;
		__entry->guc_state = atomic_read(&e->guc->state);
		__entry->flags = e->flags;
	),

	TP_printk("%d:0x%x, gt=%d, width=%d, guc_id=%d, guc_state=0x%x, flags=0x%x",
		  __entry->class, __entry->logical_mask,
		  __entry->gt_id, __entry->width, __entry->guc_id,
		  __entry->guc_state, __entry->flags)
);
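/*
 * Each DEFINE_EVENT() below instantiates the xe_engine class under its own
 * event name, and the tracepoint machinery generates a trace_<name>() helper
 * per event. An illustrative call site (hypothetical, not part of this
 * header) would look like:
 *
 *	trace_xe_engine_submit(e);
 */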
DEFINE_EVENT(xe_engine, xe_engine_create,
	TP_PROTO(struct xe_engine *e),
	TP_ARGS(e)
);

DEFINE_EVENT(xe_engine, xe_engine_supress_resume,
	TP_PROTO(struct xe_engine *e),
	TP_ARGS(e)
);

DEFINE_EVENT(xe_engine, xe_engine_submit,
	TP_PROTO(struct xe_engine *e),
	TP_ARGS(e)
);

DEFINE_EVENT(xe_engine, xe_engine_scheduling_enable,
	TP_PROTO(struct xe_engine *e),
	TP_ARGS(e)
);

DEFINE_EVENT(xe_engine, xe_engine_scheduling_disable,
	TP_PROTO(struct xe_engine *e),
	TP_ARGS(e)
);

DEFINE_EVENT(xe_engine, xe_engine_scheduling_done,
	TP_PROTO(struct xe_engine *e),
	TP_ARGS(e)
);

DEFINE_EVENT(xe_engine, xe_engine_register,
	TP_PROTO(struct xe_engine *e),
	TP_ARGS(e)
);

DEFINE_EVENT(xe_engine, xe_engine_deregister,
	TP_PROTO(struct xe_engine *e),
	TP_ARGS(e)
);

DEFINE_EVENT(xe_engine, xe_engine_deregister_done,
	TP_PROTO(struct xe_engine *e),
	TP_ARGS(e)
);

DEFINE_EVENT(xe_engine, xe_engine_close,
	TP_PROTO(struct xe_engine *e),
	TP_ARGS(e)
);

DEFINE_EVENT(xe_engine, xe_engine_kill,
	TP_PROTO(struct xe_engine *e),
	TP_ARGS(e)
);

DEFINE_EVENT(xe_engine, xe_engine_cleanup_entity,
	TP_PROTO(struct xe_engine *e),
	TP_ARGS(e)
);

DEFINE_EVENT(xe_engine, xe_engine_destroy,
	TP_PROTO(struct xe_engine *e),
	TP_ARGS(e)
);

DEFINE_EVENT(xe_engine, xe_engine_reset,
	TP_PROTO(struct xe_engine *e),
	TP_ARGS(e)
);

DEFINE_EVENT(xe_engine, xe_engine_memory_cat_error,
	TP_PROTO(struct xe_engine *e),
	TP_ARGS(e)
);

DEFINE_EVENT(xe_engine, xe_engine_stop,
	TP_PROTO(struct xe_engine *e),
	TP_ARGS(e)
);

DEFINE_EVENT(xe_engine, xe_engine_resubmit,
	TP_PROTO(struct xe_engine *e),
	TP_ARGS(e)
);

DECLARE_EVENT_CLASS(xe_sched_job,
	TP_PROTO(struct xe_sched_job *job),
	TP_ARGS(job),

	TP_STRUCT__entry(
		__field(u32, seqno)
		__field(u16, guc_id)
		__field(u32, guc_state)
		__field(u32, flags)
		__field(int, error)
		__field(u64, fence)
		__field(u64, batch_addr)
	),

	TP_fast_assign(
		__entry->seqno = xe_sched_job_seqno(job);
		__entry->guc_id = job->engine->guc->id;
		__entry->guc_state =
			atomic_read(&job->engine->guc->state);
		__entry->flags = job->engine->flags;
		__entry->error = job->fence->error;
		__entry->fence = (unsigned long)job->fence;
		__entry->batch_addr = (u64)job->batch_addr[0];
	),

	TP_printk("fence=0x%016llx, seqno=%u, guc_id=%d, batch_addr=0x%012llx, guc_state=0x%x, flags=0x%x, error=%d",
		  __entry->fence, __entry->seqno, __entry->guc_id,
		  __entry->batch_addr, __entry->guc_state,
		  __entry->flags, __entry->error)
);
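/*
 * A minimal sketch of consuming the xe_sched_job events from userspace via
 * tracefs (standard tracefs paths, assuming CONFIG_TRACING; the mount point
 * may differ on older systems; not part of this header):
 *
 *	echo 1 > /sys/kernel/tracing/events/xe/xe_sched_job_exec/enable
 *	cat /sys/kernel/tracing/trace_pipe
 */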
DEFINE_EVENT(xe_sched_job, xe_sched_job_create,
	TP_PROTO(struct xe_sched_job *job),
	TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_exec,
	TP_PROTO(struct xe_sched_job *job),
	TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_run,
	TP_PROTO(struct xe_sched_job *job),
	TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_free,
	TP_PROTO(struct xe_sched_job *job),
	TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_timedout,
	TP_PROTO(struct xe_sched_job *job),
	TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_set_error,
	TP_PROTO(struct xe_sched_job *job),
	TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_ban,
	TP_PROTO(struct xe_sched_job *job),
	TP_ARGS(job)
);

DECLARE_EVENT_CLASS(xe_sched_msg,
	TP_PROTO(struct xe_sched_msg *msg),
	TP_ARGS(msg),

	TP_STRUCT__entry(
		__field(u32, opcode)
		__field(u16, guc_id)
	),

	TP_fast_assign(
		__entry->opcode = msg->opcode;
		__entry->guc_id =
			((struct xe_engine *)msg->private_data)->guc->id;
	),

	TP_printk("guc_id=%d, opcode=%u", __entry->guc_id,
		  __entry->opcode)
);

DEFINE_EVENT(xe_sched_msg, xe_sched_msg_add,
	TP_PROTO(struct xe_sched_msg *msg),
	TP_ARGS(msg)
);

DEFINE_EVENT(xe_sched_msg, xe_sched_msg_recv,
	TP_PROTO(struct xe_sched_msg *msg),
	TP_ARGS(msg)
);

DECLARE_EVENT_CLASS(xe_hw_fence,
	TP_PROTO(struct xe_hw_fence *fence),
	TP_ARGS(fence),

	TP_STRUCT__entry(
		__field(u64, ctx)
		__field(u32, seqno)
		__field(u64, fence)
	),

	TP_fast_assign(
		__entry->ctx = fence->dma.context;
		__entry->seqno = fence->dma.seqno;
		__entry->fence = (unsigned long)fence;
	),

	TP_printk("ctx=0x%016llx, fence=0x%016llx, seqno=%u",
		  __entry->ctx, __entry->fence, __entry->seqno)
);

DEFINE_EVENT(xe_hw_fence, xe_hw_fence_create,
	TP_PROTO(struct xe_hw_fence *fence),
	TP_ARGS(fence)
);

DEFINE_EVENT(xe_hw_fence, xe_hw_fence_signal,
	TP_PROTO(struct xe_hw_fence *fence),
	TP_ARGS(fence)
);

DEFINE_EVENT(xe_hw_fence, xe_hw_fence_try_signal,
	TP_PROTO(struct xe_hw_fence *fence),
	TP_ARGS(fence)
);

DEFINE_EVENT(xe_hw_fence, xe_hw_fence_free,
	TP_PROTO(struct xe_hw_fence *fence),
	TP_ARGS(fence)
);

DECLARE_EVENT_CLASS(xe_vma,
	TP_PROTO(struct xe_vma *vma),
	TP_ARGS(vma),

	TP_STRUCT__entry(
		__field(u64, vma)
		__field(u32, asid)
		__field(u64, start)
		__field(u64, end)
		__field(u64, ptr)
	),

	TP_fast_assign(
		__entry->vma = (unsigned long)vma;
		__entry->asid = vma->vm->usm.asid;
		__entry->start = vma->start;
		__entry->end = vma->end;
		__entry->ptr = (u64)vma->userptr.ptr;
	),

	TP_printk("vma=0x%016llx, asid=0x%05x, start=0x%012llx, end=0x%012llx, ptr=0x%012llx",
		  __entry->vma, __entry->asid, __entry->start,
		  __entry->end, __entry->ptr)
);
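/*
 * Note on the pointer-valued fields above: the vma pointer is recorded
 * through an unsigned long cast into a u64 entry field so the 0x%016llx
 * format prints correctly on both 32-bit and 64-bit kernels, while
 * userptr.ptr is already integer-typed and only widened to u64.
 */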
DEFINE_EVENT(xe_vma, xe_vma_flush,
	TP_PROTO(struct xe_vma *vma),
	TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_pagefault,
	TP_PROTO(struct xe_vma *vma),
	TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_acc,
	TP_PROTO(struct xe_vma *vma),
	TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_fail,
	TP_PROTO(struct xe_vma *vma),
	TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_bind,
	TP_PROTO(struct xe_vma *vma),
	TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_pf_bind,
	TP_PROTO(struct xe_vma *vma),
	TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_unbind,
	TP_PROTO(struct xe_vma *vma),
	TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_userptr_rebind_worker,
	TP_PROTO(struct xe_vma *vma),
	TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_userptr_rebind_exec,
	TP_PROTO(struct xe_vma *vma),
	TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_rebind_worker,
	TP_PROTO(struct xe_vma *vma),
	TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_rebind_exec,
	TP_PROTO(struct xe_vma *vma),
	TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_userptr_invalidate,
	TP_PROTO(struct xe_vma *vma),
	TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_usm_invalidate,
	TP_PROTO(struct xe_vma *vma),
	TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_evict,
	TP_PROTO(struct xe_vma *vma),
	TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_userptr_invalidate_complete,
	TP_PROTO(struct xe_vma *vma),
	TP_ARGS(vma)
);

DECLARE_EVENT_CLASS(xe_vm,
	TP_PROTO(struct xe_vm *vm),
	TP_ARGS(vm),

	TP_STRUCT__entry(
		__field(u64, vm)
		__field(u32, asid)
	),

	TP_fast_assign(
		__entry->vm = (unsigned long)vm;
		__entry->asid = vm->usm.asid;
	),

	TP_printk("vm=0x%016llx, asid=0x%05x", __entry->vm,
		  __entry->asid)
);

DEFINE_EVENT(xe_vm, xe_vm_create,
	TP_PROTO(struct xe_vm *vm),
	TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_free,
	TP_PROTO(struct xe_vm *vm),
	TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_cpu_bind,
	TP_PROTO(struct xe_vm *vm),
	TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_restart,
	TP_PROTO(struct xe_vm *vm),
	TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_enter,
	TP_PROTO(struct xe_vm *vm),
	TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_retry,
	TP_PROTO(struct xe_vm *vm),
	TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_exit,
	TP_PROTO(struct xe_vm *vm),
	TP_ARGS(vm)
);

TRACE_EVENT(xe_guc_ct_h2g_flow_control,
	TP_PROTO(u32 _head, u32 _tail, u32 size, u32 space, u32 len),
	TP_ARGS(_head, _tail, size, space, len),

	TP_STRUCT__entry(
		__field(u32, _head)
		__field(u32, _tail)
		__field(u32, size)
		__field(u32, space)
		__field(u32, len)
	),

	TP_fast_assign(
		__entry->_head = _head;
		__entry->_tail = _tail;
		__entry->size = size;
		__entry->space = space;
		__entry->len = len;
	),

	TP_printk("head=%u, tail=%u, size=%u, space=%u, len=%u",
		  __entry->_head, __entry->_tail, __entry->size,
		  __entry->space, __entry->len)
);

TRACE_EVENT(xe_guc_ct_g2h_flow_control,
	TP_PROTO(u32 _head, u32 _tail, u32 size, u32 space, u32 len),
	TP_ARGS(_head, _tail, size, space, len),

	TP_STRUCT__entry(
		__field(u32, _head)
		__field(u32, _tail)
		__field(u32, size)
		__field(u32, space)
		__field(u32, len)
	),

	TP_fast_assign(
		__entry->_head = _head;
		__entry->_tail = _tail;
		__entry->size = size;
		__entry->space = space;
		__entry->len = len;
	),

	TP_printk("head=%u, tail=%u, size=%u, space=%u, len=%u",
		  __entry->_head, __entry->_tail, __entry->size,
		  __entry->space, __entry->len)
);

#endif

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/xe
#define TRACE_INCLUDE_FILE xe_trace
#include <trace/define_trace.h>
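/*
 * Usage sketch (an assumption following the standard kernel tracepoint
 * pattern, not something defined in this header): exactly one .c file in
 * the driver defines CREATE_TRACE_POINTS before including this header so
 * the tracepoints are instantiated exactly once:
 *
 *	#define CREATE_TRACE_POINTS
 *	#include "xe_trace.h"
 */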