/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright © 2024 Intel Corporation
 */

/*
 * Tracepoint definitions for xe buffer objects (BOs), virtual memory
 * areas (VMAs) and virtual memory spaces (VMs).
 *
 * This header follows the standard kernel TRACE_EVENT pattern: it is
 * included multiple times (hence the TRACE_HEADER_MULTI_READ guard below)
 * and expanded differently on each pass by <trace/define_trace.h>.
 */

#undef TRACE_SYSTEM
#define TRACE_SYSTEM xe

#if !defined(_XE_TRACE_BO_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _XE_TRACE_BO_H_

#include <linux/tracepoint.h>
#include <linux/types.h>

#include "xe_bo.h"
#include "xe_bo_types.h"
#include "xe_vm.h"

/*
 * Helpers resolving the struct device name string for a bo, vm or vma,
 * used to tag each trace record with the originating device.
 */
#define __dev_name_bo(bo) dev_name(xe_bo_device(bo)->drm.dev)
#define __dev_name_vm(vm) dev_name((vm)->xe->drm.dev)
#define __dev_name_vma(vma) __dev_name_vm(xe_vma_vm(vma))

/*
 * Event class for BO events: records device name, BO size, BO flags and
 * the (possibly NULL) VM pointer the BO is associated with.
 */
DECLARE_EVENT_CLASS(xe_bo,
		    TP_PROTO(struct xe_bo *bo),
		    TP_ARGS(bo),

		    TP_STRUCT__entry(
			     __string(dev, __dev_name_bo(bo))
			     __field(size_t, size)
			     __field(u32, flags)
			     __field(struct xe_vm *, vm)
			     ),

		    TP_fast_assign(
			   __assign_str(dev);
			   __entry->size = bo->size;
			   __entry->flags = bo->flags;
			   __entry->vm = bo->vm;
			   ),

		    TP_printk("dev=%s, size=%zu, flags=0x%02x, vm=%p",
			      __get_str(dev), __entry->size,
			      __entry->flags, __entry->vm)
);

DEFINE_EVENT(xe_bo, xe_bo_cpu_fault,
	     TP_PROTO(struct xe_bo *bo),
	     TP_ARGS(bo)
);

DEFINE_EVENT(xe_bo, xe_bo_validate,
	     TP_PROTO(struct xe_bo *bo),
	     TP_ARGS(bo)
);

/*
 * BO migration event: placement indices are translated to human-readable
 * memory-type names via the xe_mem_type_to_name[] table.
 * move_lacks_source flags a move where no source copy is required.
 */
TRACE_EVENT(xe_bo_move,
	    TP_PROTO(struct xe_bo *bo, uint32_t new_placement, uint32_t old_placement,
		     bool move_lacks_source),
	    TP_ARGS(bo, new_placement, old_placement, move_lacks_source),
	    TP_STRUCT__entry(
		     __field(struct xe_bo *, bo)
		     __field(size_t, size)
		     __string(new_placement_name, xe_mem_type_to_name[new_placement])
		     __string(old_placement_name, xe_mem_type_to_name[old_placement])
		     __string(device_id, __dev_name_bo(bo))
		     __field(bool, move_lacks_source)
		     ),

	    TP_fast_assign(
		   __entry->bo = bo;
		   __entry->size = bo->size;
		   __assign_str(new_placement_name);
		   __assign_str(old_placement_name);
		   __assign_str(device_id);
		   __entry->move_lacks_source = move_lacks_source;
		   ),
	    TP_printk("move_lacks_source:%s, migrate object %p [size %zu] from %s to %s device_id:%s",
		      __entry->move_lacks_source ? "yes" : "no", __entry->bo, __entry->size,
		      __get_str(old_placement_name),
		      __get_str(new_placement_name), __get_str(device_id))
);

/*
 * Event class for VMA events: records device name, VMA pointer, the owning
 * VM's usm address-space id, the inclusive [start, end] GPU VA range and
 * the userptr address (if any).
 */
DECLARE_EVENT_CLASS(xe_vma,
		    TP_PROTO(struct xe_vma *vma),
		    TP_ARGS(vma),

		    TP_STRUCT__entry(
			     __string(dev, __dev_name_vma(vma))
			     __field(struct xe_vma *, vma)
			     __field(u32, asid)
			     __field(u64, start)
			     __field(u64, end)
			     __field(u64, ptr)
			     ),

		    TP_fast_assign(
			   __assign_str(dev);
			   __entry->vma = vma;
			   __entry->asid = xe_vma_vm(vma)->usm.asid;
			   __entry->start = xe_vma_start(vma);
			   /* end is stored inclusive: one below xe_vma_end() */
			   __entry->end = xe_vma_end(vma) - 1;
			   __entry->ptr = xe_vma_userptr(vma);
			   ),

		    TP_printk("dev=%s, vma=%p, asid=0x%05x, start=0x%012llx, end=0x%012llx, userptr=0x%012llx,",
			      __get_str(dev), __entry->vma, __entry->asid, __entry->start,
			      __entry->end, __entry->ptr)
)

DEFINE_EVENT(xe_vma, xe_vma_flush,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_pagefault,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_acc,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_bind,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_pf_bind,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_unbind,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_userptr_rebind_worker,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_userptr_rebind_exec,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_rebind_worker,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_rebind_exec,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_userptr_invalidate,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_invalidate,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_evict,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(xe_vma, xe_vma_userptr_invalidate_complete,
	     TP_PROTO(struct xe_vma *vma),
	     TP_ARGS(vma)
);

/*
 * Event class for VM lifecycle events: records device name, VM pointer and
 * the VM's usm address-space id.
 */
DECLARE_EVENT_CLASS(xe_vm,
		    TP_PROTO(struct xe_vm *vm),
		    TP_ARGS(vm),

		    TP_STRUCT__entry(
			     __string(dev, __dev_name_vm(vm))
			     __field(struct xe_vm *, vm)
			     __field(u32, asid)
			     ),

		    TP_fast_assign(
			   __assign_str(dev);
			   __entry->vm = vm;
			   __entry->asid = vm->usm.asid;
			   ),

		    TP_printk("dev=%s, vm=%p, asid=0x%05x", __get_str(dev),
			      __entry->vm, __entry->asid)
);

DEFINE_EVENT(xe_vm, xe_vm_kill,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_create,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_free,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_cpu_bind,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_restart,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_enter,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_retry,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_exit,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

DEFINE_EVENT(xe_vm, xe_vm_ops_fail,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);

#endif

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/xe
#define TRACE_INCLUDE_FILE xe_trace_bo
#include <trace/define_trace.h>