1 #if !defined(_AMDGPU_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) 2 #define _AMDGPU_TRACE_H_ 3 4 #include <linux/stringify.h> 5 #include <linux/types.h> 6 #include <linux/tracepoint.h> 7 8 #include <drm/drmP.h> 9 10 #undef TRACE_SYSTEM 11 #define TRACE_SYSTEM amdgpu 12 #define TRACE_INCLUDE_FILE amdgpu_trace 13 14 TRACE_EVENT(amdgpu_bo_create, 15 TP_PROTO(struct amdgpu_bo *bo), 16 TP_ARGS(bo), 17 TP_STRUCT__entry( 18 __field(struct amdgpu_bo *, bo) 19 __field(u32, pages) 20 ), 21 22 TP_fast_assign( 23 __entry->bo = bo; 24 __entry->pages = bo->tbo.num_pages; 25 ), 26 TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages) 27 ); 28 29 TRACE_EVENT(amdgpu_cs, 30 TP_PROTO(struct amdgpu_cs_parser *p, int i), 31 TP_ARGS(p, i), 32 TP_STRUCT__entry( 33 __field(struct amdgpu_bo_list *, bo_list) 34 __field(u32, ring) 35 __field(u32, dw) 36 __field(u32, fences) 37 ), 38 39 TP_fast_assign( 40 __entry->bo_list = p->bo_list; 41 __entry->ring = p->job->ring->idx; 42 __entry->dw = p->job->ibs[i].length_dw; 43 __entry->fences = amdgpu_fence_count_emitted( 44 p->job->ring); 45 ), 46 TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u", 47 __entry->bo_list, __entry->ring, __entry->dw, 48 __entry->fences) 49 ); 50 51 TRACE_EVENT(amdgpu_cs_ioctl, 52 TP_PROTO(struct amdgpu_job *job), 53 TP_ARGS(job), 54 TP_STRUCT__entry( 55 __field(struct amdgpu_device *, adev) 56 __field(struct amd_sched_job *, sched_job) 57 __field(struct amdgpu_ib *, ib) 58 __field(struct fence *, fence) 59 __field(char *, ring_name) 60 __field(u32, num_ibs) 61 ), 62 63 TP_fast_assign( 64 __entry->adev = job->adev; 65 __entry->sched_job = &job->base; 66 __entry->ib = job->ibs; 67 __entry->fence = &job->base.s_fence->base; 68 __entry->ring_name = job->ring->name; 69 __entry->num_ibs = job->num_ibs; 70 ), 71 TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u", 72 __entry->adev, __entry->sched_job, __entry->ib, 73 __entry->fence, __entry->ring_name, __entry->num_ibs) 74 ); 75 76 
/*
 * Scheduler job execution: records the same fields as amdgpu_cs_ioctl
 * (device, scheduler job, first IB, scheduler fence, ring name, IB count),
 * but fired when the scheduler actually runs the job.
 */
TRACE_EVENT(amdgpu_sched_run_job,
	    TP_PROTO(struct amdgpu_job *job),
	    TP_ARGS(job),
	    TP_STRUCT__entry(
			     __field(struct amdgpu_device *, adev)
			     __field(struct amd_sched_job *, sched_job)
			     __field(struct amdgpu_ib *, ib)
			     __field(struct fence *, fence)
			     __field(char *, ring_name)
			     __field(u32, num_ibs)
			     ),

	    TP_fast_assign(
			   __entry->adev = job->adev;
			   __entry->sched_job = &job->base;
			   __entry->ib = job->ibs;
			   __entry->fence = &job->base.s_fence->base;
			   __entry->ring_name = job->ring->name;
			   __entry->num_ibs = job->num_ibs;
			   ),
	    TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u",
		      __entry->adev, __entry->sched_job, __entry->ib,
		      __entry->fence, __entry->ring_name, __entry->num_ibs)
);


/*
 * VM ID allocation: a VM grabbed hardware VM id @vmid on ring @ring with
 * page-directory address @pd_addr.
 */
TRACE_EVENT(amdgpu_vm_grab_id,
	    TP_PROTO(struct amdgpu_vm *vm, int ring, unsigned vmid,
		     uint64_t pd_addr),
	    TP_ARGS(vm, ring, vmid, pd_addr),
	    TP_STRUCT__entry(
			     __field(struct amdgpu_vm *, vm)
			     __field(u32, ring)
			     __field(u32, vmid)
			     __field(u64, pd_addr)
			     ),

	    TP_fast_assign(
			   __entry->vm = vm;
			   __entry->ring = ring;
			   __entry->vmid = vmid;
			   __entry->pd_addr = pd_addr;
			   ),
	    TP_printk("vm=%p, ring=%u, id=%u, pd_addr=%010Lx", __entry->vm,
		      __entry->ring, __entry->vmid, __entry->pd_addr)
);

/*
 * VM mapping created: logs the BO and the mapping's interval-tree range
 * (start/last), offset, and flags.
 */
TRACE_EVENT(amdgpu_vm_bo_map,
	    TP_PROTO(struct amdgpu_bo_va *bo_va,
		     struct amdgpu_bo_va_mapping *mapping),
	    TP_ARGS(bo_va, mapping),
	    TP_STRUCT__entry(
			     __field(struct amdgpu_bo *, bo)
			     __field(long, start)
			     __field(long, last)
			     __field(u64, offset)
			     __field(u32, flags)
			     ),

	    TP_fast_assign(
			   __entry->bo = bo_va->bo;
			   __entry->start = mapping->it.start;
			   __entry->last = mapping->it.last;
			   __entry->offset = mapping->offset;
			   __entry->flags = mapping->flags;
			   ),
	    TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%08x",
		      __entry->bo, __entry->start, __entry->last,
		      __entry->offset, __entry->flags)
);

/* VM mapping removed: same fields as amdgpu_vm_bo_map, fired on unmap. */
TRACE_EVENT(amdgpu_vm_bo_unmap,
	    TP_PROTO(struct amdgpu_bo_va *bo_va,
		     struct amdgpu_bo_va_mapping *mapping),
	    TP_ARGS(bo_va, mapping),
	    TP_STRUCT__entry(
			     __field(struct amdgpu_bo *, bo)
			     __field(long, start)
			     __field(long, last)
			     __field(u64, offset)
			     __field(u32, flags)
			     ),

	    TP_fast_assign(
			   __entry->bo = bo_va->bo;
			   __entry->start = mapping->it.start;
			   __entry->last = mapping->it.last;
			   __entry->offset = mapping->offset;
			   __entry->flags = mapping->flags;
			   ),
	    TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%08x",
		      __entry->bo, __entry->start, __entry->last,
		      __entry->offset, __entry->flags)
);

/*
 * Shared event class for VM mapping updates: start offset, exclusive end
 * offset (it.last + 1), and flags.
 */
DECLARE_EVENT_CLASS(amdgpu_vm_mapping,
	    TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
	    TP_ARGS(mapping),
	    TP_STRUCT__entry(
			     __field(u64, soffset)
			     __field(u64, eoffset)
			     __field(u32, flags)
			     ),

	    TP_fast_assign(
			   __entry->soffset = mapping->it.start;
			   __entry->eoffset = mapping->it.last + 1;
			   __entry->flags = mapping->flags;
			   ),
	    TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x",
		      __entry->soffset, __entry->eoffset, __entry->flags)
);

/* Mapping flushed to the page tables. */
DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_update,
	    TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
	    TP_ARGS(mapping)
);

/* Mapping touched during page-table processing. */
DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_mapping,
	    TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
	    TP_ARGS(mapping)
);

/*
 * Page-table entry update: writing @count entries starting at PTE address
 * @pe, mapping from @addr with per-entry increment @incr and @flags.
 */
TRACE_EVENT(amdgpu_vm_set_page,
	    TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
		     uint32_t incr, uint32_t flags),
	    TP_ARGS(pe, addr, count, incr, flags),
	    TP_STRUCT__entry(
			     __field(u64, pe)
			     __field(u64, addr)
			     __field(u32, count)
			     __field(u32, incr)
			     __field(u32, flags)
			     ),

	    TP_fast_assign(
			   __entry->pe = pe;
			   __entry->addr = addr;
			   __entry->count = count;
			   __entry->incr = incr;
			   __entry->flags = flags;
			   ),
	    TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%08x, count=%u",
		      __entry->pe, __entry->addr, __entry->incr,
		      __entry->flags, __entry->count)
);

/* VM flush: page directory @pd_addr flushed for VM id @id on ring @ring. */
TRACE_EVENT(amdgpu_vm_flush,
	    TP_PROTO(uint64_t pd_addr, unsigned ring, unsigned id),
	    TP_ARGS(pd_addr, ring, id),
	    TP_STRUCT__entry(
			     __field(u64, pd_addr)
			     __field(u32, ring)
			     __field(u32, id)
			     ),

	    TP_fast_assign(
			   __entry->pd_addr = pd_addr;
			   __entry->ring = ring;
			   __entry->id = id;
			   ),
	    TP_printk("ring=%u, id=%u, pd_addr=%010Lx",
		      __entry->ring, __entry->id, __entry->pd_addr)
);

/* BO added to a BO list. */
TRACE_EVENT(amdgpu_bo_list_set,
	    TP_PROTO(struct amdgpu_bo_list *list, struct amdgpu_bo *bo),
	    TP_ARGS(list, bo),
	    TP_STRUCT__entry(
			     __field(struct amdgpu_bo_list *, list)
			     __field(struct amdgpu_bo *, bo)
			     ),

	    TP_fast_assign(
			   __entry->list = list;
			   __entry->bo = bo;
			   ),
	    TP_printk("list=%p, bo=%p", __entry->list, __entry->bo)
);

#endif /* _AMDGPU_TRACE_H_ || TRACE_HEADER_MULTI_READ */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#include <trace/define_trace.h>