/*
 * i915_trace.h - tracepoint definitions for the i915 DRM driver.
 *
 * Defines the "i915" trace system: GEM object lifecycle events, eviction,
 * ring/request activity, page flips and register I/O.  This header is
 * included multiple times by the tracepoint machinery, hence the
 * TRACE_HEADER_MULTI_READ escape in the include guard below.
 *
 * NOTE(review): event names, field names and TP_printk format strings are
 * exported to userspace via tracefs and may be parsed by tools — treat them
 * as ABI and do not change them casually.
 */
#if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _I915_TRACE_H_

#include <linux/stringify.h>
#include <linux/types.h>
#include <linux/tracepoint.h>

#include <drm/drmP.h>
#include "i915_drv.h"
#include "intel_ringbuffer.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM i915
#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
#define TRACE_INCLUDE_FILE i915_trace

/* object tracking */

/* Records the object pointer and its backing size at creation time. */
TRACE_EVENT(i915_gem_object_create,
	    TP_PROTO(struct drm_i915_gem_object *obj),
	    TP_ARGS(obj),

	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     __field(u32, size)
			     ),

	    TP_fast_assign(
			   __entry->obj = obj;
			   __entry->size = obj->base.size;
			   ),

	    TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
);

/*
 * Object bound into the GTT: offset/size are read from obj->gtt_space,
 * so the object must already have a GTT node when this fires.
 * 'mappable' flags whether the binding is in the CPU-mappable aperture.
 */
TRACE_EVENT(i915_gem_object_bind,
	    TP_PROTO(struct drm_i915_gem_object *obj, bool mappable),
	    TP_ARGS(obj, mappable),

	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     __field(u32, offset)
			     __field(u32, size)
			     __field(bool, mappable)
			     ),

	    TP_fast_assign(
			   __entry->obj = obj;
			   __entry->offset = obj->gtt_space->start;
			   __entry->size = obj->gtt_space->size;
			   __entry->mappable = mappable;
			   ),

	    TP_printk("obj=%p, offset=%08x size=%x%s",
		      __entry->obj, __entry->offset, __entry->size,
		      __entry->mappable ? ", mappable" : "")
);

/* Object removed from the GTT; snapshots the node it occupied. */
TRACE_EVENT(i915_gem_object_unbind,
	    TP_PROTO(struct drm_i915_gem_object *obj),
	    TP_ARGS(obj),

	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     __field(u32, offset)
			     __field(u32, size)
			     ),

	    TP_fast_assign(
			   __entry->obj = obj;
			   __entry->offset = obj->gtt_space->start;
			   __entry->size = obj->gtt_space->size;
			   ),

	    TP_printk("obj=%p, offset=%08x size=%x",
		      __entry->obj, __entry->offset, __entry->size)
);

/*
 * Cache-domain transition.  Old and new domains are packed into one u32
 * per field: old value in the high 16 bits, new value in the low 16 bits
 * (see the '<< 16' below and the matching shifts/masks in TP_printk).
 */
TRACE_EVENT(i915_gem_object_change_domain,
	    TP_PROTO(struct drm_i915_gem_object *obj, u32 old_read, u32 old_write),
	    TP_ARGS(obj, old_read, old_write),

	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     __field(u32, read_domains)
			     __field(u32, write_domain)
			     ),

	    TP_fast_assign(
			   __entry->obj = obj;
			   __entry->read_domains = obj->base.read_domains | (old_read << 16);
			   __entry->write_domain = obj->base.write_domain | (old_write << 16);
			   ),

	    TP_printk("obj=%p, read=%02x=>%02x, write=%02x=>%02x",
		      __entry->obj,
		      __entry->read_domains >> 16,
		      __entry->read_domains & 0xffff,
		      __entry->write_domain >> 16,
		      __entry->write_domain & 0xffff)
);

/* CPU write into an object (pwrite ioctl path): byte offset and length. */
TRACE_EVENT(i915_gem_object_pwrite,
	    TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
	    TP_ARGS(obj, offset, len),

	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     __field(u32, offset)
			     __field(u32, len)
			     ),

	    TP_fast_assign(
			   __entry->obj = obj;
			   __entry->offset = offset;
			   __entry->len = len;
			   ),

	    TP_printk("obj=%p, offset=%u, len=%u",
		      __entry->obj, __entry->offset, __entry->len)
);

/* CPU read from an object (pread ioctl path): byte offset and length. */
TRACE_EVENT(i915_gem_object_pread,
	    TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
	    TP_ARGS(obj, offset, len),

	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     __field(u32, offset)
			     __field(u32, len)
			     ),

	    TP_fast_assign(
			   __entry->obj = obj;
			   __entry->offset = offset;
			   __entry->len = len;
			   ),

	    TP_printk("obj=%p, offset=%u, len=%u",
		      __entry->obj, __entry->offset, __entry->len)
);

/*
 * Page fault on an object mapping.  'gtt' distinguishes GTT vs CPU
 * mmap faults; 'index' is the faulting page index; 'write' marks a
 * write fault.
 */
TRACE_EVENT(i915_gem_object_fault,
	    TP_PROTO(struct drm_i915_gem_object *obj, u32 index, bool gtt, bool write),
	    TP_ARGS(obj, index, gtt, write),

	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     __field(u32, index)
			     __field(bool, gtt)
			     __field(bool, write)
			     ),

	    TP_fast_assign(
			   __entry->obj = obj;
			   __entry->index = index;
			   __entry->gtt = gtt;
			   __entry->write = write;
			   ),

	    TP_printk("obj=%p, %s index=%u %s",
		      __entry->obj,
		      __entry->gtt ? "GTT" : "CPU",
		      __entry->index,
		      __entry->write ? ", writable" : "")
);

/* Shared template for events that only record the object pointer. */
DECLARE_EVENT_CLASS(i915_gem_object,
		    TP_PROTO(struct drm_i915_gem_object *obj),
		    TP_ARGS(obj),

		    TP_STRUCT__entry(
				     __field(struct drm_i915_gem_object *, obj)
				     ),

		    TP_fast_assign(
				   __entry->obj = obj;
				   ),

		    TP_printk("obj=%p", __entry->obj)
);

DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
	     TP_PROTO(struct drm_i915_gem_object *obj),
	     TP_ARGS(obj)
);

DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
	     TP_PROTO(struct drm_i915_gem_object *obj),
	     TP_ARGS(obj)
);

/*
 * GTT eviction attempt: requested allocation size/alignment and whether
 * the request is restricted to the mappable aperture.
 */
TRACE_EVENT(i915_gem_evict,
	    TP_PROTO(struct drm_device *dev, u32 size, u32 align, bool mappable),
	    TP_ARGS(dev, size, align, mappable),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(u32, size)
			     __field(u32, align)
			     __field(bool, mappable)
			     ),

	    TP_fast_assign(
			   __entry->dev = dev->primary->index;
			   __entry->size = size;
			   __entry->align = align;
			   __entry->mappable = mappable;
			   ),

	    TP_printk("dev=%d, size=%d, align=%d %s",
		      __entry->dev, __entry->size, __entry->align,
		      __entry->mappable ? ", mappable" : "")
);

/* Full GTT eviction; 'purgeable' limits it to purgeable objects only. */
TRACE_EVENT(i915_gem_evict_everything,
	    TP_PROTO(struct drm_device *dev, bool purgeable),
	    TP_ARGS(dev, purgeable),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(bool, purgeable)
			     ),

	    TP_fast_assign(
			   __entry->dev = dev->primary->index;
			   __entry->purgeable = purgeable;
			   ),

	    TP_printk("dev=%d%s",
		      __entry->dev,
		      __entry->purgeable ? ", purgeable only" : "")
);

/*
 * Batchbuffer dispatched to a ring.  Note the side effect in
 * TP_fast_assign: i915_trace_irq_get() is called so that a completion
 * interrupt is requested for this seqno while the tracepoint is enabled
 * (presumably to make i915_gem_request_complete fire promptly — the
 * helper is defined elsewhere).
 */
TRACE_EVENT(i915_gem_ring_dispatch,
	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
	    TP_ARGS(ring, seqno),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(u32, ring)
			     __field(u32, seqno)
			     ),

	    TP_fast_assign(
			   __entry->dev = ring->dev->primary->index;
			   __entry->ring = ring->id;
			   __entry->seqno = seqno;
			   i915_trace_irq_get(ring, seqno);
			   ),

	    TP_printk("dev=%u, ring=%u, seqno=%u",
		      __entry->dev, __entry->ring, __entry->seqno)
);

/* Ring flush: bitmasks of domains to invalidate and to flush. */
TRACE_EVENT(i915_gem_ring_flush,
	    TP_PROTO(struct intel_ring_buffer *ring, u32 invalidate, u32 flush),
	    TP_ARGS(ring, invalidate, flush),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(u32, ring)
			     __field(u32, invalidate)
			     __field(u32, flush)
			     ),

	    TP_fast_assign(
			   __entry->dev = ring->dev->primary->index;
			   __entry->ring = ring->id;
			   __entry->invalidate = invalidate;
			   __entry->flush = flush;
			   ),

	    /* NOTE(review): ring printed as %x here, %u in sibling events. */
	    TP_printk("dev=%u, ring=%x, invalidate=%04x, flush=%04x",
		      __entry->dev, __entry->ring,
		      __entry->invalidate, __entry->flush)
);

/* Shared template for request lifecycle events: (dev, ring, seqno). */
DECLARE_EVENT_CLASS(i915_gem_request,
		    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
		    TP_ARGS(ring, seqno),

		    TP_STRUCT__entry(
				     __field(u32, dev)
				     __field(u32, ring)
				     __field(u32, seqno)
				     ),

		    TP_fast_assign(
				   __entry->dev = ring->dev->primary->index;
				   __entry->ring = ring->id;
				   __entry->seqno = seqno;
				   ),

		    TP_printk("dev=%u, ring=%u, seqno=%u",
			      __entry->dev, __entry->ring, __entry->seqno)
);

DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
	     TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
	     TP_ARGS(ring, seqno)
);

DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
	     TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
	     TP_ARGS(ring, seqno)
);

DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
	     TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
	     TP_ARGS(ring, seqno)
);

DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_begin,
	     TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
	     TP_ARGS(ring, seqno)
);

DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
	     TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
	     TP_ARGS(ring, seqno)
);

/* Shared template for ring-level events that record only (dev, ring). */
DECLARE_EVENT_CLASS(i915_ring,
		    TP_PROTO(struct intel_ring_buffer *ring),
		    TP_ARGS(ring),

		    TP_STRUCT__entry(
				     __field(u32, dev)
				     __field(u32, ring)
				     ),

		    TP_fast_assign(
				   __entry->dev = ring->dev->primary->index;
				   __entry->ring = ring->id;
				   ),

		    TP_printk("dev=%u, ring=%u", __entry->dev, __entry->ring)
);

DEFINE_EVENT(i915_ring, i915_ring_wait_begin,
	     TP_PROTO(struct intel_ring_buffer *ring),
	     TP_ARGS(ring)
);

DEFINE_EVENT(i915_ring, i915_ring_wait_end,
	     TP_PROTO(struct intel_ring_buffer *ring),
	     TP_ARGS(ring)
);

/* Page flip queued on a display plane for the given object. */
TRACE_EVENT(i915_flip_request,
	    TP_PROTO(int plane, struct drm_i915_gem_object *obj),

	    TP_ARGS(plane, obj),

	    TP_STRUCT__entry(
			     __field(int, plane)
			     __field(struct drm_i915_gem_object *, obj)
			     ),

	    TP_fast_assign(
			   __entry->plane = plane;
			   __entry->obj = obj;
			   ),

	    TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
);

/* Page flip completed on a display plane for the given object. */
TRACE_EVENT(i915_flip_complete,
	    TP_PROTO(int plane, struct drm_i915_gem_object *obj),

	    TP_ARGS(plane, obj),

	    TP_STRUCT__entry(
			     __field(int, plane)
			     __field(struct drm_i915_gem_object *, obj)
			     ),

	    TP_fast_assign(
			   __entry->plane = plane;
			   __entry->obj = obj;
			   ),

	    TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
);

/*
 * MMIO register access.  'len' is the access width in bytes; the u64
 * value is printed as a (low, high) pair of 32-bit halves — see the
 * mask and shift in TP_printk.  Fields are ordered largest-first to
 * minimise padding in the ring-buffer record.
 */
TRACE_EVENT(i915_reg_rw,
	    TP_PROTO(bool write, u32 reg, u64 val, int len),

	    TP_ARGS(write, reg, val, len),

	    TP_STRUCT__entry(
			     __field(u64, val)
			     __field(u32, reg)
			     __field(u16, write)
			     __field(u16, len)
			     ),

	    TP_fast_assign(
			   __entry->val = (u64)val;
			   __entry->reg = reg;
			   __entry->write = write;
			   __entry->len = len;
			   ),

	    TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
		      __entry->write ? "write" : "read",
		      __entry->reg, __entry->len,
		      (u32)(__entry->val & 0xffffffff),
		      (u32)(__entry->val >> 32))
);

#endif /* _I915_TRACE_H_ */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#include <trace/define_trace.h>