/* xref: /linux/drivers/gpu/drm/i915/i915_trace.h (revision e58e871becec2d3b04ed91c0c16fe8deac9c9dfa) */
#if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _I915_TRACE_H_

#include <linux/stringify.h>
#include <linux/types.h>
#include <linux/tracepoint.h>

#include <drm/drmP.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_ringbuffer.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM i915
#define TRACE_INCLUDE_FILE i915_trace

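/*
 * Usage sketch (illustrative, not part of the original header): every
 * TRACE_EVENT()/DEFINE_EVENT() below generates a trace_<event>() helper that
 * the driver calls from the matching code path, plus an event node in the
 * i915 group in tracefs. A typical way to capture the events from userspace,
 * assuming tracefs is mounted in the usual place, is:
 *
 *	# cd /sys/kernel/debug/tracing		(or /sys/kernel/tracing)
 *	# echo 1 > events/i915/enable
 *	# cat trace_pipe
 */
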
/* watermark/fifo updates */

TRACE_EVENT(intel_cpu_fifo_underrun,
	    TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe),
	    TP_ARGS(dev_priv, pipe),

	    TP_STRUCT__entry(
			     __field(enum pipe, pipe)
			     __field(u32, frame)
			     __field(u32, scanline)
			     ),

	    TP_fast_assign(
			   __entry->pipe = pipe;
			   __entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe);
			   __entry->scanline = intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe));
			   ),

	    TP_printk("pipe %c, frame=%u, scanline=%u",
		      pipe_name(__entry->pipe),
		      __entry->frame, __entry->scanline)
);

TRACE_EVENT(intel_pch_fifo_underrun,
	    TP_PROTO(struct drm_i915_private *dev_priv, enum transcoder pch_transcoder),
	    TP_ARGS(dev_priv, pch_transcoder),

	    TP_STRUCT__entry(
			     __field(enum pipe, pipe)
			     __field(u32, frame)
			     __field(u32, scanline)
			     ),

	    TP_fast_assign(
			   enum pipe pipe = (enum pipe)pch_transcoder;
			   __entry->pipe = pipe;
			   __entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe);
			   __entry->scanline = intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe));
			   ),

	    TP_printk("pch transcoder %c, frame=%u, scanline=%u",
		      pipe_name(__entry->pipe),
		      __entry->frame, __entry->scanline)
);
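
/*
 * Usage sketch (illustrative; the real call sites live in the FIFO underrun
 * handling code, not in this header): the underrun handlers are expected to
 * emit these events with arguments matching the TP_PROTO() signatures above,
 * e.g.
 *
 *	trace_intel_cpu_fifo_underrun(dev_priv, pipe);
 *	trace_intel_pch_fifo_underrun(dev_priv, pch_transcoder);
 */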

TRACE_EVENT(intel_memory_cxsr,
	    TP_PROTO(struct drm_i915_private *dev_priv, bool old, bool new),
	    TP_ARGS(dev_priv, old, new),

	    TP_STRUCT__entry(
			     __array(u32, frame, 3)
			     __array(u32, scanline, 3)
			     __field(bool, old)
			     __field(bool, new)
			     ),

	    TP_fast_assign(
			   enum pipe pipe;
			   for_each_pipe(dev_priv, pipe) {
				   __entry->frame[pipe] =
					   dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe);
				   __entry->scanline[pipe] =
					   intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe));
			   }
			   __entry->old = old;
			   __entry->new = new;
			   ),

	    TP_printk("%s->%s, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
		      onoff(__entry->old), onoff(__entry->new),
		      __entry->frame[PIPE_A], __entry->scanline[PIPE_A],
		      __entry->frame[PIPE_B], __entry->scanline[PIPE_B],
		      __entry->frame[PIPE_C], __entry->scanline[PIPE_C])
);

TRACE_EVENT(vlv_wm,
	    TP_PROTO(struct intel_crtc *crtc, const struct vlv_wm_values *wm),
	    TP_ARGS(crtc, wm),

	    TP_STRUCT__entry(
			     __field(enum pipe, pipe)
			     __field(u32, frame)
			     __field(u32, scanline)
			     __field(u32, level)
			     __field(u32, cxsr)
			     __field(u32, primary)
			     __field(u32, sprite0)
			     __field(u32, sprite1)
			     __field(u32, cursor)
			     __field(u32, sr_plane)
			     __field(u32, sr_cursor)
			     ),

	    TP_fast_assign(
			   __entry->pipe = crtc->pipe;
			   __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
										       crtc->pipe);
			   __entry->scanline = intel_get_crtc_scanline(crtc);
			   __entry->level = wm->level;
			   __entry->cxsr = wm->cxsr;
			   __entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY];
			   __entry->sprite0 = wm->pipe[crtc->pipe].plane[PLANE_SPRITE0];
			   __entry->sprite1 = wm->pipe[crtc->pipe].plane[PLANE_SPRITE1];
			   __entry->cursor = wm->pipe[crtc->pipe].plane[PLANE_CURSOR];
			   __entry->sr_plane = wm->sr.plane;
			   __entry->sr_cursor = wm->sr.cursor;
			   ),

	    TP_printk("pipe %c, frame=%u, scanline=%u, level=%d, cxsr=%d, wm %d/%d/%d/%d, sr %d/%d",
		      pipe_name(__entry->pipe), __entry->frame,
		      __entry->scanline, __entry->level, __entry->cxsr,
		      __entry->primary, __entry->sprite0, __entry->sprite1, __entry->cursor,
		      __entry->sr_plane, __entry->sr_cursor)
);

TRACE_EVENT(vlv_fifo_size,
	    TP_PROTO(struct intel_crtc *crtc, u32 sprite0_start, u32 sprite1_start, u32 fifo_size),
	    TP_ARGS(crtc, sprite0_start, sprite1_start, fifo_size),

	    TP_STRUCT__entry(
			     __field(enum pipe, pipe)
			     __field(u32, frame)
			     __field(u32, scanline)
			     __field(u32, sprite0_start)
			     __field(u32, sprite1_start)
			     __field(u32, fifo_size)
			     ),

	    TP_fast_assign(
			   __entry->pipe = crtc->pipe;
			   __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
										       crtc->pipe);
			   __entry->scanline = intel_get_crtc_scanline(crtc);
			   __entry->sprite0_start = sprite0_start;
			   __entry->sprite1_start = sprite1_start;
			   __entry->fifo_size = fifo_size;
			   ),

	    TP_printk("pipe %c, frame=%u, scanline=%u, %d/%d/%d",
		      pipe_name(__entry->pipe), __entry->frame,
		      __entry->scanline, __entry->sprite0_start,
		      __entry->sprite1_start, __entry->fifo_size)
);

/* plane updates */

TRACE_EVENT(intel_update_plane,
	    TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc),
	    TP_ARGS(plane, crtc),

	    TP_STRUCT__entry(
			     __field(enum pipe, pipe)
			     __field(const char *, name)
			     __field(u32, frame)
			     __field(u32, scanline)
			     __array(int, src, 4)
			     __array(int, dst, 4)
			     ),

	    TP_fast_assign(
			   __entry->pipe = crtc->pipe;
			   __entry->name = plane->name;
			   __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
										       crtc->pipe);
			   __entry->scanline = intel_get_crtc_scanline(crtc);
			   memcpy(__entry->src, &plane->state->src, sizeof(__entry->src));
			   memcpy(__entry->dst, &plane->state->dst, sizeof(__entry->dst));
			   ),

	    TP_printk("pipe %c, plane %s, frame=%u, scanline=%u, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT,
		      pipe_name(__entry->pipe), __entry->name,
		      __entry->frame, __entry->scanline,
		      DRM_RECT_FP_ARG((const struct drm_rect *)__entry->src),
		      DRM_RECT_ARG((const struct drm_rect *)__entry->dst))
);
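
/*
 * Usage sketch (illustrative; "plane" and "crtc" are placeholders, not
 * specific driver variables): the plane update and disable paths are
 * expected to emit
 *
 *	trace_intel_update_plane(plane, crtc);
 *	trace_intel_disable_plane(plane, crtc);
 *
 * with a struct drm_plane * and the struct intel_crtc * it sits on, matching
 * the TP_PROTO() signatures.
 */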

TRACE_EVENT(intel_disable_plane,
	    TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc),
	    TP_ARGS(plane, crtc),

	    TP_STRUCT__entry(
			     __field(enum pipe, pipe)
			     __field(const char *, name)
			     __field(u32, frame)
			     __field(u32, scanline)
			     ),

	    TP_fast_assign(
			   __entry->pipe = crtc->pipe;
			   __entry->name = plane->name;
			   __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
										       crtc->pipe);
			   __entry->scanline = intel_get_crtc_scanline(crtc);
			   ),

	    TP_printk("pipe %c, plane %s, frame=%u, scanline=%u",
		      pipe_name(__entry->pipe), __entry->name,
		      __entry->frame, __entry->scanline)
);

/* pipe updates */

TRACE_EVENT(i915_pipe_update_start,
	    TP_PROTO(struct intel_crtc *crtc),
	    TP_ARGS(crtc),

	    TP_STRUCT__entry(
			     __field(enum pipe, pipe)
			     __field(u32, frame)
			     __field(u32, scanline)
			     __field(u32, min)
			     __field(u32, max)
			     ),

	    TP_fast_assign(
			   __entry->pipe = crtc->pipe;
			   __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
										       crtc->pipe);
			   __entry->scanline = intel_get_crtc_scanline(crtc);
			   __entry->min = crtc->debug.min_vbl;
			   __entry->max = crtc->debug.max_vbl;
			   ),

	    TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
		      pipe_name(__entry->pipe), __entry->frame,
		      __entry->scanline, __entry->min, __entry->max)
);

TRACE_EVENT(i915_pipe_update_vblank_evaded,
	    TP_PROTO(struct intel_crtc *crtc),
	    TP_ARGS(crtc),

	    TP_STRUCT__entry(
			     __field(enum pipe, pipe)
			     __field(u32, frame)
			     __field(u32, scanline)
			     __field(u32, min)
			     __field(u32, max)
			     ),

	    TP_fast_assign(
			   __entry->pipe = crtc->pipe;
			   __entry->frame = crtc->debug.start_vbl_count;
			   __entry->scanline = crtc->debug.scanline_start;
			   __entry->min = crtc->debug.min_vbl;
			   __entry->max = crtc->debug.max_vbl;
			   ),

	    TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
		      pipe_name(__entry->pipe), __entry->frame,
		      __entry->scanline, __entry->min, __entry->max)
);

TRACE_EVENT(i915_pipe_update_end,
	    TP_PROTO(struct intel_crtc *crtc, u32 frame, int scanline_end),
	    TP_ARGS(crtc, frame, scanline_end),

	    TP_STRUCT__entry(
			     __field(enum pipe, pipe)
			     __field(u32, frame)
			     __field(u32, scanline)
			     ),

	    TP_fast_assign(
			   __entry->pipe = crtc->pipe;
			   __entry->frame = frame;
			   __entry->scanline = scanline_end;
			   ),

	    TP_printk("pipe %c, frame=%u, scanline=%u",
		      pipe_name(__entry->pipe), __entry->frame,
		      __entry->scanline)
);
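
/*
 * Usage sketch (illustrative): the vblank-evasion critical section is
 * expected to be bracketed by the three pipe update events, roughly
 *
 *	trace_i915_pipe_update_start(crtc);
 *	... wait for the scanline to leave the [min, max] window ...
 *	trace_i915_pipe_update_vblank_evaded(crtc);
 *	... program the double-buffered registers ...
 *	trace_i915_pipe_update_end(crtc, end_vbl_count, scanline_end);
 *
 * where end_vbl_count and scanline_end are values sampled by the caller
 * (placeholder names).
 */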

/* object tracking */

TRACE_EVENT(i915_gem_object_create,
	    TP_PROTO(struct drm_i915_gem_object *obj),
	    TP_ARGS(obj),

	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     __field(u32, size)
			     ),

	    TP_fast_assign(
			   __entry->obj = obj;
			   __entry->size = obj->base.size;
			   ),

	    TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
);

TRACE_EVENT(i915_gem_shrink,
	    TP_PROTO(struct drm_i915_private *i915, unsigned long target, unsigned flags),
	    TP_ARGS(i915, target, flags),

	    TP_STRUCT__entry(
			     __field(int, dev)
			     __field(unsigned long, target)
			     __field(unsigned, flags)
			     ),

	    TP_fast_assign(
			   __entry->dev = i915->drm.primary->index;
			   __entry->target = target;
			   __entry->flags = flags;
			   ),

	    TP_printk("dev=%d, target=%lu, flags=%x",
		      __entry->dev, __entry->target, __entry->flags)
);

TRACE_EVENT(i915_vma_bind,
	    TP_PROTO(struct i915_vma *vma, unsigned flags),
	    TP_ARGS(vma, flags),

	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     __field(struct i915_address_space *, vm)
			     __field(u64, offset)
			     __field(u32, size)
			     __field(unsigned, flags)
			     ),

	    TP_fast_assign(
			   __entry->obj = vma->obj;
			   __entry->vm = vma->vm;
			   __entry->offset = vma->node.start;
			   __entry->size = vma->node.size;
			   __entry->flags = flags;
			   ),

	    TP_printk("obj=%p, offset=%016llx size=%x%s vm=%p",
		      __entry->obj, __entry->offset, __entry->size,
		      __entry->flags & PIN_MAPPABLE ? ", mappable" : "",
		      __entry->vm)
);

TRACE_EVENT(i915_vma_unbind,
	    TP_PROTO(struct i915_vma *vma),
	    TP_ARGS(vma),

	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     __field(struct i915_address_space *, vm)
			     __field(u64, offset)
			     __field(u32, size)
			     ),

	    TP_fast_assign(
			   __entry->obj = vma->obj;
			   __entry->vm = vma->vm;
			   __entry->offset = vma->node.start;
			   __entry->size = vma->node.size;
			   ),

	    TP_printk("obj=%p, offset=%016llx size=%x vm=%p",
		      __entry->obj, __entry->offset, __entry->size, __entry->vm)
);

TRACE_EVENT(i915_gem_object_pwrite,
	    TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
	    TP_ARGS(obj, offset, len),

	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     __field(u32, offset)
			     __field(u32, len)
			     ),

	    TP_fast_assign(
			   __entry->obj = obj;
			   __entry->offset = offset;
			   __entry->len = len;
			   ),

	    TP_printk("obj=%p, offset=%u, len=%u",
		      __entry->obj, __entry->offset, __entry->len)
);

TRACE_EVENT(i915_gem_object_pread,
	    TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
	    TP_ARGS(obj, offset, len),

	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     __field(u32, offset)
			     __field(u32, len)
			     ),

	    TP_fast_assign(
			   __entry->obj = obj;
			   __entry->offset = offset;
			   __entry->len = len;
			   ),

	    TP_printk("obj=%p, offset=%u, len=%u",
		      __entry->obj, __entry->offset, __entry->len)
);

TRACE_EVENT(i915_gem_object_fault,
	    TP_PROTO(struct drm_i915_gem_object *obj, u32 index, bool gtt, bool write),
	    TP_ARGS(obj, index, gtt, write),

	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     __field(u32, index)
			     __field(bool, gtt)
			     __field(bool, write)
			     ),

	    TP_fast_assign(
			   __entry->obj = obj;
			   __entry->index = index;
			   __entry->gtt = gtt;
			   __entry->write = write;
			   ),

	    TP_printk("obj=%p, %s index=%u %s",
		      __entry->obj,
		      __entry->gtt ? "GTT" : "CPU",
		      __entry->index,
		      __entry->write ? ", writable" : "")
);

DECLARE_EVENT_CLASS(i915_gem_object,
	    TP_PROTO(struct drm_i915_gem_object *obj),
	    TP_ARGS(obj),

	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     ),

	    TP_fast_assign(
			   __entry->obj = obj;
			   ),

	    TP_printk("obj=%p", __entry->obj)
);
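
/*
 * The DEFINE_EVENT()s below instantiate the i915_gem_object class declared
 * above: each named event reuses the class's TP_STRUCT__entry, TP_fast_assign
 * and TP_printk and only supplies its own name and prototype. Illustrative
 * call (the real call sites are elsewhere in the driver):
 *
 *	trace_i915_gem_object_clflush(obj);
 */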

DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
	     TP_PROTO(struct drm_i915_gem_object *obj),
	     TP_ARGS(obj)
);

DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
	    TP_PROTO(struct drm_i915_gem_object *obj),
	    TP_ARGS(obj)
);

TRACE_EVENT(i915_gem_evict,
	    TP_PROTO(struct i915_address_space *vm, u32 size, u32 align, unsigned int flags),
	    TP_ARGS(vm, size, align, flags),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(struct i915_address_space *, vm)
			     __field(u32, size)
			     __field(u32, align)
			     __field(unsigned int, flags)
			    ),

	    TP_fast_assign(
			   __entry->dev = vm->i915->drm.primary->index;
			   __entry->vm = vm;
			   __entry->size = size;
			   __entry->align = align;
			   __entry->flags = flags;
			  ),

	    TP_printk("dev=%d, vm=%p, size=%d, align=%d %s",
		      __entry->dev, __entry->vm, __entry->size, __entry->align,
		      __entry->flags & PIN_MAPPABLE ? ", mappable" : "")
);

TRACE_EVENT(i915_gem_evict_everything,
	    TP_PROTO(struct drm_device *dev),
	    TP_ARGS(dev),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			    ),

	    TP_fast_assign(
			   __entry->dev = dev->primary->index;
			  ),

	    TP_printk("dev=%d", __entry->dev)
);

TRACE_EVENT(i915_gem_evict_vm,
	    TP_PROTO(struct i915_address_space *vm),
	    TP_ARGS(vm),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(struct i915_address_space *, vm)
			    ),

	    TP_fast_assign(
			   __entry->dev = vm->i915->drm.primary->index;
			   __entry->vm = vm;
			  ),

	    TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
);

TRACE_EVENT(i915_gem_evict_node,
	    TP_PROTO(struct i915_address_space *vm, struct drm_mm_node *node, unsigned int flags),
	    TP_ARGS(vm, node, flags),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(struct i915_address_space *, vm)
			     __field(u64, start)
			     __field(u64, size)
			     __field(unsigned long, color)
			     __field(unsigned int, flags)
			    ),

	    TP_fast_assign(
			   __entry->dev = vm->i915->drm.primary->index;
			   __entry->vm = vm;
			   __entry->start = node->start;
			   __entry->size = node->size;
			   __entry->color = node->color;
			   __entry->flags = flags;
			  ),

	    TP_printk("dev=%d, vm=%p, start=%llx size=%llx, color=%lx, flags=%x",
		      __entry->dev, __entry->vm,
		      __entry->start, __entry->size,
		      __entry->color, __entry->flags)
);

TRACE_EVENT(i915_gem_ring_sync_to,
	    TP_PROTO(struct drm_i915_gem_request *to,
		     struct drm_i915_gem_request *from),
	    TP_ARGS(to, from),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(u32, sync_from)
			     __field(u32, sync_to)
			     __field(u32, seqno)
			     ),

	    TP_fast_assign(
			   __entry->dev = from->i915->drm.primary->index;
			   __entry->sync_from = from->engine->id;
			   __entry->sync_to = to->engine->id;
			   __entry->seqno = from->global_seqno;
			   ),

	    TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
		      __entry->dev,
		      __entry->sync_from, __entry->sync_to,
		      __entry->seqno)
);

TRACE_EVENT(i915_gem_request_queue,
	    TP_PROTO(struct drm_i915_gem_request *req, u32 flags),
	    TP_ARGS(req, flags),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(u32, ring)
			     __field(u32, ctx)
			     __field(u32, seqno)
			     __field(u32, flags)
			     ),

	    TP_fast_assign(
			   __entry->dev = req->i915->drm.primary->index;
			   __entry->ring = req->engine->id;
			   __entry->ctx = req->fence.context;
			   __entry->seqno = req->fence.seqno;
			   __entry->flags = flags;
			   ),

	    TP_printk("dev=%u, ring=%u, ctx=%u, seqno=%u, flags=0x%x",
		      __entry->dev, __entry->ring, __entry->ctx, __entry->seqno,
		      __entry->flags)
);

TRACE_EVENT(i915_gem_ring_flush,
	    TP_PROTO(struct drm_i915_gem_request *req, u32 invalidate, u32 flush),
	    TP_ARGS(req, invalidate, flush),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(u32, ring)
			     __field(u32, invalidate)
			     __field(u32, flush)
			     ),

	    TP_fast_assign(
			   __entry->dev = req->i915->drm.primary->index;
			   __entry->ring = req->engine->id;
			   __entry->invalidate = invalidate;
			   __entry->flush = flush;
			   ),

	    TP_printk("dev=%u, ring=%x, invalidate=%04x, flush=%04x",
		      __entry->dev, __entry->ring,
		      __entry->invalidate, __entry->flush)
);

DECLARE_EVENT_CLASS(i915_gem_request,
	    TP_PROTO(struct drm_i915_gem_request *req),
	    TP_ARGS(req),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(u32, ctx)
			     __field(u32, ring)
			     __field(u32, seqno)
			     __field(u32, global)
			     ),

	    TP_fast_assign(
			   __entry->dev = req->i915->drm.primary->index;
			   __entry->ring = req->engine->id;
			   __entry->ctx = req->fence.context;
			   __entry->seqno = req->fence.seqno;
			   __entry->global = req->global_seqno;
			   ),

	    TP_printk("dev=%u, ring=%u, ctx=%u, seqno=%u, global=%u",
		      __entry->dev, __entry->ring, __entry->ctx, __entry->seqno,
		      __entry->global)
);

DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
	    TP_PROTO(struct drm_i915_gem_request *req),
	    TP_ARGS(req)
);

#if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS)
DEFINE_EVENT(i915_gem_request, i915_gem_request_submit,
	     TP_PROTO(struct drm_i915_gem_request *req),
	     TP_ARGS(req)
);

DEFINE_EVENT(i915_gem_request, i915_gem_request_execute,
	     TP_PROTO(struct drm_i915_gem_request *req),
	     TP_ARGS(req)
);

DECLARE_EVENT_CLASS(i915_gem_request_hw,
		    TP_PROTO(struct drm_i915_gem_request *req,
			     unsigned int port),
		    TP_ARGS(req, port),

		    TP_STRUCT__entry(
				     __field(u32, dev)
				     __field(u32, ring)
				     __field(u32, seqno)
				     __field(u32, global_seqno)
				     __field(u32, ctx)
				     __field(u32, port)
				    ),

		    TP_fast_assign(
				   __entry->dev = req->i915->drm.primary->index;
				   __entry->ring = req->engine->id;
				   __entry->ctx = req->fence.context;
				   __entry->seqno = req->fence.seqno;
				   __entry->global_seqno = req->global_seqno;
				   __entry->port = port;
				  ),

		    TP_printk("dev=%u, ring=%u, ctx=%u, seqno=%u, global=%u, port=%u",
			      __entry->dev, __entry->ring, __entry->ctx,
			      __entry->seqno, __entry->global_seqno,
			      __entry->port)
);

DEFINE_EVENT(i915_gem_request_hw, i915_gem_request_in,
	     TP_PROTO(struct drm_i915_gem_request *req, unsigned int port),
	     TP_ARGS(req, port)
);

DEFINE_EVENT(i915_gem_request, i915_gem_request_out,
	     TP_PROTO(struct drm_i915_gem_request *req),
	     TP_ARGS(req)
);
#else
#if !defined(TRACE_HEADER_MULTI_READ)
static inline void
trace_i915_gem_request_submit(struct drm_i915_gem_request *req)
{
}

static inline void
trace_i915_gem_request_execute(struct drm_i915_gem_request *req)
{
}

static inline void
trace_i915_gem_request_in(struct drm_i915_gem_request *req, unsigned int port)
{
}

static inline void
trace_i915_gem_request_out(struct drm_i915_gem_request *req)
{
}
#endif
#endif
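
/*
 * With CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS disabled, the static inline
 * stubs above compile the low-level request tracepoints away, so call sites
 * can invoke them unconditionally, e.g. (placeholder names):
 *
 *	trace_i915_gem_request_in(req, port);
 */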

TRACE_EVENT(intel_engine_notify,
	    TP_PROTO(struct intel_engine_cs *engine, bool waiters),
	    TP_ARGS(engine, waiters),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(u32, ring)
			     __field(u32, seqno)
			     __field(bool, waiters)
			     ),

	    TP_fast_assign(
			   __entry->dev = engine->i915->drm.primary->index;
			   __entry->ring = engine->id;
			   __entry->seqno = intel_engine_get_seqno(engine);
			   __entry->waiters = waiters;
			   ),

	    TP_printk("dev=%u, ring=%u, seqno=%u, waiters=%u",
		      __entry->dev, __entry->ring, __entry->seqno,
		      __entry->waiters)
);

DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
	    TP_PROTO(struct drm_i915_gem_request *req),
	    TP_ARGS(req)
);

TRACE_EVENT(i915_gem_request_wait_begin,
	    TP_PROTO(struct drm_i915_gem_request *req, unsigned int flags),
	    TP_ARGS(req, flags),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(u32, ring)
			     __field(u32, ctx)
			     __field(u32, seqno)
			     __field(u32, global)
			     __field(unsigned int, flags)
			     ),

	    /* NB: the blocking information is racy since mutex_is_locked
	     * doesn't check that the current thread holds the lock. The only
	     * other option would be to pass the boolean information of whether
	     * or not the class was blocking down through the stack which is
	     * less desirable.
	     */
	    TP_fast_assign(
			   __entry->dev = req->i915->drm.primary->index;
			   __entry->ring = req->engine->id;
			   __entry->ctx = req->fence.context;
			   __entry->seqno = req->fence.seqno;
			   __entry->global = req->global_seqno;
			   __entry->flags = flags;
			   ),

	    TP_printk("dev=%u, ring=%u, ctx=%u, seqno=%u, global=%u, blocking=%u, flags=0x%x",
		      __entry->dev, __entry->ring, __entry->ctx, __entry->seqno,
		      __entry->global, !!(__entry->flags & I915_WAIT_LOCKED),
		      __entry->flags)
);

DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
	    TP_PROTO(struct drm_i915_gem_request *req),
	    TP_ARGS(req)
);

TRACE_EVENT(i915_flip_request,
	    TP_PROTO(int plane, struct drm_i915_gem_object *obj),

	    TP_ARGS(plane, obj),

	    TP_STRUCT__entry(
		    __field(int, plane)
		    __field(struct drm_i915_gem_object *, obj)
		    ),

	    TP_fast_assign(
		    __entry->plane = plane;
		    __entry->obj = obj;
		    ),

	    TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
);

TRACE_EVENT(i915_flip_complete,
	    TP_PROTO(int plane, struct drm_i915_gem_object *obj),

	    TP_ARGS(plane, obj),

	    TP_STRUCT__entry(
		    __field(int, plane)
		    __field(struct drm_i915_gem_object *, obj)
		    ),

	    TP_fast_assign(
		    __entry->plane = plane;
		    __entry->obj = obj;
		    ),

	    TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
);

TRACE_EVENT_CONDITION(i915_reg_rw,
	TP_PROTO(bool write, i915_reg_t reg, u64 val, int len, bool trace),

	TP_ARGS(write, reg, val, len, trace),

	TP_CONDITION(trace),

	TP_STRUCT__entry(
		__field(u64, val)
		__field(u32, reg)
		__field(u16, write)
		__field(u16, len)
		),

	TP_fast_assign(
		__entry->val = (u64)val;
		__entry->reg = i915_mmio_reg_offset(reg);
		__entry->write = write;
		__entry->len = len;
		),

	TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
		__entry->write ? "write" : "read",
		__entry->reg, __entry->len,
		(u32)(__entry->val & 0xffffffff),
		(u32)(__entry->val >> 32))
);
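
/*
 * i915_reg_rw is a conditional tracepoint: TP_CONDITION(trace) means nothing
 * is logged unless the "trace" argument is true, so the MMIO accessors can
 * decide per access whether it is worth recording. Illustrative call
 * (placeholder names):
 *
 *	trace_i915_reg_rw(write, reg, val, len, trace);
 */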

TRACE_EVENT(intel_gpu_freq_change,
	    TP_PROTO(u32 freq),
	    TP_ARGS(freq),

	    TP_STRUCT__entry(
			     __field(u32, freq)
			     ),

	    TP_fast_assign(
			   __entry->freq = freq;
			   ),

	    TP_printk("new_freq=%u", __entry->freq)
);

/**
 * DOC: i915_ppgtt_create and i915_ppgtt_release tracepoints
 *
 * With full ppgtt enabled each process using drm will allocate at least one
 * translation table. With these traces it is possible to keep track of the
 * allocation and of the lifetime of the tables; this can be used during
 * testing/debug to verify that we are not leaking ppgtts.
 * These traces identify the ppgtt through the vm pointer, which is also printed
 * by the i915_vma_bind and i915_vma_unbind tracepoints.
 */
DECLARE_EVENT_CLASS(i915_ppgtt,
	TP_PROTO(struct i915_address_space *vm),
	TP_ARGS(vm),

	TP_STRUCT__entry(
			__field(struct i915_address_space *, vm)
			__field(u32, dev)
	),

	TP_fast_assign(
			__entry->vm = vm;
			__entry->dev = vm->i915->drm.primary->index;
	),

	TP_printk("dev=%u, vm=%p", __entry->dev, __entry->vm)
)

DEFINE_EVENT(i915_ppgtt, i915_ppgtt_create,
	TP_PROTO(struct i915_address_space *vm),
	TP_ARGS(vm)
);

DEFINE_EVENT(i915_ppgtt, i915_ppgtt_release,
	TP_PROTO(struct i915_address_space *vm),
	TP_ARGS(vm)
);
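
/*
 * Usage sketch (illustrative; "vm" is a placeholder): the ppgtt constructor
 * and destructor paths are expected to emit
 *
 *	trace_i915_ppgtt_create(vm);
 *	trace_i915_ppgtt_release(vm);
 *
 * and, as the DOC comment above notes, the vm pointer printed here can be
 * matched against the vm=%p field of i915_vma_bind/i915_vma_unbind.
 */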

/**
 * DOC: i915_context_create and i915_context_free tracepoints
 *
 * These tracepoints are used to track creation and deletion of contexts.
 * If full ppgtt is enabled, they also print the address of the vm assigned to
 * the context.
 */
DECLARE_EVENT_CLASS(i915_context,
	TP_PROTO(struct i915_gem_context *ctx),
	TP_ARGS(ctx),

	TP_STRUCT__entry(
			__field(u32, dev)
			__field(struct i915_gem_context *, ctx)
			__field(u32, hw_id)
			__field(struct i915_address_space *, vm)
	),

	TP_fast_assign(
			__entry->dev = ctx->i915->drm.primary->index;
			__entry->ctx = ctx;
			__entry->hw_id = ctx->hw_id;
			__entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
	),

	TP_printk("dev=%u, ctx=%p, ctx_vm=%p, hw_id=%u",
		  __entry->dev, __entry->ctx, __entry->vm, __entry->hw_id)
)

DEFINE_EVENT(i915_context, i915_context_create,
	TP_PROTO(struct i915_gem_context *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(i915_context, i915_context_free,
	TP_PROTO(struct i915_gem_context *ctx),
	TP_ARGS(ctx)
);

/**
 * DOC: switch_mm tracepoint
 *
 * This tracepoint allows tracking of the mm switch, which is an important point
 * in the lifetime of the vm in the legacy submission path. This tracepoint is
 * called only if full ppgtt is enabled.
 */
TRACE_EVENT(switch_mm,
	TP_PROTO(struct intel_engine_cs *engine, struct i915_gem_context *to),

	TP_ARGS(engine, to),

	TP_STRUCT__entry(
			__field(u32, ring)
			__field(struct i915_gem_context *, to)
			__field(struct i915_address_space *, vm)
			__field(u32, dev)
	),

	TP_fast_assign(
			__entry->ring = engine->id;
			__entry->to = to;
			__entry->vm = to->ppgtt ? &to->ppgtt->base : NULL;
			__entry->dev = engine->i915->drm.primary->index;
	),

	TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
		  __entry->dev, __entry->ring, __entry->to, __entry->vm)
);
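
/*
 * Usage sketch (illustrative; "engine" and "to" are placeholders): per the
 * DOC comment above, the legacy submission path emits
 *
 *	trace_switch_mm(engine, to);
 *
 * when switching the hardware to the ppgtt of the context "to".
 */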

#endif /* _I915_TRACE_H_ */

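/*
 * The boilerplate below is the standard kernel tracepoint pattern: exactly
 * one .c file in the driver defines CREATE_TRACE_POINTS before including this
 * header, at which point <trace/define_trace.h> re-reads it (hence the
 * TRACE_HEADER_MULTI_READ guard at the top) and emits the real tracepoint
 * definitions. Illustrative consumer (file name and placement are an
 * assumption, not taken from this tree):
 *
 *	#define CREATE_TRACE_POINTS
 *	#include "i915_trace.h"
 */
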
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#include <trace/define_trace.h>