xref: /linux/include/trace/events/kvm.h (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #if !defined(_TRACE_KVM_MAIN_H) || defined(TRACE_HEADER_MULTI_READ)
3 #define _TRACE_KVM_MAIN_H
4 
5 #include <linux/tracepoint.h>
6 
7 #undef TRACE_SYSTEM
8 #define TRACE_SYSTEM kvm
9 
/* Expand a KVM_EXIT_* constant to a { value, "name" } pair for __print_symbolic(). */
10 #define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }
11 
/* Symbolic-name table covering the KVM_EXIT_* userspace exit reason codes. */
12 #define kvm_trace_exit_reason						\
13 	ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL),	\
14 	ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN),	\
15 	ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR),	\
16 	ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
17 	ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI), ERSN(PAPR_HCALL),	\
18 	ERSN(S390_UCONTROL), ERSN(WATCHDOG), ERSN(S390_TSCH), ERSN(EPR),\
19 	ERSN(SYSTEM_EVENT), ERSN(S390_STSI), ERSN(IOAPIC_EOI),          \
20 	ERSN(HYPERV), ERSN(ARM_NISV), ERSN(X86_RDMSR), ERSN(X86_WRMSR)
21 
/*
 * Tracepoint for an exit to userspace. Prints the symbolic KVM_EXIT_*
 * reason for errno >= 0; a negative errno is rendered instead as
 * "restart" (-EINTR) or "error", followed by the positive errno value.
 */
22 TRACE_EVENT(kvm_userspace_exit,
23 	    TP_PROTO(__u32 reason, int errno),
24 	    TP_ARGS(reason, errno),
25 
26 	TP_STRUCT__entry(
27 		__field(	__u32,		reason		)
28 		__field(	int,		errno		)
29 	),
30 
31 	TP_fast_assign(
32 		__entry->reason		= reason;
33 		__entry->errno		= errno;
34 	),
35 
36 	TP_printk("reason %s (%d)",
37 		  __entry->errno < 0 ?
38 		  (__entry->errno == -EINTR ? "restart" : "error") :
39 		  __print_symbolic(__entry->reason, kvm_trace_exit_reason),
40 		  __entry->errno < 0 ? -__entry->errno : __entry->reason)
41 );
42 
/*
 * Tracepoint for a vCPU wakeup: @ns is the elapsed time in nanoseconds,
 * @waited distinguishes a real wait ("wait") from busy polling ("poll"),
 * and @valid reports whether the polling interval was valid.
 */
43 TRACE_EVENT(kvm_vcpu_wakeup,
44 	    TP_PROTO(__u64 ns, bool waited, bool valid),
45 	    TP_ARGS(ns, waited, valid),
46 
47 	TP_STRUCT__entry(
48 		__field(	__u64,		ns		)
49 		__field(	bool,		waited		)
50 		__field(	bool,		valid		)
51 	),
52 
53 	TP_fast_assign(
54 		__entry->ns		= ns;
55 		__entry->waited		= waited;
56 		__entry->valid		= valid;
57 	),
58 
59 	TP_printk("%s time %lld ns, polling %s",
60 		  __entry->waited ? "wait" : "poll",
61 		  __entry->ns,
62 		  __entry->valid ? "valid" : "invalid")
63 );
64 
65 #if defined(CONFIG_HAVE_KVM_IRQCHIP)
/*
 * Tracepoint for setting an interrupt line: records the GSI number,
 * the asserted level, and which IRQ source id triggered it.
 * Only available when an in-kernel irqchip is supported.
 */
66 TRACE_EVENT(kvm_set_irq,
67 	TP_PROTO(unsigned int gsi, int level, int irq_source_id),
68 	TP_ARGS(gsi, level, irq_source_id),
69 
70 	TP_STRUCT__entry(
71 		__field(	unsigned int,	gsi		)
72 		__field(	int,		level		)
73 		__field(	int,		irq_source_id	)
74 	),
75 
76 	TP_fast_assign(
77 		__entry->gsi		= gsi;
78 		__entry->level		= level;
79 		__entry->irq_source_id	= irq_source_id;
80 	),
81 
82 	TP_printk("gsi %u level %d source %d",
83 		  __entry->gsi, __entry->level, __entry->irq_source_id)
84 );
85 #endif /* defined(CONFIG_HAVE_KVM_IRQCHIP) */
86 
87 #if defined(__KVM_HAVE_IOAPIC)
/* Symbolic names for the 3-bit interrupt delivery mode (values 0-7). */
88 #define kvm_deliver_mode		\
89 	{0x0, "Fixed"},			\
90 	{0x1, "LowPrio"},		\
91 	{0x2, "SMI"},			\
92 	{0x3, "Res3"},			\
93 	{0x4, "NMI"},			\
94 	{0x5, "INIT"},			\
95 	{0x6, "SIPI"},			\
96 	{0x7, "ExtINT"}
97 
/*
 * Tracepoint for an IOAPIC pin being set. @e is the raw 64-bit
 * redirection-table entry; the printk decodes it: destination in bits
 * 63:56, vector in bits 7:0, delivery mode in bits 10:8, bit 11 =
 * logical vs physical, bit 15 = level vs edge, bit 16 = masked.
 * @coalesced marks an interrupt that was merged with a pending one.
 */
98 TRACE_EVENT(kvm_ioapic_set_irq,
99 	    TP_PROTO(__u64 e, int pin, bool coalesced),
100 	    TP_ARGS(e, pin, coalesced),
101 
102 	TP_STRUCT__entry(
103 		__field(	__u64,		e		)
104 		__field(	int,		pin		)
105 		__field(	bool,		coalesced	)
106 	),
107 
108 	TP_fast_assign(
109 		__entry->e		= e;
110 		__entry->pin		= pin;
111 		__entry->coalesced	= coalesced;
112 	),
113 
114 	TP_printk("pin %u dst %x vec %u (%s|%s|%s%s)%s",
115 		  __entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
116 		  __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
117 		  (__entry->e & (1<<11)) ? "logical" : "physical",
118 		  (__entry->e & (1<<15)) ? "level" : "edge",
119 		  (__entry->e & (1<<16)) ? "|masked" : "",
120 		  __entry->coalesced ? " (coalesced)" : "")
121 );
122 
/*
 * Tracepoint for a delayed-EOI injection. @e is the raw IOAPIC
 * redirection-table entry, decoded in the printk the same way as
 * kvm_ioapic_set_irq (dst 63:56, vec 7:0, mode 10:8, bits 11/15/16).
 */
123 TRACE_EVENT(kvm_ioapic_delayed_eoi_inj,
124 	    TP_PROTO(__u64 e),
125 	    TP_ARGS(e),
126 
127 	TP_STRUCT__entry(
128 		__field(	__u64,		e		)
129 	),
130 
131 	TP_fast_assign(
132 		__entry->e		= e;
133 	),
134 
135 	TP_printk("dst %x vec %u (%s|%s|%s%s)",
136 		  (u8)(__entry->e >> 56), (u8)__entry->e,
137 		  __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
138 		  (__entry->e & (1<<11)) ? "logical" : "physical",
139 		  (__entry->e & (1<<15)) ? "level" : "edge",
140 		  (__entry->e & (1<<16)) ? "|masked" : "")
141 );
142 
/*
 * Tracepoint for delivering an MSI. The printk decodes the raw
 * address/data pair: destination from address bits 19:12 combined with
 * the upper-address bits (>> 32, masked 0xffffff00), vector from data
 * bits 7:0, delivery mode from data bits 10:8, address bit 2 = logical
 * vs physical, data bit 15 = level vs edge, address bit 3 = redirection
 * hint ("rh").
 */
143 TRACE_EVENT(kvm_msi_set_irq,
144 	    TP_PROTO(__u64 address, __u64 data),
145 	    TP_ARGS(address, data),
146 
147 	TP_STRUCT__entry(
148 		__field(	__u64,		address		)
149 		__field(	__u64,		data		)
150 	),
151 
152 	TP_fast_assign(
153 		__entry->address	= address;
154 		__entry->data		= data;
155 	),
156 
157 	TP_printk("dst %llx vec %u (%s|%s|%s%s)",
158 		  (u8)(__entry->address >> 12) | ((__entry->address >> 32) & 0xffffff00),
159 		  (u8)__entry->data,
160 		  __print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
161 		  (__entry->address & (1<<2)) ? "logical" : "physical",
162 		  (__entry->data & (1<<15)) ? "level" : "edge",
163 		  (__entry->address & (1<<3)) ? "|rh" : "")
164 );
165 
/* Symbolic names for the in-kernel irqchip identifiers (see kvm_ack_irq). */
166 #define kvm_irqchips						\
167 	{KVM_IRQCHIP_PIC_MASTER,	"PIC master"},		\
168 	{KVM_IRQCHIP_PIC_SLAVE,		"PIC slave"},		\
169 	{KVM_IRQCHIP_IOAPIC,		"IOAPIC"}
170 
171 #endif /* defined(__KVM_HAVE_IOAPIC) */
172 
173 #if defined(CONFIG_HAVE_KVM_IRQCHIP)
174 
/*
 * Pick the kvm_ack_irq output format: print the irqchip symbolically
 * when a kvm_irqchips name table exists (__KVM_HAVE_IOAPIC above),
 * otherwise fall back to the raw irqchip number.
 */
175 #ifdef kvm_irqchips
176 #define kvm_ack_irq_string "irqchip %s pin %u"
177 #define kvm_ack_irq_parm  __print_symbolic(__entry->irqchip, kvm_irqchips), __entry->pin
178 #else
179 #define kvm_ack_irq_string "irqchip %d pin %u"
180 #define kvm_ack_irq_parm  __entry->irqchip, __entry->pin
181 #endif
182 
/* Tracepoint for an interrupt acknowledgement on a given irqchip/pin. */
183 TRACE_EVENT(kvm_ack_irq,
184 	TP_PROTO(unsigned int irqchip, unsigned int pin),
185 	TP_ARGS(irqchip, pin),
186 
187 	TP_STRUCT__entry(
188 		__field(	unsigned int,	irqchip		)
189 		__field(	unsigned int,	pin		)
190 	),
191 
192 	TP_fast_assign(
193 		__entry->irqchip	= irqchip;
194 		__entry->pin		= pin;
195 	),
196 
197 	TP_printk(kvm_ack_irq_string, kvm_ack_irq_parm)
198 );
199 
200 #endif /* defined(CONFIG_HAVE_KVM_IRQCHIP) */
201 
202 
203 
/* MMIO access types and their symbolic names for kvm_mmio below. */
204 #define KVM_TRACE_MMIO_READ_UNSATISFIED 0
205 #define KVM_TRACE_MMIO_READ 1
206 #define KVM_TRACE_MMIO_WRITE 2
207 
208 #define kvm_trace_symbol_mmio \
209 	{ KVM_TRACE_MMIO_READ_UNSATISFIED, "unsatisfied-read" }, \
210 	{ KVM_TRACE_MMIO_READ, "read" }, \
211 	{ KVM_TRACE_MMIO_WRITE, "write" }
212 
/*
 * Tracepoint for an MMIO access. At most sizeof(u64) bytes of @val are
 * captured (clamped to @len); a NULL @val leaves the recorded value 0
 * (e.g. for an unsatisfied read where no data exists yet).
 */
213 TRACE_EVENT(kvm_mmio,
214 	TP_PROTO(int type, int len, u64 gpa, void *val),
215 	TP_ARGS(type, len, gpa, val),
216 
217 	TP_STRUCT__entry(
218 		__field(	u32,	type		)
219 		__field(	u32,	len		)
220 		__field(	u64,	gpa		)
221 		__field(	u64,	val		)
222 	),
223 
224 	TP_fast_assign(
225 		__entry->type		= type;
226 		__entry->len		= len;
227 		__entry->gpa		= gpa;
228 		__entry->val		= 0;
229 		if (val)
230 			memcpy(&__entry->val, val,
231 			       min_t(u32, sizeof(__entry->val), len));
232 	),
233 
234 	TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
235 		  __print_symbolic(__entry->type, kvm_trace_symbol_mmio),
236 		  __entry->len, __entry->gpa, __entry->val)
237 );
238 
/* IOCSR access types and their symbolic names for kvm_iocsr below. */
239 #define KVM_TRACE_IOCSR_READ_UNSATISFIED 0
240 #define KVM_TRACE_IOCSR_READ 1
241 #define KVM_TRACE_IOCSR_WRITE 2
242 
243 #define kvm_trace_symbol_iocsr \
244 	{ KVM_TRACE_IOCSR_READ_UNSATISFIED, "unsatisfied-read" }, \
245 	{ KVM_TRACE_IOCSR_READ, "read" }, \
246 	{ KVM_TRACE_IOCSR_WRITE, "write" }
247 
/*
 * Tracepoint for an IOCSR access; mirrors kvm_mmio: up to sizeof(u64)
 * bytes of @val captured (clamped to @len), NULL @val recorded as 0.
 * NOTE(review): presumably used by architectures with IOCSR-style
 * registers (e.g. LoongArch) — callers are not visible in this header.
 */
248 TRACE_EVENT(kvm_iocsr,
249 	TP_PROTO(int type, int len, u64 gpa, void *val),
250 	TP_ARGS(type, len, gpa, val),
251 
252 	TP_STRUCT__entry(
253 		__field(	u32,	type	)
254 		__field(	u32,	len	)
255 		__field(	u64,	gpa	)
256 		__field(	u64,	val	)
257 	),
258 
259 	TP_fast_assign(
260 		__entry->type		= type;
261 		__entry->len		= len;
262 		__entry->gpa		= gpa;
263 		__entry->val		= 0;
264 		if (val)
265 			memcpy(&__entry->val, val,
266 			       min_t(u32, sizeof(__entry->val), len));
267 	),
268 
269 	TP_printk("iocsr %s len %u gpa 0x%llx val 0x%llx",
270 		  __print_symbolic(__entry->type, kvm_trace_symbol_iocsr),
271 		  __entry->len, __entry->gpa, __entry->val)
272 );
273 
/* Symbolic names for the FPU load state: 0 = "unload", 1 = "load". */
274 #define kvm_fpu_load_symbol	\
275 	{0, "unload"},		\
276 	{1, "load"}
277 
/* Tracepoint for loading/unloading guest FPU state. */
278 TRACE_EVENT(kvm_fpu,
279 	TP_PROTO(int load),
280 	TP_ARGS(load),
281 
282 	TP_STRUCT__entry(
283 		__field(	u32,	        load		)
284 	),
285 
286 	TP_fast_assign(
287 		__entry->load		= load;
288 	),
289 
290 	TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol))
291 );
292 
293 #ifdef CONFIG_KVM_ASYNC_PF
/*
 * Shared event class for async page fault get-page events: records the
 * faulting guest virtual address and the guest frame number.
 */
294 DECLARE_EVENT_CLASS(kvm_async_get_page_class,
295 
296 	TP_PROTO(u64 gva, u64 gfn),
297 
298 	TP_ARGS(gva, gfn),
299 
300 	TP_STRUCT__entry(
301 		__field(__u64, gva)
302 		__field(u64, gfn)
303 	),
304 
305 	TP_fast_assign(
306 		__entry->gva = gva;
307 		__entry->gfn = gfn;
308 	),
309 
310 	TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
311 );
312 
/* Instance: an attempt to get the page asynchronously. */
313 DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,
314 
315 	TP_PROTO(u64 gva, u64 gfn),
316 
317 	TP_ARGS(gva, gfn)
318 );
319 
/* Instance: the same gva/gfn faulted again while async work was pending. */
320 DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_repeated_fault,
321 
322 	TP_PROTO(u64 gva, u64 gfn),
323 
324 	TP_ARGS(gva, gfn)
325 );
326 
/*
 * Shared event class for async page fault notifications delivered to
 * the guest: records the token identifying the fault and the gva.
 */
327 DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,
328 
329 	TP_PROTO(u64 token, u64 gva),
330 
331 	TP_ARGS(token, gva),
332 
333 	TP_STRUCT__entry(
334 		__field(__u64, token)
335 		__field(__u64, gva)
336 	),
337 
338 	TP_fast_assign(
339 		__entry->token = token;
340 		__entry->gva = gva;
341 	),
342 
343 	TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)
344 
345 );
346 
/* Instance: "page not present" notification for the given token/gva. */
347 DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,
348 
349 	TP_PROTO(u64 token, u64 gva),
350 
351 	TP_ARGS(token, gva)
352 );
353 
/* Instance: "page ready" notification for the given token/gva. */
354 DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,
355 
356 	TP_PROTO(u64 token, u64 gva),
357 
358 	TP_ARGS(token, gva)
359 );
360 
/*
 * Tracepoint for completion of async page fault work: records the host
 * address that was resolved and the guest virtual address it backs.
 */
361 TRACE_EVENT(
362 	kvm_async_pf_completed,
363 	TP_PROTO(unsigned long address, u64 gva),
364 	TP_ARGS(address, gva),
365 
366 	TP_STRUCT__entry(
367 		__field(unsigned long, address)
368 		__field(u64, gva)
369 		),
370 
371 	TP_fast_assign(
372 		__entry->address = address;
373 		__entry->gva = gva;
374 		),
375 
376 	TP_printk("gva %#llx address %#lx",  __entry->gva,
377 		  __entry->address)
378 );
379 
380 #endif
381 
/*
 * Tracepoint for a change of a vCPU's halt-polling interval: records
 * whether it grew or shrank and the new/old values in nanoseconds.
 * Use the trace_kvm_halt_poll_ns_grow/shrink wrappers below.
 */
382 TRACE_EVENT(kvm_halt_poll_ns,
383 	TP_PROTO(bool grow, unsigned int vcpu_id, unsigned int new,
384 		 unsigned int old),
385 	TP_ARGS(grow, vcpu_id, new, old),
386 
387 	TP_STRUCT__entry(
388 		__field(bool, grow)
389 		__field(unsigned int, vcpu_id)
390 		__field(unsigned int, new)
391 		__field(unsigned int, old)
392 	),
393 
394 	TP_fast_assign(
395 		__entry->grow           = grow;
396 		__entry->vcpu_id        = vcpu_id;
397 		__entry->new            = new;
398 		__entry->old            = old;
399 	),
400 
401 	TP_printk("vcpu %u: halt_poll_ns %u (%s %u)",
402 			__entry->vcpu_id,
403 			__entry->new,
404 			__entry->grow ? "grow" : "shrink",
405 			__entry->old)
406 );
407 
/* Convenience wrappers fixing the grow flag to true/false respectively. */
408 #define trace_kvm_halt_poll_ns_grow(vcpu_id, new, old) \
409 	trace_kvm_halt_poll_ns(true, vcpu_id, new, old)
410 #define trace_kvm_halt_poll_ns_shrink(vcpu_id, new, old) \
411 	trace_kvm_halt_poll_ns(false, vcpu_id, new, old)
412 
/*
 * Tracepoint for pushing a dirty page (slot, offset) onto a dirty ring.
 * Snapshots the ring's index and dirty/reset cursors; the "used" count
 * in the output is computed as dirty_index - reset_index.
 */
413 TRACE_EVENT(kvm_dirty_ring_push,
414 	TP_PROTO(struct kvm_dirty_ring *ring, u32 slot, u64 offset),
415 	TP_ARGS(ring, slot, offset),
416 
417 	TP_STRUCT__entry(
418 		__field(int, index)
419 		__field(u32, dirty_index)
420 		__field(u32, reset_index)
421 		__field(u32, slot)
422 		__field(u64, offset)
423 	),
424 
425 	TP_fast_assign(
426 		__entry->index          = ring->index;
427 		__entry->dirty_index    = ring->dirty_index;
428 		__entry->reset_index    = ring->reset_index;
429 		__entry->slot           = slot;
430 		__entry->offset         = offset;
431 	),
432 
433 	TP_printk("ring %d: dirty 0x%x reset 0x%x "
434 		  "slot %u offset 0x%llx (used %u)",
435 		  __entry->index, __entry->dirty_index,
436 		  __entry->reset_index,  __entry->slot, __entry->offset,
437 		  __entry->dirty_index - __entry->reset_index)
438 );
439 
/*
 * Tracepoint for resetting a dirty ring: snapshots the ring index and
 * dirty/reset cursors ("used" = dirty_index - reset_index).
 */
440 TRACE_EVENT(kvm_dirty_ring_reset,
441 	TP_PROTO(struct kvm_dirty_ring *ring),
442 	TP_ARGS(ring),
443 
444 	TP_STRUCT__entry(
445 		__field(int, index)
446 		__field(u32, dirty_index)
447 		__field(u32, reset_index)
448 	),
449 
450 	TP_fast_assign(
451 		__entry->index          = ring->index;
452 		__entry->dirty_index    = ring->dirty_index;
453 		__entry->reset_index    = ring->reset_index;
454 	),
455 
456 	TP_printk("ring %d: dirty 0x%x reset 0x%x (used %u)",
457 		  __entry->index, __entry->dirty_index, __entry->reset_index,
458 		  __entry->dirty_index - __entry->reset_index)
459 );
460 
/*
 * Tracepoint for a vCPU exit related to the dirty ring; records only
 * the vcpu_id. NOTE(review): presumably fired when the vCPU exits to
 * userspace because its ring is full — confirm against the caller.
 */
461 TRACE_EVENT(kvm_dirty_ring_exit,
462 	TP_PROTO(struct kvm_vcpu *vcpu),
463 	TP_ARGS(vcpu),
464 
465 	TP_STRUCT__entry(
466 	    __field(int, vcpu_id)
467 	),
468 
469 	TP_fast_assign(
470 	    __entry->vcpu_id = vcpu->vcpu_id;
471 	),
472 
473 	TP_printk("vcpu %d", __entry->vcpu_id)
474 );
475 
/*
 * Tracepoint for an MMU-notifier unmap of the host virtual address
 * range [start, end).
 */
476 TRACE_EVENT(kvm_unmap_hva_range,
477 	TP_PROTO(unsigned long start, unsigned long end),
478 	TP_ARGS(start, end),
479 
480 	TP_STRUCT__entry(
481 		__field(	unsigned long,	start		)
482 		__field(	unsigned long,	end		)
483 	),
484 
485 	TP_fast_assign(
486 		__entry->start		= start;
487 		__entry->end		= end;
488 	),
489 
490 	TP_printk("mmu notifier unmap range: %#016lx -- %#016lx",
491 		  __entry->start, __entry->end)
492 );
493 
/*
 * Tracepoint for an MMU-notifier age (clear accessed/young) callback
 * over the host virtual address range [start, end).
 */
494 TRACE_EVENT(kvm_age_hva,
495 	TP_PROTO(unsigned long start, unsigned long end),
496 	TP_ARGS(start, end),
497 
498 	TP_STRUCT__entry(
499 		__field(	unsigned long,	start		)
500 		__field(	unsigned long,	end		)
501 	),
502 
503 	TP_fast_assign(
504 		__entry->start		= start;
505 		__entry->end		= end;
506 	),
507 
508 	TP_printk("mmu notifier age hva: %#016lx -- %#016lx",
509 		  __entry->start, __entry->end)
510 );
511 
/*
 * Tracepoint for an MMU-notifier test-age (query young, no clear)
 * callback on a single host virtual address.
 */
512 TRACE_EVENT(kvm_test_age_hva,
513 	TP_PROTO(unsigned long hva),
514 	TP_ARGS(hva),
515 
516 	TP_STRUCT__entry(
517 		__field(	unsigned long,	hva		)
518 	),
519 
520 	TP_fast_assign(
521 		__entry->hva		= hva;
522 	),
523 
524 	TP_printk("mmu notifier test age hva: %#016lx", __entry->hva)
525 );
526 
527 #endif /* _TRACE_KVM_MAIN_H */
528 
529 /* This part must be outside protection */
530 #include <trace/define_trace.h>
531