xref: /linux/include/trace/events/kvm.h (revision d4b996f9ef1fe83d9ce9ad5c1ca0bd8231638ce5)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #if !defined(_TRACE_KVM_MAIN_H) || defined(TRACE_HEADER_MULTI_READ)
3 #define _TRACE_KVM_MAIN_H
4 
5 #include <linux/tracepoint.h>
6 
7 #undef TRACE_SYSTEM
8 #define TRACE_SYSTEM kvm
9 
/*
 * Pretty-printing helpers for KVM_EXIT_* reason codes.  ERSN() builds a
 * { value, "name" } pair for __print_symbolic(), and kvm_trace_exit_reason
 * is the full lookup table consumed by the kvm_userspace_exit event.
 */
10 #define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }
11 
12 #define kvm_trace_exit_reason						\
13 	ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL),	\
14 	ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN),	\
15 	ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR),	\
16 	ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
17 	ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI), ERSN(PAPR_HCALL),	\
18 	ERSN(S390_UCONTROL), ERSN(WATCHDOG), ERSN(S390_TSCH), ERSN(EPR),\
19 	ERSN(SYSTEM_EVENT), ERSN(S390_STSI), ERSN(IOAPIC_EOI),          \
20 	ERSN(HYPERV), ERSN(ARM_NISV), ERSN(X86_RDMSR), ERSN(X86_WRMSR)
21 
/*
 * Records an exit from the guest back to userspace with the KVM_EXIT_*
 * @reason and the @errno of the run.  The printk decodes the pair: a
 * non-negative errno prints the symbolic exit reason; a negative errno
 * prints "restart" for -EINTR or "error" otherwise, with the second
 * %d then carrying the positive errno instead of the reason.
 */
22 TRACE_EVENT(kvm_userspace_exit,
23 	    TP_PROTO(__u32 reason, int errno),
24 	    TP_ARGS(reason, errno),
25 
26 	TP_STRUCT__entry(
27 		__field(	__u32,		reason		)
28 		__field(	int,		errno		)
29 	),
30 
31 	TP_fast_assign(
32 		__entry->reason		= reason;
33 		__entry->errno		= errno;
34 	),
35 
36 	TP_printk("reason %s (%d)",
37 		  __entry->errno < 0 ?
38 		  (__entry->errno == -EINTR ? "restart" : "error") :
39 		  __print_symbolic(__entry->reason, kvm_trace_exit_reason),
40 		  __entry->errno < 0 ? -__entry->errno : __entry->reason)
41 );
42 
/*
 * Records a vCPU wakeup: @waited selects the "wait" vs "poll" label,
 * @ns is the elapsed time in nanoseconds for that state, and @valid
 * selects "valid" vs "invalid" for the polling half of the message.
 * NOTE(review): the precise meaning of @valid (e.g. whether the poll
 * interval counted toward halt-poll statistics) is defined at the
 * call site, not here -- confirm against kvm_vcpu_block().
 */
43 TRACE_EVENT(kvm_vcpu_wakeup,
44 	    TP_PROTO(__u64 ns, bool waited, bool valid),
45 	    TP_ARGS(ns, waited, valid),
46 
47 	TP_STRUCT__entry(
48 		__field(	__u64,		ns		)
49 		__field(	bool,		waited		)
50 		__field(	bool,		valid		)
51 	),
52 
53 	TP_fast_assign(
54 		__entry->ns		= ns;
55 		__entry->waited		= waited;
56 		__entry->valid		= valid;
57 	),
58 
59 	TP_printk("%s time %lld ns, polling %s",
60 		  __entry->waited ? "wait" : "poll",
61 		  __entry->ns,
62 		  __entry->valid ? "valid" : "invalid")
63 );
64 
65 #if defined(CONFIG_HAVE_KVM_IRQFD)
/*
 * Records an interrupt line update: the global system interrupt number
 * @gsi, the asserted @level, and the @irq_source_id of the requester.
 */
66 TRACE_EVENT(kvm_set_irq,
67 	TP_PROTO(unsigned int gsi, int level, int irq_source_id),
68 	TP_ARGS(gsi, level, irq_source_id),
69 
70 	TP_STRUCT__entry(
71 		__field(	unsigned int,	gsi		)
72 		__field(	int,		level		)
73 		__field(	int,		irq_source_id	)
74 	),
75 
76 	TP_fast_assign(
77 		__entry->gsi		= gsi;
78 		__entry->level		= level;
79 		__entry->irq_source_id	= irq_source_id;
80 	),
81 
82 	TP_printk("gsi %u level %d source %d",
83 		  __entry->gsi, __entry->level, __entry->irq_source_id)
84 );
85 #endif /* defined(CONFIG_HAVE_KVM_IRQFD) */
86 
87 #if defined(__KVM_HAVE_IOAPIC)
/*
 * Symbolic names for the 3-bit APIC delivery-mode field (values
 * 0x0-0x7), used by the IOAPIC and MSI trace events below.
 */
88 #define kvm_deliver_mode		\
89 	{0x0, "Fixed"},			\
90 	{0x1, "LowPrio"},		\
91 	{0x2, "SMI"},			\
92 	{0x3, "Res3"},			\
93 	{0x4, "NMI"},			\
94 	{0x5, "INIT"},			\
95 	{0x6, "SIPI"},			\
96 	{0x7, "ExtINT"}
/*
 * Records an interrupt raised through an IOAPIC redirection-table
 * entry @e on @pin.  The printk decodes the 64-bit entry inline:
 * destination id in bits 63:56, vector in bits 7:0, delivery mode in
 * bits 10:8, destination mode in bit 11 (logical/physical), trigger
 * mode in bit 15 (level/edge) and mask in bit 16.  @coalesced is set
 * by the caller when the interrupt was reported as coalesced.
 */
98 TRACE_EVENT(kvm_ioapic_set_irq,
99 	    TP_PROTO(__u64 e, int pin, bool coalesced),
100 	    TP_ARGS(e, pin, coalesced),
101 
102 	TP_STRUCT__entry(
103 		__field(	__u64,		e		)
104 		__field(	int,		pin		)
105 		__field(	bool,		coalesced	)
106 	),
107 
108 	TP_fast_assign(
109 		__entry->e		= e;
110 		__entry->pin		= pin;
111 		__entry->coalesced	= coalesced;
112 	),
113 
114 	TP_printk("pin %u dst %x vec %u (%s|%s|%s%s)%s",
115 		  __entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
116 		  __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
117 		  (__entry->e & (1<<11)) ? "logical" : "physical",
118 		  (__entry->e & (1<<15)) ? "level" : "edge",
119 		  (__entry->e & (1<<16)) ? "|masked" : "",
120 		  __entry->coalesced ? " (coalesced)" : "")
121 );
122 
/*
 * Records a delayed-EOI reinjection of an IOAPIC entry @e.  The entry
 * is decoded exactly as in kvm_ioapic_set_irq (destination 63:56,
 * vector 7:0, delivery mode 10:8, dest mode bit 11, trigger bit 15,
 * mask bit 16), minus the pin/coalesced information.
 */
123 TRACE_EVENT(kvm_ioapic_delayed_eoi_inj,
124 	    TP_PROTO(__u64 e),
125 	    TP_ARGS(e),
126 
127 	TP_STRUCT__entry(
128 		__field(	__u64,		e		)
129 	),
130 
131 	TP_fast_assign(
132 		__entry->e		= e;
133 	),
134 
135 	TP_printk("dst %x vec %u (%s|%s|%s%s)",
136 		  (u8)(__entry->e >> 56), (u8)__entry->e,
137 		  __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
138 		  (__entry->e & (1<<11)) ? "logical" : "physical",
139 		  (__entry->e & (1<<15)) ? "level" : "edge",
140 		  (__entry->e & (1<<16)) ? "|masked" : "")
141 );
142 
/*
 * Records an MSI delivery from its @address/@data pair.  The printed
 * destination combines address bits 19:12 with the extended
 * destination bits taken from the upper 32 bits of the address
 * (masked to 0xffffff00); the vector and delivery mode come from the
 * data word; address bit 2 selects logical/physical destination mode
 * and address bit 3 is the redirection hint.
 */
143 TRACE_EVENT(kvm_msi_set_irq,
144 	    TP_PROTO(__u64 address, __u64 data),
145 	    TP_ARGS(address, data),
146 
147 	TP_STRUCT__entry(
148 		__field(	__u64,		address		)
149 		__field(	__u64,		data		)
150 	),
151 
152 	TP_fast_assign(
153 		__entry->address	= address;
154 		__entry->data		= data;
155 	),
156 
157 	TP_printk("dst %llx vec %u (%s|%s|%s%s)",
158 		  (u8)(__entry->address >> 12) | ((__entry->address >> 32) & 0xffffff00),
159 		  (u8)__entry->data,
160 		  __print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
161 		  (__entry->address & (1<<2)) ? "logical" : "physical",
162 		  (__entry->data & (1<<15)) ? "level" : "edge",
163 		  (__entry->address & (1<<3)) ? "|rh" : "")
164 );
165 
/*
 * Symbolic names for the KVM_IRQCHIP_* ids; when defined, kvm_ack_irq
 * below prints the irqchip symbolically instead of numerically.
 */
166 #define kvm_irqchips						\
167 	{KVM_IRQCHIP_PIC_MASTER,	"PIC master"},		\
168 	{KVM_IRQCHIP_PIC_SLAVE,		"PIC slave"},		\
169 	{KVM_IRQCHIP_IOAPIC,		"IOAPIC"}
170 
171 #endif /* defined(__KVM_HAVE_IOAPIC) */
172 
173 #if defined(CONFIG_HAVE_KVM_IRQFD)
174 
/*
 * Format/argument pair for kvm_ack_irq: print the irqchip id
 * symbolically when the kvm_irqchips table exists (__KVM_HAVE_IOAPIC
 * architectures), numerically otherwise.
 */
175 #ifdef kvm_irqchips
176 #define kvm_ack_irq_string "irqchip %s pin %u"
177 #define kvm_ack_irq_parm  __print_symbolic(__entry->irqchip, kvm_irqchips), __entry->pin
178 #else
179 #define kvm_ack_irq_string "irqchip %d pin %u"
180 #define kvm_ack_irq_parm  __entry->irqchip, __entry->pin
181 #endif
182 
/* Records an interrupt acknowledge on @irqchip/@pin. */
183 TRACE_EVENT(kvm_ack_irq,
184 	TP_PROTO(unsigned int irqchip, unsigned int pin),
185 	TP_ARGS(irqchip, pin),
186 
187 	TP_STRUCT__entry(
188 		__field(	unsigned int,	irqchip		)
189 		__field(	unsigned int,	pin		)
190 	),
191 
192 	TP_fast_assign(
193 		__entry->irqchip	= irqchip;
194 		__entry->pin		= pin;
195 	),
196 
197 	TP_printk(kvm_ack_irq_string, kvm_ack_irq_parm)
198 );
199 
200 #endif /* defined(CONFIG_HAVE_KVM_IRQFD) */
201 
202 
203 
/*
 * MMIO access directions recorded by kvm_mmio.  "unsatisfied-read"
 * marks a read traced before its data was available (kvm_mmio is then
 * called with a NULL value pointer).
 */
204 #define KVM_TRACE_MMIO_READ_UNSATISFIED 0
205 #define KVM_TRACE_MMIO_READ 1
206 #define KVM_TRACE_MMIO_WRITE 2
207 
208 #define kvm_trace_symbol_mmio \
209 	{ KVM_TRACE_MMIO_READ_UNSATISFIED, "unsatisfied-read" }, \
210 	{ KVM_TRACE_MMIO_READ, "read" }, \
211 	{ KVM_TRACE_MMIO_WRITE, "write" }
212 
/*
 * Records an MMIO access of @type (see kvm_trace_symbol_mmio) at
 * guest physical address @gpa.  Up to sizeof(u64) bytes of the data
 * are copied out of @val; a NULL @val (an unsatisfied read) leaves
 * the logged value as 0.  Only min(len, 8) bytes are captured, so a
 * longer access is truncated in the trace.
 */
213 TRACE_EVENT(kvm_mmio,
214 	TP_PROTO(int type, int len, u64 gpa, void *val),
215 	TP_ARGS(type, len, gpa, val),
216 
217 	TP_STRUCT__entry(
218 		__field(	u32,	type		)
219 		__field(	u32,	len		)
220 		__field(	u64,	gpa		)
221 		__field(	u64,	val		)
222 	),
223 
224 	TP_fast_assign(
225 		__entry->type		= type;
226 		__entry->len		= len;
227 		__entry->gpa		= gpa;
228 		__entry->val		= 0;
229 		if (val)
230 			memcpy(&__entry->val, val,
231 			       min_t(u32, sizeof(__entry->val), len));
232 	),
233 
234 	TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
235 		  __print_symbolic(__entry->type, kvm_trace_symbol_mmio),
236 		  __entry->len, __entry->gpa, __entry->val)
237 );
238 
/* Labels for the kvm_fpu event's load flag: 0 = unload, 1 = load. */
239 #define kvm_fpu_load_symbol	\
240 	{0, "unload"},		\
241 	{1, "load"}
242 
/* Records guest FPU state being loaded (@load != 0) or unloaded. */
243 TRACE_EVENT(kvm_fpu,
244 	TP_PROTO(int load),
245 	TP_ARGS(load),
246 
247 	TP_STRUCT__entry(
248 		__field(	u32,	        load		)
249 	),
250 
251 	TP_fast_assign(
252 		__entry->load		= load;
253 	),
254 
255 	TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol))
256 );
257 
258 #ifdef CONFIG_KVM_ASYNC_PF
/*
 * Common shape for async page fault "get page" events: the faulting
 * guest virtual address and the corresponding guest frame number.
 */
259 DECLARE_EVENT_CLASS(kvm_async_get_page_class,
260 
261 	TP_PROTO(u64 gva, u64 gfn),
262 
263 	TP_ARGS(gva, gfn),
264 
265 	TP_STRUCT__entry(
266 		__field(__u64, gva)
267 		__field(u64, gfn)
268 	),
269 
270 	TP_fast_assign(
271 		__entry->gva = gva;
272 		__entry->gfn = gfn;
273 	),
274 
275 	TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
276 );
277 
/* Async-pf attempt to resolve a page without blocking the vCPU. */
278 DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,

279 
280 	TP_PROTO(u64 gva, u64 gfn),
281 
282 	TP_ARGS(gva, gfn)
283 );
284 
/* A second async fault hit while one was already outstanding. */
285 DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_doublefault,
286 
287 	TP_PROTO(u64 gva, u64 gfn),
288 
289 	TP_ARGS(gva, gfn)
290 );
291 
/*
 * Common shape for async-pf notification events delivered to the
 * guest: the async-pf @token identifying the outstanding fault and
 * the guest virtual address it belongs to.
 */
292 DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,

293 
294 	TP_PROTO(u64 token, u64 gva),

295 
296 	TP_ARGS(token, gva),
297 
298 	TP_STRUCT__entry(
299 		__field(__u64, token)
300 		__field(__u64, gva)
301 	),
302 
303 	TP_fast_assign(
304 		__entry->token = token;
305 		__entry->gva = gva;
306 	),
307 
308 	TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)

309 
310 );
311 
/* "Page not present" async-pf notification injected into the guest. */
312 DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,

313 
314 	TP_PROTO(u64 token, u64 gva),
315 
316 	TP_ARGS(token, gva)
317 );
318 
/* "Page ready" async-pf completion notification for the same token. */
319 DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,

320 
321 	TP_PROTO(u64 token, u64 gva),
322 
323 	TP_ARGS(token, gva)
324 );
325 
/*
 * Records completion of async page fault work: the host virtual
 * @address that was faulted in and the guest virtual address @gva of
 * the original fault.
 */
326 TRACE_EVENT(
327 	kvm_async_pf_completed,
328 	TP_PROTO(unsigned long address, u64 gva),
329 	TP_ARGS(address, gva),
330 
331 	TP_STRUCT__entry(
332 		__field(unsigned long, address)
333 		__field(u64, gva)
334 		),
335 
336 	TP_fast_assign(
337 		__entry->address = address;
338 		__entry->gva = gva;
339 		),
340 
341 	TP_printk("gva %#llx address %#lx",  __entry->gva,
342 		  __entry->address)
343 );
344 
345 #endif
346 
/*
 * Records an adjustment of a vCPU's halt-polling window: @grow
 * selects the "grow"/"shrink" label, @new is the updated halt_poll_ns
 * value and @old the previous one.  Use the convenience wrappers
 * below rather than calling the tracepoint directly.
 */
347 TRACE_EVENT(kvm_halt_poll_ns,
348 	TP_PROTO(bool grow, unsigned int vcpu_id, unsigned int new,
349 		 unsigned int old),
350 	TP_ARGS(grow, vcpu_id, new, old),
351 
352 	TP_STRUCT__entry(
353 		__field(bool, grow)
354 		__field(unsigned int, vcpu_id)
355 		__field(unsigned int, new)
356 		__field(unsigned int, old)
357 	),
358 
359 	TP_fast_assign(
360 		__entry->grow           = grow;
361 		__entry->vcpu_id        = vcpu_id;
362 		__entry->new            = new;
363 		__entry->old            = old;
364 	),
365 
366 	TP_printk("vcpu %u: halt_poll_ns %u (%s %u)",
367 			__entry->vcpu_id,
368 			__entry->new,
369 			__entry->grow ? "grow" : "shrink",
370 			__entry->old)
371 );
372 
/* Direction-specific wrappers around trace_kvm_halt_poll_ns(). */
373 #define trace_kvm_halt_poll_ns_grow(vcpu_id, new, old) \
374 	trace_kvm_halt_poll_ns(true, vcpu_id, new, old)
375 #define trace_kvm_halt_poll_ns_shrink(vcpu_id, new, old) \
376 	trace_kvm_halt_poll_ns(false, vcpu_id, new, old)
377 
/*
 * Records a dirty GFN (@slot, @offset) being pushed onto a dirty
 * ring, together with a snapshot of the ring's index/dirty/reset
 * counters; the printed "used" count is dirty_index - reset_index.
 */
378 TRACE_EVENT(kvm_dirty_ring_push,
379 	TP_PROTO(struct kvm_dirty_ring *ring, u32 slot, u64 offset),
380 	TP_ARGS(ring, slot, offset),
381 
382 	TP_STRUCT__entry(
383 		__field(int, index)
384 		__field(u32, dirty_index)
385 		__field(u32, reset_index)
386 		__field(u32, slot)
387 		__field(u64, offset)
388 	),
389 
390 	TP_fast_assign(
391 		__entry->index          = ring->index;
392 		__entry->dirty_index    = ring->dirty_index;
393 		__entry->reset_index    = ring->reset_index;
394 		__entry->slot           = slot;
395 		__entry->offset         = offset;
396 	),
397 
398 	TP_printk("ring %d: dirty 0x%x reset 0x%x "
399 		  "slot %u offset 0x%llx (used %u)",
400 		  __entry->index, __entry->dirty_index,
401 		  __entry->reset_index,  __entry->slot, __entry->offset,
402 		  __entry->dirty_index - __entry->reset_index)
403 );
404 
/*
 * Records a dirty-ring reset, snapshotting the ring's index and its
 * dirty/reset counters; "used" is again dirty_index - reset_index.
 */
405 TRACE_EVENT(kvm_dirty_ring_reset,
406 	TP_PROTO(struct kvm_dirty_ring *ring),
407 	TP_ARGS(ring),
408 
409 	TP_STRUCT__entry(
410 		__field(int, index)
411 		__field(u32, dirty_index)
412 		__field(u32, reset_index)
413 	),
414 
415 	TP_fast_assign(
416 		__entry->index          = ring->index;
417 		__entry->dirty_index    = ring->dirty_index;
418 		__entry->reset_index    = ring->reset_index;
419 	),
420 
421 	TP_printk("ring %d: dirty 0x%x reset 0x%x (used %u)",
422 		  __entry->index, __entry->dirty_index, __entry->reset_index,
423 		  __entry->dirty_index - __entry->reset_index)
424 );
425 
/*
 * Records a vCPU exiting because of the dirty ring, logging only the
 * vcpu id.  NOTE(review): presumably fired when the ring is soft-full
 * and the vCPU must return to userspace -- confirm at the call site.
 */
426 TRACE_EVENT(kvm_dirty_ring_exit,
427 	TP_PROTO(struct kvm_vcpu *vcpu),
428 	TP_ARGS(vcpu),
429 
430 	TP_STRUCT__entry(
431 	    __field(int, vcpu_id)
432 	),
433 
434 	TP_fast_assign(
435 	    __entry->vcpu_id = vcpu->vcpu_id;
436 	),
437 
438 	TP_printk("vcpu %d", __entry->vcpu_id)
439 );
440 
/*
 * MMU-notifier callback trace: records the host virtual address range
 * [@start, @end) being unmapped from the guest.
 */
441 TRACE_EVENT(kvm_unmap_hva_range,
442 	TP_PROTO(unsigned long start, unsigned long end),
443 	TP_ARGS(start, end),
444 
445 	TP_STRUCT__entry(
446 		__field(	unsigned long,	start		)
447 		__field(	unsigned long,	end		)
448 	),
449 
450 	TP_fast_assign(
451 		__entry->start		= start;
452 		__entry->end		= end;
453 	),
454 
455 	TP_printk("mmu notifier unmap range: %#016lx -- %#016lx",
456 		  __entry->start, __entry->end)
457 );
458 
/*
 * MMU-notifier callback trace: records the host virtual address whose
 * PTE is being changed (change_pte notification).
 */
459 TRACE_EVENT(kvm_set_spte_hva,
460 	TP_PROTO(unsigned long hva),
461 	TP_ARGS(hva),
462 
463 	TP_STRUCT__entry(
464 		__field(	unsigned long,	hva		)
465 	),
466 
467 	TP_fast_assign(
468 		__entry->hva		= hva;
469 	),
470 
471 	TP_printk("mmu notifier set pte hva: %#016lx", __entry->hva)
472 );
473 
/*
 * MMU-notifier callback trace: records the host virtual address range
 * [@start, @end) being aged (accessed-bit clearing).
 */
474 TRACE_EVENT(kvm_age_hva,
475 	TP_PROTO(unsigned long start, unsigned long end),
476 	TP_ARGS(start, end),
477 
478 	TP_STRUCT__entry(
479 		__field(	unsigned long,	start		)
480 		__field(	unsigned long,	end		)
481 	),
482 
483 	TP_fast_assign(
484 		__entry->start		= start;
485 		__entry->end		= end;
486 	),
487 
488 	TP_printk("mmu notifier age hva: %#016lx -- %#016lx",
489 		  __entry->start, __entry->end)
490 );
491 
/*
 * MMU-notifier callback trace: records the host virtual address being
 * tested for recent access (test_young notification).
 */
492 TRACE_EVENT(kvm_test_age_hva,
493 	TP_PROTO(unsigned long hva),
494 	TP_ARGS(hva),
495 
496 	TP_STRUCT__entry(
497 		__field(	unsigned long,	hva		)
498 	),
499 
500 	TP_fast_assign(
501 		__entry->hva		= hva;
502 	),
503 
504 	TP_printk("mmu notifier test age hva: %#016lx", __entry->hva)
505 );
506 
507 #endif /* _TRACE_KVM_MAIN_H */
508 
509 /* This part must be outside protection */
510 #include <trace/define_trace.h>
511