xref: /linux/include/trace/events/kvm.h (revision e58e871becec2d3b04ed91c0c16fe8deac9c9dfa)
#if !defined(_TRACE_KVM_MAIN_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_MAIN_H

#include <linux/tracepoint.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm

#define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }

#define kvm_trace_exit_reason						\
	ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL),	\
	ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN),	\
	ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR),	\
	ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
	ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI), ERSN(PAPR_HCALL),	\
	ERSN(S390_UCONTROL), ERSN(WATCHDOG), ERSN(S390_TSCH)

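/*
 * Logged when a vcpu ioctl returns to userspace: "reason" is the
 * KVM_EXIT_* code and "errno" the return value handed back to the
 * caller.  Illustrative rendering only (values not from a real trace):
 * "reason KVM_EXIT_IO (2)" for a normal exit, or "reason restart (4)"
 * when the return value is -EINTR.
 */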
TRACE_EVENT(kvm_userspace_exit,
	    TP_PROTO(__u32 reason, int errno),
	    TP_ARGS(reason, errno),

	TP_STRUCT__entry(
		__field(	__u32,		reason		)
		__field(	int,		errno		)
	),

	TP_fast_assign(
		__entry->reason		= reason;
		__entry->errno		= errno;
	),

	TP_printk("reason %s (%d)",
		  __entry->errno < 0 ?
		  (__entry->errno == -EINTR ? "restart" : "error") :
		  __print_symbolic(__entry->reason, kvm_trace_exit_reason),
		  __entry->errno < 0 ? -__entry->errno : __entry->reason)
);

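/*
 * Logged when a halted vcpu is woken up.  "ns" is how long the vcpu
 * blocked, "waited" distinguishes a real wait from a wakeup caught by
 * halt polling, and "valid" is set by the caller to mark whether the
 * wakeup counts as valid for halt-polling accounting (not visible in
 * this header).  Illustrative output: "wait time 150000 ns, polling
 * invalid".
 */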
TRACE_EVENT(kvm_vcpu_wakeup,
	    TP_PROTO(__u64 ns, bool waited, bool valid),
	    TP_ARGS(ns, waited, valid),

	TP_STRUCT__entry(
		__field(	__u64,		ns		)
		__field(	bool,		waited		)
		__field(	bool,		valid		)
	),

	TP_fast_assign(
		__entry->ns		= ns;
		__entry->waited		= waited;
		__entry->valid		= valid;
	),

	TP_printk("%s time %lld ns, polling %s",
		  __entry->waited ? "wait" : "poll",
		  __entry->ns,
		  __entry->valid ? "valid" : "invalid")
);

#if defined(CONFIG_HAVE_KVM_IRQFD)
TRACE_EVENT(kvm_set_irq,
	TP_PROTO(unsigned int gsi, int level, int irq_source_id),
	TP_ARGS(gsi, level, irq_source_id),

	TP_STRUCT__entry(
		__field(	unsigned int,	gsi		)
		__field(	int,		level		)
		__field(	int,		irq_source_id	)
	),

	TP_fast_assign(
		__entry->gsi		= gsi;
		__entry->level		= level;
		__entry->irq_source_id	= irq_source_id;
	),

	TP_printk("gsi %u level %d source %d",
		  __entry->gsi, __entry->level, __entry->irq_source_id)
);
#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */

#if defined(__KVM_HAVE_IOAPIC)
#define kvm_deliver_mode		\
	{0x0, "Fixed"},			\
	{0x1, "LowPrio"},		\
	{0x2, "SMI"},			\
	{0x3, "Res3"},			\
	{0x4, "NMI"},			\
	{0x5, "INIT"},			\
	{0x6, "SIPI"},			\
	{0x7, "ExtINT"}

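/*
 * "e" is the raw 64-bit IOAPIC redirection entry for the pin, decoded
 * by TP_printk below: destination in bits 63:56, vector in bits 7:0,
 * delivery mode in bits 10:8 (via the kvm_deliver_mode table above),
 * and bits 11, 15 and 16 selecting logical/physical destination,
 * level/edge trigger and the mask bit.
 */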
TRACE_EVENT(kvm_ioapic_set_irq,
	    TP_PROTO(__u64 e, int pin, bool coalesced),
	    TP_ARGS(e, pin, coalesced),

	TP_STRUCT__entry(
		__field(	__u64,		e		)
		__field(	int,		pin		)
		__field(	bool,		coalesced	)
	),

	TP_fast_assign(
		__entry->e		= e;
		__entry->pin		= pin;
		__entry->coalesced	= coalesced;
	),

	TP_printk("pin %u dst %x vec %u (%s|%s|%s%s)%s",
		  __entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
		  __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
		  (__entry->e & (1<<11)) ? "logical" : "physical",
		  (__entry->e & (1<<15)) ? "level" : "edge",
		  (__entry->e & (1<<16)) ? "|masked" : "",
		  __entry->coalesced ? " (coalesced)" : "")
);

TRACE_EVENT(kvm_ioapic_delayed_eoi_inj,
	    TP_PROTO(__u64 e),
	    TP_ARGS(e),

	TP_STRUCT__entry(
		__field(	__u64,		e		)
	),

	TP_fast_assign(
		__entry->e		= e;
	),

	TP_printk("dst %x vec %u (%s|%s|%s%s)",
		  (u8)(__entry->e >> 56), (u8)__entry->e,
		  __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
		  (__entry->e & (1<<11)) ? "logical" : "physical",
		  (__entry->e & (1<<15)) ? "level" : "edge",
		  (__entry->e & (1<<16)) ? "|masked" : "")
);

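/*
 * MSI injection, decoded as in the x86 MSI format: the destination ID
 * comes from address bits 19:12 plus any extended destination bits
 * carried in the upper 32 bits of the address, the vector and delivery
 * mode come from the data word, and address bits 2 and 3 give the
 * logical/physical selector and the redirection hint.
 */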
TRACE_EVENT(kvm_msi_set_irq,
	    TP_PROTO(__u64 address, __u64 data),
	    TP_ARGS(address, data),

	TP_STRUCT__entry(
		__field(	__u64,		address		)
		__field(	__u64,		data		)
	),

	TP_fast_assign(
		__entry->address	= address;
		__entry->data		= data;
	),

	TP_printk("dst %llx vec %u (%s|%s|%s%s)",
		  (u8)(__entry->address >> 12) | ((__entry->address >> 32) & 0xffffff00),
		  (u8)__entry->data,
		  __print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
		  (__entry->address & (1<<2)) ? "logical" : "physical",
		  (__entry->data & (1<<15)) ? "level" : "edge",
		  (__entry->address & (1<<3)) ? "|rh" : "")
);

#define kvm_irqchips						\
	{KVM_IRQCHIP_PIC_MASTER,	"PIC master"},		\
	{KVM_IRQCHIP_PIC_SLAVE,		"PIC slave"},		\
	{KVM_IRQCHIP_IOAPIC,		"IOAPIC"}

#endif /* defined(__KVM_HAVE_IOAPIC) */

#if defined(CONFIG_HAVE_KVM_IRQFD)

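/*
 * kvm_ack_irq prints the irqchip symbolically ("PIC master", "IOAPIC",
 * ...) when the kvm_irqchips table above is available, and falls back
 * to a plain number on architectures without __KVM_HAVE_IOAPIC.
 */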
#ifdef kvm_irqchips
#define kvm_ack_irq_string "irqchip %s pin %u"
#define kvm_ack_irq_parm  __print_symbolic(__entry->irqchip, kvm_irqchips), __entry->pin
#else
#define kvm_ack_irq_string "irqchip %d pin %u"
#define kvm_ack_irq_parm  __entry->irqchip, __entry->pin
#endif

TRACE_EVENT(kvm_ack_irq,
	TP_PROTO(unsigned int irqchip, unsigned int pin),
	TP_ARGS(irqchip, pin),

	TP_STRUCT__entry(
		__field(	unsigned int,	irqchip		)
		__field(	unsigned int,	pin		)
	),

	TP_fast_assign(
		__entry->irqchip	= irqchip;
		__entry->pin		= pin;
	),

	TP_printk(kvm_ack_irq_string, kvm_ack_irq_parm)
);

#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */

#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
#define KVM_TRACE_MMIO_READ 1
#define KVM_TRACE_MMIO_WRITE 2

#define kvm_trace_symbol_mmio \
	{ KVM_TRACE_MMIO_READ_UNSATISFIED, "unsatisfied-read" }, \
	{ KVM_TRACE_MMIO_READ, "read" }, \
	{ KVM_TRACE_MMIO_WRITE, "write" }

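/*
 * One event per MMIO access handled by KVM.  "type" is one of the
 * KVM_TRACE_MMIO_* values above; an unsatisfied read is one that could
 * not be completed in the kernel.  Illustrative output only:
 * "mmio write len 4 gpa 0xfee00300 val 0xff".
 */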
TRACE_EVENT(kvm_mmio,
	TP_PROTO(int type, int len, u64 gpa, u64 val),
	TP_ARGS(type, len, gpa, val),

	TP_STRUCT__entry(
		__field(	u32,	type		)
		__field(	u32,	len		)
		__field(	u64,	gpa		)
		__field(	u64,	val		)
	),

	TP_fast_assign(
		__entry->type		= type;
		__entry->len		= len;
		__entry->gpa		= gpa;
		__entry->val		= val;
	),

	TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
		  __print_symbolic(__entry->type, kvm_trace_symbol_mmio),
		  __entry->len, __entry->gpa, __entry->val)
);

#define kvm_fpu_load_symbol	\
	{0, "unload"},		\
	{1, "load"}

TRACE_EVENT(kvm_fpu,
	TP_PROTO(int load),
	TP_ARGS(load),

	TP_STRUCT__entry(
		__field(	u32,	load		)
	),

	TP_fast_assign(
		__entry->load		= load;
	),

	TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol))
);

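/*
 * Fired from the page aging path.  The host virtual address is
 * reconstructed from the memslot as
 * ((gfn - slot->base_gfn) << PAGE_SHIFT) + slot->userspace_addr,
 * and "referenced" reports whether the page was found young.
 */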
TRACE_EVENT(kvm_age_page,
	TP_PROTO(ulong gfn, int level, struct kvm_memory_slot *slot, int ref),
	TP_ARGS(gfn, level, slot, ref),

	TP_STRUCT__entry(
		__field(	u64,	hva		)
		__field(	u64,	gfn		)
		__field(	u8,	level		)
		__field(	u8,	referenced	)
	),

	TP_fast_assign(
		__entry->gfn		= gfn;
		__entry->level		= level;
		__entry->hva		= ((gfn - slot->base_gfn) <<
					    PAGE_SHIFT) + slot->userspace_addr;
		__entry->referenced	= ref;
	),

	TP_printk("hva %llx gfn %llx level %u %s",
		  __entry->hva, __entry->gfn, __entry->level,
		  __entry->referenced ? "YOUNG" : "OLD")
);

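/*
 * The async page fault events below share two DECLARE_EVENT_CLASS
 * templates: one keyed on (gva, gfn) for the get-page path and one
 * keyed on (token, gva) for the not-present/ready notifications, with
 * DEFINE_EVENT providing the individual tracepoint names.
 */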
#ifdef CONFIG_KVM_ASYNC_PF
DECLARE_EVENT_CLASS(kvm_async_get_page_class,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn),

	TP_STRUCT__entry(
		__field(__u64, gva)
		__field(u64, gfn)
	),

	TP_fast_assign(
		__entry->gva = gva;
		__entry->gfn = gfn;
	),

	TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
);

DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn)
);

DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_doublefault,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn)
);

DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva),

	TP_STRUCT__entry(
		__field(__u64, token)
		__field(__u64, gva)
	),

	TP_fast_assign(
		__entry->token = token;
		__entry->gva = gva;
	),

	TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)
);

DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva)
);

DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva)
);

TRACE_EVENT(kvm_async_pf_completed,
	TP_PROTO(unsigned long address, u64 gva),
	TP_ARGS(address, gva),

	TP_STRUCT__entry(
		__field(unsigned long, address)
		__field(u64, gva)
	),

	TP_fast_assign(
		__entry->address = address;
		__entry->gva = gva;
	),

	TP_printk("gva %#llx address %#lx", __entry->gva,
		  __entry->address)
);

#endif /* CONFIG_KVM_ASYNC_PF */

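/*
 * Records adjustments of a vcpu's halt_poll_ns window: "grow" selects
 * the direction and "old"/"new" give the value before and after the
 * change.  The helper macros below fill in the direction flag for
 * callers.
 */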
TRACE_EVENT(kvm_halt_poll_ns,
	TP_PROTO(bool grow, unsigned int vcpu_id, unsigned int new,
		 unsigned int old),
	TP_ARGS(grow, vcpu_id, new, old),

	TP_STRUCT__entry(
		__field(bool, grow)
		__field(unsigned int, vcpu_id)
		__field(unsigned int, new)
		__field(unsigned int, old)
	),

	TP_fast_assign(
		__entry->grow           = grow;
		__entry->vcpu_id        = vcpu_id;
		__entry->new            = new;
		__entry->old            = old;
	),

	TP_printk("vcpu %u: halt_poll_ns %u (%s %u)",
			__entry->vcpu_id,
			__entry->new,
			__entry->grow ? "grow" : "shrink",
			__entry->old)
);

#define trace_kvm_halt_poll_ns_grow(vcpu_id, new, old) \
	trace_kvm_halt_poll_ns(true, vcpu_id, new, old)
#define trace_kvm_halt_poll_ns_shrink(vcpu_id, new, old) \
	trace_kvm_halt_poll_ns(false, vcpu_id, new, old)
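/*
 * Minimal usage sketch (variable names are illustrative, not taken
 * from the callers):
 *	trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, new_ns, old_ns);
 *	trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, new_ns, old_ns);
 */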

#endif /* _TRACE_KVM_MAIN_H */

/* This part must be outside protection */
#include <trace/define_trace.h>