/* arch/x86/kvm/trace.h (xref revision 08ec212c0f92cbf30e3ecc7349f18151714041d6) */
#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_H

#include <linux/tracepoint.h>
#include <asm/vmx.h>
#include <asm/svm.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm

/*
 * Tracepoint for guest mode entry.
 */
TRACE_EVENT(kvm_entry,
	TP_PROTO(unsigned int vcpu_id),
	TP_ARGS(vcpu_id),

	TP_STRUCT__entry(
		__field(	unsigned int,	vcpu_id		)
	),

	TP_fast_assign(
		__entry->vcpu_id	= vcpu_id;
	),

	TP_printk("vcpu %u", __entry->vcpu_id)
);
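
/*
 * Editorial note: every TRACE_EVENT() in this file follows the same pattern
 * as kvm_entry above: TP_PROTO()/TP_ARGS() give the tracepoint's call
 * signature, TP_STRUCT__entry() declares the fields stored in the trace
 * record, TP_fast_assign() fills those fields when the tracepoint fires, and
 * TP_printk() defines how the record is rendered when the trace is read.
 */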

/*
 * Tracepoint for hypercall.
 */
TRACE_EVENT(kvm_hypercall,
	TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1,
		 unsigned long a2, unsigned long a3),
	TP_ARGS(nr, a0, a1, a2, a3),

	TP_STRUCT__entry(
		__field(	unsigned long,	nr		)
		__field(	unsigned long,	a0		)
		__field(	unsigned long,	a1		)
		__field(	unsigned long,	a2		)
		__field(	unsigned long,	a3		)
	),

	TP_fast_assign(
		__entry->nr		= nr;
		__entry->a0		= a0;
		__entry->a1		= a1;
		__entry->a2		= a2;
		__entry->a3		= a3;
	),

	TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx",
		 __entry->nr, __entry->a0, __entry->a1,  __entry->a2,
		 __entry->a3)
);

/*
 * Tracepoint for Hyper-V hypercall.
 */
TRACE_EVENT(kvm_hv_hypercall,
	TP_PROTO(__u16 code, bool fast, __u16 rep_cnt, __u16 rep_idx,
		 __u64 ingpa, __u64 outgpa),
	TP_ARGS(code, fast, rep_cnt, rep_idx, ingpa, outgpa),

	TP_STRUCT__entry(
		__field(	__u16,		rep_cnt		)
		__field(	__u16,		rep_idx		)
		__field(	__u64,		ingpa		)
		__field(	__u64,		outgpa		)
		__field(	__u16,		code		)
		__field(	bool,		fast		)
	),

	TP_fast_assign(
		__entry->rep_cnt	= rep_cnt;
		__entry->rep_idx	= rep_idx;
		__entry->ingpa		= ingpa;
		__entry->outgpa		= outgpa;
		__entry->code		= code;
		__entry->fast		= fast;
	),

	TP_printk("code 0x%x %s cnt 0x%x idx 0x%x in 0x%llx out 0x%llx",
		  __entry->code, __entry->fast ? "fast" : "slow",
		  __entry->rep_cnt, __entry->rep_idx,  __entry->ingpa,
		  __entry->outgpa)
);

/*
 * Tracepoint for PIO.
 */
TRACE_EVENT(kvm_pio,
	TP_PROTO(unsigned int rw, unsigned int port, unsigned int size,
		 unsigned int count),
	TP_ARGS(rw, port, size, count),

	TP_STRUCT__entry(
		__field(	unsigned int,	rw		)
		__field(	unsigned int,	port		)
		__field(	unsigned int,	size		)
		__field(	unsigned int,	count		)
	),

	TP_fast_assign(
		__entry->rw		= rw;
		__entry->port		= port;
		__entry->size		= size;
		__entry->count		= count;
	),

	TP_printk("pio_%s at 0x%x size %d count %d",
		  __entry->rw ? "write" : "read",
		  __entry->port, __entry->size, __entry->count)
);

/*
 * Tracepoint for cpuid.
 */
TRACE_EVENT(kvm_cpuid,
	TP_PROTO(unsigned int function, unsigned long rax, unsigned long rbx,
		 unsigned long rcx, unsigned long rdx),
	TP_ARGS(function, rax, rbx, rcx, rdx),

	TP_STRUCT__entry(
		__field(	unsigned int,	function	)
		__field(	unsigned long,	rax		)
		__field(	unsigned long,	rbx		)
		__field(	unsigned long,	rcx		)
		__field(	unsigned long,	rdx		)
	),

	TP_fast_assign(
		__entry->function	= function;
		__entry->rax		= rax;
		__entry->rbx		= rbx;
		__entry->rcx		= rcx;
		__entry->rdx		= rdx;
	),

	TP_printk("func %x rax %lx rbx %lx rcx %lx rdx %lx",
		  __entry->function, __entry->rax,
		  __entry->rbx, __entry->rcx, __entry->rdx)
);

#define AREG(x) { APIC_##x, "APIC_" #x }

#define kvm_trace_symbol_apic						    \
	AREG(ID), AREG(LVR), AREG(TASKPRI), AREG(ARBPRI), AREG(PROCPRI),    \
	AREG(EOI), AREG(RRR), AREG(LDR), AREG(DFR), AREG(SPIV), AREG(ISR),  \
	AREG(TMR), AREG(IRR), AREG(ESR), AREG(ICR), AREG(ICR2), AREG(LVTT), \
	AREG(LVTTHMR), AREG(LVTPC), AREG(LVT0), AREG(LVT1), AREG(LVTERR),   \
	AREG(TMICT), AREG(TMCCT), AREG(TDCR), AREG(SELF_IPI), AREG(EFEAT),  \
	AREG(ECTRL)
/*
 * Tracepoint for apic access.
 */
TRACE_EVENT(kvm_apic,
	TP_PROTO(unsigned int rw, unsigned int reg, unsigned int val),
	TP_ARGS(rw, reg, val),

	TP_STRUCT__entry(
		__field(	unsigned int,	rw		)
		__field(	unsigned int,	reg		)
		__field(	unsigned int,	val		)
	),

	TP_fast_assign(
		__entry->rw		= rw;
		__entry->reg		= reg;
		__entry->val		= val;
	),

	TP_printk("apic_%s %s = 0x%x",
		  __entry->rw ? "write" : "read",
		  __print_symbolic(__entry->reg, kvm_trace_symbol_apic),
		  __entry->val)
);

#define trace_kvm_apic_read(reg, val)		trace_kvm_apic(0, reg, val)
#define trace_kvm_apic_write(reg, val)		trace_kvm_apic(1, reg, val)
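
/*
 * Illustrative usage (editorial sketch, not taken from a caller in this
 * file): local APIC emulation code would record a register access as, e.g.,
 *
 *	trace_kvm_apic_read(APIC_ICR, val);
 *
 * which expands to trace_kvm_apic(0, APIC_ICR, val) and is rendered by the
 * TP_printk() above as "apic_read APIC_ICR = 0x...".
 */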

#define KVM_ISA_VMX   1
#define KVM_ISA_SVM   2

/*
 * Tracepoint for kvm guest exit:
 */
TRACE_EVENT(kvm_exit,
	TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu, u32 isa),
	TP_ARGS(exit_reason, vcpu, isa),

	TP_STRUCT__entry(
		__field(	unsigned int,	exit_reason	)
		__field(	unsigned long,	guest_rip	)
		__field(	u32,	        isa             )
		__field(	u64,	        info1           )
		__field(	u64,	        info2           )
	),

	TP_fast_assign(
		__entry->exit_reason	= exit_reason;
		__entry->guest_rip	= kvm_rip_read(vcpu);
		__entry->isa            = isa;
		kvm_x86_ops->get_exit_info(vcpu, &__entry->info1,
					   &__entry->info2);
	),

	TP_printk("reason %s rip 0x%lx info %llx %llx",
		 (__entry->isa == KVM_ISA_VMX) ?
		 __print_symbolic(__entry->exit_reason, VMX_EXIT_REASONS) :
		 __print_symbolic(__entry->exit_reason, SVM_EXIT_REASONS),
		 __entry->guest_rip, __entry->info1, __entry->info2)
);
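
/*
 * Illustrative usage (editorial sketch): the vendor exit handlers pass their
 * own ISA tag so the TP_printk() above can pick the matching exit-reason
 * symbol table; a VMX exit path would do something like
 *
 *	trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);
 *
 * while an SVM exit path would pass its exit code and KVM_ISA_SVM instead.
 */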

/*
 * Tracepoint for kvm interrupt injection:
 */
TRACE_EVENT(kvm_inj_virq,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq),

	TP_STRUCT__entry(
		__field(	unsigned int,	irq		)
	),

	TP_fast_assign(
		__entry->irq		= irq;
	),

	TP_printk("irq %u", __entry->irq)
);

#define EXS(x) { x##_VECTOR, "#" #x }

#define kvm_trace_sym_exc						\
	EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM),	\
	EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF),		\
	EXS(MF), EXS(MC)

/*
 * Tracepoint for kvm exception injection:
 */
TRACE_EVENT(kvm_inj_exception,
	TP_PROTO(unsigned exception, bool has_error, unsigned error_code),
	TP_ARGS(exception, has_error, error_code),

	TP_STRUCT__entry(
		__field(	u8,	exception	)
		__field(	u8,	has_error	)
		__field(	u32,	error_code	)
	),

	TP_fast_assign(
		__entry->exception	= exception;
		__entry->has_error	= has_error;
		__entry->error_code	= error_code;
	),

	TP_printk("%s (0x%x)",
		  __print_symbolic(__entry->exception, kvm_trace_sym_exc),
		  /* FIXME: don't print error_code if not present */
		  __entry->has_error ? __entry->error_code : 0)
);

/*
 * Tracepoint for page fault.
 */
TRACE_EVENT(kvm_page_fault,
	TP_PROTO(unsigned long fault_address, unsigned int error_code),
	TP_ARGS(fault_address, error_code),

	TP_STRUCT__entry(
		__field(	unsigned long,	fault_address	)
		__field(	unsigned int,	error_code	)
	),

	TP_fast_assign(
		__entry->fault_address	= fault_address;
		__entry->error_code	= error_code;
	),

	TP_printk("address %lx error_code %x",
		  __entry->fault_address, __entry->error_code)
);

/*
 * Tracepoint for guest MSR access.
 */
TRACE_EVENT(kvm_msr,
	TP_PROTO(unsigned write, u32 ecx, u64 data, bool exception),
	TP_ARGS(write, ecx, data, exception),

	TP_STRUCT__entry(
		__field(	unsigned,	write		)
		__field(	u32,		ecx		)
		__field(	u64,		data		)
		__field(	u8,		exception	)
	),

	TP_fast_assign(
		__entry->write		= write;
		__entry->ecx		= ecx;
		__entry->data		= data;
		__entry->exception	= exception;
	),

	TP_printk("msr_%s %x = 0x%llx%s",
		  __entry->write ? "write" : "read",
		  __entry->ecx, __entry->data,
		  __entry->exception ? " (#GP)" : "")
);

#define trace_kvm_msr_read(ecx, data)      trace_kvm_msr(0, ecx, data, false)
#define trace_kvm_msr_write(ecx, data)     trace_kvm_msr(1, ecx, data, false)
#define trace_kvm_msr_read_ex(ecx)         trace_kvm_msr(0, ecx, 0, true)
#define trace_kvm_msr_write_ex(ecx, data)  trace_kvm_msr(1, ecx, data, true)
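
/*
 * Illustrative usage (editorial sketch): the MSR emulation path would log a
 * successful write with, e.g.,
 *
 *	trace_kvm_msr_write(ecx, data);
 *
 * and a faulting access with trace_kvm_msr_read_ex()/trace_kvm_msr_write_ex(),
 * which set the exception field so the TP_printk() above appends " (#GP)".
 */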

/*
 * Tracepoint for guest CR access.
 */
TRACE_EVENT(kvm_cr,
	TP_PROTO(unsigned int rw, unsigned int cr, unsigned long val),
	TP_ARGS(rw, cr, val),

	TP_STRUCT__entry(
		__field(	unsigned int,	rw		)
		__field(	unsigned int,	cr		)
		__field(	unsigned long,	val		)
	),

	TP_fast_assign(
		__entry->rw		= rw;
		__entry->cr		= cr;
		__entry->val		= val;
	),

	TP_printk("cr_%s %x = 0x%lx",
		  __entry->rw ? "write" : "read",
		  __entry->cr, __entry->val)
);

#define trace_kvm_cr_read(cr, val)		trace_kvm_cr(0, cr, val)
#define trace_kvm_cr_write(cr, val)		trace_kvm_cr(1, cr, val)
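
/*
 * Illustrative usage (editorial sketch): an intercepted control-register
 * access would be logged as, e.g.,
 *
 *	trace_kvm_cr_write(0, val);
 *
 * producing "cr_write 0 = 0x..." via the TP_printk() above; the cr number and
 * val here are purely illustrative.
 */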

TRACE_EVENT(kvm_pic_set_irq,
	    TP_PROTO(__u8 chip, __u8 pin, __u8 elcr, __u8 imr, bool coalesced),
	    TP_ARGS(chip, pin, elcr, imr, coalesced),

	TP_STRUCT__entry(
		__field(	__u8,		chip		)
		__field(	__u8,		pin		)
		__field(	__u8,		elcr		)
		__field(	__u8,		imr		)
		__field(	bool,		coalesced	)
	),

	TP_fast_assign(
		__entry->chip		= chip;
		__entry->pin		= pin;
		__entry->elcr		= elcr;
		__entry->imr		= imr;
		__entry->coalesced	= coalesced;
	),

	TP_printk("chip %u pin %u (%s%s)%s",
		  __entry->chip, __entry->pin,
		  (__entry->elcr & (1 << __entry->pin)) ? "level":"edge",
		  (__entry->imr & (1 << __entry->pin)) ? "|masked":"",
		  __entry->coalesced ? " (coalesced)" : "")
);

#define kvm_apic_dst_shorthand		\
	{0x0, "dst"},			\
	{0x1, "self"},			\
	{0x2, "all"},			\
	{0x3, "all-but-self"}
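
/*
 * Editorial note: kvm_apic_dst_shorthand labels the two-bit destination
 * shorthand field of the APIC ICR (extracted below as icr_low >> 18 & 0x3);
 * kvm_deliver_mode, defined elsewhere, plays the same role for the delivery
 * mode field (icr_low >> 8 & 0x7).
 */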

TRACE_EVENT(kvm_apic_ipi,
	    TP_PROTO(__u32 icr_low, __u32 dest_id),
	    TP_ARGS(icr_low, dest_id),

	TP_STRUCT__entry(
		__field(	__u32,		icr_low		)
		__field(	__u32,		dest_id		)
	),

	TP_fast_assign(
		__entry->icr_low	= icr_low;
		__entry->dest_id	= dest_id;
	),

	TP_printk("dst %x vec %u (%s|%s|%s|%s|%s)",
		  __entry->dest_id, (u8)__entry->icr_low,
		  __print_symbolic((__entry->icr_low >> 8 & 0x7),
				   kvm_deliver_mode),
		  (__entry->icr_low & (1<<11)) ? "logical" : "physical",
		  (__entry->icr_low & (1<<14)) ? "assert" : "de-assert",
		  (__entry->icr_low & (1<<15)) ? "level" : "edge",
		  __print_symbolic((__entry->icr_low >> 18 & 0x3),
				   kvm_apic_dst_shorthand))
);

TRACE_EVENT(kvm_apic_accept_irq,
	    TP_PROTO(__u32 apicid, __u16 dm, __u8 tm, __u8 vec, bool coalesced),
	    TP_ARGS(apicid, dm, tm, vec, coalesced),

	TP_STRUCT__entry(
		__field(	__u32,		apicid		)
		__field(	__u16,		dm		)
		__field(	__u8,		tm		)
		__field(	__u8,		vec		)
		__field(	bool,		coalesced	)
	),

	TP_fast_assign(
		__entry->apicid		= apicid;
		__entry->dm		= dm;
		__entry->tm		= tm;
		__entry->vec		= vec;
		__entry->coalesced	= coalesced;
	),

	TP_printk("apicid %x vec %u (%s|%s)%s",
		  __entry->apicid, __entry->vec,
		  __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode),
		  __entry->tm ? "level" : "edge",
		  __entry->coalesced ? " (coalesced)" : "")
);

TRACE_EVENT(kvm_eoi,
	    TP_PROTO(struct kvm_lapic *apic, int vector),
	    TP_ARGS(apic, vector),

	TP_STRUCT__entry(
		__field(	__u32,		apicid		)
		__field(	int,		vector		)
	),

	TP_fast_assign(
		__entry->apicid		= apic->vcpu->vcpu_id;
		__entry->vector		= vector;
	),

	TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector)
);

TRACE_EVENT(kvm_pv_eoi,
	    TP_PROTO(struct kvm_lapic *apic, int vector),
	    TP_ARGS(apic, vector),

	TP_STRUCT__entry(
		__field(	__u32,		apicid		)
		__field(	int,		vector		)
	),

	TP_fast_assign(
		__entry->apicid		= apic->vcpu->vcpu_id;
		__entry->vector		= vector;
	),

	TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector)
);

/*
 * Tracepoint for nested VMRUN
 */
TRACE_EVENT(kvm_nested_vmrun,
	    TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl,
		     __u32 event_inj, bool npt),
	    TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, npt),

	TP_STRUCT__entry(
		__field(	__u64,		rip		)
		__field(	__u64,		vmcb		)
		__field(	__u64,		nested_rip	)
		__field(	__u32,		int_ctl		)
		__field(	__u32,		event_inj	)
		__field(	bool,		npt		)
	),

	TP_fast_assign(
		__entry->rip		= rip;
		__entry->vmcb		= vmcb;
		__entry->nested_rip	= nested_rip;
		__entry->int_ctl	= int_ctl;
		__entry->event_inj	= event_inj;
		__entry->npt		= npt;
	),

	TP_printk("rip: 0x%016llx vmcb: 0x%016llx nrip: 0x%016llx int_ctl: 0x%08x "
		  "event_inj: 0x%08x npt: %s",
		__entry->rip, __entry->vmcb, __entry->nested_rip,
		__entry->int_ctl, __entry->event_inj,
		__entry->npt ? "on" : "off")
);

TRACE_EVENT(kvm_nested_intercepts,
	    TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions, __u64 intercept),
	    TP_ARGS(cr_read, cr_write, exceptions, intercept),

	TP_STRUCT__entry(
		__field(	__u16,		cr_read		)
		__field(	__u16,		cr_write	)
		__field(	__u32,		exceptions	)
		__field(	__u64,		intercept	)
	),

	TP_fast_assign(
		__entry->cr_read	= cr_read;
		__entry->cr_write	= cr_write;
		__entry->exceptions	= exceptions;
		__entry->intercept	= intercept;
	),

	TP_printk("cr_read: %04x cr_write: %04x excp: %08x intercept: %016llx",
		__entry->cr_read, __entry->cr_write, __entry->exceptions,
		__entry->intercept)
);
/*
 * Tracepoint for #VMEXIT while nested
 */
TRACE_EVENT(kvm_nested_vmexit,
	    TP_PROTO(__u64 rip, __u32 exit_code,
		     __u64 exit_info1, __u64 exit_info2,
		     __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa),
	    TP_ARGS(rip, exit_code, exit_info1, exit_info2,
		    exit_int_info, exit_int_info_err, isa),

	TP_STRUCT__entry(
		__field(	__u64,		rip			)
		__field(	__u32,		exit_code		)
		__field(	__u64,		exit_info1		)
		__field(	__u64,		exit_info2		)
		__field(	__u32,		exit_int_info		)
		__field(	__u32,		exit_int_info_err	)
		__field(	__u32,		isa			)
	),

	TP_fast_assign(
		__entry->rip			= rip;
		__entry->exit_code		= exit_code;
		__entry->exit_info1		= exit_info1;
		__entry->exit_info2		= exit_info2;
		__entry->exit_int_info		= exit_int_info;
		__entry->exit_int_info_err	= exit_int_info_err;
		__entry->isa			= isa;
	),
	TP_printk("rip: 0x%016llx reason: %s ext_inf1: 0x%016llx "
		  "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
		  __entry->rip,
		 (__entry->isa == KVM_ISA_VMX) ?
		 __print_symbolic(__entry->exit_code, VMX_EXIT_REASONS) :
		 __print_symbolic(__entry->exit_code, SVM_EXIT_REASONS),
		  __entry->exit_info1, __entry->exit_info2,
		  __entry->exit_int_info, __entry->exit_int_info_err)
);

/*
 * Tracepoint for #VMEXIT reinjected to the guest
 */
TRACE_EVENT(kvm_nested_vmexit_inject,
	    TP_PROTO(__u32 exit_code,
		     __u64 exit_info1, __u64 exit_info2,
		     __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa),
	    TP_ARGS(exit_code, exit_info1, exit_info2,
		    exit_int_info, exit_int_info_err, isa),

	TP_STRUCT__entry(
		__field(	__u32,		exit_code		)
		__field(	__u64,		exit_info1		)
		__field(	__u64,		exit_info2		)
		__field(	__u32,		exit_int_info		)
		__field(	__u32,		exit_int_info_err	)
		__field(	__u32,		isa			)
	),

	TP_fast_assign(
		__entry->exit_code		= exit_code;
		__entry->exit_info1		= exit_info1;
		__entry->exit_info2		= exit_info2;
		__entry->exit_int_info		= exit_int_info;
		__entry->exit_int_info_err	= exit_int_info_err;
		__entry->isa			= isa;
	),

	TP_printk("reason: %s ext_inf1: 0x%016llx "
		  "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
		 (__entry->isa == KVM_ISA_VMX) ?
		 __print_symbolic(__entry->exit_code, VMX_EXIT_REASONS) :
		 __print_symbolic(__entry->exit_code, SVM_EXIT_REASONS),
		__entry->exit_info1, __entry->exit_info2,
		__entry->exit_int_info, __entry->exit_int_info_err)
);

/*
 * Tracepoint for nested #vmexit because of interrupt pending
 */
TRACE_EVENT(kvm_nested_intr_vmexit,
	    TP_PROTO(__u64 rip),
	    TP_ARGS(rip),

	TP_STRUCT__entry(
		__field(	__u64,	rip	)
	),

	TP_fast_assign(
		__entry->rip	=	rip;
	),

	TP_printk("rip: 0x%016llx", __entry->rip)
);

/*
 * Tracepoint for INVLPGA instruction emulation
 */
TRACE_EVENT(kvm_invlpga,
	    TP_PROTO(__u64 rip, int asid, u64 address),
	    TP_ARGS(rip, asid, address),

	TP_STRUCT__entry(
		__field(	__u64,	rip	)
		__field(	int,	asid	)
		__field(	__u64,	address	)
	),

	TP_fast_assign(
		__entry->rip		=	rip;
		__entry->asid		=	asid;
		__entry->address	=	address;
	),

	TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx",
		  __entry->rip, __entry->asid, __entry->address)
);

/*
 * Tracepoint for SKINIT instruction emulation
 */
TRACE_EVENT(kvm_skinit,
	    TP_PROTO(__u64 rip, __u32 slb),
	    TP_ARGS(rip, slb),

	TP_STRUCT__entry(
		__field(	__u64,	rip	)
		__field(	__u32,	slb	)
	),

	TP_fast_assign(
		__entry->rip		=	rip;
		__entry->slb		=	slb;
	),

	TP_printk("rip: 0x%016llx slb: 0x%08x",
		  __entry->rip, __entry->slb)
);

#define KVM_EMUL_INSN_F_CR0_PE (1 << 0)
#define KVM_EMUL_INSN_F_EFL_VM (1 << 1)
#define KVM_EMUL_INSN_F_CS_D   (1 << 2)
#define KVM_EMUL_INSN_F_CS_L   (1 << 3)

#define kvm_trace_symbol_emul_flags	                  \
	{ 0,   			    "real" },		  \
	{ KVM_EMUL_INSN_F_CR0_PE			  \
	  | KVM_EMUL_INSN_F_EFL_VM, "vm16" },		  \
	{ KVM_EMUL_INSN_F_CR0_PE,   "prot16" },		  \
	{ KVM_EMUL_INSN_F_CR0_PE			  \
	  | KVM_EMUL_INSN_F_CS_D,   "prot32" },		  \
	{ KVM_EMUL_INSN_F_CR0_PE			  \
	  | KVM_EMUL_INSN_F_CS_L,   "prot64" }

#define kei_decode_mode(mode) ({			\
	u8 flags = 0xff;				\
	switch (mode) {					\
	case X86EMUL_MODE_REAL:				\
		flags = 0;				\
		break;					\
	case X86EMUL_MODE_VM86:				\
		flags = KVM_EMUL_INSN_F_EFL_VM;		\
		break;					\
	case X86EMUL_MODE_PROT16:			\
		flags = KVM_EMUL_INSN_F_CR0_PE;		\
		break;					\
	case X86EMUL_MODE_PROT32:			\
		flags = KVM_EMUL_INSN_F_CR0_PE		\
			| KVM_EMUL_INSN_F_CS_D;		\
		break;					\
	case X86EMUL_MODE_PROT64:			\
		flags = KVM_EMUL_INSN_F_CR0_PE		\
			| KVM_EMUL_INSN_F_CS_L;		\
		break;					\
	}						\
	flags;						\
	})
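
/*
 * Editorial note: kei_decode_mode() maps the emulator's x86 mode
 * (X86EMUL_MODE_*) to KVM_EMUL_INSN_F_* flag bits, which __print_symbolic()
 * renders through kvm_trace_symbol_emul_flags above ("real", "prot16",
 * "prot32", "prot64", ...); 0xff is the fall-through value for a mode the
 * switch does not recognize.
 */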

TRACE_EVENT(kvm_emulate_insn,
	TP_PROTO(struct kvm_vcpu *vcpu, __u8 failed),
	TP_ARGS(vcpu, failed),

	TP_STRUCT__entry(
		__field(    __u64, rip                       )
		__field(    __u32, csbase                    )
		__field(    __u8,  len                       )
		__array(    __u8,  insn,    15	             )
		__field(    __u8,  flags       	   	     )
		__field(    __u8,  failed                    )
		),

	TP_fast_assign(
		__entry->rip = vcpu->arch.emulate_ctxt.fetch.start;
		__entry->csbase = kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS);
		__entry->len = vcpu->arch.emulate_ctxt._eip
			       - vcpu->arch.emulate_ctxt.fetch.start;
		memcpy(__entry->insn,
		       vcpu->arch.emulate_ctxt.fetch.data,
		       15);
		__entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt.mode);
		__entry->failed = failed;
		),

	TP_printk("%x:%llx:%s (%s)%s",
		  __entry->csbase, __entry->rip,
		  __print_hex(__entry->insn, __entry->len),
		  __print_symbolic(__entry->flags,
				   kvm_trace_symbol_emul_flags),
		  __entry->failed ? " failed" : ""
		)
	);

#define trace_kvm_emulate_insn_start(vcpu) trace_kvm_emulate_insn(vcpu, 0)
#define trace_kvm_emulate_insn_failed(vcpu) trace_kvm_emulate_insn(vcpu, 1)
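
/*
 * Illustrative usage (editorial sketch): the emulator entry point would call
 *
 *	trace_kvm_emulate_insn_start(vcpu);
 *
 * before decoding, and trace_kvm_emulate_insn_failed(vcpu) on an emulation
 * failure, which makes the TP_printk() above append " failed" to the record.
 */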

TRACE_EVENT(
	vcpu_match_mmio,
	TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match),
	TP_ARGS(gva, gpa, write, gpa_match),

	TP_STRUCT__entry(
		__field(gva_t, gva)
		__field(gpa_t, gpa)
		__field(bool, write)
		__field(bool, gpa_match)
		),

	TP_fast_assign(
		__entry->gva = gva;
		__entry->gpa = gpa;
		__entry->write = write;
		__entry->gpa_match = gpa_match;
		),

	TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa,
		  __entry->write ? "Write" : "Read",
		  __entry->gpa_match ? "GPA" : "GVA")
);
#endif /* _TRACE_KVM_H */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH arch/x86/kvm
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace

/* This part must be outside protection */
#include <trace/define_trace.h>