/* xref: /linux/include/trace/events/kvm.h (revision e3966940559d52aa1800a008dcfeec218dd31f88) */
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #if !defined(_TRACE_KVM_MAIN_H) || defined(TRACE_HEADER_MULTI_READ)
3 #define _TRACE_KVM_MAIN_H
4 
5 #include <linux/tracepoint.h>
6 
7 #undef TRACE_SYSTEM
8 #define TRACE_SYSTEM kvm
9 
/*
 * ERSN(x) expands to a { KVM_EXIT_x, "KVM_EXIT_x" } pair, the element
 * format __print_symbolic() needs to map a numeric exit reason to its name.
 */
10 #define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }
11 
/*
 * Value -> name table covering the KVM_EXIT_* userspace exit reasons;
 * consumed by the kvm_userspace_exit tracepoint's TP_printk below.
 */
12 #define kvm_trace_exit_reason						\
13 	ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL),	\
14 	ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN),	\
15 	ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR),	\
16 	ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
17 	ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI), ERSN(PAPR_HCALL),	\
18 	ERSN(S390_UCONTROL), ERSN(WATCHDOG), ERSN(S390_TSCH), ERSN(EPR),\
19 	ERSN(SYSTEM_EVENT), ERSN(S390_STSI), ERSN(IOAPIC_EOI),          \
20 	ERSN(HYPERV), ERSN(ARM_NISV), ERSN(X86_RDMSR), ERSN(X86_WRMSR)
21 
/*
 * Fired on a vcpu exit to userspace.  @reason is the KVM_EXIT_* code and
 * @errno the run loop's return value.  A negative errno is printed as
 * "restart" (for -EINTR) or "error" together with the positive error
 * number; otherwise the exit reason is decoded symbolically.
 */
22 TRACE_EVENT(kvm_userspace_exit,
23 	    TP_PROTO(__u32 reason, int errno),
24 	    TP_ARGS(reason, errno),
25 
26 	TP_STRUCT__entry(
27 		__field(	__u32,		reason		)
28 		__field(	int,		errno		)
29 	),
30 
31 	TP_fast_assign(
32 		__entry->reason		= reason;
33 		__entry->errno		= errno;
34 	),
35 
36 	TP_printk("reason %s (%d)",
37 		  __entry->errno < 0 ?
38 		  (__entry->errno == -EINTR ? "restart" : "error") :
39 		  __print_symbolic(__entry->reason, kvm_trace_exit_reason),
40 		  __entry->errno < 0 ? -__entry->errno : __entry->reason)
41 );
42 
/*
 * Fired when a halted vcpu wakes up.  @ns is the elapsed time in
 * nanoseconds, @waited selects "wait" (actually blocked) vs "poll"
 * (woken while polling), and @valid is printed as "valid"/"invalid"
 * for the polling state (exact semantics defined by the caller).
 */
43 TRACE_EVENT(kvm_vcpu_wakeup,
44 	    TP_PROTO(__u64 ns, bool waited, bool valid),
45 	    TP_ARGS(ns, waited, valid),
46 
47 	TP_STRUCT__entry(
48 		__field(	__u64,		ns		)
49 		__field(	bool,		waited		)
50 		__field(	bool,		valid		)
51 	),
52 
53 	TP_fast_assign(
54 		__entry->ns		= ns;
55 		__entry->waited		= waited;
56 		__entry->valid		= valid;
57 	),
58 
59 	TP_printk("%s time %lld ns, polling %s",
60 		  __entry->waited ? "wait" : "poll",
61 		  __entry->ns,
62 		  __entry->valid ? "valid" : "invalid")
63 );
64 
65 #if defined(CONFIG_HAVE_KVM_IRQCHIP)
/*
 * Fired when interrupt line @gsi is driven to @level on behalf of
 * interrupt source @irq_source_id.  Only available with an in-kernel
 * irqchip (CONFIG_HAVE_KVM_IRQCHIP).
 */
66 TRACE_EVENT(kvm_set_irq,
67 	TP_PROTO(unsigned int gsi, int level, int irq_source_id),
68 	TP_ARGS(gsi, level, irq_source_id),
69 
70 	TP_STRUCT__entry(
71 		__field(	unsigned int,	gsi		)
72 		__field(	int,		level		)
73 		__field(	int,		irq_source_id	)
74 	),
75 
76 	TP_fast_assign(
77 		__entry->gsi		= gsi;
78 		__entry->level		= level;
79 		__entry->irq_source_id	= irq_source_id;
80 	),
81 
82 	TP_printk("gsi %u level %d source %d",
83 		  __entry->gsi, __entry->level, __entry->irq_source_id)
84 );
85 
/*
 * kvm_irqchips: value -> name table for the irqchip field of kvm_ack_irq.
 * Only defined when the x86 PIC/IOAPIC emulation is built in.
 */
86 #ifdef CONFIG_KVM_IOAPIC
87 
88 #define kvm_irqchips						\
89 	{KVM_IRQCHIP_PIC_MASTER,	"PIC master"},		\
90 	{KVM_IRQCHIP_PIC_SLAVE,		"PIC slave"},		\
91 	{KVM_IRQCHIP_IOAPIC,		"IOAPIC"}
92 
93 #endif /* CONFIG_KVM_IOAPIC */
94 
/*
 * Select the kvm_ack_irq output format at preprocessing time: symbolic
 * chip names when the kvm_irqchips table exists, the raw chip number
 * otherwise.
 */
95 #ifdef kvm_irqchips
96 #define kvm_ack_irq_string "irqchip %s pin %u"
97 #define kvm_ack_irq_parm  __print_symbolic(__entry->irqchip, kvm_irqchips), __entry->pin
98 #else
99 #define kvm_ack_irq_string "irqchip %d pin %u"
100 #define kvm_ack_irq_parm  __entry->irqchip, __entry->pin
101 #endif
102 
/*
 * Fired when an interrupt on @pin of @irqchip is acknowledged.  The
 * printk format/arguments come from kvm_ack_irq_string/kvm_ack_irq_parm,
 * chosen at build time depending on whether chip names are known.
 */
103 TRACE_EVENT(kvm_ack_irq,
104 	TP_PROTO(unsigned int irqchip, unsigned int pin),
105 	TP_ARGS(irqchip, pin),
106 
107 	TP_STRUCT__entry(
108 		__field(	unsigned int,	irqchip		)
109 		__field(	unsigned int,	pin		)
110 	),
111 
112 	TP_fast_assign(
113 		__entry->irqchip	= irqchip;
114 		__entry->pin		= pin;
115 	),
116 
117 	TP_printk(kvm_ack_irq_string, kvm_ack_irq_parm)
118 );
119 
120 #endif /* defined(CONFIG_HAVE_KVM_IRQCHIP) */
121 
122 
123 
/*
 * Trace-only codes for the kvm_mmio access type; kvm_trace_symbol_mmio
 * maps them to the strings printed by the kvm_mmio tracepoint.
 */
124 #define KVM_TRACE_MMIO_READ_UNSATISFIED 0
125 #define KVM_TRACE_MMIO_READ 1
126 #define KVM_TRACE_MMIO_WRITE 2
127 
128 #define kvm_trace_symbol_mmio \
129 	{ KVM_TRACE_MMIO_READ_UNSATISFIED, "unsatisfied-read" }, \
130 	{ KVM_TRACE_MMIO_READ, "read" }, \
131 	{ KVM_TRACE_MMIO_WRITE, "write" }
132 
/*
 * Fired on an MMIO access.  @type is a KVM_TRACE_MMIO_* code, @gpa the
 * guest physical address and @len the access length in bytes.  Up to
 * sizeof(u64) bytes of the payload at @val are copied into the record;
 * the recorded value is 0 when @val is NULL (e.g. no data available yet).
 */
133 TRACE_EVENT(kvm_mmio,
134 	TP_PROTO(int type, int len, u64 gpa, void *val),
135 	TP_ARGS(type, len, gpa, val),
136 
137 	TP_STRUCT__entry(
138 		__field(	u32,	type		)
139 		__field(	u32,	len		)
140 		__field(	u64,	gpa		)
141 		__field(	u64,	val		)
142 	),
143 
144 	TP_fast_assign(
145 		__entry->type		= type;
146 		__entry->len		= len;
147 		__entry->gpa		= gpa;
148 		__entry->val		= 0;
149 		if (val)
150 			memcpy(&__entry->val, val,
151 			       min_t(u32, sizeof(__entry->val), len));
152 	),
153 
154 	TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
155 		  __print_symbolic(__entry->type, kvm_trace_symbol_mmio),
156 		  __entry->len, __entry->gpa, __entry->val)
157 );
158 
/* load=0 -> "unload", load=1 -> "load" for the kvm_fpu event below. */
159 #define kvm_fpu_load_symbol	\
160 	{0, "unload"},		\
161 	{1, "load"}
162 
/* Fired when guest FPU state is loaded (@load=1) or unloaded (@load=0). */
163 TRACE_EVENT(kvm_fpu,
164 	TP_PROTO(int load),
165 	TP_ARGS(load),
166 
167 	TP_STRUCT__entry(
168 		__field(	u32,	        load		)
169 	),
170 
171 	TP_fast_assign(
172 		__entry->load		= load;
173 	),
174 
175 	TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol))
176 );
177 
178 #ifdef CONFIG_KVM_ASYNC_PF
/*
 * Shared event class for async page-fault page lookups: records the
 * faulting guest virtual address and the corresponding guest frame number.
 */
179 DECLARE_EVENT_CLASS(kvm_async_get_page_class,
180 
181 	TP_PROTO(u64 gva, u64 gfn),
182 
183 	TP_ARGS(gva, gfn),
184 
185 	TP_STRUCT__entry(
186 		__field(__u64, gva)
187 		__field(u64, gfn)
188 	),
189 
190 	TP_fast_assign(
191 		__entry->gva = gva;
192 		__entry->gfn = gfn;
193 	),
194 
195 	TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
196 );
197 
/* Attempt to resolve a fault asynchronously (shares kvm_async_get_page_class). */
198 DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,
199 
200 	TP_PROTO(u64 gva, u64 gfn),
201 
202 	TP_ARGS(gva, gfn)
203 );
204 
/* Same gva/gfn faulted again while an async PF was already pending — TODO confirm against callers. */
205 DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_repeated_fault,
206 
207 	TP_PROTO(u64 gva, u64 gfn),
208 
209 	TP_ARGS(gva, gfn)
210 );
211 
/*
 * Shared event class for async-PF notifications: records the async-PF
 * @token identifying the outstanding fault and the guest virtual address.
 */
212 DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,
213 
214 	TP_PROTO(u64 token, u64 gva),
215 
216 	TP_ARGS(token, gva),
217 
218 	TP_STRUCT__entry(
219 		__field(__u64, token)
220 		__field(__u64, gva)
221 	),
222 
223 	TP_fast_assign(
224 		__entry->token = token;
225 		__entry->gva = gva;
226 	),
227 
228 	TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)
229 
230 );
231 
/* "Page not present" notification delivered to the guest. */
232 DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,
233 
234 	TP_PROTO(u64 token, u64 gva),
235 
236 	TP_ARGS(token, gva)
237 );
238 
/* "Page ready" notification delivered to the guest. */
239 DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,
240 
241 	TP_PROTO(u64 token, u64 gva),
242 
243 	TP_ARGS(token, gva)
244 );
245 
/*
 * Fired when an async page fault completes; records the host virtual
 * @address that was faulted in and the guest virtual address @gva.
 */
246 TRACE_EVENT(
247 	kvm_async_pf_completed,
248 	TP_PROTO(unsigned long address, u64 gva),
249 	TP_ARGS(address, gva),
250 
251 	TP_STRUCT__entry(
252 		__field(unsigned long, address)
253 		__field(u64, gva)
254 		),
255 
256 	TP_fast_assign(
257 		__entry->address = address;
258 		__entry->gva = gva;
259 		),
260 
261 	TP_printk("gva %#llx address %#lx",  __entry->gva,
262 		  __entry->address)
263 );
264 
265 #endif /* CONFIG_KVM_ASYNC_PF */
266 
/*
 * Fired when vcpu @vcpu_id's halt-polling window is resized from @old to
 * @new nanoseconds; @grow selects the "grow" vs "shrink" direction in the
 * output.
 */
267 TRACE_EVENT(kvm_halt_poll_ns,
268 	TP_PROTO(bool grow, unsigned int vcpu_id, unsigned int new,
269 		 unsigned int old),
270 	TP_ARGS(grow, vcpu_id, new, old),
271 
272 	TP_STRUCT__entry(
273 		__field(bool, grow)
274 		__field(unsigned int, vcpu_id)
275 		__field(unsigned int, new)
276 		__field(unsigned int, old)
277 	),
278 
279 	TP_fast_assign(
280 		__entry->grow           = grow;
281 		__entry->vcpu_id        = vcpu_id;
282 		__entry->new            = new;
283 		__entry->old            = old;
284 	),
285 
286 	TP_printk("vcpu %u: halt_poll_ns %u (%s %u)",
287 			__entry->vcpu_id,
288 			__entry->new,
289 			__entry->grow ? "grow" : "shrink",
290 			__entry->old)
291 );
292 
/* Convenience wrappers that encode the resize direction in the name. */
293 #define trace_kvm_halt_poll_ns_grow(vcpu_id, new, old) \
294 	trace_kvm_halt_poll_ns(true, vcpu_id, new, old)
295 #define trace_kvm_halt_poll_ns_shrink(vcpu_id, new, old) \
296 	trace_kvm_halt_poll_ns(false, vcpu_id, new, old)
297 
/*
 * Fired when a dirty page (memslot @slot, page @offset) is pushed onto a
 * dirty ring.  Snapshots the ring's index and its dirty/reset cursors;
 * the printed "used" count is dirty_index - reset_index.
 */
298 TRACE_EVENT(kvm_dirty_ring_push,
299 	TP_PROTO(struct kvm_dirty_ring *ring, u32 slot, u64 offset),
300 	TP_ARGS(ring, slot, offset),
301 
302 	TP_STRUCT__entry(
303 		__field(int, index)
304 		__field(u32, dirty_index)
305 		__field(u32, reset_index)
306 		__field(u32, slot)
307 		__field(u64, offset)
308 	),
309 
310 	TP_fast_assign(
311 		__entry->index          = ring->index;
312 		__entry->dirty_index    = ring->dirty_index;
313 		__entry->reset_index    = ring->reset_index;
314 		__entry->slot           = slot;
315 		__entry->offset         = offset;
316 	),
317 
318 	TP_printk("ring %d: dirty 0x%x reset 0x%x "
319 		  "slot %u offset 0x%llx (used %u)",
320 		  __entry->index, __entry->dirty_index,
321 		  __entry->reset_index,  __entry->slot, __entry->offset,
322 		  __entry->dirty_index - __entry->reset_index)
323 );
324 
/*
 * Fired when a dirty ring is reset.  Records the same ring snapshot as
 * kvm_dirty_ring_push (index plus dirty/reset cursors); the printed
 * "used" count is dirty_index - reset_index.
 */
325 TRACE_EVENT(kvm_dirty_ring_reset,
326 	TP_PROTO(struct kvm_dirty_ring *ring),
327 	TP_ARGS(ring),
328 
329 	TP_STRUCT__entry(
330 		__field(int, index)
331 		__field(u32, dirty_index)
332 		__field(u32, reset_index)
333 	),
334 
335 	TP_fast_assign(
336 		__entry->index          = ring->index;
337 		__entry->dirty_index    = ring->dirty_index;
338 		__entry->reset_index    = ring->reset_index;
339 	),
340 
341 	TP_printk("ring %d: dirty 0x%x reset 0x%x (used %u)",
342 		  __entry->index, __entry->dirty_index, __entry->reset_index,
343 		  __entry->dirty_index - __entry->reset_index)
344 );
345 
/*
 * Fired on a dirty-ring-related vcpu exit to userspace; records only the
 * vcpu id (presumably the ring was full — confirm against the caller).
 */
346 TRACE_EVENT(kvm_dirty_ring_exit,
347 	TP_PROTO(struct kvm_vcpu *vcpu),
348 	TP_ARGS(vcpu),
349 
350 	TP_STRUCT__entry(
351 	    __field(int, vcpu_id)
352 	),
353 
354 	TP_fast_assign(
355 	    __entry->vcpu_id = vcpu->vcpu_id;
356 	),
357 
358 	TP_printk("vcpu %d", __entry->vcpu_id)
359 );
360 
361 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
362 /*
 * Fired when attributes are set on a range of guest memory (gfn-based).
363  * @start:	Starting address of guest memory range
364  * @end:	End address of guest memory range
365  * @attr:	The value of the attribute being set.
366  */
367 TRACE_EVENT(kvm_vm_set_mem_attributes,
368 	TP_PROTO(gfn_t start, gfn_t end, unsigned long attr),
369 	TP_ARGS(start, end, attr),
370 
371 	TP_STRUCT__entry(
372 		__field(gfn_t,		start)
373 		__field(gfn_t,		end)
374 		__field(unsigned long,	attr)
375 	),
376 
377 	TP_fast_assign(
378 		__entry->start		= start;
379 		__entry->end		= end;
380 		__entry->attr		= attr;
381 	),
382 
383 	TP_printk("%#016llx -- %#016llx [0x%lx]",
384 		  __entry->start, __entry->end, __entry->attr)
385 );
386 #endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
387 
/*
 * Fired from the MMU-notifier path when the host virtual address range
 * @start..@end is unmapped from the guest.
 */
388 TRACE_EVENT(kvm_unmap_hva_range,
389 	TP_PROTO(unsigned long start, unsigned long end),
390 	TP_ARGS(start, end),
391 
392 	TP_STRUCT__entry(
393 		__field(	unsigned long,	start		)
394 		__field(	unsigned long,	end		)
395 	),
396 
397 	TP_fast_assign(
398 		__entry->start		= start;
399 		__entry->end		= end;
400 	),
401 
402 	TP_printk("mmu notifier unmap range: %#016lx -- %#016lx",
403 		  __entry->start, __entry->end)
404 );
405 
/*
 * Fired from the MMU-notifier age callback for the host virtual address
 * range @start..@end.
 */
406 TRACE_EVENT(kvm_age_hva,
407 	TP_PROTO(unsigned long start, unsigned long end),
408 	TP_ARGS(start, end),
409 
410 	TP_STRUCT__entry(
411 		__field(	unsigned long,	start		)
412 		__field(	unsigned long,	end		)
413 	),
414 
415 	TP_fast_assign(
416 		__entry->start		= start;
417 		__entry->end		= end;
418 	),
419 
420 	TP_printk("mmu notifier age hva: %#016lx -- %#016lx",
421 		  __entry->start, __entry->end)
422 );
423 
/*
 * Fired from the MMU-notifier test-age callback for host virtual
 * address @hva.
 */
424 TRACE_EVENT(kvm_test_age_hva,
425 	TP_PROTO(unsigned long hva),
426 	TP_ARGS(hva),
427 
428 	TP_STRUCT__entry(
429 		__field(	unsigned long,	hva		)
430 	),
431 
432 	TP_fast_assign(
433 		__entry->hva		= hva;
434 	),
435 
436 	TP_printk("mmu notifier test age hva: %#016lx", __entry->hva)
437 );
438 
439 #endif /* _TRACE_KVM_MAIN_H */
440 
441 /* This part must be outside protection */
442 #include <trace/define_trace.h>
443