/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_H

#include <linux/tracepoint.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace

/*
 * arch/mips/kvm/mips.c
 */
extern bool kvm_trace_guest_mode_change;
int kvm_guest_mode_change_trace_reg(void);
void kvm_guest_mode_change_trace_unreg(void);
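
/*
 * Illustrative sketch only (not part of this header): these hooks are
 * expected to live in arch/mips/kvm/mips.c and simply flip the
 * kvm_trace_guest_mode_change gate when the kvm_guest_mode_change
 * tracepoint below is registered/unregistered, roughly as follows:
 *
 *	bool kvm_trace_guest_mode_change;
 *
 *	int kvm_guest_mode_change_trace_reg(void)
 *	{
 *		kvm_trace_guest_mode_change = true;
 *		return 0;
 *	}
 *
 *	void kvm_guest_mode_change_trace_unreg(void)
 *	{
 *		kvm_trace_guest_mode_change = false;
 *	}
 */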

/*
 * Tracepoints for VM enters
 */
DECLARE_EVENT_CLASS(kvm_transition,
	TP_PROTO(struct kvm_vcpu *vcpu),
	TP_ARGS(vcpu),
	TP_STRUCT__entry(
		__field(unsigned long, pc)
	),

	TP_fast_assign(
		__entry->pc = vcpu->arch.pc;
	),

	TP_printk("PC: 0x%08lx",
		  __entry->pc)
);

DEFINE_EVENT(kvm_transition, kvm_enter,
	     TP_PROTO(struct kvm_vcpu *vcpu),
	     TP_ARGS(vcpu));

DEFINE_EVENT(kvm_transition, kvm_reenter,
	     TP_PROTO(struct kvm_vcpu *vcpu),
	     TP_ARGS(vcpu));

DEFINE_EVENT(kvm_transition, kvm_out,
	     TP_PROTO(struct kvm_vcpu *vcpu),
	     TP_ARGS(vcpu));
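
/*
 * Usage sketch (illustrative, assumed call sites): the tracepoint machinery
 * generates trace_kvm_enter(), trace_kvm_reenter() and trace_kvm_out() from
 * the class above, each recording the vcpu's current PC, e.g.:
 *
 *	trace_kvm_enter(vcpu);		first entry into guest context
 *	trace_kvm_reenter(vcpu);	re-entry after handling an exit
 *	trace_kvm_out(vcpu);		exit from guest back to the host
 */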

/* The first 32 exit reasons correspond to Cause.ExcCode */
#define KVM_TRACE_EXIT_INT		 0
#define KVM_TRACE_EXIT_TLBMOD		 1
#define KVM_TRACE_EXIT_TLBMISS_LD	 2
#define KVM_TRACE_EXIT_TLBMISS_ST	 3
#define KVM_TRACE_EXIT_ADDRERR_LD	 4
#define KVM_TRACE_EXIT_ADDRERR_ST	 5
#define KVM_TRACE_EXIT_SYSCALL		 8
#define KVM_TRACE_EXIT_BREAK_INST	 9
#define KVM_TRACE_EXIT_RESVD_INST	10
#define KVM_TRACE_EXIT_COP_UNUSABLE	11
#define KVM_TRACE_EXIT_TRAP_INST	13
#define KVM_TRACE_EXIT_MSA_FPE		14
#define KVM_TRACE_EXIT_FPE		15
#define KVM_TRACE_EXIT_MSA_DISABLED	21
#define KVM_TRACE_EXIT_GUEST_EXIT	27
/* Further exit reasons */
#define KVM_TRACE_EXIT_WAIT		32
#define KVM_TRACE_EXIT_CACHE		33
#define KVM_TRACE_EXIT_SIGNAL		34
/* 32 exit reasons correspond to GuestCtl0.GExcCode (VZ) */
#define KVM_TRACE_EXIT_GEXCCODE_BASE	64
#define KVM_TRACE_EXIT_GPSI		64	/*  0 */
#define KVM_TRACE_EXIT_GSFC		65	/*  1 */
#define KVM_TRACE_EXIT_HC		66	/*  2 */
#define KVM_TRACE_EXIT_GRR		67	/*  3 */
#define KVM_TRACE_EXIT_GVA		72	/*  8 */
#define KVM_TRACE_EXIT_GHFC		73	/*  9 */
#define KVM_TRACE_EXIT_GPA		74	/* 10 */
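
/*
 * Worked example (derived from the values above): a VZ guest exit with
 * GuestCtl0.GExcCode == 2 (hypercall) maps to
 * KVM_TRACE_EXIT_GEXCCODE_BASE + 2 == 66 == KVM_TRACE_EXIT_HC, which the
 * kvm_exit tracepoint below prints as "HC".
 */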

/* Tracepoints for VM exits */
#define kvm_trace_symbol_exit_types				\
	{ KVM_TRACE_EXIT_INT,		"Interrupt" },		\
	{ KVM_TRACE_EXIT_TLBMOD,	"TLB Mod" },		\
	{ KVM_TRACE_EXIT_TLBMISS_LD,	"TLB Miss (LD)" },	\
	{ KVM_TRACE_EXIT_TLBMISS_ST,	"TLB Miss (ST)" },	\
	{ KVM_TRACE_EXIT_ADDRERR_LD,	"Address Error (LD)" },	\
	{ KVM_TRACE_EXIT_ADDRERR_ST,	"Address Error (ST)" },	\
	{ KVM_TRACE_EXIT_SYSCALL,	"System Call" },	\
	{ KVM_TRACE_EXIT_BREAK_INST,	"Break Inst" },		\
	{ KVM_TRACE_EXIT_RESVD_INST,	"Reserved Inst" },	\
	{ KVM_TRACE_EXIT_COP_UNUSABLE,	"COP0/1 Unusable" },	\
	{ KVM_TRACE_EXIT_TRAP_INST,	"Trap Inst" },		\
	{ KVM_TRACE_EXIT_MSA_FPE,	"MSA FPE" },		\
	{ KVM_TRACE_EXIT_FPE,		"FPE" },		\
	{ KVM_TRACE_EXIT_MSA_DISABLED,	"MSA Disabled" },	\
	{ KVM_TRACE_EXIT_GUEST_EXIT,	"Guest Exit" },		\
	{ KVM_TRACE_EXIT_WAIT,		"WAIT" },		\
	{ KVM_TRACE_EXIT_CACHE,		"CACHE" },		\
	{ KVM_TRACE_EXIT_SIGNAL,	"Signal" },		\
	{ KVM_TRACE_EXIT_GPSI,		"GPSI" },		\
	{ KVM_TRACE_EXIT_GSFC,		"GSFC" },		\
	{ KVM_TRACE_EXIT_HC,		"HC" },			\
	{ KVM_TRACE_EXIT_GRR,		"GRR" },		\
	{ KVM_TRACE_EXIT_GVA,		"GVA" },		\
	{ KVM_TRACE_EXIT_GHFC,		"GHFC" },		\
	{ KVM_TRACE_EXIT_GPA,		"GPA" }

TRACE_EVENT(kvm_exit,
	    TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
	    TP_ARGS(vcpu, reason),
	    TP_STRUCT__entry(
			__field(unsigned long, pc)
			__field(unsigned int, reason)
	    ),

	    TP_fast_assign(
			__entry->pc = vcpu->arch.pc;
			__entry->reason = reason;
	    ),

	    TP_printk("[%s]PC: 0x%08lx",
		      __print_symbolic(__entry->reason,
				       kvm_trace_symbol_exit_types),
		      __entry->pc)
);
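
/*
 * Usage sketch (illustrative, assumed call sites): exit handlers pass one of
 * the reason codes defined above, e.g.:
 *
 *	trace_kvm_exit(vcpu, KVM_TRACE_EXIT_INT);
 *	trace_kvm_exit(vcpu, KVM_TRACE_EXIT_SIGNAL);
 */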

#define KVM_TRACE_MFC0		0
#define KVM_TRACE_MTC0		1
#define KVM_TRACE_DMFC0		2
#define KVM_TRACE_DMTC0		3
#define KVM_TRACE_RDHWR		4

#define KVM_TRACE_HWR_COP0	0
#define KVM_TRACE_HWR_HWR	1

#define KVM_TRACE_COP0(REG, SEL)	((KVM_TRACE_HWR_COP0 << 8) |	\
					 ((REG) << 3) | (SEL))
#define KVM_TRACE_HWR(REG, SEL)		((KVM_TRACE_HWR_HWR  << 8) |	\
					 ((REG) << 3) | (SEL))
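
/*
 * Worked example: KVM_TRACE_COP0(12, 0) encodes the Status register as
 * (KVM_TRACE_HWR_COP0 << 8) | (12 << 3) | 0 == 0x060; the kvm_hwr
 * tracepoint below unpacks this back into its coprocessor ("COP0"),
 * register (12) and select (0) fields when printing.
 */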

#define kvm_trace_symbol_hwr_ops				\
	{ KVM_TRACE_MFC0,		"MFC0" },		\
	{ KVM_TRACE_MTC0,		"MTC0" },		\
	{ KVM_TRACE_DMFC0,		"DMFC0" },		\
	{ KVM_TRACE_DMTC0,		"DMTC0" },		\
	{ KVM_TRACE_RDHWR,		"RDHWR" }

#define kvm_trace_symbol_hwr_cop				\
	{ KVM_TRACE_HWR_COP0,		"COP0" },		\
	{ KVM_TRACE_HWR_HWR,		"HWR" }

#define kvm_trace_symbol_hwr_regs				\
	{ KVM_TRACE_COP0( 0, 0),	"Index" },		\
	{ KVM_TRACE_COP0( 2, 0),	"EntryLo0" },		\
	{ KVM_TRACE_COP0( 3, 0),	"EntryLo1" },		\
	{ KVM_TRACE_COP0( 4, 0),	"Context" },		\
	{ KVM_TRACE_COP0( 4, 2),	"UserLocal" },		\
	{ KVM_TRACE_COP0( 5, 0),	"PageMask" },		\
	{ KVM_TRACE_COP0( 6, 0),	"Wired" },		\
	{ KVM_TRACE_COP0( 7, 0),	"HWREna" },		\
	{ KVM_TRACE_COP0( 8, 0),	"BadVAddr" },		\
	{ KVM_TRACE_COP0( 9, 0),	"Count" },		\
	{ KVM_TRACE_COP0(10, 0),	"EntryHi" },		\
	{ KVM_TRACE_COP0(11, 0),	"Compare" },		\
	{ KVM_TRACE_COP0(12, 0),	"Status" },		\
	{ KVM_TRACE_COP0(12, 1),	"IntCtl" },		\
	{ KVM_TRACE_COP0(12, 2),	"SRSCtl" },		\
	{ KVM_TRACE_COP0(13, 0),	"Cause" },		\
	{ KVM_TRACE_COP0(14, 0),	"EPC" },		\
	{ KVM_TRACE_COP0(15, 0),	"PRId" },		\
	{ KVM_TRACE_COP0(15, 1),	"EBase" },		\
	{ KVM_TRACE_COP0(16, 0),	"Config" },		\
	{ KVM_TRACE_COP0(16, 1),	"Config1" },		\
	{ KVM_TRACE_COP0(16, 2),	"Config2" },		\
	{ KVM_TRACE_COP0(16, 3),	"Config3" },		\
	{ KVM_TRACE_COP0(16, 4),	"Config4" },		\
	{ KVM_TRACE_COP0(16, 5),	"Config5" },		\
	{ KVM_TRACE_COP0(16, 7),	"Config7" },		\
	{ KVM_TRACE_COP0(17, 1),	"MAAR" },		\
	{ KVM_TRACE_COP0(17, 2),	"MAARI" },		\
	{ KVM_TRACE_COP0(26, 0),	"ECC" },		\
	{ KVM_TRACE_COP0(30, 0),	"ErrorEPC" },		\
	{ KVM_TRACE_COP0(31, 2),	"KScratch1" },		\
	{ KVM_TRACE_COP0(31, 3),	"KScratch2" },		\
	{ KVM_TRACE_COP0(31, 4),	"KScratch3" },		\
	{ KVM_TRACE_COP0(31, 5),	"KScratch4" },		\
	{ KVM_TRACE_COP0(31, 6),	"KScratch5" },		\
	{ KVM_TRACE_COP0(31, 7),	"KScratch6" },		\
	{ KVM_TRACE_HWR( 0, 0),		"CPUNum" },		\
	{ KVM_TRACE_HWR( 1, 0),		"SYNCI_Step" },		\
	{ KVM_TRACE_HWR( 2, 0),		"CC" },			\
	{ KVM_TRACE_HWR( 3, 0),		"CCRes" },		\
	{ KVM_TRACE_HWR(29, 0),		"ULR" }

TRACE_EVENT(kvm_hwr,
	    TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op, unsigned int reg,
		     unsigned long val),
	    TP_ARGS(vcpu, op, reg, val),
	    TP_STRUCT__entry(
			__field(unsigned long, val)
			__field(u16, reg)
			__field(u8, op)
	    ),

	    TP_fast_assign(
			__entry->val = val;
			__entry->reg = reg;
			__entry->op = op;
	    ),

	    TP_printk("%s %s (%s:%u:%u) 0x%08lx",
		      __print_symbolic(__entry->op,
				       kvm_trace_symbol_hwr_ops),
		      __print_symbolic(__entry->reg,
				       kvm_trace_symbol_hwr_regs),
		      __print_symbolic(__entry->reg >> 8,
				       kvm_trace_symbol_hwr_cop),
		      (__entry->reg >> 3) & 0x1f,
		      __entry->reg & 0x7,
		      __entry->val)
);
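
/*
 * Usage sketch (illustrative, assumed call site): when emulating a guest
 * MTC0 to Status, the emulator could record the access as
 *
 *	trace_kvm_hwr(vcpu, KVM_TRACE_MTC0, KVM_TRACE_COP0(12, 0), val);
 *
 * which the format above renders as "MTC0 Status (COP0:12:0) 0x........".
 */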

#define KVM_TRACE_AUX_RESTORE		0
#define KVM_TRACE_AUX_SAVE		1
#define KVM_TRACE_AUX_ENABLE		2
#define KVM_TRACE_AUX_DISABLE		3
#define KVM_TRACE_AUX_DISCARD		4

#define KVM_TRACE_AUX_FPU		1
#define KVM_TRACE_AUX_MSA		2
#define KVM_TRACE_AUX_FPU_MSA		3

#define kvm_trace_symbol_aux_op		\
	{ KVM_TRACE_AUX_RESTORE, "restore" },	\
	{ KVM_TRACE_AUX_SAVE,    "save" },	\
	{ KVM_TRACE_AUX_ENABLE,  "enable" },	\
	{ KVM_TRACE_AUX_DISABLE, "disable" },	\
	{ KVM_TRACE_AUX_DISCARD, "discard" }

#define kvm_trace_symbol_aux_state		\
	{ KVM_TRACE_AUX_FPU,     "FPU" },	\
	{ KVM_TRACE_AUX_MSA,     "MSA" },	\
	{ KVM_TRACE_AUX_FPU_MSA, "FPU & MSA" }

TRACE_EVENT(kvm_aux,
	    TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op,
		     unsigned int state),
	    TP_ARGS(vcpu, op, state),
	    TP_STRUCT__entry(
			__field(unsigned long, pc)
			__field(u8, op)
			__field(u8, state)
	    ),

	    TP_fast_assign(
			__entry->pc = vcpu->arch.pc;
			__entry->op = op;
			__entry->state = state;
	    ),

	    TP_printk("%s %s PC: 0x%08lx",
		      __print_symbolic(__entry->op,
				       kvm_trace_symbol_aux_op),
		      __print_symbolic(__entry->state,
				       kvm_trace_symbol_aux_state),
		      __entry->pc)
);
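
/*
 * Usage sketch (illustrative): the state values form a bitmask
 * (KVM_TRACE_AUX_FPU_MSA == KVM_TRACE_AUX_FPU | KVM_TRACE_AUX_MSA), so a
 * combined FPU/MSA context restore could (hypothetically) be traced as
 *
 *	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU_MSA);
 */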

TRACE_EVENT(kvm_asid_change,
	    TP_PROTO(struct kvm_vcpu *vcpu, unsigned int old_asid,
		     unsigned int new_asid),
	    TP_ARGS(vcpu, old_asid, new_asid),
	    TP_STRUCT__entry(
			__field(unsigned long, pc)
			__field(u8, old_asid)
			__field(u8, new_asid)
	    ),

	    TP_fast_assign(
			__entry->pc = vcpu->arch.pc;
			__entry->old_asid = old_asid;
			__entry->new_asid = new_asid;
	    ),

	    TP_printk("PC: 0x%08lx old: 0x%02x new: 0x%02x",
		      __entry->pc,
		      __entry->old_asid,
		      __entry->new_asid)
);
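
/*
 * Usage sketch (illustrative, assumed call site): emitted when the guest's
 * EntryHi.ASID is changed during emulation, e.g.
 *
 *	trace_kvm_asid_change(vcpu, old_asid, new_asid);
 */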

TRACE_EVENT(kvm_guestid_change,
	    TP_PROTO(struct kvm_vcpu *vcpu, unsigned int guestid),
	    TP_ARGS(vcpu, guestid),
	    TP_STRUCT__entry(
			__field(unsigned int, guestid)
	    ),

	    TP_fast_assign(
			__entry->guestid = guestid;
	    ),

	    TP_printk("GuestID: 0x%02x",
		      __entry->guestid)
);
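
/*
 * Usage sketch (illustrative, assumed call site): under VZ, emitted when a
 * vcpu is assigned a fresh hardware GuestID, e.g.
 *
 *	trace_kvm_guestid_change(vcpu, guestid);
 */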

TRACE_EVENT_FN(kvm_guest_mode_change,
	    TP_PROTO(struct kvm_vcpu *vcpu),
	    TP_ARGS(vcpu),
	    TP_STRUCT__entry(
			__field(unsigned long, epc)
			__field(unsigned long, pc)
			__field(unsigned long, badvaddr)
			__field(unsigned int, status)
			__field(unsigned int, cause)
	    ),

	    TP_fast_assign(
			__entry->epc = kvm_read_c0_guest_epc(vcpu->arch.cop0);
			__entry->pc = vcpu->arch.pc;
			__entry->badvaddr = kvm_read_c0_guest_badvaddr(vcpu->arch.cop0);
			__entry->status = kvm_read_c0_guest_status(vcpu->arch.cop0);
			__entry->cause = kvm_read_c0_guest_cause(vcpu->arch.cop0);
	    ),

	    TP_printk("EPC: 0x%08lx PC: 0x%08lx Status: 0x%08x Cause: 0x%08x BadVAddr: 0x%08lx",
		      __entry->epc,
		      __entry->pc,
		      __entry->status,
		      __entry->cause,
		      __entry->badvaddr),

	    kvm_guest_mode_change_trace_reg,
	    kvm_guest_mode_change_trace_unreg
);
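
/*
 * Usage sketch (illustrative, assumed call site): since this event reads
 * several guest CP0 registers, callers are expected to check the
 * registration gate first so the cost is only paid while tracing is active:
 *
 *	if (kvm_trace_guest_mode_change)
 *		trace_kvm_guest_mode_change(vcpu);
 */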

#endif /* _TRACE_KVM_H */

/* This part must be outside protection */
#include <trace/define_trace.h>