/* SPDX-License-Identifier: GPL-2.0 */
#if !defined(_TRACE_KVMMMU_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVMMMU_H

#include <linux/tracepoint.h>
#include <linux/trace_events.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvmmmu

#define KVM_MMU_PAGE_FIELDS		\
	__field(__u8, mmu_valid_gen)	\
	__field(__u64, gfn)		\
	__field(__u32, role)		\
	__field(__u32, root_count)	\
	__field(bool, unsync)

#define KVM_MMU_PAGE_ASSIGN(sp)				\
	__entry->mmu_valid_gen = sp->mmu_valid_gen;	\
	__entry->gfn = sp->gfn;				\
	__entry->role = sp->role.word;			\
	__entry->root_count = sp->root_count;		\
	__entry->unsync = sp->unsync;

#define KVM_MMU_PAGE_PRINTK() ({				        \
	const char *saved_ptr = trace_seq_buffer_ptr(p);		\
	static const char *access_str[] = {			        \
		"---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux"  \
	};							        \
	union kvm_mmu_page_role role;				        \
								        \
	role.word = __entry->role;					\
									\
	trace_seq_printf(p, "sp gen %u gfn %llx l%u %u-byte q%u%s %s%s"	\
			 " %snxe %sad root %u %s%c",			\
			 __entry->mmu_valid_gen,			\
			 __entry->gfn, role.level,			\
			 role.has_4_byte_gpte ? 4 : 8,			\
			 role.quadrant,					\
			 role.direct ? " direct" : "",			\
			 access_str[role.access],			\
			 role.invalid ? " invalid" : "",		\
			 role.efer_nx ? "" : "!",			\
			 role.ad_disabled ? "!" : "",			\
			 __entry->root_count,				\
			 __entry->unsync ? "unsync" : "sync", 0);	\
	saved_ptr;							\
})

#define kvm_mmu_trace_pferr_flags       \
	{ PFERR_PRESENT_MASK, "P" },	\
	{ PFERR_WRITE_MASK, "W" },	\
	{ PFERR_USER_MASK, "U" },	\
	{ PFERR_PK_MASK, "PK" },	\
	{ PFERR_SS_MASK, "SS" },	\
	{ PFERR_SGX_MASK, "SGX" },	\
	{ PFERR_RSVD_MASK, "RSVD" },	\
	{ PFERR_FETCH_MASK, "F" }

TRACE_DEFINE_ENUM(RET_PF_CONTINUE);
TRACE_DEFINE_ENUM(RET_PF_RETRY);
TRACE_DEFINE_ENUM(RET_PF_EMULATE);
TRACE_DEFINE_ENUM(RET_PF_WRITE_PROTECTED);
TRACE_DEFINE_ENUM(RET_PF_INVALID);
TRACE_DEFINE_ENUM(RET_PF_FIXED);
TRACE_DEFINE_ENUM(RET_PF_SPURIOUS);

/*
 * A pagetable walk has started
 */
TRACE_EVENT(
	kvm_mmu_pagetable_walk,
	TP_PROTO(u64 addr, u32 pferr),
	TP_ARGS(addr, pferr),

	TP_STRUCT__entry(
		__field(__u64, addr)
		__field(__u32, pferr)
	),

	TP_fast_assign(
		__entry->addr = addr;
		__entry->pferr = pferr;
	),

	TP_printk("addr %llx pferr %x %s", __entry->addr, __entry->pferr,
		  __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
);

/* We just walked a paging element */
TRACE_EVENT(
	kvm_mmu_paging_element,
	TP_PROTO(u64 pte, int level),
	TP_ARGS(pte, level),

	TP_STRUCT__entry(
		__field(__u64, pte)
		__field(__u32, level)
	),

	TP_fast_assign(
		__entry->pte = pte;
		__entry->level = level;
	),

	TP_printk("pte %llx level %u", __entry->pte, __entry->level)
);

DECLARE_EVENT_CLASS(kvm_mmu_set_bit_class,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size),

	TP_STRUCT__entry(
		__field(__u64, gpa)
	),

	TP_fast_assign(
		__entry->gpa = ((u64)table_gfn << PAGE_SHIFT)
				+ index * size;
	),

	TP_printk("gpa %llx", __entry->gpa)
);

/* We set a pte accessed bit */
DEFINE_EVENT(kvm_mmu_set_bit_class, kvm_mmu_set_accessed_bit,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size)
);

/* We set a pte dirty bit */
DEFINE_EVENT(kvm_mmu_set_bit_class, kvm_mmu_set_dirty_bit,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size)
);

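/* A pagetable walk has failed */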
TRACE_EVENT(
	kvm_mmu_walker_error,
	TP_PROTO(u32 pferr),
	TP_ARGS(pferr),

	TP_STRUCT__entry(
		__field(__u32, pferr)
	),

	TP_fast_assign(
		__entry->pferr = pferr;
	),

	TP_printk("pferr %x %s", __entry->pferr,
		  __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
);

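/* We fetched a shadow page; 'created' says whether it is new or existing */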
TRACE_EVENT(
	kvm_mmu_get_page,
	TP_PROTO(struct kvm_mmu_page *sp, bool created),
	TP_ARGS(sp, created),

	TP_STRUCT__entry(
		KVM_MMU_PAGE_FIELDS
		__field(bool, created)
	),

	TP_fast_assign(
		KVM_MMU_PAGE_ASSIGN(sp)
		__entry->created = created;
	),

	TP_printk("%s %s", KVM_MMU_PAGE_PRINTK(),
		  __entry->created ? "new" : "existing")
);

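/* Event class for tracepoints that log a shadow page's state */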
DECLARE_EVENT_CLASS(kvm_mmu_page_class,

	TP_PROTO(struct kvm_mmu_page *sp),
	TP_ARGS(sp),

	TP_STRUCT__entry(
		KVM_MMU_PAGE_FIELDS
	),

	TP_fast_assign(
		KVM_MMU_PAGE_ASSIGN(sp)
	),

	TP_printk("%s", KVM_MMU_PAGE_PRINTK())
);

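/* We synced a shadow page with the guest's page table */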
DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_sync_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
);

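/* We marked a shadow page unsync */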
DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_unsync_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
);

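/* We prepared a shadow page for zapping */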
DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_prepare_zap_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
);

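/* We installed an MMIO SPTE, caching the gfn, access bits and generation */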
TRACE_EVENT(
	mark_mmio_spte,
	TP_PROTO(u64 *sptep, gfn_t gfn, u64 spte),
	TP_ARGS(sptep, gfn, spte),

	TP_STRUCT__entry(
		__field(void *, sptep)
		__field(gfn_t, gfn)
		__field(unsigned, access)
		__field(unsigned int, gen)
	),

	TP_fast_assign(
		__entry->sptep = sptep;
		__entry->gfn = gfn;
		__entry->access = spte & ACC_ALL;
		__entry->gen = get_mmio_spte_generation(spte);
	),

	TP_printk("sptep:%p gfn %llx access %x gen %x", __entry->sptep,
		  __entry->gfn, __entry->access, __entry->gen)
);

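/* A page fault was handled as an MMIO access */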
TRACE_EVENT(
	handle_mmio_page_fault,
	TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
	TP_ARGS(addr, gfn, access),

	TP_STRUCT__entry(
		__field(u64, addr)
		__field(gfn_t, gfn)
		__field(unsigned, access)
	),

	TP_fast_assign(
		__entry->addr = addr;
		__entry->gfn = gfn;
		__entry->access = access;
	),

	TP_printk("addr:%llx gfn %llx access %x", __entry->addr, __entry->gfn,
		  __entry->access)
);

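/* The fast page fault path attempted to fix a spte without taking the MMU lock */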
TRACE_EVENT(
	fast_page_fault,
	TP_PROTO(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
		 u64 *sptep, u64 old_spte, int ret),
	TP_ARGS(vcpu, fault, sptep, old_spte, ret),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(gpa_t, cr2_or_gpa)
		__field(u64, error_code)
		__field(u64 *, sptep)
		__field(u64, old_spte)
		__field(u64, new_spte)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu->vcpu_id;
		__entry->cr2_or_gpa = fault->addr;
		__entry->error_code = fault->error_code;
		__entry->sptep = sptep;
		__entry->old_spte = old_spte;
		__entry->new_spte = *sptep;
		__entry->ret = ret;
	),

	TP_printk("vcpu %d gva %llx error_code %s sptep %p old %#llx"
		  " new %llx spurious %d fixed %d", __entry->vcpu_id,
		  __entry->cr2_or_gpa, __print_flags(__entry->error_code, "|",
		  kvm_mmu_trace_pferr_flags), __entry->sptep,
		  __entry->old_spte, __entry->new_spte,
		  __entry->ret == RET_PF_SPURIOUS, __entry->ret == RET_PF_FIXED
	)
);

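/* We invalidated all shadow pages by bumping mmu_valid_gen */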
TRACE_EVENT(
	kvm_mmu_zap_all_fast,
	TP_PROTO(struct kvm *kvm),
	TP_ARGS(kvm),

	TP_STRUCT__entry(
		__field(__u8, mmu_valid_gen)
		__field(unsigned int, mmu_used_pages)
	),

	TP_fast_assign(
		__entry->mmu_valid_gen = kvm->arch.mmu_valid_gen;
		__entry->mmu_used_pages = kvm->arch.n_used_mmu_pages;
	),

	TP_printk("kvm-mmu-valid-gen %u used_pages %x",
		  __entry->mmu_valid_gen, __entry->mmu_used_pages
	)
);

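/* We checked whether a cached MMIO SPTE's generation is still valid */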
TRACE_EVENT(
	check_mmio_spte,
	TP_PROTO(u64 spte, unsigned int kvm_gen, unsigned int spte_gen),
	TP_ARGS(spte, kvm_gen, spte_gen),

	TP_STRUCT__entry(
		__field(unsigned int, kvm_gen)
		__field(unsigned int, spte_gen)
		__field(u64, spte)
	),

	TP_fast_assign(
		__entry->kvm_gen = kvm_gen;
		__entry->spte_gen = spte_gen;
		__entry->spte = spte;
	),

	TP_printk("spte %llx kvm_gen %x spte-gen %x valid %d", __entry->spte,
		  __entry->kvm_gen, __entry->spte_gen,
		  __entry->kvm_gen == __entry->spte_gen
	)
);

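/* We set a pte, logging its decoded protection bits */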
TRACE_EVENT(
	kvm_mmu_set_spte,
	TP_PROTO(int level, gfn_t gfn, u64 *sptep),
	TP_ARGS(level, gfn, sptep),

	TP_STRUCT__entry(
		__field(u64, gfn)
		__field(u64, spte)
		__field(u64, sptep)
		__field(u8, level)
		/* These depend on page entry type, so compute them now. */
		__field(bool, r)
		__field(bool, x)
		__field(signed char, u)
	),

	TP_fast_assign(
		__entry->gfn = gfn;
		__entry->spte = *sptep;
		__entry->sptep = virt_to_phys(sptep);
		__entry->level = level;
		__entry->r = shadow_present_mask || (__entry->spte & PT_PRESENT_MASK);
		__entry->x = is_executable_pte(__entry->spte);
		__entry->u = shadow_user_mask ? !!(__entry->spte & shadow_user_mask) : -1;
	),

	TP_printk("gfn %llx spte %llx (%s%s%s%s) level %d at %llx",
		  __entry->gfn, __entry->spte,
		  __entry->r ? "r" : "-",
		  __entry->spte & PT_WRITABLE_MASK ? "w" : "-",
		  __entry->x ? "x" : "-",
		  __entry->u == -1 ? "" : (__entry->u ? "u" : "-"),
		  __entry->level, __entry->sptep
	)
);

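/* The pfn and mapping level requested to satisfy a page fault */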
TRACE_EVENT(
	kvm_mmu_spte_requested,
	TP_PROTO(struct kvm_page_fault *fault),
	TP_ARGS(fault),

	TP_STRUCT__entry(
		__field(u64, gfn)
		__field(u64, pfn)
		__field(u8, level)
	),

	TP_fast_assign(
		__entry->gfn = fault->gfn;
		__entry->pfn = fault->pfn | (fault->gfn & (KVM_PAGES_PER_HPAGE(fault->goal_level) - 1));
		__entry->level = fault->goal_level;
	),

	TP_printk("gfn %llx pfn %llx level %d",
		  __entry->gfn, __entry->pfn, __entry->level
	)
);

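/* A TDP MMU SPTE changed value */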
TRACE_EVENT(
	kvm_tdp_mmu_spte_changed,
	TP_PROTO(int as_id, gfn_t gfn, int level, u64 old_spte, u64 new_spte),
	TP_ARGS(as_id, gfn, level, old_spte, new_spte),

	TP_STRUCT__entry(
		__field(u64, gfn)
		__field(u64, old_spte)
		__field(u64, new_spte)
		/* Level cannot be larger than 5 on x86, so it fits in a u8. */
		__field(u8, level)
		/* as_id can only be 0 or 1 on x86, so it fits in a u8. */
		__field(u8, as_id)
	),

	TP_fast_assign(
		__entry->gfn = gfn;
		__entry->old_spte = old_spte;
		__entry->new_spte = new_spte;
		__entry->level = level;
		__entry->as_id = as_id;
	),

	TP_printk("as id %d gfn %llx level %d old_spte %llx new_spte %llx",
		  __entry->as_id, __entry->gfn, __entry->level,
		  __entry->old_spte, __entry->new_spte
	)
);

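/* We attempted to split a huge page; errno is non-zero on failure */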
TRACE_EVENT(
	kvm_mmu_split_huge_page,
	TP_PROTO(u64 gfn, u64 spte, int level, int errno),
	TP_ARGS(gfn, spte, level, errno),

	TP_STRUCT__entry(
		__field(u64, gfn)
		__field(u64, spte)
		__field(int, level)
		__field(int, errno)
	),

	TP_fast_assign(
		__entry->gfn = gfn;
		__entry->spte = spte;
		__entry->level = level;
		__entry->errno = errno;
	),

	TP_printk("gfn %llx spte %llx level %d errno %d",
		  __entry->gfn, __entry->spte, __entry->level, __entry->errno)
);

#endif /* _TRACE_KVMMMU_H */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH mmu
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE mmutrace

/* This part must be outside protection */
#include <trace/define_trace.h>