/* SPDX-License-Identifier: GPL-2.0 */
/*
 * KVM/arm64 tracepoint definitions (TRACE_SYSTEM "kvm").
 *
 * This header is read multiple times by the tracing infrastructure
 * (hence TRACE_HEADER_MULTI_READ in the guard) and is pulled in by
 * <trace/define_trace.h> at the bottom, which expands each
 * TRACE_EVENT() into the actual tracepoint plumbing.
 */
#if !defined(_TRACE_ARM_ARM64_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_ARM_ARM64_KVM_H

#include <asm/kvm_emulate.h>
#include <kvm/arm_arch_timer.h>
#include <linux/tracepoint.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm

/*
 * Tracepoints for entry/exit to guest
 */
/* Guest entry: records the guest PC the vCPU resumes at. */
TRACE_EVENT(kvm_entry,
	TP_PROTO(unsigned long vcpu_pc),
	TP_ARGS(vcpu_pc),

	TP_STRUCT__entry(
		__field(	unsigned long,	vcpu_pc		)
	),

	TP_fast_assign(
		__entry->vcpu_pc	= vcpu_pc;
	),

	TP_printk("PC: 0x%016lx", __entry->vcpu_pc)
);

/*
 * Guest exit: records the exception code, the ESR exception class
 * (only meaningful for trap exits — zeroed otherwise, see the
 * ARM_EXCEPTION_IS_TRAP() check below) and the guest PC.
 */
TRACE_EVENT(kvm_exit,
	TP_PROTO(int ret, unsigned int esr_ec, unsigned long vcpu_pc),
	TP_ARGS(ret, esr_ec, vcpu_pc),

	TP_STRUCT__entry(
		__field(	int,		ret		)
		__field(	unsigned int,	esr_ec		)
		__field(	unsigned long,	vcpu_pc		)
	),

	TP_fast_assign(
		__entry->ret		= ARM_EXCEPTION_CODE(ret);
		/* esr_ec only carries information when the exit was a trap */
		__entry->esr_ec		= ARM_EXCEPTION_IS_TRAP(ret) ? esr_ec : 0;
		__entry->vcpu_pc	= vcpu_pc;
	),

	TP_printk("%s: HSR_EC: 0x%04x (%s), PC: 0x%016lx",
		  __print_symbolic(__entry->ret, kvm_arm_exception_type),
		  __entry->esr_ec,
		  __print_symbolic(__entry->esr_ec, kvm_arm_exception_class),
		  __entry->vcpu_pc)
);

/* Guest abort: records PC, syndrome (hsr), faulting VA (hxfar) and IPA. */
TRACE_EVENT(kvm_guest_fault,
	TP_PROTO(unsigned long vcpu_pc, unsigned long hsr,
		 unsigned long hxfar,
		 unsigned long long ipa),
	TP_ARGS(vcpu_pc, hsr, hxfar, ipa),

	TP_STRUCT__entry(
		__field(	unsigned long,	vcpu_pc		)
		__field(	unsigned long,	hsr		)
		__field(	unsigned long,	hxfar		)
		__field(   unsigned long long,	ipa		)
	),

	TP_fast_assign(
		__entry->vcpu_pc	= vcpu_pc;
		__entry->hsr		= hsr;
		__entry->hxfar		= hxfar;
		__entry->ipa		= ipa;
	),

	TP_printk("ipa %#llx, hsr %#08lx, hxfar %#08lx, pc %#016lx",
		  __entry->ipa, __entry->hsr,
		  __entry->hxfar, __entry->vcpu_pc)
);

/* Access-flag fault: records only the faulting IPA. */
TRACE_EVENT(kvm_access_fault,
	TP_PROTO(unsigned long ipa),
	TP_ARGS(ipa),

	TP_STRUCT__entry(
		__field(	unsigned long,	ipa		)
	),

	TP_fast_assign(
		__entry->ipa		= ipa;
	),

	TP_printk("IPA: %lx", __entry->ipa)
);

/*
 * Interrupt injection: type is one of the KVM_ARM_IRQ_TYPE_* userspace
 * constants (CPU / VGIC PPI / VGIC SPI); anything else prints UNKNOWN.
 */
TRACE_EVENT(kvm_irq_line,
	TP_PROTO(unsigned int type, int vcpu_idx, int irq_num, int level),
	TP_ARGS(type, vcpu_idx, irq_num, level),

	TP_STRUCT__entry(
		__field(	unsigned int,	type		)
		__field(	int,		vcpu_idx	)
		__field(	int,		irq_num		)
		__field(	int,		level		)
	),

	TP_fast_assign(
		__entry->type		= type;
		__entry->vcpu_idx	= vcpu_idx;
		__entry->irq_num	= irq_num;
		__entry->level		= level;
	),

	TP_printk("Inject %s interrupt (%d), vcpu->idx: %d, num: %d, level: %d",
		  (__entry->type == KVM_ARM_IRQ_TYPE_CPU) ? "CPU" :
		  (__entry->type == KVM_ARM_IRQ_TYPE_PPI) ? "VGIC PPI" :
		  (__entry->type == KVM_ARM_IRQ_TYPE_SPI) ? "VGIC SPI" : "UNKNOWN",
		  __entry->type, __entry->vcpu_idx, __entry->irq_num, __entry->level)
);

/* MMIO instruction emulation: records PC, the instruction and CPSR. */
TRACE_EVENT(kvm_mmio_emulate,
	TP_PROTO(unsigned long vcpu_pc, unsigned long instr,
		 unsigned long cpsr),
	TP_ARGS(vcpu_pc, instr, cpsr),

	TP_STRUCT__entry(
		__field(	unsigned long,	vcpu_pc		)
		__field(	unsigned long,	instr		)
		__field(	unsigned long,	cpsr		)
	),

	TP_fast_assign(
		__entry->vcpu_pc	= vcpu_pc;
		__entry->instr		= instr;
		__entry->cpsr		= cpsr;
	),

	TP_printk("Emulate MMIO at: 0x%016lx (instr: %08lx, cpsr: %08lx)",
		  __entry->vcpu_pc, __entry->instr, __entry->cpsr)
);

/* Set/way cache maintenance trapped from the guest at vcpu_pc. */
TRACE_EVENT(kvm_set_way_flush,
	TP_PROTO(unsigned long vcpu_pc, bool cache),
	TP_ARGS(vcpu_pc, cache),

	TP_STRUCT__entry(
		__field(	unsigned long,	vcpu_pc		)
		__field(	bool,		cache		)
	),

	TP_fast_assign(
		__entry->vcpu_pc	= vcpu_pc;
		__entry->cache		= cache;
	),

	TP_printk("S/W flush at 0x%016lx (cache %s)",
		  __entry->vcpu_pc, __entry->cache ? "on" : "off")
);

/* Guest toggled its caches: records the before/after on-off state. */
TRACE_EVENT(kvm_toggle_cache,
	TP_PROTO(unsigned long vcpu_pc, bool was, bool now),
	TP_ARGS(vcpu_pc, was, now),

	TP_STRUCT__entry(
		__field(	unsigned long,	vcpu_pc		)
		__field(	bool,		was		)
		__field(	bool,		now		)
	),

	TP_fast_assign(
		__entry->vcpu_pc	= vcpu_pc;
		__entry->was		= was;
		__entry->now		= now;
	),

	TP_printk("VM op at 0x%016lx (cache was %s, now %s)",
		  __entry->vcpu_pc, __entry->was ? "on" : "off",
		  __entry->now ? "on" : "off")
);

/*
 * Tracepoints for arch_timer
 */
/* Timer interrupt state update for a vCPU's timer IRQ line. */
TRACE_EVENT(kvm_timer_update_irq,
	TP_PROTO(unsigned long vcpu_id, __u32 irq, int level),
	TP_ARGS(vcpu_id, irq, level),

	TP_STRUCT__entry(
		__field(	unsigned long,	vcpu_id	)
		__field(	__u32,		irq	)
		__field(	int,		level	)
	),

	TP_fast_assign(
		__entry->vcpu_id	= vcpu_id;
		__entry->irq		= irq;
		__entry->level		= level;
	),

	TP_printk("VCPU: %ld, IRQ %d, level %d",
		  __entry->vcpu_id, __entry->irq, __entry->level)
);

/*
 * Snapshot of the vCPU timer map: each field is the context index of
 * the corresponding timer, or -1 when that timer is not present in the
 * map (the direct vtimer is always present — no NULL check below).
 */
TRACE_EVENT(kvm_get_timer_map,
	TP_PROTO(unsigned long vcpu_id, struct timer_map *map),
	TP_ARGS(vcpu_id, map),

	TP_STRUCT__entry(
		__field(	unsigned long,		vcpu_id	)
		__field(	int,			direct_vtimer	)
		__field(	int,			direct_ptimer	)
		__field(	int,			emul_vtimer	)
		__field(	int,			emul_ptimer	)
	),

	TP_fast_assign(
		__entry->vcpu_id		= vcpu_id;
		__entry->direct_vtimer		= arch_timer_ctx_index(map->direct_vtimer);
		__entry->direct_ptimer =
			(map->direct_ptimer) ? arch_timer_ctx_index(map->direct_ptimer) : -1;
		__entry->emul_vtimer =
			(map->emul_vtimer) ? arch_timer_ctx_index(map->emul_vtimer) : -1;
		__entry->emul_ptimer =
			(map->emul_ptimer) ? arch_timer_ctx_index(map->emul_ptimer) : -1;
	),

	TP_printk("VCPU: %ld, dv: %d, dp: %d, ev: %d, ep: %d",
		  __entry->vcpu_id,
		  __entry->direct_vtimer,
		  __entry->direct_ptimer,
		  __entry->emul_vtimer,
		  __entry->emul_ptimer)
);

/* Timer context saved: records CTL, CVAL and the context index. */
TRACE_EVENT(kvm_timer_save_state,
	TP_PROTO(struct arch_timer_context *ctx),
	TP_ARGS(ctx),

	TP_STRUCT__entry(
		__field(	unsigned long,		ctl		)
		__field(	unsigned long long,	cval		)
		__field(	int,			timer_idx	)
	),

	TP_fast_assign(
		__entry->ctl			= timer_get_ctl(ctx);
		__entry->cval			= timer_get_cval(ctx);
		__entry->timer_idx		= arch_timer_ctx_index(ctx);
	),

	TP_printk("	CTL: %#08lx CVAL: %#16llx arch_timer_ctx_index: %d",
		  __entry->ctl,
		  __entry->cval,
		  __entry->timer_idx)
);

/* Timer context restored: records CTL, CVAL and the context index. */
TRACE_EVENT(kvm_timer_restore_state,
	TP_PROTO(struct arch_timer_context *ctx),
	TP_ARGS(ctx),

	TP_STRUCT__entry(
		__field(	unsigned long,		ctl		)
		__field(	unsigned long long,	cval		)
		__field(	int,			timer_idx	)
	),

	TP_fast_assign(
		__entry->ctl			= timer_get_ctl(ctx);
		__entry->cval			= timer_get_cval(ctx);
		__entry->timer_idx		= arch_timer_ctx_index(ctx);
	),

	TP_printk("CTL: %#08lx CVAL: %#16llx arch_timer_ctx_index: %d",
		  __entry->ctl,
		  __entry->cval,
		  __entry->timer_idx)
);

/* Software hrtimer backing an emulated timer fired. */
TRACE_EVENT(kvm_timer_hrtimer_expire,
	TP_PROTO(struct arch_timer_context *ctx),
	TP_ARGS(ctx),

	TP_STRUCT__entry(
		__field(	int,	timer_idx	)
	),

	TP_fast_assign(
		__entry->timer_idx	= arch_timer_ctx_index(ctx);
	),

	TP_printk("arch_timer_ctx_index: %d", __entry->timer_idx)
);

/* Emulated timer evaluation: records whether it should fire now. */
TRACE_EVENT(kvm_timer_emulate,
	TP_PROTO(struct arch_timer_context *ctx, bool should_fire),
	TP_ARGS(ctx, should_fire),

	TP_STRUCT__entry(
		__field(	int,	timer_idx	)
		__field(	bool,	should_fire	)
	),

	TP_fast_assign(
		__entry->timer_idx	= arch_timer_ctx_index(ctx);
		__entry->should_fire	= should_fire;
	),

	TP_printk("arch_timer_ctx_index: %d (should_fire: %d)",
		  __entry->timer_idx, __entry->should_fire)
);

/*
 * Nested virt ERET emulation: records ELR_EL2/SPSR_EL2 at the point of
 * the exception return, the target mode decoded from SPSR_EL2's mode
 * bits, and the guest hypervisor's HCR_EL2.
 */
TRACE_EVENT(kvm_nested_eret,
	TP_PROTO(struct kvm_vcpu *vcpu, unsigned long elr_el2,
		 unsigned long spsr_el2),
	TP_ARGS(vcpu, elr_el2, spsr_el2),

	TP_STRUCT__entry(
		__field(struct kvm_vcpu *,	vcpu)
		__field(unsigned long,		elr_el2)
		__field(unsigned long,		spsr_el2)
		__field(unsigned long,		target_mode)
		__field(unsigned long,		hcr_el2)
	),

	TP_fast_assign(
		__entry->vcpu		= vcpu;
		__entry->elr_el2	= elr_el2;
		__entry->spsr_el2	= spsr_el2;
		/* mode bits only, including the AArch32 flag */
		__entry->target_mode	= spsr_el2 & (PSR_MODE_MASK | PSR_MODE32_BIT);
		__entry->hcr_el2	= __vcpu_sys_reg(vcpu, HCR_EL2);
	),

	TP_printk("elr_el2: 0x%lx spsr_el2: 0x%08lx (M: %s) hcr_el2: %lx",
		  __entry->elr_el2, __entry->spsr_el2,
		  __print_symbolic(__entry->target_mode, kvm_mode_names),
		  __entry->hcr_el2)
);

/*
 * Exception injected into a nested guest: records the syndrome, the
 * injection type, and the vCPU's PC/CPSR (and the mode decoded from
 * CPSR) at the point of injection, plus the guest hypervisor's HCR_EL2.
 */
TRACE_EVENT(kvm_inject_nested_exception,
	TP_PROTO(struct kvm_vcpu *vcpu, u64 esr_el2, int type),
	TP_ARGS(vcpu, esr_el2, type),

	TP_STRUCT__entry(
		__field(struct kvm_vcpu *,	vcpu)
		__field(unsigned long,		esr_el2)
		__field(int,			type)
		__field(unsigned long,		spsr_el2)
		__field(unsigned long,		pc)
		__field(unsigned long,		source_mode)
		__field(unsigned long,		hcr_el2)
	),

	TP_fast_assign(
		__entry->vcpu		= vcpu;
		__entry->esr_el2	= esr_el2;
		__entry->type		= type;
		__entry->spsr_el2	= *vcpu_cpsr(vcpu);
		__entry->pc		= *vcpu_pc(vcpu);
		__entry->source_mode	= *vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT);
		__entry->hcr_el2	= __vcpu_sys_reg(vcpu, HCR_EL2);
	),

	TP_printk("%s: esr_el2 0x%lx elr_el2: 0x%lx spsr_el2: 0x%08lx (M: %s) hcr_el2: %lx",
		  __print_symbolic(__entry->type, kvm_exception_type_names),
		  __entry->esr_el2, __entry->pc, __entry->spsr_el2,
		  __print_symbolic(__entry->source_mode, kvm_mode_names),
		  __entry->hcr_el2)
);

#endif /* _TRACE_ARM_ARM64_KVM_H */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace_arm

/* This part must be outside protection */
#include <trace/define_trace.h>