/* SPDX-License-Identifier: GPL-2.0 */
#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_H

#include <linux/tracepoint.h>
#include <asm/vmx.h>
#include <asm/svm.h>
#include <asm/clocksource.h>
#include <asm/pvclock-abi.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm

#ifdef CREATE_TRACE_POINTS
/*
 * Read the guest RIP for tracing.  Reports 0 when the guest's register
 * state is protected (e.g. SEV-ES/TDX), so that no guest state leaks
 * into the trace buffer.
 */
#define tracing_kvm_rip_read(vcpu) ({					\
	typeof(vcpu) __vcpu = vcpu;					\
	__vcpu->arch.guest_state_protected ? 0 : kvm_rip_read(__vcpu);	\
	})
#endif

/*
 * Tracepoint for guest mode entry.
 */
TRACE_EVENT(kvm_entry,
	TP_PROTO(struct kvm_vcpu *vcpu, bool force_immediate_exit),
	TP_ARGS(vcpu, force_immediate_exit),

	TP_STRUCT__entry(
		__field(	unsigned int,	vcpu_id		)
		__field(	unsigned long,	rip		)
		__field(	bool,		immediate_exit	)
		__field(	u32,		intr_info	)
		__field(	u32,		error_code	)
	),

	TP_fast_assign(
		__entry->vcpu_id	= vcpu->vcpu_id;
		__entry->rip		= tracing_kvm_rip_read(vcpu);
		__entry->immediate_exit	= force_immediate_exit;

		/* Vendor hook fills in event-injection info for this entry. */
		kvm_x86_call(get_entry_info)(vcpu, &__entry->intr_info,
					     &__entry->error_code);
	),

	TP_printk("vcpu %u, rip 0x%lx intr_info 0x%08x error_code 0x%08x%s",
		  __entry->vcpu_id, __entry->rip,
		  __entry->intr_info, __entry->error_code,
		  __entry->immediate_exit ? "[immediate exit]" : "")
);

/*
 * Tracepoint for hypercall.
 */
TRACE_EVENT(kvm_hypercall,
	TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1,
		 unsigned long a2, unsigned long a3),
	TP_ARGS(nr, a0, a1, a2, a3),

	TP_STRUCT__entry(
		__field(	unsigned long,	nr		)
		__field(	unsigned long,	a0		)
		__field(	unsigned long,	a1		)
		__field(	unsigned long,	a2		)
		__field(	unsigned long,	a3		)
	),

	TP_fast_assign(
		__entry->nr		= nr;
		__entry->a0		= a0;
		__entry->a1		= a1;
		__entry->a2		= a2;
		__entry->a3		= a3;
	),

	TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx",
		  __entry->nr, __entry->a0, __entry->a1, __entry->a2,
		  __entry->a3)
);

/*
 * Tracepoint for Hyper-V hypercall.
 */
TRACE_EVENT(kvm_hv_hypercall,
	TP_PROTO(__u16 code, bool fast, __u16 var_cnt, __u16 rep_cnt,
		 __u16 rep_idx, __u64 ingpa, __u64 outgpa),
	TP_ARGS(code, fast, var_cnt, rep_cnt, rep_idx, ingpa, outgpa),

	TP_STRUCT__entry(
		__field(	__u16,		rep_cnt		)
		__field(	__u16,		rep_idx		)
		__field(	__u64,		ingpa		)
		__field(	__u64,		outgpa		)
		__field(	__u16,		code		)
		__field(	__u16,		var_cnt		)
		__field(	bool,		fast		)
	),

	TP_fast_assign(
		__entry->rep_cnt	= rep_cnt;
		__entry->rep_idx	= rep_idx;
		__entry->ingpa		= ingpa;
		__entry->outgpa		= outgpa;
		__entry->code		= code;
		__entry->var_cnt	= var_cnt;
		__entry->fast		= fast;
	),

	TP_printk("code 0x%x %s var_cnt 0x%x rep_cnt 0x%x idx 0x%x in 0x%llx out 0x%llx",
		  __entry->code, __entry->fast ? "fast" : "slow",
		  __entry->var_cnt, __entry->rep_cnt, __entry->rep_idx,
		  __entry->ingpa, __entry->outgpa)
);

/*
 * Tracepoint for Hyper-V hypercall completion (logs the return value).
 */
TRACE_EVENT(kvm_hv_hypercall_done,
	TP_PROTO(u64 result),
	TP_ARGS(result),

	TP_STRUCT__entry(
		__field(__u64, result)
	),

	TP_fast_assign(
		__entry->result	= result;
	),

	TP_printk("result 0x%llx", __entry->result)
);

/*
 * Tracepoint for Xen hypercall.
131 */ 132 TRACE_EVENT(kvm_xen_hypercall, 133 TP_PROTO(u8 cpl, unsigned long nr, 134 unsigned long a0, unsigned long a1, unsigned long a2, 135 unsigned long a3, unsigned long a4, unsigned long a5), 136 TP_ARGS(cpl, nr, a0, a1, a2, a3, a4, a5), 137 138 TP_STRUCT__entry( 139 __field(u8, cpl) 140 __field(unsigned long, nr) 141 __field(unsigned long, a0) 142 __field(unsigned long, a1) 143 __field(unsigned long, a2) 144 __field(unsigned long, a3) 145 __field(unsigned long, a4) 146 __field(unsigned long, a5) 147 ), 148 149 TP_fast_assign( 150 __entry->cpl = cpl; 151 __entry->nr = nr; 152 __entry->a0 = a0; 153 __entry->a1 = a1; 154 __entry->a2 = a2; 155 __entry->a3 = a3; 156 __entry->a4 = a4; 157 __entry->a4 = a5; 158 ), 159 160 TP_printk("cpl %d nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx a4 0x%lx a5 %lx", 161 __entry->cpl, __entry->nr, 162 __entry->a0, __entry->a1, __entry->a2, 163 __entry->a3, __entry->a4, __entry->a5) 164 ); 165 166 167 168 /* 169 * Tracepoint for PIO. 170 */ 171 172 #define KVM_PIO_IN 0 173 #define KVM_PIO_OUT 1 174 175 TRACE_EVENT(kvm_pio, 176 TP_PROTO(unsigned int rw, unsigned int port, unsigned int size, 177 unsigned int count, const void *data), 178 TP_ARGS(rw, port, size, count, data), 179 180 TP_STRUCT__entry( 181 __field( unsigned int, rw ) 182 __field( unsigned int, port ) 183 __field( unsigned int, size ) 184 __field( unsigned int, count ) 185 __field( unsigned int, val ) 186 ), 187 188 TP_fast_assign( 189 __entry->rw = rw; 190 __entry->port = port; 191 __entry->size = size; 192 __entry->count = count; 193 if (size == 1) 194 __entry->val = *(unsigned char *)data; 195 else if (size == 2) 196 __entry->val = *(unsigned short *)data; 197 else 198 __entry->val = *(unsigned int *)data; 199 ), 200 201 TP_printk("pio_%s at 0x%x size %d count %d val 0x%x %s", 202 __entry->rw ? "write" : "read", 203 __entry->port, __entry->size, __entry->count, __entry->val, 204 __entry->count > 1 ? "(...)" : "") 205 ); 206 207 /* 208 * Tracepoint for fast mmio. 
 */
TRACE_EVENT(kvm_fast_mmio,
	TP_PROTO(u64 gpa),
	TP_ARGS(gpa),

	TP_STRUCT__entry(
		__field(u64,	gpa)
	),

	TP_fast_assign(
		__entry->gpa		= gpa;
	),

	TP_printk("fast mmio at gpa 0x%llx", __entry->gpa)
);

/*
 * Tracepoint for cpuid.
 */
TRACE_EVENT(kvm_cpuid,
	TP_PROTO(unsigned int function, unsigned int index, unsigned long rax,
		 unsigned long rbx, unsigned long rcx, unsigned long rdx,
		 bool found, bool used_max_basic),
	TP_ARGS(function, index, rax, rbx, rcx, rdx, found, used_max_basic),

	TP_STRUCT__entry(
		__field(	unsigned int,	function	)
		__field(	unsigned int,	index		)
		__field(	unsigned long,	rax		)
		__field(	unsigned long,	rbx		)
		__field(	unsigned long,	rcx		)
		__field(	unsigned long,	rdx		)
		__field(	bool,		found		)
		__field(	bool,		used_max_basic	)
	),

	TP_fast_assign(
		__entry->function	= function;
		__entry->index		= index;
		__entry->rax		= rax;
		__entry->rbx		= rbx;
		__entry->rcx		= rcx;
		__entry->rdx		= rdx;
		__entry->found		= found;
		__entry->used_max_basic	= used_max_basic;
	),

	TP_printk("func %x idx %x rax %lx rbx %lx rcx %lx rdx %lx, cpuid entry %s%s",
		  __entry->function, __entry->index, __entry->rax,
		  __entry->rbx, __entry->rcx, __entry->rdx,
		  __entry->found ? "found" : "not found",
		  __entry->used_max_basic ? ", used max basic" : "")
);

/* Map an APIC register offset to its symbolic name for __print_symbolic(). */
#define AREG(x) { APIC_##x, "APIC_" #x }

#define kvm_trace_symbol_apic						    \
	AREG(ID), AREG(LVR), AREG(TASKPRI), AREG(ARBPRI), AREG(PROCPRI),    \
	AREG(EOI), AREG(RRR), AREG(LDR), AREG(DFR), AREG(SPIV), AREG(ISR),  \
	AREG(TMR), AREG(IRR), AREG(ESR), AREG(ICR), AREG(ICR2), AREG(LVTT), \
	AREG(LVTTHMR), AREG(LVTPC), AREG(LVT0), AREG(LVT1), AREG(LVTERR),   \
	AREG(TMICT), AREG(TMCCT), AREG(TDCR), AREG(SELF_IPI), AREG(EFEAT),  \
	AREG(ECTRL)
/*
 * Tracepoint for apic access.
 */
TRACE_EVENT(kvm_apic,
	TP_PROTO(unsigned int rw, unsigned int reg, u64 val),
	TP_ARGS(rw, reg, val),

	TP_STRUCT__entry(
		__field(	unsigned int,	rw		)
		__field(	unsigned int,	reg		)
		__field(	u64,		val		)
	),

	TP_fast_assign(
		__entry->rw		= rw;
		__entry->reg		= reg;
		__entry->val		= val;
	),

	TP_printk("apic_%s %s = 0x%llx",
		  __entry->rw ? "write" : "read",
		  __print_symbolic(__entry->reg, kvm_trace_symbol_apic),
		  __entry->val)
);

#define trace_kvm_apic_read(reg, val)		trace_kvm_apic(0, reg, val)
#define trace_kvm_apic_write(reg, val)		trace_kvm_apic(1, reg, val)

#define KVM_ISA_VMX   1
#define KVM_ISA_SVM   2

/*
 * Decode an exit reason symbolically for the given ISA.  VMX exit reasons
 * keep flag bits above bit 15, which are printed separately as flags.
 */
#define kvm_print_exit_reason(exit_reason, isa)				\
	(isa == KVM_ISA_VMX) ?						\
	__print_symbolic(exit_reason & 0xffff, VMX_EXIT_REASONS) :	\
	__print_symbolic(exit_reason, SVM_EXIT_REASONS),		\
	(isa == KVM_ISA_VMX && exit_reason & ~0xffff) ? " " : "",	\
	(isa == KVM_ISA_VMX) ?						\
	__print_flags(exit_reason & ~0xffff, " ", VMX_EXIT_REASON_FLAGS) : ""

/* Shared body for kvm_exit and kvm_nested_vmexit tracepoints. */
#define TRACE_EVENT_KVM_EXIT(name)					     \
TRACE_EVENT(name,							     \
	TP_PROTO(struct kvm_vcpu *vcpu, u32 isa),			     \
	TP_ARGS(vcpu, isa),						     \
									     \
	TP_STRUCT__entry(						     \
		__field(	unsigned int,	exit_reason	)	     \
		__field(	unsigned long,	guest_rip	)	     \
		__field(	u32,	        isa             )	     \
		__field(	u64,	        info1           )	     \
		__field(	u64,	        info2           )	     \
		__field(	u32,	        intr_info	)	     \
		__field(	u32,	        error_code	)	     \
		__field(	unsigned int,	vcpu_id         )	     \
		__field(	u64,		requests        )	     \
	),								     \
									     \
	TP_fast_assign(							     \
		__entry->guest_rip	= tracing_kvm_rip_read(vcpu);	     \
		__entry->isa            = isa;				     \
		__entry->vcpu_id        = vcpu->vcpu_id;		     \
		__entry->requests       = READ_ONCE(vcpu->requests);	     \
		kvm_x86_call(get_exit_info)(vcpu,			     \
					    &__entry->exit_reason,	     \
					    &__entry->info1,		     \
					    &__entry->info2,		     \
					    &__entry->intr_info,	     \
					    &__entry->error_code);	     \
	),								     \
									     \
	TP_printk("vcpu %u reason %s%s%s rip 0x%lx info1 0x%016llx "	     \
		  "info2 0x%016llx intr_info 0x%08x error_code 0x%08x "	     \
		  "requests 0x%016llx",					     \
		  __entry->vcpu_id,					     \
		  kvm_print_exit_reason(__entry->exit_reason, __entry->isa), \
		  __entry->guest_rip, __entry->info1, __entry->info2,	     \
		  __entry->intr_info, __entry->error_code,		     \
		  __entry->requests)					     \
	)

/*
 * Tracepoint for kvm guest exit:
 */
TRACE_EVENT_KVM_EXIT(kvm_exit);

/*
 * Tracepoint for kvm interrupt injection:
 */
TRACE_EVENT(kvm_inj_virq,
	TP_PROTO(unsigned int vector, bool soft, bool reinjected),
	TP_ARGS(vector, soft, reinjected),

	TP_STRUCT__entry(
		__field(	unsigned int,	vector		)
		__field(	bool,		soft		)
		__field(	bool,		reinjected	)
	),

	TP_fast_assign(
		__entry->vector		= vector;
		__entry->soft		= soft;
		__entry->reinjected	= reinjected;
	),

	TP_printk("%s 0x%x%s",
		  __entry->soft ? "Soft/INTn" : "IRQ", __entry->vector,
		  __entry->reinjected ? " [reinjected]" : "")
);

/* Map an exception vector to its mnemonic (e.g. "#GP"). */
#define EXS(x) { x##_VECTOR, "#" #x }

#define kvm_trace_sym_exc						\
	EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM),	\
	EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF),		\
	EXS(MF), EXS(AC), EXS(MC)

/*
 * Tracepoint for kvm exception injection:
 */
TRACE_EVENT(kvm_inj_exception,
	TP_PROTO(unsigned exception, bool has_error, unsigned error_code,
		 bool reinjected),
	TP_ARGS(exception, has_error, error_code, reinjected),

	TP_STRUCT__entry(
		__field(	u8,	exception	)
		__field(	u8,	has_error	)
		__field(	u32,	error_code	)
		__field(	bool,	reinjected	)
	),

	TP_fast_assign(
		__entry->exception	= exception;
		__entry->has_error	= has_error;
		__entry->error_code	= error_code;
		__entry->reinjected	= reinjected;
	),

	TP_printk("%s%s%s%s%s",
		  __print_symbolic(__entry->exception, kvm_trace_sym_exc),
		  /* FIXME: don't print error_code if it's not present */
		  !__entry->has_error ? "" : " (",
		  !__entry->has_error ? "" : __print_symbolic(__entry->error_code, { }),
		  !__entry->has_error ? "" : ")",
		  __entry->reinjected ? " [reinjected]" : "")
);

/*
 * Tracepoint for page fault.
 */
TRACE_EVENT(kvm_page_fault,
	TP_PROTO(struct kvm_vcpu *vcpu, u64 fault_address, u64 error_code),
	TP_ARGS(vcpu, fault_address, error_code),

	TP_STRUCT__entry(
		__field(	unsigned int,	vcpu_id		)
		__field(	unsigned long,	guest_rip	)
		__field(	u64,		fault_address	)
		__field(	u64,		error_code	)
	),

	TP_fast_assign(
		__entry->vcpu_id	= vcpu->vcpu_id;
		__entry->guest_rip	= tracing_kvm_rip_read(vcpu);
		__entry->fault_address	= fault_address;
		__entry->error_code	= error_code;
	),

	TP_printk("vcpu %u rip 0x%lx address 0x%016llx error_code 0x%llx",
		  __entry->vcpu_id, __entry->guest_rip,
		  __entry->fault_address, __entry->error_code)
);

/*
 * Tracepoint for guest MSR access.
 */
TRACE_EVENT(kvm_msr,
	TP_PROTO(unsigned write, u32 ecx, u64 data, bool exception),
	TP_ARGS(write, ecx, data, exception),

	TP_STRUCT__entry(
		__field(	unsigned,	write		)
		__field(	u32,		ecx		)
		__field(	u64,		data		)
		__field(	u8,		exception	)
	),

	TP_fast_assign(
		__entry->write		= write;
		__entry->ecx		= ecx;
		__entry->data		= data;
		__entry->exception	= exception;
	),

	TP_printk("msr_%s %x = 0x%llx%s",
		  __entry->write ? "write" : "read",
		  __entry->ecx, __entry->data,
		  __entry->exception ? " (#GP)" : "")
);

#define trace_kvm_msr_read(ecx, data)      trace_kvm_msr(0, ecx, data, false)
#define trace_kvm_msr_write(ecx, data)     trace_kvm_msr(1, ecx, data, false)
#define trace_kvm_msr_read_ex(ecx)         trace_kvm_msr(0, ecx, 0, true)
#define trace_kvm_msr_write_ex(ecx, data)  trace_kvm_msr(1, ecx, data, true)

/*
 * Tracepoint for guest CR access.
 */
TRACE_EVENT(kvm_cr,
	TP_PROTO(unsigned int rw, unsigned int cr, unsigned long val),
	TP_ARGS(rw, cr, val),

	TP_STRUCT__entry(
		__field(	unsigned int,	rw		)
		__field(	unsigned int,	cr		)
		__field(	unsigned long,	val		)
	),

	TP_fast_assign(
		__entry->rw		= rw;
		__entry->cr		= cr;
		__entry->val		= val;
	),

	TP_printk("cr_%s %x = 0x%lx",
		  __entry->rw ? "write" : "read",
		  __entry->cr, __entry->val)
);

#define trace_kvm_cr_read(cr, val)		trace_kvm_cr(0, cr, val)
#define trace_kvm_cr_write(cr, val)		trace_kvm_cr(1, cr, val)

/*
 * Tracepoint for a PIC (i8259) IRQ being raised; notes whether the IRQ
 * was coalesced with an already-pending one.
 */
TRACE_EVENT(kvm_pic_set_irq,
	    TP_PROTO(__u8 chip, __u8 pin, __u8 elcr, __u8 imr, bool coalesced),
	    TP_ARGS(chip, pin, elcr, imr, coalesced),

	TP_STRUCT__entry(
		__field(	__u8,		chip		)
		__field(	__u8,		pin		)
		__field(	__u8,		elcr		)
		__field(	__u8,		imr		)
		__field(	bool,		coalesced	)
	),

	TP_fast_assign(
		__entry->chip		= chip;
		__entry->pin		= pin;
		__entry->elcr		= elcr;
		__entry->imr		= imr;
		__entry->coalesced	= coalesced;
	),

	TP_printk("chip %u pin %u (%s%s)%s",
		  __entry->chip, __entry->pin,
		  (__entry->elcr & (1 << __entry->pin)) ? "level":"edge",
		  (__entry->imr & (1 << __entry->pin)) ? "|masked":"",
		  __entry->coalesced ? " (coalesced)" : "")
);

#define kvm_apic_dst_shorthand		\
	{0x0, "dst"},			\
	{0x1, "self"},			\
	{0x2, "all"},			\
	{0x3, "all-but-self"}

/*
 * Tracepoint for an IPI written to the local APIC ICR.
 */
TRACE_EVENT(kvm_apic_ipi,
	    TP_PROTO(__u32 icr_low, __u32 dest_id),
	    TP_ARGS(icr_low, dest_id),

	TP_STRUCT__entry(
		__field(	__u32,		icr_low		)
		__field(	__u32,		dest_id		)
	),

	TP_fast_assign(
		__entry->icr_low	= icr_low;
		__entry->dest_id	= dest_id;
	),

	TP_printk("dst %x vec %u (%s|%s|%s|%s|%s)",
		  __entry->dest_id, (u8)__entry->icr_low,
		  __print_symbolic((__entry->icr_low >> 8 & 0x7),
				   kvm_deliver_mode),
		  (__entry->icr_low & (1<<11)) ? "logical" : "physical",
		  (__entry->icr_low & (1<<14)) ? "assert" : "de-assert",
		  (__entry->icr_low & (1<<15)) ? "level" : "edge",
		  __print_symbolic((__entry->icr_low >> 18 & 0x3),
				   kvm_apic_dst_shorthand))
);

/*
 * Tracepoint for an interrupt accepted into a local APIC.
 */
TRACE_EVENT(kvm_apic_accept_irq,
	    TP_PROTO(__u32 apicid, __u16 dm, __u16 tm, __u8 vec),
	    TP_ARGS(apicid, dm, tm, vec),

	TP_STRUCT__entry(
		__field(	__u32,		apicid		)
		__field(	__u16,		dm		)
		__field(	__u16,		tm		)
		__field(	__u8,		vec		)
	),

	TP_fast_assign(
		__entry->apicid		= apicid;
		__entry->dm		= dm;
		__entry->tm		= tm;
		__entry->vec		= vec;
	),

	TP_printk("apicid %x vec %u (%s|%s)",
		  __entry->apicid, __entry->vec,
		  __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode),
		  __entry->tm ? "level" : "edge")
);

TRACE_EVENT(kvm_eoi,
	    TP_PROTO(struct kvm_lapic *apic, int vector),
	    TP_ARGS(apic, vector),

	TP_STRUCT__entry(
		__field(	__u32,		apicid		)
		__field(	int,		vector		)
	),

	TP_fast_assign(
		__entry->apicid		= apic->vcpu->vcpu_id;
		__entry->vector		= vector;
	),

	TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector)
);

TRACE_EVENT(kvm_pv_eoi,
	    TP_PROTO(struct kvm_lapic *apic, int vector),
	    TP_ARGS(apic, vector),

	TP_STRUCT__entry(
		__field(	__u32,		apicid		)
		__field(	int,		vector		)
	),

	TP_fast_assign(
		__entry->apicid		= apic->vcpu->vcpu_id;
		__entry->vector		= vector;
	),

	TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector)
);

/*
 * Tracepoint for nested VMRUN
 */
TRACE_EVENT(kvm_nested_vmenter,
	    TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl,
		     __u32 event_inj, bool tdp_enabled, __u64 guest_tdp_pgd,
		     __u64 guest_cr3, __u32 isa),
	    TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, tdp_enabled,
		    guest_tdp_pgd, guest_cr3, isa),

	TP_STRUCT__entry(
		__field(	__u64,		rip		)
		__field(	__u64,		vmcb		)
		__field(	__u64,		nested_rip	)
		__field(	__u32,		int_ctl		)
		__field(	__u32,		event_inj	)
		__field(	bool,		tdp_enabled	)
		__field(	__u64,		guest_pgd	)
		__field(	__u32,		isa		)
	),

	TP_fast_assign(
		__entry->rip		= rip;
		__entry->vmcb		= vmcb;
		__entry->nested_rip	= nested_rip;
		__entry->int_ctl	= int_ctl;
		__entry->event_inj	= event_inj;
		__entry->tdp_enabled	= tdp_enabled;
		/* With TDP the L1 EPTP/nCR3 is traced, else the guest CR3. */
		__entry->guest_pgd	= tdp_enabled ? guest_tdp_pgd : guest_cr3;
		__entry->isa		= isa;
	),

	TP_printk("rip: 0x%016llx %s: 0x%016llx nested_rip: 0x%016llx "
		  "int_ctl: 0x%08x event_inj: 0x%08x nested_%s=%s %s: 0x%016llx",
		  __entry->rip,
		  __entry->isa == KVM_ISA_VMX ? "vmcs" : "vmcb",
		  __entry->vmcb,
		  __entry->nested_rip,
		  __entry->int_ctl,
		  __entry->event_inj,
		  __entry->isa == KVM_ISA_VMX ? "ept" : "npt",
		  __entry->tdp_enabled ? "y" : "n",
		  !__entry->tdp_enabled ? "guest_cr3" :
		  __entry->isa == KVM_ISA_VMX ? "nested_eptp" : "nested_cr3",
		  __entry->guest_pgd)
);

/*
 * Tracepoint for the nested guest's intercept bitmaps (SVM).
 */
TRACE_EVENT(kvm_nested_intercepts,
	    TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions,
		     __u32 intercept1, __u32 intercept2, __u32 intercept3),
	    TP_ARGS(cr_read, cr_write, exceptions, intercept1,
		    intercept2, intercept3),

	TP_STRUCT__entry(
		__field(	__u16,		cr_read		)
		__field(	__u16,		cr_write	)
		__field(	__u32,		exceptions	)
		__field(	__u32,		intercept1	)
		__field(	__u32,		intercept2	)
		__field(	__u32,		intercept3	)
	),

	TP_fast_assign(
		__entry->cr_read	= cr_read;
		__entry->cr_write	= cr_write;
		__entry->exceptions	= exceptions;
		__entry->intercept1	= intercept1;
		__entry->intercept2	= intercept2;
		__entry->intercept3	= intercept3;
	),

	TP_printk("cr_read: %04x cr_write: %04x excp: %08x "
		  "intercepts: %08x %08x %08x",
		  __entry->cr_read, __entry->cr_write, __entry->exceptions,
		  __entry->intercept1, __entry->intercept2, __entry->intercept3)
);
/*
 * Tracepoint for #VMEXIT while nested
 */
TRACE_EVENT_KVM_EXIT(kvm_nested_vmexit);

/*
 * Tracepoint for #VMEXIT reinjected to the guest
 */
TRACE_EVENT(kvm_nested_vmexit_inject,
	    TP_PROTO(__u32 exit_code,
		     __u64 exit_info1, __u64 exit_info2,
		     __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa),
	    TP_ARGS(exit_code, exit_info1, exit_info2,
		    exit_int_info, exit_int_info_err, isa),

	TP_STRUCT__entry(
		__field(	__u32,		exit_code		)
		__field(	__u64,		exit_info1		)
		__field(	__u64,		exit_info2		)
		__field(	__u32,		exit_int_info		)
		__field(	__u32,		exit_int_info_err	)
		__field(	__u32,		isa			)
	),

	TP_fast_assign(
		__entry->exit_code		= exit_code;
		__entry->exit_info1		= exit_info1;
		__entry->exit_info2		= exit_info2;
		__entry->exit_int_info		= exit_int_info;
		__entry->exit_int_info_err	= exit_int_info_err;
		__entry->isa			= isa;
	),

	TP_printk("reason: %s%s%s ext_inf1: 0x%016llx "
		  "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
		  kvm_print_exit_reason(__entry->exit_code, __entry->isa),
		  __entry->exit_info1, __entry->exit_info2,
		  __entry->exit_int_info, __entry->exit_int_info_err)
);

/*
 * Tracepoint for nested #vmexit because of interrupt pending
 */
TRACE_EVENT(kvm_nested_intr_vmexit,
	    TP_PROTO(__u64 rip),
	    TP_ARGS(rip),

	TP_STRUCT__entry(
		__field(	__u64,	rip	)
	),

	TP_fast_assign(
		__entry->rip	=	rip
	),

	TP_printk("rip: 0x%016llx", __entry->rip)
);

/*
 * Tracepoint for the INVLPGA instruction (the previous copy-pasted
 * "nested #vmexit" comment was wrong).
 */
TRACE_EVENT(kvm_invlpga,
	    TP_PROTO(__u64 rip, unsigned int asid, u64 address),
	    TP_ARGS(rip, asid, address),

	TP_STRUCT__entry(
		__field(	__u64,		rip	)
		__field(	unsigned int,	asid	)
		__field(	__u64,		address	)
	),

	TP_fast_assign(
		__entry->rip		=	rip;
		__entry->asid		=	asid;
		__entry->address	=	address;
	),

	TP_printk("rip: 0x%016llx asid: %u address: 0x%016llx",
		  __entry->rip, __entry->asid, __entry->address)
);

/*
 * Tracepoint for the SKINIT instruction (the previous copy-pasted
 * "nested #vmexit" comment was wrong).
 */
TRACE_EVENT(kvm_skinit,
	    TP_PROTO(__u64 rip, __u32 slb),
	    TP_ARGS(rip, slb),

	TP_STRUCT__entry(
		__field(	__u64,	rip	)
		__field(	__u32,	slb	)
	),

	TP_fast_assign(
		__entry->rip		=	rip;
		__entry->slb		=	slb;
	),

	TP_printk("rip: 0x%016llx slb: 0x%08x",
		  __entry->rip, __entry->slb)
);

#define KVM_EMUL_INSN_F_CR0_PE (1 << 0)
#define KVM_EMUL_INSN_F_EFL_VM (1 << 1)
#define KVM_EMUL_INSN_F_CS_D   (1 << 2)
#define KVM_EMUL_INSN_F_CS_L   (1 << 3)

#define kvm_trace_symbol_emul_flags	        \
	{ 0,   			    "real" },	\
	{ KVM_EMUL_INSN_F_CR0_PE		\
	  | KVM_EMUL_INSN_F_EFL_VM, "vm16" },	\
	{ KVM_EMUL_INSN_F_CR0_PE,   "prot16" },	\
	{ KVM_EMUL_INSN_F_CR0_PE		\
	  | KVM_EMUL_INSN_F_CS_D,   "prot32" },	\
	{ KVM_EMUL_INSN_F_CR0_PE		\
	  | KVM_EMUL_INSN_F_CS_L,   "prot64" }

/* Translate an X86EMUL_MODE_* value into the flag bits traced above. */
#define kei_decode_mode(mode) ({			\
	u8 flags = 0xff;				\
	switch (mode) {					\
	case X86EMUL_MODE_REAL:				\
		flags = 0;				\
		break;					\
	case X86EMUL_MODE_VM86:				\
		flags = KVM_EMUL_INSN_F_EFL_VM;		\
		break;					\
	case X86EMUL_MODE_PROT16:			\
		flags = KVM_EMUL_INSN_F_CR0_PE;		\
		break;					\
	case X86EMUL_MODE_PROT32:			\
		flags = KVM_EMUL_INSN_F_CR0_PE		\
			| KVM_EMUL_INSN_F_CS_D;		\
		break;					\
	case X86EMUL_MODE_PROT64:			\
		flags = KVM_EMUL_INSN_F_CR0_PE		\
			| KVM_EMUL_INSN_F_CS_L;		\
		break;					\
	}						\
	flags;						\
	})

/*
 * Tracepoint for an instruction going through the x86 emulator.
 */
TRACE_EVENT(kvm_emulate_insn,
	TP_PROTO(struct kvm_vcpu *vcpu, __u8 failed),
	TP_ARGS(vcpu, failed),

	TP_STRUCT__entry(
		__field(    __u64, rip                       )
		__field(    __u32, csbase                    )
		__field(    __u8,  len                       )
		__array(    __u8,  insn, X86_MAX_INSTRUCTION_LENGTH )
		__field(    __u8,  flags                     )
		__field(    __u8,  failed                    )
	),

	TP_fast_assign(
		__entry->csbase = kvm_x86_call(get_segment_base)(vcpu,
								 VCPU_SREG_CS);
		/* Length of the bytes already fetched into the decode cache. */
		__entry->len = vcpu->arch.emulate_ctxt->fetch.ptr
			       - vcpu->arch.emulate_ctxt->fetch.data;
		/* _eip points past the fetched bytes; back up to the start. */
		__entry->rip = vcpu->arch.emulate_ctxt->_eip - __entry->len;
		memcpy(__entry->insn,
		       vcpu->arch.emulate_ctxt->fetch.data,
		       X86_MAX_INSTRUCTION_LENGTH);
		__entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt->mode);
		__entry->failed = failed;
	),

	TP_printk("%x:%llx:%s (%s)%s",
		  __entry->csbase, __entry->rip,
		  __print_hex(__entry->insn, __entry->len),
		  __print_symbolic(__entry->flags,
				   kvm_trace_symbol_emul_flags),
		  __entry->failed ? " failed" : ""
		  )
	);

#define trace_kvm_emulate_insn_start(vcpu) trace_kvm_emulate_insn(vcpu, 0)
#define trace_kvm_emulate_insn_failed(vcpu) trace_kvm_emulate_insn(vcpu, 1)

/*
 * Tracepoint for matching an MMIO access against the cached GVA/GPA pair.
 */
TRACE_EVENT(
	vcpu_match_mmio,
	TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match),
	TP_ARGS(gva, gpa, write, gpa_match),

	TP_STRUCT__entry(
		__field(gva_t, gva)
		__field(gpa_t, gpa)
		__field(bool, write)
		__field(bool, gpa_match)
		),

	TP_fast_assign(
		__entry->gva = gva;
		__entry->gpa = gpa;
		__entry->write = write;
		__entry->gpa_match = gpa_match
		),

	TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa,
		  __entry->write ? "Write" : "Read",
		  __entry->gpa_match ? "GPA" : "GVA")
);

TRACE_EVENT(kvm_write_tsc_offset,
	TP_PROTO(unsigned int vcpu_id, __u64 previous_tsc_offset,
		 __u64 next_tsc_offset),
	TP_ARGS(vcpu_id, previous_tsc_offset, next_tsc_offset),

	TP_STRUCT__entry(
		__field( unsigned int,	vcpu_id				)
		__field(	__u64,	previous_tsc_offset		)
		__field(	__u64,	next_tsc_offset			)
	),

	TP_fast_assign(
		__entry->vcpu_id		= vcpu_id;
		__entry->previous_tsc_offset	= previous_tsc_offset;
		__entry->next_tsc_offset	= next_tsc_offset;
	),

	TP_printk("vcpu=%u prev=%llu next=%llu", __entry->vcpu_id,
		  __entry->previous_tsc_offset, __entry->next_tsc_offset)
);

#ifdef CONFIG_X86_64

#define host_clocks					\
	{VDSO_CLOCKMODE_NONE, "none"},			\
	{VDSO_CLOCKMODE_TSC,  "tsc"}			\

TRACE_EVENT(kvm_update_master_clock,
	TP_PROTO(bool use_master_clock, unsigned int host_clock, bool offset_matched),
	TP_ARGS(use_master_clock, host_clock, offset_matched),

	TP_STRUCT__entry(
		__field(		bool,	use_master_clock	)
		__field(	unsigned int,	host_clock		)
		__field(		bool,	offset_matched		)
	),

	TP_fast_assign(
		__entry->use_master_clock	= use_master_clock;
		__entry->host_clock		= host_clock;
		__entry->offset_matched		= offset_matched;
	),

	TP_printk("masterclock %d hostclock %s offsetmatched %u",
		  __entry->use_master_clock,
		  __print_symbolic(__entry->host_clock, host_clocks),
		  __entry->offset_matched)
);

TRACE_EVENT(kvm_track_tsc,
	TP_PROTO(unsigned int vcpu_id, unsigned int nr_matched,
		 unsigned int online_vcpus, bool use_master_clock,
		 unsigned int host_clock),
	TP_ARGS(vcpu_id, nr_matched, online_vcpus, use_master_clock,
		host_clock),

	TP_STRUCT__entry(
		__field(	unsigned int,	vcpu_id			)
		__field(	unsigned int,	nr_vcpus_matched_tsc	)
		__field(	unsigned int,	online_vcpus		)
		__field(	bool,		use_master_clock	)
		__field(	unsigned int,	host_clock		)
	),

	TP_fast_assign(
		__entry->vcpu_id		= vcpu_id;
		__entry->nr_vcpus_matched_tsc	= nr_matched;
		__entry->online_vcpus		= online_vcpus;
		__entry->use_master_clock	= use_master_clock;
		__entry->host_clock		= host_clock;
	),

	TP_printk("vcpu_id %u masterclock %u offsetmatched %u nr_online %u"
		  " hostclock %s",
		  __entry->vcpu_id, __entry->use_master_clock,
		  __entry->nr_vcpus_matched_tsc, __entry->online_vcpus,
		  __print_symbolic(__entry->host_clock, host_clocks))
);

#endif /* CONFIG_X86_64 */

/*
 * Tracepoint for PML full VMEXIT.
 */
TRACE_EVENT(kvm_pml_full,
	TP_PROTO(unsigned int vcpu_id),
	TP_ARGS(vcpu_id),

	TP_STRUCT__entry(
		__field(	unsigned int,	vcpu_id			)
	),

	TP_fast_assign(
		__entry->vcpu_id		= vcpu_id;
	),

	TP_printk("vcpu %d: PML full", __entry->vcpu_id)
);

TRACE_EVENT(kvm_ple_window_update,
	TP_PROTO(unsigned int vcpu_id, unsigned int new, unsigned int old),
	TP_ARGS(vcpu_id, new, old),

	TP_STRUCT__entry(
		__field(	unsigned int,	vcpu_id		)
		__field(	unsigned int,	new		)
		__field(	unsigned int,	old		)
	),

	TP_fast_assign(
		__entry->vcpu_id	= vcpu_id;
		__entry->new		= new;
		__entry->old		= old;
	),

	TP_printk("vcpu %u old %u new %u (%s)",
		  __entry->vcpu_id, __entry->old, __entry->new,
		  __entry->old < __entry->new ? "growed" : "shrinked")
);

TRACE_EVENT(kvm_pvclock_update,
	TP_PROTO(unsigned int vcpu_id, struct pvclock_vcpu_time_info *pvclock),
	TP_ARGS(vcpu_id, pvclock),

	TP_STRUCT__entry(
		__field(	unsigned int,	vcpu_id			)
		__field(	__u32,		version			)
		__field(	__u64,		tsc_timestamp		)
		__field(	__u64,		system_time		)
		__field(	__u32,		tsc_to_system_mul	)
		__field(	__s8,		tsc_shift		)
		__field(	__u8,		flags			)
	),

	TP_fast_assign(
		__entry->vcpu_id	   = vcpu_id;
		__entry->version	   = pvclock->version;
		__entry->tsc_timestamp	   = pvclock->tsc_timestamp;
		__entry->system_time	   = pvclock->system_time;
		__entry->tsc_to_system_mul = pvclock->tsc_to_system_mul;
		__entry->tsc_shift	   = pvclock->tsc_shift;
		__entry->flags		   = pvclock->flags;
	),

	TP_printk("vcpu_id %u, pvclock { version %u, tsc_timestamp 0x%llx, "
		  "system_time 0x%llx, tsc_to_system_mul 0x%x, tsc_shift %d, "
		  "flags 0x%x }",
		  __entry->vcpu_id,
		  __entry->version,
		  __entry->tsc_timestamp,
		  __entry->system_time,
		  __entry->tsc_to_system_mul,
		  __entry->tsc_shift,
		  __entry->flags)
);

TRACE_EVENT(kvm_wait_lapic_expire,
	TP_PROTO(unsigned int vcpu_id, s64 delta),
	TP_ARGS(vcpu_id, delta),

	TP_STRUCT__entry(
		__field(	unsigned int,	vcpu_id		)
		__field(	s64,		delta		)
	),

	TP_fast_assign(
		__entry->vcpu_id	   = vcpu_id;
		__entry->delta             = delta;
	),

	TP_printk("vcpu %u: delta %lld (%s)",
		  __entry->vcpu_id,
		  __entry->delta,
		  __entry->delta < 0 ? "early" : "late")
);

TRACE_EVENT(kvm_smm_transition,
	TP_PROTO(unsigned int vcpu_id, u64 smbase, bool entering),
	TP_ARGS(vcpu_id, smbase, entering),

	TP_STRUCT__entry(
		__field(	unsigned int,	vcpu_id		)
		__field(	u64,		smbase		)
		__field(	bool,		entering	)
	),

	TP_fast_assign(
		__entry->vcpu_id	= vcpu_id;
		__entry->smbase		= smbase;
		__entry->entering	= entering;
	),

	TP_printk("vcpu %u: %s SMM, smbase 0x%llx",
		  __entry->vcpu_id,
		  __entry->entering ? "entering" : "leaving",
		  __entry->smbase)
);

/*
 * Tracepoint for VT-d posted-interrupts and AMD-Vi Guest Virtual APIC.
 */
TRACE_EVENT(kvm_pi_irte_update,
	TP_PROTO(unsigned int host_irq, unsigned int vcpu_id,
		 unsigned int gsi, unsigned int gvec,
		 u64 pi_desc_addr, bool set),
	TP_ARGS(host_irq, vcpu_id, gsi, gvec, pi_desc_addr, set),

	TP_STRUCT__entry(
		__field(	unsigned int,	host_irq	)
		__field(	unsigned int,	vcpu_id		)
		__field(	unsigned int,	gsi		)
		__field(	unsigned int,	gvec		)
		__field(	u64,		pi_desc_addr	)
		__field(	bool,		set		)
	),

	TP_fast_assign(
		__entry->host_irq	= host_irq;
		__entry->vcpu_id	= vcpu_id;
		__entry->gsi		= gsi;
		__entry->gvec		= gvec;
		__entry->pi_desc_addr	= pi_desc_addr;
		__entry->set		= set;
	),

	TP_printk("PI is %s for irq %u, vcpu %u, gsi: 0x%x, "
		  "gvec: 0x%x, pi_desc_addr: 0x%llx",
		  __entry->set ? "enabled and being updated" : "disabled",
		  __entry->host_irq,
		  __entry->vcpu_id,
		  __entry->gsi,
		  __entry->gvec,
		  __entry->pi_desc_addr)
);

/*
 * Tracepoint for kvm_hv_notify_acked_sint.
 */
TRACE_EVENT(kvm_hv_notify_acked_sint,
	TP_PROTO(int vcpu_id, u32 sint),
	TP_ARGS(vcpu_id, sint),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(u32, sint)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->sint = sint;
	),

	TP_printk("vcpu_id %d sint %u", __entry->vcpu_id, __entry->sint)
);

/*
 * Tracepoint for synic_set_irq.
 */
TRACE_EVENT(kvm_hv_synic_set_irq,
	TP_PROTO(int vcpu_id, u32 sint, int vector, int ret),
	TP_ARGS(vcpu_id, sint, vector, ret),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(u32, sint)
		__field(int, vector)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->sint = sint;
		__entry->vector = vector;
		__entry->ret = ret;
	),

	TP_printk("vcpu_id %d sint %u vector %d ret %d",
		  __entry->vcpu_id, __entry->sint, __entry->vector,
		  __entry->ret)
);

/*
 * Tracepoint for kvm_hv_synic_send_eoi.
 */
TRACE_EVENT(kvm_hv_synic_send_eoi,
	TP_PROTO(int vcpu_id, int vector),
	TP_ARGS(vcpu_id, vector),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		/*
		 * NOTE(review): sint and ret are declared but never assigned
		 * or printed here — they waste ring-buffer space; presumably
		 * left over from a copy of kvm_hv_synic_set_irq.  Confirm
		 * before removing (would change the event's binary format).
		 */
		__field(u32, sint)
		__field(int, vector)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->vector	= vector;
	),

	TP_printk("vcpu_id %d vector %d", __entry->vcpu_id, __entry->vector)
);

/*
 * Tracepoint for synic_set_msr.
1202 */ 1203 TRACE_EVENT(kvm_hv_synic_set_msr, 1204 TP_PROTO(int vcpu_id, u32 msr, u64 data, bool host), 1205 TP_ARGS(vcpu_id, msr, data, host), 1206 1207 TP_STRUCT__entry( 1208 __field(int, vcpu_id) 1209 __field(u32, msr) 1210 __field(u64, data) 1211 __field(bool, host) 1212 ), 1213 1214 TP_fast_assign( 1215 __entry->vcpu_id = vcpu_id; 1216 __entry->msr = msr; 1217 __entry->data = data; 1218 __entry->host = host 1219 ), 1220 1221 TP_printk("vcpu_id %d msr 0x%x data 0x%llx host %d", 1222 __entry->vcpu_id, __entry->msr, __entry->data, __entry->host) 1223 ); 1224 1225 /* 1226 * Tracepoint for stimer_set_config. 1227 */ 1228 TRACE_EVENT(kvm_hv_stimer_set_config, 1229 TP_PROTO(int vcpu_id, int timer_index, u64 config, bool host), 1230 TP_ARGS(vcpu_id, timer_index, config, host), 1231 1232 TP_STRUCT__entry( 1233 __field(int, vcpu_id) 1234 __field(int, timer_index) 1235 __field(u64, config) 1236 __field(bool, host) 1237 ), 1238 1239 TP_fast_assign( 1240 __entry->vcpu_id = vcpu_id; 1241 __entry->timer_index = timer_index; 1242 __entry->config = config; 1243 __entry->host = host; 1244 ), 1245 1246 TP_printk("vcpu_id %d timer %d config 0x%llx host %d", 1247 __entry->vcpu_id, __entry->timer_index, __entry->config, 1248 __entry->host) 1249 ); 1250 1251 /* 1252 * Tracepoint for stimer_set_count. 1253 */ 1254 TRACE_EVENT(kvm_hv_stimer_set_count, 1255 TP_PROTO(int vcpu_id, int timer_index, u64 count, bool host), 1256 TP_ARGS(vcpu_id, timer_index, count, host), 1257 1258 TP_STRUCT__entry( 1259 __field(int, vcpu_id) 1260 __field(int, timer_index) 1261 __field(u64, count) 1262 __field(bool, host) 1263 ), 1264 1265 TP_fast_assign( 1266 __entry->vcpu_id = vcpu_id; 1267 __entry->timer_index = timer_index; 1268 __entry->count = count; 1269 __entry->host = host; 1270 ), 1271 1272 TP_printk("vcpu_id %d timer %d count %llu host %d", 1273 __entry->vcpu_id, __entry->timer_index, __entry->count, 1274 __entry->host) 1275 ); 1276 1277 /* 1278 * Tracepoint for stimer_start(periodic timer case). 
1279 */ 1280 TRACE_EVENT(kvm_hv_stimer_start_periodic, 1281 TP_PROTO(int vcpu_id, int timer_index, u64 time_now, u64 exp_time), 1282 TP_ARGS(vcpu_id, timer_index, time_now, exp_time), 1283 1284 TP_STRUCT__entry( 1285 __field(int, vcpu_id) 1286 __field(int, timer_index) 1287 __field(u64, time_now) 1288 __field(u64, exp_time) 1289 ), 1290 1291 TP_fast_assign( 1292 __entry->vcpu_id = vcpu_id; 1293 __entry->timer_index = timer_index; 1294 __entry->time_now = time_now; 1295 __entry->exp_time = exp_time; 1296 ), 1297 1298 TP_printk("vcpu_id %d timer %d time_now %llu exp_time %llu", 1299 __entry->vcpu_id, __entry->timer_index, __entry->time_now, 1300 __entry->exp_time) 1301 ); 1302 1303 /* 1304 * Tracepoint for stimer_start(one-shot timer case). 1305 */ 1306 TRACE_EVENT(kvm_hv_stimer_start_one_shot, 1307 TP_PROTO(int vcpu_id, int timer_index, u64 time_now, u64 count), 1308 TP_ARGS(vcpu_id, timer_index, time_now, count), 1309 1310 TP_STRUCT__entry( 1311 __field(int, vcpu_id) 1312 __field(int, timer_index) 1313 __field(u64, time_now) 1314 __field(u64, count) 1315 ), 1316 1317 TP_fast_assign( 1318 __entry->vcpu_id = vcpu_id; 1319 __entry->timer_index = timer_index; 1320 __entry->time_now = time_now; 1321 __entry->count = count; 1322 ), 1323 1324 TP_printk("vcpu_id %d timer %d time_now %llu count %llu", 1325 __entry->vcpu_id, __entry->timer_index, __entry->time_now, 1326 __entry->count) 1327 ); 1328 1329 /* 1330 * Tracepoint for stimer_timer_callback. 1331 */ 1332 TRACE_EVENT(kvm_hv_stimer_callback, 1333 TP_PROTO(int vcpu_id, int timer_index), 1334 TP_ARGS(vcpu_id, timer_index), 1335 1336 TP_STRUCT__entry( 1337 __field(int, vcpu_id) 1338 __field(int, timer_index) 1339 ), 1340 1341 TP_fast_assign( 1342 __entry->vcpu_id = vcpu_id; 1343 __entry->timer_index = timer_index; 1344 ), 1345 1346 TP_printk("vcpu_id %d timer %d", 1347 __entry->vcpu_id, __entry->timer_index) 1348 ); 1349 1350 /* 1351 * Tracepoint for stimer_expiration. 
1352 */ 1353 TRACE_EVENT(kvm_hv_stimer_expiration, 1354 TP_PROTO(int vcpu_id, int timer_index, int direct, int msg_send_result), 1355 TP_ARGS(vcpu_id, timer_index, direct, msg_send_result), 1356 1357 TP_STRUCT__entry( 1358 __field(int, vcpu_id) 1359 __field(int, timer_index) 1360 __field(int, direct) 1361 __field(int, msg_send_result) 1362 ), 1363 1364 TP_fast_assign( 1365 __entry->vcpu_id = vcpu_id; 1366 __entry->timer_index = timer_index; 1367 __entry->direct = direct; 1368 __entry->msg_send_result = msg_send_result; 1369 ), 1370 1371 TP_printk("vcpu_id %d timer %d direct %d send result %d", 1372 __entry->vcpu_id, __entry->timer_index, 1373 __entry->direct, __entry->msg_send_result) 1374 ); 1375 1376 /* 1377 * Tracepoint for stimer_cleanup. 1378 */ 1379 TRACE_EVENT(kvm_hv_stimer_cleanup, 1380 TP_PROTO(int vcpu_id, int timer_index), 1381 TP_ARGS(vcpu_id, timer_index), 1382 1383 TP_STRUCT__entry( 1384 __field(int, vcpu_id) 1385 __field(int, timer_index) 1386 ), 1387 1388 TP_fast_assign( 1389 __entry->vcpu_id = vcpu_id; 1390 __entry->timer_index = timer_index; 1391 ), 1392 1393 TP_printk("vcpu_id %d timer %d", 1394 __entry->vcpu_id, __entry->timer_index) 1395 ); 1396 1397 #define kvm_print_apicv_inhibit_reasons(inhibits) \ 1398 (inhibits), (inhibits) ? " " : "", \ 1399 (inhibits) ? __print_flags(inhibits, "|", APICV_INHIBIT_REASONS) : "" 1400 1401 TRACE_EVENT(kvm_apicv_inhibit_changed, 1402 TP_PROTO(int reason, bool set, unsigned long inhibits), 1403 TP_ARGS(reason, set, inhibits), 1404 1405 TP_STRUCT__entry( 1406 __field(int, reason) 1407 __field(bool, set) 1408 __field(unsigned long, inhibits) 1409 ), 1410 1411 TP_fast_assign( 1412 __entry->reason = reason; 1413 __entry->set = set; 1414 __entry->inhibits = inhibits; 1415 ), 1416 1417 TP_printk("%s reason=%u, inhibits=0x%lx%s%s", 1418 __entry->set ? 
"set" : "cleared", 1419 __entry->reason, 1420 kvm_print_apicv_inhibit_reasons(__entry->inhibits)) 1421 ); 1422 1423 TRACE_EVENT(kvm_apicv_accept_irq, 1424 TP_PROTO(__u32 apicid, __u16 dm, __u16 tm, __u8 vec), 1425 TP_ARGS(apicid, dm, tm, vec), 1426 1427 TP_STRUCT__entry( 1428 __field( __u32, apicid ) 1429 __field( __u16, dm ) 1430 __field( __u16, tm ) 1431 __field( __u8, vec ) 1432 ), 1433 1434 TP_fast_assign( 1435 __entry->apicid = apicid; 1436 __entry->dm = dm; 1437 __entry->tm = tm; 1438 __entry->vec = vec; 1439 ), 1440 1441 TP_printk("apicid %x vec %u (%s|%s)", 1442 __entry->apicid, __entry->vec, 1443 __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode), 1444 __entry->tm ? "level" : "edge") 1445 ); 1446 1447 /* 1448 * Tracepoint for AMD AVIC 1449 */ 1450 TRACE_EVENT(kvm_avic_incomplete_ipi, 1451 TP_PROTO(u32 vcpu, u32 icrh, u32 icrl, u32 id, u32 index), 1452 TP_ARGS(vcpu, icrh, icrl, id, index), 1453 1454 TP_STRUCT__entry( 1455 __field(u32, vcpu) 1456 __field(u32, icrh) 1457 __field(u32, icrl) 1458 __field(u32, id) 1459 __field(u32, index) 1460 ), 1461 1462 TP_fast_assign( 1463 __entry->vcpu = vcpu; 1464 __entry->icrh = icrh; 1465 __entry->icrl = icrl; 1466 __entry->id = id; 1467 __entry->index = index; 1468 ), 1469 1470 TP_printk("vcpu=%u, icrh:icrl=%#010x:%08x, id=%u, index=%u", 1471 __entry->vcpu, __entry->icrh, __entry->icrl, 1472 __entry->id, __entry->index) 1473 ); 1474 1475 TRACE_EVENT(kvm_avic_unaccelerated_access, 1476 TP_PROTO(u32 vcpu, u32 offset, bool ft, bool rw, u32 vec), 1477 TP_ARGS(vcpu, offset, ft, rw, vec), 1478 1479 TP_STRUCT__entry( 1480 __field(u32, vcpu) 1481 __field(u32, offset) 1482 __field(bool, ft) 1483 __field(bool, rw) 1484 __field(u32, vec) 1485 ), 1486 1487 TP_fast_assign( 1488 __entry->vcpu = vcpu; 1489 __entry->offset = offset; 1490 __entry->ft = ft; 1491 __entry->rw = rw; 1492 __entry->vec = vec; 1493 ), 1494 1495 TP_printk("vcpu=%u, offset=%#x(%s), %s, %s, vec=%#x", 1496 __entry->vcpu, 1497 __entry->offset, 1498 
__print_symbolic(__entry->offset, kvm_trace_symbol_apic), 1499 __entry->ft ? "trap" : "fault", 1500 __entry->rw ? "write" : "read", 1501 __entry->vec) 1502 ); 1503 1504 TRACE_EVENT(kvm_avic_ga_log, 1505 TP_PROTO(u32 vmid, u32 vcpuid), 1506 TP_ARGS(vmid, vcpuid), 1507 1508 TP_STRUCT__entry( 1509 __field(u32, vmid) 1510 __field(u32, vcpuid) 1511 ), 1512 1513 TP_fast_assign( 1514 __entry->vmid = vmid; 1515 __entry->vcpuid = vcpuid; 1516 ), 1517 1518 TP_printk("vmid=%u, vcpuid=%u", 1519 __entry->vmid, __entry->vcpuid) 1520 ); 1521 1522 TRACE_EVENT(kvm_avic_kick_vcpu_slowpath, 1523 TP_PROTO(u32 icrh, u32 icrl, u32 index), 1524 TP_ARGS(icrh, icrl, index), 1525 1526 TP_STRUCT__entry( 1527 __field(u32, icrh) 1528 __field(u32, icrl) 1529 __field(u32, index) 1530 ), 1531 1532 TP_fast_assign( 1533 __entry->icrh = icrh; 1534 __entry->icrl = icrl; 1535 __entry->index = index; 1536 ), 1537 1538 TP_printk("icrh:icrl=%#08x:%08x, index=%u", 1539 __entry->icrh, __entry->icrl, __entry->index) 1540 ); 1541 1542 TRACE_EVENT(kvm_avic_doorbell, 1543 TP_PROTO(u32 vcpuid, u32 apicid), 1544 TP_ARGS(vcpuid, apicid), 1545 1546 TP_STRUCT__entry( 1547 __field(u32, vcpuid) 1548 __field(u32, apicid) 1549 ), 1550 1551 TP_fast_assign( 1552 __entry->vcpuid = vcpuid; 1553 __entry->apicid = apicid; 1554 ), 1555 1556 TP_printk("vcpuid=%u, apicid=%u", 1557 __entry->vcpuid, __entry->apicid) 1558 ); 1559 1560 TRACE_EVENT(kvm_hv_timer_state, 1561 TP_PROTO(unsigned int vcpu_id, unsigned int hv_timer_in_use), 1562 TP_ARGS(vcpu_id, hv_timer_in_use), 1563 TP_STRUCT__entry( 1564 __field(unsigned int, vcpu_id) 1565 __field(unsigned int, hv_timer_in_use) 1566 ), 1567 TP_fast_assign( 1568 __entry->vcpu_id = vcpu_id; 1569 __entry->hv_timer_in_use = hv_timer_in_use; 1570 ), 1571 TP_printk("vcpu_id %x hv_timer %x", 1572 __entry->vcpu_id, 1573 __entry->hv_timer_in_use) 1574 ); 1575 1576 /* 1577 * Tracepoint for kvm_hv_flush_tlb. 
1578 */ 1579 TRACE_EVENT(kvm_hv_flush_tlb, 1580 TP_PROTO(u64 processor_mask, u64 address_space, u64 flags, bool guest_mode), 1581 TP_ARGS(processor_mask, address_space, flags, guest_mode), 1582 1583 TP_STRUCT__entry( 1584 __field(u64, processor_mask) 1585 __field(u64, address_space) 1586 __field(u64, flags) 1587 __field(bool, guest_mode) 1588 ), 1589 1590 TP_fast_assign( 1591 __entry->processor_mask = processor_mask; 1592 __entry->address_space = address_space; 1593 __entry->flags = flags; 1594 __entry->guest_mode = guest_mode; 1595 ), 1596 1597 TP_printk("processor_mask 0x%llx address_space 0x%llx flags 0x%llx %s", 1598 __entry->processor_mask, __entry->address_space, 1599 __entry->flags, __entry->guest_mode ? "(L2)" : "") 1600 ); 1601 1602 /* 1603 * Tracepoint for kvm_hv_flush_tlb_ex. 1604 */ 1605 TRACE_EVENT(kvm_hv_flush_tlb_ex, 1606 TP_PROTO(u64 valid_bank_mask, u64 format, u64 address_space, u64 flags, bool guest_mode), 1607 TP_ARGS(valid_bank_mask, format, address_space, flags, guest_mode), 1608 1609 TP_STRUCT__entry( 1610 __field(u64, valid_bank_mask) 1611 __field(u64, format) 1612 __field(u64, address_space) 1613 __field(u64, flags) 1614 __field(bool, guest_mode) 1615 ), 1616 1617 TP_fast_assign( 1618 __entry->valid_bank_mask = valid_bank_mask; 1619 __entry->format = format; 1620 __entry->address_space = address_space; 1621 __entry->flags = flags; 1622 __entry->guest_mode = guest_mode; 1623 ), 1624 1625 TP_printk("valid_bank_mask 0x%llx format 0x%llx " 1626 "address_space 0x%llx flags 0x%llx %s", 1627 __entry->valid_bank_mask, __entry->format, 1628 __entry->address_space, __entry->flags, 1629 __entry->guest_mode ? "(L2)" : "") 1630 ); 1631 1632 /* 1633 * Tracepoints for kvm_hv_send_ipi. 
1634 */ 1635 TRACE_EVENT(kvm_hv_send_ipi, 1636 TP_PROTO(u32 vector, u64 processor_mask), 1637 TP_ARGS(vector, processor_mask), 1638 1639 TP_STRUCT__entry( 1640 __field(u32, vector) 1641 __field(u64, processor_mask) 1642 ), 1643 1644 TP_fast_assign( 1645 __entry->vector = vector; 1646 __entry->processor_mask = processor_mask; 1647 ), 1648 1649 TP_printk("vector %x processor_mask 0x%llx", 1650 __entry->vector, __entry->processor_mask) 1651 ); 1652 1653 TRACE_EVENT(kvm_hv_send_ipi_ex, 1654 TP_PROTO(u32 vector, u64 format, u64 valid_bank_mask), 1655 TP_ARGS(vector, format, valid_bank_mask), 1656 1657 TP_STRUCT__entry( 1658 __field(u32, vector) 1659 __field(u64, format) 1660 __field(u64, valid_bank_mask) 1661 ), 1662 1663 TP_fast_assign( 1664 __entry->vector = vector; 1665 __entry->format = format; 1666 __entry->valid_bank_mask = valid_bank_mask; 1667 ), 1668 1669 TP_printk("vector %x format %llx valid_bank_mask 0x%llx", 1670 __entry->vector, __entry->format, 1671 __entry->valid_bank_mask) 1672 ); 1673 1674 TRACE_EVENT(kvm_pv_tlb_flush, 1675 TP_PROTO(unsigned int vcpu_id, bool need_flush_tlb), 1676 TP_ARGS(vcpu_id, need_flush_tlb), 1677 1678 TP_STRUCT__entry( 1679 __field( unsigned int, vcpu_id ) 1680 __field( bool, need_flush_tlb ) 1681 ), 1682 1683 TP_fast_assign( 1684 __entry->vcpu_id = vcpu_id; 1685 __entry->need_flush_tlb = need_flush_tlb; 1686 ), 1687 1688 TP_printk("vcpu %u need_flush_tlb %s", __entry->vcpu_id, 1689 __entry->need_flush_tlb ? "true" : "false") 1690 ); 1691 1692 /* 1693 * Tracepoint for failed nested VMX VM-Enter. 1694 */ 1695 TRACE_EVENT(kvm_nested_vmenter_failed, 1696 TP_PROTO(const char *msg, u32 err), 1697 TP_ARGS(msg, err), 1698 1699 TP_STRUCT__entry( 1700 __string(msg, msg) 1701 __field(u32, err) 1702 ), 1703 1704 TP_fast_assign( 1705 __assign_str(msg); 1706 __entry->err = err; 1707 ), 1708 1709 TP_printk("%s%s", __get_str(msg), !__entry->err ? 
"" : 1710 __print_symbolic(__entry->err, VMX_VMENTER_INSTRUCTION_ERRORS)) 1711 ); 1712 1713 /* 1714 * Tracepoint for syndbg_set_msr. 1715 */ 1716 TRACE_EVENT(kvm_hv_syndbg_set_msr, 1717 TP_PROTO(int vcpu_id, u32 vp_index, u32 msr, u64 data), 1718 TP_ARGS(vcpu_id, vp_index, msr, data), 1719 1720 TP_STRUCT__entry( 1721 __field(int, vcpu_id) 1722 __field(u32, vp_index) 1723 __field(u32, msr) 1724 __field(u64, data) 1725 ), 1726 1727 TP_fast_assign( 1728 __entry->vcpu_id = vcpu_id; 1729 __entry->vp_index = vp_index; 1730 __entry->msr = msr; 1731 __entry->data = data; 1732 ), 1733 1734 TP_printk("vcpu_id %d vp_index %u msr 0x%x data 0x%llx", 1735 __entry->vcpu_id, __entry->vp_index, __entry->msr, 1736 __entry->data) 1737 ); 1738 1739 /* 1740 * Tracepoint for syndbg_get_msr. 1741 */ 1742 TRACE_EVENT(kvm_hv_syndbg_get_msr, 1743 TP_PROTO(int vcpu_id, u32 vp_index, u32 msr, u64 data), 1744 TP_ARGS(vcpu_id, vp_index, msr, data), 1745 1746 TP_STRUCT__entry( 1747 __field(int, vcpu_id) 1748 __field(u32, vp_index) 1749 __field(u32, msr) 1750 __field(u64, data) 1751 ), 1752 1753 TP_fast_assign( 1754 __entry->vcpu_id = vcpu_id; 1755 __entry->vp_index = vp_index; 1756 __entry->msr = msr; 1757 __entry->data = data; 1758 ), 1759 1760 TP_printk("vcpu_id %d vp_index %u msr 0x%x data 0x%llx", 1761 __entry->vcpu_id, __entry->vp_index, __entry->msr, 1762 __entry->data) 1763 ); 1764 1765 /* 1766 * Tracepoint for the start of VMGEXIT processing 1767 */ 1768 TRACE_EVENT(kvm_vmgexit_enter, 1769 TP_PROTO(unsigned int vcpu_id, struct ghcb *ghcb), 1770 TP_ARGS(vcpu_id, ghcb), 1771 1772 TP_STRUCT__entry( 1773 __field(unsigned int, vcpu_id) 1774 __field(u64, exit_reason) 1775 __field(u64, info1) 1776 __field(u64, info2) 1777 ), 1778 1779 TP_fast_assign( 1780 __entry->vcpu_id = vcpu_id; 1781 __entry->exit_reason = ghcb->save.sw_exit_code; 1782 __entry->info1 = ghcb->save.sw_exit_info_1; 1783 __entry->info2 = ghcb->save.sw_exit_info_2; 1784 ), 1785 1786 TP_printk("vcpu %u, exit_reason %llx, 
exit_info1 %llx, exit_info2 %llx", 1787 __entry->vcpu_id, __entry->exit_reason, 1788 __entry->info1, __entry->info2) 1789 ); 1790 1791 /* 1792 * Tracepoint for the end of VMGEXIT processing 1793 */ 1794 TRACE_EVENT(kvm_vmgexit_exit, 1795 TP_PROTO(unsigned int vcpu_id, struct ghcb *ghcb), 1796 TP_ARGS(vcpu_id, ghcb), 1797 1798 TP_STRUCT__entry( 1799 __field(unsigned int, vcpu_id) 1800 __field(u64, exit_reason) 1801 __field(u64, info1) 1802 __field(u64, info2) 1803 ), 1804 1805 TP_fast_assign( 1806 __entry->vcpu_id = vcpu_id; 1807 __entry->exit_reason = ghcb->save.sw_exit_code; 1808 __entry->info1 = ghcb->save.sw_exit_info_1; 1809 __entry->info2 = ghcb->save.sw_exit_info_2; 1810 ), 1811 1812 TP_printk("vcpu %u, exit_reason %llx, exit_info1 %llx, exit_info2 %llx", 1813 __entry->vcpu_id, __entry->exit_reason, 1814 __entry->info1, __entry->info2) 1815 ); 1816 1817 /* 1818 * Tracepoint for the start of VMGEXIT MSR procotol processing 1819 */ 1820 TRACE_EVENT(kvm_vmgexit_msr_protocol_enter, 1821 TP_PROTO(unsigned int vcpu_id, u64 ghcb_gpa), 1822 TP_ARGS(vcpu_id, ghcb_gpa), 1823 1824 TP_STRUCT__entry( 1825 __field(unsigned int, vcpu_id) 1826 __field(u64, ghcb_gpa) 1827 ), 1828 1829 TP_fast_assign( 1830 __entry->vcpu_id = vcpu_id; 1831 __entry->ghcb_gpa = ghcb_gpa; 1832 ), 1833 1834 TP_printk("vcpu %u, ghcb_gpa %016llx", 1835 __entry->vcpu_id, __entry->ghcb_gpa) 1836 ); 1837 1838 /* 1839 * Tracepoint for the end of VMGEXIT MSR procotol processing 1840 */ 1841 TRACE_EVENT(kvm_vmgexit_msr_protocol_exit, 1842 TP_PROTO(unsigned int vcpu_id, u64 ghcb_gpa, int result), 1843 TP_ARGS(vcpu_id, ghcb_gpa, result), 1844 1845 TP_STRUCT__entry( 1846 __field(unsigned int, vcpu_id) 1847 __field(u64, ghcb_gpa) 1848 __field(int, result) 1849 ), 1850 1851 TP_fast_assign( 1852 __entry->vcpu_id = vcpu_id; 1853 __entry->ghcb_gpa = ghcb_gpa; 1854 __entry->result = result; 1855 ), 1856 1857 TP_printk("vcpu %u, ghcb_gpa %016llx, result %d", 1858 __entry->vcpu_id, __entry->ghcb_gpa, __entry->result) 
1859 ); 1860 1861 /* 1862 * Tracepoint for #NPFs due to RMP faults. 1863 */ 1864 TRACE_EVENT(kvm_rmp_fault, 1865 TP_PROTO(struct kvm_vcpu *vcpu, u64 gpa, u64 pfn, u64 error_code, 1866 int rmp_level, int psmash_ret), 1867 TP_ARGS(vcpu, gpa, pfn, error_code, rmp_level, psmash_ret), 1868 1869 TP_STRUCT__entry( 1870 __field(unsigned int, vcpu_id) 1871 __field(u64, gpa) 1872 __field(u64, pfn) 1873 __field(u64, error_code) 1874 __field(int, rmp_level) 1875 __field(int, psmash_ret) 1876 ), 1877 1878 TP_fast_assign( 1879 __entry->vcpu_id = vcpu->vcpu_id; 1880 __entry->gpa = gpa; 1881 __entry->pfn = pfn; 1882 __entry->error_code = error_code; 1883 __entry->rmp_level = rmp_level; 1884 __entry->psmash_ret = psmash_ret; 1885 ), 1886 1887 TP_printk("vcpu %u gpa %016llx pfn 0x%llx error_code 0x%llx rmp_level %d psmash_ret %d", 1888 __entry->vcpu_id, __entry->gpa, __entry->pfn, 1889 __entry->error_code, __entry->rmp_level, __entry->psmash_ret) 1890 ); 1891 1892 #endif /* _TRACE_KVM_H */ 1893 1894 #undef TRACE_INCLUDE_PATH 1895 #define TRACE_INCLUDE_PATH ../../arch/x86/kvm 1896 #undef TRACE_INCLUDE_FILE 1897 #define TRACE_INCLUDE_FILE trace 1898 1899 /* This part must be outside protection */ 1900 #include <trace/define_trace.h> 1901