1 /* SPDX-License-Identifier: GPL-2.0 */ 2 #if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ) 3 #define _TRACE_KVM_H 4 5 #include <linux/tracepoint.h> 6 #include <asm/vmx.h> 7 #include <asm/svm.h> 8 #include <asm/clocksource.h> 9 #include <asm/pvclock-abi.h> 10 11 #undef TRACE_SYSTEM 12 #define TRACE_SYSTEM kvm 13 14 /* 15 * Tracepoint for guest mode entry. 16 */ 17 TRACE_EVENT(kvm_entry, 18 TP_PROTO(struct kvm_vcpu *vcpu), 19 TP_ARGS(vcpu), 20 21 TP_STRUCT__entry( 22 __field( unsigned int, vcpu_id ) 23 __field( unsigned long, rip ) 24 ), 25 26 TP_fast_assign( 27 __entry->vcpu_id = vcpu->vcpu_id; 28 __entry->rip = kvm_rip_read(vcpu); 29 ), 30 31 TP_printk("vcpu %u, rip 0x%lx", __entry->vcpu_id, __entry->rip) 32 ); 33 34 /* 35 * Tracepoint for hypercall. 36 */ 37 TRACE_EVENT(kvm_hypercall, 38 TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1, 39 unsigned long a2, unsigned long a3), 40 TP_ARGS(nr, a0, a1, a2, a3), 41 42 TP_STRUCT__entry( 43 __field( unsigned long, nr ) 44 __field( unsigned long, a0 ) 45 __field( unsigned long, a1 ) 46 __field( unsigned long, a2 ) 47 __field( unsigned long, a3 ) 48 ), 49 50 TP_fast_assign( 51 __entry->nr = nr; 52 __entry->a0 = a0; 53 __entry->a1 = a1; 54 __entry->a2 = a2; 55 __entry->a3 = a3; 56 ), 57 58 TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx", 59 __entry->nr, __entry->a0, __entry->a1, __entry->a2, 60 __entry->a3) 61 ); 62 63 /* 64 * Tracepoint for hypercall. 65 */ 66 TRACE_EVENT(kvm_hv_hypercall, 67 TP_PROTO(__u16 code, bool fast, __u16 var_cnt, __u16 rep_cnt, 68 __u16 rep_idx, __u64 ingpa, __u64 outgpa), 69 TP_ARGS(code, fast, var_cnt, rep_cnt, rep_idx, ingpa, outgpa), 70 71 TP_STRUCT__entry( 72 __field( __u16, rep_cnt ) 73 __field( __u16, rep_idx ) 74 __field( __u64, ingpa ) 75 __field( __u64, outgpa ) 76 __field( __u16, code ) 77 __field( __u16, var_cnt ) 78 __field( bool, fast ) 79 ), 80 81 TP_fast_assign( 82 __entry->rep_cnt = rep_cnt; 83 __entry->rep_idx = rep_idx; 84 __entry->ingpa = ingpa; 85 __entry->outgpa = outgpa; 86 __entry->code = code; 87 __entry->var_cnt = var_cnt; 88 __entry->fast = fast; 89 ), 90 91 TP_printk("code 0x%x %s var_cnt 0x%x rep_cnt 0x%x idx 0x%x in 0x%llx out 0x%llx", 92 __entry->code, __entry->fast ? "fast" : "slow", 93 __entry->var_cnt, __entry->rep_cnt, __entry->rep_idx, 94 __entry->ingpa, __entry->outgpa) 95 ); 96 97 TRACE_EVENT(kvm_hv_hypercall_done, 98 TP_PROTO(u64 result), 99 TP_ARGS(result), 100 101 TP_STRUCT__entry( 102 __field(__u64, result) 103 ), 104 105 TP_fast_assign( 106 __entry->result = result; 107 ), 108 109 TP_printk("result 0x%llx", __entry->result) 110 ); 111 112 /* 113 * Tracepoint for Xen hypercall. 114 */ 115 TRACE_EVENT(kvm_xen_hypercall, 116 TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1, 117 unsigned long a2, unsigned long a3, unsigned long a4, 118 unsigned long a5), 119 TP_ARGS(nr, a0, a1, a2, a3, a4, a5), 120 121 TP_STRUCT__entry( 122 __field(unsigned long, nr) 123 __field(unsigned long, a0) 124 __field(unsigned long, a1) 125 __field(unsigned long, a2) 126 __field(unsigned long, a3) 127 __field(unsigned long, a4) 128 __field(unsigned long, a5) 129 ), 130 131 TP_fast_assign( 132 __entry->nr = nr; 133 __entry->a0 = a0; 134 __entry->a1 = a1; 135 __entry->a2 = a2; 136 __entry->a3 = a3; 137 __entry->a4 = a4; 138 __entry->a4 = a5; 139 ), 140 141 TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx a4 0x%lx a5 %lx", 142 __entry->nr, __entry->a0, __entry->a1, __entry->a2, 143 __entry->a3, __entry->a4, __entry->a5) 144 ); 145 146 147 148 /* 149 * Tracepoint for PIO. 150 */ 151 152 #define KVM_PIO_IN 0 153 #define KVM_PIO_OUT 1 154 155 TRACE_EVENT(kvm_pio, 156 TP_PROTO(unsigned int rw, unsigned int port, unsigned int size, 157 unsigned int count, const void *data), 158 TP_ARGS(rw, port, size, count, data), 159 160 TP_STRUCT__entry( 161 __field( unsigned int, rw ) 162 __field( unsigned int, port ) 163 __field( unsigned int, size ) 164 __field( unsigned int, count ) 165 __field( unsigned int, val ) 166 ), 167 168 TP_fast_assign( 169 __entry->rw = rw; 170 __entry->port = port; 171 __entry->size = size; 172 __entry->count = count; 173 if (size == 1) 174 __entry->val = *(unsigned char *)data; 175 else if (size == 2) 176 __entry->val = *(unsigned short *)data; 177 else 178 __entry->val = *(unsigned int *)data; 179 ), 180 181 TP_printk("pio_%s at 0x%x size %d count %d val 0x%x %s", 182 __entry->rw ? "write" : "read", 183 __entry->port, __entry->size, __entry->count, __entry->val, 184 __entry->count > 1 ? "(...)" : "") 185 ); 186 187 /* 188 * Tracepoint for fast mmio. 189 */ 190 TRACE_EVENT(kvm_fast_mmio, 191 TP_PROTO(u64 gpa), 192 TP_ARGS(gpa), 193 194 TP_STRUCT__entry( 195 __field(u64, gpa) 196 ), 197 198 TP_fast_assign( 199 __entry->gpa = gpa; 200 ), 201 202 TP_printk("fast mmio at gpa 0x%llx", __entry->gpa) 203 ); 204 205 /* 206 * Tracepoint for cpuid. 207 */ 208 TRACE_EVENT(kvm_cpuid, 209 TP_PROTO(unsigned int function, unsigned int index, unsigned long rax, 210 unsigned long rbx, unsigned long rcx, unsigned long rdx, 211 bool found, bool used_max_basic), 212 TP_ARGS(function, index, rax, rbx, rcx, rdx, found, used_max_basic), 213 214 TP_STRUCT__entry( 215 __field( unsigned int, function ) 216 __field( unsigned int, index ) 217 __field( unsigned long, rax ) 218 __field( unsigned long, rbx ) 219 __field( unsigned long, rcx ) 220 __field( unsigned long, rdx ) 221 __field( bool, found ) 222 __field( bool, used_max_basic ) 223 ), 224 225 TP_fast_assign( 226 __entry->function = function; 227 __entry->index = index; 228 __entry->rax = rax; 229 __entry->rbx = rbx; 230 __entry->rcx = rcx; 231 __entry->rdx = rdx; 232 __entry->found = found; 233 __entry->used_max_basic = used_max_basic; 234 ), 235 236 TP_printk("func %x idx %x rax %lx rbx %lx rcx %lx rdx %lx, cpuid entry %s%s", 237 __entry->function, __entry->index, __entry->rax, 238 __entry->rbx, __entry->rcx, __entry->rdx, 239 __entry->found ? "found" : "not found", 240 __entry->used_max_basic ? ", used max basic" : "") 241 ); 242 243 #define AREG(x) { APIC_##x, "APIC_" #x } 244 245 #define kvm_trace_symbol_apic \ 246 AREG(ID), AREG(LVR), AREG(TASKPRI), AREG(ARBPRI), AREG(PROCPRI), \ 247 AREG(EOI), AREG(RRR), AREG(LDR), AREG(DFR), AREG(SPIV), AREG(ISR), \ 248 AREG(TMR), AREG(IRR), AREG(ESR), AREG(ICR), AREG(ICR2), AREG(LVTT), \ 249 AREG(LVTTHMR), AREG(LVTPC), AREG(LVT0), AREG(LVT1), AREG(LVTERR), \ 250 AREG(TMICT), AREG(TMCCT), AREG(TDCR), AREG(SELF_IPI), AREG(EFEAT), \ 251 AREG(ECTRL) 252 /* 253 * Tracepoint for apic access. 254 */ 255 TRACE_EVENT(kvm_apic, 256 TP_PROTO(unsigned int rw, unsigned int reg, u64 val), 257 TP_ARGS(rw, reg, val), 258 259 TP_STRUCT__entry( 260 __field( unsigned int, rw ) 261 __field( unsigned int, reg ) 262 __field( u64, val ) 263 ), 264 265 TP_fast_assign( 266 __entry->rw = rw; 267 __entry->reg = reg; 268 __entry->val = val; 269 ), 270 271 TP_printk("apic_%s %s = 0x%llx", 272 __entry->rw ? "write" : "read", 273 __print_symbolic(__entry->reg, kvm_trace_symbol_apic), 274 __entry->val) 275 ); 276 277 #define trace_kvm_apic_read(reg, val) trace_kvm_apic(0, reg, val) 278 #define trace_kvm_apic_write(reg, val) trace_kvm_apic(1, reg, val) 279 280 #define KVM_ISA_VMX 1 281 #define KVM_ISA_SVM 2 282 283 #define kvm_print_exit_reason(exit_reason, isa) \ 284 (isa == KVM_ISA_VMX) ? \ 285 __print_symbolic(exit_reason & 0xffff, VMX_EXIT_REASONS) : \ 286 __print_symbolic(exit_reason, SVM_EXIT_REASONS), \ 287 (isa == KVM_ISA_VMX && exit_reason & ~0xffff) ? " " : "", \ 288 (isa == KVM_ISA_VMX) ? \ 289 __print_flags(exit_reason & ~0xffff, " ", VMX_EXIT_REASON_FLAGS) : "" 290 291 #define TRACE_EVENT_KVM_EXIT(name) \ 292 TRACE_EVENT(name, \ 293 TP_PROTO(struct kvm_vcpu *vcpu, u32 isa), \ 294 TP_ARGS(vcpu, isa), \ 295 \ 296 TP_STRUCT__entry( \ 297 __field( unsigned int, exit_reason ) \ 298 __field( unsigned long, guest_rip ) \ 299 __field( u32, isa ) \ 300 __field( u64, info1 ) \ 301 __field( u64, info2 ) \ 302 __field( u32, intr_info ) \ 303 __field( u32, error_code ) \ 304 __field( unsigned int, vcpu_id ) \ 305 ), \ 306 \ 307 TP_fast_assign( \ 308 __entry->guest_rip = kvm_rip_read(vcpu); \ 309 __entry->isa = isa; \ 310 __entry->vcpu_id = vcpu->vcpu_id; \ 311 static_call(kvm_x86_get_exit_info)(vcpu, \ 312 &__entry->exit_reason, \ 313 &__entry->info1, \ 314 &__entry->info2, \ 315 &__entry->intr_info, \ 316 &__entry->error_code); \ 317 ), \ 318 \ 319 TP_printk("vcpu %u reason %s%s%s rip 0x%lx info1 0x%016llx " \ 320 "info2 0x%016llx intr_info 0x%08x error_code 0x%08x", \ 321 __entry->vcpu_id, \ 322 kvm_print_exit_reason(__entry->exit_reason, __entry->isa), \ 323 __entry->guest_rip, __entry->info1, __entry->info2, \ 324 __entry->intr_info, __entry->error_code) \ 325 ) 326 327 /* 328 * Tracepoint for kvm guest exit: 329 */ 330 TRACE_EVENT_KVM_EXIT(kvm_exit); 331 332 /* 333 * Tracepoint for kvm interrupt injection: 334 */ 335 TRACE_EVENT(kvm_inj_virq, 336 TP_PROTO(unsigned int vector, bool soft, bool reinjected), 337 TP_ARGS(vector, soft, reinjected), 338 339 TP_STRUCT__entry( 340 __field( unsigned int, vector ) 341 __field( bool, soft ) 342 __field( bool, reinjected ) 343 ), 344 345 TP_fast_assign( 346 __entry->vector = vector; 347 __entry->soft = soft; 348 __entry->reinjected = reinjected; 349 ), 350 351 TP_printk("%s 0x%x%s", 352 __entry->soft ? "Soft/INTn" : "IRQ", __entry->vector, 353 __entry->reinjected ? " [reinjected]" : "") 354 ); 355 356 #define EXS(x) { x##_VECTOR, "#" #x } 357 358 #define kvm_trace_sym_exc \ 359 EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM), \ 360 EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF), \ 361 EXS(MF), EXS(AC), EXS(MC) 362 363 /* 364 * Tracepoint for kvm interrupt injection: 365 */ 366 TRACE_EVENT(kvm_inj_exception, 367 TP_PROTO(unsigned exception, bool has_error, unsigned error_code, 368 bool reinjected), 369 TP_ARGS(exception, has_error, error_code, reinjected), 370 371 TP_STRUCT__entry( 372 __field( u8, exception ) 373 __field( u8, has_error ) 374 __field( u32, error_code ) 375 __field( bool, reinjected ) 376 ), 377 378 TP_fast_assign( 379 __entry->exception = exception; 380 __entry->has_error = has_error; 381 __entry->error_code = error_code; 382 __entry->reinjected = reinjected; 383 ), 384 385 TP_printk("%s%s%s%s%s", 386 __print_symbolic(__entry->exception, kvm_trace_sym_exc), 387 !__entry->has_error ? "" : " (", 388 !__entry->has_error ? "" : __print_symbolic(__entry->error_code, { }), 389 !__entry->has_error ? "" : ")", 390 __entry->reinjected ? " [reinjected]" : "") 391 ); 392 393 /* 394 * Tracepoint for page fault. 395 */ 396 TRACE_EVENT(kvm_page_fault, 397 TP_PROTO(struct kvm_vcpu *vcpu, u64 fault_address, u64 error_code), 398 TP_ARGS(vcpu, fault_address, error_code), 399 400 TP_STRUCT__entry( 401 __field( unsigned int, vcpu_id ) 402 __field( unsigned long, guest_rip ) 403 __field( u64, fault_address ) 404 __field( u64, error_code ) 405 ), 406 407 TP_fast_assign( 408 __entry->vcpu_id = vcpu->vcpu_id; 409 __entry->guest_rip = kvm_rip_read(vcpu); 410 __entry->fault_address = fault_address; 411 __entry->error_code = error_code; 412 ), 413 414 TP_printk("vcpu %u rip 0x%lx address 0x%016llx error_code 0x%llx", 415 __entry->vcpu_id, __entry->guest_rip, 416 __entry->fault_address, __entry->error_code) 417 ); 418 419 /* 420 * Tracepoint for guest MSR access. 421 */ 422 TRACE_EVENT(kvm_msr, 423 TP_PROTO(unsigned write, u32 ecx, u64 data, bool exception), 424 TP_ARGS(write, ecx, data, exception), 425 426 TP_STRUCT__entry( 427 __field( unsigned, write ) 428 __field( u32, ecx ) 429 __field( u64, data ) 430 __field( u8, exception ) 431 ), 432 433 TP_fast_assign( 434 __entry->write = write; 435 __entry->ecx = ecx; 436 __entry->data = data; 437 __entry->exception = exception; 438 ), 439 440 TP_printk("msr_%s %x = 0x%llx%s", 441 __entry->write ? "write" : "read", 442 __entry->ecx, __entry->data, 443 __entry->exception ? " (#GP)" : "") 444 ); 445 446 #define trace_kvm_msr_read(ecx, data) trace_kvm_msr(0, ecx, data, false) 447 #define trace_kvm_msr_write(ecx, data) trace_kvm_msr(1, ecx, data, false) 448 #define trace_kvm_msr_read_ex(ecx) trace_kvm_msr(0, ecx, 0, true) 449 #define trace_kvm_msr_write_ex(ecx, data) trace_kvm_msr(1, ecx, data, true) 450 451 /* 452 * Tracepoint for guest CR access. 453 */ 454 TRACE_EVENT(kvm_cr, 455 TP_PROTO(unsigned int rw, unsigned int cr, unsigned long val), 456 TP_ARGS(rw, cr, val), 457 458 TP_STRUCT__entry( 459 __field( unsigned int, rw ) 460 __field( unsigned int, cr ) 461 __field( unsigned long, val ) 462 ), 463 464 TP_fast_assign( 465 __entry->rw = rw; 466 __entry->cr = cr; 467 __entry->val = val; 468 ), 469 470 TP_printk("cr_%s %x = 0x%lx", 471 __entry->rw ? "write" : "read", 472 __entry->cr, __entry->val) 473 ); 474 475 #define trace_kvm_cr_read(cr, val) trace_kvm_cr(0, cr, val) 476 #define trace_kvm_cr_write(cr, val) trace_kvm_cr(1, cr, val) 477 478 TRACE_EVENT(kvm_pic_set_irq, 479 TP_PROTO(__u8 chip, __u8 pin, __u8 elcr, __u8 imr, bool coalesced), 480 TP_ARGS(chip, pin, elcr, imr, coalesced), 481 482 TP_STRUCT__entry( 483 __field( __u8, chip ) 484 __field( __u8, pin ) 485 __field( __u8, elcr ) 486 __field( __u8, imr ) 487 __field( bool, coalesced ) 488 ), 489 490 TP_fast_assign( 491 __entry->chip = chip; 492 __entry->pin = pin; 493 __entry->elcr = elcr; 494 __entry->imr = imr; 495 __entry->coalesced = coalesced; 496 ), 497 498 TP_printk("chip %u pin %u (%s%s)%s", 499 __entry->chip, __entry->pin, 500 (__entry->elcr & (1 << __entry->pin)) ? "level":"edge", 501 (__entry->imr & (1 << __entry->pin)) ? "|masked":"", 502 __entry->coalesced ? " (coalesced)" : "") 503 ); 504 505 #define kvm_apic_dst_shorthand \ 506 {0x0, "dst"}, \ 507 {0x1, "self"}, \ 508 {0x2, "all"}, \ 509 {0x3, "all-but-self"} 510 511 TRACE_EVENT(kvm_apic_ipi, 512 TP_PROTO(__u32 icr_low, __u32 dest_id), 513 TP_ARGS(icr_low, dest_id), 514 515 TP_STRUCT__entry( 516 __field( __u32, icr_low ) 517 __field( __u32, dest_id ) 518 ), 519 520 TP_fast_assign( 521 __entry->icr_low = icr_low; 522 __entry->dest_id = dest_id; 523 ), 524 525 TP_printk("dst %x vec %u (%s|%s|%s|%s|%s)", 526 __entry->dest_id, (u8)__entry->icr_low, 527 __print_symbolic((__entry->icr_low >> 8 & 0x7), 528 kvm_deliver_mode), 529 (__entry->icr_low & (1<<11)) ? "logical" : "physical", 530 (__entry->icr_low & (1<<14)) ? "assert" : "de-assert", 531 (__entry->icr_low & (1<<15)) ? "level" : "edge", 532 __print_symbolic((__entry->icr_low >> 18 & 0x3), 533 kvm_apic_dst_shorthand)) 534 ); 535 536 TRACE_EVENT(kvm_apic_accept_irq, 537 TP_PROTO(__u32 apicid, __u16 dm, __u16 tm, __u8 vec), 538 TP_ARGS(apicid, dm, tm, vec), 539 540 TP_STRUCT__entry( 541 __field( __u32, apicid ) 542 __field( __u16, dm ) 543 __field( __u16, tm ) 544 __field( __u8, vec ) 545 ), 546 547 TP_fast_assign( 548 __entry->apicid = apicid; 549 __entry->dm = dm; 550 __entry->tm = tm; 551 __entry->vec = vec; 552 ), 553 554 TP_printk("apicid %x vec %u (%s|%s)", 555 __entry->apicid, __entry->vec, 556 __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode), 557 __entry->tm ? "level" : "edge") 558 ); 559 560 TRACE_EVENT(kvm_eoi, 561 TP_PROTO(struct kvm_lapic *apic, int vector), 562 TP_ARGS(apic, vector), 563 564 TP_STRUCT__entry( 565 __field( __u32, apicid ) 566 __field( int, vector ) 567 ), 568 569 TP_fast_assign( 570 __entry->apicid = apic->vcpu->vcpu_id; 571 __entry->vector = vector; 572 ), 573 574 TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector) 575 ); 576 577 TRACE_EVENT(kvm_pv_eoi, 578 TP_PROTO(struct kvm_lapic *apic, int vector), 579 TP_ARGS(apic, vector), 580 581 TP_STRUCT__entry( 582 __field( __u32, apicid ) 583 __field( int, vector ) 584 ), 585 586 TP_fast_assign( 587 __entry->apicid = apic->vcpu->vcpu_id; 588 __entry->vector = vector; 589 ), 590 591 TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector) 592 ); 593 594 /* 595 * Tracepoint for nested VMRUN 596 */ 597 TRACE_EVENT(kvm_nested_vmenter, 598 TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl, 599 __u32 event_inj, bool tdp_enabled, __u64 guest_tdp_pgd, 600 __u64 guest_cr3, __u32 isa), 601 TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, tdp_enabled, 602 guest_tdp_pgd, guest_cr3, isa), 603 604 TP_STRUCT__entry( 605 __field( __u64, rip ) 606 __field( __u64, vmcb ) 607 __field( __u64, nested_rip ) 608 __field( __u32, int_ctl ) 609 __field( __u32, event_inj ) 610 __field( bool, tdp_enabled ) 611 __field( __u64, guest_pgd ) 612 __field( __u32, isa ) 613 ), 614 615 TP_fast_assign( 616 __entry->rip = rip; 617 __entry->vmcb = vmcb; 618 __entry->nested_rip = nested_rip; 619 __entry->int_ctl = int_ctl; 620 __entry->event_inj = event_inj; 621 __entry->tdp_enabled = tdp_enabled; 622 __entry->guest_pgd = tdp_enabled ? guest_tdp_pgd : guest_cr3; 623 __entry->isa = isa; 624 ), 625 626 TP_printk("rip: 0x%016llx %s: 0x%016llx nested_rip: 0x%016llx " 627 "int_ctl: 0x%08x event_inj: 0x%08x nested_%s=%s %s: 0x%016llx", 628 __entry->rip, 629 __entry->isa == KVM_ISA_VMX ? "vmcs" : "vmcb", 630 __entry->vmcb, 631 __entry->nested_rip, 632 __entry->int_ctl, 633 __entry->event_inj, 634 __entry->isa == KVM_ISA_VMX ? "ept" : "npt", 635 __entry->tdp_enabled ? "y" : "n", 636 !__entry->tdp_enabled ? "guest_cr3" : 637 __entry->isa == KVM_ISA_VMX ? "nested_eptp" : "nested_cr3", 638 __entry->guest_pgd) 639 ); 640 641 TRACE_EVENT(kvm_nested_intercepts, 642 TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions, 643 __u32 intercept1, __u32 intercept2, __u32 intercept3), 644 TP_ARGS(cr_read, cr_write, exceptions, intercept1, 645 intercept2, intercept3), 646 647 TP_STRUCT__entry( 648 __field( __u16, cr_read ) 649 __field( __u16, cr_write ) 650 __field( __u32, exceptions ) 651 __field( __u32, intercept1 ) 652 __field( __u32, intercept2 ) 653 __field( __u32, intercept3 ) 654 ), 655 656 TP_fast_assign( 657 __entry->cr_read = cr_read; 658 __entry->cr_write = cr_write; 659 __entry->exceptions = exceptions; 660 __entry->intercept1 = intercept1; 661 __entry->intercept2 = intercept2; 662 __entry->intercept3 = intercept3; 663 ), 664 665 TP_printk("cr_read: %04x cr_write: %04x excp: %08x " 666 "intercepts: %08x %08x %08x", 667 __entry->cr_read, __entry->cr_write, __entry->exceptions, 668 __entry->intercept1, __entry->intercept2, __entry->intercept3) 669 ); 670 /* 671 * Tracepoint for #VMEXIT while nested 672 */ 673 TRACE_EVENT_KVM_EXIT(kvm_nested_vmexit); 674 675 /* 676 * Tracepoint for #VMEXIT reinjected to the guest 677 */ 678 TRACE_EVENT(kvm_nested_vmexit_inject, 679 TP_PROTO(__u32 exit_code, 680 __u64 exit_info1, __u64 exit_info2, 681 __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa), 682 TP_ARGS(exit_code, exit_info1, exit_info2, 683 exit_int_info, exit_int_info_err, isa), 684 685 TP_STRUCT__entry( 686 __field( __u32, exit_code ) 687 __field( __u64, exit_info1 ) 688 __field( __u64, exit_info2 ) 689 __field( __u32, exit_int_info ) 690 __field( __u32, exit_int_info_err ) 691 __field( __u32, isa ) 692 ), 693 694 TP_fast_assign( 695 __entry->exit_code = exit_code; 696 __entry->exit_info1 = exit_info1; 697 __entry->exit_info2 = exit_info2; 698 __entry->exit_int_info = exit_int_info; 699 __entry->exit_int_info_err = exit_int_info_err; 700 __entry->isa = isa; 701 ), 702 703 TP_printk("reason: %s%s%s ext_inf1: 0x%016llx " 704 "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x", 705 kvm_print_exit_reason(__entry->exit_code, __entry->isa), 706 __entry->exit_info1, __entry->exit_info2, 707 __entry->exit_int_info, __entry->exit_int_info_err) 708 ); 709 710 /* 711 * Tracepoint for nested #vmexit because of interrupt pending 712 */ 713 TRACE_EVENT(kvm_nested_intr_vmexit, 714 TP_PROTO(__u64 rip), 715 TP_ARGS(rip), 716 717 TP_STRUCT__entry( 718 __field( __u64, rip ) 719 ), 720 721 TP_fast_assign( 722 __entry->rip = rip 723 ), 724 725 TP_printk("rip: 0x%016llx", __entry->rip) 726 ); 727 728 /* 729 * Tracepoint for nested #vmexit because of interrupt pending 730 */ 731 TRACE_EVENT(kvm_invlpga, 732 TP_PROTO(__u64 rip, int asid, u64 address), 733 TP_ARGS(rip, asid, address), 734 735 TP_STRUCT__entry( 736 __field( __u64, rip ) 737 __field( int, asid ) 738 __field( __u64, address ) 739 ), 740 741 TP_fast_assign( 742 __entry->rip = rip; 743 __entry->asid = asid; 744 __entry->address = address; 745 ), 746 747 TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx", 748 __entry->rip, __entry->asid, __entry->address) 749 ); 750 751 /* 752 * Tracepoint for nested #vmexit because of interrupt pending 753 */ 754 TRACE_EVENT(kvm_skinit, 755 TP_PROTO(__u64 rip, __u32 slb), 756 TP_ARGS(rip, slb), 757 758 TP_STRUCT__entry( 759 __field( __u64, rip ) 760 __field( __u32, slb ) 761 ), 762 763 TP_fast_assign( 764 __entry->rip = rip; 765 __entry->slb = slb; 766 ), 767 768 TP_printk("rip: 0x%016llx slb: 0x%08x", 769 __entry->rip, __entry->slb) 770 ); 771 772 #define KVM_EMUL_INSN_F_CR0_PE (1 << 0) 773 #define KVM_EMUL_INSN_F_EFL_VM (1 << 1) 774 #define KVM_EMUL_INSN_F_CS_D (1 << 2) 775 #define KVM_EMUL_INSN_F_CS_L (1 << 3) 776 777 #define kvm_trace_symbol_emul_flags \ 778 { 0, "real" }, \ 779 { KVM_EMUL_INSN_F_CR0_PE \ 780 | KVM_EMUL_INSN_F_EFL_VM, "vm16" }, \ 781 { KVM_EMUL_INSN_F_CR0_PE, "prot16" }, \ 782 { KVM_EMUL_INSN_F_CR0_PE \ 783 | KVM_EMUL_INSN_F_CS_D, "prot32" }, \ 784 { KVM_EMUL_INSN_F_CR0_PE \ 785 | KVM_EMUL_INSN_F_CS_L, "prot64" } 786 787 #define kei_decode_mode(mode) ({ \ 788 u8 flags = 0xff; \ 789 switch (mode) { \ 790 case X86EMUL_MODE_REAL: \ 791 flags = 0; \ 792 break; \ 793 case X86EMUL_MODE_VM86: \ 794 flags = KVM_EMUL_INSN_F_EFL_VM; \ 795 break; \ 796 case X86EMUL_MODE_PROT16: \ 797 flags = KVM_EMUL_INSN_F_CR0_PE; \ 798 break; \ 799 case X86EMUL_MODE_PROT32: \ 800 flags = KVM_EMUL_INSN_F_CR0_PE \ 801 | KVM_EMUL_INSN_F_CS_D; \ 802 break; \ 803 case X86EMUL_MODE_PROT64: \ 804 flags = KVM_EMUL_INSN_F_CR0_PE \ 805 | KVM_EMUL_INSN_F_CS_L; \ 806 break; \ 807 } \ 808 flags; \ 809 }) 810 811 TRACE_EVENT(kvm_emulate_insn, 812 TP_PROTO(struct kvm_vcpu *vcpu, __u8 failed), 813 TP_ARGS(vcpu, failed), 814 815 TP_STRUCT__entry( 816 __field( __u64, rip ) 817 __field( __u32, csbase ) 818 __field( __u8, len ) 819 __array( __u8, insn, 15 ) 820 __field( __u8, flags ) 821 __field( __u8, failed ) 822 ), 823 824 TP_fast_assign( 825 __entry->csbase = static_call(kvm_x86_get_segment_base)(vcpu, VCPU_SREG_CS); 826 __entry->len = vcpu->arch.emulate_ctxt->fetch.ptr 827 - vcpu->arch.emulate_ctxt->fetch.data; 828 __entry->rip = vcpu->arch.emulate_ctxt->_eip - __entry->len; 829 memcpy(__entry->insn, 830 vcpu->arch.emulate_ctxt->fetch.data, 831 15); 832 __entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt->mode); 833 __entry->failed = failed; 834 ), 835 836 TP_printk("%x:%llx:%s (%s)%s", 837 __entry->csbase, __entry->rip, 838 __print_hex(__entry->insn, __entry->len), 839 __print_symbolic(__entry->flags, 840 kvm_trace_symbol_emul_flags), 841 __entry->failed ? " failed" : "" 842 ) 843 ); 844 845 #define trace_kvm_emulate_insn_start(vcpu) trace_kvm_emulate_insn(vcpu, 0) 846 #define trace_kvm_emulate_insn_failed(vcpu) trace_kvm_emulate_insn(vcpu, 1) 847 848 TRACE_EVENT( 849 vcpu_match_mmio, 850 TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match), 851 TP_ARGS(gva, gpa, write, gpa_match), 852 853 TP_STRUCT__entry( 854 __field(gva_t, gva) 855 __field(gpa_t, gpa) 856 __field(bool, write) 857 __field(bool, gpa_match) 858 ), 859 860 TP_fast_assign( 861 __entry->gva = gva; 862 __entry->gpa = gpa; 863 __entry->write = write; 864 __entry->gpa_match = gpa_match 865 ), 866 867 TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa, 868 __entry->write ? "Write" : "Read", 869 __entry->gpa_match ? "GPA" : "GVA") 870 ); 871 872 TRACE_EVENT(kvm_write_tsc_offset, 873 TP_PROTO(unsigned int vcpu_id, __u64 previous_tsc_offset, 874 __u64 next_tsc_offset), 875 TP_ARGS(vcpu_id, previous_tsc_offset, next_tsc_offset), 876 877 TP_STRUCT__entry( 878 __field( unsigned int, vcpu_id ) 879 __field( __u64, previous_tsc_offset ) 880 __field( __u64, next_tsc_offset ) 881 ), 882 883 TP_fast_assign( 884 __entry->vcpu_id = vcpu_id; 885 __entry->previous_tsc_offset = previous_tsc_offset; 886 __entry->next_tsc_offset = next_tsc_offset; 887 ), 888 889 TP_printk("vcpu=%u prev=%llu next=%llu", __entry->vcpu_id, 890 __entry->previous_tsc_offset, __entry->next_tsc_offset) 891 ); 892 893 #ifdef CONFIG_X86_64 894 895 #define host_clocks \ 896 {VDSO_CLOCKMODE_NONE, "none"}, \ 897 {VDSO_CLOCKMODE_TSC, "tsc"} \ 898 899 TRACE_EVENT(kvm_update_master_clock, 900 TP_PROTO(bool use_master_clock, unsigned int host_clock, bool offset_matched), 901 TP_ARGS(use_master_clock, host_clock, offset_matched), 902 903 TP_STRUCT__entry( 904 __field( bool, use_master_clock ) 905 __field( unsigned int, host_clock ) 906 __field( bool, offset_matched ) 907 ), 908 909 TP_fast_assign( 910 __entry->use_master_clock = use_master_clock; 911 __entry->host_clock = host_clock; 912 __entry->offset_matched = offset_matched; 913 ), 914 915 TP_printk("masterclock %d hostclock %s offsetmatched %u", 916 __entry->use_master_clock, 917 __print_symbolic(__entry->host_clock, host_clocks), 918 __entry->offset_matched) 919 ); 920 921 TRACE_EVENT(kvm_track_tsc, 922 TP_PROTO(unsigned int vcpu_id, unsigned int nr_matched, 923 unsigned int online_vcpus, bool use_master_clock, 924 unsigned int host_clock), 925 TP_ARGS(vcpu_id, nr_matched, online_vcpus, use_master_clock, 926 host_clock), 927 928 TP_STRUCT__entry( 929 __field( unsigned int, vcpu_id ) 930 __field( unsigned int, nr_vcpus_matched_tsc ) 931 __field( unsigned int, online_vcpus ) 932 __field( bool, use_master_clock ) 933 __field( unsigned int, host_clock ) 934 ), 935 936 TP_fast_assign( 937 __entry->vcpu_id = vcpu_id; 938 __entry->nr_vcpus_matched_tsc = nr_matched; 939 __entry->online_vcpus = online_vcpus; 940 __entry->use_master_clock = use_master_clock; 941 __entry->host_clock = host_clock; 942 ), 943 944 TP_printk("vcpu_id %u masterclock %u offsetmatched %u nr_online %u" 945 " hostclock %s", 946 __entry->vcpu_id, __entry->use_master_clock, 947 __entry->nr_vcpus_matched_tsc, __entry->online_vcpus, 948 __print_symbolic(__entry->host_clock, host_clocks)) 949 ); 950 951 #endif /* CONFIG_X86_64 */ 952 953 /* 954 * Tracepoint for PML full VMEXIT. 955 */ 956 TRACE_EVENT(kvm_pml_full, 957 TP_PROTO(unsigned int vcpu_id), 958 TP_ARGS(vcpu_id), 959 960 TP_STRUCT__entry( 961 __field( unsigned int, vcpu_id ) 962 ), 963 964 TP_fast_assign( 965 __entry->vcpu_id = vcpu_id; 966 ), 967 968 TP_printk("vcpu %d: PML full", __entry->vcpu_id) 969 ); 970 971 TRACE_EVENT(kvm_ple_window_update, 972 TP_PROTO(unsigned int vcpu_id, unsigned int new, unsigned int old), 973 TP_ARGS(vcpu_id, new, old), 974 975 TP_STRUCT__entry( 976 __field( unsigned int, vcpu_id ) 977 __field( unsigned int, new ) 978 __field( unsigned int, old ) 979 ), 980 981 TP_fast_assign( 982 __entry->vcpu_id = vcpu_id; 983 __entry->new = new; 984 __entry->old = old; 985 ), 986 987 TP_printk("vcpu %u old %u new %u (%s)", 988 __entry->vcpu_id, __entry->old, __entry->new, 989 __entry->old < __entry->new ? "growed" : "shrinked") 990 ); 991 992 TRACE_EVENT(kvm_pvclock_update, 993 TP_PROTO(unsigned int vcpu_id, struct pvclock_vcpu_time_info *pvclock), 994 TP_ARGS(vcpu_id, pvclock), 995 996 TP_STRUCT__entry( 997 __field( unsigned int, vcpu_id ) 998 __field( __u32, version ) 999 __field( __u64, tsc_timestamp ) 1000 __field( __u64, system_time ) 1001 __field( __u32, tsc_to_system_mul ) 1002 __field( __s8, tsc_shift ) 1003 __field( __u8, flags ) 1004 ), 1005 1006 TP_fast_assign( 1007 __entry->vcpu_id = vcpu_id; 1008 __entry->version = pvclock->version; 1009 __entry->tsc_timestamp = pvclock->tsc_timestamp; 1010 __entry->system_time = pvclock->system_time; 1011 __entry->tsc_to_system_mul = pvclock->tsc_to_system_mul; 1012 __entry->tsc_shift = pvclock->tsc_shift; 1013 __entry->flags = pvclock->flags; 1014 ), 1015 1016 TP_printk("vcpu_id %u, pvclock { version %u, tsc_timestamp 0x%llx, " 1017 "system_time 0x%llx, tsc_to_system_mul 0x%x, tsc_shift %d, " 1018 "flags 0x%x }", 1019 __entry->vcpu_id, 1020 __entry->version, 1021 __entry->tsc_timestamp, 1022 __entry->system_time, 1023 __entry->tsc_to_system_mul, 1024 __entry->tsc_shift, 1025 __entry->flags) 1026 ); 1027 1028 TRACE_EVENT(kvm_wait_lapic_expire, 1029 TP_PROTO(unsigned int vcpu_id, s64 delta), 1030 TP_ARGS(vcpu_id, delta), 1031 1032 TP_STRUCT__entry( 1033 __field( unsigned int, vcpu_id ) 1034 __field( s64, delta ) 1035 ), 1036 1037 TP_fast_assign( 1038 __entry->vcpu_id = vcpu_id; 1039 __entry->delta = delta; 1040 ), 1041 1042 TP_printk("vcpu %u: delta %lld (%s)", 1043 __entry->vcpu_id, 1044 __entry->delta, 1045 __entry->delta < 0 ? "early" : "late") 1046 ); 1047 1048 TRACE_EVENT(kvm_smm_transition, 1049 TP_PROTO(unsigned int vcpu_id, u64 smbase, bool entering), 1050 TP_ARGS(vcpu_id, smbase, entering), 1051 1052 TP_STRUCT__entry( 1053 __field( unsigned int, vcpu_id ) 1054 __field( u64, smbase ) 1055 __field( bool, entering ) 1056 ), 1057 1058 TP_fast_assign( 1059 __entry->vcpu_id = vcpu_id; 1060 __entry->smbase = smbase; 1061 __entry->entering = entering; 1062 ), 1063 1064 TP_printk("vcpu %u: %s SMM, smbase 0x%llx", 1065 __entry->vcpu_id, 1066 __entry->entering ? "entering" : "leaving", 1067 __entry->smbase) 1068 ); 1069 1070 /* 1071 * Tracepoint for VT-d posted-interrupts. 1072 */ 1073 TRACE_EVENT(kvm_pi_irte_update, 1074 TP_PROTO(unsigned int host_irq, unsigned int vcpu_id, 1075 unsigned int gsi, unsigned int gvec, 1076 u64 pi_desc_addr, bool set), 1077 TP_ARGS(host_irq, vcpu_id, gsi, gvec, pi_desc_addr, set), 1078 1079 TP_STRUCT__entry( 1080 __field( unsigned int, host_irq ) 1081 __field( unsigned int, vcpu_id ) 1082 __field( unsigned int, gsi ) 1083 __field( unsigned int, gvec ) 1084 __field( u64, pi_desc_addr ) 1085 __field( bool, set ) 1086 ), 1087 1088 TP_fast_assign( 1089 __entry->host_irq = host_irq; 1090 __entry->vcpu_id = vcpu_id; 1091 __entry->gsi = gsi; 1092 __entry->gvec = gvec; 1093 __entry->pi_desc_addr = pi_desc_addr; 1094 __entry->set = set; 1095 ), 1096 1097 TP_printk("VT-d PI is %s for irq %u, vcpu %u, gsi: 0x%x, " 1098 "gvec: 0x%x, pi_desc_addr: 0x%llx", 1099 __entry->set ? "enabled and being updated" : "disabled", 1100 __entry->host_irq, 1101 __entry->vcpu_id, 1102 __entry->gsi, 1103 __entry->gvec, 1104 __entry->pi_desc_addr) 1105 ); 1106 1107 /* 1108 * Tracepoint for kvm_hv_notify_acked_sint. 1109 */ 1110 TRACE_EVENT(kvm_hv_notify_acked_sint, 1111 TP_PROTO(int vcpu_id, u32 sint), 1112 TP_ARGS(vcpu_id, sint), 1113 1114 TP_STRUCT__entry( 1115 __field(int, vcpu_id) 1116 __field(u32, sint) 1117 ), 1118 1119 TP_fast_assign( 1120 __entry->vcpu_id = vcpu_id; 1121 __entry->sint = sint; 1122 ), 1123 1124 TP_printk("vcpu_id %d sint %u", __entry->vcpu_id, __entry->sint) 1125 ); 1126 1127 /* 1128 * Tracepoint for synic_set_irq. 1129 */ 1130 TRACE_EVENT(kvm_hv_synic_set_irq, 1131 TP_PROTO(int vcpu_id, u32 sint, int vector, int ret), 1132 TP_ARGS(vcpu_id, sint, vector, ret), 1133 1134 TP_STRUCT__entry( 1135 __field(int, vcpu_id) 1136 __field(u32, sint) 1137 __field(int, vector) 1138 __field(int, ret) 1139 ), 1140 1141 TP_fast_assign( 1142 __entry->vcpu_id = vcpu_id; 1143 __entry->sint = sint; 1144 __entry->vector = vector; 1145 __entry->ret = ret; 1146 ), 1147 1148 TP_printk("vcpu_id %d sint %u vector %d ret %d", 1149 __entry->vcpu_id, __entry->sint, __entry->vector, 1150 __entry->ret) 1151 ); 1152 1153 /* 1154 * Tracepoint for kvm_hv_synic_send_eoi. 1155 */ 1156 TRACE_EVENT(kvm_hv_synic_send_eoi, 1157 TP_PROTO(int vcpu_id, int vector), 1158 TP_ARGS(vcpu_id, vector), 1159 1160 TP_STRUCT__entry( 1161 __field(int, vcpu_id) 1162 __field(u32, sint) 1163 __field(int, vector) 1164 __field(int, ret) 1165 ), 1166 1167 TP_fast_assign( 1168 __entry->vcpu_id = vcpu_id; 1169 __entry->vector = vector; 1170 ), 1171 1172 TP_printk("vcpu_id %d vector %d", __entry->vcpu_id, __entry->vector) 1173 ); 1174 1175 /* 1176 * Tracepoint for synic_set_msr. 1177 */ 1178 TRACE_EVENT(kvm_hv_synic_set_msr, 1179 TP_PROTO(int vcpu_id, u32 msr, u64 data, bool host), 1180 TP_ARGS(vcpu_id, msr, data, host), 1181 1182 TP_STRUCT__entry( 1183 __field(int, vcpu_id) 1184 __field(u32, msr) 1185 __field(u64, data) 1186 __field(bool, host) 1187 ), 1188 1189 TP_fast_assign( 1190 __entry->vcpu_id = vcpu_id; 1191 __entry->msr = msr; 1192 __entry->data = data; 1193 __entry->host = host 1194 ), 1195 1196 TP_printk("vcpu_id %d msr 0x%x data 0x%llx host %d", 1197 __entry->vcpu_id, __entry->msr, __entry->data, __entry->host) 1198 ); 1199 1200 /* 1201 * Tracepoint for stimer_set_config. 1202 */ 1203 TRACE_EVENT(kvm_hv_stimer_set_config, 1204 TP_PROTO(int vcpu_id, int timer_index, u64 config, bool host), 1205 TP_ARGS(vcpu_id, timer_index, config, host), 1206 1207 TP_STRUCT__entry( 1208 __field(int, vcpu_id) 1209 __field(int, timer_index) 1210 __field(u64, config) 1211 __field(bool, host) 1212 ), 1213 1214 TP_fast_assign( 1215 __entry->vcpu_id = vcpu_id; 1216 __entry->timer_index = timer_index; 1217 __entry->config = config; 1218 __entry->host = host; 1219 ), 1220 1221 TP_printk("vcpu_id %d timer %d config 0x%llx host %d", 1222 __entry->vcpu_id, __entry->timer_index, __entry->config, 1223 __entry->host) 1224 ); 1225 1226 /* 1227 * Tracepoint for stimer_set_count. 1228 */ 1229 TRACE_EVENT(kvm_hv_stimer_set_count, 1230 TP_PROTO(int vcpu_id, int timer_index, u64 count, bool host), 1231 TP_ARGS(vcpu_id, timer_index, count, host), 1232 1233 TP_STRUCT__entry( 1234 __field(int, vcpu_id) 1235 __field(int, timer_index) 1236 __field(u64, count) 1237 __field(bool, host) 1238 ), 1239 1240 TP_fast_assign( 1241 __entry->vcpu_id = vcpu_id; 1242 __entry->timer_index = timer_index; 1243 __entry->count = count; 1244 __entry->host = host; 1245 ), 1246 1247 TP_printk("vcpu_id %d timer %d count %llu host %d", 1248 __entry->vcpu_id, __entry->timer_index, __entry->count, 1249 __entry->host) 1250 ); 1251 1252 /* 1253 * Tracepoint for stimer_start(periodic timer case). 1254 */ 1255 TRACE_EVENT(kvm_hv_stimer_start_periodic, 1256 TP_PROTO(int vcpu_id, int timer_index, u64 time_now, u64 exp_time), 1257 TP_ARGS(vcpu_id, timer_index, time_now, exp_time), 1258 1259 TP_STRUCT__entry( 1260 __field(int, vcpu_id) 1261 __field(int, timer_index) 1262 __field(u64, time_now) 1263 __field(u64, exp_time) 1264 ), 1265 1266 TP_fast_assign( 1267 __entry->vcpu_id = vcpu_id; 1268 __entry->timer_index = timer_index; 1269 __entry->time_now = time_now; 1270 __entry->exp_time = exp_time; 1271 ), 1272 1273 TP_printk("vcpu_id %d timer %d time_now %llu exp_time %llu", 1274 __entry->vcpu_id, __entry->timer_index, __entry->time_now, 1275 __entry->exp_time) 1276 ); 1277 1278 /* 1279 * Tracepoint for stimer_start(one-shot timer case). 1280 */ 1281 TRACE_EVENT(kvm_hv_stimer_start_one_shot, 1282 TP_PROTO(int vcpu_id, int timer_index, u64 time_now, u64 count), 1283 TP_ARGS(vcpu_id, timer_index, time_now, count), 1284 1285 TP_STRUCT__entry( 1286 __field(int, vcpu_id) 1287 __field(int, timer_index) 1288 __field(u64, time_now) 1289 __field(u64, count) 1290 ), 1291 1292 TP_fast_assign( 1293 __entry->vcpu_id = vcpu_id; 1294 __entry->timer_index = timer_index; 1295 __entry->time_now = time_now; 1296 __entry->count = count; 1297 ), 1298 1299 TP_printk("vcpu_id %d timer %d time_now %llu count %llu", 1300 __entry->vcpu_id, __entry->timer_index, __entry->time_now, 1301 __entry->count) 1302 ); 1303 1304 /* 1305 * Tracepoint for stimer_timer_callback. 1306 */ 1307 TRACE_EVENT(kvm_hv_stimer_callback, 1308 TP_PROTO(int vcpu_id, int timer_index), 1309 TP_ARGS(vcpu_id, timer_index), 1310 1311 TP_STRUCT__entry( 1312 __field(int, vcpu_id) 1313 __field(int, timer_index) 1314 ), 1315 1316 TP_fast_assign( 1317 __entry->vcpu_id = vcpu_id; 1318 __entry->timer_index = timer_index; 1319 ), 1320 1321 TP_printk("vcpu_id %d timer %d", 1322 __entry->vcpu_id, __entry->timer_index) 1323 ); 1324 1325 /* 1326 * Tracepoint for stimer_expiration. 1327 */ 1328 TRACE_EVENT(kvm_hv_stimer_expiration, 1329 TP_PROTO(int vcpu_id, int timer_index, int direct, int msg_send_result), 1330 TP_ARGS(vcpu_id, timer_index, direct, msg_send_result), 1331 1332 TP_STRUCT__entry( 1333 __field(int, vcpu_id) 1334 __field(int, timer_index) 1335 __field(int, direct) 1336 __field(int, msg_send_result) 1337 ), 1338 1339 TP_fast_assign( 1340 __entry->vcpu_id = vcpu_id; 1341 __entry->timer_index = timer_index; 1342 __entry->direct = direct; 1343 __entry->msg_send_result = msg_send_result; 1344 ), 1345 1346 TP_printk("vcpu_id %d timer %d direct %d send result %d", 1347 __entry->vcpu_id, __entry->timer_index, 1348 __entry->direct, __entry->msg_send_result) 1349 ); 1350 1351 /* 1352 * Tracepoint for stimer_cleanup. 1353 */ 1354 TRACE_EVENT(kvm_hv_stimer_cleanup, 1355 TP_PROTO(int vcpu_id, int timer_index), 1356 TP_ARGS(vcpu_id, timer_index), 1357 1358 TP_STRUCT__entry( 1359 __field(int, vcpu_id) 1360 __field(int, timer_index) 1361 ), 1362 1363 TP_fast_assign( 1364 __entry->vcpu_id = vcpu_id; 1365 __entry->timer_index = timer_index; 1366 ), 1367 1368 TP_printk("vcpu_id %d timer %d", 1369 __entry->vcpu_id, __entry->timer_index) 1370 ); 1371 1372 TRACE_EVENT(kvm_apicv_inhibit_changed, 1373 TP_PROTO(int reason, bool set, unsigned long inhibits), 1374 TP_ARGS(reason, set, inhibits), 1375 1376 TP_STRUCT__entry( 1377 __field(int, reason) 1378 __field(bool, set) 1379 __field(unsigned long, inhibits) 1380 ), 1381 1382 TP_fast_assign( 1383 __entry->reason = reason; 1384 __entry->set = set; 1385 __entry->inhibits = inhibits; 1386 ), 1387 1388 TP_printk("%s reason=%u, inhibits=0x%lx", 1389 __entry->set ? "set" : "cleared", 1390 __entry->reason, __entry->inhibits) 1391 ); 1392 1393 TRACE_EVENT(kvm_apicv_accept_irq, 1394 TP_PROTO(__u32 apicid, __u16 dm, __u16 tm, __u8 vec), 1395 TP_ARGS(apicid, dm, tm, vec), 1396 1397 TP_STRUCT__entry( 1398 __field( __u32, apicid ) 1399 __field( __u16, dm ) 1400 __field( __u16, tm ) 1401 __field( __u8, vec ) 1402 ), 1403 1404 TP_fast_assign( 1405 __entry->apicid = apicid; 1406 __entry->dm = dm; 1407 __entry->tm = tm; 1408 __entry->vec = vec; 1409 ), 1410 1411 TP_printk("apicid %x vec %u (%s|%s)", 1412 __entry->apicid, __entry->vec, 1413 __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode), 1414 __entry->tm ? "level" : "edge") 1415 ); 1416 1417 /* 1418 * Tracepoint for AMD AVIC 1419 */ 1420 TRACE_EVENT(kvm_avic_incomplete_ipi, 1421 TP_PROTO(u32 vcpu, u32 icrh, u32 icrl, u32 id, u32 index), 1422 TP_ARGS(vcpu, icrh, icrl, id, index), 1423 1424 TP_STRUCT__entry( 1425 __field(u32, vcpu) 1426 __field(u32, icrh) 1427 __field(u32, icrl) 1428 __field(u32, id) 1429 __field(u32, index) 1430 ), 1431 1432 TP_fast_assign( 1433 __entry->vcpu = vcpu; 1434 __entry->icrh = icrh; 1435 __entry->icrl = icrl; 1436 __entry->id = id; 1437 __entry->index = index; 1438 ), 1439 1440 TP_printk("vcpu=%u, icrh:icrl=%#010x:%08x, id=%u, index=%u", 1441 __entry->vcpu, __entry->icrh, __entry->icrl, 1442 __entry->id, __entry->index) 1443 ); 1444 1445 TRACE_EVENT(kvm_avic_unaccelerated_access, 1446 TP_PROTO(u32 vcpu, u32 offset, bool ft, bool rw, u32 vec), 1447 TP_ARGS(vcpu, offset, ft, rw, vec), 1448 1449 TP_STRUCT__entry( 1450 __field(u32, vcpu) 1451 __field(u32, offset) 1452 __field(bool, ft) 1453 __field(bool, rw) 1454 __field(u32, vec) 1455 ), 1456 1457 TP_fast_assign( 1458 __entry->vcpu = vcpu; 1459 __entry->offset = offset; 1460 __entry->ft = ft; 1461 __entry->rw = rw; 1462 __entry->vec = vec; 1463 ), 1464 1465 TP_printk("vcpu=%u, offset=%#x(%s), %s, %s, vec=%#x", 1466 __entry->vcpu, 1467 __entry->offset, 1468 __print_symbolic(__entry->offset, kvm_trace_symbol_apic), 1469 __entry->ft ? "trap" : "fault", 1470 __entry->rw ? "write" : "read", 1471 __entry->vec) 1472 ); 1473 1474 TRACE_EVENT(kvm_avic_ga_log, 1475 TP_PROTO(u32 vmid, u32 vcpuid), 1476 TP_ARGS(vmid, vcpuid), 1477 1478 TP_STRUCT__entry( 1479 __field(u32, vmid) 1480 __field(u32, vcpuid) 1481 ), 1482 1483 TP_fast_assign( 1484 __entry->vmid = vmid; 1485 __entry->vcpuid = vcpuid; 1486 ), 1487 1488 TP_printk("vmid=%u, vcpuid=%u", 1489 __entry->vmid, __entry->vcpuid) 1490 ); 1491 1492 TRACE_EVENT(kvm_avic_kick_vcpu_slowpath, 1493 TP_PROTO(u32 icrh, u32 icrl, u32 index), 1494 TP_ARGS(icrh, icrl, index), 1495 1496 TP_STRUCT__entry( 1497 __field(u32, icrh) 1498 __field(u32, icrl) 1499 __field(u32, index) 1500 ), 1501 1502 TP_fast_assign( 1503 __entry->icrh = icrh; 1504 __entry->icrl = icrl; 1505 __entry->index = index; 1506 ), 1507 1508 TP_printk("icrh:icrl=%#08x:%08x, index=%u", 1509 __entry->icrh, __entry->icrl, __entry->index) 1510 ); 1511 1512 TRACE_EVENT(kvm_avic_doorbell, 1513 TP_PROTO(u32 vcpuid, u32 apicid), 1514 TP_ARGS(vcpuid, apicid), 1515 1516 TP_STRUCT__entry( 1517 __field(u32, vcpuid) 1518 __field(u32, apicid) 1519 ), 1520 1521 TP_fast_assign( 1522 __entry->vcpuid = vcpuid; 1523 __entry->apicid = apicid; 1524 ), 1525 1526 TP_printk("vcpuid=%u, apicid=%u", 1527 __entry->vcpuid, __entry->apicid) 1528 ); 1529 1530 TRACE_EVENT(kvm_hv_timer_state, 1531 TP_PROTO(unsigned int vcpu_id, unsigned int hv_timer_in_use), 1532 TP_ARGS(vcpu_id, hv_timer_in_use), 1533 TP_STRUCT__entry( 1534 __field(unsigned int, vcpu_id) 1535 __field(unsigned int, hv_timer_in_use) 1536 ), 1537 TP_fast_assign( 1538 __entry->vcpu_id = vcpu_id; 1539 __entry->hv_timer_in_use = hv_timer_in_use; 1540 ), 1541 TP_printk("vcpu_id %x hv_timer %x", 1542 __entry->vcpu_id, 1543 __entry->hv_timer_in_use) 1544 ); 1545 1546 /* 1547 * Tracepoint for kvm_hv_flush_tlb. 1548 */ 1549 TRACE_EVENT(kvm_hv_flush_tlb, 1550 TP_PROTO(u64 processor_mask, u64 address_space, u64 flags), 1551 TP_ARGS(processor_mask, address_space, flags), 1552 1553 TP_STRUCT__entry( 1554 __field(u64, processor_mask) 1555 __field(u64, address_space) 1556 __field(u64, flags) 1557 ), 1558 1559 TP_fast_assign( 1560 __entry->processor_mask = processor_mask; 1561 __entry->address_space = address_space; 1562 __entry->flags = flags; 1563 ), 1564 1565 TP_printk("processor_mask 0x%llx address_space 0x%llx flags 0x%llx", 1566 __entry->processor_mask, __entry->address_space, 1567 __entry->flags) 1568 ); 1569 1570 /* 1571 * Tracepoint for kvm_hv_flush_tlb_ex. 1572 */ 1573 TRACE_EVENT(kvm_hv_flush_tlb_ex, 1574 TP_PROTO(u64 valid_bank_mask, u64 format, u64 address_space, u64 flags), 1575 TP_ARGS(valid_bank_mask, format, address_space, flags), 1576 1577 TP_STRUCT__entry( 1578 __field(u64, valid_bank_mask) 1579 __field(u64, format) 1580 __field(u64, address_space) 1581 __field(u64, flags) 1582 ), 1583 1584 TP_fast_assign( 1585 __entry->valid_bank_mask = valid_bank_mask; 1586 __entry->format = format; 1587 __entry->address_space = address_space; 1588 __entry->flags = flags; 1589 ), 1590 1591 TP_printk("valid_bank_mask 0x%llx format 0x%llx " 1592 "address_space 0x%llx flags 0x%llx", 1593 __entry->valid_bank_mask, __entry->format, 1594 __entry->address_space, __entry->flags) 1595 ); 1596 1597 /* 1598 * Tracepoints for kvm_hv_send_ipi. 1599 */ 1600 TRACE_EVENT(kvm_hv_send_ipi, 1601 TP_PROTO(u32 vector, u64 processor_mask), 1602 TP_ARGS(vector, processor_mask), 1603 1604 TP_STRUCT__entry( 1605 __field(u32, vector) 1606 __field(u64, processor_mask) 1607 ), 1608 1609 TP_fast_assign( 1610 __entry->vector = vector; 1611 __entry->processor_mask = processor_mask; 1612 ), 1613 1614 TP_printk("vector %x processor_mask 0x%llx", 1615 __entry->vector, __entry->processor_mask) 1616 ); 1617 1618 TRACE_EVENT(kvm_hv_send_ipi_ex, 1619 TP_PROTO(u32 vector, u64 format, u64 valid_bank_mask), 1620 TP_ARGS(vector, format, valid_bank_mask), 1621 1622 TP_STRUCT__entry( 1623 __field(u32, vector) 1624 __field(u64, format) 1625 __field(u64, valid_bank_mask) 1626 ), 1627 1628 TP_fast_assign( 1629 __entry->vector = vector; 1630 __entry->format = format; 1631 __entry->valid_bank_mask = valid_bank_mask; 1632 ), 1633 1634 TP_printk("vector %x format %llx valid_bank_mask 0x%llx", 1635 __entry->vector, __entry->format, 1636 __entry->valid_bank_mask) 1637 ); 1638 1639 TRACE_EVENT(kvm_pv_tlb_flush, 1640 TP_PROTO(unsigned int vcpu_id, bool need_flush_tlb), 1641 TP_ARGS(vcpu_id, need_flush_tlb), 1642 1643 TP_STRUCT__entry( 1644 __field( unsigned int, vcpu_id ) 1645 __field( bool, need_flush_tlb ) 1646 ), 1647 1648 TP_fast_assign( 1649 __entry->vcpu_id = vcpu_id; 1650 __entry->need_flush_tlb = need_flush_tlb; 1651 ), 1652 1653 TP_printk("vcpu %u need_flush_tlb %s", __entry->vcpu_id, 1654 __entry->need_flush_tlb ? "true" : "false") 1655 ); 1656 1657 /* 1658 * Tracepoint for failed nested VMX VM-Enter. 1659 */ 1660 TRACE_EVENT(kvm_nested_vmenter_failed, 1661 TP_PROTO(const char *msg, u32 err), 1662 TP_ARGS(msg, err), 1663 1664 TP_STRUCT__entry( 1665 __string(msg, msg) 1666 __field(u32, err) 1667 ), 1668 1669 TP_fast_assign( 1670 __assign_str(msg, msg); 1671 __entry->err = err; 1672 ), 1673 1674 TP_printk("%s%s", __get_str(msg), !__entry->err ? "" : 1675 __print_symbolic(__entry->err, VMX_VMENTER_INSTRUCTION_ERRORS)) 1676 ); 1677 1678 /* 1679 * Tracepoint for syndbg_set_msr. 1680 */ 1681 TRACE_EVENT(kvm_hv_syndbg_set_msr, 1682 TP_PROTO(int vcpu_id, u32 vp_index, u32 msr, u64 data), 1683 TP_ARGS(vcpu_id, vp_index, msr, data), 1684 1685 TP_STRUCT__entry( 1686 __field(int, vcpu_id) 1687 __field(u32, vp_index) 1688 __field(u32, msr) 1689 __field(u64, data) 1690 ), 1691 1692 TP_fast_assign( 1693 __entry->vcpu_id = vcpu_id; 1694 __entry->vp_index = vp_index; 1695 __entry->msr = msr; 1696 __entry->data = data; 1697 ), 1698 1699 TP_printk("vcpu_id %d vp_index %u msr 0x%x data 0x%llx", 1700 __entry->vcpu_id, __entry->vp_index, __entry->msr, 1701 __entry->data) 1702 ); 1703 1704 /* 1705 * Tracepoint for syndbg_get_msr. 1706 */ 1707 TRACE_EVENT(kvm_hv_syndbg_get_msr, 1708 TP_PROTO(int vcpu_id, u32 vp_index, u32 msr, u64 data), 1709 TP_ARGS(vcpu_id, vp_index, msr, data), 1710 1711 TP_STRUCT__entry( 1712 __field(int, vcpu_id) 1713 __field(u32, vp_index) 1714 __field(u32, msr) 1715 __field(u64, data) 1716 ), 1717 1718 TP_fast_assign( 1719 __entry->vcpu_id = vcpu_id; 1720 __entry->vp_index = vp_index; 1721 __entry->msr = msr; 1722 __entry->data = data; 1723 ), 1724 1725 TP_printk("vcpu_id %d vp_index %u msr 0x%x data 0x%llx", 1726 __entry->vcpu_id, __entry->vp_index, __entry->msr, 1727 __entry->data) 1728 ); 1729 1730 /* 1731 * Tracepoint for the start of VMGEXIT processing 1732 */ 1733 TRACE_EVENT(kvm_vmgexit_enter, 1734 TP_PROTO(unsigned int vcpu_id, struct ghcb *ghcb), 1735 TP_ARGS(vcpu_id, ghcb), 1736 1737 TP_STRUCT__entry( 1738 __field(unsigned int, vcpu_id) 1739 __field(u64, exit_reason) 1740 __field(u64, info1) 1741 __field(u64, info2) 1742 ), 1743 1744 TP_fast_assign( 1745 __entry->vcpu_id = vcpu_id; 1746 __entry->exit_reason = ghcb->save.sw_exit_code; 1747 __entry->info1 = ghcb->save.sw_exit_info_1; 1748 __entry->info2 = ghcb->save.sw_exit_info_2; 1749 ), 1750 1751 TP_printk("vcpu %u, exit_reason %llx, exit_info1 %llx, exit_info2 %llx", 1752 __entry->vcpu_id, __entry->exit_reason, 1753 __entry->info1, __entry->info2) 1754 ); 1755 1756 /* 1757 * Tracepoint for the end of VMGEXIT processing 1758 */ 1759 TRACE_EVENT(kvm_vmgexit_exit, 1760 TP_PROTO(unsigned int vcpu_id, struct ghcb *ghcb), 1761 TP_ARGS(vcpu_id, ghcb), 1762 1763 TP_STRUCT__entry( 1764 __field(unsigned int, vcpu_id) 1765 __field(u64, exit_reason) 1766 __field(u64, info1) 1767 __field(u64, info2) 1768 ), 1769 1770 TP_fast_assign( 1771 __entry->vcpu_id = vcpu_id; 1772 __entry->exit_reason = ghcb->save.sw_exit_code; 1773 __entry->info1 = ghcb->save.sw_exit_info_1; 1774 __entry->info2 = ghcb->save.sw_exit_info_2; 1775 ), 1776 1777 TP_printk("vcpu %u, exit_reason %llx, exit_info1 %llx, exit_info2 %llx", 1778 __entry->vcpu_id, __entry->exit_reason, 1779 __entry->info1, __entry->info2) 1780 ); 1781 1782 /* 1783 * Tracepoint for the start of VMGEXIT MSR procotol processing 1784 */ 1785 TRACE_EVENT(kvm_vmgexit_msr_protocol_enter, 1786 TP_PROTO(unsigned int vcpu_id, u64 ghcb_gpa), 1787 TP_ARGS(vcpu_id, ghcb_gpa), 1788 1789 TP_STRUCT__entry( 1790 __field(unsigned int, vcpu_id) 1791 __field(u64, ghcb_gpa) 1792 ), 1793 1794 TP_fast_assign( 1795 __entry->vcpu_id = vcpu_id; 1796 __entry->ghcb_gpa = ghcb_gpa; 1797 ), 1798 1799 TP_printk("vcpu %u, ghcb_gpa %016llx", 1800 __entry->vcpu_id, __entry->ghcb_gpa) 1801 ); 1802 1803 /* 1804 * Tracepoint for the end of VMGEXIT MSR procotol processing 1805 */ 1806 TRACE_EVENT(kvm_vmgexit_msr_protocol_exit, 1807 TP_PROTO(unsigned int vcpu_id, u64 ghcb_gpa, int result), 1808 TP_ARGS(vcpu_id, ghcb_gpa, result), 1809 1810 TP_STRUCT__entry( 1811 __field(unsigned int, vcpu_id) 1812 __field(u64, ghcb_gpa) 1813 __field(int, result) 1814 ), 1815 1816 TP_fast_assign( 1817 __entry->vcpu_id = vcpu_id; 1818 __entry->ghcb_gpa = ghcb_gpa; 1819 __entry->result = result; 1820 ), 1821 1822 TP_printk("vcpu %u, ghcb_gpa %016llx, result %d", 1823 __entry->vcpu_id, __entry->ghcb_gpa, __entry->result) 1824 ); 1825 1826 #endif /* _TRACE_KVM_H */ 1827 1828 #undef TRACE_INCLUDE_PATH 1829 #define TRACE_INCLUDE_PATH ../../arch/x86/kvm 1830 #undef TRACE_INCLUDE_FILE 1831 #define TRACE_INCLUDE_FILE trace 1832 1833 /* This part must be outside protection */ 1834 #include <trace/define_trace.h> 1835