/* SPDX-License-Identifier: GPL-2.0 */
#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_H

#include <linux/tracepoint.h>
#include <asm/vmx.h>
#include <asm/svm.h>
#include <asm/clocksource.h>
#include <asm/pvclock-abi.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm

/*
 * Tracepoint for guest mode entry.
 */
TRACE_EVENT(kvm_entry,
	TP_PROTO(struct kvm_vcpu *vcpu),
	TP_ARGS(vcpu),

	TP_STRUCT__entry(
		__field( unsigned int, vcpu_id )
		__field( unsigned long, rip )
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu->vcpu_id;
		__entry->rip = kvm_rip_read(vcpu);
	),

	TP_printk("vcpu %u, rip 0x%lx", __entry->vcpu_id, __entry->rip)
);

/*
 * Tracepoint for hypercall.
 */
TRACE_EVENT(kvm_hypercall,
	TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1,
		 unsigned long a2, unsigned long a3),
	TP_ARGS(nr, a0, a1, a2, a3),

	TP_STRUCT__entry(
		__field( unsigned long, nr )
		__field( unsigned long, a0 )
		__field( unsigned long, a1 )
		__field( unsigned long, a2 )
		__field( unsigned long, a3 )
	),

	TP_fast_assign(
		__entry->nr = nr;
		__entry->a0 = a0;
		__entry->a1 = a1;
		__entry->a2 = a2;
		__entry->a3 = a3;
	),

	TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx",
		  __entry->nr, __entry->a0, __entry->a1, __entry->a2,
		  __entry->a3)
);

/*
 * Tracepoint for Hyper-V hypercall.
 */
TRACE_EVENT(kvm_hv_hypercall,
	TP_PROTO(__u16 code, bool fast, __u16 var_cnt, __u16 rep_cnt,
		 __u16 rep_idx, __u64 ingpa, __u64 outgpa),
	TP_ARGS(code, fast, var_cnt, rep_cnt, rep_idx, ingpa, outgpa),

	TP_STRUCT__entry(
		__field( __u16, rep_cnt )
		__field( __u16, rep_idx )
		__field( __u64, ingpa )
		__field( __u64, outgpa )
		__field( __u16, code )
		__field( __u16, var_cnt )
		__field( bool, fast )
	),

	TP_fast_assign(
		__entry->rep_cnt = rep_cnt;
		__entry->rep_idx = rep_idx;
		__entry->ingpa = ingpa;
		__entry->outgpa = outgpa;
		__entry->code = code;
		__entry->var_cnt = var_cnt;
		__entry->fast = fast;
	),

	TP_printk("code 0x%x %s var_cnt 0x%x rep_cnt 0x%x idx 0x%x in 0x%llx out 0x%llx",
		  __entry->code, __entry->fast ? "fast" : "slow",
		  __entry->var_cnt, __entry->rep_cnt, __entry->rep_idx,
		  __entry->ingpa, __entry->outgpa)
);

TRACE_EVENT(kvm_hv_hypercall_done,
	TP_PROTO(u64 result),
	TP_ARGS(result),

	TP_STRUCT__entry(
		__field(__u64, result)
	),

	TP_fast_assign(
		__entry->result = result;
	),

	TP_printk("result 0x%llx", __entry->result)
);
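
/*
 * Illustrative usage: with TRACE_SYSTEM set to "kvm" above, these events
 * are typically exposed through tracefs and can be enabled from userspace,
 * e.g. (paths depend on where tracefs is mounted):
 *
 *   echo 1 > /sys/kernel/tracing/events/kvm/kvm_hypercall/enable
 *   cat /sys/kernel/tracing/trace_pipe
 */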

/*
 * Tracepoint for Xen hypercall.
 */
TRACE_EVENT(kvm_xen_hypercall,
	TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1,
		 unsigned long a2, unsigned long a3, unsigned long a4,
		 unsigned long a5),
	TP_ARGS(nr, a0, a1, a2, a3, a4, a5),

	TP_STRUCT__entry(
		__field(unsigned long, nr)
		__field(unsigned long, a0)
		__field(unsigned long, a1)
		__field(unsigned long, a2)
		__field(unsigned long, a3)
		__field(unsigned long, a4)
		__field(unsigned long, a5)
	),

	TP_fast_assign(
		__entry->nr = nr;
		__entry->a0 = a0;
		__entry->a1 = a1;
		__entry->a2 = a2;
		__entry->a3 = a3;
		__entry->a4 = a4;
		__entry->a5 = a5;
	),

	TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx a4 0x%lx a5 %lx",
		  __entry->nr, __entry->a0, __entry->a1, __entry->a2,
		  __entry->a3, __entry->a4, __entry->a5)
);

/*
 * Tracepoint for PIO.
 */

#define KVM_PIO_IN  0
#define KVM_PIO_OUT 1

TRACE_EVENT(kvm_pio,
	TP_PROTO(unsigned int rw, unsigned int port, unsigned int size,
		 unsigned int count, void *data),
	TP_ARGS(rw, port, size, count, data),

	TP_STRUCT__entry(
		__field( unsigned int, rw )
		__field( unsigned int, port )
		__field( unsigned int, size )
		__field( unsigned int, count )
		__field( unsigned int, val )
	),

	TP_fast_assign(
		__entry->rw = rw;
		__entry->port = port;
		__entry->size = size;
		__entry->count = count;
		if (size == 1)
			__entry->val = *(unsigned char *)data;
		else if (size == 2)
			__entry->val = *(unsigned short *)data;
		else
			__entry->val = *(unsigned int *)data;
	),

	TP_printk("pio_%s at 0x%x size %d count %d val 0x%x %s",
		  __entry->rw ? "write" : "read",
		  __entry->port, __entry->size, __entry->count, __entry->val,
		  __entry->count > 1 ? "(...)" : "")
);

/*
 * Tracepoint for fast mmio.
 */
TRACE_EVENT(kvm_fast_mmio,
	TP_PROTO(u64 gpa),
	TP_ARGS(gpa),

	TP_STRUCT__entry(
		__field(u64, gpa)
	),

	TP_fast_assign(
		__entry->gpa = gpa;
	),

	TP_printk("fast mmio at gpa 0x%llx", __entry->gpa)
);

/*
 * Tracepoint for cpuid.
 */
TRACE_EVENT(kvm_cpuid,
	TP_PROTO(unsigned int function, unsigned int index, unsigned long rax,
		 unsigned long rbx, unsigned long rcx, unsigned long rdx,
		 bool found, bool used_max_basic),
	TP_ARGS(function, index, rax, rbx, rcx, rdx, found, used_max_basic),

	TP_STRUCT__entry(
		__field( unsigned int, function )
		__field( unsigned int, index )
		__field( unsigned long, rax )
		__field( unsigned long, rbx )
		__field( unsigned long, rcx )
		__field( unsigned long, rdx )
		__field( bool, found )
		__field( bool, used_max_basic )
	),

	TP_fast_assign(
		__entry->function = function;
		__entry->index = index;
		__entry->rax = rax;
		__entry->rbx = rbx;
		__entry->rcx = rcx;
		__entry->rdx = rdx;
		__entry->found = found;
		__entry->used_max_basic = used_max_basic;
	),

	TP_printk("func %x idx %x rax %lx rbx %lx rcx %lx rdx %lx, cpuid entry %s%s",
		  __entry->function, __entry->index, __entry->rax,
		  __entry->rbx, __entry->rcx, __entry->rdx,
		  __entry->found ? "found" : "not found",
		  __entry->used_max_basic ? ", used max basic" : "")
);

#define AREG(x) { APIC_##x, "APIC_" #x }

#define kvm_trace_symbol_apic \
	AREG(ID), AREG(LVR), AREG(TASKPRI), AREG(ARBPRI), AREG(PROCPRI), \
	AREG(EOI), AREG(RRR), AREG(LDR), AREG(DFR), AREG(SPIV), AREG(ISR), \
	AREG(TMR), AREG(IRR), AREG(ESR), AREG(ICR), AREG(ICR2), AREG(LVTT), \
	AREG(LVTTHMR), AREG(LVTPC), AREG(LVT0), AREG(LVT1), AREG(LVTERR), \
	AREG(TMICT), AREG(TMCCT), AREG(TDCR), AREG(SELF_IPI), AREG(EFEAT), \
	AREG(ECTRL)
/*
 * Tracepoint for apic access.
 */
TRACE_EVENT(kvm_apic,
	TP_PROTO(unsigned int rw, unsigned int reg, u64 val),
	TP_ARGS(rw, reg, val),

	TP_STRUCT__entry(
		__field( unsigned int, rw )
		__field( unsigned int, reg )
		__field( u64, val )
	),

	TP_fast_assign(
		__entry->rw = rw;
		__entry->reg = reg;
		__entry->val = val;
	),

	TP_printk("apic_%s %s = 0x%llx",
		  __entry->rw ? "write" : "read",
		  __print_symbolic(__entry->reg, kvm_trace_symbol_apic),
		  __entry->val)
);

#define trace_kvm_apic_read(reg, val) trace_kvm_apic(0, reg, val)
#define trace_kvm_apic_write(reg, val) trace_kvm_apic(1, reg, val)

#define KVM_ISA_VMX 1
#define KVM_ISA_SVM 2

#define kvm_print_exit_reason(exit_reason, isa) \
	(isa == KVM_ISA_VMX) ? \
	__print_symbolic(exit_reason & 0xffff, VMX_EXIT_REASONS) : \
	__print_symbolic(exit_reason, SVM_EXIT_REASONS), \
	(isa == KVM_ISA_VMX && exit_reason & ~0xffff) ? " " : "", \
	(isa == KVM_ISA_VMX) ? \
	__print_flags(exit_reason & ~0xffff, " ", VMX_EXIT_REASON_FLAGS) : ""

#define TRACE_EVENT_KVM_EXIT(name) \
TRACE_EVENT(name, \
	TP_PROTO(struct kvm_vcpu *vcpu, u32 isa), \
	TP_ARGS(vcpu, isa), \
 \
	TP_STRUCT__entry( \
		__field( unsigned int, exit_reason ) \
		__field( unsigned long, guest_rip ) \
		__field( u32, isa ) \
		__field( u64, info1 ) \
		__field( u64, info2 ) \
		__field( u32, intr_info ) \
		__field( u32, error_code ) \
		__field( unsigned int, vcpu_id ) \
	), \
 \
	TP_fast_assign( \
		__entry->guest_rip = kvm_rip_read(vcpu); \
		__entry->isa = isa; \
		__entry->vcpu_id = vcpu->vcpu_id; \
		static_call(kvm_x86_get_exit_info)(vcpu, \
						   &__entry->exit_reason, \
						   &__entry->info1, \
						   &__entry->info2, \
						   &__entry->intr_info, \
						   &__entry->error_code); \
	), \
 \
	TP_printk("vcpu %u reason %s%s%s rip 0x%lx info1 0x%016llx " \
		  "info2 0x%016llx intr_info 0x%08x error_code 0x%08x", \
		  __entry->vcpu_id, \
		  kvm_print_exit_reason(__entry->exit_reason, __entry->isa), \
		  __entry->guest_rip, __entry->info1, __entry->info2, \
		  __entry->intr_info, __entry->error_code) \
)

/*
 * Tracepoint for kvm guest exit:
 */
TRACE_EVENT_KVM_EXIT(kvm_exit);

/*
 * Tracepoint for kvm interrupt injection:
 */
TRACE_EVENT(kvm_inj_virq,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq),

	TP_STRUCT__entry(
		__field( unsigned int, irq )
	),

	TP_fast_assign(
		__entry->irq = irq;
	),

	TP_printk("irq %u", __entry->irq)
);

#define EXS(x) { x##_VECTOR, "#" #x }

#define kvm_trace_sym_exc \
	EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM), \
	EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF), \
	EXS(MF), EXS(AC), EXS(MC)

/*
 * Tracepoint for kvm exception injection:
 */
TRACE_EVENT(kvm_inj_exception,
	TP_PROTO(unsigned exception, bool has_error, unsigned error_code),
	TP_ARGS(exception, has_error, error_code),

	TP_STRUCT__entry(
		__field( u8, exception )
		__field( u8, has_error )
		__field( u32, error_code )
	),

	TP_fast_assign(
		__entry->exception = exception;
		__entry->has_error = has_error;
		__entry->error_code = error_code;
	),

	TP_printk("%s (0x%x)",
		  __print_symbolic(__entry->exception, kvm_trace_sym_exc),
		  /* FIXME: don't print error_code if not present */
		  __entry->has_error ? __entry->error_code : 0)
);

/*
 * Tracepoint for page fault.
 */
TRACE_EVENT(kvm_page_fault,
	TP_PROTO(unsigned long fault_address, unsigned int error_code),
	TP_ARGS(fault_address, error_code),

	TP_STRUCT__entry(
		__field( unsigned long, fault_address )
		__field( unsigned int, error_code )
	),

	TP_fast_assign(
		__entry->fault_address = fault_address;
		__entry->error_code = error_code;
	),

	TP_printk("address %lx error_code %x",
		  __entry->fault_address, __entry->error_code)
);

/*
 * Tracepoint for guest MSR access.
 */
TRACE_EVENT(kvm_msr,
	TP_PROTO(unsigned write, u32 ecx, u64 data, bool exception),
	TP_ARGS(write, ecx, data, exception),

	TP_STRUCT__entry(
		__field( unsigned, write )
		__field( u32, ecx )
		__field( u64, data )
		__field( u8, exception )
	),

	TP_fast_assign(
		__entry->write = write;
		__entry->ecx = ecx;
		__entry->data = data;
		__entry->exception = exception;
	),

	TP_printk("msr_%s %x = 0x%llx%s",
		  __entry->write ? "write" : "read",
		  __entry->ecx, __entry->data,
		  __entry->exception ? " (#GP)" : "")
);

#define trace_kvm_msr_read(ecx, data) trace_kvm_msr(0, ecx, data, false)
#define trace_kvm_msr_write(ecx, data) trace_kvm_msr(1, ecx, data, false)
#define trace_kvm_msr_read_ex(ecx) trace_kvm_msr(0, ecx, 0, true)
#define trace_kvm_msr_write_ex(ecx, data) trace_kvm_msr(1, ecx, data, true)

/*
 * Tracepoint for guest CR access.
 */
TRACE_EVENT(kvm_cr,
	TP_PROTO(unsigned int rw, unsigned int cr, unsigned long val),
	TP_ARGS(rw, cr, val),

	TP_STRUCT__entry(
		__field( unsigned int, rw )
		__field( unsigned int, cr )
		__field( unsigned long, val )
	),

	TP_fast_assign(
		__entry->rw = rw;
		__entry->cr = cr;
		__entry->val = val;
	),

	TP_printk("cr_%s %x = 0x%lx",
		  __entry->rw ? "write" : "read",
		  __entry->cr, __entry->val)
);

#define trace_kvm_cr_read(cr, val) trace_kvm_cr(0, cr, val)
#define trace_kvm_cr_write(cr, val) trace_kvm_cr(1, cr, val)

TRACE_EVENT(kvm_pic_set_irq,
	TP_PROTO(__u8 chip, __u8 pin, __u8 elcr, __u8 imr, bool coalesced),
	TP_ARGS(chip, pin, elcr, imr, coalesced),

	TP_STRUCT__entry(
		__field( __u8, chip )
		__field( __u8, pin )
		__field( __u8, elcr )
		__field( __u8, imr )
		__field( bool, coalesced )
	),

	TP_fast_assign(
		__entry->chip = chip;
		__entry->pin = pin;
		__entry->elcr = elcr;
		__entry->imr = imr;
		__entry->coalesced = coalesced;
	),

	TP_printk("chip %u pin %u (%s%s)%s",
		  __entry->chip, __entry->pin,
		  (__entry->elcr & (1 << __entry->pin)) ? "level" : "edge",
		  (__entry->imr & (1 << __entry->pin)) ? "|masked" : "",
		  __entry->coalesced ? " (coalesced)" : "")
);

#define kvm_apic_dst_shorthand \
	{0x0, "dst"}, \
	{0x1, "self"}, \
	{0x2, "all"}, \
	{0x3, "all-but-self"}

TRACE_EVENT(kvm_apic_ipi,
	TP_PROTO(__u32 icr_low, __u32 dest_id),
	TP_ARGS(icr_low, dest_id),

	TP_STRUCT__entry(
		__field( __u32, icr_low )
		__field( __u32, dest_id )
	),

	TP_fast_assign(
		__entry->icr_low = icr_low;
		__entry->dest_id = dest_id;
	),

	TP_printk("dst %x vec %u (%s|%s|%s|%s|%s)",
		  __entry->dest_id, (u8)__entry->icr_low,
		  __print_symbolic((__entry->icr_low >> 8 & 0x7),
				   kvm_deliver_mode),
		  (__entry->icr_low & (1<<11)) ? "logical" : "physical",
		  (__entry->icr_low & (1<<14)) ? "assert" : "de-assert",
		  (__entry->icr_low & (1<<15)) ? "level" : "edge",
		  __print_symbolic((__entry->icr_low >> 18 & 0x3),
				   kvm_apic_dst_shorthand))
);

TRACE_EVENT(kvm_apic_accept_irq,
	TP_PROTO(__u32 apicid, __u16 dm, __u16 tm, __u8 vec),
	TP_ARGS(apicid, dm, tm, vec),

	TP_STRUCT__entry(
		__field( __u32, apicid )
		__field( __u16, dm )
		__field( __u16, tm )
		__field( __u8, vec )
	),

	TP_fast_assign(
		__entry->apicid = apicid;
		__entry->dm = dm;
		__entry->tm = tm;
		__entry->vec = vec;
	),

	TP_printk("apicid %x vec %u (%s|%s)",
		  __entry->apicid, __entry->vec,
		  __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode),
		  __entry->tm ? "level" : "edge")
);

TRACE_EVENT(kvm_eoi,
	TP_PROTO(struct kvm_lapic *apic, int vector),
	TP_ARGS(apic, vector),

	TP_STRUCT__entry(
		__field( __u32, apicid )
		__field( int, vector )
	),

	TP_fast_assign(
		__entry->apicid = apic->vcpu->vcpu_id;
		__entry->vector = vector;
	),

	TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector)
);

TRACE_EVENT(kvm_pv_eoi,
	TP_PROTO(struct kvm_lapic *apic, int vector),
	TP_ARGS(apic, vector),

	TP_STRUCT__entry(
		__field( __u32, apicid )
		__field( int, vector )
	),

	TP_fast_assign(
		__entry->apicid = apic->vcpu->vcpu_id;
		__entry->vector = vector;
	),

	TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector)
);

/*
 * Tracepoint for nested VMRUN
 */
TRACE_EVENT(kvm_nested_vmrun,
	TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl,
		 __u32 event_inj, bool npt),
	TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, npt),

	TP_STRUCT__entry(
		__field( __u64, rip )
		__field( __u64, vmcb )
		__field( __u64, nested_rip )
		__field( __u32, int_ctl )
		__field( __u32, event_inj )
		__field( bool, npt )
	),

	TP_fast_assign(
		__entry->rip = rip;
		__entry->vmcb = vmcb;
		__entry->nested_rip = nested_rip;
		__entry->int_ctl = int_ctl;
		__entry->event_inj = event_inj;
		__entry->npt = npt;
	),

	TP_printk("rip: 0x%016llx vmcb: 0x%016llx nrip: 0x%016llx int_ctl: 0x%08x "
		  "event_inj: 0x%08x npt: %s",
		  __entry->rip, __entry->vmcb, __entry->nested_rip,
		  __entry->int_ctl, __entry->event_inj,
		  __entry->npt ? "on" : "off")
);

TRACE_EVENT(kvm_nested_intercepts,
	TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions,
		 __u32 intercept1, __u32 intercept2, __u32 intercept3),
	TP_ARGS(cr_read, cr_write, exceptions, intercept1,
		intercept2, intercept3),

	TP_STRUCT__entry(
		__field( __u16, cr_read )
		__field( __u16, cr_write )
		__field( __u32, exceptions )
		__field( __u32, intercept1 )
		__field( __u32, intercept2 )
		__field( __u32, intercept3 )
	),

	TP_fast_assign(
		__entry->cr_read = cr_read;
		__entry->cr_write = cr_write;
		__entry->exceptions = exceptions;
		__entry->intercept1 = intercept1;
		__entry->intercept2 = intercept2;
		__entry->intercept3 = intercept3;
	),

	TP_printk("cr_read: %04x cr_write: %04x excp: %08x "
		  "intercepts: %08x %08x %08x",
		  __entry->cr_read, __entry->cr_write, __entry->exceptions,
		  __entry->intercept1, __entry->intercept2, __entry->intercept3)
);

/*
 * Tracepoint for #VMEXIT while nested
 */
TRACE_EVENT_KVM_EXIT(kvm_nested_vmexit);

/*
 * Tracepoint for #VMEXIT reinjected to the guest
 */
TRACE_EVENT(kvm_nested_vmexit_inject,
	TP_PROTO(__u32 exit_code,
		 __u64 exit_info1, __u64 exit_info2,
		 __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa),
	TP_ARGS(exit_code, exit_info1, exit_info2,
		exit_int_info, exit_int_info_err, isa),

	TP_STRUCT__entry(
		__field( __u32, exit_code )
		__field( __u64, exit_info1 )
		__field( __u64, exit_info2 )
		__field( __u32, exit_int_info )
		__field( __u32, exit_int_info_err )
		__field( __u32, isa )
	),

	TP_fast_assign(
		__entry->exit_code = exit_code;
		__entry->exit_info1 = exit_info1;
		__entry->exit_info2 = exit_info2;
		__entry->exit_int_info = exit_int_info;
		__entry->exit_int_info_err = exit_int_info_err;
		__entry->isa = isa;
	),

	TP_printk("reason: %s%s%s ext_inf1: 0x%016llx "
		  "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
		  kvm_print_exit_reason(__entry->exit_code, __entry->isa),
		  __entry->exit_info1, __entry->exit_info2,
		  __entry->exit_int_info, __entry->exit_int_info_err)
);

/*
 * Tracepoint for nested #vmexit because of interrupt pending
 */
TRACE_EVENT(kvm_nested_intr_vmexit,
	TP_PROTO(__u64 rip),
	TP_ARGS(rip),

	TP_STRUCT__entry(
		__field( __u64, rip )
	),

	TP_fast_assign(
		__entry->rip = rip;
	),

	TP_printk("rip: 0x%016llx", __entry->rip)
);

/*
 * Tracepoint for INVLPGA
 */
TRACE_EVENT(kvm_invlpga,
	TP_PROTO(__u64 rip, int asid, u64 address),
	TP_ARGS(rip, asid, address),

	TP_STRUCT__entry(
		__field( __u64, rip )
		__field( int, asid )
		__field( __u64, address )
	),

	TP_fast_assign(
		__entry->rip = rip;
		__entry->asid = asid;
		__entry->address = address;
	),

	TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx",
		  __entry->rip, __entry->asid, __entry->address)
);

/*
 * Tracepoint for SKINIT
 */
TRACE_EVENT(kvm_skinit,
	TP_PROTO(__u64 rip, __u32 slb),
	TP_ARGS(rip, slb),

	TP_STRUCT__entry(
		__field( __u64, rip )
		__field( __u32, slb )
	),

	TP_fast_assign(
		__entry->rip = rip;
		__entry->slb = slb;
	),

	TP_printk("rip: 0x%016llx slb: 0x%08x",
		  __entry->rip, __entry->slb)
);

#define KVM_EMUL_INSN_F_CR0_PE (1 << 0)
#define KVM_EMUL_INSN_F_EFL_VM (1 << 1)
#define KVM_EMUL_INSN_F_CS_D   (1 << 2)
#define KVM_EMUL_INSN_F_CS_L   (1 << 3)

#define kvm_trace_symbol_emul_flags \
	{ 0, "real" }, \
	{ KVM_EMUL_INSN_F_CR0_PE \
	  | KVM_EMUL_INSN_F_EFL_VM, "vm16" }, \
	{ KVM_EMUL_INSN_F_CR0_PE, "prot16" }, \
	{ KVM_EMUL_INSN_F_CR0_PE \
	  | KVM_EMUL_INSN_F_CS_D, "prot32" }, \
	{ KVM_EMUL_INSN_F_CR0_PE \
	  | KVM_EMUL_INSN_F_CS_L, "prot64" }

#define kei_decode_mode(mode) ({ \
	u8 flags = 0xff; \
	switch (mode) { \
	case X86EMUL_MODE_REAL: \
		flags = 0; \
		break; \
	case X86EMUL_MODE_VM86: \
		flags = KVM_EMUL_INSN_F_EFL_VM; \
		break; \
	case X86EMUL_MODE_PROT16: \
		flags = KVM_EMUL_INSN_F_CR0_PE; \
		break; \
	case X86EMUL_MODE_PROT32: \
		flags = KVM_EMUL_INSN_F_CR0_PE \
			| KVM_EMUL_INSN_F_CS_D; \
		break; \
	case X86EMUL_MODE_PROT64: \
		flags = KVM_EMUL_INSN_F_CR0_PE \
			| KVM_EMUL_INSN_F_CS_L; \
		break; \
	} \
	flags; \
	})

TRACE_EVENT(kvm_emulate_insn,
	TP_PROTO(struct kvm_vcpu *vcpu, __u8 failed),
	TP_ARGS(vcpu, failed),

	TP_STRUCT__entry(
		__field( __u64, rip )
		__field( __u32, csbase )
		__field( __u8, len )
		__array( __u8, insn, 15 )
		__field( __u8, flags )
		__field( __u8, failed )
	),

	TP_fast_assign(
		__entry->csbase = static_call(kvm_x86_get_segment_base)(vcpu, VCPU_SREG_CS);
		__entry->len = vcpu->arch.emulate_ctxt->fetch.ptr
			       - vcpu->arch.emulate_ctxt->fetch.data;
		__entry->rip = vcpu->arch.emulate_ctxt->_eip - __entry->len;
		memcpy(__entry->insn,
		       vcpu->arch.emulate_ctxt->fetch.data,
		       15);
		__entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt->mode);
		__entry->failed = failed;
	),

	TP_printk("%x:%llx:%s (%s)%s",
		  __entry->csbase, __entry->rip,
		  __print_hex(__entry->insn, __entry->len),
		  __print_symbolic(__entry->flags,
				   kvm_trace_symbol_emul_flags),
		  __entry->failed ? " failed" : ""
		  )
);

#define trace_kvm_emulate_insn_start(vcpu) trace_kvm_emulate_insn(vcpu, 0)
#define trace_kvm_emulate_insn_failed(vcpu) trace_kvm_emulate_insn(vcpu, 1)

TRACE_EVENT(
	vcpu_match_mmio,
	TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match),
	TP_ARGS(gva, gpa, write, gpa_match),

	TP_STRUCT__entry(
		__field(gva_t, gva)
		__field(gpa_t, gpa)
		__field(bool, write)
		__field(bool, gpa_match)
	),

	TP_fast_assign(
		__entry->gva = gva;
		__entry->gpa = gpa;
		__entry->write = write;
		__entry->gpa_match = gpa_match;
	),

	TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa,
		  __entry->write ? "Write" : "Read",
		  __entry->gpa_match ? "GPA" : "GVA")
);

TRACE_EVENT(kvm_write_tsc_offset,
	TP_PROTO(unsigned int vcpu_id, __u64 previous_tsc_offset,
		 __u64 next_tsc_offset),
	TP_ARGS(vcpu_id, previous_tsc_offset, next_tsc_offset),

	TP_STRUCT__entry(
		__field( unsigned int, vcpu_id )
		__field( __u64, previous_tsc_offset )
		__field( __u64, next_tsc_offset )
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->previous_tsc_offset = previous_tsc_offset;
		__entry->next_tsc_offset = next_tsc_offset;
	),

	TP_printk("vcpu=%u prev=%llu next=%llu", __entry->vcpu_id,
		  __entry->previous_tsc_offset, __entry->next_tsc_offset)
);

#ifdef CONFIG_X86_64

#define host_clocks \
	{VDSO_CLOCKMODE_NONE, "none"}, \
	{VDSO_CLOCKMODE_TSC, "tsc"} \

TRACE_EVENT(kvm_update_master_clock,
	TP_PROTO(bool use_master_clock, unsigned int host_clock, bool offset_matched),
	TP_ARGS(use_master_clock, host_clock, offset_matched),

	TP_STRUCT__entry(
		__field( bool, use_master_clock )
		__field( unsigned int, host_clock )
		__field( bool, offset_matched )
	),

	TP_fast_assign(
		__entry->use_master_clock = use_master_clock;
		__entry->host_clock = host_clock;
		__entry->offset_matched = offset_matched;
	),

	TP_printk("masterclock %d hostclock %s offsetmatched %u",
		  __entry->use_master_clock,
		  __print_symbolic(__entry->host_clock, host_clocks),
		  __entry->offset_matched)
);

TRACE_EVENT(kvm_track_tsc,
	TP_PROTO(unsigned int vcpu_id, unsigned int nr_matched,
		 unsigned int online_vcpus, bool use_master_clock,
		 unsigned int host_clock),
	TP_ARGS(vcpu_id, nr_matched, online_vcpus, use_master_clock,
		host_clock),

	TP_STRUCT__entry(
		__field( unsigned int, vcpu_id )
		__field( unsigned int, nr_vcpus_matched_tsc )
		__field( unsigned int, online_vcpus )
		__field( bool, use_master_clock )
		__field( unsigned int, host_clock )
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->nr_vcpus_matched_tsc = nr_matched;
		__entry->online_vcpus = online_vcpus;
		__entry->use_master_clock = use_master_clock;
		__entry->host_clock = host_clock;
	),

	TP_printk("vcpu_id %u masterclock %u offsetmatched %u nr_online %u"
		  " hostclock %s",
		  __entry->vcpu_id, __entry->use_master_clock,
		  __entry->nr_vcpus_matched_tsc, __entry->online_vcpus,
		  __print_symbolic(__entry->host_clock, host_clocks))
);

#endif /* CONFIG_X86_64 */

/*
 * Tracepoint for PML full VMEXIT.
 */
TRACE_EVENT(kvm_pml_full,
	TP_PROTO(unsigned int vcpu_id),
	TP_ARGS(vcpu_id),

	TP_STRUCT__entry(
		__field( unsigned int, vcpu_id )
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
	),

	TP_printk("vcpu %d: PML full", __entry->vcpu_id)
);

TRACE_EVENT(kvm_ple_window_update,
	TP_PROTO(unsigned int vcpu_id, unsigned int new, unsigned int old),
	TP_ARGS(vcpu_id, new, old),

	TP_STRUCT__entry(
		__field( unsigned int, vcpu_id )
		__field( unsigned int, new )
		__field( unsigned int, old )
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->new = new;
		__entry->old = old;
	),

	TP_printk("vcpu %u old %u new %u (%s)",
		  __entry->vcpu_id, __entry->old, __entry->new,
		  __entry->old < __entry->new ? "grew" : "shrank")
);

TRACE_EVENT(kvm_pvclock_update,
	TP_PROTO(unsigned int vcpu_id, struct pvclock_vcpu_time_info *pvclock),
	TP_ARGS(vcpu_id, pvclock),

	TP_STRUCT__entry(
		__field( unsigned int, vcpu_id )
		__field( __u32, version )
		__field( __u64, tsc_timestamp )
		__field( __u64, system_time )
		__field( __u32, tsc_to_system_mul )
		__field( __s8, tsc_shift )
		__field( __u8, flags )
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->version = pvclock->version;
		__entry->tsc_timestamp = pvclock->tsc_timestamp;
		__entry->system_time = pvclock->system_time;
		__entry->tsc_to_system_mul = pvclock->tsc_to_system_mul;
		__entry->tsc_shift = pvclock->tsc_shift;
		__entry->flags = pvclock->flags;
	),

	TP_printk("vcpu_id %u, pvclock { version %u, tsc_timestamp 0x%llx, "
		  "system_time 0x%llx, tsc_to_system_mul 0x%x, tsc_shift %d, "
		  "flags 0x%x }",
		  __entry->vcpu_id,
		  __entry->version,
		  __entry->tsc_timestamp,
		  __entry->system_time,
		  __entry->tsc_to_system_mul,
		  __entry->tsc_shift,
		  __entry->flags)
);

TRACE_EVENT(kvm_wait_lapic_expire,
	TP_PROTO(unsigned int vcpu_id, s64 delta),
	TP_ARGS(vcpu_id, delta),

	TP_STRUCT__entry(
		__field( unsigned int, vcpu_id )
		__field( s64, delta )
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->delta = delta;
	),

	TP_printk("vcpu %u: delta %lld (%s)",
		  __entry->vcpu_id,
		  __entry->delta,
		  __entry->delta < 0 ? "early" : "late")
);

TRACE_EVENT(kvm_smm_transition,
	TP_PROTO(unsigned int vcpu_id, u64 smbase, bool entering),
	TP_ARGS(vcpu_id, smbase, entering),

	TP_STRUCT__entry(
		__field( unsigned int, vcpu_id )
		__field( u64, smbase )
		__field( bool, entering )
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->smbase = smbase;
		__entry->entering = entering;
	),

	TP_printk("vcpu %u: %s SMM, smbase 0x%llx",
		  __entry->vcpu_id,
		  __entry->entering ? "entering" : "leaving",
		  __entry->smbase)
);

/*
 * Tracepoint for VT-d posted-interrupts.
 */
TRACE_EVENT(kvm_pi_irte_update,
	TP_PROTO(unsigned int host_irq, unsigned int vcpu_id,
		 unsigned int gsi, unsigned int gvec,
		 u64 pi_desc_addr, bool set),
	TP_ARGS(host_irq, vcpu_id, gsi, gvec, pi_desc_addr, set),

	TP_STRUCT__entry(
		__field( unsigned int, host_irq )
		__field( unsigned int, vcpu_id )
		__field( unsigned int, gsi )
		__field( unsigned int, gvec )
		__field( u64, pi_desc_addr )
		__field( bool, set )
	),

	TP_fast_assign(
		__entry->host_irq = host_irq;
		__entry->vcpu_id = vcpu_id;
		__entry->gsi = gsi;
		__entry->gvec = gvec;
		__entry->pi_desc_addr = pi_desc_addr;
		__entry->set = set;
	),

	TP_printk("VT-d PI is %s for irq %u, vcpu %u, gsi: 0x%x, "
		  "gvec: 0x%x, pi_desc_addr: 0x%llx",
		  __entry->set ? "enabled and being updated" : "disabled",
		  __entry->host_irq,
		  __entry->vcpu_id,
		  __entry->gsi,
		  __entry->gvec,
		  __entry->pi_desc_addr)
);

/*
 * Tracepoint for kvm_hv_notify_acked_sint.
 */
TRACE_EVENT(kvm_hv_notify_acked_sint,
	TP_PROTO(int vcpu_id, u32 sint),
	TP_ARGS(vcpu_id, sint),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(u32, sint)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->sint = sint;
	),

	TP_printk("vcpu_id %d sint %u", __entry->vcpu_id, __entry->sint)
);

/*
 * Tracepoint for synic_set_irq.
 */
TRACE_EVENT(kvm_hv_synic_set_irq,
	TP_PROTO(int vcpu_id, u32 sint, int vector, int ret),
	TP_ARGS(vcpu_id, sint, vector, ret),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(u32, sint)
		__field(int, vector)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->sint = sint;
		__entry->vector = vector;
		__entry->ret = ret;
	),

	TP_printk("vcpu_id %d sint %u vector %d ret %d",
		  __entry->vcpu_id, __entry->sint, __entry->vector,
		  __entry->ret)
);

/*
 * Tracepoint for kvm_hv_synic_send_eoi.
 */
TRACE_EVENT(kvm_hv_synic_send_eoi,
	TP_PROTO(int vcpu_id, int vector),
	TP_ARGS(vcpu_id, vector),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(u32, sint)
		__field(int, vector)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->vector = vector;
	),

	TP_printk("vcpu_id %d vector %d", __entry->vcpu_id, __entry->vector)
);

/*
 * Tracepoint for synic_set_msr.
 */
TRACE_EVENT(kvm_hv_synic_set_msr,
	TP_PROTO(int vcpu_id, u32 msr, u64 data, bool host),
	TP_ARGS(vcpu_id, msr, data, host),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(u32, msr)
		__field(u64, data)
		__field(bool, host)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->msr = msr;
		__entry->data = data;
		__entry->host = host;
	),

	TP_printk("vcpu_id %d msr 0x%x data 0x%llx host %d",
		  __entry->vcpu_id, __entry->msr, __entry->data, __entry->host)
);

/*
 * Tracepoint for stimer_set_config.
 */
TRACE_EVENT(kvm_hv_stimer_set_config,
	TP_PROTO(int vcpu_id, int timer_index, u64 config, bool host),
	TP_ARGS(vcpu_id, timer_index, config, host),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
		__field(u64, config)
		__field(bool, host)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
		__entry->config = config;
		__entry->host = host;
	),

	TP_printk("vcpu_id %d timer %d config 0x%llx host %d",
		  __entry->vcpu_id, __entry->timer_index, __entry->config,
		  __entry->host)
);

/*
 * Tracepoint for stimer_set_count.
 */
TRACE_EVENT(kvm_hv_stimer_set_count,
	TP_PROTO(int vcpu_id, int timer_index, u64 count, bool host),
	TP_ARGS(vcpu_id, timer_index, count, host),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
		__field(u64, count)
		__field(bool, host)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
		__entry->count = count;
		__entry->host = host;
	),

	TP_printk("vcpu_id %d timer %d count %llu host %d",
		  __entry->vcpu_id, __entry->timer_index, __entry->count,
		  __entry->host)
);
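
/*
 * The stimer_set_config/stimer_set_count events above record updates to a
 * synthetic timer's configuration and count; "host" flags whether the
 * update came from the host side rather than the guest.
 */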

/*
 * Tracepoint for stimer_start(periodic timer case).
 */
TRACE_EVENT(kvm_hv_stimer_start_periodic,
	TP_PROTO(int vcpu_id, int timer_index, u64 time_now, u64 exp_time),
	TP_ARGS(vcpu_id, timer_index, time_now, exp_time),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
		__field(u64, time_now)
		__field(u64, exp_time)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
		__entry->time_now = time_now;
		__entry->exp_time = exp_time;
	),

	TP_printk("vcpu_id %d timer %d time_now %llu exp_time %llu",
		  __entry->vcpu_id, __entry->timer_index, __entry->time_now,
		  __entry->exp_time)
);

/*
 * Tracepoint for stimer_start(one-shot timer case).
 */
TRACE_EVENT(kvm_hv_stimer_start_one_shot,
	TP_PROTO(int vcpu_id, int timer_index, u64 time_now, u64 count),
	TP_ARGS(vcpu_id, timer_index, time_now, count),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
		__field(u64, time_now)
		__field(u64, count)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
		__entry->time_now = time_now;
		__entry->count = count;
	),

	TP_printk("vcpu_id %d timer %d time_now %llu count %llu",
		  __entry->vcpu_id, __entry->timer_index, __entry->time_now,
		  __entry->count)
);

/*
 * Tracepoint for stimer_timer_callback.
 */
TRACE_EVENT(kvm_hv_stimer_callback,
	TP_PROTO(int vcpu_id, int timer_index),
	TP_ARGS(vcpu_id, timer_index),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
	),

	TP_printk("vcpu_id %d timer %d",
		  __entry->vcpu_id, __entry->timer_index)
);

/*
 * Tracepoint for stimer_expiration.
 */
TRACE_EVENT(kvm_hv_stimer_expiration,
	TP_PROTO(int vcpu_id, int timer_index, int direct, int msg_send_result),
	TP_ARGS(vcpu_id, timer_index, direct, msg_send_result),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
		__field(int, direct)
		__field(int, msg_send_result)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
		__entry->direct = direct;
		__entry->msg_send_result = msg_send_result;
	),

	TP_printk("vcpu_id %d timer %d direct %d send result %d",
		  __entry->vcpu_id, __entry->timer_index,
		  __entry->direct, __entry->msg_send_result)
);
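
/*
 * Illustrative output for the stimer events above, following their
 * TP_printk formats (values are made up):
 *
 *   kvm_hv_stimer_start_periodic: vcpu_id 0 timer 0 time_now 1000 exp_time 2000
 *   kvm_hv_stimer_expiration: vcpu_id 0 timer 0 direct 0 send result 0
 */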

/*
 * Tracepoint for stimer_cleanup.
 */
TRACE_EVENT(kvm_hv_stimer_cleanup,
	TP_PROTO(int vcpu_id, int timer_index),
	TP_ARGS(vcpu_id, timer_index),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
	),

	TP_printk("vcpu_id %d timer %d",
		  __entry->vcpu_id, __entry->timer_index)
);

TRACE_EVENT(kvm_apicv_inhibit_changed,
	TP_PROTO(int reason, bool set, unsigned long inhibits),
	TP_ARGS(reason, set, inhibits),

	TP_STRUCT__entry(
		__field(int, reason)
		__field(bool, set)
		__field(unsigned long, inhibits)
	),

	TP_fast_assign(
		__entry->reason = reason;
		__entry->set = set;
		__entry->inhibits = inhibits;
	),

	TP_printk("%s reason=%u, inhibits=0x%lx",
		  __entry->set ? "set" : "cleared",
		  __entry->reason, __entry->inhibits)
);

TRACE_EVENT(kvm_apicv_accept_irq,
	TP_PROTO(__u32 apicid, __u16 dm, __u16 tm, __u8 vec),
	TP_ARGS(apicid, dm, tm, vec),

	TP_STRUCT__entry(
		__field( __u32, apicid )
		__field( __u16, dm )
		__field( __u16, tm )
		__field( __u8, vec )
	),

	TP_fast_assign(
		__entry->apicid = apicid;
		__entry->dm = dm;
		__entry->tm = tm;
		__entry->vec = vec;
	),

	TP_printk("apicid %x vec %u (%s|%s)",
		  __entry->apicid, __entry->vec,
		  __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode),
		  __entry->tm ? "level" : "edge")
);

/*
 * Tracepoint for AMD AVIC
 */
TRACE_EVENT(kvm_avic_incomplete_ipi,
	TP_PROTO(u32 vcpu, u32 icrh, u32 icrl, u32 id, u32 index),
	TP_ARGS(vcpu, icrh, icrl, id, index),

	TP_STRUCT__entry(
		__field(u32, vcpu)
		__field(u32, icrh)
		__field(u32, icrl)
		__field(u32, id)
		__field(u32, index)
	),

	TP_fast_assign(
		__entry->vcpu = vcpu;
		__entry->icrh = icrh;
		__entry->icrl = icrl;
		__entry->id = id;
		__entry->index = index;
	),

	TP_printk("vcpu=%u, icrh:icrl=%#010x:%08x, id=%u, index=%u",
		  __entry->vcpu, __entry->icrh, __entry->icrl,
		  __entry->id, __entry->index)
);

TRACE_EVENT(kvm_avic_unaccelerated_access,
	TP_PROTO(u32 vcpu, u32 offset, bool ft, bool rw, u32 vec),
	TP_ARGS(vcpu, offset, ft, rw, vec),

	TP_STRUCT__entry(
		__field(u32, vcpu)
		__field(u32, offset)
		__field(bool, ft)
		__field(bool, rw)
		__field(u32, vec)
	),

	TP_fast_assign(
		__entry->vcpu = vcpu;
		__entry->offset = offset;
		__entry->ft = ft;
		__entry->rw = rw;
		__entry->vec = vec;
	),

	TP_printk("vcpu=%u, offset=%#x(%s), %s, %s, vec=%#x",
		  __entry->vcpu,
		  __entry->offset,
		  __print_symbolic(__entry->offset, kvm_trace_symbol_apic),
		  __entry->ft ? "trap" : "fault",
		  __entry->rw ? "write" : "read",
		  __entry->vec)
);

TRACE_EVENT(kvm_avic_ga_log,
	TP_PROTO(u32 vmid, u32 vcpuid),
	TP_ARGS(vmid, vcpuid),

	TP_STRUCT__entry(
		__field(u32, vmid)
		__field(u32, vcpuid)
	),

	TP_fast_assign(
		__entry->vmid = vmid;
		__entry->vcpuid = vcpuid;
	),

	TP_printk("vmid=%u, vcpuid=%u",
		  __entry->vmid, __entry->vcpuid)
);

TRACE_EVENT(kvm_hv_timer_state,
	TP_PROTO(unsigned int vcpu_id, unsigned int hv_timer_in_use),
	TP_ARGS(vcpu_id, hv_timer_in_use),
	TP_STRUCT__entry(
		__field(unsigned int, vcpu_id)
		__field(unsigned int, hv_timer_in_use)
	),
	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->hv_timer_in_use = hv_timer_in_use;
	),
	TP_printk("vcpu_id %x hv_timer %x",
		  __entry->vcpu_id,
		  __entry->hv_timer_in_use)
);

/*
 * Tracepoint for kvm_hv_flush_tlb.
 */
TRACE_EVENT(kvm_hv_flush_tlb,
	TP_PROTO(u64 processor_mask, u64 address_space, u64 flags),
	TP_ARGS(processor_mask, address_space, flags),

	TP_STRUCT__entry(
		__field(u64, processor_mask)
		__field(u64, address_space)
		__field(u64, flags)
	),

	TP_fast_assign(
		__entry->processor_mask = processor_mask;
		__entry->address_space = address_space;
		__entry->flags = flags;
	),

	TP_printk("processor_mask 0x%llx address_space 0x%llx flags 0x%llx",
		  __entry->processor_mask, __entry->address_space,
		  __entry->flags)
);

/*
 * Tracepoint for kvm_hv_flush_tlb_ex.
 */
TRACE_EVENT(kvm_hv_flush_tlb_ex,
	TP_PROTO(u64 valid_bank_mask, u64 format, u64 address_space, u64 flags),
	TP_ARGS(valid_bank_mask, format, address_space, flags),

	TP_STRUCT__entry(
		__field(u64, valid_bank_mask)
		__field(u64, format)
		__field(u64, address_space)
		__field(u64, flags)
	),

	TP_fast_assign(
		__entry->valid_bank_mask = valid_bank_mask;
		__entry->format = format;
		__entry->address_space = address_space;
		__entry->flags = flags;
	),

	TP_printk("valid_bank_mask 0x%llx format 0x%llx "
		  "address_space 0x%llx flags 0x%llx",
		  __entry->valid_bank_mask, __entry->format,
		  __entry->address_space, __entry->flags)
);

/*
 * Tracepoints for kvm_hv_send_ipi.
 */
TRACE_EVENT(kvm_hv_send_ipi,
	TP_PROTO(u32 vector, u64 processor_mask),
	TP_ARGS(vector, processor_mask),

	TP_STRUCT__entry(
		__field(u32, vector)
		__field(u64, processor_mask)
	),

	TP_fast_assign(
		__entry->vector = vector;
		__entry->processor_mask = processor_mask;
	),

	TP_printk("vector %x processor_mask 0x%llx",
		  __entry->vector, __entry->processor_mask)
);

TRACE_EVENT(kvm_hv_send_ipi_ex,
	TP_PROTO(u32 vector, u64 format, u64 valid_bank_mask),
	TP_ARGS(vector, format, valid_bank_mask),

	TP_STRUCT__entry(
		__field(u32, vector)
		__field(u64, format)
		__field(u64, valid_bank_mask)
	),

	TP_fast_assign(
		__entry->vector = vector;
		__entry->format = format;
		__entry->valid_bank_mask = valid_bank_mask;
	),

	TP_printk("vector %x format %llx valid_bank_mask 0x%llx",
		  __entry->vector, __entry->format,
		  __entry->valid_bank_mask)
);

TRACE_EVENT(kvm_pv_tlb_flush,
	TP_PROTO(unsigned int vcpu_id, bool need_flush_tlb),
	TP_ARGS(vcpu_id, need_flush_tlb),

	TP_STRUCT__entry(
		__field( unsigned int, vcpu_id )
		__field( bool, need_flush_tlb )
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->need_flush_tlb = need_flush_tlb;
	),

	TP_printk("vcpu %u need_flush_tlb %s", __entry->vcpu_id,
		  __entry->need_flush_tlb ? "true" : "false")
);

/*
 * Tracepoint for failed nested VMX VM-Enter.
 */
TRACE_EVENT(kvm_nested_vmenter_failed,
	TP_PROTO(const char *msg, u32 err),
	TP_ARGS(msg, err),

	TP_STRUCT__entry(
		__string(msg, msg)
		__field(u32, err)
	),

	TP_fast_assign(
		__assign_str(msg, msg);
		__entry->err = err;
	),

	TP_printk("%s%s", __get_str(msg), !__entry->err ? "" :
		  __print_symbolic(__entry->err, VMX_VMENTER_INSTRUCTION_ERRORS))
);

/*
 * Tracepoint for syndbg_set_msr.
 */
TRACE_EVENT(kvm_hv_syndbg_set_msr,
	TP_PROTO(int vcpu_id, u32 vp_index, u32 msr, u64 data),
	TP_ARGS(vcpu_id, vp_index, msr, data),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(u32, vp_index)
		__field(u32, msr)
		__field(u64, data)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->vp_index = vp_index;
		__entry->msr = msr;
		__entry->data = data;
	),

	TP_printk("vcpu_id %d vp_index %u msr 0x%x data 0x%llx",
		  __entry->vcpu_id, __entry->vp_index, __entry->msr,
		  __entry->data)
);
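
/*
 * "syndbg" above and below refers to the Hyper-V synthetic debugger MSR
 * interface; set/get accesses are traced with the target vp_index and the
 * MSR value.
 */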

/*
 * Tracepoint for syndbg_get_msr.
 */
TRACE_EVENT(kvm_hv_syndbg_get_msr,
	TP_PROTO(int vcpu_id, u32 vp_index, u32 msr, u64 data),
	TP_ARGS(vcpu_id, vp_index, msr, data),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(u32, vp_index)
		__field(u32, msr)
		__field(u64, data)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->vp_index = vp_index;
		__entry->msr = msr;
		__entry->data = data;
	),

	TP_printk("vcpu_id %d vp_index %u msr 0x%x data 0x%llx",
		  __entry->vcpu_id, __entry->vp_index, __entry->msr,
		  __entry->data)
);

/*
 * Tracepoint for the start of VMGEXIT processing
 */
TRACE_EVENT(kvm_vmgexit_enter,
	TP_PROTO(unsigned int vcpu_id, struct ghcb *ghcb),
	TP_ARGS(vcpu_id, ghcb),

	TP_STRUCT__entry(
		__field(unsigned int, vcpu_id)
		__field(u64, exit_reason)
		__field(u64, info1)
		__field(u64, info2)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->exit_reason = ghcb->save.sw_exit_code;
		__entry->info1 = ghcb->save.sw_exit_info_1;
		__entry->info2 = ghcb->save.sw_exit_info_2;
	),

	TP_printk("vcpu %u, exit_reason %llx, exit_info1 %llx, exit_info2 %llx",
		  __entry->vcpu_id, __entry->exit_reason,
		  __entry->info1, __entry->info2)
);

/*
 * Tracepoint for the end of VMGEXIT processing
 */
TRACE_EVENT(kvm_vmgexit_exit,
	TP_PROTO(unsigned int vcpu_id, struct ghcb *ghcb),
	TP_ARGS(vcpu_id, ghcb),

	TP_STRUCT__entry(
		__field(unsigned int, vcpu_id)
		__field(u64, exit_reason)
		__field(u64, info1)
		__field(u64, info2)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->exit_reason = ghcb->save.sw_exit_code;
		__entry->info1 = ghcb->save.sw_exit_info_1;
		__entry->info2 = ghcb->save.sw_exit_info_2;
	),

	TP_printk("vcpu %u, exit_reason %llx, exit_info1 %llx, exit_info2 %llx",
		  __entry->vcpu_id, __entry->exit_reason,
		  __entry->info1, __entry->info2)
);

/*
 * Tracepoint for the start of VMGEXIT MSR protocol processing
 */
TRACE_EVENT(kvm_vmgexit_msr_protocol_enter,
	TP_PROTO(unsigned int vcpu_id, u64 ghcb_gpa),
	TP_ARGS(vcpu_id, ghcb_gpa),

	TP_STRUCT__entry(
		__field(unsigned int, vcpu_id)
		__field(u64, ghcb_gpa)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->ghcb_gpa = ghcb_gpa;
	),

	TP_printk("vcpu %u, ghcb_gpa %016llx",
		  __entry->vcpu_id, __entry->ghcb_gpa)
);

/*
 * Tracepoint for the end of VMGEXIT MSR protocol processing
 */
TRACE_EVENT(kvm_vmgexit_msr_protocol_exit,
	TP_PROTO(unsigned int vcpu_id, u64 ghcb_gpa, int result),
	TP_ARGS(vcpu_id, ghcb_gpa, result),

	TP_STRUCT__entry(
		__field(unsigned int, vcpu_id)
		__field(u64, ghcb_gpa)
		__field(int, result)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->ghcb_gpa = ghcb_gpa;
		__entry->result = result;
	),

	TP_printk("vcpu %u, ghcb_gpa %016llx, result %d",
		  __entry->vcpu_id, __entry->ghcb_gpa, __entry->result)
);

#endif /* _TRACE_KVM_H */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../arch/x86/kvm
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace

/* This part must be outside protection */
#include <trace/define_trace.h>