/* SPDX-License-Identifier: GPL-2.0 */
#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_H

#include <linux/tracepoint.h>
#include <asm/vmx.h>
#include <asm/svm.h>
#include <asm/clocksource.h>
#include <asm/pvclock-abi.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm

/*
 * Tracepoint for guest mode entry.
 */
TRACE_EVENT(kvm_entry,
	TP_PROTO(struct kvm_vcpu *vcpu),
	TP_ARGS(vcpu),

	TP_STRUCT__entry(
		__field(	unsigned int,	vcpu_id		)
		__field(	unsigned long,	rip		)
	),

	TP_fast_assign(
		__entry->vcpu_id	= vcpu->vcpu_id;
		/* Snapshot the guest RIP at the moment of entry. */
		__entry->rip		= kvm_rip_read(vcpu);
	),

	TP_printk("vcpu %u, rip 0x%lx", __entry->vcpu_id, __entry->rip)
);

/*
 * Tracepoint for (KVM-native) hypercall: records the hypercall number
 * and its first four arguments.
 */
TRACE_EVENT(kvm_hypercall,
	TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1,
		 unsigned long a2, unsigned long a3),
	TP_ARGS(nr, a0, a1, a2, a3),

	TP_STRUCT__entry(
		__field(	unsigned long,	nr		)
		__field(	unsigned long,	a0		)
		__field(	unsigned long,	a1		)
		__field(	unsigned long,	a2		)
		__field(	unsigned long,	a3		)
	),

	TP_fast_assign(
		__entry->nr	= nr;
		__entry->a0	= a0;
		__entry->a1	= a1;
		__entry->a2	= a2;
		__entry->a3	= a3;
	),

	TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx",
		  __entry->nr, __entry->a0, __entry->a1, __entry->a2,
		  __entry->a3)
);

/*
 * Tracepoint for Hyper-V hypercall.
65 */ 66 TRACE_EVENT(kvm_hv_hypercall, 67 TP_PROTO(__u16 code, bool fast, __u16 rep_cnt, __u16 rep_idx, 68 __u64 ingpa, __u64 outgpa), 69 TP_ARGS(code, fast, rep_cnt, rep_idx, ingpa, outgpa), 70 71 TP_STRUCT__entry( 72 __field( __u16, rep_cnt ) 73 __field( __u16, rep_idx ) 74 __field( __u64, ingpa ) 75 __field( __u64, outgpa ) 76 __field( __u16, code ) 77 __field( bool, fast ) 78 ), 79 80 TP_fast_assign( 81 __entry->rep_cnt = rep_cnt; 82 __entry->rep_idx = rep_idx; 83 __entry->ingpa = ingpa; 84 __entry->outgpa = outgpa; 85 __entry->code = code; 86 __entry->fast = fast; 87 ), 88 89 TP_printk("code 0x%x %s cnt 0x%x idx 0x%x in 0x%llx out 0x%llx", 90 __entry->code, __entry->fast ? "fast" : "slow", 91 __entry->rep_cnt, __entry->rep_idx, __entry->ingpa, 92 __entry->outgpa) 93 ); 94 95 /* 96 * Tracepoint for Xen hypercall. 97 */ 98 TRACE_EVENT(kvm_xen_hypercall, 99 TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1, 100 unsigned long a2, unsigned long a3, unsigned long a4, 101 unsigned long a5), 102 TP_ARGS(nr, a0, a1, a2, a3, a4, a5), 103 104 TP_STRUCT__entry( 105 __field(unsigned long, nr) 106 __field(unsigned long, a0) 107 __field(unsigned long, a1) 108 __field(unsigned long, a2) 109 __field(unsigned long, a3) 110 __field(unsigned long, a4) 111 __field(unsigned long, a5) 112 ), 113 114 TP_fast_assign( 115 __entry->nr = nr; 116 __entry->a0 = a0; 117 __entry->a1 = a1; 118 __entry->a2 = a2; 119 __entry->a3 = a3; 120 __entry->a4 = a4; 121 __entry->a4 = a5; 122 ), 123 124 TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx a4 0x%lx a5 %lx", 125 __entry->nr, __entry->a0, __entry->a1, __entry->a2, 126 __entry->a3, __entry->a4, __entry->a5) 127 ); 128 129 130 131 /* 132 * Tracepoint for PIO. 
 */

#define KVM_PIO_IN   0
#define KVM_PIO_OUT  1

TRACE_EVENT(kvm_pio,
	TP_PROTO(unsigned int rw, unsigned int port, unsigned int size,
		 unsigned int count, void *data),
	TP_ARGS(rw, port, size, count, data),

	TP_STRUCT__entry(
		__field(	unsigned int, 	rw		)
		__field(	unsigned int, 	port		)
		__field(	unsigned int, 	size		)
		__field(	unsigned int,	count		)
		__field(	unsigned int,	val		)
	),

	TP_fast_assign(
		__entry->rw		= rw;
		__entry->port		= port;
		__entry->size		= size;
		__entry->count		= count;
		/*
		 * Only the first element of the data buffer is sampled,
		 * widened according to the access size; when count > 1 the
		 * printk below appends "(...)" to flag the omitted elements.
		 */
		if (size == 1)
			__entry->val	= *(unsigned char *)data;
		else if (size == 2)
			__entry->val	= *(unsigned short *)data;
		else
			__entry->val	= *(unsigned int *)data;
	),

	TP_printk("pio_%s at 0x%x size %d count %d val 0x%x %s",
		  __entry->rw ? "write" : "read",
		  __entry->port, __entry->size, __entry->count, __entry->val,
		  __entry->count > 1 ? "(...)" : "")
);

/*
 * Tracepoint for fast mmio.
 */
TRACE_EVENT(kvm_fast_mmio,
	TP_PROTO(u64 gpa),
	TP_ARGS(gpa),

	TP_STRUCT__entry(
		__field(u64,	gpa)
	),

	TP_fast_assign(
		__entry->gpa = gpa;
	),

	TP_printk("fast mmio at gpa 0x%llx", __entry->gpa)
);

/*
 * Tracepoint for cpuid.
 */
TRACE_EVENT(kvm_cpuid,
	TP_PROTO(unsigned int function, unsigned int index, unsigned long rax,
		 unsigned long rbx, unsigned long rcx, unsigned long rdx,
		 bool found, bool used_max_basic),
	TP_ARGS(function, index, rax, rbx, rcx, rdx, found, used_max_basic),

	TP_STRUCT__entry(
		__field(	unsigned int,	function	)
		__field(	unsigned int,	index		)
		__field(	unsigned long,	rax		)
		__field(	unsigned long,	rbx		)
		__field(	unsigned long,	rcx		)
		__field(	unsigned long,	rdx		)
		__field(	bool,		found		)
		__field(	bool,		used_max_basic	)
	),

	TP_fast_assign(
		__entry->function	= function;
		__entry->index		= index;
		__entry->rax		= rax;
		__entry->rbx		= rbx;
		__entry->rcx		= rcx;
		__entry->rdx		= rdx;
		__entry->found		= found;
		__entry->used_max_basic	= used_max_basic;
	),

	TP_printk("func %x idx %x rax %lx rbx %lx rcx %lx rdx %lx, cpuid entry %s%s",
		  __entry->function, __entry->index, __entry->rax,
		  __entry->rbx, __entry->rcx, __entry->rdx,
		  __entry->found ? "found" : "not found",
		  __entry->used_max_basic ? ", used max basic" : "")
);

/* Map an APIC_<reg> register offset to a printable "APIC_<reg>" name. */
#define AREG(x) { APIC_##x, "APIC_" #x }

#define kvm_trace_symbol_apic						    \
	AREG(ID), AREG(LVR), AREG(TASKPRI), AREG(ARBPRI), AREG(PROCPRI),    \
	AREG(EOI), AREG(RRR), AREG(LDR), AREG(DFR), AREG(SPIV), AREG(ISR),  \
	AREG(TMR), AREG(IRR), AREG(ESR), AREG(ICR), AREG(ICR2), AREG(LVTT), \
	AREG(LVTTHMR), AREG(LVTPC), AREG(LVT0), AREG(LVT1), AREG(LVTERR),   \
	AREG(TMICT), AREG(TMCCT), AREG(TDCR), AREG(SELF_IPI), AREG(EFEAT),  \
	AREG(ECTRL)
/*
 * Tracepoint for apic access.
 */
TRACE_EVENT(kvm_apic,
	TP_PROTO(unsigned int rw, unsigned int reg, unsigned int val),
	TP_ARGS(rw, reg, val),

	TP_STRUCT__entry(
		__field(	unsigned int,	rw		)
		__field(	unsigned int,	reg		)
		__field(	unsigned int,	val		)
	),

	TP_fast_assign(
		__entry->rw	= rw;
		__entry->reg	= reg;
		__entry->val	= val;
	),

	TP_printk("apic_%s %s = 0x%x",
		  __entry->rw ? "write" : "read",
		  __print_symbolic(__entry->reg, kvm_trace_symbol_apic),
		  __entry->val)
);

#define trace_kvm_apic_read(reg, val)		trace_kvm_apic(0, reg, val)
#define trace_kvm_apic_write(reg, val)		trace_kvm_apic(1, reg, val)

#define KVM_ISA_VMX   1
#define KVM_ISA_SVM   2

/*
 * Expands to a comma-separated argument triple for TP_printk's "%s%s%s":
 * the decoded exit reason (VMX's low 16 bits, or the raw SVM code), an
 * optional separator, and the decoded VMX exit-reason flag bits (high
 * 16 bits), empty for SVM.
 */
#define kvm_print_exit_reason(exit_reason, isa)				\
	(isa == KVM_ISA_VMX) ?						\
	__print_symbolic(exit_reason & 0xffff, VMX_EXIT_REASONS) :	\
	__print_symbolic(exit_reason, SVM_EXIT_REASONS),		\
	(isa == KVM_ISA_VMX && exit_reason & ~0xffff) ? " " : "",	\
	(isa == KVM_ISA_VMX) ?						\
	__print_flags(exit_reason & ~0xffff, " ", VMX_EXIT_REASON_FLAGS) : ""

/*
 * Common definition shared by the kvm_exit and kvm_nested_vmexit
 * tracepoints; the vendor-specific exit details are pulled via the
 * kvm_x86_get_exit_info static call.
 */
#define TRACE_EVENT_KVM_EXIT(name)					     \
TRACE_EVENT(name,							     \
	TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu, u32 isa),  \
	TP_ARGS(exit_reason, vcpu, isa),				     \
									     \
	TP_STRUCT__entry(						     \
		__field(	unsigned int,	exit_reason	)	     \
		__field(	unsigned long,	guest_rip	)	     \
		__field(	u32,		isa		)	     \
		__field(	u64,		info1		)	     \
		__field(	u64,		info2		)	     \
		__field(	u32,		intr_info	)	     \
		__field(	u32,		error_code	)	     \
		__field(	unsigned int,	vcpu_id		)	     \
	),								     \
									     \
	TP_fast_assign(							     \
		__entry->exit_reason	= exit_reason;			     \
		__entry->guest_rip	= kvm_rip_read(vcpu);		     \
		__entry->isa		= isa;				     \
		__entry->vcpu_id	= vcpu->vcpu_id;		     \
		static_call(kvm_x86_get_exit_info)(vcpu, &__entry->info1,    \
						   &__entry->info2,	     \
						   &__entry->intr_info,	     \
						   &__entry->error_code);    \
	),								     \
									     \
	TP_printk("vcpu %u reason %s%s%s rip 0x%lx info1 0x%016llx "	     \
		  "info2 0x%016llx intr_info 0x%08x error_code 0x%08x",	     \
		  __entry->vcpu_id,					     \
		  kvm_print_exit_reason(__entry->exit_reason, __entry->isa), \
		  __entry->guest_rip, __entry->info1, __entry->info2,	     \
		  __entry->intr_info, __entry->error_code)		     \
)

/*
 * Tracepoint for kvm guest exit:
 */
TRACE_EVENT_KVM_EXIT(kvm_exit);

/*
 * Tracepoint for kvm interrupt injection:
 */
TRACE_EVENT(kvm_inj_virq,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq),

	TP_STRUCT__entry(
		__field(	unsigned int,	irq		)
	),

	TP_fast_assign(
		__entry->irq		= irq;
	),

	TP_printk("irq %u", __entry->irq)
);

/* Map an exception vector constant to a printable "#<mnemonic>" name. */
#define EXS(x) { x##_VECTOR, "#" #x }

#define kvm_trace_sym_exc						\
	EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM),	\
	EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF),		\
	EXS(MF), EXS(AC), EXS(MC)

/*
 * Tracepoint for kvm exception injection:
 */
TRACE_EVENT(kvm_inj_exception,
	TP_PROTO(unsigned exception, bool has_error, unsigned error_code),
	TP_ARGS(exception, has_error, error_code),

	TP_STRUCT__entry(
		__field(	u8,	exception	)
		__field(	u8,	has_error	)
		__field(	u32,	error_code	)
	),

	TP_fast_assign(
		__entry->exception	= exception;
		__entry->has_error	= has_error;
		__entry->error_code	= error_code;
	),

	TP_printk("%s (0x%x)",
		  __print_symbolic(__entry->exception, kvm_trace_sym_exc),
		  /* FIXME: don't print error_code if not present */
		  __entry->has_error ? __entry->error_code : 0)
);

/*
 * Tracepoint for page fault.
 */
TRACE_EVENT(kvm_page_fault,
	TP_PROTO(unsigned long fault_address, unsigned int error_code),
	TP_ARGS(fault_address, error_code),

	TP_STRUCT__entry(
		__field(	unsigned long,	fault_address	)
		__field(	unsigned int,	error_code	)
	),

	TP_fast_assign(
		__entry->fault_address	= fault_address;
		__entry->error_code	= error_code;
	),

	TP_printk("address %lx error_code %x",
		  __entry->fault_address, __entry->error_code)
);

/*
 * Tracepoint for guest MSR access.
 */
TRACE_EVENT(kvm_msr,
	TP_PROTO(unsigned write, u32 ecx, u64 data, bool exception),
	TP_ARGS(write, ecx, data, exception),

	TP_STRUCT__entry(
		__field(	unsigned,	write		)
		__field(	u32,		ecx		)
		__field(	u64,		data		)
		__field(	u8,		exception	)
	),

	TP_fast_assign(
		__entry->write		= write;
		__entry->ecx		= ecx;
		__entry->data		= data;
		__entry->exception	= exception;
	),

	TP_printk("msr_%s %x = 0x%llx%s",
		  __entry->write ? "write" : "read",
		  __entry->ecx, __entry->data,
		  __entry->exception ? " (#GP)" : "")
);

#define trace_kvm_msr_read(ecx, data)		trace_kvm_msr(0, ecx, data, false)
#define trace_kvm_msr_write(ecx, data)		trace_kvm_msr(1, ecx, data, false)
#define trace_kvm_msr_read_ex(ecx)		trace_kvm_msr(0, ecx, 0, true)
#define trace_kvm_msr_write_ex(ecx, data)	trace_kvm_msr(1, ecx, data, true)

/*
 * Tracepoint for guest CR access.
 */
TRACE_EVENT(kvm_cr,
	TP_PROTO(unsigned int rw, unsigned int cr, unsigned long val),
	TP_ARGS(rw, cr, val),

	TP_STRUCT__entry(
		__field(	unsigned int,	rw		)
		__field(	unsigned int,	cr		)
		__field(	unsigned long,	val		)
	),

	TP_fast_assign(
		__entry->rw		= rw;
		__entry->cr		= cr;
		__entry->val		= val;
	),

	TP_printk("cr_%s %x = 0x%lx",
		  __entry->rw ? "write" : "read",
		  __entry->cr, __entry->val)
);

#define trace_kvm_cr_read(cr, val)		trace_kvm_cr(0, cr, val)
#define trace_kvm_cr_write(cr, val)		trace_kvm_cr(1, cr, val)

TRACE_EVENT(kvm_pic_set_irq,
	    TP_PROTO(__u8 chip, __u8 pin, __u8 elcr, __u8 imr, bool coalesced),
	    TP_ARGS(chip, pin, elcr, imr, coalesced),

	TP_STRUCT__entry(
		__field(	__u8,		chip		)
		__field(	__u8,		pin		)
		__field(	__u8,		elcr		)
		__field(	__u8,		imr		)
		__field(	bool,		coalesced	)
	),

	TP_fast_assign(
		__entry->chip		= chip;
		__entry->pin		= pin;
		__entry->elcr		= elcr;
		__entry->imr		= imr;
		__entry->coalesced	= coalesced;
	),

	/* The pin's bit in ELCR selects level vs edge; IMR marks masking. */
	TP_printk("chip %u pin %u (%s%s)%s",
		  __entry->chip, __entry->pin,
		  (__entry->elcr & (1 << __entry->pin)) ? "level":"edge",
		  (__entry->imr & (1 << __entry->pin)) ? "|masked":"",
		  __entry->coalesced ? " (coalesced)" : "")
);

#define kvm_apic_dst_shorthand		\
	{0x0, "dst"},			\
	{0x1, "self"},			\
	{0x2, "all"},			\
	{0x3, "all-but-self"}

TRACE_EVENT(kvm_apic_ipi,
	    TP_PROTO(__u32 icr_low, __u32 dest_id),
	    TP_ARGS(icr_low, dest_id),

	TP_STRUCT__entry(
		__field(	__u32,		icr_low		)
		__field(	__u32,		dest_id		)
	),

	TP_fast_assign(
		__entry->icr_low	= icr_low;
		__entry->dest_id	= dest_id;
	),

	/* Decode the ICR low word: vector, delivery mode, dest mode, etc. */
	TP_printk("dst %x vec %u (%s|%s|%s|%s|%s)",
		  __entry->dest_id, (u8)__entry->icr_low,
		  __print_symbolic((__entry->icr_low >> 8 & 0x7),
				   kvm_deliver_mode),
		  (__entry->icr_low & (1<<11)) ? "logical" : "physical",
		  (__entry->icr_low & (1<<14)) ? "assert" : "de-assert",
		  (__entry->icr_low & (1<<15)) ? "level" : "edge",
		  __print_symbolic((__entry->icr_low >> 18 & 0x3),
				   kvm_apic_dst_shorthand))
);

TRACE_EVENT(kvm_apic_accept_irq,
	    TP_PROTO(__u32 apicid, __u16 dm, __u16 tm, __u8 vec),
	    TP_ARGS(apicid, dm, tm, vec),

	TP_STRUCT__entry(
		__field(	__u32,		apicid		)
		__field(	__u16,		dm		)
		__field(	__u16,		tm		)
		__field(	__u8,		vec		)
	),

	TP_fast_assign(
		__entry->apicid		= apicid;
		__entry->dm		= dm;
		__entry->tm		= tm;
		__entry->vec		= vec;
	),

	TP_printk("apicid %x vec %u (%s|%s)",
		  __entry->apicid, __entry->vec,
		  __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode),
		  __entry->tm ? "level" : "edge")
);

TRACE_EVENT(kvm_eoi,
	    TP_PROTO(struct kvm_lapic *apic, int vector),
	    TP_ARGS(apic, vector),

	TP_STRUCT__entry(
		__field(	__u32,		apicid		)
		__field(	int,		vector		)
	),

	TP_fast_assign(
		/* Note: records the vcpu_id, not the hardware APIC ID. */
		__entry->apicid		= apic->vcpu->vcpu_id;
		__entry->vector		= vector;
	),

	TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector)
);

TRACE_EVENT(kvm_pv_eoi,
	    TP_PROTO(struct kvm_lapic *apic, int vector),
	    TP_ARGS(apic, vector),

	TP_STRUCT__entry(
		__field(	__u32,		apicid		)
		__field(	int,		vector		)
	),

	TP_fast_assign(
		/* Note: records the vcpu_id, not the hardware APIC ID. */
		__entry->apicid		= apic->vcpu->vcpu_id;
		__entry->vector		= vector;
	),

	TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector)
);

/*
 * Tracepoint for nested VMRUN
 */
TRACE_EVENT(kvm_nested_vmrun,
	    TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl,
		     __u32 event_inj, bool npt),
	    TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, npt),

	TP_STRUCT__entry(
		__field(	__u64,		rip		)
		__field(	__u64,		vmcb		)
		__field(	__u64,		nested_rip	)
		__field(	__u32,		int_ctl		)
		__field(	__u32,		event_inj	)
		__field(	bool,		npt		)
	),

	TP_fast_assign(
		__entry->rip		= rip;
		__entry->vmcb		= vmcb;
		__entry->nested_rip	= nested_rip;
		__entry->int_ctl	= int_ctl;
		__entry->event_inj	= event_inj;
		__entry->npt		= npt;
	),

	TP_printk("rip: 0x%016llx vmcb: 0x%016llx nrip: 0x%016llx int_ctl: 0x%08x "
		  "event_inj: 0x%08x npt: %s",
		  __entry->rip, __entry->vmcb, __entry->nested_rip,
		  __entry->int_ctl, __entry->event_inj,
		  __entry->npt ? "on" : "off")
);

TRACE_EVENT(kvm_nested_intercepts,
	    TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions,
		     __u32 intercept1, __u32 intercept2, __u32 intercept3),
	    TP_ARGS(cr_read, cr_write, exceptions, intercept1,
		    intercept2, intercept3),

	TP_STRUCT__entry(
		__field(	__u16,		cr_read		)
		__field(	__u16,		cr_write	)
		__field(	__u32,		exceptions	)
		__field(	__u32,		intercept1	)
		__field(	__u32,		intercept2	)
		__field(	__u32,		intercept3	)
	),

	TP_fast_assign(
		__entry->cr_read	= cr_read;
		__entry->cr_write	= cr_write;
		__entry->exceptions	= exceptions;
		__entry->intercept1	= intercept1;
		__entry->intercept2	= intercept2;
		__entry->intercept3	= intercept3;
	),

	TP_printk("cr_read: %04x cr_write: %04x excp: %08x "
		  "intercepts: %08x %08x %08x",
		  __entry->cr_read, __entry->cr_write, __entry->exceptions,
		  __entry->intercept1, __entry->intercept2, __entry->intercept3)
);
/*
 * Tracepoint for #VMEXIT while nested
 */
TRACE_EVENT_KVM_EXIT(kvm_nested_vmexit);

/*
 * Tracepoint for #VMEXIT reinjected to the guest
 */
TRACE_EVENT(kvm_nested_vmexit_inject,
	    TP_PROTO(__u32 exit_code,
		     __u64 exit_info1, __u64 exit_info2,
		     __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa),
	    TP_ARGS(exit_code, exit_info1, exit_info2,
		    exit_int_info, exit_int_info_err, isa),

	TP_STRUCT__entry(
		__field(	__u32,		exit_code		)
		__field(	__u64,		exit_info1		)
		__field(	__u64,		exit_info2		)
		__field(	__u32,		exit_int_info		)
		__field(	__u32,		exit_int_info_err	)
		__field(	__u32,		isa			)
	),

	TP_fast_assign(
		__entry->exit_code		= exit_code;
		__entry->exit_info1		= exit_info1;
		__entry->exit_info2		= exit_info2;
		__entry->exit_int_info		= exit_int_info;
		__entry->exit_int_info_err	= exit_int_info_err;
		__entry->isa			= isa;
	),

	TP_printk("reason: %s%s%s ext_inf1: 0x%016llx "
		  "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
		  kvm_print_exit_reason(__entry->exit_code, __entry->isa),
		  __entry->exit_info1, __entry->exit_info2,
		  __entry->exit_int_info, __entry->exit_int_info_err)
);

/*
 * Tracepoint for nested #vmexit because of interrupt pending
 */
TRACE_EVENT(kvm_nested_intr_vmexit,
	    TP_PROTO(__u64 rip),
	    TP_ARGS(rip),

	TP_STRUCT__entry(
		__field(	__u64,	rip	)
	),

	TP_fast_assign(
		__entry->rip	=	rip
	),

	TP_printk("rip: 0x%016llx", __entry->rip)
);

/*
 * Tracepoint for INVLPGA emulation
 */
TRACE_EVENT(kvm_invlpga,
	    TP_PROTO(__u64 rip, int asid, u64 address),
	    TP_ARGS(rip, asid, address),

	TP_STRUCT__entry(
		__field(	__u64,	rip	)
		__field(	int,	asid	)
		__field(	__u64,	address	)
	),

	TP_fast_assign(
		__entry->rip		= rip;
		__entry->asid		= asid;
		__entry->address	= address;
	),

	TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx",
		  __entry->rip, __entry->asid, __entry->address)
);

/*
 * Tracepoint for SKINIT emulation
 */
TRACE_EVENT(kvm_skinit,
	    TP_PROTO(__u64 rip, __u32 slb),
	    TP_ARGS(rip, slb),

	TP_STRUCT__entry(
		__field(	__u64,	rip	)
		__field(	__u32,	slb	)
	),

	TP_fast_assign(
		__entry->rip		= rip;
		__entry->slb		= slb;
	),

	TP_printk("rip: 0x%016llx slb: 0x%08x",
		  __entry->rip, __entry->slb)
);

#define KVM_EMUL_INSN_F_CR0_PE (1 << 0)
#define KVM_EMUL_INSN_F_EFL_VM (1 << 1)
#define KVM_EMUL_INSN_F_CS_D   (1 << 2)
#define KVM_EMUL_INSN_F_CS_L   (1 << 3)

#define kvm_trace_symbol_emul_flags			\
	{ 0,			    "real" },		\
	{ KVM_EMUL_INSN_F_CR0_PE			\
	  | KVM_EMUL_INSN_F_EFL_VM, "vm16" },		\
	{ KVM_EMUL_INSN_F_CR0_PE,   "prot16" },		\
	{ KVM_EMUL_INSN_F_CR0_PE			\
	  | KVM_EMUL_INSN_F_CS_D,   "prot32" },		\
	{ KVM_EMUL_INSN_F_CR0_PE			\
	  | KVM_EMUL_INSN_F_CS_L,   "prot64" }

/*
 * Map an X86EMUL_MODE_* emulation mode to its KVM_EMUL_INSN_F_* flag
 * combination; 0xff means "unknown mode".
 */
#define kei_decode_mode(mode) ({			\
	u8 flags = 0xff;				\
	switch (mode) {					\
	case X86EMUL_MODE_REAL:				\
		flags = 0;				\
		break;					\
	case X86EMUL_MODE_VM86:				\
		flags = KVM_EMUL_INSN_F_EFL_VM;		\
		break;					\
	case X86EMUL_MODE_PROT16:			\
		flags = KVM_EMUL_INSN_F_CR0_PE;		\
		break;					\
	case X86EMUL_MODE_PROT32:			\
		flags = KVM_EMUL_INSN_F_CR0_PE		\
			| KVM_EMUL_INSN_F_CS_D;		\
		break;					\
	case X86EMUL_MODE_PROT64:			\
		flags = KVM_EMUL_INSN_F_CR0_PE		\
			| KVM_EMUL_INSN_F_CS_L;		\
		break;					\
	}						\
	flags;						\
	})

TRACE_EVENT(kvm_emulate_insn,
	TP_PROTO(struct kvm_vcpu *vcpu, __u8 failed),
	TP_ARGS(vcpu, failed),

	TP_STRUCT__entry(
		__field(    __u64, rip                       )
		__field(    __u32, csbase                    )
		__field(    __u8,  len                       )
		__array(    __u8,  insn,    15	             )
		__field(    __u8,  flags                     )
		__field(    __u8,  failed                    )
	),

	TP_fast_assign(
		__entry->csbase = static_call(kvm_x86_get_segment_base)(vcpu, VCPU_SREG_CS);
		/* Number of bytes already fetched into the decode buffer. */
		__entry->len = vcpu->arch.emulate_ctxt->fetch.ptr
			       - vcpu->arch.emulate_ctxt->fetch.data;
		/* _eip points past the fetched bytes; back up to the start. */
		__entry->rip = vcpu->arch.emulate_ctxt->_eip - __entry->len;
		memcpy(__entry->insn,
		       vcpu->arch.emulate_ctxt->fetch.data,
		       15);
		__entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt->mode);
		__entry->failed = failed;
	),

	TP_printk("%x:%llx:%s (%s)%s",
		  __entry->csbase, __entry->rip,
		  __print_hex(__entry->insn, __entry->len),
		  __print_symbolic(__entry->flags,
				   kvm_trace_symbol_emul_flags),
		  __entry->failed ? " failed" : ""
		)
	);

#define trace_kvm_emulate_insn_start(vcpu) trace_kvm_emulate_insn(vcpu, 0)
#define trace_kvm_emulate_insn_failed(vcpu) trace_kvm_emulate_insn(vcpu, 1)

TRACE_EVENT(
	vcpu_match_mmio,
	TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match),
	TP_ARGS(gva, gpa, write, gpa_match),

	TP_STRUCT__entry(
		__field(gva_t, gva)
		__field(gpa_t, gpa)
		__field(bool, write)
		__field(bool, gpa_match)
		),

	TP_fast_assign(
		__entry->gva = gva;
		__entry->gpa = gpa;
		__entry->write = write;
		__entry->gpa_match = gpa_match
		),

	TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa,
		  __entry->write ? "Write" : "Read",
		  __entry->gpa_match ? "GPA" : "GVA")
);

TRACE_EVENT(kvm_write_tsc_offset,
	TP_PROTO(unsigned int vcpu_id, __u64 previous_tsc_offset,
		 __u64 next_tsc_offset),
	TP_ARGS(vcpu_id, previous_tsc_offset, next_tsc_offset),

	TP_STRUCT__entry(
		__field( unsigned int,	vcpu_id				)
		__field(	__u64,	previous_tsc_offset		)
		__field(	__u64,	next_tsc_offset			)
	),

	TP_fast_assign(
		__entry->vcpu_id		= vcpu_id;
		__entry->previous_tsc_offset	= previous_tsc_offset;
		__entry->next_tsc_offset	= next_tsc_offset;
	),

	TP_printk("vcpu=%u prev=%llu next=%llu", __entry->vcpu_id,
		  __entry->previous_tsc_offset, __entry->next_tsc_offset)
);

#ifdef CONFIG_X86_64

/* Printable names for the host's vDSO clocksource modes. */
#define host_clocks					\
	{VDSO_CLOCKMODE_NONE, "none"},			\
	{VDSO_CLOCKMODE_TSC,  "tsc"}			\

TRACE_EVENT(kvm_update_master_clock,
	TP_PROTO(bool use_master_clock, unsigned int host_clock, bool offset_matched),
	TP_ARGS(use_master_clock, host_clock, offset_matched),

	TP_STRUCT__entry(
		__field(		bool,	use_master_clock	)
		__field(	unsigned int,	host_clock		)
		__field(		bool,	offset_matched		)
	),

	TP_fast_assign(
		__entry->use_master_clock	= use_master_clock;
		__entry->host_clock		= host_clock;
		__entry->offset_matched		= offset_matched;
	),

	TP_printk("masterclock %d hostclock %s offsetmatched %u",
		  __entry->use_master_clock,
		  __print_symbolic(__entry->host_clock, host_clocks),
		  __entry->offset_matched)
);

TRACE_EVENT(kvm_track_tsc,
	TP_PROTO(unsigned int vcpu_id, unsigned int nr_matched,
		 unsigned int online_vcpus, bool use_master_clock,
		 unsigned int host_clock),
	TP_ARGS(vcpu_id, nr_matched, online_vcpus, use_master_clock,
		host_clock),

	TP_STRUCT__entry(
		__field(	unsigned int,	vcpu_id			)
		__field(	unsigned int,	nr_vcpus_matched_tsc	)
		__field(	unsigned int,	online_vcpus		)
		__field(	bool,		use_master_clock	)
		__field(	unsigned int,	host_clock		)
	),

	TP_fast_assign(
		__entry->vcpu_id		= vcpu_id;
		__entry->nr_vcpus_matched_tsc	= nr_matched;
		__entry->online_vcpus		= online_vcpus;
		__entry->use_master_clock	= use_master_clock;
		__entry->host_clock		= host_clock;
	),

	TP_printk("vcpu_id %u masterclock %u offsetmatched %u nr_online %u"
		  " hostclock %s",
		  __entry->vcpu_id, __entry->use_master_clock,
		  __entry->nr_vcpus_matched_tsc, __entry->online_vcpus,
		  __print_symbolic(__entry->host_clock, host_clocks))
);

#endif /* CONFIG_X86_64 */

/*
 * Tracepoint for PML full VMEXIT.
 */
TRACE_EVENT(kvm_pml_full,
	TP_PROTO(unsigned int vcpu_id),
	TP_ARGS(vcpu_id),

	TP_STRUCT__entry(
		__field(	unsigned int,	vcpu_id			)
	),

	TP_fast_assign(
		__entry->vcpu_id		= vcpu_id;
	),

	TP_printk("vcpu %d: PML full", __entry->vcpu_id)
);

TRACE_EVENT(kvm_ple_window_update,
	TP_PROTO(unsigned int vcpu_id, unsigned int new, unsigned int old),
	TP_ARGS(vcpu_id, new, old),

	TP_STRUCT__entry(
		__field(        unsigned int,   vcpu_id         )
		__field(        unsigned int,        new        )
		__field(        unsigned int,        old        )
	),

	TP_fast_assign(
		__entry->vcpu_id        = vcpu_id;
		__entry->new            = new;
		__entry->old            = old;
	),

	TP_printk("vcpu %u old %u new %u (%s)",
		  __entry->vcpu_id, __entry->old, __entry->new,
		  __entry->old < __entry->new ? "growed" : "shrinked")
);

TRACE_EVENT(kvm_pvclock_update,
	TP_PROTO(unsigned int vcpu_id, struct pvclock_vcpu_time_info *pvclock),
	TP_ARGS(vcpu_id, pvclock),

	TP_STRUCT__entry(
		__field(	unsigned int,	vcpu_id			)
		__field(	__u32,		version			)
		__field(	__u64,		tsc_timestamp		)
		__field(	__u64,		system_time		)
		__field(	__u32,		tsc_to_system_mul	)
		__field(	__s8,		tsc_shift		)
		__field(	__u8,		flags			)
	),

	TP_fast_assign(
		__entry->vcpu_id	   = vcpu_id;
		__entry->version	   = pvclock->version;
		__entry->tsc_timestamp	   = pvclock->tsc_timestamp;
		__entry->system_time	   = pvclock->system_time;
		__entry->tsc_to_system_mul = pvclock->tsc_to_system_mul;
		__entry->tsc_shift	   = pvclock->tsc_shift;
		__entry->flags		   = pvclock->flags;
	),

	TP_printk("vcpu_id %u, pvclock { version %u, tsc_timestamp 0x%llx, "
		  "system_time 0x%llx, tsc_to_system_mul 0x%x, tsc_shift %d, "
		  "flags 0x%x }",
		  __entry->vcpu_id,
		  __entry->version,
		  __entry->tsc_timestamp,
		  __entry->system_time,
		  __entry->tsc_to_system_mul,
		  __entry->tsc_shift,
		  __entry->flags)
);

TRACE_EVENT(kvm_wait_lapic_expire,
	TP_PROTO(unsigned int vcpu_id, s64 delta),
	TP_ARGS(vcpu_id, delta),

	TP_STRUCT__entry(
		__field(	unsigned int,	vcpu_id		)
		__field(	s64,		delta		)
	),

	TP_fast_assign(
		__entry->vcpu_id	   = vcpu_id;
		__entry->delta             = delta;
	),

	/* Negative delta: the timer deadline was reached early. */
	TP_printk("vcpu %u: delta %lld (%s)",
		  __entry->vcpu_id,
		  __entry->delta,
		  __entry->delta < 0 ? "early" : "late")
);

TRACE_EVENT(kvm_smm_transition,
	TP_PROTO(unsigned int vcpu_id, u64 smbase, bool entering),
	TP_ARGS(vcpu_id, smbase, entering),

	TP_STRUCT__entry(
		__field(	unsigned int,	vcpu_id		)
		__field(	u64,		smbase		)
		__field(	bool,		entering	)
	),

	TP_fast_assign(
		__entry->vcpu_id	= vcpu_id;
		__entry->smbase		= smbase;
		__entry->entering	= entering;
	),

	TP_printk("vcpu %u: %s SMM, smbase 0x%llx",
		  __entry->vcpu_id,
		  __entry->entering ? "entering" : "leaving",
		  __entry->smbase)
);

/*
 * Tracepoint for VT-d posted-interrupts.
 */
TRACE_EVENT(kvm_pi_irte_update,
	TP_PROTO(unsigned int host_irq, unsigned int vcpu_id,
		 unsigned int gsi, unsigned int gvec,
		 u64 pi_desc_addr, bool set),
	TP_ARGS(host_irq, vcpu_id, gsi, gvec, pi_desc_addr, set),

	TP_STRUCT__entry(
		__field(	unsigned int,	host_irq	)
		__field(	unsigned int,	vcpu_id		)
		__field(	unsigned int,	gsi		)
		__field(	unsigned int,	gvec		)
		__field(	u64,		pi_desc_addr	)
		__field(	bool,		set		)
	),

	TP_fast_assign(
		__entry->host_irq	= host_irq;
		__entry->vcpu_id	= vcpu_id;
		__entry->gsi		= gsi;
		__entry->gvec		= gvec;
		__entry->pi_desc_addr	= pi_desc_addr;
		__entry->set		= set;
	),

	TP_printk("VT-d PI is %s for irq %u, vcpu %u, gsi: 0x%x, "
		  "gvec: 0x%x, pi_desc_addr: 0x%llx",
		  __entry->set ? "enabled and being updated" : "disabled",
		  __entry->host_irq,
		  __entry->vcpu_id,
		  __entry->gsi,
		  __entry->gvec,
		  __entry->pi_desc_addr)
);

/*
 * Tracepoint for kvm_hv_notify_acked_sint.
 */
TRACE_EVENT(kvm_hv_notify_acked_sint,
	TP_PROTO(int vcpu_id, u32 sint),
	TP_ARGS(vcpu_id, sint),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(u32, sint)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->sint = sint;
	),

	TP_printk("vcpu_id %d sint %u", __entry->vcpu_id, __entry->sint)
);

/*
 * Tracepoint for synic_set_irq.
 */
TRACE_EVENT(kvm_hv_synic_set_irq,
	TP_PROTO(int vcpu_id, u32 sint, int vector, int ret),
	TP_ARGS(vcpu_id, sint, vector, ret),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(u32, sint)
		__field(int, vector)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->sint = sint;
		__entry->vector = vector;
		__entry->ret = ret;
	),

	TP_printk("vcpu_id %d sint %u vector %d ret %d",
		  __entry->vcpu_id, __entry->sint, __entry->vector,
		  __entry->ret)
);

/*
 * Tracepoint for kvm_hv_synic_send_eoi.
 */
TRACE_EVENT(kvm_hv_synic_send_eoi,
	TP_PROTO(int vcpu_id, int vector),
	TP_ARGS(vcpu_id, vector),

	/*
	 * NOTE(review): the sint and ret fields are declared but never
	 * assigned below, so they are recorded as zero/garbage-free record
	 * padding only — confirm whether they can be dropped (would change
	 * the trace event's binary format).
	 */
	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(u32, sint)
		__field(int, vector)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->vector	= vector;
	),

	TP_printk("vcpu_id %d vector %d", __entry->vcpu_id, __entry->vector)
);

/*
 * Tracepoint for synic_set_msr.
 */
TRACE_EVENT(kvm_hv_synic_set_msr,
	TP_PROTO(int vcpu_id, u32 msr, u64 data, bool host),
	TP_ARGS(vcpu_id, msr, data, host),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(u32, msr)
		__field(u64, data)
		__field(bool, host)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->msr = msr;
		__entry->data = data;
		__entry->host = host
	),

	TP_printk("vcpu_id %d msr 0x%x data 0x%llx host %d",
		  __entry->vcpu_id, __entry->msr, __entry->data, __entry->host)
);

/*
 * Tracepoint for stimer_set_config.
 */
TRACE_EVENT(kvm_hv_stimer_set_config,
	TP_PROTO(int vcpu_id, int timer_index, u64 config, bool host),
	TP_ARGS(vcpu_id, timer_index, config, host),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
		__field(u64, config)
		__field(bool, host)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
		__entry->config = config;
		__entry->host = host;
	),

	TP_printk("vcpu_id %d timer %d config 0x%llx host %d",
		  __entry->vcpu_id, __entry->timer_index, __entry->config,
		  __entry->host)
);

/*
 * Tracepoint for stimer_set_count.
 */
TRACE_EVENT(kvm_hv_stimer_set_count,
	TP_PROTO(int vcpu_id, int timer_index, u64 count, bool host),
	TP_ARGS(vcpu_id, timer_index, count, host),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
		__field(u64, count)
		__field(bool, host)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
		__entry->count = count;
		__entry->host = host;
	),

	TP_printk("vcpu_id %d timer %d count %llu host %d",
		  __entry->vcpu_id, __entry->timer_index, __entry->count,
		  __entry->host)
);

/*
 * Tracepoint for stimer_start(periodic timer case).
 */
TRACE_EVENT(kvm_hv_stimer_start_periodic,
	TP_PROTO(int vcpu_id, int timer_index, u64 time_now, u64 exp_time),
	TP_ARGS(vcpu_id, timer_index, time_now, exp_time),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
		__field(u64, time_now)	/* current time at arming */
		__field(u64, exp_time)	/* absolute expiration time */
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
		__entry->time_now = time_now;
		__entry->exp_time = exp_time;
	),

	TP_printk("vcpu_id %d timer %d time_now %llu exp_time %llu",
		  __entry->vcpu_id, __entry->timer_index, __entry->time_now,
		  __entry->exp_time)
);

/*
 * Tracepoint for stimer_start(one-shot timer case).
 */
TRACE_EVENT(kvm_hv_stimer_start_one_shot,
	TP_PROTO(int vcpu_id, int timer_index, u64 time_now, u64 count),
	TP_ARGS(vcpu_id, timer_index, time_now, count),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
		__field(u64, time_now)	/* current time at arming */
		__field(u64, count)	/* one-shot expiration count */
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
		__entry->time_now = time_now;
		__entry->count = count;
	),

	TP_printk("vcpu_id %d timer %d time_now %llu count %llu",
		  __entry->vcpu_id, __entry->timer_index, __entry->time_now,
		  __entry->count)
);

/*
 * Tracepoint for stimer_timer_callback.
 */
TRACE_EVENT(kvm_hv_stimer_callback,
	TP_PROTO(int vcpu_id, int timer_index),
	TP_ARGS(vcpu_id, timer_index),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
	),

	TP_printk("vcpu_id %d timer %d",
		  __entry->vcpu_id, __entry->timer_index)
);

/*
 * Tracepoint for stimer_expiration.
 */
TRACE_EVENT(kvm_hv_stimer_expiration,
	TP_PROTO(int vcpu_id, int timer_index, int direct, int msg_send_result),
	TP_ARGS(vcpu_id, timer_index, direct, msg_send_result),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
		__field(int, direct)		/* direct-mode delivery flag */
		__field(int, msg_send_result)	/* result of message delivery */
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
		__entry->direct = direct;
		__entry->msg_send_result = msg_send_result;
	),

	TP_printk("vcpu_id %d timer %d direct %d send result %d",
		  __entry->vcpu_id, __entry->timer_index,
		  __entry->direct, __entry->msg_send_result)
);

/*
 * Tracepoint for stimer_cleanup.
 */
TRACE_EVENT(kvm_hv_stimer_cleanup,
	TP_PROTO(int vcpu_id, int timer_index),
	TP_ARGS(vcpu_id, timer_index),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
	),

	TP_printk("vcpu_id %d timer %d",
		  __entry->vcpu_id, __entry->timer_index)
);

/*
 * Tracepoint for APICv (de)activation requests.
 */
TRACE_EVENT(kvm_apicv_update_request,
	TP_PROTO(bool activate, unsigned long bit),
	TP_ARGS(activate, bit),

	TP_STRUCT__entry(
		__field(bool, activate)
		__field(unsigned long, bit)	/* inhibit reason bit being updated */
	),

	TP_fast_assign(
		__entry->activate = activate;
		__entry->bit = bit;
	),

	TP_printk("%s bit=%lu",
		  __entry->activate ? "activate" : "deactivate",
		  __entry->bit)
);

/*
 * Tracepoint for AMD AVIC
 */
TRACE_EVENT(kvm_avic_incomplete_ipi,
	TP_PROTO(u32 vcpu, u32 icrh, u32 icrl, u32 id, u32 index),
	TP_ARGS(vcpu, icrh, icrl, id, index),

	TP_STRUCT__entry(
		__field(u32, vcpu)
		__field(u32, icrh)	/* ICR high word */
		__field(u32, icrl)	/* ICR low word */
		__field(u32, id)
		__field(u32, index)
	),

	TP_fast_assign(
		__entry->vcpu = vcpu;
		__entry->icrh = icrh;
		__entry->icrl = icrl;
		__entry->id = id;
		__entry->index = index;
	),

	TP_printk("vcpu=%u, icrh:icrl=%#010x:%08x, id=%u, index=%u",
		  __entry->vcpu, __entry->icrh, __entry->icrl,
		  __entry->id, __entry->index)
);

TRACE_EVENT(kvm_avic_unaccelerated_access,
	TP_PROTO(u32 vcpu, u32 offset, bool ft, bool rw, u32 vec),
	TP_ARGS(vcpu, offset, ft, rw, vec),

	TP_STRUCT__entry(
		__field(u32, vcpu)
		__field(u32, offset)	/* APIC register offset */
		__field(bool, ft)	/* true = trap, false = fault */
		__field(bool, rw)	/* true = write, false = read */
		__field(u32, vec)
	),

	TP_fast_assign(
		__entry->vcpu = vcpu;
		__entry->offset = offset;
		__entry->ft = ft;
		__entry->rw = rw;
		__entry->vec = vec;
	),

	TP_printk("vcpu=%u, offset=%#x(%s), %s, %s, vec=%#x",
		  __entry->vcpu,
		  __entry->offset,
		  __print_symbolic(__entry->offset, kvm_trace_symbol_apic),
		  __entry->ft ? "trap" : "fault",
		  __entry->rw ? "write" : "read",
		  __entry->vec)
);

TRACE_EVENT(kvm_avic_ga_log,
	TP_PROTO(u32 vmid, u32 vcpuid),
	TP_ARGS(vmid, vcpuid),

	TP_STRUCT__entry(
		__field(u32, vmid)
		__field(u32, vcpuid)
	),

	TP_fast_assign(
		__entry->vmid = vmid;
		__entry->vcpuid = vcpuid;
	),

	TP_printk("vmid=%u, vcpuid=%u",
		  __entry->vmid, __entry->vcpuid)
);

/*
 * Tracepoint for whether the preemption (hv) timer is in use for a vCPU.
 */
TRACE_EVENT(kvm_hv_timer_state,
	TP_PROTO(unsigned int vcpu_id, unsigned int hv_timer_in_use),
	TP_ARGS(vcpu_id, hv_timer_in_use),
	TP_STRUCT__entry(
		__field(unsigned int, vcpu_id)
		__field(unsigned int, hv_timer_in_use)
	),
	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->hv_timer_in_use = hv_timer_in_use;
	),
	TP_printk("vcpu_id %x hv_timer %x",
		  __entry->vcpu_id,
		  __entry->hv_timer_in_use)
);

/*
 * Tracepoint for kvm_hv_flush_tlb.
 */
TRACE_EVENT(kvm_hv_flush_tlb,
	TP_PROTO(u64 processor_mask, u64 address_space, u64 flags),
	TP_ARGS(processor_mask, address_space, flags),

	TP_STRUCT__entry(
		__field(u64, processor_mask)
		__field(u64, address_space)
		__field(u64, flags)
	),

	TP_fast_assign(
		__entry->processor_mask = processor_mask;
		__entry->address_space = address_space;
		__entry->flags = flags;
	),

	TP_printk("processor_mask 0x%llx address_space 0x%llx flags 0x%llx",
		  __entry->processor_mask, __entry->address_space,
		  __entry->flags)
);

/*
 * Tracepoint for kvm_hv_flush_tlb_ex.
 */
TRACE_EVENT(kvm_hv_flush_tlb_ex,
	TP_PROTO(u64 valid_bank_mask, u64 format, u64 address_space, u64 flags),
	TP_ARGS(valid_bank_mask, format, address_space, flags),

	TP_STRUCT__entry(
		__field(u64, valid_bank_mask)	/* bitmap of valid vCPU banks */
		__field(u64, format)
		__field(u64, address_space)
		__field(u64, flags)
	),

	TP_fast_assign(
		__entry->valid_bank_mask = valid_bank_mask;
		__entry->format = format;
		__entry->address_space = address_space;
		__entry->flags = flags;
	),

	TP_printk("valid_bank_mask 0x%llx format 0x%llx "
		  "address_space 0x%llx flags 0x%llx",
		  __entry->valid_bank_mask, __entry->format,
		  __entry->address_space, __entry->flags)
);

/*
 * Tracepoints for kvm_hv_send_ipi.
 */
TRACE_EVENT(kvm_hv_send_ipi,
	TP_PROTO(u32 vector, u64 processor_mask),
	TP_ARGS(vector, processor_mask),

	TP_STRUCT__entry(
		__field(u32, vector)
		__field(u64, processor_mask)
	),

	TP_fast_assign(
		__entry->vector = vector;
		__entry->processor_mask = processor_mask;
	),

	TP_printk("vector %x processor_mask 0x%llx",
		  __entry->vector, __entry->processor_mask)
);

TRACE_EVENT(kvm_hv_send_ipi_ex,
	TP_PROTO(u32 vector, u64 format, u64 valid_bank_mask),
	TP_ARGS(vector, format, valid_bank_mask),

	TP_STRUCT__entry(
		__field(u32, vector)
		__field(u64, format)
		__field(u64, valid_bank_mask)	/* bitmap of valid vCPU banks */
	),

	TP_fast_assign(
		__entry->vector = vector;
		__entry->format = format;
		__entry->valid_bank_mask = valid_bank_mask;
	),

	TP_printk("vector %x format %llx valid_bank_mask 0x%llx",
		  __entry->vector, __entry->format,
		  __entry->valid_bank_mask)
);

/*
 * Tracepoint for the PV TLB-flush decision for a vCPU.
 */
TRACE_EVENT(kvm_pv_tlb_flush,
	TP_PROTO(unsigned int vcpu_id, bool need_flush_tlb),
	TP_ARGS(vcpu_id, need_flush_tlb),

	TP_STRUCT__entry(
		__field( unsigned int, vcpu_id )
		__field( bool, need_flush_tlb )
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->need_flush_tlb = need_flush_tlb;
	),

	TP_printk("vcpu %u need_flush_tlb %s", __entry->vcpu_id,
		  __entry->need_flush_tlb ? "true" : "false")
);

/*
 * Tracepoint for failed nested VMX VM-Enter.
 */
TRACE_EVENT(kvm_nested_vmenter_failed,
	TP_PROTO(const char *msg, u32 err),
	TP_ARGS(msg, err),

	TP_STRUCT__entry(
		__string(msg, msg)	/* human-readable failure reason */
		__field(u32, err)	/* VMX instruction error, 0 if none */
	),

	TP_fast_assign(
		__assign_str(msg, msg);
		__entry->err = err;
	),

	/* err == 0 prints nothing extra; otherwise decode the VMX error */
	TP_printk("%s%s", __get_str(msg), !__entry->err ? "" :
		  __print_symbolic(__entry->err, VMX_VMENTER_INSTRUCTION_ERRORS))
);

/*
 * Tracepoint for syndbg_set_msr.
 */
TRACE_EVENT(kvm_hv_syndbg_set_msr,
	TP_PROTO(int vcpu_id, u32 vp_index, u32 msr, u64 data),
	TP_ARGS(vcpu_id, vp_index, msr, data),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(u32, vp_index)	/* Hyper-V VP index */
		__field(u32, msr)
		__field(u64, data)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->vp_index = vp_index;
		__entry->msr = msr;
		__entry->data = data;
	),

	TP_printk("vcpu_id %d vp_index %u msr 0x%x data 0x%llx",
		  __entry->vcpu_id, __entry->vp_index, __entry->msr,
		  __entry->data)
);

/*
 * Tracepoint for syndbg_get_msr.
 */
TRACE_EVENT(kvm_hv_syndbg_get_msr,
	TP_PROTO(int vcpu_id, u32 vp_index, u32 msr, u64 data),
	TP_ARGS(vcpu_id, vp_index, msr, data),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(u32, vp_index)	/* Hyper-V VP index */
		__field(u32, msr)
		__field(u64, data)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->vp_index = vp_index;
		__entry->msr = msr;
		__entry->data = data;
	),

	TP_printk("vcpu_id %d vp_index %u msr 0x%x data 0x%llx",
		  __entry->vcpu_id, __entry->vp_index, __entry->msr,
		  __entry->data)
);

/*
 * Tracepoint for the start of VMGEXIT processing
 */
TRACE_EVENT(kvm_vmgexit_enter,
	TP_PROTO(unsigned int vcpu_id, struct ghcb *ghcb),
	TP_ARGS(vcpu_id, ghcb),

	TP_STRUCT__entry(
		__field(unsigned int, vcpu_id)
		__field(u64, exit_reason)	/* GHCB sw_exit_code */
		__field(u64, info1)		/* GHCB sw_exit_info_1 */
		__field(u64, info2)		/* GHCB sw_exit_info_2 */
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->exit_reason = ghcb->save.sw_exit_code;
		__entry->info1 = ghcb->save.sw_exit_info_1;
		__entry->info2 = ghcb->save.sw_exit_info_2;
	),

	TP_printk("vcpu %u, exit_reason %llx, exit_info1 %llx, exit_info2 %llx",
		  __entry->vcpu_id, __entry->exit_reason,
		  __entry->info1, __entry->info2)
);

/*
 * Tracepoint for the end of VMGEXIT processing
 */
TRACE_EVENT(kvm_vmgexit_exit,
	TP_PROTO(unsigned int vcpu_id, struct ghcb *ghcb),
	TP_ARGS(vcpu_id, ghcb),

	TP_STRUCT__entry(
		__field(unsigned int, vcpu_id)
		__field(u64, exit_reason)	/* GHCB sw_exit_code */
		__field(u64, info1)		/* GHCB sw_exit_info_1 */
		__field(u64, info2)		/* GHCB sw_exit_info_2 */
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->exit_reason = ghcb->save.sw_exit_code;
		__entry->info1 = ghcb->save.sw_exit_info_1;
		__entry->info2 = ghcb->save.sw_exit_info_2;
	),

	TP_printk("vcpu %u, exit_reason %llx, exit_info1 %llx, exit_info2 %llx",
		  __entry->vcpu_id, __entry->exit_reason,
		  __entry->info1, __entry->info2)
);

/*
 * Tracepoint for the start of VMGEXIT MSR protocol processing
 */
TRACE_EVENT(kvm_vmgexit_msr_protocol_enter,
	TP_PROTO(unsigned int vcpu_id, u64 ghcb_gpa),
	TP_ARGS(vcpu_id, ghcb_gpa),

	TP_STRUCT__entry(
		__field(unsigned int, vcpu_id)
		__field(u64, ghcb_gpa)	/* guest physical address of the GHCB */
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->ghcb_gpa = ghcb_gpa;
	),

	TP_printk("vcpu %u, ghcb_gpa %016llx",
		  __entry->vcpu_id, __entry->ghcb_gpa)
);

/*
 * Tracepoint for the end of VMGEXIT MSR protocol processing
 */
TRACE_EVENT(kvm_vmgexit_msr_protocol_exit,
	TP_PROTO(unsigned int vcpu_id, u64 ghcb_gpa, int result),
	TP_ARGS(vcpu_id, ghcb_gpa, result),

	TP_STRUCT__entry(
		__field(unsigned int, vcpu_id)
		__field(u64, ghcb_gpa)	/* guest physical address of the GHCB */
		__field(int, result)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->ghcb_gpa = ghcb_gpa;
		__entry->result = result;
	),

	TP_printk("vcpu %u, ghcb_gpa %016llx, result %d",
		  __entry->vcpu_id, __entry->ghcb_gpa, __entry->result)
);

#endif /* _TRACE_KVM_H */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../arch/x86/kvm
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace

/* This part must be outside protection */
#include <trace/define_trace.h>