1 /* SPDX-License-Identifier: GPL-2.0 */ 2 #if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ) 3 #define _TRACE_KVM_H 4 5 #include <linux/tracepoint.h> 6 #include <asm/vmx.h> 7 #include <asm/svm.h> 8 #include <asm/clocksource.h> 9 #include <asm/pvclock-abi.h> 10 11 #undef TRACE_SYSTEM 12 #define TRACE_SYSTEM kvm 13 14 /* 15 * Tracepoint for guest mode entry. 16 */ 17 TRACE_EVENT(kvm_entry, 18 TP_PROTO(struct kvm_vcpu *vcpu), 19 TP_ARGS(vcpu), 20 21 TP_STRUCT__entry( 22 __field( unsigned int, vcpu_id ) 23 __field( unsigned long, rip ) 24 ), 25 26 TP_fast_assign( 27 __entry->vcpu_id = vcpu->vcpu_id; 28 __entry->rip = kvm_rip_read(vcpu); 29 ), 30 31 TP_printk("vcpu %u, rip 0x%lx", __entry->vcpu_id, __entry->rip) 32 ); 33 34 /* 35 * Tracepoint for hypercall. 36 */ 37 TRACE_EVENT(kvm_hypercall, 38 TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1, 39 unsigned long a2, unsigned long a3), 40 TP_ARGS(nr, a0, a1, a2, a3), 41 42 TP_STRUCT__entry( 43 __field( unsigned long, nr ) 44 __field( unsigned long, a0 ) 45 __field( unsigned long, a1 ) 46 __field( unsigned long, a2 ) 47 __field( unsigned long, a3 ) 48 ), 49 50 TP_fast_assign( 51 __entry->nr = nr; 52 __entry->a0 = a0; 53 __entry->a1 = a1; 54 __entry->a2 = a2; 55 __entry->a3 = a3; 56 ), 57 58 TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx", 59 __entry->nr, __entry->a0, __entry->a1, __entry->a2, 60 __entry->a3) 61 ); 62 63 /* 64 * Tracepoint for hypercall. 65 */ 66 TRACE_EVENT(kvm_hv_hypercall, 67 TP_PROTO(__u16 code, bool fast, __u16 var_cnt, __u16 rep_cnt, 68 __u16 rep_idx, __u64 ingpa, __u64 outgpa), 69 TP_ARGS(code, fast, var_cnt, rep_cnt, rep_idx, ingpa, outgpa), 70 71 TP_STRUCT__entry( 72 __field( __u16, rep_cnt ) 73 __field( __u16, rep_idx ) 74 __field( __u64, ingpa ) 75 __field( __u64, outgpa ) 76 __field( __u16, code ) 77 __field( __u16, var_cnt ) 78 __field( bool, fast ) 79 ), 80 81 TP_fast_assign( 82 __entry->rep_cnt = rep_cnt; 83 __entry->rep_idx = rep_idx; 84 __entry->ingpa = ingpa; 85 __entry->outgpa = outgpa; 86 __entry->code = code; 87 __entry->var_cnt = var_cnt; 88 __entry->fast = fast; 89 ), 90 91 TP_printk("code 0x%x %s var_cnt 0x%x rep_cnt 0x%x idx 0x%x in 0x%llx out 0x%llx", 92 __entry->code, __entry->fast ? "fast" : "slow", 93 __entry->var_cnt, __entry->rep_cnt, __entry->rep_idx, 94 __entry->ingpa, __entry->outgpa) 95 ); 96 97 TRACE_EVENT(kvm_hv_hypercall_done, 98 TP_PROTO(u64 result), 99 TP_ARGS(result), 100 101 TP_STRUCT__entry( 102 __field(__u64, result) 103 ), 104 105 TP_fast_assign( 106 __entry->result = result; 107 ), 108 109 TP_printk("result 0x%llx", __entry->result) 110 ); 111 112 /* 113 * Tracepoint for Xen hypercall. 114 */ 115 TRACE_EVENT(kvm_xen_hypercall, 116 TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1, 117 unsigned long a2, unsigned long a3, unsigned long a4, 118 unsigned long a5), 119 TP_ARGS(nr, a0, a1, a2, a3, a4, a5), 120 121 TP_STRUCT__entry( 122 __field(unsigned long, nr) 123 __field(unsigned long, a0) 124 __field(unsigned long, a1) 125 __field(unsigned long, a2) 126 __field(unsigned long, a3) 127 __field(unsigned long, a4) 128 __field(unsigned long, a5) 129 ), 130 131 TP_fast_assign( 132 __entry->nr = nr; 133 __entry->a0 = a0; 134 __entry->a1 = a1; 135 __entry->a2 = a2; 136 __entry->a3 = a3; 137 __entry->a4 = a4; 138 __entry->a4 = a5; 139 ), 140 141 TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx a4 0x%lx a5 %lx", 142 __entry->nr, __entry->a0, __entry->a1, __entry->a2, 143 __entry->a3, __entry->a4, __entry->a5) 144 ); 145 146 147 148 /* 149 * Tracepoint for PIO. 150 */ 151 152 #define KVM_PIO_IN 0 153 #define KVM_PIO_OUT 1 154 155 TRACE_EVENT(kvm_pio, 156 TP_PROTO(unsigned int rw, unsigned int port, unsigned int size, 157 unsigned int count, const void *data), 158 TP_ARGS(rw, port, size, count, data), 159 160 TP_STRUCT__entry( 161 __field( unsigned int, rw ) 162 __field( unsigned int, port ) 163 __field( unsigned int, size ) 164 __field( unsigned int, count ) 165 __field( unsigned int, val ) 166 ), 167 168 TP_fast_assign( 169 __entry->rw = rw; 170 __entry->port = port; 171 __entry->size = size; 172 __entry->count = count; 173 if (size == 1) 174 __entry->val = *(unsigned char *)data; 175 else if (size == 2) 176 __entry->val = *(unsigned short *)data; 177 else 178 __entry->val = *(unsigned int *)data; 179 ), 180 181 TP_printk("pio_%s at 0x%x size %d count %d val 0x%x %s", 182 __entry->rw ? "write" : "read", 183 __entry->port, __entry->size, __entry->count, __entry->val, 184 __entry->count > 1 ? "(...)" : "") 185 ); 186 187 /* 188 * Tracepoint for fast mmio. 189 */ 190 TRACE_EVENT(kvm_fast_mmio, 191 TP_PROTO(u64 gpa), 192 TP_ARGS(gpa), 193 194 TP_STRUCT__entry( 195 __field(u64, gpa) 196 ), 197 198 TP_fast_assign( 199 __entry->gpa = gpa; 200 ), 201 202 TP_printk("fast mmio at gpa 0x%llx", __entry->gpa) 203 ); 204 205 /* 206 * Tracepoint for cpuid. 207 */ 208 TRACE_EVENT(kvm_cpuid, 209 TP_PROTO(unsigned int function, unsigned int index, unsigned long rax, 210 unsigned long rbx, unsigned long rcx, unsigned long rdx, 211 bool found, bool used_max_basic), 212 TP_ARGS(function, index, rax, rbx, rcx, rdx, found, used_max_basic), 213 214 TP_STRUCT__entry( 215 __field( unsigned int, function ) 216 __field( unsigned int, index ) 217 __field( unsigned long, rax ) 218 __field( unsigned long, rbx ) 219 __field( unsigned long, rcx ) 220 __field( unsigned long, rdx ) 221 __field( bool, found ) 222 __field( bool, used_max_basic ) 223 ), 224 225 TP_fast_assign( 226 __entry->function = function; 227 __entry->index = index; 228 __entry->rax = rax; 229 __entry->rbx = rbx; 230 __entry->rcx = rcx; 231 __entry->rdx = rdx; 232 __entry->found = found; 233 __entry->used_max_basic = used_max_basic; 234 ), 235 236 TP_printk("func %x idx %x rax %lx rbx %lx rcx %lx rdx %lx, cpuid entry %s%s", 237 __entry->function, __entry->index, __entry->rax, 238 __entry->rbx, __entry->rcx, __entry->rdx, 239 __entry->found ? "found" : "not found", 240 __entry->used_max_basic ? ", used max basic" : "") 241 ); 242 243 #define AREG(x) { APIC_##x, "APIC_" #x } 244 245 #define kvm_trace_symbol_apic \ 246 AREG(ID), AREG(LVR), AREG(TASKPRI), AREG(ARBPRI), AREG(PROCPRI), \ 247 AREG(EOI), AREG(RRR), AREG(LDR), AREG(DFR), AREG(SPIV), AREG(ISR), \ 248 AREG(TMR), AREG(IRR), AREG(ESR), AREG(ICR), AREG(ICR2), AREG(LVTT), \ 249 AREG(LVTTHMR), AREG(LVTPC), AREG(LVT0), AREG(LVT1), AREG(LVTERR), \ 250 AREG(TMICT), AREG(TMCCT), AREG(TDCR), AREG(SELF_IPI), AREG(EFEAT), \ 251 AREG(ECTRL) 252 /* 253 * Tracepoint for apic access. 254 */ 255 TRACE_EVENT(kvm_apic, 256 TP_PROTO(unsigned int rw, unsigned int reg, u64 val), 257 TP_ARGS(rw, reg, val), 258 259 TP_STRUCT__entry( 260 __field( unsigned int, rw ) 261 __field( unsigned int, reg ) 262 __field( u64, val ) 263 ), 264 265 TP_fast_assign( 266 __entry->rw = rw; 267 __entry->reg = reg; 268 __entry->val = val; 269 ), 270 271 TP_printk("apic_%s %s = 0x%llx", 272 __entry->rw ? "write" : "read", 273 __print_symbolic(__entry->reg, kvm_trace_symbol_apic), 274 __entry->val) 275 ); 276 277 #define trace_kvm_apic_read(reg, val) trace_kvm_apic(0, reg, val) 278 #define trace_kvm_apic_write(reg, val) trace_kvm_apic(1, reg, val) 279 280 #define KVM_ISA_VMX 1 281 #define KVM_ISA_SVM 2 282 283 #define kvm_print_exit_reason(exit_reason, isa) \ 284 (isa == KVM_ISA_VMX) ? \ 285 __print_symbolic(exit_reason & 0xffff, VMX_EXIT_REASONS) : \ 286 __print_symbolic(exit_reason, SVM_EXIT_REASONS), \ 287 (isa == KVM_ISA_VMX && exit_reason & ~0xffff) ? " " : "", \ 288 (isa == KVM_ISA_VMX) ? \ 289 __print_flags(exit_reason & ~0xffff, " ", VMX_EXIT_REASON_FLAGS) : "" 290 291 #define TRACE_EVENT_KVM_EXIT(name) \ 292 TRACE_EVENT(name, \ 293 TP_PROTO(struct kvm_vcpu *vcpu, u32 isa), \ 294 TP_ARGS(vcpu, isa), \ 295 \ 296 TP_STRUCT__entry( \ 297 __field( unsigned int, exit_reason ) \ 298 __field( unsigned long, guest_rip ) \ 299 __field( u32, isa ) \ 300 __field( u64, info1 ) \ 301 __field( u64, info2 ) \ 302 __field( u32, intr_info ) \ 303 __field( u32, error_code ) \ 304 __field( unsigned int, vcpu_id ) \ 305 ), \ 306 \ 307 TP_fast_assign( \ 308 __entry->guest_rip = kvm_rip_read(vcpu); \ 309 __entry->isa = isa; \ 310 __entry->vcpu_id = vcpu->vcpu_id; \ 311 static_call(kvm_x86_get_exit_info)(vcpu, \ 312 &__entry->exit_reason, \ 313 &__entry->info1, \ 314 &__entry->info2, \ 315 &__entry->intr_info, \ 316 &__entry->error_code); \ 317 ), \ 318 \ 319 TP_printk("vcpu %u reason %s%s%s rip 0x%lx info1 0x%016llx " \ 320 "info2 0x%016llx intr_info 0x%08x error_code 0x%08x", \ 321 __entry->vcpu_id, \ 322 kvm_print_exit_reason(__entry->exit_reason, __entry->isa), \ 323 __entry->guest_rip, __entry->info1, __entry->info2, \ 324 __entry->intr_info, __entry->error_code) \ 325 ) 326 327 /* 328 * Tracepoint for kvm guest exit: 329 */ 330 TRACE_EVENT_KVM_EXIT(kvm_exit); 331 332 /* 333 * Tracepoint for kvm interrupt injection: 334 */ 335 TRACE_EVENT(kvm_inj_virq, 336 TP_PROTO(unsigned int vector, bool soft, bool reinjected), 337 TP_ARGS(vector, soft, reinjected), 338 339 TP_STRUCT__entry( 340 __field( unsigned int, vector ) 341 __field( bool, soft ) 342 __field( bool, reinjected ) 343 ), 344 345 TP_fast_assign( 346 __entry->vector = vector; 347 __entry->soft = soft; 348 __entry->reinjected = reinjected; 349 ), 350 351 TP_printk("%s 0x%x%s", 352 __entry->soft ? "Soft/INTn" : "IRQ", __entry->vector, 353 __entry->reinjected ? " [reinjected]" : "") 354 ); 355 356 #define EXS(x) { x##_VECTOR, "#" #x } 357 358 #define kvm_trace_sym_exc \ 359 EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM), \ 360 EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF), \ 361 EXS(MF), EXS(AC), EXS(MC) 362 363 /* 364 * Tracepoint for kvm interrupt injection: 365 */ 366 TRACE_EVENT(kvm_inj_exception, 367 TP_PROTO(unsigned exception, bool has_error, unsigned error_code, 368 bool reinjected), 369 TP_ARGS(exception, has_error, error_code, reinjected), 370 371 TP_STRUCT__entry( 372 __field( u8, exception ) 373 __field( u8, has_error ) 374 __field( u32, error_code ) 375 __field( bool, reinjected ) 376 ), 377 378 TP_fast_assign( 379 __entry->exception = exception; 380 __entry->has_error = has_error; 381 __entry->error_code = error_code; 382 __entry->reinjected = reinjected; 383 ), 384 385 TP_printk("%s%s%s%s%s", 386 __print_symbolic(__entry->exception, kvm_trace_sym_exc), 387 !__entry->has_error ? "" : " (", 388 !__entry->has_error ? "" : __print_symbolic(__entry->error_code, { }), 389 !__entry->has_error ? "" : ")", 390 __entry->reinjected ? " [reinjected]" : "") 391 ); 392 393 /* 394 * Tracepoint for page fault. 395 */ 396 TRACE_EVENT(kvm_page_fault, 397 TP_PROTO(unsigned long fault_address, unsigned int error_code), 398 TP_ARGS(fault_address, error_code), 399 400 TP_STRUCT__entry( 401 __field( unsigned long, fault_address ) 402 __field( unsigned int, error_code ) 403 ), 404 405 TP_fast_assign( 406 __entry->fault_address = fault_address; 407 __entry->error_code = error_code; 408 ), 409 410 TP_printk("address %lx error_code %x", 411 __entry->fault_address, __entry->error_code) 412 ); 413 414 /* 415 * Tracepoint for guest MSR access. 416 */ 417 TRACE_EVENT(kvm_msr, 418 TP_PROTO(unsigned write, u32 ecx, u64 data, bool exception), 419 TP_ARGS(write, ecx, data, exception), 420 421 TP_STRUCT__entry( 422 __field( unsigned, write ) 423 __field( u32, ecx ) 424 __field( u64, data ) 425 __field( u8, exception ) 426 ), 427 428 TP_fast_assign( 429 __entry->write = write; 430 __entry->ecx = ecx; 431 __entry->data = data; 432 __entry->exception = exception; 433 ), 434 435 TP_printk("msr_%s %x = 0x%llx%s", 436 __entry->write ? "write" : "read", 437 __entry->ecx, __entry->data, 438 __entry->exception ? " (#GP)" : "") 439 ); 440 441 #define trace_kvm_msr_read(ecx, data) trace_kvm_msr(0, ecx, data, false) 442 #define trace_kvm_msr_write(ecx, data) trace_kvm_msr(1, ecx, data, false) 443 #define trace_kvm_msr_read_ex(ecx) trace_kvm_msr(0, ecx, 0, true) 444 #define trace_kvm_msr_write_ex(ecx, data) trace_kvm_msr(1, ecx, data, true) 445 446 /* 447 * Tracepoint for guest CR access. 448 */ 449 TRACE_EVENT(kvm_cr, 450 TP_PROTO(unsigned int rw, unsigned int cr, unsigned long val), 451 TP_ARGS(rw, cr, val), 452 453 TP_STRUCT__entry( 454 __field( unsigned int, rw ) 455 __field( unsigned int, cr ) 456 __field( unsigned long, val ) 457 ), 458 459 TP_fast_assign( 460 __entry->rw = rw; 461 __entry->cr = cr; 462 __entry->val = val; 463 ), 464 465 TP_printk("cr_%s %x = 0x%lx", 466 __entry->rw ? "write" : "read", 467 __entry->cr, __entry->val) 468 ); 469 470 #define trace_kvm_cr_read(cr, val) trace_kvm_cr(0, cr, val) 471 #define trace_kvm_cr_write(cr, val) trace_kvm_cr(1, cr, val) 472 473 TRACE_EVENT(kvm_pic_set_irq, 474 TP_PROTO(__u8 chip, __u8 pin, __u8 elcr, __u8 imr, bool coalesced), 475 TP_ARGS(chip, pin, elcr, imr, coalesced), 476 477 TP_STRUCT__entry( 478 __field( __u8, chip ) 479 __field( __u8, pin ) 480 __field( __u8, elcr ) 481 __field( __u8, imr ) 482 __field( bool, coalesced ) 483 ), 484 485 TP_fast_assign( 486 __entry->chip = chip; 487 __entry->pin = pin; 488 __entry->elcr = elcr; 489 __entry->imr = imr; 490 __entry->coalesced = coalesced; 491 ), 492 493 TP_printk("chip %u pin %u (%s%s)%s", 494 __entry->chip, __entry->pin, 495 (__entry->elcr & (1 << __entry->pin)) ? "level":"edge", 496 (__entry->imr & (1 << __entry->pin)) ? "|masked":"", 497 __entry->coalesced ? " (coalesced)" : "") 498 ); 499 500 #define kvm_apic_dst_shorthand \ 501 {0x0, "dst"}, \ 502 {0x1, "self"}, \ 503 {0x2, "all"}, \ 504 {0x3, "all-but-self"} 505 506 TRACE_EVENT(kvm_apic_ipi, 507 TP_PROTO(__u32 icr_low, __u32 dest_id), 508 TP_ARGS(icr_low, dest_id), 509 510 TP_STRUCT__entry( 511 __field( __u32, icr_low ) 512 __field( __u32, dest_id ) 513 ), 514 515 TP_fast_assign( 516 __entry->icr_low = icr_low; 517 __entry->dest_id = dest_id; 518 ), 519 520 TP_printk("dst %x vec %u (%s|%s|%s|%s|%s)", 521 __entry->dest_id, (u8)__entry->icr_low, 522 __print_symbolic((__entry->icr_low >> 8 & 0x7), 523 kvm_deliver_mode), 524 (__entry->icr_low & (1<<11)) ? "logical" : "physical", 525 (__entry->icr_low & (1<<14)) ? "assert" : "de-assert", 526 (__entry->icr_low & (1<<15)) ? "level" : "edge", 527 __print_symbolic((__entry->icr_low >> 18 & 0x3), 528 kvm_apic_dst_shorthand)) 529 ); 530 531 TRACE_EVENT(kvm_apic_accept_irq, 532 TP_PROTO(__u32 apicid, __u16 dm, __u16 tm, __u8 vec), 533 TP_ARGS(apicid, dm, tm, vec), 534 535 TP_STRUCT__entry( 536 __field( __u32, apicid ) 537 __field( __u16, dm ) 538 __field( __u16, tm ) 539 __field( __u8, vec ) 540 ), 541 542 TP_fast_assign( 543 __entry->apicid = apicid; 544 __entry->dm = dm; 545 __entry->tm = tm; 546 __entry->vec = vec; 547 ), 548 549 TP_printk("apicid %x vec %u (%s|%s)", 550 __entry->apicid, __entry->vec, 551 __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode), 552 __entry->tm ? "level" : "edge") 553 ); 554 555 TRACE_EVENT(kvm_eoi, 556 TP_PROTO(struct kvm_lapic *apic, int vector), 557 TP_ARGS(apic, vector), 558 559 TP_STRUCT__entry( 560 __field( __u32, apicid ) 561 __field( int, vector ) 562 ), 563 564 TP_fast_assign( 565 __entry->apicid = apic->vcpu->vcpu_id; 566 __entry->vector = vector; 567 ), 568 569 TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector) 570 ); 571 572 TRACE_EVENT(kvm_pv_eoi, 573 TP_PROTO(struct kvm_lapic *apic, int vector), 574 TP_ARGS(apic, vector), 575 576 TP_STRUCT__entry( 577 __field( __u32, apicid ) 578 __field( int, vector ) 579 ), 580 581 TP_fast_assign( 582 __entry->apicid = apic->vcpu->vcpu_id; 583 __entry->vector = vector; 584 ), 585 586 TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector) 587 ); 588 589 /* 590 * Tracepoint for nested VMRUN 591 */ 592 TRACE_EVENT(kvm_nested_vmrun, 593 TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl, 594 __u32 event_inj, bool npt), 595 TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, npt), 596 597 TP_STRUCT__entry( 598 __field( __u64, rip ) 599 __field( __u64, vmcb ) 600 __field( __u64, nested_rip ) 601 __field( __u32, int_ctl ) 602 __field( __u32, event_inj ) 603 __field( bool, npt ) 604 ), 605 606 TP_fast_assign( 607 __entry->rip = rip; 608 __entry->vmcb = vmcb; 609 __entry->nested_rip = nested_rip; 610 __entry->int_ctl = int_ctl; 611 __entry->event_inj = event_inj; 612 __entry->npt = npt; 613 ), 614 615 TP_printk("rip: 0x%016llx vmcb: 0x%016llx nrip: 0x%016llx int_ctl: 0x%08x " 616 "event_inj: 0x%08x npt: %s", 617 __entry->rip, __entry->vmcb, __entry->nested_rip, 618 __entry->int_ctl, __entry->event_inj, 619 __entry->npt ? "on" : "off") 620 ); 621 622 TRACE_EVENT(kvm_nested_intercepts, 623 TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions, 624 __u32 intercept1, __u32 intercept2, __u32 intercept3), 625 TP_ARGS(cr_read, cr_write, exceptions, intercept1, 626 intercept2, intercept3), 627 628 TP_STRUCT__entry( 629 __field( __u16, cr_read ) 630 __field( __u16, cr_write ) 631 __field( __u32, exceptions ) 632 __field( __u32, intercept1 ) 633 __field( __u32, intercept2 ) 634 __field( __u32, intercept3 ) 635 ), 636 637 TP_fast_assign( 638 __entry->cr_read = cr_read; 639 __entry->cr_write = cr_write; 640 __entry->exceptions = exceptions; 641 __entry->intercept1 = intercept1; 642 __entry->intercept2 = intercept2; 643 __entry->intercept3 = intercept3; 644 ), 645 646 TP_printk("cr_read: %04x cr_write: %04x excp: %08x " 647 "intercepts: %08x %08x %08x", 648 __entry->cr_read, __entry->cr_write, __entry->exceptions, 649 __entry->intercept1, __entry->intercept2, __entry->intercept3) 650 ); 651 /* 652 * Tracepoint for #VMEXIT while nested 653 */ 654 TRACE_EVENT_KVM_EXIT(kvm_nested_vmexit); 655 656 /* 657 * Tracepoint for #VMEXIT reinjected to the guest 658 */ 659 TRACE_EVENT(kvm_nested_vmexit_inject, 660 TP_PROTO(__u32 exit_code, 661 __u64 exit_info1, __u64 exit_info2, 662 __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa), 663 TP_ARGS(exit_code, exit_info1, exit_info2, 664 exit_int_info, exit_int_info_err, isa), 665 666 TP_STRUCT__entry( 667 __field( __u32, exit_code ) 668 __field( __u64, exit_info1 ) 669 __field( __u64, exit_info2 ) 670 __field( __u32, exit_int_info ) 671 __field( __u32, exit_int_info_err ) 672 __field( __u32, isa ) 673 ), 674 675 TP_fast_assign( 676 __entry->exit_code = exit_code; 677 __entry->exit_info1 = exit_info1; 678 __entry->exit_info2 = exit_info2; 679 __entry->exit_int_info = exit_int_info; 680 __entry->exit_int_info_err = exit_int_info_err; 681 __entry->isa = isa; 682 ), 683 684 TP_printk("reason: %s%s%s ext_inf1: 0x%016llx " 685 "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x", 686 kvm_print_exit_reason(__entry->exit_code, __entry->isa), 687 __entry->exit_info1, __entry->exit_info2, 688 __entry->exit_int_info, __entry->exit_int_info_err) 689 ); 690 691 /* 692 * Tracepoint for nested #vmexit because of interrupt pending 693 */ 694 TRACE_EVENT(kvm_nested_intr_vmexit, 695 TP_PROTO(__u64 rip), 696 TP_ARGS(rip), 697 698 TP_STRUCT__entry( 699 __field( __u64, rip ) 700 ), 701 702 TP_fast_assign( 703 __entry->rip = rip 704 ), 705 706 TP_printk("rip: 0x%016llx", __entry->rip) 707 ); 708 709 /* 710 * Tracepoint for nested #vmexit because of interrupt pending 711 */ 712 TRACE_EVENT(kvm_invlpga, 713 TP_PROTO(__u64 rip, int asid, u64 address), 714 TP_ARGS(rip, asid, address), 715 716 TP_STRUCT__entry( 717 __field( __u64, rip ) 718 __field( int, asid ) 719 __field( __u64, address ) 720 ), 721 722 TP_fast_assign( 723 __entry->rip = rip; 724 __entry->asid = asid; 725 __entry->address = address; 726 ), 727 728 TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx", 729 __entry->rip, __entry->asid, __entry->address) 730 ); 731 732 /* 733 * Tracepoint for nested #vmexit because of interrupt pending 734 */ 735 TRACE_EVENT(kvm_skinit, 736 TP_PROTO(__u64 rip, __u32 slb), 737 TP_ARGS(rip, slb), 738 739 TP_STRUCT__entry( 740 __field( __u64, rip ) 741 __field( __u32, slb ) 742 ), 743 744 TP_fast_assign( 745 __entry->rip = rip; 746 __entry->slb = slb; 747 ), 748 749 TP_printk("rip: 0x%016llx slb: 0x%08x", 750 __entry->rip, __entry->slb) 751 ); 752 753 #define KVM_EMUL_INSN_F_CR0_PE (1 << 0) 754 #define KVM_EMUL_INSN_F_EFL_VM (1 << 1) 755 #define KVM_EMUL_INSN_F_CS_D (1 << 2) 756 #define KVM_EMUL_INSN_F_CS_L (1 << 3) 757 758 #define kvm_trace_symbol_emul_flags \ 759 { 0, "real" }, \ 760 { KVM_EMUL_INSN_F_CR0_PE \ 761 | KVM_EMUL_INSN_F_EFL_VM, "vm16" }, \ 762 { KVM_EMUL_INSN_F_CR0_PE, "prot16" }, \ 763 { KVM_EMUL_INSN_F_CR0_PE \ 764 | KVM_EMUL_INSN_F_CS_D, "prot32" }, \ 765 { KVM_EMUL_INSN_F_CR0_PE \ 766 | KVM_EMUL_INSN_F_CS_L, "prot64" } 767 768 #define kei_decode_mode(mode) ({ \ 769 u8 flags = 0xff; \ 770 switch (mode) { \ 771 case X86EMUL_MODE_REAL: \ 772 flags = 0; \ 773 break; \ 774 case X86EMUL_MODE_VM86: \ 775 flags = KVM_EMUL_INSN_F_EFL_VM; \ 776 break; \ 777 case X86EMUL_MODE_PROT16: \ 778 flags = KVM_EMUL_INSN_F_CR0_PE; \ 779 break; \ 780 case X86EMUL_MODE_PROT32: \ 781 flags = KVM_EMUL_INSN_F_CR0_PE \ 782 | KVM_EMUL_INSN_F_CS_D; \ 783 break; \ 784 case X86EMUL_MODE_PROT64: \ 785 flags = KVM_EMUL_INSN_F_CR0_PE \ 786 | KVM_EMUL_INSN_F_CS_L; \ 787 break; \ 788 } \ 789 flags; \ 790 }) 791 792 TRACE_EVENT(kvm_emulate_insn, 793 TP_PROTO(struct kvm_vcpu *vcpu, __u8 failed), 794 TP_ARGS(vcpu, failed), 795 796 TP_STRUCT__entry( 797 __field( __u64, rip ) 798 __field( __u32, csbase ) 799 __field( __u8, len ) 800 __array( __u8, insn, 15 ) 801 __field( __u8, flags ) 802 __field( __u8, failed ) 803 ), 804 805 TP_fast_assign( 806 __entry->csbase = static_call(kvm_x86_get_segment_base)(vcpu, VCPU_SREG_CS); 807 __entry->len = vcpu->arch.emulate_ctxt->fetch.ptr 808 - vcpu->arch.emulate_ctxt->fetch.data; 809 __entry->rip = vcpu->arch.emulate_ctxt->_eip - __entry->len; 810 memcpy(__entry->insn, 811 vcpu->arch.emulate_ctxt->fetch.data, 812 15); 813 __entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt->mode); 814 __entry->failed = failed; 815 ), 816 817 TP_printk("%x:%llx:%s (%s)%s", 818 __entry->csbase, __entry->rip, 819 __print_hex(__entry->insn, __entry->len), 820 __print_symbolic(__entry->flags, 821 kvm_trace_symbol_emul_flags), 822 __entry->failed ? " failed" : "" 823 ) 824 ); 825 826 #define trace_kvm_emulate_insn_start(vcpu) trace_kvm_emulate_insn(vcpu, 0) 827 #define trace_kvm_emulate_insn_failed(vcpu) trace_kvm_emulate_insn(vcpu, 1) 828 829 TRACE_EVENT( 830 vcpu_match_mmio, 831 TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match), 832 TP_ARGS(gva, gpa, write, gpa_match), 833 834 TP_STRUCT__entry( 835 __field(gva_t, gva) 836 __field(gpa_t, gpa) 837 __field(bool, write) 838 __field(bool, gpa_match) 839 ), 840 841 TP_fast_assign( 842 __entry->gva = gva; 843 __entry->gpa = gpa; 844 __entry->write = write; 845 __entry->gpa_match = gpa_match 846 ), 847 848 TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa, 849 __entry->write ? "Write" : "Read", 850 __entry->gpa_match ? "GPA" : "GVA") 851 ); 852 853 TRACE_EVENT(kvm_write_tsc_offset, 854 TP_PROTO(unsigned int vcpu_id, __u64 previous_tsc_offset, 855 __u64 next_tsc_offset), 856 TP_ARGS(vcpu_id, previous_tsc_offset, next_tsc_offset), 857 858 TP_STRUCT__entry( 859 __field( unsigned int, vcpu_id ) 860 __field( __u64, previous_tsc_offset ) 861 __field( __u64, next_tsc_offset ) 862 ), 863 864 TP_fast_assign( 865 __entry->vcpu_id = vcpu_id; 866 __entry->previous_tsc_offset = previous_tsc_offset; 867 __entry->next_tsc_offset = next_tsc_offset; 868 ), 869 870 TP_printk("vcpu=%u prev=%llu next=%llu", __entry->vcpu_id, 871 __entry->previous_tsc_offset, __entry->next_tsc_offset) 872 ); 873 874 #ifdef CONFIG_X86_64 875 876 #define host_clocks \ 877 {VDSO_CLOCKMODE_NONE, "none"}, \ 878 {VDSO_CLOCKMODE_TSC, "tsc"} \ 879 880 TRACE_EVENT(kvm_update_master_clock, 881 TP_PROTO(bool use_master_clock, unsigned int host_clock, bool offset_matched), 882 TP_ARGS(use_master_clock, host_clock, offset_matched), 883 884 TP_STRUCT__entry( 885 __field( bool, use_master_clock ) 886 __field( unsigned int, host_clock ) 887 __field( bool, offset_matched ) 888 ), 889 890 TP_fast_assign( 891 __entry->use_master_clock = use_master_clock; 892 __entry->host_clock = host_clock; 893 __entry->offset_matched = offset_matched; 894 ), 895 896 TP_printk("masterclock %d hostclock %s offsetmatched %u", 897 __entry->use_master_clock, 898 __print_symbolic(__entry->host_clock, host_clocks), 899 __entry->offset_matched) 900 ); 901 902 TRACE_EVENT(kvm_track_tsc, 903 TP_PROTO(unsigned int vcpu_id, unsigned int nr_matched, 904 unsigned int online_vcpus, bool use_master_clock, 905 unsigned int host_clock), 906 TP_ARGS(vcpu_id, nr_matched, online_vcpus, use_master_clock, 907 host_clock), 908 909 TP_STRUCT__entry( 910 __field( unsigned int, vcpu_id ) 911 __field( unsigned int, nr_vcpus_matched_tsc ) 912 __field( unsigned int, online_vcpus ) 913 __field( bool, use_master_clock ) 914 __field( unsigned int, host_clock ) 915 ), 916 917 TP_fast_assign( 918 __entry->vcpu_id = vcpu_id; 919 __entry->nr_vcpus_matched_tsc = nr_matched; 920 __entry->online_vcpus = online_vcpus; 921 __entry->use_master_clock = use_master_clock; 922 __entry->host_clock = host_clock; 923 ), 924 925 TP_printk("vcpu_id %u masterclock %u offsetmatched %u nr_online %u" 926 " hostclock %s", 927 __entry->vcpu_id, __entry->use_master_clock, 928 __entry->nr_vcpus_matched_tsc, __entry->online_vcpus, 929 __print_symbolic(__entry->host_clock, host_clocks)) 930 ); 931 932 #endif /* CONFIG_X86_64 */ 933 934 /* 935 * Tracepoint for PML full VMEXIT. 936 */ 937 TRACE_EVENT(kvm_pml_full, 938 TP_PROTO(unsigned int vcpu_id), 939 TP_ARGS(vcpu_id), 940 941 TP_STRUCT__entry( 942 __field( unsigned int, vcpu_id ) 943 ), 944 945 TP_fast_assign( 946 __entry->vcpu_id = vcpu_id; 947 ), 948 949 TP_printk("vcpu %d: PML full", __entry->vcpu_id) 950 ); 951 952 TRACE_EVENT(kvm_ple_window_update, 953 TP_PROTO(unsigned int vcpu_id, unsigned int new, unsigned int old), 954 TP_ARGS(vcpu_id, new, old), 955 956 TP_STRUCT__entry( 957 __field( unsigned int, vcpu_id ) 958 __field( unsigned int, new ) 959 __field( unsigned int, old ) 960 ), 961 962 TP_fast_assign( 963 __entry->vcpu_id = vcpu_id; 964 __entry->new = new; 965 __entry->old = old; 966 ), 967 968 TP_printk("vcpu %u old %u new %u (%s)", 969 __entry->vcpu_id, __entry->old, __entry->new, 970 __entry->old < __entry->new ? "growed" : "shrinked") 971 ); 972 973 TRACE_EVENT(kvm_pvclock_update, 974 TP_PROTO(unsigned int vcpu_id, struct pvclock_vcpu_time_info *pvclock), 975 TP_ARGS(vcpu_id, pvclock), 976 977 TP_STRUCT__entry( 978 __field( unsigned int, vcpu_id ) 979 __field( __u32, version ) 980 __field( __u64, tsc_timestamp ) 981 __field( __u64, system_time ) 982 __field( __u32, tsc_to_system_mul ) 983 __field( __s8, tsc_shift ) 984 __field( __u8, flags ) 985 ), 986 987 TP_fast_assign( 988 __entry->vcpu_id = vcpu_id; 989 __entry->version = pvclock->version; 990 __entry->tsc_timestamp = pvclock->tsc_timestamp; 991 __entry->system_time = pvclock->system_time; 992 __entry->tsc_to_system_mul = pvclock->tsc_to_system_mul; 993 __entry->tsc_shift = pvclock->tsc_shift; 994 __entry->flags = pvclock->flags; 995 ), 996 997 TP_printk("vcpu_id %u, pvclock { version %u, tsc_timestamp 0x%llx, " 998 "system_time 0x%llx, tsc_to_system_mul 0x%x, tsc_shift %d, " 999 "flags 0x%x }", 1000 __entry->vcpu_id, 1001 __entry->version, 1002 __entry->tsc_timestamp, 1003 __entry->system_time, 1004 __entry->tsc_to_system_mul, 1005 __entry->tsc_shift, 1006 __entry->flags) 1007 ); 1008 1009 TRACE_EVENT(kvm_wait_lapic_expire, 1010 TP_PROTO(unsigned int vcpu_id, s64 delta), 1011 TP_ARGS(vcpu_id, delta), 1012 1013 TP_STRUCT__entry( 1014 __field( unsigned int, vcpu_id ) 1015 __field( s64, delta ) 1016 ), 1017 1018 TP_fast_assign( 1019 __entry->vcpu_id = vcpu_id; 1020 __entry->delta = delta; 1021 ), 1022 1023 TP_printk("vcpu %u: delta %lld (%s)", 1024 __entry->vcpu_id, 1025 __entry->delta, 1026 __entry->delta < 0 ? "early" : "late") 1027 ); 1028 1029 TRACE_EVENT(kvm_smm_transition, 1030 TP_PROTO(unsigned int vcpu_id, u64 smbase, bool entering), 1031 TP_ARGS(vcpu_id, smbase, entering), 1032 1033 TP_STRUCT__entry( 1034 __field( unsigned int, vcpu_id ) 1035 __field( u64, smbase ) 1036 __field( bool, entering ) 1037 ), 1038 1039 TP_fast_assign( 1040 __entry->vcpu_id = vcpu_id; 1041 __entry->smbase = smbase; 1042 __entry->entering = entering; 1043 ), 1044 1045 TP_printk("vcpu %u: %s SMM, smbase 0x%llx", 1046 __entry->vcpu_id, 1047 __entry->entering ? "entering" : "leaving", 1048 __entry->smbase) 1049 ); 1050 1051 /* 1052 * Tracepoint for VT-d posted-interrupts. 1053 */ 1054 TRACE_EVENT(kvm_pi_irte_update, 1055 TP_PROTO(unsigned int host_irq, unsigned int vcpu_id, 1056 unsigned int gsi, unsigned int gvec, 1057 u64 pi_desc_addr, bool set), 1058 TP_ARGS(host_irq, vcpu_id, gsi, gvec, pi_desc_addr, set), 1059 1060 TP_STRUCT__entry( 1061 __field( unsigned int, host_irq ) 1062 __field( unsigned int, vcpu_id ) 1063 __field( unsigned int, gsi ) 1064 __field( unsigned int, gvec ) 1065 __field( u64, pi_desc_addr ) 1066 __field( bool, set ) 1067 ), 1068 1069 TP_fast_assign( 1070 __entry->host_irq = host_irq; 1071 __entry->vcpu_id = vcpu_id; 1072 __entry->gsi = gsi; 1073 __entry->gvec = gvec; 1074 __entry->pi_desc_addr = pi_desc_addr; 1075 __entry->set = set; 1076 ), 1077 1078 TP_printk("VT-d PI is %s for irq %u, vcpu %u, gsi: 0x%x, " 1079 "gvec: 0x%x, pi_desc_addr: 0x%llx", 1080 __entry->set ? "enabled and being updated" : "disabled", 1081 __entry->host_irq, 1082 __entry->vcpu_id, 1083 __entry->gsi, 1084 __entry->gvec, 1085 __entry->pi_desc_addr) 1086 ); 1087 1088 /* 1089 * Tracepoint for kvm_hv_notify_acked_sint. 1090 */ 1091 TRACE_EVENT(kvm_hv_notify_acked_sint, 1092 TP_PROTO(int vcpu_id, u32 sint), 1093 TP_ARGS(vcpu_id, sint), 1094 1095 TP_STRUCT__entry( 1096 __field(int, vcpu_id) 1097 __field(u32, sint) 1098 ), 1099 1100 TP_fast_assign( 1101 __entry->vcpu_id = vcpu_id; 1102 __entry->sint = sint; 1103 ), 1104 1105 TP_printk("vcpu_id %d sint %u", __entry->vcpu_id, __entry->sint) 1106 ); 1107 1108 /* 1109 * Tracepoint for synic_set_irq. 1110 */ 1111 TRACE_EVENT(kvm_hv_synic_set_irq, 1112 TP_PROTO(int vcpu_id, u32 sint, int vector, int ret), 1113 TP_ARGS(vcpu_id, sint, vector, ret), 1114 1115 TP_STRUCT__entry( 1116 __field(int, vcpu_id) 1117 __field(u32, sint) 1118 __field(int, vector) 1119 __field(int, ret) 1120 ), 1121 1122 TP_fast_assign( 1123 __entry->vcpu_id = vcpu_id; 1124 __entry->sint = sint; 1125 __entry->vector = vector; 1126 __entry->ret = ret; 1127 ), 1128 1129 TP_printk("vcpu_id %d sint %u vector %d ret %d", 1130 __entry->vcpu_id, __entry->sint, __entry->vector, 1131 __entry->ret) 1132 ); 1133 1134 /* 1135 * Tracepoint for kvm_hv_synic_send_eoi. 1136 */ 1137 TRACE_EVENT(kvm_hv_synic_send_eoi, 1138 TP_PROTO(int vcpu_id, int vector), 1139 TP_ARGS(vcpu_id, vector), 1140 1141 TP_STRUCT__entry( 1142 __field(int, vcpu_id) 1143 __field(u32, sint) 1144 __field(int, vector) 1145 __field(int, ret) 1146 ), 1147 1148 TP_fast_assign( 1149 __entry->vcpu_id = vcpu_id; 1150 __entry->vector = vector; 1151 ), 1152 1153 TP_printk("vcpu_id %d vector %d", __entry->vcpu_id, __entry->vector) 1154 ); 1155 1156 /* 1157 * Tracepoint for synic_set_msr. 1158 */ 1159 TRACE_EVENT(kvm_hv_synic_set_msr, 1160 TP_PROTO(int vcpu_id, u32 msr, u64 data, bool host), 1161 TP_ARGS(vcpu_id, msr, data, host), 1162 1163 TP_STRUCT__entry( 1164 __field(int, vcpu_id) 1165 __field(u32, msr) 1166 __field(u64, data) 1167 __field(bool, host) 1168 ), 1169 1170 TP_fast_assign( 1171 __entry->vcpu_id = vcpu_id; 1172 __entry->msr = msr; 1173 __entry->data = data; 1174 __entry->host = host 1175 ), 1176 1177 TP_printk("vcpu_id %d msr 0x%x data 0x%llx host %d", 1178 __entry->vcpu_id, __entry->msr, __entry->data, __entry->host) 1179 ); 1180 1181 /* 1182 * Tracepoint for stimer_set_config. 1183 */ 1184 TRACE_EVENT(kvm_hv_stimer_set_config, 1185 TP_PROTO(int vcpu_id, int timer_index, u64 config, bool host), 1186 TP_ARGS(vcpu_id, timer_index, config, host), 1187 1188 TP_STRUCT__entry( 1189 __field(int, vcpu_id) 1190 __field(int, timer_index) 1191 __field(u64, config) 1192 __field(bool, host) 1193 ), 1194 1195 TP_fast_assign( 1196 __entry->vcpu_id = vcpu_id; 1197 __entry->timer_index = timer_index; 1198 __entry->config = config; 1199 __entry->host = host; 1200 ), 1201 1202 TP_printk("vcpu_id %d timer %d config 0x%llx host %d", 1203 __entry->vcpu_id, __entry->timer_index, __entry->config, 1204 __entry->host) 1205 ); 1206 1207 /* 1208 * Tracepoint for stimer_set_count. 1209 */ 1210 TRACE_EVENT(kvm_hv_stimer_set_count, 1211 TP_PROTO(int vcpu_id, int timer_index, u64 count, bool host), 1212 TP_ARGS(vcpu_id, timer_index, count, host), 1213 1214 TP_STRUCT__entry( 1215 __field(int, vcpu_id) 1216 __field(int, timer_index) 1217 __field(u64, count) 1218 __field(bool, host) 1219 ), 1220 1221 TP_fast_assign( 1222 __entry->vcpu_id = vcpu_id; 1223 __entry->timer_index = timer_index; 1224 __entry->count = count; 1225 __entry->host = host; 1226 ), 1227 1228 TP_printk("vcpu_id %d timer %d count %llu host %d", 1229 __entry->vcpu_id, __entry->timer_index, __entry->count, 1230 __entry->host) 1231 ); 1232 1233 /* 1234 * Tracepoint for stimer_start(periodic timer case). 1235 */ 1236 TRACE_EVENT(kvm_hv_stimer_start_periodic, 1237 TP_PROTO(int vcpu_id, int timer_index, u64 time_now, u64 exp_time), 1238 TP_ARGS(vcpu_id, timer_index, time_now, exp_time), 1239 1240 TP_STRUCT__entry( 1241 __field(int, vcpu_id) 1242 __field(int, timer_index) 1243 __field(u64, time_now) 1244 __field(u64, exp_time) 1245 ), 1246 1247 TP_fast_assign( 1248 __entry->vcpu_id = vcpu_id; 1249 __entry->timer_index = timer_index; 1250 __entry->time_now = time_now; 1251 __entry->exp_time = exp_time; 1252 ), 1253 1254 TP_printk("vcpu_id %d timer %d time_now %llu exp_time %llu", 1255 __entry->vcpu_id, __entry->timer_index, __entry->time_now, 1256 __entry->exp_time) 1257 ); 1258 1259 /* 1260 * Tracepoint for stimer_start(one-shot timer case). 1261 */ 1262 TRACE_EVENT(kvm_hv_stimer_start_one_shot, 1263 TP_PROTO(int vcpu_id, int timer_index, u64 time_now, u64 count), 1264 TP_ARGS(vcpu_id, timer_index, time_now, count), 1265 1266 TP_STRUCT__entry( 1267 __field(int, vcpu_id) 1268 __field(int, timer_index) 1269 __field(u64, time_now) 1270 __field(u64, count) 1271 ), 1272 1273 TP_fast_assign( 1274 __entry->vcpu_id = vcpu_id; 1275 __entry->timer_index = timer_index; 1276 __entry->time_now = time_now; 1277 __entry->count = count; 1278 ), 1279 1280 TP_printk("vcpu_id %d timer %d time_now %llu count %llu", 1281 __entry->vcpu_id, __entry->timer_index, __entry->time_now, 1282 __entry->count) 1283 ); 1284 1285 /* 1286 * Tracepoint for stimer_timer_callback. 1287 */ 1288 TRACE_EVENT(kvm_hv_stimer_callback, 1289 TP_PROTO(int vcpu_id, int timer_index), 1290 TP_ARGS(vcpu_id, timer_index), 1291 1292 TP_STRUCT__entry( 1293 __field(int, vcpu_id) 1294 __field(int, timer_index) 1295 ), 1296 1297 TP_fast_assign( 1298 __entry->vcpu_id = vcpu_id; 1299 __entry->timer_index = timer_index; 1300 ), 1301 1302 TP_printk("vcpu_id %d timer %d", 1303 __entry->vcpu_id, __entry->timer_index) 1304 ); 1305 1306 /* 1307 * Tracepoint for stimer_expiration. 1308 */ 1309 TRACE_EVENT(kvm_hv_stimer_expiration, 1310 TP_PROTO(int vcpu_id, int timer_index, int direct, int msg_send_result), 1311 TP_ARGS(vcpu_id, timer_index, direct, msg_send_result), 1312 1313 TP_STRUCT__entry( 1314 __field(int, vcpu_id) 1315 __field(int, timer_index) 1316 __field(int, direct) 1317 __field(int, msg_send_result) 1318 ), 1319 1320 TP_fast_assign( 1321 __entry->vcpu_id = vcpu_id; 1322 __entry->timer_index = timer_index; 1323 __entry->direct = direct; 1324 __entry->msg_send_result = msg_send_result; 1325 ), 1326 1327 TP_printk("vcpu_id %d timer %d direct %d send result %d", 1328 __entry->vcpu_id, __entry->timer_index, 1329 __entry->direct, __entry->msg_send_result) 1330 ); 1331 1332 /* 1333 * Tracepoint for stimer_cleanup. 1334 */ 1335 TRACE_EVENT(kvm_hv_stimer_cleanup, 1336 TP_PROTO(int vcpu_id, int timer_index), 1337 TP_ARGS(vcpu_id, timer_index), 1338 1339 TP_STRUCT__entry( 1340 __field(int, vcpu_id) 1341 __field(int, timer_index) 1342 ), 1343 1344 TP_fast_assign( 1345 __entry->vcpu_id = vcpu_id; 1346 __entry->timer_index = timer_index; 1347 ), 1348 1349 TP_printk("vcpu_id %d timer %d", 1350 __entry->vcpu_id, __entry->timer_index) 1351 ); 1352 1353 TRACE_EVENT(kvm_apicv_inhibit_changed, 1354 TP_PROTO(int reason, bool set, unsigned long inhibits), 1355 TP_ARGS(reason, set, inhibits), 1356 1357 TP_STRUCT__entry( 1358 __field(int, reason) 1359 __field(bool, set) 1360 __field(unsigned long, inhibits) 1361 ), 1362 1363 TP_fast_assign( 1364 __entry->reason = reason; 1365 __entry->set = set; 1366 __entry->inhibits = inhibits; 1367 ), 1368 1369 TP_printk("%s reason=%u, inhibits=0x%lx", 1370 __entry->set ? "set" : "cleared", 1371 __entry->reason, __entry->inhibits) 1372 ); 1373 1374 TRACE_EVENT(kvm_apicv_accept_irq, 1375 TP_PROTO(__u32 apicid, __u16 dm, __u16 tm, __u8 vec), 1376 TP_ARGS(apicid, dm, tm, vec), 1377 1378 TP_STRUCT__entry( 1379 __field( __u32, apicid ) 1380 __field( __u16, dm ) 1381 __field( __u16, tm ) 1382 __field( __u8, vec ) 1383 ), 1384 1385 TP_fast_assign( 1386 __entry->apicid = apicid; 1387 __entry->dm = dm; 1388 __entry->tm = tm; 1389 __entry->vec = vec; 1390 ), 1391 1392 TP_printk("apicid %x vec %u (%s|%s)", 1393 __entry->apicid, __entry->vec, 1394 __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode), 1395 __entry->tm ? "level" : "edge") 1396 ); 1397 1398 /* 1399 * Tracepoint for AMD AVIC 1400 */ 1401 TRACE_EVENT(kvm_avic_incomplete_ipi, 1402 TP_PROTO(u32 vcpu, u32 icrh, u32 icrl, u32 id, u32 index), 1403 TP_ARGS(vcpu, icrh, icrl, id, index), 1404 1405 TP_STRUCT__entry( 1406 __field(u32, vcpu) 1407 __field(u32, icrh) 1408 __field(u32, icrl) 1409 __field(u32, id) 1410 __field(u32, index) 1411 ), 1412 1413 TP_fast_assign( 1414 __entry->vcpu = vcpu; 1415 __entry->icrh = icrh; 1416 __entry->icrl = icrl; 1417 __entry->id = id; 1418 __entry->index = index; 1419 ), 1420 1421 TP_printk("vcpu=%u, icrh:icrl=%#010x:%08x, id=%u, index=%u", 1422 __entry->vcpu, __entry->icrh, __entry->icrl, 1423 __entry->id, __entry->index) 1424 ); 1425 1426 TRACE_EVENT(kvm_avic_unaccelerated_access, 1427 TP_PROTO(u32 vcpu, u32 offset, bool ft, bool rw, u32 vec), 1428 TP_ARGS(vcpu, offset, ft, rw, vec), 1429 1430 TP_STRUCT__entry( 1431 __field(u32, vcpu) 1432 __field(u32, offset) 1433 __field(bool, ft) 1434 __field(bool, rw) 1435 __field(u32, vec) 1436 ), 1437 1438 TP_fast_assign( 1439 __entry->vcpu = vcpu; 1440 __entry->offset = offset; 1441 __entry->ft = ft; 1442 __entry->rw = rw; 1443 __entry->vec = vec; 1444 ), 1445 1446 TP_printk("vcpu=%u, offset=%#x(%s), %s, %s, vec=%#x", 1447 __entry->vcpu, 1448 __entry->offset, 1449 __print_symbolic(__entry->offset, kvm_trace_symbol_apic), 1450 __entry->ft ? "trap" : "fault", 1451 __entry->rw ? "write" : "read", 1452 __entry->vec) 1453 ); 1454 1455 TRACE_EVENT(kvm_avic_ga_log, 1456 TP_PROTO(u32 vmid, u32 vcpuid), 1457 TP_ARGS(vmid, vcpuid), 1458 1459 TP_STRUCT__entry( 1460 __field(u32, vmid) 1461 __field(u32, vcpuid) 1462 ), 1463 1464 TP_fast_assign( 1465 __entry->vmid = vmid; 1466 __entry->vcpuid = vcpuid; 1467 ), 1468 1469 TP_printk("vmid=%u, vcpuid=%u", 1470 __entry->vmid, __entry->vcpuid) 1471 ); 1472 1473 TRACE_EVENT(kvm_avic_kick_vcpu_slowpath, 1474 TP_PROTO(u32 icrh, u32 icrl, u32 index), 1475 TP_ARGS(icrh, icrl, index), 1476 1477 TP_STRUCT__entry( 1478 __field(u32, icrh) 1479 __field(u32, icrl) 1480 __field(u32, index) 1481 ), 1482 1483 TP_fast_assign( 1484 __entry->icrh = icrh; 1485 __entry->icrl = icrl; 1486 __entry->index = index; 1487 ), 1488 1489 TP_printk("icrh:icrl=%#08x:%08x, index=%u", 1490 __entry->icrh, __entry->icrl, __entry->index) 1491 ); 1492 1493 TRACE_EVENT(kvm_avic_doorbell, 1494 TP_PROTO(u32 vcpuid, u32 apicid), 1495 TP_ARGS(vcpuid, apicid), 1496 1497 TP_STRUCT__entry( 1498 __field(u32, vcpuid) 1499 __field(u32, apicid) 1500 ), 1501 1502 TP_fast_assign( 1503 __entry->vcpuid = vcpuid; 1504 __entry->apicid = apicid; 1505 ), 1506 1507 TP_printk("vcpuid=%u, apicid=%u", 1508 __entry->vcpuid, __entry->apicid) 1509 ); 1510 1511 TRACE_EVENT(kvm_hv_timer_state, 1512 TP_PROTO(unsigned int vcpu_id, unsigned int hv_timer_in_use), 1513 TP_ARGS(vcpu_id, hv_timer_in_use), 1514 TP_STRUCT__entry( 1515 __field(unsigned int, vcpu_id) 1516 __field(unsigned int, hv_timer_in_use) 1517 ), 1518 TP_fast_assign( 1519 __entry->vcpu_id = vcpu_id; 1520 __entry->hv_timer_in_use = hv_timer_in_use; 1521 ), 1522 TP_printk("vcpu_id %x hv_timer %x", 1523 __entry->vcpu_id, 1524 __entry->hv_timer_in_use) 1525 ); 1526 1527 /* 1528 * Tracepoint for kvm_hv_flush_tlb. 1529 */ 1530 TRACE_EVENT(kvm_hv_flush_tlb, 1531 TP_PROTO(u64 processor_mask, u64 address_space, u64 flags), 1532 TP_ARGS(processor_mask, address_space, flags), 1533 1534 TP_STRUCT__entry( 1535 __field(u64, processor_mask) 1536 __field(u64, address_space) 1537 __field(u64, flags) 1538 ), 1539 1540 TP_fast_assign( 1541 __entry->processor_mask = processor_mask; 1542 __entry->address_space = address_space; 1543 __entry->flags = flags; 1544 ), 1545 1546 TP_printk("processor_mask 0x%llx address_space 0x%llx flags 0x%llx", 1547 __entry->processor_mask, __entry->address_space, 1548 __entry->flags) 1549 ); 1550 1551 /* 1552 * Tracepoint for kvm_hv_flush_tlb_ex. 1553 */ 1554 TRACE_EVENT(kvm_hv_flush_tlb_ex, 1555 TP_PROTO(u64 valid_bank_mask, u64 format, u64 address_space, u64 flags), 1556 TP_ARGS(valid_bank_mask, format, address_space, flags), 1557 1558 TP_STRUCT__entry( 1559 __field(u64, valid_bank_mask) 1560 __field(u64, format) 1561 __field(u64, address_space) 1562 __field(u64, flags) 1563 ), 1564 1565 TP_fast_assign( 1566 __entry->valid_bank_mask = valid_bank_mask; 1567 __entry->format = format; 1568 __entry->address_space = address_space; 1569 __entry->flags = flags; 1570 ), 1571 1572 TP_printk("valid_bank_mask 0x%llx format 0x%llx " 1573 "address_space 0x%llx flags 0x%llx", 1574 __entry->valid_bank_mask, __entry->format, 1575 __entry->address_space, __entry->flags) 1576 ); 1577 1578 /* 1579 * Tracepoints for kvm_hv_send_ipi. 1580 */ 1581 TRACE_EVENT(kvm_hv_send_ipi, 1582 TP_PROTO(u32 vector, u64 processor_mask), 1583 TP_ARGS(vector, processor_mask), 1584 1585 TP_STRUCT__entry( 1586 __field(u32, vector) 1587 __field(u64, processor_mask) 1588 ), 1589 1590 TP_fast_assign( 1591 __entry->vector = vector; 1592 __entry->processor_mask = processor_mask; 1593 ), 1594 1595 TP_printk("vector %x processor_mask 0x%llx", 1596 __entry->vector, __entry->processor_mask) 1597 ); 1598 1599 TRACE_EVENT(kvm_hv_send_ipi_ex, 1600 TP_PROTO(u32 vector, u64 format, u64 valid_bank_mask), 1601 TP_ARGS(vector, format, valid_bank_mask), 1602 1603 TP_STRUCT__entry( 1604 __field(u32, vector) 1605 __field(u64, format) 1606 __field(u64, valid_bank_mask) 1607 ), 1608 1609 TP_fast_assign( 1610 __entry->vector = vector; 1611 __entry->format = format; 1612 __entry->valid_bank_mask = valid_bank_mask; 1613 ), 1614 1615 TP_printk("vector %x format %llx valid_bank_mask 0x%llx", 1616 __entry->vector, __entry->format, 1617 __entry->valid_bank_mask) 1618 ); 1619 1620 TRACE_EVENT(kvm_pv_tlb_flush, 1621 TP_PROTO(unsigned int vcpu_id, bool need_flush_tlb), 1622 TP_ARGS(vcpu_id, need_flush_tlb), 1623 1624 TP_STRUCT__entry( 1625 __field( unsigned int, vcpu_id ) 1626 __field( bool, need_flush_tlb ) 1627 ), 1628 1629 TP_fast_assign( 1630 __entry->vcpu_id = vcpu_id; 1631 __entry->need_flush_tlb = need_flush_tlb; 1632 ), 1633 1634 TP_printk("vcpu %u need_flush_tlb %s", __entry->vcpu_id, 1635 __entry->need_flush_tlb ? "true" : "false") 1636 ); 1637 1638 /* 1639 * Tracepoint for failed nested VMX VM-Enter. 1640 */ 1641 TRACE_EVENT(kvm_nested_vmenter_failed, 1642 TP_PROTO(const char *msg, u32 err), 1643 TP_ARGS(msg, err), 1644 1645 TP_STRUCT__entry( 1646 __string(msg, msg) 1647 __field(u32, err) 1648 ), 1649 1650 TP_fast_assign( 1651 __assign_str(msg, msg); 1652 __entry->err = err; 1653 ), 1654 1655 TP_printk("%s%s", __get_str(msg), !__entry->err ? "" : 1656 __print_symbolic(__entry->err, VMX_VMENTER_INSTRUCTION_ERRORS)) 1657 ); 1658 1659 /* 1660 * Tracepoint for syndbg_set_msr. 1661 */ 1662 TRACE_EVENT(kvm_hv_syndbg_set_msr, 1663 TP_PROTO(int vcpu_id, u32 vp_index, u32 msr, u64 data), 1664 TP_ARGS(vcpu_id, vp_index, msr, data), 1665 1666 TP_STRUCT__entry( 1667 __field(int, vcpu_id) 1668 __field(u32, vp_index) 1669 __field(u32, msr) 1670 __field(u64, data) 1671 ), 1672 1673 TP_fast_assign( 1674 __entry->vcpu_id = vcpu_id; 1675 __entry->vp_index = vp_index; 1676 __entry->msr = msr; 1677 __entry->data = data; 1678 ), 1679 1680 TP_printk("vcpu_id %d vp_index %u msr 0x%x data 0x%llx", 1681 __entry->vcpu_id, __entry->vp_index, __entry->msr, 1682 __entry->data) 1683 ); 1684 1685 /* 1686 * Tracepoint for syndbg_get_msr. 1687 */ 1688 TRACE_EVENT(kvm_hv_syndbg_get_msr, 1689 TP_PROTO(int vcpu_id, u32 vp_index, u32 msr, u64 data), 1690 TP_ARGS(vcpu_id, vp_index, msr, data), 1691 1692 TP_STRUCT__entry( 1693 __field(int, vcpu_id) 1694 __field(u32, vp_index) 1695 __field(u32, msr) 1696 __field(u64, data) 1697 ), 1698 1699 TP_fast_assign( 1700 __entry->vcpu_id = vcpu_id; 1701 __entry->vp_index = vp_index; 1702 __entry->msr = msr; 1703 __entry->data = data; 1704 ), 1705 1706 TP_printk("vcpu_id %d vp_index %u msr 0x%x data 0x%llx", 1707 __entry->vcpu_id, __entry->vp_index, __entry->msr, 1708 __entry->data) 1709 ); 1710 1711 /* 1712 * Tracepoint for the start of VMGEXIT processing 1713 */ 1714 TRACE_EVENT(kvm_vmgexit_enter, 1715 TP_PROTO(unsigned int vcpu_id, struct ghcb *ghcb), 1716 TP_ARGS(vcpu_id, ghcb), 1717 1718 TP_STRUCT__entry( 1719 __field(unsigned int, vcpu_id) 1720 __field(u64, exit_reason) 1721 __field(u64, info1) 1722 __field(u64, info2) 1723 ), 1724 1725 TP_fast_assign( 1726 __entry->vcpu_id = vcpu_id; 1727 __entry->exit_reason = ghcb->save.sw_exit_code; 1728 __entry->info1 = ghcb->save.sw_exit_info_1; 1729 __entry->info2 = ghcb->save.sw_exit_info_2; 1730 ), 1731 1732 TP_printk("vcpu %u, exit_reason %llx, exit_info1 %llx, exit_info2 %llx", 1733 __entry->vcpu_id, __entry->exit_reason, 1734 __entry->info1, __entry->info2) 1735 ); 1736 1737 /* 1738 * Tracepoint for the end of VMGEXIT processing 1739 */ 1740 TRACE_EVENT(kvm_vmgexit_exit, 1741 TP_PROTO(unsigned int vcpu_id, struct ghcb *ghcb), 1742 TP_ARGS(vcpu_id, ghcb), 1743 1744 TP_STRUCT__entry( 1745 __field(unsigned int, vcpu_id) 1746 __field(u64, exit_reason) 1747 __field(u64, info1) 1748 __field(u64, info2) 1749 ), 1750 1751 TP_fast_assign( 1752 __entry->vcpu_id = vcpu_id; 1753 __entry->exit_reason = ghcb->save.sw_exit_code; 1754 __entry->info1 = ghcb->save.sw_exit_info_1; 1755 __entry->info2 = ghcb->save.sw_exit_info_2; 1756 ), 1757 1758 TP_printk("vcpu %u, exit_reason %llx, exit_info1 %llx, exit_info2 %llx", 1759 __entry->vcpu_id, __entry->exit_reason, 1760 __entry->info1, __entry->info2) 1761 ); 1762 1763 /* 1764 * Tracepoint for the start of VMGEXIT MSR procotol processing 1765 */ 1766 TRACE_EVENT(kvm_vmgexit_msr_protocol_enter, 1767 TP_PROTO(unsigned int vcpu_id, u64 ghcb_gpa), 1768 TP_ARGS(vcpu_id, ghcb_gpa), 1769 1770 TP_STRUCT__entry( 1771 __field(unsigned int, vcpu_id) 1772 __field(u64, ghcb_gpa) 1773 ), 1774 1775 TP_fast_assign( 1776 __entry->vcpu_id = vcpu_id; 1777 __entry->ghcb_gpa = ghcb_gpa; 1778 ), 1779 1780 TP_printk("vcpu %u, ghcb_gpa %016llx", 1781 __entry->vcpu_id, __entry->ghcb_gpa) 1782 ); 1783 1784 /* 1785 * Tracepoint for the end of VMGEXIT MSR procotol processing 1786 */ 1787 TRACE_EVENT(kvm_vmgexit_msr_protocol_exit, 1788 TP_PROTO(unsigned int vcpu_id, u64 ghcb_gpa, int result), 1789 TP_ARGS(vcpu_id, ghcb_gpa, result), 1790 1791 TP_STRUCT__entry( 1792 __field(unsigned int, vcpu_id) 1793 __field(u64, ghcb_gpa) 1794 __field(int, result) 1795 ), 1796 1797 TP_fast_assign( 1798 __entry->vcpu_id = vcpu_id; 1799 __entry->ghcb_gpa = ghcb_gpa; 1800 __entry->result = result; 1801 ), 1802 1803 TP_printk("vcpu %u, ghcb_gpa %016llx, result %d", 1804 __entry->vcpu_id, __entry->ghcb_gpa, __entry->result) 1805 ); 1806 1807 #endif /* _TRACE_KVM_H */ 1808 1809 #undef TRACE_INCLUDE_PATH 1810 #define TRACE_INCLUDE_PATH ../../arch/x86/kvm 1811 #undef TRACE_INCLUDE_FILE 1812 #define TRACE_INCLUDE_FILE trace 1813 1814 /* This part must be outside protection */ 1815 #include <trace/define_trace.h> 1816