1 /* SPDX-License-Identifier: GPL-2.0 */ 2 #if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ) 3 #define _TRACE_KVM_H 4 5 #include <linux/tracepoint.h> 6 #include <asm/vmx.h> 7 #include <asm/svm.h> 8 #include <asm/clocksource.h> 9 #include <asm/pvclock-abi.h> 10 11 #undef TRACE_SYSTEM 12 #define TRACE_SYSTEM kvm 13 14 /* 15 * Tracepoint for guest mode entry. 16 */ 17 TRACE_EVENT(kvm_entry, 18 TP_PROTO(unsigned int vcpu_id), 19 TP_ARGS(vcpu_id), 20 21 TP_STRUCT__entry( 22 __field( unsigned int, vcpu_id ) 23 ), 24 25 TP_fast_assign( 26 __entry->vcpu_id = vcpu_id; 27 ), 28 29 TP_printk("vcpu %u", __entry->vcpu_id) 30 ); 31 32 /* 33 * Tracepoint for hypercall. 34 */ 35 TRACE_EVENT(kvm_hypercall, 36 TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1, 37 unsigned long a2, unsigned long a3), 38 TP_ARGS(nr, a0, a1, a2, a3), 39 40 TP_STRUCT__entry( 41 __field( unsigned long, nr ) 42 __field( unsigned long, a0 ) 43 __field( unsigned long, a1 ) 44 __field( unsigned long, a2 ) 45 __field( unsigned long, a3 ) 46 ), 47 48 TP_fast_assign( 49 __entry->nr = nr; 50 __entry->a0 = a0; 51 __entry->a1 = a1; 52 __entry->a2 = a2; 53 __entry->a3 = a3; 54 ), 55 56 TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx", 57 __entry->nr, __entry->a0, __entry->a1, __entry->a2, 58 __entry->a3) 59 ); 60 61 /* 62 * Tracepoint for hypercall. 63 */ 64 TRACE_EVENT(kvm_hv_hypercall, 65 TP_PROTO(__u16 code, bool fast, __u16 rep_cnt, __u16 rep_idx, 66 __u64 ingpa, __u64 outgpa), 67 TP_ARGS(code, fast, rep_cnt, rep_idx, ingpa, outgpa), 68 69 TP_STRUCT__entry( 70 __field( __u16, rep_cnt ) 71 __field( __u16, rep_idx ) 72 __field( __u64, ingpa ) 73 __field( __u64, outgpa ) 74 __field( __u16, code ) 75 __field( bool, fast ) 76 ), 77 78 TP_fast_assign( 79 __entry->rep_cnt = rep_cnt; 80 __entry->rep_idx = rep_idx; 81 __entry->ingpa = ingpa; 82 __entry->outgpa = outgpa; 83 __entry->code = code; 84 __entry->fast = fast; 85 ), 86 87 TP_printk("code 0x%x %s cnt 0x%x idx 0x%x in 0x%llx out 0x%llx", 88 __entry->code, __entry->fast ? "fast" : "slow", 89 __entry->rep_cnt, __entry->rep_idx, __entry->ingpa, 90 __entry->outgpa) 91 ); 92 93 /* 94 * Tracepoint for PIO. 95 */ 96 97 #define KVM_PIO_IN 0 98 #define KVM_PIO_OUT 1 99 100 TRACE_EVENT(kvm_pio, 101 TP_PROTO(unsigned int rw, unsigned int port, unsigned int size, 102 unsigned int count, void *data), 103 TP_ARGS(rw, port, size, count, data), 104 105 TP_STRUCT__entry( 106 __field( unsigned int, rw ) 107 __field( unsigned int, port ) 108 __field( unsigned int, size ) 109 __field( unsigned int, count ) 110 __field( unsigned int, val ) 111 ), 112 113 TP_fast_assign( 114 __entry->rw = rw; 115 __entry->port = port; 116 __entry->size = size; 117 __entry->count = count; 118 if (size == 1) 119 __entry->val = *(unsigned char *)data; 120 else if (size == 2) 121 __entry->val = *(unsigned short *)data; 122 else 123 __entry->val = *(unsigned int *)data; 124 ), 125 126 TP_printk("pio_%s at 0x%x size %d count %d val 0x%x %s", 127 __entry->rw ? "write" : "read", 128 __entry->port, __entry->size, __entry->count, __entry->val, 129 __entry->count > 1 ? "(...)" : "") 130 ); 131 132 /* 133 * Tracepoint for fast mmio. 134 */ 135 TRACE_EVENT(kvm_fast_mmio, 136 TP_PROTO(u64 gpa), 137 TP_ARGS(gpa), 138 139 TP_STRUCT__entry( 140 __field(u64, gpa) 141 ), 142 143 TP_fast_assign( 144 __entry->gpa = gpa; 145 ), 146 147 TP_printk("fast mmio at gpa 0x%llx", __entry->gpa) 148 ); 149 150 /* 151 * Tracepoint for cpuid. 152 */ 153 TRACE_EVENT(kvm_cpuid, 154 TP_PROTO(unsigned int function, unsigned int index, unsigned long rax, 155 unsigned long rbx, unsigned long rcx, unsigned long rdx, 156 bool found, bool used_max_basic), 157 TP_ARGS(function, index, rax, rbx, rcx, rdx, found, used_max_basic), 158 159 TP_STRUCT__entry( 160 __field( unsigned int, function ) 161 __field( unsigned int, index ) 162 __field( unsigned long, rax ) 163 __field( unsigned long, rbx ) 164 __field( unsigned long, rcx ) 165 __field( unsigned long, rdx ) 166 __field( bool, found ) 167 __field( bool, used_max_basic ) 168 ), 169 170 TP_fast_assign( 171 __entry->function = function; 172 __entry->index = index; 173 __entry->rax = rax; 174 __entry->rbx = rbx; 175 __entry->rcx = rcx; 176 __entry->rdx = rdx; 177 __entry->found = found; 178 __entry->used_max_basic = used_max_basic; 179 ), 180 181 TP_printk("func %x idx %x rax %lx rbx %lx rcx %lx rdx %lx, cpuid entry %s%s", 182 __entry->function, __entry->index, __entry->rax, 183 __entry->rbx, __entry->rcx, __entry->rdx, 184 __entry->found ? "found" : "not found", 185 __entry->used_max_basic ? ", used max basic" : "") 186 ); 187 188 #define AREG(x) { APIC_##x, "APIC_" #x } 189 190 #define kvm_trace_symbol_apic \ 191 AREG(ID), AREG(LVR), AREG(TASKPRI), AREG(ARBPRI), AREG(PROCPRI), \ 192 AREG(EOI), AREG(RRR), AREG(LDR), AREG(DFR), AREG(SPIV), AREG(ISR), \ 193 AREG(TMR), AREG(IRR), AREG(ESR), AREG(ICR), AREG(ICR2), AREG(LVTT), \ 194 AREG(LVTTHMR), AREG(LVTPC), AREG(LVT0), AREG(LVT1), AREG(LVTERR), \ 195 AREG(TMICT), AREG(TMCCT), AREG(TDCR), AREG(SELF_IPI), AREG(EFEAT), \ 196 AREG(ECTRL) 197 /* 198 * Tracepoint for apic access. 199 */ 200 TRACE_EVENT(kvm_apic, 201 TP_PROTO(unsigned int rw, unsigned int reg, unsigned int val), 202 TP_ARGS(rw, reg, val), 203 204 TP_STRUCT__entry( 205 __field( unsigned int, rw ) 206 __field( unsigned int, reg ) 207 __field( unsigned int, val ) 208 ), 209 210 TP_fast_assign( 211 __entry->rw = rw; 212 __entry->reg = reg; 213 __entry->val = val; 214 ), 215 216 TP_printk("apic_%s %s = 0x%x", 217 __entry->rw ? "write" : "read", 218 __print_symbolic(__entry->reg, kvm_trace_symbol_apic), 219 __entry->val) 220 ); 221 222 #define trace_kvm_apic_read(reg, val) trace_kvm_apic(0, reg, val) 223 #define trace_kvm_apic_write(reg, val) trace_kvm_apic(1, reg, val) 224 225 #define KVM_ISA_VMX 1 226 #define KVM_ISA_SVM 2 227 228 /* 229 * Tracepoint for kvm guest exit: 230 */ 231 TRACE_EVENT(kvm_exit, 232 TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu, u32 isa), 233 TP_ARGS(exit_reason, vcpu, isa), 234 235 TP_STRUCT__entry( 236 __field( unsigned int, exit_reason ) 237 __field( unsigned long, guest_rip ) 238 __field( u32, isa ) 239 __field( u64, info1 ) 240 __field( u64, info2 ) 241 __field( unsigned int, vcpu_id ) 242 ), 243 244 TP_fast_assign( 245 __entry->exit_reason = exit_reason; 246 __entry->guest_rip = kvm_rip_read(vcpu); 247 __entry->isa = isa; 248 __entry->vcpu_id = vcpu->vcpu_id; 249 kvm_x86_ops.get_exit_info(vcpu, &__entry->info1, 250 &__entry->info2); 251 ), 252 253 TP_printk("vcpu %u reason %s rip 0x%lx info %llx %llx", 254 __entry->vcpu_id, 255 (__entry->isa == KVM_ISA_VMX) ? 256 __print_symbolic(__entry->exit_reason, VMX_EXIT_REASONS) : 257 __print_symbolic(__entry->exit_reason, SVM_EXIT_REASONS), 258 __entry->guest_rip, __entry->info1, __entry->info2) 259 ); 260 261 /* 262 * Tracepoint for kvm interrupt injection: 263 */ 264 TRACE_EVENT(kvm_inj_virq, 265 TP_PROTO(unsigned int irq), 266 TP_ARGS(irq), 267 268 TP_STRUCT__entry( 269 __field( unsigned int, irq ) 270 ), 271 272 TP_fast_assign( 273 __entry->irq = irq; 274 ), 275 276 TP_printk("irq %u", __entry->irq) 277 ); 278 279 #define EXS(x) { x##_VECTOR, "#" #x } 280 281 #define kvm_trace_sym_exc \ 282 EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM), \ 283 EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF), \ 284 EXS(MF), EXS(AC), EXS(MC) 285 286 /* 287 * Tracepoint for kvm interrupt injection: 288 */ 289 TRACE_EVENT(kvm_inj_exception, 290 TP_PROTO(unsigned exception, bool has_error, unsigned error_code), 291 TP_ARGS(exception, has_error, error_code), 292 293 TP_STRUCT__entry( 294 __field( u8, exception ) 295 __field( u8, has_error ) 296 __field( u32, error_code ) 297 ), 298 299 TP_fast_assign( 300 __entry->exception = exception; 301 __entry->has_error = has_error; 302 __entry->error_code = error_code; 303 ), 304 305 TP_printk("%s (0x%x)", 306 __print_symbolic(__entry->exception, kvm_trace_sym_exc), 307 /* FIXME: don't print error_code if not present */ 308 __entry->has_error ? __entry->error_code : 0) 309 ); 310 311 /* 312 * Tracepoint for page fault. 313 */ 314 TRACE_EVENT(kvm_page_fault, 315 TP_PROTO(unsigned long fault_address, unsigned int error_code), 316 TP_ARGS(fault_address, error_code), 317 318 TP_STRUCT__entry( 319 __field( unsigned long, fault_address ) 320 __field( unsigned int, error_code ) 321 ), 322 323 TP_fast_assign( 324 __entry->fault_address = fault_address; 325 __entry->error_code = error_code; 326 ), 327 328 TP_printk("address %lx error_code %x", 329 __entry->fault_address, __entry->error_code) 330 ); 331 332 /* 333 * Tracepoint for guest MSR access. 334 */ 335 TRACE_EVENT(kvm_msr, 336 TP_PROTO(unsigned write, u32 ecx, u64 data, bool exception), 337 TP_ARGS(write, ecx, data, exception), 338 339 TP_STRUCT__entry( 340 __field( unsigned, write ) 341 __field( u32, ecx ) 342 __field( u64, data ) 343 __field( u8, exception ) 344 ), 345 346 TP_fast_assign( 347 __entry->write = write; 348 __entry->ecx = ecx; 349 __entry->data = data; 350 __entry->exception = exception; 351 ), 352 353 TP_printk("msr_%s %x = 0x%llx%s", 354 __entry->write ? "write" : "read", 355 __entry->ecx, __entry->data, 356 __entry->exception ? " (#GP)" : "") 357 ); 358 359 #define trace_kvm_msr_read(ecx, data) trace_kvm_msr(0, ecx, data, false) 360 #define trace_kvm_msr_write(ecx, data) trace_kvm_msr(1, ecx, data, false) 361 #define trace_kvm_msr_read_ex(ecx) trace_kvm_msr(0, ecx, 0, true) 362 #define trace_kvm_msr_write_ex(ecx, data) trace_kvm_msr(1, ecx, data, true) 363 364 /* 365 * Tracepoint for guest CR access. 366 */ 367 TRACE_EVENT(kvm_cr, 368 TP_PROTO(unsigned int rw, unsigned int cr, unsigned long val), 369 TP_ARGS(rw, cr, val), 370 371 TP_STRUCT__entry( 372 __field( unsigned int, rw ) 373 __field( unsigned int, cr ) 374 __field( unsigned long, val ) 375 ), 376 377 TP_fast_assign( 378 __entry->rw = rw; 379 __entry->cr = cr; 380 __entry->val = val; 381 ), 382 383 TP_printk("cr_%s %x = 0x%lx", 384 __entry->rw ? "write" : "read", 385 __entry->cr, __entry->val) 386 ); 387 388 #define trace_kvm_cr_read(cr, val) trace_kvm_cr(0, cr, val) 389 #define trace_kvm_cr_write(cr, val) trace_kvm_cr(1, cr, val) 390 391 TRACE_EVENT(kvm_pic_set_irq, 392 TP_PROTO(__u8 chip, __u8 pin, __u8 elcr, __u8 imr, bool coalesced), 393 TP_ARGS(chip, pin, elcr, imr, coalesced), 394 395 TP_STRUCT__entry( 396 __field( __u8, chip ) 397 __field( __u8, pin ) 398 __field( __u8, elcr ) 399 __field( __u8, imr ) 400 __field( bool, coalesced ) 401 ), 402 403 TP_fast_assign( 404 __entry->chip = chip; 405 __entry->pin = pin; 406 __entry->elcr = elcr; 407 __entry->imr = imr; 408 __entry->coalesced = coalesced; 409 ), 410 411 TP_printk("chip %u pin %u (%s%s)%s", 412 __entry->chip, __entry->pin, 413 (__entry->elcr & (1 << __entry->pin)) ? "level":"edge", 414 (__entry->imr & (1 << __entry->pin)) ? "|masked":"", 415 __entry->coalesced ? " (coalesced)" : "") 416 ); 417 418 #define kvm_apic_dst_shorthand \ 419 {0x0, "dst"}, \ 420 {0x1, "self"}, \ 421 {0x2, "all"}, \ 422 {0x3, "all-but-self"} 423 424 TRACE_EVENT(kvm_apic_ipi, 425 TP_PROTO(__u32 icr_low, __u32 dest_id), 426 TP_ARGS(icr_low, dest_id), 427 428 TP_STRUCT__entry( 429 __field( __u32, icr_low ) 430 __field( __u32, dest_id ) 431 ), 432 433 TP_fast_assign( 434 __entry->icr_low = icr_low; 435 __entry->dest_id = dest_id; 436 ), 437 438 TP_printk("dst %x vec %u (%s|%s|%s|%s|%s)", 439 __entry->dest_id, (u8)__entry->icr_low, 440 __print_symbolic((__entry->icr_low >> 8 & 0x7), 441 kvm_deliver_mode), 442 (__entry->icr_low & (1<<11)) ? "logical" : "physical", 443 (__entry->icr_low & (1<<14)) ? "assert" : "de-assert", 444 (__entry->icr_low & (1<<15)) ? "level" : "edge", 445 __print_symbolic((__entry->icr_low >> 18 & 0x3), 446 kvm_apic_dst_shorthand)) 447 ); 448 449 TRACE_EVENT(kvm_apic_accept_irq, 450 TP_PROTO(__u32 apicid, __u16 dm, __u16 tm, __u8 vec), 451 TP_ARGS(apicid, dm, tm, vec), 452 453 TP_STRUCT__entry( 454 __field( __u32, apicid ) 455 __field( __u16, dm ) 456 __field( __u16, tm ) 457 __field( __u8, vec ) 458 ), 459 460 TP_fast_assign( 461 __entry->apicid = apicid; 462 __entry->dm = dm; 463 __entry->tm = tm; 464 __entry->vec = vec; 465 ), 466 467 TP_printk("apicid %x vec %u (%s|%s)", 468 __entry->apicid, __entry->vec, 469 __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode), 470 __entry->tm ? "level" : "edge") 471 ); 472 473 TRACE_EVENT(kvm_eoi, 474 TP_PROTO(struct kvm_lapic *apic, int vector), 475 TP_ARGS(apic, vector), 476 477 TP_STRUCT__entry( 478 __field( __u32, apicid ) 479 __field( int, vector ) 480 ), 481 482 TP_fast_assign( 483 __entry->apicid = apic->vcpu->vcpu_id; 484 __entry->vector = vector; 485 ), 486 487 TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector) 488 ); 489 490 TRACE_EVENT(kvm_pv_eoi, 491 TP_PROTO(struct kvm_lapic *apic, int vector), 492 TP_ARGS(apic, vector), 493 494 TP_STRUCT__entry( 495 __field( __u32, apicid ) 496 __field( int, vector ) 497 ), 498 499 TP_fast_assign( 500 __entry->apicid = apic->vcpu->vcpu_id; 501 __entry->vector = vector; 502 ), 503 504 TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector) 505 ); 506 507 /* 508 * Tracepoint for nested VMRUN 509 */ 510 TRACE_EVENT(kvm_nested_vmrun, 511 TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl, 512 __u32 event_inj, bool npt), 513 TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, npt), 514 515 TP_STRUCT__entry( 516 __field( __u64, rip ) 517 __field( __u64, vmcb ) 518 __field( __u64, nested_rip ) 519 __field( __u32, int_ctl ) 520 __field( __u32, event_inj ) 521 __field( bool, npt ) 522 ), 523 524 TP_fast_assign( 525 __entry->rip = rip; 526 __entry->vmcb = vmcb; 527 __entry->nested_rip = nested_rip; 528 __entry->int_ctl = int_ctl; 529 __entry->event_inj = event_inj; 530 __entry->npt = npt; 531 ), 532 533 TP_printk("rip: 0x%016llx vmcb: 0x%016llx nrip: 0x%016llx int_ctl: 0x%08x " 534 "event_inj: 0x%08x npt: %s", 535 __entry->rip, __entry->vmcb, __entry->nested_rip, 536 __entry->int_ctl, __entry->event_inj, 537 __entry->npt ? "on" : "off") 538 ); 539 540 TRACE_EVENT(kvm_nested_intercepts, 541 TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions, __u64 intercept), 542 TP_ARGS(cr_read, cr_write, exceptions, intercept), 543 544 TP_STRUCT__entry( 545 __field( __u16, cr_read ) 546 __field( __u16, cr_write ) 547 __field( __u32, exceptions ) 548 __field( __u64, intercept ) 549 ), 550 551 TP_fast_assign( 552 __entry->cr_read = cr_read; 553 __entry->cr_write = cr_write; 554 __entry->exceptions = exceptions; 555 __entry->intercept = intercept; 556 ), 557 558 TP_printk("cr_read: %04x cr_write: %04x excp: %08x intercept: %016llx", 559 __entry->cr_read, __entry->cr_write, __entry->exceptions, 560 __entry->intercept) 561 ); 562 /* 563 * Tracepoint for #VMEXIT while nested 564 */ 565 TRACE_EVENT(kvm_nested_vmexit, 566 TP_PROTO(__u64 rip, __u32 exit_code, 567 __u64 exit_info1, __u64 exit_info2, 568 __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa), 569 TP_ARGS(rip, exit_code, exit_info1, exit_info2, 570 exit_int_info, exit_int_info_err, isa), 571 572 TP_STRUCT__entry( 573 __field( __u64, rip ) 574 __field( __u32, exit_code ) 575 __field( __u64, exit_info1 ) 576 __field( __u64, exit_info2 ) 577 __field( __u32, exit_int_info ) 578 __field( __u32, exit_int_info_err ) 579 __field( __u32, isa ) 580 ), 581 582 TP_fast_assign( 583 __entry->rip = rip; 584 __entry->exit_code = exit_code; 585 __entry->exit_info1 = exit_info1; 586 __entry->exit_info2 = exit_info2; 587 __entry->exit_int_info = exit_int_info; 588 __entry->exit_int_info_err = exit_int_info_err; 589 __entry->isa = isa; 590 ), 591 TP_printk("rip: 0x%016llx reason: %s ext_inf1: 0x%016llx " 592 "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x", 593 __entry->rip, 594 (__entry->isa == KVM_ISA_VMX) ? 595 __print_symbolic(__entry->exit_code, VMX_EXIT_REASONS) : 596 __print_symbolic(__entry->exit_code, SVM_EXIT_REASONS), 597 __entry->exit_info1, __entry->exit_info2, 598 __entry->exit_int_info, __entry->exit_int_info_err) 599 ); 600 601 /* 602 * Tracepoint for #VMEXIT reinjected to the guest 603 */ 604 TRACE_EVENT(kvm_nested_vmexit_inject, 605 TP_PROTO(__u32 exit_code, 606 __u64 exit_info1, __u64 exit_info2, 607 __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa), 608 TP_ARGS(exit_code, exit_info1, exit_info2, 609 exit_int_info, exit_int_info_err, isa), 610 611 TP_STRUCT__entry( 612 __field( __u32, exit_code ) 613 __field( __u64, exit_info1 ) 614 __field( __u64, exit_info2 ) 615 __field( __u32, exit_int_info ) 616 __field( __u32, exit_int_info_err ) 617 __field( __u32, isa ) 618 ), 619 620 TP_fast_assign( 621 __entry->exit_code = exit_code; 622 __entry->exit_info1 = exit_info1; 623 __entry->exit_info2 = exit_info2; 624 __entry->exit_int_info = exit_int_info; 625 __entry->exit_int_info_err = exit_int_info_err; 626 __entry->isa = isa; 627 ), 628 629 TP_printk("reason: %s ext_inf1: 0x%016llx " 630 "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x", 631 (__entry->isa == KVM_ISA_VMX) ? 632 __print_symbolic(__entry->exit_code, VMX_EXIT_REASONS) : 633 __print_symbolic(__entry->exit_code, SVM_EXIT_REASONS), 634 __entry->exit_info1, __entry->exit_info2, 635 __entry->exit_int_info, __entry->exit_int_info_err) 636 ); 637 638 /* 639 * Tracepoint for nested #vmexit because of interrupt pending 640 */ 641 TRACE_EVENT(kvm_nested_intr_vmexit, 642 TP_PROTO(__u64 rip), 643 TP_ARGS(rip), 644 645 TP_STRUCT__entry( 646 __field( __u64, rip ) 647 ), 648 649 TP_fast_assign( 650 __entry->rip = rip 651 ), 652 653 TP_printk("rip: 0x%016llx", __entry->rip) 654 ); 655 656 /* 657 * Tracepoint for nested #vmexit because of interrupt pending 658 */ 659 TRACE_EVENT(kvm_invlpga, 660 TP_PROTO(__u64 rip, int asid, u64 address), 661 TP_ARGS(rip, asid, address), 662 663 TP_STRUCT__entry( 664 __field( __u64, rip ) 665 __field( int, asid ) 666 __field( __u64, address ) 667 ), 668 669 TP_fast_assign( 670 __entry->rip = rip; 671 __entry->asid = asid; 672 __entry->address = address; 673 ), 674 675 TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx", 676 __entry->rip, __entry->asid, __entry->address) 677 ); 678 679 /* 680 * Tracepoint for nested #vmexit because of interrupt pending 681 */ 682 TRACE_EVENT(kvm_skinit, 683 TP_PROTO(__u64 rip, __u32 slb), 684 TP_ARGS(rip, slb), 685 686 TP_STRUCT__entry( 687 __field( __u64, rip ) 688 __field( __u32, slb ) 689 ), 690 691 TP_fast_assign( 692 __entry->rip = rip; 693 __entry->slb = slb; 694 ), 695 696 TP_printk("rip: 0x%016llx slb: 0x%08x", 697 __entry->rip, __entry->slb) 698 ); 699 700 #define KVM_EMUL_INSN_F_CR0_PE (1 << 0) 701 #define KVM_EMUL_INSN_F_EFL_VM (1 << 1) 702 #define KVM_EMUL_INSN_F_CS_D (1 << 2) 703 #define KVM_EMUL_INSN_F_CS_L (1 << 3) 704 705 #define kvm_trace_symbol_emul_flags \ 706 { 0, "real" }, \ 707 { KVM_EMUL_INSN_F_CR0_PE \ 708 | KVM_EMUL_INSN_F_EFL_VM, "vm16" }, \ 709 { KVM_EMUL_INSN_F_CR0_PE, "prot16" }, \ 710 { KVM_EMUL_INSN_F_CR0_PE \ 711 | KVM_EMUL_INSN_F_CS_D, "prot32" }, \ 712 { KVM_EMUL_INSN_F_CR0_PE \ 713 | KVM_EMUL_INSN_F_CS_L, "prot64" } 714 715 #define kei_decode_mode(mode) ({ \ 716 u8 flags = 0xff; \ 717 switch (mode) { \ 718 case X86EMUL_MODE_REAL: \ 719 flags = 0; \ 720 break; \ 721 case X86EMUL_MODE_VM86: \ 722 flags = KVM_EMUL_INSN_F_EFL_VM; \ 723 break; \ 724 case X86EMUL_MODE_PROT16: \ 725 flags = KVM_EMUL_INSN_F_CR0_PE; \ 726 break; \ 727 case X86EMUL_MODE_PROT32: \ 728 flags = KVM_EMUL_INSN_F_CR0_PE \ 729 | KVM_EMUL_INSN_F_CS_D; \ 730 break; \ 731 case X86EMUL_MODE_PROT64: \ 732 flags = KVM_EMUL_INSN_F_CR0_PE \ 733 | KVM_EMUL_INSN_F_CS_L; \ 734 break; \ 735 } \ 736 flags; \ 737 }) 738 739 TRACE_EVENT(kvm_emulate_insn, 740 TP_PROTO(struct kvm_vcpu *vcpu, __u8 failed), 741 TP_ARGS(vcpu, failed), 742 743 TP_STRUCT__entry( 744 __field( __u64, rip ) 745 __field( __u32, csbase ) 746 __field( __u8, len ) 747 __array( __u8, insn, 15 ) 748 __field( __u8, flags ) 749 __field( __u8, failed ) 750 ), 751 752 TP_fast_assign( 753 __entry->csbase = kvm_x86_ops.get_segment_base(vcpu, VCPU_SREG_CS); 754 __entry->len = vcpu->arch.emulate_ctxt->fetch.ptr 755 - vcpu->arch.emulate_ctxt->fetch.data; 756 __entry->rip = vcpu->arch.emulate_ctxt->_eip - __entry->len; 757 memcpy(__entry->insn, 758 vcpu->arch.emulate_ctxt->fetch.data, 759 15); 760 __entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt->mode); 761 __entry->failed = failed; 762 ), 763 764 TP_printk("%x:%llx:%s (%s)%s", 765 __entry->csbase, __entry->rip, 766 __print_hex(__entry->insn, __entry->len), 767 __print_symbolic(__entry->flags, 768 kvm_trace_symbol_emul_flags), 769 __entry->failed ? " failed" : "" 770 ) 771 ); 772 773 #define trace_kvm_emulate_insn_start(vcpu) trace_kvm_emulate_insn(vcpu, 0) 774 #define trace_kvm_emulate_insn_failed(vcpu) trace_kvm_emulate_insn(vcpu, 1) 775 776 TRACE_EVENT( 777 vcpu_match_mmio, 778 TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match), 779 TP_ARGS(gva, gpa, write, gpa_match), 780 781 TP_STRUCT__entry( 782 __field(gva_t, gva) 783 __field(gpa_t, gpa) 784 __field(bool, write) 785 __field(bool, gpa_match) 786 ), 787 788 TP_fast_assign( 789 __entry->gva = gva; 790 __entry->gpa = gpa; 791 __entry->write = write; 792 __entry->gpa_match = gpa_match 793 ), 794 795 TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa, 796 __entry->write ? "Write" : "Read", 797 __entry->gpa_match ? "GPA" : "GVA") 798 ); 799 800 TRACE_EVENT(kvm_write_tsc_offset, 801 TP_PROTO(unsigned int vcpu_id, __u64 previous_tsc_offset, 802 __u64 next_tsc_offset), 803 TP_ARGS(vcpu_id, previous_tsc_offset, next_tsc_offset), 804 805 TP_STRUCT__entry( 806 __field( unsigned int, vcpu_id ) 807 __field( __u64, previous_tsc_offset ) 808 __field( __u64, next_tsc_offset ) 809 ), 810 811 TP_fast_assign( 812 __entry->vcpu_id = vcpu_id; 813 __entry->previous_tsc_offset = previous_tsc_offset; 814 __entry->next_tsc_offset = next_tsc_offset; 815 ), 816 817 TP_printk("vcpu=%u prev=%llu next=%llu", __entry->vcpu_id, 818 __entry->previous_tsc_offset, __entry->next_tsc_offset) 819 ); 820 821 #ifdef CONFIG_X86_64 822 823 #define host_clocks \ 824 {VDSO_CLOCKMODE_NONE, "none"}, \ 825 {VDSO_CLOCKMODE_TSC, "tsc"} \ 826 827 TRACE_EVENT(kvm_update_master_clock, 828 TP_PROTO(bool use_master_clock, unsigned int host_clock, bool offset_matched), 829 TP_ARGS(use_master_clock, host_clock, offset_matched), 830 831 TP_STRUCT__entry( 832 __field( bool, use_master_clock ) 833 __field( unsigned int, host_clock ) 834 __field( bool, offset_matched ) 835 ), 836 837 TP_fast_assign( 838 __entry->use_master_clock = use_master_clock; 839 __entry->host_clock = host_clock; 840 __entry->offset_matched = offset_matched; 841 ), 842 843 TP_printk("masterclock %d hostclock %s offsetmatched %u", 844 __entry->use_master_clock, 845 __print_symbolic(__entry->host_clock, host_clocks), 846 __entry->offset_matched) 847 ); 848 849 TRACE_EVENT(kvm_track_tsc, 850 TP_PROTO(unsigned int vcpu_id, unsigned int nr_matched, 851 unsigned int online_vcpus, bool use_master_clock, 852 unsigned int host_clock), 853 TP_ARGS(vcpu_id, nr_matched, online_vcpus, use_master_clock, 854 host_clock), 855 856 TP_STRUCT__entry( 857 __field( unsigned int, vcpu_id ) 858 __field( unsigned int, nr_vcpus_matched_tsc ) 859 __field( unsigned int, online_vcpus ) 860 __field( bool, use_master_clock ) 861 __field( unsigned int, host_clock ) 862 ), 863 864 TP_fast_assign( 865 __entry->vcpu_id = vcpu_id; 866 __entry->nr_vcpus_matched_tsc = nr_matched; 867 __entry->online_vcpus = online_vcpus; 868 __entry->use_master_clock = use_master_clock; 869 __entry->host_clock = host_clock; 870 ), 871 872 TP_printk("vcpu_id %u masterclock %u offsetmatched %u nr_online %u" 873 " hostclock %s", 874 __entry->vcpu_id, __entry->use_master_clock, 875 __entry->nr_vcpus_matched_tsc, __entry->online_vcpus, 876 __print_symbolic(__entry->host_clock, host_clocks)) 877 ); 878 879 #endif /* CONFIG_X86_64 */ 880 881 /* 882 * Tracepoint for PML full VMEXIT. 883 */ 884 TRACE_EVENT(kvm_pml_full, 885 TP_PROTO(unsigned int vcpu_id), 886 TP_ARGS(vcpu_id), 887 888 TP_STRUCT__entry( 889 __field( unsigned int, vcpu_id ) 890 ), 891 892 TP_fast_assign( 893 __entry->vcpu_id = vcpu_id; 894 ), 895 896 TP_printk("vcpu %d: PML full", __entry->vcpu_id) 897 ); 898 899 TRACE_EVENT(kvm_ple_window_update, 900 TP_PROTO(unsigned int vcpu_id, unsigned int new, unsigned int old), 901 TP_ARGS(vcpu_id, new, old), 902 903 TP_STRUCT__entry( 904 __field( unsigned int, vcpu_id ) 905 __field( unsigned int, new ) 906 __field( unsigned int, old ) 907 ), 908 909 TP_fast_assign( 910 __entry->vcpu_id = vcpu_id; 911 __entry->new = new; 912 __entry->old = old; 913 ), 914 915 TP_printk("vcpu %u old %u new %u (%s)", 916 __entry->vcpu_id, __entry->old, __entry->new, 917 __entry->old < __entry->new ? "growed" : "shrinked") 918 ); 919 920 TRACE_EVENT(kvm_pvclock_update, 921 TP_PROTO(unsigned int vcpu_id, struct pvclock_vcpu_time_info *pvclock), 922 TP_ARGS(vcpu_id, pvclock), 923 924 TP_STRUCT__entry( 925 __field( unsigned int, vcpu_id ) 926 __field( __u32, version ) 927 __field( __u64, tsc_timestamp ) 928 __field( __u64, system_time ) 929 __field( __u32, tsc_to_system_mul ) 930 __field( __s8, tsc_shift ) 931 __field( __u8, flags ) 932 ), 933 934 TP_fast_assign( 935 __entry->vcpu_id = vcpu_id; 936 __entry->version = pvclock->version; 937 __entry->tsc_timestamp = pvclock->tsc_timestamp; 938 __entry->system_time = pvclock->system_time; 939 __entry->tsc_to_system_mul = pvclock->tsc_to_system_mul; 940 __entry->tsc_shift = pvclock->tsc_shift; 941 __entry->flags = pvclock->flags; 942 ), 943 944 TP_printk("vcpu_id %u, pvclock { version %u, tsc_timestamp 0x%llx, " 945 "system_time 0x%llx, tsc_to_system_mul 0x%x, tsc_shift %d, " 946 "flags 0x%x }", 947 __entry->vcpu_id, 948 __entry->version, 949 __entry->tsc_timestamp, 950 __entry->system_time, 951 __entry->tsc_to_system_mul, 952 __entry->tsc_shift, 953 __entry->flags) 954 ); 955 956 TRACE_EVENT(kvm_wait_lapic_expire, 957 TP_PROTO(unsigned int vcpu_id, s64 delta), 958 TP_ARGS(vcpu_id, delta), 959 960 TP_STRUCT__entry( 961 __field( unsigned int, vcpu_id ) 962 __field( s64, delta ) 963 ), 964 965 TP_fast_assign( 966 __entry->vcpu_id = vcpu_id; 967 __entry->delta = delta; 968 ), 969 970 TP_printk("vcpu %u: delta %lld (%s)", 971 __entry->vcpu_id, 972 __entry->delta, 973 __entry->delta < 0 ? "early" : "late") 974 ); 975 976 TRACE_EVENT(kvm_enter_smm, 977 TP_PROTO(unsigned int vcpu_id, u64 smbase, bool entering), 978 TP_ARGS(vcpu_id, smbase, entering), 979 980 TP_STRUCT__entry( 981 __field( unsigned int, vcpu_id ) 982 __field( u64, smbase ) 983 __field( bool, entering ) 984 ), 985 986 TP_fast_assign( 987 __entry->vcpu_id = vcpu_id; 988 __entry->smbase = smbase; 989 __entry->entering = entering; 990 ), 991 992 TP_printk("vcpu %u: %s SMM, smbase 0x%llx", 993 __entry->vcpu_id, 994 __entry->entering ? "entering" : "leaving", 995 __entry->smbase) 996 ); 997 998 /* 999 * Tracepoint for VT-d posted-interrupts. 1000 */ 1001 TRACE_EVENT(kvm_pi_irte_update, 1002 TP_PROTO(unsigned int host_irq, unsigned int vcpu_id, 1003 unsigned int gsi, unsigned int gvec, 1004 u64 pi_desc_addr, bool set), 1005 TP_ARGS(host_irq, vcpu_id, gsi, gvec, pi_desc_addr, set), 1006 1007 TP_STRUCT__entry( 1008 __field( unsigned int, host_irq ) 1009 __field( unsigned int, vcpu_id ) 1010 __field( unsigned int, gsi ) 1011 __field( unsigned int, gvec ) 1012 __field( u64, pi_desc_addr ) 1013 __field( bool, set ) 1014 ), 1015 1016 TP_fast_assign( 1017 __entry->host_irq = host_irq; 1018 __entry->vcpu_id = vcpu_id; 1019 __entry->gsi = gsi; 1020 __entry->gvec = gvec; 1021 __entry->pi_desc_addr = pi_desc_addr; 1022 __entry->set = set; 1023 ), 1024 1025 TP_printk("VT-d PI is %s for irq %u, vcpu %u, gsi: 0x%x, " 1026 "gvec: 0x%x, pi_desc_addr: 0x%llx", 1027 __entry->set ? "enabled and being updated" : "disabled", 1028 __entry->host_irq, 1029 __entry->vcpu_id, 1030 __entry->gsi, 1031 __entry->gvec, 1032 __entry->pi_desc_addr) 1033 ); 1034 1035 /* 1036 * Tracepoint for kvm_hv_notify_acked_sint. 1037 */ 1038 TRACE_EVENT(kvm_hv_notify_acked_sint, 1039 TP_PROTO(int vcpu_id, u32 sint), 1040 TP_ARGS(vcpu_id, sint), 1041 1042 TP_STRUCT__entry( 1043 __field(int, vcpu_id) 1044 __field(u32, sint) 1045 ), 1046 1047 TP_fast_assign( 1048 __entry->vcpu_id = vcpu_id; 1049 __entry->sint = sint; 1050 ), 1051 1052 TP_printk("vcpu_id %d sint %u", __entry->vcpu_id, __entry->sint) 1053 ); 1054 1055 /* 1056 * Tracepoint for synic_set_irq. 1057 */ 1058 TRACE_EVENT(kvm_hv_synic_set_irq, 1059 TP_PROTO(int vcpu_id, u32 sint, int vector, int ret), 1060 TP_ARGS(vcpu_id, sint, vector, ret), 1061 1062 TP_STRUCT__entry( 1063 __field(int, vcpu_id) 1064 __field(u32, sint) 1065 __field(int, vector) 1066 __field(int, ret) 1067 ), 1068 1069 TP_fast_assign( 1070 __entry->vcpu_id = vcpu_id; 1071 __entry->sint = sint; 1072 __entry->vector = vector; 1073 __entry->ret = ret; 1074 ), 1075 1076 TP_printk("vcpu_id %d sint %u vector %d ret %d", 1077 __entry->vcpu_id, __entry->sint, __entry->vector, 1078 __entry->ret) 1079 ); 1080 1081 /* 1082 * Tracepoint for kvm_hv_synic_send_eoi. 1083 */ 1084 TRACE_EVENT(kvm_hv_synic_send_eoi, 1085 TP_PROTO(int vcpu_id, int vector), 1086 TP_ARGS(vcpu_id, vector), 1087 1088 TP_STRUCT__entry( 1089 __field(int, vcpu_id) 1090 __field(u32, sint) 1091 __field(int, vector) 1092 __field(int, ret) 1093 ), 1094 1095 TP_fast_assign( 1096 __entry->vcpu_id = vcpu_id; 1097 __entry->vector = vector; 1098 ), 1099 1100 TP_printk("vcpu_id %d vector %d", __entry->vcpu_id, __entry->vector) 1101 ); 1102 1103 /* 1104 * Tracepoint for synic_set_msr. 1105 */ 1106 TRACE_EVENT(kvm_hv_synic_set_msr, 1107 TP_PROTO(int vcpu_id, u32 msr, u64 data, bool host), 1108 TP_ARGS(vcpu_id, msr, data, host), 1109 1110 TP_STRUCT__entry( 1111 __field(int, vcpu_id) 1112 __field(u32, msr) 1113 __field(u64, data) 1114 __field(bool, host) 1115 ), 1116 1117 TP_fast_assign( 1118 __entry->vcpu_id = vcpu_id; 1119 __entry->msr = msr; 1120 __entry->data = data; 1121 __entry->host = host 1122 ), 1123 1124 TP_printk("vcpu_id %d msr 0x%x data 0x%llx host %d", 1125 __entry->vcpu_id, __entry->msr, __entry->data, __entry->host) 1126 ); 1127 1128 /* 1129 * Tracepoint for stimer_set_config. 1130 */ 1131 TRACE_EVENT(kvm_hv_stimer_set_config, 1132 TP_PROTO(int vcpu_id, int timer_index, u64 config, bool host), 1133 TP_ARGS(vcpu_id, timer_index, config, host), 1134 1135 TP_STRUCT__entry( 1136 __field(int, vcpu_id) 1137 __field(int, timer_index) 1138 __field(u64, config) 1139 __field(bool, host) 1140 ), 1141 1142 TP_fast_assign( 1143 __entry->vcpu_id = vcpu_id; 1144 __entry->timer_index = timer_index; 1145 __entry->config = config; 1146 __entry->host = host; 1147 ), 1148 1149 TP_printk("vcpu_id %d timer %d config 0x%llx host %d", 1150 __entry->vcpu_id, __entry->timer_index, __entry->config, 1151 __entry->host) 1152 ); 1153 1154 /* 1155 * Tracepoint for stimer_set_count. 1156 */ 1157 TRACE_EVENT(kvm_hv_stimer_set_count, 1158 TP_PROTO(int vcpu_id, int timer_index, u64 count, bool host), 1159 TP_ARGS(vcpu_id, timer_index, count, host), 1160 1161 TP_STRUCT__entry( 1162 __field(int, vcpu_id) 1163 __field(int, timer_index) 1164 __field(u64, count) 1165 __field(bool, host) 1166 ), 1167 1168 TP_fast_assign( 1169 __entry->vcpu_id = vcpu_id; 1170 __entry->timer_index = timer_index; 1171 __entry->count = count; 1172 __entry->host = host; 1173 ), 1174 1175 TP_printk("vcpu_id %d timer %d count %llu host %d", 1176 __entry->vcpu_id, __entry->timer_index, __entry->count, 1177 __entry->host) 1178 ); 1179 1180 /* 1181 * Tracepoint for stimer_start(periodic timer case). 1182 */ 1183 TRACE_EVENT(kvm_hv_stimer_start_periodic, 1184 TP_PROTO(int vcpu_id, int timer_index, u64 time_now, u64 exp_time), 1185 TP_ARGS(vcpu_id, timer_index, time_now, exp_time), 1186 1187 TP_STRUCT__entry( 1188 __field(int, vcpu_id) 1189 __field(int, timer_index) 1190 __field(u64, time_now) 1191 __field(u64, exp_time) 1192 ), 1193 1194 TP_fast_assign( 1195 __entry->vcpu_id = vcpu_id; 1196 __entry->timer_index = timer_index; 1197 __entry->time_now = time_now; 1198 __entry->exp_time = exp_time; 1199 ), 1200 1201 TP_printk("vcpu_id %d timer %d time_now %llu exp_time %llu", 1202 __entry->vcpu_id, __entry->timer_index, __entry->time_now, 1203 __entry->exp_time) 1204 ); 1205 1206 /* 1207 * Tracepoint for stimer_start(one-shot timer case). 1208 */ 1209 TRACE_EVENT(kvm_hv_stimer_start_one_shot, 1210 TP_PROTO(int vcpu_id, int timer_index, u64 time_now, u64 count), 1211 TP_ARGS(vcpu_id, timer_index, time_now, count), 1212 1213 TP_STRUCT__entry( 1214 __field(int, vcpu_id) 1215 __field(int, timer_index) 1216 __field(u64, time_now) 1217 __field(u64, count) 1218 ), 1219 1220 TP_fast_assign( 1221 __entry->vcpu_id = vcpu_id; 1222 __entry->timer_index = timer_index; 1223 __entry->time_now = time_now; 1224 __entry->count = count; 1225 ), 1226 1227 TP_printk("vcpu_id %d timer %d time_now %llu count %llu", 1228 __entry->vcpu_id, __entry->timer_index, __entry->time_now, 1229 __entry->count) 1230 ); 1231 1232 /* 1233 * Tracepoint for stimer_timer_callback. 1234 */ 1235 TRACE_EVENT(kvm_hv_stimer_callback, 1236 TP_PROTO(int vcpu_id, int timer_index), 1237 TP_ARGS(vcpu_id, timer_index), 1238 1239 TP_STRUCT__entry( 1240 __field(int, vcpu_id) 1241 __field(int, timer_index) 1242 ), 1243 1244 TP_fast_assign( 1245 __entry->vcpu_id = vcpu_id; 1246 __entry->timer_index = timer_index; 1247 ), 1248 1249 TP_printk("vcpu_id %d timer %d", 1250 __entry->vcpu_id, __entry->timer_index) 1251 ); 1252 1253 /* 1254 * Tracepoint for stimer_expiration. 1255 */ 1256 TRACE_EVENT(kvm_hv_stimer_expiration, 1257 TP_PROTO(int vcpu_id, int timer_index, int direct, int msg_send_result), 1258 TP_ARGS(vcpu_id, timer_index, direct, msg_send_result), 1259 1260 TP_STRUCT__entry( 1261 __field(int, vcpu_id) 1262 __field(int, timer_index) 1263 __field(int, direct) 1264 __field(int, msg_send_result) 1265 ), 1266 1267 TP_fast_assign( 1268 __entry->vcpu_id = vcpu_id; 1269 __entry->timer_index = timer_index; 1270 __entry->direct = direct; 1271 __entry->msg_send_result = msg_send_result; 1272 ), 1273 1274 TP_printk("vcpu_id %d timer %d direct %d send result %d", 1275 __entry->vcpu_id, __entry->timer_index, 1276 __entry->direct, __entry->msg_send_result) 1277 ); 1278 1279 /* 1280 * Tracepoint for stimer_cleanup. 1281 */ 1282 TRACE_EVENT(kvm_hv_stimer_cleanup, 1283 TP_PROTO(int vcpu_id, int timer_index), 1284 TP_ARGS(vcpu_id, timer_index), 1285 1286 TP_STRUCT__entry( 1287 __field(int, vcpu_id) 1288 __field(int, timer_index) 1289 ), 1290 1291 TP_fast_assign( 1292 __entry->vcpu_id = vcpu_id; 1293 __entry->timer_index = timer_index; 1294 ), 1295 1296 TP_printk("vcpu_id %d timer %d", 1297 __entry->vcpu_id, __entry->timer_index) 1298 ); 1299 1300 TRACE_EVENT(kvm_apicv_update_request, 1301 TP_PROTO(bool activate, unsigned long bit), 1302 TP_ARGS(activate, bit), 1303 1304 TP_STRUCT__entry( 1305 __field(bool, activate) 1306 __field(unsigned long, bit) 1307 ), 1308 1309 TP_fast_assign( 1310 __entry->activate = activate; 1311 __entry->bit = bit; 1312 ), 1313 1314 TP_printk("%s bit=%lu", 1315 __entry->activate ? "activate" : "deactivate", 1316 __entry->bit) 1317 ); 1318 1319 /* 1320 * Tracepoint for AMD AVIC 1321 */ 1322 TRACE_EVENT(kvm_avic_incomplete_ipi, 1323 TP_PROTO(u32 vcpu, u32 icrh, u32 icrl, u32 id, u32 index), 1324 TP_ARGS(vcpu, icrh, icrl, id, index), 1325 1326 TP_STRUCT__entry( 1327 __field(u32, vcpu) 1328 __field(u32, icrh) 1329 __field(u32, icrl) 1330 __field(u32, id) 1331 __field(u32, index) 1332 ), 1333 1334 TP_fast_assign( 1335 __entry->vcpu = vcpu; 1336 __entry->icrh = icrh; 1337 __entry->icrl = icrl; 1338 __entry->id = id; 1339 __entry->index = index; 1340 ), 1341 1342 TP_printk("vcpu=%u, icrh:icrl=%#010x:%08x, id=%u, index=%u", 1343 __entry->vcpu, __entry->icrh, __entry->icrl, 1344 __entry->id, __entry->index) 1345 ); 1346 1347 TRACE_EVENT(kvm_avic_unaccelerated_access, 1348 TP_PROTO(u32 vcpu, u32 offset, bool ft, bool rw, u32 vec), 1349 TP_ARGS(vcpu, offset, ft, rw, vec), 1350 1351 TP_STRUCT__entry( 1352 __field(u32, vcpu) 1353 __field(u32, offset) 1354 __field(bool, ft) 1355 __field(bool, rw) 1356 __field(u32, vec) 1357 ), 1358 1359 TP_fast_assign( 1360 __entry->vcpu = vcpu; 1361 __entry->offset = offset; 1362 __entry->ft = ft; 1363 __entry->rw = rw; 1364 __entry->vec = vec; 1365 ), 1366 1367 TP_printk("vcpu=%u, offset=%#x(%s), %s, %s, vec=%#x", 1368 __entry->vcpu, 1369 __entry->offset, 1370 __print_symbolic(__entry->offset, kvm_trace_symbol_apic), 1371 __entry->ft ? "trap" : "fault", 1372 __entry->rw ? "write" : "read", 1373 __entry->vec) 1374 ); 1375 1376 TRACE_EVENT(kvm_avic_ga_log, 1377 TP_PROTO(u32 vmid, u32 vcpuid), 1378 TP_ARGS(vmid, vcpuid), 1379 1380 TP_STRUCT__entry( 1381 __field(u32, vmid) 1382 __field(u32, vcpuid) 1383 ), 1384 1385 TP_fast_assign( 1386 __entry->vmid = vmid; 1387 __entry->vcpuid = vcpuid; 1388 ), 1389 1390 TP_printk("vmid=%u, vcpuid=%u", 1391 __entry->vmid, __entry->vcpuid) 1392 ); 1393 1394 TRACE_EVENT(kvm_hv_timer_state, 1395 TP_PROTO(unsigned int vcpu_id, unsigned int hv_timer_in_use), 1396 TP_ARGS(vcpu_id, hv_timer_in_use), 1397 TP_STRUCT__entry( 1398 __field(unsigned int, vcpu_id) 1399 __field(unsigned int, hv_timer_in_use) 1400 ), 1401 TP_fast_assign( 1402 __entry->vcpu_id = vcpu_id; 1403 __entry->hv_timer_in_use = hv_timer_in_use; 1404 ), 1405 TP_printk("vcpu_id %x hv_timer %x", 1406 __entry->vcpu_id, 1407 __entry->hv_timer_in_use) 1408 ); 1409 1410 /* 1411 * Tracepoint for kvm_hv_flush_tlb. 1412 */ 1413 TRACE_EVENT(kvm_hv_flush_tlb, 1414 TP_PROTO(u64 processor_mask, u64 address_space, u64 flags), 1415 TP_ARGS(processor_mask, address_space, flags), 1416 1417 TP_STRUCT__entry( 1418 __field(u64, processor_mask) 1419 __field(u64, address_space) 1420 __field(u64, flags) 1421 ), 1422 1423 TP_fast_assign( 1424 __entry->processor_mask = processor_mask; 1425 __entry->address_space = address_space; 1426 __entry->flags = flags; 1427 ), 1428 1429 TP_printk("processor_mask 0x%llx address_space 0x%llx flags 0x%llx", 1430 __entry->processor_mask, __entry->address_space, 1431 __entry->flags) 1432 ); 1433 1434 /* 1435 * Tracepoint for kvm_hv_flush_tlb_ex. 1436 */ 1437 TRACE_EVENT(kvm_hv_flush_tlb_ex, 1438 TP_PROTO(u64 valid_bank_mask, u64 format, u64 address_space, u64 flags), 1439 TP_ARGS(valid_bank_mask, format, address_space, flags), 1440 1441 TP_STRUCT__entry( 1442 __field(u64, valid_bank_mask) 1443 __field(u64, format) 1444 __field(u64, address_space) 1445 __field(u64, flags) 1446 ), 1447 1448 TP_fast_assign( 1449 __entry->valid_bank_mask = valid_bank_mask; 1450 __entry->format = format; 1451 __entry->address_space = address_space; 1452 __entry->flags = flags; 1453 ), 1454 1455 TP_printk("valid_bank_mask 0x%llx format 0x%llx " 1456 "address_space 0x%llx flags 0x%llx", 1457 __entry->valid_bank_mask, __entry->format, 1458 __entry->address_space, __entry->flags) 1459 ); 1460 1461 /* 1462 * Tracepoints for kvm_hv_send_ipi. 1463 */ 1464 TRACE_EVENT(kvm_hv_send_ipi, 1465 TP_PROTO(u32 vector, u64 processor_mask), 1466 TP_ARGS(vector, processor_mask), 1467 1468 TP_STRUCT__entry( 1469 __field(u32, vector) 1470 __field(u64, processor_mask) 1471 ), 1472 1473 TP_fast_assign( 1474 __entry->vector = vector; 1475 __entry->processor_mask = processor_mask; 1476 ), 1477 1478 TP_printk("vector %x processor_mask 0x%llx", 1479 __entry->vector, __entry->processor_mask) 1480 ); 1481 1482 TRACE_EVENT(kvm_hv_send_ipi_ex, 1483 TP_PROTO(u32 vector, u64 format, u64 valid_bank_mask), 1484 TP_ARGS(vector, format, valid_bank_mask), 1485 1486 TP_STRUCT__entry( 1487 __field(u32, vector) 1488 __field(u64, format) 1489 __field(u64, valid_bank_mask) 1490 ), 1491 1492 TP_fast_assign( 1493 __entry->vector = vector; 1494 __entry->format = format; 1495 __entry->valid_bank_mask = valid_bank_mask; 1496 ), 1497 1498 TP_printk("vector %x format %llx valid_bank_mask 0x%llx", 1499 __entry->vector, __entry->format, 1500 __entry->valid_bank_mask) 1501 ); 1502 1503 TRACE_EVENT(kvm_pv_tlb_flush, 1504 TP_PROTO(unsigned int vcpu_id, bool need_flush_tlb), 1505 TP_ARGS(vcpu_id, need_flush_tlb), 1506 1507 TP_STRUCT__entry( 1508 __field( unsigned int, vcpu_id ) 1509 __field( bool, need_flush_tlb ) 1510 ), 1511 1512 TP_fast_assign( 1513 __entry->vcpu_id = vcpu_id; 1514 __entry->need_flush_tlb = need_flush_tlb; 1515 ), 1516 1517 TP_printk("vcpu %u need_flush_tlb %s", __entry->vcpu_id, 1518 __entry->need_flush_tlb ? "true" : "false") 1519 ); 1520 1521 /* 1522 * Tracepoint for failed nested VMX VM-Enter. 1523 */ 1524 TRACE_EVENT(kvm_nested_vmenter_failed, 1525 TP_PROTO(const char *msg, u32 err), 1526 TP_ARGS(msg, err), 1527 1528 TP_STRUCT__entry( 1529 __field(const char *, msg) 1530 __field(u32, err) 1531 ), 1532 1533 TP_fast_assign( 1534 __entry->msg = msg; 1535 __entry->err = err; 1536 ), 1537 1538 TP_printk("%s%s", __entry->msg, !__entry->err ? "" : 1539 __print_symbolic(__entry->err, VMX_VMENTER_INSTRUCTION_ERRORS)) 1540 ); 1541 1542 #endif /* _TRACE_KVM_H */ 1543 1544 #undef TRACE_INCLUDE_PATH 1545 #define TRACE_INCLUDE_PATH ../../arch/x86/kvm 1546 #undef TRACE_INCLUDE_FILE 1547 #define TRACE_INCLUDE_FILE trace 1548 1549 /* This part must be outside protection */ 1550 #include <trace/define_trace.h> 1551