// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine -- Performance Monitoring Unit support
 *
 * Copyright 2015 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 *   Wei Huang    <wei@redhat.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <asm/perf_event.h>
#include <asm/cpu_device_id.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

/* This is enough to filter the vast majority of currently defined events. */
#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300

struct x86_pmu_capability __read_mostly kvm_pmu_cap;
EXPORT_SYMBOL_GPL(kvm_pmu_cap);

/* Precise Distribution of Instructions Retired (PDIR) */
static const struct x86_cpu_id vmx_pebs_pdir_cpu[] = {
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, NULL),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, NULL),
	/* Instruction-Accurate PDIR (PDIR++) */
	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, NULL),
	{}
};

/* Precise Distribution (PDist) */
static const struct x86_cpu_id vmx_pebs_pdist_cpu[] = {
	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, NULL),
	{}
};

/* NOTE:
 * - Each perf counter is defined as "struct kvm_pmc";
 * - There are two types of perf counters: general purpose (gp) and fixed.
 *   gp counters are stored in gp_counters[] and fixed counters are stored
 *   in fixed_counters[] respectively. Both of them are part of "struct
 *   kvm_pmu";
 * - pmu.c understands the difference between gp counters and fixed counters.
 *   However, AMD doesn't support fixed counters;
 * - There are three types of index to access perf counters (PMC):
 *   1. MSR (named msr): For example Intel has MSR_IA32_PERFCTRn and AMD
 *      has MSR_K7_PERFCTRn and, for families 15H and later,
 *      MSR_F15H_PERF_CTRn, where MSR_F15H_PERF_CTR[0-3] are
 *      aliased to MSR_K7_PERFCTRn.
 *   2. MSR Index (named idx): This is normally used by the RDPMC instruction.
 *      For instance, AMD's RDPMC instruction uses 0000_0003h in ECX to access
 *      C001_0007h (MSR_K7_PERFCTR3). Intel has a similar mechanism, except
 *      that it also supports fixed counters. idx can be used as an index into
 *      the gp and fixed counters.
 *   3. Global PMC Index (named pmc): pmc is an index specific to PMU
 *      code. Each pmc, stored in the kvm_pmc.idx field, is unique across
 *      all perf counters (both gp and fixed). The mapping relationship
 *      between pmc and perf counters is as follows:
 *      * Intel: [0 .. KVM_INTEL_PMC_MAX_GENERIC-1] <=> gp counters
 *               [INTEL_PMC_IDX_FIXED .. INTEL_PMC_IDX_FIXED + 2] <=> fixed
 *      * AMD:   [0 .. AMD64_NUM_COUNTERS-1] and, for families 15H
 *               and later, [0 .. AMD64_NUM_COUNTERS_CORE-1] <=> gp counters
 */
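
/*
 * Example of the index types above, assuming the usual
 * INTEL_PMC_IDX_FIXED == 32 definition from asm/perf_event.h: on Intel,
 * gp counter 1 is MSR_IA32_PERFCTR1, has RDPMC idx 1 and kvm_pmc.idx 1,
 * while fixed counter 0 is MSR_CORE_PERF_FIXED_CTR0, has RDPMC idx
 * 0x40000000 (bit 30 selects the fixed-counter range) and kvm_pmc.idx
 * INTEL_PMC_IDX_FIXED (32).
 */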

static struct kvm_pmu_ops kvm_pmu_ops __read_mostly;

#define KVM_X86_PMU_OP(func)					     \
	DEFINE_STATIC_CALL_NULL(kvm_x86_pmu_##func,		     \
				*(((struct kvm_pmu_ops *)0)->func));
#define KVM_X86_PMU_OP_OPTIONAL KVM_X86_PMU_OP
#include <asm/kvm-x86-pmu-ops.h>

void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops)
{
	memcpy(&kvm_pmu_ops, pmu_ops, sizeof(kvm_pmu_ops));

#define __KVM_X86_PMU_OP(func) \
	static_call_update(kvm_x86_pmu_##func, kvm_pmu_ops.func);
#define KVM_X86_PMU_OP(func) \
	WARN_ON(!kvm_pmu_ops.func); __KVM_X86_PMU_OP(func)
#define KVM_X86_PMU_OP_OPTIONAL __KVM_X86_PMU_OP
#include <asm/kvm-x86-pmu-ops.h>
#undef __KVM_X86_PMU_OP
}

static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
{
	struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	kvm_pmu_deliver_pmi(vcpu);
}

static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
	bool skip_pmi = false;

	if (pmc->perf_event && pmc->perf_event->attr.precise_ip) {
		if (!in_pmi) {
			/*
			 * TODO: KVM is currently _choosing_ to not generate
			 * records for emulated instructions, avoiding a
			 * BUFFER_OVF PMI when there are no records. Strictly
			 * speaking, it should be done as well in the right
			 * context to improve sampling accuracy.
			 */
			skip_pmi = true;
		} else {
			/* Indicate PEBS overflow PMI to guest. */
			skip_pmi = __test_and_set_bit(GLOBAL_STATUS_BUFFER_OVF_BIT,
						      (unsigned long *)&pmu->global_status);
		}
	} else {
		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
	}

	if (!pmc->intr || skip_pmi)
		return;

	/*
	 * Inject PMI. If the vCPU was in guest mode during the NMI, the PMI
	 * can be injected on guest mode re-entry. Otherwise we can't be sure
	 * that the vCPU wasn't executing the HLT instruction at the time of
	 * the VM-exit and is not going to re-enter guest mode until woken up.
	 * So we should wake it, but this is impossible from NMI context.
	 * Do it from irq work instead.
	 */
	if (in_pmi && !kvm_handling_nmi_from_guest(pmc->vcpu))
		irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
	else
		kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
}

static void kvm_perf_overflow(struct perf_event *perf_event,
			      struct perf_sample_data *data,
			      struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;

	/*
	 * Ignore overflow events for counters that are scheduled to be
	 * reprogrammed, e.g. if a PMI for the previous event races with KVM's
	 * handling of a related guest WRMSR.
	 */
	if (test_and_set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi))
		return;

	__kvm_perf_overflow(pmc, true);

	kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
}

static u64 pmc_get_pebs_precise_level(struct kvm_pmc *pmc)
{
	/*
	 * For some model-specific PEBS counters with special capabilities
	 * (PDIR, PDIR++, PDist), KVM needs to raise the event precise level
	 * to the maximum value (currently 3, backwards compatible) so that
	 * the perf subsystem will assign a hardware counter with that
	 * capability to the vPMC.
	 */
	if ((pmc->idx == 0 && x86_match_cpu(vmx_pebs_pdist_cpu)) ||
	    (pmc->idx == 32 && x86_match_cpu(vmx_pebs_pdir_cpu)))
		return 3;

	/*
	 * A non-zero precision level turns an ordinary guest event into a
	 * guest PEBS event and triggers the host PEBS PMI handler to
	 * determine whether the PEBS overflow PMI comes from the host
	 * counters or the guest.
	 */
	return 1;
}
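
/*
 * For reference, attr.precise_ip follows the perf_event_attr ABI
 * (include/uapi/linux/perf_event.h): 0 = arbitrary skid (no PEBS),
 * 1 = constant skid, 2 = requested zero skid, 3 = zero skid required.
 * The "3" above therefore asks perf for the strictest constraint, which
 * on the models listed in vmx_pebs_pdir_cpu/vmx_pebs_pdist_cpu is
 * expected to land the event on the PDIR/PDist-capable counter.
 * (pmc->idx == 32 above corresponds to fixed counter 0, i.e.
 * INTEL_PMC_IDX_FIXED.)
 */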

static int pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, u64 config,
				 bool exclude_user, bool exclude_kernel,
				 bool intr)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
	struct perf_event *event;
	struct perf_event_attr attr = {
		.type = type,
		.size = sizeof(attr),
		.pinned = true,
		.exclude_idle = true,
		.exclude_host = 1,
		.exclude_user = exclude_user,
		.exclude_kernel = exclude_kernel,
		.config = config,
	};
	bool pebs = test_bit(pmc->idx, (unsigned long *)&pmu->pebs_enable);

	attr.sample_period = get_sample_period(pmc, pmc->counter);

	if ((attr.config & HSW_IN_TX_CHECKPOINTED) &&
	    guest_cpuid_is_intel(pmc->vcpu)) {
		/*
		 * HSW_IN_TX_CHECKPOINTED is not supported with nonzero
		 * period. Just clear the sample period so at least
		 * allocating the counter doesn't fail.
		 */
		attr.sample_period = 0;
	}
	if (pebs) {
		/*
		 * For most PEBS hardware events, the difference in the software
		 * precision levels of guest and host PEBS events will not affect
		 * the accuracy of the PEBS profiling result, because the "event IP"
		 * in the PEBS record is calibrated on the guest side.
		 */
		attr.precise_ip = pmc_get_pebs_precise_level(pmc);
	}

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 kvm_perf_overflow, pmc);
	if (IS_ERR(event)) {
		pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n",
				     PTR_ERR(event), pmc->idx);
		return PTR_ERR(event);
	}

	pmc->perf_event = event;
	pmc_to_pmu(pmc)->event_count++;
	pmc->is_paused = false;
	pmc->intr = intr || pebs;
	return 0;
}

static void pmc_pause_counter(struct kvm_pmc *pmc)
{
	u64 counter = pmc->counter;

	if (!pmc->perf_event || pmc->is_paused)
		return;

	/* update counter, reset event value to avoid redundant accumulation */
	counter += perf_event_pause(pmc->perf_event, true);
	pmc->counter = counter & pmc_bitmask(pmc);
	pmc->is_paused = true;
}

static bool pmc_resume_counter(struct kvm_pmc *pmc)
{
	if (!pmc->perf_event)
		return false;

	/* recalibrate sample period and check if it's accepted by perf core */
	if (is_sampling_event(pmc->perf_event) &&
	    perf_event_period(pmc->perf_event,
			      get_sample_period(pmc, pmc->counter)))
		return false;

	if (test_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->pebs_enable) !=
	    (!!pmc->perf_event->attr.precise_ip))
		return false;

	/* Reuse the existing perf_event, as pmc_reprogram_counter() would. */
	perf_event_enable(pmc->perf_event);
	pmc->is_paused = false;

	return true;
}
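
/*
 * Sample-period sizing example for the helpers above: get_sample_period()
 * (a helper in pmu.h) programs perf so that the host event overflows when
 * the guest counter would wrap, i.e. the period is the distance from the
 * current guest value to the wrap point. Assuming a 48-bit wide counter,
 * a guest that writes -1000 (0xfffffffffc18 after masking) yields a sample
 * period of 1000, so kvm_perf_overflow() fires after 1000 more events.
 */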

static int filter_cmp(const void *pa, const void *pb, u64 mask)
{
	u64 a = *(u64 *)pa & mask;
	u64 b = *(u64 *)pb & mask;

	return (a > b) - (a < b);
}

static int filter_sort_cmp(const void *pa, const void *pb)
{
	return filter_cmp(pa, pb, (KVM_PMU_MASKED_ENTRY_EVENT_SELECT |
				   KVM_PMU_MASKED_ENTRY_EXCLUDE));
}

/*
 * For the event filter, searching is done on the 'includes' list and
 * 'excludes' list separately rather than on the 'events' list (which
 * has both). As a result the exclude bit can be ignored.
 */
static int filter_event_cmp(const void *pa, const void *pb)
{
	return filter_cmp(pa, pb, (KVM_PMU_MASKED_ENTRY_EVENT_SELECT));
}

static int find_filter_index(u64 *events, u64 nevents, u64 key)
{
	u64 *fe = bsearch(&key, events, nevents, sizeof(events[0]),
			  filter_event_cmp);

	if (!fe)
		return -1;

	return fe - events;
}

static bool is_filter_entry_match(u64 filter_event, u64 umask)
{
	u64 mask = filter_event >> (KVM_PMU_MASKED_ENTRY_UMASK_MASK_SHIFT - 8);
	u64 match = filter_event & KVM_PMU_MASKED_ENTRY_UMASK_MATCH;

	BUILD_BUG_ON((KVM_PMU_ENCODE_MASKED_ENTRY(0, 0xff, 0, false) >>
		     (KVM_PMU_MASKED_ENTRY_UMASK_MASK_SHIFT - 8)) !=
		     ARCH_PERFMON_EVENTSEL_UMASK);

	return (umask & mask) == match;
}
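
/*
 * Unit-mask matching example: is_filter_entry_match() above shifts the
 * filter entry's umask_mask down so that it lines up with the guest's
 * ARCH_PERFMON_EVENTSEL_UMASK field (bits 15:8) and then checks
 * (guest_umask & umask_mask) == umask_match. A hypothetical masked entry
 * with umask_mask = 0xf0 and umask_match = 0xc0 would therefore match
 * guest unit masks 0xc0..0xcf and reject everything else.
 */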

static bool filter_contains_match(u64 *events, u64 nevents, u64 eventsel)
{
	u64 event_select = eventsel & kvm_pmu_ops.EVENTSEL_EVENT;
	u64 umask = eventsel & ARCH_PERFMON_EVENTSEL_UMASK;
	int i, index;

	index = find_filter_index(events, nevents, event_select);
	if (index < 0)
		return false;

	/*
	 * Entries are sorted by the event select.  Walk the list in both
	 * directions to process all entries with the targeted event select.
	 */
	for (i = index; i < nevents; i++) {
		if (filter_event_cmp(&events[i], &event_select))
			break;

		if (is_filter_entry_match(events[i], umask))
			return true;
	}

	for (i = index - 1; i >= 0; i--) {
		if (filter_event_cmp(&events[i], &event_select))
			break;

		if (is_filter_entry_match(events[i], umask))
			return true;
	}

	return false;
}

static bool is_gp_event_allowed(struct kvm_x86_pmu_event_filter *f,
				u64 eventsel)
{
	if (filter_contains_match(f->includes, f->nr_includes, eventsel) &&
	    !filter_contains_match(f->excludes, f->nr_excludes, eventsel))
		return f->action == KVM_PMU_EVENT_ALLOW;

	return f->action == KVM_PMU_EVENT_DENY;
}

static bool is_fixed_event_allowed(struct kvm_x86_pmu_event_filter *filter,
				   int idx)
{
	int fixed_idx = idx - INTEL_PMC_IDX_FIXED;

	if (filter->action == KVM_PMU_EVENT_DENY &&
	    test_bit(fixed_idx, (ulong *)&filter->fixed_counter_bitmap))
		return false;
	if (filter->action == KVM_PMU_EVENT_ALLOW &&
	    !test_bit(fixed_idx, (ulong *)&filter->fixed_counter_bitmap))
		return false;

	return true;
}

static bool check_pmu_event_filter(struct kvm_pmc *pmc)
{
	struct kvm_x86_pmu_event_filter *filter;
	struct kvm *kvm = pmc->vcpu->kvm;

	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
	if (!filter)
		return true;

	if (pmc_is_gp(pmc))
		return is_gp_event_allowed(filter, pmc->eventsel);

	return is_fixed_event_allowed(filter, pmc->idx);
}

static bool pmc_event_is_allowed(struct kvm_pmc *pmc)
{
	return pmc_is_globally_enabled(pmc) && pmc_speculative_in_use(pmc) &&
	       static_call(kvm_x86_pmu_hw_event_available)(pmc) &&
	       check_pmu_event_filter(pmc);
}
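
/*
 * reprogram_counter() below translates the guest's fixed counter control
 * field into an eventsel-style config. Per the architectural layout of
 * IA32_FIXED_CTR_CTRL (a 4-bit field per fixed counter), bit 0 enables
 * ring-0 counting (OS), bit 1 enables ring-3 counting (USR) and bit 3
 * enables the overflow PMI (INT), which is why the 0x1/0x2/0x8 tests
 * below map onto ARCH_PERFMON_EVENTSEL_OS/USR/INT.
 */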

static void reprogram_counter(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
	u64 eventsel = pmc->eventsel;
	u64 new_config = eventsel;
	u8 fixed_ctr_ctrl;

	pmc_pause_counter(pmc);

	if (!pmc_event_is_allowed(pmc))
		goto reprogram_complete;

	if (pmc->counter < pmc->prev_counter)
		__kvm_perf_overflow(pmc, false);

	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
		printk_once("kvm pmu: pin control bit is ignored\n");

	if (pmc_is_fixed(pmc)) {
		fixed_ctr_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl,
						  pmc->idx - INTEL_PMC_IDX_FIXED);
		if (fixed_ctr_ctrl & 0x1)
			eventsel |= ARCH_PERFMON_EVENTSEL_OS;
		if (fixed_ctr_ctrl & 0x2)
			eventsel |= ARCH_PERFMON_EVENTSEL_USR;
		if (fixed_ctr_ctrl & 0x8)
			eventsel |= ARCH_PERFMON_EVENTSEL_INT;
		new_config = (u64)fixed_ctr_ctrl;
	}

	if (pmc->current_config == new_config && pmc_resume_counter(pmc))
		goto reprogram_complete;

	pmc_release_perf_event(pmc);

	pmc->current_config = new_config;

	/*
	 * If reprogramming fails, e.g. due to contention, leave the counter's
	 * reprogram bit set, i.e. opportunistically try again on the next PMU
	 * refresh.  Don't make a new request as doing so can stall the guest
	 * if reprogramming repeatedly fails.
	 */
	if (pmc_reprogram_counter(pmc, PERF_TYPE_RAW,
				  (eventsel & pmu->raw_event_mask),
				  !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
				  !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
				  eventsel & ARCH_PERFMON_EVENTSEL_INT))
		return;

reprogram_complete:
	clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
	pmc->prev_counter = 0;
}

void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int bit;

	for_each_set_bit(bit, pmu->reprogram_pmi, X86_PMC_IDX_MAX) {
		struct kvm_pmc *pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, bit);

		if (unlikely(!pmc)) {
			clear_bit(bit, pmu->reprogram_pmi);
			continue;
		}

		reprogram_counter(pmc);
	}

	/*
	 * Unused perf_events are only released if the corresponding MSRs
	 * weren't accessed during the last vCPU time slice. kvm_arch_sched_in
	 * triggers KVM_REQ_PMU if cleanup is needed.
	 */
	if (unlikely(pmu->need_cleanup))
		kvm_pmu_cleanup(vcpu);
}

/* check if idx is a valid index to access PMU */
bool kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	return static_call(kvm_x86_pmu_is_valid_rdpmc_ecx)(vcpu, idx);
}

bool is_vmware_backdoor_pmc(u32 pmc_idx)
{
	switch (pmc_idx) {
	case VMWARE_BACKDOOR_PMC_HOST_TSC:
	case VMWARE_BACKDOOR_PMC_REAL_TIME:
	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
		return true;
	}
	return false;
}

static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
	u64 ctr_val;

	switch (idx) {
	case VMWARE_BACKDOOR_PMC_HOST_TSC:
		ctr_val = rdtsc();
		break;
	case VMWARE_BACKDOOR_PMC_REAL_TIME:
		ctr_val = ktime_get_boottime_ns();
		break;
	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
		ctr_val = ktime_get_boottime_ns() +
			vcpu->kvm->arch.kvmclock_offset;
		break;
	default:
		return 1;
	}

	*data = ctr_val;
	return 0;
}

int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
	bool fast_mode = idx & (1u << 31);
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u64 mask = fast_mode ? ~0u : ~0ull;

	if (!pmu->version)
		return 1;

	if (is_vmware_backdoor_pmc(idx))
		return kvm_pmu_rdpmc_vmware(vcpu, idx, data);

	pmc = static_call(kvm_x86_pmu_rdpmc_ecx_to_pmc)(vcpu, idx, &mask);
	if (!pmc)
		return 1;

	if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_PCE) &&
	    (static_call(kvm_x86_get_cpl)(vcpu) != 0) &&
	    kvm_is_cr0_bit_set(vcpu, X86_CR0_PE))
		return 1;

	*data = pmc_read_counter(pmc) & mask;
	return 0;
}

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
	if (lapic_in_kernel(vcpu)) {
		static_call_cond(kvm_x86_pmu_deliver_pmi)(vcpu);
		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
	}
}

bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	switch (msr) {
	case MSR_CORE_PERF_GLOBAL_STATUS:
	case MSR_CORE_PERF_GLOBAL_CTRL:
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		return kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu));
	default:
		break;
	}
	return static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr) ||
	       static_call(kvm_x86_pmu_is_valid_msr)(vcpu, msr);
}

static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr);

	if (pmc)
		__set_bit(pmc->idx, pmu->pmc_in_use);
}

int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	u32 msr = msr_info->index;

	switch (msr) {
	case MSR_CORE_PERF_GLOBAL_STATUS:
	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
		msr_info->data = pmu->global_status;
		break;
	case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
	case MSR_CORE_PERF_GLOBAL_CTRL:
		msr_info->data = pmu->global_ctrl;
		break;
	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		msr_info->data = 0;
		break;
	default:
		return static_call(kvm_x86_pmu_get_msr)(vcpu, msr_info);
	}

	return 0;
}

int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	u32 msr = msr_info->index;
	u64 data = msr_info->data;
	u64 diff;

	/*
	 * Note, AMD ignores writes to reserved bits and read-only PMU MSRs,
	 * whereas Intel generates #GP on attempts to write reserved/RO MSRs.
	 */
	switch (msr) {
	case MSR_CORE_PERF_GLOBAL_STATUS:
		if (!msr_info->host_initiated)
			return 1; /* RO MSR */
		fallthrough;
	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
		/* Per PPR, Read-only MSR. Writes are ignored. */
		if (!msr_info->host_initiated)
			break;

		if (data & pmu->global_status_mask)
			return 1;

		pmu->global_status = data;
		break;
	case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
		data &= ~pmu->global_ctrl_mask;
		fallthrough;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (!kvm_valid_perf_global_ctrl(pmu, data))
			return 1;

		if (pmu->global_ctrl != data) {
			diff = pmu->global_ctrl ^ data;
			pmu->global_ctrl = data;
			reprogram_counters(pmu, diff);
		}
		break;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		/*
		 * GLOBAL_OVF_CTRL, a.k.a. GLOBAL_STATUS_RESET, clears bits in
		 * GLOBAL_STATUS, and so the set of reserved bits is the same.
		 */
		if (data & pmu->global_status_mask)
			return 1;
		fallthrough;
	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
		if (!msr_info->host_initiated)
			pmu->global_status &= ~data;
		break;
	default:
		kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
		return static_call(kvm_x86_pmu_set_msr)(vcpu, msr_info);
	}

	return 0;
}
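
/*
 * Example of the global MSR handshake handled above: after a PMI the
 * guest typically reads GLOBAL_STATUS, services the overflowed counters,
 * and then writes the same bits (e.g. BIT_ULL(62), i.e.
 * GLOBAL_STATUS_BUFFER_OVF_BIT, to acknowledge a PEBS buffer overflow)
 * to GLOBAL_OVF_CTRL / GLOBAL_STATUS_CLR, which the code above turns
 * into "pmu->global_status &= ~data".
 */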

/*
 * Refresh PMU settings. This function is generally called when the
 * underlying settings are changed (such as changes to the guest's PMU
 * CPUID), which should rarely happen.
 */
void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
{
	if (KVM_BUG_ON(kvm_vcpu_has_run(vcpu), vcpu->kvm))
		return;

	bitmap_zero(vcpu_to_pmu(vcpu)->all_valid_pmc_idx, X86_PMC_IDX_MAX);
	static_call(kvm_x86_pmu_refresh)(vcpu);
}

void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	irq_work_sync(&pmu->irq_work);
	static_call(kvm_x86_pmu_reset)(vcpu);
}

void kvm_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	memset(pmu, 0, sizeof(*pmu));
	static_call(kvm_x86_pmu_init)(vcpu);
	init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
	pmu->event_count = 0;
	pmu->need_cleanup = false;
	kvm_pmu_refresh(vcpu);
}

/* Release perf_events for vPMCs that have been unused for a full time slice. */
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = NULL;
	DECLARE_BITMAP(bitmask, X86_PMC_IDX_MAX);
	int i;

	pmu->need_cleanup = false;

	bitmap_andnot(bitmask, pmu->all_valid_pmc_idx,
		      pmu->pmc_in_use, X86_PMC_IDX_MAX);

	for_each_set_bit(i, bitmask, X86_PMC_IDX_MAX) {
		pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);

		if (pmc && pmc->perf_event && !pmc_speculative_in_use(pmc))
			pmc_stop_counter(pmc);
	}

	static_call_cond(kvm_x86_pmu_cleanup)(vcpu);

	bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX);
}

void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_pmu_reset(vcpu);
}

static void kvm_pmu_incr_counter(struct kvm_pmc *pmc)
{
	pmc->prev_counter = pmc->counter;
	pmc->counter = (pmc->counter + 1) & pmc_bitmask(pmc);
	kvm_pmu_request_counter_reprogram(pmc);
}

static inline bool eventsel_match_perf_hw_id(struct kvm_pmc *pmc,
					     unsigned int perf_hw_id)
{
	return !((pmc->eventsel ^ perf_get_hw_event_config(perf_hw_id)) &
		AMD64_RAW_EVENT_MASK_NB);
}

static inline bool cpl_is_matched(struct kvm_pmc *pmc)
{
	bool select_os, select_user;
	u64 config;

	if (pmc_is_gp(pmc)) {
		config = pmc->eventsel;
		select_os = config & ARCH_PERFMON_EVENTSEL_OS;
		select_user = config & ARCH_PERFMON_EVENTSEL_USR;
	} else {
		config = fixed_ctrl_field(pmc_to_pmu(pmc)->fixed_ctr_ctrl,
					  pmc->idx - INTEL_PMC_IDX_FIXED);
		select_os = config & 0x1;
		select_user = config & 0x2;
	}

	return (static_call(kvm_x86_get_cpl)(pmc->vcpu) == 0) ? select_os : select_user;
}
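
/*
 * kvm_pmu_trigger_event() below lets the rest of KVM account events that
 * hardware never saw: callers such as the instruction emulator are
 * expected to pass a PERF_COUNT_HW_* id (e.g. PERF_COUNT_HW_INSTRUCTIONS,
 * or PERF_COUNT_HW_BRANCH_INSTRUCTIONS for emulated branches) so that
 * counters the guest programmed for those events still advance.
 */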

void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	int i;

	for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) {
		pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);

		if (!pmc || !pmc_event_is_allowed(pmc))
			continue;

		/* Ignore checks for edge detect, pin control, invert and CMASK bits */
		if (eventsel_match_perf_hw_id(pmc, perf_hw_id) && cpl_is_matched(pmc))
			kvm_pmu_incr_counter(pmc);
	}
}
EXPORT_SYMBOL_GPL(kvm_pmu_trigger_event);

static bool is_masked_filter_valid(const struct kvm_x86_pmu_event_filter *filter)
{
	u64 mask = kvm_pmu_ops.EVENTSEL_EVENT |
		   KVM_PMU_MASKED_ENTRY_UMASK_MASK |
		   KVM_PMU_MASKED_ENTRY_UMASK_MATCH |
		   KVM_PMU_MASKED_ENTRY_EXCLUDE;
	int i;

	for (i = 0; i < filter->nevents; i++) {
		if (filter->events[i] & ~mask)
			return false;
	}

	return true;
}

static void convert_to_masked_filter(struct kvm_x86_pmu_event_filter *filter)
{
	int i, j;

	for (i = 0, j = 0; i < filter->nevents; i++) {
		/*
		 * Skip events that are impossible to match against a guest
		 * event.  When filtering, only the event select + unit mask
		 * of the guest event is used.  To maintain backwards
		 * compatibility, impossible filters can't be rejected :-(
		 */
		if (filter->events[i] & ~(kvm_pmu_ops.EVENTSEL_EVENT |
					  ARCH_PERFMON_EVENTSEL_UMASK))
			continue;
		/*
		 * Convert userspace events to a common in-kernel event so
		 * only one code path is needed to support both events.  For
		 * the in-kernel events use masked events because they are
		 * flexible enough to handle both cases.  To convert to masked
		 * events all that's needed is to add an "all ones" umask_mask
		 * (unmasked filter events don't support EXCLUDE).
		 */
		filter->events[j++] = filter->events[i] |
				      (0xFFULL << KVM_PMU_MASKED_ENTRY_UMASK_MASK_SHIFT);
	}

	filter->nevents = j;
}

static int prepare_filter_lists(struct kvm_x86_pmu_event_filter *filter)
{
	int i;

	if (!(filter->flags & KVM_PMU_EVENT_FLAG_MASKED_EVENTS))
		convert_to_masked_filter(filter);
	else if (!is_masked_filter_valid(filter))
		return -EINVAL;

	/*
	 * Sort entries by event select and includes vs. excludes so that all
	 * entries for a given event select can be processed efficiently during
	 * filtering.  The EXCLUDE flag uses a more significant bit than the
	 * event select, and so the sorted list is also effectively split into
	 * includes and excludes sub-lists.
	 */
	sort(&filter->events, filter->nevents, sizeof(filter->events[0]),
	     filter_sort_cmp, NULL);

	i = filter->nevents;
	/* Find the first EXCLUDE event (only supported for masked events). */
	if (filter->flags & KVM_PMU_EVENT_FLAG_MASKED_EVENTS) {
		for (i = 0; i < filter->nevents; i++) {
			if (filter->events[i] & KVM_PMU_MASKED_ENTRY_EXCLUDE)
				break;
		}
	}

	filter->nr_includes = i;
	filter->nr_excludes = filter->nevents - filter->nr_includes;
	filter->includes = filter->events;
	filter->excludes = filter->events + filter->nr_includes;

	return 0;
}
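
/*
 * Worked example for prepare_filter_lists() above: a hypothetical filter
 * with events {A, B|EXCLUDE, C} (event selects A < B < C) sorts to
 * {A, C, B|EXCLUDE} because EXCLUDE is a more significant sort key than
 * the event select, so includes = events[0..1] and excludes = events[2],
 * which is exactly what is_gp_event_allowed() walks via
 * nr_includes/nr_excludes.
 */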

int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
{
	struct kvm_pmu_event_filter __user *user_filter = argp;
	struct kvm_x86_pmu_event_filter *filter;
	struct kvm_pmu_event_filter tmp;
	struct kvm_vcpu *vcpu;
	unsigned long i;
	size_t size;
	int r;

	if (copy_from_user(&tmp, user_filter, sizeof(tmp)))
		return -EFAULT;

	if (tmp.action != KVM_PMU_EVENT_ALLOW &&
	    tmp.action != KVM_PMU_EVENT_DENY)
		return -EINVAL;

	if (tmp.flags & ~KVM_PMU_EVENT_FLAGS_VALID_MASK)
		return -EINVAL;

	if (tmp.nevents > KVM_PMU_EVENT_FILTER_MAX_EVENTS)
		return -E2BIG;

	size = struct_size(filter, events, tmp.nevents);
	filter = kzalloc(size, GFP_KERNEL_ACCOUNT);
	if (!filter)
		return -ENOMEM;

	filter->action = tmp.action;
	filter->nevents = tmp.nevents;
	filter->fixed_counter_bitmap = tmp.fixed_counter_bitmap;
	filter->flags = tmp.flags;

	r = -EFAULT;
	if (copy_from_user(filter->events, user_filter->events,
			   sizeof(filter->events[0]) * filter->nevents))
		goto cleanup;

	r = prepare_filter_lists(filter);
	if (r)
		goto cleanup;

	mutex_lock(&kvm->lock);
	filter = rcu_replace_pointer(kvm->arch.pmu_event_filter, filter,
				     mutex_is_locked(&kvm->lock));
	mutex_unlock(&kvm->lock);
	synchronize_srcu_expedited(&kvm->srcu);

	BUILD_BUG_ON(sizeof(((struct kvm_pmu *)0)->reprogram_pmi) >
		     sizeof(((struct kvm_pmu *)0)->__reprogram_pmi));

	kvm_for_each_vcpu(i, vcpu, kvm)
		atomic64_set(&vcpu_to_pmu(vcpu)->__reprogram_pmi, -1ull);

	kvm_make_all_cpus_request(kvm, KVM_REQ_PMU);

	r = 0;
cleanup:
	kfree(filter);
	return r;
}