// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/perf_event.h>
#include <linux/perf/arm_pmu.h>
#include <linux/uaccess.h>
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>

#define PERF_ATTR_CFG1_COUNTER_64BIT	BIT(0)

static LIST_HEAD(arm_pmus);
static DEFINE_MUTEX(arm_pmus_lock);

static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc);
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc);
static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc);

bool kvm_supports_guest_pmuv3(void)
{
	guard(mutex)(&arm_pmus_lock);
	return !list_empty(&arm_pmus);
}

static struct kvm_vcpu *kvm_pmc_to_vcpu(const struct kvm_pmc *pmc)
{
	return container_of(pmc, struct kvm_vcpu, arch.pmu.pmc[pmc->idx]);
}

static struct kvm_pmc *kvm_vcpu_idx_to_pmc(struct kvm_vcpu *vcpu, int cnt_idx)
{
	return &vcpu->arch.pmu.pmc[cnt_idx];
}

static u32 __kvm_pmu_event_mask(unsigned int pmuver)
{
	switch (pmuver) {
	case ID_AA64DFR0_EL1_PMUVer_IMP:
		return GENMASK(9, 0);
	case ID_AA64DFR0_EL1_PMUVer_V3P1:
	case ID_AA64DFR0_EL1_PMUVer_V3P4:
	case ID_AA64DFR0_EL1_PMUVer_V3P5:
	case ID_AA64DFR0_EL1_PMUVer_V3P7:
		return GENMASK(15, 0);
	default:		/* Shouldn't be here, just for sanity */
		WARN_ONCE(1, "Unknown PMU version %d\n", pmuver);
		return 0;
	}
}

static u32 kvm_pmu_event_mask(struct kvm *kvm)
{
	u64 dfr0 = kvm_read_vm_id_reg(kvm, SYS_ID_AA64DFR0_EL1);
	u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, dfr0);

	return __kvm_pmu_event_mask(pmuver);
}

u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
{
	u64 mask = ARMV8_PMU_EXCLUDE_EL1 | ARMV8_PMU_EXCLUDE_EL0 |
		   kvm_pmu_event_mask(kvm);

	if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL2, IMP))
		mask |= ARMV8_PMU_INCLUDE_EL2;

	if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL3, IMP))
		mask |= ARMV8_PMU_EXCLUDE_NS_EL0 |
			ARMV8_PMU_EXCLUDE_NS_EL1 |
			ARMV8_PMU_EXCLUDE_EL3;

	return mask;
}

/**
 * kvm_pmc_is_64bit - determine if counter is 64bit
 * @pmc: counter context
 */
static bool kvm_pmc_is_64bit(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);

	return (pmc->idx == ARMV8_PMU_CYCLE_IDX ||
		kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5));
}

static bool kvm_pmc_has_64bit_overflow(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	u64 val = kvm_vcpu_read_pmcr(vcpu);

	if (kvm_pmu_counter_is_hyp(vcpu, pmc->idx))
		return __vcpu_sys_reg(vcpu, MDCR_EL2) & MDCR_EL2_HLP;

	return (pmc->idx < ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LP)) ||
	       (pmc->idx == ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LC));
}
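
/*
 * A counter can only feed a CHAIN event into its successor if it is an
 * even-numbered event counter, its odd-numbered successor exists below the
 * cycle counter, and the counter still overflows at 32 bits.
 */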
static bool kvm_pmu_counter_can_chain(struct kvm_pmc *pmc)
{
	return (!(pmc->idx & 1) && (pmc->idx + 1) < ARMV8_PMU_CYCLE_IDX &&
		!kvm_pmc_has_64bit_overflow(pmc));
}

static u32 counter_index_to_reg(u64 idx)
{
	return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + idx;
}

static u32 counter_index_to_evtreg(u64 idx)
{
	return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + idx;
}

static u64 kvm_pmc_read_evtreg(const struct kvm_pmc *pmc)
{
	return __vcpu_sys_reg(kvm_pmc_to_vcpu(pmc), counter_index_to_evtreg(pmc->idx));
}

static u64 kvm_pmu_get_pmc_value(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	u64 counter, reg, enabled, running;

	reg = counter_index_to_reg(pmc->idx);
	counter = __vcpu_sys_reg(vcpu, reg);

	/*
	 * The real counter value is equal to the value of counter register plus
	 * the value perf event counts.
	 */
	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event, &enabled,
						 &running);

	if (!kvm_pmc_is_64bit(pmc))
		counter = lower_32_bits(counter);

	return counter;
}

/**
 * kvm_pmu_get_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
{
	return kvm_pmu_get_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx));
}

static void kvm_pmu_set_pmc_value(struct kvm_pmc *pmc, u64 val, bool force)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	u64 reg;

	kvm_pmu_release_perf_event(pmc);

	reg = counter_index_to_reg(pmc->idx);

	if (vcpu_mode_is_32bit(vcpu) && pmc->idx != ARMV8_PMU_CYCLE_IDX &&
	    !force) {
		/*
		 * Even with PMUv3p5, AArch32 cannot write to the top
		 * 32bit of the counters. The only possible course of
		 * action is to use PMCR.P, which will reset them to
		 * 0 (the only use of the 'force' parameter).
		 */
		val = lower_32_bits(val);
		val |= __vcpu_sys_reg(vcpu, reg) & GENMASK(63, 32);
	}

	__vcpu_sys_reg(vcpu, reg) = val;

	/* Recreate the perf event to reflect the updated sample_period */
	kvm_pmu_create_perf_event(pmc);
}

/**
 * kvm_pmu_set_counter_value - set PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 * @val: The counter value
 */
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
	kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx), val, false);
}

/**
 * kvm_pmu_set_counter_value_user - set PMU counter value from user
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 * @val: The counter value
 */
void kvm_pmu_set_counter_value_user(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
	kvm_pmu_release_perf_event(kvm_vcpu_idx_to_pmc(vcpu, select_idx));
	__vcpu_sys_reg(vcpu, counter_index_to_reg(select_idx)) = val;
	kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
}

/**
 * kvm_pmu_release_perf_event - remove the perf event
 * @pmc: The PMU counter pointer
 */
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		perf_event_disable(pmc->perf_event);
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
	}
}

/**
 * kvm_pmu_stop_counter - stop PMU counter
 * @pmc: The PMU counter pointer
 *
 * If this counter has been configured to monitor some event, release it here.
 */
static void kvm_pmu_stop_counter(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	u64 reg, val;

	if (!pmc->perf_event)
		return;

	val = kvm_pmu_get_pmc_value(pmc);

	reg = counter_index_to_reg(pmc->idx);

	__vcpu_sys_reg(vcpu, reg) = val;

	kvm_pmu_release_perf_event(pmc);
}

/**
 * kvm_pmu_vcpu_init - assign pmu counter idx for cpu
 * @vcpu: The vcpu pointer
 *
 */
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++)
		pmu->pmc[i].idx = i;
}

/**
 * kvm_pmu_vcpu_destroy - free perf event of PMU for cpu
 * @vcpu: The vcpu pointer
 *
 */
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++)
		kvm_pmu_release_perf_event(kvm_vcpu_idx_to_pmc(vcpu, i));
	irq_work_sync(&vcpu->arch.pmu.overflow_work);
}

static u64 kvm_pmu_hyp_counter_mask(struct kvm_vcpu *vcpu)
{
	unsigned int hpmn, n;

	if (!vcpu_has_nv(vcpu))
		return 0;

	hpmn = SYS_FIELD_GET(MDCR_EL2, HPMN, __vcpu_sys_reg(vcpu, MDCR_EL2));
	n = vcpu->kvm->arch.pmcr_n;

	/*
	 * Programming HPMN to a value greater than PMCR_EL0.N is
	 * CONSTRAINED UNPREDICTABLE. Make the implementation choice that an
	 * UNKNOWN number of counters (in our case, zero) are reserved for EL2.
	 */
	if (hpmn >= n)
		return 0;

	/*
	 * Programming HPMN=0 is CONSTRAINED UNPREDICTABLE if FEAT_HPMN0 isn't
	 * implemented. Since KVM's ability to emulate HPMN=0 does not directly
	 * depend on hardware (all PMU registers are trapped), make the
	 * implementation choice that all counters are included in the second
	 * range reserved for EL2/EL3.
	 */
	return GENMASK(n - 1, hpmn);
}
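
/*
 * A counter belongs to the EL2 ("hyp") range when its index falls in the
 * mask computed above; e.g. with PMCR_EL0.N == 8 and MDCR_EL2.HPMN == 6,
 * counters 6 and 7 are reserved for EL2.
 */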
bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
{
	return kvm_pmu_hyp_counter_mask(vcpu) & BIT(idx);
}

u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu)
{
	u64 mask = kvm_pmu_implemented_counter_mask(vcpu);

	if (!vcpu_has_nv(vcpu) || vcpu_is_el2(vcpu))
		return mask;

	return mask & ~kvm_pmu_hyp_counter_mask(vcpu);
}

u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
{
	u64 val = FIELD_GET(ARMV8_PMU_PMCR_N, kvm_vcpu_read_pmcr(vcpu));

	if (val == 0)
		return BIT(ARMV8_PMU_CYCLE_IDX);
	else
		return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
}

static void kvm_pmc_enable_perf_event(struct kvm_pmc *pmc)
{
	if (!pmc->perf_event) {
		kvm_pmu_create_perf_event(pmc);
		return;
	}

	perf_event_enable(pmc->perf_event);
	if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
		kvm_debug("fail to enable perf event\n");
}

static void kvm_pmc_disable_perf_event(struct kvm_pmc *pmc)
{
	if (pmc->perf_event)
		perf_event_disable(pmc->perf_event);
}

void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
	int i;

	if (!val)
		return;

	for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) {
		struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);

		if (!(val & BIT(i)))
			continue;

		if (kvm_pmu_counter_is_enabled(pmc))
			kvm_pmc_enable_perf_event(pmc);
		else
			kvm_pmc_disable_perf_event(pmc);
	}

	kvm_vcpu_pmu_restore_guest(vcpu);
}

/*
 * Returns the PMU overflow state, which is true if there exists an event
 * counter where the values of the global enable control, PMOVSSET_EL0[n], and
 * PMINTENSET_EL1[n] are all 1.
 */
static bool kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
	u64 reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);

	reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);

	/*
	 * PMCR_EL0.E is the global enable control for event counters available
	 * to EL0 and EL1.
	 */
	if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E))
		reg &= kvm_pmu_hyp_counter_mask(vcpu);

	/*
	 * Otherwise, MDCR_EL2.HPME is the global enable control for event
	 * counters reserved for EL2.
	 */
	if (!(vcpu_read_sys_reg(vcpu, MDCR_EL2) & MDCR_EL2_HPME))
		reg &= ~kvm_pmu_hyp_counter_mask(vcpu);

	return reg;
}
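
/*
 * Recompute the overflow interrupt line level and, when an in-kernel
 * irqchip is in use, propagate any change to the vgic.
 */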
static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	bool overflow;

	overflow = kvm_pmu_overflow_status(vcpu);
	if (pmu->irq_level == overflow)
		return;

	pmu->irq_level = overflow;

	if (likely(irqchip_in_kernel(vcpu->kvm))) {
		int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu,
					      pmu->irq_num, overflow, pmu);
		WARN_ON(ret);
	}
}

bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
	bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU;

	if (likely(irqchip_in_kernel(vcpu->kvm)))
		return false;

	return pmu->irq_level != run_level;
}

/*
 * Reflect the PMU overflow interrupt output level into the kvm_run structure
 */
void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
{
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Populate the PMU bit of the device irq bitmap for user space */
	regs->device_irq_level &= ~KVM_ARM_DEV_PMU;
	if (vcpu->arch.pmu.irq_level)
		regs->device_irq_level |= KVM_ARM_DEV_PMU;
}

/**
 * kvm_pmu_flush_hwstate - flush pmu state to cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the host, and inject
 * an interrupt if that was the case.
 */
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

/**
 * kvm_pmu_sync_hwstate - sync pmu state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the guest, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

/*
 * When perf interrupt is an NMI, we cannot safely notify the vcpu corresponding
 * to the event.
 * This is why we need a callback to do it once outside of the NMI context.
 */
static void kvm_pmu_perf_overflow_notify_vcpu(struct irq_work *work)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(work, struct kvm_vcpu, arch.pmu.overflow_work);
	kvm_vcpu_kick(vcpu);
}

/*
 * Perform an increment on any of the counters described in @mask,
 * generating the overflow if required, and propagate it as a chained
 * event if possible.
 */
static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
				      unsigned long mask, u32 event)
{
	int i;

	if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E))
		return;

	/* Weed out disabled counters */
	mask &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);

	for_each_set_bit(i, &mask, ARMV8_PMU_CYCLE_IDX) {
		struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
		u64 type, reg;

		/* Filter on event type */
		type = __vcpu_sys_reg(vcpu, counter_index_to_evtreg(i));
		type &= kvm_pmu_event_mask(vcpu->kvm);
		if (type != event)
			continue;

		/* Increment this counter */
		reg = __vcpu_sys_reg(vcpu, counter_index_to_reg(i)) + 1;
		if (!kvm_pmc_is_64bit(pmc))
			reg = lower_32_bits(reg);
		__vcpu_sys_reg(vcpu, counter_index_to_reg(i)) = reg;

		/* No overflow? move on */
		if (kvm_pmc_has_64bit_overflow(pmc) ? reg : lower_32_bits(reg))
			continue;

		/* Mark overflow */
		__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);

		if (kvm_pmu_counter_can_chain(pmc))
			kvm_pmu_counter_increment(vcpu, BIT(i + 1),
						  ARMV8_PMUV3_PERFCTR_CHAIN);
	}
}

/* Compute the sample period for a given counter value */
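/*
 * For example, a counter that overflows at 32 bits and currently reads
 * 0xfffffc00 gets a period of (-0xfffffc00) & GENMASK(31, 0) = 0x400, so
 * the backing perf event fires exactly when the emulated counter wraps.
 */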
static u64 compute_period(struct kvm_pmc *pmc, u64 counter)
{
	u64 val;

	if (kvm_pmc_is_64bit(pmc) && kvm_pmc_has_64bit_overflow(pmc))
		val = (-counter) & GENMASK(63, 0);
	else
		val = (-counter) & GENMASK(31, 0);

	return val;
}

/*
 * When the perf event overflows, set the overflow status and inform the vcpu.
 */
static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
				  struct perf_sample_data *data,
				  struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	int idx = pmc->idx;
	u64 period;

	cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);

	/*
	 * Reset the sample period to the architectural limit,
	 * i.e. the point where the counter overflows.
	 */
	period = compute_period(pmc, local64_read(&perf_event->count));

	local64_set(&perf_event->hw.period_left, 0);
	perf_event->attr.sample_period = period;
	perf_event->hw.sample_period = period;

	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);

	if (kvm_pmu_counter_can_chain(pmc))
		kvm_pmu_counter_increment(vcpu, BIT(idx + 1),
					  ARMV8_PMUV3_PERFCTR_CHAIN);

	if (kvm_pmu_overflow_status(vcpu)) {
		kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);

		if (!in_nmi())
			kvm_vcpu_kick(vcpu);
		else
			irq_work_queue(&vcpu->arch.pmu.overflow_work);
	}

	cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
}

/**
 * kvm_pmu_software_increment - do software increment
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMSWINC register
 */
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
{
	kvm_pmu_counter_increment(vcpu, val, ARMV8_PMUV3_PERFCTR_SW_INCR);
}

/**
 * kvm_pmu_handle_pmcr - handle PMCR register
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCR register
 */
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
	int i;

	/* Fixup PMCR_EL0 to reconcile the PMU version and the LP bit */
	if (!kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5))
		val &= ~ARMV8_PMU_PMCR_LP;

	/* Request a reload of the PMU to enable/disable affected counters */
	if ((__vcpu_sys_reg(vcpu, PMCR_EL0) ^ val) & ARMV8_PMU_PMCR_E)
		kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);

	/* The reset bits don't indicate any state, and shouldn't be saved. */
	__vcpu_sys_reg(vcpu, PMCR_EL0) = val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P);

	if (val & ARMV8_PMU_PMCR_C)
		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);

	if (val & ARMV8_PMU_PMCR_P) {
		/*
		 * Unlike other PMU sysregs, the controls in PMCR_EL0 always apply
		 * to the 'guest' range of counters and never the 'hyp' range.
		 */
		unsigned long mask = kvm_pmu_implemented_counter_mask(vcpu) &
				     ~kvm_pmu_hyp_counter_mask(vcpu) &
				     ~BIT(ARMV8_PMU_CYCLE_IDX);

		for_each_set_bit(i, &mask, 32)
			kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true);
	}
}

static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	unsigned int mdcr = __vcpu_sys_reg(vcpu, MDCR_EL2);

	if (!(__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(pmc->idx)))
		return false;

	if (kvm_pmu_counter_is_hyp(vcpu, pmc->idx))
		return mdcr & MDCR_EL2_HPME;

	return kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E;
}
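
/*
 * The guest runs in Non-secure state as far as PMU event filtering is
 * concerned, so an event counts at a given EL only when the exclude bit
 * for that EL and its Non-secure counterpart in the event type register
 * have the same value.
 */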
static bool kvm_pmc_counts_at_el0(struct kvm_pmc *pmc)
{
	u64 evtreg = kvm_pmc_read_evtreg(pmc);
	bool nsu = evtreg & ARMV8_PMU_EXCLUDE_NS_EL0;
	bool u = evtreg & ARMV8_PMU_EXCLUDE_EL0;

	return u == nsu;
}

static bool kvm_pmc_counts_at_el1(struct kvm_pmc *pmc)
{
	u64 evtreg = kvm_pmc_read_evtreg(pmc);
	bool nsk = evtreg & ARMV8_PMU_EXCLUDE_NS_EL1;
	bool p = evtreg & ARMV8_PMU_EXCLUDE_EL1;

	return p == nsk;
}

static bool kvm_pmc_counts_at_el2(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	u64 mdcr = __vcpu_sys_reg(vcpu, MDCR_EL2);

	if (!kvm_pmu_counter_is_hyp(vcpu, pmc->idx) && (mdcr & MDCR_EL2_HPMD))
		return false;

	return kvm_pmc_read_evtreg(pmc) & ARMV8_PMU_INCLUDE_EL2;
}

static int kvm_map_pmu_event(struct kvm *kvm, unsigned int eventsel)
{
	struct arm_pmu *pmu = kvm->arch.arm_pmu;

	/*
	 * The CPU PMU likely isn't PMUv3; let the driver provide a mapping
	 * for the guest's PMUv3 event ID.
	 */
	if (unlikely(pmu->map_pmuv3_event))
		return pmu->map_pmuv3_event(eventsel);

	return eventsel;
}

/**
 * kvm_pmu_create_perf_event - create a perf event for a counter
 * @pmc: Counter context
 */
static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	struct arm_pmu *arm_pmu = vcpu->kvm->arch.arm_pmu;
	struct perf_event *event;
	struct perf_event_attr attr;
	int eventsel;
	u64 evtreg;

	evtreg = kvm_pmc_read_evtreg(pmc);

	kvm_pmu_stop_counter(pmc);
	if (pmc->idx == ARMV8_PMU_CYCLE_IDX)
		eventsel = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
	else
		eventsel = evtreg & kvm_pmu_event_mask(vcpu->kvm);

	/*
	 * Neither SW increment nor chained events need to be backed
	 * by a perf event.
	 */
	if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR ||
	    eventsel == ARMV8_PMUV3_PERFCTR_CHAIN)
		return;

	/*
	 * If we have a filter in place and the event isn't allowed, do
	 * not install a perf event either.
	 */
	if (vcpu->kvm->arch.pmu_filter &&
	    !test_bit(eventsel, vcpu->kvm->arch.pmu_filter))
		return;

	/*
	 * Don't create an event if we're running on hardware that requires
	 * PMUv3 event translation and we couldn't find a valid mapping.
	 */
	eventsel = kvm_map_pmu_event(vcpu->kvm, eventsel);
	if (eventsel < 0)
		return;

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.type = arm_pmu->pmu.type;
	attr.size = sizeof(attr);
	attr.pinned = 1;
	attr.disabled = !kvm_pmu_counter_is_enabled(pmc);
	attr.exclude_user = !kvm_pmc_counts_at_el0(pmc);
	attr.exclude_hv = 1; /* Don't count EL2 events */
	attr.exclude_host = 1; /* Don't count host events */
	attr.config = eventsel;

	/*
	 * Filter events at EL1 (i.e. vEL2) when in a hyp context based on the
	 * guest's EL2 filter.
	 */
	if (unlikely(is_hyp_ctxt(vcpu)))
		attr.exclude_kernel = !kvm_pmc_counts_at_el2(pmc);
	else
		attr.exclude_kernel = !kvm_pmc_counts_at_el1(pmc);

	/*
	 * If counting with a 64bit counter, advertise it to the perf
	 * code, carefully dealing with the initial sample period
	 * which also depends on the overflow.
	 */
	if (kvm_pmc_is_64bit(pmc))
		attr.config1 |= PERF_ATTR_CFG1_COUNTER_64BIT;

	attr.sample_period = compute_period(pmc, kvm_pmu_get_pmc_value(pmc));

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 kvm_pmu_perf_overflow, pmc);

	if (IS_ERR(event)) {
		pr_err_once("kvm: pmu event creation failed %ld\n",
			    PTR_ERR(event));
		return;
	}

	pmc->perf_event = event;
}

/**
 * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
 * @vcpu: The vcpu pointer
 * @data: The data guest writes to PMXEVTYPER_EL0
 * @select_idx: The number of selected counter
 *
 * When OS accesses PMXEVTYPER_EL0, that means it wants to set a PMC to count an
 * event with given hardware event number. Here we call perf_event API to
 * emulate this action and create a kernel perf event for it.
 */
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx)
{
	struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, select_idx);
	u64 reg;

	reg = counter_index_to_evtreg(pmc->idx);
	__vcpu_sys_reg(vcpu, reg) = data & kvm_pmu_evtyper_mask(vcpu->kvm);

	kvm_pmu_create_perf_event(pmc);
}

void kvm_host_pmu_init(struct arm_pmu *pmu)
{
	struct arm_pmu_entry *entry;

	/*
	 * Check the sanitised PMU version for the system, as KVM does not
	 * support implementations where PMUv3 exists on a subset of CPUs.
	 */
	if (!pmuv3_implemented(kvm_arm_pmu_get_pmuver_limit()))
		return;

	guard(mutex)(&arm_pmus_lock);

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return;

	entry->arm_pmu = pmu;
	list_add_tail(&entry->entry, &arm_pmus);
}

static struct arm_pmu *kvm_pmu_probe_armpmu(void)
{
	struct arm_pmu_entry *entry;
	struct arm_pmu *pmu;
	int cpu;

	guard(mutex)(&arm_pmus_lock);

	/*
	 * It is safe to use a stale cpu to iterate the list of PMUs so long as
	 * the same value is used for the entirety of the loop. Given this, and
	 * the fact that no percpu data is used for the lookup there is no need
	 * to disable preemption.
	 *
	 * It is still necessary to get a valid cpu, though, to probe for the
	 * default PMU instance as userspace is not required to specify a PMU
	 * type. In order to uphold the preexisting behavior KVM selects the
	 * PMU instance for the core during vcpu init. A dependent use
	 * case would be a user with disdain of all things big.LITTLE that
	 * affines the VMM to a particular cluster of cores.
	 *
	 * In any case, userspace should just do the sane thing and use the UAPI
	 * to select a PMU type directly. But, be wary of the baggage being
	 * carried here.
	 */
	cpu = raw_smp_processor_id();
	list_for_each_entry(entry, &arm_pmus, entry) {
		pmu = entry->arm_pmu;

		if (cpumask_test_cpu(cpu, &pmu->supported_cpus))
			return pmu;
	}

	return NULL;
}

static u64 __compute_pmceid(struct arm_pmu *pmu, bool pmceid1)
{
	u32 hi[2], lo[2];

	bitmap_to_arr32(lo, pmu->pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
	bitmap_to_arr32(hi, pmu->pmceid_ext_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);

	return ((u64)hi[pmceid1] << 32) | lo[pmceid1];
}

static u64 compute_pmceid0(struct arm_pmu *pmu)
{
	u64 val = __compute_pmceid(pmu, 0);

	/* always support SW_INCR */
	val |= BIT(ARMV8_PMUV3_PERFCTR_SW_INCR);
	/* always support CHAIN */
	val |= BIT(ARMV8_PMUV3_PERFCTR_CHAIN);
	return val;
}

static u64 compute_pmceid1(struct arm_pmu *pmu)
{
	u64 val = __compute_pmceid(pmu, 1);

	/*
	 * Don't advertise STALL_SLOT*, as PMMIR_EL0 is handled
	 * as RAZ
	 */
	val &= ~(BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32) |
		 BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND - 32) |
		 BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND - 32));
	return val;
}
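
/*
 * Assemble the guest-visible PMCEID{0,1}_EL0 value: the lower 32 bits
 * describe the common events and the upper 32 bits the extended common
 * events (IDs starting at 0x4000), with any userspace event filter
 * additionally masking out the events it disallows.
 */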
u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
{
	struct arm_pmu *cpu_pmu = vcpu->kvm->arch.arm_pmu;
	unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
	u64 val, mask = 0;
	int base, i, nr_events;

	if (!pmceid1) {
		val = compute_pmceid0(cpu_pmu);
		base = 0;
	} else {
		val = compute_pmceid1(cpu_pmu);
		base = 32;
	}

	if (!bmap)
		return val;

	nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;

	for (i = 0; i < 32; i += 8) {
		u64 byte;

		byte = bitmap_get_value8(bmap, base + i);
		mask |= byte << i;
		if (nr_events >= (0x4000 + base + 32)) {
			byte = bitmap_get_value8(bmap, 0x4000 + base + i);
			mask |= byte << (32 + i);
		}
	}

	return val & mask;
}

void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)
{
	u64 mask = kvm_pmu_implemented_counter_mask(vcpu);

	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= mask;
	__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= mask;
	__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= mask;

	kvm_pmu_reprogram_counter_mask(vcpu, mask);
}

int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	if (!vcpu->arch.pmu.created)
		return -EINVAL;

	/*
	 * A valid interrupt configuration for the PMU is either to have a
	 * properly configured interrupt number and using an in-kernel
	 * irqchip, or to not have an in-kernel GIC and not set an IRQ.
	 */
	if (irqchip_in_kernel(vcpu->kvm)) {
		int irq = vcpu->arch.pmu.irq_num;
		/*
		 * If we are using an in-kernel vgic, at this point we know
		 * the vgic will be initialized, so we can check the PMU irq
		 * number against the dimensions of the vgic and make sure
		 * it's valid.
		 */
		if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))
			return -EINVAL;
	} else if (kvm_arm_pmu_irq_initialized(vcpu)) {
		return -EINVAL;
	}

	return 0;
}

static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm)) {
		int ret;

		/*
		 * If using the PMU with an in-kernel virtual GIC
		 * implementation, we require the GIC to be already
		 * initialized when initializing the PMU.
		 */
		if (!vgic_initialized(vcpu->kvm))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
					 &vcpu->arch.pmu);
		if (ret)
			return ret;
	}

	init_irq_work(&vcpu->arch.pmu.overflow_work,
		      kvm_pmu_perf_overflow_notify_vcpu);

	vcpu->arch.pmu.created = true;
	return 0;
}

/*
 * For one VM the interrupt type must be the same for each vcpu.
 * As a PPI, the interrupt number is the same for all vcpus,
 * while as an SPI it must be a separate number per vcpu.
 */
static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!kvm_arm_pmu_irq_initialized(vcpu))
			continue;

		if (irq_is_ppi(irq)) {
			if (vcpu->arch.pmu.irq_num != irq)
				return false;
		} else {
			if (vcpu->arch.pmu.irq_num == irq)
				return false;
		}
	}

	return true;
}

/**
 * kvm_arm_pmu_get_max_counters - Return the max number of PMU counters.
 * @kvm: The kvm pointer
 */
u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm)
{
	struct arm_pmu *arm_pmu = kvm->arch.arm_pmu;

	/*
	 * PMUv3 requires that all event counters are capable of counting any
	 * event, though the same may not be true of non-PMUv3 hardware.
	 */
	if (cpus_have_final_cap(ARM64_WORKAROUND_PMUV3_IMPDEF_TRAPS))
		return 1;

	/*
	 * The arm_pmu->cntr_mask considers the fixed counter(s) as well.
	 * Ignore those and return only the general-purpose counters.
	 */
	return bitmap_weight(arm_pmu->cntr_mask, ARMV8_PMU_MAX_GENERAL_COUNTERS);
}

static void kvm_arm_set_pmu(struct kvm *kvm, struct arm_pmu *arm_pmu)
{
	lockdep_assert_held(&kvm->arch.config_lock);

	kvm->arch.arm_pmu = arm_pmu;
	kvm->arch.pmcr_n = kvm_arm_pmu_get_max_counters(kvm);
}

/**
 * kvm_arm_set_default_pmu - No PMU set, get the default one.
 * @kvm: The kvm pointer
 *
 * The observant among you will notice that the supported_cpus
 * mask does not get updated for the default PMU even though it
 * is quite possible the selected instance supports only a
 * subset of cores in the system. This is intentional, and
 * upholds the preexisting behavior on heterogeneous systems
 * where vCPUs can be scheduled on any core but the guest
 * counters could stop working.
 */
int kvm_arm_set_default_pmu(struct kvm *kvm)
{
	struct arm_pmu *arm_pmu = kvm_pmu_probe_armpmu();

	if (!arm_pmu)
		return -ENODEV;

	kvm_arm_set_pmu(kvm, arm_pmu);
	return 0;
}
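
/*
 * Bind the VM to a specific PMU instance, identified by its perf type.
 * This is refused once the VM has run, or if an event filter was already
 * installed against a different PMU.
 */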
static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
{
	struct kvm *kvm = vcpu->kvm;
	struct arm_pmu_entry *entry;
	struct arm_pmu *arm_pmu;
	int ret = -ENXIO;

	lockdep_assert_held(&kvm->arch.config_lock);
	mutex_lock(&arm_pmus_lock);

	list_for_each_entry(entry, &arm_pmus, entry) {
		arm_pmu = entry->arm_pmu;
		if (arm_pmu->pmu.type == pmu_id) {
			if (kvm_vm_has_ran_once(kvm) ||
			    (kvm->arch.pmu_filter && kvm->arch.arm_pmu != arm_pmu)) {
				ret = -EBUSY;
				break;
			}

			kvm_arm_set_pmu(kvm, arm_pmu);
			cpumask_copy(kvm->arch.supported_cpus, &arm_pmu->supported_cpus);
			ret = 0;
			break;
		}
	}

	mutex_unlock(&arm_pmus_lock);
	return ret;
}

int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	struct kvm *kvm = vcpu->kvm;

	lockdep_assert_held(&kvm->arch.config_lock);

	if (!kvm_vcpu_has_pmu(vcpu))
		return -ENODEV;

	if (vcpu->arch.pmu.created)
		return -EBUSY;

	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!irqchip_in_kernel(kvm))
			return -EINVAL;

		if (get_user(irq, uaddr))
			return -EFAULT;

		/* The PMU overflow interrupt can be a PPI or a valid SPI. */
		if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
			return -EINVAL;

		if (!pmu_irq_is_valid(kvm, irq))
			return -EINVAL;

		if (kvm_arm_pmu_irq_initialized(vcpu))
			return -EBUSY;

		kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
		vcpu->arch.pmu.irq_num = irq;
		return 0;
	}
	case KVM_ARM_VCPU_PMU_V3_FILTER: {
		u8 pmuver = kvm_arm_pmu_get_pmuver_limit();
		struct kvm_pmu_event_filter __user *uaddr;
		struct kvm_pmu_event_filter filter;
		int nr_events;

		/*
		 * Allow userspace to specify an event filter for the entire
		 * event range supported by PMUVer of the hardware, rather
		 * than the guest's PMUVer for KVM backward compatibility.
		 */
		nr_events = __kvm_pmu_event_mask(pmuver) + 1;

		uaddr = (struct kvm_pmu_event_filter __user *)(long)attr->addr;

		if (copy_from_user(&filter, uaddr, sizeof(filter)))
			return -EFAULT;

		if (((u32)filter.base_event + filter.nevents) > nr_events ||
		    (filter.action != KVM_PMU_EVENT_ALLOW &&
		     filter.action != KVM_PMU_EVENT_DENY))
			return -EINVAL;

		if (kvm_vm_has_ran_once(kvm))
			return -EBUSY;

		if (!kvm->arch.pmu_filter) {
			kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT);
			if (!kvm->arch.pmu_filter)
				return -ENOMEM;

			/*
			 * The default depends on the first applied filter.
			 * If it allows events, the default is to deny.
			 * Conversely, if the first filter denies a set of
			 * events, the default is to allow.
			 */
			if (filter.action == KVM_PMU_EVENT_ALLOW)
				bitmap_zero(kvm->arch.pmu_filter, nr_events);
			else
				bitmap_fill(kvm->arch.pmu_filter, nr_events);
		}

		if (filter.action == KVM_PMU_EVENT_ALLOW)
			bitmap_set(kvm->arch.pmu_filter, filter.base_event, filter.nevents);
		else
			bitmap_clear(kvm->arch.pmu_filter, filter.base_event, filter.nevents);

		return 0;
	}
	case KVM_ARM_VCPU_PMU_V3_SET_PMU: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int pmu_id;

		if (get_user(pmu_id, uaddr))
			return -EFAULT;

		return kvm_arm_pmu_v3_set_pmu(vcpu, pmu_id);
	}
	case KVM_ARM_VCPU_PMU_V3_INIT:
		return kvm_arm_pmu_v3_init(vcpu);
	}

	return -ENXIO;
}

int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!irqchip_in_kernel(vcpu->kvm))
			return -EINVAL;

		if (!kvm_vcpu_has_pmu(vcpu))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		irq = vcpu->arch.pmu.irq_num;
		return put_user(irq, uaddr);
	}
	}

	return -ENXIO;
}

int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ:
	case KVM_ARM_VCPU_PMU_V3_INIT:
	case KVM_ARM_VCPU_PMU_V3_FILTER:
	case KVM_ARM_VCPU_PMU_V3_SET_PMU:
		if (kvm_vcpu_has_pmu(vcpu))
			return 0;
	}

	return -ENXIO;
}

u8 kvm_arm_pmu_get_pmuver_limit(void)
{
	unsigned int pmuver;

	pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer,
			       read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1));

	/*
	 * Spoof a barebones PMUv3 implementation if the system supports IMPDEF
	 * traps of the PMUv3 sysregs
	 */
	if (cpus_have_final_cap(ARM64_WORKAROUND_PMUV3_IMPDEF_TRAPS))
		return ID_AA64DFR0_EL1_PMUVer_IMP;

	/*
	 * Otherwise, treat IMPLEMENTATION DEFINED functionality as
	 * unimplemented
	 */
	if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
		return 0;

	return min(pmuver, ID_AA64DFR0_EL1_PMUVer_V3P5);
}

/**
 * kvm_vcpu_read_pmcr - Read PMCR_EL0 register for the vCPU
 * @vcpu: The vcpu pointer
 */
u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
{
	u64 pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);

	return u64_replace_bits(pmcr, vcpu->kvm->arch.pmcr_n, ARMV8_PMU_PMCR_N);
}

void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu)
{
	bool reprogrammed = false;
	unsigned long mask;
	int i;

	mask = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
	for_each_set_bit(i, &mask, 32) {
		struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);

		/*
		 * We only need to reconfigure events where the filter is
		 * different at EL1 vs. EL2, as we're multiplexing the true EL1
		 * event filter bit for nested.
		 */
		if (kvm_pmc_counts_at_el1(pmc) == kvm_pmc_counts_at_el2(pmc))
			continue;

		kvm_pmu_create_perf_event(pmc);
		reprogrammed = true;
	}

	if (reprogrammed)
		kvm_vcpu_pmu_restore_guest(vcpu);
}