1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2023 Rivos Inc 4 * 5 * Authors: 6 * Atish Patra <atishp@rivosinc.com> 7 */ 8 9 #define pr_fmt(fmt) "riscv-kvm-pmu: " fmt 10 #include <linux/errno.h> 11 #include <linux/err.h> 12 #include <linux/kvm_host.h> 13 #include <linux/perf/riscv_pmu.h> 14 #include <asm/csr.h> 15 #include <asm/kvm_vcpu_sbi.h> 16 #include <asm/kvm_vcpu_pmu.h> 17 #include <asm/sbi.h> 18 #include <linux/bitops.h> 19 20 #define kvm_pmu_num_counters(pmu) ((pmu)->num_hw_ctrs + (pmu)->num_fw_ctrs) 21 #define get_event_type(x) (((x) & SBI_PMU_EVENT_IDX_TYPE_MASK) >> 16) 22 #define get_event_code(x) ((x) & SBI_PMU_EVENT_IDX_CODE_MASK) 23 24 static enum perf_hw_id hw_event_perf_map[SBI_PMU_HW_GENERAL_MAX] = { 25 [SBI_PMU_HW_CPU_CYCLES] = PERF_COUNT_HW_CPU_CYCLES, 26 [SBI_PMU_HW_INSTRUCTIONS] = PERF_COUNT_HW_INSTRUCTIONS, 27 [SBI_PMU_HW_CACHE_REFERENCES] = PERF_COUNT_HW_CACHE_REFERENCES, 28 [SBI_PMU_HW_CACHE_MISSES] = PERF_COUNT_HW_CACHE_MISSES, 29 [SBI_PMU_HW_BRANCH_INSTRUCTIONS] = PERF_COUNT_HW_BRANCH_INSTRUCTIONS, 30 [SBI_PMU_HW_BRANCH_MISSES] = PERF_COUNT_HW_BRANCH_MISSES, 31 [SBI_PMU_HW_BUS_CYCLES] = PERF_COUNT_HW_BUS_CYCLES, 32 [SBI_PMU_HW_STALLED_CYCLES_FRONTEND] = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND, 33 [SBI_PMU_HW_STALLED_CYCLES_BACKEND] = PERF_COUNT_HW_STALLED_CYCLES_BACKEND, 34 [SBI_PMU_HW_REF_CPU_CYCLES] = PERF_COUNT_HW_REF_CPU_CYCLES, 35 }; 36 37 static u64 kvm_pmu_get_sample_period(struct kvm_pmc *pmc) 38 { 39 u64 counter_val_mask = GENMASK(pmc->cinfo.width, 0); 40 u64 sample_period; 41 42 if (!pmc->counter_val) 43 sample_period = counter_val_mask; 44 else 45 sample_period = (-pmc->counter_val) & counter_val_mask; 46 47 return sample_period; 48 } 49 50 static u32 kvm_pmu_get_perf_event_type(unsigned long eidx) 51 { 52 enum sbi_pmu_event_type etype = get_event_type(eidx); 53 u32 type = PERF_TYPE_MAX; 54 55 switch (etype) { 56 case SBI_PMU_EVENT_TYPE_HW: 57 type = PERF_TYPE_HARDWARE; 58 break; 59 case SBI_PMU_EVENT_TYPE_CACHE: 60 type = PERF_TYPE_HW_CACHE; 61 break; 62 case SBI_PMU_EVENT_TYPE_RAW: 63 case SBI_PMU_EVENT_TYPE_RAW_V2: 64 case SBI_PMU_EVENT_TYPE_FW: 65 type = PERF_TYPE_RAW; 66 break; 67 default: 68 break; 69 } 70 71 return type; 72 } 73 74 static bool kvm_pmu_is_fw_event(unsigned long eidx) 75 { 76 return get_event_type(eidx) == SBI_PMU_EVENT_TYPE_FW; 77 } 78 79 static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc) 80 { 81 if (pmc->perf_event) { 82 perf_event_disable(pmc->perf_event); 83 perf_event_release_kernel(pmc->perf_event); 84 pmc->perf_event = NULL; 85 } 86 } 87 88 static u64 kvm_pmu_get_perf_event_hw_config(u32 sbi_event_code) 89 { 90 return hw_event_perf_map[sbi_event_code]; 91 } 92 93 static u64 kvm_pmu_get_perf_event_cache_config(u32 sbi_event_code) 94 { 95 u64 config = U64_MAX; 96 unsigned int cache_type, cache_op, cache_result; 97 98 /* All the cache event masks lie within 0xFF. No separate masking is necessary */ 99 cache_type = (sbi_event_code & SBI_PMU_EVENT_CACHE_ID_CODE_MASK) >> 100 SBI_PMU_EVENT_CACHE_ID_SHIFT; 101 cache_op = (sbi_event_code & SBI_PMU_EVENT_CACHE_OP_ID_CODE_MASK) >> 102 SBI_PMU_EVENT_CACHE_OP_SHIFT; 103 cache_result = sbi_event_code & SBI_PMU_EVENT_CACHE_RESULT_ID_CODE_MASK; 104 105 if (cache_type >= PERF_COUNT_HW_CACHE_MAX || 106 cache_op >= PERF_COUNT_HW_CACHE_OP_MAX || 107 cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) 108 return config; 109 110 config = cache_type | (cache_op << 8) | (cache_result << 16); 111 112 return config; 113 } 114 115 static u64 kvm_pmu_get_perf_event_config(unsigned long eidx, uint64_t evt_data) 116 { 117 enum sbi_pmu_event_type etype = get_event_type(eidx); 118 u32 ecode = get_event_code(eidx); 119 u64 config = U64_MAX; 120 121 switch (etype) { 122 case SBI_PMU_EVENT_TYPE_HW: 123 if (ecode < SBI_PMU_HW_GENERAL_MAX) 124 config = kvm_pmu_get_perf_event_hw_config(ecode); 125 break; 126 case SBI_PMU_EVENT_TYPE_CACHE: 127 config = kvm_pmu_get_perf_event_cache_config(ecode); 128 break; 129 case SBI_PMU_EVENT_TYPE_RAW: 130 config = evt_data & RISCV_PMU_RAW_EVENT_MASK; 131 break; 132 case SBI_PMU_EVENT_TYPE_RAW_V2: 133 config = evt_data & RISCV_PMU_RAW_EVENT_V2_MASK; 134 break; 135 case SBI_PMU_EVENT_TYPE_FW: 136 if (ecode < SBI_PMU_FW_MAX) 137 config = (1ULL << 63) | ecode; 138 break; 139 default: 140 break; 141 } 142 143 return config; 144 } 145 146 static int kvm_pmu_get_fixed_pmc_index(unsigned long eidx) 147 { 148 u32 etype = kvm_pmu_get_perf_event_type(eidx); 149 u32 ecode = get_event_code(eidx); 150 151 if (etype != SBI_PMU_EVENT_TYPE_HW) 152 return -EINVAL; 153 154 if (ecode == SBI_PMU_HW_CPU_CYCLES) 155 return 0; 156 else if (ecode == SBI_PMU_HW_INSTRUCTIONS) 157 return 2; 158 else 159 return -EINVAL; 160 } 161 162 static int kvm_pmu_get_programmable_pmc_index(struct kvm_pmu *kvpmu, unsigned long eidx, 163 unsigned long cbase, unsigned long cmask) 164 { 165 int ctr_idx = -1; 166 int i, pmc_idx; 167 int min, max; 168 169 if (kvm_pmu_is_fw_event(eidx)) { 170 /* Firmware counters are mapped 1:1 starting from num_hw_ctrs for simplicity */ 171 min = kvpmu->num_hw_ctrs; 172 max = min + kvpmu->num_fw_ctrs; 173 } else { 174 /* First 3 counters are reserved for fixed counters */ 175 min = 3; 176 max = kvpmu->num_hw_ctrs; 177 } 178 179 for_each_set_bit(i, &cmask, BITS_PER_LONG) { 180 pmc_idx = i + cbase; 181 if ((pmc_idx >= min && pmc_idx < max) && 182 !test_bit(pmc_idx, kvpmu->pmc_in_use)) { 183 ctr_idx = pmc_idx; 184 break; 185 } 186 } 187 188 return ctr_idx; 189 } 190 191 static int pmu_get_pmc_index(struct kvm_pmu *pmu, unsigned long eidx, 192 unsigned long cbase, unsigned long cmask) 193 { 194 int ret; 195 196 /* Fixed counters need to be have fixed mapping as they have different width */ 197 ret = kvm_pmu_get_fixed_pmc_index(eidx); 198 if (ret >= 0) 199 return ret; 200 201 return kvm_pmu_get_programmable_pmc_index(pmu, eidx, cbase, cmask); 202 } 203 204 static int pmu_fw_ctr_read_hi(struct kvm_vcpu *vcpu, unsigned long cidx, 205 unsigned long *out_val) 206 { 207 struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); 208 struct kvm_pmc *pmc; 209 int fevent_code; 210 211 if (!IS_ENABLED(CONFIG_32BIT)) { 212 pr_warn("%s: should be invoked for only RV32\n", __func__); 213 return -EINVAL; 214 } 215 216 if (cidx >= kvm_pmu_num_counters(kvpmu) || cidx == 1) { 217 pr_warn("Invalid counter id [%ld]during read\n", cidx); 218 return -EINVAL; 219 } 220 221 pmc = &kvpmu->pmc[cidx]; 222 223 if (pmc->cinfo.type != SBI_PMU_CTR_TYPE_FW) 224 return -EINVAL; 225 226 fevent_code = get_event_code(pmc->event_idx); 227 pmc->counter_val = kvpmu->fw_event[fevent_code].value; 228 229 *out_val = pmc->counter_val >> 32; 230 231 return 0; 232 } 233 234 static int pmu_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx, 235 unsigned long *out_val) 236 { 237 struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); 238 struct kvm_pmc *pmc; 239 u64 enabled, running; 240 int fevent_code; 241 242 if (cidx >= kvm_pmu_num_counters(kvpmu) || cidx == 1) { 243 pr_warn("Invalid counter id [%ld] during read\n", cidx); 244 return -EINVAL; 245 } 246 247 pmc = &kvpmu->pmc[cidx]; 248 249 if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) { 250 fevent_code = get_event_code(pmc->event_idx); 251 pmc->counter_val = kvpmu->fw_event[fevent_code].value; 252 } else if (pmc->perf_event) { 253 pmc->counter_val += perf_event_read_value(pmc->perf_event, &enabled, &running); 254 } else { 255 return -EINVAL; 256 } 257 *out_val = pmc->counter_val; 258 259 return 0; 260 } 261 262 static int kvm_pmu_validate_counter_mask(struct kvm_pmu *kvpmu, unsigned long ctr_base, 263 unsigned long ctr_mask) 264 { 265 /* Make sure the we have a valid counter mask requested from the caller */ 266 if (!ctr_mask || (ctr_base + __fls(ctr_mask) >= kvm_pmu_num_counters(kvpmu))) 267 return -EINVAL; 268 269 return 0; 270 } 271 272 static void kvm_riscv_pmu_overflow(struct perf_event *perf_event, 273 struct perf_sample_data *data, 274 struct pt_regs *regs) 275 { 276 struct kvm_pmc *pmc = perf_event->overflow_handler_context; 277 struct kvm_vcpu *vcpu = pmc->vcpu; 278 struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); 279 struct riscv_pmu *rpmu = to_riscv_pmu(perf_event->pmu); 280 u64 period; 281 282 /* 283 * Stop the event counting by directly accessing the perf_event. 284 * Otherwise, this needs to deferred via a workqueue. 285 * That will introduce skew in the counter value because the actual 286 * physical counter would start after returning from this function. 287 * It will be stopped again once the workqueue is scheduled 288 */ 289 rpmu->pmu.stop(perf_event, PERF_EF_UPDATE); 290 291 /* 292 * The hw counter would start automatically when this function returns. 293 * Thus, the host may continue to interrupt and inject it to the guest 294 * even without the guest configuring the next event. Depending on the hardware 295 * the host may have some sluggishness only if privilege mode filtering is not 296 * available. In an ideal world, where qemu is not the only capable hardware, 297 * this can be removed. 298 * FYI: ARM64 does this way while x86 doesn't do anything as such. 299 * TODO: Should we keep it for RISC-V ? 300 */ 301 period = -(local64_read(&perf_event->count)); 302 303 local64_set(&perf_event->hw.period_left, 0); 304 perf_event->attr.sample_period = period; 305 perf_event->hw.sample_period = period; 306 307 set_bit(pmc->idx, kvpmu->pmc_overflown); 308 kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_PMU_OVF); 309 310 rpmu->pmu.start(perf_event, PERF_EF_RELOAD); 311 } 312 313 static long kvm_pmu_create_perf_event(struct kvm_pmc *pmc, struct perf_event_attr *attr, 314 unsigned long flags, unsigned long eidx, 315 unsigned long evtdata) 316 { 317 struct perf_event *event; 318 319 kvm_pmu_release_perf_event(pmc); 320 attr->config = kvm_pmu_get_perf_event_config(eidx, evtdata); 321 if (flags & SBI_PMU_CFG_FLAG_CLEAR_VALUE) { 322 //TODO: Do we really want to clear the value in hardware counter 323 pmc->counter_val = 0; 324 } 325 326 /* 327 * Set the default sample_period for now. The guest specified value 328 * will be updated in the start call. 329 */ 330 attr->sample_period = kvm_pmu_get_sample_period(pmc); 331 332 event = perf_event_create_kernel_counter(attr, -1, current, kvm_riscv_pmu_overflow, pmc); 333 if (IS_ERR(event)) { 334 pr_debug("kvm pmu event creation failed for eidx %lx: %ld\n", eidx, PTR_ERR(event)); 335 return PTR_ERR(event); 336 } 337 338 pmc->perf_event = event; 339 if (flags & SBI_PMU_CFG_FLAG_AUTO_START) 340 perf_event_enable(pmc->perf_event); 341 342 return 0; 343 } 344 345 int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid) 346 { 347 struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); 348 struct kvm_fw_event *fevent; 349 350 if (!kvpmu || fid >= SBI_PMU_FW_MAX) 351 return -EINVAL; 352 353 fevent = &kvpmu->fw_event[fid]; 354 if (fevent->started) 355 fevent->value++; 356 357 return 0; 358 } 359 360 int kvm_riscv_vcpu_pmu_read_hpm(struct kvm_vcpu *vcpu, unsigned int csr_num, 361 unsigned long *val, unsigned long new_val, 362 unsigned long wr_mask) 363 { 364 struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); 365 int cidx, ret = KVM_INSN_CONTINUE_NEXT_SEPC; 366 367 if (!kvpmu || !kvpmu->init_done) { 368 /* 369 * In absence of sscofpmf in the platform, the guest OS may use 370 * the legacy PMU driver to read cycle/instret. In that case, 371 * just return 0 to avoid any illegal trap. However, any other 372 * hpmcounter access should result in illegal trap as they must 373 * be access through SBI PMU only. 374 */ 375 if (csr_num == CSR_CYCLE || csr_num == CSR_INSTRET) { 376 *val = 0; 377 return ret; 378 } else { 379 return KVM_INSN_ILLEGAL_TRAP; 380 } 381 } 382 383 /* The counter CSR are read only. Thus, any write should result in illegal traps */ 384 if (wr_mask) 385 return KVM_INSN_ILLEGAL_TRAP; 386 387 cidx = csr_num - CSR_CYCLE; 388 389 if (pmu_ctr_read(vcpu, cidx, val) < 0) 390 return KVM_INSN_ILLEGAL_TRAP; 391 392 return ret; 393 } 394 395 static void kvm_pmu_clear_snapshot_area(struct kvm_vcpu *vcpu) 396 { 397 struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); 398 399 kfree(kvpmu->sdata); 400 kvpmu->sdata = NULL; 401 kvpmu->snapshot_addr = INVALID_GPA; 402 } 403 404 int kvm_riscv_vcpu_pmu_snapshot_set_shmem(struct kvm_vcpu *vcpu, unsigned long saddr_low, 405 unsigned long saddr_high, unsigned long flags, 406 struct kvm_vcpu_sbi_return *retdata) 407 { 408 struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); 409 int snapshot_area_size = sizeof(struct riscv_pmu_snapshot_data); 410 int sbiret = 0; 411 gpa_t saddr; 412 413 if (!kvpmu || flags) { 414 sbiret = SBI_ERR_INVALID_PARAM; 415 goto out; 416 } 417 418 if (saddr_low == SBI_SHMEM_DISABLE && saddr_high == SBI_SHMEM_DISABLE) { 419 kvm_pmu_clear_snapshot_area(vcpu); 420 return 0; 421 } 422 423 saddr = saddr_low; 424 425 if (saddr_high != 0) { 426 if (IS_ENABLED(CONFIG_32BIT)) 427 saddr |= ((gpa_t)saddr_high << 32); 428 else 429 sbiret = SBI_ERR_INVALID_ADDRESS; 430 goto out; 431 } 432 433 kvpmu->sdata = kzalloc(snapshot_area_size, GFP_ATOMIC); 434 if (!kvpmu->sdata) 435 return -ENOMEM; 436 437 /* No need to check writable slot explicitly as kvm_vcpu_write_guest does it internally */ 438 if (kvm_vcpu_write_guest(vcpu, saddr, kvpmu->sdata, snapshot_area_size)) { 439 kfree(kvpmu->sdata); 440 sbiret = SBI_ERR_INVALID_ADDRESS; 441 goto out; 442 } 443 444 kvpmu->snapshot_addr = saddr; 445 446 out: 447 retdata->err_val = sbiret; 448 449 return 0; 450 } 451 452 int kvm_riscv_vcpu_pmu_event_info(struct kvm_vcpu *vcpu, unsigned long saddr_low, 453 unsigned long saddr_high, unsigned long num_events, 454 unsigned long flags, struct kvm_vcpu_sbi_return *retdata) 455 { 456 struct riscv_pmu_event_info *einfo = NULL; 457 int shmem_size = num_events * sizeof(*einfo); 458 gpa_t shmem; 459 u32 eidx, etype; 460 u64 econfig; 461 int ret; 462 463 if (flags != 0 || (saddr_low & (SZ_16 - 1) || num_events == 0)) { 464 ret = SBI_ERR_INVALID_PARAM; 465 goto out; 466 } 467 468 shmem = saddr_low; 469 if (saddr_high != 0) { 470 if (IS_ENABLED(CONFIG_32BIT)) { 471 shmem |= ((gpa_t)saddr_high << 32); 472 } else { 473 ret = SBI_ERR_INVALID_ADDRESS; 474 goto out; 475 } 476 } 477 478 einfo = kzalloc(shmem_size, GFP_KERNEL); 479 if (!einfo) 480 return -ENOMEM; 481 482 ret = kvm_vcpu_read_guest(vcpu, shmem, einfo, shmem_size); 483 if (ret) { 484 ret = SBI_ERR_FAILURE; 485 goto free_mem; 486 } 487 488 for (int i = 0; i < num_events; i++) { 489 eidx = einfo[i].event_idx; 490 etype = kvm_pmu_get_perf_event_type(eidx); 491 econfig = kvm_pmu_get_perf_event_config(eidx, einfo[i].event_data); 492 ret = riscv_pmu_get_event_info(etype, econfig, NULL); 493 einfo[i].output = (ret > 0) ? 1 : 0; 494 } 495 496 ret = kvm_vcpu_write_guest(vcpu, shmem, einfo, shmem_size); 497 if (ret) 498 ret = SBI_ERR_INVALID_ADDRESS; 499 500 free_mem: 501 kfree(einfo); 502 out: 503 retdata->err_val = ret; 504 505 return 0; 506 } 507 508 int kvm_riscv_vcpu_pmu_num_ctrs(struct kvm_vcpu *vcpu, 509 struct kvm_vcpu_sbi_return *retdata) 510 { 511 struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); 512 513 retdata->out_val = kvm_pmu_num_counters(kvpmu); 514 515 return 0; 516 } 517 518 int kvm_riscv_vcpu_pmu_ctr_info(struct kvm_vcpu *vcpu, unsigned long cidx, 519 struct kvm_vcpu_sbi_return *retdata) 520 { 521 struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); 522 523 if (cidx > RISCV_KVM_MAX_COUNTERS || cidx == 1) { 524 retdata->err_val = SBI_ERR_INVALID_PARAM; 525 return 0; 526 } 527 528 retdata->out_val = kvpmu->pmc[cidx].cinfo.value; 529 530 return 0; 531 } 532 533 int kvm_riscv_vcpu_pmu_ctr_start(struct kvm_vcpu *vcpu, unsigned long ctr_base, 534 unsigned long ctr_mask, unsigned long flags, u64 ival, 535 struct kvm_vcpu_sbi_return *retdata) 536 { 537 struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); 538 int i, pmc_index, sbiret = 0; 539 struct kvm_pmc *pmc; 540 int fevent_code; 541 bool snap_flag_set = flags & SBI_PMU_START_FLAG_INIT_SNAPSHOT; 542 543 if (kvm_pmu_validate_counter_mask(kvpmu, ctr_base, ctr_mask) < 0) { 544 sbiret = SBI_ERR_INVALID_PARAM; 545 goto out; 546 } 547 548 if (snap_flag_set) { 549 if (kvpmu->snapshot_addr == INVALID_GPA) { 550 sbiret = SBI_ERR_NO_SHMEM; 551 goto out; 552 } 553 if (kvm_vcpu_read_guest(vcpu, kvpmu->snapshot_addr, kvpmu->sdata, 554 sizeof(struct riscv_pmu_snapshot_data))) { 555 pr_warn("Unable to read snapshot shared memory while starting counters\n"); 556 sbiret = SBI_ERR_FAILURE; 557 goto out; 558 } 559 } 560 /* Start the counters that have been configured and requested by the guest */ 561 for_each_set_bit(i, &ctr_mask, RISCV_MAX_COUNTERS) { 562 pmc_index = i + ctr_base; 563 if (!test_bit(pmc_index, kvpmu->pmc_in_use)) 564 continue; 565 /* The guest started the counter again. Reset the overflow status */ 566 clear_bit(pmc_index, kvpmu->pmc_overflown); 567 pmc = &kvpmu->pmc[pmc_index]; 568 if (flags & SBI_PMU_START_FLAG_SET_INIT_VALUE) { 569 pmc->counter_val = ival; 570 } else if (snap_flag_set) { 571 /* The counter index in the snapshot are relative to the counter base */ 572 pmc->counter_val = kvpmu->sdata->ctr_values[i]; 573 } 574 575 if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) { 576 fevent_code = get_event_code(pmc->event_idx); 577 if (fevent_code >= SBI_PMU_FW_MAX) { 578 sbiret = SBI_ERR_INVALID_PARAM; 579 goto out; 580 } 581 582 /* Check if the counter was already started for some reason */ 583 if (kvpmu->fw_event[fevent_code].started) { 584 sbiret = SBI_ERR_ALREADY_STARTED; 585 continue; 586 } 587 588 kvpmu->fw_event[fevent_code].started = true; 589 kvpmu->fw_event[fevent_code].value = pmc->counter_val; 590 } else if (pmc->perf_event) { 591 if (unlikely(pmc->started)) { 592 sbiret = SBI_ERR_ALREADY_STARTED; 593 continue; 594 } 595 perf_event_period(pmc->perf_event, kvm_pmu_get_sample_period(pmc)); 596 perf_event_enable(pmc->perf_event); 597 pmc->started = true; 598 } else { 599 sbiret = SBI_ERR_INVALID_PARAM; 600 } 601 } 602 603 out: 604 retdata->err_val = sbiret; 605 606 return 0; 607 } 608 609 int kvm_riscv_vcpu_pmu_ctr_stop(struct kvm_vcpu *vcpu, unsigned long ctr_base, 610 unsigned long ctr_mask, unsigned long flags, 611 struct kvm_vcpu_sbi_return *retdata) 612 { 613 struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); 614 int i, pmc_index, sbiret = 0; 615 u64 enabled, running; 616 struct kvm_pmc *pmc; 617 int fevent_code; 618 bool snap_flag_set = flags & SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT; 619 bool shmem_needs_update = false; 620 621 if (kvm_pmu_validate_counter_mask(kvpmu, ctr_base, ctr_mask) < 0) { 622 sbiret = SBI_ERR_INVALID_PARAM; 623 goto out; 624 } 625 626 if (snap_flag_set && kvpmu->snapshot_addr == INVALID_GPA) { 627 sbiret = SBI_ERR_NO_SHMEM; 628 goto out; 629 } 630 631 /* Stop the counters that have been configured and requested by the guest */ 632 for_each_set_bit(i, &ctr_mask, RISCV_MAX_COUNTERS) { 633 pmc_index = i + ctr_base; 634 if (!test_bit(pmc_index, kvpmu->pmc_in_use)) 635 continue; 636 pmc = &kvpmu->pmc[pmc_index]; 637 if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) { 638 fevent_code = get_event_code(pmc->event_idx); 639 if (fevent_code >= SBI_PMU_FW_MAX) { 640 sbiret = SBI_ERR_INVALID_PARAM; 641 goto out; 642 } 643 644 if (!kvpmu->fw_event[fevent_code].started) 645 sbiret = SBI_ERR_ALREADY_STOPPED; 646 647 kvpmu->fw_event[fevent_code].started = false; 648 } else if (pmc->perf_event) { 649 if (pmc->started) { 650 /* Stop counting the counter */ 651 perf_event_disable(pmc->perf_event); 652 pmc->started = false; 653 } else { 654 sbiret = SBI_ERR_ALREADY_STOPPED; 655 } 656 657 if (flags & SBI_PMU_STOP_FLAG_RESET) 658 /* Release the counter if this is a reset request */ 659 kvm_pmu_release_perf_event(pmc); 660 } else { 661 sbiret = SBI_ERR_INVALID_PARAM; 662 } 663 664 if (snap_flag_set && !sbiret) { 665 if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) 666 pmc->counter_val = kvpmu->fw_event[fevent_code].value; 667 else if (pmc->perf_event) 668 pmc->counter_val += perf_event_read_value(pmc->perf_event, 669 &enabled, &running); 670 /* 671 * The counter and overflow indicies in the snapshot region are w.r.to 672 * cbase. Modify the set bit in the counter mask instead of the pmc_index 673 * which indicates the absolute counter index. 674 */ 675 if (test_bit(pmc_index, kvpmu->pmc_overflown)) 676 kvpmu->sdata->ctr_overflow_mask |= BIT(i); 677 kvpmu->sdata->ctr_values[i] = pmc->counter_val; 678 shmem_needs_update = true; 679 } 680 681 if (flags & SBI_PMU_STOP_FLAG_RESET) { 682 pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID; 683 clear_bit(pmc_index, kvpmu->pmc_in_use); 684 clear_bit(pmc_index, kvpmu->pmc_overflown); 685 if (snap_flag_set) { 686 /* 687 * Only clear the given counter as the caller is responsible to 688 * validate both the overflow mask and configured counters. 689 */ 690 kvpmu->sdata->ctr_overflow_mask &= ~BIT(i); 691 shmem_needs_update = true; 692 } 693 } 694 } 695 696 if (shmem_needs_update) 697 kvm_vcpu_write_guest(vcpu, kvpmu->snapshot_addr, kvpmu->sdata, 698 sizeof(struct riscv_pmu_snapshot_data)); 699 700 out: 701 retdata->err_val = sbiret; 702 703 return 0; 704 } 705 706 int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_base, 707 unsigned long ctr_mask, unsigned long flags, 708 unsigned long eidx, u64 evtdata, 709 struct kvm_vcpu_sbi_return *retdata) 710 { 711 int ctr_idx, sbiret = 0; 712 long ret; 713 bool is_fevent; 714 unsigned long event_code; 715 u32 etype = kvm_pmu_get_perf_event_type(eidx); 716 struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); 717 struct kvm_pmc *pmc = NULL; 718 struct perf_event_attr attr = { 719 .type = etype, 720 .size = sizeof(struct perf_event_attr), 721 .pinned = true, 722 .disabled = true, 723 /* 724 * It should never reach here if the platform doesn't support the sscofpmf 725 * extension as mode filtering won't work without it. 726 */ 727 .exclude_host = true, 728 .exclude_hv = true, 729 .exclude_user = !!(flags & SBI_PMU_CFG_FLAG_SET_UINH), 730 .exclude_kernel = !!(flags & SBI_PMU_CFG_FLAG_SET_SINH), 731 .config1 = RISCV_PMU_CONFIG1_GUEST_EVENTS, 732 }; 733 734 if (kvm_pmu_validate_counter_mask(kvpmu, ctr_base, ctr_mask) < 0) { 735 sbiret = SBI_ERR_INVALID_PARAM; 736 goto out; 737 } 738 739 event_code = get_event_code(eidx); 740 is_fevent = kvm_pmu_is_fw_event(eidx); 741 if (is_fevent && event_code >= SBI_PMU_FW_MAX) { 742 sbiret = SBI_ERR_NOT_SUPPORTED; 743 goto out; 744 } 745 746 /* 747 * SKIP_MATCH flag indicates the caller is aware of the assigned counter 748 * for this event. Just do a sanity check if it already marked used. 749 */ 750 if (flags & SBI_PMU_CFG_FLAG_SKIP_MATCH) { 751 if (!test_bit(ctr_base + __ffs(ctr_mask), kvpmu->pmc_in_use)) { 752 sbiret = SBI_ERR_FAILURE; 753 goto out; 754 } 755 ctr_idx = ctr_base + __ffs(ctr_mask); 756 } else { 757 ctr_idx = pmu_get_pmc_index(kvpmu, eidx, ctr_base, ctr_mask); 758 if (ctr_idx < 0) { 759 sbiret = SBI_ERR_NOT_SUPPORTED; 760 goto out; 761 } 762 } 763 764 pmc = &kvpmu->pmc[ctr_idx]; 765 pmc->idx = ctr_idx; 766 767 if (is_fevent) { 768 if (flags & SBI_PMU_CFG_FLAG_AUTO_START) 769 kvpmu->fw_event[event_code].started = true; 770 } else { 771 ret = kvm_pmu_create_perf_event(pmc, &attr, flags, eidx, evtdata); 772 if (ret) { 773 sbiret = SBI_ERR_NOT_SUPPORTED; 774 goto out; 775 } 776 } 777 778 set_bit(ctr_idx, kvpmu->pmc_in_use); 779 pmc->event_idx = eidx; 780 retdata->out_val = ctr_idx; 781 out: 782 retdata->err_val = sbiret; 783 784 return 0; 785 } 786 787 int kvm_riscv_vcpu_pmu_fw_ctr_read_hi(struct kvm_vcpu *vcpu, unsigned long cidx, 788 struct kvm_vcpu_sbi_return *retdata) 789 { 790 int ret; 791 792 ret = pmu_fw_ctr_read_hi(vcpu, cidx, &retdata->out_val); 793 if (ret == -EINVAL) 794 retdata->err_val = SBI_ERR_INVALID_PARAM; 795 796 return 0; 797 } 798 799 int kvm_riscv_vcpu_pmu_fw_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx, 800 struct kvm_vcpu_sbi_return *retdata) 801 { 802 int ret; 803 804 ret = pmu_ctr_read(vcpu, cidx, &retdata->out_val); 805 if (ret == -EINVAL) 806 retdata->err_val = SBI_ERR_INVALID_PARAM; 807 808 return 0; 809 } 810 811 void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) 812 { 813 int i = 0, ret, num_hw_ctrs = 0, hpm_width = 0; 814 struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); 815 struct kvm_pmc *pmc; 816 817 /* 818 * PMU functionality should be only available to guests if privilege mode 819 * filtering is available in the host. Otherwise, guest will always count 820 * events while the execution is in hypervisor mode. 821 */ 822 if (!riscv_isa_extension_available(NULL, SSCOFPMF)) 823 return; 824 825 ret = riscv_pmu_get_hpm_info(&hpm_width, &num_hw_ctrs); 826 if (ret < 0 || !hpm_width || !num_hw_ctrs) 827 return; 828 829 /* 830 * Increase the number of hardware counters to offset the time counter. 831 */ 832 kvpmu->num_hw_ctrs = num_hw_ctrs + 1; 833 kvpmu->num_fw_ctrs = SBI_PMU_FW_MAX; 834 memset(&kvpmu->fw_event, 0, SBI_PMU_FW_MAX * sizeof(struct kvm_fw_event)); 835 kvpmu->snapshot_addr = INVALID_GPA; 836 837 if (kvpmu->num_hw_ctrs > RISCV_KVM_MAX_HW_CTRS) { 838 pr_warn_once("Limiting the hardware counters to 32 as specified by the ISA"); 839 kvpmu->num_hw_ctrs = RISCV_KVM_MAX_HW_CTRS; 840 } 841 842 /* 843 * There is no correlation between the logical hardware counter and virtual counters. 844 * However, we need to encode a hpmcounter CSR in the counter info field so that 845 * KVM can trap n emulate the read. This works well in the migration use case as 846 * KVM doesn't care if the actual hpmcounter is available in the hardware or not. 847 */ 848 for (i = 0; i < kvm_pmu_num_counters(kvpmu); i++) { 849 /* TIME CSR shouldn't be read from perf interface */ 850 if (i == 1) 851 continue; 852 pmc = &kvpmu->pmc[i]; 853 pmc->idx = i; 854 pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID; 855 pmc->vcpu = vcpu; 856 if (i < kvpmu->num_hw_ctrs) { 857 pmc->cinfo.type = SBI_PMU_CTR_TYPE_HW; 858 if (i < 3) 859 /* CY, IR counters */ 860 pmc->cinfo.width = 63; 861 else 862 pmc->cinfo.width = hpm_width; 863 /* 864 * The CSR number doesn't have any relation with the logical 865 * hardware counters. The CSR numbers are encoded sequentially 866 * to avoid maintaining a map between the virtual counter 867 * and CSR number. 868 */ 869 pmc->cinfo.csr = CSR_CYCLE + i; 870 } else { 871 pmc->cinfo.type = SBI_PMU_CTR_TYPE_FW; 872 pmc->cinfo.width = 63; 873 } 874 } 875 876 kvpmu->init_done = true; 877 } 878 879 void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu) 880 { 881 struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); 882 struct kvm_pmc *pmc; 883 int i; 884 885 if (!kvpmu) 886 return; 887 888 for_each_set_bit(i, kvpmu->pmc_in_use, RISCV_KVM_MAX_COUNTERS) { 889 pmc = &kvpmu->pmc[i]; 890 pmc->counter_val = 0; 891 kvm_pmu_release_perf_event(pmc); 892 pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID; 893 } 894 bitmap_zero(kvpmu->pmc_in_use, RISCV_KVM_MAX_COUNTERS); 895 bitmap_zero(kvpmu->pmc_overflown, RISCV_KVM_MAX_COUNTERS); 896 memset(&kvpmu->fw_event, 0, SBI_PMU_FW_MAX * sizeof(struct kvm_fw_event)); 897 kvm_pmu_clear_snapshot_area(vcpu); 898 } 899 900 void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu) 901 { 902 kvm_riscv_vcpu_pmu_deinit(vcpu); 903 } 904