/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2023 Rivos Inc
 *
 * Authors:
 *	Atish Patra <atishp@rivosinc.com>
 */

#ifndef __KVM_VCPU_RISCV_PMU_H
#define __KVM_VCPU_RISCV_PMU_H

#include <linux/perf/riscv_pmu.h>
#include <asm/kvm_vcpu_insn.h>
#include <asm/sbi.h>

#ifdef CONFIG_RISCV_PMU_SBI
#define RISCV_KVM_MAX_FW_CTRS	32
#define RISCV_KVM_MAX_HW_CTRS	32
#define RISCV_KVM_MAX_COUNTERS	(RISCV_KVM_MAX_HW_CTRS + RISCV_KVM_MAX_FW_CTRS)
static_assert(RISCV_KVM_MAX_COUNTERS <= 64);

struct kvm_fw_event {
	/* Current value of the event */
	u64 value;

	/* Event monitoring status */
	bool started;
};

/* Per virtual PMU counter data */
struct kvm_pmc {
	/* Counter index within the vPMU */
	u8 idx;
	/* Backing host perf event, if any */
	struct perf_event *perf_event;
	/* Cached value of the counter */
	u64 counter_val;
	/* SBI counter info exposed to the guest */
	union sbi_pmu_ctr_info cinfo;
	/* Event monitoring status */
	bool started;
	/* Monitoring event ID */
	unsigned long event_idx;
	/* Owning vcpu */
	struct kvm_vcpu *vcpu;
};

/* PMU data structure per vcpu */
struct kvm_pmu {
	struct kvm_pmc pmc[RISCV_KVM_MAX_COUNTERS];
	struct kvm_fw_event fw_event[RISCV_KVM_MAX_FW_CTRS];
	/* Number of virtual firmware counters available */
	int num_fw_ctrs;
	/* Number of virtual hardware counters available */
	int num_hw_ctrs;
	/* Flag to indicate that PMU initialization is done */
	bool init_done;
	/* Bitmap of all the virtual counters in use */
	DECLARE_BITMAP(pmc_in_use, RISCV_KVM_MAX_COUNTERS);
	/* Bitmap of all the virtual counters that have overflowed */
	DECLARE_BITMAP(pmc_overflown, RISCV_KVM_MAX_COUNTERS);
	/* The address of the counter snapshot area (guest physical address) */
	gpa_t snapshot_addr;
	/* The actual data of the snapshot */
	struct riscv_pmu_snapshot_data *sdata;
};

#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu_context)
#define pmu_to_vcpu(pmu) (container_of((pmu), struct kvm_vcpu, arch.pmu_context))

#if defined(CONFIG_32BIT)
#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
{.base = CSR_CYCLEH,	.count = 32,	.func = kvm_riscv_vcpu_pmu_read_hpm }, \
{.base = CSR_CYCLE,	.count = 32,	.func = kvm_riscv_vcpu_pmu_read_hpm },
#else
#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
{.base = CSR_CYCLE,	.count = 32,	.func = kvm_riscv_vcpu_pmu_read_hpm },
#endif
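/*
 * Illustrative sketch (not part of the upstream header): the macro above
 * is written to expand into designated initializers for a CSR dispatch
 * table.  The field names .base, .count and .func come from the macro
 * itself; the struct and table names below are assumptions for
 * illustration only:
 *
 *	struct csr_func {
 *		unsigned int base;
 *		unsigned int count;
 *		int (*func)(struct kvm_vcpu *vcpu, unsigned int csr_num,
 *			    unsigned long *val, unsigned long new_val,
 *			    unsigned long wr_mask);
 *	};
 *
 *	static const struct csr_func csr_funcs[] = {
 *		KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS
 *	};
 */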
int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid);
int kvm_riscv_vcpu_pmu_read_hpm(struct kvm_vcpu *vcpu, unsigned int csr_num,
				unsigned long *val, unsigned long new_val,
				unsigned long wr_mask);

int kvm_riscv_vcpu_pmu_num_ctrs(struct kvm_vcpu *vcpu, struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_ctr_info(struct kvm_vcpu *vcpu, unsigned long cidx,
				struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_ctr_start(struct kvm_vcpu *vcpu, unsigned long ctr_base,
				 unsigned long ctr_mask, unsigned long flags, u64 ival,
				 struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_ctr_stop(struct kvm_vcpu *vcpu, unsigned long ctr_base,
				unsigned long ctr_mask, unsigned long flags,
				struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_base,
				     unsigned long ctr_mask, unsigned long flags,
				     unsigned long eidx, u64 evtdata,
				     struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_fw_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx,
				   struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_fw_ctr_read_hi(struct kvm_vcpu *vcpu, unsigned long cidx,
				      struct kvm_vcpu_sbi_return *retdata);
void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_pmu_snapshot_set_shmem(struct kvm_vcpu *vcpu, unsigned long saddr_low,
					  unsigned long saddr_high, unsigned long flags,
					  struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_event_info(struct kvm_vcpu *vcpu, unsigned long saddr_low,
				  unsigned long saddr_high, unsigned long num_events,
				  unsigned long flags, struct kvm_vcpu_sbi_return *retdata);
void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu);

#else
struct kvm_pmu {
};

/*
 * When the SBI PMU extension is not available, let legacy guests read
 * cycle/instret as zero instead of taking an illegal instruction trap.
 */
static inline int kvm_riscv_vcpu_pmu_read_legacy(struct kvm_vcpu *vcpu, unsigned int csr_num,
						 unsigned long *val, unsigned long new_val,
						 unsigned long wr_mask)
{
	if (csr_num == CSR_CYCLE || csr_num == CSR_INSTRET) {
		*val = 0;
		return KVM_INSN_CONTINUE_NEXT_SEPC;
	} else {
		return KVM_INSN_ILLEGAL_TRAP;
	}
}

#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
{.base = CSR_CYCLE, .count = 3, .func = kvm_riscv_vcpu_pmu_read_legacy },

static inline void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) {}
static inline int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid)
{
	return 0;
}

static inline void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu) {}
static inline void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu) {}
#endif /* CONFIG_RISCV_PMU_SBI */
#endif /* !__KVM_VCPU_RISCV_PMU_H */
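/*
 * Usage sketch (illustrative only): a trap handler that wants to account
 * a firmware event on behalf of the guest would bump the matching counter,
 * for example:
 *
 *	kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_ILLEGAL_INSN);
 *
 * where SBI_PMU_FW_ILLEGAL_INSN is assumed to be one of the standard SBI
 * PMU firmware event IDs from <asm/sbi.h>.  With CONFIG_RISCV_PMU_SBI
 * disabled, the call resolves to the stub above and is a no-op.
 */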