/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2023 Rivos Inc
 *
 * Authors:
 *     Atish Patra <atishp@rivosinc.com>
 */

#ifndef __KVM_VCPU_RISCV_PMU_H
#define __KVM_VCPU_RISCV_PMU_H

#include <linux/perf/riscv_pmu.h>
#include <asm/kvm_vcpu_insn.h>
#include <asm/sbi.h>

#ifdef CONFIG_RISCV_PMU_SBI
#define RISCV_KVM_MAX_FW_CTRS	32
#define RISCV_KVM_MAX_HW_CTRS	32
#define RISCV_KVM_MAX_COUNTERS	(RISCV_KVM_MAX_HW_CTRS + RISCV_KVM_MAX_FW_CTRS)
static_assert(RISCV_KVM_MAX_COUNTERS <= 64);

struct kvm_fw_event {
	/* Current value of the event */
	u64 value;

	/* Event monitoring status */
	bool started;
};

/* Data for each virtual PMU counter */
struct kvm_pmc {
	u8 idx;
	struct perf_event *perf_event;
	u64 counter_val;
	union sbi_pmu_ctr_info cinfo;
	/* Event monitoring status */
	bool started;
	/* Monitoring event ID */
	unsigned long event_idx;
	struct kvm_vcpu *vcpu;
};

/* PMU data structure per vcpu */
struct kvm_pmu {
	struct kvm_pmc pmc[RISCV_KVM_MAX_COUNTERS];
	struct kvm_fw_event fw_event[RISCV_KVM_MAX_FW_CTRS];
	/* Number of virtual firmware counters available */
	int num_fw_ctrs;
	/* Number of virtual hardware counters available */
	int num_hw_ctrs;
	/* Flag indicating that PMU initialization is done */
	bool init_done;
	/* Bitmap of all the virtual counters in use */
	DECLARE_BITMAP(pmc_in_use, RISCV_KVM_MAX_COUNTERS);
	/* Bitmap of all the virtual counters that have overflowed */
	DECLARE_BITMAP(pmc_overflown, RISCV_KVM_MAX_COUNTERS);
	/* The address of the counter snapshot area (guest physical address) */
	gpa_t snapshot_addr;
	/* The actual data of the snapshot */
	struct riscv_pmu_snapshot_data *sdata;
};

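/* Convert between a vCPU and the PMU context embedded in its arch state */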
#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu_context)
#define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu_context))

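/*
 * Entries for KVM's counter CSR emulation table: the hardware performance
 * counter range starting at CSR_CYCLE (plus, on 32-bit, the CSR_CYCLEH
 * upper-half range) is routed to kvm_riscv_vcpu_pmu_read_hpm().
 */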
#if defined(CONFIG_32BIT)
#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
{.base = CSR_CYCLEH,	.count = 32,	.func = kvm_riscv_vcpu_pmu_read_hpm }, \
{.base = CSR_CYCLE,	.count = 32,	.func = kvm_riscv_vcpu_pmu_read_hpm },
#else
#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
{.base = CSR_CYCLE,	.count = 32,	.func = kvm_riscv_vcpu_pmu_read_hpm },
#endif

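/*
 * kvm_riscv_vcpu_pmu_incr_fw() bumps a firmware event counter (@fid is one of
 * the SBI firmware event IDs); kvm_riscv_vcpu_pmu_read_hpm() backs guest reads
 * of the hardware performance counter CSRs.
 */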
int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid);
int kvm_riscv_vcpu_pmu_read_hpm(struct kvm_vcpu *vcpu, unsigned int csr_num,
				unsigned long *val, unsigned long new_val,
				unsigned long wr_mask);

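/*
 * Handlers backing the SBI PMU extension calls: counter enumeration and info,
 * start/stop, event configuration and matching, firmware counter reads, and
 * counter snapshot shared memory setup.
 */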
int kvm_riscv_vcpu_pmu_num_ctrs(struct kvm_vcpu *vcpu, struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_ctr_info(struct kvm_vcpu *vcpu, unsigned long cidx,
				struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_ctr_start(struct kvm_vcpu *vcpu, unsigned long ctr_base,
				 unsigned long ctr_mask, unsigned long flags, u64 ival,
				 struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_ctr_stop(struct kvm_vcpu *vcpu, unsigned long ctr_base,
				unsigned long ctr_mask, unsigned long flags,
				struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_base,
				     unsigned long ctr_mask, unsigned long flags,
				     unsigned long eidx, u64 evtdata,
				     struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_fw_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx,
				struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_fw_ctr_read_hi(struct kvm_vcpu *vcpu, unsigned long cidx,
				      struct kvm_vcpu_sbi_return *retdata);
void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_pmu_snapshot_set_shmem(struct kvm_vcpu *vcpu, unsigned long saddr_low,
				      unsigned long saddr_high, unsigned long flags,
				      struct kvm_vcpu_sbi_return *retdata);
void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu);

#else
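/* CONFIG_RISCV_PMU_SBI is not enabled: provide an empty PMU context and no-op stubs */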
struct kvm_pmu {
};

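/*
 * Legacy fallback: only the cycle and instret CSRs are handled and read as
 * zero; accesses to any other counter CSR are forwarded to the guest as an
 * illegal instruction trap.
 */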
static inline int kvm_riscv_vcpu_pmu_read_legacy(struct kvm_vcpu *vcpu, unsigned int csr_num,
						 unsigned long *val, unsigned long new_val,
						 unsigned long wr_mask)
{
	if (csr_num == CSR_CYCLE || csr_num == CSR_INSTRET) {
		*val = 0;
		return KVM_INSN_CONTINUE_NEXT_SEPC;
	} else {
		return KVM_INSN_ILLEGAL_TRAP;
	}
}

#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
{.base = CSR_CYCLE,	.count = 3,	.func = kvm_riscv_vcpu_pmu_read_legacy },

static inline void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) {}
static inline int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid)
{
	return 0;
}

static inline void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu) {}
static inline void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu) {}
#endif /* CONFIG_RISCV_PMU_SBI */
#endif /* !__KVM_VCPU_RISCV_PMU_H */