/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2023 Rivos Inc
 *
 * Authors:
 *     Atish Patra <atishp@rivosinc.com>
 */

#ifndef __KVM_VCPU_RISCV_PMU_H
#define __KVM_VCPU_RISCV_PMU_H

#include <linux/perf/riscv_pmu.h>
#include <asm/sbi.h>

#ifdef CONFIG_RISCV_PMU_SBI
#define RISCV_KVM_MAX_FW_CTRS	32
#define RISCV_KVM_MAX_HW_CTRS	32
#define RISCV_KVM_MAX_COUNTERS	(RISCV_KVM_MAX_HW_CTRS + RISCV_KVM_MAX_FW_CTRS)
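/*
 * All counters must be addressable by a single 64-bit bitmask; e.g. the
 * overflow mask in the SBI PMU snapshot area is a u64.
 */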
static_assert(RISCV_KVM_MAX_COUNTERS <= 64);

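/* State of one virtual firmware counter; fw_event[] below is indexed by SBI firmware event id */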
struct kvm_fw_event {
	/* Current value of the event */
	u64 value;

	/* Event monitoring status */
	bool started;
};

/* Per virtual PMU counter data */
struct kvm_pmc {
	u8 idx;
	struct perf_event *perf_event;
	u64 counter_val;
	union sbi_pmu_ctr_info cinfo;
	/* Event monitoring status */
	bool started;
	/* Monitoring event ID */
	unsigned long event_idx;
	struct kvm_vcpu *vcpu;
};

/* PMU data structure per vcpu */
struct kvm_pmu {
	struct kvm_pmc pmc[RISCV_KVM_MAX_COUNTERS];
	struct kvm_fw_event fw_event[RISCV_KVM_MAX_FW_CTRS];
	/* Number of virtual firmware counters available */
	int num_fw_ctrs;
	/* Number of virtual hardware counters available */
	int num_hw_ctrs;
	/* A flag to indicate that PMU initialization is done */
	bool init_done;
	/* Bitmap of all the virtual counters in use */
	DECLARE_BITMAP(pmc_in_use, RISCV_KVM_MAX_COUNTERS);
	/* Bitmap of all the virtual counters that have overflowed */
	DECLARE_BITMAP(pmc_overflown, RISCV_KVM_MAX_COUNTERS);
	/* Guest physical address of the counter snapshot area */
	gpa_t snapshot_addr;
	/* The actual data of the snapshot */
	struct riscv_pmu_snapshot_data *sdata;
};

#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu_context)
#define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu_context))

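/*
 * Table entries for emulating guest accesses to the unprivileged counter
 * CSRs (CSR_CYCLE and friends); on 32-bit, a second range based at
 * CSR_CYCLEH covers the upper halves of the 64-bit counters.
 */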
#if defined(CONFIG_32BIT)
#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
{.base = CSR_CYCLEH,	.count = 31,	.func = kvm_riscv_vcpu_pmu_read_hpm }, \
{.base = CSR_CYCLE,	.count = 31,	.func = kvm_riscv_vcpu_pmu_read_hpm },
#else
#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
{.base = CSR_CYCLE,	.count = 31,	.func = kvm_riscv_vcpu_pmu_read_hpm },
#endif

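/*
 * kvm_riscv_vcpu_pmu_incr_fw() bumps the virtual firmware counter for the
 * given SBI firmware event id, e.g. SBI_PMU_FW_SET_TIMER when KVM handles
 * an SBI set_timer call on behalf of the guest.
 */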
int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid);
int kvm_riscv_vcpu_pmu_read_hpm(struct kvm_vcpu *vcpu, unsigned int csr_num,
				unsigned long *val, unsigned long new_val,
				unsigned long wr_mask);

int kvm_riscv_vcpu_pmu_num_ctrs(struct kvm_vcpu *vcpu, struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_ctr_info(struct kvm_vcpu *vcpu, unsigned long cidx,
				struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_ctr_start(struct kvm_vcpu *vcpu, unsigned long ctr_base,
				 unsigned long ctr_mask, unsigned long flags, u64 ival,
				 struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_ctr_stop(struct kvm_vcpu *vcpu, unsigned long ctr_base,
				unsigned long ctr_mask, unsigned long flags,
				struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_base,
				     unsigned long ctr_mask, unsigned long flags,
				     unsigned long eidx, u64 evtdata,
				     struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_fw_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx,
				   struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_fw_ctr_read_hi(struct kvm_vcpu *vcpu, unsigned long cidx,
				      struct kvm_vcpu_sbi_return *retdata);
void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu);
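/*
 * Register the guest's PMU snapshot shared memory area (an SBI v2.0
 * addition) at the guest physical address passed in saddr_low/saddr_high.
 */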
int kvm_riscv_vcpu_pmu_snapshot_set_shmem(struct kvm_vcpu *vcpu, unsigned long saddr_low,
					  unsigned long saddr_high, unsigned long flags,
					  struct kvm_vcpu_sbi_return *retdata);
void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu);

#else
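/* Stubs to keep callers building when CONFIG_RISCV_PMU_SBI is disabled */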
struct kvm_pmu {
};

#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
{.base = 0,	.count = 0,	.func = NULL },

static inline void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) {}
static inline int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid)
{
	return 0;
}

static inline void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu) {}
static inline void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu) {}
#endif /* CONFIG_RISCV_PMU_SBI */
#endif /* !__KVM_VCPU_RISCV_PMU_H */