xref: /linux/include/kvm/arm_pmu.h (revision 7a5f1cd22d47f8ca4b760b6334378ae42c1bd24b)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright (C) 2015 Linaro Ltd.
4  * Author: Shannon Zhao <shannon.zhao@linaro.org>
5  */
6 
7 #ifndef __ASM_ARM_KVM_PMU_H
8 #define __ASM_ARM_KVM_PMU_H
9 
10 #include <linux/perf_event.h>
11 #include <linux/perf/arm_pmuv3.h>
12 
13 #define KVM_ARMV8_PMU_MAX_COUNTERS	32
14 
15 /* PPI #23 - architecturally specified for GICv5 */
16 #define KVM_ARMV8_PMU_GICV5_IRQ		0x20000017
17 
18 #if IS_ENABLED(CONFIG_HW_PERF_EVENTS) && IS_ENABLED(CONFIG_KVM)
/*
 * State for a single guest PMU counter: its slot in the owning
 * kvm_pmu's pmc[] array and the host perf event (if any) backing it.
 */
struct kvm_pmc {
	u8 idx;	/* index into the pmu->pmc array */
	struct perf_event *perf_event;	/* backing host perf event; presumably NULL when not programmed — confirm in implementation */
};
23 
/*
 * Per-cpu PMU event state, split by execution context. Copied into the
 * vcpu by kvm_pmu_update_vcpu_events() before each run. The exact bit
 * encoding is defined by kvm_get_pmu_events()'s implementation.
 */
struct kvm_pmu_events {
	u64 events_host;	/* events relevant while running host code */
	u64 events_guest;	/* events relevant while running guest code */
};
28 
/* Per-vcpu emulated PMU state (lives at vcpu->arch.pmu). */
struct kvm_pmu {
	struct irq_work overflow_work;	/* deferred work item; presumably schedules overflow handling — confirm in implementation */
	struct kvm_pmu_events events;	/* snapshot of this cpu's host/guest events, refreshed by kvm_pmu_update_vcpu_events() */
	struct kvm_pmc pmc[KVM_ARMV8_PMU_MAX_COUNTERS];	/* one entry per architectural counter */
	int irq_num;	/* PMU interrupt; 0 == not configured (see kvm_arm_pmu_irq_initialized()) */
	bool created;	/* NOTE(review): presumably set once userspace finishes PMU setup — confirm against kvm_arm_pmu_v3_enable() */
	bool irq_level;	/* last interrupt line level synthesized for the guest — TODO confirm */
};
37 
/* List node wrapping a host arm_pmu so it can be kept on a list_head. */
struct arm_pmu_entry {
	struct list_head entry;
	struct arm_pmu *arm_pmu;
};
42 
/*
 * Real PMU emulation interface, available when both CONFIG_HW_PERF_EVENTS
 * and CONFIG_KVM are enabled (see the #if above). Stub versions of the
 * same interface are provided in the #else branch below.
 */
bool kvm_supports_guest_pmuv3(void);
/* An irq_num of 0 means no PMU interrupt has been configured for the vcpu. */
#define kvm_arm_pmu_irq_initialized(v)	((v)->arch.pmu.irq_num != 0)
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
void kvm_pmu_set_counter_value_user(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu);
u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu);
u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_pmu_update_run(struct kvm_vcpu *vcpu);
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx);
void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu);
/* KVM_ARM_VCPU_PMU_V3_CTRL device attribute handlers (userspace ABI). */
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);

/* Host<->guest PMU context tracking helpers. */
struct kvm_pmu_events *kvm_get_pmu_events(void);
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_resync_el0(void);
75 
/* True iff userspace enabled the PMU feature flag on this vcpu. */
#define kvm_vcpu_has_pmu(vcpu)					\
	(vcpu_has_feature(vcpu, KVM_ARM_VCPU_PMU_V3))
78 
/*
 * Updates the vcpu's view of the pmu events for this cpu.
 * Must be called before every vcpu run after disabling interrupts, to ensure
 * that an interrupt cannot fire and update the structure.
 *
 * Only needed on non-VHE systems with a PMUv3-capable CPU, where the hyp
 * code consumes the snapshot held in vcpu->arch.pmu.events.
 */
#define kvm_pmu_update_vcpu_events(vcpu)				\
	do {								\
		if (!has_vhe() && system_supports_pmuv3())		\
			(vcpu)->arch.pmu.events = *kvm_get_pmu_events(); \
	} while (0)
89 
/* PMU feature/ID-register helpers for the real (non-stub) implementation. */
u8 kvm_arm_pmu_get_pmuver_limit(void);
u64 kvm_pmu_evtyper_mask(struct kvm *kvm);
int kvm_arm_set_default_pmu(struct kvm *kvm);
u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm);

u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu);
bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx);
void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu);
#else
/*
 * Stub implementations, used when CONFIG_HW_PERF_EVENTS or CONFIG_KVM is
 * disabled. They keep callers compiling while making the PMU invisible:
 * value reads yield 0, predicates yield false, state updates are no-ops,
 * and the device-attribute ABI reports -ENXIO (attribute not supported).
 */
struct kvm_pmu {
};

static inline bool kvm_supports_guest_pmuv3(void)
{
	return false;
}

#define kvm_arm_pmu_irq_initialized(v)	(false)
static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
					    u64 select_idx)
{
	return 0;
}
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
					     u64 select_idx, u64 val) {}
static inline void kvm_pmu_set_counter_value_user(struct kvm_vcpu *vcpu,
						  u64 select_idx, u64 val) {}
static inline u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline void kvm_pmu_update_run(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
						  u64 data, u64 select_idx) {}
static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
/* Succeeds as a no-op so vcpu setup does not fail without a PMU. */
static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
{
	return 0;
}
162 
/* Statement expression so the stub still swallows its argument cleanly. */
#define kvm_vcpu_has_pmu(vcpu)		({ false; })
static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu) {}
static inline u8 kvm_arm_pmu_get_pmuver_limit(void)
{
	return 0;
}
static inline u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
{
	return 0;
}
static inline void kvm_vcpu_pmu_resync_el0(void) {}

/* No PMU hardware support built in: a default PMU cannot be chosen. */
static inline int kvm_arm_set_default_pmu(struct kvm *kvm)
{
	return -ENODEV;
}

static inline u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm)
{
	return 0;
}

static inline u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
{
	return false;
}

static inline void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu) {}
199 
200 #endif
201 
202 #endif
203