/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 */

#ifndef __ASM_ARM_KVM_PMU_H
#define __ASM_ARM_KVM_PMU_H

#include <linux/perf_event.h>
#include <linux/perf/arm_pmuv3.h>

#define KVM_ARMV8_PMU_MAX_COUNTERS	32

#if IS_ENABLED(CONFIG_HW_PERF_EVENTS) && IS_ENABLED(CONFIG_KVM)
struct kvm_pmc {
	u8 idx;	/* index into the pmu->pmc array */
	struct perf_event *perf_event;
};

struct kvm_pmu_events {
	u64 events_host;
	u64 events_guest;
};

struct kvm_pmu {
	struct irq_work overflow_work;
	struct kvm_pmu_events events;
	struct kvm_pmc pmc[KVM_ARMV8_PMU_MAX_COUNTERS];
	int irq_num;
	bool created;
	bool irq_level;
};

struct arm_pmu_entry {
	struct list_head entry;
	struct arm_pmu *arm_pmu;
};

DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available);

static __always_inline bool kvm_arm_support_pmu_v3(void)
{
	return static_branch_likely(&kvm_arm_pmu_available);
}

#define kvm_arm_pmu_irq_initialized(v)	((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu);
u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu);
u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_pmu_update_run(struct kvm_vcpu *vcpu);
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx);
void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu);
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);

struct kvm_pmu_events *kvm_get_pmu_events(void);
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_resync_el0(void);

#define kvm_vcpu_has_pmu(vcpu)				\
	(vcpu_has_feature(vcpu, KVM_ARM_VCPU_PMU_V3))

/*
 * Updates the vcpu's view of the pmu events for this cpu.
 * Must be called before every vcpu run after disabling interrupts, to ensure
 * that an interrupt cannot fire and update the structure.
 */
#define kvm_pmu_update_vcpu_events(vcpu)				\
	do {								\
		if (!has_vhe() && kvm_arm_support_pmu_v3())		\
			vcpu->arch.pmu.events = *kvm_get_pmu_events();	\
	} while (0)
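/*
 * Illustrative sketch, not part of the original header: on a non-VHE
 * host, a vcpu-run loop would be expected to use the macro above with
 * interrupts already disabled, along these lines (enter_guest() is a
 * hypothetical stand-in for the real guest-entry helper):
 *
 *	local_irq_disable();
 *	kvm_pmu_update_vcpu_events(vcpu);
 *	ret = enter_guest(vcpu);
 *	local_irq_enable();
 *
 * Taking the snapshot with IRQs off ensures a perf interrupt cannot
 * rewrite the per-cpu kvm_pmu_events structure mid-copy.
 */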

u8 kvm_arm_pmu_get_pmuver_limit(void);
u64 kvm_pmu_evtyper_mask(struct kvm *kvm);
int kvm_arm_set_default_pmu(struct kvm *kvm);
u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm);

u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu);
bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx);
void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu);
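/*
 * Illustrative sketch, not part of the original header: callers are
 * expected to gate per-vcpu PMU emulation on kvm_vcpu_has_pmu(), while
 * kvm_arm_support_pmu_v3() folds the host-wide capability check into a
 * patched static branch, keeping the no-PMU fast path cheap. A
 * hypothetical call site might look like:
 *
 *	if (kvm_vcpu_has_pmu(vcpu))
 *		kvm_pmu_flush_hwstate(vcpu);
 */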
#else
struct kvm_pmu {
};

static inline bool kvm_arm_support_pmu_v3(void)
{
	return false;
}

#define kvm_arm_pmu_irq_initialized(v)	(false)
static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
					    u64 select_idx)
{
	return 0;
}
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
					     u64 select_idx, u64 val) {}
static inline u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline void kvm_pmu_update_run(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
						  u64 data, u64 select_idx) {}
static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
{
	return 0;
}

#define kvm_vcpu_has_pmu(vcpu)		({ false; })
static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu) {}
static inline u8 kvm_arm_pmu_get_pmuver_limit(void)
{
	return 0;
}
static inline u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
{
	return 0;
}
static inline void kvm_vcpu_pmu_resync_el0(void) {}

static inline int kvm_arm_set_default_pmu(struct kvm *kvm)
{
	return -ENODEV;
}

static inline u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm)
{
	return 0;
}

static inline u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
{
	return false;
}

static inline void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu) {}

#endif

#endif