/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 */

#ifndef __ASM_ARM_KVM_PMU_H
#define __ASM_ARM_KVM_PMU_H

#include <linux/perf_event.h>
#include <linux/perf/arm_pmuv3.h>

#define KVM_ARMV8_PMU_MAX_COUNTERS	32

#if IS_ENABLED(CONFIG_HW_PERF_EVENTS) && IS_ENABLED(CONFIG_KVM)
struct kvm_pmc {
	u8 idx;	/* index into the pmu->pmc array */
	struct perf_event *perf_event;
};

struct kvm_pmu_events {
	u64 events_host;
	u64 events_guest;
};

struct kvm_pmu {
	struct irq_work overflow_work;
	struct kvm_pmu_events events;
	struct kvm_pmc pmc[KVM_ARMV8_PMU_MAX_COUNTERS];
	int irq_num;
	bool created;
	bool irq_level;
};

struct arm_pmu_entry {
	struct list_head entry;
	struct arm_pmu *arm_pmu;
};

DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available);

static __always_inline bool kvm_arm_support_pmu_v3(void)
{
	return static_branch_likely(&kvm_arm_pmu_available);
}

#define kvm_arm_pmu_irq_initialized(v)	((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu);
u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu);
u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_pmu_update_run(struct kvm_vcpu *vcpu);
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx);
void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu);
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);

struct kvm_pmu_events *kvm_get_pmu_events(void);
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_resync_el0(void);

#define kvm_vcpu_has_pmu(vcpu)					\
	(vcpu_has_feature(vcpu, KVM_ARM_VCPU_PMU_V3))

/*
 * Updates the vcpu's view of the pmu events for this cpu.
 * Must be called before every vcpu run after disabling interrupts, to ensure
 * that an interrupt cannot fire and update the structure.
 */
#define kvm_pmu_update_vcpu_events(vcpu)				\
	do {								\
		if (!has_vhe() && kvm_arm_support_pmu_v3())		\
			vcpu->arch.pmu.events = *kvm_get_pmu_events();	\
	} while (0)
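
/*
 * Illustrative sketch (an editorial addition, not kernel code): the call
 * ordering the comment above requires. The caller shape is assumed for
 * illustration; the point is that the snapshot is taken with interrupts
 * disabled, so a PMU interrupt cannot update the kvm_pmu_events structure
 * while it is being copied into the vcpu:
 *
 *	local_irq_disable();
 *	kvm_pmu_update_vcpu_events(vcpu);	// snapshot this CPU's events
 *	...					// enter the guest, handle exit
 *	local_irq_enable();
 */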

u8 kvm_arm_pmu_get_pmuver_limit(void);
u64 kvm_pmu_evtyper_mask(struct kvm *kvm);
int kvm_arm_set_default_pmu(struct kvm *kvm);
u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm);

u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu);
bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx);
void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu);
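
/*
 * Illustrative sketch (an editorial addition, not kernel code): the
 * kvm_arm_pmu_v3_*_attr() hooks above back the vcpu device-attribute API,
 * so userspace is assumed to reach them roughly as below (see
 * Documentation/virt/kvm/devices/vcpu.rst; the interrupt number here is a
 * hypothetical example):
 *
 *	int irq = 23;				// example PPI number
 *	struct kvm_device_attr attr = {
 *		.group	= KVM_ARM_VCPU_PMU_V3_CTRL,
 *		.attr	= KVM_ARM_VCPU_PMU_V3_IRQ,
 *		.addr	= (__u64)&irq,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
 *	// ...followed by KVM_ARM_VCPU_PMU_V3_INIT once configured
 */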
#else
struct kvm_pmu {
};

static inline bool kvm_arm_support_pmu_v3(void)
{
	return false;
}

#define kvm_arm_pmu_irq_initialized(v)	(false)
static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
					    u64 select_idx)
{
	return 0;
}
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
					     u64 select_idx, u64 val) {}
static inline u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline void kvm_pmu_update_run(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
						  u64 data, u64 select_idx) {}
static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
{
	return 0;
}

#define kvm_vcpu_has_pmu(vcpu)		({ false; })
static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu) {}
static inline u8 kvm_arm_pmu_get_pmuver_limit(void)
{
	return 0;
}
static inline u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
{
	return 0;
}
static inline void kvm_vcpu_pmu_resync_el0(void) {}

static inline int kvm_arm_set_default_pmu(struct kvm *kvm)
{
	return -ENODEV;
}

static inline u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm)
{
	return 0;
}

static inline u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
{
	return false;
}

static inline void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu) {}

#endif

#endif