/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 */

#ifndef __ASM_ARM_KVM_PMU_H
#define __ASM_ARM_KVM_PMU_H

#include <linux/perf_event.h>
#include <asm/perf_event.h>

/* The cycle counter occupies the highest counter index. */
#define ARMV8_PMU_CYCLE_IDX		(ARMV8_PMU_MAX_COUNTERS - 1)
/* Adjacent event counters can be chained into one 64-bit counter, hence pairs. */
#define ARMV8_PMU_MAX_COUNTER_PAIRS	((ARMV8_PMU_MAX_COUNTERS + 1) >> 1)

#ifdef CONFIG_HW_PERF_EVENTS

struct kvm_pmc {
	u8 idx;	/* index into the pmu->pmc array */
	struct perf_event *perf_event;
};

struct kvm_pmu {
	int irq_num;				/* PMU overflow interrupt */
	struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
	/* Bitmap of counter pairs chained into single 64-bit counters */
	DECLARE_BITMAP(chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
	bool created;				/* PMU initialisation completed */
	bool irq_level;				/* overflow interrupt line level */
	struct irq_work overflow_work;		/* deferred overflow handling */
};

struct arm_pmu_entry {
	struct list_head entry;
	struct arm_pmu *arm_pmu;
};

/* Set once the host has a usable hardware PMU */
DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available);

static __always_inline bool kvm_arm_support_pmu_v3(void)
{
	return static_branch_likely(&kvm_arm_pmu_available);
}

/* SGIs (0..15) are never valid PMU interrupts; anything higher has been configured. */
#define kvm_arm_pmu_irq_initialized(v)	((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_pmu_update_run(struct kvm_vcpu *vcpu);
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx);
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);
#else
struct kvm_pmu {
};

static inline bool kvm_arm_support_pmu_v3(void)
{
	return false;
}

#define kvm_arm_pmu_irq_initialized(v)	(false)
static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
					    u64 select_idx)
{
	return 0;
}
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
					     u64 select_idx, u64 val) {}
static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline void kvm_pmu_update_run(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
						  u64 data, u64 select_idx) {}
static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
{
	return 0;
}

#endif

#endif
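/*
 * Illustrative sketch only, assuming the usual KVM/arm64 vcpu run loop: the
 * declarations above are typically driven roughly as below. run_guest() is a
 * placeholder name, not a function declared in this header.
 *
 *	kvm_pmu_flush_hwstate(vcpu);		// push emulated PMU state before guest entry
 *	run_guest(vcpu);			// placeholder for the world switch
 *	kvm_pmu_sync_hwstate(vcpu);		// fold state back; may raise the overflow IRQ
 *	if (kvm_pmu_should_notify_user(vcpu))	// with a userspace irqchip, report the
 *		kvm_pmu_update_run(vcpu);	// new IRQ level through the kvm_run area
 */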