/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_X86_OPS_H
#define __KVM_X86_VMX_X86_OPS_H

#include <linux/kvm_host.h>

#include "x86.h"

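/*
 * Declarations shared within arch/x86/kvm/vmx/: vmx.c (and tdx.c, when
 * built) implement these hooks, and main.c assembles them into the
 * vt_x86_ops dispatch table declared below.
 */
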
__init int vmx_hardware_setup(void);

extern struct kvm_x86_ops vt_x86_ops __initdata;
extern struct kvm_x86_init_ops vt_init_ops __initdata;
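
/*
 * Illustrative sketch, not part of this header: main.c typically wraps a
 * vmx_* and tdx_* pair in a vt_* helper that dispatches on the VM type.
 * The is_td_vcpu() check below is an assumed helper meaning "vCPU belongs
 * to a TDX guest"; the real dispatch layer may differ in detail.
 *
 *	static void vt_flush_tlb_all(struct kvm_vcpu *vcpu)
 *	{
 *		if (is_td_vcpu(vcpu)) {
 *			tdx_flush_tlb_all(vcpu);
 *			return;
 *		}
 *		vmx_flush_tlb_all(vcpu);
 *	}
 */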

void vmx_hardware_unsetup(void);
int vmx_check_processor_compat(void);
int vmx_enable_virtualization_cpu(void);
void vmx_disable_virtualization_cpu(void);
void vmx_emergency_disable_virtualization_cpu(void);
int vmx_vm_init(struct kvm *kvm);
void vmx_vm_destroy(struct kvm *kvm);
int vmx_vcpu_precreate(struct kvm *kvm);
int vmx_vcpu_create(struct kvm_vcpu *vcpu);
int vmx_vcpu_pre_run(struct kvm_vcpu *vcpu);
fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit);
void vmx_vcpu_free(struct kvm_vcpu *vcpu);
void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void vmx_vcpu_put(struct kvm_vcpu *vcpu);
int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath);
void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu);
int vmx_skip_emulated_instruction(struct kvm_vcpu *vcpu);
void vmx_update_emulated_instruction(struct kvm_vcpu *vcpu);
int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
#ifdef CONFIG_KVM_SMM
int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection);
int vmx_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram);
int vmx_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram);
void vmx_enable_smi_window(struct kvm_vcpu *vcpu);
#endif
int vmx_check_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
				  void *insn, int insn_len);
int vmx_check_intercept(struct kvm_vcpu *vcpu,
			struct x86_instruction_info *info,
			enum x86_intercept_stage stage,
			struct x86_exception *exception);
bool vmx_apic_init_signal_blocked(struct kvm_vcpu *vcpu);
void vmx_migrate_timers(struct kvm_vcpu *vcpu);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu);
void vmx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
			   int trig_mode, int vector);
void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu);
bool vmx_has_emulated_msr(struct kvm *kvm, u32 index);
void vmx_msr_filter_changed(struct kvm_vcpu *vcpu);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
int vmx_get_feature_msr(u32 msr, u64 *data);
int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
bool vmx_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);
void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
bool vmx_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val);
void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val);
void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu);
void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
bool vmx_get_if_flag(struct kvm_vcpu *vcpu);
void vmx_flush_tlb_all(struct kvm_vcpu *vcpu);
void vmx_flush_tlb_current(struct kvm_vcpu *vcpu);
void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr);
void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu);
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
void vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall);
void vmx_inject_irq(struct kvm_vcpu *vcpu, bool reinjected);
void vmx_inject_nmi(struct kvm_vcpu *vcpu);
void vmx_inject_exception(struct kvm_vcpu *vcpu);
void vmx_cancel_injection(struct kvm_vcpu *vcpu);
int vmx_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection);
int vmx_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection);
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
void vmx_enable_nmi_window(struct kvm_vcpu *vcpu);
void vmx_enable_irq_window(struct kvm_vcpu *vcpu);
void vmx_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr);
void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu);
void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
int vmx_set_identity_map_addr(struct kvm *kvm, u64 ident_addr);
u8 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);

void vmx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
		       u64 *info1, u64 *info2, u32 *intr_info, u32 *error_code);
void vmx_get_entry_info(struct kvm_vcpu *vcpu, u32 *intr_info, u32 *error_code);

u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);
void vmx_write_tsc_offset(struct kvm_vcpu *vcpu);
void vmx_write_tsc_multiplier(struct kvm_vcpu *vcpu);
void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);
#ifdef CONFIG_X86_64
int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
		     bool *expired);
void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu);
#endif
void vmx_setup_mce(struct kvm_vcpu *vcpu);

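/*
 * TDX hooks: real declarations when CONFIG_KVM_INTEL_TDX is enabled,
 * no-op and -EOPNOTSUPP stubs otherwise, so that callers in main.c never
 * need their own #ifdefs.
 */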
#ifdef CONFIG_KVM_INTEL_TDX
void tdx_disable_virtualization_cpu(void);
int tdx_vm_init(struct kvm *kvm);
void tdx_mmu_release_hkid(struct kvm *kvm);
void tdx_vm_destroy(struct kvm *kvm);
int tdx_vm_ioctl(struct kvm *kvm, void __user *argp);

int tdx_vcpu_create(struct kvm_vcpu *vcpu);
void tdx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
void tdx_vcpu_free(struct kvm_vcpu *vcpu);
void tdx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
int tdx_vcpu_pre_run(struct kvm_vcpu *vcpu);
fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit);
void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void tdx_vcpu_put(struct kvm_vcpu *vcpu);
bool tdx_protected_apic_has_interrupt(struct kvm_vcpu *vcpu);
int tdx_handle_exit(struct kvm_vcpu *vcpu,
		enum exit_fastpath_completion fastpath);

void tdx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
			   int trig_mode, int vector);
void tdx_inject_nmi(struct kvm_vcpu *vcpu);
void tdx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
		u64 *info1, u64 *info2, u32 *intr_info, u32 *error_code);
bool tdx_has_emulated_msr(u32 index);
int tdx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
int tdx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);

int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp);

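/*
 * Secure-EPT (S-EPT) callbacks; assumed context: the MMU uses these to
 * propagate private-memory mappings into the TDX module's protected page
 * tables, which KVM cannot write directly.
 */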
int tdx_sept_link_private_spt(struct kvm *kvm, gfn_t gfn,
			      enum pg_level level, void *private_spt);
int tdx_sept_free_private_spt(struct kvm *kvm, gfn_t gfn,
			      enum pg_level level, void *private_spt);
int tdx_sept_set_private_spte(struct kvm *kvm, gfn_t gfn,
			      enum pg_level level, kvm_pfn_t pfn);
int tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
				 enum pg_level level, kvm_pfn_t pfn);

void tdx_flush_tlb_current(struct kvm_vcpu *vcpu);
void tdx_flush_tlb_all(struct kvm_vcpu *vcpu);
void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);
int tdx_gmem_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn);
#else
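/*
 * CONFIG_KVM_INTEL_TDX=n: stub everything out.  Functions that can fail
 * return -EOPNOTSUPP (the MSR stubs return a generic non-zero failure),
 * the rest are no-ops.
 */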
static inline void tdx_disable_virtualization_cpu(void) {}
static inline int tdx_vm_init(struct kvm *kvm) { return -EOPNOTSUPP; }
static inline void tdx_mmu_release_hkid(struct kvm *kvm) {}
static inline void tdx_vm_destroy(struct kvm *kvm) {}
static inline int tdx_vm_ioctl(struct kvm *kvm, void __user *argp) { return -EOPNOTSUPP; }

static inline int tdx_vcpu_create(struct kvm_vcpu *vcpu) { return -EOPNOTSUPP; }
static inline void tdx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) {}
static inline void tdx_vcpu_free(struct kvm_vcpu *vcpu) {}
static inline void tdx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) {}
static inline int tdx_vcpu_pre_run(struct kvm_vcpu *vcpu) { return -EOPNOTSUPP; }
static inline fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
{
	return EXIT_FASTPATH_NONE;
}
static inline void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) {}
static inline void tdx_vcpu_put(struct kvm_vcpu *vcpu) {}
static inline bool tdx_protected_apic_has_interrupt(struct kvm_vcpu *vcpu) { return false; }
static inline int tdx_handle_exit(struct kvm_vcpu *vcpu,
		enum exit_fastpath_completion fastpath) { return 0; }

static inline void tdx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
					 int trig_mode, int vector) {}
static inline void tdx_inject_nmi(struct kvm_vcpu *vcpu) {}
static inline void tdx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason, u64 *info1,
				     u64 *info2, u32 *intr_info, u32 *error_code) {}
static inline bool tdx_has_emulated_msr(u32 index) { return false; }
static inline int tdx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) { return 1; }
static inline int tdx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) { return 1; }

static inline int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp) { return -EOPNOTSUPP; }

static inline int tdx_sept_link_private_spt(struct kvm *kvm, gfn_t gfn,
					    enum pg_level level,
					    void *private_spt)
{
	return -EOPNOTSUPP;
}

static inline int tdx_sept_free_private_spt(struct kvm *kvm, gfn_t gfn,
					    enum pg_level level,
					    void *private_spt)
{
	return -EOPNOTSUPP;
}

static inline int tdx_sept_set_private_spte(struct kvm *kvm, gfn_t gfn,
					    enum pg_level level,
					    kvm_pfn_t pfn)
{
	return -EOPNOTSUPP;
}

static inline int tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
					       enum pg_level level,
					       kvm_pfn_t pfn)
{
	return -EOPNOTSUPP;
}

static inline void tdx_flush_tlb_current(struct kvm_vcpu *vcpu) {}
static inline void tdx_flush_tlb_all(struct kvm_vcpu *vcpu) {}
static inline void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level) {}
static inline int tdx_gmem_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn) { return 0; }
#endif

#endif /* __KVM_X86_VMX_X86_OPS_H */