/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Anup Patel <anup.patel@wdc.com>
 */

#ifndef __RISCV_KVM_HOST_H__
#define __RISCV_KVM_HOST_H__

#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/spinlock.h>
#include <asm/hwcap.h>
#include <asm/kvm_aia.h>
#include <asm/ptrace.h>
#include <asm/kvm_vcpu_fp.h>
#include <asm/kvm_vcpu_insn.h>
#include <asm/kvm_vcpu_sbi.h>
#include <asm/kvm_vcpu_timer.h>
#include <asm/kvm_vcpu_pmu.h>

#define KVM_MAX_VCPUS			1024

#define KVM_HALT_POLL_NS_DEFAULT	500000

#define KVM_VCPU_MAX_FEATURES		0

#define KVM_IRQCHIP_NUM_PINS		1024

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_VCPU_RESET		KVM_ARCH_REQ(1)
#define KVM_REQ_UPDATE_HGATP		KVM_ARCH_REQ(2)
#define KVM_REQ_FENCE_I			\
	KVM_ARCH_REQ_FLAGS(3, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HFENCE_GVMA_VMID_ALL	KVM_REQ_TLB_FLUSH
#define KVM_REQ_HFENCE_VVMA_ALL		\
	KVM_ARCH_REQ_FLAGS(4, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HFENCE			\
	KVM_ARCH_REQ_FLAGS(5, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_STEAL_UPDATE		KVM_ARCH_REQ(6)

#define KVM_HEDELEG_DEFAULT		(BIT(EXC_INST_MISALIGNED) | \
					 BIT(EXC_BREAKPOINT)      | \
					 BIT(EXC_SYSCALL)         | \
					 BIT(EXC_INST_PAGE_FAULT) | \
					 BIT(EXC_LOAD_PAGE_FAULT) | \
					 BIT(EXC_STORE_PAGE_FAULT))

#define KVM_HIDELEG_DEFAULT		(BIT(IRQ_VS_SOFT)  | \
					 BIT(IRQ_VS_TIMER) | \
					 BIT(IRQ_VS_EXT))
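/*
 * Illustrative sketch (an assumption, not upstream code): the two defaults
 * above would be the values programmed into the hedeleg/hideleg CSRs so that
 * the listed exceptions and VS-level interrupts are handled directly by the
 * guest instead of trapping to the host. csr_write() and the CSR_* numbers
 * come from <asm/csr.h>; the function name is hypothetical.
 */
static inline void example_program_default_delegation(void)
{
	csr_write(CSR_HEDELEG, KVM_HEDELEG_DEFAULT);
	csr_write(CSR_HIDELEG, KVM_HIDELEG_DEFAULT);
}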

enum kvm_riscv_hfence_type {
	KVM_RISCV_HFENCE_UNKNOWN = 0,
	KVM_RISCV_HFENCE_GVMA_VMID_GPA,
	KVM_RISCV_HFENCE_VVMA_ASID_GVA,
	KVM_RISCV_HFENCE_VVMA_ASID_ALL,
	KVM_RISCV_HFENCE_VVMA_GVA,
};

struct kvm_riscv_hfence {
	enum kvm_riscv_hfence_type type;
	unsigned long asid;
	unsigned long order;
	gpa_t addr;
	gpa_t size;
};

#define KVM_RISCV_VCPU_MAX_HFENCE	64
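/*
 * Illustrative sketch (an assumption, not upstream code): how a G-stage
 * range-invalidation request could be described with struct kvm_riscv_hfence.
 * The function name is hypothetical; 'order' gives the granularity of the
 * addr/size pair as a power-of-two shift, and 'asid' is taken to be unused
 * for GVMA-type requests.
 */
static inline void example_fill_gvma_hfence(struct kvm_riscv_hfence *req,
					    gpa_t gpa, gpa_t gpsz)
{
	req->type = KVM_RISCV_HFENCE_GVMA_VMID_GPA;
	req->asid = 0;
	req->order = PAGE_SHIFT;	/* page-granular range */
	req->addr = gpa;
	req->size = gpsz;
}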

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 ecall_exit_stat;
	u64 wfi_exit_stat;
	u64 wrs_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 csr_exit_user;
	u64 csr_exit_kernel;
	u64 signal_exits;
	u64 exits;
};

struct kvm_arch_memory_slot {
};

struct kvm_vmid {
	/*
	 * Writes to vmid_version and vmid happen with vmid_lock held,
	 * whereas reads happen without any lock held; see the illustrative
	 * reader sketch after this structure.
	 */
	unsigned long vmid_version;
	unsigned long vmid;
};
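/*
 * Illustrative reader sketch (an assumption, not upstream code): since
 * writers update vmid_version under vmid_lock while readers take no lock,
 * a lock-free reader pairs READ_ONCE() with the locked writer to check
 * whether its cached VMID generation is stale. The function name and the
 * 'current_version' parameter are hypothetical.
 */
static inline bool example_vmid_version_stale(struct kvm_vmid *vmid,
					      unsigned long current_version)
{
	return READ_ONCE(vmid->vmid_version) != current_version;
}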

struct kvm_arch {
	/* G-stage vmid */
	struct kvm_vmid vmid;

	/* G-stage page table */
	pgd_t *pgd;
	phys_addr_t pgd_phys;

	/* Guest Timer */
	struct kvm_guest_timer timer;

	/* AIA Guest/VM context */
	struct kvm_aia aia;
};

struct kvm_cpu_trap {
	unsigned long sepc;
	unsigned long scause;
	unsigned long stval;
	unsigned long htval;
	unsigned long htinst;
};

struct kvm_cpu_context {
	unsigned long zero;
	unsigned long ra;
	unsigned long sp;
	unsigned long gp;
	unsigned long tp;
	unsigned long t0;
	unsigned long t1;
	unsigned long t2;
	unsigned long s0;
	unsigned long s1;
	unsigned long a0;
	unsigned long a1;
	unsigned long a2;
	unsigned long a3;
	unsigned long a4;
	unsigned long a5;
	unsigned long a6;
	unsigned long a7;
	unsigned long s2;
	unsigned long s3;
	unsigned long s4;
	unsigned long s5;
	unsigned long s6;
	unsigned long s7;
	unsigned long s8;
	unsigned long s9;
	unsigned long s10;
	unsigned long s11;
	unsigned long t3;
	unsigned long t4;
	unsigned long t5;
	unsigned long t6;
	unsigned long sepc;
	unsigned long sstatus;
	unsigned long hstatus;
	union __riscv_fp_state fp;
	struct __riscv_v_ext_state vector;
};

struct kvm_vcpu_csr {
	unsigned long vsstatus;
	unsigned long vsie;
	unsigned long vstvec;
	unsigned long vsscratch;
	unsigned long vsepc;
	unsigned long vscause;
	unsigned long vstval;
	unsigned long hvip;
	unsigned long vsatp;
	unsigned long scounteren;
	unsigned long senvcfg;
};

struct kvm_vcpu_config {
	u64 henvcfg;
	u64 hstateen0;
	unsigned long hedeleg;
};

struct kvm_vcpu_smstateen_csr {
	unsigned long sstateen0;
};

struct kvm_vcpu_arch {
	/* VCPU ran at least once */
	bool ran_atleast_once;

	/* Last Host CPU on which Guest VCPU exited */
	int last_exit_cpu;

	/* ISA feature bits (similar to MISA) */
	DECLARE_BITMAP(isa, RISCV_ISA_EXT_MAX);

	/* Vendor, Arch, and Implementation details */
	unsigned long mvendorid;
	unsigned long marchid;
	unsigned long mimpid;

	/* SSCRATCH, STVEC, SCOUNTEREN, SENVCFG, and SSTATEEN0 of Host */
	unsigned long host_sscratch;
	unsigned long host_stvec;
	unsigned long host_scounteren;
	unsigned long host_senvcfg;
	unsigned long host_sstateen0;

	/* CPU context of Host */
	struct kvm_cpu_context host_context;

	/* CPU context of Guest VCPU */
	struct kvm_cpu_context guest_context;

	/* CPU CSR context of Guest VCPU */
	struct kvm_vcpu_csr guest_csr;

	/* CPU Smstateen CSR context of Guest VCPU */
	struct kvm_vcpu_smstateen_csr smstateen_csr;

	/* CPU context upon Guest VCPU reset */
	struct kvm_cpu_context guest_reset_context;
	spinlock_t reset_cntx_lock;

	/* CPU CSR context upon Guest VCPU reset */
	struct kvm_vcpu_csr guest_reset_csr;

	/*
	 * VCPU interrupts
	 *
	 * Pending VCPU interrupts are tracked without locks, using atomic
	 * bitops. The irqs_pending bitmap represents pending interrupts,
	 * whereas irqs_pending_mask represents the bits that have changed in
	 * irqs_pending. The scheme is modeled on the multiple-producer,
	 * single-consumer problem, where the consumer is the VCPU itself;
	 * see the illustrative producer sketch after this structure.
	 */
#define KVM_RISCV_VCPU_NR_IRQS	64
	DECLARE_BITMAP(irqs_pending, KVM_RISCV_VCPU_NR_IRQS);
	DECLARE_BITMAP(irqs_pending_mask, KVM_RISCV_VCPU_NR_IRQS);

	/* VCPU Timer */
	struct kvm_vcpu_timer timer;

	/* HFENCE request queue */
	spinlock_t hfence_lock;
	unsigned long hfence_head;
	unsigned long hfence_tail;
	struct kvm_riscv_hfence hfence_queue[KVM_RISCV_VCPU_MAX_HFENCE];

	/* MMIO instruction details */
	struct kvm_mmio_decode mmio_decode;

	/* CSR instruction details */
	struct kvm_csr_decode csr_decode;

	/* SBI context */
	struct kvm_vcpu_sbi_context sbi_context;

	/* AIA VCPU context */
	struct kvm_vcpu_aia aia_context;

	/* Cache pages needed to program page tables with spinlock held */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* VCPU power state */
	struct kvm_mp_state mp_state;
	spinlock_t mp_state_lock;

	/* Don't run the VCPU (blocked) */
	bool pause;

	/* Performance monitoring context */
	struct kvm_pmu pmu_context;

	/* 'static' configurations which are set only once */
	struct kvm_vcpu_config cfg;

	/* SBI steal-time accounting */
	struct {
		gpa_t shmem;
		u64 last_steal;
	} sta;
};
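/*
 * Illustrative producer sketch (an assumption, not upstream code): per the
 * lockless scheme described in struct kvm_vcpu_arch above, a producer first
 * sets the bit in irqs_pending and then flags it in irqs_pending_mask, with
 * a barrier between the two atomic ops so the consumer (the VCPU itself)
 * never observes the "changed" flag before the new value. The function name
 * is hypothetical.
 */
static inline void example_queue_vcpu_irq(struct kvm_vcpu_arch *arch,
					  unsigned int irq)
{
	set_bit(irq, arch->irqs_pending);
	smp_mb__before_atomic();
	set_bit(irq, arch->irqs_pending_mask);
}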

/*
 * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
 * arrived in guest context. For riscv, any event that arrives while a vCPU is
 * loaded is considered to be "in guest".
 */
static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
{
	return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;
}

static inline void kvm_arch_sync_events(struct kvm *kvm) {}

#define KVM_RISCV_GSTAGE_TLB_MIN_ORDER		12

void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
					  gpa_t gpa, gpa_t gpsz,
					  unsigned long order);
void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid);
void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
				     unsigned long order);
void kvm_riscv_local_hfence_gvma_all(void);
void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
					  unsigned long asid,
					  unsigned long gva,
					  unsigned long gvsz,
					  unsigned long order);
void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
					  unsigned long asid);
void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
				     unsigned long gva, unsigned long gvsz,
				     unsigned long order);
void kvm_riscv_local_hfence_vvma_all(unsigned long vmid);

void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu);

void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu);

void kvm_riscv_fence_i(struct kvm *kvm,
		       unsigned long hbase, unsigned long hmask);
void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    gpa_t gpa, gpa_t gpsz,
				    unsigned long order);
void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask);
void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long gva, unsigned long gvsz,
				    unsigned long order, unsigned long asid);
void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long asid);
void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask,
			       unsigned long gva, unsigned long gvsz,
			       unsigned long order);
void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask);
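/*
 * Illustrative usage sketch (an assumption, not upstream code): the hbase/
 * hmask pair of the remote-fence helpers above selects target VCPUs, with
 * hmask read as a bitmap of VCPU ids relative to hbase; an hbase of -1UL is
 * assumed here to select all VCPUs, mirroring the SBI remote-fence hart-mask
 * convention. The function name is hypothetical.
 */
static inline void example_flush_one_guest_page(struct kvm *kvm, gpa_t gpa)
{
	kvm_riscv_hfence_gvma_vmid_gpa(kvm, -1UL, 0, gpa, PAGE_SIZE,
				       KVM_RISCV_GSTAGE_TLB_MIN_ORDER);
}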

int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
			     phys_addr_t hpa, unsigned long size,
			     bool writable, bool in_atomic);
void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa,
			      unsigned long size);
int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
			 struct kvm_memory_slot *memslot,
			 gpa_t gpa, unsigned long hva, bool is_write);
int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm);
void kvm_riscv_gstage_free_pgd(struct kvm *kvm);
void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu);
void __init kvm_riscv_gstage_mode_detect(void);
unsigned long __init kvm_riscv_gstage_mode(void);
int kvm_riscv_gstage_gpa_bits(void);

void __init kvm_riscv_gstage_vmid_detect(void);
unsigned long kvm_riscv_gstage_vmid_bits(void);
int kvm_riscv_gstage_vmid_init(struct kvm *kvm);
bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid);
void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu);

int kvm_riscv_setup_default_irq_routing(struct kvm *kvm, u32 lines);

void __kvm_riscv_unpriv_trap(void);

unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
					 bool read_insn,
					 unsigned long guest_addr,
					 struct kvm_cpu_trap *trap);
void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
				  struct kvm_cpu_trap *trap);
int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
			struct kvm_cpu_trap *trap);

void __kvm_riscv_switch_to(struct kvm_vcpu_arch *vcpu_arch);

void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu);
unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
				    u64 __user *uindices);
int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg);
int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg);

int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu);
bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask);
void __kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
void __kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
bool kvm_riscv_vcpu_stopped(struct kvm_vcpu *vcpu);

void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu);

#endif /* __RISCV_KVM_HOST_H__ */