/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Anup Patel <anup.patel@wdc.com>
 */

#ifndef __RISCV_KVM_HOST_H__
#define __RISCV_KVM_HOST_H__

#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/spinlock.h>
#include <asm/hwcap.h>
#include <asm/kvm_aia.h>
#include <asm/ptrace.h>
#include <asm/kvm_vcpu_fp.h>
#include <asm/kvm_vcpu_insn.h>
#include <asm/kvm_vcpu_sbi.h>
#include <asm/kvm_vcpu_timer.h>
#include <asm/kvm_vcpu_pmu.h>

#define KVM_MAX_VCPUS			1024

#define KVM_HALT_POLL_NS_DEFAULT	500000

#define KVM_VCPU_MAX_FEATURES		0

#define KVM_IRQCHIP_NUM_PINS		1024

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_VCPU_RESET		KVM_ARCH_REQ(1)
#define KVM_REQ_UPDATE_HGATP		KVM_ARCH_REQ(2)
#define KVM_REQ_FENCE_I			\
	KVM_ARCH_REQ_FLAGS(3, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HFENCE_GVMA_VMID_ALL	KVM_REQ_TLB_FLUSH
#define KVM_REQ_HFENCE_VVMA_ALL		\
	KVM_ARCH_REQ_FLAGS(4, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HFENCE			\
	KVM_ARCH_REQ_FLAGS(5, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
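
/*
 * A minimal sketch of how these arch requests are used, via the generic
 * KVM request API (kvm_make_request()/kvm_check_request() from
 * <linux/kvm_host.h>); handle_sleep_request() is a hypothetical helper:
 *
 *	kvm_make_request(KVM_REQ_SLEEP, vcpu);		// any CPU
 *	kvm_vcpu_kick(vcpu);
 *	...
 *	if (kvm_check_request(KVM_REQ_SLEEP, vcpu))	// VCPU run loop
 *		handle_sleep_request(vcpu);
 */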

enum kvm_riscv_hfence_type {
	KVM_RISCV_HFENCE_UNKNOWN = 0,
	KVM_RISCV_HFENCE_GVMA_VMID_GPA,
	KVM_RISCV_HFENCE_VVMA_ASID_GVA,
	KVM_RISCV_HFENCE_VVMA_ASID_ALL,
	KVM_RISCV_HFENCE_VVMA_GVA,
};

struct kvm_riscv_hfence {
	enum kvm_riscv_hfence_type type;
	unsigned long asid;
	unsigned long order;
	gpa_t addr;
	gpa_t size;
};

#define KVM_RISCV_VCPU_MAX_HFENCE	64

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 ecall_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 csr_exit_user;
	u64 csr_exit_kernel;
	u64 signal_exits;
	u64 exits;
};

struct kvm_arch_memory_slot {
};

struct kvm_vmid {
	/*
	 * Writes to vmid_version and vmid happen with vmid_lock held
	 * whereas reads happen without any lock held.
	 */
	unsigned long vmid_version;
	unsigned long vmid;
};
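
/*
 * A sketch of the lockless read side, assuming a global generation
 * counter (vmid_version) owned by the VMID allocator: readers compare
 * snapshots only, so a stale value is harmless and simply forces one
 * more trip through the locked update path:
 *
 *	if (READ_ONCE(vmid->vmid_version) != READ_ONCE(vmid_version)) {
 *		spin_lock(&vmid_lock);
 *		... allocate a fresh vmid, then publish vmid_version ...
 *		spin_unlock(&vmid_lock);
 *	}
 */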

struct kvm_arch {
	/* G-stage vmid */
	struct kvm_vmid vmid;

	/* G-stage page table */
	pgd_t *pgd;
	phys_addr_t pgd_phys;

	/* Guest Timer */
	struct kvm_guest_timer timer;

	/* AIA Guest/VM context */
	struct kvm_aia aia;
};

struct kvm_cpu_trap {
	unsigned long sepc;
	unsigned long scause;
	unsigned long stval;
	unsigned long htval;
	unsigned long htinst;
};
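
/*
 * A sketch of how this struct is typically filled: after returning from
 * __kvm_riscv_switch_to(), the run loop snapshots the trap CSRs before
 * passing the trap to kvm_riscv_vcpu_exit() (csr_read() and the CSR_*
 * numbers come from <asm/csr.h>):
 *
 *	trap.sepc   = vcpu->arch.guest_context.sepc;
 *	trap.scause = csr_read(CSR_SCAUSE);
 *	trap.stval  = csr_read(CSR_STVAL);
 *	trap.htval  = csr_read(CSR_HTVAL);
 *	trap.htinst = csr_read(CSR_HTINST);
 */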

struct kvm_cpu_context {
	unsigned long zero;
	unsigned long ra;
	unsigned long sp;
	unsigned long gp;
	unsigned long tp;
	unsigned long t0;
	unsigned long t1;
	unsigned long t2;
	unsigned long s0;
	unsigned long s1;
	unsigned long a0;
	unsigned long a1;
	unsigned long a2;
	unsigned long a3;
	unsigned long a4;
	unsigned long a5;
	unsigned long a6;
	unsigned long a7;
	unsigned long s2;
	unsigned long s3;
	unsigned long s4;
	unsigned long s5;
	unsigned long s6;
	unsigned long s7;
	unsigned long s8;
	unsigned long s9;
	unsigned long s10;
	unsigned long s11;
	unsigned long t3;
	unsigned long t4;
	unsigned long t5;
	unsigned long t6;
	unsigned long sepc;
	unsigned long sstatus;
	unsigned long hstatus;
	union __riscv_fp_state fp;
	struct __riscv_v_ext_state vector;
};

struct kvm_vcpu_csr {
	unsigned long vsstatus;
	unsigned long vsie;
	unsigned long vstvec;
	unsigned long vsscratch;
	unsigned long vsepc;
	unsigned long vscause;
	unsigned long vstval;
	unsigned long hvip;
	unsigned long vsatp;
	unsigned long scounteren;
};

struct kvm_vcpu_arch {
	/* VCPU ran at least once */
	bool ran_atleast_once;

	/* Last Host CPU on which Guest VCPU exited */
	int last_exit_cpu;

	/* ISA feature bits (similar to MISA) */
	DECLARE_BITMAP(isa, RISCV_ISA_EXT_MAX);

	/* Vendor, Arch, and Implementation details */
	unsigned long mvendorid;
	unsigned long marchid;
	unsigned long mimpid;

	/* SSCRATCH, STVEC, and SCOUNTEREN of Host */
	unsigned long host_sscratch;
	unsigned long host_stvec;
	unsigned long host_scounteren;

	/* CPU context of Host */
	struct kvm_cpu_context host_context;

	/* CPU context of Guest VCPU */
	struct kvm_cpu_context guest_context;

	/* CPU CSR context of Guest VCPU */
	struct kvm_vcpu_csr guest_csr;

	/* CPU context upon Guest VCPU reset */
	struct kvm_cpu_context guest_reset_context;

	/* CPU CSR context upon Guest VCPU reset */
	struct kvm_vcpu_csr guest_reset_csr;

	/*
	 * VCPU interrupts
	 *
	 * We have a lockless approach for tracking pending VCPU interrupts
	 * implemented using atomic bitops. The irqs_pending bitmap represents
	 * pending interrupts, whereas irqs_pending_mask represents the bits
	 * that have changed in irqs_pending. The approach is modeled on the
	 * multiple-producer, single-consumer problem, where the consumer is
	 * the VCPU itself.
	 */
#define KVM_RISCV_VCPU_NR_IRQS	64
	DECLARE_BITMAP(irqs_pending, KVM_RISCV_VCPU_NR_IRQS);
	DECLARE_BITMAP(irqs_pending_mask, KVM_RISCV_VCPU_NR_IRQS);
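
	/*
	 * A minimal sketch of the producer side, modeled on
	 * kvm_riscv_vcpu_set_interrupt(): set the pending bit, publish
	 * the change with ordered atomics, then kick the VCPU so the
	 * consumer folds it into HVIP on the next guest entry:
	 *
	 *	set_bit(irq, vcpu->arch.irqs_pending);
	 *	smp_mb__before_atomic();
	 *	set_bit(irq, vcpu->arch.irqs_pending_mask);
	 *	kvm_vcpu_kick(vcpu);
	 */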

	/* VCPU Timer */
	struct kvm_vcpu_timer timer;

	/* HFENCE request queue */
	spinlock_t hfence_lock;
	unsigned long hfence_head;
	unsigned long hfence_tail;
	struct kvm_riscv_hfence hfence_queue[KVM_RISCV_VCPU_MAX_HFENCE];
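
	/*
	 * A rough sketch of a producer enqueueing one request under
	 * hfence_lock, treating head/tail as a plain ring buffer (the
	 * in-tree enqueue in arch/riscv/kvm/tlb.c differs in detail):
	 *
	 *	spin_lock(&vcpu->arch.hfence_lock);
	 *	next = (vcpu->arch.hfence_tail + 1) % KVM_RISCV_VCPU_MAX_HFENCE;
	 *	if (next != vcpu->arch.hfence_head) {
	 *		vcpu->arch.hfence_queue[vcpu->arch.hfence_tail] = *req;
	 *		vcpu->arch.hfence_tail = next;
	 *	}
	 *	spin_unlock(&vcpu->arch.hfence_lock);
	 */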

	/* MMIO instruction details */
	struct kvm_mmio_decode mmio_decode;

	/* CSR instruction details */
	struct kvm_csr_decode csr_decode;

	/* SBI context */
	struct kvm_vcpu_sbi_context sbi_context;

	/* AIA VCPU context */
	struct kvm_vcpu_aia aia_context;

	/* Cache pages needed to program page tables with spinlock held */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* VCPU power-off state */
	bool power_off;

	/* Don't run the VCPU (blocked) */
	bool pause;

	/* Performance monitoring context */
	struct kvm_pmu pmu_context;
};

static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}

#define KVM_ARCH_WANT_MMU_NOTIFIER

#define KVM_RISCV_GSTAGE_TLB_MIN_ORDER		12

void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
					  gpa_t gpa, gpa_t gpsz,
					  unsigned long order);
void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid);
void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
				     unsigned long order);
void kvm_riscv_local_hfence_gvma_all(void);
void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
					  unsigned long asid,
					  unsigned long gva,
					  unsigned long gvsz,
					  unsigned long order);
void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
					  unsigned long asid);
void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
				     unsigned long gva, unsigned long gvsz,
				     unsigned long order);
void kvm_riscv_local_hfence_vvma_all(unsigned long vmid);

void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu);

void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu);

void kvm_riscv_fence_i(struct kvm *kvm,
		       unsigned long hbase, unsigned long hmask);
void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    gpa_t gpa, gpa_t gpsz,
				    unsigned long order);
void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask);
void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long gva, unsigned long gvsz,
				    unsigned long order, unsigned long asid);
void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long asid);
void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask,
			       unsigned long gva, unsigned long gvsz,
			       unsigned long order);
void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask);
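
/*
 * Usage sketch for the remote-fence helpers above: flush one guest page
 * from the G-stage TLB on all VCPUs. Treating hbase == -1UL as "all
 * harts" is an assumption here, based on the SBI rfence-style hart-mask
 * convention these helpers follow:
 *
 *	kvm_riscv_hfence_gvma_vmid_gpa(kvm, -1UL, 0, gpa,
 *				       PAGE_SIZE, PAGE_SHIFT);
 */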

int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
			     phys_addr_t hpa, unsigned long size,
			     bool writable, bool in_atomic);
void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa,
			      unsigned long size);
int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
			 struct kvm_memory_slot *memslot,
			 gpa_t gpa, unsigned long hva, bool is_write);
int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm);
void kvm_riscv_gstage_free_pgd(struct kvm *kvm);
void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu);
void __init kvm_riscv_gstage_mode_detect(void);
unsigned long __init kvm_riscv_gstage_mode(void);
int kvm_riscv_gstage_gpa_bits(void);

void __init kvm_riscv_gstage_vmid_detect(void);
unsigned long kvm_riscv_gstage_vmid_bits(void);
int kvm_riscv_gstage_vmid_init(struct kvm *kvm);
bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid);
void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu);

int kvm_riscv_setup_default_irq_routing(struct kvm *kvm, u32 lines);

void __kvm_riscv_unpriv_trap(void);

unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
					 bool read_insn,
					 unsigned long guest_addr,
					 struct kvm_cpu_trap *trap);
void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
				  struct kvm_cpu_trap *trap);
int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
			struct kvm_cpu_trap *trap);

void __kvm_riscv_switch_to(struct kvm_vcpu_arch *vcpu_arch);

void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu);
unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
				    u64 __user *uindices);
int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg);
int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg);

int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu);
bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask);
void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);

#endif /* __RISCV_KVM_HOST_H__ */