xref: /linux/arch/loongarch/include/asm/kvm_host.h (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#ifndef __ASM_LOONGARCH_KVM_HOST_H__
#define __ASM_LOONGARCH_KVM_HOST_H__

#include <linux/cpumask.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/types.h>

#include <asm/inst.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_ipi.h>
#include <asm/loongarch.h>

/* Loongarch KVM register ids */
#define KVM_GET_IOC_CSR_IDX(id)		((id & KVM_CSR_IDX_MASK) >> LOONGARCH_REG_SHIFT)
#define KVM_GET_IOC_CPUCFG_IDX(id)	((id & KVM_CPUCFG_IDX_MASK) >> LOONGARCH_REG_SHIFT)

#define KVM_MAX_VCPUS			256
#define KVM_MAX_CPUCFG_REGS		21

#define KVM_HALT_POLL_NS_DEFAULT	500000
#define KVM_REQ_TLB_FLUSH_GPA		KVM_ARCH_REQ(0)
#define KVM_REQ_STEAL_UPDATE		KVM_ARCH_REQ(1)
#define KVM_REQ_PMU			KVM_ARCH_REQ(2)

#define KVM_GUESTDBG_SW_BP_MASK		\
	(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)
#define KVM_GUESTDBG_VALID_MASK		\
	(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP | KVM_GUESTDBG_SINGLESTEP)

#define KVM_DIRTY_LOG_MANUAL_CAPS	\
	(KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | KVM_DIRTY_LOG_INITIALLY_SET)

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
	u64 pages;
	u64 hugepages;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 int_exits;
	u64 idle_exits;
	u64 cpucfg_exits;
	u64 signal_exits;
	u64 hypercall_exits;
};

#define KVM_MEM_HUGEPAGE_CAPABLE	(1UL << 0)
#define KVM_MEM_HUGEPAGE_INCAPABLE	(1UL << 1)
struct kvm_arch_memory_slot {
	unsigned long flags;
};

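/*
 * Hedged sketch (not part of the original header): the per-memslot flags
 * above record whether a slot can or cannot back huge-page mappings. A
 * stage-2 fault path might consult them roughly like this; the function
 * name is made up for illustration.
 */
static inline bool example_slot_allows_hugepage(struct kvm_arch_memory_slot *slot)
{
	return (slot->flags & KVM_MEM_HUGEPAGE_CAPABLE) &&
	       !(slot->flags & KVM_MEM_HUGEPAGE_INCAPABLE);
}
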
#define HOST_MAX_PMNUM			16
struct kvm_context {
	unsigned long vpid_cache;
	struct kvm_vcpu *last_vcpu;
	/* Host PMU CSR */
	u64 perf_ctrl[HOST_MAX_PMNUM];
	u64 perf_cntr[HOST_MAX_PMNUM];
};

struct kvm_world_switch {
	int (*exc_entry)(void);
	int (*enter_guest)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	unsigned long page_order;
};

#define MAX_PGTABLE_LEVELS	4

/*
 * The physical CPUID is used for interrupt routing, and different pieces of
 * hardware define it differently:
 *
 *  For the LOONGARCH_CSR_CPUID register, the maximum CPUID is 512
 *  For the IPI hardware, the maximum destination CPUID is 1024
 *  For the extioi interrupt controller, the maximum destination CPUID is 256
 *  For the msgint interrupt controller, the maximum supported CPUID is 65536
 *
 * Currently the maximum CPUID is defined as 256 for the KVM hypervisor. In
 * the future it will be expanded to 4096, covering at most 16 packages with
 * up to 256 vCPUs per package.
 */
#define KVM_MAX_PHYID		256

struct kvm_phyid_info {
	struct kvm_vcpu	*vcpu;
	bool		enabled;
};

struct kvm_phyid_map {
	int max_phyid;
	struct kvm_phyid_info phys_map[KVM_MAX_PHYID];
};

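/*
 * Hedged sketch (not part of the original header): one way the phyid map
 * above can be used to resolve a physical CPUID to its vCPU for interrupt
 * routing. The function name is made up for illustration; the in-tree
 * lookup lives in the vCPU/irqchip code.
 */
static inline struct kvm_vcpu *example_phyid_to_vcpu(struct kvm_phyid_map *map, int cpuid)
{
	if (cpuid < 0 || cpuid >= KVM_MAX_PHYID || cpuid > map->max_phyid)
		return NULL;

	if (!map->phys_map[cpuid].enabled)
		return NULL;

	return map->phys_map[cpuid].vcpu;
}
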
struct kvm_arch {
	/* Guest physical mm */
	kvm_pte_t *pgd;
	unsigned long gpa_size;
	unsigned long invalid_ptes[MAX_PGTABLE_LEVELS];
	unsigned int  pte_shifts[MAX_PGTABLE_LEVELS];
	unsigned int  root_level;
	spinlock_t    phyid_map_lock;
	struct kvm_phyid_map  *phyid_map;
	/* Enabled PV features */
	unsigned long pv_features;

	s64 time_offset;
	struct kvm_context __percpu *vmcs;
	struct loongarch_ipi *ipi;
};

#define CSR_MAX_NUMS		0x800

struct loongarch_csrs {
	unsigned long csrs[CSR_MAX_NUMS];
};

/* Resume Flags */
#define RESUME_HOST		0
#define RESUME_GUEST		1

enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_DO_IOCSR,	/* handle IOCSR request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_EXCEPT,		/* A guest exception has been generated */
};

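/*
 * Hedged sketch (not part of the original header): a plausible mapping from
 * an emulation result to a resume decision. The real dispatch lives in the
 * exit handlers, where EMULATE_DO_MMIO/EMULATE_DO_IOCSR also fill in the
 * kvm_run exit information before returning to userspace.
 */
static inline int example_resume_from(enum emulation_result er)
{
	switch (er) {
	case EMULATE_DONE:
	case EMULATE_EXCEPT:
		return RESUME_GUEST;	/* emulation finished, re-enter the guest */
	case EMULATE_DO_MMIO:
	case EMULATE_DO_IOCSR:
	case EMULATE_FAIL:
	default:
		return RESUME_HOST;	/* let userspace complete or abort */
	}
}
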
#define KVM_LARCH_FPU		(0x1 << 0)
#define KVM_LARCH_LSX		(0x1 << 1)
#define KVM_LARCH_LASX		(0x1 << 2)
#define KVM_LARCH_LBT		(0x1 << 3)
#define KVM_LARCH_PMU		(0x1 << 4)
#define KVM_LARCH_SWCSR_LATEST	(0x1 << 5)
#define KVM_LARCH_HWCSR_USABLE	(0x1 << 6)

#define LOONGARCH_PV_FEAT_UPDATED	BIT_ULL(63)
#define LOONGARCH_PV_FEAT_MASK		(BIT(KVM_FEATURE_IPI) |		\
					 BIT(KVM_FEATURE_STEAL_TIME) |	\
					 BIT(KVM_FEATURE_VIRT_EXTIOI))

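/*
 * Hedged sketch (not part of the original header): the pv_features bitmap in
 * struct kvm_arch records which of the paravirt features allowed by
 * LOONGARCH_PV_FEAT_MASK have been enabled. A check might look like the
 * following; the function name is made up for illustration.
 */
static inline bool example_pv_feature_enabled(struct kvm_arch *arch, unsigned int feature)
{
	return !!(arch->pv_features & BIT(feature));
}
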
struct kvm_vcpu_arch {
	/*
	 * Exception entry points are stored as unsigned long rather than
	 * as function pointers so the values can be loaded into a
	 * register directly.
	 */
	unsigned long host_eentry;
	unsigned long guest_eentry;

	/* Pointers stored here for easy access from assembly code */
	int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);

	/* Host registers preserved across guest mode execution */
	unsigned long host_sp;
	unsigned long host_tp;
	unsigned long host_pgd;

	/* Host CSR values used when handling exits from the guest */
	unsigned long badi;
	unsigned long badv;
	unsigned long host_ecfg;
	unsigned long host_estat;
	unsigned long host_percpu;

	/* GPRs */
	unsigned long gprs[32];
	unsigned long pc;

	/* Which auxiliary state is loaded (KVM_LARCH_*) */
	unsigned int aux_inuse;

	/* FPU state */
	struct loongarch_fpu fpu FPU_ALIGN;
	struct loongarch_lbt lbt;

	/* CSR state */
	struct loongarch_csrs *csr;

	/* Guest max PMU CSR id */
	int max_pmu_csrid;

	/* GPR used as IO source/target */
	u32 io_gpr;

	/* KVM register to control count timer */
	u32 count_ctl;
	struct hrtimer swtimer;

	/* Bitmask of interrupts that are pending */
	unsigned long irq_pending;
	/* Bitmask of pending interrupts to be cleared */
	unsigned long irq_clear;

	/* Bitmask of exceptions that are pending */
	unsigned long exception_pending;
	unsigned int  esubcode;

	/* Cache for pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* vcpu's vpid */
	u64 vpid;
	gpa_t flush_gpa;

	/* Frequency of stable timer in Hz */
	u64 timer_mhz;
	ktime_t expire;

	/* Last CPU the vCPU state was loaded on */
	int last_sched_cpu;
	/* mp state */
	struct kvm_mp_state mp_state;
	/* ipi state */
	struct ipi_state ipi_state;
	/* cpucfg */
	u32 cpucfg[KVM_MAX_CPUCFG_REGS];

	/* paravirt steal time */
	struct {
		u64 guest_addr;
		u64 last_steal;
		struct gfn_to_hva_cache cache;
	} st;
};

static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg)
{
	return csr->csrs[reg];
}

static inline void writel_sw_gcsr(struct loongarch_csrs *csr, int reg, unsigned long val)
{
	csr->csrs[reg] = val;
}

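/*
 * Hedged sketch (not part of the original header): servicing a userspace
 * register read from the software-maintained CSR copy, using only the
 * accessors and macros defined in this header. The function name is made
 * up for illustration.
 */
static inline unsigned long example_get_sw_csr(struct loongarch_csrs *csr, u64 reg_id)
{
	int idx = KVM_GET_IOC_CSR_IDX(reg_id);	/* strip the type/size bits */

	if (idx >= CSR_MAX_NUMS)
		return 0;

	return readl_sw_gcsr(csr, idx);
}
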
static inline bool kvm_guest_has_fpu(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[2] & CPUCFG2_FP;
}

static inline bool kvm_guest_has_lsx(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[2] & CPUCFG2_LSX;
}

static inline bool kvm_guest_has_lasx(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[2] & CPUCFG2_LASX;
}

static inline bool kvm_guest_has_lbt(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[2] & (CPUCFG2_X86BT | CPUCFG2_ARMBT | CPUCFG2_MIPSBT);
}

static inline bool kvm_guest_has_pmu(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[6] & CPUCFG6_PMP;
}

static inline int kvm_get_pmu_num(struct kvm_vcpu_arch *arch)
{
	return (arch->cpucfg[6] & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT;
}

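/*
 * Hedged sketch (not part of the original header): the KVM_LARCH_* bits in
 * aux_inuse track which lazily managed guest state is currently live. A
 * caller would typically combine the CPUCFG capability helpers above with
 * an aux_inuse check before restoring state; the function name is made up
 * for illustration, and the in-tree logic lives in the vCPU FPU handling.
 */
static inline bool example_needs_fpu_restore(struct kvm_vcpu_arch *arch)
{
	return kvm_guest_has_fpu(arch) && !(arch->aux_inuse & KVM_LARCH_FPU);
}
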
/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);

/* MMU handling */
void kvm_flush_tlb_all(void);
void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa);
int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write);

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, bool blockable);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

static inline void update_pc(struct kvm_vcpu_arch *arch)
{
	arch->pc += 4;
}

/*
 * kvm_is_ifetch_fault() - Determine whether a TLBL exception is due to an ifetch fault.
 * @arch:	vCPU architecture state.
 *
 * Returns:	Whether the TLBL exception was likely due to an instruction
 *		fetch fault rather than a data load fault.
 */
static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *arch)
{
	return arch->pc == arch->badv;
}

/* Misc */
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) {}
void kvm_check_vpid(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer);
void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, const struct kvm_memory_slot *memslot);
void kvm_init_vmcs(struct kvm *kvm);
void kvm_exc_entry(void);
int  kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu);

extern unsigned long vpid_mask;
extern const unsigned long kvm_exception_size;
extern const unsigned long kvm_enter_guest_size;
extern struct kvm_world_switch *kvm_loongarch_ops;

#define SW_GCSR		(1 << 0)
#define HW_GCSR		(1 << 1)
#define INVALID_GCSR	(1 << 2)

int get_gcsr_flag(int csr);
void set_hw_gcsr(int csr_id, unsigned long val);

#endif /* __ASM_LOONGARCH_KVM_HOST_H__ */