/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#ifndef __ASM_LOONGARCH_KVM_HOST_H__
#define __ASM_LOONGARCH_KVM_HOST_H__

#include <linux/cpumask.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/mutex.h>
#include <linux/perf_event.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/types.h>

#include <asm/inst.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_ipi.h>
#include <asm/kvm_dmsintc.h>
#include <asm/kvm_eiointc.h>
#include <asm/kvm_pch_pic.h>
#include <asm/loongarch.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

/* LoongArch KVM register IDs */
#define KVM_GET_IOC_CSR_IDX(id)		((id & KVM_CSR_IDX_MASK) >> LOONGARCH_REG_SHIFT)
#define KVM_GET_IOC_CPUCFG_IDX(id)	((id & KVM_CPUCFG_IDX_MASK) >> LOONGARCH_REG_SHIFT)

#define KVM_MAX_VCPUS			256
#define KVM_MAX_CPUCFG_REGS		21

#define KVM_HALT_POLL_NS_DEFAULT	500000
#define KVM_REQ_TLB_FLUSH_GPA		KVM_ARCH_REQ(0)
#define KVM_REQ_STEAL_UPDATE		KVM_ARCH_REQ(1)
#define KVM_REQ_PMU			KVM_ARCH_REQ(2)
#define KVM_REQ_AUX_LOAD		KVM_ARCH_REQ(3)

#define KVM_GUESTDBG_SW_BP_MASK		\
	(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)
#define KVM_GUESTDBG_VALID_MASK		\
	(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP | KVM_GUESTDBG_SINGLESTEP)

#define KVM_DIRTY_LOG_MANUAL_CAPS	\
	(KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | KVM_DIRTY_LOG_INITIALLY_SET)

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
	u64 pages;
	u64 hugepages;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 int_exits;
	u64 idle_exits;
	u64 cpucfg_exits;
	u64 signal_exits;
	u64 hypercall_exits;
	u64 ipi_read_exits;
	u64 ipi_write_exits;
	u64 eiointc_read_exits;
	u64 eiointc_write_exits;
	u64 pch_pic_read_exits;
	u64 pch_pic_write_exits;
};

#define KVM_MEM_HUGEPAGE_CAPABLE	(1UL << 0)
#define KVM_MEM_HUGEPAGE_INCAPABLE	(1UL << 1)
struct kvm_arch_memory_slot {
	unsigned long flags;
};

#define HOST_MAX_PMNUM			16
struct kvm_context {
	unsigned long vpid_cache;
	struct kvm_vcpu *last_vcpu;
	/* Host PMU CSRs */
	u64 perf_ctrl[HOST_MAX_PMNUM];
	u64 perf_cntr[HOST_MAX_PMNUM];
};

struct kvm_world_switch {
	int (*exc_entry)(void);
	int (*enter_guest)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	unsigned long page_order;
};

#define MAX_PGTABLE_LEVELS	4

/*
 * The physical CPUID is used for interrupt routing, and different pieces
 * of hardware define the physical CPUID differently:
 *
 *  For the LOONGARCH_CSR_CPUID register, the max CPUID size is 512
 *  For IPI hardware, the max destination CPUID size is 1024
 *  For the eiointc interrupt controller, the max destination CPUID size is 256
 *  For the msgint interrupt controller, the max supported CPUID size is 65536
 *
 * Currently the max CPUID is defined as 256 for the KVM hypervisor; in the
 * future it will be expanded to 4096, covering at most 16 packages, with
 * every package supporting at most 256 vCPUs.
 */
#define KVM_MAX_PHYID		256

struct kvm_phyid_info {
	struct kvm_vcpu	*vcpu;
	bool		enabled;
};

struct kvm_phyid_map {
	int max_phyid;
	struct kvm_phyid_info phys_map[KVM_MAX_PHYID];
};

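/*
 * Illustrative sketch only -- kvm_example_phyid_to_vcpu() is a hypothetical
 * helper, not part of this header's API. It shows how a physical CPUID might
 * be resolved to its vCPU through the phyid map above, assuming the caller
 * serializes against map updates (e.g. via phyid_map_lock in struct kvm_arch
 * below).
 */
static inline struct kvm_vcpu *kvm_example_phyid_to_vcpu(struct kvm_phyid_map *map,
							 int phyid)
{
	if (phyid < 0 || phyid >= KVM_MAX_PHYID || phyid > map->max_phyid)
		return NULL;
	/* A slot only routes interrupts once its CPUID has been enabled */
	if (!map->phys_map[phyid].enabled)
		return NULL;
	return map->phys_map[phyid].vcpu;
}
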
struct kvm_arch {
	/* Guest physical mm */
	kvm_pte_t *pgd;
	unsigned long gpa_size;
	unsigned long invalid_ptes[MAX_PGTABLE_LEVELS];
	unsigned int pte_shifts[MAX_PGTABLE_LEVELS];
	unsigned int root_level;
	spinlock_t phyid_map_lock;
	struct kvm_phyid_map *phyid_map;
	/* Enabled PV features */
	unsigned long pv_features;
	/* Supported KVM features */
	unsigned long kvm_features;

	s64 time_offset;
	struct kvm_context __percpu *vmcs;
	struct loongarch_ipi *ipi;
	struct loongarch_dmsintc *dmsintc;
	struct loongarch_eiointc *eiointc;
	struct loongarch_pch_pic *pch_pic;
};

#define CSR_MAX_NUMS		0x800

struct loongarch_csrs {
	unsigned long csrs[CSR_MAX_NUMS];
};

/* Resume Flags */
#define RESUME_HOST		0
#define RESUME_GUEST		1

enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_DO_IOCSR,	/* handle IOCSR request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_EXCEPT,		/* a guest exception has been generated */
};

#define KVM_LARCH_FPU		(0x1 << 0)
#define KVM_LARCH_LSX		(0x1 << 1)
#define KVM_LARCH_LASX		(0x1 << 2)
#define KVM_LARCH_LBT		(0x1 << 3)
#define KVM_LARCH_PMU		(0x1 << 4)
#define KVM_LARCH_SWCSR_LATEST	(0x1 << 5)
#define KVM_LARCH_HWCSR_USABLE	(0x1 << 6)

#define LOONGARCH_PV_FEAT_UPDATED	BIT_ULL(63)
#define LOONGARCH_PV_FEAT_MASK		(BIT(KVM_FEATURE_IPI) |		\
					 BIT(KVM_FEATURE_PREEMPT) |	\
					 BIT(KVM_FEATURE_STEAL_TIME) |	\
					 BIT(KVM_FEATURE_USER_HCALL) |	\
					 BIT(KVM_FEATURE_VIRT_EXTIOI))

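/*
 * Illustrative sketch only -- kvm_example_negotiate_pv_features() is a
 * hypothetical helper, not this header's API. It shows how a PV-feature
 * negotiation path might clamp a guest-requested feature set to the
 * supported mask and record that negotiation happened by setting
 * LOONGARCH_PV_FEAT_UPDATED.
 */
static inline void kvm_example_negotiate_pv_features(struct kvm_arch *arch,
						     u64 requested)
{
	/* Keep only features KVM advertises; remember that the guest asked */
	arch->pv_features = (requested & LOONGARCH_PV_FEAT_MASK) |
			    LOONGARCH_PV_FEAT_UPDATED;
}
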
struct kvm_vcpu_arch {
	/*
	 * The pointer-to-function type is switched to unsigned long
	 * so that the value can be loaded into a register directly.
	 */
	unsigned long host_eentry;
	unsigned long guest_eentry;

	/* Pointers stored here for easy access from assembly code */
	int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);

	/* GPA (=HVA) of the PGD for the secondary MMU */
	unsigned long kvm_pgd;

	/* Host registers preserved across guest-mode execution */
	unsigned long host_sp;
	unsigned long host_tp;
	unsigned long host_pgd;

	/* Host CSRs used when handling exits from the guest */
	unsigned long badi;
	unsigned long badv;
	unsigned long host_ecfg;
	unsigned long host_estat;
	unsigned long host_percpu;

	/* GPRs */
	unsigned long gprs[32];
	unsigned long pc;

	/* Which auxiliary state is loaded (KVM_LARCH_*) */
	unsigned int aux_inuse;
	unsigned int aux_ldtype;

	/* FPU state */
	struct loongarch_fpu fpu FPU_ALIGN;
	struct loongarch_lbt lbt;

	/* CSR state */
	struct loongarch_csrs *csr;

	/* Guest max PMU CSR id */
	int max_pmu_csrid;

	/* GPR used as IO source/target */
	u32 io_gpr;

	/* KVM register to control the count timer */
	u32 count_ctl;
	struct hrtimer swtimer;

	/* Bitmask of interrupts that are pending */
	unsigned long irq_pending;
	/* Bitmask of pending interrupts to be cleared */
	unsigned long irq_clear;

	/* Bitmask of exceptions that are pending */
	unsigned long exception_pending;
	unsigned int esubcode;

	/* Cache for pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* vCPU's VPID */
	u64 vpid;
	gpa_t flush_gpa;

	/* Frequency of stable timer in Hz */
	u64 timer_mhz;
	ktime_t expire;

	/* Last CPU the vCPU state was loaded on */
	int last_sched_cpu;
	/* mp state */
	struct kvm_mp_state mp_state;
	/* ipi state */
	struct ipi_state ipi_state;
	struct dmsintc_state dmsintc_state;
	/* cpucfg */
	u32 cpucfg[KVM_MAX_CPUCFG_REGS];

	/* paravirt steal time */
	struct {
		u64 guest_addr;
		u64 last_steal;
		struct gfn_to_hva_cache cache;
		u8 preempted;
	} st;
};

static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg)
{
	return csr->csrs[reg];
}

static inline void writel_sw_gcsr(struct loongarch_csrs *csr, int reg, unsigned long val)
{
	csr->csrs[reg] = val;
}

static inline bool kvm_guest_has_msgint(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[1] & CPUCFG1_MSGINT;
}

static inline bool kvm_guest_has_fpu(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[2] & CPUCFG2_FP;
}

static inline bool kvm_guest_has_lsx(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[2] & CPUCFG2_LSX;
}

static inline bool kvm_guest_has_lasx(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[2] & CPUCFG2_LASX;
}

static inline bool kvm_guest_has_lbt(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[2] & (CPUCFG2_X86BT | CPUCFG2_ARMBT | CPUCFG2_MIPSBT);
}

static inline bool kvm_guest_has_pmu(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[6] & CPUCFG6_PMP;
}

static inline int kvm_get_pmu_num(struct kvm_vcpu_arch *arch)
{
	return (arch->cpucfg[6] & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT;
}

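/*
 * Illustrative sketch only -- kvm_example_guest_pmu_counters() is a
 * hypothetical helper, not this header's API. It shows how the two PMU
 * queries above combine, assuming (per the LoongArch reference manual)
 * that CPUCFG6.PMNUM encodes the number of performance counters minus one.
 */
static inline int kvm_example_guest_pmu_counters(struct kvm_vcpu_arch *arch)
{
	/* A guest without a PMU exposes no counters at all */
	if (!kvm_guest_has_pmu(arch))
		return 0;
	return kvm_get_pmu_num(arch) + 1;
}
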
/* Check whether KVM supports this feature (the VMM may disable it) */
static inline bool kvm_vm_support(struct kvm_arch *arch, int feature)
{
	return !!(arch->kvm_features & BIT_ULL(feature));
}

bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu);

/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);

/* MMU handling */
void kvm_flush_tlb_all(void);
void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa);
int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write, int ecode);

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, bool blockable);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

static inline void update_pc(struct kvm_vcpu_arch *arch)
{
	arch->pc += 4;
}

/*
 * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to an ifetch fault.
 * @arch: vCPU architecture state.
 *
 * Returns: Whether the TLBL exception was likely due to an instruction
 *	    fetch fault rather than a data load fault.
 */
static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *arch)
{
	return arch->pc == arch->badv;
}

/* Misc */
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) {}
void kvm_check_vpid(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer);
void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, const struct kvm_memory_slot *memslot);
void kvm_init_vmcs(struct kvm *kvm);
void kvm_exc_entry(void);
int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu);

extern unsigned long vpid_mask;
extern const unsigned long kvm_exception_size;
extern const unsigned long kvm_enter_guest_size;
extern struct kvm_world_switch *kvm_loongarch_ops;

#define SW_GCSR		(1 << 0)
#define HW_GCSR		(1 << 1)
#define INVALID_GCSR	(1 << 2)

int get_gcsr_flag(int csr);
void set_hw_gcsr(int csr_id, unsigned long val);

#endif /* __ASM_LOONGARCH_KVM_HOST_H__ */