/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#ifndef __ASM_LOONGARCH_KVM_HOST_H__
#define __ASM_LOONGARCH_KVM_HOST_H__

#include <linux/cpumask.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/mutex.h>
#include <linux/perf_event.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/types.h>

#include <asm/inst.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_ipi.h>
#include <asm/kvm_eiointc.h>
#include <asm/kvm_pch_pic.h>
#include <asm/loongarch.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

/* LoongArch KVM register ids */
#define KVM_GET_IOC_CSR_IDX(id)		((id & KVM_CSR_IDX_MASK) >> LOONGARCH_REG_SHIFT)
#define KVM_GET_IOC_CPUCFG_IDX(id)	((id & KVM_CPUCFG_IDX_MASK) >> LOONGARCH_REG_SHIFT)

#define KVM_MAX_VCPUS			256
#define KVM_MAX_CPUCFG_REGS		21

#define KVM_HALT_POLL_NS_DEFAULT	500000
#define KVM_REQ_TLB_FLUSH_GPA		KVM_ARCH_REQ(0)
#define KVM_REQ_STEAL_UPDATE		KVM_ARCH_REQ(1)
#define KVM_REQ_PMU			KVM_ARCH_REQ(2)
#define KVM_REQ_AUX_LOAD		KVM_ARCH_REQ(3)

#define KVM_GUESTDBG_SW_BP_MASK		\
	(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)
#define KVM_GUESTDBG_VALID_MASK		\
	(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP | KVM_GUESTDBG_SINGLESTEP)

#define KVM_DIRTY_LOG_MANUAL_CAPS	\
	(KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | KVM_DIRTY_LOG_INITIALLY_SET)

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
	u64 pages;
	u64 hugepages;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 int_exits;
	u64 idle_exits;
	u64 cpucfg_exits;
	u64 signal_exits;
	u64 hypercall_exits;
	u64 ipi_read_exits;
	u64 ipi_write_exits;
	u64 eiointc_read_exits;
	u64 eiointc_write_exits;
	u64 pch_pic_read_exits;
	u64 pch_pic_write_exits;
};

#define KVM_MEM_HUGEPAGE_CAPABLE	(1UL << 0)
#define KVM_MEM_HUGEPAGE_INCAPABLE	(1UL << 1)
struct kvm_arch_memory_slot {
	unsigned long flags;
};

#define HOST_MAX_PMNUM			16
struct kvm_context {
	unsigned long vpid_cache;
	struct kvm_vcpu *last_vcpu;
	/* Host PMU CSRs */
	u64 perf_ctrl[HOST_MAX_PMNUM];
	u64 perf_cntr[HOST_MAX_PMNUM];
};

struct kvm_world_switch {
	int (*exc_entry)(void);
	int (*enter_guest)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	unsigned long page_order;
};

#define MAX_PGTABLE_LEVELS	4

/*
 * Physical CPUID is used for interrupt routing, and there are different
 * definitions of physical cpuid on different hardware:
 *
 *  For the LOONGARCH_CSR_CPUID register, max CPUID size is 512
 *  For IPI hardware, max destination CPUID size is 1024
 *  For the eiointc interrupt controller, max destination CPUID size is 256
 *  For the msgint interrupt controller, max supported CPUID size is 65536
 *
 * Currently the max CPUID is defined as 256 for the KVM hypervisor; in the
 * future it will be expanded to 4096, covering at most 16 packages with
 * each package supporting at most 256 vcpus.
 */
#define KVM_MAX_PHYID		256

struct kvm_phyid_info {
	struct kvm_vcpu *vcpu;
	bool enabled;
};

struct kvm_phyid_map {
	int max_phyid;
	struct kvm_phyid_info phys_map[KVM_MAX_PHYID];
};

struct kvm_arch {
	/* Guest physical mm */
	kvm_pte_t *pgd;
	unsigned long gpa_size;
	unsigned long invalid_ptes[MAX_PGTABLE_LEVELS];
	unsigned int pte_shifts[MAX_PGTABLE_LEVELS];
	unsigned int root_level;
	spinlock_t phyid_map_lock;
	struct kvm_phyid_map *phyid_map;
	/* Enabled PV features */
	unsigned long pv_features;
	/* Supported KVM features */
	unsigned long kvm_features;

	s64 time_offset;
	struct kvm_context __percpu *vmcs;
	struct loongarch_ipi *ipi;
	struct loongarch_eiointc *eiointc;
	struct loongarch_pch_pic *pch_pic;
};

#define CSR_MAX_NUMS		0x800

struct loongarch_csrs {
	unsigned long csrs[CSR_MAX_NUMS];
};

/* Resume Flags */
#define RESUME_HOST		0
#define RESUME_GUEST		1

enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_DO_IOCSR,	/* handle IOCSR request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_EXCEPT,		/* a guest exception has been generated */
};

#define KVM_LARCH_FPU		(0x1 << 0)
#define KVM_LARCH_LSX		(0x1 << 1)
#define KVM_LARCH_LASX		(0x1 << 2)
#define KVM_LARCH_LBT		(0x1 << 3)
#define KVM_LARCH_PMU		(0x1 << 4)
#define KVM_LARCH_SWCSR_LATEST	(0x1 << 5)
#define KVM_LARCH_HWCSR_USABLE	(0x1 << 6)

#define LOONGARCH_PV_FEAT_UPDATED	BIT_ULL(63)
#define LOONGARCH_PV_FEAT_MASK		(BIT(KVM_FEATURE_IPI) |		\
					 BIT(KVM_FEATURE_PREEMPT) |	\
					 BIT(KVM_FEATURE_STEAL_TIME) |	\
					 BIT(KVM_FEATURE_USER_HCALL) |	\
					 BIT(KVM_FEATURE_VIRT_EXTIOI))

struct kvm_vcpu_arch {
	/*
	 * Switch pointer-to-function type to unsigned long
	 * for loading the value into a register directly.
	 */
	unsigned long host_eentry;
	unsigned long guest_eentry;

	/* Pointers stored here for easy access from assembly code */
	int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);

	/* GPA (=HVA) of PGD for secondary mmu */
	unsigned long kvm_pgd;

	/* Host registers preserved across guest mode execution */
	unsigned long host_sp;
	unsigned long host_tp;
	unsigned long host_pgd;

	/* Host CSRs are used when handling exits from guest */
	unsigned long badi;
	unsigned long badv;
	unsigned long host_ecfg;
	unsigned long host_estat;
	unsigned long host_percpu;

	/* GPRs */
	unsigned long gprs[32];
	unsigned long pc;

	/* Which auxiliary state is loaded (KVM_LARCH_*) */
	unsigned int aux_inuse;
	unsigned int aux_ldtype;

	/* FPU state */
	struct loongarch_fpu fpu FPU_ALIGN;
	struct loongarch_lbt lbt;

	/* CSR state */
	struct loongarch_csrs *csr;

	/* Guest max PMU CSR id */
	int max_pmu_csrid;

	/* GPR used as IO source/target */
	u32 io_gpr;

	/* KVM register to control count timer */
	u32 count_ctl;
	struct hrtimer swtimer;

	/* Bitmask of interrupts that are pending */
	unsigned long irq_pending;
	/* Bitmask of pending interrupts to be cleared */
	unsigned long irq_clear;

	/* Bitmask of exceptions that are pending */
	unsigned long exception_pending;
	unsigned int esubcode;

	/* Cache for pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* vcpu's vpid */
	u64 vpid;
	gpa_t flush_gpa;

	/* Frequency of stable timer in MHz */
	u64 timer_mhz;
	ktime_t expire;

	/* Last CPU the vCPU state was loaded on */
	int last_sched_cpu;
	/* mp state */
	struct kvm_mp_state mp_state;
	/* ipi state */
	struct ipi_state ipi_state;
	/* cpucfg */
	u32 cpucfg[KVM_MAX_CPUCFG_REGS];

	/* paravirt steal time */
	struct {
		u64 guest_addr;
		u64 last_steal;
		struct gfn_to_hva_cache cache;
		u8 preempted;
	} st;
};

static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg)
{
	return csr->csrs[reg];
}

static inline void writel_sw_gcsr(struct loongarch_csrs *csr, int reg, unsigned long val)
{
	csr->csrs[reg] = val;
}
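/*
 * Illustrative sketch (hypothetical helper, not part of the kernel
 * sources): guest CSR values that are not currently resident in
 * hardware are kept in the loongarch_csrs shadow array and accessed
 * with the two helpers above, indexed by the LOONGARCH_CSR_* id.
 */
static inline unsigned long example_sw_gcsr_estat(struct kvm_vcpu_arch *arch)
{
	/* Read the guest ESTAT value from the in-memory shadow copy */
	return readl_sw_gcsr(arch->csr, LOONGARCH_CSR_ESTAT);
}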
static inline bool kvm_guest_has_msgint(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[1] & CPUCFG1_MSGINT;
}

static inline bool kvm_guest_has_fpu(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[2] & CPUCFG2_FP;
}

static inline bool kvm_guest_has_lsx(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[2] & CPUCFG2_LSX;
}

static inline bool kvm_guest_has_lasx(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[2] & CPUCFG2_LASX;
}

static inline bool kvm_guest_has_lbt(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[2] & (CPUCFG2_X86BT | CPUCFG2_ARMBT | CPUCFG2_MIPSBT);
}

static inline bool kvm_guest_has_pmu(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[6] & CPUCFG6_PMP;
}

static inline int kvm_get_pmu_num(struct kvm_vcpu_arch *arch)
{
	return (arch->cpucfg[6] & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT;
}
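/*
 * Illustrative sketch (hypothetical helper, not kernel code): the
 * CPUCFG-derived capability checks above pair with the KVM_LARCH_*
 * bits tracked in aux_inuse, so auxiliary state such as the LSX
 * vector registers is only saved/restored once the guest both has
 * the feature and has actually started using it.
 */
static inline bool example_lsx_state_live(struct kvm_vcpu_arch *arch)
{
	return kvm_guest_has_lsx(arch) && (arch->aux_inuse & KVM_LARCH_LSX);
}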
/* Check whether KVM supports this feature (the VMM may disable it) */
static inline bool kvm_vm_support(struct kvm_arch *arch, int feature)
{
	return !!(arch->kvm_features & BIT_ULL(feature));
}

bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu);

/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);

/* MMU handling */
void kvm_flush_tlb_all(void);
void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa);
int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write, int ecode);

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, bool blockable);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

static inline void update_pc(struct kvm_vcpu_arch *arch)
{
	arch->pc += 4;
}

/*
 * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to an ifetch fault.
 * @arch: Architecture state of the vCPU that took the exception.
 *
 * Returns: Whether the TLBL exception was likely due to an instruction
 *	    fetch fault rather than a data load fault.
 */
static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *arch)
{
	return arch->pc == arch->badv;
}
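/*
 * Illustrative sketch (hypothetical helper, not kernel code): LoongArch
 * instructions are a fixed 4 bytes, so once an exiting instruction has
 * been fully emulated the guest resumes at the next instruction simply
 * by stepping the PC with update_pc().
 */
static inline void example_finish_emulation(struct kvm_vcpu_arch *arch,
					    enum emulation_result er)
{
	if (er == EMULATE_DONE)
		update_pc(arch);	/* skip the emulated 4-byte instruction */
}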
/* Misc */
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) {}
void kvm_check_vpid(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer);
void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, const struct kvm_memory_slot *memslot);
void kvm_init_vmcs(struct kvm *kvm);
void kvm_exc_entry(void);
int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu);

extern unsigned long vpid_mask;
extern const unsigned long kvm_exception_size;
extern const unsigned long kvm_enter_guest_size;
extern struct kvm_world_switch *kvm_loongarch_ops;

#define SW_GCSR		(1 << 0)
#define HW_GCSR		(1 << 1)
#define INVALID_GCSR	(1 << 2)

int get_gcsr_flag(int csr);
void set_hw_gcsr(int csr_id, unsigned long val);

#endif /* __ASM_LOONGARCH_KVM_HOST_H__ */