/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#ifndef __ASM_LOONGARCH_KVM_HOST_H__
#define __ASM_LOONGARCH_KVM_HOST_H__

#include <linux/cpumask.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/types.h>

#include <asm/inst.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_ipi.h>
#include <asm/kvm_eiointc.h>
#include <asm/kvm_pch_pic.h>
#include <asm/loongarch.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

/* LoongArch KVM register IDs */
#define KVM_GET_IOC_CSR_IDX(id)		((id & KVM_CSR_IDX_MASK) >> LOONGARCH_REG_SHIFT)
#define KVM_GET_IOC_CPUCFG_IDX(id)	((id & KVM_CPUCFG_IDX_MASK) >> LOONGARCH_REG_SHIFT)

#define KVM_MAX_VCPUS			256
#define KVM_MAX_CPUCFG_REGS		21

#define KVM_HALT_POLL_NS_DEFAULT	500000
#define KVM_REQ_TLB_FLUSH_GPA		KVM_ARCH_REQ(0)
#define KVM_REQ_STEAL_UPDATE		KVM_ARCH_REQ(1)
#define KVM_REQ_PMU			KVM_ARCH_REQ(2)

#define KVM_GUESTDBG_SW_BP_MASK		\
	(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)
#define KVM_GUESTDBG_VALID_MASK		\
	(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP | KVM_GUESTDBG_SINGLESTEP)

#define KVM_DIRTY_LOG_MANUAL_CAPS	\
	(KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | KVM_DIRTY_LOG_INITIALLY_SET)

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
	u64 pages;
	u64 hugepages;
	u64 ipi_read_exits;
	u64 ipi_write_exits;
	u64 eiointc_read_exits;
	u64 eiointc_write_exits;
	u64 pch_pic_read_exits;
	u64 pch_pic_write_exits;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 int_exits;
	u64 idle_exits;
	u64 cpucfg_exits;
	u64 signal_exits;
	u64 hypercall_exits;
};

#define KVM_MEM_HUGEPAGE_CAPABLE	(1UL << 0)
#define KVM_MEM_HUGEPAGE_INCAPABLE	(1UL << 1)
struct kvm_arch_memory_slot {
	unsigned long flags;
};

#define HOST_MAX_PMNUM			16
struct kvm_context {
	unsigned long vpid_cache;
	struct kvm_vcpu *last_vcpu;
	/* Host PMU CSR */
	u64 perf_ctrl[HOST_MAX_PMNUM];
	u64 perf_cntr[HOST_MAX_PMNUM];
};

struct kvm_world_switch {
	int (*exc_entry)(void);
	int (*enter_guest)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	unsigned long page_order;
};

#define MAX_PGTABLE_LEVELS	4

/*
 * Physical CPUID is used for interrupt routing, and the physical CPUID is
 * defined differently by the different hardware blocks:
 *
 * For the LOONGARCH_CSR_CPUID register, the max CPUID size is 512.
 * For the IPI hardware, the max destination CPUID size is 1024.
 * For the eiointc interrupt controller, the max destination CPUID size is 256.
 * For the msgint interrupt controller, the max supported CPUID size is 65536.
 *
 * Currently the max CPUID is defined as 256 for the KVM hypervisor; in the
 * future it will be expanded to 4096, covering at most 16 packages with up
 * to 256 vCPUs each.
 */
#define KVM_MAX_PHYID		256

struct kvm_phyid_info {
	struct kvm_vcpu	*vcpu;
	bool		enabled;
};

struct kvm_phyid_map {
	int max_phyid;
	struct kvm_phyid_info phys_map[KVM_MAX_PHYID];
};
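/*
 * A minimal lookup sketch (illustrative only; the in-tree lookup lives in
 * vcpu.c): it shows how the map above resolves a guest physical CPUID to
 * its vCPU. The real code is serialized by phyid_map_lock in struct
 * kvm_arch below; this hypothetical helper assumes the caller holds it.
 */
static inline struct kvm_vcpu *kvm_phyid_map_lookup(struct kvm_phyid_map *map,
						    int phyid)
{
	if (phyid < 0 || phyid > map->max_phyid)
		return NULL;	/* Outside the currently populated range */

	if (!map->phys_map[phyid].enabled)
		return NULL;	/* Slot exists but CPUID is not enabled yet */

	return map->phys_map[phyid].vcpu;
}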
struct kvm_arch {
	/* Guest physical mm */
	kvm_pte_t *pgd;
	unsigned long gpa_size;
	unsigned long invalid_ptes[MAX_PGTABLE_LEVELS];
	unsigned int  pte_shifts[MAX_PGTABLE_LEVELS];
	unsigned int  root_level;
	spinlock_t    phyid_map_lock;
	struct kvm_phyid_map *phyid_map;
	/* Enabled PV features */
	unsigned long pv_features;

	s64 time_offset;
	struct kvm_context __percpu *vmcs;
	struct loongarch_ipi *ipi;
	struct loongarch_eiointc *eiointc;
	struct loongarch_pch_pic *pch_pic;
};

#define CSR_MAX_NUMS		0x800

struct loongarch_csrs {
	unsigned long csrs[CSR_MAX_NUMS];
};

/* Resume Flags */
#define RESUME_HOST		0
#define RESUME_GUEST		1

enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_DO_IOCSR,	/* handle IOCSR request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_EXCEPT,		/* a guest exception has been generated */
};

#define KVM_LARCH_FPU		(0x1 << 0)
#define KVM_LARCH_LSX		(0x1 << 1)
#define KVM_LARCH_LASX		(0x1 << 2)
#define KVM_LARCH_LBT		(0x1 << 3)
#define KVM_LARCH_PMU		(0x1 << 4)
#define KVM_LARCH_SWCSR_LATEST	(0x1 << 5)
#define KVM_LARCH_HWCSR_USABLE	(0x1 << 6)

#define LOONGARCH_PV_FEAT_UPDATED	BIT_ULL(63)
#define LOONGARCH_PV_FEAT_MASK		(BIT(KVM_FEATURE_IPI) |		\
					 BIT(KVM_FEATURE_STEAL_TIME) |	\
					 BIT(KVM_FEATURE_VIRT_EXTIOI))

struct kvm_vcpu_arch {
	/*
	 * The entry points are stored as unsigned long rather than as
	 * pointer-to-function so the values can be loaded into registers
	 * directly.
	 */
	unsigned long host_eentry;
	unsigned long guest_eentry;

	/* Pointers stored here for easy access from assembly code */
	int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);

	/* Host registers preserved across guest mode execution */
	unsigned long host_sp;
	unsigned long host_tp;
	unsigned long host_pgd;

	/* Host CSRs are used when handling exits from guest */
	unsigned long badi;
	unsigned long badv;
	unsigned long host_ecfg;
	unsigned long host_estat;
	unsigned long host_percpu;

	/* GPRs */
	unsigned long gprs[32];
	unsigned long pc;

	/* Which auxiliary state is loaded (KVM_LARCH_*) */
	unsigned int aux_inuse;

	/* FPU state */
	struct loongarch_fpu fpu FPU_ALIGN;
	struct loongarch_lbt lbt;

	/* CSR state */
	struct loongarch_csrs *csr;

	/* Guest max PMU CSR id */
	int max_pmu_csrid;

	/* GPR used as IO source/target */
	u32 io_gpr;

	/* KVM register to control count timer */
	u32 count_ctl;
	struct hrtimer swtimer;

	/* Bitmask of interrupts that are pending */
	unsigned long irq_pending;
	/* Bitmask of pending interrupts to be cleared */
	unsigned long irq_clear;

	/* Bitmask of exceptions that are pending */
	unsigned long exception_pending;
	unsigned int  esubcode;

	/* Cache for pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* vcpu's vpid */
	u64 vpid;
	gpa_t flush_gpa;

	/* Frequency of the stable timer in MHz */
	u64 timer_mhz;
	ktime_t expire;

	/* Last CPU the vCPU state was loaded on */
	int last_sched_cpu;
	/* mp state */
	struct kvm_mp_state mp_state;
	/* ipi state */
	struct ipi_state ipi_state;
	/* cpucfg */
	u32 cpucfg[KVM_MAX_CPUCFG_REGS];

	/* paravirt steal time */
	struct {
		u64 guest_addr;
		u64 last_steal;
		struct gfn_to_hva_cache cache;
	} st;
};
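/*
 * Illustrative sketch (a hypothetical reconstruction; the in-tree queueing
 * code lives in interrupt.c): irq_pending and irq_clear above work as a
 * pair, so raising an interrupt means setting its bit in irq_pending and
 * making sure the same bit is no longer scheduled for clearing.
 */
static inline void kvm_queue_irq_sketch(struct kvm_vcpu_arch *arch, unsigned int irq)
{
	set_bit(irq, &arch->irq_pending);
	clear_bit(irq, &arch->irq_clear);
}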
static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg)
{
	return csr->csrs[reg];
}

static inline void writel_sw_gcsr(struct loongarch_csrs *csr, int reg, unsigned long val)
{
	csr->csrs[reg] = val;
}

static inline bool kvm_guest_has_fpu(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[2] & CPUCFG2_FP;
}

static inline bool kvm_guest_has_lsx(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[2] & CPUCFG2_LSX;
}

static inline bool kvm_guest_has_lasx(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[2] & CPUCFG2_LASX;
}

static inline bool kvm_guest_has_lbt(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[2] & (CPUCFG2_X86BT | CPUCFG2_ARMBT | CPUCFG2_MIPSBT);
}

static inline bool kvm_guest_has_pmu(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[6] & CPUCFG6_PMP;
}

static inline int kvm_get_pmu_num(struct kvm_vcpu_arch *arch)
{
	return (arch->cpucfg[6] & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT;
}
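/*
 * Illustrative sketch (hypothetical helper; the in-tree checks live in
 * vcpu.c): the two PMU helpers above are meant to be combined, first
 * gating on CPUCFG6_PMP and then sizing the guest's PMU CSR range. The
 * id arithmetic assumes the LOONGARCH_CSR_PERFCTRL0/PERFCNTR0 layout from
 * <asm/loongarch.h>, where control and counter registers alternate.
 */
static inline int kvm_setup_guest_pmu_sketch(struct kvm_vcpu_arch *arch)
{
	if (!kvm_guest_has_pmu(arch))
		return -EINVAL;

	/*
	 * kvm_get_pmu_num() returns the highest counter index, so the last
	 * valid PMU CSR id covers both its control and counter registers.
	 */
	arch->max_pmu_csrid = LOONGARCH_CSR_PERFCTRL0 + 2 * kvm_get_pmu_num(arch) + 1;

	return 0;
}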
/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);

/* MMU handling */
void kvm_flush_tlb_all(void);
void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa);
int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write);

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, bool blockable);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

static inline void update_pc(struct kvm_vcpu_arch *arch)
{
	arch->pc += 4;
}

/*
 * kvm_is_ifetch_fault() - Determine whether a TLBL exception is due to an
 * instruction fetch fault.
 * @arch: Virtual CPU architecture state.
 *
 * Returns: Whether the TLBL exception was likely due to an instruction
 * fetch fault rather than a data load fault.
 */
static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *arch)
{
	return arch->pc == arch->badv;
}

/* Misc */
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) {}
void kvm_check_vpid(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer);
void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, const struct kvm_memory_slot *memslot);
void kvm_init_vmcs(struct kvm *kvm);
void kvm_exc_entry(void);
int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu);

extern unsigned long vpid_mask;
extern const unsigned long kvm_exception_size;
extern const unsigned long kvm_enter_guest_size;
extern struct kvm_world_switch *kvm_loongarch_ops;

#define SW_GCSR		(1 << 0)
#define HW_GCSR		(1 << 1)
#define INVALID_GCSR	(1 << 2)

int get_gcsr_flag(int csr);
void set_hw_gcsr(int csr_id, unsigned long val);

#endif /* __ASM_LOONGARCH_KVM_HOST_H__ */