/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#ifndef __ASM_LOONGARCH_KVM_HOST_H__
#define __ASM_LOONGARCH_KVM_HOST_H__

#include <linux/cpumask.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/types.h>

#include <asm/inst.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_ipi.h>
#include <asm/loongarch.h>

/* LoongArch KVM register ids */
#define KVM_GET_IOC_CSR_IDX(id)		((id & KVM_CSR_IDX_MASK) >> LOONGARCH_REG_SHIFT)
#define KVM_GET_IOC_CPUCFG_IDX(id)	((id & KVM_CPUCFG_IDX_MASK) >> LOONGARCH_REG_SHIFT)

#define KVM_MAX_VCPUS			256
#define KVM_MAX_CPUCFG_REGS		21

#define KVM_HALT_POLL_NS_DEFAULT	500000
#define KVM_REQ_TLB_FLUSH_GPA		KVM_ARCH_REQ(0)
#define KVM_REQ_STEAL_UPDATE		KVM_ARCH_REQ(1)
#define KVM_REQ_PMU			KVM_ARCH_REQ(2)

#define KVM_GUESTDBG_SW_BP_MASK		\
	(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)
#define KVM_GUESTDBG_VALID_MASK		\
	(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP | KVM_GUESTDBG_SINGLESTEP)

#define KVM_DIRTY_LOG_MANUAL_CAPS	\
	(KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | KVM_DIRTY_LOG_INITIALLY_SET)

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
	u64 pages;
	u64 hugepages;
	u64 ipi_read_exits;
	u64 ipi_write_exits;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 int_exits;
	u64 idle_exits;
	u64 cpucfg_exits;
	u64 signal_exits;
	u64 hypercall_exits;
};

#define KVM_MEM_HUGEPAGE_CAPABLE	(1UL << 0)
#define KVM_MEM_HUGEPAGE_INCAPABLE	(1UL << 1)
struct kvm_arch_memory_slot {
	unsigned long flags;
};

#define HOST_MAX_PMNUM			16
struct kvm_context {
	unsigned long vpid_cache;
	struct kvm_vcpu *last_vcpu;
	/* Host PMU CSR */
	u64 perf_ctrl[HOST_MAX_PMNUM];
	u64 perf_cntr[HOST_MAX_PMNUM];
};

struct kvm_world_switch {
	int (*exc_entry)(void);
	int (*enter_guest)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	unsigned long page_order;
};

#define MAX_PGTABLE_LEVELS	4

/*
 * Physical CPUID is used for interrupt routing, and the maximum supported
 * physical CPUID differs between hardware blocks:
 *
 *  For the LOONGARCH_CSR_CPUID register, the max CPUID size is 512
 *  For the IPI hardware, the max destination CPUID size is 1024
 *  For the extioi interrupt controller, the max destination CPUID size is 256
 *  For the msgint interrupt controller, the max supported CPUID size is 65536
 *
 * Currently the max CPUID is defined as 256 for the KVM hypervisor; in the
 * future it may be expanded to 4096, covering at most 16 packages with up
 * to 256 vCPUs per package.
 */
#define KVM_MAX_PHYID		256
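
/*
 * Illustrative sketch only: a physical CPUID taken from the guest (for
 * example a value written to CSR.CPUID or an IPI destination id) is
 * assumed to be range-checked against KVM_MAX_PHYID before it is used
 * to index the phyid map declared below, along the lines of:
 *
 *	if (cpuid >= KVM_MAX_PHYID)
 *		return -EINVAL;
 *	map->phys_map[cpuid].vcpu = vcpu;
 */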

struct kvm_phyid_info {
	struct kvm_vcpu	*vcpu;
	bool		enabled;
};

struct kvm_phyid_map {
	int max_phyid;
	struct kvm_phyid_info phys_map[KVM_MAX_PHYID];
};

struct kvm_arch {
	/* Guest physical mm */
	kvm_pte_t *pgd;
	unsigned long gpa_size;
	unsigned long invalid_ptes[MAX_PGTABLE_LEVELS];
	unsigned int  pte_shifts[MAX_PGTABLE_LEVELS];
	unsigned int  root_level;
	spinlock_t    phyid_map_lock;
	struct kvm_phyid_map  *phyid_map;
	/* Enabled PV features */
	unsigned long pv_features;

	s64 time_offset;
	struct kvm_context __percpu *vmcs;
	struct loongarch_ipi *ipi;
};

#define CSR_MAX_NUMS		0x800

struct loongarch_csrs {
	unsigned long csrs[CSR_MAX_NUMS];
};

/* Resume Flags */
#define RESUME_HOST		0
#define RESUME_GUEST		1

enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_DO_IOCSR,	/* handle IOCSR request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_EXCEPT,		/* a guest exception has been generated */
};

#define KVM_LARCH_FPU		(0x1 << 0)
#define KVM_LARCH_LSX		(0x1 << 1)
#define KVM_LARCH_LASX		(0x1 << 2)
#define KVM_LARCH_LBT		(0x1 << 3)
#define KVM_LARCH_PMU		(0x1 << 4)
#define KVM_LARCH_SWCSR_LATEST	(0x1 << 5)
#define KVM_LARCH_HWCSR_USABLE	(0x1 << 6)

#define LOONGARCH_PV_FEAT_UPDATED	BIT_ULL(63)
#define LOONGARCH_PV_FEAT_MASK		(BIT(KVM_FEATURE_IPI) |		\
					 BIT(KVM_FEATURE_STEAL_TIME) |	\
					 BIT(KVM_FEATURE_VIRT_EXTIOI))
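
/*
 * Illustrative sketch only: the KVM_LARCH_* bits above are kept in
 * kvm_vcpu_arch::aux_inuse (defined below) and record which pieces of
 * guest state are currently resident in hardware. A typical lazy-restore
 * check on the FPU bit, assuming a kvm_own_fpu()-style helper, looks like:
 *
 *	if (!(vcpu->arch.aux_inuse & KVM_LARCH_FPU))
 *		kvm_own_fpu(vcpu);	// loads guest FPU state and sets KVM_LARCH_FPU
 */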

struct kvm_vcpu_arch {
	/*
	 * Switch pointer-to-function type to unsigned long
	 * for loading the value into a register directly.
	 */
	unsigned long host_eentry;
	unsigned long guest_eentry;

	/* Pointers stored here for easy access from assembly code */
	int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);

	/* Host registers preserved across guest-mode execution */
	unsigned long host_sp;
	unsigned long host_tp;
	unsigned long host_pgd;

	/* Host CSRs are used when handling exits from the guest */
	unsigned long badi;
	unsigned long badv;
	unsigned long host_ecfg;
	unsigned long host_estat;
	unsigned long host_percpu;

	/* GPRs */
	unsigned long gprs[32];
	unsigned long pc;

	/* Which auxiliary state is loaded (KVM_LARCH_*) */
	unsigned int aux_inuse;

	/* FPU state */
	struct loongarch_fpu fpu FPU_ALIGN;
	struct loongarch_lbt lbt;

	/* CSR state */
	struct loongarch_csrs *csr;

	/* Guest max PMU CSR id */
	int max_pmu_csrid;

	/* GPR used as IO source/target */
	u32 io_gpr;

	/* KVM register to control count timer */
	u32 count_ctl;
	struct hrtimer swtimer;

	/* Bitmask of interrupts that are pending */
	unsigned long irq_pending;
	/* Bitmask of pending interrupts to be cleared */
	unsigned long irq_clear;

	/* Bitmask of exceptions that are pending */
	unsigned long exception_pending;
	unsigned int  esubcode;

	/* Cache for pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* vcpu's vpid */
	u64 vpid;
	gpa_t flush_gpa;

	/* Frequency of stable timer in Hz */
	u64 timer_mhz;
	ktime_t expire;

	/* Last CPU the vCPU state was loaded on */
	int last_sched_cpu;
	/* mp state */
	struct kvm_mp_state mp_state;
	/* ipi state */
	struct ipi_state ipi_state;
	/* cpucfg */
	u32 cpucfg[KVM_MAX_CPUCFG_REGS];

	/* paravirt steal time */
	struct {
		u64 guest_addr;
		u64 last_steal;
		struct gfn_to_hva_cache cache;
	} st;
};

static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg)
{
	return csr->csrs[reg];
}

static inline void writel_sw_gcsr(struct loongarch_csrs *csr, int reg, unsigned long val)
{
	csr->csrs[reg] = val;
}

static inline bool kvm_guest_has_fpu(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[2] & CPUCFG2_FP;
}

static inline bool kvm_guest_has_lsx(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[2] & CPUCFG2_LSX;
}

static inline bool kvm_guest_has_lasx(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[2] & CPUCFG2_LASX;
}

static inline bool kvm_guest_has_lbt(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[2] & (CPUCFG2_X86BT | CPUCFG2_ARMBT | CPUCFG2_MIPSBT);
}

static inline bool kvm_guest_has_pmu(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[6] & CPUCFG6_PMP;
}

static inline int kvm_get_pmu_num(struct kvm_vcpu_arch *arch)
{
	return (arch->cpucfg[6] & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT;
}
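
/*
 * Illustrative sketch only: the cpucfg helpers above compose naturally in
 * a (hypothetical) consistency check. LASX extends the LSX register file,
 * and LSX in turn shares registers with the FPU, so a CPUCFG2 value that
 * advertises LASX without LSX, or LSX without FP, would be inconsistent:
 *
 *	static bool vec_cfg_consistent(struct kvm_vcpu_arch *arch)
 *	{
 *		if (kvm_guest_has_lasx(arch) && !kvm_guest_has_lsx(arch))
 *			return false;
 *		if (kvm_guest_has_lsx(arch) && !kvm_guest_has_fpu(arch))
 *			return false;
 *		return true;
 *	}
 */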

/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);

/* MMU handling */
void kvm_flush_tlb_all(void);
void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa);
int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write);

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, bool blockable);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

static inline void update_pc(struct kvm_vcpu_arch *arch)
{
	arch->pc += 4;
}

/*
 * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to an ifetch fault.
 * @arch: VCPU architecture state.
 *
 * Returns: Whether the TLBL exception was likely due to an instruction
 * fetch fault rather than a data load fault.
 */
static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *arch)
{
	return arch->pc == arch->badv;
}

/* Misc */
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) {}
void kvm_check_vpid(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer);
void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, const struct kvm_memory_slot *memslot);
void kvm_init_vmcs(struct kvm *kvm);
void kvm_exc_entry(void);
int  kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu);

extern unsigned long vpid_mask;
extern const unsigned long kvm_exception_size;
extern const unsigned long kvm_enter_guest_size;
extern struct kvm_world_switch *kvm_loongarch_ops;

#define SW_GCSR		(1 << 0)
#define HW_GCSR		(1 << 1)
#define INVALID_GCSR	(1 << 2)

int  get_gcsr_flag(int csr);
void set_hw_gcsr(int csr_id, unsigned long val);

#endif /* __ASM_LOONGARCH_KVM_HOST_H__ */