/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#ifndef __ASM_LOONGARCH_KVM_HOST_H__
#define __ASM_LOONGARCH_KVM_HOST_H__

#include <linux/cpumask.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/types.h>

#include <asm/inst.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_ipi.h>
#include <asm/kvm_eiointc.h>
#include <asm/kvm_pch_pic.h>
#include <asm/loongarch.h>

/* LoongArch KVM register ids */
#define KVM_GET_IOC_CSR_IDX(id)		((id & KVM_CSR_IDX_MASK) >> LOONGARCH_REG_SHIFT)
#define KVM_GET_IOC_CPUCFG_IDX(id)	((id & KVM_CPUCFG_IDX_MASK) >> LOONGARCH_REG_SHIFT)

#define KVM_MAX_VCPUS			256
#define KVM_MAX_CPUCFG_REGS		21

#define KVM_HALT_POLL_NS_DEFAULT	500000
#define KVM_REQ_TLB_FLUSH_GPA		KVM_ARCH_REQ(0)
#define KVM_REQ_STEAL_UPDATE		KVM_ARCH_REQ(1)
#define KVM_REQ_PMU			KVM_ARCH_REQ(2)

#define KVM_GUESTDBG_SW_BP_MASK		\
	(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)
#define KVM_GUESTDBG_VALID_MASK		\
	(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP | KVM_GUESTDBG_SINGLESTEP)

#define KVM_DIRTY_LOG_MANUAL_CAPS	\
	(KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | KVM_DIRTY_LOG_INITIALLY_SET)

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
	u64 pages;
	u64 hugepages;
	u64 ipi_read_exits;
	u64 ipi_write_exits;
	u64 eiointc_read_exits;
	u64 eiointc_write_exits;
	u64 pch_pic_read_exits;
	u64 pch_pic_write_exits;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 int_exits;
	u64 idle_exits;
	u64 cpucfg_exits;
	u64 signal_exits;
	u64 hypercall_exits;
};

#define KVM_MEM_HUGEPAGE_CAPABLE	(1UL << 0)
#define KVM_MEM_HUGEPAGE_INCAPABLE	(1UL << 1)
struct kvm_arch_memory_slot {
	unsigned long flags;
};

#define HOST_MAX_PMNUM			16
struct kvm_context {
	unsigned long vpid_cache;
	struct kvm_vcpu *last_vcpu;
	/* Host PMU CSR */
	u64 perf_ctrl[HOST_MAX_PMNUM];
	u64 perf_cntr[HOST_MAX_PMNUM];
};

struct kvm_world_switch {
	int (*exc_entry)(void);
	int (*enter_guest)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	unsigned long page_order;
};

#define MAX_PGTABLE_LEVELS	4

/*
 * The physical CPUID is used for interrupt routing; different hardware
 * blocks define the physical CPUID differently.
 *
 * For the LOONGARCH_CSR_CPUID register, the max CPUID size is 512.
 * For the IPI hardware, the max destination CPUID size is 1024.
 * For the eiointc interrupt controller, the max destination CPUID size is 256.
 * For the msgint interrupt controller, the max supported CPUID size is 65536.
 *
 * Currently the max CPUID is defined as 256 for the KVM hypervisor; in the
 * future it will be expanded to 4096, covering at most 16 packages, with
 * every package supporting at most 256 vCPUs.
 */
#define KVM_MAX_PHYID		256

struct kvm_phyid_info {
	struct kvm_vcpu	*vcpu;
	bool		enabled;
};

struct kvm_phyid_map {
	int max_phyid;
	struct kvm_phyid_info phys_map[KVM_MAX_PHYID];
};
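
/*
 * Illustrative sketch only (not part of this header): the map above is what
 * lets interrupt emulation resolve a guest physical CPUID back to its vCPU.
 * Assuming the caller holds kvm->arch.phyid_map_lock, a lookup would look
 * roughly like:
 *
 *	struct kvm_phyid_map *map = kvm->arch.phyid_map;
 *	struct kvm_vcpu *vcpu = NULL;
 *
 *	if (cpuid <= map->max_phyid && map->phys_map[cpuid].enabled)
 *		vcpu = map->phys_map[cpuid].vcpu;
 *
 * The real lookup lives in the vCPU/interrupt code; this only shows how
 * max_phyid, enabled and vcpu relate.
 */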

struct kvm_arch {
	/* Guest physical mm */
	kvm_pte_t *pgd;
	unsigned long gpa_size;
	unsigned long invalid_ptes[MAX_PGTABLE_LEVELS];
	unsigned int  pte_shifts[MAX_PGTABLE_LEVELS];
	unsigned int  root_level;
	spinlock_t    phyid_map_lock;
	struct kvm_phyid_map *phyid_map;
	/* Enabled PV features */
	unsigned long pv_features;

	s64 time_offset;
	struct kvm_context __percpu *vmcs;
	struct loongarch_ipi *ipi;
	struct loongarch_eiointc *eiointc;
	struct loongarch_pch_pic *pch_pic;
};

#define CSR_MAX_NUMS		0x800

struct loongarch_csrs {
	unsigned long csrs[CSR_MAX_NUMS];
};

/* Resume Flags */
#define RESUME_HOST		0
#define RESUME_GUEST		1

enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_DO_IOCSR,	/* handle IOCSR request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_EXCEPT,		/* A guest exception has been generated */
};

#define KVM_LARCH_FPU		(0x1 << 0)
#define KVM_LARCH_LSX		(0x1 << 1)
#define KVM_LARCH_LASX		(0x1 << 2)
#define KVM_LARCH_LBT		(0x1 << 3)
#define KVM_LARCH_PMU		(0x1 << 4)
#define KVM_LARCH_SWCSR_LATEST	(0x1 << 5)
#define KVM_LARCH_HWCSR_USABLE	(0x1 << 6)

#define LOONGARCH_PV_FEAT_UPDATED	BIT_ULL(63)
#define LOONGARCH_PV_FEAT_MASK		(BIT(KVM_FEATURE_IPI) |		\
					 BIT(KVM_FEATURE_STEAL_TIME) |	\
					 BIT(KVM_FEATURE_VIRT_EXTIOI))
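
/*
 * Illustrative sketch only (not from this header): pv_features is expected
 * to hold the enabled subset of LOONGARCH_PV_FEAT_MASK for a guest, so a
 * paravirt-feature test reduces to a bit check, e.g.
 *
 *	if (kvm->arch.pv_features & BIT(KVM_FEATURE_STEAL_TIME))
 *		...steal-time accounting is enabled for this guest...
 *
 * LOONGARCH_PV_FEAT_UPDATED is assumed here to be bookkeeping (the feature
 * set has been configured by user space) rather than a guest-visible bit.
 */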

struct kvm_vcpu_arch {
	/*
	 * Switch pointer-to-function type to unsigned long
	 * for loading the value into a register directly.
	 */
	unsigned long host_eentry;
	unsigned long guest_eentry;

	/* Pointers stored here for easy access from assembly code */
	int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);

	/* Host registers preserved across guest mode execution */
	unsigned long host_sp;
	unsigned long host_tp;
	unsigned long host_pgd;

	/* Host CSRs are used when handling exits from guest */
	unsigned long badi;
	unsigned long badv;
	unsigned long host_ecfg;
	unsigned long host_estat;
	unsigned long host_percpu;

	/* GPRs */
	unsigned long gprs[32];
	unsigned long pc;

	/* Which auxiliary state is loaded (KVM_LARCH_*) */
	unsigned int aux_inuse;

	/* FPU state */
	struct loongarch_fpu fpu FPU_ALIGN;
	struct loongarch_lbt lbt;

	/* CSR state */
	struct loongarch_csrs *csr;

	/* Guest max PMU CSR id */
	int max_pmu_csrid;

	/* GPR used as IO source/target */
	u32 io_gpr;

	/* KVM register to control count timer */
	u32 count_ctl;
	struct hrtimer swtimer;

	/* Bitmask of intr that are pending */
	unsigned long irq_pending;
	/* Bitmask of pending intr to be cleared */
	unsigned long irq_clear;

	/* Bitmask of exceptions that are pending */
	unsigned long exception_pending;
	unsigned int  esubcode;

	/* Cache for pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* vcpu's vpid */
	u64 vpid;
	gpa_t flush_gpa;

	/* Frequency of stable timer in Hz */
	u64 timer_mhz;
	ktime_t expire;

	/* Last CPU the vCPU state was loaded on */
	int last_sched_cpu;
	/* mp state */
	struct kvm_mp_state mp_state;
	/* ipi state */
	struct ipi_state ipi_state;
	/* cpucfg */
	u32 cpucfg[KVM_MAX_CPUCFG_REGS];

	/* paravirt steal time */
	struct {
		u64 guest_addr;
		u64 last_steal;
		struct gfn_to_hva_cache cache;
	} st;
};

static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg)
{
	return csr->csrs[reg];
}

static inline void writel_sw_gcsr(struct loongarch_csrs *csr, int reg, unsigned long val)
{
	csr->csrs[reg] = val;
}
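
/*
 * Illustrative sketch only: the accessors above back the software CSR image
 * that is used when a guest CSR is emulated rather than held in a hardware
 * GCSR. A hypothetical emulation path for a trapped CSR write could be
 *
 *	old = readl_sw_gcsr(vcpu->arch.csr, csrid);
 *	writel_sw_gcsr(vcpu->arch.csr, csrid, val);
 *
 * with csrid bounded by CSR_MAX_NUMS; the get_gcsr_flag()/SW_GCSR helpers
 * declared near the end of this header are assumed to decide whether the
 * software image or the hardware GCSR is the backing store.
 */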

static inline bool kvm_guest_has_fpu(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[2] & CPUCFG2_FP;
}

static inline bool kvm_guest_has_lsx(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[2] & CPUCFG2_LSX;
}

static inline bool kvm_guest_has_lasx(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[2] & CPUCFG2_LASX;
}

static inline bool kvm_guest_has_lbt(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[2] & (CPUCFG2_X86BT | CPUCFG2_ARMBT | CPUCFG2_MIPSBT);
}

static inline bool kvm_guest_has_pmu(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[6] & CPUCFG6_PMP;
}

static inline int kvm_get_pmu_num(struct kvm_vcpu_arch *arch)
{
	return (arch->cpucfg[6] & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT;
}

/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);

/* MMU handling */
void kvm_flush_tlb_all(void);
void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa);
int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write);

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, bool blockable);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

static inline void update_pc(struct kvm_vcpu_arch *arch)
{
	arch->pc += 4;
}

/*
 * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to an ifetch fault.
 * @arch: vCPU architecture state.
 *
 * Returns: Whether the TLBL exception was likely due to an instruction
 *	    fetch fault rather than a data load fault.
 */
static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *arch)
{
	return arch->pc == arch->badv;
}

/* Misc */
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) {}
void kvm_check_vpid(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer);
void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, const struct kvm_memory_slot *memslot);
void kvm_init_vmcs(struct kvm *kvm);
void kvm_exc_entry(void);
int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu);

extern unsigned long vpid_mask;
extern const unsigned long kvm_exception_size;
extern const unsigned long kvm_enter_guest_size;
extern struct kvm_world_switch *kvm_loongarch_ops;

#define SW_GCSR		(1 << 0)
#define HW_GCSR		(1 << 1)
#define INVALID_GCSR	(1 << 2)

int get_gcsr_flag(int csr);
void set_hw_gcsr(int csr_id, unsigned long val);

#endif /* __ASM_LOONGARCH_KVM_HOST_H__ */