/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#ifndef __ASM_LOONGARCH_KVM_HOST_H__
#define __ASM_LOONGARCH_KVM_HOST_H__

#include <linux/cpumask.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/types.h>

#include <asm/inst.h>
#include <asm/kvm_mmu.h>
#include <asm/loongarch.h>

/* LoongArch KVM register IDs */
#define KVM_GET_IOC_CSR_IDX(id)		((id & KVM_CSR_IDX_MASK) >> LOONGARCH_REG_SHIFT)
#define KVM_GET_IOC_CPUCFG_IDX(id)	((id & KVM_CPUCFG_IDX_MASK) >> LOONGARCH_REG_SHIFT)

#define KVM_MAX_VCPUS			256
#define KVM_MAX_CPUCFG_REGS		21

#define KVM_HALT_POLL_NS_DEFAULT	500000
#define KVM_REQ_TLB_FLUSH_GPA		KVM_ARCH_REQ(0)
#define KVM_REQ_STEAL_UPDATE		KVM_ARCH_REQ(1)
#define KVM_REQ_PMU			KVM_ARCH_REQ(2)

#define KVM_GUESTDBG_SW_BP_MASK		\
	(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)
#define KVM_GUESTDBG_VALID_MASK		\
	(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP | KVM_GUESTDBG_SINGLESTEP)

#define KVM_DIRTY_LOG_MANUAL_CAPS	\
	(KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | KVM_DIRTY_LOG_INITIALLY_SET)

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
	u64 pages;
	u64 hugepages;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 int_exits;
	u64 idle_exits;
	u64 cpucfg_exits;
	u64 signal_exits;
	u64 hypercall_exits;
};

#define KVM_MEM_HUGEPAGE_CAPABLE	(1UL << 0)
#define KVM_MEM_HUGEPAGE_INCAPABLE	(1UL << 1)
struct kvm_arch_memory_slot {
	unsigned long flags;
};

#define HOST_MAX_PMNUM			16
struct kvm_context {
	unsigned long vpid_cache;
	struct kvm_vcpu *last_vcpu;
	/* Host PMU CSR */
	u64 perf_ctrl[HOST_MAX_PMNUM];
	u64 perf_cntr[HOST_MAX_PMNUM];
};

struct kvm_world_switch {
	int (*exc_entry)(void);
	int (*enter_guest)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	unsigned long page_order;
};

#define MAX_PGTABLE_LEVELS	4

/*
 * The physical CPUID is used for interrupt routing; different hardware
 * blocks define the physical CPUID differently:
 *
 * For the LOONGARCH_CSR_CPUID register, the max CPUID is 512
 * For the IPI hardware, the max destination CPUID is 1024
 * For the extioi interrupt controller, the max destination CPUID is 256
 * For the msgint interrupt controller, the max supported CPUID is 65536
 *
 * Currently the max CPUID is defined as 256 for the KVM hypervisor; in
 * the future it may be expanded to 4096, i.e. at most 16 packages with
 * up to 256 vCPUs each.
 */
#define KVM_MAX_PHYID		256

struct kvm_phyid_info {
	struct kvm_vcpu	*vcpu;
	bool		enabled;
};

struct kvm_phyid_map {
	int max_phyid;
	struct kvm_phyid_info phys_map[KVM_MAX_PHYID];
};
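
/*
 * Illustrative sketch, not part of the upstream header: how a physical
 * CPUID would be resolved to its vCPU through the map above. A real
 * caller would hold kvm->arch.phyid_map_lock (declared below) so the
 * map cannot change underneath it.
 */
static inline struct kvm_vcpu *kvm_example_phyid_to_vcpu(struct kvm_phyid_map *map,
							 int phyid)
{
	/* Reject out-of-range ids and slots that were never enabled */
	if (phyid < 0 || phyid >= KVM_MAX_PHYID || !map->phys_map[phyid].enabled)
		return NULL;

	return map->phys_map[phyid].vcpu;
}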

struct kvm_arch {
	/* Guest physical mm */
	kvm_pte_t *pgd;
	unsigned long gpa_size;
	unsigned long invalid_ptes[MAX_PGTABLE_LEVELS];
	unsigned int  pte_shifts[MAX_PGTABLE_LEVELS];
	unsigned int  root_level;
	spinlock_t    phyid_map_lock;
	struct kvm_phyid_map *phyid_map;
	/* Enabled PV features */
	unsigned long pv_features;

	s64 time_offset;
	struct kvm_context __percpu *vmcs;
};

#define CSR_MAX_NUMS		0x800

struct loongarch_csrs {
	unsigned long csrs[CSR_MAX_NUMS];
};

/* Resume Flags */
#define RESUME_HOST		0
#define RESUME_GUEST		1

enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_DO_IOCSR,	/* handle IOCSR request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_EXCEPT,		/* a guest exception has been generated */
};

#define KVM_LARCH_FPU		(0x1 << 0)
#define KVM_LARCH_LSX		(0x1 << 1)
#define KVM_LARCH_LASX		(0x1 << 2)
#define KVM_LARCH_LBT		(0x1 << 3)
#define KVM_LARCH_PMU		(0x1 << 4)
#define KVM_LARCH_SWCSR_LATEST	(0x1 << 5)
#define KVM_LARCH_HWCSR_USABLE	(0x1 << 6)

#define LOONGARCH_PV_FEAT_UPDATED	BIT_ULL(63)
#define LOONGARCH_PV_FEAT_MASK		(BIT(KVM_FEATURE_IPI) |		\
					 BIT(KVM_FEATURE_STEAL_TIME) |	\
					 BIT(KVM_FEATURE_VIRT_EXTIOI))

struct kvm_vcpu_arch {
	/*
	 * The entry points are stored as unsigned long rather than as
	 * function pointers so the values can be loaded into a register
	 * directly.
	 */
	unsigned long host_eentry;
	unsigned long guest_eentry;

	/* Pointers stored here for easy access from assembly code */
	int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);

	/* Host registers preserved across guest-mode execution */
	unsigned long host_sp;
	unsigned long host_tp;
	unsigned long host_pgd;

	/* Host CSRs are used when handling exits from guest */
	unsigned long badi;
	unsigned long badv;
	unsigned long host_ecfg;
	unsigned long host_estat;
	unsigned long host_percpu;

	/* GPRs */
	unsigned long gprs[32];
	unsigned long pc;

	/* Which auxiliary state is loaded (KVM_LARCH_*) */
	unsigned int aux_inuse;

	/* FPU state */
	struct loongarch_fpu fpu FPU_ALIGN;
	struct loongarch_lbt lbt;

	/* CSR state */
	struct loongarch_csrs *csr;

	/* Guest max PMU CSR id */
	int max_pmu_csrid;

	/* GPR used as I/O source/target */
	u32 io_gpr;

	/* KVM register to control count timer */
	u32 count_ctl;
	struct hrtimer swtimer;

	/* Bitmask of interrupts that are pending */
	unsigned long irq_pending;
	/* Bitmask of pending interrupts to be cleared */
	unsigned long irq_clear;

	/* Bitmask of exceptions that are pending */
	unsigned long exception_pending;
	unsigned int  esubcode;

	/* Cache for pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* vCPU's VPID */
	u64 vpid;
	gpa_t flush_gpa;

	/* Frequency of the stable timer in MHz */
	u64 timer_mhz;
	ktime_t expire;

	/* Last CPU the vCPU state was loaded on */
	int last_sched_cpu;
	/* mp state */
	struct kvm_mp_state mp_state;
	/* cpucfg */
	u32 cpucfg[KVM_MAX_CPUCFG_REGS];

	/* paravirt steal time */
	struct {
		u64 guest_addr;
		u64 last_steal;
		struct gfn_to_hva_cache cache;
	} st;
};

static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg)
{
	return csr->csrs[reg];
}

static inline void writel_sw_gcsr(struct loongarch_csrs *csr, int reg, unsigned long val)
{
	csr->csrs[reg] = val;
}
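
/*
 * Usage sketch (illustrative only, not an upstream helper): software-
 * emulated guest CSRs live in csrs[] and are accessed through the two
 * helpers above, e.g. a read-modify-write that sets bits in the soft
 * copy. @reg is assumed to be a valid index below CSR_MAX_NUMS.
 */
static inline unsigned long kvm_example_set_sw_gcsr_bits(struct loongarch_csrs *csr,
							 int reg, unsigned long set)
{
	unsigned long val = readl_sw_gcsr(csr, reg);	/* read the soft copy */

	writel_sw_gcsr(csr, reg, val | set);		/* write back with bits set */
	return val;					/* return the old value */
}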

static inline bool kvm_guest_has_fpu(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[2] & CPUCFG2_FP;
}

static inline bool kvm_guest_has_lsx(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[2] & CPUCFG2_LSX;
}

static inline bool kvm_guest_has_lasx(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[2] & CPUCFG2_LASX;
}

static inline bool kvm_guest_has_lbt(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[2] & (CPUCFG2_X86BT | CPUCFG2_ARMBT | CPUCFG2_MIPSBT);
}

static inline bool kvm_guest_has_pmu(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[6] & CPUCFG6_PMP;
}

static inline int kvm_get_pmu_num(struct kvm_vcpu_arch *arch)
{
	return (arch->cpucfg[6] & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT;
}

/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);

/* MMU handling */
void kvm_flush_tlb_all(void);
void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa);
int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write);

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, bool blockable);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

static inline void update_pc(struct kvm_vcpu_arch *arch)
{
	arch->pc += 4;
}
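
/*
 * Illustrative sketch, not part of the upstream header: a typical exit
 * handler advances the PC past the 4-byte trapping instruction with
 * update_pc() once emulation is complete, and leaves it untouched when
 * the instruction must be replayed or an exception is injected instead.
 */
static inline void kvm_example_post_emulation(struct kvm_vcpu_arch *arch,
					      enum emulation_result er)
{
	if (er == EMULATE_DONE)
		update_pc(arch);	/* skip the emulated instruction */
}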

/*
 * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to an ifetch fault.
 * @arch: vCPU architecture state.
 *
 * Returns: Whether the TLBL exception was likely due to an instruction
 * fetch fault rather than a data load fault.
 */
static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *arch)
{
	return arch->pc == arch->badv;
}

/* Misc */
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) {}
void kvm_check_vpid(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer);
void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, const struct kvm_memory_slot *memslot);
void kvm_init_vmcs(struct kvm *kvm);
void kvm_exc_entry(void);
int  kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu);

extern unsigned long vpid_mask;
extern const unsigned long kvm_exception_size;
extern const unsigned long kvm_enter_guest_size;
extern struct kvm_world_switch *kvm_loongarch_ops;

#define SW_GCSR		(1 << 0)
#define HW_GCSR		(1 << 1)
#define INVALID_GCSR	(1 << 2)

int  get_gcsr_flag(int csr);
void set_hw_gcsr(int csr_id, unsigned long val);

#endif /* __ASM_LOONGARCH_KVM_HOST_H__ */