/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#ifndef __ASM_LOONGARCH_KVM_HOST_H__
#define __ASM_LOONGARCH_KVM_HOST_H__

#include <linux/cpumask.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/types.h>

#include <asm/inst.h>
#include <asm/kvm_mmu.h>
#include <asm/loongarch.h>

/* Loongarch KVM register ids */
#define KVM_GET_IOC_CSR_IDX(id)		((id & KVM_CSR_IDX_MASK) >> LOONGARCH_REG_SHIFT)
#define KVM_GET_IOC_CPUCFG_IDX(id)	((id & KVM_CPUCFG_IDX_MASK) >> LOONGARCH_REG_SHIFT)

#define KVM_MAX_VCPUS			256
#define KVM_MAX_CPUCFG_REGS		21

#define KVM_HALT_POLL_NS_DEFAULT	500000
#define KVM_REQ_TLB_FLUSH_GPA		KVM_ARCH_REQ(0)
#define KVM_REQ_STEAL_UPDATE		KVM_ARCH_REQ(1)

#define KVM_GUESTDBG_SW_BP_MASK		\
	(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)
#define KVM_GUESTDBG_VALID_MASK		\
	(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP | KVM_GUESTDBG_SINGLESTEP)

#define KVM_DIRTY_LOG_MANUAL_CAPS	\
	(KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | KVM_DIRTY_LOG_INITIALLY_SET)

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
	u64 pages;
	u64 hugepages;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 int_exits;
	u64 idle_exits;
	u64 cpucfg_exits;
	u64 signal_exits;
	u64 hypercall_exits;
};

#define KVM_MEM_HUGEPAGE_CAPABLE	(1UL << 0)
#define KVM_MEM_HUGEPAGE_INCAPABLE	(1UL << 1)
struct kvm_arch_memory_slot {
	unsigned long flags;
};

struct kvm_context {
	unsigned long vpid_cache;
	struct kvm_vcpu *last_vcpu;
};

struct kvm_world_switch {
	int (*exc_entry)(void);
	int (*enter_guest)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	unsigned long page_order;
};

#define MAX_PGTABLE_LEVELS	4

/*
 * Physical CPUID is used for interrupt routing; there are different
 * definitions of physical cpuid on different hardware.
 *
 *  For the LOONGARCH_CSR_CPUID register, the max CPUID size is 512.
 *  For IPI hardware, the max destination CPUID size is 1024.
 *  For the extioi interrupt controller, the max destination CPUID size is 256.
 *  For the msgint interrupt controller, the max supported CPUID size is 65536.
 *
 * Currently the max CPUID is defined as 256 for the KVM hypervisor; in the
 * future it will be expanded to 4096, covering at most 16 packages, each
 * of which supports at most 256 vcpus.
 */
#define KVM_MAX_PHYID		256

struct kvm_phyid_info {
	struct kvm_vcpu	*vcpu;
	bool		enabled;
};

struct kvm_phyid_map {
	int max_phyid;
	struct kvm_phyid_info phys_map[KVM_MAX_PHYID];
};
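/*
 * Illustrative sketch only (hypothetical helper, not part of this header's
 * API): one way the map above can resolve a guest physical CPUID back to
 * its vCPU. Real callers would also serialize against phyid_map_lock in
 * struct kvm_arch below.
 */
static inline struct kvm_vcpu *kvm_phyid_to_vcpu_example(struct kvm_phyid_map *map,
							 int phyid)
{
	/* max_phyid tracks the highest id installed so far */
	if (phyid < 0 || phyid > map->max_phyid)
		return NULL;

	/* A slot is only meaningful once it has been marked enabled */
	if (!map->phys_map[phyid].enabled)
		return NULL;

	return map->phys_map[phyid].vcpu;
}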
struct kvm_arch {
	/* Guest physical mm */
	kvm_pte_t *pgd;
	unsigned long gpa_size;
	unsigned long invalid_ptes[MAX_PGTABLE_LEVELS];
	unsigned int  pte_shifts[MAX_PGTABLE_LEVELS];
	unsigned int  root_level;
	spinlock_t    phyid_map_lock;
	struct kvm_phyid_map *phyid_map;

	s64 time_offset;
	struct kvm_context __percpu *vmcs;
};

#define CSR_MAX_NUMS		0x800

struct loongarch_csrs {
	unsigned long csrs[CSR_MAX_NUMS];
};

/* Resume Flags */
#define RESUME_HOST		0
#define RESUME_GUEST		1

enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_DO_IOCSR,	/* handle IOCSR request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_EXCEPT,		/* a guest exception has been generated */
};

#define KVM_LARCH_FPU		(0x1 << 0)
#define KVM_LARCH_LSX		(0x1 << 1)
#define KVM_LARCH_LASX		(0x1 << 2)
#define KVM_LARCH_SWCSR_LATEST	(0x1 << 3)
#define KVM_LARCH_HWCSR_USABLE	(0x1 << 4)

struct kvm_vcpu_arch {
	/*
	 * Switch pointer-to-function type to unsigned long
	 * for loading the value into a register directly.
	 */
	unsigned long host_eentry;
	unsigned long guest_eentry;

	/* Pointers stored here for easy access from assembly code */
	int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);

	/* Host registers preserved across guest-mode execution */
	unsigned long host_sp;
	unsigned long host_tp;
	unsigned long host_pgd;

	/* Host CSRs are used when handling exits from the guest */
	unsigned long badi;
	unsigned long badv;
	unsigned long host_ecfg;
	unsigned long host_estat;
	unsigned long host_percpu;

	/* GPRs */
	unsigned long gprs[32];
	unsigned long pc;

	/* Which auxiliary state is loaded (KVM_LARCH_*) */
	unsigned int aux_inuse;

	/* FPU state */
	struct loongarch_fpu fpu FPU_ALIGN;

	/* CSR state */
	struct loongarch_csrs *csr;

	/* GPR used as IO source/target */
	u32 io_gpr;

	/* KVM register to control the count timer */
	u32 count_ctl;
	struct hrtimer swtimer;

	/* Bitmask of interrupts that are pending */
	unsigned long irq_pending;
	/* Bitmask of pending interrupts to be cleared */
	unsigned long irq_clear;

	/* Bitmask of exceptions that are pending */
	unsigned long exception_pending;
	unsigned int  esubcode;

	/* Cache for pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* vcpu's vpid */
	u64 vpid;
	gpa_t flush_gpa;

	/* Frequency of the stable timer in MHz (see the timer_mhz name) */
	u64 timer_mhz;
	ktime_t expire;

	/* Last CPU the vCPU state was loaded on */
	int last_sched_cpu;
	/* mp state */
	struct kvm_mp_state mp_state;
	/* cpucfg */
	u32 cpucfg[KVM_MAX_CPUCFG_REGS];

	/* paravirt steal time */
	struct {
		u64 guest_addr;
		u64 last_steal;
		struct gfn_to_hva_cache cache;
	} st;
};

static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg)
{
	return csr->csrs[reg];
}

static inline void writel_sw_gcsr(struct loongarch_csrs *csr, int reg, unsigned long val)
{
	csr->csrs[reg] = val;
}
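/*
 * Illustrative sketch only (hypothetical helper, not in-tree code): the
 * accessors above back software-resident guest CSRs, e.g. when a trapped
 * guest CSR exchange must return the old value and commit a new one.
 */
static inline unsigned long csr_swap_sw_gcsr_example(struct loongarch_csrs *csr,
						     int reg, unsigned long val)
{
	unsigned long old = readl_sw_gcsr(csr, reg);	/* value handed back to the guest */

	writel_sw_gcsr(csr, reg, val);			/* commit the new value */
	return old;
}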
static inline bool kvm_guest_has_fpu(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[2] & CPUCFG2_FP;
}

static inline bool kvm_guest_has_lsx(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[2] & CPUCFG2_LSX;
}

static inline bool kvm_guest_has_lasx(struct kvm_vcpu_arch *arch)
{
	return arch->cpucfg[2] & CPUCFG2_LASX;
}

/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);

/* MMU handling */
void kvm_flush_tlb_all(void);
void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa);
int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write);

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, bool blockable);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

static inline void update_pc(struct kvm_vcpu_arch *arch)
{
	arch->pc += 4;
}

/*
 * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to an ifetch fault.
 * @arch: Virtual CPU architecture state.
 *
 * Returns: Whether the TLBL exception was likely due to an instruction
 * fetch fault rather than a data load fault.
 */
static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *arch)
{
	return arch->pc == arch->badv;
}

/* Misc */
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) {}
void kvm_check_vpid(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer);
void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, const struct kvm_memory_slot *memslot);
void kvm_init_vmcs(struct kvm *kvm);
void kvm_exc_entry(void);
int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu);

extern unsigned long vpid_mask;
extern const unsigned long kvm_exception_size;
extern const unsigned long kvm_enter_guest_size;
extern struct kvm_world_switch *kvm_loongarch_ops;

#define SW_GCSR		(1 << 0)
#define HW_GCSR		(1 << 1)
#define INVALID_GCSR	(1 << 2)

int get_gcsr_flag(int csr);
void set_hw_gcsr(int csr_id, unsigned long val);

#endif /* __ASM_LOONGARCH_KVM_HOST_H__ */
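/*
 * Illustrative sketch only (hypothetical caller, not in-tree code): how the
 * SW_GCSR/HW_GCSR classification returned by get_gcsr_flag() might route a
 * guest CSR write, assuming a vcpu whose software CSR image lives in
 * vcpu->arch.csr:
 *
 *	if (get_gcsr_flag(csr_id) & HW_GCSR)
 *		set_hw_gcsr(csr_id, val);			(hardware-backed GCSR)
 *	else if (get_gcsr_flag(csr_id) & SW_GCSR)
 *		writel_sw_gcsr(vcpu->arch.csr, csr_id, val);	(software image)
 */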