/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Anup Patel <anup.patel@wdc.com>
 */

#ifndef __RISCV_KVM_HOST_H__
#define __RISCV_KVM_HOST_H__

#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/spinlock.h>
#include <asm/hwcap.h>
#include <asm/kvm_aia.h>
#include <asm/ptrace.h>
#include <asm/kvm_vcpu_fp.h>
#include <asm/kvm_vcpu_insn.h>
#include <asm/kvm_vcpu_sbi.h>
#include <asm/kvm_vcpu_timer.h>
#include <asm/kvm_vcpu_pmu.h>

#define KVM_MAX_VCPUS			1024

#define KVM_HALT_POLL_NS_DEFAULT	500000

#define KVM_VCPU_MAX_FEATURES		0

#define KVM_IRQCHIP_NUM_PINS		1024

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_VCPU_RESET		KVM_ARCH_REQ(1)
#define KVM_REQ_UPDATE_HGATP		KVM_ARCH_REQ(2)
#define KVM_REQ_FENCE_I \
	KVM_ARCH_REQ_FLAGS(3, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HFENCE_GVMA_VMID_ALL	KVM_REQ_TLB_FLUSH
#define KVM_REQ_HFENCE_VVMA_ALL \
	KVM_ARCH_REQ_FLAGS(4, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HFENCE \
	KVM_ARCH_REQ_FLAGS(5, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)

enum kvm_riscv_hfence_type {
	KVM_RISCV_HFENCE_UNKNOWN = 0,
	KVM_RISCV_HFENCE_GVMA_VMID_GPA,
	KVM_RISCV_HFENCE_VVMA_ASID_GVA,
	KVM_RISCV_HFENCE_VVMA_ASID_ALL,
	KVM_RISCV_HFENCE_VVMA_GVA,
};

struct kvm_riscv_hfence {
	enum kvm_riscv_hfence_type type;
	unsigned long asid;
	unsigned long order;
	gpa_t addr;
	gpa_t size;
};

#define KVM_RISCV_VCPU_MAX_HFENCE	64

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 ecall_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 csr_exit_user;
	u64 csr_exit_kernel;
	u64 signal_exits;
	u64 exits;
};

struct kvm_arch_memory_slot {
};

struct kvm_vmid {
	/*
	 * Writes to vmid_version and vmid happen with vmid_lock held
	 * whereas reads happen without any lock held.
	 */
	unsigned long vmid_version;
	unsigned long vmid;
};
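
/*
 * Illustrative sketch only, not part of KVM's interface: given the
 * locking scheme documented above, a hypothetical lockless reader would
 * pair READ_ONCE() with writer-side updates that are made while holding
 * vmid_lock.
 */
static inline unsigned long kvm_riscv_vmid_peek_example(struct kvm_vmid *v)
{
	/* Safe without vmid_lock; writers publish updates under the lock. */
	return READ_ONCE(v->vmid);
}
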
struct kvm_arch {
	/* G-stage vmid */
	struct kvm_vmid vmid;

	/* G-stage page table */
	pgd_t *pgd;
	phys_addr_t pgd_phys;

	/* Guest Timer */
	struct kvm_guest_timer timer;

	/* AIA Guest/VM context */
	struct kvm_aia aia;
};

struct kvm_cpu_trap {
	unsigned long sepc;
	unsigned long scause;
	unsigned long stval;
	unsigned long htval;
	unsigned long htinst;
};

struct kvm_cpu_context {
	unsigned long zero;
	unsigned long ra;
	unsigned long sp;
	unsigned long gp;
	unsigned long tp;
	unsigned long t0;
	unsigned long t1;
	unsigned long t2;
	unsigned long s0;
	unsigned long s1;
	unsigned long a0;
	unsigned long a1;
	unsigned long a2;
	unsigned long a3;
	unsigned long a4;
	unsigned long a5;
	unsigned long a6;
	unsigned long a7;
	unsigned long s2;
	unsigned long s3;
	unsigned long s4;
	unsigned long s5;
	unsigned long s6;
	unsigned long s7;
	unsigned long s8;
	unsigned long s9;
	unsigned long s10;
	unsigned long s11;
	unsigned long t3;
	unsigned long t4;
	unsigned long t5;
	unsigned long t6;
	unsigned long sepc;
	unsigned long sstatus;
	unsigned long hstatus;
	union __riscv_fp_state fp;
	struct __riscv_v_ext_state vector;
};

struct kvm_vcpu_csr {
	unsigned long vsstatus;
	unsigned long vsie;
	unsigned long vstvec;
	unsigned long vsscratch;
	unsigned long vsepc;
	unsigned long vscause;
	unsigned long vstval;
	unsigned long hvip;
	unsigned long vsatp;
	unsigned long scounteren;
	unsigned long senvcfg;
};

struct kvm_vcpu_config {
	u64 henvcfg;
	u64 hstateen0;
};

struct kvm_vcpu_smstateen_csr {
	unsigned long sstateen0;
};

struct kvm_vcpu_arch {
	/* VCPU ran at least once */
	bool ran_atleast_once;

	/* Last Host CPU on which Guest VCPU exited */
	int last_exit_cpu;

	/* ISA feature bits (similar to MISA) */
	DECLARE_BITMAP(isa, RISCV_ISA_EXT_MAX);

	/* Vendor, Arch, and Implementation details */
	unsigned long mvendorid;
	unsigned long marchid;
	unsigned long mimpid;

	/* SSCRATCH, STVEC, and SCOUNTEREN of Host */
	unsigned long host_sscratch;
	unsigned long host_stvec;
	unsigned long host_scounteren;
	unsigned long host_senvcfg;
	unsigned long host_sstateen0;

	/* CPU context of Host */
	struct kvm_cpu_context host_context;

	/* CPU context of Guest VCPU */
	struct kvm_cpu_context guest_context;

	/* CPU CSR context of Guest VCPU */
	struct kvm_vcpu_csr guest_csr;

	/* CPU Smstateen CSR context of Guest VCPU */
	struct kvm_vcpu_smstateen_csr smstateen_csr;

	/* CPU context upon Guest VCPU reset */
	struct kvm_cpu_context guest_reset_context;

	/* CPU CSR context upon Guest VCPU reset */
	struct kvm_vcpu_csr guest_reset_csr;

	/*
	 * VCPU interrupts
	 *
	 * We have a lockless approach for tracking pending VCPU interrupts,
	 * implemented using atomic bitops. The irqs_pending bitmap represents
	 * pending interrupts, whereas irqs_pending_mask represents the bits
	 * changed in irqs_pending. Our approach is modeled on the
	 * multiple-producer, single-consumer problem, where the consumer is
	 * the VCPU itself.
	 */
#define KVM_RISCV_VCPU_NR_IRQS	64
	DECLARE_BITMAP(irqs_pending, KVM_RISCV_VCPU_NR_IRQS);
	DECLARE_BITMAP(irqs_pending_mask, KVM_RISCV_VCPU_NR_IRQS);

	/* VCPU Timer */
	struct kvm_vcpu_timer timer;

	/* HFENCE request queue */
	spinlock_t hfence_lock;
	unsigned long hfence_head;
	unsigned long hfence_tail;
	struct kvm_riscv_hfence hfence_queue[KVM_RISCV_VCPU_MAX_HFENCE];

	/* MMIO instruction details */
	struct kvm_mmio_decode mmio_decode;

	/* CSR instruction details */
	struct kvm_csr_decode csr_decode;

	/* SBI context */
	struct kvm_vcpu_sbi_context sbi_context;

	/* AIA VCPU context */
	struct kvm_vcpu_aia aia_context;

	/* Cache pages needed to program page tables with spinlock held */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* VCPU power-off state */
	bool power_off;

	/* Don't run the VCPU (blocked) */
	bool pause;

	/* Performance monitoring context */
	struct kvm_pmu pmu_context;

	/* 'static' configurations which are set only once */
	struct kvm_vcpu_config cfg;
};
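
/*
 * Illustrative sketch only, not a KVM helper: a producer following the
 * lockless scheme described above first sets the bit in irqs_pending and
 * only then publishes the change through irqs_pending_mask, with
 * smp_mb__before_atomic() ordering the two stores so the VCPU (the
 * single consumer) never observes the mask bit without the pending bit.
 */
static inline void kvm_riscv_vcpu_mark_irq_example(struct kvm_vcpu_arch *arch,
						   unsigned int irq)
{
	set_bit(irq, arch->irqs_pending);
	/* Make the pending bit visible before publishing the change. */
	smp_mb__before_atomic();
	set_bit(irq, arch->irqs_pending_mask);
}
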
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}

#define KVM_ARCH_WANT_MMU_NOTIFIER

#define KVM_RISCV_GSTAGE_TLB_MIN_ORDER	12

void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
					  gpa_t gpa, gpa_t gpsz,
					  unsigned long order);
void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid);
void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
				     unsigned long order);
void kvm_riscv_local_hfence_gvma_all(void);
void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
					  unsigned long asid,
					  unsigned long gva,
					  unsigned long gvsz,
					  unsigned long order);
void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
					  unsigned long asid);
void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
				     unsigned long gva, unsigned long gvsz,
				     unsigned long order);
void kvm_riscv_local_hfence_vvma_all(unsigned long vmid);

void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu);

void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu);

void kvm_riscv_fence_i(struct kvm *kvm,
		       unsigned long hbase, unsigned long hmask);
void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    gpa_t gpa, gpa_t gpsz,
				    unsigned long order);
void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask);
void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long gva, unsigned long gvsz,
				    unsigned long order, unsigned long asid);
void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long asid);
void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask,
			       unsigned long gva, unsigned long gvsz,
			       unsigned long order);
void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask);
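
/*
 * Illustrative sketch only (a hypothetical wrapper, and the hbase/hmask
 * convention is an assumption, not stated in this header): after
 * unmapping a g-stage range, a caller would typically request a remote
 * HFENCE.GVMA for that range, here presuming that hbase == -1UL selects
 * every VCPU and that gpa/gpsz are aligned to the given order.
 */
static inline void kvm_riscv_gstage_flush_range_example(struct kvm *kvm,
							gpa_t gpa, gpa_t gpsz)
{
	kvm_riscv_hfence_gvma_vmid_gpa(kvm, -1UL, 0, gpa, gpsz,
				       KVM_RISCV_GSTAGE_TLB_MIN_ORDER);
}
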
int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
			     phys_addr_t hpa, unsigned long size,
			     bool writable, bool in_atomic);
void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa,
			      unsigned long size);
int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
			 struct kvm_memory_slot *memslot,
			 gpa_t gpa, unsigned long hva, bool is_write);
int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm);
void kvm_riscv_gstage_free_pgd(struct kvm *kvm);
void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu);
void __init kvm_riscv_gstage_mode_detect(void);
unsigned long __init kvm_riscv_gstage_mode(void);
int kvm_riscv_gstage_gpa_bits(void);

void __init kvm_riscv_gstage_vmid_detect(void);
unsigned long kvm_riscv_gstage_vmid_bits(void);
int kvm_riscv_gstage_vmid_init(struct kvm *kvm);
bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid);
void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu);

int kvm_riscv_setup_default_irq_routing(struct kvm *kvm, u32 lines);

void __kvm_riscv_unpriv_trap(void);

unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
					 bool read_insn,
					 unsigned long guest_addr,
					 struct kvm_cpu_trap *trap);
void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
				  struct kvm_cpu_trap *trap);
int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
			struct kvm_cpu_trap *trap);

void __kvm_riscv_switch_to(struct kvm_vcpu_arch *vcpu_arch);

void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu);
unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
				    u64 __user *uindices);
int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg);
int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg);

int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu);
bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask);
void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);

#endif /* __RISCV_KVM_HOST_H__ */