/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Anup Patel <anup.patel@wdc.com>
 */

#ifndef __RISCV_KVM_HOST_H__
#define __RISCV_KVM_HOST_H__

#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/spinlock.h>
#include <asm/hwcap.h>
#include <asm/kvm_aia.h>
#include <asm/ptrace.h>
#include <asm/kvm_tlb.h>
#include <asm/kvm_vmid.h>
#include <asm/kvm_vcpu_config.h>
#include <asm/kvm_vcpu_fp.h>
#include <asm/kvm_vcpu_insn.h>
#include <asm/kvm_vcpu_sbi.h>
#include <asm/kvm_vcpu_sbi_fwft.h>
#include <asm/kvm_vcpu_timer.h>
#include <asm/kvm_vcpu_pmu.h>

#define KVM_MAX_VCPUS			1024

#define KVM_HALT_POLL_NS_DEFAULT	500000

#define KVM_VCPU_MAX_FEATURES		0

#define KVM_IRQCHIP_NUM_PINS		1024

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_VCPU_RESET		KVM_ARCH_REQ(1)
#define KVM_REQ_UPDATE_HGATP		KVM_ARCH_REQ(2)
#define KVM_REQ_FENCE_I			\
	KVM_ARCH_REQ_FLAGS(3, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HFENCE_VVMA_ALL		\
	KVM_ARCH_REQ_FLAGS(4, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HFENCE			\
	KVM_ARCH_REQ_FLAGS(5, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_STEAL_UPDATE		KVM_ARCH_REQ(6)

#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE

#define KVM_DIRTY_LOG_MANUAL_CAPS	(KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
					 KVM_DIRTY_LOG_INITIALLY_SET)

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 ecall_exit_stat;
	u64 wfi_exit_stat;
	u64 wrs_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 csr_exit_user;
	u64 csr_exit_kernel;
	u64 signal_exits;
	u64 exits;
	u64 instr_illegal_exits;
	u64 load_misaligned_exits;
	u64 store_misaligned_exits;
	u64 load_access_exits;
	u64 store_access_exits;
};

struct kvm_arch_memory_slot {
};

struct kvm_arch {
	/* G-stage vmid */
	struct kvm_vmid vmid;

	/* G-stage page table */
	pgd_t *pgd;
	phys_addr_t pgd_phys;
	unsigned long pgd_levels;

	/* Guest Timer */
	struct kvm_guest_timer timer;

	/* AIA Guest/VM context */
	struct kvm_aia aia;

	/* KVM_CAP_RISCV_MP_STATE_RESET */
	bool mp_state_reset;
};

struct kvm_cpu_trap {
	unsigned long sepc;
	unsigned long scause;
	unsigned long stval;
	unsigned long htval;
	unsigned long htinst;
};

struct kvm_cpu_context {
	unsigned long zero;
	unsigned long ra;
	unsigned long sp;
	unsigned long gp;
	unsigned long tp;
	unsigned long t0;
	unsigned long t1;
	unsigned long t2;
	unsigned long s0;
	unsigned long s1;
	unsigned long a0;
	unsigned long a1;
	unsigned long a2;
	unsigned long a3;
	unsigned long a4;
	unsigned long a5;
	unsigned long a6;
	unsigned long a7;
	unsigned long s2;
	unsigned long s3;
	unsigned long s4;
	unsigned long s5;
	unsigned long s6;
	unsigned long s7;
	unsigned long s8;
	unsigned long s9;
	unsigned long s10;
	unsigned long s11;
	unsigned long t3;
	unsigned long t4;
	unsigned long t5;
	unsigned long t6;
	unsigned long sepc;
	unsigned long sstatus;
	unsigned long hstatus;
	union __riscv_fp_state fp;
	struct __riscv_v_ext_state vector;
};
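/*
 * Illustrative sketch, not part of the in-tree API: the GPR members of
 * struct kvm_cpu_context above are declared in x0..x31 order, so an
 * instruction emulator can index them like an array. The helper name
 * below is hypothetical, and the cast assumes the compiler inserts no
 * padding between the consecutive unsigned long members.
 */
static inline unsigned long example_gpr_read(struct kvm_cpu_context *cntx,
					     unsigned int reg_num)
{
	/* Valid for x0..x31 only; later members hold CSR and FP state */
	return (reg_num < 32) ? ((unsigned long *)cntx)[reg_num] : 0;
}
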
struct kvm_vcpu_csr {
	unsigned long vsstatus;
	unsigned long vsie;
	unsigned long vstvec;
	unsigned long vsscratch;
	unsigned long vsepc;
	unsigned long vscause;
	unsigned long vstval;
	unsigned long hvip;
	unsigned long vsatp;
	unsigned long scounteren;
	unsigned long senvcfg;
};

struct kvm_vcpu_smstateen_csr {
	unsigned long sstateen0;
};

struct kvm_vcpu_reset_state {
	spinlock_t lock;
	unsigned long pc;
	unsigned long a1;
};

struct kvm_vcpu_arch {
	/* VCPU ran at least once */
	bool ran_atleast_once;

	/* Last Host CPU on which Guest VCPU exited */
	int last_exit_cpu;

	/* ISA feature bits (similar to MISA) */
	DECLARE_BITMAP(isa, RISCV_ISA_EXT_MAX);

	/* Vendor, Arch, and Implementation details */
	unsigned long mvendorid;
	unsigned long marchid;
	unsigned long mimpid;

	/* SSCRATCH, STVEC, and SCOUNTEREN of Host */
	unsigned long host_sscratch;
	unsigned long host_stvec;
	unsigned long host_scounteren;
	unsigned long host_senvcfg;
	unsigned long host_sstateen0;

	/* CPU context of Host */
	struct kvm_cpu_context host_context;

	/* CPU context of Guest VCPU */
	struct kvm_cpu_context guest_context;

	/* CPU CSR context of Guest VCPU */
	struct kvm_vcpu_csr guest_csr;

	/* CPU Smstateen CSR context of Guest VCPU */
	struct kvm_vcpu_smstateen_csr smstateen_csr;

	/* CPU reset state of Guest VCPU */
	struct kvm_vcpu_reset_state reset_state;

	/*
	 * VCPU interrupts
	 *
	 * We have a lockless approach for tracking pending VCPU interrupts
	 * implemented using atomic bitops. The irqs_pending bitmap represents
	 * pending interrupts, whereas irqs_pending_mask represents the bits
	 * changed in irqs_pending. Our approach is modeled on the
	 * multiple-producer, single-consumer problem, where the consumer is
	 * the VCPU itself. A sketch of this protocol follows this structure
	 * definition.
	 */
#define KVM_RISCV_VCPU_NR_IRQS	64
	DECLARE_BITMAP(irqs_pending, KVM_RISCV_VCPU_NR_IRQS);
	DECLARE_BITMAP(irqs_pending_mask, KVM_RISCV_VCPU_NR_IRQS);

	/* VCPU Timer */
	struct kvm_vcpu_timer timer;

	/* HFENCE request queue */
	spinlock_t hfence_lock;
	unsigned long hfence_head;
	unsigned long hfence_tail;
	struct kvm_riscv_hfence hfence_queue[KVM_RISCV_VCPU_MAX_HFENCE];

	/* MMIO instruction details */
	struct kvm_mmio_decode mmio_decode;

	/* CSR instruction details */
	struct kvm_csr_decode csr_decode;

	/* SBI context */
	struct kvm_vcpu_sbi_context sbi_context;

	/* AIA VCPU context */
	struct kvm_vcpu_aia aia_context;

	/* Cache pages needed to program page tables with spinlock held */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* VCPU power state */
	struct kvm_mp_state mp_state;
	spinlock_t mp_state_lock;

	/* Don't run the VCPU (blocked) */
	bool pause;

	/* Performance monitoring context */
	struct kvm_pmu pmu_context;

	/* Firmware feature SBI extension context */
	struct kvm_sbi_fwft fwft_context;

	/* 'static' configurations which are set only once */
	struct kvm_vcpu_config cfg;

	/* Indicates modified guest CSRs */
	bool csr_dirty;

	/* SBI steal-time accounting */
	struct {
		gpa_t shmem;
		u64 last_steal;
	} sta;
};
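/*
 * Illustrative sketch, not the in-tree implementation: one way the
 * lockless irqs_pending/irqs_pending_mask protocol described in
 * struct kvm_vcpu_arch above can work. A producer (any thread
 * injecting or retracting an interrupt) updates the pending bit first
 * and then publishes the change through the mask; the single consumer
 * (the VCPU thread) claims published bits before entering the guest.
 * The helper names below are hypothetical.
 */
static inline void example_irq_produce(struct kvm_vcpu_arch *varch,
				       unsigned int irq, bool pending)
{
	if (pending)
		set_bit(irq, varch->irqs_pending);
	else
		clear_bit(irq, varch->irqs_pending);

	/* Publish only after the pending bit itself has been updated */
	smp_mb__before_atomic();
	set_bit(irq, varch->irqs_pending_mask);
}

static inline void example_irq_consume(struct kvm_vcpu_arch *varch,
				       unsigned int irq)
{
	/* Only the VCPU thread ever clears bits in irqs_pending_mask */
	if (test_and_clear_bit(irq, varch->irqs_pending_mask) &&
	    test_bit(irq, varch->irqs_pending)) {
		/* ...latch the interrupt into guest-visible state... */
	}
}
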
/*
 * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
 * arrived in guest context. For riscv, any event that arrives while a vCPU is
 * loaded is considered to be "in guest".
 */
static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
{
	return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;
}

static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}

int kvm_riscv_setup_default_irq_routing(struct kvm *kvm, u32 lines);

void __kvm_riscv_unpriv_trap(void);

unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
					 bool read_insn,
					 unsigned long guest_addr,
					 struct kvm_cpu_trap *trap);
void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
				  struct kvm_cpu_trap *trap);
int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
			struct kvm_cpu_trap *trap);

void __kvm_riscv_switch_to(struct kvm_vcpu_arch *vcpu_arch);

void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu);
unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
				    u64 __user *uindices);
int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg);
int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg);

int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu);
bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask);
void __kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
void __kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
bool kvm_riscv_vcpu_stopped(struct kvm_vcpu *vcpu);

void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu);

/* Flags representing implementation specific details */
DECLARE_STATIC_KEY_FALSE(kvm_riscv_vsstage_tlb_no_gpa);

#endif /* __RISCV_KVM_HOST_H__ */