/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#ifndef __MIPS_KVM_HOST_H__
#define __MIPS_KVM_HOST_H__

#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/threads.h>
#include <linux/spinlock.h>

#include <asm/inst.h>
#include <asm/mipsregs.h>

/* MIPS KVM register ids */
#define MIPS_CP0_32(_R, _S)					\
	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))

#define MIPS_CP0_64(_R, _S)					\
	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))

#define KVM_REG_MIPS_CP0_INDEX		MIPS_CP0_32(0, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO0	MIPS_CP0_64(2, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO1	MIPS_CP0_64(3, 0)
#define KVM_REG_MIPS_CP0_CONTEXT	MIPS_CP0_64(4, 0)
#define KVM_REG_MIPS_CP0_USERLOCAL	MIPS_CP0_64(4, 2)
#define KVM_REG_MIPS_CP0_PAGEMASK	MIPS_CP0_32(5, 0)
#define KVM_REG_MIPS_CP0_PAGEGRAIN	MIPS_CP0_32(5, 1)
#define KVM_REG_MIPS_CP0_WIRED		MIPS_CP0_32(6, 0)
#define KVM_REG_MIPS_CP0_HWRENA		MIPS_CP0_32(7, 0)
#define KVM_REG_MIPS_CP0_BADVADDR	MIPS_CP0_64(8, 0)
#define KVM_REG_MIPS_CP0_COUNT		MIPS_CP0_32(9, 0)
#define KVM_REG_MIPS_CP0_ENTRYHI	MIPS_CP0_64(10, 0)
#define KVM_REG_MIPS_CP0_COMPARE	MIPS_CP0_32(11, 0)
#define KVM_REG_MIPS_CP0_STATUS		MIPS_CP0_32(12, 0)
#define KVM_REG_MIPS_CP0_INTCTL		MIPS_CP0_32(12, 1)
#define KVM_REG_MIPS_CP0_CAUSE		MIPS_CP0_32(13, 0)
#define KVM_REG_MIPS_CP0_EPC		MIPS_CP0_64(14, 0)
#define KVM_REG_MIPS_CP0_PRID		MIPS_CP0_32(15, 0)
#define KVM_REG_MIPS_CP0_EBASE		MIPS_CP0_64(15, 1)
#define KVM_REG_MIPS_CP0_CONFIG		MIPS_CP0_32(16, 0)
#define KVM_REG_MIPS_CP0_CONFIG1	MIPS_CP0_32(16, 1)
#define KVM_REG_MIPS_CP0_CONFIG2	MIPS_CP0_32(16, 2)
#define KVM_REG_MIPS_CP0_CONFIG3	MIPS_CP0_32(16, 3)
#define KVM_REG_MIPS_CP0_CONFIG4	MIPS_CP0_32(16, 4)
#define KVM_REG_MIPS_CP0_CONFIG5	MIPS_CP0_32(16, 5)
#define KVM_REG_MIPS_CP0_CONFIG7	MIPS_CP0_32(16, 7)
#define KVM_REG_MIPS_CP0_XCONTEXT	MIPS_CP0_64(20, 0)
#define KVM_REG_MIPS_CP0_ERROREPC	MIPS_CP0_64(30, 0)
#define KVM_REG_MIPS_CP0_KSCRATCH1	MIPS_CP0_64(31, 2)
#define KVM_REG_MIPS_CP0_KSCRATCH2	MIPS_CP0_64(31, 3)
#define KVM_REG_MIPS_CP0_KSCRATCH3	MIPS_CP0_64(31, 4)
#define KVM_REG_MIPS_CP0_KSCRATCH4	MIPS_CP0_64(31, 5)
#define KVM_REG_MIPS_CP0_KSCRATCH5	MIPS_CP0_64(31, 6)
#define KVM_REG_MIPS_CP0_KSCRATCH6	MIPS_CP0_64(31, 7)
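
/*
 * Illustrative note (not part of the definitions above): the id encoding
 * packs the CP0 register number and select field as (8 * reg + sel) in the
 * low bits, ORed with the size flag, so e.g. KVM_REG_MIPS_CP0_STATUS is
 * KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * 12 + 0). Userspace can
 * recover the (reg, sel) pair from an id with:
 *
 *	reg = (id & 0xff) >> 3;
 *	sel = id & 0x7;
 */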

#define KVM_MAX_VCPUS		8
#define KVM_USER_MEM_SLOTS	8
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS	0

#define KVM_COALESCED_MMIO_PAGE_OFFSET	1
#define KVM_HALT_POLL_NS_DEFAULT	500000

/*
 * Special address that contains the comm page, used for reducing # of traps
 * This needs to be within 32KB of 0x0 (so the zero register can be used), but
 * preferably not at 0x0 so that most kernel NULL pointer dereferences can be
 * caught.
 */
#define KVM_GUEST_COMMPAGE_ADDR		((PAGE_SIZE > 0x8000) ? 0 : \
					 (0x8000 - PAGE_SIZE))

#define KVM_GUEST_KERNEL_MODE(vcpu)	((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \
					((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0))

#define KVM_GUEST_KUSEG			0x00000000UL
#define KVM_GUEST_KSEG0			0x40000000UL
#define KVM_GUEST_KSEG1			0x40000000UL
#define KVM_GUEST_KSEG23		0x60000000UL
#define KVM_GUEST_KSEGX(a)		((_ACAST32_(a)) & 0xe0000000)
#define KVM_GUEST_CPHYSADDR(a)		((_ACAST32_(a)) & 0x1fffffff)

#define KVM_GUEST_CKSEG0ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
#define KVM_GUEST_CKSEG1ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
#define KVM_GUEST_CKSEG23ADDR(a)	(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)

/*
 * Map an address to a certain kernel segment
 */
#define KVM_GUEST_KSEG0ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
#define KVM_GUEST_KSEG1ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
#define KVM_GUEST_KSEG23ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)

#define KVM_INVALID_PAGE		0xdeadbeef
#define KVM_INVALID_ADDR		0xdeadbeef

/*
 * EVA has overlapping user & kernel address spaces, so user VAs may be >
 * PAGE_OFFSET. For this reason we can't use the default KVM_HVA_ERR_BAD of
 * PAGE_OFFSET.
 */
#define KVM_HVA_ERR_BAD			(-1UL)
#define KVM_HVA_ERR_RO_BAD		(-2UL)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return IS_ERR_VALUE(addr);
}

struct kvm_vm_stat {
	ulong remote_tlb_flush;
};

struct kvm_vcpu_stat {
	u64 wait_exits;
	u64 cache_exits;
	u64 signal_exits;
	u64 int_exits;
	u64 cop_unusable_exits;
	u64 tlbmod_exits;
	u64 tlbmiss_ld_exits;
	u64 tlbmiss_st_exits;
	u64 addrerr_st_exits;
	u64 addrerr_ld_exits;
	u64 syscall_exits;
	u64 resvd_inst_exits;
	u64 break_inst_exits;
	u64 trap_inst_exits;
	u64 msa_fpe_exits;
	u64 fpe_exits;
	u64 msa_disabled_exits;
	u64 flush_dcache_exits;
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
};

struct kvm_arch_memory_slot {
};

struct kvm_arch {
	/* Guest physical mm */
	struct mm_struct gpa_mm;
};

#define N_MIPS_COPROC_REGS	32
#define N_MIPS_COPROC_SEL	8

struct mips_coproc {
	unsigned long reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
	unsigned long stat[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
#endif
};
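
/*
 * Usage sketch (illustrative only): the guest's CP0 state is held in
 * cop0->reg[reg][sel], indexed by the MIPS_CP0_* register numbers and
 * select values defined below, e.g.:
 *
 *	struct mips_coproc *cop0 = vcpu->arch.cop0;
 *	unsigned long status = cop0->reg[MIPS_CP0_STATUS][0];
 *
 * The kvm_read_c0_guest_*() / kvm_write_c0_guest_*() macros further down
 * wrap exactly this indexing.
 */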

/*
 * Coprocessor 0 register names
 */
#define MIPS_CP0_TLB_INDEX	0
#define MIPS_CP0_TLB_RANDOM	1
#define MIPS_CP0_TLB_LOW	2
#define MIPS_CP0_TLB_LO0	2
#define MIPS_CP0_TLB_LO1	3
#define MIPS_CP0_TLB_CONTEXT	4
#define MIPS_CP0_TLB_PG_MASK	5
#define MIPS_CP0_TLB_WIRED	6
#define MIPS_CP0_HWRENA		7
#define MIPS_CP0_BAD_VADDR	8
#define MIPS_CP0_COUNT		9
#define MIPS_CP0_TLB_HI		10
#define MIPS_CP0_COMPARE	11
#define MIPS_CP0_STATUS		12
#define MIPS_CP0_CAUSE		13
#define MIPS_CP0_EXC_PC		14
#define MIPS_CP0_PRID		15
#define MIPS_CP0_CONFIG		16
#define MIPS_CP0_LLADDR		17
#define MIPS_CP0_WATCH_LO	18
#define MIPS_CP0_WATCH_HI	19
#define MIPS_CP0_TLB_XCONTEXT	20
#define MIPS_CP0_ECC		26
#define MIPS_CP0_CACHE_ERR	27
#define MIPS_CP0_TAG_LO		28
#define MIPS_CP0_TAG_HI		29
#define MIPS_CP0_ERROR_PC	30
#define MIPS_CP0_DEBUG		23
#define MIPS_CP0_DEPC		24
#define MIPS_CP0_PERFCNT	25
#define MIPS_CP0_ERRCTL		26
#define MIPS_CP0_DATA_LO	28
#define MIPS_CP0_DATA_HI	29
#define MIPS_CP0_DESAVE		31

#define MIPS_CP0_CONFIG_SEL	0
#define MIPS_CP0_CONFIG1_SEL	1
#define MIPS_CP0_CONFIG2_SEL	2
#define MIPS_CP0_CONFIG3_SEL	3
#define MIPS_CP0_CONFIG4_SEL	4
#define MIPS_CP0_CONFIG5_SEL	5

/* Resume Flags */
#define RESUME_FLAG_DR		(1<<0)	/* Reload guest nonvolatile state? */
#define RESUME_FLAG_HOST	(1<<1)	/* Resume host? */

#define RESUME_GUEST		0
#define RESUME_GUEST_DR		RESUME_FLAG_DR
#define RESUME_HOST		RESUME_FLAG_HOST

enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_WAIT,		/* WAIT instruction */
	EMULATE_PRIV_FAIL,
	EMULATE_EXCEPT,		/* A guest exception has been generated */
};

#define mips3_paddr_to_tlbpfn(x) \
	(((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME)
#define mips3_tlbpfn_to_paddr(x) \
	((unsigned long)((x) & MIPS3_PG_FRAME) << MIPS3_PG_SHIFT)

#define MIPS3_PG_SHIFT		6
#define MIPS3_PG_FRAME		0x3fffffc0

#define VPN2_MASK		0xffffe000
#define KVM_ENTRYHI_ASID	MIPS_ENTRYHI_ASID
#define TLB_IS_GLOBAL(x)	((x).tlb_lo[0] & (x).tlb_lo[1] & ENTRYLO_G)
#define TLB_VPN2(x)		((x).tlb_hi & VPN2_MASK)
#define TLB_ASID(x)		((x).tlb_hi & KVM_ENTRYHI_ASID)
#define TLB_LO_IDX(x, va)	(((va) >> PAGE_SHIFT) & 1)
#define TLB_IS_VALID(x, va)	((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_V)
#define TLB_IS_DIRTY(x, va)	((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_D)
#define TLB_HI_VPN2_HIT(x, y)	((TLB_VPN2(x) & ~(x).tlb_mask) ==	\
				 ((y) & VPN2_MASK & ~(x).tlb_mask))
#define TLB_HI_ASID_HIT(x, y)	(TLB_IS_GLOBAL(x) ||			\
				 TLB_ASID(x) == ((y) & KVM_ENTRYHI_ASID))

struct kvm_mips_tlb {
	long tlb_mask;
	long tlb_hi;
	long tlb_lo[2];
};
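
/*
 * A minimal lookup sketch (illustrative, assuming entryhi already holds the
 * VPN2|ASID to match) showing how the macros above combine in the style of
 * kvm_mips_guest_tlb_lookup():
 *
 *	int i;
 *	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
 *		struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[i];
 *
 *		if (TLB_HI_VPN2_HIT(*tlb, entryhi) &&
 *		    TLB_HI_ASID_HIT(*tlb, entryhi))
 *			return i;	// index of the matching entry
 *	}
 *	return -1;			// no match
 */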

#define KVM_NR_MEM_OBJS 4

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};

#define KVM_MIPS_AUX_FPU	0x1
#define KVM_MIPS_AUX_MSA	0x2

#define KVM_MIPS_GUEST_TLB_SIZE	64
struct kvm_vcpu_arch {
	void *guest_ebase;
	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	unsigned long host_stack;
	unsigned long host_gp;

	/* Host CP0 registers used when handling exits from guest */
	unsigned long host_cp0_badvaddr;
	unsigned long host_cp0_epc;
	u32 host_cp0_cause;
	u32 host_cp0_badinstr;
	u32 host_cp0_badinstrp;

	/* GPRS */
	unsigned long gprs[32];
	unsigned long hi;
	unsigned long lo;
	unsigned long pc;

	/* FPU State */
	struct mips_fpu_struct fpu;
	/* Which auxiliary state is loaded (KVM_MIPS_AUX_*) */
	unsigned int aux_inuse;

	/* COP0 State */
	struct mips_coproc *cop0;

	/* Host KSEG0 address of the EI/DI offset */
	void *kseg0_commpage;

	/* Resume PC after MMIO completion */
	unsigned long io_pc;
	/* GPR used as IO source/target */
	u32 io_gpr;

	struct hrtimer comparecount_timer;
	/* Count timer control KVM register */
	u32 count_ctl;
	/* Count bias from the raw time */
	u32 count_bias;
	/* Frequency of timer in Hz */
	u32 count_hz;
	/* Dynamic nanosecond bias (multiple of count_period) to avoid overflow */
	s64 count_dyn_bias;
	/* Resume time */
	ktime_t count_resume;
	/* Period of timer tick in ns */
	u64 count_period;

	/* Bitmask of exceptions that are pending */
	unsigned long pending_exceptions;

	/* Bitmask of pending exceptions to be cleared */
	unsigned long pending_exceptions_clr;

	/* S/W Based TLB for guest */
	struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE];

	/* Guest kernel/user [partial] mm */
	struct mm_struct guest_kernel_mm, guest_user_mm;

	/* Guest ASID of last user mode execution */
	unsigned int last_user_gasid;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	int last_sched_cpu;

	/* WAIT executed */
	int wait;

	u8 fpu_enabled;
	u8 msa_enabled;
};
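
/*
 * Timer bookkeeping sketch (illustrative, simplified from the logic in
 * kvm/emulate.c): the guest CP0_Count value is derived from a monotonic
 * timestamp rather than stored directly, roughly:
 *
 *	now   = ktime_get();	// or count_resume while the timer is stopped
 *	count = count_bias + (u32)div_u64(ktime_to_ns(now) * count_hz,
 *					  NSEC_PER_SEC);
 *
 * with count_dyn_bias folded into the conversion to keep the ns -> count
 * multiplication from overflowing 64 bits.
 */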

#define kvm_read_c0_guest_index(cop0)		(cop0->reg[MIPS_CP0_TLB_INDEX][0])
#define kvm_write_c0_guest_index(cop0, val)	(cop0->reg[MIPS_CP0_TLB_INDEX][0] = (val))
#define kvm_read_c0_guest_entrylo0(cop0)	(cop0->reg[MIPS_CP0_TLB_LO0][0])
#define kvm_write_c0_guest_entrylo0(cop0, val)	(cop0->reg[MIPS_CP0_TLB_LO0][0] = (val))
#define kvm_read_c0_guest_entrylo1(cop0)	(cop0->reg[MIPS_CP0_TLB_LO1][0])
#define kvm_write_c0_guest_entrylo1(cop0, val)	(cop0->reg[MIPS_CP0_TLB_LO1][0] = (val))
#define kvm_read_c0_guest_context(cop0)		(cop0->reg[MIPS_CP0_TLB_CONTEXT][0])
#define kvm_write_c0_guest_context(cop0, val)	(cop0->reg[MIPS_CP0_TLB_CONTEXT][0] = (val))
#define kvm_read_c0_guest_userlocal(cop0)	(cop0->reg[MIPS_CP0_TLB_CONTEXT][2])
#define kvm_write_c0_guest_userlocal(cop0, val)	(cop0->reg[MIPS_CP0_TLB_CONTEXT][2] = (val))
#define kvm_read_c0_guest_pagemask(cop0)	(cop0->reg[MIPS_CP0_TLB_PG_MASK][0])
#define kvm_write_c0_guest_pagemask(cop0, val)	(cop0->reg[MIPS_CP0_TLB_PG_MASK][0] = (val))
#define kvm_read_c0_guest_wired(cop0)		(cop0->reg[MIPS_CP0_TLB_WIRED][0])
#define kvm_write_c0_guest_wired(cop0, val)	(cop0->reg[MIPS_CP0_TLB_WIRED][0] = (val))
#define kvm_read_c0_guest_hwrena(cop0)		(cop0->reg[MIPS_CP0_HWRENA][0])
#define kvm_write_c0_guest_hwrena(cop0, val)	(cop0->reg[MIPS_CP0_HWRENA][0] = (val))
#define kvm_read_c0_guest_badvaddr(cop0)	(cop0->reg[MIPS_CP0_BAD_VADDR][0])
#define kvm_write_c0_guest_badvaddr(cop0, val)	(cop0->reg[MIPS_CP0_BAD_VADDR][0] = (val))
#define kvm_read_c0_guest_count(cop0)		(cop0->reg[MIPS_CP0_COUNT][0])
#define kvm_write_c0_guest_count(cop0, val)	(cop0->reg[MIPS_CP0_COUNT][0] = (val))
#define kvm_read_c0_guest_entryhi(cop0)		(cop0->reg[MIPS_CP0_TLB_HI][0])
#define kvm_write_c0_guest_entryhi(cop0, val)	(cop0->reg[MIPS_CP0_TLB_HI][0] = (val))
#define kvm_read_c0_guest_compare(cop0)		(cop0->reg[MIPS_CP0_COMPARE][0])
#define kvm_write_c0_guest_compare(cop0, val)	(cop0->reg[MIPS_CP0_COMPARE][0] = (val))
#define kvm_read_c0_guest_status(cop0)		(cop0->reg[MIPS_CP0_STATUS][0])
#define kvm_write_c0_guest_status(cop0, val)	(cop0->reg[MIPS_CP0_STATUS][0] = (val))
#define kvm_read_c0_guest_intctl(cop0)		(cop0->reg[MIPS_CP0_STATUS][1])
#define kvm_write_c0_guest_intctl(cop0, val)	(cop0->reg[MIPS_CP0_STATUS][1] = (val))
#define kvm_read_c0_guest_cause(cop0)		(cop0->reg[MIPS_CP0_CAUSE][0])
#define kvm_write_c0_guest_cause(cop0, val)	(cop0->reg[MIPS_CP0_CAUSE][0] = (val))
#define kvm_read_c0_guest_epc(cop0)		(cop0->reg[MIPS_CP0_EXC_PC][0])
#define kvm_write_c0_guest_epc(cop0, val)	(cop0->reg[MIPS_CP0_EXC_PC][0] = (val))
#define kvm_read_c0_guest_prid(cop0)		(cop0->reg[MIPS_CP0_PRID][0])
#define kvm_write_c0_guest_prid(cop0, val)	(cop0->reg[MIPS_CP0_PRID][0] = (val))
#define kvm_read_c0_guest_ebase(cop0)		(cop0->reg[MIPS_CP0_PRID][1])
#define kvm_write_c0_guest_ebase(cop0, val)	(cop0->reg[MIPS_CP0_PRID][1] = (val))
#define kvm_read_c0_guest_config(cop0)		(cop0->reg[MIPS_CP0_CONFIG][0])
#define kvm_read_c0_guest_config1(cop0)		(cop0->reg[MIPS_CP0_CONFIG][1])
#define kvm_read_c0_guest_config2(cop0)		(cop0->reg[MIPS_CP0_CONFIG][2])
#define kvm_read_c0_guest_config3(cop0)		(cop0->reg[MIPS_CP0_CONFIG][3])
#define kvm_read_c0_guest_config4(cop0)		(cop0->reg[MIPS_CP0_CONFIG][4])
#define kvm_read_c0_guest_config5(cop0)		(cop0->reg[MIPS_CP0_CONFIG][5])
#define kvm_read_c0_guest_config7(cop0)		(cop0->reg[MIPS_CP0_CONFIG][7])
#define kvm_write_c0_guest_config(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][0] = (val))
#define kvm_write_c0_guest_config1(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][1] = (val))
#define kvm_write_c0_guest_config2(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][2] = (val))
#define kvm_write_c0_guest_config3(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][3] = (val))
#define kvm_write_c0_guest_config4(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][4] = (val))
#define kvm_write_c0_guest_config5(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][5] = (val))
#define kvm_write_c0_guest_config7(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][7] = (val))
#define kvm_read_c0_guest_errorepc(cop0)	(cop0->reg[MIPS_CP0_ERROR_PC][0])
#define kvm_write_c0_guest_errorepc(cop0, val)	(cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))
#define kvm_read_c0_guest_kscratch1(cop0)	(cop0->reg[MIPS_CP0_DESAVE][2])
#define kvm_read_c0_guest_kscratch2(cop0)	(cop0->reg[MIPS_CP0_DESAVE][3])
#define kvm_read_c0_guest_kscratch3(cop0)	(cop0->reg[MIPS_CP0_DESAVE][4])
#define kvm_read_c0_guest_kscratch4(cop0)	(cop0->reg[MIPS_CP0_DESAVE][5])
#define kvm_read_c0_guest_kscratch5(cop0)	(cop0->reg[MIPS_CP0_DESAVE][6])
#define kvm_read_c0_guest_kscratch6(cop0)	(cop0->reg[MIPS_CP0_DESAVE][7])
#define kvm_write_c0_guest_kscratch1(cop0, val)	(cop0->reg[MIPS_CP0_DESAVE][2] = (val))
#define kvm_write_c0_guest_kscratch2(cop0, val)	(cop0->reg[MIPS_CP0_DESAVE][3] = (val))
#define kvm_write_c0_guest_kscratch3(cop0, val)	(cop0->reg[MIPS_CP0_DESAVE][4] = (val))
#define kvm_write_c0_guest_kscratch4(cop0, val)	(cop0->reg[MIPS_CP0_DESAVE][5] = (val))
#define kvm_write_c0_guest_kscratch5(cop0, val)	(cop0->reg[MIPS_CP0_DESAVE][6] = (val))
#define kvm_write_c0_guest_kscratch6(cop0, val)	(cop0->reg[MIPS_CP0_DESAVE][7] = (val))
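
/*
 * Exception delivery sketch (the pattern kvm/emulate.c uses with these
 * accessors; illustrative, not a new API): faking a guest exception saves
 * the guest PC into EPC, raises EXL (via kvm_set_c0_guest_status(), defined
 * below), and vectors the PC to the guest exception base:
 *
 *	kvm_write_c0_guest_epc(cop0, arch->pc);
 *	kvm_set_c0_guest_status(cop0, ST0_EXL);
 *	arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
 */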

/*
 * Some of the guest registers may be modified asynchronously (e.g. from a
 * hrtimer callback in hard irq context) and therefore need stronger atomicity
 * guarantees than other registers.
 */

static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
						unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
		"	" __LL "%0, %1				\n"
		"	or	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (val));
	} while (unlikely(!temp));
}

static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg,
						  unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
		"	" __LL "%0, %1				\n"
		"	and	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (~val));
	} while (unlikely(!temp));
}

static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
						   unsigned long change,
						   unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
		"	" __LL "%0, %1				\n"
		"	and	%0, %2				\n"
		"	or	%0, %3				\n"
		"	" __SC	"%0, %1				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (~change), "r" (val & change));
	} while (unlikely(!temp));
}

#define kvm_set_c0_guest_status(cop0, val)	(cop0->reg[MIPS_CP0_STATUS][0] |= (val))
#define kvm_clear_c0_guest_status(cop0, val)	(cop0->reg[MIPS_CP0_STATUS][0] &= ~(val))

/* Cause can be modified asynchronously from hardirq hrtimer callback */
#define kvm_set_c0_guest_cause(cop0, val)				\
	_kvm_atomic_set_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val)
#define kvm_clear_c0_guest_cause(cop0, val)				\
	_kvm_atomic_clear_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val)
#define kvm_change_c0_guest_cause(cop0, change, val)			\
	_kvm_atomic_change_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0],	\
					change, val)

#define kvm_set_c0_guest_ebase(cop0, val)	(cop0->reg[MIPS_CP0_PRID][1] |= (val))
#define kvm_clear_c0_guest_ebase(cop0, val)	(cop0->reg[MIPS_CP0_PRID][1] &= ~(val))
#define kvm_change_c0_guest_ebase(cop0, change, val)			\
{									\
	kvm_clear_c0_guest_ebase(cop0, change);				\
	kvm_set_c0_guest_ebase(cop0, ((val) & (change)));		\
}
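
/*
 * Usage sketch (the pattern used by the timer interrupt code; illustrative):
 * because the hrtimer callback can assert an interrupt in hard irq context
 * while the vcpu thread is also updating Cause, the IP bits are flipped with
 * the ll/sc helpers above rather than a plain |= / &= :
 *
 *	kvm_set_c0_guest_cause(cop0, C_IRQ5);	// raise the timer IP bit
 *	kvm_clear_c0_guest_cause(cop0, C_IRQ5);	// and lower it again
 */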

/* Helpers */

static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
{
	return (!__builtin_constant_p(raw_cpu_has_fpu) || raw_cpu_has_fpu) &&
		vcpu->fpu_enabled;
}

static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu)
{
	return kvm_mips_guest_can_have_fpu(vcpu) &&
		kvm_read_c0_guest_config1(vcpu->cop0) & MIPS_CONF1_FP;
}

static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu)
{
	return (!__builtin_constant_p(cpu_has_msa) || cpu_has_msa) &&
		vcpu->msa_enabled;
}

static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch *vcpu)
{
	return kvm_mips_guest_can_have_msa(vcpu) &&
		kvm_read_c0_guest_config3(vcpu->cop0) & MIPS_CONF3_MSA;
}

struct kvm_mips_callbacks {
	int (*handle_cop_unusable)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_mod)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_ld_miss)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_st_miss)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_st)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_ld)(struct kvm_vcpu *vcpu);
	int (*handle_syscall)(struct kvm_vcpu *vcpu);
	int (*handle_res_inst)(struct kvm_vcpu *vcpu);
	int (*handle_break)(struct kvm_vcpu *vcpu);
	int (*handle_trap)(struct kvm_vcpu *vcpu);
	int (*handle_msa_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
	int (*vcpu_init)(struct kvm_vcpu *vcpu);
	void (*vcpu_uninit)(struct kvm_vcpu *vcpu);
	int (*vcpu_setup)(struct kvm_vcpu *vcpu);
	void (*flush_shadow_all)(struct kvm *kvm);
	/*
	 * Must take care of flushing any cached GPA PTEs (e.g. guest entries
	 * in VZ root TLB, or T&E GVA page tables and corresponding root TLB
	 * mappings).
	 */
	void (*flush_shadow_memslot)(struct kvm *kvm,
				     const struct kvm_memory_slot *slot);
	gpa_t (*gva_to_gpa)(gva_t gva);
	void (*queue_timer_int)(struct kvm_vcpu *vcpu);
	void (*dequeue_timer_int)(struct kvm_vcpu *vcpu);
	void (*queue_io_int)(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq);
	void (*dequeue_io_int)(struct kvm_vcpu *vcpu,
			       struct kvm_mips_interrupt *irq);
	int (*irq_deliver)(struct kvm_vcpu *vcpu, unsigned int priority,
			   u32 cause);
	int (*irq_clear)(struct kvm_vcpu *vcpu, unsigned int priority,
			 u32 cause);
	unsigned long (*num_regs)(struct kvm_vcpu *vcpu);
	int (*copy_reg_indices)(struct kvm_vcpu *vcpu, u64 __user *indices);
	int (*get_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 *v);
	int (*set_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 v);
	int (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	int (*vcpu_put)(struct kvm_vcpu *vcpu, int cpu);
	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	void (*vcpu_reenter)(struct kvm_run *run, struct kvm_vcpu *vcpu);
};
extern struct kvm_mips_callbacks *kvm_mips_callbacks;
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
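
/*
 * Registration sketch (mirrors the pattern in kvm/trap_emul.c; illustrative):
 * an emulation backend fills a static ops table and hands it back from
 * kvm_mips_emulation_init():
 *
 *	static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
 *		.handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
 *		.handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
 *		...
 *	};
 *
 *	int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
 *	{
 *		*install_callbacks = &kvm_trap_emul_callbacks;
 *		return 0;
 *	}
 */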

/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);

extern int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu);

/* Building of entry/exception code */
int kvm_mips_entry_setup(void);
void *kvm_mips_build_vcpu_run(void *addr);
void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler);
void *kvm_mips_build_exception(void *addr, void *handler);
void *kvm_mips_build_exit(void *addr);

/* FPU/MSA context management */
void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_fpu(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_fcsr(struct kvm_vcpu_arch *vcpu);
void __kvm_save_msa(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msa(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msa_upper(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msacsr(struct kvm_vcpu_arch *vcpu);
void kvm_own_fpu(struct kvm_vcpu *vcpu);
void kvm_own_msa(struct kvm_vcpu *vcpu);
void kvm_drop_fpu(struct kvm_vcpu *vcpu);
void kvm_lose_fpu(struct kvm_vcpu *vcpu);

/* TLB handling */
u32 kvm_get_kernel_asid(struct kvm_vcpu *vcpu);
u32 kvm_get_user_asid(struct kvm_vcpu *vcpu);
u32 kvm_get_commpage_asid(struct kvm_vcpu *vcpu);

extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
					   struct kvm_vcpu *vcpu,
					   bool write_fault);

extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
					      struct kvm_vcpu *vcpu);

extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
						struct kvm_mips_tlb *tlb,
						unsigned long gva,
						bool write_fault);

extern enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
						     u32 *opc,
						     struct kvm_run *run,
						     struct kvm_vcpu *vcpu,
						     bool write_fault);

extern void kvm_mips_dump_host_tlbs(void);
extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu);
extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi,
				 bool user, bool kernel);

extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu,
				     unsigned long entryhi);

void kvm_mips_suspend_mm(int cpu);
void kvm_mips_resume_mm(int cpu);

/* MMU handling */

/**
 * enum kvm_mips_flush - Types of MMU flushes.
 * @KMF_USER:	Flush guest user virtual memory mappings.
 *		Guest USeg only.
 * @KMF_KERN:	Flush guest kernel virtual memory mappings.
 *		Guest USeg and KSeg2/3.
 * @KMF_GPA:	Flush guest physical memory mappings.
 *		Also includes KSeg0 if KMF_KERN is set.
 */
enum kvm_mips_flush {
	KMF_USER	= 0x0,
	KMF_KERN	= 0x1,
	KMF_GPA		= 0x2,
};
void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags);
bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
pgd_t *kvm_pgd_alloc(void);
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
				  bool user);
void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu);
void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu);

enum kvm_mips_fault_result {
	KVM_MIPS_MAPPED = 0,
	KVM_MIPS_GVA,
	KVM_MIPS_GPA,
	KVM_MIPS_TLB,
	KVM_MIPS_TLBINV,
	KVM_MIPS_TLBMOD,
};
enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
						   unsigned long gva,
						   bool write);

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
							 unsigned long address)
{
}

/* Emulation */
int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause);
int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
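
/*
 * Fetch-and-step sketch (the common pattern in kvm/emulate.c; illustrative):
 * an exit handler reads the trapping instruction through the guest mapping,
 * emulates it, then advances the guest PC past it, with update_pc() using
 * the cause register to honour branch delay slots:
 *
 *	union mips_instruction inst;
 *
 *	if (kvm_get_badinstr(opc, vcpu, &inst.word))
 *		return EMULATE_FAIL;
 *	// ... decode and emulate inst ...
 *	er = update_pc(vcpu, cause);
 */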

/**
 * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to ifetch fault.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	Whether the TLBL exception was likely due to an instruction
 *		fetch fault rather than a data load fault.
 */
static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *vcpu)
{
	unsigned long badvaddr = vcpu->host_cp0_badvaddr;
	unsigned long epc = msk_isa16_mode(vcpu->pc);
	u32 cause = vcpu->host_cp0_cause;

	if (epc == badvaddr)
		return true;

	/*
	 * Branches may be 32-bit or 16-bit instructions.
	 * This isn't exact, but we don't really support MIPS16 or microMIPS
	 * yet in KVM anyway.
	 */
	if ((cause & CAUSEF_BD) && badvaddr - epc <= 4)
		return true;

	return false;
}

extern enum emulation_result kvm_mips_emulate_inst(u32 cause,
						   u32 *opc,
						   struct kvm_run *run,
						   struct kvm_vcpu *vcpu);

long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_syscall(u32 cause,
						      u32 *opc,
						      struct kvm_run *run,
						      struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
							 u32 *opc,
							 struct kvm_run *run,
							 struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
							u32 *opc,
							struct kvm_run *run,
							struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
							 u32 *opc,
							 struct kvm_run *run,
							 struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
							u32 *opc,
							struct kvm_run *run,
							struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
						     u32 *opc,
						     struct kvm_run *run,
						     struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
						      u32 *opc,
						      struct kvm_run *run,
						      struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_handle_ri(u32 cause,
						u32 *opc,
						struct kvm_run *run,
						struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
						     u32 *opc,
						     struct kvm_run *run,
						     struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
						     u32 *opc,
						     struct kvm_run *run,
						     struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
						       u32 *opc,
						       struct kvm_run *run,
						       struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
							 u32 *opc,
							 struct kvm_run *run,
							 struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
						      u32 *opc,
						      struct kvm_run *run,
						      struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
							 u32 *opc,
							 struct kvm_run *run,
							 struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
							 struct kvm_run *run);
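
/*
 * MMIO completion sketch (the flow used by the vcpu run loop; illustrative):
 * after userspace fills run->mmio.data for an emulated load, the resume path
 * writes the value into the destination GPR recorded in vcpu->arch.io_gpr
 * and restores the pre-computed resume PC from vcpu->arch.io_pc:
 *
 *	if (vcpu->mmio_needed)
 *		kvm_mips_complete_mmio_load(vcpu, run);
 *	// vcpu->arch.pc now points past the load (or its branch target)
 */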

u32 kvm_mips_read_count(struct kvm_vcpu *vcpu);
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count);
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack);
void kvm_mips_init_count(struct kvm_vcpu *vcpu);
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz);
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu);
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu);

enum emulation_result kvm_mips_check_privilege(u32 cause,
					       u32 *opc,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu);

enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
					     u32 *opc,
					     u32 cause,
					     struct kvm_run *run,
					     struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
					   u32 *opc,
					   u32 cause,
					   struct kvm_run *run,
					   struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
					     u32 cause,
					     struct kvm_run *run,
					     struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
					    u32 cause,
					    struct kvm_run *run,
					    struct kvm_vcpu *vcpu);

unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu);

/* Dynamic binary translation */
extern int kvm_mips_trans_cache_index(union mips_instruction inst,
				      u32 *opc, struct kvm_vcpu *vcpu);
extern int kvm_mips_trans_cache_va(union mips_instruction inst, u32 *opc,
				   struct kvm_vcpu *vcpu);
extern int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc,
			       struct kvm_vcpu *vcpu);
extern int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc,
			       struct kvm_vcpu *vcpu);

/* Misc */
extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);

static inline void kvm_arch_hardware_disable(void) {}
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
		struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

#endif /* __MIPS_KVM_HOST_H__ */