/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_H__
#define __ASM_KVM_BOOK3S_H__

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_book3s_asm.h>

struct kvmppc_bat {
	u64 raw;
	u32 bepi;
	u32 bepi_mask;
	u32 brpn;
	u8 wimg;
	u8 pp;
	bool vs		: 1;
	bool vp		: 1;
};

struct kvmppc_sid_map {
	u64 guest_vsid;
	u64 guest_esid;
	u64 host_vsid;
	bool valid	: 1;
};

#define SID_MAP_BITS	9
#define SID_MAP_NUM	(1 << SID_MAP_BITS)
#define SID_MAP_MASK	(SID_MAP_NUM - 1)

#ifdef CONFIG_PPC_BOOK3S_64
#define SID_CONTEXTS	1
#else
#define SID_CONTEXTS	128
#define VSID_POOL_SIZE	(SID_CONTEXTS * 16)
#endif

struct hpte_cache {
	struct hlist_node list_pte;
	struct hlist_node list_pte_long;
	struct hlist_node list_vpte;
	struct hlist_node list_vpte_long;
#ifdef CONFIG_PPC_BOOK3S_64
	struct hlist_node list_vpte_64k;
#endif
	struct rcu_head rcu_head;
	u64 host_vpn;
	u64 pfn;
	ulong slot;
	struct kvmppc_pte pte;
	int pagesize;
};

/*
 * Struct for a virtual core.
 * Note: entry_exit_map combines a bitmap of threads that have entered
 * in the bottom 8 bits and a bitmap of threads that have exited in the
 * next 8 bits. This is so that we can atomically set the entry bit
 * iff the exit map is 0 without taking a lock.
 */
struct kvmppc_vcore {
	int n_runnable;
	int num_threads;
	int entry_exit_map;
	int napping_threads;
	int first_vcpuid;
	u16 pcpu;
	u16 last_cpu;
	u8 vcore_state;
	u8 in_guest;
	struct kvm_vcpu *runnable_threads[MAX_SMT_THREADS];
	struct list_head preempt_list;
	spinlock_t lock;
	struct rcuwait wait;
	spinlock_t stoltb_lock;	/* protects stolen_tb and preempt_tb */
	u64 stolen_tb;
	u64 preempt_tb;
	struct kvm_vcpu *runner;
	struct kvm *kvm;
	u64 tb_offset;		/* guest timebase - host timebase */
	u64 tb_offset_applied;	/* timebase offset currently in force */
	ulong lpcr;
	u32 arch_compat;
	ulong pcr;
	ulong dpdes;		/* doorbell state (POWER8) */
	ulong vtb;		/* virtual timebase */
	ulong conferring_threads;
	unsigned int halt_poll_ns;
	atomic_t online_count;
};
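
/*
 * Illustrative sketch only (hypothetical helper, not part of this
 * header's API): the lock-free entry protocol described in the comment
 * above can be implemented with cmpxchg on entry_exit_map. A thread may
 * set its entry bit only while the exit half (bits 8..15) is still
 * zero; "me" is a hypothetical thread index within the vcore.
 */
static inline bool kvmppc_try_enter_vcore_sketch(struct kvmppc_vcore *vc,
						 int me)
{
	int map, new_map;

	do {
		map = READ_ONCE(vc->entry_exit_map);
		if (map >> 8)		/* some thread has already exited */
			return false;
		new_map = map | (1 << me);
	} while (cmpxchg(&vc->entry_exit_map, map, new_map) != map);
	return true;
}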

struct kvmppc_vcpu_book3s {
	struct kvmppc_sid_map sid_map[SID_MAP_NUM];
	struct {
		u64 esid;
		u64 vsid;
	} slb_shadow[64];
	u8 slb_shadow_max;
	struct kvmppc_bat ibat[8];
	struct kvmppc_bat dbat[8];
	u64 hid[6];
	u64 gqr[8];
	u64 sdr1;
	u64 hior;
	u64 msr_mask;
	u64 vtb;
#ifdef CONFIG_PPC_BOOK3S_32
	u32 vsid_pool[VSID_POOL_SIZE];
	u32 vsid_next;
#else
	u64 proto_vsid_first;
	u64 proto_vsid_max;
	u64 proto_vsid_next;
#endif
	int context_id[SID_CONTEXTS];

	bool hior_explicit;		/* HIOR is set by ioctl, not PVR */

	struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
	struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
	struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
	struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
#ifdef CONFIG_PPC_BOOK3S_64
	struct hlist_head hpte_hash_vpte_64k[HPTEG_HASH_NUM_VPTE_64K];
#endif
	int hpte_cache_count;
	spinlock_t mmu_lock;
};

#define VSID_REAL	0x07ffffffffc00000ULL
#define VSID_BAT	0x07ffffffffb00000ULL
#define VSID_64K	0x0800000000000000ULL
#define VSID_1T		0x1000000000000000ULL
#define VSID_REAL_DR	0x2000000000000000ULL
#define VSID_REAL_IR	0x4000000000000000ULL
#define VSID_PR		0x8000000000000000ULL

extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
			       bool iswrite);
extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
extern int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
			unsigned long addr, unsigned long status);
extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
			unsigned long slb_v, unsigned long valid);
extern int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
			unsigned long gpa, gva_t ea, int is_store);

extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte);
extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern int kvmppc_mmu_hpte_sysinit(void);
extern void kvmppc_mmu_hpte_sysexit(void);
extern int kvmppc_mmu_hv_init(void);
extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc);

extern int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
			unsigned long ea, unsigned long dsisr);
extern unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
			gva_t eaddr, void *to, void *from,
			unsigned long n);
extern long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
					void *to, unsigned long n);
extern long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
				      void *from, unsigned long n);
extern int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, u64 root,
			u64 *pte_ret_p);
extern int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, u64 table,
			int table_index, u64 *pte_ret_p);
extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, bool data, bool iswrite);
extern void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
				    unsigned int pshift, unsigned int lpid);
extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
			unsigned int shift,
			const struct kvm_memory_slot *memslot,
			unsigned int lpid);
extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested,
				    bool writing, unsigned long gpa,
				    unsigned int lpid);
extern int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
				unsigned long gpa,
				struct kvm_memory_slot *memslot,
				bool writing, bool kvm_ro,
				pte_t *inserted_pte, unsigned int *levelp);
extern int kvmppc_init_vm_radix(struct kvm *kvm);
extern void kvmppc_free_radix(struct kvm *kvm);
extern void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd,
				      unsigned int lpid);
extern int kvmppc_radix_init(void);
extern void kvmppc_radix_exit(void);
extern int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			unsigned long gfn);
extern int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			unsigned long gfn);
extern int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			unsigned long gfn);
extern long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
			struct kvm_memory_slot *memslot, unsigned long *map);
extern void kvmppc_radix_flush_memslot(struct kvm *kvm,
			const struct kvm_memory_slot *memslot);
extern int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
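
/*
 * Illustrative sketch only (hypothetical helper, not part of this
 * header): reading a value from a guest effective address with the
 * radix copy helpers above. This conservatively treats any non-zero
 * return from kvmhv_copy_from_guest_radix() (an error code or an
 * incomplete copy) as a fault.
 */
static inline int kvmhv_read_guest_u32_sketch(struct kvm_vcpu *vcpu,
					      gva_t eaddr, u32 *val)
{
	long ret = kvmhv_copy_from_guest_radix(vcpu, eaddr, val, sizeof(*val));

	return ret ? -EFAULT : 0;
}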

/* XXX remove this export when load_last_inst() is generic */
extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
extern void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
					  unsigned int vec);
extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
extern void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac);
extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
			   bool upper, u32 val);
extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
extern int kvmppc_emulate_paired_single(struct kvm_vcpu *vcpu);
extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa,
			bool writing, bool *writable);
extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
			unsigned long *rmap, long pte_index, int realmode);
extern void kvmppc_update_dirty_map(const struct kvm_memory_slot *memslot,
			unsigned long gfn, unsigned long psize);
extern void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index);
void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index);
extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
			unsigned long *nb_ret);
extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr,
			unsigned long gpa, bool dirty);
extern long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
			long pte_index, unsigned long pteh, unsigned long ptel,
			pgd_t *pgdir, bool realmode, unsigned long *idx_ret);
extern long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
			unsigned long pte_index, unsigned long avpn,
			unsigned long *hpret);
extern long kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm,
			struct kvm_memory_slot *memslot, unsigned long *map);
extern void kvmppc_harvest_vpa_dirty(struct kvmppc_vpa *vpa,
			struct kvm_memory_slot *memslot,
			unsigned long *map);
extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
			unsigned long mask);
extern void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr);
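
/*
 * Illustrative sketch only (hypothetical helper, not part of this
 * header): kvmppc_ld() above is what last-instruction loading is built
 * on. Assuming it returns EMULATE_DONE on success, fetching the word at
 * the guest PC might look like this (data == false requests
 * instruction-relocation semantics):
 */
static inline int kvmppc_fetch_inst_sketch(struct kvm_vcpu *vcpu, u32 *inst)
{
	ulong pc = vcpu->arch.regs.nip;

	return kvmppc_ld(vcpu, &pc, sizeof(u32), inst, false);
}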

extern int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu);
extern int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu);
extern void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu);

extern void kvmppc_entry_trampoline(void);
extern void kvmppc_hv_entry_trampoline(void);
extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
extern void kvmppc_pr_init_default_hcalls(struct kvm *kvm);
extern int kvmppc_hcall_impl_pr(unsigned long cmd);
extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd);
extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu);
void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu);
void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu);
void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu);
#else
static inline void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) {}
#endif

long kvmhv_nested_init(void);
void kvmhv_nested_exit(void);
void kvmhv_vm_nested_init(struct kvm *kvm);
long kvmhv_set_partition_table(struct kvm_vcpu *vcpu);
long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu);
void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1);
void kvmhv_release_all_nested(struct kvm *kvm);
long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu,
			  u64 time_limit, unsigned long lpcr);
void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);
void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
				   struct hv_guest_state *hr);
long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu);

void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);

extern int kvm_irq_bypass;

static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.book3s;
}

/* Also add subarch specific defines */

#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
#include <asm/kvm_book3s_32.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/kvm_book3s_64.h>
#endif

static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
	vcpu->arch.regs.gpr[num] = val;
}

static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
	return vcpu->arch.regs.gpr[num];
}

static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
	vcpu->arch.regs.ccr = val;
}

static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs.ccr;
}

static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.xer = val;
}

static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs.xer;
}

static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.ctr = val;
}

static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs.ctr;
}

static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.link = val;
}

static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs.link;
}

static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.nip = val;
}

static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs.nip;
}
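
/*
 * Illustrative sketch only (hypothetical helper, not part of this
 * header): per the PAPR hypercall convention the completion status is
 * returned in r3 and any return values in r4 onwards, so an hcall
 * handler would use the accessors above roughly like this:
 */
static inline void kvmppc_complete_hcall_sketch(struct kvm_vcpu *vcpu,
						long rc, ulong retval)
{
	kvmppc_set_gpr(vcpu, 3, rc);		/* H_SUCCESS or an H_xxx error */
	kvmppc_set_gpr(vcpu, 4, retval);	/* first return value, if any */
}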

static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu);
static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
{
	return (kvmppc_get_msr(vcpu) & MSR_LE) != (MSR_KERNEL & MSR_LE);
}

static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault_dar;
}

static inline bool is_kvmppc_resume_guest(int r)
{
	return (r == RESUME_GUEST || r == RESUME_GUEST_NV);
}

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm);
static inline bool kvmppc_supports_magic_page(struct kvm_vcpu *vcpu)
{
	/* Only PR KVM supports the magic page */
	return !is_kvmppc_hv_enabled(vcpu->kvm);
}

extern int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu);
extern int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu);

/* Magic register values loaded into r3 and r4 before the 'sc' assembly
 * instruction for the OSI hypercalls */
#define OSI_SC_MAGIC_R3			0x113724FA
#define OSI_SC_MAGIC_R4			0x77810F9B

#define INS_DCBZ			0x7c0007ec
/* TO = 31 for unconditional trap */
#define INS_TW				0x7fe00008

#define SPLIT_HACK_MASK			0xff000000
#define SPLIT_HACK_OFFS			0xfb000000

/*
 * This packs a VCPU ID from the [0..KVM_MAX_VCPU_ID) space down to the
 * [0..KVM_MAX_VCPUS) space, using knowledge of the guest's core stride
 * (but not its actual threading mode, which is not available) to avoid
 * collisions.
 *
 * The implementation leaves VCPU IDs from the range [0..KVM_MAX_VCPUS) (block
 * 0) unchanged: if the guest is filling each VCORE completely then it will be
 * using consecutive IDs and it will fill the space without any packing.
 *
 * For higher VCPU IDs, the packed ID is based on the VCPU ID modulo
 * KVM_MAX_VCPUS (effectively masking off the top bits) and then an offset is
 * added to avoid collisions.
 *
 * VCPU IDs in the range [KVM_MAX_VCPUS..(KVM_MAX_VCPUS*2)) (block 1) are only
 * possible if the guest is leaving at least 1/2 of each VCORE empty, so IDs
 * can be safely packed into the second half of each VCORE by adding an offset
 * of (stride / 2).
 *
 * Similarly, if VCPU IDs in the range [(KVM_MAX_VCPUS*2)..(KVM_MAX_VCPUS*4))
 * (blocks 2 and 3) are seen, the guest must be leaving at least 3/4 of each
 * VCORE empty so packed IDs can be offset by (stride / 4) and (stride * 3 / 4).
 *
 * Finally, VCPU IDs from blocks 4..7 will only be seen if the guest is using a
 * stride of 8 and 1 thread per core, so the remaining offsets of 1, 5, 3 and 7
 * must be free to use.
 *
 * (The offsets for each block are stored in block_offsets[], indexed by the
 * block number if the stride is 8. For cases where the guest's stride is less
 * than 8, we can re-use the block_offsets array by multiplying the block
 * number by (MAX_SMT_THREADS / stride) to reach the correct entry.)
 */
static inline u32 kvmppc_pack_vcpu_id(struct kvm *kvm, u32 id)
{
	const int block_offsets[MAX_SMT_THREADS] = {0, 4, 2, 6, 1, 5, 3, 7};
	int stride = kvm->arch.emul_smt_mode;
	int block = (id / KVM_MAX_VCPUS) * (MAX_SMT_THREADS / stride);
	u32 packed_id;

	if (WARN_ONCE(block >= MAX_SMT_THREADS, "VCPU ID too large to pack"))
		return 0;
	packed_id = (id % KVM_MAX_VCPUS) + block_offsets[block];
	if (WARN_ONCE(packed_id >= KVM_MAX_VCPUS, "VCPU ID packing failed"))
		return 0;
	return packed_id;
}
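
/*
 * Worked example (with illustrative numbers): assume MAX_SMT_THREADS == 8,
 * KVM_MAX_VCPUS == 2048 and a guest with emul_smt_mode == 4. For id == 2053:
 * block = (2053 / 2048) * (8 / 4) = 2, so the offset is block_offsets[2] == 2
 * (i.e. stride / 2), and packed_id = (2053 % 2048) + 2 = 7.
 */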

#endif /* __ASM_KVM_BOOK3S_H__ */