/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#ifndef __POWERPC_KVM_PPC_H__
#define __POWERPC_KVM_PPC_H__

/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
 * dependencies. */

#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
#else
#include <asm/kvm_booke.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/paca.h>
#include <asm/xive.h>
#include <asm/cpu_has_feature.h>
#endif

/*
 * KVMPPC_INST_SW_BREAKPOINT is the debug instruction used to support
 * software breakpoints.
 */
#define KVMPPC_INST_SW_BREAKPOINT	0x00dddd00

enum emulation_result {
	EMULATE_DONE,         /* no further processing */
	EMULATE_DO_MMIO,      /* kvm_run filled with MMIO request */
	EMULATE_FAIL,         /* can't emulate this instruction */
	EMULATE_AGAIN,        /* something went wrong; go again */
	EMULATE_EXIT_USER,    /* emulation requires exit to user-space */
};
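
/*
 * A minimal sketch (not kernel code; the function name is hypothetical,
 * RESUME_GUEST/RESUME_HOST come from asm/kvm_host.h) of how a run loop
 * might act on these values:
 */
static inline int kvmppc_example_complete_emulation(struct kvm_vcpu *vcpu,
						    enum emulation_result er)
{
	switch (er) {
	case EMULATE_DONE:
	case EMULATE_AGAIN:
		return RESUME_GUEST;	/* re-enter the guest */
	case EMULATE_DO_MMIO:
		vcpu->run->exit_reason = KVM_EXIT_MMIO;
		return RESUME_HOST;	/* user space completes the access */
	default:			/* EMULATE_FAIL, EMULATE_EXIT_USER */
		return RESUME_HOST;
	}
}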

enum instruction_fetch_type {
	INST_GENERIC,
	INST_SC,		/* system call */
};

enum xlate_instdata {
	XLATE_INST,		/* translate instruction address */
	XLATE_DATA		/* translate data address */
};

enum xlate_readwrite {
	XLATE_READ,		/* check for read permissions */
	XLATE_WRITE		/* check for write permissions */
};

extern int kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);

extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_vcpu *vcpu,
			      unsigned int rt, unsigned int bytes,
			      int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
			       unsigned int rt, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
				unsigned int rt, unsigned int bytes,
			int is_default_endian, int mmio_sign_extend);
extern int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
		unsigned int rt, unsigned int bytes, int is_default_endian);
extern int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
		unsigned int rs, unsigned int bytes, int is_default_endian);
extern int kvmppc_handle_store(struct kvm_vcpu *vcpu,
			       u64 val, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
				int rs, unsigned int bytes,
				int is_default_endian);

extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
				 enum instruction_fetch_type type, u32 *inst);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);

/* Core-specific hooks */

extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
			   unsigned int gtlb_idx);
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
			      gva_t eaddr);
extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
			enum xlate_instdata xlid, enum xlate_readwrite xlrw,
			struct kvmppc_pte *pte);
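
/*
 * Usage sketch (hypothetical helper, not part of the kernel API):
 * translate a guest data address for a read access using the enums
 * above. A nonzero return from kvmppc_xlate() means no valid mapping.
 */
static inline gpa_t kvmppc_example_xlate_data_read(struct kvm_vcpu *vcpu,
						   ulong eaddr)
{
	struct kvmppc_pte pte;

	if (kvmppc_xlate(vcpu, eaddr, XLATE_DATA, XLATE_READ, &pte))
		return (gpa_t)-1;	/* no mapping or no read permission */

	return pte.raddr;		/* translated real address */
}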

extern int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_processor_compat(void);
extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
				      struct kvm_translation *tr);

extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				       struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
					ulong esr_flags);
extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong dear_flags,
					   ulong esr_flags);
extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong esr_flags);
extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);

extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);

extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);

extern int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order);
extern void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info);
extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order);
extern void kvmppc_free_hpt(struct kvm_hpt_info *info);
extern void kvmppc_rmap_reset(struct kvm *kvm);
extern long kvmppc_prepare_vrma(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem);
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
			struct kvm_memory_slot *memslot, unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
		struct iommu_group *grp);
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
		struct iommu_group *grp);
extern int kvmppc_switch_mmu_to_hpt(struct kvm *kvm);
extern int kvmppc_switch_mmu_to_radix(struct kvm *kvm);
extern void kvmppc_setup_partition_table(struct kvm *kvm);

extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				struct kvm_create_spapr_tce_64 *args);
#define kvmppc_ioba_validate(stt, ioba, npages)                         \
		(iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \
				(stt)->size, (ioba), (npages)) ?        \
				H_PARAMETER : H_SUCCESS)
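
/*
 * Usage sketch (abridged, modelled on the H_PUT_TCE handlers declared
 * below): validate an I/O bus address range against the TCE table
 * geometry before touching the table.
 *
 *	long ret = kvmppc_ioba_validate(stt, ioba, npages);
 *	if (ret != H_SUCCESS)
 *		return ret;
 */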
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba, unsigned long tce);
extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages);
extern long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba);
extern struct page *kvm_alloc_hpt_cma(unsigned long nr_pages);
extern void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
				     struct kvm_memory_slot *slot);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				const struct kvm_memory_slot *old,
				struct kvm_memory_slot *new,
				enum kvm_mr_change change);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
				      struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *memslot);

extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void);

extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);

extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);
extern long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
					    struct kvm_ppc_resize_hpt *rhpt);
extern long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
					   struct kvm_ppc_resize_hpt *rhpt);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);

extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_rtas_tokens_free(struct kvm *kvm);

extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu);
void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu);

union kvmppc_one_reg {
	u32	wval;
	u64	dval;
	vector128 vval;
	u64	vsxval[2];
	u32	vsx32val[4];
	u16	vsx16val[8];
	u8	vsx8val[16];
	struct {
		u64	addr;
		u64	length;
	}	vpaval;
	u64	xive_timaval[2];
};

struct kvmppc_ops {
	struct module *owner;
	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*inject_interrupt)(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);
	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
	int (*vcpu_run)(struct kvm_vcpu *vcpu);
	int (*vcpu_create)(struct kvm_vcpu *vcpu);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*check_requests)(struct kvm_vcpu *vcpu);
	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
	int (*prepare_memory_region)(struct kvm *kvm,
				     const struct kvm_memory_slot *old,
				     struct kvm_memory_slot *new,
				     enum kvm_mr_change change);
	void (*commit_memory_region)(struct kvm *kvm,
				     struct kvm_memory_slot *old,
				     const struct kvm_memory_slot *new,
				     enum kvm_mr_change change);
	bool (*unmap_gfn_range)(struct kvm *kvm, struct kvm_gfn_range *range);
	bool (*age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
	bool (*test_age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
	bool (*set_spte_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
	void (*free_memslot)(struct kvm_memory_slot *slot);
	int (*init_vm)(struct kvm *kvm);
	void (*destroy_vm)(struct kvm *kvm);
	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
	int (*emulate_op)(struct kvm_vcpu *vcpu,
			  unsigned int inst, int *advance);
	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
	long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
			      unsigned long arg);
	int (*hcall_implemented)(unsigned long hcall);
	int (*irq_bypass_add_producer)(struct irq_bypass_consumer *,
				       struct irq_bypass_producer *);
	void (*irq_bypass_del_producer)(struct irq_bypass_consumer *,
					struct irq_bypass_producer *);
	int (*configure_mmu)(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg);
	int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
	int (*set_smt_mode)(struct kvm *kvm, unsigned long mode,
			    unsigned long flags);
	void (*giveup_ext)(struct kvm_vcpu *vcpu, ulong msr);
	int (*enable_nested)(struct kvm *kvm);
	int (*load_from_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
			       int size);
	int (*store_to_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
			      int size);
	int (*enable_svm)(struct kvm *kvm);
	int (*svm_off)(struct kvm *kvm);
	int (*enable_dawr1)(struct kvm *kvm);
	bool (*hash_v3_possible)(void);
	int (*create_vm_debugfs)(struct kvm *kvm);
	int (*create_vcpu_debugfs)(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry);
};

extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;

static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
				enum instruction_fetch_type type, u32 *inst)
{
	int ret = EMULATE_DONE;
	u32 fetched_inst;

	/* Load the instruction manually if it failed to do so in the
	 * exit path */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

	/* Write fetch_failed unswapped if the fetch failed */
	if (ret == EMULATE_DONE)
		fetched_inst = kvmppc_need_byteswap(vcpu) ?
				swab32(vcpu->arch.last_inst) :
				vcpu->arch.last_inst;
	else
		fetched_inst = vcpu->arch.last_inst;

	*inst = fetched_inst;
	return ret;
}
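
/*
 * Usage sketch (hypothetical helper): fetch the instruction that caused
 * the current emulation exit, propagating fetch failures to the caller.
 */
static inline int kvmppc_example_fetch_inst(struct kvm_vcpu *vcpu, u32 *inst)
{
	int ret = kvmppc_get_last_inst(vcpu, INST_GENERIC, inst);

	if (ret != EMULATE_DONE)
		return ret;	/* e.g. EMULATE_AGAIN: re-enter the guest */

	/* *inst now holds the instruction in host byte order. */
	return EMULATE_DONE;
}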

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
{
	return kvm->arch.kvm_ops == kvmppc_hv_ops;
}

extern int kvmppc_hwrng_present(void);

/*
 * Extracts inst bits with ordering according to spec, i.e. IBM bit
 * numbering, where the leftmost (most significant) bit is bit zero.
 * All given bits are included.
 */
static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = (1 << (lsb - msb + 1)) - 1;
	r = (inst >> (63 - lsb)) & mask;

	return r;
}

/*
 * Replaces inst bits with ordering according to spec (IBM bit numbering,
 * as above).
 */
static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
	r = (inst & ~mask) | ((value << (63 - lsb)) & mask);

	return r;
}
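
/*
 * Example (illustrative, hypothetical helper): a 32-bit instruction held
 * in the low word of a u64 has its fields at msb/lsb offset by 32. The
 * major opcode occupies instruction bits 0-5, so for 0x7c0802a6 (mflr r0)
 * this returns 31.
 */
static inline u32 kvmppc_example_major_opcode(u32 inst)
{
	return kvmppc_get_field((u64)inst, 0 + 32, 5 + 32);
}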

#define one_reg_size(id)	\
	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

#define get_reg_val(id, reg)	({		\
	union kvmppc_one_reg __u;		\
	switch (one_reg_size(id)) {		\
	case 4: __u.wval = (reg); break;	\
	case 8: __u.dval = (reg); break;	\
	default: BUG();				\
	}					\
	__u;					\
})

#define set_reg_val(id, val)	({		\
	u64 __v;				\
	switch (one_reg_size(id)) {		\
	case 4: __v = (val).wval; break;	\
	case 8: __v = (val).dval; break;	\
	default: BUG();				\
	}					\
	__v;					\
})
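
/*
 * Usage sketch (abridged; modelled on the kvmppc_get_one_reg()
 * implementations, with KVM_REG_PPC_DAR as the example id):
 *
 *	case KVM_REG_PPC_DAR:
 *		*val = get_reg_val(id, kvmppc_get_dar(vcpu));
 *		break;
 *
 * set_reg_val() goes the other way, extracting a u64 from the union
 * according to the size encoded in the id.
 */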

int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);

struct openpic;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
	paca_ptrs[cpu]->kvm_hstate.xics_phys = (void __iomem *)addr;
}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{
	paca_ptrs[cpu]->kvm_hstate.xive_tima_phys = (void __iomem *)phys_addr;
	paca_ptrs[cpu]->kvm_hstate.xive_tima_virt = virt_addr;
}

static inline u32 kvmppc_get_xics_latch(void)
{
	u32 xirr;

	xirr = get_paca()->kvm_hstate.saved_xirr;
	get_paca()->kvm_hstate.saved_xirr = 0;
	return xirr;
}

/*
 * To avoid the need to unnecessarily exit fully to the host kernel, an IPI to
 * a CPU thread that's running/napping inside of a guest is by default regarded
 * as a request to wake the CPU (if needed) and continue execution within the
 * guest, potentially to process new state like externally-generated
 * interrupts or IPIs sent from within the guest itself (e.g. H_PROD/H_IPI).
 *
 * To force an exit to the host kernel, kvmppc_set_host_ipi() must be called
 * prior to issuing the IPI to set the corresponding 'host_ipi' flag in the
 * target CPU's PACA. To avoid unnecessary exits to the host, this flag should
 * be immediately cleared via kvmppc_clear_host_ipi() by the IPI handler on
 * the receiving side prior to processing the IPI work.
 *
 * NOTE:
 *
 * We currently issue an smp_mb() at the beginning of kvmppc_set_host_ipi().
 * This is to guard against sequences such as the following:
 *
 *      CPU
 *        X: smp_muxed_ipi_set_message():
 *        X:   smp_mb()
 *        X:   message[RESCHEDULE] = 1
 *        X: doorbell_global_ipi(42):
 *        X:   kvmppc_set_host_ipi(42)
 *        X:   ppc_msgsnd_sync()/smp_mb()
 *        X:   ppc_msgsnd() -> 42
 *       42: doorbell_exception(): // from CPU X
 *       42:   ppc_msgsync()
 *      105: smp_muxed_ipi_set_message():
 *      105:   smp_mb()
 *           // STORE DEFERRED DUE TO RE-ORDERING
 *    --105:   message[CALL_FUNCTION] = 1
 *    | 105: doorbell_global_ipi(42):
 *    | 105:   kvmppc_set_host_ipi(42)
 *    |  42:   kvmppc_clear_host_ipi(42)
 *    |  42: smp_ipi_demux_relaxed()
 *    |  42: // returns to executing guest
 *    |      // RE-ORDERED STORE COMPLETES
 *    ->105:   message[CALL_FUNCTION] = 1
 *      105:   ppc_msgsnd_sync()/smp_mb()
 *      105:   ppc_msgsnd() -> 42
 *       42: local_paca->kvm_hstate.host_ipi == 0 // IPI ignored
 *      105: // hangs waiting on 42 to process messages/call_single_queue
 *
 * We also issue an smp_mb() at the end of kvmppc_clear_host_ipi(). This is
 * to guard against sequences such as the following (as well as to create
 * a read-side pairing with the barrier in kvmppc_set_host_ipi()):
 *
 *      CPU
 *        X: smp_muxed_ipi_set_message():
 *        X:   smp_mb()
 *        X:   message[RESCHEDULE] = 1
 *        X: doorbell_global_ipi(42):
 *        X:   kvmppc_set_host_ipi(42)
 *        X:   ppc_msgsnd_sync()/smp_mb()
 *        X:   ppc_msgsnd() -> 42
 *       42: doorbell_exception(): // from CPU X
 *       42:   ppc_msgsync()
 *           // STORE DEFERRED DUE TO RE-ORDERING
 *    -- 42:   kvmppc_clear_host_ipi(42)
 *    |  42: smp_ipi_demux_relaxed()
 *    | 105: smp_muxed_ipi_set_message():
 *    | 105:   smp_mb()
 *    | 105:   message[CALL_FUNCTION] = 1
 *    | 105: doorbell_global_ipi(42):
 *    | 105:   kvmppc_set_host_ipi(42)
 *    |      // RE-ORDERED STORE COMPLETES
 *    -> 42:   kvmppc_clear_host_ipi(42)
 *       42: // returns to executing guest
 *      105:   ppc_msgsnd_sync()/smp_mb()
 *      105:   ppc_msgsnd() -> 42
 *       42: local_paca->kvm_hstate.host_ipi == 0 // IPI ignored
 *      105: // hangs waiting on 42 to process messages/call_single_queue
 */
static inline void kvmppc_set_host_ipi(int cpu)
{
	/*
	 * order stores of IPI messages vs. setting of host_ipi flag
	 *
	 * pairs with the barrier in kvmppc_clear_host_ipi()
	 */
	smp_mb();
	paca_ptrs[cpu]->kvm_hstate.host_ipi = 1;
}

static inline void kvmppc_clear_host_ipi(int cpu)
{
	paca_ptrs[cpu]->kvm_hstate.host_ipi = 0;
	/*
	 * order clearing of host_ipi flag vs. processing of IPI messages
	 *
	 * pairs with the barrier in kvmppc_set_host_ipi()
	 */
	smp_mb();
}
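
/*
 * Summary sketch of the pairing described above (abridged pseudo-code;
 * the doorbell functions live in the smp/doorbell code, not here):
 *
 *	// sender (e.g. doorbell_global_ipi())
 *	kvmppc_set_host_ipi(cpu);	// barrier, then set flag
 *	ppc_msgsnd_sync();
 *	ppc_msgsnd(...);		// deliver the IPI
 *
 *	// receiver (e.g. doorbell_exception())
 *	kvmppc_clear_host_ipi(smp_processor_id()); // clear flag, then barrier
 *	smp_ipi_demux_relaxed();	// process the IPI messages
 */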

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
}

extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);

extern void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu);

#else
static inline void __init kvm_cma_reserve(void)
{}

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{}

static inline u32 kvmppc_get_xics_latch(void)
{
	return 0;
}

static inline void kvmppc_set_host_ipi(int cpu)
{}

static inline void kvmppc_clear_host_ipi(int cpu)
{}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_kick(vcpu);
}

static inline bool kvm_hv_mode_active(void)		{ return false; }

#endif

#ifdef CONFIG_PPC_PSERIES
static inline bool kvmhv_on_pseries(void)
{
	return !cpu_has_feature(CPU_FTR_HVMODE);
}
#else
static inline bool kvmhv_on_pseries(void)
{
	return false;
}
#endif

#ifdef CONFIG_KVM_XICS
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}

static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
{
	if (kvm && kvm_irq_bypass)
		return kvm->arch.pimap;
	return NULL;
}

extern void kvmppc_alloc_host_rm_ops(void);
extern void kvmppc_free_host_rm_ops(void);
extern void kvmppc_free_pimap(struct kvm *kvm);
extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
extern int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
			struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xics_ipi_action(void);
extern void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
					struct kvmppc_irq_map *irq_map,
					struct kvmppc_passthru_irqmap *pimap,
					bool *again);

extern int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);

extern int h_ipi_redirect;
#else
static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
	{ return NULL; }
static inline void kvmppc_alloc_host_rm_ops(void) {}
static inline void kvmppc_free_host_rm_ops(void) {}
static inline void kvmppc_free_pimap(struct kvm *kvm) {}
static inline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
	{ return 0; }
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
	{ return 0; }
static inline int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
	{ return 0; }
#endif

#ifdef CONFIG_KVM_XIVE
/*
 * Below, the first "xive" is the "eXternal Interrupt Virtualization Engine",
 * i.e. the new P9 interrupt controller, while the second "xive" is the
 * legacy "eXternal Interrupt Vector Entry", which is the configuration of
 * an interrupt on the "xics" interrupt controller on P8 and earlier. These
 * two functions consume or produce a legacy "XIVE" state from the new
 * "XIVE" interrupt controller.
 */
extern int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);

extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				  unsigned long host_irq);
extern int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				  unsigned long host_irq);
extern u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);

extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);
extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu);
extern bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu);

static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XIVE;
}

extern int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
				     union kvmppc_one_reg *val);
extern int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
				     union kvmppc_one_reg *val);
extern bool kvmppc_xive_native_supported(void);

#else
static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				       u32 priority) { return -1; }
static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				       u32 *priority) { return -1; }
static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }

static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
static inline void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
					 unsigned long host_irq) { return -ENODEV; }
static inline int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
					 unsigned long host_irq) { return -ENODEV; }
static inline u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) { return 0; }
static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { return -ENOENT; }

static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
				      int level, bool line_status) { return -ENODEV; }
static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }
static inline void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu) { }
static inline bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu) { return true; }

static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
			  struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
static inline void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
					    union kvmppc_one_reg *val)
{ return 0; }
static inline int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
					    union kvmppc_one_reg *val)
{ return -ENOENT; }

#endif /* CONFIG_KVM_XIVE */

#if defined(CONFIG_PPC_POWERNV) && defined(CONFIG_KVM_BOOK3S_64_HANDLER)
static inline bool xics_on_xive(void)
{
	return xive_enabled() && cpu_has_feature(CPU_FTR_HVMODE);
}
#else
static inline bool xics_on_xive(void)
{
	return false;
}
#endif

/*
 * Prototypes for functions called only from assembler code.
 * Having prototypes reduces sparse errors.
 */
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			 unsigned long ioba, unsigned long tce);
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
				  unsigned long liobn, unsigned long ioba,
				  unsigned long tce_list, unsigned long npages);
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
			   unsigned long liobn, unsigned long ioba,
			   unsigned long tce_value, unsigned long npages);
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count);
long kvmppc_rm_h_random(struct kvm_vcpu *vcpu);
void kvmhv_commence_exit(int trap);
void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
void kvmppc_subcore_enter_guest(void);
void kvmppc_subcore_exit_guest(void);
long kvmppc_realmode_hmi_handler(void);
long kvmppc_p9_realmode_hmi_handler(struct kvm_vcpu *vcpu);
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel);
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn);
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu);
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn);
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index);
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
			   unsigned long dest, unsigned long src);
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data);
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu);

/*
 * Host-side operations we want to set up while running in real mode in
 * the guest, operating on the XICS. Currently only VCPU wakeup is
 * supported.
 */

union kvmppc_rm_state {
	unsigned long raw;
	struct {
		u32 in_host;
		u32 rm_action;
	};
};

struct kvmppc_host_rm_core {
	union kvmppc_rm_state rm_state;
	void *rm_data;
	char pad[112];
};

struct kvmppc_host_rm_ops {
	struct kvmppc_host_rm_core	*rm_core;
	void		(*vcpu_kick)(struct kvm_vcpu *vcpu);
};

extern struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;

static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GEPR);
#elif defined(CONFIG_BOOKE)
	return vcpu->arch.epr;
#else
	return 0;
#endif
}

static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GEPR, epr);
#elif defined(CONFIG_BOOKE)
	vcpu->arch.epr = epr;
#endif
}

#ifdef CONFIG_KVM_MPIC

void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 cpu);
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);

#else

static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
}

static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
		struct kvm_vcpu *vcpu, u32 cpu)
{
	return -EINVAL;
}

static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
		struct kvm_vcpu *vcpu)
{
}

#endif /* CONFIG_KVM_MPIC */

int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *cfg);

long kvmppc_alloc_lpid(void);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);

static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
{
	struct page *page;
	/*
	 * We can only access pages that the kernel maps
	 * as memory. Bail out for unmapped ones.
	 */
	if (!pfn_valid(pfn))
		return;

	/* Clear i-cache for new pages */
	page = pfn_to_page(pfn);
	if (!test_bit(PG_dcache_clean, &page->flags)) {
		flush_dcache_icache_page(page);
		set_bit(PG_dcache_clean, &page->flags);
	}
}

/*
 * Shared struct helpers. The shared struct can be little or big endian,
 * depending on the guest endianness. So expose helpers to all of them.
 */
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
	/* Only Book3S_64 PR supports bi-endian for now */
	return vcpu->arch.shared_big_endian;
#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
	/* Book3s_64 HV on little endian is always little endian */
	return false;
#else
	return true;
#endif
}

#define SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	return mfspr(bookehv_spr);					\
}									\

#define SPRNG_WRAPPER_SET(reg, bookehv_spr)				\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)	\
{									\
	mtspr(bookehv_spr, val);					\
}									\

#define SHARED_WRAPPER_GET(reg, size)					\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
	       return be##size##_to_cpu(vcpu->arch.shared->reg);	\
	else								\
	       return le##size##_to_cpu(vcpu->arch.shared->reg);	\
}									\

#define SHARED_WRAPPER_SET(reg, size)					\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
	       vcpu->arch.shared->reg = cpu_to_be##size(val);		\
	else								\
	       vcpu->arch.shared->reg = cpu_to_le##size(val);		\
}									\

#define SHARED_WRAPPER(reg, size)					\
	SHARED_WRAPPER_GET(reg, size)					\
	SHARED_WRAPPER_SET(reg, size)					\

#define SPRNG_WRAPPER(reg, bookehv_spr)					\
	SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
	SPRNG_WRAPPER_SET(reg, bookehv_spr)				\

#ifdef CONFIG_KVM_BOOKE_HV

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SPRNG_WRAPPER(reg, bookehv_spr)					\

#else

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SHARED_WRAPPER(reg, size)					\

#endif

SHARED_WRAPPER(critical, 64)
SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
SHARED_WRAPPER_GET(msr, 64)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
	if (kvmppc_shared_big_endian(vcpu))
	       vcpu->arch.shared->msr = cpu_to_be64(val);
	else
	       vcpu->arch.shared->msr = cpu_to_le64(val);
}
SHARED_WRAPPER(dsisr, 32)
SHARED_WRAPPER(int_pending, 32)
SHARED_WRAPPER(sprg4, 64)
SHARED_WRAPPER(sprg5, 64)
SHARED_WRAPPER(sprg6, 64)
SHARED_WRAPPER(sprg7, 64)
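
/*
 * Usage sketch: the wrappers above expand to endian-safe accessors such
 * as kvmppc_get_srr0()/kvmppc_set_srr0(). Interrupt delivery code can
 * therefore save guest state without caring about the shared-page
 * endianness, e.g. (abridged):
 *
 *	kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
 *	kvmppc_set_srr1(vcpu, kvmppc_get_msr(vcpu));
 */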

static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
	if (kvmppc_shared_big_endian(vcpu))
	       return be32_to_cpu(vcpu->arch.shared->sr[nr]);
	else
	       return le32_to_cpu(vcpu->arch.shared->sr[nr]);
}

static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
{
	if (kvmppc_shared_big_endian(vcpu))
	       vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
	else
	       vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
}

/*
 * Call this after kvmppc_prepare_to_enter(). It puts the lazy-EE and
 * IRQ-disabled tracking state back to normal mode, without actually
 * enabling interrupts.
 */
static inline void kvmppc_fix_ee_before_entry(void)
{
	trace_hardirqs_on();

#ifdef CONFIG_PPC64
	/*
	 * To avoid races, the caller must have gone directly from having
	 * interrupts fully-enabled to hard-disabled.
	 */
	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = 0;
	irq_soft_mask_set(IRQS_ENABLED);
#endif
}
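
/*
 * Usage sketch (abridged from the run-loop pattern in the KVM cores):
 *
 *	s = kvmppc_prepare_to_enter(vcpu);
 *	if (s <= 0)
 *		return s;			// bail out to the host
 *	// interrupts are now hard-disabled
 *	kvmppc_fix_ee_before_entry();
 *	// ... enter the guest
 */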

static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
{
	ulong ea;
	ulong msr_64bit = 0;

	ea = kvmppc_get_gpr(vcpu, rb);
	if (ra)
		ea += kvmppc_get_gpr(vcpu, ra);

#if defined(CONFIG_PPC_BOOK3E_64)
	msr_64bit = MSR_CM;
#elif defined(CONFIG_PPC_BOOK3S_64)
	msr_64bit = MSR_SF;
#endif

	if (!(kvmppc_get_msr(vcpu) & msr_64bit))
		ea = (uint32_t)ea;

	return ea;
}
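
/*
 * Example (illustrative): for an indexed-form access such as lwzx rt,ra,rb
 * the effective address is (ra ? GPR[ra] : 0) + GPR[rb], truncated to
 * 32 bits when the guest is in 32-bit mode. With the field accessors
 * from asm/disassemble.h this looks like:
 *
 *	ulong ea = kvmppc_get_ea_indexed(vcpu, get_ra(inst), get_rb(inst));
 */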

extern void xics_wake_cpu(int cpu);

#endif /* __POWERPC_KVM_PPC_H__ */