/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#ifndef __POWERPC_KVM_PPC_H__
#define __POWERPC_KVM_PPC_H__

/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
 * dependencies. */

#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
#else
#include <asm/kvm_booke.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/paca.h>
#endif

/*
 * KVMPPC_INST_SW_BREAKPOINT is the debug instruction used to support
 * software breakpoints.
 */
#define KVMPPC_INST_SW_BREAKPOINT	0x00dddd00
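/*
 * A sketch (not the exact emulation code) of how an emulation path can
 * recognise the breakpoint instruction; "run" and "vcpu" stand for the
 * usual exit-handler locals:
 *
 *	if (inst == KVMPPC_INST_SW_BREAKPOINT) {
 *		run->exit_reason = KVM_EXIT_DEBUG;
 *		run->debug.arch.address = kvmppc_get_pc(vcpu);
 *	}
 */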

enum emulation_result {
	EMULATE_DONE,         /* no further processing */
	EMULATE_DO_MMIO,      /* kvm_run filled with MMIO request */
	EMULATE_FAIL,         /* can't emulate this instruction */
	EMULATE_AGAIN,        /* something went wrong; try again */
	EMULATE_EXIT_USER,    /* emulation requires exit to user-space */
};
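/*
 * A caller typically dispatches on the result; roughly (a sketch with a
 * hypothetical resume variable "r", not the code of any one exit handler):
 *
 *	switch (kvmppc_emulate_instruction(run, vcpu)) {
 *	case EMULATE_DONE:
 *		r = RESUME_GUEST;
 *		break;
 *	case EMULATE_DO_MMIO:
 *		run->exit_reason = KVM_EXIT_MMIO;
 *		r = RESUME_HOST;
 *		break;
 *	default:
 *		// inject a program check or exit to userspace
 *		break;
 *	}
 */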

enum instruction_fetch_type {
	INST_GENERIC,
	INST_SC,		/* system call */
};

enum xlate_instdata {
	XLATE_INST,		/* translate instruction address */
	XLATE_DATA		/* translate data address */
};

enum xlate_readwrite {
	XLATE_READ,		/* check for read permissions */
	XLATE_WRITE		/* check for write permissions */
};

extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);

extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
			      unsigned int rt, unsigned int bytes,
			      int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			       unsigned int rt, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned int rt, unsigned int bytes,
				int is_default_endian, int mmio_sign_extend);
extern int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		unsigned int rt, unsigned int bytes, int is_default_endian);
extern int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
		unsigned int rs, unsigned int bytes, int is_default_endian);
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			       u64 val, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
				int rs, unsigned int bytes,
				int is_default_endian);

extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
				 enum instruction_fetch_type type, u32 *inst);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_emulate_instruction(struct kvm_run *run,
				      struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);

/* Core-specific hooks */

extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
			   unsigned int gtlb_idx);
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
			      gva_t eaddr);
extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
			enum xlate_instdata xlid, enum xlate_readwrite xlrw,
			struct kvmppc_pte *pte);
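/*
 * Usage sketch (assuming the usual 0-on-success convention): translate a
 * guest effective address before emulating a data read:
 *
 *	struct kvmppc_pte pte;
 *
 *	if (kvmppc_xlate(vcpu, eaddr, XLATE_DATA, XLATE_READ, &pte) == 0)
 *		raddr = pte.raddr;	// guest real address of the access
 */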

extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
						unsigned int id);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_processor_compat(void);
extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
				      struct kvm_translation *tr);

extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				       struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
					ulong esr_flags);
extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong dear_flags,
					   ulong esr_flags);
extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong esr_flags);
extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);

extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);

extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);

extern int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order);
extern void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info);
extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order);
extern void kvmppc_free_hpt(struct kvm_hpt_info *info);
extern void kvmppc_rmap_reset(struct kvm *kvm);
extern long kvmppc_prepare_vrma(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem);
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
			struct kvm_memory_slot *memslot, unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
		struct iommu_group *grp);
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
		struct iommu_group *grp);
extern int kvmppc_switch_mmu_to_hpt(struct kvm *kvm);
extern int kvmppc_switch_mmu_to_radix(struct kvm *kvm);
extern void kvmppc_setup_partition_table(struct kvm *kvm);

extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				struct kvm_create_spapr_tce_64 *args);
extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
		struct kvm *kvm, unsigned long liobn);
#define kvmppc_ioba_validate(stt, ioba, npages)                         \
		(iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \
				(stt)->size, (ioba), (npages)) ?        \
				H_PARAMETER : H_SUCCESS)
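/*
 * Typical use (sketch): an H_PUT_TCE handler validates the I/O bus
 * address range before touching the TCE table:
 *
 *	long ret = kvmppc_ioba_validate(stt, ioba, npages);
 *
 *	if (ret != H_SUCCESS)
 *		return ret;
 */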
extern long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
		unsigned long *ua, unsigned long **prmap);
extern void kvmppc_tce_put(struct kvmppc_spapr_tce_table *tt,
		unsigned long idx, unsigned long tce);
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba, unsigned long tce);
extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages);
extern long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba);
extern struct page *kvm_alloc_hpt_cma(unsigned long nr_pages);
extern void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
				     struct kvm_memory_slot *free,
				     struct kvm_memory_slot *dont);
extern int kvmppc_core_create_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *slot,
				      unsigned long npages);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
				      struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *memslot);

extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void);

extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);

extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);
extern long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
					    struct kvm_ppc_resize_hpt *rhpt);
extern long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
					   struct kvm_ppc_resize_hpt *rhpt);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);

extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_rtas_tokens_free(struct kvm *kvm);

extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu);
void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu);

union kvmppc_one_reg {
	u32	wval;
	u64	dval;
	vector128 vval;
	u64	vsxval[2];
	u32	vsx32val[4];
	u16	vsx16val[8];
	u8	vsx8val[16];
	struct {
		u64	addr;
		u64	length;
	}	vpaval;
};

struct kvmppc_ops {
	struct module *owner;
	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*check_requests)(struct kvm_vcpu *vcpu);
	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
	int (*prepare_memory_region)(struct kvm *kvm,
				     struct kvm_memory_slot *memslot,
				     const struct kvm_userspace_memory_region *mem);
	void (*commit_memory_region)(struct kvm *kvm,
				     const struct kvm_userspace_memory_region *mem,
				     const struct kvm_memory_slot *old,
				     const struct kvm_memory_slot *new,
				     enum kvm_mr_change change);
	int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
			       unsigned long end);
	int (*age_hva)(struct kvm *kvm, unsigned long start, unsigned long end);
	int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
	void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
	void (*mmu_destroy)(struct kvm_vcpu *vcpu);
	void (*free_memslot)(struct kvm_memory_slot *free,
			     struct kvm_memory_slot *dont);
	int (*create_memslot)(struct kvm_memory_slot *slot,
			      unsigned long npages);
	int (*init_vm)(struct kvm *kvm);
	void (*destroy_vm)(struct kvm *kvm);
	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
	int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
			  unsigned int inst, int *advance);
	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
	long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
			      unsigned long arg);
	int (*hcall_implemented)(unsigned long hcall);
	int (*irq_bypass_add_producer)(struct irq_bypass_consumer *,
				       struct irq_bypass_producer *);
	void (*irq_bypass_del_producer)(struct irq_bypass_consumer *,
					struct irq_bypass_producer *);
	int (*configure_mmu)(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg);
	int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
	int (*set_smt_mode)(struct kvm *kvm, unsigned long mode,
			    unsigned long flags);
	void (*giveup_ext)(struct kvm_vcpu *vcpu, ulong msr);
	int (*enable_nested)(struct kvm *kvm);
	int (*load_from_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
			       int size);
	int (*store_to_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
			      int size);
};

extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;

static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
				enum instruction_fetch_type type, u32 *inst)
{
	int ret = EMULATE_DONE;
	u32 fetched_inst;

	/* Load the instruction manually if the exit path failed to do so */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

	/* Write KVM_INST_FETCH_FAILED back unswapped if the fetch failed */
	if (ret == EMULATE_DONE)
		fetched_inst = kvmppc_need_byteswap(vcpu) ?
				swab32(vcpu->arch.last_inst) :
				vcpu->arch.last_inst;
	else
		fetched_inst = vcpu->arch.last_inst;

	*inst = fetched_inst;
	return ret;
}
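
/*
 * A typical caller (sketch), e.g. on an emulation exit:
 *
 *	u32 inst;
 *
 *	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE)
 *		return EMULATE_AGAIN;
 *	// decode and emulate "inst" ...
 */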

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
{
	return kvm->arch.kvm_ops == kvmppc_hv_ops;
}

extern int kvmppc_hwrng_present(void);

/*
 * Extracts a bit field from inst, using the bit ordering defined by the
 * Power ISA: bit 0 is the leftmost (most significant) bit. Both msb and
 * lsb are included in the field.
 */
static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = (1 << (lsb - msb + 1)) - 1;
	r = (inst >> (63 - lsb)) & mask;

	return r;
}
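
/*
 * Example: for a 32-bit instruction held in the low word of inst, the
 * 6-bit primary opcode (spec bits 0-5 of the instruction, i.e. bits
 * 32-37 of the doubleword) is kvmppc_get_field(inst, 32, 37).
 */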

/*
 * Replaces a bit field in inst, using the same Power ISA bit ordering
 * as kvmppc_get_field(). Both msb and lsb are included.
 */
static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
	r = (inst & ~mask) | ((value << (63 - lsb)) & mask);

	return r;
}
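
/*
 * Example: kvmppc_set_field(inst, 38, 42, rt) rewrites the 5-bit RT
 * field (spec bits 6-10 of a 32-bit instruction held in the low word)
 * with the register number in rt.
 */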

#define one_reg_size(id)	\
	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

#define get_reg_val(id, reg)	({		\
	union kvmppc_one_reg __u;		\
	switch (one_reg_size(id)) {		\
	case 4: __u.wval = (reg); break;	\
	case 8: __u.dval = (reg); break;	\
	default: BUG();				\
	}					\
	__u;					\
})

#define set_reg_val(id, val)	({		\
	u64 __v;				\
	switch (one_reg_size(id)) {		\
	case 4: __v = (val).wval; break;	\
	case 8: __v = (val).dval; break;	\
	default: BUG();				\
	}					\
	__v;					\
})
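
/*
 * Usage sketch for the ONE_REG helpers: the get path packs a vcpu field
 * into a union sized according to the register id, while the set path
 * unpacks a user-supplied value:
 *
 *	*val = get_reg_val(id, vcpu->arch.vrsave);
 *	vcpu->arch.vrsave = set_reg_val(id, *val);
 */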

int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);

struct openpic;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
	paca_ptrs[cpu]->kvm_hstate.xics_phys = (void __iomem *)addr;
}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{
	paca_ptrs[cpu]->kvm_hstate.xive_tima_phys = (void __iomem *)phys_addr;
	paca_ptrs[cpu]->kvm_hstate.xive_tima_virt = virt_addr;
}

static inline u32 kvmppc_get_xics_latch(void)
{
	u32 xirr;

	xirr = get_paca()->kvm_hstate.saved_xirr;
	get_paca()->kvm_hstate.saved_xirr = 0;
	return xirr;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{
	paca_ptrs[cpu]->kvm_hstate.host_ipi = host_ipi;
}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
}

extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);

#else
static inline void __init kvm_cma_reserve(void)
{}

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{}

static inline u32 kvmppc_get_xics_latch(void)
{
	return 0;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_kick(vcpu);
}

static inline bool kvm_hv_mode_active(void)		{ return false; }

#endif

#ifdef CONFIG_KVM_XICS
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}

static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
{
	if (kvm && kvm_irq_bypass)
		return kvm->arch.pimap;
	return NULL;
}

extern void kvmppc_alloc_host_rm_ops(void);
extern void kvmppc_free_host_rm_ops(void);
extern void kvmppc_free_pimap(struct kvm *kvm);
extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
			struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xics_ipi_action(void);
extern void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
					struct kvmppc_irq_map *irq_map,
					struct kvmppc_passthru_irqmap *pimap,
					bool *again);

extern int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);

extern int h_ipi_redirect;
#else
static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
	{ return NULL; }
static inline void kvmppc_alloc_host_rm_ops(void) {}
static inline void kvmppc_free_host_rm_ops(void) {}
static inline void kvmppc_free_pimap(struct kvm *kvm) {}
static inline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
	{ return 0; }
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
	{ return 0; }
#endif

#ifdef CONFIG_KVM_XIVE
/*
 * The first "xive" below refers to the "eXternal Interrupt Virtualization
 * Engine", i.e. the new POWER9 interrupt controller, while the second
 * "xive" is the legacy "eXternal Interrupt Vector Entry", which is the
 * configuration of an interrupt on the "xics" interrupt controller of
 * POWER8 and earlier machines. These two functions consume or produce
 * a legacy "XIVE" state for the new "XIVE" interrupt controller.
 */
extern int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);
extern void kvmppc_xive_init_module(void);
extern void kvmppc_xive_exit_module(void);

extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				  struct irq_desc *host_desc);
extern int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				  struct irq_desc *host_desc);
extern u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);

extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);
extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu);
#else
static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				       u32 priority) { return -1; }
static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				       u32 *priority) { return -1; }
static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }
static inline void kvmppc_xive_init_module(void) { }
static inline void kvmppc_xive_exit_module(void) { }

static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
static inline void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
					 struct irq_desc *host_desc) { return -ENODEV; }
static inline int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
					 struct irq_desc *host_desc) { return -ENODEV; }
static inline u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) { return 0; }
static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { return -ENOENT; }

static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
				      int level, bool line_status) { return -ENODEV; }
static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }
#endif /* CONFIG_KVM_XIVE */

/*
 * Prototypes for functions called only from assembler code.
 * Having prototypes reduces sparse errors.
 */
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			 unsigned long ioba, unsigned long tce);
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
				  unsigned long liobn, unsigned long ioba,
				  unsigned long tce_list, unsigned long npages);
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
			   unsigned long liobn, unsigned long ioba,
			   unsigned long tce_value, unsigned long npages);
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count);
long kvmppc_h_random(struct kvm_vcpu *vcpu);
void kvmhv_commence_exit(int trap);
long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
void kvmppc_subcore_enter_guest(void);
void kvmppc_subcore_exit_guest(void);
long kvmppc_realmode_hmi_handler(void);
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel);
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn);
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu);
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va);
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index);
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data);
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu);
unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		    unsigned long mfrr);
int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu);

/*
 * Host-side operations that we want to set up while running in real
 * mode in the guest, operating on the XICS. Currently only vCPU wakeup
 * is supported.
 */

union kvmppc_rm_state {
	unsigned long raw;
	struct {
		u32 in_host;
		u32 rm_action;
	};
};

struct kvmppc_host_rm_core {
	union kvmppc_rm_state rm_state;
	void *rm_data;
	char pad[112];
};

struct kvmppc_host_rm_ops {
	struct kvmppc_host_rm_core	*rm_core;
	void		(*vcpu_kick)(struct kvm_vcpu *vcpu);
};

extern struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;

static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GEPR);
#elif defined(CONFIG_BOOKE)
	return vcpu->arch.epr;
#else
	return 0;
#endif
}

static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GEPR, epr);
#elif defined(CONFIG_BOOKE)
	vcpu->arch.epr = epr;
#endif
}

#ifdef CONFIG_KVM_MPIC

void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 cpu);
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);

#else

static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
}

static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
		struct kvm_vcpu *vcpu, u32 cpu)
{
	return -EINVAL;
}

static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
		struct kvm_vcpu *vcpu)
{
}

#endif /* CONFIG_KVM_MPIC */

int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *cfg);

long kvmppc_alloc_lpid(void);
void kvmppc_claim_lpid(long lpid);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);

static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
{
	struct page *page;
	/*
	 * We can only access pages that the kernel maps
	 * as memory. Bail out for unmapped ones.
	 */
	if (!pfn_valid(pfn))
		return;

	/* Clear i-cache for new pages */
	page = pfn_to_page(pfn);
	if (!test_bit(PG_arch_1, &page->flags)) {
		flush_dcache_icache_page(page);
		set_bit(PG_arch_1, &page->flags);
	}
}

/*
 * Shared struct helpers. The shared struct can be little- or big-endian,
 * depending on the guest endianness, so expose helpers that handle both
 * layouts for all of its fields.
 */
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
	/* Only Book3S_64 PR supports bi-endian for now */
	return vcpu->arch.shared_big_endian;
#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
	/* Book3s_64 HV on little endian is always little endian */
	return false;
#else
	return true;
#endif
}

#define SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	return mfspr(bookehv_spr);					\
}									\

#define SPRNG_WRAPPER_SET(reg, bookehv_spr)				\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)	\
{									\
	mtspr(bookehv_spr, val);					\
}									\

#define SHARED_WRAPPER_GET(reg, size)					\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
	       return be##size##_to_cpu(vcpu->arch.shared->reg);	\
	else								\
	       return le##size##_to_cpu(vcpu->arch.shared->reg);	\
}									\

#define SHARED_WRAPPER_SET(reg, size)					\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
	       vcpu->arch.shared->reg = cpu_to_be##size(val);		\
	else								\
	       vcpu->arch.shared->reg = cpu_to_le##size(val);		\
}									\

#define SHARED_WRAPPER(reg, size)					\
	SHARED_WRAPPER_GET(reg, size)					\
	SHARED_WRAPPER_SET(reg, size)					\

#define SPRNG_WRAPPER(reg, bookehv_spr)					\
	SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
	SPRNG_WRAPPER_SET(reg, bookehv_spr)				\

#ifdef CONFIG_KVM_BOOKE_HV

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SPRNG_WRAPPER(reg, bookehv_spr)					\

#else

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SHARED_WRAPPER(reg, size)					\

#endif

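/*
 * Each expansion below generates an accessor pair; e.g.
 * SHARED_WRAPPER(critical, 64) produces kvmppc_get_critical() and
 * kvmppc_set_critical(), which access vcpu->arch.shared->critical in the
 * guest's endianness, while SHARED_SPRNG_WRAPPER() reads and writes the
 * corresponding guest SPR directly on Book E HV.
 */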
SHARED_WRAPPER(critical, 64)
SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
SHARED_WRAPPER_GET(msr, 64)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
	if (kvmppc_shared_big_endian(vcpu))
	       vcpu->arch.shared->msr = cpu_to_be64(val);
	else
	       vcpu->arch.shared->msr = cpu_to_le64(val);
}
SHARED_WRAPPER(dsisr, 32)
SHARED_WRAPPER(int_pending, 32)
SHARED_WRAPPER(sprg4, 64)
SHARED_WRAPPER(sprg5, 64)
SHARED_WRAPPER(sprg6, 64)
SHARED_WRAPPER(sprg7, 64)

static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
	if (kvmppc_shared_big_endian(vcpu))
	       return be32_to_cpu(vcpu->arch.shared->sr[nr]);
	else
	       return le32_to_cpu(vcpu->arch.shared->sr[nr]);
}

static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
{
	if (kvmppc_shared_big_endian(vcpu))
	       vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
	else
	       vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
}

/*
 * Call this after kvmppc_prepare_to_enter(). It puts the lazy-EE and
 * IRQ-disabled tracking state back to normal mode, without actually
 * enabling interrupts.
 */
static inline void kvmppc_fix_ee_before_entry(void)
{
	trace_hardirqs_on();

#ifdef CONFIG_PPC64
	/*
	 * To avoid races, the caller must have gone directly from having
	 * interrupts fully-enabled to hard-disabled.
	 */
	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = 0;
	irq_soft_mask_set(IRQS_ENABLED);
#endif
}
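
/*
 * A guest-entry path therefore looks roughly like this (sketch; "r" is a
 * hypothetical resume code):
 *
 *	r = kvmppc_prepare_to_enter(vcpu);	// hard-disables interrupts
 *	if (r <= 0)
 *		return r;
 *	kvmppc_fix_ee_before_entry();
 *	// ... enter the guest, which enables MSR[EE]
 */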

static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
{
	ulong ea;
	ulong msr_64bit = 0;

	ea = kvmppc_get_gpr(vcpu, rb);
	if (ra)
		ea += kvmppc_get_gpr(vcpu, ra);

#if defined(CONFIG_PPC_BOOK3E_64)
	msr_64bit = MSR_CM;
#elif defined(CONFIG_PPC_BOOK3S_64)
	msr_64bit = MSR_SF;
#endif

	if (!(kvmppc_get_msr(vcpu) & msr_64bit))
		ea = (uint32_t)ea;

	return ea;
}
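
/*
 * For an indexed load such as "lwzx rt, ra, rb" this computes
 * EA = (ra ? GPR[ra] : 0) + GPR[rb], truncated to 32 bits when the
 * guest is not in 64-bit mode.
 */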

extern void xics_wake_cpu(int cpu);

#endif /* __POWERPC_KVM_PPC_H__ */