/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#ifndef __POWERPC_KVM_PPC_H__
#define __POWERPC_KVM_PPC_H__

/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
 * dependencies. */

#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
#else
#include <asm/kvm_booke.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/paca.h>
#endif

/*
 * KVMPPC_INST_SW_BREAKPOINT is the debug instruction used to
 * support software breakpoints.
 */
#define KVMPPC_INST_SW_BREAKPOINT	0x00dddd00
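
/*
 * Illustrative sketch (not a call site in this header): an emulation
 * path that supports software breakpoints would compare the fetched
 * instruction against this value and hand control to the debugger in
 * user space, roughly:
 *
 *	if (inst == KVMPPC_INST_SW_BREAKPOINT) {
 *		run->exit_reason = KVM_EXIT_DEBUG;
 *		run->debug.arch.address = kvmppc_get_pc(vcpu);
 *		return EMULATE_EXIT_USER;
 *	}
 */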

enum emulation_result {
	EMULATE_DONE,         /* no further processing */
	EMULATE_DO_MMIO,      /* kvm_run filled with MMIO request */
	EMULATE_FAIL,         /* can't emulate this instruction */
	EMULATE_AGAIN,        /* something went wrong. go again */
	EMULATE_EXIT_USER,    /* emulation requires exit to user-space */
};
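
/*
 * Sketch of how a caller might dispatch on these codes (illustrative
 * only; the real dispatch lives in the per-core exit handlers):
 *
 *	switch (kvmppc_emulate_instruction(run, vcpu)) {
 *	case EMULATE_DONE:	r = RESUME_GUEST; break;
 *	case EMULATE_DO_MMIO:	r = RESUME_HOST; break;
 *	case EMULATE_FAIL:	inject a program check, or give up
 *	...
 *	}
 */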

enum instruction_type {
	INST_GENERIC,
	INST_SC,		/* system call */
};

enum xlate_instdata {
	XLATE_INST,		/* translate instruction address */
	XLATE_DATA		/* translate data address */
};

enum xlate_readwrite {
	XLATE_READ,		/* check for read permissions */
	XLATE_WRITE		/* check for write permissions */
};

extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);

extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                              unsigned int rt, unsigned int bytes,
			      int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                               unsigned int rt, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			       u64 val, unsigned int bytes,
			       int is_default_endian);

extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
				 enum instruction_type type, u32 *inst);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_emulate_instruction(struct kvm_run *run,
                                      struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);

/* Core-specific hooks */

extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
                           unsigned int gtlb_idx);
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
                              gva_t eaddr);
extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
			enum xlate_instdata xlid, enum xlate_readwrite xlrw,
			struct kvmppc_pte *pte);
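
/*
 * Illustrative use of kvmppc_xlate() (hypothetical caller): translating
 * a guest effective data address before emulating a load might look
 * roughly like
 *
 *	struct kvmppc_pte pte;
 *
 *	if (kvmppc_xlate(vcpu, eaddr, XLATE_DATA, XLATE_READ, &pte))
 *		return EMULATE_FAIL;
 *	gpa = pte.raddr;
 *
 * where a non-zero return means no valid, readable mapping exists.
 */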

extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
                                                unsigned int id);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_processor_compat(void);
extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
                                      struct kvm_translation *tr);

extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                       struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
					ulong esr_flags);
extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong dear_flags,
					   ulong esr_flags);
extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong esr_flags);
extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);

extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);

extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);

extern int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order);
extern void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info);
extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order);
extern void kvmppc_free_hpt(struct kvm_hpt_info *info);
extern long kvmppc_prepare_vrma(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem);
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
			struct kvm_memory_slot *memslot, unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);

extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				struct kvm_create_spapr_tce_64 *args);
extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
		struct kvm_vcpu *vcpu, unsigned long liobn);
extern long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt,
		unsigned long ioba, unsigned long npages);
extern long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *tt,
		unsigned long tce);
extern long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
		unsigned long *ua, unsigned long **prmap);
extern void kvmppc_tce_put(struct kvmppc_spapr_tce_table *tt,
		unsigned long idx, unsigned long tce);
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba, unsigned long tce);
extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages);
extern long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba);
extern struct page *kvm_alloc_hpt_cma(unsigned long nr_pages);
extern void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
				     struct kvm_memory_slot *free,
				     struct kvm_memory_slot *dont);
extern int kvmppc_core_create_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *slot,
				      unsigned long npages);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
				      struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *memslot);

extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void);

extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);

extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);
extern long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
					    struct kvm_ppc_resize_hpt *rhpt);
extern long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
					   struct kvm_ppc_resize_hpt *rhpt);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);

extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_rtas_tokens_free(struct kvm *kvm);
extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu);
void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu);

union kvmppc_one_reg {
	u32	wval;
	u64	dval;
	vector128 vval;
	u64	vsxval[2];
	struct {
		u64	addr;
		u64	length;
	}	vpaval;
};

struct kvmppc_ops {
	struct module *owner;
	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*check_requests)(struct kvm_vcpu *vcpu);
	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
	int (*prepare_memory_region)(struct kvm *kvm,
				     struct kvm_memory_slot *memslot,
				     const struct kvm_userspace_memory_region *mem);
	void (*commit_memory_region)(struct kvm *kvm,
				     const struct kvm_userspace_memory_region *mem,
				     const struct kvm_memory_slot *old,
				     const struct kvm_memory_slot *new);
	int (*unmap_hva)(struct kvm *kvm, unsigned long hva);
	int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
			   unsigned long end);
	int (*age_hva)(struct kvm *kvm, unsigned long start, unsigned long end);
	int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
	void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
	void (*mmu_destroy)(struct kvm_vcpu *vcpu);
	void (*free_memslot)(struct kvm_memory_slot *free,
			     struct kvm_memory_slot *dont);
	int (*create_memslot)(struct kvm_memory_slot *slot,
			      unsigned long npages);
	int (*init_vm)(struct kvm *kvm);
	void (*destroy_vm)(struct kvm *kvm);
	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
	int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
			  unsigned int inst, int *advance);
	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
	long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
			      unsigned long arg);
	int (*hcall_implemented)(unsigned long hcall);
	int (*irq_bypass_add_producer)(struct irq_bypass_consumer *,
				       struct irq_bypass_producer *);
	void (*irq_bypass_del_producer)(struct irq_bypass_consumer *,
					struct irq_bypass_producer *);
	int (*configure_mmu)(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg);
	int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
};

extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;

static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
					enum instruction_type type, u32 *inst)
{
	int ret = EMULATE_DONE;
	u32 fetched_inst;

	/* Load the instruction manually if the exit path failed to do so */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

	/* Write fetch_failed unswapped if the fetch failed */
	if (ret == EMULATE_DONE)
		fetched_inst = kvmppc_need_byteswap(vcpu) ?
				swab32(vcpu->arch.last_inst) :
				vcpu->arch.last_inst;
	else
		fetched_inst = vcpu->arch.last_inst;

	*inst = fetched_inst;
	return ret;
}
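
/*
 * Typical use (sketch): an emulation path re-fetches the trapping
 * instruction and simply propagates a failed fetch:
 *
 *	u32 inst;
 *	int emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
 *
 *	if (emulated != EMULATE_DONE)
 *		return emulated;
 */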

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
{
	return kvm->arch.kvm_ops == kvmppc_hv_ops;
}

extern int kvmppc_hwrng_present(void);

/*
 * Extracts instruction bits, numbered according to the Power ISA
 * convention: the leftmost (most significant) bit is bit zero.
 * Both the msb and lsb bits are included in the result.
 */
static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = (1 << (lsb - msb + 1)) - 1;
	r = (inst >> (63 - lsb)) & mask;

	return r;
}

/*
 * Replaces instruction bits, using the same Power ISA bit numbering
 * (leftmost bit is zero).
 */
static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
	r = (inst & ~mask) | ((value << (63 - lsb)) & mask);

	return r;
}
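
/*
 * Worked example of the bit numbering (illustrative): with a 32-bit
 * instruction in the low word of 'inst', the instruction's bit 0 sits
 * at 64-bit bit 32, so the RT field of a load (instruction bits 6..10)
 * is extracted by
 *
 *	rt = kvmppc_get_field(inst, 32 + 6, 32 + 10);
 *
 * which computes (inst >> 21) & 0x1f, as expected.
 */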

#define one_reg_size(id)	\
	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

#define get_reg_val(id, reg)	({		\
	union kvmppc_one_reg __u;		\
	switch (one_reg_size(id)) {		\
	case 4: __u.wval = (reg); break;	\
	case 8: __u.dval = (reg); break;	\
	default: BUG();				\
	}					\
	__u;					\
})

#define set_reg_val(id, val)	({		\
	u64 __v;				\
	switch (one_reg_size(id)) {		\
	case 4: __v = (val).wval; break;	\
	case 8: __v = (val).dval; break;	\
	default: BUG();				\
	}					\
	__v;					\
})
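
/*
 * Sketch of how these macros pair up in a ONE_REG accessor
 * (hypothetical switch arms; the real ones live in the per-core
 * get_one_reg/set_one_reg implementations):
 *
 *	case KVM_REG_PPC_DAR:	(get path)
 *		*val = get_reg_val(id, kvmppc_get_dar(vcpu));
 *		break;
 *
 *	case KVM_REG_PPC_DAR:	(set path)
 *		kvmppc_set_dar(vcpu, set_reg_val(id, *val));
 *		break;
 */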

int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);

struct openpic;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
	paca[cpu].kvm_hstate.xics_phys = addr;
}

static inline u32 kvmppc_get_xics_latch(void)
{
	u32 xirr;

	xirr = get_paca()->kvm_hstate.saved_xirr;
	get_paca()->kvm_hstate.saved_xirr = 0;
	return xirr;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{
	paca[cpu].kvm_hstate.host_ipi = host_ipi;
}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
}

extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);

#else
static inline void __init kvm_cma_reserve(void)
{}

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline u32 kvmppc_get_xics_latch(void)
{
	return 0;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_kick(vcpu);
}

static inline bool kvm_hv_mode_active(void)		{ return false; }

#endif

#ifdef CONFIG_KVM_XICS
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}

static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
{
	if (kvm && kvm_irq_bypass)
		return kvm->arch.pimap;
	return NULL;
}

extern void kvmppc_alloc_host_rm_ops(void);
extern void kvmppc_free_host_rm_ops(void);
extern void kvmppc_free_pimap(struct kvm *kvm);
extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server);
extern int kvm_vm_ioctl_xics_irq(struct kvm *kvm, struct kvm_irq_level *args);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
			struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xics_ipi_action(void);
extern void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
					struct kvmppc_irq_map *irq_map,
					struct kvmppc_passthru_irqmap *pimap,
					bool *again);
extern int h_ipi_redirect;
#else
static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
	{ return NULL; }
static inline void kvmppc_alloc_host_rm_ops(void) {}
static inline void kvmppc_free_host_rm_ops(void) {}
static inline void kvmppc_free_pimap(struct kvm *kvm) {}
static inline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
	{ return 0; }
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu,
					 unsigned long server)
	{ return -EINVAL; }
static inline int kvm_vm_ioctl_xics_irq(struct kvm *kvm,
					struct kvm_irq_level *args)
	{ return -ENOTTY; }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
	{ return 0; }
#endif

/*
 * Prototypes for functions called only from assembler code.
 * Having prototypes reduces sparse errors.
 */
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			 unsigned long ioba, unsigned long tce);
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
				  unsigned long liobn, unsigned long ioba,
				  unsigned long tce_list, unsigned long npages);
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
			   unsigned long liobn, unsigned long ioba,
			   unsigned long tce_value, unsigned long npages);
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
                            unsigned int yield_count);
long kvmppc_h_random(struct kvm_vcpu *vcpu);
void kvmhv_commence_exit(int trap);
long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
void kvmppc_subcore_enter_guest(void);
void kvmppc_subcore_exit_guest(void);
long kvmppc_realmode_hmi_handler(void);
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
                    long pte_index, unsigned long pteh, unsigned long ptel);
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
                     unsigned long pte_index, unsigned long avpn);
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu);
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
                      unsigned long pte_index, unsigned long avpn,
                      unsigned long va);
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
                   unsigned long pte_index);
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
                        unsigned long pte_index);
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
                        unsigned long pte_index);
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
                          unsigned long slb_v, unsigned int status, bool data);
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
                    unsigned long mfrr);
int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);

/*
 * Host-side operations that we set up for use while running in
 * real mode in the guest, operating on the XICS.
 * Currently only VCPU wakeup is supported.
 */

union kvmppc_rm_state {
	unsigned long raw;
	struct {
		u32 in_host;
		u32 rm_action;
	};
};

struct kvmppc_host_rm_core {
	union kvmppc_rm_state rm_state;
	void *rm_data;
	char pad[112];
};

struct kvmppc_host_rm_ops {
	struct kvmppc_host_rm_core	*rm_core;
	void		(*vcpu_kick)(struct kvm_vcpu *vcpu);
};

extern struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;

static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GEPR);
#elif defined(CONFIG_BOOKE)
	return vcpu->arch.epr;
#else
	return 0;
#endif
}

static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GEPR, epr);
#elif defined(CONFIG_BOOKE)
	vcpu->arch.epr = epr;
#endif
}

#ifdef CONFIG_KVM_MPIC

void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 cpu);
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);

#else

static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
}

static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
		struct kvm_vcpu *vcpu, u32 cpu)
{
	return -EINVAL;
}

static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
		struct kvm_vcpu *vcpu)
{
}

#endif /* CONFIG_KVM_MPIC */

int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *cfg);

long kvmppc_alloc_lpid(void);
void kvmppc_claim_lpid(long lpid);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);

static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
{
	struct page *page;
	/*
	 * We can only access pages that the kernel maps
	 * as memory. Bail out for unmapped ones.
	 */
	if (!pfn_valid(pfn))
		return;

	/* Clear i-cache for new pages */
	page = pfn_to_page(pfn);
	if (!test_bit(PG_arch_1, &page->flags)) {
		flush_dcache_icache_page(page);
		set_bit(PG_arch_1, &page->flags);
	}
}

/*
 * Shared struct helpers. The shared struct can be little or big endian,
 * depending on the guest endianness, so provide helpers that handle
 * either layout.
 */
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
	/* Only Book3S_64 PR supports bi-endian for now */
	return vcpu->arch.shared_big_endian;
#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
	/* Book3s_64 HV on little endian is always little endian */
	return false;
#else
	return true;
#endif
}

#define SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	return mfspr(bookehv_spr);					\
}									\

#define SPRNG_WRAPPER_SET(reg, bookehv_spr)				\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)	\
{									\
	mtspr(bookehv_spr, val);					\
}									\

#define SHARED_WRAPPER_GET(reg, size)					\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
	       return be##size##_to_cpu(vcpu->arch.shared->reg);	\
	else								\
	       return le##size##_to_cpu(vcpu->arch.shared->reg);	\
}									\

#define SHARED_WRAPPER_SET(reg, size)					\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
	       vcpu->arch.shared->reg = cpu_to_be##size(val);		\
	else								\
	       vcpu->arch.shared->reg = cpu_to_le##size(val);		\
}									\

#define SHARED_WRAPPER(reg, size)					\
	SHARED_WRAPPER_GET(reg, size)					\
	SHARED_WRAPPER_SET(reg, size)					\

#define SPRNG_WRAPPER(reg, bookehv_spr)					\
	SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
	SPRNG_WRAPPER_SET(reg, bookehv_spr)				\

#ifdef CONFIG_KVM_BOOKE_HV

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SPRNG_WRAPPER(reg, bookehv_spr)					\

#else

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SHARED_WRAPPER(reg, size)					\

#endif
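
/*
 * Expansion sketch (for reference): with CONFIG_KVM_BOOKE_HV,
 * SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0) generates accessors
 * that hit the guest SPR directly, equivalent to
 *
 *	ulong kvmppc_get_sprg0(vcpu)		returns mfspr(SPRN_GSPRG0)
 *	void kvmppc_set_sprg0(vcpu, val)	does mtspr(SPRN_GSPRG0, val)
 *
 * while without it the same line falls back to SHARED_WRAPPER, i.e.
 * byte-swapped accesses to vcpu->arch.shared->sprg0 as selected by
 * kvmppc_shared_big_endian().
 */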

SHARED_WRAPPER(critical, 64)
SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
SHARED_WRAPPER_GET(msr, 64)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
	if (kvmppc_shared_big_endian(vcpu))
	       vcpu->arch.shared->msr = cpu_to_be64(val);
	else
	       vcpu->arch.shared->msr = cpu_to_le64(val);
}
SHARED_WRAPPER(dsisr, 32)
SHARED_WRAPPER(int_pending, 32)
SHARED_WRAPPER(sprg4, 64)
SHARED_WRAPPER(sprg5, 64)
SHARED_WRAPPER(sprg6, 64)
SHARED_WRAPPER(sprg7, 64)

static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
	if (kvmppc_shared_big_endian(vcpu))
	       return be32_to_cpu(vcpu->arch.shared->sr[nr]);
	else
	       return le32_to_cpu(vcpu->arch.shared->sr[nr]);
}

static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
{
	if (kvmppc_shared_big_endian(vcpu))
	       vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
	else
	       vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
}

/*
 * Call this after prepare_to_enter. It puts the lazy-EE and
 * IRQ-disabled tracking state back to normal mode, without actually
 * enabling interrupts.
 */
static inline void kvmppc_fix_ee_before_entry(void)
{
	trace_hardirqs_on();

#ifdef CONFIG_PPC64
	/*
	 * To avoid races, the caller must have gone directly from having
	 * interrupts fully-enabled to hard-disabled.
	 */
	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = 0;
	local_paca->soft_enabled = 1;
#endif
}

static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
{
	ulong ea;
	ulong msr_64bit = 0;

	ea = kvmppc_get_gpr(vcpu, rb);
	if (ra)
		ea += kvmppc_get_gpr(vcpu, ra);

#if defined(CONFIG_PPC_BOOK3E_64)
	msr_64bit = MSR_CM;
#elif defined(CONFIG_PPC_BOOK3S_64)
	msr_64bit = MSR_SF;
#endif

	if (!(kvmppc_get_msr(vcpu) & msr_64bit))
		ea = (uint32_t)ea;

	return ea;
}
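
/*
 * Worked example (illustrative): emulating an indexed load such as
 * lwzx with the guest in 32-bit mode, GPR[ra] = 0xffffffff and
 * GPR[rb] = 2 gives ea = 0x100000001 before truncation; the final
 * (uint32_t) cast yields 0x00000001, matching the architected 32-bit
 * address wrap.
 */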

extern void xics_wake_cpu(int cpu);

#endif /* __POWERPC_KVM_PPC_H__ */