/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#ifndef __POWERPC_KVM_PPC_H__
#define __POWERPC_KVM_PPC_H__

/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
 * dependencies. */

#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
#else
#include <asm/kvm_booke.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/paca.h>
#include <asm/xive.h>
#include <asm/cpu_has_feature.h>
#endif
#include <asm/inst.h>

/*
 * KVMPPC_INST_SW_BREAKPOINT is the debug instruction used to implement
 * software breakpoints.
 */
#define KVMPPC_INST_SW_BREAKPOINT	0x00dddd00

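/*
 * Illustrative sketch of assumed usage (not defined in this header): an
 * instruction emulation path may recognise this opcode and exit to
 * userspace as a debug exit, roughly:
 *
 *	if (inst == KVMPPC_INST_SW_BREAKPOINT) {
 *		vcpu->run->exit_reason = KVM_EXIT_DEBUG;
 *		vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu);
 *		return EMULATE_EXIT_USER;
 *	}
 */
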
enum emulation_result {
	EMULATE_DONE,         /* no further processing */
	EMULATE_DO_MMIO,      /* kvm_run filled with MMIO request */
	EMULATE_FAIL,         /* can't emulate this instruction */
	EMULATE_AGAIN,        /* something went wrong; try again */
	EMULATE_EXIT_USER,    /* emulation requires exit to user-space */
};

enum instruction_fetch_type {
	INST_GENERIC,
	INST_SC,		/* system call */
};

enum xlate_instdata {
	XLATE_INST,		/* translate instruction address */
	XLATE_DATA		/* translate data address */
};

enum xlate_readwrite {
	XLATE_READ,		/* check for read permissions */
	XLATE_WRITE		/* check for write permissions */
};

extern int kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);

extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_vcpu *vcpu,
			      unsigned int rt, unsigned int bytes,
			      int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
			       unsigned int rt, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
				unsigned int rt, unsigned int bytes,
			int is_default_endian, int mmio_sign_extend);
extern int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
		unsigned int rt, unsigned int bytes, int is_default_endian);
extern int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
		unsigned int rs, unsigned int bytes, int is_default_endian);
extern int kvmppc_handle_store(struct kvm_vcpu *vcpu,
			       u64 val, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
				int rs, unsigned int bytes,
				int is_default_endian);

extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
				 enum instruction_fetch_type type,
				 unsigned long *inst);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);

/* Core-specific hooks */

extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
			   unsigned int gtlb_idx);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
			      gva_t eaddr);
extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
			enum xlate_instdata xlid, enum xlate_readwrite xlrw,
			struct kvmppc_pte *pte);

extern int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
				      struct kvm_translation *tr);

extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);

extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu,
					    ulong srr1_flags);
extern void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu,
				      ulong srr1_flags);
extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu,
					ulong srr1_flags);
extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu,
					  ulong srr1_flags);
extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu,
					  ulong srr1_flags);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				       struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
					ulong dear_flags,
					ulong esr_flags);
extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong srr1_flags,
					   ulong dar,
					   ulong dsisr);
extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong srr1_flags);

extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);

extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);

extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);

extern int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order);
extern void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info);
extern int kvmppc_alloc_reset_hpt(struct kvm *kvm, int order);
extern void kvmppc_free_hpt(struct kvm_hpt_info *info);
extern void kvmppc_rmap_reset(struct kvm *kvm);
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
			struct kvm_memory_slot *memslot, unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
		struct iommu_group *grp);
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
		struct iommu_group *grp);
extern int kvmppc_switch_mmu_to_hpt(struct kvm *kvm);
extern int kvmppc_switch_mmu_to_radix(struct kvm *kvm);
extern void kvmppc_setup_partition_table(struct kvm *kvm);

extern int kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				struct kvm_create_spapr_tce_64 *args);
#define kvmppc_ioba_validate(stt, ioba, npages)                         \
		(iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \
				(stt)->size, (ioba), (npages)) ?        \
				H_PARAMETER : H_SUCCESS)
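
/*
 * Illustrative sketch of assumed usage (not defined here): a TCE hcall
 * handler would typically validate the I/O block address first, e.g.:
 *
 *	long ret = kvmppc_ioba_validate(stt, ioba, npages);
 *
 *	if (ret != H_SUCCESS)
 *		return ret;
 */
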
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba, unsigned long tce);
extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages);
extern long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba);
extern struct page *kvm_alloc_hpt_cma(unsigned long nr_pages);
extern void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
				     struct kvm_memory_slot *slot);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				const struct kvm_memory_slot *old,
				struct kvm_memory_slot *new,
				enum kvm_mr_change change);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
				      struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *memslot);

extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void);

extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);

extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);
extern int kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
					   struct kvm_ppc_resize_hpt *rhpt);
extern int kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
					  struct kvm_ppc_resize_hpt *rhpt);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);

extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_rtas_tokens_free(struct kvm *kvm);

extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu);
void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu);

union kvmppc_one_reg {
	u32	wval;
	u64	dval;
	vector128 vval;
	u64	vsxval[2];
	u32	vsx32val[4];
	u16	vsx16val[8];
	u8	vsx8val[16];
	struct {
		u64	addr;
		u64	length;
	}	vpaval;
	u64	xive_timaval[2];
};

struct kvmppc_ops {
	struct module *owner;
	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*inject_interrupt)(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);
	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
	int (*vcpu_run)(struct kvm_vcpu *vcpu);
	int (*vcpu_create)(struct kvm_vcpu *vcpu);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*check_requests)(struct kvm_vcpu *vcpu);
	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
	int (*prepare_memory_region)(struct kvm *kvm,
				     const struct kvm_memory_slot *old,
				     struct kvm_memory_slot *new,
				     enum kvm_mr_change change);
	void (*commit_memory_region)(struct kvm *kvm,
				     struct kvm_memory_slot *old,
				     const struct kvm_memory_slot *new,
				     enum kvm_mr_change change);
	bool (*unmap_gfn_range)(struct kvm *kvm, struct kvm_gfn_range *range);
	bool (*age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
	bool (*test_age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
	void (*free_memslot)(struct kvm_memory_slot *slot);
	int (*init_vm)(struct kvm *kvm);
	void (*destroy_vm)(struct kvm *kvm);
	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
	int (*emulate_op)(struct kvm_vcpu *vcpu,
			  unsigned int inst, int *advance);
	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
	int (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
			     unsigned long arg);
	int (*hcall_implemented)(unsigned long hcall);
	int (*irq_bypass_add_producer)(struct irq_bypass_consumer *,
				       struct irq_bypass_producer *);
	void (*irq_bypass_del_producer)(struct irq_bypass_consumer *,
					struct irq_bypass_producer *);
	int (*configure_mmu)(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg);
	int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
	int (*set_smt_mode)(struct kvm *kvm, unsigned long mode,
			    unsigned long flags);
	void (*giveup_ext)(struct kvm_vcpu *vcpu, ulong msr);
	int (*enable_nested)(struct kvm *kvm);
	int (*load_from_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
			       int size);
	int (*store_to_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
			      int size);
	int (*enable_svm)(struct kvm *kvm);
	int (*svm_off)(struct kvm *kvm);
	int (*enable_dawr1)(struct kvm *kvm);
	bool (*hash_v3_possible)(void);
	int (*create_vm_debugfs)(struct kvm *kvm);
	int (*create_vcpu_debugfs)(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry);
};

extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;

static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
				enum instruction_fetch_type type, ppc_inst_t *inst)
{
	int ret = EMULATE_DONE;
	u32 fetched_inst;

	/* Load the instruction manually if it failed to do so in the
	 * exit path */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

	/* Write fetch_failed unswapped if the fetch failed */
	if (ret != EMULATE_DONE) {
		*inst = ppc_inst(KVM_INST_FETCH_FAILED);
		return ret;
	}

#ifdef CONFIG_PPC64
	/* Is this a prefixed instruction? */
	if ((vcpu->arch.last_inst >> 32) != 0) {
		u32 prefix = vcpu->arch.last_inst >> 32;
		u32 suffix = vcpu->arch.last_inst;

		if (kvmppc_need_byteswap(vcpu)) {
			prefix = swab32(prefix);
			suffix = swab32(suffix);
		}
		*inst = ppc_inst_prefix(prefix, suffix);
		return EMULATE_DONE;
	}
#endif

	fetched_inst = kvmppc_need_byteswap(vcpu) ?
		swab32(vcpu->arch.last_inst) :
		vcpu->arch.last_inst;
	*inst = ppc_inst(fetched_inst);
	return EMULATE_DONE;
}
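
/*
 * Illustrative sketch of the assumed calling pattern from an emulation
 * path (names other than kvmppc_get_last_inst() are placeholders):
 *
 *	ppc_inst_t inst;
 *
 *	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE)
 *		return EMULATE_AGAIN;
 *	... decode and emulate 'inst' ...
 */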

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
{
	return kvm->arch.kvm_ops == kvmppc_hv_ops;
}

extern int kvmppc_hwrng_present(void);

/*
 * Extracts inst bits with ordering according to spec.
 * That means the leftmost bit is zero. All given bits are included.
 */
static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = (1 << (lsb - msb + 1)) - 1;
	r = (inst >> (63 - lsb)) & mask;

	return r;
}

/*
 * Replaces inst bits with ordering according to spec.
 */
static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
	r = (inst & ~mask) | ((value << (63 - lsb)) & mask);

	return r;
}

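/*
 * Illustrative sketch (assumed usage, not defined here): with a 32-bit
 * instruction held in the low word of a u64, instruction bit i (IBM
 * numbering over 32 bits) is 64-bit bit i + 32, so extracting the RT field
 * (instruction bits 6-10) would be:
 *
 *	u32 rt = kvmppc_get_field(inst, 6 + 32, 10 + 32);
 *
 * which is equivalent to (inst >> 21) & 0x1f.
 */
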
#define one_reg_size(id)	\
	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

#define get_reg_val(id, reg)	({		\
	union kvmppc_one_reg __u;		\
	switch (one_reg_size(id)) {		\
	case 4: __u.wval = (reg); break;	\
	case 8: __u.dval = (reg); break;	\
	default: BUG();				\
	}					\
	__u;					\
})


#define set_reg_val(id, val)	({		\
	u64 __v;				\
	switch (one_reg_size(id)) {		\
	case 4: __v = (val).wval; break;	\
	case 8: __v = (val).dval; break;	\
	default: BUG();				\
	}					\
	__v;					\
})

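/*
 * Illustrative sketch (assumed usage, not defined here): the ONE_REG id
 * encodes the register size, which selects the union member. For a 64-bit
 * register id this packs and then unpacks dval:
 *
 *	union kvmppc_one_reg reg = get_reg_val(id, kvmppc_get_dar(vcpu));
 *	u64 raw = set_reg_val(id, reg);
 */
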
int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);

struct openpic;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
	paca_ptrs[cpu]->kvm_hstate.xics_phys = (void __iomem *)addr;
}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{
	paca_ptrs[cpu]->kvm_hstate.xive_tima_phys = (void __iomem *)phys_addr;
	paca_ptrs[cpu]->kvm_hstate.xive_tima_virt = virt_addr;
}

static inline u32 kvmppc_get_xics_latch(void)
{
	u32 xirr;

	xirr = get_paca()->kvm_hstate.saved_xirr;
	get_paca()->kvm_hstate.saved_xirr = 0;
	return xirr;
}

/*
 * To avoid the need to unnecessarily exit fully to the host kernel, an IPI to
 * a CPU thread that's running/napping inside of a guest is by default regarded
 * as a request to wake the CPU (if needed) and continue execution within the
 * guest, potentially to process new state like externally-generated
 * interrupts or IPIs sent from within the guest itself (e.g. H_PROD/H_IPI).
 *
 * To force an exit to the host kernel, kvmppc_set_host_ipi() must be called
 * prior to issuing the IPI to set the corresponding 'host_ipi' flag in the
 * target CPU's PACA. To avoid unnecessary exits to the host, this flag should
 * be immediately cleared via kvmppc_clear_host_ipi() by the IPI handler on
 * the receiving side prior to processing the IPI work.
 *
 * NOTE:
 *
 * We currently issue an smp_mb() at the beginning of kvmppc_set_host_ipi().
 * This is to guard against sequences such as the following:
 *
 *      CPU
 *        X: smp_muxed_ipi_set_message():
 *        X:   smp_mb()
 *        X:   message[RESCHEDULE] = 1
 *        X: doorbell_global_ipi(42):
 *        X:   kvmppc_set_host_ipi(42)
 *        X:   ppc_msgsnd_sync()/smp_mb()
 *        X:   ppc_msgsnd() -> 42
 *       42: doorbell_exception(): // from CPU X
 *       42:   ppc_msgsync()
 *      105: smp_muxed_ipi_set_message():
 *      105:   smp_mb()
 *           // STORE DEFERRED DUE TO RE-ORDERING
 *    --105:   message[CALL_FUNCTION] = 1
 *    | 105: doorbell_global_ipi(42):
 *    | 105:   kvmppc_set_host_ipi(42)
 *    |  42:   kvmppc_clear_host_ipi(42)
 *    |  42: smp_ipi_demux_relaxed()
 *    |  42: // returns to executing guest
 *    |      // RE-ORDERED STORE COMPLETES
 *    ->105:   message[CALL_FUNCTION] = 1
 *      105:   ppc_msgsnd_sync()/smp_mb()
 *      105:   ppc_msgsnd() -> 42
 *       42: local_paca->kvm_hstate.host_ipi == 0 // IPI ignored
 *      105: // hangs waiting on 42 to process messages/call_single_queue
 *
 * We also issue an smp_mb() at the end of kvmppc_clear_host_ipi(). This is
 * to guard against sequences such as the following (as well as to create
 * a read-side pairing with the barrier in kvmppc_set_host_ipi()):
 *
 *      CPU
 *        X: smp_muxed_ipi_set_message():
 *        X:   smp_mb()
 *        X:   message[RESCHEDULE] = 1
 *        X: doorbell_global_ipi(42):
 *        X:   kvmppc_set_host_ipi(42)
 *        X:   ppc_msgsnd_sync()/smp_mb()
 *        X:   ppc_msgsnd() -> 42
 *       42: doorbell_exception(): // from CPU X
 *       42:   ppc_msgsync()
 *           // STORE DEFERRED DUE TO RE-ORDERING
 *    -- 42:   kvmppc_clear_host_ipi(42)
 *    |  42: smp_ipi_demux_relaxed()
 *    | 105: smp_muxed_ipi_set_message():
 *    | 105:   smp_mb()
 *    | 105:   message[CALL_FUNCTION] = 1
 *    | 105: doorbell_global_ipi(42):
 *    | 105:   kvmppc_set_host_ipi(42)
 *    |      // RE-ORDERED STORE COMPLETES
 *    -> 42:   kvmppc_clear_host_ipi(42)
 *       42: // returns to executing guest
 *      105:   ppc_msgsnd_sync()/smp_mb()
 *      105:   ppc_msgsnd() -> 42
 *       42: local_paca->kvm_hstate.host_ipi == 0 // IPI ignored
 *      105: // hangs waiting on 42 to process messages/call_single_queue
 */
static inline void kvmppc_set_host_ipi(int cpu)
{
	/*
	 * order stores of IPI messages vs. setting of host_ipi flag
	 *
	 * pairs with the barrier in kvmppc_clear_host_ipi()
	 */
	smp_mb();
	WRITE_ONCE(paca_ptrs[cpu]->kvm_hstate.host_ipi, 1);
}

static inline void kvmppc_clear_host_ipi(int cpu)
{
	WRITE_ONCE(paca_ptrs[cpu]->kvm_hstate.host_ipi, 0);
	/*
	 * order clearing of host_ipi flag vs. processing of IPI messages
	 *
	 * pairs with the barrier in kvmppc_set_host_ipi()
	 */
	smp_mb();
}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
}

extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);

extern void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu);

#else
static inline void __init kvm_cma_reserve(void)
{}

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{}

static inline u32 kvmppc_get_xics_latch(void)
{
	return 0;
}

static inline void kvmppc_set_host_ipi(int cpu)
{}

static inline void kvmppc_clear_host_ipi(int cpu)
{}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_kick(vcpu);
}

static inline bool kvm_hv_mode_active(void)		{ return false; }

#endif

#ifdef CONFIG_PPC_PSERIES
static inline bool kvmhv_on_pseries(void)
{
	return !cpu_has_feature(CPU_FTR_HVMODE);
}
#else
static inline bool kvmhv_on_pseries(void)
{
	return false;
}

#endif

#ifndef CONFIG_PPC_BOOK3S

static inline bool kvmhv_is_nestedv2(void)
{
	return false;
}

static inline bool kvmhv_is_nestedv1(void)
{
	return false;
}

static inline int kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu,
					       struct pt_regs *regs)
{
	return 0;
}

static inline int kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu,
						   struct pt_regs *regs)
{
	return 0;
}

static inline int kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden)
{
	return 0;
}

static inline int kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden)
{
	return 0;
}

#endif

#ifdef CONFIG_KVM_XICS
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}

static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
{
	if (kvm && kvm_irq_bypass)
		return kvm->arch.pimap;
	return NULL;
}

extern void kvmppc_alloc_host_rm_ops(void);
extern void kvmppc_free_host_rm_ops(void);
extern void kvmppc_free_pimap(struct kvm *kvm);
extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
extern int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
			struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xics_ipi_action(void);
extern void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
					struct kvmppc_irq_map *irq_map,
					struct kvmppc_passthru_irqmap *pimap,
					bool *again);

extern int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);

extern int h_ipi_redirect;
#else
static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
	{ return NULL; }
static inline void kvmppc_alloc_host_rm_ops(void) {}
static inline void kvmppc_free_host_rm_ops(void) {}
static inline void kvmppc_free_pimap(struct kvm *kvm) {}
static inline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
	{ return 0; }
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
	{ return 0; }
static inline int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
	{ return 0; }
#endif

#ifdef CONFIG_KVM_XIVE
/*
 * Below, the first "xive" is the "eXternal Interrupt Virtualization Engine",
 * i.e. the new P9 interrupt controller, while the second "xive" is the
 * legacy "eXternal Interrupt Vector Entry", which is the configuration of an
 * interrupt on the "xics" interrupt controller on P8 and earlier. These
 * two functions consume or produce a legacy "XIVE" state from the
 * new "XIVE" interrupt controller.
 */
extern int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);

extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				  unsigned long host_irq);
extern int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				  unsigned long host_irq);
extern u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);

extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);
extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu);
extern bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu);

static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XIVE;
}

extern int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
				     union kvmppc_one_reg *val);
extern int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
				     union kvmppc_one_reg *val);
extern bool kvmppc_xive_native_supported(void);

#else
static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				       u32 priority) { return -1; }
static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				       u32 *priority) { return -1; }
static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }

static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
static inline void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
					 struct irq_desc *host_desc) { return -ENODEV; }
static inline int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
					 struct irq_desc *host_desc) { return -ENODEV; }
static inline u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) { return 0; }
static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { return -ENOENT; }

static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
				      int level, bool line_status) { return -ENODEV; }
static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }
static inline void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu) { }
static inline bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu) { return true; }

static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
			  struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
static inline void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
					    union kvmppc_one_reg *val)
{ return 0; }
static inline int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
					    union kvmppc_one_reg *val)
{ return -ENOENT; }

#endif /* CONFIG_KVM_XIVE */

#if defined(CONFIG_PPC_POWERNV) && defined(CONFIG_KVM_BOOK3S_64_HANDLER)
static inline bool xics_on_xive(void)
{
	return xive_enabled() && cpu_has_feature(CPU_FTR_HVMODE);
}
#else
static inline bool xics_on_xive(void)
{
	return false;
}
#endif

/*
 * Prototypes for functions called only from assembler code.
 * Having prototypes reduces sparse errors.
 */
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			 unsigned long ioba, unsigned long tce);
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
				  unsigned long liobn, unsigned long ioba,
				  unsigned long tce_list, unsigned long npages);
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
			   unsigned long liobn, unsigned long ioba,
			   unsigned long tce_value, unsigned long npages);
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count);
long kvmppc_rm_h_random(struct kvm_vcpu *vcpu);
void kvmhv_commence_exit(int trap);
void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
void kvmppc_subcore_enter_guest(void);
void kvmppc_subcore_exit_guest(void);
long kvmppc_realmode_hmi_handler(void);
long kvmppc_p9_realmode_hmi_handler(struct kvm_vcpu *vcpu);
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel);
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn);
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu);
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn);
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index);
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
			   unsigned long dest, unsigned long src);
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data);
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu);

/*
 * Host-side operations we want to set up while running in real
 * mode in the guest, operating on the XICS.
 * Currently only VCPU wakeup is supported.
 */

union kvmppc_rm_state {
	unsigned long raw;
	struct {
		u32 in_host;
		u32 rm_action;
	};
};

struct kvmppc_host_rm_core {
	union kvmppc_rm_state rm_state;
	void *rm_data;
	char pad[112];
};

struct kvmppc_host_rm_ops {
	struct kvmppc_host_rm_core	*rm_core;
	void		(*vcpu_kick)(struct kvm_vcpu *vcpu);
};

extern struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;

static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GEPR);
#elif defined(CONFIG_BOOKE)
	return vcpu->arch.epr;
#else
	return 0;
#endif
}

static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GEPR, epr);
#elif defined(CONFIG_BOOKE)
	vcpu->arch.epr = epr;
#endif
}

#ifdef CONFIG_KVM_MPIC

void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 cpu);
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);

#else

static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
}

static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
		struct kvm_vcpu *vcpu, u32 cpu)
{
	return -EINVAL;
}

static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
		struct kvm_vcpu *vcpu)
{
}

#endif /* CONFIG_KVM_MPIC */

int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *cfg);

long kvmppc_alloc_lpid(void);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);

static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
{
	struct folio *folio;
	/*
	 * We can only access pages that the kernel maps
	 * as memory. Bail out for unmapped ones.
	 */
	if (!pfn_valid(pfn))
		return;

	/* Clear i-cache for new pages */
	folio = page_folio(pfn_to_page(pfn));
	if (!test_bit(PG_dcache_clean, &folio->flags)) {
		flush_dcache_icache_folio(folio);
		set_bit(PG_dcache_clean, &folio->flags);
	}
}

/*
 * Shared struct helpers. The shared struct can be little or big endian,
 * depending on the guest endianness, so expose helpers that handle both.
 */
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
	/* Only Book3S_64 PR supports bi-endian for now */
	return vcpu->arch.shared_big_endian;
#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
	/* Book3s_64 HV on little endian is always little endian */
	return false;
#else
	return true;
#endif
}

#define KVMPPC_BOOKE_HV_SPRNG_ACCESSOR_GET(reg, bookehv_spr)		\
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	return mfspr(bookehv_spr);					\
}									\

#define KVMPPC_BOOKE_HV_SPRNG_ACCESSOR_SET(reg, bookehv_spr)		\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)	\
{									\
	mtspr(bookehv_spr, val);					\
}									\

#define KVMPPC_VCPU_SHARED_REGS_ACCESSOR_GET(reg, size, iden)		\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	if (iden)							\
		WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden) < 0);	\
	if (kvmppc_shared_big_endian(vcpu))				\
		return be##size##_to_cpu((__be##size __force)vcpu->arch.shared->reg);	\
	else								\
		return le##size##_to_cpu((__le##size __force)vcpu->arch.shared->reg);	\
}									\

#define KVMPPC_VCPU_SHARED_REGS_ACCESSOR_SET(reg, size, iden)		\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
		vcpu->arch.shared->reg = (u##size __force)cpu_to_be##size(val);	\
	else								\
		vcpu->arch.shared->reg = (u##size __force)cpu_to_le##size(val);	\
									\
	if (iden)							\
		kvmhv_nestedv2_mark_dirty(vcpu, iden);			\
}									\

#define KVMPPC_VCPU_SHARED_REGS_ACCESSOR(reg, size, iden)		\
	KVMPPC_VCPU_SHARED_REGS_ACCESSOR_GET(reg, size, iden)		\
	KVMPPC_VCPU_SHARED_REGS_ACCESSOR_SET(reg, size, iden)		\

#define KVMPPC_BOOKE_HV_SPRNG_ACCESSOR(reg, bookehv_spr)		\
	KVMPPC_BOOKE_HV_SPRNG_ACCESSOR_GET(reg, bookehv_spr)		\
	KVMPPC_BOOKE_HV_SPRNG_ACCESSOR_SET(reg, bookehv_spr)		\

#ifdef CONFIG_KVM_BOOKE_HV

#define KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(reg, size, bookehv_spr, iden)	\
	KVMPPC_BOOKE_HV_SPRNG_ACCESSOR(reg, bookehv_spr)		\

#else

#define KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(reg, size, bookehv_spr, iden)	\
	KVMPPC_VCPU_SHARED_REGS_ACCESSOR(reg, size, iden)		\

#endif

KVMPPC_VCPU_SHARED_REGS_ACCESSOR(critical, 64, 0)
KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg0, 64, SPRN_GSPRG0, KVMPPC_GSID_SPRG0)
KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg1, 64, SPRN_GSPRG1, KVMPPC_GSID_SPRG1)
KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg2, 64, SPRN_GSPRG2, KVMPPC_GSID_SPRG2)
KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg3, 64, SPRN_GSPRG3, KVMPPC_GSID_SPRG3)
KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(srr0, 64, SPRN_GSRR0, KVMPPC_GSID_SRR0)
KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(srr1, 64, SPRN_GSRR1, KVMPPC_GSID_SRR1)
KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(dar, 64, SPRN_GDEAR, KVMPPC_GSID_DAR)
KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(esr, 64, SPRN_GESR, 0)
KVMPPC_VCPU_SHARED_REGS_ACCESSOR_GET(msr, 64, KVMPPC_GSID_MSR)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
	if (kvmppc_shared_big_endian(vcpu))
	       vcpu->arch.shared->msr = cpu_to_be64(val);
	else
	       vcpu->arch.shared->msr = cpu_to_le64(val);
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_MSR);
}
KVMPPC_VCPU_SHARED_REGS_ACCESSOR(dsisr, 32, KVMPPC_GSID_DSISR)
KVMPPC_VCPU_SHARED_REGS_ACCESSOR(int_pending, 32, 0)
KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg4, 64, 0)
KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg5, 64, 0)
KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg6, 64, 0)
KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg7, 64, 0)

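/*
 * Illustrative sketch (assumed behaviour, summarising the macros above):
 * on non-BOOKE-HV configurations the sprg4 line expands to accessors that
 * read and write the shared page with the right endianness, so callers can
 * simply do:
 *
 *	u64 v = kvmppc_get_sprg4(vcpu);
 *	kvmppc_set_sprg4(vcpu, v + 1);
 *
 * with the byte order chosen via kvmppc_shared_big_endian().
 */
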
static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
	if (kvmppc_shared_big_endian(vcpu))
	       return be32_to_cpu(vcpu->arch.shared->sr[nr]);
	else
	       return le32_to_cpu(vcpu->arch.shared->sr[nr]);
}

static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
{
	if (kvmppc_shared_big_endian(vcpu))
	       vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
	else
	       vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
}

/*
 * Call this after prepare_to_enter. This function puts the lazy ee and irq
 * disabled tracking state back to normal mode, without actually enabling
 * interrupts.
 */
static inline void kvmppc_fix_ee_before_entry(void)
{
	trace_hardirqs_on();

#ifdef CONFIG_PPC64
	/*
	 * To avoid races, the caller must have gone directly from having
	 * interrupts fully-enabled to hard-disabled.
	 */
	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = 0;
	irq_soft_mask_set(IRQS_ENABLED);
#endif
}

static inline void kvmppc_fix_ee_after_exit(void)
{
#ifdef CONFIG_PPC64
	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = PACA_IRQ_HARD_DIS;
	irq_soft_mask_set(IRQS_ALL_DISABLED);
#endif

	trace_hardirqs_off();
}

static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
{
	ulong ea;
	ulong msr_64bit = 0;

	ea = kvmppc_get_gpr(vcpu, rb);
	if (ra)
		ea += kvmppc_get_gpr(vcpu, ra);

#if defined(CONFIG_PPC_BOOK3E_64)
	msr_64bit = MSR_CM;
#elif defined(CONFIG_PPC_BOOK3S_64)
	msr_64bit = MSR_SF;
#endif

	if (!(kvmppc_get_msr(vcpu) & msr_64bit))
		ea = (uint32_t)ea;

	return ea;
}
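
/*
 * Illustrative sketch (assumed usage; get_ra()/get_rb() stand in for the
 * caller's field decode): for an X-form load such as lwzx rt, ra, rb, the
 * effective address would be computed as
 *
 *	ulong ea = kvmppc_get_ea_indexed(vcpu, get_ra(inst), get_rb(inst));
 *
 * with the result truncated to 32 bits when the guest is not in 64-bit mode.
 */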

extern void xics_wake_cpu(int cpu);

#endif /* __POWERPC_KVM_PPC_H__ */