/* SPDX-License-Identifier: GPL-2.0 */
/*
 * definition for kvm on s390
 *
 * Copyright IBM Corp. 2008, 2020
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#ifndef ARCH_S390_KVM_S390_H
#define ARCH_S390_KVM_S390_H

#include <linux/hrtimer.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/lockdep.h>
#include <asm/facility.h>
#include <asm/processor.h>
#include <asm/sclp.h>

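/*
 * Save the current CPU's floating-point state (FPC plus vector or
 * floating-point registers) into the kvm_run sync regs.
 */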
static inline void kvm_s390_fpu_store(struct kvm_run *run)
{
	fpu_stfpc(&run->s.regs.fpc);
	if (cpu_has_vx())
		save_vx_regs((__vector128 *)&run->s.regs.vrs);
	else
		save_fp_regs((freg_t *)&run->s.regs.fprs);
}

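/*
 * Restore the floating-point state from the kvm_run sync regs;
 * fpu_lfpc_safe() is used to cope with a potentially invalid FPC value.
 */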
static inline void kvm_s390_fpu_load(struct kvm_run *run)
{
	fpu_lfpc_safe(&run->s.regs.fpc);
	if (cpu_has_vx())
		load_vx_regs((__vector128 *)&run->s.regs.vrs);
	else
		load_fp_regs((freg_t *)&run->s.regs.fprs);
}

/* Transactional Memory Execution related macros */
#define IS_TE_ENABLED(vcpu)	((vcpu->arch.sie_block->ecb & ECB_TE))
#define TDB_FORMAT1		1
#define IS_ITDB_VALID(vcpu) \
	((*(char *)phys_to_virt((vcpu)->arch.sie_block->itdba) == TDB_FORMAT1))

extern debug_info_t *kvm_s390_dbf;
extern debug_info_t *kvm_s390_dbf_uv;

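/*
 * Debug event macros: KVM_EVENT logs to the global kvm-s390 debug feature,
 * VM_EVENT to the per-VM debug feature, VCPU_EVENT additionally records the
 * vcpu id and guest PSW, and KVM_UV_EVENT logs to both the per-VM and the
 * global ultravisor debug feature. An illustrative call:
 *
 *	VCPU_EVENT(vcpu, 3, "%s", "some event happened");
 */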
#define KVM_UV_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
do { \
	debug_sprintf_event((d_kvm)->arch.dbf, d_loglevel, d_string "\n", \
	  d_args); \
	debug_sprintf_event(kvm_s390_dbf_uv, d_loglevel, \
			    "%d: " d_string "\n", (d_kvm)->userspace_pid, \
			    d_args); \
} while (0)

#define KVM_EVENT(d_loglevel, d_string, d_args...)\
do { \
	debug_sprintf_event(kvm_s390_dbf, d_loglevel, d_string "\n", \
	  d_args); \
} while (0)

#define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
do { \
	debug_sprintf_event(d_kvm->arch.dbf, d_loglevel, d_string "\n", \
	  d_args); \
} while (0)

#define VCPU_EVENT(d_vcpu, d_loglevel, d_string, d_args...)\
do { \
	debug_sprintf_event(d_vcpu->kvm->arch.dbf, d_loglevel, \
	  "%02d[%016lx-%016lx]: " d_string "\n", d_vcpu->vcpu_id, \
	  d_vcpu->arch.sie_block->gpsw.mask, d_vcpu->arch.sie_block->gpsw.addr,\
	  d_args); \
} while (0)

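/*
 * Atomically set, clear or test flags in the SIE block's cpuflags field.
 * The test helper returns true only if all requested flags are set.
 */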
static inline void kvm_s390_set_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
{
	atomic_or(flags, &vcpu->arch.sie_block->cpuflags);
}

static inline void kvm_s390_clear_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
{
	atomic_andnot(flags, &vcpu->arch.sie_block->cpuflags);
}

static inline bool kvm_s390_test_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
{
	return (atomic_read(&vcpu->arch.sie_block->cpuflags) & flags) == flags;
}

static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
{
	return kvm_s390_test_cpuflags(vcpu, CPUSTAT_STOPPED);
}

static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
{
	return test_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
}

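/*
 * A user-controlled (ucontrol) VM has no kernel gmap attached; its guest
 * address spaces are managed by user space instead.
 */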
static inline int kvm_is_ucontrol(struct kvm *kvm)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if (kvm->arch.gmap)
		return 0;
	return 1;
#else
	return 0;
#endif
}

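/*
 * The guest prefix is kept in the SIE block shifted right by
 * GUEST_PREFIX_SHIFT, i.e. in units of 8k. Changing it requires a TLB flush
 * and a refresh of the guest prefix mapping.
 */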
#define GUEST_PREFIX_SHIFT 13
static inline u32 kvm_s390_get_prefix(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.sie_block->prefix << GUEST_PREFIX_SHIFT;
}

static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
{
	VCPU_EVENT(vcpu, 3, "set prefix of cpu %03u to 0x%x", vcpu->vcpu_id,
		   prefix);
	vcpu->arch.sie_block->prefix = prefix >> GUEST_PREFIX_SHIFT;
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	kvm_make_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
}

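/*
 * Helpers to decode the base/displacement (and register) operands of an
 * intercepted instruction from the instruction parameter block (ipb) of the
 * SIE block, one helper per instruction format (S, SIY, SSE, RRE, RSY, RS).
 * The optional *ar output receives the access register number, which equals
 * the base register field.
 */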
static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, u8 *ar)
{
	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);

	if (ar)
		*ar = base2;

	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
}

static inline u64 kvm_s390_get_base_disp_siy(struct kvm_vcpu *vcpu, u8 *ar)
{
	u32 base1 = vcpu->arch.sie_block->ipb >> 28;
	s64 disp1;

	/* The displacement is a 20bit _SIGNED_ value */
	disp1 = sign_extend64(((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
			      ((vcpu->arch.sie_block->ipb & 0xff00) << 4), 19);

	if (ar)
		*ar = base1;

	return (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1;
}

static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
					      u64 *address1, u64 *address2,
					      u8 *ar_b1, u8 *ar_b2)
{
	u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
	u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
	u32 base2 = (vcpu->arch.sie_block->ipb & 0xf000) >> 12;
	u32 disp2 = vcpu->arch.sie_block->ipb & 0x0fff;

	*address1 = (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1;
	*address2 = (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;

	if (ar_b1)
		*ar_b1 = base1;
	if (ar_b2)
		*ar_b2 = base2;
}

static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2)
{
	if (r1)
		*r1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 20;
	if (r2)
		*r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
}

static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, u8 *ar)
{
	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
			((vcpu->arch.sie_block->ipb & 0xff00) << 4);
	/* The displacement is a 20bit _SIGNED_ value */
	if (disp2 & 0x80000)
		disp2 += 0xfff00000;

	if (ar)
		*ar = base2;

	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + (long)(int)disp2;
}

static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu, u8 *ar)
{
	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);

	if (ar)
		*ar = base2;

	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
}

/* Set the condition code in the guest program status word */
static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc)
{
	vcpu->arch.sie_block->gpsw.mask &= ~(3UL << 44);
	vcpu->arch.sie_block->gpsw.mask |= cc << 44;
}

/* test availability of facility in a kvm instance */
static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr)
{
	return __test_facility(nr, kvm->arch.model.fac_mask) &&
		__test_facility(nr, kvm->arch.model.fac_list);
}

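/*
 * Facility bits are numbered MSB first within each byte: bit nr lives in
 * byte nr / 8 under the mask 0x80 >> (nr & 7).
 */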
static inline int set_kvm_facility(u64 *fac_list, unsigned long nr)
{
	unsigned char *ptr;

	if (nr >= MAX_FACILITY_BIT)
		return -EINVAL;
	ptr = (unsigned char *) fac_list + (nr >> 3);
	*ptr |= (0x80UL >> (nr & 7));
	return 0;
}

static inline int test_kvm_cpu_feat(struct kvm *kvm, unsigned long nr)
{
	WARN_ON_ONCE(nr >= KVM_S390_VM_CPU_FEAT_NR_BITS);
	return test_bit_inv(nr, kvm->arch.cpu_feat);
}

/* are cpu states controlled by user space */
static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm)
{
	return kvm->arch.user_cpu_state_ctrl != 0;
}

static inline void kvm_s390_set_user_cpu_state_ctrl(struct kvm *kvm)
{
	if (kvm->arch.user_cpu_state_ctrl)
		return;

	VM_EVENT(kvm, 3, "%s", "ENABLE: Userspace CPU state control");
	kvm->arch.user_cpu_state_ctrl = 1;
}

/* get the end gfn of the last (highest gfn) memslot */
static inline unsigned long kvm_s390_get_gfn_end(struct kvm_memslots *slots)
{
	struct rb_node *node;
	struct kvm_memory_slot *ms;

	if (WARN_ON(kvm_memslots_empty(slots)))
		return 0;

	node = rb_last(&slots->gfn_tree);
	ms = container_of(node, struct kvm_memory_slot, gfn_node[slots->node_idx]);
	return ms->base_gfn + ms->npages;
}

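/*
 * Return the GISA designation for the SIE block: the physical origin of the
 * guest interruption state area, with the format-1 bit set when the GISA
 * format facility is available.
 */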
static inline u32 kvm_s390_get_gisa_desc(struct kvm *kvm)
{
	u32 gd;

	if (!kvm->arch.gisa_int.origin)
		return 0;

	gd = virt_to_phys(kvm->arch.gisa_int.origin);

	if (gd && sclp.has_gisaf)
		gd |= GISA_FORMAT1;
	return gd;
}

/* implemented in pv.c */
int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc);
int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc);
int kvm_s390_pv_set_aside(struct kvm *kvm, u16 *rc, u16 *rrc);
int kvm_s390_pv_deinit_aside_vm(struct kvm *kvm, u16 *rc, u16 *rrc);
int kvm_s390_pv_deinit_cleanup_all(struct kvm *kvm, u16 *rc, u16 *rrc);
int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc);
int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc);
int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
			      u16 *rrc);
int kvm_s390_pv_unpack(struct kvm *kvm, unsigned long addr, unsigned long size,
		       unsigned long tweak, u16 *rc, u16 *rrc);
int kvm_s390_pv_set_cpu_state(struct kvm_vcpu *vcpu, u8 state);
int kvm_s390_pv_dump_cpu(struct kvm_vcpu *vcpu, void *buff, u16 *rc, u16 *rrc);
int kvm_s390_pv_dump_stor_state(struct kvm *kvm, void __user *buff_user,
				u64 *gaddr, u64 buff_user_len, u16 *rc, u16 *rrc);
int kvm_s390_pv_dump_complete(struct kvm *kvm, void __user *buff_user,
			      u16 *rc, u16 *rrc);

static inline u64 kvm_s390_pv_get_handle(struct kvm *kvm)
{
	return kvm->arch.pv.handle;
}

static inline u64 kvm_s390_pv_cpu_get_handle(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.pv.handle;
}

/* implemented in interrupt.c */
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu);
void kvm_s390_clear_float_irqs(struct kvm *kvm);
int __must_check kvm_s390_inject_vm(struct kvm *kvm,
				    struct kvm_s390_interrupt *s390int);
int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
				      struct kvm_s390_irq *irq);
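/*
 * Convenience wrappers that build a KVM_S390_PROGRAM_INT irq, either from a
 * complete pgm_info or from a bare program interruption code, and inject it
 * into the vcpu.
 */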
static inline int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
					   struct kvm_s390_pgm_info *pgm_info)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_PROGRAM_INT,
		.u.pgm = *pgm_info,
	};

	return kvm_s390_inject_vcpu(vcpu, &irq);
}

static inline int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_PROGRAM_INT,
		.u.pgm.code = code,
	};

	return kvm_s390_inject_vcpu(vcpu, &irq);
}

struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
						    u64 isc_mask, u32 schid);
int kvm_s390_reinject_io_int(struct kvm *kvm,
			     struct kvm_s390_interrupt_info *inti);
int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked);

/* implemented in intercept.c */
u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu);
int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu);
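/*
 * Adjust the guest PSW address by ilen bytes: rewinding steps back (e.g. to
 * re-execute an intercepted instruction), forwarding steps ahead.
 */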
static inline void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilen)
{
	struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;

	sie_block->gpsw.addr = __rewind_psw(sie_block->gpsw, ilen);
}

static inline void kvm_s390_forward_psw(struct kvm_vcpu *vcpu, int ilen)
{
	kvm_s390_rewind_psw(vcpu, -ilen);
}

static inline void kvm_s390_retry_instr(struct kvm_vcpu *vcpu)
{
	/* don't inject PER events if we re-execute the instruction */
	vcpu->arch.sie_block->icptstatus &= ~0x02;
	kvm_s390_rewind_psw(vcpu, kvm_s390_get_ilen(vcpu));
}

int handle_sthyi(struct kvm_vcpu *vcpu);

/* implemented in priv.c */
int is_valid_psw(psw_t *psw);
int kvm_s390_handle_aa(struct kvm_vcpu *vcpu);
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
int kvm_s390_handle_e3(struct kvm_vcpu *vcpu);
int kvm_s390_handle_e5(struct kvm_vcpu *vcpu);
int kvm_s390_handle_01(struct kvm_vcpu *vcpu);
int kvm_s390_handle_b9(struct kvm_vcpu *vcpu);
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu);
int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu);
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu);
int kvm_s390_handle_eb(struct kvm_vcpu *vcpu);
int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu);

/* implemented in vsie.c */
int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu);
void kvm_s390_vsie_kick(struct kvm_vcpu *vcpu);
void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
				 unsigned long end);
void kvm_s390_vsie_init(struct kvm *kvm);
void kvm_s390_vsie_destroy(struct kvm *kvm);

/* implemented in sigp.c */
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);

/* implemented in kvm-s390.c */
int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu);
bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu);
void exit_sie(struct kvm_vcpu *vcpu);
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu);
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu);
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm);
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu);
int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rc, u16 *rrc);

/* implemented in diag.c */
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);

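/*
 * Block/unblock SIE for all vcpus of a VM. The caller must hold kvm->lock
 * when blocking.
 */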
static inline void kvm_s390_vcpu_block_all(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	WARN_ON(!mutex_is_locked(&kvm->lock));
	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_s390_vcpu_block(vcpu);
}

static inline void kvm_s390_vcpu_unblock_all(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_s390_vcpu_unblock(vcpu);
}

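/*
 * The guest's TOD clock is the host TOD clock plus the per-VM epoch offset;
 * the sum is computed with preemption disabled.
 */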
static inline u64 kvm_s390_get_tod_clock_fast(struct kvm *kvm)
{
	u64 rc;

	preempt_disable();
	rc = get_tod_clock_fast() + kvm->arch.epoch;
	preempt_enable();
	return rc;
}

/**
 * kvm_s390_inject_prog_cond - conditionally inject a program check
 * @vcpu: virtual cpu
 * @rc: original return/error code
 *
 * This function should be used after a regular guest access function has
 * failed, to conditionally inject a program check to a vcpu. The typical
 * pattern looks like
 *
 * rc = write_guest(vcpu, addr, data, len);
 * if (rc)
 *	return kvm_s390_inject_prog_cond(vcpu, rc);
 *
 * A negative return code from guest access functions implies an internal
 * error, e.g. out of memory. In these cases no program check should be
 * injected into the guest.
 * A positive value implies that an exception happened while accessing guest
 * memory. In this case all data belonging to the corresponding program check
 * has been stored in vcpu->arch.pgm and can be injected with
 * kvm_s390_inject_prog_irq().
 *
 * Returns: - the original @rc value if @rc was negative (internal error)
 *	    - zero if @rc was already zero
 *	    - zero or error code from injecting if @rc was positive
 *	      (program check injected to @vcpu)
 */
static inline int kvm_s390_inject_prog_cond(struct kvm_vcpu *vcpu, int rc)
{
	if (rc <= 0)
		return rc;
	return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
}

int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
			struct kvm_s390_irq *s390irq);

/* implemented in interrupt.c */
int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop);
int psw_extint_disabled(struct kvm_vcpu *vcpu);
void kvm_s390_destroy_adapters(struct kvm *kvm);
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu);
extern struct kvm_device_ops kvm_flic_ops;
int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu);
int kvm_s390_is_restart_irq_pending(struct kvm_vcpu *vcpu);
void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu);
int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu,
			   void __user *buf, int len);
int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu,
			   __u8 __user *buf, int len);
void kvm_s390_gisa_init(struct kvm *kvm);
void kvm_s390_gisa_clear(struct kvm *kvm);
void kvm_s390_gisa_destroy(struct kvm *kvm);
void kvm_s390_gisa_disable(struct kvm *kvm);
void kvm_s390_gisa_enable(struct kvm *kvm);
int __init kvm_s390_gib_init(u8 nisc);
void kvm_s390_gib_destroy(void);

/* implemented in guestdbg.c */
void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu);
void kvm_s390_restore_guest_per_regs(struct kvm_vcpu *vcpu);
void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu);
int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
			    struct kvm_guest_debug *dbg);
void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu);
void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu);
int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu);
int kvm_s390_handle_per_event(struct kvm_vcpu *vcpu);

/* support for Basic/Extended SCA handling */
static inline union ipte_control *kvm_s390_get_ipte_control(struct kvm *kvm)
{
	struct bsca_block *sca = kvm->arch.sca; /* SCA version doesn't matter */

	return &sca->ipte_control;
}

static inline int kvm_s390_use_sca_entries(void)
{
	/*
	 * Without SIGP interpretation, only SRS interpretation (if available)
	 * might use the entries. By not setting the entries and keeping them
	 * invalid, hardware will not access them but intercept.
	 */
	return sclp.has_sigpif;
}

void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu,
				     struct mcck_volatile_info *mcck_info);

/**
 * kvm_s390_vcpu_crypto_reset_all
 *
 * Reset the crypto attributes for each vcpu. This can be done while the
 * vcpus are running, as each vcpu will be removed from SIE before resetting
 * the crypto attributes and restored to SIE afterward.
 *
 * Note: The kvm->lock must be held while calling this function
 *
 * @kvm: the KVM guest
 */
void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm);

/**
 * kvm_s390_vcpu_pci_enable_interp
 *
 * Set the associated PCI attributes for each vcpu to allow for zPCI Load/Store
 * interpretation as well as adapter interruption forwarding.
 *
 * @kvm: the KVM guest
 */
void kvm_s390_vcpu_pci_enable_interp(struct kvm *kvm);

/**
 * diag9c_forwarding_hz
 *
 * Maximum number of diag9c forwarding operations per second
 */
extern unsigned int diag9c_forwarding_hz;

#endif